Compare commits
3034 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
a4e329c18b | ||
|
|
ca204ddd2f | ||
|
|
ff08f9d798 | ||
|
|
ac11473833 | ||
|
|
09fd83ab9b | ||
|
|
6699d33760 | ||
|
|
f7c8377abf | ||
|
|
0dcc0e0504 | ||
|
|
5f41899705 | ||
|
|
5e060b2222 | ||
|
|
6f04c25e3d | ||
|
|
375cce29c6 | ||
|
|
67518a59ac | ||
|
|
a3ea8ecac5 | ||
|
|
497872693f | ||
|
|
748a84d871 | ||
|
|
d5dac84e12 | ||
|
|
75e1b40fb4 | ||
|
|
5eedf782f4 | ||
|
|
1949425ab9 | ||
|
|
0a80ec80e3 | ||
|
|
a22a5b9e72 | ||
|
|
3fe4fd4c35 | ||
|
|
827a4498e0 | ||
|
|
8dbbd94299 | ||
|
|
6b0cf4663d | ||
|
|
dc5d42addc | ||
|
|
ef967d8f8a | ||
|
|
27ffc7f373 | ||
|
|
9e5a6351fc | ||
|
|
bcf4aedcde | ||
|
|
11cf23da7d | ||
|
|
eea6f38881 | ||
|
|
2489ea3699 | ||
|
|
0b85a8da88 | ||
|
|
327da8e260 | ||
|
|
00778dca31 | ||
|
|
79aff2df31 | ||
|
|
f35e967516 | ||
|
|
6449da6c8d | ||
|
|
755c7d5026 | ||
|
|
1da4bd72df | ||
|
|
5551349349 | ||
|
|
c6d25f69d5 | ||
|
|
45065c23d5 | ||
|
|
ddf80f5ea1 | ||
|
|
c048ca80a4 | ||
|
|
22385be515 | ||
|
|
4d0483f5b8 | ||
|
|
6b19490393 | ||
|
|
1e0d466002 | ||
|
|
9de7a72cce | ||
|
|
66b3acc274 | ||
|
|
0bc3a521b5 | ||
|
|
3419cb0112 | ||
|
|
a94d89efa7 | ||
|
|
66680a3056 | ||
|
|
ad4600964e | ||
|
|
82259d1380 | ||
|
|
ca4e38aa01 | ||
|
|
1aab084ecb | ||
|
|
36aed35957 | ||
|
|
32107b4f95 | ||
|
|
3d29f7c2fa | ||
|
|
01a991f56f | ||
|
|
6696e61c7b | ||
|
|
81c827ee51 | ||
|
|
83cad63ce0 | ||
|
|
06136af805 | ||
|
|
6ad333d6b2 | ||
|
|
29caf85104 | ||
|
|
d6a04bb772 | ||
|
|
c548021921 | ||
|
|
b2e0712190 | ||
|
|
767f2f2dfe | ||
|
|
1ffebbb568 | ||
|
|
be9df2bea7 | ||
|
|
9d5e9bbc18 | ||
|
|
454873221c | ||
|
|
18481a100b | ||
|
|
ca1f30a911 | ||
|
|
84628108fc | ||
|
|
dd314c41e3 | ||
|
|
c229f33e9e | ||
|
|
8eb3f9e789 | ||
|
|
7fbd5177c2 | ||
|
|
fdf72eb511 | ||
|
|
b13e34f831 | ||
|
|
6d51834a95 | ||
|
|
863258d782 | ||
|
|
287f2f56d6 | ||
|
|
525a320424 | ||
|
|
0f4a8d7be8 | ||
|
|
d4c0a99114 | ||
|
|
89d09838d8 | ||
|
|
0d87f94cb7 | ||
|
|
9bf8ab7048 | ||
|
|
dcbddef611 | ||
|
|
906802abe3 | ||
|
|
da1d26001f | ||
|
|
a13ae5a0da | ||
|
|
e4cfcae652 | ||
|
|
11db3989ce | ||
|
|
40f7e832b4 | ||
|
|
b22d00e541 | ||
|
|
54dc176725 | ||
|
|
d5819181ea | ||
|
|
c0371e9104 | ||
|
|
65d3bd728b | ||
|
|
20062b44dc | ||
|
|
a6b919eb53 | ||
|
|
1f81b77911 | ||
|
|
6cd7c60549 | ||
|
|
25a5035503 | ||
|
|
9dae6c7aee | ||
|
|
ff4ef1b574 | ||
|
|
84b03efa0b | ||
|
|
4c21320d1b | ||
|
|
2cebb0dc60 | ||
|
|
3cdd5754df | ||
|
|
800802b8aa | ||
|
|
17c6348b57 | ||
|
|
7309c02f0b | ||
|
|
ee3f158f4e | ||
|
|
d08757ce9e | ||
|
|
9ba42aa556 | ||
|
|
59290e39f9 | ||
|
|
c624cce88e | ||
|
|
78f691d2de | ||
|
|
49258dd3f6 | ||
|
|
ed01c59916 | ||
|
|
422f3449a2 | ||
|
|
4a3652ec09 | ||
|
|
147ed42ad3 | ||
|
|
62ff2d803f | ||
|
|
0fcddce69e | ||
|
|
ace082066a | ||
|
|
65efef1eee | ||
|
|
12f1e19d68 | ||
|
|
0934f737d5 | ||
|
|
267844ebe6 | ||
|
|
ebd053c87e | ||
|
|
64e401e224 | ||
|
|
276ce052a3 | ||
|
|
119f784d19 | ||
|
|
35aeeaa6e1 | ||
|
|
561405ab00 | ||
|
|
960b2bb8e6 | ||
|
|
440536a93d | ||
|
|
9742796ee7 | ||
|
|
375aefa209 | ||
|
|
33b208ab6f | ||
|
|
f398650166 | ||
|
|
7e89bca5e6 | ||
|
|
dcd5c43da4 | ||
|
|
6da08262d7 | ||
|
|
ffc9c38722 | ||
|
|
a8854947c0 | ||
|
|
07f23aaa7d | ||
|
|
2626e8f22c | ||
|
|
09351e9459 | ||
|
|
f11b7d5105 | ||
|
|
07bde2b665 | ||
|
|
ebe7524415 | ||
|
|
a27a7add3d | ||
|
|
e12599c1b9 | ||
|
|
cd0338fbae | ||
|
|
7c6491c2d3 | ||
|
|
88decb6e0c | ||
|
|
1d8432b8a4 | ||
|
|
0a461d8248 | ||
|
|
a70f7aca07 | ||
|
|
365ef1fdf7 | ||
|
|
ea27ac6fd7 | ||
|
|
7a9488ff37 | ||
|
|
c297d0112e | ||
|
|
067eb23d8e | ||
|
|
12f4af742f | ||
|
|
e4fe9fae2a | ||
|
|
030da8c2f6 | ||
|
|
55e8dd550a | ||
|
|
1521d50399 | ||
|
|
654cfb6480 | ||
|
|
c46744f366 | ||
|
|
c2f9ad7a21 | ||
|
|
e1193212b5 | ||
|
|
a7415d4d2e | ||
|
|
6925ac25c4 | ||
|
|
a296425994 | ||
|
|
0c48f08f5c | ||
|
|
b363bff1d8 | ||
|
|
ef6ec8a15a | ||
|
|
8cf83c984e | ||
|
|
ba98243cc2 | ||
|
|
0d01bd908e | ||
|
|
bf3ef2d19a | ||
|
|
7da5124067 | ||
|
|
beeab54ae3 | ||
|
|
b79052aaf2 | ||
|
|
16be82b959 | ||
|
|
5d58c7c6fb | ||
|
|
9204145746 | ||
|
|
f73117f9b1 | ||
|
|
85fc54b205 | ||
|
|
4f6966d7b3 | ||
|
|
9e84e2fd2b | ||
|
|
f83fd59dca | ||
|
|
4ebdfcd13a | ||
|
|
0fa47f18ed | ||
|
|
a1425b457d | ||
|
|
7ef7fd19e7 | ||
|
|
6f00efa350 | ||
|
|
e1a28848fa | ||
|
|
7fdede579a | ||
|
|
4d10ba4297 | ||
|
|
bffcc2042e | ||
|
|
724f8e89a1 | ||
|
|
452e55a53c | ||
|
|
3bd3027251 | ||
|
|
bbc4aed3d9 | ||
|
|
aaf4946b27 | ||
|
|
31d0183d45 | ||
|
|
b309822199 | ||
|
|
422f60a145 | ||
|
|
f65429145e | ||
|
|
5adefb466b | ||
|
|
bdcd3d87e5 | ||
|
|
32059ae9d5 | ||
|
|
9bebf1c1a6 | ||
|
|
c0b24aefba | ||
|
|
e3f69e0246 | ||
|
|
7c7924e9fa | ||
|
|
97c9b992cb | ||
|
|
40d4e167cd | ||
|
|
58b2cc380f | ||
|
|
20a4e41872 | ||
|
|
b51bc7ee24 | ||
|
|
7826e9880c | ||
|
|
fb6204ea8b | ||
|
|
79192cf65b | ||
|
|
6ea3f42e2f | ||
|
|
6a75bd77e3 | ||
|
|
d47580a144 | ||
|
|
0353c3870f | ||
|
|
4e0e691546 | ||
|
|
c6d8592484 | ||
|
|
13d9780df4 | ||
|
|
e9de839d87 | ||
|
|
fbd0a2e3c4 | ||
|
|
d3d4267731 | ||
|
|
584ded2182 | ||
|
|
b6751f7ebc | ||
|
|
721d7ab3ab | ||
|
|
e01c1eaceb | ||
|
|
23def40bc5 | ||
|
|
f5ee93796d | ||
|
|
e8be434498 | ||
|
|
061fd48df7 | ||
|
|
6579f28b64 | ||
|
|
258fd145ff | ||
|
|
6530776a62 | ||
|
|
51af8df31d | ||
|
|
235f710853 | ||
|
|
c3cb0280ef | ||
|
|
6c73b6212c | ||
|
|
0c538a584f | ||
|
|
6ae1cc8f3f | ||
|
|
37123cef8f | ||
|
|
61a008f7e4 | ||
|
|
bf0bbe0be7 | ||
|
|
df57d2776b | ||
|
|
948d8e6d02 | ||
|
|
44cdef7934 | ||
|
|
fd0c9a1305 | ||
|
|
6cfdf4ec05 | ||
|
|
358ff6a608 | ||
|
|
41fbdba104 | ||
|
|
c22d11cedd | ||
|
|
5d586a9f3a | ||
|
|
a789c8c4c7 | ||
|
|
697c41a3f6 | ||
|
|
e44baa1094 | ||
|
|
e6e73b4f52 | ||
|
|
7ea8e7e667 | ||
|
|
a55ead5ea8 | ||
|
|
836092a666 | ||
|
|
3944b3d216 | ||
|
|
10699eeb34 | ||
|
|
6c89d8d35c | ||
|
|
be7551b9f4 | ||
|
|
70d0569f08 | ||
|
|
1db32d692b | ||
|
|
8fd29082c0 | ||
|
|
9bf079b725 | ||
|
|
e180dd0710 | ||
|
|
a7dd535d47 | ||
|
|
db27e8f000 | ||
|
|
7451b6f9ae | ||
|
|
e0b12b7512 | ||
|
|
22680dc602 | ||
|
|
6ade6d30a8 | ||
|
|
38c00872e1 | ||
|
|
c2108421c2 | ||
|
|
342dbd2e19 | ||
|
|
21f22b5099 | ||
|
|
60614e6f74 | ||
|
|
3053c56cac | ||
|
|
d149dbc91f | ||
|
|
e761d38fd1 | ||
|
|
98140f6cac | ||
|
|
60a4b9316b | ||
|
|
7c671b5373 | ||
|
|
d402e722cf | ||
|
|
8548a130c7 | ||
|
|
3d2027227b | ||
|
|
3fa5b8bca5 | ||
|
|
5240b44452 | ||
|
|
a56151fec9 | ||
|
|
63f539b382 | ||
|
|
c14d739360 | ||
|
|
58677dd53f | ||
|
|
6ac8ccde46 | ||
|
|
f1297a3694 | ||
|
|
e8ee400a3f | ||
|
|
6a08efeef9 | ||
|
|
4aa0070e3d | ||
|
|
b42f34c359 | ||
|
|
24e16b7f59 | ||
|
|
d6965b0676 | ||
|
|
9028d2085f | ||
|
|
7c7292935e | ||
|
|
1e6912ea2e | ||
|
|
9e0d12d3b0 | ||
|
|
b402c367d3 | ||
|
|
0a4ece5f5b | ||
|
|
9c09bd19b4 | ||
|
|
a9880ee7b9 | ||
|
|
74f8a30f86 | ||
|
|
1b7c295199 | ||
|
|
594f0d17d1 | ||
|
|
9d319cfa2d | ||
|
|
ed8a9d975b | ||
|
|
ca673f9899 | ||
|
|
a43da62254 | ||
|
|
6e9146e746 | ||
|
|
f571d8ffad | ||
|
|
48b6c4811f | ||
|
|
c1eb79e4ba | ||
|
|
e27335acdd | ||
|
|
216bda58da | ||
|
|
7141dceee2 | ||
|
|
ac55443278 | ||
|
|
2066c478ab | ||
|
|
98c9d51791 | ||
|
|
42f8ef3315 | ||
|
|
245f47cebb | ||
|
|
48e8efe3e8 | ||
|
|
58c0f57647 | ||
|
|
b1875f0b82 | ||
|
|
b7fb2e4387 | ||
|
|
a68df457d8 | ||
|
|
1262654d97 | ||
|
|
11c4606874 | ||
|
|
95f9b27e70 | ||
|
|
31550a2c6a | ||
|
|
915b7a4a56 | ||
|
|
61aa197b0b | ||
|
|
422807514c | ||
|
|
81287e960c | ||
|
|
79d154ed73 | ||
|
|
49281bbe45 | ||
|
|
5df7309979 | ||
|
|
80fa484467 | ||
|
|
4e96a6faec | ||
|
|
eba289a7ff | ||
|
|
889b5b4f3b | ||
|
|
cef22c70ab | ||
|
|
9e33d0c4c0 | ||
|
|
f694afbbf4 | ||
|
|
d0674e0ff9 | ||
|
|
30b926add4 | ||
|
|
c3812ce1e3 | ||
|
|
b32d1a2c9f | ||
|
|
60b0fa81ec | ||
|
|
499159870c | ||
|
|
fda61b067c | ||
|
|
7535e312e0 | ||
|
|
7fad9f604f | ||
|
|
1b53ffcac7 | ||
|
|
c738cfec93 | ||
|
|
56e4a9a914 | ||
|
|
3c884f8e30 | ||
|
|
5bae3b0577 | ||
|
|
1c63ea1448 | ||
|
|
3d4d960d60 | ||
|
|
794e817208 | ||
|
|
37c23eccfe | ||
|
|
e374874125 | ||
|
|
160903fce7 | ||
|
|
2dce4306b4 | ||
|
|
3de7713017 | ||
|
|
1cd033e521 | ||
|
|
e534e9bae8 | ||
|
|
f9f57e9505 | ||
|
|
92f4a6bb94 | ||
|
|
66bea2b5ed | ||
|
|
ad6c328135 | ||
|
|
d949acb1f2 | ||
|
|
759088002d | ||
|
|
7d80b5ad28 | ||
|
|
e70812f03f | ||
|
|
b9b52e74c6 | ||
|
|
a1e299a355 | ||
|
|
24f0eebcf7 | ||
|
|
f498eb8fde | ||
|
|
abe4267553 | ||
|
|
3a11348119 | ||
|
|
ad64190bec | ||
|
|
9648c4323f | ||
|
|
a1a283688c | ||
|
|
82b840c16e | ||
|
|
16126a2c9c | ||
|
|
9b7b3755fe | ||
|
|
54490cf65e | ||
|
|
cb016ad861 | ||
|
|
c520de11de | ||
|
|
422e25c99f | ||
|
|
1def627443 | ||
|
|
64cee0b633 | ||
|
|
24f95767b4 | ||
|
|
ed001d94da | ||
|
|
f3e8e5e057 | ||
|
|
3b0dc92952 | ||
|
|
1a686239ed | ||
|
|
c777fe5471 | ||
|
|
b7edc3ed82 | ||
|
|
97f14b7a08 | ||
|
|
6793503ed0 | ||
|
|
fa833f7684 | ||
|
|
d67ecf893d | ||
|
|
faee59ee15 | ||
|
|
217b7ea68c | ||
|
|
a020fc52c9 | ||
|
|
1ef3782dd4 | ||
|
|
7515590324 | ||
|
|
e3a000e0d4 | ||
|
|
27cd2f8e96 | ||
|
|
e1547d7835 | ||
|
|
63d1860dc0 | ||
|
|
f480e57344 | ||
|
|
7dc7ff22d2 | ||
|
|
67a05dfccd | ||
|
|
b6bc042302 | ||
|
|
1312405966 | ||
|
|
07d2add674 | ||
|
|
57d0f9794f | ||
|
|
269c7a065c | ||
|
|
b6946e78a2 | ||
|
|
2b70d1d332 | ||
|
|
b37afd68ec | ||
|
|
00c08c574e | ||
|
|
bbc79796dc | ||
|
|
760cc7d6be | ||
|
|
9a72025afb | ||
|
|
74302f60ab | ||
|
|
62962c05f1 | ||
|
|
118ff85fbf | ||
|
|
5f8e60a1b7 | ||
|
|
66e15a54a4 | ||
|
|
ad80606a44 | ||
|
|
d8fa38d55a | ||
|
|
6401dd7cc7 | ||
|
|
fe211fc563 | ||
|
|
7d008bd5b6 | ||
|
|
66ff2def8c | ||
|
|
de9b9c9dfb | ||
|
|
d765359f4b | ||
|
|
4de4823a65 | ||
|
|
23c4d592f8 | ||
|
|
311f06745a | ||
|
|
1b79f6a7cf | ||
|
|
8e1a7bdfff | ||
|
|
02a66a01c3 | ||
|
|
ce833d91ce | ||
|
|
155d3474d6 | ||
|
|
265687b56d | ||
|
|
0d69c0cd64 | ||
|
|
f54e9d0b1c | ||
|
|
b982076e52 | ||
|
|
7060596a30 | ||
|
|
e51c9e50b5 | ||
|
|
5088e91566 | ||
|
|
276f499c82 | ||
|
|
5c203ce6c6 | ||
|
|
47cd1c5286 | ||
|
|
06e2756ee4 | ||
|
|
1c9a2128cf | ||
|
|
9e515ea7c4 | ||
|
|
00aaf0f796 | ||
|
|
3694811d06 | ||
|
|
81b96ae123 | ||
|
|
7c60ee3c85 | ||
|
|
b2e379cf7a | ||
|
|
08b454423b | ||
|
|
f3aa54b770 | ||
|
|
3a07e92b60 | ||
|
|
7eecc49c3a | ||
|
|
9ab2fd7f9e | ||
|
|
bf2b590273 | ||
|
|
9151d34d40 | ||
|
|
339d906e54 | ||
|
|
f47c865555 | ||
|
|
58df2f0bdc | ||
|
|
c71b1d63e5 | ||
|
|
a07296770c | ||
|
|
8154575d70 | ||
|
|
d757df8a4b | ||
|
|
c5688fef9a | ||
|
|
19655a15f1 | ||
|
|
f345b0f595 | ||
|
|
58707f8a2a | ||
|
|
c6089ccb33 | ||
|
|
f585a15eff | ||
|
|
08e69af572 | ||
|
|
294b4bcbac | ||
|
|
67008b5d15 | ||
|
|
a29f5a4849 | ||
|
|
1b1c08f7fb | ||
|
|
0c72be0403 | ||
|
|
b4bd89b96b | ||
|
|
5bb8b2add6 | ||
|
|
93b42ccfea | ||
|
|
ff86154a03 | ||
|
|
fcee67e317 | ||
|
|
155900e62f | ||
|
|
9c514c9808 | ||
|
|
62e80c602d | ||
|
|
dbb248df52 | ||
|
|
66779f1c5f | ||
|
|
2c856b67ca | ||
|
|
bf45581104 | ||
|
|
e88b2890d1 | ||
|
|
1b5ae71d1f | ||
|
|
d4ff835bf1 | ||
|
|
e27b0adbc8 | ||
|
|
e59fa8637a | ||
|
|
58f758c816 | ||
|
|
feb6999d9a | ||
|
|
71f61bbc47 | ||
|
|
6d3ea64a35 | ||
|
|
1fca2bfab1 | ||
|
|
ce41afb756 | ||
|
|
b4a42a640d | ||
|
|
58b26cb4c8 | ||
|
|
b453c32743 | ||
|
|
3cd398b098 | ||
|
|
d3127b8eb1 | ||
|
|
6de1d0cb33 | ||
|
|
6c718578a5 | ||
|
|
0d241d52eb | ||
|
|
212eaa3a05 | ||
|
|
f3ab3fe5e2 | ||
|
|
b8c56ff940 | ||
|
|
38da737e6c | ||
|
|
1b2ea7a1df | ||
|
|
a9e5fc8539 | ||
|
|
9b213115e7 | ||
|
|
5534347328 | ||
|
|
2355029dc1 | ||
|
|
8d25335b01 | ||
|
|
c0b5900a37 | ||
|
|
35a9290528 | ||
|
|
c9145ad4d8 | ||
|
|
3851628a43 | ||
|
|
d72ac92694 | ||
|
|
2555951be4 | ||
|
|
669bff78c4 | ||
|
|
c90d1f2527 | ||
|
|
40cebc250f | ||
|
|
ddd495fb48 | ||
|
|
58f2044637 | ||
|
|
dfe3fdc1cc | ||
|
|
705131e172 | ||
|
|
88759407c7 | ||
|
|
6c99cc611c | ||
|
|
3457bcbfcd | ||
|
|
eb385457b2 | ||
|
|
4ea8b4cb4f | ||
|
|
91bdcf8994 | ||
|
|
8d03c52e15 | ||
|
|
0fbc9a44d3 | ||
|
|
632035aabd | ||
|
|
a51e0047b7 | ||
|
|
726730bb0e | ||
|
|
faff1771c4 | ||
|
|
b06cd06ec1 | ||
|
|
95751d8009 | ||
|
|
14e565a004 | ||
|
|
ce694701a9 | ||
|
|
12d03e4030 | ||
|
|
0b1ce6be8f | ||
|
|
28a6adaaa4 | ||
|
|
36990a0514 | ||
|
|
ebac0dc628 | ||
|
|
29d58f2414 | ||
|
|
dca0054e93 | ||
|
|
983fe58959 | ||
|
|
91c9b8d062 | ||
|
|
b384570de3 | ||
|
|
0507852a34 | ||
|
|
7b6ff135fb | ||
|
|
bf24de88ed | ||
|
|
ff6d4ab39a | ||
|
|
66fde7a2e6 | ||
|
|
e8efaa4cd9 | ||
|
|
00947d6492 | ||
|
|
cf70fb1b4e | ||
|
|
ef1a992cf0 | ||
|
|
1f6a73f0db | ||
|
|
f2e596f6ec | ||
|
|
77ba9e728d | ||
|
|
cf9efefd96 | ||
|
|
4fb1603001 | ||
|
|
c5aac1251d | ||
|
|
b155bc564b | ||
|
|
055c48ab33 | ||
|
|
6663e1eda6 | ||
|
|
649afef512 | ||
|
|
4514f3fc11 | ||
|
|
095bef9554 | ||
|
|
f00351c106 | ||
|
|
936fce68d0 | ||
|
|
d978ac97f1 | ||
|
|
dd5978f222 | ||
|
|
0ebe0ce585 | ||
|
|
c8cfad7c00 | ||
|
|
83a16dec19 | ||
|
|
820c531814 | ||
|
|
1727b8df3b | ||
|
|
a025a15f5d | ||
|
|
72e5876c64 | ||
|
|
aeed2eb9ad | ||
|
|
46bc5ca73b | ||
|
|
0b3feb9d4c | ||
|
|
ca8692c747 | ||
|
|
a61d58716f | ||
|
|
6b646b6127 | ||
|
|
318aa5e0d3 | ||
|
|
49e99e9d51 | ||
|
|
1dfd974432 | ||
|
|
cc396f59cf | ||
|
|
ad2cd97618 | ||
|
|
aa8b9cc508 | ||
|
|
6a2cf09ee0 | ||
|
|
c6fd88116b | ||
|
|
8f0dbdeaba | ||
|
|
007c09b84e | ||
|
|
73f3c068ef | ||
|
|
9a92fa4a60 | ||
|
|
576af710be | ||
|
|
b5642bd068 | ||
|
|
128f322252 | ||
|
|
17d7e57a2e | ||
|
|
50288e6b01 | ||
|
|
ab3e44e4bd | ||
|
|
61607990c8 | ||
|
|
b65275235f | ||
|
|
e298a71834 | ||
|
|
3f6fa1e3db | ||
|
|
f2c2abe628 | ||
|
|
ff5b467fbe | ||
|
|
8c10941142 | ||
|
|
f5764d8dc6 | ||
|
|
81ca4f12dd | ||
|
|
941c469ab9 | ||
|
|
8fcd819e6f | ||
|
|
9abdaed20c | ||
|
|
eb94342f78 | ||
|
|
d563eb2336 | ||
|
|
3ee6f085db | ||
|
|
7cca69a136 | ||
|
|
093a5a260e | ||
|
|
b6d46fd52f | ||
|
|
2c072c0ed6 | ||
|
|
1f39bf8a78 | ||
|
|
fdd8499ffc | ||
|
|
9398ea7af5 | ||
|
|
29dce1a59c | ||
|
|
c729ee425f | ||
|
|
c489f23810 | ||
|
|
47a544230a | ||
|
|
c13c81f09d | ||
|
|
20544a4447 | ||
|
|
b688ebeefa | ||
|
|
1854050df3 | ||
|
|
c7f4a649df | ||
|
|
ef5c8e6839 | ||
|
|
d571f300e5 | ||
|
|
ce96527dd9 | ||
|
|
f8b8b53985 | ||
|
|
b20e142249 | ||
|
|
7c6dc9dda8 | ||
|
|
5875571215 | ||
|
|
975e6b1563 | ||
|
|
f6fd7c83e3 | ||
|
|
c2965c0fb0 | ||
|
|
fdad55956e | ||
|
|
bb399e56b0 | ||
|
|
fa68cbad1b | ||
|
|
995ef1348a | ||
|
|
0f03393010 | ||
|
|
4b1ffc23f5 | ||
|
|
c7137dffa8 | ||
|
|
5a3375ce52 | ||
|
|
8e834fd9f5 | ||
|
|
02046744eb | ||
|
|
68d7ec9155 | ||
|
|
7537dce0f0 | ||
|
|
5f41b74707 | ||
|
|
25d961d4e0 | ||
|
|
08c4e514f8 | ||
|
|
91b1d812ce | ||
|
|
1f05d9f79d | ||
|
|
9f8cffe887 | ||
|
|
995bee143a | ||
|
|
f10e56be7e | ||
|
|
2f8e10db46 | ||
|
|
5418e15e63 | ||
|
|
bcf84cc153 | ||
|
|
ce8520c9e6 | ||
|
|
0b3928c33e | ||
|
|
73d72651b4 | ||
|
|
adbedd488c | ||
|
|
13b72f6bc2 | ||
|
|
c5aa96a3aa | ||
|
|
d927c0e45f | ||
|
|
31660c4c5f | ||
|
|
4321adab71 | ||
|
|
68f151f5c0 | ||
|
|
ecad083ffc | ||
|
|
fee43e8474 | ||
|
|
4838ab74b3 | ||
|
|
fef9259aaa | ||
|
|
ad7c10727a | ||
|
|
ccd42c1d1a | ||
|
|
bd8eadb75b | ||
|
|
70a9d0d3a2 | ||
|
|
7cd3824863 | ||
|
|
db9021f9c1 | ||
|
|
a2418c6040 | ||
|
|
1fb29d59b7 | ||
|
|
8c4a217f03 | ||
|
|
bda7c39e55 | ||
|
|
3583283ebb | ||
|
|
4feacf2213 | ||
|
|
73eb731881 | ||
|
|
186e36752d | ||
|
|
421728a985 | ||
|
|
39a5701184 | ||
|
|
27948c777e | ||
|
|
c64ed46d05 | ||
|
|
c64465ff7e | ||
|
|
095200bd16 | ||
|
|
2c667a159c | ||
|
|
bac408044f | ||
|
|
4edcfe1f7c | ||
|
|
9259dcb6f5 | ||
|
|
7ef933c7cf | ||
|
|
7d312822c1 | ||
|
|
1b3e5c6ea6 | ||
|
|
efe8401e92 | ||
|
|
0b845c2532 | ||
|
|
fe60412a17 | ||
|
|
5c39e6f2fb | ||
|
|
a225a241d7 | ||
|
|
553a486d17 | ||
|
|
c73374a221 | ||
|
|
94e26dee4f | ||
|
|
4617ef2bb8 | ||
|
|
8afa8c1091 | ||
|
|
578608d301 | ||
|
|
0d45d8669e | ||
|
|
73708da60d | ||
|
|
c810cad7c8 | ||
|
|
94bba415b1 | ||
|
|
4f7629a4cb | ||
|
|
4015f31f28 | ||
|
|
9dccbe1b07 | ||
|
|
9a88df7f28 | ||
|
|
a47f622e7e | ||
|
|
3529148455 | ||
|
|
01d8286bd9 | ||
|
|
21b6f2d593 | ||
|
|
528ff5d28c | ||
|
|
ba7d2aecbb | ||
|
|
0236b97d49 | ||
|
|
26f6b1eeff | ||
|
|
dc447ccebe | ||
|
|
7ec29638f4 | ||
|
|
4c9562af20 | ||
|
|
71942fd322 | ||
|
|
550b979ac5 | ||
|
|
3878a5a46f | ||
|
|
e443a6a1ea | ||
|
|
963494ec6f | ||
|
|
42d73118fd | ||
|
|
f2f819d70f | ||
|
|
525cdb8830 | ||
|
|
a6764e82f2 | ||
|
|
1de18b89dd | ||
|
|
882518c111 | ||
|
|
8027531d07 | ||
|
|
30706355a4 | ||
|
|
dfe99507b8 | ||
|
|
c1717c9a6c | ||
|
|
1fd1a58a7a | ||
|
|
fad07507be | ||
|
|
a20c211162 | ||
|
|
9f6ab6b817 | ||
|
|
bf3d6c0e6e | ||
|
|
241023f3fc | ||
|
|
1292c44b41 | ||
|
|
b4fce47049 | ||
|
|
e7780cd8c8 | ||
|
|
af96c8ea53 | ||
|
|
7d26b81075 | ||
|
|
b8ada63ac3 | ||
|
|
cfaac12af1 | ||
|
|
6028efd26c | ||
|
|
62a566ef2c | ||
|
|
94419f434c | ||
|
|
21f349c032 | ||
|
|
28e36f7925 | ||
|
|
6c02076333 | ||
|
|
7414bdf0e3 | ||
|
|
e6326b2929 | ||
|
|
17cdcebd04 | ||
|
|
a14babdc73 | ||
|
|
aadc6a763a | ||
|
|
f16af8bf88 | ||
|
|
5ceaef4500 | ||
|
|
1ac7219a92 | ||
|
|
d4cc9871c4 | ||
|
|
961c30e7c0 | ||
|
|
13e85b3147 | ||
|
|
50a3c7fa0b | ||
|
|
bd9d2671d7 | ||
|
|
62b40636e0 | ||
|
|
eeff451bc5 | ||
|
|
56fcb20f94 | ||
|
|
7134266acf | ||
|
|
2e4ac88ad9 | ||
|
|
51547fa216 | ||
|
|
2005fc97a8 | ||
|
|
0772d9250e | ||
|
|
aa6047c460 | ||
|
|
045cba78b4 | ||
|
|
8989d0d4b6 | ||
|
|
c521117b99 | ||
|
|
e0f52a8ab8 | ||
|
|
6c23fadf7e | ||
|
|
869952d113 | ||
|
|
07ab051ee4 | ||
|
|
f2d98fc0c7 | ||
|
|
2b41cec840 | ||
|
|
6cf77040e7 | ||
|
|
20b70bc5fd | ||
|
|
4905e7193a | ||
|
|
9c1f4b8e72 | ||
|
|
9857c17631 | ||
|
|
7e34bb946f | ||
|
|
47b748851b | ||
|
|
a6f99cf534 | ||
|
|
a120a6bc32 | ||
|
|
d557d1a190 | ||
|
|
e0286e5085 | ||
|
|
4b41e898a4 | ||
|
|
668e164793 | ||
|
|
fa2e6188d0 | ||
|
|
7fde9ebbc2 | ||
|
|
aef7c3b9bb | ||
|
|
a0b76bd608 | ||
|
|
c1fab7f8d8 | ||
|
|
f42c8f2abe | ||
|
|
aa5846b282 | ||
|
|
594a0ade38 | ||
|
|
d45cc23171 | ||
|
|
d795734352 | ||
|
|
4da9fdd1d5 | ||
|
|
6b218caa21 | ||
|
|
5c138007d0 | ||
|
|
1acfc46f46 | ||
|
|
fbffb08aae | ||
|
|
8640a62319 | ||
|
|
fa782e70a4 | ||
|
|
afd72abc6e | ||
|
|
71f72e167e | ||
|
|
6595c7601e | ||
|
|
67c0506290 | ||
|
|
6447be4534 | ||
|
|
3741617ebd | ||
|
|
ab4e8b2cf0 | ||
|
|
474165d7aa | ||
|
|
94e067a2e2 | ||
|
|
4293c89166 | ||
|
|
ec82c37da5 | ||
|
|
552a4b998a | ||
|
|
0d2061b268 | ||
|
|
8a260defc2 | ||
|
|
e14c87597a | ||
|
|
f3f19d35aa | ||
|
|
ced90e1d84 | ||
|
|
17e4033340 | ||
|
|
044d3a013d | ||
|
|
1fc9dd7b68 | ||
|
|
8147866c09 | ||
|
|
7bd1972f94 | ||
|
|
2c9dcfe27b | ||
|
|
1b79b0f3ff | ||
|
|
c637e6cf31 | ||
|
|
d3a9f5bb88 | ||
|
|
7eb0415a8a | ||
|
|
bdbc8fa08f | ||
|
|
63f3af0f94 | ||
|
|
686f890fbf | ||
|
|
220fbe6544 | ||
|
|
ae44a94325 | ||
|
|
3718d6dcd4 | ||
|
|
90b3838173 | ||
|
|
19d3ecc76f | ||
|
|
6fba4ebb13 | ||
|
|
c31974c913 | ||
|
|
6177fa5dd8 | ||
|
|
cfe72159d0 | ||
|
|
8321e4a647 | ||
|
|
3084330d0c | ||
|
|
b566649e79 | ||
|
|
10a6180e4a | ||
|
|
cbe9e78977 | ||
|
|
74145b1f39 | ||
|
|
359e56751b | ||
|
|
5899784aa4 | ||
|
|
9e8959c56d | ||
|
|
1bff2292a6 | ||
|
|
cf9247754e | ||
|
|
eefab15958 | ||
|
|
0e23732631 | ||
|
|
37c044fb4b | ||
|
|
6da5fa01b9 | ||
|
|
616930f9d3 | ||
|
|
b9c31fa7c4 | ||
|
|
17b339972c | ||
|
|
39f8bd91b9 | ||
|
|
aa4e37d085 | ||
|
|
f59b66b7d4 | ||
|
|
8f0ea7a02d | ||
|
|
a1dc00890e | ||
|
|
dfbcc363d1 | ||
|
|
1047f973d5 | ||
|
|
e32977dd73 | ||
|
|
b5f78ec1e8 | ||
|
|
e0f290fdc8 | ||
|
|
fc00a4e3b2 | ||
|
|
db1f6ded88 | ||
|
|
4644af2ccc | ||
|
|
2e3e8687e1 | ||
|
|
ca42a45802 | ||
|
|
9350ecb62b | ||
|
|
a4a026e8da | ||
|
|
342fd03e72 | ||
|
|
e3f1fd9b63 | ||
|
|
e4a4dfd038 | ||
|
|
a377e99088 | ||
|
|
1d3d7a3033 | ||
|
|
e7086cb3a3 | ||
|
|
4f2a97073e | ||
|
|
7407e3b45d | ||
|
|
01ef7340aa | ||
|
|
1c960d22c1 | ||
|
|
ece0606fed | ||
|
|
2666422b99 | ||
|
|
e6d59216d4 | ||
|
|
4e8615f276 | ||
|
|
91e4d95660 | ||
|
|
45456fa24c | ||
|
|
4588258d80 | ||
|
|
c12e48f966 | ||
|
|
ec8f50a658 | ||
|
|
99c9191784 | ||
|
|
6bb02d141f | ||
|
|
07bb2a5f3f | ||
|
|
417861a48e | ||
|
|
b7e878de64 | ||
|
|
05edb5514b | ||
|
|
e90ec847b6 | ||
|
|
6344fa2a86 | ||
|
|
7e288acc90 | ||
|
|
29b0e4a8a5 | ||
|
|
27ff222cfb | ||
|
|
11f7b83522 | ||
|
|
f7177be3b6 | ||
|
|
875b417fde | ||
|
|
2573107b32 | ||
|
|
5b85005945 | ||
|
|
1ee984478f | ||
|
|
fd693dc526 | ||
|
|
e73531ce9b | ||
|
|
53ad1645cf | ||
|
|
ecea13757b | ||
|
|
af9c4a7dd0 | ||
|
|
80d8d6c3bc | ||
|
|
d648811233 | ||
|
|
34695acb85 | ||
|
|
a63de12182 | ||
|
|
f16910d616 | ||
|
|
64b3f3cec1 | ||
|
|
6a685727d0 | ||
|
|
32d25f76fc | ||
|
|
69cafe8674 | ||
|
|
18ba8d9166 | ||
|
|
e97fd7e81c | ||
|
|
cdb64b0d33 | ||
|
|
8d4d3b03bb | ||
|
|
addefe79e1 | ||
|
|
b764d3b8f6 | ||
|
|
611fd884bd | ||
|
|
826090e099 | ||
|
|
7399de6ecc | ||
|
|
25cb5e7505 | ||
|
|
5c13ec3121 | ||
|
|
d8aff3a7e3 | ||
|
|
f44927b9f8 | ||
|
|
c0110cb5af | ||
|
|
1f8e1142a0 | ||
|
|
1e51de88d6 | ||
|
|
30995b5397 | ||
|
|
eb60f67054 | ||
|
|
78193ceec1 | ||
|
|
f0e08e7687 | ||
|
|
10b8259259 | ||
|
|
6826149a8f | ||
|
|
eb0b77bf4d | ||
|
|
9d81467937 | ||
|
|
fd8ccaf01a | ||
|
|
c9debc50b1 | ||
|
|
2b30e3b6d7 | ||
|
|
6e90ec6111 | ||
|
|
8dd38f4775 | ||
|
|
fbd73f248f | ||
|
|
3fcefe6c32 | ||
|
|
f740d2c291 | ||
|
|
bf6585a40f | ||
|
|
8c2dd7b3f0 | ||
|
|
4167c437a8 | ||
|
|
0ddaef3c9a | ||
|
|
2fc6aaf936 | ||
|
|
1c0519f1c7 | ||
|
|
6bbe7800be | ||
|
|
2694149489 | ||
|
|
a17ac50118 | ||
|
|
656a77d585 | ||
|
|
7455476c60 | ||
|
|
36cda57c81 | ||
|
|
9f1f203b84 | ||
|
|
b41a8ca93f | ||
|
|
e3cf0c0e10 | ||
|
|
de18bce9aa | ||
|
|
3cc407bc0e | ||
|
|
00a0a12138 | ||
|
|
b08767a4f9 | ||
|
|
ac6bde7a98 | ||
|
|
d2d41d68dd | ||
|
|
944b7f7617 | ||
|
|
53825eb073 | ||
|
|
1a7f49513f | ||
|
|
885a2ce7ef | ||
|
|
14ba80a0af | ||
|
|
5fa22fdf82 | ||
|
|
bcaae2eb91 | ||
|
|
767a41e263 | ||
|
|
252d6c5301 | ||
|
|
7a4e65ad4b | ||
|
|
a582aa89a9 | ||
|
|
acefa1da12 | ||
|
|
a88698f3fc | ||
|
|
ebc6755b33 | ||
|
|
c8eff34388 | ||
|
|
f19b03825b | ||
|
|
25178cdbe1 | ||
|
|
a461538d58 | ||
|
|
b43ee62947 | ||
|
|
ebe6f418f3 | ||
|
|
391e79f8ee | ||
|
|
c7fcb7a84b | ||
|
|
87f4ed591e | ||
|
|
440d2e28ed | ||
|
|
6cb8980404 | ||
|
|
fe752bbd35 | ||
|
|
c74d451fa2 | ||
|
|
12d743fb35 | ||
|
|
6acb9f7910 | ||
|
|
eb6f5c6927 | ||
|
|
7ccb4c8ea3 | ||
|
|
4ce986d47d | ||
|
|
91ef085d7d | ||
|
|
97aaa24733 | ||
|
|
faf6441633 | ||
|
|
00c151b463 | ||
|
|
106b20cdbf | ||
|
|
c069b3b1e8 | ||
|
|
a2ae9f1f27 | ||
|
|
4cd6d86426 | ||
|
|
fa72f1947a | ||
|
|
9ee7d3935d | ||
|
|
1071fe0ac7 | ||
|
|
0be003377f | ||
|
|
ca3f497b56 | ||
|
|
034b84b707 | ||
|
|
1624523c4e | ||
|
|
313afe14ce | ||
|
|
01180b316f | ||
|
|
ee7d061001 | ||
|
|
60c5949a74 | ||
|
|
2ebbd4c94d | ||
|
|
785115c62b | ||
|
|
e643fc382c | ||
|
|
34aad82ac3 | ||
|
|
0c29468f90 | ||
|
|
9301dae63e | ||
|
|
2475d4a205 | ||
|
|
be75fc3474 | ||
|
|
785e049af3 | ||
|
|
be4e49e6d7 | ||
|
|
1307d604e7 | ||
|
|
45d57018eb | ||
|
|
03bf348530 | ||
|
|
cab60ef735 | ||
|
|
a3791104f9 | ||
|
|
2b3e40bb2a | ||
|
|
0c1dcad429 | ||
|
|
101ef0cf62 | ||
|
|
0debe0a80c | ||
|
|
d22e62ac8a | ||
|
|
1ee17383f8 | ||
|
|
b59c79c458 | ||
|
|
bcb6444f89 | ||
|
|
c2b14693b4 | ||
|
|
92d35409de | ||
|
|
351a08f813 | ||
|
|
a58dc787a9 | ||
|
|
7079edc2d0 | ||
|
|
da89583ccc | ||
|
|
a42a1f08e9 | ||
|
|
ebd5253e22 | ||
|
|
6411645ffc | ||
|
|
c0c322ba16 | ||
|
|
d35c5cd491 | ||
|
|
7a353028e7 | ||
|
|
2d8d3b7857 | ||
|
|
4190293b07 | ||
|
|
421b4c0aff | ||
|
|
cd69a7cb85 | ||
|
|
0c9ba9e86c | ||
|
|
1b4d2a41c9 | ||
|
|
0787d2b47a | ||
|
|
97bf1d85ab | ||
|
|
207a493fab | ||
|
|
1f3f9e131e | ||
|
|
4ddedfaaf9 | ||
|
|
3ebebef95f | ||
|
|
9f7ad47598 | ||
|
|
3c83cd8be2 | ||
|
|
963b3b768c | ||
|
|
f6709fb5d6 | ||
|
|
921599948b | ||
|
|
5df3cafa99 | ||
|
|
1a2143c1fe | ||
|
|
dd25281305 | ||
|
|
49d0301dde | ||
|
|
d90e56eb45 | ||
|
|
838ada8864 | ||
|
|
65a106792a | ||
|
|
ee4bfcbb81 | ||
|
|
a087f089b8 | ||
|
|
afbe8bf001 | ||
|
|
2a3ef0be06 | ||
|
|
3403909354 | ||
|
|
005d0c5f53 | ||
|
|
8aaaeb29cc | ||
|
|
230f8abd04 | ||
|
|
a18bbb5f2f | ||
|
|
60fce4f1dc | ||
|
|
9af65efcdb | ||
|
|
bc194a7d8c | ||
|
|
c28f691f32 | ||
|
|
ff1f114989 | ||
|
|
cac230206d | ||
|
|
79ae15d5e8 | ||
|
|
0cce0a8877 | ||
|
|
225fd035ae | ||
|
|
fb7d1346b5 | ||
|
|
491a744481 | ||
|
|
f366026435 | ||
|
|
1a0d4ed668 | ||
|
|
63a8c76946 | ||
|
|
f355a68bc9 | ||
|
|
c87e6526c1 | ||
|
|
af3a5076d6 | ||
|
|
18f2e21414 | ||
|
|
8a8cdeebb4 | ||
|
|
12b33f4ea4 | ||
|
|
01b3a09d7d | ||
|
|
0d6c1c7790 | ||
|
|
95e366b6c6 | ||
|
|
77701143bf | ||
|
|
02dea7b09b | ||
|
|
c26f93c4a0 | ||
|
|
c826ac28ef | ||
|
|
1893b0eb30 | ||
|
|
05527b13db | ||
|
|
ae5d9c8bfc | ||
|
|
9117c2a4ec | ||
|
|
bab4bb9904 | ||
|
|
33bae6f49b | ||
|
|
32d619a56b | ||
|
|
642432cf2a | ||
|
|
61e9598b08 | ||
|
|
d4e34c7514 | ||
|
|
bfe7a5e452 | ||
|
|
77d916ffec | ||
|
|
831abf7977 | ||
|
|
817a491087 | ||
|
|
9a8dacc514 | ||
|
|
8adf80d98b | ||
|
|
62686a6213 | ||
|
|
3a089242f8 | ||
|
|
9d70c38504 | ||
|
|
aeb464f3ca | ||
|
|
7076717b20 | ||
|
|
c0a4fcea0a | ||
|
|
aa2b195c86 | ||
|
|
1d0872e7ca | ||
|
|
33988637b5 | ||
|
|
d4f6ad7225 | ||
|
|
078fefed03 | ||
|
|
5b10af85b4 | ||
|
|
4caf95e5dd | ||
|
|
8e1bcf53bb | ||
|
|
064f9be7e4 | ||
|
|
adcfb44cb7 | ||
|
|
3d79773ba2 | ||
|
|
6aa8cbbf20 | ||
|
|
742e73c9c2 | ||
|
|
f8de2bdedc | ||
|
|
59879b7fa7 | ||
|
|
27abae21b8 | ||
|
|
0819c8a51a | ||
|
|
9dcd3cd491 | ||
|
|
49767cccd2 | ||
|
|
29fb447daa | ||
|
|
f6fe5b552d | ||
|
|
bd0801a887 | ||
|
|
05b1c66aa8 | ||
|
|
80ae592c23 | ||
|
|
ba6de4c4d4 | ||
|
|
46ea9170cb | ||
|
|
7d318aeefa | ||
|
|
0aa3cf677a | ||
|
|
72961c5858 | ||
|
|
a05711a37a | ||
|
|
efc9e1d673 | ||
|
|
a11ac188c2 | ||
|
|
60350d298a | ||
|
|
838dad8759 | ||
|
|
a728dfe0c6 | ||
|
|
0c7cbe3566 | ||
|
|
832b0185c7 | ||
|
|
b1719b26d1 | ||
|
|
ccf6a921c7 | ||
|
|
197c570baa | ||
|
|
0fe09f1d40 | ||
|
|
4a91954532 | ||
|
|
b8b5cec35c | ||
|
|
43c203333e | ||
|
|
1c6393b131 | ||
|
|
22f04e72e5 | ||
|
|
5f3debf65b | ||
|
|
fd8ef27535 | ||
|
|
a80ec5d8bb | ||
|
|
530a16291c | ||
|
|
7be8f4dc6e | ||
|
|
9792b17597 | ||
|
|
99f1e3ff35 | ||
|
|
5ba71cd2f1 | ||
|
|
b7df7ce5d5 | ||
|
|
405829dc30 | ||
|
|
451a851118 | ||
|
|
e97c376681 | ||
|
|
7541e243bc | ||
|
|
50a8116ae9 | ||
|
|
bf6fe5e962 | ||
|
|
e4f8799323 | ||
|
|
1f95524996 | ||
|
|
a50d5d351b | ||
|
|
067810fa98 | ||
|
|
a9285b8a94 | ||
|
|
ec6bcfeb83 | ||
|
|
7abec1888f | ||
|
|
fdcbf7aacf | ||
|
|
445bfdf242 | ||
|
|
0fba1901c8 | ||
|
|
fc5b9c8235 | ||
|
|
f490f44501 | ||
|
|
7e02082209 | ||
|
|
d869ac95fa | ||
|
|
5c856460a6 | ||
|
|
3613695f91 | ||
|
|
8fb7d476b8 | ||
|
|
dd8df483cd | ||
|
|
65459a99b6 | ||
|
|
2129584fd6 | ||
|
|
2da9c216c3 | ||
|
|
c6e26c5a16 | ||
|
|
fd57fa4913 | ||
|
|
8c4d22b3f9 | ||
|
|
c221774c51 | ||
|
|
23686b1391 | ||
|
|
0fffba5423 | ||
|
|
0e0eb747b5 | ||
|
|
f6f8695a8e | ||
|
|
b2141a96e2 | ||
|
|
4280aca82c | ||
|
|
c08889b021 | ||
|
|
57ebe382f9 | ||
|
|
73089bbfdf | ||
|
|
3a04552f98 | ||
|
|
b67bf2227e | ||
|
|
dde3b59e7b | ||
|
|
947800b95f | ||
|
|
7aa4c083a9 | ||
|
|
fcc77d1383 | ||
|
|
997cd1e332 | ||
|
|
2e88e23002 | ||
|
|
39ca192c41 | ||
|
|
f7fa71bc28 | ||
|
|
fbfbb26fd2 | ||
|
|
493bd188d5 | ||
|
|
9fd95df5cf | ||
|
|
54de3bf27a | ||
|
|
4587c3e53e | ||
|
|
be18bc6fc3 | ||
|
|
212cbbd3a2 | ||
|
|
6f9e690345 | ||
|
|
115d06edf0 | ||
|
|
e135435ce2 | ||
|
|
cd09adc3cc | ||
|
|
2491e9b5ad | ||
|
|
e63c83955a | ||
|
|
4b72aa33f3 | ||
|
|
ff9683b0fc | ||
|
|
607237571f | ||
|
|
28ca7df297 | ||
|
|
856c955386 | ||
|
|
e1c9016d90 | ||
|
|
953c5036bf | ||
|
|
37fa980565 | ||
|
|
f648b8e026 | ||
|
|
678c3ae132 | ||
|
|
c1c31ed9b2 | ||
|
|
777be05348 | ||
|
|
0bb3e4a98c | ||
|
|
9a91815b94 | ||
|
|
000e621eb6 | ||
|
|
093d7ba858 | ||
|
|
ce006a7a91 | ||
|
|
9d795061af | ||
|
|
1d1fc019dc | ||
|
|
bb664d9bbf | ||
|
|
bfc7b339f7 | ||
|
|
f30f8905ec | ||
|
|
3bae525026 | ||
|
|
df00805a2a | ||
|
|
a88ee96518 | ||
|
|
3cc2f9bd57 | ||
|
|
d1b684b782 | ||
|
|
6460d4ad3a | ||
|
|
19ea392d5d | ||
|
|
fb4d016176 | ||
|
|
afec747d9e | ||
|
|
7388fcce41 | ||
|
|
a6f9f9f968 | ||
|
|
29759721e0 | ||
|
|
1941b20521 | ||
|
|
e6969acb50 | ||
|
|
9489531431 | ||
|
|
32b7c0ca9b | ||
|
|
4ac57b4edf | ||
|
|
685a1e0ba3 | ||
|
|
e350aab1bd | ||
|
|
0dd6986e28 | ||
|
|
6d0102a70c | ||
|
|
f96a2a18c1 | ||
|
|
f955b04a6f | ||
|
|
2fd6ac319b | ||
|
|
82fbf452a8 | ||
|
|
ba69736f55 | ||
|
|
c75c6b6858 | ||
|
|
de61745bb2 | ||
|
|
3fab0fcd4c | ||
|
|
03bcd94ae5 | ||
|
|
0343bc7777 | ||
|
|
565d19acfd | ||
|
|
960acf1982 | ||
|
|
ece911521e | ||
|
|
5d95e59742 | ||
|
|
01d084bbfd | ||
|
|
7918fc2844 | ||
|
|
31b30a6df2 | ||
|
|
d217b59e0b | ||
|
|
169a4b9d32 | ||
|
|
15f3ffb165 | ||
|
|
02db1010dd | ||
|
|
935ea66681 | ||
|
|
26060e702f | ||
|
|
65d4ca2563 | ||
|
|
3c619a8da5 | ||
|
|
ded9b6c14e | ||
|
|
609abbbd7c | ||
|
|
1b4e504fad | ||
|
|
0a3a445828 | ||
|
|
c7e18bd5be | ||
|
|
083d202fe4 | ||
|
|
8365a8328b | ||
|
|
58f21e4b3a | ||
|
|
5bd7408b2f | ||
|
|
c671e8dd1d | ||
|
|
a3aed3c4c3 | ||
|
|
c008649584 | ||
|
|
516f8f287c | ||
|
|
66148690c6 | ||
|
|
cadd7f546f | ||
|
|
a3ff317f1c | ||
|
|
d8d4b0c0c7 | ||
|
|
d616f8c854 | ||
|
|
b6fa8b8eec | ||
|
|
36d2e6999b | ||
|
|
076c00063d | ||
|
|
ea8104c6a2 | ||
|
|
ca3e9336e1 | ||
|
|
f92ab48166 | ||
|
|
c10267ce2b | ||
|
|
9bd6a62ab3 | ||
|
|
0dbea6ca58 | ||
|
|
6523b23221 | ||
|
|
29c406dda0 | ||
|
|
483c8f246d | ||
|
|
645f283108 | ||
|
|
da6fd45000 | ||
|
|
fb3ef5f388 | ||
|
|
86bc76e352 | ||
|
|
644058174e | ||
|
|
4573868c08 | ||
|
|
09166a52f8 | ||
|
|
aaac1aaca9 | ||
|
|
59898c16c6 | ||
|
|
0dacdf480b | ||
|
|
fdf9f68298 | ||
|
|
7be5e1734c | ||
|
|
bfe414670f | ||
|
|
e435a46db5 | ||
|
|
84bd881e68 | ||
|
|
a901117b8c | ||
|
|
6bccb8a8a6 | ||
|
|
3de1e0e485 | ||
|
|
492b852a1f | ||
|
|
8a137405d4 | ||
|
|
f431f5ed72 | ||
|
|
980fc9608f | ||
|
|
07be258dca | ||
|
|
dbdb29594c | ||
|
|
53d55bb92f | ||
|
|
3f3efff065 | ||
|
|
57b078f2c7 | ||
|
|
1fc6ef3d4f | ||
|
|
c2567831d9 | ||
|
|
e8671fd7c2 | ||
|
|
4950ee48a0 | ||
|
|
5fa45f3b8c | ||
|
|
3b6584cc8d | ||
|
|
7be1195281 | ||
|
|
1fae8d086d | ||
|
|
10636d8a1f | ||
|
|
c67f02eaf0 | ||
|
|
0b32f61062 | ||
|
|
2ee6c26676 | ||
|
|
a89477ddf5 | ||
|
|
2f520c8d47 | ||
|
|
33db7a0fb6 | ||
|
|
50b9897182 | ||
|
|
f8ac5538e2 | ||
|
|
1985be26b2 | ||
|
|
fdfc739b72 | ||
|
|
bde9dbc57a | ||
|
|
80510e5f16 | ||
|
|
773f20ed5e | ||
|
|
f323174d07 | ||
|
|
987589eabc | ||
|
|
1004bd86ac | ||
|
|
03f69dd394 | ||
|
|
d14c24bbf3 | ||
|
|
48dc011b2a | ||
|
|
b341810e60 | ||
|
|
46d9aee6dd | ||
|
|
36a1a7998b | ||
|
|
40498aac9d | ||
|
|
440b87094a | ||
|
|
0832dfb32e | ||
|
|
be09188bda | ||
|
|
5d2219d299 | ||
|
|
900cce20a1 | ||
|
|
36bb327024 | ||
|
|
5d9667d27a | ||
|
|
fad04ca995 | ||
|
|
074bd0dfda | ||
|
|
b41fa5e15f | ||
|
|
beceb45d23 | ||
|
|
9450edf462 | ||
|
|
785a7397f8 | ||
|
|
3d1f03c286 | ||
|
|
8ff40f52e0 | ||
|
|
6577f2ef03 | ||
|
|
41d0383fb7 | ||
|
|
1cf51b14f7 | ||
|
|
372e04f69a | ||
|
|
e2107ce45e | ||
|
|
a817cafe3d | ||
|
|
ab14df043a | ||
|
|
5feff6b1e5 | ||
|
|
06b0f62e79 | ||
|
|
40d110efe4 | ||
|
|
f23318fbcf | ||
|
|
cbab49d65f | ||
|
|
b5a3b3db66 | ||
|
|
9cafa46dd3 | ||
|
|
f6bff97d26 | ||
|
|
d04b47b3ca | ||
|
|
862199143e | ||
|
|
57e8abcb63 | ||
|
|
ed31c54961 | ||
|
|
4bfa69bffa | ||
|
|
2857fa2ef7 | ||
|
|
e681431454 | ||
|
|
5b568aa9d4 | ||
|
|
471943269c | ||
|
|
28a5e2f0e6 | ||
|
|
b4c22ce6ce | ||
|
|
5248097f90 | ||
|
|
8e2c22d0bd | ||
|
|
888f2936ad | ||
|
|
4e894bac1f | ||
|
|
f96acf6e27 | ||
|
|
be56a282f2 | ||
|
|
2459eafb71 | ||
|
|
ed681d0830 | ||
|
|
5f4eb9f9d0 | ||
|
|
d1cd5c0a73 | ||
|
|
5429c74c10 | ||
|
|
3734abed4c | ||
|
|
abf5de69fb | ||
|
|
7582dc53d2 | ||
|
|
174d7c774d | ||
|
|
a9518cc5be | ||
|
|
2f190d812a | ||
|
|
d411cf4472 | ||
|
|
1ae49b9ead | ||
|
|
0bf162f64a | ||
|
|
6423636177 | ||
|
|
b6aaee01ce | ||
|
|
3511376c2c | ||
|
|
584cfc3db2 | ||
|
|
eaa7d899f0 | ||
|
|
84cc651b46 | ||
|
|
b7243660c4 | ||
|
|
e722992439 | ||
|
|
fff1d54858 | ||
|
|
a5f29019d9 | ||
|
|
208c5380f4 | ||
|
|
29191af877 | ||
|
|
2d6066f985 | ||
|
|
3ea5e5c33a | ||
|
|
dbd7969a3e | ||
|
|
af3069073a | ||
|
|
65661f24e2 | ||
|
|
ed2eba9028 | ||
|
|
10c1590b1d | ||
|
|
114e172603 | ||
|
|
09c8380b3d | ||
|
|
ba567babf4 | ||
|
|
9403aa9bd1 | ||
|
|
34b8bbcbe4 | ||
|
|
6b36992d34 | ||
|
|
6533a4647d | ||
|
|
9c910c2049 | ||
|
|
43dc23a47d | ||
|
|
61a2bf469a | ||
|
|
fe1d46a8ea | ||
|
|
a88bb8684f | ||
|
|
c7b42148a5 | ||
|
|
bc1abb6a23 | ||
|
|
d307d48def | ||
|
|
1bb40084fc | ||
|
|
8f0efa16ca | ||
|
|
8da5fac69e | ||
|
|
e2cdb6c758 | ||
|
|
ef2c35dbb1 | ||
|
|
04a1a7c2b5 | ||
|
|
d21d70a5cf | ||
|
|
e73b778d2b | ||
|
|
723102766b | ||
|
|
a4a46a8618 | ||
|
|
6ae82e04d5 | ||
|
|
19cca11e00 | ||
|
|
c8f87a9c92 | ||
|
|
f1e884ce2b | ||
|
|
86f3124720 | ||
|
|
ae6fed15cc | ||
|
|
4b309fa8b5 | ||
|
|
378e476e48 | ||
|
|
2a1067c82b | ||
|
|
a54b81cf74 | ||
|
|
2d4236f76e | ||
|
|
166080b29c | ||
|
|
3b0910f664 | ||
|
|
e489996713 | ||
|
|
54fe363257 | ||
|
|
84ced1c497 | ||
|
|
b161312183 | ||
|
|
1dd3158c7e | ||
|
|
1f647b120a | ||
|
|
7d0a30fa8f | ||
|
|
d95e04fd1f | ||
|
|
5dd83d3cf2 | ||
|
|
14e1aac9b5 | ||
|
|
5d1c51a37f | ||
|
|
58912d4ac5 | ||
|
|
6114f69cca | ||
|
|
d6c2921f2b | ||
|
|
29ca1290b3 | ||
|
|
3fcb0cc37c | ||
|
|
61c73287dc | ||
|
|
89905ec43d | ||
|
|
aa4b102108 | ||
|
|
e4bc35151f | ||
|
|
2bfb16291f | ||
|
|
56da498b7e | ||
|
|
1bba1a62b1 | ||
|
|
d367d1cde6 | ||
|
|
4a84ca9a02 | ||
|
|
3c46f7d266 | ||
|
|
16131c3d3f | ||
|
|
a70d37a676 | ||
|
|
6892e84ad2 | ||
|
|
73f455745c | ||
|
|
021abfca18 | ||
|
|
7d66f7ff0d | ||
|
|
470b37be7e | ||
|
|
f6cfab9901 | ||
|
|
51572b5da0 | ||
|
|
91ca28b7e3 | ||
|
|
04cedce9a1 | ||
|
|
5e0d789440 | ||
|
|
d7011163b8 | ||
|
|
149e4267cd | ||
|
|
fc8a39e0f5 | ||
|
|
9a479d1b55 | ||
|
|
fc095bf054 | ||
|
|
1af06aed96 | ||
|
|
9236936a55 | ||
|
|
125152460f | ||
|
|
6d90fb0bc3 | ||
|
|
b889d5017b | ||
|
|
72b08f9cc5 | ||
|
|
681950dadd | ||
|
|
a67d9337b8 | ||
|
|
2f1182e8a9 | ||
|
|
cbb4d854ab | ||
|
|
35598d5648 | ||
|
|
5c76b9e45a | ||
|
|
0b8fea4cb4 | ||
|
|
5fa93ebdc7 | ||
|
|
8aa0aed566 | ||
|
|
2eb32a0ed7 | ||
|
|
bac9e2bfd5 | ||
|
|
e4d74ae11d | ||
|
|
8a0a8558cf | ||
|
|
2185a3b674 | ||
|
|
9e3c306a5b | ||
|
|
b1c30df8e3 | ||
|
|
69816f8691 | ||
|
|
b4ec65785d | ||
|
|
3c93644146 | ||
|
|
fb58560d15 | ||
|
|
9da80e9fda | ||
|
|
bb5a5dd65e | ||
|
|
6ab77f5eb5 | ||
|
|
4f57d7f761 | ||
|
|
1563bd3dda | ||
|
|
df3346387f | ||
|
|
77b66653ed | ||
|
|
53e1c8b268 | ||
|
|
d876686a00 | ||
|
|
7546a56736 | ||
|
|
00caf0bcd8 | ||
|
|
9634494ba9 | ||
|
|
e1ac0db05c | ||
|
|
6f3e77a2df | ||
|
|
4a20a2a8ba | ||
|
|
bc3ca5f068 | ||
|
|
fd43be8d0b | ||
|
|
836ba14b70 | ||
|
|
a14dfb769a | ||
|
|
2588fa6a8f | ||
|
|
3077fd279d | ||
|
|
f3605ddc71 | ||
|
|
6aaa4aee6a | ||
|
|
e3748da860 | ||
|
|
36e6fb5fc8 | ||
|
|
86b503f87f | ||
|
|
50a783ff01 | ||
|
|
f6ca701917 | ||
|
|
a84604dceb | ||
|
|
da9546ba24 | ||
|
|
e75d3e3584 | ||
|
|
1439eb39a9 | ||
|
|
8226a4ce4d | ||
|
|
e1a68497d6 | ||
|
|
c4615a1224 | ||
|
|
65c0d8b51f | ||
|
|
a9e256ce8c | ||
|
|
fa28dcbf32 | ||
|
|
2656320d04 | ||
|
|
7e1674e43a | ||
|
|
fc104dfb56 | ||
|
|
5d4327eb14 | ||
|
|
b4f6c4f9d5 | ||
|
|
0e514ed80b | ||
|
|
14c6c9321a | ||
|
|
386126b1b2 | ||
|
|
de0927289e | ||
|
|
edb0937024 | ||
|
|
43a4840daf | ||
|
|
5e98445b22 | ||
|
|
e617b45ba3 | ||
|
|
20283bb55b | ||
|
|
515dbf2c78 | ||
|
|
2887e280d6 | ||
|
|
8826705e71 | ||
|
|
8917afab2a | ||
|
|
49233ec26a | ||
|
|
1e1cbbee80 | ||
|
|
39a5b17d31 | ||
|
|
782a54a8a1 | ||
|
|
35a55e10aa | ||
|
|
9e80ed0fa8 | ||
|
|
5299f3dcf6 | ||
|
|
7b1564898b | ||
|
|
4e01126ff2 | ||
|
|
55b56328da | ||
|
|
ce764bf2d9 | ||
|
|
d71537d431 | ||
|
|
ae1ba45350 | ||
|
|
c4182f8c33 | ||
|
|
028f8aaa97 | ||
|
|
d3f11fdbd3 | ||
|
|
8672b2f3ec | ||
|
|
de753a149e | ||
|
|
2d4bbbf49d | ||
|
|
76d242e024 | ||
|
|
260c152166 | ||
|
|
9f4c1ef9f9 | ||
|
|
bd7fdb5e6c | ||
|
|
a381910e86 | ||
|
|
d182ef0391 | ||
|
|
7319122e92 | ||
|
|
4809fa4f19 | ||
|
|
792bef615c | ||
|
|
ee01f80dc1 | ||
|
|
98671a73f4 | ||
|
|
f33a950103 | ||
|
|
132bf34b69 | ||
|
|
01b08e1e43 | ||
|
|
000a943cce | ||
|
|
c6a456c7c7 | ||
|
|
cc2329d4fd | ||
|
|
84d0433cc3 | ||
|
|
f82e346f02 | ||
|
|
a113dd4def | ||
|
|
98f793155f | ||
|
|
a38bd413ab | ||
|
|
9e1535e203 | ||
|
|
d8e405511e | ||
|
|
037a409919 | ||
|
|
571d1479a4 | ||
|
|
ae1934f7db | ||
|
|
39e05a2dad | ||
|
|
7b46bbb628 | ||
|
|
d2527e36eb | ||
|
|
029994a83b | ||
|
|
37047919ab | ||
|
|
0b45d48e85 | ||
|
|
0c660f8335 | ||
|
|
ce9a247a9d | ||
|
|
b4bd46d067 | ||
|
|
1d8b686446 | ||
|
|
2b192f7dca | ||
|
|
979114db45 | ||
|
|
6d0152c8e2 | ||
|
|
dabed96af4 | ||
|
|
36becd972a | ||
|
|
7498035d24 | ||
|
|
39a0359dd5 | ||
|
|
49a3c43741 | ||
|
|
fa3ea5ee4d | ||
|
|
05af95dade | ||
|
|
ae680d79ed | ||
|
|
97a5c1ac1d | ||
|
|
74d35f0860 | ||
|
|
de7ff902de | ||
|
|
317f26f0bf | ||
|
|
dd96ada3c6 | ||
|
|
8f39754812 | ||
|
|
ac4371fa98 | ||
|
|
9985c4a344 | ||
|
|
fecfaae8dc | ||
|
|
9b120e68b8 | ||
|
|
377bffe281 | ||
|
|
804b6f2282 | ||
|
|
cb58daf38d | ||
|
|
a9398d210b | ||
|
|
df1c2383da | ||
|
|
ff8b1b4ae3 | ||
|
|
4cce21b125 | ||
|
|
6baf810885 | ||
|
|
9a48b2e942 | ||
|
|
c0c9c984d1 | ||
|
|
31fe017888 | ||
|
|
3fed478e4d | ||
|
|
0afc5d0b1a | ||
|
|
ba5a0d47eb | ||
|
|
be7bc658fc | ||
|
|
c89bbf5130 | ||
|
|
e59e3a9f00 | ||
|
|
6146be1474 | ||
|
|
730d2a9ad2 | ||
|
|
d008941cb3 | ||
|
|
df7a3e65ee | ||
|
|
0707f3d963 | ||
|
|
976d6fb03f | ||
|
|
f1aafbc06f | ||
|
|
7cb5444dbb | ||
|
|
3bede6e65f | ||
|
|
ad90bb4645 | ||
|
|
2220fd18ca | ||
|
|
bb3df5785a | ||
|
|
6e54eda41f | ||
|
|
df4c0adf0b | ||
|
|
7cbe4afdb8 | ||
|
|
16f150caae | ||
|
|
7229b41fc7 | ||
|
|
2cd5037878 | ||
|
|
53ee6383db | ||
|
|
a09478f374 | ||
|
|
d3c1d77a35 | ||
|
|
8824400c3e | ||
|
|
6e8eff9bb9 | ||
|
|
f5884d1608 | ||
|
|
56949a58bc | ||
|
|
7d256879c5 | ||
|
|
f9512fda58 | ||
|
|
beb63cb152 | ||
|
|
11ff73b578 | ||
|
|
0ed4a404e4 | ||
|
|
6c86501d11 | ||
|
|
2fe8932c1d | ||
|
|
0ab68aa9fb | ||
|
|
2f92b06869 | ||
|
|
03e94f9f53 | ||
|
|
606e29d390 | ||
|
|
3ecadf4aad | ||
|
|
0170d19fa7 | ||
|
|
ce1d2904c7 | ||
|
|
ea41f830fd | ||
|
|
e1a4a7b8c0 | ||
|
|
b381e8ee73 | ||
|
|
45e1429ae8 | ||
|
|
7b1d63a786 | ||
|
|
e204b4d81f | ||
|
|
325ed747d8 | ||
|
|
cbf3dba28d | ||
|
|
4329f72abf | ||
|
|
ad1cdba338 | ||
|
|
016c3915d7 | ||
|
|
79fa18132b | ||
|
|
673caf41a0 | ||
|
|
c441638fc0 | ||
|
|
ae18397ca6 | ||
|
|
426ce616c0 | ||
|
|
5cda979209 | ||
|
|
cc7e67b01a | ||
|
|
6999a9c011 | ||
|
|
bbdc8663d3 | ||
|
|
4bfeeecb05 | ||
|
|
bbc7b4aeed | ||
|
|
d3062b2e46 | ||
|
|
b7777fb46c | ||
|
|
99250ec527 | ||
|
|
dcf5f60237 | ||
|
|
399dd78b2a | ||
|
|
78d0ca3775 | ||
|
|
618a614cbf | ||
|
|
35f39ca291 | ||
|
|
f2e206700c | ||
|
|
adb77af1d9 | ||
|
|
3a34746668 | ||
|
|
fe17058700 | ||
|
|
9bee0a2071 | ||
|
|
b7f69844e1 | ||
|
|
99dc3b59bc | ||
|
|
602bf9c017 | ||
|
|
c3d1891ccd | ||
|
|
4d8f2db924 | ||
|
|
6599b366dc | ||
|
|
ba16ace697 | ||
|
|
7ade9baa15 | ||
|
|
d9e345f23d | ||
|
|
a505d992ee | ||
|
|
13262a5698 | ||
|
|
fa454b1b99 | ||
|
|
8375094c69 | ||
|
|
91079d3f15 | ||
|
|
63412a9fcc | ||
|
|
d98648f03b | ||
|
|
c37fe91672 | ||
|
|
4d40fb6b60 | ||
|
|
be3b788b8f | ||
|
|
723e54013a | ||
|
|
4d566f68b6 | ||
|
|
31f817d189 | ||
|
|
59231668c5 | ||
|
|
cadca752c4 | ||
|
|
edf215e6fd | ||
|
|
e12dd079fd | ||
|
|
04a509d45e | ||
|
|
269a659200 | ||
|
|
2c31bf46b5 | ||
|
|
5b787334c8 | ||
|
|
f761afb1ef | ||
|
|
8f6639f825 | ||
|
|
fc17d9d7df | ||
|
|
ab092e88a8 | ||
|
|
877c17251d | ||
|
|
ffe43f6098 | ||
|
|
66f49b67d6 | ||
|
|
08d6dc5227 | ||
|
|
56a1e29cdd | ||
|
|
7cea6b6fc9 | ||
|
|
4b57e80e6a | ||
|
|
0059a232a6 | ||
|
|
45676fdc8d | ||
|
|
a161fcc89b | ||
|
|
e32c5f534f | ||
|
|
426d691c95 | ||
|
|
e9a4c8ab97 | ||
|
|
a55cfebd09 | ||
|
|
34cc02f8c7 | ||
|
|
624d9fddb7 | ||
|
|
47fbe43324 | ||
|
|
1245f07a2d | ||
|
|
839975b0cf | ||
|
|
8c1233393f | ||
|
|
9cdb0568cc | ||
|
|
74e05b83ea | ||
|
|
4ded9e7d49 | ||
|
|
716272a1e2 | ||
|
|
9cc8352593 | ||
|
|
43a1031e38 | ||
|
|
a5547b2f30 | ||
|
|
b0aa23540b | ||
|
|
bece1b5201 | ||
|
|
ffaa6c4a17 | ||
|
|
fbf72f0ec4 | ||
|
|
e316a923d4 | ||
|
|
fd0370c07a | ||
|
|
909b8a8f9c | ||
|
|
316f2fee21 | ||
|
|
4a0fe3b143 | ||
|
|
a1292fac81 | ||
|
|
7f98be4f91 | ||
|
|
fd73b8875d | ||
|
|
f9ab1daa3c | ||
|
|
d27b847442 | ||
|
|
3002c7a17f | ||
|
|
dac6bc2228 | ||
|
|
4bd3dbf2ce | ||
|
|
226df1c23a | ||
|
|
2665230a09 | ||
|
|
4f0c2b794c | ||
|
|
e756064c19 | ||
|
|
17dfb0af01 | ||
|
|
ff74f517df | ||
|
|
207e09500a | ||
|
|
52c745bc62 | ||
|
|
498c6cfae9 | ||
|
|
71f8b9e473 | ||
|
|
3a31fa4768 | ||
|
|
477a9a180f | ||
|
|
da48df06d2 | ||
|
|
65e69738cc | ||
|
|
549c134bb8 | ||
|
|
d206721fc1 | ||
|
|
39fad63ccf | ||
|
|
5602d02b1b | ||
|
|
81989eed1c | ||
|
|
192efb84a0 | ||
|
|
8672347f93 | ||
|
|
5e5d4a513b | ||
|
|
88b6358472 | ||
|
|
dd8d5e2c42 | ||
|
|
d91e2328fb | ||
|
|
64795a03e3 | ||
|
|
2a16735495 | ||
|
|
292f25f9ca | ||
|
|
c92e37775a | ||
|
|
c8e2f614fa | ||
|
|
f6ed3d1456 | ||
|
|
84686753e8 | ||
|
|
91f01309da | ||
|
|
86d63f919d | ||
|
|
57a1fc9d33 | ||
|
|
c95a864975 | ||
|
|
7a83db6180 | ||
|
|
c43aa22cdb | ||
|
|
a8513da7ff | ||
|
|
d1a6303e49 | ||
|
|
53534d3956 | ||
|
|
cc07a0e295 | ||
|
|
e7bc62500b | ||
|
|
c8fb9ef3a5 | ||
|
|
eb5e6214bc | ||
|
|
568d6ee10e | ||
|
|
6aef1af76e | ||
|
|
a54852e129 | ||
|
|
668118def1 | ||
|
|
73e6b160f8 | ||
|
|
6fec141de6 | ||
|
|
31cde6c555 | ||
|
|
b1a980f344 | ||
|
|
00d9fbd220 | ||
|
|
4f4c9679bf | ||
|
|
3dab71729d | ||
|
|
2f6f758670 | ||
|
|
c0347cde85 | ||
|
|
090c8981dd | ||
|
|
2f2e76f9c6 | ||
|
|
dd7f21244b | ||
|
|
49be9d08f3 | ||
|
|
bba5b3c037 | ||
|
|
26298c4a5f | ||
|
|
fbb572948d | ||
|
|
a652b513d3 | ||
|
|
ccfeaeb22d | ||
|
|
4c12799a95 | ||
|
|
0f8d42c577 | ||
|
|
03c7578713 | ||
|
|
de6797c560 | ||
|
|
eb7d830296 | ||
|
|
02db4c7671 | ||
|
|
a05b8b56e3 | ||
|
|
eca3898410 | ||
|
|
46ae08ecb7 | ||
|
|
2028cc29b7 | ||
|
|
f6360e0bf3 | ||
|
|
9abda1bc59 | ||
|
|
2a94cc76a6 | ||
|
|
150b315a7b | ||
|
|
a07174c191 | ||
|
|
fb839ae6ca | ||
|
|
bdc426a774 | ||
|
|
32fff3798c | ||
|
|
2b02c6635d | ||
|
|
771baa66ee | ||
|
|
a82029b0cf | ||
|
|
0c2a901af4 | ||
|
|
bd18f4b8ef | ||
|
|
bf7b79f2f0 | ||
|
|
45e8598d32 | ||
|
|
8391d480c9 | ||
|
|
d17f853a5f | ||
|
|
ef5a41057f | ||
|
|
c115c9e048 | ||
|
|
6941315432 | ||
|
|
8b071cc665 | ||
|
|
959f6c538a | ||
|
|
217b3b59c0 | ||
|
|
ec916a3197 | ||
|
|
22eb72e0f9 | ||
|
|
07ba64c666 | ||
|
|
f22bc59fe3 | ||
|
|
0ce8666cc0 | ||
|
|
5427a9e422 | ||
|
|
6901b64fce | ||
|
|
5e9f5efbe3 | ||
|
|
a7a0017aa8 | ||
|
|
32c47b1509 | ||
|
|
9078b17a41 | ||
|
|
14a3694a9a | ||
|
|
b9b4db3df5 | ||
|
|
bc1d7edc58 | ||
|
|
39e430018b | ||
|
|
6549a40cf4 | ||
|
|
4e75d8fda9 | ||
|
|
5a6f60a954 | ||
|
|
a61cc2cb24 | ||
|
|
31933c8a60 | ||
|
|
78bccd032d | ||
|
|
ae21db77ec | ||
|
|
ac7503d95f | ||
|
|
69c4b17a9b | ||
|
|
a7165b0f73 | ||
|
|
cc0fca35ec | ||
|
|
8917a3ea8f | ||
|
|
dae0d5321f | ||
|
|
34415db7ed | ||
|
|
28e46e0e7c | ||
|
|
0c011b889b | ||
|
|
0962ba43c0 | ||
|
|
b8c48fb477 | ||
|
|
2a7d04fec4 | ||
|
|
7379423325 | ||
|
|
bd854e1750 | ||
|
|
74a3c74514 | ||
|
|
3d6d131889 | ||
|
|
b0569d873a | ||
|
|
d9433699db | ||
|
|
92234857f7 | ||
|
|
8efa361728 | ||
|
|
1be3eacad5 | ||
|
|
34d6b0a601 | ||
|
|
eb432a49ed | ||
|
|
04811c00cb | ||
|
|
06093d4f79 | ||
|
|
2055a60bcb | ||
|
|
cc892744bc | ||
|
|
577ee16108 | ||
|
|
392a8ac7ea | ||
|
|
226920064b | ||
|
|
19865b865f | ||
|
|
e3f812c2fe | ||
|
|
c9f79dee66 | ||
|
|
c659788022 | ||
|
|
aeb987ceb1 | ||
|
|
b478982484 | ||
|
|
fe71ee57b3 | ||
|
|
fba3d21a35 | ||
|
|
455576300c | ||
|
|
821968903c | ||
|
|
452fa53c0d | ||
|
|
95fe1e818f | ||
|
|
a61042bca0 | ||
|
|
b4abfae4de | ||
|
|
c02c8646a6 | ||
|
|
3ff2ca8d41 | ||
|
|
65fd0d15ae | ||
|
|
415840088e | ||
|
|
c4f6c89b65 | ||
|
|
539b41f421 | ||
|
|
b2ff326ced | ||
|
|
8b95d16220 | ||
|
|
c11f14f3a0 | ||
|
|
98b65e67f2 | ||
|
|
a478822b8e | ||
|
|
23aa69f56f | ||
|
|
b36f3db9de | ||
|
|
e93f086485 | ||
|
|
930e9ee55c | ||
|
|
38961ba10e | ||
|
|
93b5b7474b | ||
|
|
f862ddc9ff | ||
|
|
b59032304c | ||
|
|
3ba4d535e3 | ||
|
|
c579439c1e | ||
|
|
5b37e9aea4 | ||
|
|
46e5ac9672 | ||
|
|
1820389a05 | ||
|
|
35e3a89385 | ||
|
|
5f890e85e7 | ||
|
|
10bc7f7042 | ||
|
|
a65fd9dee8 | ||
|
|
1bb4c76deb | ||
|
|
aab44f9fc8 | ||
|
|
0a848e7578 | ||
|
|
90bce60b85 | ||
|
|
c22d51ee41 | ||
|
|
a458e684bc | ||
|
|
87b4662993 | ||
|
|
3a100339b9 | ||
|
|
47eb3c8888 | ||
|
|
4672a6fac3 | ||
|
|
82743704e4 | ||
|
|
cc2d064ab4 | ||
|
|
27214f8657 | ||
|
|
28de614dfb | ||
|
|
850183c269 | ||
|
|
2a5ef6d3f5 | ||
|
|
1d231c6cc3 | ||
|
|
20c71acb3b | ||
|
|
52ad7c6e9c | ||
|
|
5aaaffe4d1 | ||
|
|
5354ba3662 | ||
|
|
2daf13c4c8 | ||
|
|
16a90f3d3a | ||
|
|
8a0ff15242 | ||
|
|
8c993dfd35 | ||
|
|
2a6fb1e456 | ||
|
|
9e6cd36af4 | ||
|
|
f25f992a30 | ||
|
|
841d7ef2f2 | ||
|
|
a7a49be850 | ||
|
|
d5eab7da3b | ||
|
|
9b10241561 | ||
|
|
76448ab555 | ||
|
|
9584af5cb4 | ||
|
|
6fabddcb0b | ||
|
|
5efeabb0c6 | ||
|
|
806f402bba | ||
|
|
5432087d96 | ||
|
|
02cb14c7b8 | ||
|
|
9bdb45be7c | ||
|
|
514c0562e0 | ||
|
|
371275ec34 | ||
|
|
ec24a3c361 | ||
|
|
d89e797bfc | ||
|
|
55e469c7fe | ||
|
|
fb99ceacc7 | ||
|
|
daf10907e4 | ||
|
|
b3b2868f55 | ||
|
|
25b00abca1 | ||
|
|
8d0767352b | ||
|
|
918a253851 | ||
|
|
63711067e6 | ||
|
|
7158b38897 | ||
|
|
7f317b9093 | ||
|
|
7c4309ea24 | ||
|
|
5013290486 | ||
|
|
8cf3e9a620 | ||
|
|
060699c3b8 | ||
|
|
2ca6c631ac | ||
|
|
967e25878f | ||
|
|
182683814b | ||
|
|
99cbfa1567 | ||
|
|
3f8c8d70ad | ||
|
|
9c567fad92 | ||
|
|
33f58d583d | ||
|
|
0abb3a6843 | ||
|
|
3663951d11 | ||
|
|
1e169685f4 | ||
|
|
f38a3e7585 | ||
|
|
b8da5d45ce | ||
|
|
659df6e220 | ||
|
|
d601768016 | ||
|
|
16ddc6a83b | ||
|
|
340dc9cadb | ||
|
|
55fced3942 | ||
|
|
7bbf49fd65 | ||
|
|
eea6c2d02c | ||
|
|
70eaa450db | ||
|
|
55796a118d | ||
|
|
9a22d1a690 | ||
|
|
c9d21d53e6 | ||
|
|
e1015c2759 | ||
|
|
d7fa47d732 | ||
|
|
3d6e01a58f | ||
|
|
f9713e8733 | ||
|
|
0e44829720 | ||
|
|
93db889a10 | ||
|
|
0df7385c4e | ||
|
|
1a3fa6411c | ||
|
|
64614756d1 | ||
|
|
bb1fd54d4d | ||
|
|
d85288a6c0 | ||
|
|
3402acb606 | ||
|
|
7fdc25df3c | ||
|
|
ea699cbdc2 | ||
|
|
fe6a3f4267 | ||
|
|
fe8198c8cd | ||
|
|
675e61385f | ||
|
|
67acac1082 | ||
|
|
d02e1db018 | ||
|
|
0da515071b | ||
|
|
524d80ae1c | ||
|
|
3b71bc3df1 | ||
|
|
22ef9534e0 | ||
|
|
c206d12d5c | ||
|
|
6ad29a470c | ||
|
|
2d45e61a9b | ||
|
|
b98fb013ae | ||
|
|
345a965fa3 | ||
|
|
c02c120579 | ||
|
|
f0ece82111 | ||
|
|
4da681f58a | ||
|
|
68ba866c38 | ||
|
|
9622347faa | ||
|
|
8363663ea8 | ||
|
|
b588ea194c | ||
|
|
465ba76788 | ||
|
|
cf313d5761 | ||
|
|
9618cb5643 | ||
|
|
8c1958c9ad | ||
|
|
2db34139f0 | ||
|
|
9c02ab789d | ||
|
|
e0cccf6ed2 | ||
|
|
89c1a41305 | ||
|
|
202ec21bab | ||
|
|
6dcb27632e | ||
|
|
3141aa5144 | ||
|
|
5443efd7d7 | ||
|
|
62771583e7 | ||
|
|
5526f122b7 | ||
|
|
9c144587fe | ||
|
|
098bf5a1e8 | ||
|
|
4c37ca71ee | ||
|
|
0c52809591 | ||
|
|
53e730f8d5 | ||
|
|
8e248e0853 | ||
|
|
2a0758bdfe | ||
|
|
f55ba3f6c1 | ||
|
|
db51e65b42 | ||
|
|
72a2ed958b | ||
|
|
d0b91a40d4 | ||
|
|
bd74bf7994 | ||
|
|
f28d4b78e7 | ||
|
|
7536dbfee5 | ||
|
|
b76cc583fb | ||
|
|
955af6b3ec | ||
|
|
1073317a3e | ||
|
|
839ab37d40 | ||
|
|
9dd0ef187d | ||
|
|
fd8473f267 | ||
|
|
cc4910dd30 | ||
|
|
50de5d05b0 | ||
|
|
7844dc4f2d | ||
|
|
c48795a948 | ||
|
|
19b67e89a2 | ||
|
|
f017fd97c1 | ||
|
|
ce3336e3f4 | ||
|
|
54c5788b86 | ||
|
|
4cb7b26f03 | ||
|
|
3dfb62e996 | ||
|
|
d5c711d081 | ||
|
|
73b62bb15c | ||
|
|
18b8bd43ad | ||
|
|
8fffcd8091 | ||
|
|
c8e3a476fc | ||
|
|
808cee9665 | ||
|
|
92eafbc2a6 | ||
|
|
2548800c3f | ||
|
|
9dce8a5388 | ||
|
|
76484bd5c9 | ||
|
|
e4ed35fe01 | ||
|
|
f5e45c1a8a | ||
|
|
a2f83ff032 | ||
|
|
2b2f7a6dec | ||
|
|
49c15c0d44 | ||
|
|
1b938b2003 | ||
|
|
5f80760a8c | ||
|
|
dd59e872ff | ||
|
|
aa1a3b9a74 | ||
|
|
32953405b1 | ||
|
|
c1a3dd41dd | ||
|
|
63dc6a68df | ||
|
|
a39316e004 | ||
|
|
988b4d0254 | ||
|
|
f541636840 | ||
|
|
48613558d4 | ||
|
|
4b66ee2f8f | ||
|
|
abbde130ab | ||
|
|
ccb8144557 | ||
|
|
1240c78ef6 | ||
|
|
66c8b6f2bc | ||
|
|
6271a33d08 | ||
|
|
5364011a5b | ||
|
|
d78f42d2fd | ||
|
|
1a869547d7 | ||
|
|
e4bc9f6fb0 | ||
|
|
e5857161ff | ||
|
|
abdc4f39cb | ||
|
|
7ebca553ef | ||
|
|
c2962752eb | ||
|
|
ab5839b461 | ||
|
|
89a725a433 | ||
|
|
645609d441 | ||
|
|
fc4ea65936 | ||
|
|
d75cd820b0 | ||
|
|
cb3e08dda4 | ||
|
|
44a93c1922 | ||
|
|
9cba595fd0 | ||
|
|
56fc2764e4 | ||
|
|
0c4f1762c9 | ||
|
|
c2c865b0cb | ||
|
|
a66d318820 | ||
|
|
a16f72f52e | ||
|
|
99e2391b2a | ||
|
|
80c1cdf024 | ||
|
|
0fa5a6015e | ||
|
|
9d0a4f3d68 | ||
|
|
1a641392d9 | ||
|
|
36b817d008 | ||
|
|
24d19a5f78 | ||
|
|
3fb4a2b0ff | ||
|
|
0772cdda0f | ||
|
|
f6f072cb9a | ||
|
|
5265b12cc7 | ||
|
|
ff0875868e | ||
|
|
e79dbad602 | ||
|
|
6a9cc13e3e | ||
|
|
d1a6d6b1cf | ||
|
|
7a0ca05233 | ||
|
|
15884f368d | ||
|
|
b03fb9c2f6 | ||
|
|
3d4984133e | ||
|
|
13ae0ce7b0 | ||
|
|
3a67002cfe | ||
|
|
9f4d4e5adf | ||
|
|
d2fc14fb97 | ||
|
|
3730819857 | ||
|
|
297f08c683 | ||
|
|
61f556745a | ||
|
|
435f693892 | ||
|
|
72f78f8a56 | ||
|
|
2597fe78ba | ||
|
|
eb06006d6c | ||
|
|
c48dc097ff | ||
|
|
585257d340 | ||
|
|
675543240e | ||
|
|
7d1fe818be | ||
|
|
0a4641c24e | ||
|
|
11bfc807d7 | ||
|
|
e83f644c3f | ||
|
|
6b97a8be28 | ||
|
|
90798f14b5 | ||
|
|
8ae75e7f6e | ||
|
|
fc32b57798 | ||
|
|
337a188660 | ||
|
|
11d063e3c4 | ||
|
|
e846458009 | ||
|
|
2d123a11ad | ||
|
|
c2a6ca8d3a | ||
|
|
fcdf839b6b | ||
|
|
d55dd56fd2 | ||
|
|
e0d12b46d8 | ||
|
|
f3ed95d4de | ||
|
|
5baa8b5673 | ||
|
|
bb5303272b | ||
|
|
d55866d375 | ||
|
|
4b9e47cec9 | ||
|
|
7b1cf2c495 | ||
|
|
62dc0b953b | ||
|
|
7c3d5cadd5 | ||
|
|
f060db0b30 | ||
|
|
5e936fbf0e | ||
|
|
3820232241 | ||
|
|
707061efac | ||
|
|
7a06c4873e | ||
|
|
1a1e23fc76 | ||
|
|
d1c2a61d19 | ||
|
|
152d0cdec6 | ||
|
|
514f5802b5 | ||
|
|
ee9b9b3971 | ||
|
|
da1f3d61be | ||
|
|
27291f2e5f | ||
|
|
eeb1282f0c | ||
|
|
5d1badfe67 | ||
|
|
43f104bdf7 | ||
|
|
0a9c17b9d1 | ||
|
|
799b010631 | ||
|
|
2d83941aaa | ||
|
|
470abee092 | ||
|
|
39433f2a29 | ||
|
|
f6a9a0a45a | ||
|
|
5b8d4fb047 | ||
|
|
afcfbb458d | ||
|
|
8f24d239af | ||
|
|
b7a29a4bac | ||
|
|
a42105881f | ||
|
|
dc3cd62125 | ||
|
|
958ffe7a8a | ||
|
|
b46b3c5c3c | ||
|
|
fd1b14fd1d | ||
|
|
eb198e5969 | ||
|
|
70fcbd7006 | ||
|
|
b015a3bd8a | ||
|
|
bc404d4fc1 | ||
|
|
3fb43b91bf | ||
|
|
6e8188ed64 | ||
|
|
a4a0c0e2cc | ||
|
|
c7abfe67b5 | ||
|
|
db6f53e2c9 | ||
|
|
acabdc2f99 | ||
|
|
169aa4716e | ||
|
|
c0753320a0 | ||
|
|
38d875b06f | ||
|
|
1ada6cf768 | ||
|
|
2b528c5f81 | ||
|
|
f6dd4752e7 | ||
|
|
b19c7875a4 | ||
|
|
d99a3ef14b | ||
|
|
fc8fa83fcc | ||
|
|
6dcd99468b | ||
|
|
d5ba7b80d3 | ||
|
|
a3b81ef7bc | ||
|
|
015974a27e | ||
|
|
4cf756ebe6 | ||
|
|
823497a2af | ||
|
|
66fe484f0d | ||
|
|
216321aa9e | ||
|
|
5a52cb608c | ||
|
|
1181b332f7 | ||
|
|
4e3476a669 | ||
|
|
58b1777198 | ||
|
|
0c7a58fcc7 | ||
|
|
17ae51c0a0 | ||
|
|
4790aced15 | ||
|
|
3f0017d1f1 | ||
|
|
cb72262ad8 | ||
|
|
195e227c04 | ||
|
|
f5603b0780 | ||
|
|
752882a022 | ||
|
|
9731b961d0 | ||
|
|
2920409404 | ||
|
|
7dbbfc22b6 | ||
|
|
aaaa68ea7f | ||
|
|
af753de481 | ||
|
|
3956819c78 | ||
|
|
168aa57810 | ||
|
|
706af2920f | ||
|
|
4d078a8854 | ||
|
|
b6a4182904 | ||
|
|
4251a5a451 | ||
|
|
34aa77e4e1 | ||
|
|
c27d511736 | ||
|
|
d6f8ac0226 | ||
|
|
ef11abcbfd | ||
|
|
6fa704d6fc | ||
|
|
0400fcdca4 | ||
|
|
d936eb6518 | ||
|
|
3b7d0c42f1 | ||
|
|
5b1907fe61 | ||
|
|
e800af54f9 | ||
|
|
d4c2b723a5 | ||
|
|
6451b3cd83 | ||
|
|
c4628d4604 | ||
|
|
ee6d01fd1c | ||
|
|
5668736389 | ||
|
|
1aef4ce20d | ||
|
|
7c419dfc50 | ||
|
|
ce7893ee44 | ||
|
|
4c1293a74c | ||
|
|
d43599243c | ||
|
|
be60d1e7e3 | ||
|
|
fb313356f7 | ||
|
|
d20697beb3 | ||
|
|
048ed061c2 | ||
|
|
91f9d4c7a9 | ||
|
|
a60dbb5533 | ||
|
|
471b1c3eeb | ||
|
|
94750fb61f | ||
|
|
5b57313c8a | ||
|
|
794a9f969b | ||
|
|
c52c47e122 | ||
|
|
e67dbbdb8a | ||
|
|
204190f807 | ||
|
|
411ebe4d17 | ||
|
|
ee29b9428b | ||
|
|
85f53ef2dd | ||
|
|
960c09cdce | ||
|
|
b05e90e4e4 | ||
|
|
7cc7e15174 | ||
|
|
0f79c3cc0e | ||
|
|
ae3d6fd776 | ||
|
|
118ca5cf6d | ||
|
|
a11a0f289c | ||
|
|
090c9e665b | ||
|
|
c8e5455df0 | ||
|
|
fd29fe11b4 | ||
|
|
07d80f76d0 | ||
|
|
eef12cb900 | ||
|
|
06216aad53 | ||
|
|
64b52c4383 | ||
|
|
ad2ff90851 | ||
|
|
8664cff859 | ||
|
|
aa6f253374 | ||
|
|
f60f943d0c | ||
|
|
46dda58355 | ||
|
|
bfcc562c35 | ||
|
|
87426e5dda | ||
|
|
99308ab4fb | ||
|
|
d4d21d5ef3 | ||
|
|
f8e7255c32 | ||
|
|
e99063e12b | ||
|
|
5dd8b8802b | ||
|
|
27ed042c56 | ||
|
|
6708f40005 | ||
|
|
7122b3b3b6 | ||
|
|
d36392b74f | ||
|
|
7dddd06583 | ||
|
|
c86d445cb7 | ||
|
|
6c036d7b59 | ||
|
|
e78c864650 | ||
|
|
25a0d49af9 | ||
|
|
7489da49cb | ||
|
|
4df712624e | ||
|
|
73ffb58518 | ||
|
|
a4953785d9 | ||
|
|
d92e71a1f0 | ||
|
|
a8c3dfb0c1 | ||
|
|
2c06255f0e | ||
|
|
a527559526 | ||
|
|
7e6a197ddb | ||
|
|
603b361fb9 | ||
|
|
2632a7102d | ||
|
|
63453fbfa0 | ||
|
|
50f9272850 | ||
|
|
3932bf0353 | ||
|
|
ce2422324c | ||
|
|
0aa216915b | ||
|
|
60afc7f3ed | ||
|
|
1dd3521190 | ||
|
|
44785a9a8c | ||
|
|
e91fba82a8 | ||
|
|
84d6480b4e | ||
|
|
c0e296f4a9 | ||
|
|
0dc4b113d8 | ||
|
|
c8e55ab2ac | ||
|
|
fb9930004c | ||
|
|
a185ad1144 | ||
|
|
678b088a13 | ||
|
|
fac19d258d | ||
|
|
70e9329e64 | ||
|
|
600f9ce254 | ||
|
|
a11c71cea9 | ||
|
|
cc4cc806ea | ||
|
|
7fe09c8342 | ||
|
|
43d9ef7f62 | ||
|
|
482bc289bf | ||
|
|
d9b1587982 | ||
|
|
552118eb7f | ||
|
|
537af60e33 | ||
|
|
aad4163d22 | ||
|
|
cc86f94474 | ||
|
|
d505c5b2f2 | ||
|
|
71bf5b9e77 | ||
|
|
7eda43c99e | ||
|
|
81b865b89d | ||
|
|
b0d41823bd | ||
|
|
519b0b245a | ||
|
|
75e7c3dd06 | ||
|
|
691e2767a4 | ||
|
|
1f2ced896a | ||
|
|
112a2d0866 | ||
|
|
b1702de522 | ||
|
|
ff3f514f6b | ||
|
|
09da6904f5 | ||
|
|
acb718d355 | ||
|
|
26106eb0ac | ||
|
|
26438f7232 | ||
|
|
df1ef3deb6 | ||
|
|
6c86cf7605 | ||
|
|
631ba25e04 | ||
|
|
d2aaf0b491 | ||
|
|
e51a32881b | ||
|
|
1710779157 | ||
|
|
b8779764b5 | ||
|
|
681a357e07 | ||
|
|
e876d54a48 | ||
|
|
7568dc8500 | ||
|
|
25e1632628 | ||
|
|
0452f32003 | ||
|
|
9ed823fdbd | ||
|
|
45e28dd9c1 | ||
|
|
2e60a5964e | ||
|
|
ec03f82fb9 | ||
|
|
4543a6f043 | ||
|
|
45bd9ac705 | ||
|
|
8a50ca592a | ||
|
|
7fdc2b2d29 | ||
|
|
7f5ec28488 | ||
|
|
991c3ea68b | ||
|
|
b2b842bf7a | ||
|
|
bd4bf00856 | ||
|
|
68671749d8 | ||
|
|
9d3ec9e627 | ||
|
|
b1528e9dec | ||
|
|
f1fdb5d38f | ||
|
|
6cc7f9978c | ||
|
|
95d09f60f8 | ||
|
|
106e59b753 | ||
|
|
759291db02 | ||
|
|
d8e2812d80 | ||
|
|
404bf0f8d2 | ||
|
|
f44cf642bc | ||
|
|
3c3fed886f | ||
|
|
2c71c8b968 | ||
|
|
901b03b870 | ||
|
|
7331220e06 | ||
|
|
fb86002ef9 | ||
|
|
1d5e05b8ca | ||
|
|
c63192fcb5 | ||
|
|
48764e15a5 | ||
|
|
34bbfb5dd2 | ||
|
|
7df914af06 | ||
|
|
4f13c8de0d | ||
|
|
2a395d12a6 | ||
|
|
9d698d9306 | ||
|
|
b6d1e7a084 | ||
|
|
c5c12d4c8b | ||
|
|
8d252303fc | ||
|
|
712400557e | ||
|
|
dd67d53d14 | ||
|
|
eee5c0ac0b | ||
|
|
0fd1e9c5e6 | ||
|
|
d3cba34bc6 | ||
|
|
8181746695 | ||
|
|
6d01be0c30 | ||
|
|
a2f3d10bee | ||
|
|
c5781c69bb | ||
|
|
4a7e2a44d9 | ||
|
|
e1a9c1ecd9 | ||
|
|
83688c9281 | ||
|
|
06d483fa8d | ||
|
|
7e70093117 | ||
|
|
e49281774d | ||
|
|
9c88980483 | ||
|
|
34c102045a | ||
|
|
fe31495a89 | ||
|
|
592d2d0978 | ||
|
|
edee46e47f | ||
|
|
85485f1702 | ||
|
|
7f7bbdf677 | ||
|
|
312cc00d21 | ||
|
|
8e55ee0e2c | ||
|
|
2270a54ff6 | ||
|
|
bb7ade265d | ||
|
|
c5b792add5 | ||
|
|
2ccdc2b8ef | ||
|
|
c1e25b7ecf | ||
|
|
35b768b719 | ||
|
|
0b6371174e | ||
|
|
15e676e9cd | ||
|
|
2c35f0276f | ||
|
|
3fd9bd4a80 | ||
|
|
9aeef15d1b | ||
|
|
8df662d0d2 | ||
|
|
2d22623b7d | ||
|
|
59269dc1c1 | ||
|
|
81213f2324 | ||
|
|
1ef4f09df5 | ||
|
|
aac7dd6b08 | ||
|
|
0ffb3201b7 | ||
|
|
6f6dc3032c | ||
|
|
d77d0544d0 | ||
|
|
682f546c0e | ||
|
|
4368966e09 | ||
|
|
dbc0cf33a1 | ||
|
|
0f4dd9726c | ||
|
|
b18f5f8c14 | ||
|
|
db876ba75f | ||
|
|
679b21a86c | ||
|
|
5906f9ab98 | ||
|
|
820bb16ca7 | ||
|
|
a1540e27c2 | ||
|
|
d1c9889609 | ||
|
|
b7c6d040dd | ||
|
|
3d7f8e4b3a | ||
|
|
7efa8b54c4 | ||
|
|
aa4631640a | ||
|
|
4a0008df47 | ||
|
|
f284ea72fc | ||
|
|
0a4e0edc85 | ||
|
|
fa48cf27eb | ||
|
|
1c42403e6d | ||
|
|
4319cf7f31 | ||
|
|
1ecef269f7 | ||
|
|
5844ea7e6e | ||
|
|
5376786694 | ||
|
|
5cad90fb4d | ||
|
|
8cb2d3b352 | ||
|
|
ec87f39da5 | ||
|
|
3d296d8898 | ||
|
|
7e758b24c4 | ||
|
|
aacbc98aec | ||
|
|
b6fec590a7 | ||
|
|
e5a79fedac | ||
|
|
148048b035 | ||
|
|
b9a753cd04 | ||
|
|
fb883f0092 | ||
|
|
daf0e883ae | ||
|
|
a641d4a14a | ||
|
|
809ea23587 | ||
|
|
e5c314092d | ||
|
|
0ea373d9d5 | ||
|
|
64b8219245 | ||
|
|
2004230b66 | ||
|
|
0026e871f0 | ||
|
|
52e3e44008 | ||
|
|
84c009da63 | ||
|
|
b9760abe36 | ||
|
|
7b2185eb5f | ||
|
|
23ef3da0f4 | ||
|
|
d34f5a01cb | ||
|
|
e83f0ee307 | ||
|
|
bff3c66d69 | ||
|
|
2ea4dafa08 | ||
|
|
b63b338e95 | ||
|
|
19d0ee130d | ||
|
|
942c3e1529 | ||
|
|
caa8c47b68 | ||
|
|
c328b741cb | ||
|
|
57db688d7c | ||
|
|
9d1d608f4f | ||
|
|
042d82359c | ||
|
|
e85b35c6bd | ||
|
|
6e21a52271 | ||
|
|
4bbf71b7da | ||
|
|
74db0c15ae | ||
|
|
ae191f72a4 | ||
|
|
42e2c5061d | ||
|
|
380c43cb03 | ||
|
|
bc75edd800 | ||
|
|
9774339fef | ||
|
|
026740b5e5 | ||
|
|
21a04332ec | ||
|
|
eec8b4c91e | ||
|
|
ef22d6f628 | ||
|
|
58545efbd7 | ||
|
|
2bd288a677 | ||
|
|
b436da7249 | ||
|
|
a792f32d5b | ||
|
|
777e1c8f1c | ||
|
|
4dab18a94f | ||
|
|
234e98f1b3 | ||
|
|
b31bfd53ab | ||
|
|
23412965f8 | ||
|
|
6a55b153fc | ||
|
|
e847cfc8a0 | ||
|
|
5584709ac9 | ||
|
|
337d9ad755 | ||
|
|
dd247e55e9 | ||
|
|
305eaabb53 | ||
|
|
f6de36cb04 | ||
|
|
100d9d2034 | ||
|
|
e9c755f428 | ||
|
|
89b1b744f2 | ||
|
|
bbd6236385 | ||
|
|
cc4da2ae82 | ||
|
|
b415a62bf9 | ||
|
|
8ab924ad9b | ||
|
|
3c3419475d | ||
|
|
3a7d3387e0 | ||
|
|
3d617de577 | ||
|
|
1ad29032d3 | ||
|
|
c01db6b180 | ||
|
|
32b4b139a4 | ||
|
|
31fef105c7 | ||
|
|
1f5ced7069 | ||
|
|
2a70870469 | ||
|
|
9e9811cbb3 | ||
|
|
a5d6035c28 | ||
|
|
ecfad788d9 | ||
|
|
cf1d0f23cc | ||
|
|
995adaeee4 | ||
|
|
e247be6ead | ||
|
|
30b95cf5ce | ||
|
|
25b8a22648 | ||
|
|
0084da9ca5 | ||
|
|
31d4c1d2fe | ||
|
|
08ce6de4db | ||
|
|
7d4b7deea9 | ||
|
|
b6b739431c | ||
|
|
ad15d9970c | ||
|
|
ff57c860e3 | ||
|
|
635d7e77e1 | ||
|
|
ba9eb684ed | ||
|
|
9594c9c83a | ||
|
|
ff06583c5d | ||
|
|
b0389ca4d2 | ||
|
|
1d085d982b | ||
|
|
fb9d087838 | ||
|
|
18c6686fed | ||
|
|
6648e6506c | ||
|
|
386f6da14d | ||
|
|
d895a2c469 | ||
|
|
5f2d81d154 | ||
|
|
4e3499c0d7 | ||
|
|
26cdb1805d | ||
|
|
506cb21cb1 | ||
|
|
fd51ff6970 | ||
|
|
295d71be0a | ||
|
|
9bbe468c91 | ||
|
|
fbdff4f34f | ||
|
|
0aa480283f | ||
|
|
cd9d31f5f2 | ||
|
|
cbfce49aa1 | ||
|
|
1d1da7362b | ||
|
|
a8c173f043 | ||
|
|
97ab649d16 | ||
|
|
d3e73f1260 | ||
|
|
f3da4b202e | ||
|
|
530f6ad81c | ||
|
|
3252c378aa | ||
|
|
b5ca6a654c | ||
|
|
94749b12ac | ||
|
|
523fa9f71e | ||
|
|
54636781ea | ||
|
|
5187db5ee5 | ||
|
|
0b9c4ae69e | ||
|
|
0d5a8a95c8 | ||
|
|
9cd97c9e1d | ||
|
|
d521191e87 | ||
|
|
fd78993b91 | ||
|
|
937b1fb05d | ||
|
|
80cce858cb | ||
|
|
7bdb0e6b12 | ||
|
|
0743652d92 | ||
|
|
96bec5c9b1 | ||
|
|
cfeb6b8b14 | ||
|
|
481310dea0 | ||
|
|
ea2821d11d | ||
|
|
7a0de1765f | ||
|
|
17c3cb2403 | ||
|
|
35b1bc3753 | ||
|
|
8d38788672 | ||
|
|
c615a4264d | ||
|
|
227d506c53 | ||
|
|
36a86e9ab4 | ||
|
|
f133b051dc | ||
|
|
7af1bdbf4c | ||
|
|
016d7ef645 | ||
|
|
f1e47291cd | ||
|
|
d7e9ae38e4 | ||
|
|
88be981afc | ||
|
|
3f92a43170 | ||
|
|
2101f1d1c8 | ||
|
|
f0f920e49f | ||
|
|
95583fce83 | ||
|
|
a413fa3b17 | ||
|
|
3a8dbf5a99 | ||
|
|
254f12543c | ||
|
|
cf8a64528c | ||
|
|
2b79c4e8b7 | ||
|
|
429f38d0c9 | ||
|
|
2714be99a9 | ||
|
|
d851818035 | ||
|
|
576bf4639c | ||
|
|
9db52838b5 | ||
|
|
bfcd9501c2 | ||
|
|
12252c6005 | ||
|
|
2d89f36687 | ||
|
|
3d608c2625 | ||
|
|
739d0ee61e | ||
|
|
22f07a7bb6 | ||
|
|
16eec4eb41 | ||
|
|
ecb2c5353c | ||
|
|
06d5876b02 | ||
|
|
e5a77853b0 | ||
|
|
9780f0fd9d | ||
|
|
3559830882 | ||
|
|
5594680130 | ||
|
|
50855ec15f | ||
|
|
f9f33e7b5c | ||
|
|
1bec35999b | ||
|
|
632318ad33 | ||
|
|
456e8984b0 | ||
|
|
eea949853a | ||
|
|
85fd1e4a2c | ||
|
|
6682d06c99 | ||
|
|
efa470efc7 | ||
|
|
79d1585250 | ||
|
|
2d1a15b196 | ||
|
|
09431cfc0b | ||
|
|
46cb82bac0 | ||
|
|
b2d71da2a2 | ||
|
|
2d6e1d26c0 | ||
|
|
50734c5edc | ||
|
|
040dc27ea5 | ||
|
|
d7090de0e0 | ||
|
|
cab681c7d1 | ||
|
|
01f990a5c9 | ||
|
|
5763f5ced3 | ||
|
|
f79b0f0fad | ||
|
|
34183b527b | ||
|
|
bceed08fc3 | ||
|
|
5deef27e1d | ||
|
|
1ac8b1f03e | ||
|
|
0b30cc2b7e | ||
|
|
03a8ae62e5 | ||
|
|
e36fb98fb9 | ||
|
|
55258bf099 | ||
|
|
dc109827b7 | ||
|
|
71c28e436a | ||
|
|
2bafc28a9b | ||
|
|
aea48ae1ab | ||
|
|
b3463769dc | ||
|
|
d9e6cfc44d | ||
|
|
57fd172287 | ||
|
|
8d7a497553 | ||
|
|
b31698b9f2 | ||
|
|
eeaff85e47 | ||
|
|
f51ad2e126 | ||
|
|
f57f12c6cc | ||
|
|
5fca2d10b9 | ||
|
|
8fbe1ad70d | ||
|
|
25a304c231 | ||
|
|
9d30ceae8d | ||
|
|
60f6ed6bf6 | ||
|
|
4a2f7d4a99 | ||
|
|
c19a393be9 | ||
|
|
938ffb002e | ||
|
|
372a01290b | ||
|
|
8b163ca49b | ||
|
|
d23810dc53 | ||
|
|
62ed5422dd | ||
|
|
2e76302af7 | ||
|
|
6553828008 | ||
|
|
adcb7bf00e |
@@ -61,6 +61,9 @@ temp/
|
|||||||
deploy/install.sh
|
deploy/install.sh
|
||||||
deploy/sub2api.service
|
deploy/sub2api.service
|
||||||
deploy/sub2api-sudoers
|
deploy/sub2api-sudoers
|
||||||
|
deploy/data/
|
||||||
|
deploy/postgres_data/
|
||||||
|
deploy/redis_data/
|
||||||
|
|
||||||
# GoReleaser
|
# GoReleaser
|
||||||
.goreleaser.yaml
|
.goreleaser.yaml
|
||||||
|
|||||||
22
.gitattributes
vendored
Normal file
@@ -0,0 +1,22 @@
|
|||||||
|
# 确保所有 SQL 迁移文件使用 LF 换行符
|
||||||
|
backend/migrations/*.sql text eol=lf
|
||||||
|
|
||||||
|
# Go 源代码文件
|
||||||
|
*.go text eol=lf
|
||||||
|
|
||||||
|
# 前端 源代码文件
|
||||||
|
*.ts text eol=lf
|
||||||
|
*.tsx text eol=lf
|
||||||
|
*.js text eol=lf
|
||||||
|
*.jsx text eol=lf
|
||||||
|
*.vue text eol=lf
|
||||||
|
|
||||||
|
# Shell 脚本
|
||||||
|
*.sh text eol=lf
|
||||||
|
|
||||||
|
# YAML/YML 配置文件
|
||||||
|
*.yaml text eol=lf
|
||||||
|
*.yml text eol=lf
|
||||||
|
|
||||||
|
# Dockerfile
|
||||||
|
Dockerfile text eol=lf
|
||||||
37
.github/audit-exceptions.yml
vendored
Normal file
@@ -0,0 +1,37 @@
|
|||||||
|
version: 1
|
||||||
|
exceptions:
|
||||||
|
- package: xlsx
|
||||||
|
advisory: "GHSA-4r6h-8v6p-xvw6"
|
||||||
|
severity: high
|
||||||
|
reason: "Admin export only; switched to dynamic import to reduce exposure (CVE-2023-30533)"
|
||||||
|
mitigation: "Load only on export; restrict export permissions and data scope"
|
||||||
|
expires_on: "2026-07-06"
|
||||||
|
owner: "security@your-domain"
|
||||||
|
- package: xlsx
|
||||||
|
advisory: "GHSA-5pgg-2g8v-p4x9"
|
||||||
|
severity: high
|
||||||
|
reason: "Admin export only; switched to dynamic import to reduce exposure (CVE-2024-22363)"
|
||||||
|
mitigation: "Load only on export; restrict export permissions and data scope"
|
||||||
|
expires_on: "2026-07-06"
|
||||||
|
owner: "security@your-domain"
|
||||||
|
- package: lodash
|
||||||
|
advisory: "GHSA-r5fr-rjxr-66jc"
|
||||||
|
severity: high
|
||||||
|
reason: "lodash _.template not used with untrusted input; only internal admin UI templates"
|
||||||
|
mitigation: "No user-controlled template strings; plan to migrate to lodash-es tree-shaken imports"
|
||||||
|
expires_on: "2026-07-02"
|
||||||
|
owner: "security@your-domain"
|
||||||
|
- package: lodash-es
|
||||||
|
advisory: "GHSA-r5fr-rjxr-66jc"
|
||||||
|
severity: high
|
||||||
|
reason: "lodash-es _.template not used with untrusted input; only internal admin UI templates"
|
||||||
|
mitigation: "No user-controlled template strings; plan to migrate to native JS alternatives"
|
||||||
|
expires_on: "2026-07-02"
|
||||||
|
owner: "security@your-domain"
|
||||||
|
- package: axios
|
||||||
|
advisory: "GHSA-3p68-rc4w-qgx5"
|
||||||
|
severity: critical
|
||||||
|
reason: "NO_PROXY bypass not exploitable; all API calls go to known endpoints via server-side proxy"
|
||||||
|
mitigation: "Proxy configuration not user-controlled; upgrade when axios releases fix"
|
||||||
|
expires_on: "2026-07-10"
|
||||||
|
owner: "security@your-domain"
|
||||||
51
.github/workflows/backend-ci.yml
vendored
@@ -11,28 +11,59 @@ jobs:
|
|||||||
test:
|
test:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v6
|
||||||
- uses: actions/setup-go@v5
|
- uses: actions/setup-go@v6
|
||||||
with:
|
with:
|
||||||
go-version-file: backend/go.mod
|
go-version-file: backend/go.mod
|
||||||
check-latest: true
|
check-latest: false
|
||||||
cache: true
|
cache: true
|
||||||
- name: Run tests
|
cache-dependency-path: backend/go.sum
|
||||||
|
- name: Verify Go version
|
||||||
|
run: |
|
||||||
|
go version | grep -q 'go1.26.2'
|
||||||
|
- name: Unit tests
|
||||||
working-directory: backend
|
working-directory: backend
|
||||||
run: go test ./...
|
run: make test-unit
|
||||||
|
- name: Integration tests
|
||||||
|
working-directory: backend
|
||||||
|
run: make test-integration
|
||||||
|
|
||||||
|
frontend:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v6
|
||||||
|
- name: Setup pnpm
|
||||||
|
uses: pnpm/action-setup@v4
|
||||||
|
with:
|
||||||
|
version: 9
|
||||||
|
- name: Setup Node.js
|
||||||
|
uses: actions/setup-node@v6
|
||||||
|
with:
|
||||||
|
node-version: '20'
|
||||||
|
cache: 'pnpm'
|
||||||
|
cache-dependency-path: frontend/pnpm-lock.yaml
|
||||||
|
- name: Install frontend dependencies
|
||||||
|
working-directory: frontend
|
||||||
|
run: pnpm install --frozen-lockfile
|
||||||
|
- name: Frontend typecheck and critical vitest
|
||||||
|
run: make test-frontend
|
||||||
|
|
||||||
golangci-lint:
|
golangci-lint:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v6
|
||||||
- uses: actions/setup-go@v5
|
- uses: actions/setup-go@v6
|
||||||
with:
|
with:
|
||||||
go-version-file: backend/go.mod
|
go-version-file: backend/go.mod
|
||||||
check-latest: true
|
check-latest: false
|
||||||
cache: true
|
cache: true
|
||||||
|
cache-dependency-path: backend/go.sum
|
||||||
|
- name: Verify Go version
|
||||||
|
run: |
|
||||||
|
go version | grep -q 'go1.26.2'
|
||||||
- name: golangci-lint
|
- name: golangci-lint
|
||||||
uses: golangci/golangci-lint-action@v9
|
uses: golangci/golangci-lint-action@v9
|
||||||
with:
|
with:
|
||||||
version: v2.7
|
version: v2.9
|
||||||
args: --timeout=5m
|
args: --timeout=30m
|
||||||
working-directory: backend
|
working-directory: backend
|
||||||
|
|||||||
59
.github/workflows/cla.yml
vendored
Normal file
@@ -0,0 +1,59 @@
|
|||||||
|
name: "CLA Assistant"
|
||||||
|
|
||||||
|
on:
|
||||||
|
issue_comment:
|
||||||
|
types: [created]
|
||||||
|
pull_request_target:
|
||||||
|
types: [opened, reopened, closed, synchronize]
|
||||||
|
|
||||||
|
permissions:
|
||||||
|
actions: write
|
||||||
|
contents: write
|
||||||
|
pull-requests: write
|
||||||
|
statuses: write
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
cla-check:
|
||||||
|
if: |
|
||||||
|
github.event_name == 'issue_comment' ||
|
||||||
|
(github.event_name == 'pull_request_target' && github.event.action != 'closed')
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- name: "CLA Assistant"
|
||||||
|
if: |
|
||||||
|
(github.event.comment.body == 'recheck' ||
|
||||||
|
github.event.comment.body == 'I have read the CLA Document and I hereby sign the CLA') ||
|
||||||
|
github.event_name == 'pull_request_target'
|
||||||
|
uses: contributor-assistant/github-action@v2.6.1
|
||||||
|
env:
|
||||||
|
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
with:
|
||||||
|
path-to-signatures: "cla.json"
|
||||||
|
path-to-document: "https://github.com/Wei-Shaw/sub2api/blob/main/CLA.md"
|
||||||
|
branch: "cla-signatures"
|
||||||
|
allowlist: "dependabot[bot],renovate[bot],bot*"
|
||||||
|
lock-pullrequest-aftermerge: false
|
||||||
|
custom-notsigned-prcomment: |
|
||||||
|
Thank you for your contribution! Before we can merge this PR, we need you to sign our [Contributor License Agreement (CLA)](https://github.com/Wei-Shaw/sub2api/blob/main/CLA.md).
|
||||||
|
|
||||||
|
**To sign**, please reply with the following comment:
|
||||||
|
|
||||||
|
> I have read the CLA Document and I hereby sign the CLA
|
||||||
|
|
||||||
|
You only need to sign once — it will be valid for all your future contributions to this project.
|
||||||
|
custom-pr-sign-comment: "I have read the CLA Document and I hereby sign the CLA"
|
||||||
|
custom-allsigned-prcomment: "All contributors have signed the CLA. ✅"
|
||||||
|
|
||||||
|
cla-lock:
|
||||||
|
if: github.event_name == 'pull_request_target' && github.event.action == 'closed' && github.event.pull_request.merged == true
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- name: "Lock merged PR"
|
||||||
|
uses: contributor-assistant/github-action@v2.6.1
|
||||||
|
env:
|
||||||
|
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
with:
|
||||||
|
path-to-signatures: "cla.json"
|
||||||
|
path-to-document: "https://github.com/Wei-Shaw/sub2api/blob/main/CLA.md"
|
||||||
|
branch: "cla-signatures"
|
||||||
|
lock-pullrequest-aftermerge: true
|
||||||
293
.github/workflows/release.yml
vendored
@@ -4,6 +4,22 @@ on:
|
|||||||
push:
|
push:
|
||||||
tags:
|
tags:
|
||||||
- 'v*'
|
- 'v*'
|
||||||
|
workflow_dispatch:
|
||||||
|
inputs:
|
||||||
|
tag:
|
||||||
|
description: 'Tag to release (e.g., v1.0.0)'
|
||||||
|
required: true
|
||||||
|
type: string
|
||||||
|
simple_release:
|
||||||
|
description: 'Simple release: only x86_64 GHCR image, skip other artifacts'
|
||||||
|
required: false
|
||||||
|
type: boolean
|
||||||
|
default: false
|
||||||
|
|
||||||
|
# 环境变量:合并 workflow_dispatch 输入和 repository variable
|
||||||
|
# tag push 触发时读取 vars.SIMPLE_RELEASE,workflow_dispatch 时使用输入参数
|
||||||
|
env:
|
||||||
|
SIMPLE_RELEASE: ${{ github.event.inputs.simple_release == 'true' || vars.SIMPLE_RELEASE == 'true' }}
|
||||||
|
|
||||||
permissions:
|
permissions:
|
||||||
contents: write
|
contents: write
|
||||||
@@ -15,16 +31,21 @@ jobs:
|
|||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v6
|
||||||
|
|
||||||
- name: Update VERSION file
|
- name: Update VERSION file
|
||||||
run: |
|
run: |
|
||||||
VERSION=${GITHUB_REF#refs/tags/v}
|
if [ "${{ github.event_name }}" = "workflow_dispatch" ]; then
|
||||||
|
VERSION=${{ github.event.inputs.tag }}
|
||||||
|
VERSION=${VERSION#v}
|
||||||
|
else
|
||||||
|
VERSION=${GITHUB_REF#refs/tags/v}
|
||||||
|
fi
|
||||||
echo "$VERSION" > backend/cmd/server/VERSION
|
echo "$VERSION" > backend/cmd/server/VERSION
|
||||||
echo "Updated VERSION file to: $VERSION"
|
echo "Updated VERSION file to: $VERSION"
|
||||||
|
|
||||||
- name: Upload VERSION artifact
|
- name: Upload VERSION artifact
|
||||||
uses: actions/upload-artifact@v4
|
uses: actions/upload-artifact@v7
|
||||||
with:
|
with:
|
||||||
name: version-file
|
name: version-file
|
||||||
path: backend/cmd/server/VERSION
|
path: backend/cmd/server/VERSION
|
||||||
@@ -34,25 +55,30 @@ jobs:
|
|||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v6
|
||||||
|
|
||||||
|
- name: Setup pnpm
|
||||||
|
uses: pnpm/action-setup@v4
|
||||||
|
with:
|
||||||
|
version: 9
|
||||||
|
|
||||||
- name: Setup Node.js
|
- name: Setup Node.js
|
||||||
uses: actions/setup-node@v4
|
uses: actions/setup-node@v6
|
||||||
with:
|
with:
|
||||||
node-version: '20'
|
node-version: '20'
|
||||||
cache: 'npm'
|
cache: 'pnpm'
|
||||||
cache-dependency-path: frontend/package-lock.json
|
cache-dependency-path: frontend/pnpm-lock.yaml
|
||||||
|
|
||||||
- name: Install dependencies
|
- name: Install dependencies
|
||||||
run: npm ci
|
run: pnpm install --frozen-lockfile
|
||||||
working-directory: frontend
|
working-directory: frontend
|
||||||
|
|
||||||
- name: Build frontend
|
- name: Build frontend
|
||||||
run: npm run build
|
run: pnpm run build
|
||||||
working-directory: frontend
|
working-directory: frontend
|
||||||
|
|
||||||
- name: Upload frontend artifact
|
- name: Upload frontend artifact
|
||||||
uses: actions/upload-artifact@v4
|
uses: actions/upload-artifact@v7
|
||||||
with:
|
with:
|
||||||
name: frontend-dist
|
name: frontend-dist
|
||||||
path: backend/internal/web/dist/
|
path: backend/internal/web/dist/
|
||||||
@@ -63,28 +89,57 @@ jobs:
|
|||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v6
|
||||||
with:
|
with:
|
||||||
fetch-depth: 0
|
fetch-depth: 0
|
||||||
|
ref: ${{ github.event.inputs.tag || github.ref }}
|
||||||
|
|
||||||
- name: Download VERSION artifact
|
- name: Download VERSION artifact
|
||||||
uses: actions/download-artifact@v4
|
uses: actions/download-artifact@v8
|
||||||
with:
|
with:
|
||||||
name: version-file
|
name: version-file
|
||||||
path: backend/cmd/server/
|
path: backend/cmd/server/
|
||||||
|
|
||||||
- name: Download frontend artifact
|
- name: Download frontend artifact
|
||||||
uses: actions/download-artifact@v4
|
uses: actions/download-artifact@v8
|
||||||
with:
|
with:
|
||||||
name: frontend-dist
|
name: frontend-dist
|
||||||
path: backend/internal/web/dist/
|
path: backend/internal/web/dist/
|
||||||
|
|
||||||
- name: Setup Go
|
- name: Setup Go
|
||||||
uses: actions/setup-go@v5
|
uses: actions/setup-go@v6
|
||||||
with:
|
with:
|
||||||
go-version: '1.24'
|
go-version-file: backend/go.mod
|
||||||
|
check-latest: false
|
||||||
cache-dependency-path: backend/go.sum
|
cache-dependency-path: backend/go.sum
|
||||||
|
|
||||||
|
- name: Verify Go version
|
||||||
|
run: |
|
||||||
|
go version | grep -q 'go1.26.2'
|
||||||
|
|
||||||
|
# Docker setup for GoReleaser
|
||||||
|
- name: Set up QEMU
|
||||||
|
uses: docker/setup-qemu-action@v3
|
||||||
|
|
||||||
|
- name: Set up Docker Buildx
|
||||||
|
uses: docker/setup-buildx-action@v3
|
||||||
|
|
||||||
|
- name: Login to DockerHub
|
||||||
|
if: ${{ env.DOCKERHUB_USERNAME != '' }}
|
||||||
|
uses: docker/login-action@v3
|
||||||
|
env:
|
||||||
|
DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||||
|
with:
|
||||||
|
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||||
|
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||||
|
|
||||||
|
- name: Login to GitHub Container Registry
|
||||||
|
uses: docker/login-action@v3
|
||||||
|
with:
|
||||||
|
registry: ghcr.io
|
||||||
|
username: ${{ github.repository_owner }}
|
||||||
|
password: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
|
||||||
- name: Fetch tags with annotations
|
- name: Fetch tags with annotations
|
||||||
run: |
|
run: |
|
||||||
# 确保获取完整的 annotated tag 信息
|
# 确保获取完整的 annotated tag 信息
|
||||||
@@ -93,7 +148,11 @@ jobs:
|
|||||||
- name: Get tag message
|
- name: Get tag message
|
||||||
id: tag_message
|
id: tag_message
|
||||||
run: |
|
run: |
|
||||||
TAG_NAME=${GITHUB_REF#refs/tags/}
|
if [ "${{ github.event_name }}" = "workflow_dispatch" ]; then
|
||||||
|
TAG_NAME=${{ github.event.inputs.tag }}
|
||||||
|
else
|
||||||
|
TAG_NAME=${GITHUB_REF#refs/tags/}
|
||||||
|
fi
|
||||||
echo "Processing tag: $TAG_NAME"
|
echo "Processing tag: $TAG_NAME"
|
||||||
|
|
||||||
# 获取完整的 tag message(跳过第一行标题)
|
# 获取完整的 tag message(跳过第一行标题)
|
||||||
@@ -109,95 +168,139 @@ jobs:
|
|||||||
echo "$TAG_MESSAGE" >> $GITHUB_OUTPUT
|
echo "$TAG_MESSAGE" >> $GITHUB_OUTPUT
|
||||||
echo "EOF" >> $GITHUB_OUTPUT
|
echo "EOF" >> $GITHUB_OUTPUT
|
||||||
|
|
||||||
|
- name: Set lowercase owner for GHCR
|
||||||
|
id: lowercase
|
||||||
|
run: echo "owner=$(echo '${{ github.repository_owner }}' | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT
|
||||||
|
|
||||||
- name: Run GoReleaser
|
- name: Run GoReleaser
|
||||||
uses: goreleaser/goreleaser-action@v6
|
uses: goreleaser/goreleaser-action@v7
|
||||||
with:
|
with:
|
||||||
version: '~> v2'
|
version: '~> v2'
|
||||||
args: release --clean --skip=validate
|
args: release --clean --skip=validate ${{ env.SIMPLE_RELEASE == 'true' && '--config=.goreleaser.simple.yaml' || '' }}
|
||||||
env:
|
env:
|
||||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||||
TAG_MESSAGE: ${{ steps.tag_message.outputs.message }}
|
TAG_MESSAGE: ${{ steps.tag_message.outputs.message }}
|
||||||
|
GITHUB_REPO_OWNER: ${{ github.repository_owner }}
|
||||||
|
GITHUB_REPO_OWNER_LOWER: ${{ steps.lowercase.outputs.owner }}
|
||||||
|
GITHUB_REPO_NAME: ${{ github.event.repository.name }}
|
||||||
|
DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME || 'skip' }}
|
||||||
|
|
||||||
# ===========================================================================
|
# Update DockerHub description
|
||||||
# Docker Build and Push
|
|
||||||
# ===========================================================================
|
|
||||||
docker:
|
|
||||||
needs: [update-version, build-frontend]
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- name: Checkout
|
|
||||||
uses: actions/checkout@v4
|
|
||||||
|
|
||||||
- name: Download VERSION artifact
|
|
||||||
uses: actions/download-artifact@v4
|
|
||||||
with:
|
|
||||||
name: version-file
|
|
||||||
path: backend/cmd/server/
|
|
||||||
|
|
||||||
- name: Download frontend artifact
|
|
||||||
uses: actions/download-artifact@v4
|
|
||||||
with:
|
|
||||||
name: frontend-dist
|
|
||||||
path: backend/internal/web/dist/
|
|
||||||
|
|
||||||
# Extract version from tag
|
|
||||||
- name: Extract version
|
|
||||||
id: version
|
|
||||||
run: |
|
|
||||||
VERSION=${GITHUB_REF#refs/tags/v}
|
|
||||||
echo "version=$VERSION" >> $GITHUB_OUTPUT
|
|
||||||
echo "Version: $VERSION"
|
|
||||||
|
|
||||||
# Set up Docker Buildx for multi-platform builds
|
|
||||||
- name: Set up QEMU
|
|
||||||
uses: docker/setup-qemu-action@v3
|
|
||||||
|
|
||||||
- name: Set up Docker Buildx
|
|
||||||
uses: docker/setup-buildx-action@v3
|
|
||||||
|
|
||||||
# Login to DockerHub
|
|
||||||
- name: Login to DockerHub
|
|
||||||
uses: docker/login-action@v3
|
|
||||||
with:
|
|
||||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
|
||||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
|
||||||
|
|
||||||
# Extract metadata for Docker
|
|
||||||
- name: Extract Docker metadata
|
|
||||||
id: meta
|
|
||||||
uses: docker/metadata-action@v5
|
|
||||||
with:
|
|
||||||
images: |
|
|
||||||
weishaw/sub2api
|
|
||||||
tags: |
|
|
||||||
type=semver,pattern={{version}}
|
|
||||||
type=semver,pattern={{major}}.{{minor}}
|
|
||||||
type=semver,pattern={{major}}
|
|
||||||
type=raw,value=latest,enable={{is_default_branch}}
|
|
||||||
|
|
||||||
# Build and push Docker image
|
|
||||||
- name: Build and push Docker image
|
|
||||||
uses: docker/build-push-action@v5
|
|
||||||
with:
|
|
||||||
context: .
|
|
||||||
file: ./Dockerfile
|
|
||||||
platforms: linux/amd64,linux/arm64
|
|
||||||
push: true
|
|
||||||
tags: ${{ steps.meta.outputs.tags }}
|
|
||||||
labels: ${{ steps.meta.outputs.labels }}
|
|
||||||
build-args: |
|
|
||||||
VERSION=${{ steps.version.outputs.version }}
|
|
||||||
COMMIT=${{ github.sha }}
|
|
||||||
DATE=${{ github.event.head_commit.timestamp }}
|
|
||||||
cache-from: type=gha
|
|
||||||
cache-to: type=gha,mode=max
|
|
||||||
|
|
||||||
# Update DockerHub description (optional)
|
|
||||||
- name: Update DockerHub description
|
- name: Update DockerHub description
|
||||||
uses: peter-evans/dockerhub-description@v4
|
if: ${{ env.SIMPLE_RELEASE != 'true' && env.DOCKERHUB_USERNAME != '' }}
|
||||||
|
uses: peter-evans/dockerhub-description@v5
|
||||||
|
env:
|
||||||
|
DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||||
with:
|
with:
|
||||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||||
repository: weishaw/sub2api
|
repository: ${{ secrets.DOCKERHUB_USERNAME }}/sub2api
|
||||||
short-description: "Sub2API - AI API Gateway Platform"
|
short-description: "Sub2API - AI API Gateway Platform"
|
||||||
readme-filepath: ./deploy/DOCKER.md
|
readme-filepath: ./deploy/DOCKER.md
|
||||||
|
|
||||||
|
# Send Telegram notification
|
||||||
|
- name: Send Telegram Notification
|
||||||
|
if: ${{ env.SIMPLE_RELEASE != 'true' }}
|
||||||
|
env:
|
||||||
|
TELEGRAM_BOT_TOKEN: ${{ secrets.TELEGRAM_BOT_TOKEN }}
|
||||||
|
TELEGRAM_CHAT_ID: ${{ secrets.TELEGRAM_CHAT_ID }}
|
||||||
|
DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||||
|
continue-on-error: true
|
||||||
|
run: |
|
||||||
|
# 检查必要的环境变量
|
||||||
|
if [ -z "$TELEGRAM_BOT_TOKEN" ] || [ -z "$TELEGRAM_CHAT_ID" ]; then
|
||||||
|
echo "Telegram credentials not configured, skipping notification"
|
||||||
|
exit 0
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ "${{ github.event_name }}" = "workflow_dispatch" ]; then
|
||||||
|
TAG_NAME=${{ github.event.inputs.tag }}
|
||||||
|
else
|
||||||
|
TAG_NAME=${GITHUB_REF#refs/tags/}
|
||||||
|
fi
|
||||||
|
VERSION=${TAG_NAME#v}
|
||||||
|
REPO="${{ github.repository }}"
|
||||||
|
GHCR_IMAGE="ghcr.io/${REPO,,}" # ${,,} converts to lowercase
|
||||||
|
|
||||||
|
# 获取 tag message 内容并转义 Markdown 特殊字符
|
||||||
|
TAG_MESSAGE='${{ steps.tag_message.outputs.message }}'
|
||||||
|
TAG_MESSAGE=$(echo "$TAG_MESSAGE" | sed 's/\([_*`\[]\)/\\\1/g')
|
||||||
|
|
||||||
|
# 限制消息长度(Telegram 消息限制 4096 字符,预留空间给头尾固定内容)
|
||||||
|
if [ ${#TAG_MESSAGE} -gt 3500 ]; then
|
||||||
|
TAG_MESSAGE="${TAG_MESSAGE:0:3500}..."
|
||||||
|
fi
|
||||||
|
|
||||||
|
# 构建消息内容
|
||||||
|
MESSAGE="🚀 *Sub2API 新版本发布!*"$'\n'$'\n'
|
||||||
|
MESSAGE+="📦 版本号: \`${VERSION}\`"$'\n'$'\n'
|
||||||
|
|
||||||
|
# 添加更新内容
|
||||||
|
if [ -n "$TAG_MESSAGE" ]; then
|
||||||
|
MESSAGE+="${TAG_MESSAGE}"$'\n'$'\n'
|
||||||
|
fi
|
||||||
|
|
||||||
|
MESSAGE+="🐳 *Docker 部署:*"$'\n'
|
||||||
|
MESSAGE+="\`\`\`bash"$'\n'
|
||||||
|
# 根据是否配置 DockerHub 动态生成
|
||||||
|
if [ -n "$DOCKERHUB_USERNAME" ]; then
|
||||||
|
DOCKER_IMAGE="${DOCKERHUB_USERNAME}/sub2api"
|
||||||
|
MESSAGE+="# Docker Hub"$'\n'
|
||||||
|
MESSAGE+="docker pull ${DOCKER_IMAGE}:${VERSION}"$'\n'
|
||||||
|
MESSAGE+="# GitHub Container Registry"$'\n'
|
||||||
|
fi
|
||||||
|
MESSAGE+="docker pull ${GHCR_IMAGE}:${VERSION}"$'\n'
|
||||||
|
MESSAGE+="\`\`\`"$'\n'$'\n'
|
||||||
|
MESSAGE+="🔗 *相关链接:*"$'\n'
|
||||||
|
MESSAGE+="• [GitHub Release](https://github.com/${REPO}/releases/tag/${TAG_NAME})"$'\n'
|
||||||
|
if [ -n "$DOCKERHUB_USERNAME" ]; then
|
||||||
|
MESSAGE+="• [Docker Hub](https://hub.docker.com/r/${DOCKER_IMAGE})"$'\n'
|
||||||
|
fi
|
||||||
|
MESSAGE+="• [GitHub Packages](https://github.com/${REPO}/pkgs/container/sub2api)"$'\n'$'\n'
|
||||||
|
MESSAGE+="#Sub2API #Release #${TAG_NAME//./_}"
|
||||||
|
|
||||||
|
# 发送消息
|
||||||
|
curl -s -X POST "https://api.telegram.org/bot${TELEGRAM_BOT_TOKEN}/sendMessage" \
|
||||||
|
-H "Content-Type: application/json" \
|
||||||
|
-d "$(jq -n \
|
||||||
|
--arg chat_id "${TELEGRAM_CHAT_ID}" \
|
||||||
|
--arg text "${MESSAGE}" \
|
||||||
|
'{
|
||||||
|
chat_id: $chat_id,
|
||||||
|
text: $text,
|
||||||
|
parse_mode: "Markdown",
|
||||||
|
disable_web_page_preview: true
|
||||||
|
}')"
|
||||||
|
|
||||||
|
sync-version-file:
|
||||||
|
needs: [release]
|
||||||
|
if: ${{ needs.release.result == 'success' }}
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- name: Checkout default branch
|
||||||
|
uses: actions/checkout@v6
|
||||||
|
with:
|
||||||
|
ref: ${{ github.event.repository.default_branch }}
|
||||||
|
|
||||||
|
- name: Sync VERSION file to released tag
|
||||||
|
run: |
|
||||||
|
if [ "${{ github.event_name }}" = "workflow_dispatch" ]; then
|
||||||
|
VERSION=${{ github.event.inputs.tag }}
|
||||||
|
VERSION=${VERSION#v}
|
||||||
|
else
|
||||||
|
VERSION=${GITHUB_REF#refs/tags/v}
|
||||||
|
fi
|
||||||
|
|
||||||
|
CURRENT_VERSION=$(tr -d '\r\n' < backend/cmd/server/VERSION || true)
|
||||||
|
if [ "$CURRENT_VERSION" = "$VERSION" ]; then
|
||||||
|
echo "VERSION file already matches $VERSION"
|
||||||
|
exit 0
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "$VERSION" > backend/cmd/server/VERSION
|
||||||
|
|
||||||
|
git config user.name "github-actions[bot]"
|
||||||
|
git config user.email "41898282+github-actions[bot]@users.noreply.github.com"
|
||||||
|
git add backend/cmd/server/VERSION
|
||||||
|
git commit -m "chore: sync VERSION to ${VERSION} [skip ci]"
|
||||||
|
git push origin HEAD:${{ github.event.repository.default_branch }}
|
||||||
|
|||||||
58
.github/workflows/security-scan.yml
vendored
Normal file
@@ -0,0 +1,58 @@
|
|||||||
|
name: Security Scan
|
||||||
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
pull_request:
|
||||||
|
schedule:
|
||||||
|
- cron: '0 3 * * 1'
|
||||||
|
|
||||||
|
permissions:
|
||||||
|
contents: read
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
backend-security:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
timeout-minutes: 15
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v6
|
||||||
|
- name: Set up Go
|
||||||
|
uses: actions/setup-go@v6
|
||||||
|
with:
|
||||||
|
go-version-file: backend/go.mod
|
||||||
|
check-latest: false
|
||||||
|
cache-dependency-path: backend/go.sum
|
||||||
|
- name: Verify Go version
|
||||||
|
run: |
|
||||||
|
go version | grep -q 'go1.26.2'
|
||||||
|
- name: Run govulncheck
|
||||||
|
working-directory: backend
|
||||||
|
run: |
|
||||||
|
go install golang.org/x/vuln/cmd/govulncheck@latest
|
||||||
|
govulncheck ./...
|
||||||
|
|
||||||
|
frontend-security:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v6
|
||||||
|
- name: Set up pnpm
|
||||||
|
uses: pnpm/action-setup@v4
|
||||||
|
with:
|
||||||
|
version: 9
|
||||||
|
- name: Set up Node.js
|
||||||
|
uses: actions/setup-node@v6
|
||||||
|
with:
|
||||||
|
node-version: '20'
|
||||||
|
cache: 'pnpm'
|
||||||
|
cache-dependency-path: frontend/pnpm-lock.yaml
|
||||||
|
- name: Install dependencies
|
||||||
|
working-directory: frontend
|
||||||
|
run: pnpm install --frozen-lockfile
|
||||||
|
- name: Run pnpm audit
|
||||||
|
working-directory: frontend
|
||||||
|
run: |
|
||||||
|
pnpm audit --prod --audit-level=high --json > audit.json || true
|
||||||
|
- name: Check audit exceptions
|
||||||
|
run: |
|
||||||
|
python tools/check_pnpm_audit_exceptions.py \
|
||||||
|
--audit frontend/audit.json \
|
||||||
|
--exceptions .github/audit-exceptions.yml
|
||||||
36
.gitignore
vendored
@@ -14,6 +14,9 @@ backend/server
|
|||||||
backend/sub2api
|
backend/sub2api
|
||||||
backend/main
|
backend/main
|
||||||
|
|
||||||
|
# Go 测试二进制
|
||||||
|
*.test
|
||||||
|
|
||||||
# 测试覆盖率
|
# 测试覆盖率
|
||||||
*.out
|
*.out
|
||||||
coverage.html
|
coverage.html
|
||||||
@@ -21,6 +24,9 @@ coverage.html
|
|||||||
# 依赖(使用 go mod)
|
# 依赖(使用 go mod)
|
||||||
vendor/
|
vendor/
|
||||||
|
|
||||||
|
# Go 编译缓存
|
||||||
|
backend/.gocache/
|
||||||
|
|
||||||
# ===================
|
# ===================
|
||||||
# Node.js / Vue 前端
|
# Node.js / Vue 前端
|
||||||
# ===================
|
# ===================
|
||||||
@@ -29,6 +35,8 @@ frontend/node_modules/
|
|||||||
frontend/dist/
|
frontend/dist/
|
||||||
*.local
|
*.local
|
||||||
*.tsbuildinfo
|
*.tsbuildinfo
|
||||||
|
vite.config.d.ts
|
||||||
|
vite.config.js.timestamp-*
|
||||||
|
|
||||||
# 日志
|
# 日志
|
||||||
npm-debug.log*
|
npm-debug.log*
|
||||||
@@ -44,6 +52,7 @@ pnpm-debug.log*
|
|||||||
.env.*.local
|
.env.*.local
|
||||||
*.env
|
*.env
|
||||||
!.env.example
|
!.env.example
|
||||||
|
docker-compose.override.yml
|
||||||
|
|
||||||
# ===================
|
# ===================
|
||||||
# IDE / 编辑器
|
# IDE / 编辑器
|
||||||
@@ -73,6 +82,9 @@ temp/
|
|||||||
*.temp
|
*.temp
|
||||||
*.log
|
*.log
|
||||||
*.bak
|
*.bak
|
||||||
|
.cache/
|
||||||
|
.dev/
|
||||||
|
.serena/
|
||||||
|
|
||||||
# ===================
|
# ===================
|
||||||
# 构建产物
|
# 构建产物
|
||||||
@@ -92,6 +104,13 @@ backend/internal/web/dist/*
|
|||||||
# 后端运行时缓存数据
|
# 后端运行时缓存数据
|
||||||
backend/data/
|
backend/data/
|
||||||
|
|
||||||
|
# ===================
|
||||||
|
# 本地配置文件(包含敏感信息)
|
||||||
|
# ===================
|
||||||
|
backend/config.yaml
|
||||||
|
deploy/config.yaml
|
||||||
|
backend/.installed
|
||||||
|
|
||||||
# ===================
|
# ===================
|
||||||
# 其他
|
# 其他
|
||||||
# ===================
|
# ===================
|
||||||
@@ -99,3 +118,20 @@ tests
|
|||||||
CLAUDE.md
|
CLAUDE.md
|
||||||
.claude
|
.claude
|
||||||
scripts
|
scripts
|
||||||
|
.code-review-state
|
||||||
|
#openspec/
|
||||||
|
code-reviews/
|
||||||
|
#AGENTS.md
|
||||||
|
backend/cmd/server/server
|
||||||
|
deploy/docker-compose.override.yml
|
||||||
|
.gocache/
|
||||||
|
vite.config.js
|
||||||
|
docs/*
|
||||||
|
!docs/PAYMENT.md
|
||||||
|
!docs/PAYMENT_CN.md
|
||||||
|
!docs/ADMIN_PAYMENT_INTEGRATION_API.md
|
||||||
|
.serena/
|
||||||
|
.codex/
|
||||||
|
frontend/coverage/
|
||||||
|
aicodex
|
||||||
|
output/
|
||||||
|
|||||||
88
.goreleaser.simple.yaml
Normal file
@@ -0,0 +1,88 @@
|
|||||||
|
# 简化版 GoReleaser 配置 - 仅发布 x86_64 GHCR 镜像
|
||||||
|
version: 2
|
||||||
|
|
||||||
|
project_name: sub2api
|
||||||
|
|
||||||
|
before:
|
||||||
|
hooks:
|
||||||
|
- go mod tidy -C backend
|
||||||
|
|
||||||
|
builds:
|
||||||
|
- id: sub2api
|
||||||
|
dir: backend
|
||||||
|
main: ./cmd/server
|
||||||
|
binary: sub2api
|
||||||
|
flags:
|
||||||
|
- -tags=embed
|
||||||
|
env:
|
||||||
|
- CGO_ENABLED=0
|
||||||
|
goos:
|
||||||
|
- linux
|
||||||
|
goarch:
|
||||||
|
- amd64
|
||||||
|
ldflags:
|
||||||
|
- -s -w
|
||||||
|
- -X main.Commit={{.Commit}}
|
||||||
|
- -X main.Date={{.Date}}
|
||||||
|
- -X main.BuildType=release
|
||||||
|
|
||||||
|
# 跳过 archives
|
||||||
|
archives: []
|
||||||
|
|
||||||
|
# 跳过 checksum
|
||||||
|
checksum:
|
||||||
|
disable: true
|
||||||
|
|
||||||
|
changelog:
|
||||||
|
disable: true
|
||||||
|
|
||||||
|
# 仅 GHCR x86_64 镜像
|
||||||
|
dockers:
|
||||||
|
- id: ghcr-amd64
|
||||||
|
goos: linux
|
||||||
|
goarch: amd64
|
||||||
|
image_templates:
|
||||||
|
- "ghcr.io/{{ .Env.GITHUB_REPO_OWNER_LOWER }}/sub2api:{{ .Version }}-amd64"
|
||||||
|
- "ghcr.io/{{ .Env.GITHUB_REPO_OWNER_LOWER }}/sub2api:{{ .Version }}"
|
||||||
|
- "ghcr.io/{{ .Env.GITHUB_REPO_OWNER_LOWER }}/sub2api:latest"
|
||||||
|
dockerfile: Dockerfile.goreleaser
|
||||||
|
use: buildx
|
||||||
|
extra_files:
|
||||||
|
- deploy/docker-entrypoint.sh
|
||||||
|
build_flag_templates:
|
||||||
|
- "--platform=linux/amd64"
|
||||||
|
- "--label=org.opencontainers.image.version={{ .Version }}"
|
||||||
|
- "--label=org.opencontainers.image.revision={{ .Commit }}"
|
||||||
|
- "--label=org.opencontainers.image.source=https://github.com/{{ .Env.GITHUB_REPO_OWNER }}/{{ .Env.GITHUB_REPO_NAME }}"
|
||||||
|
|
||||||
|
# 跳过 manifests(单架构不需要)
|
||||||
|
docker_manifests: []
|
||||||
|
|
||||||
|
release:
|
||||||
|
github:
|
||||||
|
owner: "{{ .Env.GITHUB_REPO_OWNER }}"
|
||||||
|
name: "{{ .Env.GITHUB_REPO_NAME }}"
|
||||||
|
draft: false
|
||||||
|
prerelease: auto
|
||||||
|
name_template: "Sub2API {{.Version}} (Simple)"
|
||||||
|
# 跳过上传二进制包
|
||||||
|
skip_upload: true
|
||||||
|
header: |
|
||||||
|
> AI API Gateway Platform - 将 AI 订阅配额分发和管理
|
||||||
|
> ⚡ Simple Release: 仅包含 x86_64 GHCR 镜像
|
||||||
|
|
||||||
|
{{ .Env.TAG_MESSAGE }}
|
||||||
|
|
||||||
|
footer: |
|
||||||
|
---
|
||||||
|
|
||||||
|
## 📥 Installation
|
||||||
|
|
||||||
|
**Docker (x86_64 only):**
|
||||||
|
```bash
|
||||||
|
docker pull ghcr.io/{{ .Env.GITHUB_REPO_OWNER_LOWER }}/sub2api:{{ .Version }}
|
||||||
|
```
|
||||||
|
|
||||||
|
## 📚 Documentation
|
||||||
|
|
||||||
|
- [GitHub Repository](https://github.com/{{ .Env.GITHUB_REPO_OWNER }}/{{ .Env.GITHUB_REPO_NAME }})
|
||||||
133
.goreleaser.yaml
@@ -52,10 +52,122 @@ changelog:
|
|||||||
# 禁用自动 changelog,完全使用 tag 消息
|
# 禁用自动 changelog,完全使用 tag 消息
|
||||||
disable: true
|
disable: true
|
||||||
|
|
||||||
|
# Docker images
|
||||||
|
dockers:
|
||||||
|
# DockerHub images (skipped if DOCKERHUB_USERNAME is 'skip')
|
||||||
|
- id: amd64
|
||||||
|
goos: linux
|
||||||
|
goarch: amd64
|
||||||
|
skip_push: '{{ if eq .Env.DOCKERHUB_USERNAME "skip" }}true{{ else }}false{{ end }}'
|
||||||
|
image_templates:
|
||||||
|
- "{{ .Env.DOCKERHUB_USERNAME }}/sub2api:{{ .Version }}-amd64"
|
||||||
|
dockerfile: Dockerfile.goreleaser
|
||||||
|
use: buildx
|
||||||
|
extra_files:
|
||||||
|
- deploy/docker-entrypoint.sh
|
||||||
|
build_flag_templates:
|
||||||
|
- "--platform=linux/amd64"
|
||||||
|
- "--label=org.opencontainers.image.version={{ .Version }}"
|
||||||
|
- "--label=org.opencontainers.image.revision={{ .Commit }}"
|
||||||
|
|
||||||
|
- id: arm64
|
||||||
|
goos: linux
|
||||||
|
goarch: arm64
|
||||||
|
skip_push: '{{ if eq .Env.DOCKERHUB_USERNAME "skip" }}true{{ else }}false{{ end }}'
|
||||||
|
image_templates:
|
||||||
|
- "{{ .Env.DOCKERHUB_USERNAME }}/sub2api:{{ .Version }}-arm64"
|
||||||
|
dockerfile: Dockerfile.goreleaser
|
||||||
|
use: buildx
|
||||||
|
extra_files:
|
||||||
|
- deploy/docker-entrypoint.sh
|
||||||
|
build_flag_templates:
|
||||||
|
- "--platform=linux/arm64"
|
||||||
|
- "--label=org.opencontainers.image.version={{ .Version }}"
|
||||||
|
- "--label=org.opencontainers.image.revision={{ .Commit }}"
|
||||||
|
|
||||||
|
# GHCR images (owner must be lowercase)
|
||||||
|
- id: ghcr-amd64
|
||||||
|
goos: linux
|
||||||
|
goarch: amd64
|
||||||
|
image_templates:
|
||||||
|
- "ghcr.io/{{ .Env.GITHUB_REPO_OWNER_LOWER }}/sub2api:{{ .Version }}-amd64"
|
||||||
|
dockerfile: Dockerfile.goreleaser
|
||||||
|
use: buildx
|
||||||
|
extra_files:
|
||||||
|
- deploy/docker-entrypoint.sh
|
||||||
|
build_flag_templates:
|
||||||
|
- "--platform=linux/amd64"
|
||||||
|
- "--label=org.opencontainers.image.version={{ .Version }}"
|
||||||
|
- "--label=org.opencontainers.image.revision={{ .Commit }}"
|
||||||
|
- "--label=org.opencontainers.image.source=https://github.com/{{ .Env.GITHUB_REPO_OWNER }}/{{ .Env.GITHUB_REPO_NAME }}"
|
||||||
|
|
||||||
|
- id: ghcr-arm64
|
||||||
|
goos: linux
|
||||||
|
goarch: arm64
|
||||||
|
image_templates:
|
||||||
|
- "ghcr.io/{{ .Env.GITHUB_REPO_OWNER_LOWER }}/sub2api:{{ .Version }}-arm64"
|
||||||
|
dockerfile: Dockerfile.goreleaser
|
||||||
|
use: buildx
|
||||||
|
extra_files:
|
||||||
|
- deploy/docker-entrypoint.sh
|
||||||
|
build_flag_templates:
|
||||||
|
- "--platform=linux/arm64"
|
||||||
|
- "--label=org.opencontainers.image.version={{ .Version }}"
|
||||||
|
- "--label=org.opencontainers.image.revision={{ .Commit }}"
|
||||||
|
- "--label=org.opencontainers.image.source=https://github.com/{{ .Env.GITHUB_REPO_OWNER }}/{{ .Env.GITHUB_REPO_NAME }}"
|
||||||
|
|
||||||
|
# Docker manifests for multi-arch support
|
||||||
|
docker_manifests:
|
||||||
|
# DockerHub manifests (skipped if DOCKERHUB_USERNAME is 'skip')
|
||||||
|
- name_template: "{{ .Env.DOCKERHUB_USERNAME }}/sub2api:{{ .Version }}"
|
||||||
|
skip_push: '{{ if eq .Env.DOCKERHUB_USERNAME "skip" }}true{{ else }}false{{ end }}'
|
||||||
|
image_templates:
|
||||||
|
- "{{ .Env.DOCKERHUB_USERNAME }}/sub2api:{{ .Version }}-amd64"
|
||||||
|
- "{{ .Env.DOCKERHUB_USERNAME }}/sub2api:{{ .Version }}-arm64"
|
||||||
|
|
||||||
|
- name_template: "{{ .Env.DOCKERHUB_USERNAME }}/sub2api:latest"
|
||||||
|
skip_push: '{{ if eq .Env.DOCKERHUB_USERNAME "skip" }}true{{ else }}false{{ end }}'
|
||||||
|
image_templates:
|
||||||
|
- "{{ .Env.DOCKERHUB_USERNAME }}/sub2api:{{ .Version }}-amd64"
|
||||||
|
- "{{ .Env.DOCKERHUB_USERNAME }}/sub2api:{{ .Version }}-arm64"
|
||||||
|
|
||||||
|
- name_template: "{{ .Env.DOCKERHUB_USERNAME }}/sub2api:{{ .Major }}.{{ .Minor }}"
|
||||||
|
skip_push: '{{ if eq .Env.DOCKERHUB_USERNAME "skip" }}true{{ else }}false{{ end }}'
|
||||||
|
image_templates:
|
||||||
|
- "{{ .Env.DOCKERHUB_USERNAME }}/sub2api:{{ .Version }}-amd64"
|
||||||
|
- "{{ .Env.DOCKERHUB_USERNAME }}/sub2api:{{ .Version }}-arm64"
|
||||||
|
|
||||||
|
- name_template: "{{ .Env.DOCKERHUB_USERNAME }}/sub2api:{{ .Major }}"
|
||||||
|
skip_push: '{{ if eq .Env.DOCKERHUB_USERNAME "skip" }}true{{ else }}false{{ end }}'
|
||||||
|
image_templates:
|
||||||
|
- "{{ .Env.DOCKERHUB_USERNAME }}/sub2api:{{ .Version }}-amd64"
|
||||||
|
- "{{ .Env.DOCKERHUB_USERNAME }}/sub2api:{{ .Version }}-arm64"
|
||||||
|
|
||||||
|
# GHCR manifests (owner must be lowercase)
|
||||||
|
- name_template: "ghcr.io/{{ .Env.GITHUB_REPO_OWNER_LOWER }}/sub2api:{{ .Version }}"
|
||||||
|
image_templates:
|
||||||
|
- "ghcr.io/{{ .Env.GITHUB_REPO_OWNER_LOWER }}/sub2api:{{ .Version }}-amd64"
|
||||||
|
- "ghcr.io/{{ .Env.GITHUB_REPO_OWNER_LOWER }}/sub2api:{{ .Version }}-arm64"
|
||||||
|
|
||||||
|
- name_template: "ghcr.io/{{ .Env.GITHUB_REPO_OWNER_LOWER }}/sub2api:latest"
|
||||||
|
image_templates:
|
||||||
|
- "ghcr.io/{{ .Env.GITHUB_REPO_OWNER_LOWER }}/sub2api:{{ .Version }}-amd64"
|
||||||
|
- "ghcr.io/{{ .Env.GITHUB_REPO_OWNER_LOWER }}/sub2api:{{ .Version }}-arm64"
|
||||||
|
|
||||||
|
- name_template: "ghcr.io/{{ .Env.GITHUB_REPO_OWNER_LOWER }}/sub2api:{{ .Major }}.{{ .Minor }}"
|
||||||
|
image_templates:
|
||||||
|
- "ghcr.io/{{ .Env.GITHUB_REPO_OWNER_LOWER }}/sub2api:{{ .Version }}-amd64"
|
||||||
|
- "ghcr.io/{{ .Env.GITHUB_REPO_OWNER_LOWER }}/sub2api:{{ .Version }}-arm64"
|
||||||
|
|
||||||
|
- name_template: "ghcr.io/{{ .Env.GITHUB_REPO_OWNER_LOWER }}/sub2api:{{ .Major }}"
|
||||||
|
image_templates:
|
||||||
|
- "ghcr.io/{{ .Env.GITHUB_REPO_OWNER_LOWER }}/sub2api:{{ .Version }}-amd64"
|
||||||
|
- "ghcr.io/{{ .Env.GITHUB_REPO_OWNER_LOWER }}/sub2api:{{ .Version }}-arm64"
|
||||||
|
|
||||||
release:
|
release:
|
||||||
github:
|
github:
|
||||||
owner: Wei-Shaw
|
owner: "{{ .Env.GITHUB_REPO_OWNER }}"
|
||||||
name: sub2api
|
name: "{{ .Env.GITHUB_REPO_NAME }}"
|
||||||
draft: false
|
draft: false
|
||||||
prerelease: auto
|
prerelease: auto
|
||||||
name_template: "Sub2API {{.Version}}"
|
name_template: "Sub2API {{.Version}}"
|
||||||
@@ -71,9 +183,20 @@ release:
|
|||||||
|
|
||||||
## 📥 Installation
|
## 📥 Installation
|
||||||
|
|
||||||
|
**Docker:**
|
||||||
|
```bash
|
||||||
|
{{ if ne .Env.DOCKERHUB_USERNAME "skip" -}}
|
||||||
|
# Docker Hub
|
||||||
|
docker pull {{ .Env.DOCKERHUB_USERNAME }}/sub2api:{{ .Version }}
|
||||||
|
|
||||||
|
{{ end -}}
|
||||||
|
# GitHub Container Registry
|
||||||
|
docker pull ghcr.io/{{ .Env.GITHUB_REPO_OWNER_LOWER }}/sub2api:{{ .Version }}
|
||||||
|
```
|
||||||
|
|
||||||
**One-line install (Linux):**
|
**One-line install (Linux):**
|
||||||
```bash
|
```bash
|
||||||
curl -sSL https://raw.githubusercontent.com/Wei-Shaw/sub2api/main/deploy/install.sh | sudo bash
|
curl -sSL https://raw.githubusercontent.com/{{ .Env.GITHUB_REPO_OWNER }}/{{ .Env.GITHUB_REPO_NAME }}/main/deploy/install.sh | sudo bash
|
||||||
```
|
```
|
||||||
|
|
||||||
**Manual download:**
|
**Manual download:**
|
||||||
@@ -81,5 +204,5 @@ release:
|
|||||||
|
|
||||||
## 📚 Documentation
|
## 📚 Documentation
|
||||||
|
|
||||||
- [GitHub Repository](https://github.com/Wei-Shaw/sub2api)
|
- [GitHub Repository](https://github.com/{{ .Env.GITHUB_REPO_OWNER }}/{{ .Env.GITHUB_REPO_NAME }})
|
||||||
- [Installation Guide](https://github.com/Wei-Shaw/sub2api/blob/main/deploy/README.md)
|
- [Installation Guide](https://github.com/{{ .Env.GITHUB_REPO_OWNER }}/{{ .Env.GITHUB_REPO_NAME }}/blob/main/deploy/README.md)
|
||||||
|
|||||||
73
CLA.md
Normal file
@@ -0,0 +1,73 @@
|
|||||||
|
# Sub2API Individual Contributor License Agreement (v1.0)
|
||||||
|
|
||||||
|
Thank you for your interest in contributing to Sub2API ("the Project"). This Contributor License Agreement ("Agreement") documents the rights granted by contributors to the Project.
|
||||||
|
|
||||||
|
By signing this Agreement, you accept and agree to the following terms and conditions for your present and future contributions submitted to the Project.
|
||||||
|
|
||||||
|
## 1. Definitions
|
||||||
|
|
||||||
|
- **"You" (or "Your")** means the copyright owner or legal entity authorized by the copyright owner that is making this Agreement.
|
||||||
|
- **"Contribution"** means any original work of authorship, including any modifications or additions to an existing work, that is intentionally submitted by You to the Project for inclusion in, or documentation of, any of the products owned or managed by the Project. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Project or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Project for the purpose of discussing and improving the Project, but excluding communication that is conspicuously marked or otherwise designated in writing by You as "Not a Contribution."
|
||||||
|
- **"Project Owner"** means Wesley Liddick, or any individual or legal entity to whom Wesley Liddick has explicitly assigned or transferred ownership of the Project in writing, and their respective successors and assigns.
|
||||||
|
|
||||||
|
## 2. Grant of Copyright License
|
||||||
|
|
||||||
|
Subject to the terms and conditions of this Agreement, You hereby grant to the Project Owner a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare derivative works of, publicly display, publicly perform, sublicense, and distribute Your Contributions and such derivative works. This license includes, without limitation, the right to sublicense, assign, and transfer these rights to any third party, including without limitation any successor, assignee, or acquiring entity of the Project or the Project Owner, and to use Your Contributions under any license, including proprietary or commercial licenses.
|
||||||
|
|
||||||
|
## 3. Moral Rights
|
||||||
|
|
||||||
|
To the fullest extent permitted by applicable law, You irrevocably waive and agree not to assert any moral rights (including rights of attribution and integrity) that You may have in Your Contributions, and agree that the Project Owner and its licensees may use, modify, and distribute Your Contributions without attribution or other obligations arising from moral rights.
|
||||||
|
|
||||||
|
## 4. Grant of Patent License
|
||||||
|
|
||||||
|
Subject to the terms and conditions of this Agreement, You hereby grant to the Project Owner a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer Your Contributions, where such license applies only to those patent claims licensable by You that are necessarily infringed by Your Contribution(s) alone or by combination of Your Contribution(s) with the Project to which such Contribution(s) was submitted.
|
||||||
|
|
||||||
|
## 5. Representations and Warranties
|
||||||
|
|
||||||
|
You represent and warrant that:
|
||||||
|
|
||||||
|
(a) You are legally entitled to grant the above licenses.
|
||||||
|
|
||||||
|
(b) If Your employer(s) has rights to intellectual property that You create that includes Your Contributions, You have received permission to make Contributions on behalf of that employer, or that Your employer has waived such rights for Your Contributions to the Project.
|
||||||
|
|
||||||
|
(c) Each of Your Contributions is Your original creation, or You have sufficient rights to submit it under the terms of this Agreement. You agree to provide, upon request, reasonable documentation or explanation of any third-party materials included in Your Contributions.
|
||||||
|
|
||||||
|
## 6. No Warranty
|
||||||
|
|
||||||
|
Your Contributions are provided on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are not expected to provide support for Your Contributions, except to the extent You desire to provide support.
|
||||||
|
|
||||||
|
## 7. No Obligation
|
||||||
|
|
||||||
|
You understand that the decision to include Your Contribution in any product or project is entirely at the discretion of the Project Owner, and this Agreement does not obligate the Project Owner to use Your Contribution.
|
||||||
|
|
||||||
|
## 8. Retention of Rights
|
||||||
|
|
||||||
|
You retain ownership of the copyright in Your Contributions. This Agreement does not transfer any copyright or other intellectual property rights from You to the Project Owner. This Agreement only grants the licenses described above.
|
||||||
|
|
||||||
|
## 9. Term and Termination
|
||||||
|
|
||||||
|
This Agreement shall remain in effect indefinitely. You may terminate this Agreement prospectively by providing written notice to the Project Owner, but such termination shall not affect the licenses granted for Contributions submitted prior to the effective date of termination. The licenses granted herein for Contributions submitted prior to termination are perpetual and irrevocable.
|
||||||
|
|
||||||
|
## 10. Electronic Signature
|
||||||
|
|
||||||
|
You agree that Your electronic signature (including but not limited to typing a specific phrase in a pull request, issue, or other electronic communication) is legally binding and has the same force and effect as a handwritten signature. You consent to the use of electronic means to enter into this Agreement and acknowledge that this Agreement is enforceable as if executed in a traditional written format.
|
||||||
|
|
||||||
|
## 11. General Provisions
|
||||||
|
|
||||||
|
**Entire Agreement.** This Agreement constitutes the entire agreement between You and the Project Owner with respect to Your Contributions and supersedes all prior or contemporaneous understandings regarding such subject matter.
|
||||||
|
|
||||||
|
**Severability.** If any provision of this Agreement is held to be unenforceable or invalid, that provision will be enforced to the maximum extent possible and the remaining provisions will remain in full force and effect.
|
||||||
|
|
||||||
|
**No Waiver.** The failure of the Project Owner to enforce any provision of this Agreement shall not constitute a waiver of that provision or any other provision.
|
||||||
|
|
||||||
|
**Amendment.** This Agreement may only be modified by a written instrument signed by both parties. Modifications to this Agreement apply only to Contributions submitted after the modified Agreement is published and accepted by You. Prior Contributions remain governed by the version of the Agreement in effect at the time of submission.
|
||||||
|
|
||||||
|
**Notification.** Notices under this Agreement shall be sent to the Project Owner via a GitHub issue on the Project repository. Notices are effective upon receipt.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
**By signing this CLA, you acknowledge that you have read and understood this Agreement and agree to be bound by its terms.**
|
||||||
|
|
||||||
|
To sign, reply in the pull request with:
|
||||||
|
|
||||||
|
> I have read the CLA Document and I hereby sign the CLA
|
||||||
346
DEV_GUIDE.md
Normal file
@@ -0,0 +1,346 @@
|
|||||||
|
# sub2api 项目开发指南
|
||||||
|
|
||||||
|
> 本文档记录项目环境配置、常见坑点和注意事项,供 Claude Code 和团队成员参考。
|
||||||
|
|
||||||
|
## 一、项目基本信息
|
||||||
|
|
||||||
|
| 项目 | 说明 |
|
||||||
|
|------|------|
|
||||||
|
| **上游仓库** | Wei-Shaw/sub2api |
|
||||||
|
| **Fork 仓库** | bayma888/sub2api-bmai |
|
||||||
|
| **技术栈** | Go 后端 (Ent ORM + Gin) + Vue3 前端 (pnpm) |
|
||||||
|
| **数据库** | PostgreSQL 16 + Redis |
|
||||||
|
| **包管理** | 后端: go modules, 前端: **pnpm**(不是 npm) |
|
||||||
|
|
||||||
|
## 二、本地环境配置
|
||||||
|
|
||||||
|
### PostgreSQL 16 (Windows 服务)
|
||||||
|
|
||||||
|
| 配置项 | 值 |
|
||||||
|
|--------|-----|
|
||||||
|
| 端口 | 5432 |
|
||||||
|
| psql 路径 | `C:\Program Files\PostgreSQL\16\bin\psql.exe` |
|
||||||
|
| pg_hba.conf | `C:\Program Files\PostgreSQL\16\data\pg_hba.conf` |
|
||||||
|
| 数据库凭据 | user=`sub2api`, password=`sub2api`, dbname=`sub2api` |
|
||||||
|
| 超级用户 | user=`postgres`, password=`postgres` |
|
||||||
|
|
||||||
|
### Redis
|
||||||
|
|
||||||
|
| 配置项 | 值 |
|
||||||
|
|--------|-----|
|
||||||
|
| 端口 | 6379 |
|
||||||
|
| 密码 | 无 |
|
||||||
|
|
||||||
|
### 开发工具
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# golangci-lint v2.7
|
||||||
|
go install github.com/golangci/golangci-lint/v2/cmd/golangci-lint@v2.7
|
||||||
|
|
||||||
|
# pnpm (前端包管理)
|
||||||
|
npm install -g pnpm
|
||||||
|
```
|
||||||
|
|
||||||
|
## 三、CI/CD 流水线
|
||||||
|
|
||||||
|
### GitHub Actions Workflows
|
||||||
|
|
||||||
|
| Workflow | 触发条件 | 检查内容 |
|
||||||
|
|----------|----------|----------|
|
||||||
|
| **backend-ci.yml** | push, pull_request | 单元测试 + 集成测试 + golangci-lint v2.7 |
|
||||||
|
| **security-scan.yml** | push, pull_request, 每周一 | govulncheck + gosec + pnpm audit |
|
||||||
|
| **release.yml** | tag `v*` | 构建发布(PR 不触发) |
|
||||||
|
|
||||||
|
### CI 要求
|
||||||
|
|
||||||
|
- Go 版本必须是 **1.25.7**
|
||||||
|
- 前端使用 `pnpm install --frozen-lockfile`,必须提交 `pnpm-lock.yaml`
|
||||||
|
|
||||||
|
### 本地测试命令
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# 后端单元测试
|
||||||
|
cd backend && go test -tags=unit ./...
|
||||||
|
|
||||||
|
# 后端集成测试
|
||||||
|
cd backend && go test -tags=integration ./...
|
||||||
|
|
||||||
|
# 代码质量检查
|
||||||
|
cd backend && golangci-lint run ./...
|
||||||
|
|
||||||
|
# 前端依赖安装(必须用 pnpm)
|
||||||
|
cd frontend && pnpm install
|
||||||
|
```
|
||||||
|
|
||||||
|
## 四、常见坑点 & 解决方案
|
||||||
|
|
||||||
|
### 坑 1:pnpm-lock.yaml 必须同步提交
|
||||||
|
|
||||||
|
**问题**:`package.json` 新增依赖后,CI 的 `pnpm install --frozen-lockfile` 失败。
|
||||||
|
|
||||||
|
**原因**:上游 CI 使用 pnpm,lock 文件不同步会报错。
|
||||||
|
|
||||||
|
**解决**:
|
||||||
|
```bash
|
||||||
|
cd frontend
|
||||||
|
pnpm install # 更新 pnpm-lock.yaml
|
||||||
|
git add pnpm-lock.yaml
|
||||||
|
git commit -m "chore: update pnpm-lock.yaml"
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 坑 2:npm 和 pnpm 的 node_modules 冲突
|
||||||
|
|
||||||
|
**问题**:之前用 npm 装过 `node_modules`,pnpm install 报 `EPERM` 错误。
|
||||||
|
|
||||||
|
**解决**:
|
||||||
|
```bash
|
||||||
|
cd frontend
|
||||||
|
rm -rf node_modules # 或 PowerShell: Remove-Item -Recurse -Force node_modules
|
||||||
|
pnpm install
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 坑 3:PowerShell 中 bcrypt hash 的 `$` 被转义
|
||||||
|
|
||||||
|
**问题**:bcrypt hash 格式如 `$2a$10$xxx...`,PowerShell 把 `$2a` 当变量解析,导致数据丢失。
|
||||||
|
|
||||||
|
**解决**:将 SQL 写入文件,用 `psql -f` 执行:
|
||||||
|
```bash
|
||||||
|
# 错误示范(PowerShell 会吃掉 $)
|
||||||
|
psql -c "INSERT INTO users ... VALUES ('$2a$10$...')"
|
||||||
|
|
||||||
|
# 正确做法
|
||||||
|
echo "INSERT INTO users ... VALUES ('\$2a\$10\$...')" > temp.sql
|
||||||
|
psql -U sub2api -h 127.0.0.1 -d sub2api -f temp.sql
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 坑 4:psql 不支持中文路径
|
||||||
|
|
||||||
|
**问题**:`psql -f "D:\中文路径\file.sql"` 报错找不到文件。
|
||||||
|
|
||||||
|
**解决**:复制到纯英文路径再执行:
|
||||||
|
```bash
|
||||||
|
cp "D:\中文路径\file.sql" "C:\temp.sql"
|
||||||
|
psql -f "C:\temp.sql"
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 坑 5:PostgreSQL 密码重置流程
|
||||||
|
|
||||||
|
**场景**:忘记 PostgreSQL 密码。
|
||||||
|
|
||||||
|
**步骤**:
|
||||||
|
1. 修改 `C:\Program Files\PostgreSQL\16\data\pg_hba.conf`
|
||||||
|
```
|
||||||
|
# 将 scram-sha-256 改为 trust
|
||||||
|
host all all 127.0.0.1/32 trust
|
||||||
|
```
|
||||||
|
2. 重启 PostgreSQL 服务
|
||||||
|
```powershell
|
||||||
|
Restart-Service postgresql-x64-16
|
||||||
|
```
|
||||||
|
3. 无密码登录并重置
|
||||||
|
```bash
|
||||||
|
psql -U postgres -h 127.0.0.1
|
||||||
|
ALTER USER sub2api WITH PASSWORD 'sub2api';
|
||||||
|
ALTER USER postgres WITH PASSWORD 'postgres';
|
||||||
|
```
|
||||||
|
4. 改回 `scram-sha-256` 并重启
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 坑 6:Go interface 新增方法后 test stub 必须补全
|
||||||
|
|
||||||
|
**问题**:给 interface 新增方法后,编译报错 `does not implement interface (missing method XXX)`。
|
||||||
|
|
||||||
|
**原因**:所有测试文件中实现该 interface 的 stub/mock 都必须补上新方法。
|
||||||
|
|
||||||
|
**解决**:
|
||||||
|
```bash
|
||||||
|
# 搜索所有实现该 interface 的 struct
|
||||||
|
cd backend
|
||||||
|
grep -r "type.*Stub.*struct" internal/
|
||||||
|
grep -r "type.*Mock.*struct" internal/
|
||||||
|
|
||||||
|
# 逐一补全新方法
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 坑 7:Windows 上 psql 连 localhost 的 IPv6 问题
|
||||||
|
|
||||||
|
**问题**:psql 连 `localhost` 先尝试 IPv6 (::1),可能报错后再回退 IPv4。
|
||||||
|
|
||||||
|
**建议**:直接用 `127.0.0.1` 代替 `localhost`。
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 坑 8:Windows 没有 make 命令
|
||||||
|
|
||||||
|
**问题**:CI 里用 `make test-unit`,本地 Windows 没有 make。
|
||||||
|
|
||||||
|
**解决**:直接用 Makefile 里的原始命令:
|
||||||
|
```bash
|
||||||
|
# 代替 make test-unit
|
||||||
|
go test -tags=unit ./...
|
||||||
|
|
||||||
|
# 代替 make test-integration
|
||||||
|
go test -tags=integration ./...
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 坑 9:Ent Schema 修改后必须重新生成
|
||||||
|
|
||||||
|
**问题**:修改 `ent/schema/*.go` 后,代码不生效。
|
||||||
|
|
||||||
|
**解决**:
|
||||||
|
```bash
|
||||||
|
cd backend
|
||||||
|
go generate ./ent # 重新生成 ent 代码
|
||||||
|
git add ent/ # 生成的文件也要提交
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 坑 10:前端测试看似正常,但后端调用失败(模型映射被批量误改)
|
||||||
|
|
||||||
|
**典型现象**:
|
||||||
|
- 前端按钮点测看起来正常;
|
||||||
|
- 实际通过 API/客户端调用时返回 `Service temporarily unavailable` 或提示无可用账号;
|
||||||
|
- 常见于 OpenAI 账号(例如 Codex 模型)在批量修改后突然不可用。
|
||||||
|
|
||||||
|
**根因**:
|
||||||
|
- OpenAI 账号编辑页默认不显式展示映射规则,容易让人误以为“没映射也没关系”;
|
||||||
|
- 但在**批量修改同时选中不同平台账号**(OpenAI + Antigravity/Gemini)时,模型白名单/映射可能被跨平台策略覆盖;
|
||||||
|
- 结果是 OpenAI 账号的关键模型映射丢失或被改坏,后端选不到可用账号。
|
||||||
|
|
||||||
|
**修复方案(按优先级)**:
|
||||||
|
1. **快速修复(推荐)**:在批量修改中补回正确的透传映射(例如 `gpt-5.3-codex -> gpt-5.3-codex-spark`)。
|
||||||
|
2. **彻底重建**:删除并重新添加全部相关账号(最稳但成本高)。
|
||||||
|
|
||||||
|
**关键经验**:
|
||||||
|
- 如果某模型已被软件内置默认映射覆盖,通常不需要额外再加透传;
|
||||||
|
- 但当上游模型更新快于本仓库默认映射时,**手动批量添加透传映射**是最简单、最低风险的临时兜底方案;
|
||||||
|
- 批量操作前尽量按平台分组,不要混选不同平台账号。
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 坑 11:PR 提交前检查清单
|
||||||
|
|
||||||
|
提交 PR 前务必本地验证:
|
||||||
|
|
||||||
|
- [ ] `go test -tags=unit ./...` 通过
|
||||||
|
- [ ] `go test -tags=integration ./...` 通过
|
||||||
|
- [ ] `golangci-lint run ./...` 无新增问题
|
||||||
|
- [ ] `pnpm-lock.yaml` 已同步(如果改了 package.json)
|
||||||
|
- [ ] 所有 test stub 补全新接口方法(如果改了 interface)
|
||||||
|
- [ ] Ent 生成的代码已提交(如果改了 schema)
|
||||||
|
|
||||||
|
## 五、常用命令速查
|
||||||
|
|
||||||
|
### 数据库操作
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# 连接数据库
|
||||||
|
psql -U sub2api -h 127.0.0.1 -d sub2api
|
||||||
|
|
||||||
|
# 查看所有用户
|
||||||
|
psql -U postgres -h 127.0.0.1 -c "\du"
|
||||||
|
|
||||||
|
# 查看所有数据库
|
||||||
|
psql -U postgres -h 127.0.0.1 -c "\l"
|
||||||
|
|
||||||
|
# 执行 SQL 文件
|
||||||
|
psql -U sub2api -h 127.0.0.1 -d sub2api -f migration.sql
|
||||||
|
```
|
||||||
|
|
||||||
|
### Git 操作
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# 同步上游
|
||||||
|
git fetch upstream
|
||||||
|
git checkout main
|
||||||
|
git merge upstream/main
|
||||||
|
git push origin main
|
||||||
|
|
||||||
|
# 创建功能分支
|
||||||
|
git checkout -b feature/xxx
|
||||||
|
|
||||||
|
# Rebase 到最新 main
|
||||||
|
git fetch upstream
|
||||||
|
git rebase upstream/main
|
||||||
|
```
|
||||||
|
|
||||||
|
### 前端操作
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# 安装依赖(必须用 pnpm)
|
||||||
|
cd frontend
|
||||||
|
pnpm install
|
||||||
|
|
||||||
|
# 开发服务器
|
||||||
|
pnpm dev
|
||||||
|
|
||||||
|
# 构建
|
||||||
|
pnpm build
|
||||||
|
```
|
||||||
|
|
||||||
|
### 后端操作
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# 运行服务器
|
||||||
|
cd backend
|
||||||
|
go run ./cmd/server/
|
||||||
|
|
||||||
|
# 生成 Ent 代码
|
||||||
|
go generate ./ent
|
||||||
|
|
||||||
|
# 运行测试
|
||||||
|
go test -tags=unit ./...
|
||||||
|
go test -tags=integration ./...
|
||||||
|
|
||||||
|
# Lint 检查
|
||||||
|
golangci-lint run ./...
|
||||||
|
```
|
||||||
|
|
||||||
|
## 六、项目结构速览
|
||||||
|
|
||||||
|
```
|
||||||
|
sub2api-bmai/
|
||||||
|
├── backend/
|
||||||
|
│ ├── cmd/server/ # 主程序入口
|
||||||
|
│ ├── ent/ # Ent ORM 生成代码
|
||||||
|
│ │ └── schema/ # 数据库 Schema 定义
|
||||||
|
│ ├── internal/
|
||||||
|
│ │ ├── handler/ # HTTP 处理器
|
||||||
|
│ │ ├── service/ # 业务逻辑
|
||||||
|
│ │ ├── repository/ # 数据访问层
|
||||||
|
│ │ └── server/ # 服务器配置
|
||||||
|
│ ├── migrations/ # 数据库迁移脚本
|
||||||
|
│ └── config.yaml # 配置文件
|
||||||
|
├── frontend/
|
||||||
|
│ ├── src/
|
||||||
|
│ │ ├── api/ # API 调用
|
||||||
|
│ │ ├── components/ # Vue 组件
|
||||||
|
│ │ ├── views/ # 页面视图
|
||||||
|
│ │ ├── types/ # TypeScript 类型
|
||||||
|
│ │ └── i18n/ # 国际化
|
||||||
|
│ ├── package.json # 依赖配置
|
||||||
|
│ └── pnpm-lock.yaml # pnpm 锁文件(必须提交)
|
||||||
|
└── .claude/
|
||||||
|
└── CLAUDE.md # 本文档
|
||||||
|
```
|
||||||
|
|
||||||
|
## 七、参考资源
|
||||||
|
|
||||||
|
- [上游仓库](https://github.com/Wei-Shaw/sub2api)
|
||||||
|
- [Ent 文档](https://entgo.io/docs/getting-started)
|
||||||
|
- [Vue3 文档](https://vuejs.org/)
|
||||||
|
- [pnpm 文档](https://pnpm.io/)
|
||||||
78
Dockerfile
@@ -6,30 +6,45 @@
|
|||||||
# Stage 3: Final minimal image
|
# Stage 3: Final minimal image
|
||||||
# =============================================================================
|
# =============================================================================
|
||||||
|
|
||||||
|
ARG NODE_IMAGE=node:24-alpine
|
||||||
|
ARG GOLANG_IMAGE=golang:1.26.2-alpine
|
||||||
|
ARG ALPINE_IMAGE=alpine:3.21
|
||||||
|
ARG POSTGRES_IMAGE=postgres:18-alpine
|
||||||
|
ARG GOPROXY=https://goproxy.cn,direct
|
||||||
|
ARG GOSUMDB=sum.golang.google.cn
|
||||||
|
|
||||||
# -----------------------------------------------------------------------------
|
# -----------------------------------------------------------------------------
|
||||||
# Stage 1: Frontend Builder
|
# Stage 1: Frontend Builder
|
||||||
# -----------------------------------------------------------------------------
|
# -----------------------------------------------------------------------------
|
||||||
FROM node:20-alpine AS frontend-builder
|
FROM ${NODE_IMAGE} AS frontend-builder
|
||||||
|
|
||||||
WORKDIR /app/frontend
|
WORKDIR /app/frontend
|
||||||
|
|
||||||
|
# Install pnpm
|
||||||
|
RUN corepack enable && corepack prepare pnpm@latest --activate
|
||||||
|
|
||||||
# Install dependencies first (better caching)
|
# Install dependencies first (better caching)
|
||||||
COPY frontend/package*.json ./
|
COPY frontend/package.json frontend/pnpm-lock.yaml ./
|
||||||
RUN npm ci
|
RUN pnpm install --frozen-lockfile
|
||||||
|
|
||||||
# Copy frontend source and build
|
# Copy frontend source and build
|
||||||
COPY frontend/ ./
|
COPY frontend/ ./
|
||||||
RUN npm run build
|
RUN pnpm run build
|
||||||
|
|
||||||
# -----------------------------------------------------------------------------
|
# -----------------------------------------------------------------------------
|
||||||
# Stage 2: Backend Builder
|
# Stage 2: Backend Builder
|
||||||
# -----------------------------------------------------------------------------
|
# -----------------------------------------------------------------------------
|
||||||
FROM golang:1.24-alpine AS backend-builder
|
FROM ${GOLANG_IMAGE} AS backend-builder
|
||||||
|
|
||||||
# Build arguments for version info (set by CI)
|
# Build arguments for version info (set by CI)
|
||||||
ARG VERSION=docker
|
ARG VERSION=
|
||||||
ARG COMMIT=docker
|
ARG COMMIT=docker
|
||||||
ARG DATE
|
ARG DATE
|
||||||
|
ARG GOPROXY
|
||||||
|
ARG GOSUMDB
|
||||||
|
|
||||||
|
ENV GOPROXY=${GOPROXY}
|
||||||
|
ENV GOSUMDB=${GOSUMDB}
|
||||||
|
|
||||||
# Install build dependencies
|
# Install build dependencies
|
||||||
RUN apk add --no-cache git ca-certificates tzdata
|
RUN apk add --no-cache git ca-certificates tzdata
|
||||||
@@ -47,16 +62,26 @@ COPY backend/ ./
|
|||||||
COPY --from=frontend-builder /app/backend/internal/web/dist ./internal/web/dist
|
COPY --from=frontend-builder /app/backend/internal/web/dist ./internal/web/dist
|
||||||
|
|
||||||
# Build the binary (BuildType=release for CI builds, embed frontend)
|
# Build the binary (BuildType=release for CI builds, embed frontend)
|
||||||
RUN CGO_ENABLED=0 GOOS=linux go build \
|
# Version precedence: build arg VERSION > cmd/server/VERSION
|
||||||
|
RUN VERSION_VALUE="${VERSION}" && \
|
||||||
|
if [ -z "${VERSION_VALUE}" ]; then VERSION_VALUE="$(tr -d '\r\n' < ./cmd/server/VERSION)"; fi && \
|
||||||
|
DATE_VALUE="${DATE:-$(date -u +%Y-%m-%dT%H:%M:%SZ)}" && \
|
||||||
|
CGO_ENABLED=0 GOOS=linux go build \
|
||||||
-tags embed \
|
-tags embed \
|
||||||
-ldflags="-s -w -X main.Commit=${COMMIT} -X main.Date=${DATE:-$(date -u +%Y-%m-%dT%H:%M:%SZ)} -X main.BuildType=release" \
|
-ldflags="-s -w -X main.Version=${VERSION_VALUE} -X main.Commit=${COMMIT} -X main.Date=${DATE_VALUE} -X main.BuildType=release" \
|
||||||
|
-trimpath \
|
||||||
-o /app/sub2api \
|
-o /app/sub2api \
|
||||||
./cmd/server
|
./cmd/server
|
||||||
|
|
||||||
# -----------------------------------------------------------------------------
|
# -----------------------------------------------------------------------------
|
||||||
# Stage 3: Final Runtime Image
|
# Stage 3: PostgreSQL Client (version-matched with docker-compose)
|
||||||
# -----------------------------------------------------------------------------
|
# -----------------------------------------------------------------------------
|
||||||
FROM alpine:3.19
|
FROM ${POSTGRES_IMAGE} AS pg-client
|
||||||
|
|
||||||
|
# -----------------------------------------------------------------------------
|
||||||
|
# Stage 4: Final Runtime Image
|
||||||
|
# -----------------------------------------------------------------------------
|
||||||
|
FROM ${ALPINE_IMAGE}
|
||||||
|
|
||||||
# Labels
|
# Labels
|
||||||
LABEL maintainer="Wei-Shaw <github.com/Wei-Shaw>"
|
LABEL maintainer="Wei-Shaw <github.com/Wei-Shaw>"
|
||||||
@@ -67,9 +92,21 @@ LABEL org.opencontainers.image.source="https://github.com/Wei-Shaw/sub2api"
|
|||||||
RUN apk add --no-cache \
|
RUN apk add --no-cache \
|
||||||
ca-certificates \
|
ca-certificates \
|
||||||
tzdata \
|
tzdata \
|
||||||
curl \
|
su-exec \
|
||||||
|
libpq \
|
||||||
|
zstd-libs \
|
||||||
|
lz4-libs \
|
||||||
|
krb5-libs \
|
||||||
|
libldap \
|
||||||
|
libedit \
|
||||||
&& rm -rf /var/cache/apk/*
|
&& rm -rf /var/cache/apk/*
|
||||||
|
|
||||||
|
# Copy pg_dump and psql from the same postgres image used in docker-compose
|
||||||
|
# This ensures version consistency between backup tools and the database server
|
||||||
|
COPY --from=pg-client /usr/local/bin/pg_dump /usr/local/bin/pg_dump
|
||||||
|
COPY --from=pg-client /usr/local/bin/psql /usr/local/bin/psql
|
||||||
|
COPY --from=pg-client /usr/local/lib/libpq.so.5* /usr/local/lib/
|
||||||
|
|
||||||
# Create non-root user
|
# Create non-root user
|
||||||
RUN addgroup -g 1000 sub2api && \
|
RUN addgroup -g 1000 sub2api && \
|
||||||
adduser -u 1000 -G sub2api -s /bin/sh -D sub2api
|
adduser -u 1000 -G sub2api -s /bin/sh -D sub2api
|
||||||
@@ -77,21 +114,24 @@ RUN addgroup -g 1000 sub2api && \
|
|||||||
# Set working directory
|
# Set working directory
|
||||||
WORKDIR /app
|
WORKDIR /app
|
||||||
|
|
||||||
# Copy binary from builder
|
# Copy binary/resources with ownership to avoid extra full-layer chown copy
|
||||||
COPY --from=backend-builder /app/sub2api /app/sub2api
|
COPY --from=backend-builder --chown=sub2api:sub2api /app/sub2api /app/sub2api
|
||||||
|
COPY --from=backend-builder --chown=sub2api:sub2api /app/backend/resources /app/resources
|
||||||
|
|
||||||
# Create data directory
|
# Create data directory
|
||||||
RUN mkdir -p /app/data && chown -R sub2api:sub2api /app
|
RUN mkdir -p /app/data && chown sub2api:sub2api /app/data
|
||||||
|
|
||||||
# Switch to non-root user
|
# Copy entrypoint script (fixes volume permissions then drops to sub2api)
|
||||||
USER sub2api
|
COPY deploy/docker-entrypoint.sh /app/docker-entrypoint.sh
|
||||||
|
RUN chmod +x /app/docker-entrypoint.sh
|
||||||
|
|
||||||
# Expose port (can be overridden by SERVER_PORT env var)
|
# Expose port (can be overridden by SERVER_PORT env var)
|
||||||
EXPOSE 8080
|
EXPOSE 8080
|
||||||
|
|
||||||
# Health check
|
# Health check
|
||||||
HEALTHCHECK --interval=30s --timeout=10s --start-period=10s --retries=3 \
|
HEALTHCHECK --interval=30s --timeout=10s --start-period=10s --retries=3 \
|
||||||
CMD curl -f http://localhost:${SERVER_PORT:-8080}/health || exit 1
|
CMD wget -q -T 5 -O /dev/null http://localhost:${SERVER_PORT:-8080}/health || exit 1
|
||||||
|
|
||||||
# Run the application
|
# Run the application (entrypoint fixes /app/data ownership then execs as sub2api)
|
||||||
ENTRYPOINT ["/app/sub2api"]
|
ENTRYPOINT ["/app/docker-entrypoint.sh"]
|
||||||
|
CMD ["/app/sub2api"]
|
||||||
|
|||||||
62
Dockerfile.goreleaser
Normal file
@@ -0,0 +1,62 @@
|
|||||||
|
# =============================================================================
|
||||||
|
# Sub2API Dockerfile for GoReleaser
|
||||||
|
# =============================================================================
|
||||||
|
# This Dockerfile is used by GoReleaser to build Docker images.
|
||||||
|
# It only packages the pre-built binary, no compilation needed.
|
||||||
|
# =============================================================================
|
||||||
|
|
||||||
|
ARG ALPINE_IMAGE=alpine:3.21
|
||||||
|
ARG POSTGRES_IMAGE=postgres:18-alpine
|
||||||
|
|
||||||
|
FROM ${POSTGRES_IMAGE} AS pg-client
|
||||||
|
|
||||||
|
FROM ${ALPINE_IMAGE}
|
||||||
|
|
||||||
|
LABEL maintainer="Wei-Shaw <github.com/Wei-Shaw>"
|
||||||
|
LABEL description="Sub2API - AI API Gateway Platform"
|
||||||
|
LABEL org.opencontainers.image.source="https://github.com/Wei-Shaw/sub2api"
|
||||||
|
|
||||||
|
# Install runtime dependencies
|
||||||
|
RUN apk add --no-cache \
|
||||||
|
ca-certificates \
|
||||||
|
tzdata \
|
||||||
|
curl \
|
||||||
|
su-exec \
|
||||||
|
libpq \
|
||||||
|
zstd-libs \
|
||||||
|
lz4-libs \
|
||||||
|
krb5-libs \
|
||||||
|
libldap \
|
||||||
|
libedit \
|
||||||
|
&& rm -rf /var/cache/apk/*
|
||||||
|
|
||||||
|
# Copy pg_dump and psql from a version-matched PostgreSQL image so backup and
|
||||||
|
# restore work in the runtime container without requiring Docker socket access.
|
||||||
|
COPY --from=pg-client /usr/local/bin/pg_dump /usr/local/bin/pg_dump
|
||||||
|
COPY --from=pg-client /usr/local/bin/psql /usr/local/bin/psql
|
||||||
|
COPY --from=pg-client /usr/local/lib/libpq.so.5* /usr/local/lib/
|
||||||
|
|
||||||
|
# Create non-root user
|
||||||
|
RUN addgroup -g 1000 sub2api && \
|
||||||
|
adduser -u 1000 -G sub2api -s /bin/sh -D sub2api
|
||||||
|
|
||||||
|
WORKDIR /app
|
||||||
|
|
||||||
|
# Copy pre-built binary from GoReleaser
|
||||||
|
COPY sub2api /app/sub2api
|
||||||
|
|
||||||
|
# Create data directory
|
||||||
|
RUN mkdir -p /app/data && chown -R sub2api:sub2api /app
|
||||||
|
|
||||||
|
# Copy entrypoint script (fixes volume permissions then drops to sub2api)
|
||||||
|
COPY deploy/docker-entrypoint.sh /app/docker-entrypoint.sh
|
||||||
|
RUN chmod +x /app/docker-entrypoint.sh
|
||||||
|
|
||||||
|
EXPOSE 8080
|
||||||
|
|
||||||
|
HEALTHCHECK --interval=30s --timeout=10s --start-period=10s --retries=3 \
|
||||||
|
CMD curl -f http://localhost:${SERVER_PORT:-8080}/health || exit 1
|
||||||
|
|
||||||
|
# Run the application (entrypoint fixes /app/data ownership then execs as sub2api)
|
||||||
|
ENTRYPOINT ["/app/docker-entrypoint.sh"]
|
||||||
|
CMD ["/app/sub2api"]
|
||||||
178
LICENSE
@@ -1,21 +1,165 @@
|
|||||||
MIT License
|
GNU LESSER GENERAL PUBLIC LICENSE
|
||||||
|
Version 3, 29 June 2007
|
||||||
|
|
||||||
Copyright (c) 2025 Wesley Liddick
|
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
|
||||||
|
Everyone is permitted to copy and distribute verbatim copies
|
||||||
|
of this license document, but changing it is not allowed.
|
||||||
|
|
||||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
||||||
of this software and associated documentation files (the "Software"), to deal
|
|
||||||
in the Software without restriction, including without limitation the rights
|
|
||||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
||||||
copies of the Software, and to permit persons to whom the Software is
|
|
||||||
furnished to do so, subject to the following conditions:
|
|
||||||
|
|
||||||
The above copyright notice and this permission notice shall be included in all
|
This version of the GNU Lesser General Public License incorporates
|
||||||
copies or substantial portions of the Software.
|
the terms and conditions of version 3 of the GNU General Public
|
||||||
|
License, supplemented by the additional permissions listed below.
|
||||||
|
|
||||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
0. Additional Definitions.
|
||||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
||||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
As used herein, "this License" refers to version 3 of the GNU Lesser
|
||||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
General Public License, and the "GNU GPL" refers to version 3 of the GNU
|
||||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
General Public License.
|
||||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
||||||
SOFTWARE.
|
"The Library" refers to a covered work governed by this License,
|
||||||
|
other than an Application or a Combined Work as defined below.
|
||||||
|
|
||||||
|
An "Application" is any work that makes use of an interface provided
|
||||||
|
by the Library, but which is not otherwise based on the Library.
|
||||||
|
Defining a subclass of a class defined by the Library is deemed a mode
|
||||||
|
of using an interface provided by the Library.
|
||||||
|
|
||||||
|
A "Combined Work" is a work produced by combining or linking an
|
||||||
|
Application with the Library. The particular version of the Library
|
||||||
|
with which the Combined Work was made is also called the "Linked
|
||||||
|
Version".
|
||||||
|
|
||||||
|
The "Minimal Corresponding Source" for a Combined Work means the
|
||||||
|
Corresponding Source for the Combined Work, excluding any source code
|
||||||
|
for portions of the Combined Work that, considered in isolation, are
|
||||||
|
based on the Application, and not on the Linked Version.
|
||||||
|
|
||||||
|
The "Corresponding Application Code" for a Combined Work means the
|
||||||
|
object code and/or source code for the Application, including any data
|
||||||
|
and utility programs needed for reproducing the Combined Work from the
|
||||||
|
Application, but excluding the System Libraries of the Combined Work.
|
||||||
|
|
||||||
|
1. Exception to Section 3 of the GNU GPL.
|
||||||
|
|
||||||
|
You may convey a covered work under sections 3 and 4 of this License
|
||||||
|
without being bound by section 3 of the GNU GPL.
|
||||||
|
|
||||||
|
2. Conveying Modified Versions.
|
||||||
|
|
||||||
|
If you modify a copy of the Library, and, in your modifications, a
|
||||||
|
facility refers to a function or data to be supplied by an Application
|
||||||
|
that uses the facility (other than as an argument passed when the
|
||||||
|
facility is invoked), then you may convey a copy of the modified
|
||||||
|
version:
|
||||||
|
|
||||||
|
a) under this License, provided that you make a good faith effort to
|
||||||
|
ensure that, in the event an Application does not supply the
|
||||||
|
function or data, the facility still operates, and performs
|
||||||
|
whatever part of its purpose remains meaningful, or
|
||||||
|
|
||||||
|
b) under the GNU GPL, with none of the additional permissions of
|
||||||
|
this License applicable to that copy.
|
||||||
|
|
||||||
|
3. Object Code Incorporating Material from Library Header Files.
|
||||||
|
|
||||||
|
The object code form of an Application may incorporate material from
|
||||||
|
a header file that is part of the Library. You may convey such object
|
||||||
|
code under terms of your choice, provided that, if the incorporated
|
||||||
|
material is not limited to numerical parameters, data structure
|
||||||
|
layouts and accessors, or small macros, inline functions and templates
|
||||||
|
(ten or fewer lines in length), you do both of the following:
|
||||||
|
|
||||||
|
a) Give prominent notice with each copy of the object code that the
|
||||||
|
Library is used in it and that the Library and its use are
|
||||||
|
covered by this License.
|
||||||
|
|
||||||
|
b) Accompany the object code with a copy of the GNU GPL and this license
|
||||||
|
document.
|
||||||
|
|
||||||
|
4. Combined Works.
|
||||||
|
|
||||||
|
You may convey a Combined Work under terms of your choice that,
|
||||||
|
taken together, effectively do not restrict modification of the
|
||||||
|
portions of the Library contained in the Combined Work and reverse
|
||||||
|
engineering for debugging such modifications, if you also do each of
|
||||||
|
the following:
|
||||||
|
|
||||||
|
a) Give prominent notice with each copy of the Combined Work that
|
||||||
|
the Library is used in it and that the Library and its use are
|
||||||
|
covered by this License.
|
||||||
|
|
||||||
|
b) Accompany the Combined Work with a copy of the GNU GPL and this license
|
||||||
|
document.
|
||||||
|
|
||||||
|
c) For a Combined Work that displays copyright notices during
|
||||||
|
execution, include the copyright notice for the Library among
|
||||||
|
these notices, as well as a reference directing the user to the
|
||||||
|
copies of the GNU GPL and this license document.
|
||||||
|
|
||||||
|
d) Do one of the following:
|
||||||
|
|
||||||
|
0) Convey the Minimal Corresponding Source under the terms of this
|
||||||
|
License, and the Corresponding Application Code in a form
|
||||||
|
suitable for, and under terms that permit, the user to
|
||||||
|
recombine or relink the Application with a modified version of
|
||||||
|
the Linked Version to produce a modified Combined Work, in the
|
||||||
|
manner specified by section 6 of the GNU GPL for conveying
|
||||||
|
Corresponding Source.
|
||||||
|
|
||||||
|
1) Use a suitable shared library mechanism for linking with the
|
||||||
|
Library. A suitable mechanism is one that (a) uses at run time
|
||||||
|
a copy of the Library already present on the user's computer
|
||||||
|
system, and (b) will operate properly with a modified version
|
||||||
|
of the Library that is interface-compatible with the Linked
|
||||||
|
Version.
|
||||||
|
|
||||||
|
e) Provide Installation Information, but only if you would otherwise
|
||||||
|
be required to provide such information under section 6 of the
|
||||||
|
GNU GPL, and only to the extent that such information is
|
||||||
|
necessary to install and execute a modified version of the
|
||||||
|
Combined Work produced by recombining or relinking the
|
||||||
|
Application with a modified version of the Linked Version. (If
|
||||||
|
you use option 4d0, the Installation Information must accompany
|
||||||
|
the Minimal Corresponding Source and Corresponding Application
|
||||||
|
Code. If you use option 4d1, you must provide the Installation
|
||||||
|
Information in the manner specified by section 6 of the GNU GPL
|
||||||
|
for conveying Corresponding Source.)
|
||||||
|
|
||||||
|
5. Combined Libraries.
|
||||||
|
|
||||||
|
You may place library facilities that are a work based on the
|
||||||
|
Library side by side in a single library together with other library
|
||||||
|
facilities that are not Applications and are not covered by this
|
||||||
|
License, and convey such a combined library under terms of your
|
||||||
|
choice, if you do both of the following:
|
||||||
|
|
||||||
|
a) Accompany the combined library with a copy of the same work based
|
||||||
|
on the Library, uncombined with any other library facilities,
|
||||||
|
conveyed under the terms of this License.
|
||||||
|
|
||||||
|
b) Give prominent notice with the combined library that part of it
|
||||||
|
is a work based on the Library, and explaining where to find the
|
||||||
|
accompanying uncombined form of the same work.
|
||||||
|
|
||||||
|
6. Revised Versions of the GNU Lesser General Public License.
|
||||||
|
|
||||||
|
The Free Software Foundation may publish revised and/or new versions
|
||||||
|
of the GNU Lesser General Public License from time to time. Such new
|
||||||
|
versions will be similar in spirit to the present version, but may
|
||||||
|
differ in detail to address new problems or concerns.
|
||||||
|
|
||||||
|
Each version is given a distinguishing version number. If the
|
||||||
|
Library as you received it specifies that a certain numbered version
|
||||||
|
of the GNU Lesser General Public License "or any later version"
|
||||||
|
applies to it, you have the option of following the terms and
|
||||||
|
conditions either of that published version or of any later version
|
||||||
|
published by the Free Software Foundation. If the Library as you
|
||||||
|
received it does not specify a version number of the GNU Lesser
|
||||||
|
General Public License, you may choose any version of the GNU Lesser
|
||||||
|
General Public License ever published by the Free Software Foundation.
|
||||||
|
|
||||||
|
If the Library as you received it specifies that a proxy can decide
|
||||||
|
whether future versions of the GNU Lesser General Public License shall
|
||||||
|
apply, that proxy's public statement of acceptance of any version is
|
||||||
|
permanent authorization for you to choose that version for the
|
||||||
|
Library.
|
||||||
44
Makefile
Normal file
@@ -0,0 +1,44 @@
|
|||||||
|
.PHONY: build build-backend build-frontend build-datamanagementd test test-backend test-frontend test-frontend-critical test-datamanagementd secret-scan
|
||||||
|
|
||||||
|
FRONTEND_CRITICAL_VITEST := \
|
||||||
|
src/views/auth/__tests__/LinuxDoCallbackView.spec.ts \
|
||||||
|
src/views/auth/__tests__/WechatCallbackView.spec.ts \
|
||||||
|
src/views/user/__tests__/PaymentView.spec.ts \
|
||||||
|
src/views/user/__tests__/PaymentResultView.spec.ts \
|
||||||
|
src/components/user/profile/__tests__/ProfileInfoCard.spec.ts \
|
||||||
|
src/views/admin/__tests__/SettingsView.spec.ts
|
||||||
|
|
||||||
|
# 一键编译前后端
|
||||||
|
build: build-backend build-frontend
|
||||||
|
|
||||||
|
# 编译后端(复用 backend/Makefile)
|
||||||
|
build-backend:
|
||||||
|
@$(MAKE) -C backend build
|
||||||
|
|
||||||
|
# 编译前端(需要已安装依赖)
|
||||||
|
build-frontend:
|
||||||
|
@pnpm --dir frontend run build
|
||||||
|
|
||||||
|
# 编译 datamanagementd(宿主机数据管理进程)
|
||||||
|
build-datamanagementd:
|
||||||
|
@cd datamanagement && go build -o datamanagementd ./cmd/datamanagementd
|
||||||
|
|
||||||
|
# 运行测试(后端 + 前端)
|
||||||
|
test: test-backend test-frontend
|
||||||
|
|
||||||
|
test-backend:
|
||||||
|
@$(MAKE) -C backend test
|
||||||
|
|
||||||
|
test-frontend:
|
||||||
|
@pnpm --dir frontend run lint:check
|
||||||
|
@pnpm --dir frontend run typecheck
|
||||||
|
@$(MAKE) test-frontend-critical
|
||||||
|
|
||||||
|
test-frontend-critical:
|
||||||
|
@pnpm --dir frontend exec vitest run $(FRONTEND_CRITICAL_VITEST)
|
||||||
|
|
||||||
|
test-datamanagementd:
|
||||||
|
@cd datamanagement && go test ./...
|
||||||
|
|
||||||
|
secret-scan:
|
||||||
|
@python3 tools/secret_scan.py
|
||||||
378
README.md
@@ -2,31 +2,37 @@
|
|||||||
|
|
||||||
<div align="center">
|
<div align="center">
|
||||||
|
|
||||||
[](https://golang.org/)
|
[](https://golang.org/)
|
||||||
[](https://vuejs.org/)
|
[](https://vuejs.org/)
|
||||||
[](https://www.postgresql.org/)
|
[](https://www.postgresql.org/)
|
||||||
[](https://redis.io/)
|
[](https://redis.io/)
|
||||||
[](https://www.docker.com/)
|
[](https://www.docker.com/)
|
||||||
|
|
||||||
|
<a href="https://trendshift.io/repositories/21823" target="_blank"><img src="https://trendshift.io/api/badge/repositories/21823" alt="Wei-Shaw%2Fsub2api | Trendshift" width="250" height="55"/></a>
|
||||||
|
|
||||||
**AI API Gateway Platform for Subscription Quota Distribution**
|
**AI API Gateway Platform for Subscription Quota Distribution**
|
||||||
|
|
||||||
English | [中文](README_CN.md)
|
English | [中文](README_CN.md) | [日本語](README_JA.md)
|
||||||
|
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
|
> **Sub2API officially uses only the domains `sub2api.org` and `pincc.ai`. Other websites using the Sub2API name may be third-party deployments or services and are not affiliated with this project. Please verify and exercise your own judgment.**
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## Demo
|
## Demo
|
||||||
|
|
||||||
Try Sub2API online: **https://v2.pincc.ai/**
|
Try Sub2API online: **[https://demo.sub2api.org/](https://demo.sub2api.org/)**
|
||||||
|
|
||||||
|
Demo credentials (shared demo environment; **not** created automatically for self-hosted installs):
|
||||||
|
|
||||||
| Email | Password |
|
| Email | Password |
|
||||||
|-------|----------|
|
|-------|----------|
|
||||||
| admin@sub2api.com | admin123 |
|
| admin@sub2api.org | admin123 |
|
||||||
|
|
||||||
## Overview
|
## Overview
|
||||||
|
|
||||||
Sub2API is an AI API gateway platform designed to distribute and manage API quotas from AI product subscriptions (like Claude Code $200/month). Users can access upstream AI services through platform-generated API Keys, while the platform handles authentication, billing, load balancing, and request forwarding.
|
Sub2API is an AI API gateway platform designed to distribute and manage API quotas from AI product subscriptions. Users can access upstream AI services through platform-generated API Keys, while the platform handles authentication, billing, load balancing, and request forwarding.
|
||||||
|
|
||||||
## Features
|
## Features
|
||||||
|
|
||||||
@@ -36,19 +42,99 @@ Sub2API is an AI API gateway platform designed to distribute and manage API quot
|
|||||||
- **Smart Scheduling** - Intelligent account selection with sticky sessions
|
- **Smart Scheduling** - Intelligent account selection with sticky sessions
|
||||||
- **Concurrency Control** - Per-user and per-account concurrency limits
|
- **Concurrency Control** - Per-user and per-account concurrency limits
|
||||||
- **Rate Limiting** - Configurable request and token rate limits
|
- **Rate Limiting** - Configurable request and token rate limits
|
||||||
|
- **Built-in Payment System** - Supports EasyPay, Alipay, WeChat Pay, and Stripe for user self-service top-up, no separate payment service needed ([Configuration Guide](docs/PAYMENT.md))
|
||||||
- **Admin Dashboard** - Web interface for monitoring and management
|
- **Admin Dashboard** - Web interface for monitoring and management
|
||||||
|
- **External System Integration** - Embed external systems (e.g. ticketing) via iframe to extend the admin dashboard
|
||||||
|
|
||||||
|
## ❤️ Sponsors
|
||||||
|
|
||||||
|
> [Want to appear here?](mailto:support@pincc.ai)
|
||||||
|
|
||||||
|
<table>
|
||||||
|
<tr>
|
||||||
|
<td width="180" align="center" valign="middle"><a href="https://shop.pincc.ai/"><img src="assets/partners/logos/pincc-logo.png" alt="pincc" width="150"></a></td>
|
||||||
|
<td valign="middle"><b><a href="https://shop.pincc.ai/">PinCC</a></b> is the official relay service built on Sub2API, offering stable access to Claude Code, Codex, Gemini and other popular models — ready to use, no deployment or maintenance required.</td>
|
||||||
|
</tr>
|
||||||
|
|
||||||
|
<tr>
|
||||||
|
<td width="180"><a href="https://www.packyapi.com/register?aff=sub2api"><img src="assets/partners/logos/packycode.png" alt="PackyCode" width="150"></a></td>
|
||||||
|
<td>Thanks to PackyCode for sponsoring this project! PackyCode is a reliable and efficient API relay service provider, offering relay services for Claude Code, Codex, Gemini, and more. PackyCode provides special discounts for our software users: register using <a href="https://www.packyapi.com/register?aff=sub2api">this link</a> and enter the "sub2api" promo code during first recharge to get 10% off.</td>
|
||||||
|
</tr>
|
||||||
|
|
||||||
|
<tr>
|
||||||
|
<td width="180"><a href="https://poixe.com/i/sub2api"><img src="assets/partners/logos/poixe.png" alt="PoixeAi" width="150"></a></td>
|
||||||
|
<td>Thanks to Poixe Ai for sponsoring this project! Poixe AI provides reliable LLM API services. You can leverage the platform's API endpoints to seamlessly build AI-powered products. Additionally, you can become a vendor by providing AI API resources to the platform and earn revenue. Register through the exclusive <a href="https://poixe.com/i/sub2api">sub2api</a> referral link and receive a bonus of $5 USD on your first top-up.</td>
|
||||||
|
</tr>
|
||||||
|
|
||||||
|
<tr>
|
||||||
|
<td width="180"><a href="https://ctok.ai"><img src="assets/partners/logos/ctok.png" alt="CTok" width="150"></a></td>
|
||||||
|
<td>Thanks to CTok.ai for sponsoring this project! CTok.ai is dedicated to building a one-stop AI programming tool service platform. We offer professional Claude Code packages and technical community services, with support for Google Gemini and OpenAI Codex. Through carefully designed plans and a professional tech community, we provide developers with reliable service guarantees and continuous technical support, making AI-assisted programming a true productivity tool. Click <a href="https://ctok.ai">here</a> to register!</td>
|
||||||
|
</tr>
|
||||||
|
|
||||||
|
<tr>
|
||||||
|
<td width="180"><a href="https://code.silkapi.com/"><img src="assets/partners/logos/silkapi.png" alt="silkapi" width="150"></a></td>
|
||||||
|
<td>Thanks to SilkAPI for sponsoring this project! <a href="https://code.silkapi.com/">SilkAPI</a> is a relay service built on Sub2API, specializing in providing high-speed and stable Codex API relay.</td>
|
||||||
|
</tr>
|
||||||
|
|
||||||
|
<tr>
|
||||||
|
<td width="180"><a href="https://ylscode.com/"><img src="assets/partners/logos/ylscode.png" alt="ylscode" width="150"></a></td>
|
||||||
|
<td>Thanks to YLS Code for sponsoring this project! <a href="https://ylscode.com/">YLS Code</a> is dedicated to building secure enterprise-grade Coding Agent productivity services, offering stable and fast Codex / Claude / Gemini subscription services along with pay-as-you-go API options for flexible choices. Register now for a limited-time 3-day Codex trial bonus!</td>
|
||||||
|
</tr>
|
||||||
|
|
||||||
|
<tr>
|
||||||
|
<td width="180"><a href="https://www.aicodemirror.com/register?invitecode=KMVZQM"><img src="assets/partners/logos/AICodeMirror.jpg" alt="AICodeMirror" width="150"></a></td>
|
||||||
|
<td>Thanks to AICodeMirror for sponsoring this project! AICodeMirror provides official high-stability relay services for Claude Code / Codex / Gemini CLI, with enterprise-grade concurrency, fast invoicing, and 24/7 dedicated technical support. Claude Code / Codex / Gemini official channels at 38% / 2% / 9% of original price, with extra discounts on top-ups! AICodeMirror offers special benefits for sub2api users: register via <a href="https://www.aicodemirror.com/register?invitecode=KMVZQM">this link</a> to enjoy 20% off your first top-up, and enterprise customers can get up to 25% off!</td>
|
||||||
|
</tr>
|
||||||
|
|
||||||
|
<tr>
|
||||||
|
<td width="180"><a href="https://aigocode.com/invite/SUB2API"><img src="assets/partners/logos/aigocode.png" alt="AIGoCode" width="150"></a></td>
|
||||||
|
<td>Thanks to AIGoCode for sponsoring this project! AIGoCode is an all-in-one platform that integrates Claude Code, Codex, and the latest Gemini models, providing you with stable, efficient, and highly cost-effective AI coding services. The platform offers flexible subscription plans, zero risk of account suspension, direct access with no VPN required, and lightning-fast responses. AIGoCode has prepared a special benefit for sub2api users: if you register via <a href="https://aigocode.com/invite/SUB2API">this link</a>, you'll receive an extra 10% bonus credit on your first top-up!</td>
|
||||||
|
</tr>
|
||||||
|
|
||||||
|
<tr>
|
||||||
|
<td width="180"><a href="https://shop.bmoplus.com/?utm_source=github"><img src="assets/partners/logos/bmoplus.jpg" alt="bmoplus" width="150"></a></td>
|
||||||
|
<td>Huge thanks to BmoPlus for sponsoring this project! BmoPlus is a highly reliable AI account provider built strictly for heavy AI users and developers. They offer rock-solid, ready-to-use accounts and official top-up services for ChatGPT Plus / ChatGPT Pro (Full Warranty) / Claude Pro / Super Grok / Gemini Pro. By registering and ordering through <a href="https://shop.bmoplus.com/?utm_source=github">BmoPlus - Premium AI Accounts & Top-ups</a>, users can unlock the mind-blowing rate of 10% of the official GPT subscription price (90% OFF)</td>
|
||||||
|
</tr>
|
||||||
|
|
||||||
|
<tr>
|
||||||
|
<td width="180"><a href="https://bestproxy.com/?keyword=a2e8iuol"><img src="assets/partners/logos/bestproxy.png" alt="bestproxy" width="150"></a></td>
|
||||||
|
<td>Thanks to Bestproxy for sponsoring this project! <a href="https://bestproxy.com/?keyword=a2e8iuol">Bestproxy</a> provides high-purity residential IPs with dedicated one-IP-per-account support. By combining real home networks with fingerprint isolation, it enables link environment isolation and reduces the probability of association-based risk control.</td>
|
||||||
|
</tr>
|
||||||
|
|
||||||
|
</table>
|
||||||
|
|
||||||
|
## Ecosystem
|
||||||
|
|
||||||
|
Community projects that extend or integrate with Sub2API:
|
||||||
|
|
||||||
|
| Project | Description | Features |
|
||||||
|
|---------|-------------|----------|
|
||||||
|
| ~~[Sub2ApiPay](https://github.com/touwaeriol/sub2apipay)~~ | ~~Self-service payment system~~ | **Now Built-in** — Payment is now integrated into Sub2API, no separate deployment needed. See [Payment Configuration Guide](docs/PAYMENT.md) |
|
||||||
|
| [sub2api-mobile](https://github.com/ckken/sub2api-mobile) | Mobile admin console | Cross-platform app (iOS/Android/Web) for user management, account management, monitoring dashboard, and multi-backend switching; built with Expo + React Native |
|
||||||
|
|
||||||
## Tech Stack
|
## Tech Stack
|
||||||
|
|
||||||
| Component | Technology |
|
| Component | Technology |
|
||||||
|-----------|------------|
|
|-----------|------------|
|
||||||
| Backend | Go 1.21+, Gin, GORM |
|
| Backend | Go 1.25.7, Gin, Ent |
|
||||||
| Frontend | Vue 3.4+, Vite 5+, TailwindCSS |
|
| Frontend | Vue 3.4+, Vite 5+, TailwindCSS |
|
||||||
| Database | PostgreSQL 15+ |
|
| Database | PostgreSQL 15+ |
|
||||||
| Cache/Queue | Redis 7+ |
|
| Cache/Queue | Redis 7+ |
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
|
## Nginx Reverse Proxy Note
|
||||||
|
|
||||||
|
When using Nginx as a reverse proxy for Sub2API (or CRS) with Codex CLI, add the following to the `http` block in your Nginx configuration:
|
||||||
|
|
||||||
|
```nginx
|
||||||
|
underscores_in_headers on;
|
||||||
|
```
|
||||||
|
|
||||||
|
Nginx drops headers containing underscores by default (e.g. `session_id`), which breaks sticky session routing in multi-account setups.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
## Deployment
|
## Deployment
|
||||||
|
|
||||||
### Method 1: Script Installation (Recommended)
|
### Method 1: Script Installation (Recommended)
|
||||||
@@ -120,7 +206,7 @@ curl -sSL https://raw.githubusercontent.com/Wei-Shaw/sub2api/main/deploy/install
|
|||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
### Method 2: Docker Compose
|
### Method 2: Docker Compose (Recommended)
|
||||||
|
|
||||||
Deploy with Docker Compose, including PostgreSQL and Redis containers.
|
Deploy with Docker Compose, including PostgreSQL and Redis containers.
|
||||||
|
|
||||||
@@ -129,29 +215,59 @@ Deploy with Docker Compose, including PostgreSQL and Redis containers.
|
|||||||
- Docker 20.10+
|
- Docker 20.10+
|
||||||
- Docker Compose v2+
|
- Docker Compose v2+
|
||||||
|
|
||||||
#### Installation Steps
|
#### Quick Start (One-Click Deployment)
|
||||||
|
|
||||||
|
Use the automated deployment script for easy setup:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Create deployment directory
|
||||||
|
mkdir -p sub2api-deploy && cd sub2api-deploy
|
||||||
|
|
||||||
|
# Download and run deployment preparation script
|
||||||
|
curl -sSL https://raw.githubusercontent.com/Wei-Shaw/sub2api/main/deploy/docker-deploy.sh | bash
|
||||||
|
|
||||||
|
# Start services
|
||||||
|
docker compose up -d
|
||||||
|
|
||||||
|
# View logs
|
||||||
|
docker compose logs -f sub2api
|
||||||
|
```
|
||||||
|
|
||||||
|
**What the script does:**
|
||||||
|
- Downloads `docker-compose.local.yml` (saved as `docker-compose.yml`) and `.env.example`
|
||||||
|
- Generates secure credentials (JWT_SECRET, TOTP_ENCRYPTION_KEY, POSTGRES_PASSWORD)
|
||||||
|
- Creates `.env` file with auto-generated secrets
|
||||||
|
- Creates data directories (uses local directories for easy backup/migration)
|
||||||
|
- Displays generated credentials for your reference
|
||||||
|
|
||||||
|
#### Manual Deployment
|
||||||
|
|
||||||
|
If you prefer manual setup:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
# 1. Clone the repository
|
# 1. Clone the repository
|
||||||
git clone https://github.com/Wei-Shaw/sub2api.git
|
git clone https://github.com/Wei-Shaw/sub2api.git
|
||||||
cd sub2api
|
cd sub2api/deploy
|
||||||
|
|
||||||
# 2. Enter the deploy directory
|
# 2. Copy environment configuration
|
||||||
cd deploy
|
|
||||||
|
|
||||||
# 3. Copy environment configuration
|
|
||||||
cp .env.example .env
|
cp .env.example .env
|
||||||
|
|
||||||
# 4. Edit configuration (set your passwords)
|
# 3. Edit configuration (generate secure passwords)
|
||||||
nano .env
|
nano .env
|
||||||
```
|
```
|
||||||
|
|
||||||
**Required configuration in `.env`:**
|
**Required configuration in `.env`:**
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
# PostgreSQL password (REQUIRED - change this!)
|
# PostgreSQL password (REQUIRED)
|
||||||
POSTGRES_PASSWORD=your_secure_password_here
|
POSTGRES_PASSWORD=your_secure_password_here
|
||||||
|
|
||||||
|
# JWT Secret (RECOMMENDED - keeps users logged in after restart)
|
||||||
|
JWT_SECRET=your_jwt_secret_here
|
||||||
|
|
||||||
|
# TOTP Encryption Key (RECOMMENDED - preserves 2FA after restart)
|
||||||
|
TOTP_ENCRYPTION_KEY=your_totp_key_here
|
||||||
|
|
||||||
# Optional: Admin account
|
# Optional: Admin account
|
||||||
ADMIN_EMAIL=admin@example.com
|
ADMIN_EMAIL=admin@example.com
|
||||||
ADMIN_PASSWORD=your_admin_password
|
ADMIN_PASSWORD=your_admin_password
|
||||||
@@ -160,40 +276,96 @@ ADMIN_PASSWORD=your_admin_password
|
|||||||
SERVER_PORT=8080
|
SERVER_PORT=8080
|
||||||
```
|
```
|
||||||
|
|
||||||
|
**Generate secure secrets:**
|
||||||
```bash
|
```bash
|
||||||
|
# Generate JWT_SECRET
|
||||||
|
openssl rand -hex 32
|
||||||
|
|
||||||
|
# Generate TOTP_ENCRYPTION_KEY
|
||||||
|
openssl rand -hex 32
|
||||||
|
|
||||||
|
# Generate POSTGRES_PASSWORD
|
||||||
|
openssl rand -hex 32
|
||||||
|
```
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# 4. Create data directories (for local version)
|
||||||
|
mkdir -p data postgres_data redis_data
|
||||||
|
|
||||||
# 5. Start all services
|
# 5. Start all services
|
||||||
docker-compose up -d
|
# Option A: Local directory version (recommended - easy migration)
|
||||||
|
docker compose -f docker-compose.local.yml up -d
|
||||||
|
|
||||||
|
# Option B: Named volumes version (simple setup)
|
||||||
|
docker compose up -d
|
||||||
|
|
||||||
# 6. Check status
|
# 6. Check status
|
||||||
docker-compose ps
|
docker compose -f docker-compose.local.yml ps
|
||||||
|
|
||||||
# 7. View logs
|
# 7. View logs
|
||||||
docker-compose logs -f sub2api
|
docker compose -f docker-compose.local.yml logs -f sub2api
|
||||||
```
|
```
|
||||||
|
|
||||||
|
#### Deployment Versions
|
||||||
|
|
||||||
|
| Version | Data Storage | Migration | Best For |
|
||||||
|
|---------|-------------|-----------|----------|
|
||||||
|
| **docker-compose.local.yml** | Local directories | ✅ Easy (tar entire directory) | Production, frequent backups |
|
||||||
|
| **docker-compose.yml** | Named volumes | ⚠️ Requires docker commands | Simple setup |
|
||||||
|
|
||||||
|
**Recommendation:** Use `docker-compose.local.yml` (deployed by script) for easier data management.
|
||||||
|
|
||||||
#### Access
|
#### Access
|
||||||
|
|
||||||
Open `http://YOUR_SERVER_IP:8080` in your browser.
|
Open `http://YOUR_SERVER_IP:8080` in your browser.
|
||||||
|
|
||||||
|
If admin password was auto-generated, find it in logs:
|
||||||
|
```bash
|
||||||
|
docker compose -f docker-compose.local.yml logs sub2api | grep "admin password"
|
||||||
|
```
|
||||||
|
|
||||||
#### Upgrade
|
#### Upgrade
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
# Pull latest image and recreate container
|
# Pull latest image and recreate container
|
||||||
docker-compose pull
|
docker compose -f docker-compose.local.yml pull
|
||||||
docker-compose up -d
|
docker compose -f docker-compose.local.yml up -d
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Easy Migration (Local Directory Version)
|
||||||
|
|
||||||
|
When using `docker-compose.local.yml`, migrate to a new server easily:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# On source server
|
||||||
|
docker compose -f docker-compose.local.yml down
|
||||||
|
cd ..
|
||||||
|
tar czf sub2api-complete.tar.gz sub2api-deploy/
|
||||||
|
|
||||||
|
# Transfer to new server
|
||||||
|
scp sub2api-complete.tar.gz user@new-server:/path/
|
||||||
|
|
||||||
|
# On new server
|
||||||
|
tar xzf sub2api-complete.tar.gz
|
||||||
|
cd sub2api-deploy/
|
||||||
|
docker compose -f docker-compose.local.yml up -d
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Useful Commands
|
#### Useful Commands
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
# Stop all services
|
# Stop all services
|
||||||
docker-compose down
|
docker compose -f docker-compose.local.yml down
|
||||||
|
|
||||||
# Restart
|
# Restart
|
||||||
docker-compose restart
|
docker compose -f docker-compose.local.yml restart
|
||||||
|
|
||||||
# View all logs
|
# View all logs
|
||||||
docker-compose logs -f
|
docker compose -f docker-compose.local.yml logs -f
|
||||||
|
|
||||||
|
# Remove all data (caution!)
|
||||||
|
docker compose -f docker-compose.local.yml down
|
||||||
|
rm -rf data/ postgres_data/ redis_data/
|
||||||
```
|
```
|
||||||
|
|
||||||
---
|
---
|
||||||
@@ -216,20 +388,23 @@ Build and run from source code for development or customization.
|
|||||||
git clone https://github.com/Wei-Shaw/sub2api.git
|
git clone https://github.com/Wei-Shaw/sub2api.git
|
||||||
cd sub2api
|
cd sub2api
|
||||||
|
|
||||||
# 2. Build frontend
|
# 2. Install pnpm (if not already installed)
|
||||||
|
npm install -g pnpm
|
||||||
|
|
||||||
|
# 3. Build frontend
|
||||||
cd frontend
|
cd frontend
|
||||||
npm install
|
pnpm install
|
||||||
npm run build
|
pnpm run build
|
||||||
# Output will be in ../backend/internal/web/dist/
|
# Output will be in ../backend/internal/web/dist/
|
||||||
|
|
||||||
# 3. Build backend with embedded frontend
|
# 4. Build backend with embedded frontend
|
||||||
cd ../backend
|
cd ../backend
|
||||||
go build -tags embed -o sub2api ./cmd/server
|
go build -tags embed -o sub2api ./cmd/server
|
||||||
|
|
||||||
# 4. Create configuration file
|
# 5. Create configuration file
|
||||||
cp ../deploy/config.example.yaml ./config.yaml
|
cp ../deploy/config.example.yaml ./config.yaml
|
||||||
|
|
||||||
# 5. Edit configuration
|
# 6. Edit configuration
|
||||||
nano config.yaml
|
nano config.yaml
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -260,10 +435,71 @@ jwt:
|
|||||||
expire_hour: 24
|
expire_hour: 24
|
||||||
|
|
||||||
default:
|
default:
|
||||||
admin_email: "admin@example.com"
|
user_concurrency: 5
|
||||||
admin_password: "admin123"
|
user_balance: 0
|
||||||
|
api_key_prefix: "sk-"
|
||||||
|
rate_multiplier: 1.0
|
||||||
```
|
```
|
||||||
|
|
||||||
|
### Sora Status (Temporarily Unavailable)
|
||||||
|
|
||||||
|
> ⚠️ Sora-related features are temporarily unavailable due to technical issues in upstream integration and media delivery.
|
||||||
|
> Please do not rely on Sora in production at this time.
|
||||||
|
> Existing `gateway.sora_*` configuration keys are reserved and may not take effect until these issues are resolved.
|
||||||
|
|
||||||
|
Additional security-related options are available in `config.yaml`:
|
||||||
|
|
||||||
|
- `cors.allowed_origins` for CORS allowlist
|
||||||
|
- `security.url_allowlist` for upstream/pricing/CRS host allowlists
|
||||||
|
- `security.url_allowlist.enabled` to disable URL validation (use with caution)
|
||||||
|
- `security.url_allowlist.allow_insecure_http` to allow HTTP URLs when validation is disabled
|
||||||
|
- `security.url_allowlist.allow_private_hosts` to allow private/local IP addresses
|
||||||
|
- `security.response_headers.enabled` to enable configurable response header filtering (disabled uses default allowlist)
|
||||||
|
- `security.csp` to control Content-Security-Policy headers
|
||||||
|
- `billing.circuit_breaker` to fail closed on billing errors
|
||||||
|
- `server.trusted_proxies` to enable X-Forwarded-For parsing
|
||||||
|
- `turnstile.required` to require Turnstile in release mode
|
||||||
|
|
||||||
|
**⚠️ Security Warning: HTTP URL Configuration**
|
||||||
|
|
||||||
|
When `security.url_allowlist.enabled=false`, the system performs minimal URL validation by default, **rejecting HTTP URLs** and only allowing HTTPS. To allow HTTP URLs (e.g., for development or internal testing), you must explicitly set:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
security:
|
||||||
|
url_allowlist:
|
||||||
|
enabled: false # Disable allowlist checks
|
||||||
|
allow_insecure_http: true # Allow HTTP URLs (⚠️ INSECURE)
|
||||||
|
```
|
||||||
|
|
||||||
|
**Or via environment variable:**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
SECURITY_URL_ALLOWLIST_ENABLED=false
|
||||||
|
SECURITY_URL_ALLOWLIST_ALLOW_INSECURE_HTTP=true
|
||||||
|
```
|
||||||
|
|
||||||
|
**Risks of allowing HTTP:**
|
||||||
|
- API keys and data transmitted in **plaintext** (vulnerable to interception)
|
||||||
|
- Susceptible to **man-in-the-middle (MITM) attacks**
|
||||||
|
- **NOT suitable for production** environments
|
||||||
|
|
||||||
|
**When to use HTTP:**
|
||||||
|
- ✅ Development/testing with local servers (http://localhost)
|
||||||
|
- ✅ Internal networks with trusted endpoints
|
||||||
|
- ✅ Testing account connectivity before obtaining HTTPS
|
||||||
|
- ❌ Production environments (use HTTPS only)
|
||||||
|
|
||||||
|
**Example error without this setting:**
|
||||||
|
```
|
||||||
|
Invalid base URL: invalid url scheme: http
|
||||||
|
```
|
||||||
|
|
||||||
|
If you disable URL validation or response header filtering, harden your network layer:
|
||||||
|
- Enforce an egress allowlist for upstream domains/IPs
|
||||||
|
- Block private/loopback/link-local ranges
|
||||||
|
- Enforce TLS-only outbound traffic
|
||||||
|
- Strip sensitive upstream response headers at the proxy
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
# 6. Run the application
|
# 6. Run the application
|
||||||
./sub2api
|
./sub2api
|
||||||
@@ -278,9 +514,61 @@ go run ./cmd/server
|
|||||||
|
|
||||||
# Frontend (with hot reload)
|
# Frontend (with hot reload)
|
||||||
cd frontend
|
cd frontend
|
||||||
npm run dev
|
pnpm run dev
|
||||||
```
|
```
|
||||||
|
|
||||||
|
#### Code Generation
|
||||||
|
|
||||||
|
When editing `backend/ent/schema`, regenerate Ent + Wire:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cd backend
|
||||||
|
go generate ./ent
|
||||||
|
go generate ./cmd/server
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Simple Mode
|
||||||
|
|
||||||
|
Simple Mode is designed for individual developers or internal teams who want quick access without full SaaS features.
|
||||||
|
|
||||||
|
- Enable: Set environment variable `RUN_MODE=simple`
|
||||||
|
- Difference: Hides SaaS-related features and skips billing process
|
||||||
|
- Security note: In production, you must also set `SIMPLE_MODE_CONFIRM=true` to allow startup
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Antigravity Support
|
||||||
|
|
||||||
|
Sub2API supports [Antigravity](https://antigravity.so/) accounts. After authorization, dedicated endpoints are available for Claude and Gemini models.
|
||||||
|
|
||||||
|
### Dedicated Endpoints
|
||||||
|
|
||||||
|
| Endpoint | Model |
|
||||||
|
|----------|-------|
|
||||||
|
| `/antigravity/v1/messages` | Claude models |
|
||||||
|
| `/antigravity/v1beta/` | Gemini models |
|
||||||
|
|
||||||
|
### Claude Code Configuration
|
||||||
|
|
||||||
|
```bash
|
||||||
|
export ANTHROPIC_BASE_URL="http://localhost:8080/antigravity"
|
||||||
|
export ANTHROPIC_AUTH_TOKEN="sk-xxx"
|
||||||
|
```
|
||||||
|
|
||||||
|
### Hybrid Scheduling Mode
|
||||||
|
|
||||||
|
Antigravity accounts support optional **hybrid scheduling**. When enabled, the general endpoints `/v1/messages` and `/v1beta/` will also route requests to Antigravity accounts.
|
||||||
|
|
||||||
|
> **⚠️ Warning**: Anthropic Claude and Antigravity Claude **cannot be mixed within the same conversation context**. Use groups to isolate them properly.
|
||||||
|
|
||||||
|
### Known Issues
|
||||||
|
|
||||||
|
In Claude Code, Plan Mode cannot be exited automatically. (Normally, when using the native Claude API, Claude Code pops up options for the user to approve or reject the plan once planning is complete.)
|
||||||
|
|
||||||
|
**Workaround**: Press `Shift + Tab` to manually exit Plan Mode, then type your response to approve or reject the plan.
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## Project Structure
|
## Project Structure
|
||||||
@@ -311,9 +599,33 @@ sub2api/
|
|||||||
└── install.sh # One-click installation script
|
└── install.sh # One-click installation script
|
||||||
```
|
```
|
||||||
|
|
||||||
|
## Disclaimer
|
||||||
|
|
||||||
|
> **Please read carefully before using this project:**
|
||||||
|
>
|
||||||
|
> :rotating_light: **Terms of Service Risk**: Using this project may violate Anthropic's Terms of Service. Please read Anthropic's user agreement carefully before use. All risks arising from the use of this project are borne solely by the user.
|
||||||
|
>
|
||||||
|
> :book: **Disclaimer**: This project is for technical learning and research purposes only. The author assumes no responsibility for account suspension, service interruption, or any other losses caused by the use of this project.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Star History
|
||||||
|
|
||||||
|
<a href="https://star-history.com/#Wei-Shaw/sub2api&Date">
|
||||||
|
<picture>
|
||||||
|
<source media="(prefers-color-scheme: dark)" srcset="https://api.star-history.com/svg?repos=Wei-Shaw/sub2api&type=Date&theme=dark" />
|
||||||
|
<source media="(prefers-color-scheme: light)" srcset="https://api.star-history.com/svg?repos=Wei-Shaw/sub2api&type=Date" />
|
||||||
|
<img alt="Star History Chart" src="https://api.star-history.com/svg?repos=Wei-Shaw/sub2api&type=Date" />
|
||||||
|
</picture>
|
||||||
|
</a>
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
## License
|
## License
|
||||||
|
|
||||||
MIT License
|
This project is licensed under the [GNU Lesser General Public License v3.0](LICENSE) (or later).
|
||||||
|
|
||||||
|
Copyright (c) 2026 Wesley Liddick
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
|
|||||||
439
README_CN.md
@@ -2,31 +2,36 @@
|
|||||||
|
|
||||||
<div align="center">
|
<div align="center">
|
||||||
|
|
||||||
[](https://golang.org/)
|
[](https://golang.org/)
|
||||||
[](https://vuejs.org/)
|
[](https://vuejs.org/)
|
||||||
[](https://www.postgresql.org/)
|
[](https://www.postgresql.org/)
|
||||||
[](https://redis.io/)
|
[](https://redis.io/)
|
||||||
[](https://www.docker.com/)
|
[](https://www.docker.com/)
|
||||||
|
|
||||||
|
<a href="https://trendshift.io/repositories/21823" target="_blank"><img src="https://trendshift.io/api/badge/repositories/21823" alt="Wei-Shaw%2Fsub2api | Trendshift" width="250" height="55"/></a>
|
||||||
|
|
||||||
**AI API 网关平台 - 订阅配额分发管理**
|
**AI API 网关平台 - 订阅配额分发管理**
|
||||||
|
|
||||||
[English](README.md) | 中文
|
[English](README.md) | 中文 | [日本語](README_JA.md)
|
||||||
|
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
|
> **Sub2API 官方仅使用 `sub2api.org` 与 `pincc.ai` 两个域名。其他使用 Sub2API 名义的网站可能为第三方部署或服务,与本项目无关,请自行甄别。**
|
||||||
---
|
---
|
||||||
|
|
||||||
## 在线体验
|
## 在线体验
|
||||||
|
|
||||||
体验地址:**https://v2.pincc.ai/**
|
体验地址:**[https://demo.sub2api.org/](https://demo.sub2api.org/)**
|
||||||
|
|
||||||
|
演示账号(共享演示环境;自建部署不会自动创建该账号):
|
||||||
|
|
||||||
| 邮箱 | 密码 |
|
| 邮箱 | 密码 |
|
||||||
|------|------|
|
|------|------|
|
||||||
| admin@sub2api.com | admin123 |
|
| admin@sub2api.org | admin123 |
|
||||||
|
|
||||||
## 项目概述
|
## 项目概述
|
||||||
|
|
||||||
Sub2API 是一个 AI API 网关平台,用于分发和管理 AI 产品订阅(如 Claude Code $200/月)的 API 配额。用户通过平台生成的 API Key 调用上游 AI 服务,平台负责鉴权、计费、负载均衡和请求转发。
|
Sub2API 是一个 AI API 网关平台,用于分发和管理 AI 产品订阅的 API 配额。用户通过平台生成的 API Key 调用上游 AI 服务,平台负责鉴权、计费、负载均衡和请求转发。
|
||||||
|
|
||||||
## 核心功能
|
## 核心功能
|
||||||
|
|
||||||
@@ -36,19 +41,99 @@ Sub2API 是一个 AI API 网关平台,用于分发和管理 AI 产品订阅(
|
|||||||
- **智能调度** - 智能账号选择,支持粘性会话
|
- **智能调度** - 智能账号选择,支持粘性会话
|
||||||
- **并发控制** - 用户级和账号级并发限制
|
- **并发控制** - 用户级和账号级并发限制
|
||||||
- **速率限制** - 可配置的请求和 Token 速率限制
|
- **速率限制** - 可配置的请求和 Token 速率限制
|
||||||
|
- **内置支付系统** - 支持 EasyPay 易支付、支付宝官方、微信官方、Stripe,用户自助充值,无需独立部署支付服务([配置指南](docs/PAYMENT_CN.md))
|
||||||
- **管理后台** - Web 界面进行监控和管理
|
- **管理后台** - Web 界面进行监控和管理
|
||||||
|
- **外部系统集成** - 支持通过 iframe 嵌入外部系统(如工单等),扩展管理后台功能
|
||||||
|
|
||||||
|
## ❤️ 赞助商
|
||||||
|
|
||||||
|
> [想出现在这里?](mailto:support@pincc.ai)
|
||||||
|
|
||||||
|
<table>
|
||||||
|
<tr>
|
||||||
|
<td width="180" align="center" valign="middle"><a href="https://shop.pincc.ai/"><img src="assets/partners/logos/pincc-logo.png" alt="pincc" width="150"></a></td>
|
||||||
|
<td valign="middle"><b><a href="https://shop.pincc.ai/">PinCC</a></b> 是基于 Sub2API 搭建的官方中转服务,提供 Claude Code、Codex、Gemini 等主流模型的稳定中转,开箱即用,免去自建部署与运维烦恼。</td>
|
||||||
|
</tr>
|
||||||
|
|
||||||
|
<tr>
|
||||||
|
<td width="180"><a href="https://www.packyapi.com/register?aff=sub2api"><img src="assets/partners/logos/packycode.png" alt="PackyCode" width="150"></a></td>
|
||||||
|
<td>感谢 PackyCode 赞助了本项目!PackyCode 是一家稳定、高效的API中转服务商,提供 Claude Code、Codex、Gemini 等多种中转服务。PackyCode 为本软件的用户提供了特别优惠,使用<a href="https://www.packyapi.com/register?aff=sub2api">此链接</a>注册并在充值时填写"sub2api"优惠码,首次充值可以享受9折优惠!</td>
|
||||||
|
</tr>
|
||||||
|
|
||||||
|
<tr>
|
||||||
|
<td width="180"><a href="https://poixe.com/i/sub2api"><img src="assets/partners/logos/poixe.png" alt="PoixeAI" width="150"></a></td>
|
||||||
|
<td>感谢 Poixe AI 赞助了本项目!Poixe AI 提供可靠的 AI 模型接口服务,您可以使用平台提供的 LLM API 接口轻松构建 AI 产品,同时也可以成为供应商,为平台提供大模型资源以赚取收益。通过 <a href="https://poixe.com/i/sub2api">此链接</a> 专属链接注册,充值额外赠送 $5 美金</td>
|
||||||
|
</tr>
|
||||||
|
|
||||||
|
<tr>
|
||||||
|
<td width="180"><a href="https://ctok.ai"><img src="assets/partners/logos/ctok.png" alt="CTok" width="150"></a></td>
|
||||||
|
<td>感谢 CTok.ai 赞助了本项目!CTok.ai 致力于打造一站式 AI 编程工具服务平台。我们提供 Claude Code 专业套餐及技术社群服务,同时支持 Google Gemini 和 OpenAI Codex。通过精心设计的套餐方案和专业的技术社群,为开发者提供稳定的服务保障和持续的技术支持,让 AI 辅助编程真正成为开发者的生产力工具。点击<a href="https://ctok.ai">这里</a>注册!</td>
|
||||||
|
</tr>
|
||||||
|
|
||||||
|
<tr>
|
||||||
|
<td width="180"><a href="https://code.silkapi.com/"><img src="assets/partners/logos/silkapi.png" alt="silkapi" width="150"></a></td>
|
||||||
|
<td>感谢 丝绸API 赞助了本项目! <a href="https://code.silkapi.com/">丝绸API</a> 是基于 Sub2API 搭建的中转服务,专注于提供 Codex 高速稳定API中转。</td>
|
||||||
|
</tr>
|
||||||
|
|
||||||
|
<tr>
|
||||||
|
<td width="180"><a href="https://ylscode.com/"><img src="assets/partners/logos/ylscode.png" alt="ylscode" width="150"></a></td>
|
||||||
|
<td>感谢 伊莉思Code 赞助了本项目! <a href="https://ylscode.com/">伊莉思Code</a> 致力于构建安全的企业级Coding Agent生产力服务,提供稳定快速的 Codex / Claude / Gemini 订阅服务与即用即付API多种方案灵活选择,限时注册赠送 3 天 Codex 试用福利!</td>
|
||||||
|
</tr>
|
||||||
|
|
||||||
|
<tr>
|
||||||
|
<td width="180"><a href="https://www.aicodemirror.com/register?invitecode=KMVZQM"><img src="assets/partners/logos/AICodeMirror.jpg" alt="AICodeMirror" width="150"></a></td>
|
||||||
|
<td>感谢 AICodeMirror 赞助了本项目!AICodeMirror 提供 Claude Code / Codex / Gemini CLI 官方高稳定性中转服务,企业级并发、快速开票、7×24 小时专属技术支持。Claude Code / Codex / Gemini 官方通道低至原价 38% / 2% / 9%,充值更享额外折扣!AICodeMirror 为 sub2api 用户提供专属福利:通过<a href="https://www.aicodemirror.com/register?invitecode=KMVZQM">此链接</a>注册,首次充值立享 8 折优惠,企业客户最高可享 75 折!</td>
|
||||||
|
</tr>
|
||||||
|
|
||||||
|
<tr>
|
||||||
|
<td width="180"><a href="https://aigocode.com/invite/SUB2API"><img src="assets/partners/logos/aigocode.png" alt="AIGoCode" width="150"></a></td>
|
||||||
|
<td>感谢 AIGoCode 赞助了本项目!AIGoCode 是一站式集成 Claude Code、Codex 以及最新 Gemini 模型的综合平台,为您提供稳定、高效、高性价比的 AI 编程服务。平台提供灵活的订阅方案,零封号风险,免 VPN 直连,响应极速。AIGoCode 为 sub2api 用户准备了专属福利:通过<a href="https://aigocode.com/invite/SUB2API">此链接</a>注册,首次充值可额外获得 10% 赠送额度!</td>
|
||||||
|
</tr>
|
||||||
|
|
||||||
|
<tr>
|
||||||
|
<td width="180"><a href="https://shop.bmoplus.com/?utm_source=github"><img src="assets/partners/logos/bmoplus.jpg" alt="bmoplus" width="150"></a></td>
|
||||||
|
<td>感谢 BmoPlus 赞助了本项目!BmoPlus 是一家专为AI订阅重度用户打造的可靠 AI 账号代充服务商,提供稳定的 ChatGPT Plus / ChatGPT Pro(全程质保) / Claude Pro / Super Grok / Gemini Pro 的官方代充&成品账号。 通过<a href="https://shop.bmoplus.com/?utm_source=github">BmoPlus AI成品号专卖/代充</a>注册下单的用户,可享GPT 官网订阅一折 的震撼价格!</td>
|
||||||
|
</tr>
|
||||||
|
|
||||||
|
<tr>
|
||||||
|
<td width="180"><a href="https://bestproxy.com/?keyword=a2e8iuol"><img src="assets/partners/logos/bestproxy.png" alt="bestproxy" width="150"></a></td>
|
||||||
|
<td>感谢 Bestproxy 赞助了本项目!<a href="https://bestproxy.com/?keyword=a2e8iuol">Bestproxy</a> 是一家提供高纯度住宅IP,支持一号一IP独享,结合真实家庭网络与指纹隔离,可实现链路环境隔离,降低关联风控概率。</td>
|
||||||
|
</tr>
|
||||||
|
|
||||||
|
</table>
|
||||||
|
|
||||||
|
## 生态项目
|
||||||
|
|
||||||
|
围绕 Sub2API 的社区扩展与集成项目:
|
||||||
|
|
||||||
|
| 项目 | 说明 | 功能 |
|
||||||
|
|------|------|------|
|
||||||
|
| ~~[Sub2ApiPay](https://github.com/touwaeriol/sub2apipay)~~ | ~~自助支付系统~~ | **已内置** — 支付功能已集成到 Sub2API 中,无需独立部署。详见 [支付配置指南](docs/PAYMENT_CN.md) |
|
||||||
|
| [sub2api-mobile](https://github.com/ckken/sub2api-mobile) | 移动端管理控制台 | 跨平台应用(iOS/Android/Web),支持用户管理、账号管理、监控看板、多后端切换;基于 Expo + React Native 构建 |
|
||||||
|
|
||||||
## 技术栈
|
## 技术栈
|
||||||
|
|
||||||
| 组件 | 技术 |
|
| 组件 | 技术 |
|
||||||
|------|------|
|
|------|------|
|
||||||
| 后端 | Go 1.21+, Gin, GORM |
|
| 后端 | Go 1.25.7, Gin, Ent |
|
||||||
| 前端 | Vue 3.4+, Vite 5+, TailwindCSS |
|
| 前端 | Vue 3.4+, Vite 5+, TailwindCSS |
|
||||||
| 数据库 | PostgreSQL 15+ |
|
| 数据库 | PostgreSQL 15+ |
|
||||||
| 缓存/队列 | Redis 7+ |
|
| 缓存/队列 | Redis 7+ |
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
|
## Nginx 反向代理注意事项
|
||||||
|
|
||||||
|
通过 Nginx 反向代理 Sub2API(或 CRS 服务)并搭配 Codex CLI 使用时,需要在 Nginx 配置的 `http` 块中添加:
|
||||||
|
|
||||||
|
```nginx
|
||||||
|
underscores_in_headers on;
|
||||||
|
```
|
||||||
|
|
||||||
|
Nginx 默认会丢弃名称中含下划线的请求头(如 `session_id`),这会导致多账号环境下的粘性会话功能失效。
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
## 部署方式
|
## 部署方式
|
||||||
|
|
||||||
### 方式一:脚本安装(推荐)
|
### 方式一:脚本安装(推荐)
|
||||||
@@ -120,7 +205,7 @@ curl -sSL https://raw.githubusercontent.com/Wei-Shaw/sub2api/main/deploy/install
|
|||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
### 方式二:Docker Compose
|
### 方式二:Docker Compose(推荐)
|
||||||
|
|
||||||
使用 Docker Compose 部署,包含 PostgreSQL 和 Redis 容器。
|
使用 Docker Compose 部署,包含 PostgreSQL 和 Redis 容器。
|
||||||
|
|
||||||
@@ -129,29 +214,59 @@ curl -sSL https://raw.githubusercontent.com/Wei-Shaw/sub2api/main/deploy/install
|
|||||||
- Docker 20.10+
|
- Docker 20.10+
|
||||||
- Docker Compose v2+
|
- Docker Compose v2+
|
||||||
|
|
||||||
#### 安装步骤
|
#### 快速开始(一键部署)
|
||||||
|
|
||||||
|
使用自动化部署脚本快速搭建:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# 创建部署目录
|
||||||
|
mkdir -p sub2api-deploy && cd sub2api-deploy
|
||||||
|
|
||||||
|
# 下载并运行部署准备脚本
|
||||||
|
curl -sSL https://raw.githubusercontent.com/Wei-Shaw/sub2api/main/deploy/docker-deploy.sh | bash
|
||||||
|
|
||||||
|
# 启动服务
|
||||||
|
docker compose up -d
|
||||||
|
|
||||||
|
# 查看日志
|
||||||
|
docker compose logs -f sub2api
|
||||||
|
```
|
||||||
|
|
||||||
|
**脚本功能:**
|
||||||
|
- 下载 `docker-compose.local.yml`(本地保存为 `docker-compose.yml`)和 `.env.example`
|
||||||
|
- 自动生成安全凭证(JWT_SECRET、TOTP_ENCRYPTION_KEY、POSTGRES_PASSWORD)
|
||||||
|
- 创建 `.env` 文件并填充自动生成的密钥
|
||||||
|
- 创建数据目录(使用本地目录,便于备份和迁移)
|
||||||
|
- 显示生成的凭证供你记录
|
||||||
|
|
||||||
|
#### 手动部署
|
||||||
|
|
||||||
|
如果你希望手动配置:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
# 1. 克隆仓库
|
# 1. 克隆仓库
|
||||||
git clone https://github.com/Wei-Shaw/sub2api.git
|
git clone https://github.com/Wei-Shaw/sub2api.git
|
||||||
cd sub2api
|
cd sub2api/deploy
|
||||||
|
|
||||||
# 2. 进入 deploy 目录
|
# 2. 复制环境配置文件
|
||||||
cd deploy
|
|
||||||
|
|
||||||
# 3. 复制环境配置文件
|
|
||||||
cp .env.example .env
|
cp .env.example .env
|
||||||
|
|
||||||
# 4. 编辑配置(设置密码等)
|
# 3. 编辑配置(生成安全密码)
|
||||||
nano .env
|
nano .env
|
||||||
```
|
```
|
||||||
|
|
||||||
**`.env` 必须配置项:**
|
**`.env` 必须配置项:**
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
# PostgreSQL 密码(必须修改!)
|
# PostgreSQL 密码(必需)
|
||||||
POSTGRES_PASSWORD=your_secure_password_here
|
POSTGRES_PASSWORD=your_secure_password_here
|
||||||
|
|
||||||
|
# JWT 密钥(推荐 - 重启后保持用户登录状态)
|
||||||
|
JWT_SECRET=your_jwt_secret_here
|
||||||
|
|
||||||
|
# TOTP 加密密钥(推荐 - 重启后保留双因素认证)
|
||||||
|
TOTP_ENCRYPTION_KEY=your_totp_key_here
|
||||||
|
|
||||||
# 可选:管理员账号
|
# 可选:管理员账号
|
||||||
ADMIN_EMAIL=admin@example.com
|
ADMIN_EMAIL=admin@example.com
|
||||||
ADMIN_PASSWORD=your_admin_password
|
ADMIN_PASSWORD=your_admin_password
|
||||||
@@ -160,40 +275,108 @@ ADMIN_PASSWORD=your_admin_password
|
|||||||
SERVER_PORT=8080
|
SERVER_PORT=8080
|
||||||
```
|
```
|
||||||
|
|
||||||
|
**生成安全密钥:**
|
||||||
```bash
|
```bash
|
||||||
|
# 生成 JWT_SECRET
|
||||||
|
openssl rand -hex 32
|
||||||
|
|
||||||
|
# 生成 TOTP_ENCRYPTION_KEY
|
||||||
|
openssl rand -hex 32
|
||||||
|
|
||||||
|
# 生成 POSTGRES_PASSWORD
|
||||||
|
openssl rand -hex 32
|
||||||
|
```
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# 4. 创建数据目录(本地版)
|
||||||
|
mkdir -p data postgres_data redis_data
|
||||||
|
|
||||||
# 5. 启动所有服务
|
# 5. 启动所有服务
|
||||||
docker-compose up -d
|
# 选项 A:本地目录版(推荐 - 易于迁移)
|
||||||
|
docker compose -f docker-compose.local.yml up -d
|
||||||
|
|
||||||
|
# 选项 B:命名卷版(简单设置)
|
||||||
|
docker compose up -d
|
||||||
|
|
||||||
# 6. 查看状态
|
# 6. 查看状态
|
||||||
docker-compose ps
|
docker compose -f docker-compose.local.yml ps
|
||||||
|
|
||||||
# 7. 查看日志
|
# 7. 查看日志
|
||||||
docker-compose logs -f sub2api
|
docker compose -f docker-compose.local.yml logs -f sub2api
|
||||||
```
|
```
|
||||||
|
|
||||||
|
#### 部署版本对比
|
||||||
|
|
||||||
|
| 版本 | 数据存储 | 迁移便利性 | 适用场景 |
|
||||||
|
|------|---------|-----------|---------|
|
||||||
|
| **docker-compose.local.yml** | 本地目录 | ✅ 简单(打包整个目录) | 生产环境、频繁备份 |
|
||||||
|
| **docker-compose.yml** | 命名卷 | ⚠️ 需要 docker 命令 | 简单设置 |
|
||||||
|
|
||||||
|
**推荐:** 使用 `docker-compose.local.yml`(脚本部署)以便更轻松地管理数据。
|
||||||
|
|
||||||
|
#### 启用“数据管理”功能(datamanagementd)
|
||||||
|
|
||||||
|
如需启用管理后台“数据管理”,需要额外部署宿主机数据管理进程 `datamanagementd`。
|
||||||
|
|
||||||
|
关键点:
|
||||||
|
|
||||||
|
- 主进程固定探测:`/tmp/sub2api-datamanagement.sock`
|
||||||
|
- 只有该 Socket 可连通时,数据管理功能才会开启
|
||||||
|
- Docker 场景需将宿主机 Socket 挂载到容器同路径
|
||||||
|
|
||||||
|
详细部署步骤见:`deploy/DATAMANAGEMENTD_CN.md`
|
||||||
|
|
||||||
#### 访问
|
#### 访问
|
||||||
|
|
||||||
在浏览器中打开 `http://你的服务器IP:8080`
|
在浏览器中打开 `http://你的服务器IP:8080`
|
||||||
|
|
||||||
|
如果管理员密码是自动生成的,在日志中查找:
|
||||||
|
```bash
|
||||||
|
docker compose -f docker-compose.local.yml logs sub2api | grep "admin password"
|
||||||
|
```
|
||||||
|
|
||||||
#### 升级
|
#### 升级
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
# 拉取最新镜像并重建容器
|
# 拉取最新镜像并重建容器
|
||||||
docker-compose pull
|
docker compose -f docker-compose.local.yml pull
|
||||||
docker-compose up -d
|
docker compose -f docker-compose.local.yml up -d
|
||||||
|
```
|
||||||
|
|
||||||
|
#### 轻松迁移(本地目录版)
|
||||||
|
|
||||||
|
使用 `docker-compose.local.yml` 时,可以轻松迁移到新服务器:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# 源服务器
|
||||||
|
docker compose -f docker-compose.local.yml down
|
||||||
|
cd ..
|
||||||
|
tar czf sub2api-complete.tar.gz sub2api-deploy/
|
||||||
|
|
||||||
|
# 传输到新服务器
|
||||||
|
scp sub2api-complete.tar.gz user@new-server:/path/
|
||||||
|
|
||||||
|
# 新服务器
|
||||||
|
tar xzf sub2api-complete.tar.gz
|
||||||
|
cd sub2api-deploy/
|
||||||
|
docker compose -f docker-compose.local.yml up -d
|
||||||
```
|
```
|
||||||
|
|
||||||
#### 常用命令
|
#### 常用命令
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
# 停止所有服务
|
# 停止所有服务
|
||||||
docker-compose down
|
docker compose -f docker-compose.local.yml down
|
||||||
|
|
||||||
# 重启
|
# 重启
|
||||||
docker-compose restart
|
docker compose -f docker-compose.local.yml restart
|
||||||
|
|
||||||
# 查看所有日志
|
# 查看所有日志
|
||||||
docker-compose logs -f
|
docker compose -f docker-compose.local.yml logs -f
|
||||||
|
|
||||||
|
# 删除所有数据(谨慎!)
|
||||||
|
docker compose -f docker-compose.local.yml down
|
||||||
|
rm -rf data/ postgres_data/ redis_data/
|
||||||
```
|
```
|
||||||
|
|
||||||
---
|
---
|
||||||
@@ -216,20 +399,23 @@ docker-compose logs -f
|
|||||||
git clone https://github.com/Wei-Shaw/sub2api.git
|
git clone https://github.com/Wei-Shaw/sub2api.git
|
||||||
cd sub2api
|
cd sub2api
|
||||||
|
|
||||||
# 2. 编译前端
|
# 2. 安装 pnpm(如果还没有安装)
|
||||||
|
npm install -g pnpm
|
||||||
|
|
||||||
|
# 3. 编译前端
|
||||||
cd frontend
|
cd frontend
|
||||||
npm install
|
pnpm install
|
||||||
npm run build
|
pnpm run build
|
||||||
# 构建产物输出到 ../backend/internal/web/dist/
|
# 构建产物输出到 ../backend/internal/web/dist/
|
||||||
|
|
||||||
# 3. 编译后端(嵌入前端)
|
# 4. 编译后端(嵌入前端)
|
||||||
cd ../backend
|
cd ../backend
|
||||||
go build -tags embed -o sub2api ./cmd/server
|
go build -tags embed -o sub2api ./cmd/server
|
||||||
|
|
||||||
# 4. 创建配置文件
|
# 5. 创建配置文件
|
||||||
cp ../deploy/config.example.yaml ./config.yaml
|
cp ../deploy/config.example.yaml ./config.yaml
|
||||||
|
|
||||||
# 5. 编辑配置
|
# 6. 编辑配置
|
||||||
nano config.yaml
|
nano config.yaml
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -260,15 +446,128 @@ jwt:
|
|||||||
expire_hour: 24
|
expire_hour: 24
|
||||||
|
|
||||||
default:
|
default:
|
||||||
admin_email: "admin@example.com"
|
user_concurrency: 5
|
||||||
admin_password: "admin123"
|
user_balance: 0
|
||||||
|
api_key_prefix: "sk-"
|
||||||
|
rate_multiplier: 1.0
|
||||||
```
|
```
|
||||||
|
|
||||||
|
### Sora 功能状态(暂不可用)
|
||||||
|
|
||||||
|
> ⚠️ 当前 Sora 相关功能因上游接入与媒体链路存在技术问题,暂时不可用。
|
||||||
|
> 现阶段请勿在生产环境依赖 Sora 能力。
|
||||||
|
> 文档中的 `gateway.sora_*` 配置仅作预留,待技术问题修复后再恢复可用。
|
||||||
|
|
||||||
|
### Sora 媒体签名 URL(功能恢复后可选)
|
||||||
|
|
||||||
|
当配置 `gateway.sora_media_signing_key` 且 `gateway.sora_media_signed_url_ttl_seconds > 0` 时,网关会将 Sora 输出的媒体地址改写为临时签名 URL(`/sora/media-signed/...`)。这样无需 API Key 即可在浏览器中直接访问,且具备过期控制与防篡改能力(签名包含 path + query)。
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
gateway:
|
||||||
|
# /sora/media 是否强制要求 API Key(默认 false)
|
||||||
|
sora_media_require_api_key: false
|
||||||
|
# 媒体临时签名密钥(为空则禁用签名)
|
||||||
|
sora_media_signing_key: "your-signing-key"
|
||||||
|
# 临时签名 URL 有效期(秒)
|
||||||
|
sora_media_signed_url_ttl_seconds: 900
|
||||||
|
```
|
||||||
|
|
||||||
|
> 若未配置签名密钥,`/sora/media-signed` 将返回 503。
|
||||||
|
> 如需更严格的访问控制,可将 `sora_media_require_api_key` 设为 true,仅允许携带 API Key 的 `/sora/media` 访问。
|
||||||
|
|
||||||
|
访问策略说明:
|
||||||
|
- `/sora/media`:内部调用或客户端携带 API Key 才能下载
|
||||||
|
- `/sora/media-signed`:外部可访问,但有签名 + 过期控制
|
||||||
|
|
||||||
|
`config.yaml` 还支持以下安全相关配置:
|
||||||
|
|
||||||
|
- `cors.allowed_origins` 配置 CORS 白名单
|
||||||
|
- `security.url_allowlist` 配置上游/价格数据/CRS 主机白名单
|
||||||
|
- `security.url_allowlist.enabled` 可关闭 URL 校验(慎用)
|
||||||
|
- `security.url_allowlist.allow_insecure_http` 关闭校验时允许 HTTP URL
|
||||||
|
- `security.url_allowlist.allow_private_hosts` 允许私有/本地 IP 地址
|
||||||
|
- `security.response_headers.enabled` 可启用可配置响应头过滤(关闭时使用默认白名单)
|
||||||
|
- `security.csp` 配置 Content-Security-Policy
|
||||||
|
- `billing.circuit_breaker` 计费异常时 fail-closed
|
||||||
|
- `server.trusted_proxies` 启用可信代理解析 X-Forwarded-For
|
||||||
|
- `turnstile.required` 在 release 模式强制启用 Turnstile
|
||||||
|
|
||||||
|
**网关防御纵深建议(重点)**
|
||||||
|
|
||||||
|
- `gateway.upstream_response_read_max_bytes`:限制非流式上游响应读取大小(默认 `8MB`),用于防止异常响应导致内存放大。
|
||||||
|
- `gateway.proxy_probe_response_read_max_bytes`:限制代理探测响应读取大小(默认 `1MB`)。
|
||||||
|
- `gateway.gemini_debug_response_headers`:默认 `false`,仅在排障时短时开启,避免高频请求日志开销。
|
||||||
|
- `/auth/register`、`/auth/login`、`/auth/login/2fa`、`/auth/send-verify-code` 已提供服务端兜底限流(Redis 故障时 fail-close)。
|
||||||
|
- 推荐将 WAF/CDN 作为第一层防护,服务端限流与响应读取上限作为第二层兜底;两层同时保留,避免旁路流量与误配置风险。
|
||||||
|
|
||||||
|
**⚠️ 安全警告:HTTP URL 配置**
|
||||||
|
|
||||||
|
当 `security.url_allowlist.enabled=false` 时,系统默认执行最小 URL 校验,**拒绝 HTTP URL**,仅允许 HTTPS。要允许 HTTP URL(例如用于开发或内网测试),必须显式设置:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
security:
|
||||||
|
url_allowlist:
|
||||||
|
enabled: false # 禁用白名单检查
|
||||||
|
allow_insecure_http: true # 允许 HTTP URL(⚠️ 不安全)
|
||||||
|
```
|
||||||
|
|
||||||
|
**或通过环境变量:**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
SECURITY_URL_ALLOWLIST_ENABLED=false
|
||||||
|
SECURITY_URL_ALLOWLIST_ALLOW_INSECURE_HTTP=true
|
||||||
|
```
|
||||||
|
|
||||||
|
**允许 HTTP 的风险:**
|
||||||
|
- API 密钥和数据以**明文传输**(可被截获)
|
||||||
|
- 易受**中间人攻击 (MITM)**
|
||||||
|
- **不适合生产环境**
|
||||||
|
|
||||||
|
**适用场景:**
|
||||||
|
- ✅ 开发/测试环境的本地服务器(http://localhost)
|
||||||
|
- ✅ 内网可信端点
|
||||||
|
- ✅ 获取 HTTPS 前测试账号连通性
|
||||||
|
- ❌ 生产环境(仅使用 HTTPS)
|
||||||
|
|
||||||
|
**未设置此项时的错误示例:**
|
||||||
|
```
|
||||||
|
Invalid base URL: invalid url scheme: http
|
||||||
|
```
|
||||||
|
|
||||||
|
如关闭 URL 校验或响应头过滤,请加强网络层防护:
|
||||||
|
- 出站访问白名单限制上游域名/IP
|
||||||
|
- 阻断私网/回环/链路本地地址
|
||||||
|
- 强制仅允许 TLS 出站
|
||||||
|
- 在反向代理层移除敏感响应头
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
# 6. 运行应用
|
# 6. 运行应用
|
||||||
./sub2api
|
./sub2api
|
||||||
```
|
```
|
||||||
|
|
||||||
|
#### HTTP/2 (h2c) 与 HTTP/1.1 回退
|
||||||
|
|
||||||
|
后端明文端口默认支持 h2c,并保留 HTTP/1.1 回退用于 WebSocket 与旧客户端。浏览器通常不支持 h2c,性能收益主要在反向代理或内网链路。
|
||||||
|
|
||||||
|
**反向代理示例(Caddy):**
|
||||||
|
|
||||||
|
```caddyfile
|
||||||
|
transport http {
|
||||||
|
versions h2c h1
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**验证:**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# h2c prior knowledge
|
||||||
|
curl --http2-prior-knowledge -I http://localhost:8080/health
|
||||||
|
# HTTP/1.1 回退
|
||||||
|
curl --http1.1 -I http://localhost:8080/health
|
||||||
|
# WebSocket 回退验证(需管理员 token)
|
||||||
|
websocat -H="Sec-WebSocket-Protocol: sub2api-admin, jwt.<ADMIN_TOKEN>" ws://localhost:8080/api/v1/admin/ops/ws/qps
|
||||||
|
```
|
||||||
|
|
||||||
#### 开发模式
|
#### 开发模式
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
@@ -278,9 +577,59 @@ go run ./cmd/server
|
|||||||
|
|
||||||
# 前端(支持热重载)
|
# 前端(支持热重载)
|
||||||
cd frontend
|
cd frontend
|
||||||
npm run dev
|
pnpm run dev
|
||||||
```
|
```
|
||||||
|
|
||||||
|
#### 代码生成
|
||||||
|
|
||||||
|
修改 `backend/ent/schema` 后,需要重新生成 Ent + Wire:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cd backend
|
||||||
|
go generate ./ent
|
||||||
|
go generate ./cmd/server
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 简易模式
|
||||||
|
|
||||||
|
简易模式适合个人开发者或内部团队快速使用,不依赖完整 SaaS 功能。
|
||||||
|
|
||||||
|
- 启用方式:设置环境变量 `RUN_MODE=simple`
|
||||||
|
- 功能差异:隐藏 SaaS 相关功能,跳过计费流程
|
||||||
|
- 安全注意事项:生产环境需同时设置 `SIMPLE_MODE_CONFIRM=true` 才允许启动
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Antigravity 使用说明
|
||||||
|
|
||||||
|
Sub2API 支持 [Antigravity](https://antigravity.so/) 账户,授权后可通过专用端点访问 Claude 和 Gemini 模型。
|
||||||
|
|
||||||
|
### 专用端点
|
||||||
|
|
||||||
|
| 端点 | 模型 |
|
||||||
|
|------|------|
|
||||||
|
| `/antigravity/v1/messages` | Claude 模型 |
|
||||||
|
| `/antigravity/v1beta/` | Gemini 模型 |
|
||||||
|
|
||||||
|
### Claude Code 配置示例
|
||||||
|
|
||||||
|
```bash
|
||||||
|
export ANTHROPIC_BASE_URL="http://localhost:8080/antigravity"
|
||||||
|
export ANTHROPIC_AUTH_TOKEN="sk-xxx"
|
||||||
|
```
|
||||||
|
|
||||||
|
### 混合调度模式
|
||||||
|
|
||||||
|
Antigravity 账户支持可选的**混合调度**功能。开启后,通用端点 `/v1/messages` 和 `/v1beta/` 也会调度该账户。
|
||||||
|
|
||||||
|
> **⚠️ 注意**:Anthropic Claude 和 Antigravity Claude **不能在同一上下文中混合使用**,请通过分组功能做好隔离。
|
||||||
|
|
||||||
|
|
||||||
|
### 已知问题
|
||||||
|
在 Claude Code 中,无法自动退出 Plan Mode。(正常使用原生 Claude API 时,Plan 完成后,Claude Code 会弹出选项让用户同意或拒绝 Plan。)
|
||||||
|
解决办法:按 Shift + Tab 手动退出 Plan Mode,然后输入内容告诉 Claude Code 同意或拒绝 Plan。
|
||||||
---
|
---
|
||||||
|
|
||||||
## 项目结构
|
## 项目结构
|
||||||
@@ -311,9 +660,33 @@ sub2api/
|
|||||||
└── install.sh # 一键安装脚本
|
└── install.sh # 一键安装脚本
|
||||||
```
|
```
|
||||||
|
|
||||||
|
## 免责声明
|
||||||
|
|
||||||
|
> **使用本项目前请仔细阅读:**
|
||||||
|
>
|
||||||
|
> :rotating_light: **服务条款风险**: 使用本项目可能违反 Anthropic 的服务条款。请在使用前仔细阅读 Anthropic 的用户协议,使用本项目的一切风险由用户自行承担。
|
||||||
|
>
|
||||||
|
> :book: **免责声明**: 本项目仅供技术学习和研究使用,作者不对因使用本项目导致的账户封禁、服务中断或其他损失承担任何责任。
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Star History
|
||||||
|
|
||||||
|
<a href="https://star-history.com/#Wei-Shaw/sub2api&Date">
|
||||||
|
<picture>
|
||||||
|
<source media="(prefers-color-scheme: dark)" srcset="https://api.star-history.com/svg?repos=Wei-Shaw/sub2api&type=Date&theme=dark" />
|
||||||
|
<source media="(prefers-color-scheme: light)" srcset="https://api.star-history.com/svg?repos=Wei-Shaw/sub2api&type=Date" />
|
||||||
|
<img alt="Star History Chart" src="https://api.star-history.com/svg?repos=Wei-Shaw/sub2api&type=Date" />
|
||||||
|
</picture>
|
||||||
|
</a>
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
## 许可证
|
## 许可证
|
||||||
|
|
||||||
MIT License
|
本项目基于 [GNU 宽通用公共许可证 v3.0](LICENSE)(或更高版本)授权。
|
||||||
|
|
||||||
|
Copyright (c) 2026 Wesley Liddick
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
|
|||||||
635
README_JA.md
Normal file
@@ -0,0 +1,635 @@
|
|||||||
|
# Sub2API
|
||||||
|
|
||||||
|
<div align="center">
|
||||||
|
|
||||||
|
[](https://golang.org/)
|
||||||
|
[](https://vuejs.org/)
|
||||||
|
[](https://www.postgresql.org/)
|
||||||
|
[](https://redis.io/)
|
||||||
|
[](https://www.docker.com/)
|
||||||
|
|
||||||
|
<a href="https://trendshift.io/repositories/21823" target="_blank"><img src="https://trendshift.io/api/badge/repositories/21823" alt="Wei-Shaw%2Fsub2api | Trendshift" width="250" height="55"/></a>
|
||||||
|
|
||||||
|
**サブスクリプションクォータ配分のための AI API ゲートウェイプラットフォーム**
|
||||||
|
|
||||||
|
[English](README.md) | [中文](README_CN.md) | 日本語
|
||||||
|
|
||||||
|
</div>
|
||||||
|
|
||||||
|
> **Sub2API が公式に使用しているドメインは `sub2api.org` と `pincc.ai` のみです。Sub2API の名称を使用している他のウェブサイトは、サードパーティによるデプロイやサービスであり、本プロジェクトとは一切関係がありません。ご利用の際はご自身で確認・判断をお願いします。**
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## デモ
|
||||||
|
|
||||||
|
Sub2API をオンラインでお試しください: **[https://demo.sub2api.org/](https://demo.sub2api.org/)**
|
||||||
|
|
||||||
|
デモ用認証情報(共有デモ環境です。セルフホスト環境では**自動作成されません**):
|
||||||
|
|
||||||
|
| メールアドレス | パスワード |
|
||||||
|
|-------|----------|
|
||||||
|
| admin@sub2api.org | admin123 |
|
||||||
|
|
||||||
|
## 概要
|
||||||
|
|
||||||
|
Sub2API は、AI 製品のサブスクリプションから API クォータを配分・管理するために設計された AI API ゲートウェイプラットフォームです。ユーザーはプラットフォームが生成した API キーを通じて上流の AI サービスにアクセスでき、プラットフォームは認証、課金、負荷分散、リクエスト転送を処理します。
|
||||||
|
|
||||||
|
## 機能
|
||||||
|
|
||||||
|
- **マルチアカウント管理** - 複数の上流アカウントタイプ(OAuth、APIキー)をサポート
|
||||||
|
- **APIキー配布** - ユーザー向けの APIキーの生成と管理
|
||||||
|
- **精密な課金** - トークンレベルの使用量追跡とコスト計算
|
||||||
|
- **スマートスケジューリング** - スティッキーセッション付きのインテリジェントなアカウント選択
|
||||||
|
- **同時実行制御** - ユーザーごと・アカウントごとの同時実行数制限
|
||||||
|
- **レート制限** - 設定可能なリクエスト数およびトークンレート制限
|
||||||
|
- **内蔵決済システム** - EasyPay、Alipay、WeChat Pay、Stripe に対応。ユーザーのセルフサービスチャージが可能で、別途決済サービスのデプロイは不要([設定ガイド](docs/PAYMENT.md))
|
||||||
|
- **管理ダッシュボード** - 監視・管理のための Web インターフェース
|
||||||
|
- **外部システム連携** - 外部システム(チケット管理など)を iframe 経由で管理ダッシュボードに埋め込み可能
|
||||||
|
|
||||||
|
## ❤️ スポンサー
|
||||||
|
|
||||||
|
> [こちらに掲載しませんか?](mailto:support@pincc.ai)
|
||||||
|
|
||||||
|
<table>
|
||||||
|
<tr>
|
||||||
|
<td width="180" align="center" valign="middle"><a href="https://shop.pincc.ai/"><img src="assets/partners/logos/pincc-logo.png" alt="pincc" width="150"></a></td>
|
||||||
|
<td valign="middle"><b><a href="https://shop.pincc.ai/">PinCC</a></b> は Sub2API 上に構築された公式リレーサービスで、Claude Code、Codex、Gemini などの人気モデルへの安定したアクセスを提供します。デプロイやメンテナンスは不要で、すぐにご利用いただけます。</td>
|
||||||
|
</tr>
|
||||||
|
<tr>
|
||||||
|
<td width="180"><a href="https://www.packyapi.com/register?aff=sub2api"><img src="assets/partners/logos/packycode.png" alt="PackyCode" width="150"></a></td>
|
||||||
|
<td>PackyCode のご支援に感謝します!PackyCode は Claude Code、Codex、Gemini などのリレーサービスを提供する信頼性の高い API 中継プラットフォームです。本ソフト利用者向けに特別割引があります:<a href="https://www.packyapi.com/register?aff=sub2api">このリンク</a>で登録し、チャージ時に「sub2api」クーポンを入力すると 10% オフになります。</td>
|
||||||
|
</tr>
|
||||||
|
|
||||||
|
<tr>
|
||||||
|
<td width="180"><a href="https://poixe.com/i/sub2api"><img src="assets/partners/logos/poixe.png" alt="PoixeAi" width="150"></a></td>
|
||||||
|
<td>Poixe AI のご支援に感謝します!Poixe AI は信頼性の高い LLM API サービスを提供しています。プラットフォームの API エンドポイントを活用して、AI 搭載プロダクトをシームレスに構築できます。また、ベンダーとして AI API リソースをプラットフォームに提供し、収益を得ることも可能です。専用の <a href="https://poixe.com/i/sub2api">sub2api</a> 紹介リンクから登録すると、初回チャージ時に $5 USD のボーナスがもらえます。</td>
|
||||||
|
</tr>
|
||||||
|
|
||||||
|
<tr>
|
||||||
|
<td width="180"><a href="https://ctok.ai"><img src="assets/partners/logos/ctok.png" alt="CTok" width="150"></a></td>
|
||||||
|
<td>CTok.ai のご支援に感謝します!CTok.ai はワンストップ AI プログラミングツールサービスプラットフォームの構築に取り組んでいます。Claude Code の専用プランと技術コミュニティサービスを提供し、Google Gemini や OpenAI Codex もサポートしています。丁寧に設計されたプランと専門的な技術コミュニティを通じて、開発者に安定したサービス保証と継続的な技術サポートを提供し、AI アシスト プログラミングを真の生産性向上ツールにします。<a href="https://ctok.ai">こちら</a>から登録!</td>
|
||||||
|
</tr>
|
||||||
|
|
||||||
|
<tr>
|
||||||
|
<td width="180"><a href="https://code.silkapi.com/"><img src="assets/partners/logos/silkapi.png" alt="silkapi" width="150"></a></td>
|
||||||
|
<td>SilkAPI のご支援に感謝します!<a href="https://code.silkapi.com/">SilkAPI</a> は Sub2API をベースに構築された中継サービスで、高速かつ安定した Codex API 中継の提供に特化しています。</td>
|
||||||
|
</tr>
|
||||||
|
|
||||||
|
<tr>
|
||||||
|
<td width="180"><a href="https://ylscode.com/"><img src="assets/partners/logos/ylscode.png" alt="ylscode" width="150"></a></td>
|
||||||
|
<td>YLS Code のご支援に感謝します!<a href="https://ylscode.com/">YLS Code</a> は安全なエンタープライズグレードの Coding Agent 生産性サービスの構築に取り組んでおり、安定かつ高速な Codex / Claude / Gemini サブスクリプションサービスと従量課金 API の柔軟なプランを提供しています。期間限定で新規登録者に 3 日間の Codex 試用特典をプレゼント中!</td>
|
||||||
|
</tr>
|
||||||
|
|
||||||
|
<tr>
|
||||||
|
<td width="180"><a href="https://www.aicodemirror.com/register?invitecode=KMVZQM"><img src="assets/partners/logos/AICodeMirror.jpg" alt="AICodeMirror" width="150"></a></td>
|
||||||
|
<td>AICodeMirror のご支援に感謝します!AICodeMirror は Claude Code / Codex / Gemini CLI の公式高安定性リレーサービスを提供しており、エンタープライズグレードの同時実行、迅速な請求書発行、24時間年中無休の専属テクニカルサポートを備えています。Claude Code / Codex / Gemini の公式チャネルを定価の 38% / 2% / 9% で利用可能、チャージ時にはさらに追加割引!AICodeMirror は sub2api ユーザー向けに特別特典を提供中:<a href="https://www.aicodemirror.com/register?invitecode=KMVZQM">こちらのリンク</a>から登録すると、初回チャージが 20% オフ、法人のお客様は最大 25% オフ!</td>
|
||||||
|
</tr>
|
||||||
|
|
||||||
|
<tr>
|
||||||
|
<td width="180"><a href="https://aigocode.com/invite/SUB2API"><img src="assets/partners/logos/aigocode.png" alt="AIGoCode" width="150"></a></td>
|
||||||
|
<td>AIGoCode のご支援に感謝します!AIGoCode は Claude Code、Codex、最新の Gemini モデルを統合したオールインワンプラットフォームで、安定的かつ効率的でコストパフォーマンスに優れた AI コーディングサービスを提供します。柔軟なサブスクリプションプラン、アカウント停止リスクゼロ、VPN 不要の直接アクセス、超高速レスポンスが特長です。AIGoCode は sub2api ユーザー向けに特別特典を用意しています:<a href="https://aigocode.com/invite/SUB2API">こちらのリンク</a>から登録すると、初回チャージ時に 10% のボーナスクレジットを追加プレゼント!</td>
|
||||||
|
</tr>
|
||||||
|
|
||||||
|
<tr>
|
||||||
|
<td width="180"><a href="https://shop.bmoplus.com/?utm_source=github"><img src="assets/partners/logos/bmoplus.jpg" alt="bmoplus" width="150"></a></td>
|
||||||
|
<td>本プロジェクトにご支援いただいた BmoPlus に感謝いたします!BmoPlusは、AIサブスクリプションのヘビーユーザー向けに特化した信頼性の高いAIアカウントサービスプロバイダーであり、安定した ChatGPT Plus / ChatGPT Pro (完全保証) / Claude Pro / Super Grok / Gemini Pro の公式代行チャージおよび即納アカウントを提供しています。こちらの<a href="https://shop.bmoplus.com/?utm_source=github">BmoPlus AIアカウント専門店/代行チャージ</a>経由でご登録・ご注文いただいたユーザー様は、GPTを 公式サイト価格の約1割(90% OFF) という驚異的な価格でご利用いただけます!</td>
|
||||||
|
</tr>
|
||||||
|
|
||||||
|
<tr>
|
||||||
|
<td width="180"><a href="https://bestproxy.com/?keyword=a2e8iuol"><img src="assets/partners/logos/bestproxy.png" alt="bestproxy" width="150"></a></td>
|
||||||
|
<td>Bestproxy のご支援に感謝します!<a href="https://bestproxy.com/?keyword=a2e8iuol">Bestproxy</a> は高純度の住宅IPを提供し、1アカウント1IP専有をサポートしています。実際の家庭ネットワークとフィンガープリント分離を組み合わせることで、リンク環境の分離を実現し、関連付けによるリスク管理の確率を低減します。</td>
|
||||||
|
</tr>
|
||||||
|
|
||||||
|
</table>
|
||||||
|
|
||||||
|
## エコシステム
|
||||||
|
|
||||||
|
Sub2API を拡張・統合するコミュニティプロジェクト:
|
||||||
|
|
||||||
|
| プロジェクト | 説明 | 機能 |
|
||||||
|
|---------|-------------|----------|
|
||||||
|
| ~~[Sub2ApiPay](https://github.com/touwaeriol/sub2apipay)~~ | ~~セルフサービス決済システム~~ | **内蔵済み** — 決済機能は Sub2API に統合されました。別途デプロイは不要です。[決済設定ガイド](docs/PAYMENT.md)をご参照ください |
|
||||||
|
| [sub2api-mobile](https://github.com/ckken/sub2api-mobile) | モバイル管理コンソール | ユーザー管理、アカウント管理、監視ダッシュボード、マルチバックエンド切り替えが可能なクロスプラットフォームアプリ(iOS/Android/Web)。Expo + React Native で構築 |
|
||||||
|
|
||||||
|
## 技術スタック
|
||||||
|
|
||||||
|
| コンポーネント | 技術 |
|
||||||
|
|-----------|------------|
|
||||||
|
| バックエンド | Go 1.25.7, Gin, Ent |
|
||||||
|
| フロントエンド | Vue 3.4+, Vite 5+, TailwindCSS |
|
||||||
|
| データベース | PostgreSQL 15+ |
|
||||||
|
| キャッシュ/キュー | Redis 7+ |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Nginx リバースプロキシに関する注意
|
||||||
|
|
||||||
|
Sub2API(または CRS)を Nginx でリバースプロキシし、Codex CLI と組み合わせて使用する場合、Nginx の `http` ブロックに以下の設定を追加してください:
|
||||||
|
|
||||||
|
```nginx
|
||||||
|
underscores_in_headers on;
|
||||||
|
```
|
||||||
|
|
||||||
|
Nginx はデフォルトでアンダースコアを含むヘッダー(例: `session_id`)を破棄するため、マルチアカウント構成でのスティッキーセッションルーティングに支障をきたします。
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## デプロイ
|
||||||
|
|
||||||
|
### 方法1: スクリプトによるインストール(推奨)
|
||||||
|
|
||||||
|
GitHub Releases からビルド済みバイナリをダウンロードするワンクリックインストールスクリプトです。
|
||||||
|
|
||||||
|
#### 前提条件
|
||||||
|
|
||||||
|
- Linux サーバー(amd64 または arm64)
|
||||||
|
- PostgreSQL 15+(インストール済みかつ稼働中)
|
||||||
|
- Redis 7+(インストール済みかつ稼働中)
|
||||||
|
- root 権限
|
||||||
|
|
||||||
|
#### インストール手順
|
||||||
|
|
||||||
|
```bash
|
||||||
|
curl -sSL https://raw.githubusercontent.com/Wei-Shaw/sub2api/main/deploy/install.sh | sudo bash
|
||||||
|
```
|
||||||
|
|
||||||
|
スクリプトは以下を実行します:
|
||||||
|
1. システムアーキテクチャの検出
|
||||||
|
2. 最新リリースのダウンロード
|
||||||
|
3. バイナリを `/opt/sub2api` にインストール
|
||||||
|
4. systemd サービスの作成
|
||||||
|
5. システムユーザーと権限の設定
|
||||||
|
|
||||||
|
#### インストール後の作業
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# 1. サービスを起動
|
||||||
|
sudo systemctl start sub2api
|
||||||
|
|
||||||
|
# 2. 起動時の自動起動を有効化
|
||||||
|
sudo systemctl enable sub2api
|
||||||
|
|
||||||
|
# 3. ブラウザでセットアップウィザードを開く
|
||||||
|
# http://YOUR_SERVER_IP:8080
|
||||||
|
```
|
||||||
|
|
||||||
|
セットアップウィザードでは以下の設定を行います:
|
||||||
|
- データベース設定
|
||||||
|
- Redis 設定
|
||||||
|
- 管理者アカウントの作成
|
||||||
|
|
||||||
|
#### アップグレード
|
||||||
|
|
||||||
|
**管理ダッシュボード**の左上にある**アップデートを確認**ボタンをクリックすることで、ダッシュボードから直接アップグレードできます。
|
||||||
|
|
||||||
|
Web インターフェースでは以下が可能です:
|
||||||
|
- 新しいバージョンの自動確認
|
||||||
|
- ワンクリックでのアップデートのダウンロードと適用
|
||||||
|
- 必要に応じたロールバック
|
||||||
|
|
||||||
|
#### よく使うコマンド
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# ステータスを確認
|
||||||
|
sudo systemctl status sub2api
|
||||||
|
|
||||||
|
# ログを表示
|
||||||
|
sudo journalctl -u sub2api -f
|
||||||
|
|
||||||
|
# サービスを再起動
|
||||||
|
sudo systemctl restart sub2api
|
||||||
|
|
||||||
|
# アンインストール
|
||||||
|
curl -sSL https://raw.githubusercontent.com/Wei-Shaw/sub2api/main/deploy/install.sh | sudo bash -s -- uninstall -y
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 方法2: Docker Compose(推奨)
|
||||||
|
|
||||||
|
PostgreSQL と Redis のコンテナを含む Docker Compose でデプロイします。
|
||||||
|
|
||||||
|
#### 前提条件
|
||||||
|
|
||||||
|
- Docker 20.10+
|
||||||
|
- Docker Compose v2+
|
||||||
|
|
||||||
|
#### クイックスタート(ワンクリックデプロイ)
|
||||||
|
|
||||||
|
自動デプロイスクリプトを使用して簡単にセットアップできます:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# デプロイ用ディレクトリを作成
|
||||||
|
mkdir -p sub2api-deploy && cd sub2api-deploy
|
||||||
|
|
||||||
|
# デプロイ準備スクリプトをダウンロードして実行
|
||||||
|
curl -sSL https://raw.githubusercontent.com/Wei-Shaw/sub2api/main/deploy/docker-deploy.sh | bash
|
||||||
|
|
||||||
|
# サービスを起動
|
||||||
|
docker compose up -d
|
||||||
|
|
||||||
|
# ログを表示
|
||||||
|
docker compose logs -f sub2api
|
||||||
|
```
|
||||||
|
|
||||||
|
**スクリプトの動作内容:**
|
||||||
|
- `docker-compose.local.yml`(`docker-compose.yml` として保存)と `.env.example` をダウンロード
|
||||||
|
- セキュアな認証情報(JWT_SECRET、TOTP_ENCRYPTION_KEY、POSTGRES_PASSWORD)を自動生成
|
||||||
|
- 自動生成されたシークレットで `.env` ファイルを作成
|
||||||
|
- データディレクトリを作成(バックアップ・移行が容易なローカルディレクトリを使用)
|
||||||
|
- 生成された認証情報を参照用に表示
|
||||||
|
|
||||||
|
#### 手動デプロイ
|
||||||
|
|
||||||
|
手動でセットアップする場合:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# 1. リポジトリをクローン
|
||||||
|
git clone https://github.com/Wei-Shaw/sub2api.git
|
||||||
|
cd sub2api/deploy
|
||||||
|
|
||||||
|
# 2. 環境設定ファイルをコピー
|
||||||
|
cp .env.example .env
|
||||||
|
|
||||||
|
# 3. 設定を編集(セキュアなパスワードを生成)
|
||||||
|
nano .env
|
||||||
|
```
|
||||||
|
|
||||||
|
**`.env` の必須設定:**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# PostgreSQL パスワード(必須)
|
||||||
|
POSTGRES_PASSWORD=your_secure_password_here
|
||||||
|
|
||||||
|
# JWT シークレット(推奨 - 再起動後もユーザーのログイン状態を保持)
|
||||||
|
JWT_SECRET=your_jwt_secret_here
|
||||||
|
|
||||||
|
# TOTP 暗号化キー(推奨 - 再起動後も二要素認証を維持)
|
||||||
|
TOTP_ENCRYPTION_KEY=your_totp_key_here
|
||||||
|
|
||||||
|
# オプション: 管理者アカウント
|
||||||
|
ADMIN_EMAIL=admin@example.com
|
||||||
|
ADMIN_PASSWORD=your_admin_password
|
||||||
|
|
||||||
|
# オプション: カスタムポート
|
||||||
|
SERVER_PORT=8080
|
||||||
|
```
|
||||||
|
|
||||||
|
**セキュアなシークレットの生成方法:**
|
||||||
|
```bash
|
||||||
|
# JWT_SECRET を生成
|
||||||
|
openssl rand -hex 32
|
||||||
|
|
||||||
|
# TOTP_ENCRYPTION_KEY を生成
|
||||||
|
openssl rand -hex 32
|
||||||
|
|
||||||
|
# POSTGRES_PASSWORD を生成
|
||||||
|
openssl rand -hex 32
|
||||||
|
```
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# 4. データディレクトリを作成(ローカルバージョンの場合)
|
||||||
|
mkdir -p data postgres_data redis_data
|
||||||
|
|
||||||
|
# 5. すべてのサービスを起動
|
||||||
|
# オプション A: ローカルディレクトリバージョン(推奨 - 移行が容易)
|
||||||
|
docker compose -f docker-compose.local.yml up -d
|
||||||
|
|
||||||
|
# オプション B: 名前付きボリュームバージョン(シンプルなセットアップ)
|
||||||
|
docker compose up -d
|
||||||
|
|
||||||
|
# 6. ステータスを確認
|
||||||
|
docker compose -f docker-compose.local.yml ps
|
||||||
|
|
||||||
|
# 7. ログを表示
|
||||||
|
docker compose -f docker-compose.local.yml logs -f sub2api
|
||||||
|
```
|
||||||
|
|
||||||
|
#### デプロイバージョン
|
||||||
|
|
||||||
|
| バージョン | データストレージ | 移行 | 推奨用途 |
|
||||||
|
|---------|-------------|-----------|----------|
|
||||||
|
| **docker-compose.local.yml** | ローカルディレクトリ | ✅ 容易(ディレクトリ全体を tar) | 本番環境、頻繁なバックアップ |
|
||||||
|
| **docker-compose.yml** | 名前付きボリューム | ⚠️ docker コマンドが必要 | シンプルなセットアップ |
|
||||||
|
|
||||||
|
**推奨:** データ管理が容易な `docker-compose.local.yml`(スクリプトによるデプロイ)を使用してください。
|
||||||
|
|
||||||
|
#### アクセス
|
||||||
|
|
||||||
|
ブラウザで `http://YOUR_SERVER_IP:8080` を開いてください。
|
||||||
|
|
||||||
|
管理者パスワードが自動生成された場合は、ログで確認できます:
|
||||||
|
```bash
|
||||||
|
docker compose -f docker-compose.local.yml logs sub2api | grep "admin password"
|
||||||
|
```
|
||||||
|
|
||||||
|
#### アップグレード
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# 最新イメージをプルしてコンテナを再作成
|
||||||
|
docker compose -f docker-compose.local.yml pull
|
||||||
|
docker compose -f docker-compose.local.yml up -d
|
||||||
|
```
|
||||||
|
|
||||||
|
#### 簡単な移行(ローカルディレクトリバージョン)
|
||||||
|
|
||||||
|
`docker-compose.local.yml` を使用している場合、新しいサーバーへの移行が簡単です:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# 移行元サーバーにて
|
||||||
|
docker compose -f docker-compose.local.yml down
|
||||||
|
cd ..
|
||||||
|
tar czf sub2api-complete.tar.gz sub2api-deploy/
|
||||||
|
|
||||||
|
# 新しいサーバーに転送
|
||||||
|
scp sub2api-complete.tar.gz user@new-server:/path/
|
||||||
|
|
||||||
|
# 移行先サーバーにて
|
||||||
|
tar xzf sub2api-complete.tar.gz
|
||||||
|
cd sub2api-deploy/
|
||||||
|
docker compose -f docker-compose.local.yml up -d
|
||||||
|
```
|
||||||
|
|
||||||
|
#### よく使うコマンド
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# すべてのサービスを停止
|
||||||
|
docker compose -f docker-compose.local.yml down
|
||||||
|
|
||||||
|
# 再起動
|
||||||
|
docker compose -f docker-compose.local.yml restart
|
||||||
|
|
||||||
|
# すべてのログを表示
|
||||||
|
docker compose -f docker-compose.local.yml logs -f
|
||||||
|
|
||||||
|
# すべてのデータを削除(注意!)
|
||||||
|
docker compose -f docker-compose.local.yml down
|
||||||
|
rm -rf data/ postgres_data/ redis_data/
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 方法3: ソースからビルド
|
||||||
|
|
||||||
|
開発やカスタマイズのためにソースコードからビルドして実行します。
|
||||||
|
|
||||||
|
#### 前提条件
|
||||||
|
|
||||||
|
- Go 1.21+
|
||||||
|
- Node.js 18+
|
||||||
|
- PostgreSQL 15+
|
||||||
|
- Redis 7+
|
||||||
|
|
||||||
|
#### ビルド手順
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# 1. リポジトリをクローン
|
||||||
|
git clone https://github.com/Wei-Shaw/sub2api.git
|
||||||
|
cd sub2api
|
||||||
|
|
||||||
|
# 2. pnpm をインストール(未インストールの場合)
|
||||||
|
npm install -g pnpm
|
||||||
|
|
||||||
|
# 3. フロントエンドをビルド
|
||||||
|
cd frontend
|
||||||
|
pnpm install
|
||||||
|
pnpm run build
|
||||||
|
# 出力先: ../backend/internal/web/dist/
|
||||||
|
|
||||||
|
# 4. フロントエンドを組み込んだバックエンドをビルド
|
||||||
|
cd ../backend
|
||||||
|
go build -tags embed -o sub2api ./cmd/server
|
||||||
|
|
||||||
|
# 5. 設定ファイルを作成
|
||||||
|
cp ../deploy/config.example.yaml ./config.yaml
|
||||||
|
|
||||||
|
# 6. 設定を編集
|
||||||
|
nano config.yaml
|
||||||
|
```
|
||||||
|
|
||||||
|
> **注意:** `-tags embed` フラグはフロントエンドをバイナリに組み込みます。このフラグがない場合、バイナリはフロントエンド UI を提供しません。
|
||||||
|
|
||||||
|
**`config.yaml` の主要設定:**
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
server:
|
||||||
|
host: "0.0.0.0"
|
||||||
|
port: 8080
|
||||||
|
mode: "release"
|
||||||
|
|
||||||
|
database:
|
||||||
|
host: "localhost"
|
||||||
|
port: 5432
|
||||||
|
user: "postgres"
|
||||||
|
password: "your_password"
|
||||||
|
dbname: "sub2api"
|
||||||
|
|
||||||
|
redis:
|
||||||
|
host: "localhost"
|
||||||
|
port: 6379
|
||||||
|
password: ""
|
||||||
|
|
||||||
|
jwt:
|
||||||
|
secret: "change-this-to-a-secure-random-string"
|
||||||
|
expire_hour: 24
|
||||||
|
|
||||||
|
default:
|
||||||
|
user_concurrency: 5
|
||||||
|
user_balance: 0
|
||||||
|
api_key_prefix: "sk-"
|
||||||
|
rate_multiplier: 1.0
|
||||||
|
```
|
||||||
|
|
||||||
|
### Sora ステータス(一時的に利用不可)
|
||||||
|
|
||||||
|
> ⚠️ Sora 関連の機能は、上流統合およびメディア配信の技術的問題により一時的に利用できません。
|
||||||
|
> 現時点では本番環境で Sora に依存しないでください。
|
||||||
|
> 既存の `gateway.sora_*` 設定キーは予約されていますが、これらの問題が解決されるまで有効にならない場合があります。
|
||||||
|
|
||||||
|
`config.yaml` では追加のセキュリティ関連オプションも利用できます:
|
||||||
|
|
||||||
|
- `cors.allowed_origins` - CORS 許可リスト
|
||||||
|
- `security.url_allowlist` - 上流/価格/CRS ホストの許可リスト
|
||||||
|
- `security.url_allowlist.enabled` - URL バリデーションの無効化(注意して使用)
|
||||||
|
- `security.url_allowlist.allow_insecure_http` - バリデーション無効時に HTTP URL を許可
|
||||||
|
- `security.url_allowlist.allow_private_hosts` - プライベート/ローカル IP アドレスを許可
|
||||||
|
- `security.response_headers.enabled` - 設定可能なレスポンスヘッダーフィルタリングを有効化(無効時はデフォルトの許可リストを使用)
|
||||||
|
- `security.csp` - Content-Security-Policy ヘッダーの制御
|
||||||
|
- `billing.circuit_breaker` - 課金エラー時にフェイルクローズ
|
||||||
|
- `server.trusted_proxies` - X-Forwarded-For パースの有効化
|
||||||
|
- `turnstile.required` - リリースモードでの Turnstile 必須化
|
||||||
|
|
||||||
|
**⚠️ セキュリティ警告: HTTP URL 設定**
|
||||||
|
|
||||||
|
`security.url_allowlist.enabled=false` の場合、システムはデフォルトで最小限の URL バリデーションを行い、**HTTP URL を拒否**して HTTPS のみを許可します。HTTP URL を許可するには(開発環境や内部テスト用など)、以下を明示的に設定する必要があります:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
security:
|
||||||
|
url_allowlist:
|
||||||
|
enabled: false # 許可リストチェックを無効化
|
||||||
|
allow_insecure_http: true # HTTP URL を許可(⚠️ セキュリティリスクあり)
|
||||||
|
```
|
||||||
|
|
||||||
|
**または環境変数で設定:**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
SECURITY_URL_ALLOWLIST_ENABLED=false
|
||||||
|
SECURITY_URL_ALLOWLIST_ALLOW_INSECURE_HTTP=true
|
||||||
|
```
|
||||||
|
|
||||||
|
**HTTP を許可するリスク:**
|
||||||
|
- API キーとデータが**平文**で送信される(傍受の危険性)
|
||||||
|
- **中間者攻撃(MITM)**を受けやすい
|
||||||
|
- **本番環境には不適切**
|
||||||
|
|
||||||
|
**HTTP を使用すべき場面:**
|
||||||
|
- ✅ ローカルサーバーでの開発・テスト(http://localhost)
|
||||||
|
- ✅ 信頼できるエンドポイントを持つ内部ネットワーク
|
||||||
|
- ✅ HTTPS 取得前のアカウント接続テスト
|
||||||
|
- ❌ 本番環境(HTTPS のみを使用)
|
||||||
|
|
||||||
|
**この設定なしで表示されるエラー例:**
|
||||||
|
```
|
||||||
|
Invalid base URL: invalid url scheme: http
|
||||||
|
```
|
||||||
|
|
||||||
|
URL バリデーションまたはレスポンスヘッダーフィルタリングを無効にする場合は、ネットワーク層を強化してください:
|
||||||
|
- 上流ドメイン/IP のエグレス許可リストを適用
|
||||||
|
- プライベート/ループバック/リンクローカル範囲をブロック
|
||||||
|
- TLS のみのアウトバウンドトラフィックを強制
|
||||||
|
- プロキシで機密性の高い上流レスポンスヘッダーを除去
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# 7. アプリケーションを実行
|
||||||
|
./sub2api
|
||||||
|
```
|
||||||
|
|
||||||
|
#### 開発モード
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# バックエンド(ホットリロード付き)
|
||||||
|
cd backend
|
||||||
|
go run ./cmd/server
|
||||||
|
|
||||||
|
# フロントエンド(ホットリロード付き)
|
||||||
|
cd frontend
|
||||||
|
pnpm run dev
|
||||||
|
```
|
||||||
|
|
||||||
|
#### コード生成
|
||||||
|
|
||||||
|
`backend/ent/schema` を編集した場合、Ent + Wire を再生成してください:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cd backend
|
||||||
|
go generate ./ent
|
||||||
|
go generate ./cmd/server
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## シンプルモード
|
||||||
|
|
||||||
|
シンプルモードは、フル SaaS 機能を必要とせず、素早くアクセスしたい個人開発者や社内チーム向けに設計されています。
|
||||||
|
|
||||||
|
- 有効化: 環境変数 `RUN_MODE=simple` を設定
|
||||||
|
- 違い: SaaS 関連機能を非表示にし、課金プロセスをスキップ
|
||||||
|
- セキュリティに関する注意: 本番環境では `SIMPLE_MODE_CONFIRM=true` も設定する必要があります
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Antigravity サポート
|
||||||
|
|
||||||
|
Sub2API は [Antigravity](https://antigravity.so/) アカウントをサポートしています。認証後、Claude および Gemini モデル用の専用エンドポイントが利用可能になります。
|
||||||
|
|
||||||
|
### 専用エンドポイント
|
||||||
|
|
||||||
|
| エンドポイント | モデル |
|
||||||
|
|----------|-------|
|
||||||
|
| `/antigravity/v1/messages` | Claude モデル |
|
||||||
|
| `/antigravity/v1beta/` | Gemini モデル |
|
||||||
|
|
||||||
|
### Claude Code の設定
|
||||||
|
|
||||||
|
```bash
|
||||||
|
export ANTHROPIC_BASE_URL="http://localhost:8080/antigravity"
|
||||||
|
export ANTHROPIC_AUTH_TOKEN="sk-xxx"
|
||||||
|
```
|
||||||
|
|
||||||
|
### ハイブリッドスケジューリングモード
|
||||||
|
|
||||||
|
Antigravity アカウントはオプションの**ハイブリッドスケジューリング**をサポートしています。有効にすると、汎用エンドポイント `/v1/messages` および `/v1beta/` も Antigravity アカウントにリクエストをルーティングします。
|
||||||
|
|
||||||
|
> **⚠️ 警告**: Anthropic Claude と Antigravity Claude は**同じ会話コンテキスト内で混在させることはできません**。グループを使用して適切に分離してください。
|
||||||
|
|
||||||
|
### 既知の問題
|
||||||
|
|
||||||
|
Claude Code では、Plan Mode を自動的に終了できません。(通常、ネイティブの Claude API を使用する場合、計画が完了すると Claude Code はユーザーに計画を承認または拒否するオプションをポップアップ表示します。)
|
||||||
|
|
||||||
|
**回避策**: `Shift + Tab` を押して手動で Plan Mode を終了し、計画を承認または拒否するためのレスポンスを入力してください。
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## プロジェクト構成
|
||||||
|
|
||||||
|
```
|
||||||
|
sub2api/
|
||||||
|
├── backend/ # Go バックエンドサービス
|
||||||
|
│ ├── cmd/server/ # アプリケーションエントリ
|
||||||
|
│ ├── internal/ # 内部モジュール
|
||||||
|
│ │ ├── config/ # 設定
|
||||||
|
│ │ ├── model/ # データモデル
|
||||||
|
│ │ ├── service/ # ビジネスロジック
|
||||||
|
│ │ ├── handler/ # HTTP ハンドラー
|
||||||
|
│ │ └── gateway/ # API ゲートウェイコア
|
||||||
|
│ └── resources/ # 静的リソース
|
||||||
|
│
|
||||||
|
├── frontend/ # Vue 3 フロントエンド
|
||||||
|
│ └── src/
|
||||||
|
│ ├── api/ # API 呼び出し
|
||||||
|
│ ├── stores/ # 状態管理
|
||||||
|
│ ├── views/ # ページコンポーネント
|
||||||
|
│ └── components/ # 再利用可能なコンポーネント
|
||||||
|
│
|
||||||
|
└── deploy/ # デプロイファイル
|
||||||
|
├── docker-compose.yml # Docker Compose 設定
|
||||||
|
├── .env.example # Docker Compose 用環境変数
|
||||||
|
├── config.example.yaml # バイナリデプロイ用フル設定ファイル
|
||||||
|
└── install.sh # ワンクリックインストールスクリプト
|
||||||
|
```
|
||||||
|
|
||||||
|
## 免責事項
|
||||||
|
|
||||||
|
> **本プロジェクトをご利用の前に、以下をよくお読みください:**
|
||||||
|
>
|
||||||
|
> :rotating_light: **利用規約違反のリスク**: 本プロジェクトの使用は Anthropic の利用規約に違反する可能性があります。使用前に Anthropic のユーザー契約をよくお読みください。本プロジェクトの使用に起因するすべてのリスクは、ユーザー自身が負うものとします。
|
||||||
|
>
|
||||||
|
> :book: **免責事項**: 本プロジェクトは技術的な学習および研究目的のみで提供されています。作者は、本プロジェクトの使用によるアカウント停止、サービス中断、その他の損失について一切の責任を負いません。
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## スター履歴
|
||||||
|
|
||||||
|
<a href="https://star-history.com/#Wei-Shaw/sub2api&Date">
|
||||||
|
<picture>
|
||||||
|
<source media="(prefers-color-scheme: dark)" srcset="https://api.star-history.com/svg?repos=Wei-Shaw/sub2api&type=Date&theme=dark" />
|
||||||
|
<source media="(prefers-color-scheme: light)" srcset="https://api.star-history.com/svg?repos=Wei-Shaw/sub2api&type=Date" />
|
||||||
|
<img alt="Star History Chart" src="https://api.star-history.com/svg?repos=Wei-Shaw/sub2api&type=Date" />
|
||||||
|
</picture>
|
||||||
|
</a>
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## ライセンス
|
||||||
|
|
||||||
|
本プロジェクトは [GNU Lesser General Public License v3.0](LICENSE)(またはそれ以降のバージョン)の下でライセンスされています。
|
||||||
|
|
||||||
|
Copyright (c) 2026 Wesley Liddick
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
<div align="center">
|
||||||
|
|
||||||
|
**このプロジェクトが役に立ったら、ぜひスターをお願いします!**
|
||||||
|
|
||||||
|
</div>
|
||||||
BIN
assets/partners/logos/AICodeMirror.jpg
Normal file
|
After Width: | Height: | Size: 83 KiB |
BIN
assets/partners/logos/aigocode.png
Normal file
|
After Width: | Height: | Size: 38 KiB |
BIN
assets/partners/logos/bestproxy.png
Normal file
|
After Width: | Height: | Size: 9.5 KiB |
BIN
assets/partners/logos/bmoplus.jpg
Normal file
|
After Width: | Height: | Size: 7.8 KiB |
BIN
assets/partners/logos/ctok.png
Normal file
|
After Width: | Height: | Size: 246 KiB |
BIN
assets/partners/logos/packycode.png
Normal file
|
After Width: | Height: | Size: 8.1 KiB |
BIN
assets/partners/logos/pincc-logo.png
Normal file
|
After Width: | Height: | Size: 171 KiB |
BIN
assets/partners/logos/poixe.png
Normal file
|
After Width: | Height: | Size: 39 KiB |
BIN
assets/partners/logos/silkapi.png
Normal file
|
After Width: | Height: | Size: 2.8 KiB |
BIN
assets/partners/logos/ylscode.png
Normal file
|
After Width: | Height: | Size: 26 KiB |
2
backend/.dockerignore
Normal file
@@ -0,0 +1,2 @@
|
|||||||
|
.cache/
|
||||||
|
.DS_Store
|
||||||
@@ -5,6 +5,7 @@ linters:
|
|||||||
enable:
|
enable:
|
||||||
- depguard
|
- depguard
|
||||||
- errcheck
|
- errcheck
|
||||||
|
- gosec
|
||||||
- govet
|
- govet
|
||||||
- ineffassign
|
- ineffassign
|
||||||
- staticcheck
|
- staticcheck
|
||||||
@@ -18,16 +19,46 @@ linters:
|
|||||||
list-mode: original
|
list-mode: original
|
||||||
files:
|
files:
|
||||||
- "**/internal/service/**"
|
- "**/internal/service/**"
|
||||||
|
- "!**/internal/service/ops_aggregation_service.go"
|
||||||
|
- "!**/internal/service/ops_alert_evaluator_service.go"
|
||||||
|
- "!**/internal/service/ops_cleanup_service.go"
|
||||||
|
- "!**/internal/service/ops_metrics_collector.go"
|
||||||
|
- "!**/internal/service/ops_scheduled_report_service.go"
|
||||||
|
- "!**/internal/service/wire.go"
|
||||||
deny:
|
deny:
|
||||||
- pkg: sub2api/internal/repository
|
- pkg: github.com/Wei-Shaw/sub2api/internal/repository
|
||||||
desc: "service must not import repository"
|
desc: "service must not import repository"
|
||||||
|
- pkg: gorm.io/gorm
|
||||||
|
desc: "service must not import gorm"
|
||||||
|
- pkg: github.com/redis/go-redis/v9
|
||||||
|
desc: "service must not import redis"
|
||||||
handler-no-repository:
|
handler-no-repository:
|
||||||
list-mode: original
|
list-mode: original
|
||||||
files:
|
files:
|
||||||
- "**/internal/handler/**"
|
- "**/internal/handler/**"
|
||||||
deny:
|
deny:
|
||||||
- pkg: sub2api/internal/repository
|
- pkg: github.com/Wei-Shaw/sub2api/internal/repository
|
||||||
desc: "handler must not import repository"
|
desc: "handler must not import repository"
|
||||||
|
- pkg: gorm.io/gorm
|
||||||
|
desc: "handler must not import gorm"
|
||||||
|
- pkg: github.com/redis/go-redis/v9
|
||||||
|
desc: "handler must not import redis"
|
||||||
|
gosec:
|
||||||
|
excludes:
|
||||||
|
- G101
|
||||||
|
- G103
|
||||||
|
- G104
|
||||||
|
- G109
|
||||||
|
- G115
|
||||||
|
- G201
|
||||||
|
- G202
|
||||||
|
- G301
|
||||||
|
- G302
|
||||||
|
- G304
|
||||||
|
- G306
|
||||||
|
- G404
|
||||||
|
severity: high
|
||||||
|
confidence: high
|
||||||
errcheck:
|
errcheck:
|
||||||
# Report about not checking of errors in type assertions: `a := b.(MyStruct)`.
|
# Report about not checking of errors in type assertions: `a := b.(MyStruct)`.
|
||||||
# Such cases aren't reported by default.
|
# Such cases aren't reported by default.
|
||||||
@@ -62,519 +93,33 @@ linters:
|
|||||||
check-escaping-errors: true
|
check-escaping-errors: true
|
||||||
staticcheck:
|
staticcheck:
|
||||||
# https://staticcheck.dev/docs/configuration/options/#dot_import_whitelist
|
# https://staticcheck.dev/docs/configuration/options/#dot_import_whitelist
|
||||||
# Default: ["github.com/mmcloughlin/avo/build", "github.com/mmcloughlin/avo/operand", "github.com/mmcloughlin/avo/reg"]
|
|
||||||
dot-import-whitelist:
|
dot-import-whitelist:
|
||||||
- fmt
|
- fmt
|
||||||
# https://staticcheck.dev/docs/configuration/options/#initialisms
|
# https://staticcheck.dev/docs/configuration/options/#initialisms
|
||||||
# Default: ["ACL", "API", "ASCII", "CPU", "CSS", "DNS", "EOF", "GUID", "HTML", "HTTP", "HTTPS", "ID", "IP", "JSON", "QPS", "RAM", "RPC", "SLA", "SMTP", "SQL", "SSH", "TCP", "TLS", "TTL", "UDP", "UI", "GID", "UID", "UUID", "URI", "URL", "UTF8", "VM", "XML", "XMPP", "XSRF", "XSS", "SIP", "RTP", "AMQP", "DB", "TS"]
|
|
||||||
initialisms: [ "ACL", "API", "ASCII", "CPU", "CSS", "DNS", "EOF", "GUID", "HTML", "HTTP", "HTTPS", "ID", "IP", "JSON", "QPS", "RAM", "RPC", "SLA", "SMTP", "SQL", "SSH", "TCP", "TLS", "TTL", "UDP", "UI", "GID", "UID", "UUID", "URI", "URL", "UTF8", "VM", "XML", "XMPP", "XSRF", "XSS", "SIP", "RTP", "AMQP", "DB", "TS" ]
|
initialisms: [ "ACL", "API", "ASCII", "CPU", "CSS", "DNS", "EOF", "GUID", "HTML", "HTTP", "HTTPS", "ID", "IP", "JSON", "QPS", "RAM", "RPC", "SLA", "SMTP", "SQL", "SSH", "TCP", "TLS", "TTL", "UDP", "UI", "GID", "UID", "UUID", "URI", "URL", "UTF8", "VM", "XML", "XMPP", "XSRF", "XSS", "SIP", "RTP", "AMQP", "DB", "TS" ]
|
||||||
# https://staticcheck.dev/docs/configuration/options/#http_status_code_whitelist
|
# https://staticcheck.dev/docs/configuration/options/#http_status_code_whitelist
|
||||||
# Default: ["200", "400", "404", "500"]
|
|
||||||
http-status-code-whitelist: [ "200", "400", "404", "500" ]
|
http-status-code-whitelist: [ "200", "400", "404", "500" ]
|
||||||
# SAxxxx checks in https://staticcheck.dev/docs/configuration/options/#checks
|
# "all" enables every SA/ST/S/QF check; only list the ones to disable.
|
||||||
# Example (to disable some checks): [ "all", "-SA1000", "-SA1001"]
|
|
||||||
# Run `GL_DEBUG=staticcheck golangci-lint run --enable=staticcheck` to see all available checks and enabled by config checks.
|
|
||||||
# Default: ["all", "-ST1000", "-ST1003", "-ST1016", "-ST1020", "-ST1021", "-ST1022"]
|
|
||||||
checks:
|
checks:
|
||||||
# Invalid regular expression.
|
- all
|
||||||
# https://staticcheck.dev/docs/checks/#SA1000
|
- -ST1000 # Package comment format
|
||||||
- SA1000
|
- -ST1003 # Poorly chosen identifier (ApiKey vs APIKey)
|
||||||
# Invalid template.
|
- -ST1020 # Comment on exported method format
|
||||||
# https://staticcheck.dev/docs/checks/#SA1001
|
- -ST1021 # Comment on exported type format
|
||||||
- SA1001
|
- -ST1022 # Comment on exported variable format
|
||||||
# Invalid format in 'time.Parse'.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA1002
|
|
||||||
- SA1002
|
|
||||||
# Unsupported argument to functions in 'encoding/binary'.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA1003
|
|
||||||
- SA1003
|
|
||||||
# Suspiciously small untyped constant in 'time.Sleep'.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA1004
|
|
||||||
- SA1004
|
|
||||||
# Invalid first argument to 'exec.Command'.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA1005
|
|
||||||
- SA1005
|
|
||||||
# 'Printf' with dynamic first argument and no further arguments.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA1006
|
|
||||||
- SA1006
|
|
||||||
# Invalid URL in 'net/url.Parse'.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA1007
|
|
||||||
- SA1007
|
|
||||||
# Non-canonical key in 'http.Header' map.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA1008
|
|
||||||
- SA1008
|
|
||||||
# '(*regexp.Regexp).FindAll' called with 'n == 0', which will always return zero results.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA1010
|
|
||||||
- SA1010
|
|
||||||
# Various methods in the "strings" package expect valid UTF-8, but invalid input is provided.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA1011
|
|
||||||
- SA1011
|
|
||||||
# A nil 'context.Context' is being passed to a function, consider using 'context.TODO' instead.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA1012
|
|
||||||
- SA1012
|
|
||||||
# 'io.Seeker.Seek' is being called with the whence constant as the first argument, but it should be the second.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA1013
|
|
||||||
- SA1013
|
|
||||||
# Non-pointer value passed to 'Unmarshal' or 'Decode'.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA1014
|
|
||||||
- SA1014
|
|
||||||
# Using 'time.Tick' in a way that will leak. Consider using 'time.NewTicker', and only use 'time.Tick' in tests, commands and endless functions.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA1015
|
|
||||||
- SA1015
|
|
||||||
# Trapping a signal that cannot be trapped.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA1016
|
|
||||||
- SA1016
|
|
||||||
# Channels used with 'os/signal.Notify' should be buffered.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA1017
|
|
||||||
- SA1017
|
|
||||||
# 'strings.Replace' called with 'n == 0', which does nothing.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA1018
|
|
||||||
- SA1018
|
|
||||||
# Using a deprecated function, variable, constant or field.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA1019
|
|
||||||
- SA1019
|
|
||||||
# Using an invalid host:port pair with a 'net.Listen'-related function.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA1020
|
|
||||||
- SA1020
|
|
||||||
# Using 'bytes.Equal' to compare two 'net.IP'.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA1021
|
|
||||||
- SA1021
|
|
||||||
# Modifying the buffer in an 'io.Writer' implementation.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA1023
|
|
||||||
- SA1023
|
|
||||||
# A string cutset contains duplicate characters.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA1024
|
|
||||||
- SA1024
|
|
||||||
# It is not possible to use '(*time.Timer).Reset''s return value correctly.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA1025
|
|
||||||
- SA1025
|
|
||||||
# Cannot marshal channels or functions.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA1026
|
|
||||||
- SA1026
|
|
||||||
# Atomic access to 64-bit variable must be 64-bit aligned.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA1027
|
|
||||||
- SA1027
|
|
||||||
# 'sort.Slice' can only be used on slices.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA1028
|
|
||||||
- SA1028
|
|
||||||
# Inappropriate key in call to 'context.WithValue'.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA1029
|
|
||||||
- SA1029
|
|
||||||
# Invalid argument in call to a 'strconv' function.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA1030
|
|
||||||
- SA1030
|
|
||||||
# Overlapping byte slices passed to an encoder.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA1031
|
|
||||||
- SA1031
|
|
||||||
# Wrong order of arguments to 'errors.Is'.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA1032
|
|
||||||
- SA1032
|
|
||||||
# 'sync.WaitGroup.Add' called inside the goroutine, leading to a race condition.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA2000
|
|
||||||
- SA2000
|
|
||||||
# Empty critical section, did you mean to defer the unlock?.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA2001
|
|
||||||
- SA2001
|
|
||||||
# Called 'testing.T.FailNow' or 'SkipNow' in a goroutine, which isn't allowed.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA2002
|
|
||||||
- SA2002
|
|
||||||
# Deferred 'Lock' right after locking, likely meant to defer 'Unlock' instead.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA2003
|
|
||||||
- SA2003
|
|
||||||
# 'TestMain' doesn't call 'os.Exit', hiding test failures.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA3000
|
|
||||||
- SA3000
|
|
||||||
# Assigning to 'b.N' in benchmarks distorts the results.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA3001
|
|
||||||
- SA3001
|
|
||||||
# Binary operator has identical expressions on both sides.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA4000
|
|
||||||
- SA4000
|
|
||||||
# '&*x' gets simplified to 'x', it does not copy 'x'.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA4001
|
|
||||||
- SA4001
|
|
||||||
# Comparing unsigned values against negative values is pointless.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA4003
|
|
||||||
- SA4003
|
|
||||||
# The loop exits unconditionally after one iteration.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA4004
|
|
||||||
- SA4004
|
|
||||||
# Field assignment that will never be observed. Did you mean to use a pointer receiver?.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA4005
|
|
||||||
- SA4005
|
|
||||||
# A value assigned to a variable is never read before being overwritten. Forgotten error check or dead code?.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA4006
|
|
||||||
- SA4006
|
|
||||||
# The variable in the loop condition never changes, are you incrementing the wrong variable?.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA4008
|
|
||||||
- SA4008
|
|
||||||
# A function argument is overwritten before its first use.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA4009
|
|
||||||
- SA4009
|
|
||||||
# The result of 'append' will never be observed anywhere.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA4010
|
|
||||||
- SA4010
|
|
||||||
# Break statement with no effect. Did you mean to break out of an outer loop?.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA4011
|
|
||||||
- SA4011
|
|
||||||
# Comparing a value against NaN even though no value is equal to NaN.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA4012
|
|
||||||
- SA4012
|
|
||||||
# Negating a boolean twice ('!!b') is the same as writing 'b'. This is either redundant, or a typo.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA4013
|
|
||||||
- SA4013
|
|
||||||
# An if/else if chain has repeated conditions and no side-effects; if the condition didn't match the first time, it won't match the second time, either.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA4014
|
|
||||||
- SA4014
|
|
||||||
# Calling functions like 'math.Ceil' on floats converted from integers doesn't do anything useful.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA4015
|
|
||||||
- SA4015
|
|
||||||
# Certain bitwise operations, such as 'x ^ 0', do not do anything useful.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA4016
|
|
||||||
- SA4016
|
|
||||||
# Discarding the return values of a function without side effects, making the call pointless.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA4017
|
|
||||||
- SA4017
|
|
||||||
# Self-assignment of variables.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA4018
|
|
||||||
- SA4018
|
|
||||||
# Multiple, identical build constraints in the same file.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA4019
|
|
||||||
- SA4019
|
|
||||||
# Unreachable case clause in a type switch.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA4020
|
|
||||||
- SA4020
|
|
||||||
# "x = append(y)" is equivalent to "x = y".
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA4021
|
|
||||||
- SA4021
|
|
||||||
# Comparing the address of a variable against nil.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA4022
|
|
||||||
- SA4022
|
|
||||||
# Impossible comparison of interface value with untyped nil.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA4023
|
|
||||||
- SA4023
|
|
||||||
# Checking for impossible return value from a builtin function.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA4024
|
|
||||||
- SA4024
|
|
||||||
# Integer division of literals that results in zero.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA4025
|
|
||||||
- SA4025
|
|
||||||
# Go constants cannot express negative zero.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA4026
|
|
||||||
- SA4026
|
|
||||||
# '(*net/url.URL).Query' returns a copy, modifying it doesn't change the URL.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA4027
|
|
||||||
- SA4027
|
|
||||||
# 'x % 1' is always zero.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA4028
|
|
||||||
- SA4028
|
|
||||||
# Ineffective attempt at sorting slice.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA4029
|
|
||||||
- SA4029
|
|
||||||
# Ineffective attempt at generating random number.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA4030
|
|
||||||
- SA4030
|
|
||||||
# Checking never-nil value against nil.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA4031
|
|
||||||
- SA4031
|
|
||||||
# Comparing 'runtime.GOOS' or 'runtime.GOARCH' against impossible value.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA4032
|
|
||||||
- SA4032
|
|
||||||
# Assignment to nil map.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA5000
|
|
||||||
- SA5000
|
|
||||||
# Deferring 'Close' before checking for a possible error.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA5001
|
|
||||||
- SA5001
|
|
||||||
# The empty for loop ("for {}") spins and can block the scheduler.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA5002
|
|
||||||
- SA5002
|
|
||||||
# Defers in infinite loops will never execute.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA5003
|
|
||||||
- SA5003
|
|
||||||
# "for { select { ..." with an empty default branch spins.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA5004
|
|
||||||
- SA5004
|
|
||||||
# The finalizer references the finalized object, preventing garbage collection.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA5005
|
|
||||||
- SA5005
|
|
||||||
# Infinite recursive call.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA5007
|
|
||||||
- SA5007
|
|
||||||
# Invalid struct tag.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA5008
|
|
||||||
- SA5008
|
|
||||||
# Invalid Printf call.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA5009
|
|
||||||
- SA5009
|
|
||||||
# Impossible type assertion.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA5010
|
|
||||||
- SA5010
|
|
||||||
# Possible nil pointer dereference.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA5011
|
|
||||||
- SA5011
|
|
||||||
# Passing odd-sized slice to function expecting even size.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA5012
|
|
||||||
- SA5012
|
|
||||||
# Using 'regexp.Match' or related in a loop, should use 'regexp.Compile'.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA6000
|
|
||||||
- SA6000
|
|
||||||
# Missing an optimization opportunity when indexing maps by byte slices.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA6001
|
|
||||||
- SA6001
|
|
||||||
# Storing non-pointer values in 'sync.Pool' allocates memory.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA6002
|
|
||||||
- SA6002
|
|
||||||
# Converting a string to a slice of runes before ranging over it.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA6003
|
|
||||||
- SA6003
|
|
||||||
# Inefficient string comparison with 'strings.ToLower' or 'strings.ToUpper'.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA6005
|
|
||||||
- SA6005
|
|
||||||
# Using io.WriteString to write '[]byte'.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA6006
|
|
||||||
- SA6006
|
|
||||||
# Defers in range loops may not run when you expect them to.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA9001
|
|
||||||
- SA9001
|
|
||||||
# Using a non-octal 'os.FileMode' that looks like it was meant to be in octal.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA9002
|
|
||||||
- SA9002
|
|
||||||
# Empty body in an if or else branch.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA9003
|
|
||||||
- SA9003
|
|
||||||
# Only the first constant has an explicit type.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA9004
|
|
||||||
- SA9004
|
|
||||||
# Trying to marshal a struct with no public fields nor custom marshaling.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA9005
|
|
||||||
- SA9005
|
|
||||||
# Dubious bit shifting of a fixed size integer value.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA9006
|
|
||||||
- SA9006
|
|
||||||
# Deleting a directory that shouldn't be deleted.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA9007
|
|
||||||
- SA9007
|
|
||||||
# 'else' branch of a type assertion is probably not reading the right value.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA9008
|
|
||||||
- SA9008
|
|
||||||
# Ineffectual Go compiler directive.
|
|
||||||
# https://staticcheck.dev/docs/checks/#SA9009
|
|
||||||
- SA9009
|
|
||||||
# Incorrect or missing package comment.
|
|
||||||
# https://staticcheck.dev/docs/checks/#ST1000
|
|
||||||
- ST1000
|
|
||||||
# Dot imports are discouraged.
|
|
||||||
# https://staticcheck.dev/docs/checks/#ST1001
|
|
||||||
- ST1001
|
|
||||||
# Poorly chosen identifier.
|
|
||||||
# https://staticcheck.dev/docs/checks/#ST1003
|
|
||||||
- ST1003
|
|
||||||
# Incorrectly formatted error string.
|
|
||||||
# https://staticcheck.dev/docs/checks/#ST1005
|
|
||||||
- ST1005
|
|
||||||
# Poorly chosen receiver name.
|
|
||||||
# https://staticcheck.dev/docs/checks/#ST1006
|
|
||||||
- ST1006
|
|
||||||
# A function's error value should be its last return value.
|
|
||||||
# https://staticcheck.dev/docs/checks/#ST1008
|
|
||||||
- ST1008
|
|
||||||
# Poorly chosen name for variable of type 'time.Duration'.
|
|
||||||
# https://staticcheck.dev/docs/checks/#ST1011
|
|
||||||
- ST1011
|
|
||||||
# Poorly chosen name for error variable.
|
|
||||||
# https://staticcheck.dev/docs/checks/#ST1012
|
|
||||||
- ST1012
|
|
||||||
# Should use constants for HTTP error codes, not magic numbers.
|
|
||||||
# https://staticcheck.dev/docs/checks/#ST1013
|
|
||||||
- ST1013
|
|
||||||
# A switch's default case should be the first or last case.
|
|
||||||
# https://staticcheck.dev/docs/checks/#ST1015
|
|
||||||
- ST1015
|
|
||||||
# Use consistent method receiver names.
|
|
||||||
# https://staticcheck.dev/docs/checks/#ST1016
|
|
||||||
- ST1016
|
|
||||||
# Don't use Yoda conditions.
|
|
||||||
# https://staticcheck.dev/docs/checks/#ST1017
|
|
||||||
- ST1017
|
|
||||||
# Avoid zero-width and control characters in string literals.
|
|
||||||
# https://staticcheck.dev/docs/checks/#ST1018
|
|
||||||
- ST1018
|
|
||||||
# Importing the same package multiple times.
|
|
||||||
# https://staticcheck.dev/docs/checks/#ST1019
|
|
||||||
- ST1019
|
|
||||||
# The documentation of an exported function should start with the function's name.
|
|
||||||
# https://staticcheck.dev/docs/checks/#ST1020
|
|
||||||
- ST1020
|
|
||||||
# The documentation of an exported type should start with type's name.
|
|
||||||
# https://staticcheck.dev/docs/checks/#ST1021
|
|
||||||
- ST1021
|
|
||||||
# The documentation of an exported variable or constant should start with variable's name.
|
|
||||||
# https://staticcheck.dev/docs/checks/#ST1022
|
|
||||||
- ST1022
|
|
||||||
# Redundant type in variable declaration.
|
|
||||||
# https://staticcheck.dev/docs/checks/#ST1023
|
|
||||||
- ST1023
|
|
||||||
# Use plain channel send or receive instead of single-case select.
|
|
||||||
# https://staticcheck.dev/docs/checks/#S1000
|
|
||||||
- S1000
|
|
||||||
# Replace for loop with call to copy.
|
|
||||||
# https://staticcheck.dev/docs/checks/#S1001
|
|
||||||
- S1001
|
|
||||||
# Omit comparison with boolean constant.
|
|
||||||
# https://staticcheck.dev/docs/checks/#S1002
|
|
||||||
- S1002
|
|
||||||
# Replace call to 'strings.Index' with 'strings.Contains'.
|
|
||||||
# https://staticcheck.dev/docs/checks/#S1003
|
|
||||||
- S1003
|
|
||||||
# Replace call to 'bytes.Compare' with 'bytes.Equal'.
|
|
||||||
# https://staticcheck.dev/docs/checks/#S1004
|
|
||||||
- S1004
|
|
||||||
# Drop unnecessary use of the blank identifier.
|
|
||||||
# https://staticcheck.dev/docs/checks/#S1005
|
|
||||||
- S1005
|
|
||||||
# Use "for { ... }" for infinite loops.
|
|
||||||
# https://staticcheck.dev/docs/checks/#S1006
|
|
||||||
- S1006
|
|
||||||
# Simplify regular expression by using raw string literal.
|
|
||||||
# https://staticcheck.dev/docs/checks/#S1007
|
|
||||||
- S1007
|
|
||||||
# Simplify returning boolean expression.
|
|
||||||
# https://staticcheck.dev/docs/checks/#S1008
|
|
||||||
- S1008
|
|
||||||
# Omit redundant nil check on slices, maps, and channels.
|
|
||||||
# https://staticcheck.dev/docs/checks/#S1009
|
|
||||||
- S1009
|
|
||||||
# Omit default slice index.
|
|
||||||
# https://staticcheck.dev/docs/checks/#S1010
|
|
||||||
- S1010
|
|
||||||
# Use a single 'append' to concatenate two slices.
|
|
||||||
# https://staticcheck.dev/docs/checks/#S1011
|
|
||||||
- S1011
|
|
||||||
# Replace 'time.Now().Sub(x)' with 'time.Since(x)'.
|
|
||||||
# https://staticcheck.dev/docs/checks/#S1012
|
|
||||||
- S1012
|
|
||||||
# Use a type conversion instead of manually copying struct fields.
|
|
||||||
# https://staticcheck.dev/docs/checks/#S1016
|
|
||||||
- S1016
|
|
||||||
# Replace manual trimming with 'strings.TrimPrefix'.
|
|
||||||
# https://staticcheck.dev/docs/checks/#S1017
|
|
||||||
- S1017
|
|
||||||
# Use "copy" for sliding elements.
|
|
||||||
# https://staticcheck.dev/docs/checks/#S1018
|
|
||||||
- S1018
|
|
||||||
# Simplify "make" call by omitting redundant arguments.
|
|
||||||
# https://staticcheck.dev/docs/checks/#S1019
|
|
||||||
- S1019
|
|
||||||
# Omit redundant nil check in type assertion.
|
|
||||||
# https://staticcheck.dev/docs/checks/#S1020
|
|
||||||
- S1020
|
|
||||||
# Merge variable declaration and assignment.
|
|
||||||
# https://staticcheck.dev/docs/checks/#S1021
|
|
||||||
- S1021
|
|
||||||
# Omit redundant control flow.
|
|
||||||
# https://staticcheck.dev/docs/checks/#S1023
|
|
||||||
- S1023
|
|
||||||
# Replace 'x.Sub(time.Now())' with 'time.Until(x)'.
|
|
||||||
# https://staticcheck.dev/docs/checks/#S1024
|
|
||||||
- S1024
|
|
||||||
# Don't use 'fmt.Sprintf("%s", x)' unnecessarily.
|
|
||||||
# https://staticcheck.dev/docs/checks/#S1025
|
|
||||||
- S1025
|
|
||||||
# Simplify error construction with 'fmt.Errorf'.
|
|
||||||
# https://staticcheck.dev/docs/checks/#S1028
|
|
||||||
- S1028
|
|
||||||
# Range over the string directly.
|
|
||||||
# https://staticcheck.dev/docs/checks/#S1029
|
|
||||||
- S1029
|
|
||||||
# Use 'bytes.Buffer.String' or 'bytes.Buffer.Bytes'.
|
|
||||||
# https://staticcheck.dev/docs/checks/#S1030
|
|
||||||
- S1030
|
|
||||||
# Omit redundant nil check around loop.
|
|
||||||
# https://staticcheck.dev/docs/checks/#S1031
|
|
||||||
- S1031
|
|
||||||
# Use 'sort.Ints(x)', 'sort.Float64s(x)', and 'sort.Strings(x)'.
|
|
||||||
# https://staticcheck.dev/docs/checks/#S1032
|
|
||||||
- S1032
|
|
||||||
# Unnecessary guard around call to "delete".
|
|
||||||
# https://staticcheck.dev/docs/checks/#S1033
|
|
||||||
- S1033
|
|
||||||
# Use result of type assertion to simplify cases.
|
|
||||||
# https://staticcheck.dev/docs/checks/#S1034
|
|
||||||
- S1034
|
|
||||||
# Redundant call to 'net/http.CanonicalHeaderKey' in method call on 'net/http.Header'.
|
|
||||||
# https://staticcheck.dev/docs/checks/#S1035
|
|
||||||
- S1035
|
|
||||||
# Unnecessary guard around map access.
|
|
||||||
# https://staticcheck.dev/docs/checks/#S1036
|
|
||||||
- S1036
|
|
||||||
# Elaborate way of sleeping.
|
|
||||||
# https://staticcheck.dev/docs/checks/#S1037
|
|
||||||
- S1037
|
|
||||||
# Unnecessarily complex way of printing formatted string.
|
|
||||||
# https://staticcheck.dev/docs/checks/#S1038
|
|
||||||
- S1038
|
|
||||||
# Unnecessary use of 'fmt.Sprint'.
|
|
||||||
# https://staticcheck.dev/docs/checks/#S1039
|
|
||||||
- S1039
|
|
||||||
# Type assertion to current type.
|
|
||||||
# https://staticcheck.dev/docs/checks/#S1040
|
|
||||||
- S1040
|
|
||||||
# Apply De Morgan's law.
|
|
||||||
# https://staticcheck.dev/docs/checks/#QF1001
|
|
||||||
- QF1001
|
|
||||||
# Convert untagged switch to tagged switch.
|
|
||||||
# https://staticcheck.dev/docs/checks/#QF1002
|
|
||||||
- QF1002
|
|
||||||
# Convert if/else-if chain to tagged switch.
|
|
||||||
# https://staticcheck.dev/docs/checks/#QF1003
|
|
||||||
- QF1003
|
|
||||||
# Use 'strings.ReplaceAll' instead of 'strings.Replace' with 'n == -1'.
|
|
||||||
# https://staticcheck.dev/docs/checks/#QF1004
|
|
||||||
- QF1004
|
|
||||||
# Expand call to 'math.Pow'.
|
|
||||||
# https://staticcheck.dev/docs/checks/#QF1005
|
|
||||||
- QF1005
|
|
||||||
# Lift 'if'+'break' into loop condition.
|
|
||||||
# https://staticcheck.dev/docs/checks/#QF1006
|
|
||||||
- QF1006
|
|
||||||
# Merge conditional assignment into variable declaration.
|
|
||||||
# https://staticcheck.dev/docs/checks/#QF1007
|
|
||||||
- QF1007
|
|
||||||
# Omit embedded fields from selector expression.
|
|
||||||
# https://staticcheck.dev/docs/checks/#QF1008
|
|
||||||
- QF1008
|
|
||||||
# Use 'time.Time.Equal' instead of '==' operator.
|
|
||||||
# https://staticcheck.dev/docs/checks/#QF1009
|
|
||||||
- QF1009
|
|
||||||
# Convert slice of bytes to string when printing it.
|
|
||||||
# https://staticcheck.dev/docs/checks/#QF1010
|
|
||||||
- QF1010
|
|
||||||
# Omit redundant type from variable declaration.
|
|
||||||
# https://staticcheck.dev/docs/checks/#QF1011
|
|
||||||
- QF1011
|
|
||||||
# Use 'fmt.Fprintf(x, ...)' instead of 'x.Write(fmt.Sprintf(...))'.
|
|
||||||
# https://staticcheck.dev/docs/checks/#QF1012
|
|
||||||
- QF1012
|
|
||||||
unused:
|
unused:
|
||||||
# Mark all struct fields that have been written to as used.
|
|
||||||
# Default: true
|
# Default: true
|
||||||
field-writes-are-uses: false
|
field-writes-are-uses: true
|
||||||
# Treat IncDec statement (e.g. `i++` or `i--`) as both read and write operation instead of just write.
|
|
||||||
# Default: false
|
# Default: false
|
||||||
post-statements-are-reads: true
|
post-statements-are-reads: true
|
||||||
# Mark all exported fields as used.
|
|
||||||
# default: true
|
|
||||||
exported-fields-are-used: false
|
|
||||||
# Mark all function parameters as used.
|
|
||||||
# default: true
|
|
||||||
parameters-are-used: true
|
|
||||||
# Mark all local variables as used.
|
|
||||||
# default: true
|
|
||||||
local-variables-are-used: false
|
|
||||||
# Mark all identifiers inside generated files as used.
|
|
||||||
# Default: true
|
# Default: true
|
||||||
generated-is-used: false
|
exported-fields-are-used: true
|
||||||
|
# Default: true
|
||||||
|
parameters-are-used: true
|
||||||
|
# Default: true
|
||||||
|
local-variables-are-used: false
|
||||||
|
# Default: true — must be true, ent generates 130K+ lines of code
|
||||||
|
generated-is-used: true
|
||||||
|
|
||||||
formatters:
|
formatters:
|
||||||
enable:
|
enable:
|
||||||
@@ -591,4 +136,4 @@ formatters:
|
|||||||
- pattern: 'interface{}'
|
- pattern: 'interface{}'
|
||||||
replacement: 'any'
|
replacement: 'any'
|
||||||
- pattern: 'a[b:len(a)]'
|
- pattern: 'a[b:len(a)]'
|
||||||
replacement: 'a[b:]'
|
replacement: 'a[b:]'
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
FROM golang:1.21-alpine
|
FROM golang:1.25.7-alpine
|
||||||
|
|
||||||
WORKDIR /app
|
WORKDIR /app
|
||||||
|
|
||||||
@@ -15,7 +15,7 @@ RUN go mod download
|
|||||||
COPY . .
|
COPY . .
|
||||||
|
|
||||||
# 构建应用
|
# 构建应用
|
||||||
RUN go build -o main cmd/server/main.go
|
RUN go build -o main ./cmd/server/
|
||||||
|
|
||||||
# 暴露端口
|
# 暴露端口
|
||||||
EXPOSE 8080
|
EXPOSE 8080
|
||||||
|
|||||||
@@ -1,16 +1,27 @@
|
|||||||
.PHONY: wire build build-embed
|
.PHONY: build generate test test-unit test-integration test-e2e
|
||||||
|
|
||||||
wire:
|
VERSION ?= $(shell tr -d '\r\n' < ./cmd/server/VERSION)
|
||||||
@echo "生成 Wire 代码..."
|
LDFLAGS ?= -s -w -X main.Version=$(VERSION)
|
||||||
@cd cmd/server && go generate
|
|
||||||
@echo "Wire 代码生成完成"
|
|
||||||
|
|
||||||
build:
|
build:
|
||||||
@echo "构建后端(不嵌入前端)..."
|
CGO_ENABLED=0 go build -ldflags="$(LDFLAGS)" -trimpath -o bin/server ./cmd/server
|
||||||
@go build -o bin/server ./cmd/server
|
|
||||||
@echo "构建完成: bin/server"
|
|
||||||
|
|
||||||
build-embed:
|
generate:
|
||||||
@echo "构建后端(嵌入前端)..."
|
go generate ./ent
|
||||||
@go build -tags embed -o bin/server ./cmd/server
|
go generate ./cmd/server
|
||||||
@echo "构建完成: bin/server (with embedded frontend)"
|
|
||||||
|
test:
|
||||||
|
go test ./...
|
||||||
|
golangci-lint run ./...
|
||||||
|
|
||||||
|
test-unit:
|
||||||
|
go test -tags=unit ./...
|
||||||
|
|
||||||
|
test-integration:
|
||||||
|
go test -tags=integration ./...
|
||||||
|
|
||||||
|
test-e2e:
|
||||||
|
./scripts/e2e-test.sh
|
||||||
|
|
||||||
|
test-e2e-local:
|
||||||
|
go test -tags=e2e -v -timeout=300s ./internal/integration/...
|
||||||
|
|||||||
57
backend/cmd/jwtgen/main.go
Normal file
@@ -0,0 +1,57 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"flag"
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
_ "github.com/Wei-Shaw/sub2api/ent/runtime"
|
||||||
|
"github.com/Wei-Shaw/sub2api/internal/config"
|
||||||
|
"github.com/Wei-Shaw/sub2api/internal/repository"
|
||||||
|
"github.com/Wei-Shaw/sub2api/internal/service"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
email := flag.String("email", "", "Admin email to issue a JWT for (defaults to first active admin)")
|
||||||
|
flag.Parse()
|
||||||
|
|
||||||
|
cfg, err := config.LoadForBootstrap()
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("failed to load config: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
client, sqlDB, err := repository.InitEnt(cfg)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("failed to init db: %v", err)
|
||||||
|
}
|
||||||
|
defer func() {
|
||||||
|
if err := client.Close(); err != nil {
|
||||||
|
log.Printf("failed to close db: %v", err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
userRepo := repository.NewUserRepository(client, sqlDB)
|
||||||
|
authService := service.NewAuthService(client, userRepo, nil, nil, cfg, nil, nil, nil, nil, nil, nil)
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
var user *service.User
|
||||||
|
if *email != "" {
|
||||||
|
user, err = userRepo.GetByEmail(ctx, *email)
|
||||||
|
} else {
|
||||||
|
user, err = userRepo.GetFirstAdmin(ctx)
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("failed to resolve admin user: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
token, err := authService.GenerateToken(user)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("failed to generate token: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Printf("ADMIN_EMAIL=%s\nADMIN_USER_ID=%d\nJWT=%s\n", user.Email, user.ID, token)
|
||||||
|
}
|
||||||
@@ -1 +1 @@
|
|||||||
0.1.1
|
0.1.116
|
||||||
|
|||||||
@@ -15,13 +15,17 @@ import (
|
|||||||
"syscall"
|
"syscall"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
_ "github.com/Wei-Shaw/sub2api/ent/runtime"
|
||||||
"github.com/Wei-Shaw/sub2api/internal/config"
|
"github.com/Wei-Shaw/sub2api/internal/config"
|
||||||
"github.com/Wei-Shaw/sub2api/internal/handler"
|
"github.com/Wei-Shaw/sub2api/internal/handler"
|
||||||
"github.com/Wei-Shaw/sub2api/internal/middleware"
|
"github.com/Wei-Shaw/sub2api/internal/pkg/logger"
|
||||||
|
"github.com/Wei-Shaw/sub2api/internal/server/middleware"
|
||||||
"github.com/Wei-Shaw/sub2api/internal/setup"
|
"github.com/Wei-Shaw/sub2api/internal/setup"
|
||||||
"github.com/Wei-Shaw/sub2api/internal/web"
|
"github.com/Wei-Shaw/sub2api/internal/web"
|
||||||
|
|
||||||
"github.com/gin-gonic/gin"
|
"github.com/gin-gonic/gin"
|
||||||
|
"golang.org/x/net/http2"
|
||||||
|
"golang.org/x/net/http2/h2c"
|
||||||
)
|
)
|
||||||
|
|
||||||
//go:embed VERSION
|
//go:embed VERSION
|
||||||
@@ -36,14 +40,24 @@ var (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
// Read version from embedded VERSION file
|
// 如果 Version 已通过 ldflags 注入(例如 -X main.Version=...),则不要覆盖。
|
||||||
|
if strings.TrimSpace(Version) != "" {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// 默认从 embedded VERSION 文件读取版本号(编译期打包进二进制)。
|
||||||
Version = strings.TrimSpace(embeddedVersion)
|
Version = strings.TrimSpace(embeddedVersion)
|
||||||
if Version == "" {
|
if Version == "" {
|
||||||
Version = "0.0.0-dev"
|
Version = "0.0.0-dev"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// initLogger configures the default slog handler based on gin.Mode().
|
||||||
|
// In non-release mode, Debug level logs are enabled.
|
||||||
func main() {
|
func main() {
|
||||||
|
logger.InitBootstrap()
|
||||||
|
defer logger.Sync()
|
||||||
|
|
||||||
// Parse command line flags
|
// Parse command line flags
|
||||||
setupMode := flag.Bool("setup", false, "Run setup wizard in CLI mode")
|
setupMode := flag.Bool("setup", false, "Run setup wizard in CLI mode")
|
||||||
showVersion := flag.Bool("version", false, "Show version information")
|
showVersion := flag.Bool("version", false, "Show version information")
|
||||||
@@ -84,8 +98,9 @@ func main() {
|
|||||||
|
|
||||||
func runSetupServer() {
|
func runSetupServer() {
|
||||||
r := gin.New()
|
r := gin.New()
|
||||||
r.Use(gin.Recovery())
|
r.Use(middleware.Recovery())
|
||||||
r.Use(middleware.CORS())
|
r.Use(middleware.CORS(config.CORSConfig{}))
|
||||||
|
r.Use(middleware.SecurityHeaders(config.CSPConfig{Enabled: true, Policy: config.DefaultCSPPolicy}, nil))
|
||||||
|
|
||||||
// Register setup routes
|
// Register setup routes
|
||||||
setup.RegisterRoutes(r)
|
setup.RegisterRoutes(r)
|
||||||
@@ -101,12 +116,30 @@ func runSetupServer() {
|
|||||||
log.Printf("Setup wizard available at http://%s", addr)
|
log.Printf("Setup wizard available at http://%s", addr)
|
||||||
log.Println("Complete the setup wizard to configure Sub2API")
|
log.Println("Complete the setup wizard to configure Sub2API")
|
||||||
|
|
||||||
if err := r.Run(addr); err != nil {
|
server := &http.Server{
|
||||||
|
Addr: addr,
|
||||||
|
Handler: h2c.NewHandler(r, &http2.Server{}),
|
||||||
|
ReadHeaderTimeout: 30 * time.Second,
|
||||||
|
IdleTimeout: 120 * time.Second,
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := server.ListenAndServe(); err != nil && !errors.Is(err, http.ErrServerClosed) {
|
||||||
log.Fatalf("Failed to start setup server: %v", err)
|
log.Fatalf("Failed to start setup server: %v", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func runMainServer() {
|
func runMainServer() {
|
||||||
|
cfg, err := config.LoadForBootstrap()
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("Failed to load config: %v", err)
|
||||||
|
}
|
||||||
|
if err := logger.Init(logger.OptionsFromConfig(cfg.Log)); err != nil {
|
||||||
|
log.Fatalf("Failed to initialize logger: %v", err)
|
||||||
|
}
|
||||||
|
if cfg.RunMode == config.RunModeSimple {
|
||||||
|
log.Println("⚠️ WARNING: Running in SIMPLE mode - billing and quota checks are DISABLED")
|
||||||
|
}
|
||||||
|
|
||||||
buildInfo := handler.BuildInfo{
|
buildInfo := handler.BuildInfo{
|
||||||
Version: Version,
|
Version: Version,
|
||||||
BuildType: BuildType,
|
BuildType: BuildType,
|
||||||
|
|||||||
@@ -4,21 +4,23 @@
|
|||||||
package main
|
package main
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"github.com/Wei-Shaw/sub2api/internal/config"
|
|
||||||
"github.com/Wei-Shaw/sub2api/internal/handler"
|
|
||||||
"github.com/Wei-Shaw/sub2api/internal/infrastructure"
|
|
||||||
"github.com/Wei-Shaw/sub2api/internal/repository"
|
|
||||||
"github.com/Wei-Shaw/sub2api/internal/server"
|
|
||||||
"github.com/Wei-Shaw/sub2api/internal/service"
|
|
||||||
|
|
||||||
"context"
|
"context"
|
||||||
"log"
|
"log"
|
||||||
"net/http"
|
"net/http"
|
||||||
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent"
|
||||||
|
"github.com/Wei-Shaw/sub2api/internal/config"
|
||||||
|
"github.com/Wei-Shaw/sub2api/internal/handler"
|
||||||
|
"github.com/Wei-Shaw/sub2api/internal/payment"
|
||||||
|
"github.com/Wei-Shaw/sub2api/internal/repository"
|
||||||
|
"github.com/Wei-Shaw/sub2api/internal/server"
|
||||||
|
"github.com/Wei-Shaw/sub2api/internal/server/middleware"
|
||||||
|
"github.com/Wei-Shaw/sub2api/internal/service"
|
||||||
|
|
||||||
"github.com/google/wire"
|
"github.com/google/wire"
|
||||||
"github.com/redis/go-redis/v9"
|
"github.com/redis/go-redis/v9"
|
||||||
"gorm.io/gorm"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
type Application struct {
|
type Application struct {
|
||||||
@@ -28,30 +30,38 @@ type Application struct {
|
|||||||
|
|
||||||
func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) {
|
func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) {
|
||||||
wire.Build(
|
wire.Build(
|
||||||
// 基础设施层 ProviderSets
|
// Infrastructure layer ProviderSets
|
||||||
config.ProviderSet,
|
config.ProviderSet,
|
||||||
infrastructure.ProviderSet,
|
|
||||||
|
|
||||||
// 业务层 ProviderSets
|
// Business layer ProviderSets
|
||||||
repository.ProviderSet,
|
repository.ProviderSet,
|
||||||
service.ProviderSet,
|
service.ProviderSet,
|
||||||
|
payment.ProviderSet,
|
||||||
|
middleware.ProviderSet,
|
||||||
handler.ProviderSet,
|
handler.ProviderSet,
|
||||||
|
|
||||||
// 服务器层 ProviderSet
|
// Server layer ProviderSet
|
||||||
server.ProviderSet,
|
server.ProviderSet,
|
||||||
|
|
||||||
|
// Privacy client factory for OpenAI training opt-out
|
||||||
|
providePrivacyClientFactory,
|
||||||
|
|
||||||
// BuildInfo provider
|
// BuildInfo provider
|
||||||
provideServiceBuildInfo,
|
provideServiceBuildInfo,
|
||||||
|
|
||||||
// 清理函数提供者
|
// Cleanup function provider
|
||||||
provideCleanup,
|
provideCleanup,
|
||||||
|
|
||||||
// 应用程序结构体
|
// Application struct
|
||||||
wire.Struct(new(Application), "Server", "Cleanup"),
|
wire.Struct(new(Application), "Server", "Cleanup"),
|
||||||
)
|
)
|
||||||
return nil, nil
|
return nil, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func providePrivacyClientFactory() service.PrivacyClientFactory {
|
||||||
|
return repository.CreatePrivacyReqClient
|
||||||
|
}
|
||||||
|
|
||||||
func provideServiceBuildInfo(buildInfo handler.BuildInfo) service.BuildInfo {
|
func provideServiceBuildInfo(buildInfo handler.BuildInfo) service.BuildInfo {
|
||||||
return service.BuildInfo{
|
return service.BuildInfo{
|
||||||
Version: buildInfo.Version,
|
Version: buildInfo.Version,
|
||||||
@@ -60,60 +70,230 @@ func provideServiceBuildInfo(buildInfo handler.BuildInfo) service.BuildInfo {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func provideCleanup(
|
func provideCleanup(
|
||||||
db *gorm.DB,
|
entClient *ent.Client,
|
||||||
rdb *redis.Client,
|
rdb *redis.Client,
|
||||||
services *service.Services,
|
opsMetricsCollector *service.OpsMetricsCollector,
|
||||||
|
opsAggregation *service.OpsAggregationService,
|
||||||
|
opsAlertEvaluator *service.OpsAlertEvaluatorService,
|
||||||
|
opsCleanup *service.OpsCleanupService,
|
||||||
|
opsScheduledReport *service.OpsScheduledReportService,
|
||||||
|
opsSystemLogSink *service.OpsSystemLogSink,
|
||||||
|
schedulerSnapshot *service.SchedulerSnapshotService,
|
||||||
|
tokenRefresh *service.TokenRefreshService,
|
||||||
|
accountExpiry *service.AccountExpiryService,
|
||||||
|
subscriptionExpiry *service.SubscriptionExpiryService,
|
||||||
|
usageCleanup *service.UsageCleanupService,
|
||||||
|
idempotencyCleanup *service.IdempotencyCleanupService,
|
||||||
|
pricing *service.PricingService,
|
||||||
|
emailQueue *service.EmailQueueService,
|
||||||
|
billingCache *service.BillingCacheService,
|
||||||
|
usageRecordWorkerPool *service.UsageRecordWorkerPool,
|
||||||
|
subscriptionService *service.SubscriptionService,
|
||||||
|
oauth *service.OAuthService,
|
||||||
|
openaiOAuth *service.OpenAIOAuthService,
|
||||||
|
geminiOAuth *service.GeminiOAuthService,
|
||||||
|
antigravityOAuth *service.AntigravityOAuthService,
|
||||||
|
openAIGateway *service.OpenAIGatewayService,
|
||||||
|
scheduledTestRunner *service.ScheduledTestRunnerService,
|
||||||
|
backupSvc *service.BackupService,
|
||||||
|
paymentOrderExpiry *service.PaymentOrderExpiryService,
|
||||||
|
channelMonitorRunner *service.ChannelMonitorRunner,
|
||||||
) func() {
|
) func() {
|
||||||
return func() {
|
return func() {
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
|
|
||||||
// Cleanup steps in reverse dependency order
|
type cleanupStep struct {
|
||||||
cleanupSteps := []struct {
|
|
||||||
name string
|
name string
|
||||||
fn func() error
|
fn func() error
|
||||||
}{
|
}
|
||||||
|
|
||||||
|
// 应用层清理步骤可并行执行,基础设施资源(Redis/Ent)最后按顺序关闭。
|
||||||
|
parallelSteps := []cleanupStep{
|
||||||
|
{"OpsScheduledReportService", func() error {
|
||||||
|
if opsScheduledReport != nil {
|
||||||
|
opsScheduledReport.Stop()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
|
{"OpsCleanupService", func() error {
|
||||||
|
if opsCleanup != nil {
|
||||||
|
opsCleanup.Stop()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
|
{"OpsSystemLogSink", func() error {
|
||||||
|
if opsSystemLogSink != nil {
|
||||||
|
opsSystemLogSink.Stop()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
|
{"OpsAlertEvaluatorService", func() error {
|
||||||
|
if opsAlertEvaluator != nil {
|
||||||
|
opsAlertEvaluator.Stop()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
|
{"OpsAggregationService", func() error {
|
||||||
|
if opsAggregation != nil {
|
||||||
|
opsAggregation.Stop()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
|
{"OpsMetricsCollector", func() error {
|
||||||
|
if opsMetricsCollector != nil {
|
||||||
|
opsMetricsCollector.Stop()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
|
{"SchedulerSnapshotService", func() error {
|
||||||
|
if schedulerSnapshot != nil {
|
||||||
|
schedulerSnapshot.Stop()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
|
{"UsageCleanupService", func() error {
|
||||||
|
if usageCleanup != nil {
|
||||||
|
usageCleanup.Stop()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
|
{"IdempotencyCleanupService", func() error {
|
||||||
|
if idempotencyCleanup != nil {
|
||||||
|
idempotencyCleanup.Stop()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
{"TokenRefreshService", func() error {
|
{"TokenRefreshService", func() error {
|
||||||
services.TokenRefresh.Stop()
|
tokenRefresh.Stop()
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
|
{"AccountExpiryService", func() error {
|
||||||
|
accountExpiry.Stop()
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
|
{"SubscriptionExpiryService", func() error {
|
||||||
|
subscriptionExpiry.Stop()
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
|
{"SubscriptionService", func() error {
|
||||||
|
if subscriptionService != nil {
|
||||||
|
subscriptionService.Stop()
|
||||||
|
}
|
||||||
return nil
|
return nil
|
||||||
}},
|
}},
|
||||||
{"PricingService", func() error {
|
{"PricingService", func() error {
|
||||||
services.Pricing.Stop()
|
pricing.Stop()
|
||||||
return nil
|
return nil
|
||||||
}},
|
}},
|
||||||
{"EmailQueueService", func() error {
|
{"EmailQueueService", func() error {
|
||||||
services.EmailQueue.Stop()
|
emailQueue.Stop()
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
|
{"BillingCacheService", func() error {
|
||||||
|
billingCache.Stop()
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
|
{"UsageRecordWorkerPool", func() error {
|
||||||
|
if usageRecordWorkerPool != nil {
|
||||||
|
usageRecordWorkerPool.Stop()
|
||||||
|
}
|
||||||
return nil
|
return nil
|
||||||
}},
|
}},
|
||||||
{"OAuthService", func() error {
|
{"OAuthService", func() error {
|
||||||
services.OAuth.Stop()
|
oauth.Stop()
|
||||||
return nil
|
return nil
|
||||||
}},
|
}},
|
||||||
{"OpenAIOAuthService", func() error {
|
{"OpenAIOAuthService", func() error {
|
||||||
services.OpenAIOAuth.Stop()
|
openaiOAuth.Stop()
|
||||||
return nil
|
return nil
|
||||||
}},
|
}},
|
||||||
{"Redis", func() error {
|
{"GeminiOAuthService", func() error {
|
||||||
return rdb.Close()
|
geminiOAuth.Stop()
|
||||||
|
return nil
|
||||||
}},
|
}},
|
||||||
{"Database", func() error {
|
{"AntigravityOAuthService", func() error {
|
||||||
sqlDB, err := db.DB()
|
antigravityOAuth.Stop()
|
||||||
if err != nil {
|
return nil
|
||||||
return err
|
}},
|
||||||
|
{"OpenAIWSPool", func() error {
|
||||||
|
if openAIGateway != nil {
|
||||||
|
openAIGateway.CloseOpenAIWSPool()
|
||||||
}
|
}
|
||||||
return sqlDB.Close()
|
return nil
|
||||||
|
}},
|
||||||
|
{"ScheduledTestRunnerService", func() error {
|
||||||
|
if scheduledTestRunner != nil {
|
||||||
|
scheduledTestRunner.Stop()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
|
{"BackupService", func() error {
|
||||||
|
if backupSvc != nil {
|
||||||
|
backupSvc.Stop()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
|
{"PaymentOrderExpiryService", func() error {
|
||||||
|
if paymentOrderExpiry != nil {
|
||||||
|
paymentOrderExpiry.Stop()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
|
{"ChannelMonitorRunner", func() error {
|
||||||
|
if channelMonitorRunner != nil {
|
||||||
|
channelMonitorRunner.Stop()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
}},
|
}},
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, step := range cleanupSteps {
|
infraSteps := []cleanupStep{
|
||||||
if err := step.fn(); err != nil {
|
{"Redis", func() error {
|
||||||
log.Printf("[Cleanup] %s failed: %v", step.name, err)
|
if rdb == nil {
|
||||||
// Continue with remaining cleanup steps even if one fails
|
return nil
|
||||||
} else {
|
}
|
||||||
|
return rdb.Close()
|
||||||
|
}},
|
||||||
|
{"Ent", func() error {
|
||||||
|
if entClient == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return entClient.Close()
|
||||||
|
}},
|
||||||
|
}
|
||||||
|
|
||||||
|
runParallel := func(steps []cleanupStep) {
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
for i := range steps {
|
||||||
|
step := steps[i]
|
||||||
|
wg.Add(1)
|
||||||
|
go func() {
|
||||||
|
defer wg.Done()
|
||||||
|
if err := step.fn(); err != nil {
|
||||||
|
log.Printf("[Cleanup] %s failed: %v", step.name, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
log.Printf("[Cleanup] %s succeeded", step.name)
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
wg.Wait()
|
||||||
|
}
|
||||||
|
|
||||||
|
runSequential := func(steps []cleanupStep) {
|
||||||
|
for i := range steps {
|
||||||
|
step := steps[i]
|
||||||
|
if err := step.fn(); err != nil {
|
||||||
|
log.Printf("[Cleanup] %s failed: %v", step.name, err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
log.Printf("[Cleanup] %s succeeded", step.name)
|
log.Printf("[Cleanup] %s succeeded", step.name)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
runParallel(parallelSteps)
|
||||||
|
runSequential(infraSteps)
|
||||||
|
|
||||||
// Check if context timed out
|
// Check if context timed out
|
||||||
select {
|
select {
|
||||||
case <-ctx.Done():
|
case <-ctx.Done():
|
||||||
|
|||||||
@@ -8,22 +8,25 @@ package main
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent"
|
||||||
"github.com/Wei-Shaw/sub2api/internal/config"
|
"github.com/Wei-Shaw/sub2api/internal/config"
|
||||||
"github.com/Wei-Shaw/sub2api/internal/handler"
|
"github.com/Wei-Shaw/sub2api/internal/handler"
|
||||||
"github.com/Wei-Shaw/sub2api/internal/handler/admin"
|
"github.com/Wei-Shaw/sub2api/internal/handler/admin"
|
||||||
"github.com/Wei-Shaw/sub2api/internal/infrastructure"
|
"github.com/Wei-Shaw/sub2api/internal/payment"
|
||||||
"github.com/Wei-Shaw/sub2api/internal/repository"
|
"github.com/Wei-Shaw/sub2api/internal/repository"
|
||||||
"github.com/Wei-Shaw/sub2api/internal/server"
|
"github.com/Wei-Shaw/sub2api/internal/server"
|
||||||
|
"github.com/Wei-Shaw/sub2api/internal/server/middleware"
|
||||||
"github.com/Wei-Shaw/sub2api/internal/service"
|
"github.com/Wei-Shaw/sub2api/internal/service"
|
||||||
"github.com/redis/go-redis/v9"
|
"github.com/redis/go-redis/v9"
|
||||||
"gorm.io/gorm"
|
|
||||||
"log"
|
"log"
|
||||||
"net/http"
|
"net/http"
|
||||||
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
import (
|
import (
|
||||||
_ "embed"
|
_ "embed"
|
||||||
|
_ "github.com/Wei-Shaw/sub2api/ent/runtime"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Injectors from wire.go:
|
// Injectors from wire.go:
|
||||||
@@ -33,136 +36,234 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
db, err := infrastructure.ProvideDB(configConfig)
|
client, err := repository.ProvideEnt(configConfig)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
userRepository := repository.NewUserRepository(db)
|
db, err := repository.ProvideSQLDB(client)
|
||||||
settingRepository := repository.NewSettingRepository(db)
|
if err != nil {
|
||||||
settingService := service.NewSettingService(settingRepository, configConfig)
|
return nil, err
|
||||||
client := infrastructure.ProvideRedis(configConfig)
|
}
|
||||||
emailCache := repository.NewEmailCache(client)
|
userRepository := repository.NewUserRepository(client, db)
|
||||||
|
redeemCodeRepository := repository.NewRedeemCodeRepository(client)
|
||||||
|
redisClient := repository.ProvideRedis(configConfig)
|
||||||
|
refreshTokenCache := repository.NewRefreshTokenCache(redisClient)
|
||||||
|
settingRepository := repository.NewSettingRepository(client)
|
||||||
|
groupRepository := repository.NewGroupRepository(client, db)
|
||||||
|
proxyRepository := repository.NewProxyRepository(client, db)
|
||||||
|
settingService := service.ProvideSettingService(settingRepository, groupRepository, proxyRepository, configConfig)
|
||||||
|
emailCache := repository.NewEmailCache(redisClient)
|
||||||
emailService := service.NewEmailService(settingRepository, emailCache)
|
emailService := service.NewEmailService(settingRepository, emailCache)
|
||||||
turnstileVerifier := repository.NewTurnstileVerifier()
|
turnstileVerifier := repository.NewTurnstileVerifier()
|
||||||
turnstileService := service.NewTurnstileService(settingService, turnstileVerifier)
|
turnstileService := service.NewTurnstileService(settingService, turnstileVerifier)
|
||||||
emailQueueService := service.ProvideEmailQueueService(emailService)
|
emailQueueService := service.ProvideEmailQueueService(emailService)
|
||||||
authService := service.NewAuthService(userRepository, configConfig, settingService, emailService, turnstileService, emailQueueService)
|
promoCodeRepository := repository.NewPromoCodeRepository(client)
|
||||||
authHandler := handler.NewAuthHandler(authService)
|
billingCache := repository.NewBillingCache(redisClient)
|
||||||
userService := service.NewUserService(userRepository)
|
userSubscriptionRepository := repository.NewUserSubscriptionRepository(client)
|
||||||
userHandler := handler.NewUserHandler(userService)
|
apiKeyRepository := repository.NewAPIKeyRepository(client, db)
|
||||||
apiKeyRepository := repository.NewApiKeyRepository(db)
|
userRPMCache := repository.NewUserRPMCache(redisClient)
|
||||||
groupRepository := repository.NewGroupRepository(db)
|
userGroupRateRepository := repository.NewUserGroupRateRepository(db)
|
||||||
userSubscriptionRepository := repository.NewUserSubscriptionRepository(db)
|
billingCacheService := service.ProvideBillingCacheService(billingCache, userRepository, userSubscriptionRepository, apiKeyRepository, userRPMCache, userGroupRateRepository, configConfig)
|
||||||
apiKeyCache := repository.NewApiKeyCache(client)
|
apiKeyCache := repository.NewAPIKeyCache(redisClient)
|
||||||
apiKeyService := service.NewApiKeyService(apiKeyRepository, userRepository, groupRepository, userSubscriptionRepository, apiKeyCache, configConfig)
|
apiKeyService := service.NewAPIKeyService(apiKeyRepository, userRepository, groupRepository, userSubscriptionRepository, userGroupRateRepository, apiKeyCache, configConfig)
|
||||||
|
apiKeyAuthCacheInvalidator := service.ProvideAPIKeyAuthCacheInvalidator(apiKeyService)
|
||||||
|
promoService := service.NewPromoService(promoCodeRepository, userRepository, billingCacheService, client, apiKeyAuthCacheInvalidator)
|
||||||
|
subscriptionService := service.NewSubscriptionService(groupRepository, userSubscriptionRepository, billingCacheService, client, configConfig)
|
||||||
|
authService := service.NewAuthService(client, userRepository, redeemCodeRepository, refreshTokenCache, configConfig, settingService, emailService, turnstileService, emailQueueService, promoService, subscriptionService)
|
||||||
|
userService := service.NewUserService(userRepository, settingRepository, apiKeyAuthCacheInvalidator, billingCache)
|
||||||
|
redeemCache := repository.NewRedeemCache(redisClient)
|
||||||
|
redeemService := service.NewRedeemService(redeemCodeRepository, userRepository, subscriptionService, redeemCache, billingCacheService, client, apiKeyAuthCacheInvalidator)
|
||||||
|
secretEncryptor, err := repository.NewAESEncryptor(configConfig)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
totpCache := repository.NewTotpCache(redisClient)
|
||||||
|
totpService := service.NewTotpService(userRepository, secretEncryptor, totpCache, settingService, emailService, emailQueueService)
|
||||||
|
authHandler := handler.NewAuthHandler(configConfig, authService, userService, settingService, promoService, redeemService, totpService)
|
||||||
|
userHandler := handler.NewUserHandler(userService, authService, emailService, emailCache)
|
||||||
apiKeyHandler := handler.NewAPIKeyHandler(apiKeyService)
|
apiKeyHandler := handler.NewAPIKeyHandler(apiKeyService)
|
||||||
usageLogRepository := repository.NewUsageLogRepository(db)
|
usageLogRepository := repository.NewUsageLogRepository(client, db)
|
||||||
usageService := service.NewUsageService(usageLogRepository, userRepository)
|
usageService := service.NewUsageService(usageLogRepository, userRepository, client, apiKeyAuthCacheInvalidator)
|
||||||
usageHandler := handler.NewUsageHandler(usageService, apiKeyService)
|
usageHandler := handler.NewUsageHandler(usageService, apiKeyService)
|
||||||
redeemCodeRepository := repository.NewRedeemCodeRepository(db)
|
|
||||||
billingCache := repository.NewBillingCache(client)
|
|
||||||
billingCacheService := service.NewBillingCacheService(billingCache, userRepository, userSubscriptionRepository)
|
|
||||||
subscriptionService := service.NewSubscriptionService(groupRepository, userSubscriptionRepository, billingCacheService)
|
|
||||||
redeemCache := repository.NewRedeemCache(client)
|
|
||||||
redeemService := service.NewRedeemService(redeemCodeRepository, userRepository, subscriptionService, redeemCache, billingCacheService)
|
|
||||||
redeemHandler := handler.NewRedeemHandler(redeemService)
|
redeemHandler := handler.NewRedeemHandler(redeemService)
|
||||||
subscriptionHandler := handler.NewSubscriptionHandler(subscriptionService)
|
subscriptionHandler := handler.NewSubscriptionHandler(subscriptionService)
|
||||||
dashboardService := service.NewDashboardService(usageLogRepository)
|
announcementRepository := repository.NewAnnouncementRepository(client)
|
||||||
dashboardHandler := admin.NewDashboardHandler(dashboardService)
|
announcementReadRepository := repository.NewAnnouncementReadRepository(client)
|
||||||
accountRepository := repository.NewAccountRepository(db)
|
announcementService := service.NewAnnouncementService(announcementRepository, announcementReadRepository, userRepository, userSubscriptionRepository)
|
||||||
proxyRepository := repository.NewProxyRepository(db)
|
announcementHandler := handler.NewAnnouncementHandler(announcementService)
|
||||||
proxyExitInfoProber := repository.NewProxyExitInfoProber()
|
dashboardAggregationRepository := repository.NewDashboardAggregationRepository(db)
|
||||||
adminService := service.NewAdminService(userRepository, groupRepository, accountRepository, proxyRepository, apiKeyRepository, redeemCodeRepository, billingCacheService, proxyExitInfoProber)
|
dashboardStatsCache := repository.NewDashboardCache(redisClient, configConfig)
|
||||||
adminUserHandler := admin.NewUserHandler(adminService)
|
dashboardService := service.NewDashboardService(usageLogRepository, dashboardAggregationRepository, dashboardStatsCache, configConfig)
|
||||||
groupHandler := admin.NewGroupHandler(adminService)
|
timingWheelService, err := service.ProvideTimingWheelService()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
dashboardAggregationService := service.ProvideDashboardAggregationService(dashboardAggregationRepository, timingWheelService, configConfig)
|
||||||
|
dashboardHandler := admin.NewDashboardHandler(dashboardService, dashboardAggregationService)
|
||||||
|
schedulerCache := repository.ProvideSchedulerCache(redisClient, configConfig)
|
||||||
|
accountRepository := repository.NewAccountRepository(client, db, schedulerCache)
|
||||||
|
proxyExitInfoProber := repository.NewProxyExitInfoProber(configConfig)
|
||||||
|
proxyLatencyCache := repository.NewProxyLatencyCache(redisClient)
|
||||||
|
privacyClientFactory := providePrivacyClientFactory()
|
||||||
|
adminService := service.NewAdminService(userRepository, groupRepository, accountRepository, proxyRepository, apiKeyRepository, redeemCodeRepository, userGroupRateRepository, userRPMCache, billingCacheService, proxyExitInfoProber, proxyLatencyCache, apiKeyAuthCacheInvalidator, client, settingService, subscriptionService, userSubscriptionRepository, privacyClientFactory)
|
||||||
|
concurrencyCache := repository.ProvideConcurrencyCache(redisClient, configConfig)
|
||||||
|
concurrencyService := service.ProvideConcurrencyService(concurrencyCache, accountRepository, configConfig)
|
||||||
|
adminUserHandler := admin.NewUserHandler(adminService, concurrencyService)
|
||||||
|
sessionLimitCache := repository.ProvideSessionLimitCache(redisClient, configConfig)
|
||||||
|
rpmCache := repository.NewRPMCache(redisClient)
|
||||||
|
groupCapacityService := service.NewGroupCapacityService(accountRepository, groupRepository, concurrencyService, sessionLimitCache, rpmCache)
|
||||||
|
groupHandler := admin.NewGroupHandler(adminService, dashboardService, groupCapacityService)
|
||||||
claudeOAuthClient := repository.NewClaudeOAuthClient()
|
claudeOAuthClient := repository.NewClaudeOAuthClient()
|
||||||
oAuthService := service.NewOAuthService(proxyRepository, claudeOAuthClient)
|
oAuthService := service.NewOAuthService(proxyRepository, claudeOAuthClient)
|
||||||
openAIOAuthClient := repository.NewOpenAIOAuthClient()
|
openAIOAuthClient := repository.NewOpenAIOAuthClient()
|
||||||
openAIOAuthService := service.NewOpenAIOAuthService(proxyRepository, openAIOAuthClient)
|
openAIOAuthService := service.NewOpenAIOAuthService(proxyRepository, openAIOAuthClient)
|
||||||
rateLimitService := service.NewRateLimitService(accountRepository, configConfig)
|
geminiOAuthClient := repository.NewGeminiOAuthClient(configConfig)
|
||||||
claudeUsageFetcher := repository.NewClaudeUsageFetcher()
|
geminiCliCodeAssistClient := repository.NewGeminiCliCodeAssistClient()
|
||||||
accountUsageService := service.NewAccountUsageService(accountRepository, usageLogRepository, claudeUsageFetcher)
|
driveClient := repository.NewGeminiDriveClient()
|
||||||
|
geminiOAuthService := service.NewGeminiOAuthService(proxyRepository, geminiOAuthClient, geminiCliCodeAssistClient, driveClient, configConfig)
|
||||||
|
antigravityOAuthService := service.NewAntigravityOAuthService(proxyRepository)
|
||||||
|
geminiQuotaService := service.NewGeminiQuotaService(configConfig, settingRepository)
|
||||||
|
tempUnschedCache := repository.NewTempUnschedCache(redisClient)
|
||||||
|
timeoutCounterCache := repository.NewTimeoutCounterCache(redisClient)
|
||||||
|
openAI403CounterCache := repository.NewOpenAI403CounterCache(redisClient)
|
||||||
|
geminiTokenCache := repository.NewGeminiTokenCache(redisClient)
|
||||||
|
compositeTokenCacheInvalidator := service.NewCompositeTokenCacheInvalidator(geminiTokenCache)
|
||||||
|
rateLimitService := service.ProvideRateLimitService(accountRepository, usageLogRepository, configConfig, geminiQuotaService, tempUnschedCache, timeoutCounterCache, openAI403CounterCache, settingService, compositeTokenCacheInvalidator)
|
||||||
httpUpstream := repository.NewHTTPUpstream(configConfig)
|
httpUpstream := repository.NewHTTPUpstream(configConfig)
|
||||||
accountTestService := service.NewAccountTestService(accountRepository, oAuthService, openAIOAuthService, httpUpstream)
|
claudeUsageFetcher := repository.NewClaudeUsageFetcher(httpUpstream)
|
||||||
concurrencyCache := repository.NewConcurrencyCache(client)
|
antigravityQuotaFetcher := service.NewAntigravityQuotaFetcher(proxyRepository)
|
||||||
concurrencyService := service.NewConcurrencyService(concurrencyCache)
|
usageCache := service.NewUsageCache()
|
||||||
accountHandler := admin.NewAccountHandler(adminService, oAuthService, openAIOAuthService, rateLimitService, accountUsageService, accountTestService, concurrencyService)
|
identityCache := repository.NewIdentityCache(redisClient)
|
||||||
|
tlsFingerprintProfileRepository := repository.NewTLSFingerprintProfileRepository(client)
|
||||||
|
tlsFingerprintProfileCache := repository.NewTLSFingerprintProfileCache(redisClient)
|
||||||
|
tlsFingerprintProfileService := service.NewTLSFingerprintProfileService(tlsFingerprintProfileRepository, tlsFingerprintProfileCache)
|
||||||
|
accountUsageService := service.NewAccountUsageService(accountRepository, usageLogRepository, claudeUsageFetcher, geminiQuotaService, antigravityQuotaFetcher, usageCache, identityCache, tlsFingerprintProfileService)
|
||||||
|
oAuthRefreshAPI := service.ProvideOAuthRefreshAPI(accountRepository, geminiTokenCache)
|
||||||
|
geminiTokenProvider := service.ProvideGeminiTokenProvider(accountRepository, geminiTokenCache, geminiOAuthService, oAuthRefreshAPI)
|
||||||
|
gatewayCache := repository.NewGatewayCache(redisClient)
|
||||||
|
schedulerOutboxRepository := repository.NewSchedulerOutboxRepository(db)
|
||||||
|
schedulerSnapshotService := service.ProvideSchedulerSnapshotService(schedulerCache, schedulerOutboxRepository, accountRepository, groupRepository, configConfig)
|
||||||
|
antigravityTokenProvider := service.ProvideAntigravityTokenProvider(accountRepository, geminiTokenCache, antigravityOAuthService, oAuthRefreshAPI, tempUnschedCache)
|
||||||
|
internal500CounterCache := repository.NewInternal500CounterCache(redisClient)
|
||||||
|
antigravityGatewayService := service.NewAntigravityGatewayService(accountRepository, gatewayCache, schedulerSnapshotService, antigravityTokenProvider, rateLimitService, httpUpstream, settingService, internal500CounterCache)
|
||||||
|
accountTestService := service.NewAccountTestService(accountRepository, geminiTokenProvider, antigravityGatewayService, httpUpstream, configConfig, tlsFingerprintProfileService)
|
||||||
|
crsSyncService := service.NewCRSSyncService(accountRepository, proxyRepository, oAuthService, openAIOAuthService, geminiOAuthService, configConfig)
|
||||||
|
accountHandler := admin.NewAccountHandler(adminService, oAuthService, openAIOAuthService, geminiOAuthService, antigravityOAuthService, rateLimitService, accountUsageService, accountTestService, concurrencyService, crsSyncService, sessionLimitCache, rpmCache, compositeTokenCacheInvalidator)
|
||||||
|
adminAnnouncementHandler := admin.NewAnnouncementHandler(announcementService)
|
||||||
|
dataManagementService := service.NewDataManagementService()
|
||||||
|
dataManagementHandler := admin.NewDataManagementHandler(dataManagementService)
|
||||||
|
backupObjectStoreFactory := repository.NewS3BackupStoreFactory()
|
||||||
|
dbDumper := repository.NewPgDumper(configConfig)
|
||||||
|
backupService := service.ProvideBackupService(settingRepository, configConfig, secretEncryptor, backupObjectStoreFactory, dbDumper)
|
||||||
|
backupHandler := admin.NewBackupHandler(backupService, userService)
|
||||||
oAuthHandler := admin.NewOAuthHandler(oAuthService)
|
oAuthHandler := admin.NewOAuthHandler(oAuthService)
|
||||||
openAIOAuthHandler := admin.NewOpenAIOAuthHandler(openAIOAuthService, adminService)
|
openAIOAuthHandler := admin.NewOpenAIOAuthHandler(openAIOAuthService, adminService)
|
||||||
|
geminiOAuthHandler := admin.NewGeminiOAuthHandler(geminiOAuthService)
|
||||||
|
antigravityOAuthHandler := admin.NewAntigravityOAuthHandler(antigravityOAuthService)
|
||||||
proxyHandler := admin.NewProxyHandler(adminService)
|
proxyHandler := admin.NewProxyHandler(adminService)
|
||||||
adminRedeemHandler := admin.NewRedeemHandler(adminService)
|
adminRedeemHandler := admin.NewRedeemHandler(adminService, redeemService)
|
||||||
settingHandler := admin.NewSettingHandler(settingService, emailService)
|
promoHandler := admin.NewPromoHandler(promoService)
|
||||||
updateCache := repository.NewUpdateCache(client)
|
opsRepository := repository.NewOpsRepository(db)
|
||||||
gitHubReleaseClient := repository.NewGitHubReleaseClient()
|
usageBillingRepository := repository.NewUsageBillingRepository(client, db)
|
||||||
serviceBuildInfo := provideServiceBuildInfo(buildInfo)
|
pricingRemoteClient := repository.ProvidePricingRemoteClient(configConfig)
|
||||||
updateService := service.ProvideUpdateService(updateCache, gitHubReleaseClient, serviceBuildInfo)
|
|
||||||
systemHandler := handler.ProvideSystemHandler(updateService)
|
|
||||||
adminSubscriptionHandler := admin.NewSubscriptionHandler(subscriptionService)
|
|
||||||
adminUsageHandler := admin.NewUsageHandler(usageService, apiKeyService, adminService)
|
|
||||||
adminHandlers := handler.ProvideAdminHandlers(dashboardHandler, adminUserHandler, groupHandler, accountHandler, oAuthHandler, openAIOAuthHandler, proxyHandler, adminRedeemHandler, settingHandler, systemHandler, adminSubscriptionHandler, adminUsageHandler)
|
|
||||||
gatewayCache := repository.NewGatewayCache(client)
|
|
||||||
pricingRemoteClient := repository.NewPricingRemoteClient()
|
|
||||||
pricingService, err := service.ProvidePricingService(configConfig, pricingRemoteClient)
|
pricingService, err := service.ProvidePricingService(configConfig, pricingRemoteClient)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
billingService := service.NewBillingService(configConfig, pricingService)
|
billingService := service.NewBillingService(configConfig, pricingService)
|
||||||
identityCache := repository.NewIdentityCache(client)
|
|
||||||
identityService := service.NewIdentityService(identityCache)
|
identityService := service.NewIdentityService(identityCache)
|
||||||
gatewayService := service.NewGatewayService(accountRepository, usageLogRepository, userRepository, userSubscriptionRepository, gatewayCache, configConfig, billingService, rateLimitService, billingCacheService, identityService, httpUpstream)
|
deferredService := service.ProvideDeferredService(accountRepository, timingWheelService)
|
||||||
gatewayHandler := handler.NewGatewayHandler(gatewayService, userService, concurrencyService, billingCacheService)
|
claudeTokenProvider := service.ProvideClaudeTokenProvider(accountRepository, geminiTokenCache, oAuthService, oAuthRefreshAPI)
|
||||||
openAIGatewayService := service.NewOpenAIGatewayService(accountRepository, usageLogRepository, userRepository, userSubscriptionRepository, gatewayCache, configConfig, billingService, rateLimitService, billingCacheService, httpUpstream)
|
digestSessionStore := service.NewDigestSessionStore()
|
||||||
openAIGatewayHandler := handler.NewOpenAIGatewayHandler(openAIGatewayService, concurrencyService, billingCacheService)
|
channelRepository := repository.NewChannelRepository(db)
|
||||||
|
channelService := service.NewChannelService(channelRepository, groupRepository, apiKeyAuthCacheInvalidator, pricingService)
|
||||||
|
modelPricingResolver := service.NewModelPricingResolver(channelService, billingService)
|
||||||
|
balanceNotifyService := service.ProvideBalanceNotifyService(emailService, settingRepository, accountRepository)
|
||||||
|
gatewayService := service.NewGatewayService(accountRepository, groupRepository, usageLogRepository, usageBillingRepository, userRepository, userSubscriptionRepository, userGroupRateRepository, gatewayCache, configConfig, schedulerSnapshotService, concurrencyService, billingService, rateLimitService, billingCacheService, identityService, httpUpstream, deferredService, claudeTokenProvider, sessionLimitCache, rpmCache, digestSessionStore, settingService, tlsFingerprintProfileService, channelService, modelPricingResolver, balanceNotifyService)
|
||||||
|
openAITokenProvider := service.ProvideOpenAITokenProvider(accountRepository, geminiTokenCache, openAIOAuthService, oAuthRefreshAPI)
|
||||||
|
openAIGatewayService := service.NewOpenAIGatewayService(accountRepository, usageLogRepository, usageBillingRepository, userRepository, userSubscriptionRepository, userGroupRateRepository, gatewayCache, configConfig, schedulerSnapshotService, concurrencyService, billingService, rateLimitService, billingCacheService, httpUpstream, deferredService, openAITokenProvider, modelPricingResolver, channelService, balanceNotifyService)
|
||||||
|
geminiMessagesCompatService := service.NewGeminiMessagesCompatService(accountRepository, groupRepository, gatewayCache, schedulerSnapshotService, geminiTokenProvider, rateLimitService, httpUpstream, antigravityGatewayService, configConfig)
|
||||||
|
opsSystemLogSink := service.ProvideOpsSystemLogSink(opsRepository)
|
||||||
|
opsService := service.NewOpsService(opsRepository, settingRepository, configConfig, accountRepository, userRepository, concurrencyService, gatewayService, openAIGatewayService, geminiMessagesCompatService, antigravityGatewayService, opsSystemLogSink)
|
||||||
|
encryptionKey, err := payment.ProvideEncryptionKey(configConfig)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
paymentConfigService := service.ProvidePaymentConfigService(client, settingRepository, encryptionKey)
|
||||||
|
registry := payment.ProvideRegistry()
|
||||||
|
defaultLoadBalancer := payment.ProvideDefaultLoadBalancer(client, encryptionKey)
|
||||||
|
paymentService := service.NewPaymentService(client, registry, defaultLoadBalancer, redeemService, subscriptionService, paymentConfigService, userRepository, groupRepository)
|
||||||
|
settingHandler := admin.NewSettingHandler(settingService, emailService, turnstileService, opsService, paymentConfigService, paymentService)
|
||||||
|
opsHandler := admin.NewOpsHandler(opsService)
|
||||||
|
updateCache := repository.NewUpdateCache(redisClient)
|
||||||
|
gitHubReleaseClient := repository.ProvideGitHubReleaseClient(configConfig)
|
||||||
|
serviceBuildInfo := provideServiceBuildInfo(buildInfo)
|
||||||
|
updateService := service.ProvideUpdateService(updateCache, gitHubReleaseClient, serviceBuildInfo)
|
||||||
|
idempotencyRepository := repository.NewIdempotencyRepository(client, db)
|
||||||
|
systemOperationLockService := service.ProvideSystemOperationLockService(idempotencyRepository, configConfig)
|
||||||
|
systemHandler := handler.ProvideSystemHandler(updateService, systemOperationLockService)
|
||||||
|
adminSubscriptionHandler := admin.NewSubscriptionHandler(subscriptionService)
|
||||||
|
usageCleanupRepository := repository.NewUsageCleanupRepository(client, db)
|
||||||
|
usageCleanupService := service.ProvideUsageCleanupService(usageCleanupRepository, timingWheelService, dashboardAggregationService, configConfig)
|
||||||
|
adminUsageHandler := admin.NewUsageHandler(usageService, apiKeyService, adminService, usageCleanupService)
|
||||||
|
userAttributeDefinitionRepository := repository.NewUserAttributeDefinitionRepository(client)
|
||||||
|
userAttributeValueRepository := repository.NewUserAttributeValueRepository(client)
|
||||||
|
userAttributeService := service.NewUserAttributeService(userAttributeDefinitionRepository, userAttributeValueRepository)
|
||||||
|
userAttributeHandler := admin.NewUserAttributeHandler(userAttributeService)
|
||||||
|
errorPassthroughRepository := repository.NewErrorPassthroughRepository(client)
|
||||||
|
errorPassthroughCache := repository.NewErrorPassthroughCache(redisClient)
|
||||||
|
errorPassthroughService := service.NewErrorPassthroughService(errorPassthroughRepository, errorPassthroughCache)
|
||||||
|
errorPassthroughHandler := admin.NewErrorPassthroughHandler(errorPassthroughService)
|
||||||
|
tlsFingerprintProfileHandler := admin.NewTLSFingerprintProfileHandler(tlsFingerprintProfileService)
|
||||||
|
adminAPIKeyHandler := admin.NewAdminAPIKeyHandler(adminService)
|
||||||
|
scheduledTestPlanRepository := repository.NewScheduledTestPlanRepository(db)
|
||||||
|
scheduledTestResultRepository := repository.NewScheduledTestResultRepository(db)
|
||||||
|
scheduledTestService := service.ProvideScheduledTestService(scheduledTestPlanRepository, scheduledTestResultRepository)
|
||||||
|
scheduledTestHandler := admin.NewScheduledTestHandler(scheduledTestService)
|
||||||
|
channelHandler := admin.NewChannelHandler(channelService, billingService)
|
||||||
|
sqlDB, err := repository.ProvideSQLDB(client)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
channelMonitorRepository := repository.NewChannelMonitorRepository(client, sqlDB)
|
||||||
|
channelMonitorRequestTemplateRepository := repository.NewChannelMonitorRequestTemplateRepository(client, sqlDB)
|
||||||
|
channelMonitorRequestTemplateService := service.NewChannelMonitorRequestTemplateService(channelMonitorRequestTemplateRepository)
|
||||||
|
channelMonitorRequestTemplateHandler := admin.NewChannelMonitorRequestTemplateHandler(channelMonitorRequestTemplateService)
|
||||||
|
channelMonitorService := service.ProvideChannelMonitorService(channelMonitorRepository, secretEncryptor)
|
||||||
|
channelMonitorHandler := admin.NewChannelMonitorHandler(channelMonitorService)
|
||||||
|
channelMonitorUserHandler := handler.NewChannelMonitorUserHandler(channelMonitorService, settingService)
|
||||||
|
channelMonitorRunner := service.ProvideChannelMonitorRunner(channelMonitorService, settingService)
|
||||||
|
paymentHandler := admin.NewPaymentHandler(paymentService, paymentConfigService)
|
||||||
|
availableChannelUserHandler := handler.NewAvailableChannelHandler(channelService, apiKeyService, settingService)
|
||||||
|
adminHandlers := handler.ProvideAdminHandlers(dashboardHandler, adminUserHandler, groupHandler, accountHandler, adminAnnouncementHandler, dataManagementHandler, backupHandler, oAuthHandler, openAIOAuthHandler, geminiOAuthHandler, antigravityOAuthHandler, proxyHandler, adminRedeemHandler, promoHandler, settingHandler, opsHandler, systemHandler, adminSubscriptionHandler, adminUsageHandler, userAttributeHandler, errorPassthroughHandler, tlsFingerprintProfileHandler, adminAPIKeyHandler, scheduledTestHandler, channelHandler, channelMonitorHandler, channelMonitorRequestTemplateHandler, paymentHandler)
|
||||||
|
usageRecordWorkerPool := service.NewUsageRecordWorkerPool(configConfig)
|
||||||
|
userMsgQueueCache := repository.NewUserMsgQueueCache(redisClient)
|
||||||
|
userMessageQueueService := service.ProvideUserMessageQueueService(userMsgQueueCache, rpmCache, configConfig)
|
||||||
|
gatewayHandler := handler.NewGatewayHandler(gatewayService, geminiMessagesCompatService, antigravityGatewayService, userService, concurrencyService, billingCacheService, usageService, apiKeyService, usageRecordWorkerPool, errorPassthroughService, userMessageQueueService, configConfig, settingService)
|
||||||
|
openAIGatewayHandler := handler.NewOpenAIGatewayHandler(openAIGatewayService, concurrencyService, billingCacheService, apiKeyService, usageRecordWorkerPool, errorPassthroughService, configConfig)
|
||||||
handlerSettingHandler := handler.ProvideSettingHandler(settingService, buildInfo)
|
handlerSettingHandler := handler.ProvideSettingHandler(settingService, buildInfo)
|
||||||
handlers := handler.ProvideHandlers(authHandler, userHandler, apiKeyHandler, usageHandler, redeemHandler, subscriptionHandler, adminHandlers, gatewayHandler, openAIGatewayHandler, handlerSettingHandler)
|
totpHandler := handler.NewTotpHandler(totpService)
|
||||||
groupService := service.NewGroupService(groupRepository)
|
handlerPaymentHandler := handler.NewPaymentHandler(paymentService, paymentConfigService, channelService)
|
||||||
accountService := service.NewAccountService(accountRepository, groupRepository)
|
paymentWebhookHandler := handler.NewPaymentWebhookHandler(paymentService, registry)
|
||||||
proxyService := service.NewProxyService(proxyRepository)
|
idempotencyCoordinator := service.ProvideIdempotencyCoordinator(idempotencyRepository, configConfig)
|
||||||
tokenRefreshService := service.ProvideTokenRefreshService(accountRepository, oAuthService, openAIOAuthService, configConfig)
|
idempotencyCleanupService := service.ProvideIdempotencyCleanupService(idempotencyRepository, configConfig)
|
||||||
services := &service.Services{
|
handlers := handler.ProvideHandlers(authHandler, userHandler, apiKeyHandler, usageHandler, redeemHandler, subscriptionHandler, announcementHandler, channelMonitorUserHandler, adminHandlers, gatewayHandler, openAIGatewayHandler, handlerSettingHandler, totpHandler, handlerPaymentHandler, paymentWebhookHandler, availableChannelUserHandler, idempotencyCoordinator, idempotencyCleanupService)
|
||||||
Auth: authService,
|
jwtAuthMiddleware := middleware.NewJWTAuthMiddleware(authService, userService)
|
||||||
User: userService,
|
adminAuthMiddleware := middleware.NewAdminAuthMiddleware(authService, userService, settingService)
|
||||||
ApiKey: apiKeyService,
|
apiKeyAuthMiddleware := middleware.NewAPIKeyAuthMiddleware(apiKeyService, subscriptionService, configConfig)
|
||||||
Group: groupService,
|
engine := server.ProvideRouter(configConfig, handlers, jwtAuthMiddleware, adminAuthMiddleware, apiKeyAuthMiddleware, apiKeyService, subscriptionService, opsService, settingService, redisClient)
|
||||||
Account: accountService,
|
|
||||||
Proxy: proxyService,
|
|
||||||
Redeem: redeemService,
|
|
||||||
Usage: usageService,
|
|
||||||
Pricing: pricingService,
|
|
||||||
Billing: billingService,
|
|
||||||
BillingCache: billingCacheService,
|
|
||||||
Admin: adminService,
|
|
||||||
Gateway: gatewayService,
|
|
||||||
OpenAIGateway: openAIGatewayService,
|
|
||||||
OAuth: oAuthService,
|
|
||||||
OpenAIOAuth: openAIOAuthService,
|
|
||||||
RateLimit: rateLimitService,
|
|
||||||
AccountUsage: accountUsageService,
|
|
||||||
AccountTest: accountTestService,
|
|
||||||
Setting: settingService,
|
|
||||||
Email: emailService,
|
|
||||||
EmailQueue: emailQueueService,
|
|
||||||
Turnstile: turnstileService,
|
|
||||||
Subscription: subscriptionService,
|
|
||||||
Concurrency: concurrencyService,
|
|
||||||
Identity: identityService,
|
|
||||||
Update: updateService,
|
|
||||||
TokenRefresh: tokenRefreshService,
|
|
||||||
}
|
|
||||||
repositories := &repository.Repositories{
|
|
||||||
User: userRepository,
|
|
||||||
ApiKey: apiKeyRepository,
|
|
||||||
Group: groupRepository,
|
|
||||||
Account: accountRepository,
|
|
||||||
Proxy: proxyRepository,
|
|
||||||
RedeemCode: redeemCodeRepository,
|
|
||||||
UsageLog: usageLogRepository,
|
|
||||||
Setting: settingRepository,
|
|
||||||
UserSubscription: userSubscriptionRepository,
|
|
||||||
}
|
|
||||||
engine := server.ProvideRouter(configConfig, handlers, services, repositories)
|
|
||||||
httpServer := server.ProvideHTTPServer(configConfig, engine)
|
httpServer := server.ProvideHTTPServer(configConfig, engine)
|
||||||
v := provideCleanup(db, client, services)
|
opsMetricsCollector := service.ProvideOpsMetricsCollector(opsRepository, settingRepository, accountRepository, concurrencyService, db, redisClient, configConfig)
|
||||||
|
opsAggregationService := service.ProvideOpsAggregationService(opsRepository, settingRepository, db, redisClient, configConfig)
|
||||||
|
opsAlertEvaluatorService := service.ProvideOpsAlertEvaluatorService(opsService, opsRepository, emailService, redisClient, configConfig)
|
||||||
|
opsCleanupService := service.ProvideOpsCleanupService(opsRepository, db, redisClient, configConfig, channelMonitorService)
|
||||||
|
opsScheduledReportService := service.ProvideOpsScheduledReportService(opsService, userService, emailService, redisClient, configConfig)
|
||||||
|
tokenRefreshService := service.ProvideTokenRefreshService(accountRepository, oAuthService, openAIOAuthService, geminiOAuthService, antigravityOAuthService, compositeTokenCacheInvalidator, schedulerCache, configConfig, tempUnschedCache, privacyClientFactory, proxyRepository, oAuthRefreshAPI)
|
||||||
|
accountExpiryService := service.ProvideAccountExpiryService(accountRepository)
|
||||||
|
subscriptionExpiryService := service.ProvideSubscriptionExpiryService(userSubscriptionRepository)
|
||||||
|
scheduledTestRunnerService := service.ProvideScheduledTestRunnerService(scheduledTestPlanRepository, scheduledTestService, accountTestService, rateLimitService, configConfig)
|
||||||
|
paymentOrderExpiryService := service.ProvidePaymentOrderExpiryService(paymentService)
|
||||||
|
v := provideCleanup(client, redisClient, opsMetricsCollector, opsAggregationService, opsAlertEvaluatorService, opsCleanupService, opsScheduledReportService, opsSystemLogSink, schedulerSnapshotService, tokenRefreshService, accountExpiryService, subscriptionExpiryService, usageCleanupService, idempotencyCleanupService, pricingService, emailQueueService, billingCacheService, usageRecordWorkerPool, subscriptionService, oAuthService, openAIOAuthService, geminiOAuthService, antigravityOAuthService, openAIGatewayService, scheduledTestRunnerService, backupService, paymentOrderExpiryService, channelMonitorRunner)
|
||||||
application := &Application{
|
application := &Application{
|
||||||
Server: httpServer,
|
Server: httpServer,
|
||||||
Cleanup: v,
|
Cleanup: v,
|
||||||
@@ -177,6 +278,10 @@ type Application struct {
|
|||||||
Cleanup func()
|
Cleanup func()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func providePrivacyClientFactory() service.PrivacyClientFactory {
|
||||||
|
return repository.CreatePrivacyReqClient
|
||||||
|
}
|
||||||
|
|
||||||
func provideServiceBuildInfo(buildInfo handler.BuildInfo) service.BuildInfo {
|
func provideServiceBuildInfo(buildInfo handler.BuildInfo) service.BuildInfo {
|
||||||
return service.BuildInfo{
|
return service.BuildInfo{
|
||||||
Version: buildInfo.Version,
|
Version: buildInfo.Version,
|
||||||
@@ -185,59 +290,229 @@ func provideServiceBuildInfo(buildInfo handler.BuildInfo) service.BuildInfo {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func provideCleanup(
|
func provideCleanup(
|
||||||
db *gorm.DB,
|
entClient *ent.Client,
|
||||||
rdb *redis.Client,
|
rdb *redis.Client,
|
||||||
services *service.Services,
|
opsMetricsCollector *service.OpsMetricsCollector,
|
||||||
|
opsAggregation *service.OpsAggregationService,
|
||||||
|
opsAlertEvaluator *service.OpsAlertEvaluatorService,
|
||||||
|
opsCleanup *service.OpsCleanupService,
|
||||||
|
opsScheduledReport *service.OpsScheduledReportService,
|
||||||
|
opsSystemLogSink *service.OpsSystemLogSink,
|
||||||
|
schedulerSnapshot *service.SchedulerSnapshotService,
|
||||||
|
tokenRefresh *service.TokenRefreshService,
|
||||||
|
accountExpiry *service.AccountExpiryService,
|
||||||
|
subscriptionExpiry *service.SubscriptionExpiryService,
|
||||||
|
usageCleanup *service.UsageCleanupService,
|
||||||
|
idempotencyCleanup *service.IdempotencyCleanupService,
|
||||||
|
pricing *service.PricingService,
|
||||||
|
emailQueue *service.EmailQueueService,
|
||||||
|
billingCache *service.BillingCacheService,
|
||||||
|
usageRecordWorkerPool *service.UsageRecordWorkerPool,
|
||||||
|
subscriptionService *service.SubscriptionService,
|
||||||
|
oauth *service.OAuthService,
|
||||||
|
openaiOAuth *service.OpenAIOAuthService,
|
||||||
|
geminiOAuth *service.GeminiOAuthService,
|
||||||
|
antigravityOAuth *service.AntigravityOAuthService,
|
||||||
|
openAIGateway *service.OpenAIGatewayService,
|
||||||
|
scheduledTestRunner *service.ScheduledTestRunnerService,
|
||||||
|
backupSvc *service.BackupService,
|
||||||
|
paymentOrderExpiry *service.PaymentOrderExpiryService,
|
||||||
|
channelMonitorRunner *service.ChannelMonitorRunner,
|
||||||
) func() {
|
) func() {
|
||||||
return func() {
|
return func() {
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
|
|
||||||
cleanupSteps := []struct {
|
type cleanupStep struct {
|
||||||
name string
|
name string
|
||||||
fn func() error
|
fn func() error
|
||||||
}{
|
}
|
||||||
|
|
||||||
|
parallelSteps := []cleanupStep{
|
||||||
|
{"OpsScheduledReportService", func() error {
|
||||||
|
if opsScheduledReport != nil {
|
||||||
|
opsScheduledReport.Stop()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
|
{"OpsCleanupService", func() error {
|
||||||
|
if opsCleanup != nil {
|
||||||
|
opsCleanup.Stop()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
|
{"OpsSystemLogSink", func() error {
|
||||||
|
if opsSystemLogSink != nil {
|
||||||
|
opsSystemLogSink.Stop()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
|
{"OpsAlertEvaluatorService", func() error {
|
||||||
|
if opsAlertEvaluator != nil {
|
||||||
|
opsAlertEvaluator.Stop()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
|
{"OpsAggregationService", func() error {
|
||||||
|
if opsAggregation != nil {
|
||||||
|
opsAggregation.Stop()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
|
{"OpsMetricsCollector", func() error {
|
||||||
|
if opsMetricsCollector != nil {
|
||||||
|
opsMetricsCollector.Stop()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
|
{"SchedulerSnapshotService", func() error {
|
||||||
|
if schedulerSnapshot != nil {
|
||||||
|
schedulerSnapshot.Stop()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
|
{"UsageCleanupService", func() error {
|
||||||
|
if usageCleanup != nil {
|
||||||
|
usageCleanup.Stop()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
|
{"IdempotencyCleanupService", func() error {
|
||||||
|
if idempotencyCleanup != nil {
|
||||||
|
idempotencyCleanup.Stop()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
{"TokenRefreshService", func() error {
|
{"TokenRefreshService", func() error {
|
||||||
services.TokenRefresh.Stop()
|
tokenRefresh.Stop()
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
|
{"AccountExpiryService", func() error {
|
||||||
|
accountExpiry.Stop()
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
|
{"SubscriptionExpiryService", func() error {
|
||||||
|
subscriptionExpiry.Stop()
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
|
{"SubscriptionService", func() error {
|
||||||
|
if subscriptionService != nil {
|
||||||
|
subscriptionService.Stop()
|
||||||
|
}
|
||||||
return nil
|
return nil
|
||||||
}},
|
}},
|
||||||
{"PricingService", func() error {
|
{"PricingService", func() error {
|
||||||
services.Pricing.Stop()
|
pricing.Stop()
|
||||||
return nil
|
return nil
|
||||||
}},
|
}},
|
||||||
{"EmailQueueService", func() error {
|
{"EmailQueueService", func() error {
|
||||||
services.EmailQueue.Stop()
|
emailQueue.Stop()
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
|
{"BillingCacheService", func() error {
|
||||||
|
billingCache.Stop()
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
|
{"UsageRecordWorkerPool", func() error {
|
||||||
|
if usageRecordWorkerPool != nil {
|
||||||
|
usageRecordWorkerPool.Stop()
|
||||||
|
}
|
||||||
return nil
|
return nil
|
||||||
}},
|
}},
|
||||||
{"OAuthService", func() error {
|
{"OAuthService", func() error {
|
||||||
services.OAuth.Stop()
|
oauth.Stop()
|
||||||
return nil
|
return nil
|
||||||
}},
|
}},
|
||||||
{"OpenAIOAuthService", func() error {
|
{"OpenAIOAuthService", func() error {
|
||||||
services.OpenAIOAuth.Stop()
|
openaiOAuth.Stop()
|
||||||
return nil
|
return nil
|
||||||
}},
|
}},
|
||||||
{"Redis", func() error {
|
{"GeminiOAuthService", func() error {
|
||||||
return rdb.Close()
|
geminiOAuth.Stop()
|
||||||
|
return nil
|
||||||
}},
|
}},
|
||||||
{"Database", func() error {
|
{"AntigravityOAuthService", func() error {
|
||||||
sqlDB, err := db.DB()
|
antigravityOAuth.Stop()
|
||||||
if err != nil {
|
return nil
|
||||||
return err
|
}},
|
||||||
|
{"OpenAIWSPool", func() error {
|
||||||
|
if openAIGateway != nil {
|
||||||
|
openAIGateway.CloseOpenAIWSPool()
|
||||||
}
|
}
|
||||||
return sqlDB.Close()
|
return nil
|
||||||
|
}},
|
||||||
|
{"ScheduledTestRunnerService", func() error {
|
||||||
|
if scheduledTestRunner != nil {
|
||||||
|
scheduledTestRunner.Stop()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
|
{"BackupService", func() error {
|
||||||
|
if backupSvc != nil {
|
||||||
|
backupSvc.Stop()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
|
{"PaymentOrderExpiryService", func() error {
|
||||||
|
if paymentOrderExpiry != nil {
|
||||||
|
paymentOrderExpiry.Stop()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
|
{"ChannelMonitorRunner", func() error {
|
||||||
|
if channelMonitorRunner != nil {
|
||||||
|
channelMonitorRunner.Stop()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
}},
|
}},
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, step := range cleanupSteps {
|
infraSteps := []cleanupStep{
|
||||||
if err := step.fn(); err != nil {
|
{"Redis", func() error {
|
||||||
log.Printf("[Cleanup] %s failed: %v", step.name, err)
|
if rdb == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return rdb.Close()
|
||||||
|
}},
|
||||||
|
{"Ent", func() error {
|
||||||
|
if entClient == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return entClient.Close()
|
||||||
|
}},
|
||||||
|
}
|
||||||
|
|
||||||
} else {
|
runParallel := func(steps []cleanupStep) {
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
for i := range steps {
|
||||||
|
step := steps[i]
|
||||||
|
wg.Add(1)
|
||||||
|
go func() {
|
||||||
|
defer wg.Done()
|
||||||
|
if err := step.fn(); err != nil {
|
||||||
|
log.Printf("[Cleanup] %s failed: %v", step.name, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
log.Printf("[Cleanup] %s succeeded", step.name)
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
wg.Wait()
|
||||||
|
}
|
||||||
|
|
||||||
|
runSequential := func(steps []cleanupStep) {
|
||||||
|
for i := range steps {
|
||||||
|
step := steps[i]
|
||||||
|
if err := step.fn(); err != nil {
|
||||||
|
log.Printf("[Cleanup] %s failed: %v", step.name, err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
log.Printf("[Cleanup] %s succeeded", step.name)
|
log.Printf("[Cleanup] %s succeeded", step.name)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
runParallel(parallelSteps)
|
||||||
|
runSequential(infraSteps)
|
||||||
|
|
||||||
select {
|
select {
|
||||||
case <-ctx.Done():
|
case <-ctx.Done():
|
||||||
log.Printf("[Cleanup] Warning: cleanup timed out after 10 seconds")
|
log.Printf("[Cleanup] Warning: cleanup timed out after 10 seconds")
|
||||||
|
|||||||
85
backend/cmd/server/wire_gen_test.go
Normal file
@@ -0,0 +1,85 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/Wei-Shaw/sub2api/internal/config"
|
||||||
|
"github.com/Wei-Shaw/sub2api/internal/handler"
|
||||||
|
"github.com/Wei-Shaw/sub2api/internal/service"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestProvideServiceBuildInfo(t *testing.T) {
|
||||||
|
in := handler.BuildInfo{
|
||||||
|
Version: "v-test",
|
||||||
|
BuildType: "release",
|
||||||
|
}
|
||||||
|
out := provideServiceBuildInfo(in)
|
||||||
|
require.Equal(t, in.Version, out.Version)
|
||||||
|
require.Equal(t, in.BuildType, out.BuildType)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestProvideCleanup_WithMinimalDependencies_NoPanic(t *testing.T) {
|
||||||
|
cfg := &config.Config{}
|
||||||
|
|
||||||
|
oauthSvc := service.NewOAuthService(nil, nil)
|
||||||
|
openAIOAuthSvc := service.NewOpenAIOAuthService(nil, nil)
|
||||||
|
geminiOAuthSvc := service.NewGeminiOAuthService(nil, nil, nil, nil, cfg)
|
||||||
|
antigravityOAuthSvc := service.NewAntigravityOAuthService(nil)
|
||||||
|
|
||||||
|
tokenRefreshSvc := service.NewTokenRefreshService(
|
||||||
|
nil,
|
||||||
|
oauthSvc,
|
||||||
|
openAIOAuthSvc,
|
||||||
|
geminiOAuthSvc,
|
||||||
|
antigravityOAuthSvc,
|
||||||
|
nil,
|
||||||
|
nil,
|
||||||
|
cfg,
|
||||||
|
nil,
|
||||||
|
)
|
||||||
|
accountExpirySvc := service.NewAccountExpiryService(nil, time.Second)
|
||||||
|
subscriptionExpirySvc := service.NewSubscriptionExpiryService(nil, time.Second)
|
||||||
|
pricingSvc := service.NewPricingService(cfg, nil)
|
||||||
|
emailQueueSvc := service.NewEmailQueueService(nil, 1)
|
||||||
|
billingCacheSvc := service.NewBillingCacheService(nil, nil, nil, nil, nil, nil, cfg)
|
||||||
|
idempotencyCleanupSvc := service.NewIdempotencyCleanupService(nil, cfg)
|
||||||
|
schedulerSnapshotSvc := service.NewSchedulerSnapshotService(nil, nil, nil, nil, cfg)
|
||||||
|
opsSystemLogSinkSvc := service.NewOpsSystemLogSink(nil)
|
||||||
|
|
||||||
|
cleanup := provideCleanup(
|
||||||
|
nil, // entClient
|
||||||
|
nil, // redis
|
||||||
|
&service.OpsMetricsCollector{},
|
||||||
|
&service.OpsAggregationService{},
|
||||||
|
&service.OpsAlertEvaluatorService{},
|
||||||
|
&service.OpsCleanupService{},
|
||||||
|
&service.OpsScheduledReportService{},
|
||||||
|
opsSystemLogSinkSvc,
|
||||||
|
schedulerSnapshotSvc,
|
||||||
|
tokenRefreshSvc,
|
||||||
|
accountExpirySvc,
|
||||||
|
subscriptionExpirySvc,
|
||||||
|
&service.UsageCleanupService{},
|
||||||
|
idempotencyCleanupSvc,
|
||||||
|
pricingSvc,
|
||||||
|
emailQueueSvc,
|
||||||
|
billingCacheSvc,
|
||||||
|
&service.UsageRecordWorkerPool{},
|
||||||
|
&service.SubscriptionService{},
|
||||||
|
oauthSvc,
|
||||||
|
openAIOAuthSvc,
|
||||||
|
geminiOAuthSvc,
|
||||||
|
antigravityOAuthSvc,
|
||||||
|
nil, // openAIGateway
|
||||||
|
nil, // scheduledTestRunner
|
||||||
|
nil, // backupSvc
|
||||||
|
nil, // paymentOrderExpiry
|
||||||
|
nil, // channelMonitorRunner
|
||||||
|
)
|
||||||
|
|
||||||
|
require.NotPanics(t, func() {
|
||||||
|
cleanup()
|
||||||
|
})
|
||||||
|
}
|
||||||
@@ -1,38 +0,0 @@
|
|||||||
server:
|
|
||||||
host: "0.0.0.0"
|
|
||||||
port: 8080
|
|
||||||
mode: "debug" # debug/release
|
|
||||||
|
|
||||||
database:
|
|
||||||
host: "127.0.0.1"
|
|
||||||
port: 5432
|
|
||||||
user: "postgres"
|
|
||||||
password: "XZeRr7nkjHWhm8fw"
|
|
||||||
dbname: "sub2api"
|
|
||||||
sslmode: "disable"
|
|
||||||
|
|
||||||
redis:
|
|
||||||
host: "127.0.0.1"
|
|
||||||
port: 6379
|
|
||||||
password: ""
|
|
||||||
db: 0
|
|
||||||
|
|
||||||
jwt:
|
|
||||||
secret: "your-secret-key-change-in-production"
|
|
||||||
expire_hour: 24
|
|
||||||
|
|
||||||
default:
|
|
||||||
admin_email: "admin@sub2api.com"
|
|
||||||
admin_password: "admin123"
|
|
||||||
user_concurrency: 5
|
|
||||||
user_balance: 0
|
|
||||||
api_key_prefix: "sk-"
|
|
||||||
rate_multiplier: 1.0
|
|
||||||
|
|
||||||
# Timezone configuration (similar to PHP's date_default_timezone_set)
|
|
||||||
# This affects ALL time operations:
|
|
||||||
# - Database timestamps
|
|
||||||
# - Usage statistics "today" boundary
|
|
||||||
# - Subscription expiry times
|
|
||||||
# Common values: Asia/Shanghai, America/New_York, Europe/London, UTC
|
|
||||||
timezone: "Asia/Shanghai"
|
|
||||||
536
backend/ent/account.go
Normal file
@@ -0,0 +1,536 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package ent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"entgo.io/ent"
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/account"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/proxy"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Account is the model entity for the Account schema.
|
||||||
|
type Account struct {
|
||||||
|
config `json:"-"`
|
||||||
|
// ID of the ent.
|
||||||
|
ID int64 `json:"id,omitempty"`
|
||||||
|
// CreatedAt holds the value of the "created_at" field.
|
||||||
|
CreatedAt time.Time `json:"created_at,omitempty"`
|
||||||
|
// UpdatedAt holds the value of the "updated_at" field.
|
||||||
|
UpdatedAt time.Time `json:"updated_at,omitempty"`
|
||||||
|
// DeletedAt holds the value of the "deleted_at" field.
|
||||||
|
DeletedAt *time.Time `json:"deleted_at,omitempty"`
|
||||||
|
// Name holds the value of the "name" field.
|
||||||
|
Name string `json:"name,omitempty"`
|
||||||
|
// Notes holds the value of the "notes" field.
|
||||||
|
Notes *string `json:"notes,omitempty"`
|
||||||
|
// Platform holds the value of the "platform" field.
|
||||||
|
Platform string `json:"platform,omitempty"`
|
||||||
|
// Type holds the value of the "type" field.
|
||||||
|
Type string `json:"type,omitempty"`
|
||||||
|
// Credentials holds the value of the "credentials" field.
|
||||||
|
Credentials map[string]interface{} `json:"credentials,omitempty"`
|
||||||
|
// Extra holds the value of the "extra" field.
|
||||||
|
Extra map[string]interface{} `json:"extra,omitempty"`
|
||||||
|
// ProxyID holds the value of the "proxy_id" field.
|
||||||
|
ProxyID *int64 `json:"proxy_id,omitempty"`
|
||||||
|
// Concurrency holds the value of the "concurrency" field.
|
||||||
|
Concurrency int `json:"concurrency,omitempty"`
|
||||||
|
// LoadFactor holds the value of the "load_factor" field.
|
||||||
|
LoadFactor *int `json:"load_factor,omitempty"`
|
||||||
|
// Priority holds the value of the "priority" field.
|
||||||
|
Priority int `json:"priority,omitempty"`
|
||||||
|
// RateMultiplier holds the value of the "rate_multiplier" field.
|
||||||
|
RateMultiplier float64 `json:"rate_multiplier,omitempty"`
|
||||||
|
// Status holds the value of the "status" field.
|
||||||
|
Status string `json:"status,omitempty"`
|
||||||
|
// ErrorMessage holds the value of the "error_message" field.
|
||||||
|
ErrorMessage *string `json:"error_message,omitempty"`
|
||||||
|
// LastUsedAt holds the value of the "last_used_at" field.
|
||||||
|
LastUsedAt *time.Time `json:"last_used_at,omitempty"`
|
||||||
|
// Account expiration time (NULL means no expiration).
|
||||||
|
ExpiresAt *time.Time `json:"expires_at,omitempty"`
|
||||||
|
// Auto pause scheduling when account expires.
|
||||||
|
AutoPauseOnExpired bool `json:"auto_pause_on_expired,omitempty"`
|
||||||
|
// Schedulable holds the value of the "schedulable" field.
|
||||||
|
Schedulable bool `json:"schedulable,omitempty"`
|
||||||
|
// RateLimitedAt holds the value of the "rate_limited_at" field.
|
||||||
|
RateLimitedAt *time.Time `json:"rate_limited_at,omitempty"`
|
||||||
|
// RateLimitResetAt holds the value of the "rate_limit_reset_at" field.
|
||||||
|
RateLimitResetAt *time.Time `json:"rate_limit_reset_at,omitempty"`
|
||||||
|
// OverloadUntil holds the value of the "overload_until" field.
|
||||||
|
OverloadUntil *time.Time `json:"overload_until,omitempty"`
|
||||||
|
// TempUnschedulableUntil holds the value of the "temp_unschedulable_until" field.
|
||||||
|
TempUnschedulableUntil *time.Time `json:"temp_unschedulable_until,omitempty"`
|
||||||
|
// TempUnschedulableReason holds the value of the "temp_unschedulable_reason" field.
|
||||||
|
TempUnschedulableReason *string `json:"temp_unschedulable_reason,omitempty"`
|
||||||
|
// SessionWindowStart holds the value of the "session_window_start" field.
|
||||||
|
SessionWindowStart *time.Time `json:"session_window_start,omitempty"`
|
||||||
|
// SessionWindowEnd holds the value of the "session_window_end" field.
|
||||||
|
SessionWindowEnd *time.Time `json:"session_window_end,omitempty"`
|
||||||
|
// SessionWindowStatus holds the value of the "session_window_status" field.
|
||||||
|
SessionWindowStatus *string `json:"session_window_status,omitempty"`
|
||||||
|
// Edges holds the relations/edges for other nodes in the graph.
|
||||||
|
// The values are being populated by the AccountQuery when eager-loading is set.
|
||||||
|
Edges AccountEdges `json:"edges"`
|
||||||
|
selectValues sql.SelectValues
|
||||||
|
}
|
||||||
|
|
||||||
|
// AccountEdges holds the relations/edges for other nodes in the graph.
|
||||||
|
type AccountEdges struct {
|
||||||
|
// Groups holds the value of the groups edge.
|
||||||
|
Groups []*Group `json:"groups,omitempty"`
|
||||||
|
// Proxy holds the value of the proxy edge.
|
||||||
|
Proxy *Proxy `json:"proxy,omitempty"`
|
||||||
|
// UsageLogs holds the value of the usage_logs edge.
|
||||||
|
UsageLogs []*UsageLog `json:"usage_logs,omitempty"`
|
||||||
|
// AccountGroups holds the value of the account_groups edge.
|
||||||
|
AccountGroups []*AccountGroup `json:"account_groups,omitempty"`
|
||||||
|
// loadedTypes holds the information for reporting if a
|
||||||
|
// type was loaded (or requested) in eager-loading or not.
|
||||||
|
loadedTypes [4]bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// GroupsOrErr returns the Groups value or an error if the edge
|
||||||
|
// was not loaded in eager-loading.
|
||||||
|
func (e AccountEdges) GroupsOrErr() ([]*Group, error) {
|
||||||
|
if e.loadedTypes[0] {
|
||||||
|
return e.Groups, nil
|
||||||
|
}
|
||||||
|
return nil, &NotLoadedError{edge: "groups"}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProxyOrErr returns the Proxy value or an error if the edge
|
||||||
|
// was not loaded in eager-loading, or loaded but was not found.
|
||||||
|
func (e AccountEdges) ProxyOrErr() (*Proxy, error) {
|
||||||
|
if e.Proxy != nil {
|
||||||
|
return e.Proxy, nil
|
||||||
|
} else if e.loadedTypes[1] {
|
||||||
|
return nil, &NotFoundError{label: proxy.Label}
|
||||||
|
}
|
||||||
|
return nil, &NotLoadedError{edge: "proxy"}
|
||||||
|
}
|
||||||
|
|
||||||
|
// UsageLogsOrErr returns the UsageLogs value or an error if the edge
|
||||||
|
// was not loaded in eager-loading.
|
||||||
|
func (e AccountEdges) UsageLogsOrErr() ([]*UsageLog, error) {
|
||||||
|
if e.loadedTypes[2] {
|
||||||
|
return e.UsageLogs, nil
|
||||||
|
}
|
||||||
|
return nil, &NotLoadedError{edge: "usage_logs"}
|
||||||
|
}
|
||||||
|
|
||||||
|
// AccountGroupsOrErr returns the AccountGroups value or an error if the edge
|
||||||
|
// was not loaded in eager-loading.
|
||||||
|
func (e AccountEdges) AccountGroupsOrErr() ([]*AccountGroup, error) {
|
||||||
|
if e.loadedTypes[3] {
|
||||||
|
return e.AccountGroups, nil
|
||||||
|
}
|
||||||
|
return nil, &NotLoadedError{edge: "account_groups"}
|
||||||
|
}
|
||||||
|
|
||||||
|
// scanValues returns the types for scanning values from sql.Rows.
|
||||||
|
func (*Account) scanValues(columns []string) ([]any, error) {
|
||||||
|
values := make([]any, len(columns))
|
||||||
|
for i := range columns {
|
||||||
|
switch columns[i] {
|
||||||
|
case account.FieldCredentials, account.FieldExtra:
|
||||||
|
values[i] = new([]byte)
|
||||||
|
case account.FieldAutoPauseOnExpired, account.FieldSchedulable:
|
||||||
|
values[i] = new(sql.NullBool)
|
||||||
|
case account.FieldRateMultiplier:
|
||||||
|
values[i] = new(sql.NullFloat64)
|
||||||
|
case account.FieldID, account.FieldProxyID, account.FieldConcurrency, account.FieldLoadFactor, account.FieldPriority:
|
||||||
|
values[i] = new(sql.NullInt64)
|
||||||
|
case account.FieldName, account.FieldNotes, account.FieldPlatform, account.FieldType, account.FieldStatus, account.FieldErrorMessage, account.FieldTempUnschedulableReason, account.FieldSessionWindowStatus:
|
||||||
|
values[i] = new(sql.NullString)
|
||||||
|
case account.FieldCreatedAt, account.FieldUpdatedAt, account.FieldDeletedAt, account.FieldLastUsedAt, account.FieldExpiresAt, account.FieldRateLimitedAt, account.FieldRateLimitResetAt, account.FieldOverloadUntil, account.FieldTempUnschedulableUntil, account.FieldSessionWindowStart, account.FieldSessionWindowEnd:
|
||||||
|
values[i] = new(sql.NullTime)
|
||||||
|
default:
|
||||||
|
values[i] = new(sql.UnknownType)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return values, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// assignValues assigns the values that were returned from sql.Rows (after scanning)
|
||||||
|
// to the Account fields.
|
||||||
|
func (_m *Account) assignValues(columns []string, values []any) error {
|
||||||
|
if m, n := len(values), len(columns); m < n {
|
||||||
|
return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
|
||||||
|
}
|
||||||
|
for i := range columns {
|
||||||
|
switch columns[i] {
|
||||||
|
case account.FieldID:
|
||||||
|
value, ok := values[i].(*sql.NullInt64)
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field id", value)
|
||||||
|
}
|
||||||
|
_m.ID = int64(value.Int64)
|
||||||
|
case account.FieldCreatedAt:
|
||||||
|
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field created_at", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.CreatedAt = value.Time
|
||||||
|
}
|
||||||
|
case account.FieldUpdatedAt:
|
||||||
|
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field updated_at", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.UpdatedAt = value.Time
|
||||||
|
}
|
||||||
|
case account.FieldDeletedAt:
|
||||||
|
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field deleted_at", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.DeletedAt = new(time.Time)
|
||||||
|
*_m.DeletedAt = value.Time
|
||||||
|
}
|
||||||
|
case account.FieldName:
|
||||||
|
if value, ok := values[i].(*sql.NullString); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field name", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.Name = value.String
|
||||||
|
}
|
||||||
|
case account.FieldNotes:
|
||||||
|
if value, ok := values[i].(*sql.NullString); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field notes", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.Notes = new(string)
|
||||||
|
*_m.Notes = value.String
|
||||||
|
}
|
||||||
|
case account.FieldPlatform:
|
||||||
|
if value, ok := values[i].(*sql.NullString); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field platform", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.Platform = value.String
|
||||||
|
}
|
||||||
|
case account.FieldType:
|
||||||
|
if value, ok := values[i].(*sql.NullString); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field type", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.Type = value.String
|
||||||
|
}
|
||||||
|
case account.FieldCredentials:
|
||||||
|
if value, ok := values[i].(*[]byte); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field credentials", values[i])
|
||||||
|
} else if value != nil && len(*value) > 0 {
|
||||||
|
if err := json.Unmarshal(*value, &_m.Credentials); err != nil {
|
||||||
|
return fmt.Errorf("unmarshal field credentials: %w", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case account.FieldExtra:
|
||||||
|
if value, ok := values[i].(*[]byte); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field extra", values[i])
|
||||||
|
} else if value != nil && len(*value) > 0 {
|
||||||
|
if err := json.Unmarshal(*value, &_m.Extra); err != nil {
|
||||||
|
return fmt.Errorf("unmarshal field extra: %w", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case account.FieldProxyID:
|
||||||
|
if value, ok := values[i].(*sql.NullInt64); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field proxy_id", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.ProxyID = new(int64)
|
||||||
|
*_m.ProxyID = value.Int64
|
||||||
|
}
|
||||||
|
case account.FieldConcurrency:
|
||||||
|
if value, ok := values[i].(*sql.NullInt64); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field concurrency", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.Concurrency = int(value.Int64)
|
||||||
|
}
|
||||||
|
case account.FieldLoadFactor:
|
||||||
|
if value, ok := values[i].(*sql.NullInt64); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field load_factor", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.LoadFactor = new(int)
|
||||||
|
*_m.LoadFactor = int(value.Int64)
|
||||||
|
}
|
||||||
|
case account.FieldPriority:
|
||||||
|
if value, ok := values[i].(*sql.NullInt64); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field priority", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.Priority = int(value.Int64)
|
||||||
|
}
|
||||||
|
case account.FieldRateMultiplier:
|
||||||
|
if value, ok := values[i].(*sql.NullFloat64); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field rate_multiplier", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.RateMultiplier = value.Float64
|
||||||
|
}
|
||||||
|
case account.FieldStatus:
|
||||||
|
if value, ok := values[i].(*sql.NullString); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field status", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.Status = value.String
|
||||||
|
}
|
||||||
|
case account.FieldErrorMessage:
|
||||||
|
if value, ok := values[i].(*sql.NullString); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field error_message", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.ErrorMessage = new(string)
|
||||||
|
*_m.ErrorMessage = value.String
|
||||||
|
}
|
||||||
|
case account.FieldLastUsedAt:
|
||||||
|
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field last_used_at", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.LastUsedAt = new(time.Time)
|
||||||
|
*_m.LastUsedAt = value.Time
|
||||||
|
}
|
||||||
|
case account.FieldExpiresAt:
|
||||||
|
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field expires_at", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.ExpiresAt = new(time.Time)
|
||||||
|
*_m.ExpiresAt = value.Time
|
||||||
|
}
|
||||||
|
case account.FieldAutoPauseOnExpired:
|
||||||
|
if value, ok := values[i].(*sql.NullBool); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field auto_pause_on_expired", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.AutoPauseOnExpired = value.Bool
|
||||||
|
}
|
||||||
|
case account.FieldSchedulable:
|
||||||
|
if value, ok := values[i].(*sql.NullBool); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field schedulable", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.Schedulable = value.Bool
|
||||||
|
}
|
||||||
|
case account.FieldRateLimitedAt:
|
||||||
|
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field rate_limited_at", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.RateLimitedAt = new(time.Time)
|
||||||
|
*_m.RateLimitedAt = value.Time
|
||||||
|
}
|
||||||
|
case account.FieldRateLimitResetAt:
|
||||||
|
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field rate_limit_reset_at", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.RateLimitResetAt = new(time.Time)
|
||||||
|
*_m.RateLimitResetAt = value.Time
|
||||||
|
}
|
||||||
|
case account.FieldOverloadUntil:
|
||||||
|
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field overload_until", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.OverloadUntil = new(time.Time)
|
||||||
|
*_m.OverloadUntil = value.Time
|
||||||
|
}
|
||||||
|
case account.FieldTempUnschedulableUntil:
|
||||||
|
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field temp_unschedulable_until", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.TempUnschedulableUntil = new(time.Time)
|
||||||
|
*_m.TempUnschedulableUntil = value.Time
|
||||||
|
}
|
||||||
|
case account.FieldTempUnschedulableReason:
|
||||||
|
if value, ok := values[i].(*sql.NullString); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field temp_unschedulable_reason", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.TempUnschedulableReason = new(string)
|
||||||
|
*_m.TempUnschedulableReason = value.String
|
||||||
|
}
|
||||||
|
case account.FieldSessionWindowStart:
|
||||||
|
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field session_window_start", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.SessionWindowStart = new(time.Time)
|
||||||
|
*_m.SessionWindowStart = value.Time
|
||||||
|
}
|
||||||
|
case account.FieldSessionWindowEnd:
|
||||||
|
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field session_window_end", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.SessionWindowEnd = new(time.Time)
|
||||||
|
*_m.SessionWindowEnd = value.Time
|
||||||
|
}
|
||||||
|
case account.FieldSessionWindowStatus:
|
||||||
|
if value, ok := values[i].(*sql.NullString); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field session_window_status", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.SessionWindowStatus = new(string)
|
||||||
|
*_m.SessionWindowStatus = value.String
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
_m.selectValues.Set(columns[i], values[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Value returns the ent.Value that was dynamically selected and assigned to the Account.
|
||||||
|
// This includes values selected through modifiers, order, etc.
|
||||||
|
func (_m *Account) Value(name string) (ent.Value, error) {
|
||||||
|
return _m.selectValues.Get(name)
|
||||||
|
}
|
||||||
|
|
||||||
|
// QueryGroups queries the "groups" edge of the Account entity.
|
||||||
|
func (_m *Account) QueryGroups() *GroupQuery {
|
||||||
|
return NewAccountClient(_m.config).QueryGroups(_m)
|
||||||
|
}
|
||||||
|
|
||||||
|
// QueryProxy queries the "proxy" edge of the Account entity.
|
||||||
|
func (_m *Account) QueryProxy() *ProxyQuery {
|
||||||
|
return NewAccountClient(_m.config).QueryProxy(_m)
|
||||||
|
}
|
||||||
|
|
||||||
|
// QueryUsageLogs queries the "usage_logs" edge of the Account entity.
|
||||||
|
func (_m *Account) QueryUsageLogs() *UsageLogQuery {
|
||||||
|
return NewAccountClient(_m.config).QueryUsageLogs(_m)
|
||||||
|
}
|
||||||
|
|
||||||
|
// QueryAccountGroups queries the "account_groups" edge of the Account entity.
|
||||||
|
func (_m *Account) QueryAccountGroups() *AccountGroupQuery {
|
||||||
|
return NewAccountClient(_m.config).QueryAccountGroups(_m)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update returns a builder for updating this Account.
|
||||||
|
// Note that you need to call Account.Unwrap() before calling this method if this Account
|
||||||
|
// was returned from a transaction, and the transaction was committed or rolled back.
|
||||||
|
func (_m *Account) Update() *AccountUpdateOne {
|
||||||
|
return NewAccountClient(_m.config).UpdateOne(_m)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unwrap unwraps the Account entity that was returned from a transaction after it was closed,
|
||||||
|
// so that all future queries will be executed through the driver which created the transaction.
|
||||||
|
func (_m *Account) Unwrap() *Account {
|
||||||
|
_tx, ok := _m.config.driver.(*txDriver)
|
||||||
|
if !ok {
|
||||||
|
panic("ent: Account is not a transactional entity")
|
||||||
|
}
|
||||||
|
_m.config.driver = _tx.drv
|
||||||
|
return _m
|
||||||
|
}
|
||||||
|
|
||||||
|
// String implements the fmt.Stringer.
|
||||||
|
func (_m *Account) String() string {
|
||||||
|
var builder strings.Builder
|
||||||
|
builder.WriteString("Account(")
|
||||||
|
builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID))
|
||||||
|
builder.WriteString("created_at=")
|
||||||
|
builder.WriteString(_m.CreatedAt.Format(time.ANSIC))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("updated_at=")
|
||||||
|
builder.WriteString(_m.UpdatedAt.Format(time.ANSIC))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
if v := _m.DeletedAt; v != nil {
|
||||||
|
builder.WriteString("deleted_at=")
|
||||||
|
builder.WriteString(v.Format(time.ANSIC))
|
||||||
|
}
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("name=")
|
||||||
|
builder.WriteString(_m.Name)
|
||||||
|
builder.WriteString(", ")
|
||||||
|
if v := _m.Notes; v != nil {
|
||||||
|
builder.WriteString("notes=")
|
||||||
|
builder.WriteString(*v)
|
||||||
|
}
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("platform=")
|
||||||
|
builder.WriteString(_m.Platform)
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("type=")
|
||||||
|
builder.WriteString(_m.Type)
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("credentials=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", _m.Credentials))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("extra=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", _m.Extra))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
if v := _m.ProxyID; v != nil {
|
||||||
|
builder.WriteString("proxy_id=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", *v))
|
||||||
|
}
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("concurrency=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", _m.Concurrency))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
if v := _m.LoadFactor; v != nil {
|
||||||
|
builder.WriteString("load_factor=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", *v))
|
||||||
|
}
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("priority=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", _m.Priority))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("rate_multiplier=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", _m.RateMultiplier))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("status=")
|
||||||
|
builder.WriteString(_m.Status)
|
||||||
|
builder.WriteString(", ")
|
||||||
|
if v := _m.ErrorMessage; v != nil {
|
||||||
|
builder.WriteString("error_message=")
|
||||||
|
builder.WriteString(*v)
|
||||||
|
}
|
||||||
|
builder.WriteString(", ")
|
||||||
|
if v := _m.LastUsedAt; v != nil {
|
||||||
|
builder.WriteString("last_used_at=")
|
||||||
|
builder.WriteString(v.Format(time.ANSIC))
|
||||||
|
}
|
||||||
|
builder.WriteString(", ")
|
||||||
|
if v := _m.ExpiresAt; v != nil {
|
||||||
|
builder.WriteString("expires_at=")
|
||||||
|
builder.WriteString(v.Format(time.ANSIC))
|
||||||
|
}
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("auto_pause_on_expired=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", _m.AutoPauseOnExpired))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("schedulable=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", _m.Schedulable))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
if v := _m.RateLimitedAt; v != nil {
|
||||||
|
builder.WriteString("rate_limited_at=")
|
||||||
|
builder.WriteString(v.Format(time.ANSIC))
|
||||||
|
}
|
||||||
|
builder.WriteString(", ")
|
||||||
|
if v := _m.RateLimitResetAt; v != nil {
|
||||||
|
builder.WriteString("rate_limit_reset_at=")
|
||||||
|
builder.WriteString(v.Format(time.ANSIC))
|
||||||
|
}
|
||||||
|
builder.WriteString(", ")
|
||||||
|
if v := _m.OverloadUntil; v != nil {
|
||||||
|
builder.WriteString("overload_until=")
|
||||||
|
builder.WriteString(v.Format(time.ANSIC))
|
||||||
|
}
|
||||||
|
builder.WriteString(", ")
|
||||||
|
if v := _m.TempUnschedulableUntil; v != nil {
|
||||||
|
builder.WriteString("temp_unschedulable_until=")
|
||||||
|
builder.WriteString(v.Format(time.ANSIC))
|
||||||
|
}
|
||||||
|
builder.WriteString(", ")
|
||||||
|
if v := _m.TempUnschedulableReason; v != nil {
|
||||||
|
builder.WriteString("temp_unschedulable_reason=")
|
||||||
|
builder.WriteString(*v)
|
||||||
|
}
|
||||||
|
builder.WriteString(", ")
|
||||||
|
if v := _m.SessionWindowStart; v != nil {
|
||||||
|
builder.WriteString("session_window_start=")
|
||||||
|
builder.WriteString(v.Format(time.ANSIC))
|
||||||
|
}
|
||||||
|
builder.WriteString(", ")
|
||||||
|
if v := _m.SessionWindowEnd; v != nil {
|
||||||
|
builder.WriteString("session_window_end=")
|
||||||
|
builder.WriteString(v.Format(time.ANSIC))
|
||||||
|
}
|
||||||
|
builder.WriteString(", ")
|
||||||
|
if v := _m.SessionWindowStatus; v != nil {
|
||||||
|
builder.WriteString("session_window_status=")
|
||||||
|
builder.WriteString(*v)
|
||||||
|
}
|
||||||
|
builder.WriteByte(')')
|
||||||
|
return builder.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Accounts is a parsable slice of Account.
|
||||||
|
type Accounts []*Account
|
||||||
416
backend/ent/account/account.go
Normal file
@@ -0,0 +1,416 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package account
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"entgo.io/ent"
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// Label holds the string label denoting the account type in the database.
|
||||||
|
Label = "account"
|
||||||
|
// FieldID holds the string denoting the id field in the database.
|
||||||
|
FieldID = "id"
|
||||||
|
// FieldCreatedAt holds the string denoting the created_at field in the database.
|
||||||
|
FieldCreatedAt = "created_at"
|
||||||
|
// FieldUpdatedAt holds the string denoting the updated_at field in the database.
|
||||||
|
FieldUpdatedAt = "updated_at"
|
||||||
|
// FieldDeletedAt holds the string denoting the deleted_at field in the database.
|
||||||
|
FieldDeletedAt = "deleted_at"
|
||||||
|
// FieldName holds the string denoting the name field in the database.
|
||||||
|
FieldName = "name"
|
||||||
|
// FieldNotes holds the string denoting the notes field in the database.
|
||||||
|
FieldNotes = "notes"
|
||||||
|
// FieldPlatform holds the string denoting the platform field in the database.
|
||||||
|
FieldPlatform = "platform"
|
||||||
|
// FieldType holds the string denoting the type field in the database.
|
||||||
|
FieldType = "type"
|
||||||
|
// FieldCredentials holds the string denoting the credentials field in the database.
|
||||||
|
FieldCredentials = "credentials"
|
||||||
|
// FieldExtra holds the string denoting the extra field in the database.
|
||||||
|
FieldExtra = "extra"
|
||||||
|
// FieldProxyID holds the string denoting the proxy_id field in the database.
|
||||||
|
FieldProxyID = "proxy_id"
|
||||||
|
// FieldConcurrency holds the string denoting the concurrency field in the database.
|
||||||
|
FieldConcurrency = "concurrency"
|
||||||
|
// FieldLoadFactor holds the string denoting the load_factor field in the database.
|
||||||
|
FieldLoadFactor = "load_factor"
|
||||||
|
// FieldPriority holds the string denoting the priority field in the database.
|
||||||
|
FieldPriority = "priority"
|
||||||
|
// FieldRateMultiplier holds the string denoting the rate_multiplier field in the database.
|
||||||
|
FieldRateMultiplier = "rate_multiplier"
|
||||||
|
// FieldStatus holds the string denoting the status field in the database.
|
||||||
|
FieldStatus = "status"
|
||||||
|
// FieldErrorMessage holds the string denoting the error_message field in the database.
|
||||||
|
FieldErrorMessage = "error_message"
|
||||||
|
// FieldLastUsedAt holds the string denoting the last_used_at field in the database.
|
||||||
|
FieldLastUsedAt = "last_used_at"
|
||||||
|
// FieldExpiresAt holds the string denoting the expires_at field in the database.
|
||||||
|
FieldExpiresAt = "expires_at"
|
||||||
|
// FieldAutoPauseOnExpired holds the string denoting the auto_pause_on_expired field in the database.
|
||||||
|
FieldAutoPauseOnExpired = "auto_pause_on_expired"
|
||||||
|
// FieldSchedulable holds the string denoting the schedulable field in the database.
|
||||||
|
FieldSchedulable = "schedulable"
|
||||||
|
// FieldRateLimitedAt holds the string denoting the rate_limited_at field in the database.
|
||||||
|
FieldRateLimitedAt = "rate_limited_at"
|
||||||
|
// FieldRateLimitResetAt holds the string denoting the rate_limit_reset_at field in the database.
|
||||||
|
FieldRateLimitResetAt = "rate_limit_reset_at"
|
||||||
|
// FieldOverloadUntil holds the string denoting the overload_until field in the database.
|
||||||
|
FieldOverloadUntil = "overload_until"
|
||||||
|
// FieldTempUnschedulableUntil holds the string denoting the temp_unschedulable_until field in the database.
|
||||||
|
FieldTempUnschedulableUntil = "temp_unschedulable_until"
|
||||||
|
// FieldTempUnschedulableReason holds the string denoting the temp_unschedulable_reason field in the database.
|
||||||
|
FieldTempUnschedulableReason = "temp_unschedulable_reason"
|
||||||
|
// FieldSessionWindowStart holds the string denoting the session_window_start field in the database.
|
||||||
|
FieldSessionWindowStart = "session_window_start"
|
||||||
|
// FieldSessionWindowEnd holds the string denoting the session_window_end field in the database.
|
||||||
|
FieldSessionWindowEnd = "session_window_end"
|
||||||
|
// FieldSessionWindowStatus holds the string denoting the session_window_status field in the database.
|
||||||
|
FieldSessionWindowStatus = "session_window_status"
|
||||||
|
// EdgeGroups holds the string denoting the groups edge name in mutations.
|
||||||
|
EdgeGroups = "groups"
|
||||||
|
// EdgeProxy holds the string denoting the proxy edge name in mutations.
|
||||||
|
EdgeProxy = "proxy"
|
||||||
|
// EdgeUsageLogs holds the string denoting the usage_logs edge name in mutations.
|
||||||
|
EdgeUsageLogs = "usage_logs"
|
||||||
|
// EdgeAccountGroups holds the string denoting the account_groups edge name in mutations.
|
||||||
|
EdgeAccountGroups = "account_groups"
|
||||||
|
// Table holds the table name of the account in the database.
|
||||||
|
Table = "accounts"
|
||||||
|
// GroupsTable is the table that holds the groups relation/edge. The primary key declared below.
|
||||||
|
GroupsTable = "account_groups"
|
||||||
|
// GroupsInverseTable is the table name for the Group entity.
|
||||||
|
// It exists in this package in order to avoid circular dependency with the "group" package.
|
||||||
|
GroupsInverseTable = "groups"
|
||||||
|
// ProxyTable is the table that holds the proxy relation/edge.
|
||||||
|
ProxyTable = "accounts"
|
||||||
|
// ProxyInverseTable is the table name for the Proxy entity.
|
||||||
|
// It exists in this package in order to avoid circular dependency with the "proxy" package.
|
||||||
|
ProxyInverseTable = "proxies"
|
||||||
|
// ProxyColumn is the table column denoting the proxy relation/edge.
|
||||||
|
ProxyColumn = "proxy_id"
|
||||||
|
// UsageLogsTable is the table that holds the usage_logs relation/edge.
|
||||||
|
UsageLogsTable = "usage_logs"
|
||||||
|
// UsageLogsInverseTable is the table name for the UsageLog entity.
|
||||||
|
// It exists in this package in order to avoid circular dependency with the "usagelog" package.
|
||||||
|
UsageLogsInverseTable = "usage_logs"
|
||||||
|
// UsageLogsColumn is the table column denoting the usage_logs relation/edge.
|
||||||
|
UsageLogsColumn = "account_id"
|
||||||
|
// AccountGroupsTable is the table that holds the account_groups relation/edge.
|
||||||
|
AccountGroupsTable = "account_groups"
|
||||||
|
// AccountGroupsInverseTable is the table name for the AccountGroup entity.
|
||||||
|
// It exists in this package in order to avoid circular dependency with the "accountgroup" package.
|
||||||
|
AccountGroupsInverseTable = "account_groups"
|
||||||
|
// AccountGroupsColumn is the table column denoting the account_groups relation/edge.
|
||||||
|
AccountGroupsColumn = "account_id"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Columns holds all SQL columns for account fields.
|
||||||
|
var Columns = []string{
|
||||||
|
FieldID,
|
||||||
|
FieldCreatedAt,
|
||||||
|
FieldUpdatedAt,
|
||||||
|
FieldDeletedAt,
|
||||||
|
FieldName,
|
||||||
|
FieldNotes,
|
||||||
|
FieldPlatform,
|
||||||
|
FieldType,
|
||||||
|
FieldCredentials,
|
||||||
|
FieldExtra,
|
||||||
|
FieldProxyID,
|
||||||
|
FieldConcurrency,
|
||||||
|
FieldLoadFactor,
|
||||||
|
FieldPriority,
|
||||||
|
FieldRateMultiplier,
|
||||||
|
FieldStatus,
|
||||||
|
FieldErrorMessage,
|
||||||
|
FieldLastUsedAt,
|
||||||
|
FieldExpiresAt,
|
||||||
|
FieldAutoPauseOnExpired,
|
||||||
|
FieldSchedulable,
|
||||||
|
FieldRateLimitedAt,
|
||||||
|
FieldRateLimitResetAt,
|
||||||
|
FieldOverloadUntil,
|
||||||
|
FieldTempUnschedulableUntil,
|
||||||
|
FieldTempUnschedulableReason,
|
||||||
|
FieldSessionWindowStart,
|
||||||
|
FieldSessionWindowEnd,
|
||||||
|
FieldSessionWindowStatus,
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
// GroupsPrimaryKey and GroupsColumn2 are the table columns denoting the
|
||||||
|
// primary key for the groups relation (M2M).
|
||||||
|
GroupsPrimaryKey = []string{"account_id", "group_id"}
|
||||||
|
)
|
||||||
|
|
||||||
|
// ValidColumn reports if the column name is valid (part of the table columns).
|
||||||
|
func ValidColumn(column string) bool {
|
||||||
|
for i := range Columns {
|
||||||
|
if column == Columns[i] {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Note that the variables below are initialized by the runtime
|
||||||
|
// package on the initialization of the application. Therefore,
|
||||||
|
// it should be imported in the main as follows:
|
||||||
|
//
|
||||||
|
// import _ "github.com/Wei-Shaw/sub2api/ent/runtime"
|
||||||
|
var (
|
||||||
|
Hooks [1]ent.Hook
|
||||||
|
Interceptors [1]ent.Interceptor
|
||||||
|
// DefaultCreatedAt holds the default value on creation for the "created_at" field.
|
||||||
|
DefaultCreatedAt func() time.Time
|
||||||
|
// DefaultUpdatedAt holds the default value on creation for the "updated_at" field.
|
||||||
|
DefaultUpdatedAt func() time.Time
|
||||||
|
// UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field.
|
||||||
|
UpdateDefaultUpdatedAt func() time.Time
|
||||||
|
// NameValidator is a validator for the "name" field. It is called by the builders before save.
|
||||||
|
NameValidator func(string) error
|
||||||
|
// PlatformValidator is a validator for the "platform" field. It is called by the builders before save.
|
||||||
|
PlatformValidator func(string) error
|
||||||
|
// TypeValidator is a validator for the "type" field. It is called by the builders before save.
|
||||||
|
TypeValidator func(string) error
|
||||||
|
// DefaultCredentials holds the default value on creation for the "credentials" field.
|
||||||
|
DefaultCredentials func() map[string]interface{}
|
||||||
|
// DefaultExtra holds the default value on creation for the "extra" field.
|
||||||
|
DefaultExtra func() map[string]interface{}
|
||||||
|
// DefaultConcurrency holds the default value on creation for the "concurrency" field.
|
||||||
|
DefaultConcurrency int
|
||||||
|
// DefaultPriority holds the default value on creation for the "priority" field.
|
||||||
|
DefaultPriority int
|
||||||
|
// DefaultRateMultiplier holds the default value on creation for the "rate_multiplier" field.
|
||||||
|
DefaultRateMultiplier float64
|
||||||
|
// DefaultStatus holds the default value on creation for the "status" field.
|
||||||
|
DefaultStatus string
|
||||||
|
// StatusValidator is a validator for the "status" field. It is called by the builders before save.
|
||||||
|
StatusValidator func(string) error
|
||||||
|
// DefaultAutoPauseOnExpired holds the default value on creation for the "auto_pause_on_expired" field.
|
||||||
|
DefaultAutoPauseOnExpired bool
|
||||||
|
// DefaultSchedulable holds the default value on creation for the "schedulable" field.
|
||||||
|
DefaultSchedulable bool
|
||||||
|
// SessionWindowStatusValidator is a validator for the "session_window_status" field. It is called by the builders before save.
|
||||||
|
SessionWindowStatusValidator func(string) error
|
||||||
|
)
|
||||||
|
|
||||||
|
// OrderOption defines the ordering options for the Account queries.
|
||||||
|
type OrderOption func(*sql.Selector)
|
||||||
|
|
||||||
|
// ByID orders the results by the id field.
|
||||||
|
func ByID(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldID, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByCreatedAt orders the results by the created_at field.
|
||||||
|
func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByUpdatedAt orders the results by the updated_at field.
|
||||||
|
func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByDeletedAt orders the results by the deleted_at field.
|
||||||
|
func ByDeletedAt(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldDeletedAt, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByName orders the results by the name field.
|
||||||
|
func ByName(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldName, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByNotes orders the results by the notes field.
|
||||||
|
func ByNotes(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldNotes, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByPlatform orders the results by the platform field.
|
||||||
|
func ByPlatform(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldPlatform, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByType orders the results by the type field.
|
||||||
|
func ByType(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldType, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByProxyID orders the results by the proxy_id field.
|
||||||
|
func ByProxyID(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldProxyID, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByConcurrency orders the results by the concurrency field.
|
||||||
|
func ByConcurrency(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldConcurrency, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByLoadFactor orders the results by the load_factor field.
|
||||||
|
func ByLoadFactor(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldLoadFactor, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByPriority orders the results by the priority field.
|
||||||
|
func ByPriority(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldPriority, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByRateMultiplier orders the results by the rate_multiplier field.
|
||||||
|
func ByRateMultiplier(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldRateMultiplier, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByStatus orders the results by the status field.
|
||||||
|
func ByStatus(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldStatus, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByErrorMessage orders the results by the error_message field.
|
||||||
|
func ByErrorMessage(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldErrorMessage, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByLastUsedAt orders the results by the last_used_at field.
|
||||||
|
func ByLastUsedAt(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldLastUsedAt, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByExpiresAt orders the results by the expires_at field.
|
||||||
|
func ByExpiresAt(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldExpiresAt, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByAutoPauseOnExpired orders the results by the auto_pause_on_expired field.
|
||||||
|
func ByAutoPauseOnExpired(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldAutoPauseOnExpired, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// BySchedulable orders the results by the schedulable field.
|
||||||
|
func BySchedulable(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldSchedulable, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByRateLimitedAt orders the results by the rate_limited_at field.
|
||||||
|
func ByRateLimitedAt(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldRateLimitedAt, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByRateLimitResetAt orders the results by the rate_limit_reset_at field.
|
||||||
|
func ByRateLimitResetAt(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldRateLimitResetAt, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByOverloadUntil orders the results by the overload_until field.
|
||||||
|
func ByOverloadUntil(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldOverloadUntil, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByTempUnschedulableUntil orders the results by the temp_unschedulable_until field.
|
||||||
|
func ByTempUnschedulableUntil(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldTempUnschedulableUntil, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByTempUnschedulableReason orders the results by the temp_unschedulable_reason field.
|
||||||
|
func ByTempUnschedulableReason(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldTempUnschedulableReason, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// BySessionWindowStart orders the results by the session_window_start field.
|
||||||
|
func BySessionWindowStart(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldSessionWindowStart, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// BySessionWindowEnd orders the results by the session_window_end field.
|
||||||
|
func BySessionWindowEnd(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldSessionWindowEnd, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// BySessionWindowStatus orders the results by the session_window_status field.
|
||||||
|
func BySessionWindowStatus(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldSessionWindowStatus, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByGroupsCount orders the results by groups count.
|
||||||
|
func ByGroupsCount(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return func(s *sql.Selector) {
|
||||||
|
sqlgraph.OrderByNeighborsCount(s, newGroupsStep(), opts...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByGroups orders the results by groups terms.
|
||||||
|
func ByGroups(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
|
||||||
|
return func(s *sql.Selector) {
|
||||||
|
sqlgraph.OrderByNeighborTerms(s, newGroupsStep(), append([]sql.OrderTerm{term}, terms...)...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByProxyField orders the results by proxy field.
|
||||||
|
func ByProxyField(field string, opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return func(s *sql.Selector) {
|
||||||
|
sqlgraph.OrderByNeighborTerms(s, newProxyStep(), sql.OrderByField(field, opts...))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByUsageLogsCount orders the results by usage_logs count.
|
||||||
|
func ByUsageLogsCount(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return func(s *sql.Selector) {
|
||||||
|
sqlgraph.OrderByNeighborsCount(s, newUsageLogsStep(), opts...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByUsageLogs orders the results by usage_logs terms.
|
||||||
|
func ByUsageLogs(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
|
||||||
|
return func(s *sql.Selector) {
|
||||||
|
sqlgraph.OrderByNeighborTerms(s, newUsageLogsStep(), append([]sql.OrderTerm{term}, terms...)...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByAccountGroupsCount orders the results by account_groups count.
|
||||||
|
func ByAccountGroupsCount(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return func(s *sql.Selector) {
|
||||||
|
sqlgraph.OrderByNeighborsCount(s, newAccountGroupsStep(), opts...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByAccountGroups orders the results by account_groups terms.
|
||||||
|
func ByAccountGroups(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
|
||||||
|
return func(s *sql.Selector) {
|
||||||
|
sqlgraph.OrderByNeighborTerms(s, newAccountGroupsStep(), append([]sql.OrderTerm{term}, terms...)...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func newGroupsStep() *sqlgraph.Step {
|
||||||
|
return sqlgraph.NewStep(
|
||||||
|
sqlgraph.From(Table, FieldID),
|
||||||
|
sqlgraph.To(GroupsInverseTable, FieldID),
|
||||||
|
sqlgraph.Edge(sqlgraph.M2M, false, GroupsTable, GroupsPrimaryKey...),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
func newProxyStep() *sqlgraph.Step {
|
||||||
|
return sqlgraph.NewStep(
|
||||||
|
sqlgraph.From(Table, FieldID),
|
||||||
|
sqlgraph.To(ProxyInverseTable, FieldID),
|
||||||
|
sqlgraph.Edge(sqlgraph.M2O, false, ProxyTable, ProxyColumn),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
func newUsageLogsStep() *sqlgraph.Step {
|
||||||
|
return sqlgraph.NewStep(
|
||||||
|
sqlgraph.From(Table, FieldID),
|
||||||
|
sqlgraph.To(UsageLogsInverseTable, FieldID),
|
||||||
|
sqlgraph.Edge(sqlgraph.O2M, false, UsageLogsTable, UsageLogsColumn),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
func newAccountGroupsStep() *sqlgraph.Step {
|
||||||
|
return sqlgraph.NewStep(
|
||||||
|
sqlgraph.From(Table, FieldID),
|
||||||
|
sqlgraph.To(AccountGroupsInverseTable, AccountGroupsColumn),
|
||||||
|
sqlgraph.Edge(sqlgraph.O2M, true, AccountGroupsTable, AccountGroupsColumn),
|
||||||
|
)
|
||||||
|
}
|
||||||
1603
backend/ent/account/where.go
Normal file
2550
backend/ent/account_create.go
Normal file
88
backend/ent/account_delete.go
Normal file
@@ -0,0 +1,88 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package ent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
"entgo.io/ent/schema/field"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/account"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||||
|
)
|
||||||
|
|
||||||
|
// AccountDelete is the builder for deleting a Account entity.
|
||||||
|
type AccountDelete struct {
|
||||||
|
config
|
||||||
|
hooks []Hook
|
||||||
|
mutation *AccountMutation
|
||||||
|
}
|
||||||
|
|
||||||
|
// Where appends a list predicates to the AccountDelete builder.
|
||||||
|
func (_d *AccountDelete) Where(ps ...predicate.Account) *AccountDelete {
|
||||||
|
_d.mutation.Where(ps...)
|
||||||
|
return _d
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exec executes the deletion query and returns how many vertices were deleted.
|
||||||
|
func (_d *AccountDelete) Exec(ctx context.Context) (int, error) {
|
||||||
|
return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
|
||||||
|
func (_d *AccountDelete) ExecX(ctx context.Context) int {
|
||||||
|
n, err := _d.Exec(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_d *AccountDelete) sqlExec(ctx context.Context) (int, error) {
|
||||||
|
_spec := sqlgraph.NewDeleteSpec(account.Table, sqlgraph.NewFieldSpec(account.FieldID, field.TypeInt64))
|
||||||
|
if ps := _d.mutation.predicates; len(ps) > 0 {
|
||||||
|
_spec.Predicate = func(selector *sql.Selector) {
|
||||||
|
for i := range ps {
|
||||||
|
ps[i](selector)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec)
|
||||||
|
if err != nil && sqlgraph.IsConstraintError(err) {
|
||||||
|
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||||
|
}
|
||||||
|
_d.mutation.done = true
|
||||||
|
return affected, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// AccountDeleteOne is the builder for deleting a single Account entity.
|
||||||
|
type AccountDeleteOne struct {
|
||||||
|
_d *AccountDelete
|
||||||
|
}
|
||||||
|
|
||||||
|
// Where appends a list predicates to the AccountDelete builder.
|
||||||
|
func (_d *AccountDeleteOne) Where(ps ...predicate.Account) *AccountDeleteOne {
|
||||||
|
_d._d.mutation.Where(ps...)
|
||||||
|
return _d
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exec executes the deletion query.
|
||||||
|
func (_d *AccountDeleteOne) Exec(ctx context.Context) error {
|
||||||
|
n, err := _d._d.Exec(ctx)
|
||||||
|
switch {
|
||||||
|
case err != nil:
|
||||||
|
return err
|
||||||
|
case n == 0:
|
||||||
|
return &NotFoundError{account.Label}
|
||||||
|
default:
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
|
||||||
|
func (_d *AccountDeleteOne) ExecX(ctx context.Context) {
|
||||||
|
if err := _d.Exec(ctx); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
900
backend/ent/account_query.go
Normal file
@@ -0,0 +1,900 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package ent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"database/sql/driver"
|
||||||
|
"fmt"
|
||||||
|
"math"
|
||||||
|
|
||||||
|
"entgo.io/ent"
|
||||||
|
"entgo.io/ent/dialect"
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
"entgo.io/ent/schema/field"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/account"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/accountgroup"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/group"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/proxy"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/usagelog"
|
||||||
|
)
|
||||||
|
|
||||||
|
// AccountQuery is the builder for querying Account entities.
type AccountQuery struct {
	config
	// ctx holds query-scoped options (limit, offset, unique, fields).
	ctx        *QueryContext
	order      []account.OrderOption
	inters     []Interceptor
	predicates []predicate.Account
	// with* builders, when non-nil, trigger eager-loading of the
	// corresponding edge in sqlAll.
	withGroups        *GroupQuery
	withProxy         *ProxyQuery
	withUsageLogs     *UsageLogQuery
	withAccountGroups *AccountGroupQuery
	// modifiers are applied to the final SQL selector (e.g. FOR UPDATE).
	modifiers []func(*sql.Selector)
	// intermediate query (i.e. traversal path).
	sql  *sql.Selector
	path func(context.Context) (*sql.Selector, error)
}
|
||||||
|
|
||||||
|
// Where adds a new predicate for the AccountQuery builder.
|
||||||
|
func (_q *AccountQuery) Where(ps ...predicate.Account) *AccountQuery {
|
||||||
|
_q.predicates = append(_q.predicates, ps...)
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// Limit the number of records to be returned by this query.
|
||||||
|
func (_q *AccountQuery) Limit(limit int) *AccountQuery {
|
||||||
|
_q.ctx.Limit = &limit
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// Offset to start from.
|
||||||
|
func (_q *AccountQuery) Offset(offset int) *AccountQuery {
|
||||||
|
_q.ctx.Offset = &offset
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unique configures the query builder to filter duplicate records on query.
|
||||||
|
// By default, unique is set to true, and can be disabled using this method.
|
||||||
|
func (_q *AccountQuery) Unique(unique bool) *AccountQuery {
|
||||||
|
_q.ctx.Unique = &unique
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// Order specifies how the records should be ordered.
|
||||||
|
func (_q *AccountQuery) Order(o ...account.OrderOption) *AccountQuery {
|
||||||
|
_q.order = append(_q.order, o...)
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// QueryGroups chains the current query on the "groups" edge.
func (_q *AccountQuery) QueryGroups() *GroupQuery {
	query := (&GroupClient{config: _q.config}).Query()
	// Resolve the traversal lazily so predicates added to this builder
	// after the chain call are still included.
	query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
		if err := _q.prepareQuery(ctx); err != nil {
			return nil, err
		}
		selector := _q.sqlQuery(ctx)
		if err := selector.Err(); err != nil {
			return nil, err
		}
		// M2M step: account -> join table -> group.
		step := sqlgraph.NewStep(
			sqlgraph.From(account.Table, account.FieldID, selector),
			sqlgraph.To(group.Table, group.FieldID),
			sqlgraph.Edge(sqlgraph.M2M, false, account.GroupsTable, account.GroupsPrimaryKey...),
		)
		fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step)
		return fromU, nil
	}
	return query
}
|
||||||
|
|
||||||
|
// QueryProxy chains the current query on the "proxy" edge.
func (_q *AccountQuery) QueryProxy() *ProxyQuery {
	query := (&ProxyClient{config: _q.config}).Query()
	// Resolve the traversal lazily so later predicates are included.
	query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
		if err := _q.prepareQuery(ctx); err != nil {
			return nil, err
		}
		selector := _q.sqlQuery(ctx)
		if err := selector.Err(); err != nil {
			return nil, err
		}
		// M2O step: the account row carries the proxy foreign key.
		step := sqlgraph.NewStep(
			sqlgraph.From(account.Table, account.FieldID, selector),
			sqlgraph.To(proxy.Table, proxy.FieldID),
			sqlgraph.Edge(sqlgraph.M2O, false, account.ProxyTable, account.ProxyColumn),
		)
		fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step)
		return fromU, nil
	}
	return query
}
|
||||||
|
|
||||||
|
// QueryUsageLogs chains the current query on the "usage_logs" edge.
func (_q *AccountQuery) QueryUsageLogs() *UsageLogQuery {
	query := (&UsageLogClient{config: _q.config}).Query()
	// Resolve the traversal lazily so later predicates are included.
	query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
		if err := _q.prepareQuery(ctx); err != nil {
			return nil, err
		}
		selector := _q.sqlQuery(ctx)
		if err := selector.Err(); err != nil {
			return nil, err
		}
		// O2M step: usage_log rows reference the account via a foreign key.
		step := sqlgraph.NewStep(
			sqlgraph.From(account.Table, account.FieldID, selector),
			sqlgraph.To(usagelog.Table, usagelog.FieldID),
			sqlgraph.Edge(sqlgraph.O2M, false, account.UsageLogsTable, account.UsageLogsColumn),
		)
		fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step)
		return fromU, nil
	}
	return query
}
|
||||||
|
|
||||||
|
// QueryAccountGroups chains the current query on the "account_groups" edge.
func (_q *AccountQuery) QueryAccountGroups() *AccountGroupQuery {
	query := (&AccountGroupClient{config: _q.config}).Query()
	// Resolve the traversal lazily so later predicates are included.
	query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
		if err := _q.prepareQuery(ctx); err != nil {
			return nil, err
		}
		selector := _q.sqlQuery(ctx)
		if err := selector.Err(); err != nil {
			return nil, err
		}
		// O2M step to the account_groups edge-schema rows. Note: the To()
		// target column is accountgroup.AccountColumn (not a plain ID),
		// as generated for edge schemas with composite keys — presumably
		// correct for this schema; confirm against the accountgroup package.
		step := sqlgraph.NewStep(
			sqlgraph.From(account.Table, account.FieldID, selector),
			sqlgraph.To(accountgroup.Table, accountgroup.AccountColumn),
			sqlgraph.Edge(sqlgraph.O2M, true, account.AccountGroupsTable, account.AccountGroupsColumn),
		)
		fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step)
		return fromU, nil
	}
	return query
}
|
||||||
|
|
||||||
|
// First returns the first Account entity from the query.
|
||||||
|
// Returns a *NotFoundError when no Account was found.
|
||||||
|
func (_q *AccountQuery) First(ctx context.Context) (*Account, error) {
|
||||||
|
nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if len(nodes) == 0 {
|
||||||
|
return nil, &NotFoundError{account.Label}
|
||||||
|
}
|
||||||
|
return nodes[0], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// FirstX is like First, but panics if an error occurs.
|
||||||
|
func (_q *AccountQuery) FirstX(ctx context.Context) *Account {
|
||||||
|
node, err := _q.First(ctx)
|
||||||
|
if err != nil && !IsNotFound(err) {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return node
|
||||||
|
}
|
||||||
|
|
||||||
|
// FirstID returns the first Account ID from the query.
|
||||||
|
// Returns a *NotFoundError when no Account ID was found.
|
||||||
|
func (_q *AccountQuery) FirstID(ctx context.Context) (id int64, err error) {
|
||||||
|
var ids []int64
|
||||||
|
if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if len(ids) == 0 {
|
||||||
|
err = &NotFoundError{account.Label}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
return ids[0], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// FirstIDX is like FirstID, but panics if an error occurs.
|
||||||
|
func (_q *AccountQuery) FirstIDX(ctx context.Context) int64 {
|
||||||
|
id, err := _q.FirstID(ctx)
|
||||||
|
if err != nil && !IsNotFound(err) {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return id
|
||||||
|
}
|
||||||
|
|
||||||
|
// Only returns a single Account entity found by the query, ensuring it only returns one.
|
||||||
|
// Returns a *NotSingularError when more than one Account entity is found.
|
||||||
|
// Returns a *NotFoundError when no Account entities are found.
|
||||||
|
func (_q *AccountQuery) Only(ctx context.Context) (*Account, error) {
|
||||||
|
nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
switch len(nodes) {
|
||||||
|
case 1:
|
||||||
|
return nodes[0], nil
|
||||||
|
case 0:
|
||||||
|
return nil, &NotFoundError{account.Label}
|
||||||
|
default:
|
||||||
|
return nil, &NotSingularError{account.Label}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnlyX is like Only, but panics if an error occurs.
|
||||||
|
func (_q *AccountQuery) OnlyX(ctx context.Context) *Account {
|
||||||
|
node, err := _q.Only(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return node
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnlyID is like Only, but returns the only Account ID in the query.
|
||||||
|
// Returns a *NotSingularError when more than one Account ID is found.
|
||||||
|
// Returns a *NotFoundError when no entities are found.
|
||||||
|
func (_q *AccountQuery) OnlyID(ctx context.Context) (id int64, err error) {
|
||||||
|
var ids []int64
|
||||||
|
if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
switch len(ids) {
|
||||||
|
case 1:
|
||||||
|
id = ids[0]
|
||||||
|
case 0:
|
||||||
|
err = &NotFoundError{account.Label}
|
||||||
|
default:
|
||||||
|
err = &NotSingularError{account.Label}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnlyIDX is like OnlyID, but panics if an error occurs.
|
||||||
|
func (_q *AccountQuery) OnlyIDX(ctx context.Context) int64 {
|
||||||
|
id, err := _q.OnlyID(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return id
|
||||||
|
}
|
||||||
|
|
||||||
|
// All executes the query and returns a list of Accounts.
|
||||||
|
func (_q *AccountQuery) All(ctx context.Context) ([]*Account, error) {
|
||||||
|
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll)
|
||||||
|
if err := _q.prepareQuery(ctx); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
qr := querierAll[[]*Account, *AccountQuery]()
|
||||||
|
return withInterceptors[[]*Account](ctx, _q, qr, _q.inters)
|
||||||
|
}
|
||||||
|
|
||||||
|
// AllX is like All, but panics if an error occurs.
|
||||||
|
func (_q *AccountQuery) AllX(ctx context.Context) []*Account {
|
||||||
|
nodes, err := _q.All(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return nodes
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDs executes the query and returns a list of Account IDs.
|
||||||
|
func (_q *AccountQuery) IDs(ctx context.Context) (ids []int64, err error) {
|
||||||
|
if _q.ctx.Unique == nil && _q.path != nil {
|
||||||
|
_q.Unique(true)
|
||||||
|
}
|
||||||
|
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs)
|
||||||
|
if err = _q.Select(account.FieldID).Scan(ctx, &ids); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return ids, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDsX is like IDs, but panics if an error occurs.
|
||||||
|
func (_q *AccountQuery) IDsX(ctx context.Context) []int64 {
|
||||||
|
ids, err := _q.IDs(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return ids
|
||||||
|
}
|
||||||
|
|
||||||
|
// Count returns the count of the given query.
|
||||||
|
func (_q *AccountQuery) Count(ctx context.Context) (int, error) {
|
||||||
|
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount)
|
||||||
|
if err := _q.prepareQuery(ctx); err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
return withInterceptors[int](ctx, _q, querierCount[*AccountQuery](), _q.inters)
|
||||||
|
}
|
||||||
|
|
||||||
|
// CountX is like Count, but panics if an error occurs.
|
||||||
|
func (_q *AccountQuery) CountX(ctx context.Context) int {
|
||||||
|
count, err := _q.Count(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return count
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exist returns true if the query has elements in the graph.
|
||||||
|
func (_q *AccountQuery) Exist(ctx context.Context) (bool, error) {
|
||||||
|
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist)
|
||||||
|
switch _, err := _q.FirstID(ctx); {
|
||||||
|
case IsNotFound(err):
|
||||||
|
return false, nil
|
||||||
|
case err != nil:
|
||||||
|
return false, fmt.Errorf("ent: check existence: %w", err)
|
||||||
|
default:
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExistX is like Exist, but panics if an error occurs.
|
||||||
|
func (_q *AccountQuery) ExistX(ctx context.Context) bool {
|
||||||
|
exist, err := _q.Exist(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return exist
|
||||||
|
}
|
||||||
|
|
||||||
|
// Clone returns a duplicate of the AccountQuery builder, including all associated steps. It can be
// used to prepare common query builders and use them differently after the clone is made.
// Safe to call on a nil receiver, in which case it returns nil.
func (_q *AccountQuery) Clone() *AccountQuery {
	if _q == nil {
		return nil
	}
	return &AccountQuery{
		config: _q.config,
		ctx:    _q.ctx.Clone(),
		// Slices are copied into fresh backing arrays so the clone and the
		// original can diverge independently.
		order:      append([]account.OrderOption{}, _q.order...),
		inters:     append([]Interceptor{}, _q.inters...),
		predicates: append([]predicate.Account{}, _q.predicates...),
		// Eager-load sub-builders are deep-cloned as well.
		withGroups:        _q.withGroups.Clone(),
		withProxy:         _q.withProxy.Clone(),
		withUsageLogs:     _q.withUsageLogs.Clone(),
		withAccountGroups: _q.withAccountGroups.Clone(),
		// clone intermediate query.
		sql:  _q.sql.Clone(),
		path: _q.path,
	}
}
|
||||||
|
|
||||||
|
// WithGroups tells the query-builder to eager-load the nodes that are connected to
|
||||||
|
// the "groups" edge. The optional arguments are used to configure the query builder of the edge.
|
||||||
|
func (_q *AccountQuery) WithGroups(opts ...func(*GroupQuery)) *AccountQuery {
|
||||||
|
query := (&GroupClient{config: _q.config}).Query()
|
||||||
|
for _, opt := range opts {
|
||||||
|
opt(query)
|
||||||
|
}
|
||||||
|
_q.withGroups = query
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithProxy tells the query-builder to eager-load the nodes that are connected to
|
||||||
|
// the "proxy" edge. The optional arguments are used to configure the query builder of the edge.
|
||||||
|
func (_q *AccountQuery) WithProxy(opts ...func(*ProxyQuery)) *AccountQuery {
|
||||||
|
query := (&ProxyClient{config: _q.config}).Query()
|
||||||
|
for _, opt := range opts {
|
||||||
|
opt(query)
|
||||||
|
}
|
||||||
|
_q.withProxy = query
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithUsageLogs tells the query-builder to eager-load the nodes that are connected to
|
||||||
|
// the "usage_logs" edge. The optional arguments are used to configure the query builder of the edge.
|
||||||
|
func (_q *AccountQuery) WithUsageLogs(opts ...func(*UsageLogQuery)) *AccountQuery {
|
||||||
|
query := (&UsageLogClient{config: _q.config}).Query()
|
||||||
|
for _, opt := range opts {
|
||||||
|
opt(query)
|
||||||
|
}
|
||||||
|
_q.withUsageLogs = query
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithAccountGroups tells the query-builder to eager-load the nodes that are connected to
|
||||||
|
// the "account_groups" edge. The optional arguments are used to configure the query builder of the edge.
|
||||||
|
func (_q *AccountQuery) WithAccountGroups(opts ...func(*AccountGroupQuery)) *AccountQuery {
|
||||||
|
query := (&AccountGroupClient{config: _q.config}).Query()
|
||||||
|
for _, opt := range opts {
|
||||||
|
opt(query)
|
||||||
|
}
|
||||||
|
_q.withAccountGroups = query
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// GroupBy is used to group vertices by one or more fields/columns.
|
||||||
|
// It is often used with aggregate functions, like: count, max, mean, min, sum.
|
||||||
|
//
|
||||||
|
// Example:
|
||||||
|
//
|
||||||
|
// var v []struct {
|
||||||
|
// CreatedAt time.Time `json:"created_at,omitempty"`
|
||||||
|
// Count int `json:"count,omitempty"`
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// client.Account.Query().
|
||||||
|
// GroupBy(account.FieldCreatedAt).
|
||||||
|
// Aggregate(ent.Count()).
|
||||||
|
// Scan(ctx, &v)
|
||||||
|
func (_q *AccountQuery) GroupBy(field string, fields ...string) *AccountGroupBy {
|
||||||
|
_q.ctx.Fields = append([]string{field}, fields...)
|
||||||
|
grbuild := &AccountGroupBy{build: _q}
|
||||||
|
grbuild.flds = &_q.ctx.Fields
|
||||||
|
grbuild.label = account.Label
|
||||||
|
grbuild.scan = grbuild.Scan
|
||||||
|
return grbuild
|
||||||
|
}
|
||||||
|
|
||||||
|
// Select allows the selection one or more fields/columns for the given query,
|
||||||
|
// instead of selecting all fields in the entity.
|
||||||
|
//
|
||||||
|
// Example:
|
||||||
|
//
|
||||||
|
// var v []struct {
|
||||||
|
// CreatedAt time.Time `json:"created_at,omitempty"`
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// client.Account.Query().
|
||||||
|
// Select(account.FieldCreatedAt).
|
||||||
|
// Scan(ctx, &v)
|
||||||
|
func (_q *AccountQuery) Select(fields ...string) *AccountSelect {
|
||||||
|
_q.ctx.Fields = append(_q.ctx.Fields, fields...)
|
||||||
|
sbuild := &AccountSelect{AccountQuery: _q}
|
||||||
|
sbuild.label = account.Label
|
||||||
|
sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan
|
||||||
|
return sbuild
|
||||||
|
}
|
||||||
|
|
||||||
|
// Aggregate returns a AccountSelect configured with the given aggregations.
|
||||||
|
func (_q *AccountQuery) Aggregate(fns ...AggregateFunc) *AccountSelect {
|
||||||
|
return _q.Select().Aggregate(fns...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// prepareQuery validates the query before execution: it runs traversal
// interceptors, checks selected fields against the account schema, and
// resolves the lazy traversal path (if any) into the intermediate selector.
func (_q *AccountQuery) prepareQuery(ctx context.Context) error {
	for _, inter := range _q.inters {
		if inter == nil {
			return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
		}
		if trv, ok := inter.(Traverser); ok {
			if err := trv.Traverse(ctx, _q); err != nil {
				return err
			}
		}
	}
	// Reject columns that do not exist on the account table.
	for _, f := range _q.ctx.Fields {
		if !account.ValidColumn(f) {
			return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
		}
	}
	// Materialize the traversal path into the intermediate SQL selector.
	if _q.path != nil {
		prev, err := _q.path(ctx)
		if err != nil {
			return err
		}
		_q.sql = prev
	}
	return nil
}
|
||||||
|
|
||||||
|
// sqlAll scans all rows matching the query into Account nodes and then
// eager-loads any edges requested via the With* builders.
func (_q *AccountQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Account, error) {
	var (
		nodes = []*Account{}
		_spec = _q.querySpec()
		// loadedTypes flags which edges were requested, in edge order:
		// groups, proxy, usage_logs, account_groups.
		loadedTypes = [4]bool{
			_q.withGroups != nil,
			_q.withProxy != nil,
			_q.withUsageLogs != nil,
			_q.withAccountGroups != nil,
		}
	)
	_spec.ScanValues = func(columns []string) ([]any, error) {
		return (*Account).scanValues(nil, columns)
	}
	// Assign materializes one row into a new node and appends it to the result.
	_spec.Assign = func(columns []string, values []any) error {
		node := &Account{config: _q.config}
		nodes = append(nodes, node)
		node.Edges.loadedTypes = loadedTypes
		return node.assignValues(columns, values)
	}
	if len(_q.modifiers) > 0 {
		_spec.Modifiers = _q.modifiers
	}
	// Hooks may mutate the spec just before execution.
	for i := range hooks {
		hooks[i](ctx, _spec)
	}
	if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil {
		return nil, err
	}
	if len(nodes) == 0 {
		return nodes, nil
	}
	// Eager-load each requested edge into the fetched nodes.
	if query := _q.withGroups; query != nil {
		if err := _q.loadGroups(ctx, query, nodes,
			func(n *Account) { n.Edges.Groups = []*Group{} },
			func(n *Account, e *Group) { n.Edges.Groups = append(n.Edges.Groups, e) }); err != nil {
			return nil, err
		}
	}
	if query := _q.withProxy; query != nil {
		if err := _q.loadProxy(ctx, query, nodes, nil,
			func(n *Account, e *Proxy) { n.Edges.Proxy = e }); err != nil {
			return nil, err
		}
	}
	if query := _q.withUsageLogs; query != nil {
		if err := _q.loadUsageLogs(ctx, query, nodes,
			func(n *Account) { n.Edges.UsageLogs = []*UsageLog{} },
			func(n *Account, e *UsageLog) { n.Edges.UsageLogs = append(n.Edges.UsageLogs, e) }); err != nil {
			return nil, err
		}
	}
	if query := _q.withAccountGroups; query != nil {
		if err := _q.loadAccountGroups(ctx, query, nodes,
			func(n *Account) { n.Edges.AccountGroups = []*AccountGroup{} },
			func(n *Account, e *AccountGroup) { n.Edges.AccountGroups = append(n.Edges.AccountGroups, e) }); err != nil {
			return nil, err
		}
	}
	return nodes, nil
}
|
||||||
|
|
||||||
|
// loadGroups eager-loads the M2M "groups" edge for the given accounts.
// It joins through the edge table, prepending the account-side key to each
// scanned row so every Group can be routed back to its owning Account(s).
func (_q *AccountQuery) loadGroups(ctx context.Context, query *GroupQuery, nodes []*Account, init func(*Account), assign func(*Account, *Group)) error {
	edgeIDs := make([]driver.Value, len(nodes))
	byID := make(map[int64]*Account)
	// nids maps a group ID to the set of accounts it belongs to.
	nids := make(map[int64]map[*Account]struct{})
	for i, node := range nodes {
		edgeIDs[i] = node.ID
		byID[node.ID] = node
		if init != nil {
			init(node)
		}
	}
	query.Where(func(s *sql.Selector) {
		joinT := sql.Table(account.GroupsTable)
		s.Join(joinT).On(s.C(group.FieldID), joinT.C(account.GroupsPrimaryKey[1]))
		s.Where(sql.InValues(joinT.C(account.GroupsPrimaryKey[0]), edgeIDs...))
		columns := s.SelectedColumns()
		// Prepend the account-side join column so it arrives as column 0.
		s.Select(joinT.C(account.GroupsPrimaryKey[0]))
		s.AppendSelect(columns...)
		s.SetDistinct(false)
	})
	if err := query.prepareQuery(ctx); err != nil {
		return err
	}
	qr := QuerierFunc(func(ctx context.Context, q Query) (Value, error) {
		return query.sqlAll(ctx, func(_ context.Context, spec *sqlgraph.QuerySpec) {
			// Wrap the original scan/assign so the extra leading column
			// (the account ID) is consumed here and stripped before the
			// Group row itself is materialized.
			assign := spec.Assign
			values := spec.ScanValues
			spec.ScanValues = func(columns []string) ([]any, error) {
				values, err := values(columns[1:])
				if err != nil {
					return nil, err
				}
				return append([]any{new(sql.NullInt64)}, values...), nil
			}
			spec.Assign = func(columns []string, values []any) error {
				outValue := values[0].(*sql.NullInt64).Int64
				inValue := values[1].(*sql.NullInt64).Int64
				if nids[inValue] == nil {
					nids[inValue] = map[*Account]struct{}{byID[outValue]: {}}
					// First sighting of this group: materialize the row once.
					return assign(columns[1:], values[1:])
				}
				// Subsequent rows only record another owning account.
				nids[inValue][byID[outValue]] = struct{}{}
				return nil
			}
		})
	})
	neighbors, err := withInterceptors[[]*Group](ctx, query, qr, query.inters)
	if err != nil {
		return err
	}
	// Fan each loaded group out to all accounts that reference it.
	for _, n := range neighbors {
		nodes, ok := nids[n.ID]
		if !ok {
			return fmt.Errorf(`unexpected "groups" node returned %v`, n.ID)
		}
		for kn := range nodes {
			assign(kn, n)
		}
	}
	return nil
}
|
||||||
|
// loadProxy eager-loads the M2O "proxy" edge: it collects the distinct
// non-nil proxy foreign keys from the accounts, fetches the proxies in one
// query, and assigns each proxy back to every account referencing it.
func (_q *AccountQuery) loadProxy(ctx context.Context, query *ProxyQuery, nodes []*Account, init func(*Account), assign func(*Account, *Proxy)) error {
	ids := make([]int64, 0, len(nodes))
	nodeids := make(map[int64][]*Account)
	for i := range nodes {
		// Accounts without a proxy are skipped entirely.
		if nodes[i].ProxyID == nil {
			continue
		}
		fk := *nodes[i].ProxyID
		if _, ok := nodeids[fk]; !ok {
			ids = append(ids, fk)
		}
		nodeids[fk] = append(nodeids[fk], nodes[i])
	}
	if len(ids) == 0 {
		return nil
	}
	query.Where(proxy.IDIn(ids...))
	neighbors, err := query.All(ctx)
	if err != nil {
		return err
	}
	for _, n := range neighbors {
		nodes, ok := nodeids[n.ID]
		if !ok {
			return fmt.Errorf(`unexpected foreign-key "proxy_id" returned %v`, n.ID)
		}
		for i := range nodes {
			assign(nodes[i], n)
		}
	}
	return nil
}
|
||||||
|
// loadUsageLogs eager-loads the O2M "usage_logs" edge: it fetches all
// usage logs whose account foreign key matches one of the given accounts
// and appends each log to its owning account.
func (_q *AccountQuery) loadUsageLogs(ctx context.Context, query *UsageLogQuery, nodes []*Account, init func(*Account), assign func(*Account, *UsageLog)) error {
	fks := make([]driver.Value, 0, len(nodes))
	nodeids := make(map[int64]*Account)
	for i := range nodes {
		fks = append(fks, nodes[i].ID)
		nodeids[nodes[i].ID] = nodes[i]
		if init != nil {
			init(nodes[i])
		}
	}
	// If a field selection is active, ensure the FK column is fetched so
	// the rows can be routed back to their accounts.
	if len(query.ctx.Fields) > 0 {
		query.ctx.AppendFieldOnce(usagelog.FieldAccountID)
	}
	query.Where(predicate.UsageLog(func(s *sql.Selector) {
		s.Where(sql.InValues(s.C(account.UsageLogsColumn), fks...))
	}))
	neighbors, err := query.All(ctx)
	if err != nil {
		return err
	}
	for _, n := range neighbors {
		fk := n.AccountID
		node, ok := nodeids[fk]
		if !ok {
			return fmt.Errorf(`unexpected referenced foreign-key "account_id" returned %v for node %v`, fk, n.ID)
		}
		assign(node, n)
	}
	return nil
}
|
||||||
|
// loadAccountGroups eager-loads the O2M "account_groups" edge (the join
// entity rows) for the given accounts, routing each AccountGroup back to
// its owning account via the account_id foreign key.
func (_q *AccountQuery) loadAccountGroups(ctx context.Context, query *AccountGroupQuery, nodes []*Account, init func(*Account), assign func(*Account, *AccountGroup)) error {
	fks := make([]driver.Value, 0, len(nodes))
	nodeids := make(map[int64]*Account)
	for i := range nodes {
		fks = append(fks, nodes[i].ID)
		nodeids[nodes[i].ID] = nodes[i]
		if init != nil {
			init(nodes[i])
		}
	}
	// If a field selection is active, ensure the FK column is fetched so
	// the rows can be routed back to their accounts.
	if len(query.ctx.Fields) > 0 {
		query.ctx.AppendFieldOnce(accountgroup.FieldAccountID)
	}
	query.Where(predicate.AccountGroup(func(s *sql.Selector) {
		s.Where(sql.InValues(s.C(account.AccountGroupsColumn), fks...))
	}))
	neighbors, err := query.All(ctx)
	if err != nil {
		return err
	}
	for _, n := range neighbors {
		fk := n.AccountID
		node, ok := nodeids[fk]
		if !ok {
			return fmt.Errorf(`unexpected referenced foreign-key "account_id" returned %v for node %v`, fk, n)
		}
		assign(node, n)
	}
	return nil
}
|
||||||
|
|
||||||
|
// sqlCount builds and runs the COUNT query for the current builder state.
func (_q *AccountQuery) sqlCount(ctx context.Context) (int, error) {
	_spec := _q.querySpec()
	if len(_q.modifiers) > 0 {
		_spec.Modifiers = _q.modifiers
	}
	_spec.Node.Columns = _q.ctx.Fields
	// With an explicit field selection, honor the caller's Unique setting
	// (COUNT DISTINCT vs plain COUNT).
	if len(_q.ctx.Fields) > 0 {
		_spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique
	}
	return sqlgraph.CountNodes(ctx, _q.driver, _spec)
}
|
||||||
|
|
||||||
|
// querySpec translates the builder state (unique, fields, predicates,
// limit/offset, ordering) into a sqlgraph.QuerySpec for execution.
func (_q *AccountQuery) querySpec() *sqlgraph.QuerySpec {
	_spec := sqlgraph.NewQuerySpec(account.Table, account.Columns, sqlgraph.NewFieldSpec(account.FieldID, field.TypeInt64))
	_spec.From = _q.sql
	if unique := _q.ctx.Unique; unique != nil {
		_spec.Unique = *unique
	} else if _q.path != nil {
		// Graph traversals default to unique results.
		_spec.Unique = true
	}
	if fields := _q.ctx.Fields; len(fields) > 0 {
		// The ID column is always selected first; user fields follow,
		// skipping a duplicate ID entry.
		_spec.Node.Columns = make([]string, 0, len(fields))
		_spec.Node.Columns = append(_spec.Node.Columns, account.FieldID)
		for i := range fields {
			if fields[i] != account.FieldID {
				_spec.Node.Columns = append(_spec.Node.Columns, fields[i])
			}
		}
		// Eager-loading the proxy edge requires its FK column.
		if _q.withProxy != nil {
			_spec.Node.AddColumnOnce(account.FieldProxyID)
		}
	}
	if ps := _q.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	if limit := _q.ctx.Limit; limit != nil {
		_spec.Limit = *limit
	}
	if offset := _q.ctx.Offset; offset != nil {
		_spec.Offset = *offset
	}
	if ps := _q.order; len(ps) > 0 {
		_spec.Order = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	return _spec
}
|
||||||
|
|
||||||
|
// sqlQuery builds the raw SQL selector for the query, either from scratch
// or on top of an existing intermediate selector produced by a traversal.
func (_q *AccountQuery) sqlQuery(ctx context.Context) *sql.Selector {
	builder := sql.Dialect(_q.driver.Dialect())
	t1 := builder.Table(account.Table)
	columns := _q.ctx.Fields
	if len(columns) == 0 {
		columns = account.Columns
	}
	selector := builder.Select(t1.Columns(columns...)...).From(t1)
	if _q.sql != nil {
		// Continue from the traversal's intermediate selector.
		selector = _q.sql
		selector.Select(selector.Columns(columns...)...)
	}
	if _q.ctx.Unique != nil && *_q.ctx.Unique {
		selector.Distinct()
	}
	for _, m := range _q.modifiers {
		m(selector)
	}
	for _, p := range _q.predicates {
		p(selector)
	}
	for _, p := range _q.order {
		p(selector)
	}
	if offset := _q.ctx.Offset; offset != nil {
		// limit is mandatory for offset clause. We start
		// with default value, and override it below if needed.
		selector.Offset(*offset).Limit(math.MaxInt32)
	}
	if limit := _q.ctx.Limit; limit != nil {
		selector.Limit(*limit)
	}
	return selector
}
|
||||||
|
|
||||||
|
// ForUpdate locks the selected rows against concurrent updates, and prevent them from being
|
||||||
|
// updated, deleted or "selected ... for update" by other sessions, until the transaction is
|
||||||
|
// either committed or rolled-back.
|
||||||
|
func (_q *AccountQuery) ForUpdate(opts ...sql.LockOption) *AccountQuery {
|
||||||
|
if _q.driver.Dialect() == dialect.Postgres {
|
||||||
|
_q.Unique(false)
|
||||||
|
}
|
||||||
|
_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
|
||||||
|
s.ForUpdate(opts...)
|
||||||
|
})
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock
|
||||||
|
// on any rows that are read. Other sessions can read the rows, but cannot modify them
|
||||||
|
// until your transaction commits.
|
||||||
|
func (_q *AccountQuery) ForShare(opts ...sql.LockOption) *AccountQuery {
|
||||||
|
if _q.driver.Dialect() == dialect.Postgres {
|
||||||
|
_q.Unique(false)
|
||||||
|
}
|
||||||
|
_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
|
||||||
|
s.ForShare(opts...)
|
||||||
|
})
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// AccountGroupBy is the group-by builder for Account entities.
type AccountGroupBy struct {
	selector
	// build is the underlying query the GROUP BY executes against.
	build *AccountQuery
}
|
||||||
|
|
||||||
|
// Aggregate adds the given aggregation functions to the group-by query.
func (_g *AccountGroupBy) Aggregate(fns ...AggregateFunc) *AccountGroupBy {
	_g.fns = append(_g.fns, fns...)
	return _g
}
|
||||||
|
|
||||||
|
// Scan applies the selector query and scans the result into the given value.
func (_g *AccountGroupBy) Scan(ctx context.Context, v any) error {
	ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy)
	if err := _g.build.prepareQuery(ctx); err != nil {
		return err
	}
	return scanWithInterceptors[*AccountQuery, *AccountGroupBy](ctx, _g.build, _g, _g.build.inters, v)
}
|
||||||
|
|
||||||
|
// sqlScan builds the GROUP BY statement (selected columns, aggregations and
// grouping keys), executes it and scans the rows into v.
func (_g *AccountGroupBy) sqlScan(ctx context.Context, root *AccountQuery, v any) error {
	selector := root.sqlQuery(ctx).Select()
	aggregation := make([]string, 0, len(_g.fns))
	for _, fn := range _g.fns {
		aggregation = append(aggregation, fn(selector))
	}
	// If no columns were selected explicitly, select the group-by
	// fields followed by the aggregation expressions.
	if len(selector.SelectedColumns()) == 0 {
		columns := make([]string, 0, len(*_g.flds)+len(_g.fns))
		for _, f := range *_g.flds {
			columns = append(columns, selector.C(f))
		}
		columns = append(columns, aggregation...)
		selector.Select(columns...)
	}
	selector.GroupBy(selector.Columns(*_g.flds...)...)
	if err := selector.Err(); err != nil {
		return err
	}
	rows := &sql.Rows{}
	query, args := selector.Query()
	if err := _g.build.driver.Query(ctx, query, args, rows); err != nil {
		return err
	}
	defer rows.Close()
	return sql.ScanSlice(rows, v)
}
|
||||||
|
|
||||||
|
// AccountSelect is the builder for selecting fields of Account entities.
type AccountSelect struct {
	*AccountQuery
	selector
}
|
||||||
|
|
||||||
|
// Aggregate adds the given aggregation functions to the selector query.
func (_s *AccountSelect) Aggregate(fns ...AggregateFunc) *AccountSelect {
	_s.fns = append(_s.fns, fns...)
	return _s
}
|
||||||
|
|
||||||
|
// Scan applies the selector query and scans the result into the given value.
func (_s *AccountSelect) Scan(ctx context.Context, v any) error {
	ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect)
	if err := _s.prepareQuery(ctx); err != nil {
		return err
	}
	return scanWithInterceptors[*AccountQuery, *AccountSelect](ctx, _s.AccountQuery, _s, _s.inters, v)
}
|
||||||
|
|
||||||
|
// sqlScan executes the SELECT statement, merging any aggregation expressions
// with the explicitly selected fields, and scans the rows into v.
func (_s *AccountSelect) sqlScan(ctx context.Context, root *AccountQuery, v any) error {
	selector := root.sqlQuery(ctx)
	aggregation := make([]string, 0, len(_s.fns))
	for _, fn := range _s.fns {
		aggregation = append(aggregation, fn(selector))
	}
	switch n := len(*_s.selector.flds); {
	case n == 0 && len(aggregation) > 0:
		// Only aggregations requested: select them alone.
		selector.Select(aggregation...)
	case n != 0 && len(aggregation) > 0:
		// Fields and aggregations: append aggregations to the field list.
		selector.AppendSelect(aggregation...)
	}
	rows := &sql.Rows{}
	query, args := selector.Query()
	if err := _s.driver.Query(ctx, query, args, rows); err != nil {
		return err
	}
	defer rows.Close()
	return sql.ScanSlice(rows, v)
}
|
||||||
1911
backend/ent/account_update.go
Normal file
176
backend/ent/accountgroup.go
Normal file
@@ -0,0 +1,176 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package ent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"entgo.io/ent"
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/account"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/accountgroup"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/group"
|
||||||
|
)
|
||||||
|
|
||||||
|
// AccountGroup is the model entity for the AccountGroup schema.
type AccountGroup struct {
	config `json:"-"`
	// AccountID holds the value of the "account_id" field.
	AccountID int64 `json:"account_id,omitempty"`
	// GroupID holds the value of the "group_id" field.
	GroupID int64 `json:"group_id,omitempty"`
	// Priority holds the value of the "priority" field.
	Priority int `json:"priority,omitempty"`
	// CreatedAt holds the value of the "created_at" field.
	CreatedAt time.Time `json:"created_at,omitempty"`
	// Edges holds the relations/edges for other nodes in the graph.
	// The values are being populated by the AccountGroupQuery when eager-loading is set.
	Edges AccountGroupEdges `json:"edges"`
	// selectValues stores values selected through modifiers, order, etc.
	selectValues sql.SelectValues
}
|
||||||
|
|
||||||
|
// AccountGroupEdges holds the relations/edges for other nodes in the graph.
type AccountGroupEdges struct {
	// Account holds the value of the account edge.
	Account *Account `json:"account,omitempty"`
	// Group holds the value of the group edge.
	Group *Group `json:"group,omitempty"`
	// loadedTypes holds the information for reporting if a
	// type was loaded (or requested) in eager-loading or not.
	loadedTypes [2]bool
}
|
||||||
|
|
||||||
|
// AccountOrErr returns the Account value or an error if the edge
// was not loaded in eager-loading, or loaded but was not found.
func (e AccountGroupEdges) AccountOrErr() (*Account, error) {
	if e.Account != nil {
		return e.Account, nil
	} else if e.loadedTypes[0] {
		// Edge was requested in eager-loading but no row matched.
		return nil, &NotFoundError{label: account.Label}
	}
	return nil, &NotLoadedError{edge: "account"}
}
|
||||||
|
|
||||||
|
// GroupOrErr returns the Group value or an error if the edge
// was not loaded in eager-loading, or loaded but was not found.
func (e AccountGroupEdges) GroupOrErr() (*Group, error) {
	if e.Group != nil {
		return e.Group, nil
	} else if e.loadedTypes[1] {
		// Edge was requested in eager-loading but no row matched.
		return nil, &NotFoundError{label: group.Label}
	}
	return nil, &NotLoadedError{edge: "group"}
}
|
||||||
|
|
||||||
|
// scanValues returns the types for scanning values from sql.Rows.
func (*AccountGroup) scanValues(columns []string) ([]any, error) {
	values := make([]any, len(columns))
	for i := range columns {
		switch columns[i] {
		case accountgroup.FieldAccountID, accountgroup.FieldGroupID, accountgroup.FieldPriority:
			values[i] = new(sql.NullInt64)
		case accountgroup.FieldCreatedAt:
			values[i] = new(sql.NullTime)
		default:
			// Unknown columns (e.g. modifier-selected) are scanned dynamically.
			values[i] = new(sql.UnknownType)
		}
	}
	return values, nil
}
|
||||||
|
|
||||||
|
// assignValues assigns the values that were returned from sql.Rows (after scanning)
// to the AccountGroup fields.
func (_m *AccountGroup) assignValues(columns []string, values []any) error {
	if m, n := len(values), len(columns); m < n {
		return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
	}
	for i := range columns {
		switch columns[i] {
		case accountgroup.FieldAccountID:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field account_id", values[i])
			} else if value.Valid {
				_m.AccountID = value.Int64
			}
		case accountgroup.FieldGroupID:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field group_id", values[i])
			} else if value.Valid {
				_m.GroupID = value.Int64
			}
		case accountgroup.FieldPriority:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field priority", values[i])
			} else if value.Valid {
				_m.Priority = int(value.Int64)
			}
		case accountgroup.FieldCreatedAt:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field created_at", values[i])
			} else if value.Valid {
				_m.CreatedAt = value.Time
			}
		default:
			// Columns outside the schema are kept for Value() lookups.
			_m.selectValues.Set(columns[i], values[i])
		}
	}
	return nil
}
|
||||||
|
|
||||||
|
// Value returns the ent.Value that was dynamically selected and assigned to the AccountGroup.
// This includes values selected through modifiers, order, etc.
func (_m *AccountGroup) Value(name string) (ent.Value, error) {
	return _m.selectValues.Get(name)
}
|
||||||
|
|
||||||
|
// QueryAccount queries the "account" edge of the AccountGroup entity.
func (_m *AccountGroup) QueryAccount() *AccountQuery {
	return NewAccountGroupClient(_m.config).QueryAccount(_m)
}
|
||||||
|
|
||||||
|
// QueryGroup queries the "group" edge of the AccountGroup entity.
func (_m *AccountGroup) QueryGroup() *GroupQuery {
	return NewAccountGroupClient(_m.config).QueryGroup(_m)
}
|
||||||
|
|
||||||
|
// Update returns a builder for updating this AccountGroup.
// Note that you need to call AccountGroup.Unwrap() before calling this method if this AccountGroup
// was returned from a transaction, and the transaction was committed or rolled back.
func (_m *AccountGroup) Update() *AccountGroupUpdateOne {
	return NewAccountGroupClient(_m.config).UpdateOne(_m)
}
|
||||||
|
|
||||||
|
// Unwrap unwraps the AccountGroup entity that was returned from a transaction after it was closed,
// so that all future queries will be executed through the driver which created the transaction.
func (_m *AccountGroup) Unwrap() *AccountGroup {
	_tx, ok := _m.config.driver.(*txDriver)
	if !ok {
		panic("ent: AccountGroup is not a transactional entity")
	}
	_m.config.driver = _tx.drv
	return _m
}
|
||||||
|
|
||||||
|
// String implements the fmt.Stringer.
func (_m *AccountGroup) String() string {
	var builder strings.Builder
	builder.WriteString("AccountGroup(")
	builder.WriteString("account_id=")
	builder.WriteString(fmt.Sprintf("%v", _m.AccountID))
	builder.WriteString(", ")
	builder.WriteString("group_id=")
	builder.WriteString(fmt.Sprintf("%v", _m.GroupID))
	builder.WriteString(", ")
	builder.WriteString("priority=")
	builder.WriteString(fmt.Sprintf("%v", _m.Priority))
	builder.WriteString(", ")
	builder.WriteString("created_at=")
	builder.WriteString(_m.CreatedAt.Format(time.ANSIC))
	builder.WriteByte(')')
	return builder.String()
}
|
||||||
|
|
||||||
|
// AccountGroups is a parsable slice of AccountGroup.
type AccountGroups []*AccountGroup
|
||||||
123
backend/ent/accountgroup/accountgroup.go
Normal file
@@ -0,0 +1,123 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package accountgroup
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
	// Label holds the string label denoting the accountgroup type in the database.
	Label = "account_group"
	// FieldAccountID holds the string denoting the account_id field in the database.
	FieldAccountID = "account_id"
	// FieldGroupID holds the string denoting the group_id field in the database.
	FieldGroupID = "group_id"
	// FieldPriority holds the string denoting the priority field in the database.
	FieldPriority = "priority"
	// FieldCreatedAt holds the string denoting the created_at field in the database.
	FieldCreatedAt = "created_at"
	// EdgeAccount holds the string denoting the account edge name in mutations.
	EdgeAccount = "account"
	// EdgeGroup holds the string denoting the group edge name in mutations.
	EdgeGroup = "group"
	// AccountFieldID holds the string denoting the ID field of the Account.
	AccountFieldID = "id"
	// GroupFieldID holds the string denoting the ID field of the Group.
	GroupFieldID = "id"
	// Table holds the table name of the accountgroup in the database.
	Table = "account_groups"
	// AccountTable is the table that holds the account relation/edge.
	AccountTable = "account_groups"
	// AccountInverseTable is the table name for the Account entity.
	// It exists in this package in order to avoid circular dependency with the "account" package.
	AccountInverseTable = "accounts"
	// AccountColumn is the table column denoting the account relation/edge.
	AccountColumn = "account_id"
	// GroupTable is the table that holds the group relation/edge.
	GroupTable = "account_groups"
	// GroupInverseTable is the table name for the Group entity.
	// It exists in this package in order to avoid circular dependency with the "group" package.
	GroupInverseTable = "groups"
	// GroupColumn is the table column denoting the group relation/edge.
	GroupColumn = "group_id"
)
|
||||||
|
|
||||||
|
// Columns holds all SQL columns for accountgroup fields.
var Columns = []string{
	FieldAccountID,
	FieldGroupID,
	FieldPriority,
	FieldCreatedAt,
}
|
||||||
|
|
||||||
|
// ValidColumn reports if the column name is valid (part of the table columns).
func ValidColumn(column string) bool {
	for i := range Columns {
		if column == Columns[i] {
			return true
		}
	}
	return false
}
|
||||||
|
|
||||||
|
var (
	// DefaultPriority holds the default value on creation for the "priority" field.
	// Populated by the runtime package from the schema definition.
	DefaultPriority int
	// DefaultCreatedAt holds the default value on creation for the "created_at" field.
	DefaultCreatedAt func() time.Time
)
|
||||||
|
|
||||||
|
// OrderOption defines the ordering options for the AccountGroup queries.
type OrderOption func(*sql.Selector)
|
||||||
|
|
||||||
|
// ByAccountID orders the results by the account_id field.
func ByAccountID(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldAccountID, opts...).ToFunc()
}
|
||||||
|
|
||||||
|
// ByGroupID orders the results by the group_id field.
func ByGroupID(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldGroupID, opts...).ToFunc()
}
|
||||||
|
|
||||||
|
// ByPriority orders the results by the priority field.
func ByPriority(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldPriority, opts...).ToFunc()
}
|
||||||
|
|
||||||
|
// ByCreatedAt orders the results by the created_at field.
func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
}
|
||||||
|
|
||||||
|
// ByAccountField orders the results by account field.
func ByAccountField(field string, opts ...sql.OrderTermOption) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborTerms(s, newAccountStep(), sql.OrderByField(field, opts...))
	}
}
|
||||||
|
|
||||||
|
// ByGroupField orders the results by group field.
func ByGroupField(field string, opts ...sql.OrderTermOption) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborTerms(s, newGroupStep(), sql.OrderByField(field, opts...))
	}
}
|
||||||
|
// newAccountStep builds the graph-traversal step from the join table
// to the Account entity over the account_id foreign key.
func newAccountStep() *sqlgraph.Step {
	return sqlgraph.NewStep(
		sqlgraph.From(Table, AccountColumn),
		sqlgraph.To(AccountInverseTable, AccountFieldID),
		sqlgraph.Edge(sqlgraph.M2O, false, AccountTable, AccountColumn),
	)
}
|
||||||
|
// newGroupStep builds the graph-traversal step from the join table
// to the Group entity over the group_id foreign key.
func newGroupStep() *sqlgraph.Step {
	return sqlgraph.NewStep(
		sqlgraph.From(Table, GroupColumn),
		sqlgraph.To(GroupInverseTable, GroupFieldID),
		sqlgraph.Edge(sqlgraph.M2O, false, GroupTable, GroupColumn),
	)
}
|
||||||
212
backend/ent/accountgroup/where.go
Normal file
@@ -0,0 +1,212 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package accountgroup
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||||
|
)
|
||||||
|
|
||||||
|
// AccountID applies equality check predicate on the "account_id" field. It's identical to AccountIDEQ.
func AccountID(v int64) predicate.AccountGroup {
	return predicate.AccountGroup(sql.FieldEQ(FieldAccountID, v))
}
|
||||||
|
|
||||||
|
// GroupID applies equality check predicate on the "group_id" field. It's identical to GroupIDEQ.
func GroupID(v int64) predicate.AccountGroup {
	return predicate.AccountGroup(sql.FieldEQ(FieldGroupID, v))
}
|
||||||
|
|
||||||
|
// Priority applies equality check predicate on the "priority" field. It's identical to PriorityEQ.
func Priority(v int) predicate.AccountGroup {
	return predicate.AccountGroup(sql.FieldEQ(FieldPriority, v))
}
|
||||||
|
|
||||||
|
// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ.
func CreatedAt(v time.Time) predicate.AccountGroup {
	return predicate.AccountGroup(sql.FieldEQ(FieldCreatedAt, v))
}
|
||||||
|
|
||||||
|
// AccountIDEQ applies the EQ predicate on the "account_id" field.
func AccountIDEQ(v int64) predicate.AccountGroup {
	return predicate.AccountGroup(sql.FieldEQ(FieldAccountID, v))
}
|
||||||
|
|
||||||
|
// AccountIDNEQ applies the NEQ predicate on the "account_id" field.
func AccountIDNEQ(v int64) predicate.AccountGroup {
	return predicate.AccountGroup(sql.FieldNEQ(FieldAccountID, v))
}
|
||||||
|
|
||||||
|
// AccountIDIn applies the In predicate on the "account_id" field.
func AccountIDIn(vs ...int64) predicate.AccountGroup {
	return predicate.AccountGroup(sql.FieldIn(FieldAccountID, vs...))
}
|
||||||
|
|
||||||
|
// AccountIDNotIn applies the NotIn predicate on the "account_id" field.
func AccountIDNotIn(vs ...int64) predicate.AccountGroup {
	return predicate.AccountGroup(sql.FieldNotIn(FieldAccountID, vs...))
}
|
||||||
|
|
||||||
|
// GroupIDEQ applies the EQ predicate on the "group_id" field.
func GroupIDEQ(v int64) predicate.AccountGroup {
	return predicate.AccountGroup(sql.FieldEQ(FieldGroupID, v))
}
|
||||||
|
|
||||||
|
// GroupIDNEQ applies the NEQ predicate on the "group_id" field.
func GroupIDNEQ(v int64) predicate.AccountGroup {
	return predicate.AccountGroup(sql.FieldNEQ(FieldGroupID, v))
}
|
||||||
|
|
||||||
|
// GroupIDIn applies the In predicate on the "group_id" field.
func GroupIDIn(vs ...int64) predicate.AccountGroup {
	return predicate.AccountGroup(sql.FieldIn(FieldGroupID, vs...))
}
|
||||||
|
|
||||||
|
// GroupIDNotIn applies the NotIn predicate on the "group_id" field.
func GroupIDNotIn(vs ...int64) predicate.AccountGroup {
	return predicate.AccountGroup(sql.FieldNotIn(FieldGroupID, vs...))
}
|
||||||
|
|
||||||
|
// PriorityEQ applies the EQ predicate on the "priority" field.
func PriorityEQ(v int) predicate.AccountGroup {
	return predicate.AccountGroup(sql.FieldEQ(FieldPriority, v))
}
|
||||||
|
|
||||||
|
// PriorityNEQ applies the NEQ predicate on the "priority" field.
func PriorityNEQ(v int) predicate.AccountGroup {
	return predicate.AccountGroup(sql.FieldNEQ(FieldPriority, v))
}
|
||||||
|
|
||||||
|
// PriorityIn applies the In predicate on the "priority" field.
func PriorityIn(vs ...int) predicate.AccountGroup {
	return predicate.AccountGroup(sql.FieldIn(FieldPriority, vs...))
}
|
||||||
|
|
||||||
|
// PriorityNotIn applies the NotIn predicate on the "priority" field.
func PriorityNotIn(vs ...int) predicate.AccountGroup {
	return predicate.AccountGroup(sql.FieldNotIn(FieldPriority, vs...))
}
|
||||||
|
|
||||||
|
// PriorityGT applies the GT predicate on the "priority" field.
func PriorityGT(v int) predicate.AccountGroup {
	return predicate.AccountGroup(sql.FieldGT(FieldPriority, v))
}
|
||||||
|
|
||||||
|
// PriorityGTE applies the GTE predicate on the "priority" field.
func PriorityGTE(v int) predicate.AccountGroup {
	return predicate.AccountGroup(sql.FieldGTE(FieldPriority, v))
}
|
||||||
|
|
||||||
|
// PriorityLT applies the LT predicate on the "priority" field.
func PriorityLT(v int) predicate.AccountGroup {
	return predicate.AccountGroup(sql.FieldLT(FieldPriority, v))
}
|
||||||
|
|
||||||
|
// PriorityLTE applies the LTE predicate on the "priority" field.
func PriorityLTE(v int) predicate.AccountGroup {
	return predicate.AccountGroup(sql.FieldLTE(FieldPriority, v))
}
|
||||||
|
|
||||||
|
// CreatedAtEQ applies the EQ predicate on the "created_at" field.
func CreatedAtEQ(v time.Time) predicate.AccountGroup {
	return predicate.AccountGroup(sql.FieldEQ(FieldCreatedAt, v))
}
|
||||||
|
|
||||||
|
// CreatedAtNEQ applies the NEQ predicate on the "created_at" field.
func CreatedAtNEQ(v time.Time) predicate.AccountGroup {
	return predicate.AccountGroup(sql.FieldNEQ(FieldCreatedAt, v))
}
|
||||||
|
|
||||||
|
// CreatedAtIn applies the In predicate on the "created_at" field.
func CreatedAtIn(vs ...time.Time) predicate.AccountGroup {
	return predicate.AccountGroup(sql.FieldIn(FieldCreatedAt, vs...))
}
|
||||||
|
|
||||||
|
// CreatedAtNotIn applies the NotIn predicate on the "created_at" field.
func CreatedAtNotIn(vs ...time.Time) predicate.AccountGroup {
	return predicate.AccountGroup(sql.FieldNotIn(FieldCreatedAt, vs...))
}
|
||||||
|
|
||||||
|
// CreatedAtGT applies the GT predicate on the "created_at" field.
func CreatedAtGT(v time.Time) predicate.AccountGroup {
	return predicate.AccountGroup(sql.FieldGT(FieldCreatedAt, v))
}
|
||||||
|
|
||||||
|
// CreatedAtGTE applies the GTE predicate on the "created_at" field.
func CreatedAtGTE(v time.Time) predicate.AccountGroup {
	return predicate.AccountGroup(sql.FieldGTE(FieldCreatedAt, v))
}
|
||||||
|
|
||||||
|
// CreatedAtLT applies the LT predicate on the "created_at" field.
func CreatedAtLT(v time.Time) predicate.AccountGroup {
	return predicate.AccountGroup(sql.FieldLT(FieldCreatedAt, v))
}
|
||||||
|
|
||||||
|
// CreatedAtLTE applies the LTE predicate on the "created_at" field.
func CreatedAtLTE(v time.Time) predicate.AccountGroup {
	return predicate.AccountGroup(sql.FieldLTE(FieldCreatedAt, v))
}
|
||||||
|
|
||||||
|
// HasAccount applies the HasEdge predicate on the "account" edge.
func HasAccount() predicate.AccountGroup {
	return predicate.AccountGroup(func(s *sql.Selector) {
		step := sqlgraph.NewStep(
			sqlgraph.From(Table, AccountColumn),
			sqlgraph.Edge(sqlgraph.M2O, false, AccountTable, AccountColumn),
		)
		sqlgraph.HasNeighbors(s, step)
	})
}
|
||||||
|
|
||||||
|
// HasAccountWith applies the HasEdge predicate on the "account" edge with a given conditions (other predicates).
func HasAccountWith(preds ...predicate.Account) predicate.AccountGroup {
	return predicate.AccountGroup(func(s *sql.Selector) {
		step := newAccountStep()
		sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
			// Apply every supplied predicate to the neighbor selector.
			for _, p := range preds {
				p(s)
			}
		})
	})
}
|
||||||
|
|
||||||
|
// HasGroup applies the HasEdge predicate on the "group" edge.
func HasGroup() predicate.AccountGroup {
	return predicate.AccountGroup(func(s *sql.Selector) {
		step := sqlgraph.NewStep(
			sqlgraph.From(Table, GroupColumn),
			sqlgraph.Edge(sqlgraph.M2O, false, GroupTable, GroupColumn),
		)
		sqlgraph.HasNeighbors(s, step)
	})
}
|
||||||
|
|
||||||
|
// HasGroupWith applies the HasEdge predicate on the "group" edge with a given conditions (other predicates).
func HasGroupWith(preds ...predicate.Group) predicate.AccountGroup {
	return predicate.AccountGroup(func(s *sql.Selector) {
		step := newGroupStep()
		sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
			// Apply every supplied predicate to the neighbor selector.
			for _, p := range preds {
				p(s)
			}
		})
	})
}
|
||||||
|
|
||||||
|
// And groups predicates with the AND operator between them.
func And(predicates ...predicate.AccountGroup) predicate.AccountGroup {
	return predicate.AccountGroup(sql.AndPredicates(predicates...))
}
|
||||||
|
|
||||||
|
// Or groups predicates with the OR operator between them.
func Or(predicates ...predicate.AccountGroup) predicate.AccountGroup {
	return predicate.AccountGroup(sql.OrPredicates(predicates...))
}
|
||||||
|
|
||||||
|
// Not applies the not operator on the given predicate.
func Not(p predicate.AccountGroup) predicate.AccountGroup {
	return predicate.AccountGroup(sql.NotPredicates(p))
}
|
||||||
653
backend/ent/accountgroup_create.go
Normal file
@@ -0,0 +1,653 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package ent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
"entgo.io/ent/schema/field"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/account"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/accountgroup"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/group"
|
||||||
|
)
|
||||||
|
|
||||||
|
// AccountGroupCreate is the builder for creating a AccountGroup entity.
type AccountGroupCreate struct {
	config
	mutation *AccountGroupMutation
	hooks    []Hook
	// conflict holds the ON CONFLICT options for upsert support.
	conflict []sql.ConflictOption
}
|
||||||
|
|
||||||
|
// SetAccountID sets the "account_id" field.
func (_c *AccountGroupCreate) SetAccountID(v int64) *AccountGroupCreate {
	_c.mutation.SetAccountID(v)
	return _c
}
|
||||||
|
|
||||||
|
// SetGroupID sets the "group_id" field.
func (_c *AccountGroupCreate) SetGroupID(v int64) *AccountGroupCreate {
	_c.mutation.SetGroupID(v)
	return _c
}
|
||||||
|
|
||||||
|
// SetPriority sets the "priority" field.
func (_c *AccountGroupCreate) SetPriority(v int) *AccountGroupCreate {
	_c.mutation.SetPriority(v)
	return _c
}
|
||||||
|
|
||||||
|
// SetNillablePriority sets the "priority" field if the given value is not nil.
func (_c *AccountGroupCreate) SetNillablePriority(v *int) *AccountGroupCreate {
	if v != nil {
		_c.SetPriority(*v)
	}
	return _c
}
|
||||||
|
|
||||||
|
// SetCreatedAt sets the "created_at" field.
func (_c *AccountGroupCreate) SetCreatedAt(v time.Time) *AccountGroupCreate {
	_c.mutation.SetCreatedAt(v)
	return _c
}
|
||||||
|
|
||||||
|
// SetNillableCreatedAt sets the "created_at" field if the given value is not nil.
func (_c *AccountGroupCreate) SetNillableCreatedAt(v *time.Time) *AccountGroupCreate {
	if v != nil {
		_c.SetCreatedAt(*v)
	}
	return _c
}
|
||||||
|
|
||||||
|
// SetAccount sets the "account" edge to the Account entity.
func (_c *AccountGroupCreate) SetAccount(v *Account) *AccountGroupCreate {
	return _c.SetAccountID(v.ID)
}
|
||||||
|
|
||||||
|
// SetGroup sets the "group" edge to the Group entity.
func (_c *AccountGroupCreate) SetGroup(v *Group) *AccountGroupCreate {
	return _c.SetGroupID(v.ID)
}
|
||||||
|
|
||||||
|
// Mutation returns the AccountGroupMutation object of the builder.
func (_c *AccountGroupCreate) Mutation() *AccountGroupMutation {
	return _c.mutation
}
|
||||||
|
|
||||||
|
// Save creates the AccountGroup in the database.
func (_c *AccountGroupCreate) Save(ctx context.Context) (*AccountGroup, error) {
	// Populate schema defaults (priority, created_at) before running hooks.
	_c.defaults()
	return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks)
}
|
||||||
|
|
||||||
|
// SaveX calls Save and panics if Save returns an error.
func (_c *AccountGroupCreate) SaveX(ctx context.Context) *AccountGroup {
	v, err := _c.Save(ctx)
	if err != nil {
		panic(err)
	}
	return v
}
|
||||||
|
|
||||||
|
// Exec executes the query.
func (_c *AccountGroupCreate) Exec(ctx context.Context) error {
	_, err := _c.Save(ctx)
	return err
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
func (_c *AccountGroupCreate) ExecX(ctx context.Context) {
	if err := _c.Exec(ctx); err != nil {
		panic(err)
	}
}
|
||||||
|
|
||||||
|
// defaults sets the default values of the builder before save.
func (_c *AccountGroupCreate) defaults() {
	if _, ok := _c.mutation.Priority(); !ok {
		v := accountgroup.DefaultPriority
		_c.mutation.SetPriority(v)
	}
	if _, ok := _c.mutation.CreatedAt(); !ok {
		v := accountgroup.DefaultCreatedAt()
		_c.mutation.SetCreatedAt(v)
	}
}
|
||||||
|
|
||||||
|
// check runs all checks and user-defined validators on the builder.
func (_c *AccountGroupCreate) check() error {
	if _, ok := _c.mutation.AccountID(); !ok {
		return &ValidationError{Name: "account_id", err: errors.New(`ent: missing required field "AccountGroup.account_id"`)}
	}
	if _, ok := _c.mutation.GroupID(); !ok {
		return &ValidationError{Name: "group_id", err: errors.New(`ent: missing required field "AccountGroup.group_id"`)}
	}
	if _, ok := _c.mutation.Priority(); !ok {
		return &ValidationError{Name: "priority", err: errors.New(`ent: missing required field "AccountGroup.priority"`)}
	}
	if _, ok := _c.mutation.CreatedAt(); !ok {
		return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "AccountGroup.created_at"`)}
	}
	// Both edges are required since the FK columns are the composite key.
	if len(_c.mutation.AccountIDs()) == 0 {
		return &ValidationError{Name: "account", err: errors.New(`ent: missing required edge "AccountGroup.account"`)}
	}
	if len(_c.mutation.GroupIDs()) == 0 {
		return &ValidationError{Name: "group", err: errors.New(`ent: missing required edge "AccountGroup.group"`)}
	}
	return nil
}
|
||||||
|
|
||||||
|
func (_c *AccountGroupCreate) sqlSave(ctx context.Context) (*AccountGroup, error) {
|
||||||
|
if err := _c.check(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
_node, _spec := _c.createSpec()
|
||||||
|
if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil {
|
||||||
|
if sqlgraph.IsConstraintError(err) {
|
||||||
|
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||||
|
}
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return _node, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_c *AccountGroupCreate) createSpec() (*AccountGroup, *sqlgraph.CreateSpec) {
|
||||||
|
var (
|
||||||
|
_node = &AccountGroup{config: _c.config}
|
||||||
|
_spec = sqlgraph.NewCreateSpec(accountgroup.Table, nil)
|
||||||
|
)
|
||||||
|
_spec.OnConflict = _c.conflict
|
||||||
|
if value, ok := _c.mutation.Priority(); ok {
|
||||||
|
_spec.SetField(accountgroup.FieldPriority, field.TypeInt, value)
|
||||||
|
_node.Priority = value
|
||||||
|
}
|
||||||
|
if value, ok := _c.mutation.CreatedAt(); ok {
|
||||||
|
_spec.SetField(accountgroup.FieldCreatedAt, field.TypeTime, value)
|
||||||
|
_node.CreatedAt = value
|
||||||
|
}
|
||||||
|
if nodes := _c.mutation.AccountIDs(); len(nodes) > 0 {
|
||||||
|
edge := &sqlgraph.EdgeSpec{
|
||||||
|
Rel: sqlgraph.M2O,
|
||||||
|
Inverse: false,
|
||||||
|
Table: accountgroup.AccountTable,
|
||||||
|
Columns: []string{accountgroup.AccountColumn},
|
||||||
|
Bidi: false,
|
||||||
|
Target: &sqlgraph.EdgeTarget{
|
||||||
|
IDSpec: sqlgraph.NewFieldSpec(account.FieldID, field.TypeInt64),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, k := range nodes {
|
||||||
|
edge.Target.Nodes = append(edge.Target.Nodes, k)
|
||||||
|
}
|
||||||
|
_node.AccountID = nodes[0]
|
||||||
|
_spec.Edges = append(_spec.Edges, edge)
|
||||||
|
}
|
||||||
|
if nodes := _c.mutation.GroupIDs(); len(nodes) > 0 {
|
||||||
|
edge := &sqlgraph.EdgeSpec{
|
||||||
|
Rel: sqlgraph.M2O,
|
||||||
|
Inverse: false,
|
||||||
|
Table: accountgroup.GroupTable,
|
||||||
|
Columns: []string{accountgroup.GroupColumn},
|
||||||
|
Bidi: false,
|
||||||
|
Target: &sqlgraph.EdgeTarget{
|
||||||
|
IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, k := range nodes {
|
||||||
|
edge.Target.Nodes = append(edge.Target.Nodes, k)
|
||||||
|
}
|
||||||
|
_node.GroupID = nodes[0]
|
||||||
|
_spec.Edges = append(_spec.Edges, edge)
|
||||||
|
}
|
||||||
|
return _node, _spec
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause
|
||||||
|
// of the `INSERT` statement. For example:
|
||||||
|
//
|
||||||
|
// client.AccountGroup.Create().
|
||||||
|
// SetAccountID(v).
|
||||||
|
// OnConflict(
|
||||||
|
// // Update the row with the new values
|
||||||
|
// // the was proposed for insertion.
|
||||||
|
// sql.ResolveWithNewValues(),
|
||||||
|
// ).
|
||||||
|
// // Override some of the fields with custom
|
||||||
|
// // update values.
|
||||||
|
// Update(func(u *ent.AccountGroupUpsert) {
|
||||||
|
// SetAccountID(v+v).
|
||||||
|
// }).
|
||||||
|
// Exec(ctx)
|
||||||
|
func (_c *AccountGroupCreate) OnConflict(opts ...sql.ConflictOption) *AccountGroupUpsertOne {
|
||||||
|
_c.conflict = opts
|
||||||
|
return &AccountGroupUpsertOne{
|
||||||
|
create: _c,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnConflictColumns calls `OnConflict` and configures the columns
|
||||||
|
// as conflict target. Using this option is equivalent to using:
|
||||||
|
//
|
||||||
|
// client.AccountGroup.Create().
|
||||||
|
// OnConflict(sql.ConflictColumns(columns...)).
|
||||||
|
// Exec(ctx)
|
||||||
|
func (_c *AccountGroupCreate) OnConflictColumns(columns ...string) *AccountGroupUpsertOne {
|
||||||
|
_c.conflict = append(_c.conflict, sql.ConflictColumns(columns...))
|
||||||
|
return &AccountGroupUpsertOne{
|
||||||
|
create: _c,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type (
|
||||||
|
// AccountGroupUpsertOne is the builder for "upsert"-ing
|
||||||
|
// one AccountGroup node.
|
||||||
|
AccountGroupUpsertOne struct {
|
||||||
|
create *AccountGroupCreate
|
||||||
|
}
|
||||||
|
|
||||||
|
// AccountGroupUpsert is the "OnConflict" setter.
|
||||||
|
AccountGroupUpsert struct {
|
||||||
|
*sql.UpdateSet
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
// SetAccountID sets the "account_id" field.
|
||||||
|
func (u *AccountGroupUpsert) SetAccountID(v int64) *AccountGroupUpsert {
|
||||||
|
u.Set(accountgroup.FieldAccountID, v)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateAccountID sets the "account_id" field to the value that was provided on create.
|
||||||
|
func (u *AccountGroupUpsert) UpdateAccountID() *AccountGroupUpsert {
|
||||||
|
u.SetExcluded(accountgroup.FieldAccountID)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetGroupID sets the "group_id" field.
|
||||||
|
func (u *AccountGroupUpsert) SetGroupID(v int64) *AccountGroupUpsert {
|
||||||
|
u.Set(accountgroup.FieldGroupID, v)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateGroupID sets the "group_id" field to the value that was provided on create.
|
||||||
|
func (u *AccountGroupUpsert) UpdateGroupID() *AccountGroupUpsert {
|
||||||
|
u.SetExcluded(accountgroup.FieldGroupID)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetPriority sets the "priority" field.
|
||||||
|
func (u *AccountGroupUpsert) SetPriority(v int) *AccountGroupUpsert {
|
||||||
|
u.Set(accountgroup.FieldPriority, v)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatePriority sets the "priority" field to the value that was provided on create.
|
||||||
|
func (u *AccountGroupUpsert) UpdatePriority() *AccountGroupUpsert {
|
||||||
|
u.SetExcluded(accountgroup.FieldPriority)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddPriority adds v to the "priority" field.
|
||||||
|
func (u *AccountGroupUpsert) AddPriority(v int) *AccountGroupUpsert {
|
||||||
|
u.Add(accountgroup.FieldPriority, v)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateNewValues updates the mutable fields using the new values that were set on create.
|
||||||
|
// Using this option is equivalent to using:
|
||||||
|
//
|
||||||
|
// client.AccountGroup.Create().
|
||||||
|
// OnConflict(
|
||||||
|
// sql.ResolveWithNewValues(),
|
||||||
|
// ).
|
||||||
|
// Exec(ctx)
|
||||||
|
func (u *AccountGroupUpsertOne) UpdateNewValues() *AccountGroupUpsertOne {
|
||||||
|
u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues())
|
||||||
|
u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) {
|
||||||
|
if _, exists := u.create.mutation.CreatedAt(); exists {
|
||||||
|
s.SetIgnore(accountgroup.FieldCreatedAt)
|
||||||
|
}
|
||||||
|
}))
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ignore sets each column to itself in case of conflict.
|
||||||
|
// Using this option is equivalent to using:
|
||||||
|
//
|
||||||
|
// client.AccountGroup.Create().
|
||||||
|
// OnConflict(sql.ResolveWithIgnore()).
|
||||||
|
// Exec(ctx)
|
||||||
|
func (u *AccountGroupUpsertOne) Ignore() *AccountGroupUpsertOne {
|
||||||
|
u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore())
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// DoNothing configures the conflict_action to `DO NOTHING`.
|
||||||
|
// Supported only by SQLite and PostgreSQL.
|
||||||
|
func (u *AccountGroupUpsertOne) DoNothing() *AccountGroupUpsertOne {
|
||||||
|
u.create.conflict = append(u.create.conflict, sql.DoNothing())
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update allows overriding fields `UPDATE` values. See the AccountGroupCreate.OnConflict
|
||||||
|
// documentation for more info.
|
||||||
|
func (u *AccountGroupUpsertOne) Update(set func(*AccountGroupUpsert)) *AccountGroupUpsertOne {
|
||||||
|
u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) {
|
||||||
|
set(&AccountGroupUpsert{UpdateSet: update})
|
||||||
|
}))
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetAccountID sets the "account_id" field.
|
||||||
|
func (u *AccountGroupUpsertOne) SetAccountID(v int64) *AccountGroupUpsertOne {
|
||||||
|
return u.Update(func(s *AccountGroupUpsert) {
|
||||||
|
s.SetAccountID(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateAccountID sets the "account_id" field to the value that was provided on create.
|
||||||
|
func (u *AccountGroupUpsertOne) UpdateAccountID() *AccountGroupUpsertOne {
|
||||||
|
return u.Update(func(s *AccountGroupUpsert) {
|
||||||
|
s.UpdateAccountID()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetGroupID sets the "group_id" field.
|
||||||
|
func (u *AccountGroupUpsertOne) SetGroupID(v int64) *AccountGroupUpsertOne {
|
||||||
|
return u.Update(func(s *AccountGroupUpsert) {
|
||||||
|
s.SetGroupID(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateGroupID sets the "group_id" field to the value that was provided on create.
|
||||||
|
func (u *AccountGroupUpsertOne) UpdateGroupID() *AccountGroupUpsertOne {
|
||||||
|
return u.Update(func(s *AccountGroupUpsert) {
|
||||||
|
s.UpdateGroupID()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetPriority sets the "priority" field.
|
||||||
|
func (u *AccountGroupUpsertOne) SetPriority(v int) *AccountGroupUpsertOne {
|
||||||
|
return u.Update(func(s *AccountGroupUpsert) {
|
||||||
|
s.SetPriority(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddPriority adds v to the "priority" field.
|
||||||
|
func (u *AccountGroupUpsertOne) AddPriority(v int) *AccountGroupUpsertOne {
|
||||||
|
return u.Update(func(s *AccountGroupUpsert) {
|
||||||
|
s.AddPriority(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatePriority sets the "priority" field to the value that was provided on create.
|
||||||
|
func (u *AccountGroupUpsertOne) UpdatePriority() *AccountGroupUpsertOne {
|
||||||
|
return u.Update(func(s *AccountGroupUpsert) {
|
||||||
|
s.UpdatePriority()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exec executes the query.
|
||||||
|
func (u *AccountGroupUpsertOne) Exec(ctx context.Context) error {
|
||||||
|
if len(u.create.conflict) == 0 {
|
||||||
|
return errors.New("ent: missing options for AccountGroupCreate.OnConflict")
|
||||||
|
}
|
||||||
|
return u.create.Exec(ctx)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
|
||||||
|
func (u *AccountGroupUpsertOne) ExecX(ctx context.Context) {
|
||||||
|
if err := u.create.Exec(ctx); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// AccountGroupCreateBulk is the builder for creating many AccountGroup entities in bulk.
|
||||||
|
type AccountGroupCreateBulk struct {
|
||||||
|
config
|
||||||
|
err error
|
||||||
|
builders []*AccountGroupCreate
|
||||||
|
conflict []sql.ConflictOption
|
||||||
|
}
|
||||||
|
|
||||||
|
// Save creates the AccountGroup entities in the database.
|
||||||
|
func (_c *AccountGroupCreateBulk) Save(ctx context.Context) ([]*AccountGroup, error) {
|
||||||
|
if _c.err != nil {
|
||||||
|
return nil, _c.err
|
||||||
|
}
|
||||||
|
specs := make([]*sqlgraph.CreateSpec, len(_c.builders))
|
||||||
|
nodes := make([]*AccountGroup, len(_c.builders))
|
||||||
|
mutators := make([]Mutator, len(_c.builders))
|
||||||
|
for i := range _c.builders {
|
||||||
|
func(i int, root context.Context) {
|
||||||
|
builder := _c.builders[i]
|
||||||
|
builder.defaults()
|
||||||
|
var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
|
||||||
|
mutation, ok := m.(*AccountGroupMutation)
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("unexpected mutation type %T", m)
|
||||||
|
}
|
||||||
|
if err := builder.check(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
builder.mutation = mutation
|
||||||
|
var err error
|
||||||
|
nodes[i], specs[i] = builder.createSpec()
|
||||||
|
if i < len(mutators)-1 {
|
||||||
|
_, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation)
|
||||||
|
} else {
|
||||||
|
spec := &sqlgraph.BatchCreateSpec{Nodes: specs}
|
||||||
|
spec.OnConflict = _c.conflict
|
||||||
|
// Invoke the actual operation on the latest mutation in the chain.
|
||||||
|
if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil {
|
||||||
|
if sqlgraph.IsConstraintError(err) {
|
||||||
|
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
mutation.done = true
|
||||||
|
return nodes[i], nil
|
||||||
|
})
|
||||||
|
for i := len(builder.hooks) - 1; i >= 0; i-- {
|
||||||
|
mut = builder.hooks[i](mut)
|
||||||
|
}
|
||||||
|
mutators[i] = mut
|
||||||
|
}(i, ctx)
|
||||||
|
}
|
||||||
|
if len(mutators) > 0 {
|
||||||
|
if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nodes, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// SaveX is like Save, but panics if an error occurs.
|
||||||
|
func (_c *AccountGroupCreateBulk) SaveX(ctx context.Context) []*AccountGroup {
|
||||||
|
v, err := _c.Save(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exec executes the query.
|
||||||
|
func (_c *AccountGroupCreateBulk) Exec(ctx context.Context) error {
|
||||||
|
_, err := _c.Save(ctx)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
|
||||||
|
func (_c *AccountGroupCreateBulk) ExecX(ctx context.Context) {
|
||||||
|
if err := _c.Exec(ctx); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause
|
||||||
|
// of the `INSERT` statement. For example:
|
||||||
|
//
|
||||||
|
// client.AccountGroup.CreateBulk(builders...).
|
||||||
|
// OnConflict(
|
||||||
|
// // Update the row with the new values
|
||||||
|
// // the was proposed for insertion.
|
||||||
|
// sql.ResolveWithNewValues(),
|
||||||
|
// ).
|
||||||
|
// // Override some of the fields with custom
|
||||||
|
// // update values.
|
||||||
|
// Update(func(u *ent.AccountGroupUpsert) {
|
||||||
|
// SetAccountID(v+v).
|
||||||
|
// }).
|
||||||
|
// Exec(ctx)
|
||||||
|
func (_c *AccountGroupCreateBulk) OnConflict(opts ...sql.ConflictOption) *AccountGroupUpsertBulk {
|
||||||
|
_c.conflict = opts
|
||||||
|
return &AccountGroupUpsertBulk{
|
||||||
|
create: _c,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnConflictColumns calls `OnConflict` and configures the columns
|
||||||
|
// as conflict target. Using this option is equivalent to using:
|
||||||
|
//
|
||||||
|
// client.AccountGroup.Create().
|
||||||
|
// OnConflict(sql.ConflictColumns(columns...)).
|
||||||
|
// Exec(ctx)
|
||||||
|
func (_c *AccountGroupCreateBulk) OnConflictColumns(columns ...string) *AccountGroupUpsertBulk {
|
||||||
|
_c.conflict = append(_c.conflict, sql.ConflictColumns(columns...))
|
||||||
|
return &AccountGroupUpsertBulk{
|
||||||
|
create: _c,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// AccountGroupUpsertBulk is the builder for "upsert"-ing
|
||||||
|
// a bulk of AccountGroup nodes.
|
||||||
|
type AccountGroupUpsertBulk struct {
|
||||||
|
create *AccountGroupCreateBulk
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateNewValues updates the mutable fields using the new values that
|
||||||
|
// were set on create. Using this option is equivalent to using:
|
||||||
|
//
|
||||||
|
// client.AccountGroup.Create().
|
||||||
|
// OnConflict(
|
||||||
|
// sql.ResolveWithNewValues(),
|
||||||
|
// ).
|
||||||
|
// Exec(ctx)
|
||||||
|
func (u *AccountGroupUpsertBulk) UpdateNewValues() *AccountGroupUpsertBulk {
|
||||||
|
u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues())
|
||||||
|
u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) {
|
||||||
|
for _, b := range u.create.builders {
|
||||||
|
if _, exists := b.mutation.CreatedAt(); exists {
|
||||||
|
s.SetIgnore(accountgroup.FieldCreatedAt)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}))
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ignore sets each column to itself in case of conflict.
|
||||||
|
// Using this option is equivalent to using:
|
||||||
|
//
|
||||||
|
// client.AccountGroup.Create().
|
||||||
|
// OnConflict(sql.ResolveWithIgnore()).
|
||||||
|
// Exec(ctx)
|
||||||
|
func (u *AccountGroupUpsertBulk) Ignore() *AccountGroupUpsertBulk {
|
||||||
|
u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore())
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// DoNothing configures the conflict_action to `DO NOTHING`.
|
||||||
|
// Supported only by SQLite and PostgreSQL.
|
||||||
|
func (u *AccountGroupUpsertBulk) DoNothing() *AccountGroupUpsertBulk {
|
||||||
|
u.create.conflict = append(u.create.conflict, sql.DoNothing())
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update allows overriding fields `UPDATE` values. See the AccountGroupCreateBulk.OnConflict
|
||||||
|
// documentation for more info.
|
||||||
|
func (u *AccountGroupUpsertBulk) Update(set func(*AccountGroupUpsert)) *AccountGroupUpsertBulk {
|
||||||
|
u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) {
|
||||||
|
set(&AccountGroupUpsert{UpdateSet: update})
|
||||||
|
}))
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetAccountID sets the "account_id" field.
|
||||||
|
func (u *AccountGroupUpsertBulk) SetAccountID(v int64) *AccountGroupUpsertBulk {
|
||||||
|
return u.Update(func(s *AccountGroupUpsert) {
|
||||||
|
s.SetAccountID(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateAccountID sets the "account_id" field to the value that was provided on create.
|
||||||
|
func (u *AccountGroupUpsertBulk) UpdateAccountID() *AccountGroupUpsertBulk {
|
||||||
|
return u.Update(func(s *AccountGroupUpsert) {
|
||||||
|
s.UpdateAccountID()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetGroupID sets the "group_id" field.
|
||||||
|
func (u *AccountGroupUpsertBulk) SetGroupID(v int64) *AccountGroupUpsertBulk {
|
||||||
|
return u.Update(func(s *AccountGroupUpsert) {
|
||||||
|
s.SetGroupID(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateGroupID sets the "group_id" field to the value that was provided on create.
|
||||||
|
func (u *AccountGroupUpsertBulk) UpdateGroupID() *AccountGroupUpsertBulk {
|
||||||
|
return u.Update(func(s *AccountGroupUpsert) {
|
||||||
|
s.UpdateGroupID()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetPriority sets the "priority" field.
|
||||||
|
func (u *AccountGroupUpsertBulk) SetPriority(v int) *AccountGroupUpsertBulk {
|
||||||
|
return u.Update(func(s *AccountGroupUpsert) {
|
||||||
|
s.SetPriority(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddPriority adds v to the "priority" field.
|
||||||
|
func (u *AccountGroupUpsertBulk) AddPriority(v int) *AccountGroupUpsertBulk {
|
||||||
|
return u.Update(func(s *AccountGroupUpsert) {
|
||||||
|
s.AddPriority(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatePriority sets the "priority" field to the value that was provided on create.
|
||||||
|
func (u *AccountGroupUpsertBulk) UpdatePriority() *AccountGroupUpsertBulk {
|
||||||
|
return u.Update(func(s *AccountGroupUpsert) {
|
||||||
|
s.UpdatePriority()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exec executes the query.
|
||||||
|
func (u *AccountGroupUpsertBulk) Exec(ctx context.Context) error {
|
||||||
|
if u.create.err != nil {
|
||||||
|
return u.create.err
|
||||||
|
}
|
||||||
|
for i, b := range u.create.builders {
|
||||||
|
if len(b.conflict) != 0 {
|
||||||
|
return fmt.Errorf("ent: OnConflict was set for builder %d. Set it on the AccountGroupCreateBulk instead", i)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if len(u.create.conflict) == 0 {
|
||||||
|
return errors.New("ent: missing options for AccountGroupCreateBulk.OnConflict")
|
||||||
|
}
|
||||||
|
return u.create.Exec(ctx)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
|
||||||
|
func (u *AccountGroupUpsertBulk) ExecX(ctx context.Context) {
|
||||||
|
if err := u.create.Exec(ctx); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
87
backend/ent/accountgroup_delete.go
Normal file
@@ -0,0 +1,87 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package ent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/accountgroup"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||||
|
)
|
||||||
|
|
||||||
|
// AccountGroupDelete is the builder for deleting a AccountGroup entity.
|
||||||
|
type AccountGroupDelete struct {
|
||||||
|
config
|
||||||
|
hooks []Hook
|
||||||
|
mutation *AccountGroupMutation
|
||||||
|
}
|
||||||
|
|
||||||
|
// Where appends a list predicates to the AccountGroupDelete builder.
|
||||||
|
func (_d *AccountGroupDelete) Where(ps ...predicate.AccountGroup) *AccountGroupDelete {
|
||||||
|
_d.mutation.Where(ps...)
|
||||||
|
return _d
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exec executes the deletion query and returns how many vertices were deleted.
|
||||||
|
func (_d *AccountGroupDelete) Exec(ctx context.Context) (int, error) {
|
||||||
|
return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
|
||||||
|
func (_d *AccountGroupDelete) ExecX(ctx context.Context) int {
|
||||||
|
n, err := _d.Exec(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_d *AccountGroupDelete) sqlExec(ctx context.Context) (int, error) {
|
||||||
|
_spec := sqlgraph.NewDeleteSpec(accountgroup.Table, nil)
|
||||||
|
if ps := _d.mutation.predicates; len(ps) > 0 {
|
||||||
|
_spec.Predicate = func(selector *sql.Selector) {
|
||||||
|
for i := range ps {
|
||||||
|
ps[i](selector)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec)
|
||||||
|
if err != nil && sqlgraph.IsConstraintError(err) {
|
||||||
|
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||||
|
}
|
||||||
|
_d.mutation.done = true
|
||||||
|
return affected, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// AccountGroupDeleteOne is the builder for deleting a single AccountGroup entity.
|
||||||
|
type AccountGroupDeleteOne struct {
|
||||||
|
_d *AccountGroupDelete
|
||||||
|
}
|
||||||
|
|
||||||
|
// Where appends a list predicates to the AccountGroupDelete builder.
|
||||||
|
func (_d *AccountGroupDeleteOne) Where(ps ...predicate.AccountGroup) *AccountGroupDeleteOne {
|
||||||
|
_d._d.mutation.Where(ps...)
|
||||||
|
return _d
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exec executes the deletion query.
|
||||||
|
func (_d *AccountGroupDeleteOne) Exec(ctx context.Context) error {
|
||||||
|
n, err := _d._d.Exec(ctx)
|
||||||
|
switch {
|
||||||
|
case err != nil:
|
||||||
|
return err
|
||||||
|
case n == 0:
|
||||||
|
return &NotFoundError{accountgroup.Label}
|
||||||
|
default:
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
|
||||||
|
func (_d *AccountGroupDeleteOne) ExecX(ctx context.Context) {
|
||||||
|
if err := _d.Exec(ctx); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
640
backend/ent/accountgroup_query.go
Normal file
@@ -0,0 +1,640 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package ent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"math"
|
||||||
|
|
||||||
|
"entgo.io/ent"
|
||||||
|
"entgo.io/ent/dialect"
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/account"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/accountgroup"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/group"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||||
|
)
|
||||||
|
|
||||||
|
// AccountGroupQuery is the builder for querying AccountGroup entities.
|
||||||
|
type AccountGroupQuery struct {
|
||||||
|
config
|
||||||
|
ctx *QueryContext
|
||||||
|
order []accountgroup.OrderOption
|
||||||
|
inters []Interceptor
|
||||||
|
predicates []predicate.AccountGroup
|
||||||
|
withAccount *AccountQuery
|
||||||
|
withGroup *GroupQuery
|
||||||
|
modifiers []func(*sql.Selector)
|
||||||
|
// intermediate query (i.e. traversal path).
|
||||||
|
sql *sql.Selector
|
||||||
|
path func(context.Context) (*sql.Selector, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Where adds a new predicate for the AccountGroupQuery builder.
|
||||||
|
func (_q *AccountGroupQuery) Where(ps ...predicate.AccountGroup) *AccountGroupQuery {
|
||||||
|
_q.predicates = append(_q.predicates, ps...)
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// Limit the number of records to be returned by this query.
|
||||||
|
func (_q *AccountGroupQuery) Limit(limit int) *AccountGroupQuery {
|
||||||
|
_q.ctx.Limit = &limit
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// Offset to start from.
|
||||||
|
func (_q *AccountGroupQuery) Offset(offset int) *AccountGroupQuery {
|
||||||
|
_q.ctx.Offset = &offset
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unique configures the query builder to filter duplicate records on query.
|
||||||
|
// By default, unique is set to true, and can be disabled using this method.
|
||||||
|
func (_q *AccountGroupQuery) Unique(unique bool) *AccountGroupQuery {
|
||||||
|
_q.ctx.Unique = &unique
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// Order specifies how the records should be ordered.
|
||||||
|
func (_q *AccountGroupQuery) Order(o ...accountgroup.OrderOption) *AccountGroupQuery {
|
||||||
|
_q.order = append(_q.order, o...)
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// QueryAccount chains the current query on the "account" edge.
|
||||||
|
func (_q *AccountGroupQuery) QueryAccount() *AccountQuery {
|
||||||
|
query := (&AccountClient{config: _q.config}).Query()
|
||||||
|
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
|
||||||
|
if err := _q.prepareQuery(ctx); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
selector := _q.sqlQuery(ctx)
|
||||||
|
if err := selector.Err(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
step := sqlgraph.NewStep(
|
||||||
|
sqlgraph.From(accountgroup.Table, accountgroup.AccountColumn, selector),
|
||||||
|
sqlgraph.To(account.Table, account.FieldID),
|
||||||
|
sqlgraph.Edge(sqlgraph.M2O, false, accountgroup.AccountTable, accountgroup.AccountColumn),
|
||||||
|
)
|
||||||
|
fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step)
|
||||||
|
return fromU, nil
|
||||||
|
}
|
||||||
|
return query
|
||||||
|
}
|
||||||
|
|
||||||
|
// QueryGroup chains the current query on the "group" edge.
|
||||||
|
func (_q *AccountGroupQuery) QueryGroup() *GroupQuery {
|
||||||
|
query := (&GroupClient{config: _q.config}).Query()
|
||||||
|
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
|
||||||
|
if err := _q.prepareQuery(ctx); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
selector := _q.sqlQuery(ctx)
|
||||||
|
if err := selector.Err(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
step := sqlgraph.NewStep(
|
||||||
|
sqlgraph.From(accountgroup.Table, accountgroup.GroupColumn, selector),
|
||||||
|
sqlgraph.To(group.Table, group.FieldID),
|
||||||
|
sqlgraph.Edge(sqlgraph.M2O, false, accountgroup.GroupTable, accountgroup.GroupColumn),
|
||||||
|
)
|
||||||
|
fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step)
|
||||||
|
return fromU, nil
|
||||||
|
}
|
||||||
|
return query
|
||||||
|
}
|
||||||
|
|
||||||
|
// First returns the first AccountGroup entity from the query.
|
||||||
|
// Returns a *NotFoundError when no AccountGroup was found.
|
||||||
|
func (_q *AccountGroupQuery) First(ctx context.Context) (*AccountGroup, error) {
|
||||||
|
nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if len(nodes) == 0 {
|
||||||
|
return nil, &NotFoundError{accountgroup.Label}
|
||||||
|
}
|
||||||
|
return nodes[0], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// FirstX is like First, but panics if an error occurs.
func (_q *AccountGroupQuery) FirstX(ctx context.Context) *AccountGroup {
	node, err := _q.First(ctx)
	// NotFound is deliberately swallowed: FirstX returns nil when no entity matches.
	if err != nil && !IsNotFound(err) {
		panic(err)
	}
	return node
}

// Only returns a single AccountGroup entity found by the query, ensuring it only returns one.
// Returns a *NotSingularError when more than one AccountGroup entity is found.
// Returns a *NotFoundError when no AccountGroup entities are found.
func (_q *AccountGroupQuery) Only(ctx context.Context) (*AccountGroup, error) {
	// Limit(2) is enough to distinguish "exactly one" from "more than one".
	nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly))
	if err != nil {
		return nil, err
	}
	switch len(nodes) {
	case 1:
		return nodes[0], nil
	case 0:
		return nil, &NotFoundError{accountgroup.Label}
	default:
		return nil, &NotSingularError{accountgroup.Label}
	}
}

// OnlyX is like Only, but panics if an error occurs.
func (_q *AccountGroupQuery) OnlyX(ctx context.Context) *AccountGroup {
	node, err := _q.Only(ctx)
	if err != nil {
		panic(err)
	}
	return node
}

// All executes the query and returns a list of AccountGroups.
func (_q *AccountGroupQuery) All(ctx context.Context) ([]*AccountGroup, error) {
	ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll)
	if err := _q.prepareQuery(ctx); err != nil {
		return nil, err
	}
	// Wrap the raw querier so registered interceptors run around the query.
	qr := querierAll[[]*AccountGroup, *AccountGroupQuery]()
	return withInterceptors[[]*AccountGroup](ctx, _q, qr, _q.inters)
}

// AllX is like All, but panics if an error occurs.
func (_q *AccountGroupQuery) AllX(ctx context.Context) []*AccountGroup {
	nodes, err := _q.All(ctx)
	if err != nil {
		panic(err)
	}
	return nodes
}

// Count returns the count of the given query.
func (_q *AccountGroupQuery) Count(ctx context.Context) (int, error) {
	ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount)
	if err := _q.prepareQuery(ctx); err != nil {
		return 0, err
	}
	return withInterceptors[int](ctx, _q, querierCount[*AccountGroupQuery](), _q.inters)
}

// CountX is like Count, but panics if an error occurs.
func (_q *AccountGroupQuery) CountX(ctx context.Context) int {
	count, err := _q.Count(ctx)
	if err != nil {
		panic(err)
	}
	return count
}

// Exist returns true if the query has elements in the graph.
func (_q *AccountGroupQuery) Exist(ctx context.Context) (bool, error) {
	ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist)
	// Existence is implemented as First: NotFound means false, any other error is propagated.
	switch _, err := _q.First(ctx); {
	case IsNotFound(err):
		return false, nil
	case err != nil:
		return false, fmt.Errorf("ent: check existence: %w", err)
	default:
		return true, nil
	}
}

// ExistX is like Exist, but panics if an error occurs.
func (_q *AccountGroupQuery) ExistX(ctx context.Context) bool {
	exist, err := _q.Exist(ctx)
	if err != nil {
		panic(err)
	}
	return exist
}
|
||||||
|
|
||||||
|
// Clone returns a duplicate of the AccountGroupQuery builder, including all associated steps. It can be
// used to prepare common query builders and use them differently after the clone is made.
func (_q *AccountGroupQuery) Clone() *AccountGroupQuery {
	if _q == nil {
		return nil
	}
	// Slices are copied via append so later mutations of the clone do not
	// affect the original builder (and vice versa).
	return &AccountGroupQuery{
		config:      _q.config,
		ctx:         _q.ctx.Clone(),
		order:       append([]accountgroup.OrderOption{}, _q.order...),
		inters:      append([]Interceptor{}, _q.inters...),
		predicates:  append([]predicate.AccountGroup{}, _q.predicates...),
		withAccount: _q.withAccount.Clone(),
		withGroup:   _q.withGroup.Clone(),
		// clone intermediate query.
		sql:  _q.sql.Clone(),
		path: _q.path,
	}
}

// WithAccount tells the query-builder to eager-load the nodes that are connected to
// the "account" edge. The optional arguments are used to configure the query builder of the edge.
func (_q *AccountGroupQuery) WithAccount(opts ...func(*AccountQuery)) *AccountGroupQuery {
	query := (&AccountClient{config: _q.config}).Query()
	for _, opt := range opts {
		opt(query)
	}
	_q.withAccount = query
	return _q
}

// WithGroup tells the query-builder to eager-load the nodes that are connected to
// the "group" edge. The optional arguments are used to configure the query builder of the edge.
func (_q *AccountGroupQuery) WithGroup(opts ...func(*GroupQuery)) *AccountGroupQuery {
	query := (&GroupClient{config: _q.config}).Query()
	for _, opt := range opts {
		opt(query)
	}
	_q.withGroup = query
	return _q
}
|
||||||
|
|
||||||
|
// GroupBy is used to group vertices by one or more fields/columns.
// It is often used with aggregate functions, like: count, max, mean, min, sum.
//
// Example:
//
//	var v []struct {
//		AccountID int64 `json:"account_id,omitempty"`
//		Count int `json:"count,omitempty"`
//	}
//
//	client.AccountGroup.Query().
//		GroupBy(accountgroup.FieldAccountID).
//		Aggregate(ent.Count()).
//		Scan(ctx, &v)
func (_q *AccountGroupQuery) GroupBy(field string, fields ...string) *AccountGroupGroupBy {
	_q.ctx.Fields = append([]string{field}, fields...)
	grbuild := &AccountGroupGroupBy{build: _q}
	// The group-by builder shares the query's field list by pointer, so later
	// changes on either side stay in sync.
	grbuild.flds = &_q.ctx.Fields
	grbuild.label = accountgroup.Label
	grbuild.scan = grbuild.Scan
	return grbuild
}

// Select allows the selection one or more fields/columns for the given query,
// instead of selecting all fields in the entity.
//
// Example:
//
//	var v []struct {
//		AccountID int64 `json:"account_id,omitempty"`
//	}
//
//	client.AccountGroup.Query().
//		Select(accountgroup.FieldAccountID).
//		Scan(ctx, &v)
func (_q *AccountGroupQuery) Select(fields ...string) *AccountGroupSelect {
	_q.ctx.Fields = append(_q.ctx.Fields, fields...)
	sbuild := &AccountGroupSelect{AccountGroupQuery: _q}
	sbuild.label = accountgroup.Label
	sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan
	return sbuild
}

// Aggregate returns a AccountGroupSelect configured with the given aggregations.
func (_q *AccountGroupQuery) Aggregate(fns ...AggregateFunc) *AccountGroupSelect {
	return _q.Select().Aggregate(fns...)
}
|
||||||
|
|
||||||
|
// prepareQuery runs interceptor traversals, validates the selected columns,
// and resolves the lazily-built SQL path before the query executes.
func (_q *AccountGroupQuery) prepareQuery(ctx context.Context) error {
	for _, inter := range _q.inters {
		if inter == nil {
			return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
		}
		if trv, ok := inter.(Traverser); ok {
			if err := trv.Traverse(ctx, _q); err != nil {
				return err
			}
		}
	}
	// Reject column names that are not part of the AccountGroup schema.
	for _, f := range _q.ctx.Fields {
		if !accountgroup.ValidColumn(f) {
			return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
		}
	}
	// path is set by graph traversals; evaluate it now to obtain the base selector.
	if _q.path != nil {
		prev, err := _q.path(ctx)
		if err != nil {
			return err
		}
		_q.sql = prev
	}
	return nil
}
|
||||||
|
|
||||||
|
// sqlAll scans all matching rows into AccountGroup entities and then
// eager-loads the requested "account"/"group" edges.
func (_q *AccountGroupQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*AccountGroup, error) {
	var (
		nodes = []*AccountGroup{}
		_spec = _q.querySpec()
		// loadedTypes records which edges were requested, so Edges accessors
		// can distinguish "not loaded" from "loaded but empty".
		loadedTypes = [2]bool{
			_q.withAccount != nil,
			_q.withGroup != nil,
		}
	)
	_spec.ScanValues = func(columns []string) ([]any, error) {
		return (*AccountGroup).scanValues(nil, columns)
	}
	_spec.Assign = func(columns []string, values []any) error {
		node := &AccountGroup{config: _q.config}
		nodes = append(nodes, node)
		node.Edges.loadedTypes = loadedTypes
		return node.assignValues(columns, values)
	}
	if len(_q.modifiers) > 0 {
		_spec.Modifiers = _q.modifiers
	}
	for i := range hooks {
		hooks[i](ctx, _spec)
	}
	if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil {
		return nil, err
	}
	if len(nodes) == 0 {
		return nodes, nil
	}
	if query := _q.withAccount; query != nil {
		if err := _q.loadAccount(ctx, query, nodes, nil,
			func(n *AccountGroup, e *Account) { n.Edges.Account = e }); err != nil {
			return nil, err
		}
	}
	if query := _q.withGroup; query != nil {
		if err := _q.loadGroup(ctx, query, nodes, nil,
			func(n *AccountGroup, e *Group) { n.Edges.Group = e }); err != nil {
			return nil, err
		}
	}
	return nodes, nil
}
|
||||||
|
|
||||||
|
// loadAccount eager-loads the "account" edge: it groups nodes by their
// AccountID foreign key, fetches the referenced accounts in one query, and
// assigns each account back to every node that references it.
func (_q *AccountGroupQuery) loadAccount(ctx context.Context, query *AccountQuery, nodes []*AccountGroup, init func(*AccountGroup), assign func(*AccountGroup, *Account)) error {
	ids := make([]int64, 0, len(nodes))
	nodeids := make(map[int64][]*AccountGroup)
	for i := range nodes {
		fk := nodes[i].AccountID
		// Deduplicate foreign keys while preserving first-seen order.
		if _, ok := nodeids[fk]; !ok {
			ids = append(ids, fk)
		}
		nodeids[fk] = append(nodeids[fk], nodes[i])
	}
	if len(ids) == 0 {
		return nil
	}
	query.Where(account.IDIn(ids...))
	neighbors, err := query.All(ctx)
	if err != nil {
		return err
	}
	for _, n := range neighbors {
		nodes, ok := nodeids[n.ID]
		if !ok {
			return fmt.Errorf(`unexpected foreign-key "account_id" returned %v`, n.ID)
		}
		for i := range nodes {
			assign(nodes[i], n)
		}
	}
	return nil
}

// loadGroup eager-loads the "group" edge; same strategy as loadAccount but
// keyed on the GroupID foreign key.
func (_q *AccountGroupQuery) loadGroup(ctx context.Context, query *GroupQuery, nodes []*AccountGroup, init func(*AccountGroup), assign func(*AccountGroup, *Group)) error {
	ids := make([]int64, 0, len(nodes))
	nodeids := make(map[int64][]*AccountGroup)
	for i := range nodes {
		fk := nodes[i].GroupID
		if _, ok := nodeids[fk]; !ok {
			ids = append(ids, fk)
		}
		nodeids[fk] = append(nodeids[fk], nodes[i])
	}
	if len(ids) == 0 {
		return nil
	}
	query.Where(group.IDIn(ids...))
	neighbors, err := query.All(ctx)
	if err != nil {
		return err
	}
	for _, n := range neighbors {
		nodes, ok := nodeids[n.ID]
		if !ok {
			return fmt.Errorf(`unexpected foreign-key "group_id" returned %v`, n.ID)
		}
		for i := range nodes {
			assign(nodes[i], n)
		}
	}
	return nil
}
|
||||||
|
|
||||||
|
// sqlCount executes the query as a COUNT over the matching rows.
func (_q *AccountGroupQuery) sqlCount(ctx context.Context) (int, error) {
	_spec := _q.querySpec()
	if len(_q.modifiers) > 0 {
		_spec.Modifiers = _q.modifiers
	}
	// Counting neither deduplicates nor selects columns.
	_spec.Unique = false
	_spec.Node.Columns = nil
	return sqlgraph.CountNodes(ctx, _q.driver, _spec)
}

// querySpec translates the builder state (fields, predicates, limit/offset,
// ordering) into a sqlgraph.QuerySpec for execution.
func (_q *AccountGroupQuery) querySpec() *sqlgraph.QuerySpec {
	_spec := sqlgraph.NewQuerySpec(accountgroup.Table, accountgroup.Columns, nil)
	_spec.From = _q.sql
	if unique := _q.ctx.Unique; unique != nil {
		_spec.Unique = *unique
	} else if _q.path != nil {
		// Graph traversals may produce duplicate rows; deduplicate by default.
		_spec.Unique = true
	}
	if fields := _q.ctx.Fields; len(fields) > 0 {
		_spec.Node.Columns = make([]string, 0, len(fields))
		for i := range fields {
			_spec.Node.Columns = append(_spec.Node.Columns, fields[i])
		}
		// Eager-loading needs the FK columns even when not explicitly selected.
		if _q.withAccount != nil {
			_spec.Node.AddColumnOnce(accountgroup.FieldAccountID)
		}
		if _q.withGroup != nil {
			_spec.Node.AddColumnOnce(accountgroup.FieldGroupID)
		}
	}
	if ps := _q.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	if limit := _q.ctx.Limit; limit != nil {
		_spec.Limit = *limit
	}
	if offset := _q.ctx.Offset; offset != nil {
		_spec.Offset = *offset
	}
	if ps := _q.order; len(ps) > 0 {
		_spec.Order = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	return _spec
}
|
||||||
|
|
||||||
|
// sqlQuery builds the raw SQL selector for this query, applying columns,
// modifiers, predicates, ordering and limit/offset in that order.
func (_q *AccountGroupQuery) sqlQuery(ctx context.Context) *sql.Selector {
	builder := sql.Dialect(_q.driver.Dialect())
	t1 := builder.Table(accountgroup.Table)
	columns := _q.ctx.Fields
	if len(columns) == 0 {
		columns = accountgroup.Columns
	}
	selector := builder.Select(t1.Columns(columns...)...).From(t1)
	if _q.sql != nil {
		// Reuse the selector produced by a previous traversal step.
		selector = _q.sql
		selector.Select(selector.Columns(columns...)...)
	}
	if _q.ctx.Unique != nil && *_q.ctx.Unique {
		selector.Distinct()
	}
	for _, m := range _q.modifiers {
		m(selector)
	}
	for _, p := range _q.predicates {
		p(selector)
	}
	for _, p := range _q.order {
		p(selector)
	}
	if offset := _q.ctx.Offset; offset != nil {
		// limit is mandatory for offset clause. We start
		// with default value, and override it below if needed.
		selector.Offset(*offset).Limit(math.MaxInt32)
	}
	if limit := _q.ctx.Limit; limit != nil {
		selector.Limit(*limit)
	}
	return selector
}
|
||||||
|
|
||||||
|
// ForUpdate locks the selected rows against concurrent updates, and prevent them from being
// updated, deleted or "selected ... for update" by other sessions, until the transaction is
// either committed or rolled-back.
func (_q *AccountGroupQuery) ForUpdate(opts ...sql.LockOption) *AccountGroupQuery {
	if _q.driver.Dialect() == dialect.Postgres {
		// Postgres disallows FOR UPDATE together with SELECT DISTINCT,
		// so uniqueness is disabled for this query.
		_q.Unique(false)
	}
	_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
		s.ForUpdate(opts...)
	})
	return _q
}

// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock
// on any rows that are read. Other sessions can read the rows, but cannot modify them
// until your transaction commits.
func (_q *AccountGroupQuery) ForShare(opts ...sql.LockOption) *AccountGroupQuery {
	if _q.driver.Dialect() == dialect.Postgres {
		// Same Postgres restriction as ForUpdate: locking clauses and
		// DISTINCT cannot be combined.
		_q.Unique(false)
	}
	_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
		s.ForShare(opts...)
	})
	return _q
}
|
||||||
|
|
||||||
|
// AccountGroupGroupBy is the group-by builder for AccountGroup entities.
type AccountGroupGroupBy struct {
	selector
	// build is the underlying query whose rows are grouped.
	build *AccountGroupQuery
}

// Aggregate adds the given aggregation functions to the group-by query.
func (_g *AccountGroupGroupBy) Aggregate(fns ...AggregateFunc) *AccountGroupGroupBy {
	_g.fns = append(_g.fns, fns...)
	return _g
}

// Scan applies the selector query and scans the result into the given value.
func (_g *AccountGroupGroupBy) Scan(ctx context.Context, v any) error {
	ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy)
	if err := _g.build.prepareQuery(ctx); err != nil {
		return err
	}
	return scanWithInterceptors[*AccountGroupQuery, *AccountGroupGroupBy](ctx, _g.build, _g, _g.build.inters, v)
}

// sqlScan executes the grouped query and scans all rows into v.
func (_g *AccountGroupGroupBy) sqlScan(ctx context.Context, root *AccountGroupQuery, v any) error {
	selector := root.sqlQuery(ctx).Select()
	aggregation := make([]string, 0, len(_g.fns))
	for _, fn := range _g.fns {
		aggregation = append(aggregation, fn(selector))
	}
	// Default selection: the group-by columns followed by the aggregations,
	// unless the caller already chose explicit columns.
	if len(selector.SelectedColumns()) == 0 {
		columns := make([]string, 0, len(*_g.flds)+len(_g.fns))
		for _, f := range *_g.flds {
			columns = append(columns, selector.C(f))
		}
		columns = append(columns, aggregation...)
		selector.Select(columns...)
	}
	selector.GroupBy(selector.Columns(*_g.flds...)...)
	if err := selector.Err(); err != nil {
		return err
	}
	rows := &sql.Rows{}
	query, args := selector.Query()
	if err := _g.build.driver.Query(ctx, query, args, rows); err != nil {
		return err
	}
	defer rows.Close()
	return sql.ScanSlice(rows, v)
}
|
||||||
|
|
||||||
|
// AccountGroupSelect is the builder for selecting fields of AccountGroup entities.
type AccountGroupSelect struct {
	*AccountGroupQuery
	selector
}

// Aggregate adds the given aggregation functions to the selector query.
func (_s *AccountGroupSelect) Aggregate(fns ...AggregateFunc) *AccountGroupSelect {
	_s.fns = append(_s.fns, fns...)
	return _s
}

// Scan applies the selector query and scans the result into the given value.
func (_s *AccountGroupSelect) Scan(ctx context.Context, v any) error {
	ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect)
	if err := _s.prepareQuery(ctx); err != nil {
		return err
	}
	return scanWithInterceptors[*AccountGroupQuery, *AccountGroupSelect](ctx, _s.AccountGroupQuery, _s, _s.inters, v)
}

// sqlScan executes the select query and scans all rows into v.
func (_s *AccountGroupSelect) sqlScan(ctx context.Context, root *AccountGroupQuery, v any) error {
	selector := root.sqlQuery(ctx)
	aggregation := make([]string, 0, len(_s.fns))
	for _, fn := range _s.fns {
		aggregation = append(aggregation, fn(selector))
	}
	// Aggregations replace the selection when no fields were chosen,
	// otherwise they are appended after the selected fields.
	switch n := len(*_s.selector.flds); {
	case n == 0 && len(aggregation) > 0:
		selector.Select(aggregation...)
	case n != 0 && len(aggregation) > 0:
		selector.AppendSelect(aggregation...)
	}
	rows := &sql.Rows{}
	query, args := selector.Query()
	if err := _s.driver.Query(ctx, query, args, rows); err != nil {
		return err
	}
	defer rows.Close()
	return sql.ScanSlice(rows, v)
}
|
||||||
// ---- backend/ent/accountgroup_update.go (new file, 477 lines) ----
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package ent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
"entgo.io/ent/schema/field"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/account"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/accountgroup"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/group"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||||
|
)
|
||||||
|
|
||||||
|
// AccountGroupUpdate is the builder for updating AccountGroup entities.
type AccountGroupUpdate struct {
	config
	hooks    []Hook
	mutation *AccountGroupMutation
}

// Where appends a list predicates to the AccountGroupUpdate builder.
func (_u *AccountGroupUpdate) Where(ps ...predicate.AccountGroup) *AccountGroupUpdate {
	_u.mutation.Where(ps...)
	return _u
}

// SetAccountID sets the "account_id" field.
func (_u *AccountGroupUpdate) SetAccountID(v int64) *AccountGroupUpdate {
	_u.mutation.SetAccountID(v)
	return _u
}

// SetNillableAccountID sets the "account_id" field if the given value is not nil.
func (_u *AccountGroupUpdate) SetNillableAccountID(v *int64) *AccountGroupUpdate {
	if v != nil {
		_u.SetAccountID(*v)
	}
	return _u
}

// SetGroupID sets the "group_id" field.
func (_u *AccountGroupUpdate) SetGroupID(v int64) *AccountGroupUpdate {
	_u.mutation.SetGroupID(v)
	return _u
}

// SetNillableGroupID sets the "group_id" field if the given value is not nil.
func (_u *AccountGroupUpdate) SetNillableGroupID(v *int64) *AccountGroupUpdate {
	if v != nil {
		_u.SetGroupID(*v)
	}
	return _u
}

// SetPriority sets the "priority" field.
func (_u *AccountGroupUpdate) SetPriority(v int) *AccountGroupUpdate {
	// ResetPriority discards any pending AddPriority delta before the
	// absolute value is set.
	_u.mutation.ResetPriority()
	_u.mutation.SetPriority(v)
	return _u
}

// SetNillablePriority sets the "priority" field if the given value is not nil.
func (_u *AccountGroupUpdate) SetNillablePriority(v *int) *AccountGroupUpdate {
	if v != nil {
		_u.SetPriority(*v)
	}
	return _u
}

// AddPriority adds value to the "priority" field.
func (_u *AccountGroupUpdate) AddPriority(v int) *AccountGroupUpdate {
	_u.mutation.AddPriority(v)
	return _u
}

// SetAccount sets the "account" edge to the Account entity.
func (_u *AccountGroupUpdate) SetAccount(v *Account) *AccountGroupUpdate {
	return _u.SetAccountID(v.ID)
}

// SetGroup sets the "group" edge to the Group entity.
func (_u *AccountGroupUpdate) SetGroup(v *Group) *AccountGroupUpdate {
	return _u.SetGroupID(v.ID)
}

// Mutation returns the AccountGroupMutation object of the builder.
func (_u *AccountGroupUpdate) Mutation() *AccountGroupMutation {
	return _u.mutation
}

// ClearAccount clears the "account" edge to the Account entity.
func (_u *AccountGroupUpdate) ClearAccount() *AccountGroupUpdate {
	_u.mutation.ClearAccount()
	return _u
}

// ClearGroup clears the "group" edge to the Group entity.
func (_u *AccountGroupUpdate) ClearGroup() *AccountGroupUpdate {
	_u.mutation.ClearGroup()
	return _u
}

// Save executes the query and returns the number of nodes affected by the update operation.
func (_u *AccountGroupUpdate) Save(ctx context.Context) (int, error) {
	return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks)
}

// SaveX is like Save, but panics if an error occurs.
func (_u *AccountGroupUpdate) SaveX(ctx context.Context) int {
	affected, err := _u.Save(ctx)
	if err != nil {
		panic(err)
	}
	return affected
}

// Exec executes the query.
func (_u *AccountGroupUpdate) Exec(ctx context.Context) error {
	_, err := _u.Save(ctx)
	return err
}

// ExecX is like Exec, but panics if an error occurs.
func (_u *AccountGroupUpdate) ExecX(ctx context.Context) {
	if err := _u.Exec(ctx); err != nil {
		panic(err)
	}
}

// check runs all checks and user-defined validators on the builder.
func (_u *AccountGroupUpdate) check() error {
	// Both edges are required: clearing one without assigning a new target is invalid.
	if _u.mutation.AccountCleared() && len(_u.mutation.AccountIDs()) > 0 {
		return errors.New(`ent: clearing a required unique edge "AccountGroup.account"`)
	}
	if _u.mutation.GroupCleared() && len(_u.mutation.GroupIDs()) > 0 {
		return errors.New(`ent: clearing a required unique edge "AccountGroup.group"`)
	}
	return nil
}
|
||||||
|
|
||||||
|
// sqlSave translates the mutation into a sqlgraph update spec and executes it,
// returning the number of affected rows. The spec uses the composite
// (account_id, group_id) identifier of the join entity.
func (_u *AccountGroupUpdate) sqlSave(ctx context.Context) (_node int, err error) {
	if err := _u.check(); err != nil {
		return _node, err
	}
	_spec := sqlgraph.NewUpdateSpec(accountgroup.Table, accountgroup.Columns, sqlgraph.NewFieldSpec(accountgroup.FieldAccountID, field.TypeInt64), sqlgraph.NewFieldSpec(accountgroup.FieldGroupID, field.TypeInt64))
	if ps := _u.mutation.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	if value, ok := _u.mutation.Priority(); ok {
		_spec.SetField(accountgroup.FieldPriority, field.TypeInt, value)
	}
	if value, ok := _u.mutation.AddedPriority(); ok {
		_spec.AddField(accountgroup.FieldPriority, field.TypeInt, value)
	}
	if _u.mutation.AccountCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: false,
			Table:   accountgroup.AccountTable,
			Columns: []string{accountgroup.AccountColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(account.FieldID, field.TypeInt64),
			},
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	if nodes := _u.mutation.AccountIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: false,
			Table:   accountgroup.AccountTable,
			Columns: []string{accountgroup.AccountColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(account.FieldID, field.TypeInt64),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Add = append(_spec.Edges.Add, edge)
	}
	if _u.mutation.GroupCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: false,
			Table:   accountgroup.GroupTable,
			Columns: []string{accountgroup.GroupColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64),
			},
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	if nodes := _u.mutation.GroupIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: false,
			Table:   accountgroup.GroupTable,
			Columns: []string{accountgroup.GroupColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Add = append(_spec.Edges.Add, edge)
	}
	if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil {
		// Map driver-level errors onto the ent error types callers match on.
		if _, ok := err.(*sqlgraph.NotFoundError); ok {
			err = &NotFoundError{accountgroup.Label}
		} else if sqlgraph.IsConstraintError(err) {
			err = &ConstraintError{msg: err.Error(), wrap: err}
		}
		return 0, err
	}
	_u.mutation.done = true
	return _node, nil
}
|
||||||
|
|
||||||
|
// AccountGroupUpdateOne is the builder for updating a single AccountGroup entity.
|
||||||
|
type AccountGroupUpdateOne struct {
|
||||||
|
config
|
||||||
|
fields []string
|
||||||
|
hooks []Hook
|
||||||
|
mutation *AccountGroupMutation
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetAccountID sets the "account_id" field.
|
||||||
|
func (_u *AccountGroupUpdateOne) SetAccountID(v int64) *AccountGroupUpdateOne {
|
||||||
|
_u.mutation.SetAccountID(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableAccountID sets the "account_id" field if the given value is not nil.
|
||||||
|
func (_u *AccountGroupUpdateOne) SetNillableAccountID(v *int64) *AccountGroupUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetAccountID(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetGroupID sets the "group_id" field.
|
||||||
|
func (_u *AccountGroupUpdateOne) SetGroupID(v int64) *AccountGroupUpdateOne {
|
||||||
|
_u.mutation.SetGroupID(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableGroupID sets the "group_id" field if the given value is not nil.
|
||||||
|
func (_u *AccountGroupUpdateOne) SetNillableGroupID(v *int64) *AccountGroupUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetGroupID(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetPriority sets the "priority" field.
|
||||||
|
func (_u *AccountGroupUpdateOne) SetPriority(v int) *AccountGroupUpdateOne {
|
||||||
|
_u.mutation.ResetPriority()
|
||||||
|
_u.mutation.SetPriority(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillablePriority sets the "priority" field if the given value is not nil.
|
||||||
|
func (_u *AccountGroupUpdateOne) SetNillablePriority(v *int) *AccountGroupUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetPriority(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddPriority adds value to the "priority" field.
|
||||||
|
func (_u *AccountGroupUpdateOne) AddPriority(v int) *AccountGroupUpdateOne {
|
||||||
|
_u.mutation.AddPriority(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetAccount sets the "account" edge to the Account entity.
|
||||||
|
func (_u *AccountGroupUpdateOne) SetAccount(v *Account) *AccountGroupUpdateOne {
|
||||||
|
return _u.SetAccountID(v.ID)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetGroup sets the "group" edge to the Group entity.
|
||||||
|
func (_u *AccountGroupUpdateOne) SetGroup(v *Group) *AccountGroupUpdateOne {
|
||||||
|
return _u.SetGroupID(v.ID)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Mutation returns the AccountGroupMutation object of the builder.
|
||||||
|
func (_u *AccountGroupUpdateOne) Mutation() *AccountGroupMutation {
|
||||||
|
return _u.mutation
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearAccount clears the "account" edge to the Account entity.
|
||||||
|
func (_u *AccountGroupUpdateOne) ClearAccount() *AccountGroupUpdateOne {
|
||||||
|
_u.mutation.ClearAccount()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearGroup clears the "group" edge to the Group entity.
|
||||||
|
func (_u *AccountGroupUpdateOne) ClearGroup() *AccountGroupUpdateOne {
|
||||||
|
_u.mutation.ClearGroup()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// Where appends a list predicates to the AccountGroupUpdate builder.
|
||||||
|
func (_u *AccountGroupUpdateOne) Where(ps ...predicate.AccountGroup) *AccountGroupUpdateOne {
|
||||||
|
_u.mutation.Where(ps...)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// Select allows selecting one or more fields (columns) of the returned entity.
|
||||||
|
// The default is selecting all fields defined in the entity schema.
|
||||||
|
func (_u *AccountGroupUpdateOne) Select(field string, fields ...string) *AccountGroupUpdateOne {
|
||||||
|
_u.fields = append([]string{field}, fields...)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// Save executes the query and returns the updated AccountGroup entity.
|
||||||
|
func (_u *AccountGroupUpdateOne) Save(ctx context.Context) (*AccountGroup, error) {
|
||||||
|
return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SaveX is like Save, but panics if an error occurs.
func (_u *AccountGroupUpdateOne) SaveX(ctx context.Context) *AccountGroup {
	node, err := _u.Save(ctx)
	if err != nil {
		panic(err)
	}
	return node
}
|
||||||
|
|
||||||
|
// Exec executes the query on the entity.
func (_u *AccountGroupUpdateOne) Exec(ctx context.Context) error {
	// The updated entity is discarded; only the error is surfaced.
	_, err := _u.Save(ctx)
	return err
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
func (_u *AccountGroupUpdateOne) ExecX(ctx context.Context) {
	if err := _u.Exec(ctx); err != nil {
		panic(err)
	}
}
|
||||||
|
|
||||||
|
// check runs all checks and user-defined validators on the builder.
func (_u *AccountGroupUpdateOne) check() error {
	// Both edges are required (part of the composite key); they may be
	// re-pointed to a new ID but never cleared without a replacement.
	if _u.mutation.AccountCleared() && len(_u.mutation.AccountIDs()) > 0 {
		return errors.New(`ent: clearing a required unique edge "AccountGroup.account"`)
	}
	if _u.mutation.GroupCleared() && len(_u.mutation.GroupIDs()) > 0 {
		return errors.New(`ent: clearing a required unique edge "AccountGroup.group"`)
	}
	return nil
}
|
||||||
|
|
||||||
|
// sqlSave builds the update spec from the mutation and executes it,
// returning the updated AccountGroup read back from the database.
func (_u *AccountGroupUpdateOne) sqlSave(ctx context.Context) (_node *AccountGroup, err error) {
	if err := _u.check(); err != nil {
		return _node, err
	}
	_spec := sqlgraph.NewUpdateSpec(accountgroup.Table, accountgroup.Columns, sqlgraph.NewFieldSpec(accountgroup.FieldAccountID, field.TypeInt64), sqlgraph.NewFieldSpec(accountgroup.FieldGroupID, field.TypeInt64))
	// AccountGroup uses a composite primary key (account_id, group_id);
	// both halves must be present on the mutation to address a single row.
	if id, ok := _u.mutation.AccountID(); !ok {
		return nil, &ValidationError{Name: "account_id", err: errors.New(`ent: missing "AccountGroup.account_id" for update`)}
	} else {
		_spec.Node.CompositeID[0].Value = id
	}
	if id, ok := _u.mutation.GroupID(); !ok {
		return nil, &ValidationError{Name: "group_id", err: errors.New(`ent: missing "AccountGroup.group_id" for update`)}
	} else {
		_spec.Node.CompositeID[1].Value = id
	}
	// Restrict the RETURNING columns when Select was used on the builder.
	if fields := _u.fields; len(fields) > 0 {
		_spec.Node.Columns = make([]string, len(fields))
		for i, f := range fields {
			if !accountgroup.ValidColumn(f) {
				return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
			}
			_spec.Node.Columns[i] = f
		}
	}
	// Apply any Where predicates accumulated on the mutation.
	if ps := _u.mutation.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	if value, ok := _u.mutation.Priority(); ok {
		_spec.SetField(accountgroup.FieldPriority, field.TypeInt, value)
	}
	if value, ok := _u.mutation.AddedPriority(); ok {
		_spec.AddField(accountgroup.FieldPriority, field.TypeInt, value)
	}
	// Edge handling: a cleared edge contributes a Clear spec; newly set IDs
	// contribute an Add spec (re-pointing the M2O foreign-key column).
	if _u.mutation.AccountCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: false,
			Table:   accountgroup.AccountTable,
			Columns: []string{accountgroup.AccountColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(account.FieldID, field.TypeInt64),
			},
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	if nodes := _u.mutation.AccountIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: false,
			Table:   accountgroup.AccountTable,
			Columns: []string{accountgroup.AccountColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(account.FieldID, field.TypeInt64),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Add = append(_spec.Edges.Add, edge)
	}
	if _u.mutation.GroupCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: false,
			Table:   accountgroup.GroupTable,
			Columns: []string{accountgroup.GroupColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64),
			},
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	if nodes := _u.mutation.GroupIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: false,
			Table:   accountgroup.GroupTable,
			Columns: []string{accountgroup.GroupColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Add = append(_spec.Edges.Add, edge)
	}
	// Scan the updated row back into a fresh entity.
	_node = &AccountGroup{config: _u.config}
	_spec.Assign = _node.assignValues
	_spec.ScanValues = _node.scanValues
	if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil {
		if _, ok := err.(*sqlgraph.NotFoundError); ok {
			err = &NotFoundError{accountgroup.Label}
		} else if sqlgraph.IsConstraintError(err) {
			err = &ConstraintError{msg: err.Error(), wrap: err}
		}
		return nil, err
	}
	// Mark the mutation as consumed so it cannot be reused.
	_u.mutation.done = true
	return _node, nil
}
|
||||||
260
backend/ent/announcement.go
Normal file
@@ -0,0 +1,260 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package ent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"entgo.io/ent"
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/announcement"
|
||||||
|
"github.com/Wei-Shaw/sub2api/internal/domain"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Announcement is the model entity for the Announcement schema.
type Announcement struct {
	config `json:"-"`
	// ID of the ent.
	ID int64 `json:"id,omitempty"`
	// Announcement title.
	Title string `json:"title,omitempty"`
	// Announcement body content (Markdown supported).
	Content string `json:"content,omitempty"`
	// Status: draft, active, archived.
	Status string `json:"status,omitempty"`
	// Notification mode: silent (bell icon only), popup (modal reminder).
	NotifyMode string `json:"notify_mode,omitempty"`
	// Display-targeting conditions (JSON rules).
	Targeting domain.AnnouncementTargeting `json:"targeting,omitempty"`
	// Time the announcement starts showing (nil means effective immediately).
	StartsAt *time.Time `json:"starts_at,omitempty"`
	// Time the announcement stops showing (nil means it never expires).
	EndsAt *time.Time `json:"ends_at,omitempty"`
	// Creator user ID (administrator).
	CreatedBy *int64 `json:"created_by,omitempty"`
	// Last updater user ID (administrator).
	UpdatedBy *int64 `json:"updated_by,omitempty"`
	// CreatedAt holds the value of the "created_at" field.
	CreatedAt time.Time `json:"created_at,omitempty"`
	// UpdatedAt holds the value of the "updated_at" field.
	UpdatedAt time.Time `json:"updated_at,omitempty"`
	// Edges holds the relations/edges for other nodes in the graph.
	// The values are being populated by the AnnouncementQuery when eager-loading is set.
	Edges AnnouncementEdges `json:"edges"`
	// selectValues holds dynamically selected values (modifiers, order terms); see Value.
	selectValues sql.SelectValues
}
|
||||||
|
|
||||||
|
// AnnouncementEdges holds the relations/edges for other nodes in the graph.
type AnnouncementEdges struct {
	// Reads holds the value of the reads edge.
	Reads []*AnnouncementRead `json:"reads,omitempty"`
	// loadedTypes holds the information for reporting if a
	// type was loaded (or requested) in eager-loading or not.
	loadedTypes [1]bool
}
|
||||||
|
|
||||||
|
// ReadsOrErr returns the Reads value or an error if the edge
// was not loaded in eager-loading.
func (e AnnouncementEdges) ReadsOrErr() ([]*AnnouncementRead, error) {
	if e.loadedTypes[0] {
		return e.Reads, nil
	}
	return nil, &NotLoadedError{edge: "reads"}
}
|
||||||
|
|
||||||
|
// scanValues returns the types for scanning values from sql.Rows.
func (*Announcement) scanValues(columns []string) ([]any, error) {
	values := make([]any, len(columns))
	for i := range columns {
		switch columns[i] {
		case announcement.FieldTargeting:
			// JSON column: scanned as raw bytes, decoded later in assignValues.
			values[i] = new([]byte)
		case announcement.FieldID, announcement.FieldCreatedBy, announcement.FieldUpdatedBy:
			values[i] = new(sql.NullInt64)
		case announcement.FieldTitle, announcement.FieldContent, announcement.FieldStatus, announcement.FieldNotifyMode:
			values[i] = new(sql.NullString)
		case announcement.FieldStartsAt, announcement.FieldEndsAt, announcement.FieldCreatedAt, announcement.FieldUpdatedAt:
			values[i] = new(sql.NullTime)
		default:
			// Unknown columns (e.g. selected via modifiers) are captured generically.
			values[i] = new(sql.UnknownType)
		}
	}
	return values, nil
}
|
||||||
|
|
||||||
|
// assignValues assigns the values that were returned from sql.Rows (after scanning)
// to the Announcement fields.
func (_m *Announcement) assignValues(columns []string, values []any) error {
	if m, n := len(values), len(columns); m < n {
		return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
	}
	for i := range columns {
		switch columns[i] {
		case announcement.FieldID:
			value, ok := values[i].(*sql.NullInt64)
			if !ok {
				return fmt.Errorf("unexpected type %T for field id", value)
			}
			_m.ID = int64(value.Int64)
		case announcement.FieldTitle:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field title", values[i])
			} else if value.Valid {
				_m.Title = value.String
			}
		case announcement.FieldContent:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field content", values[i])
			} else if value.Valid {
				_m.Content = value.String
			}
		case announcement.FieldStatus:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field status", values[i])
			} else if value.Valid {
				_m.Status = value.String
			}
		case announcement.FieldNotifyMode:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field notify_mode", values[i])
			} else if value.Valid {
				_m.NotifyMode = value.String
			}
		case announcement.FieldTargeting:
			// JSON column: decode the raw bytes captured by scanValues.
			if value, ok := values[i].(*[]byte); !ok {
				return fmt.Errorf("unexpected type %T for field targeting", values[i])
			} else if value != nil && len(*value) > 0 {
				if err := json.Unmarshal(*value, &_m.Targeting); err != nil {
					return fmt.Errorf("unmarshal field targeting: %w", err)
				}
			}
		case announcement.FieldStartsAt:
			// Nullable time: only allocate the pointer when the column is non-NULL.
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field starts_at", values[i])
			} else if value.Valid {
				_m.StartsAt = new(time.Time)
				*_m.StartsAt = value.Time
			}
		case announcement.FieldEndsAt:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field ends_at", values[i])
			} else if value.Valid {
				_m.EndsAt = new(time.Time)
				*_m.EndsAt = value.Time
			}
		case announcement.FieldCreatedBy:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field created_by", values[i])
			} else if value.Valid {
				_m.CreatedBy = new(int64)
				*_m.CreatedBy = value.Int64
			}
		case announcement.FieldUpdatedBy:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field updated_by", values[i])
			} else if value.Valid {
				_m.UpdatedBy = new(int64)
				*_m.UpdatedBy = value.Int64
			}
		case announcement.FieldCreatedAt:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field created_at", values[i])
			} else if value.Valid {
				_m.CreatedAt = value.Time
			}
		case announcement.FieldUpdatedAt:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field updated_at", values[i])
			} else if value.Valid {
				_m.UpdatedAt = value.Time
			}
		default:
			// Columns selected dynamically (modifiers) are stored for Value().
			_m.selectValues.Set(columns[i], values[i])
		}
	}
	return nil
}
|
||||||
|
|
||||||
|
// Value returns the ent.Value that was dynamically selected and assigned to the Announcement.
// This includes values selected through modifiers, order, etc.
func (_m *Announcement) Value(name string) (ent.Value, error) {
	return _m.selectValues.Get(name)
}
|
||||||
|
|
||||||
|
// QueryReads queries the "reads" edge of the Announcement entity.
func (_m *Announcement) QueryReads() *AnnouncementReadQuery {
	return NewAnnouncementClient(_m.config).QueryReads(_m)
}
|
||||||
|
|
||||||
|
// Update returns a builder for updating this Announcement.
// Note that you need to call Announcement.Unwrap() before calling this method if this Announcement
// was returned from a transaction, and the transaction was committed or rolled back.
func (_m *Announcement) Update() *AnnouncementUpdateOne {
	return NewAnnouncementClient(_m.config).UpdateOne(_m)
}
|
||||||
|
|
||||||
|
// Unwrap unwraps the Announcement entity that was returned from a transaction after it was closed,
// so that all future queries will be executed through the driver which created the transaction.
func (_m *Announcement) Unwrap() *Announcement {
	_tx, ok := _m.config.driver.(*txDriver)
	if !ok {
		panic("ent: Announcement is not a transactional entity")
	}
	_m.config.driver = _tx.drv
	return _m
}
|
||||||
|
|
||||||
|
// String implements the fmt.Stringer.
func (_m *Announcement) String() string {
	var builder strings.Builder
	builder.WriteString("Announcement(")
	builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID))
	builder.WriteString("title=")
	builder.WriteString(_m.Title)
	builder.WriteString(", ")
	builder.WriteString("content=")
	builder.WriteString(_m.Content)
	builder.WriteString(", ")
	builder.WriteString("status=")
	builder.WriteString(_m.Status)
	builder.WriteString(", ")
	builder.WriteString("notify_mode=")
	builder.WriteString(_m.NotifyMode)
	builder.WriteString(", ")
	builder.WriteString("targeting=")
	builder.WriteString(fmt.Sprintf("%v", _m.Targeting))
	builder.WriteString(", ")
	// Nillable fields print only when set; the separator is still emitted.
	if v := _m.StartsAt; v != nil {
		builder.WriteString("starts_at=")
		builder.WriteString(v.Format(time.ANSIC))
	}
	builder.WriteString(", ")
	if v := _m.EndsAt; v != nil {
		builder.WriteString("ends_at=")
		builder.WriteString(v.Format(time.ANSIC))
	}
	builder.WriteString(", ")
	if v := _m.CreatedBy; v != nil {
		builder.WriteString("created_by=")
		builder.WriteString(fmt.Sprintf("%v", *v))
	}
	builder.WriteString(", ")
	if v := _m.UpdatedBy; v != nil {
		builder.WriteString("updated_by=")
		builder.WriteString(fmt.Sprintf("%v", *v))
	}
	builder.WriteString(", ")
	builder.WriteString("created_at=")
	builder.WriteString(_m.CreatedAt.Format(time.ANSIC))
	builder.WriteString(", ")
	builder.WriteString("updated_at=")
	builder.WriteString(_m.UpdatedAt.Format(time.ANSIC))
	builder.WriteByte(')')
	return builder.String()
}
|
||||||
|
|
||||||
|
// Announcements is a parsable slice of Announcement.
type Announcements []*Announcement
|
||||||
176
backend/ent/announcement/announcement.go
Normal file
@@ -0,0 +1,176 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package announcement
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
	// Label holds the string label denoting the announcement type in the database.
	Label = "announcement"
	// FieldID holds the string denoting the id field in the database.
	FieldID = "id"
	// FieldTitle holds the string denoting the title field in the database.
	FieldTitle = "title"
	// FieldContent holds the string denoting the content field in the database.
	FieldContent = "content"
	// FieldStatus holds the string denoting the status field in the database.
	FieldStatus = "status"
	// FieldNotifyMode holds the string denoting the notify_mode field in the database.
	FieldNotifyMode = "notify_mode"
	// FieldTargeting holds the string denoting the targeting field in the database.
	FieldTargeting = "targeting"
	// FieldStartsAt holds the string denoting the starts_at field in the database.
	FieldStartsAt = "starts_at"
	// FieldEndsAt holds the string denoting the ends_at field in the database.
	FieldEndsAt = "ends_at"
	// FieldCreatedBy holds the string denoting the created_by field in the database.
	FieldCreatedBy = "created_by"
	// FieldUpdatedBy holds the string denoting the updated_by field in the database.
	FieldUpdatedBy = "updated_by"
	// FieldCreatedAt holds the string denoting the created_at field in the database.
	FieldCreatedAt = "created_at"
	// FieldUpdatedAt holds the string denoting the updated_at field in the database.
	FieldUpdatedAt = "updated_at"
	// EdgeReads holds the string denoting the reads edge name in mutations.
	EdgeReads = "reads"
	// Table holds the table name of the announcement in the database.
	Table = "announcements"
	// ReadsTable is the table that holds the reads relation/edge.
	ReadsTable = "announcement_reads"
	// ReadsInverseTable is the table name for the AnnouncementRead entity.
	// It exists in this package in order to avoid circular dependency with the "announcementread" package.
	ReadsInverseTable = "announcement_reads"
	// ReadsColumn is the table column denoting the reads relation/edge.
	ReadsColumn = "announcement_id"
)
|
||||||
|
|
||||||
|
// Columns holds all SQL columns for announcement fields.
var Columns = []string{
	FieldID,
	FieldTitle,
	FieldContent,
	FieldStatus,
	FieldNotifyMode,
	FieldTargeting,
	FieldStartsAt,
	FieldEndsAt,
	FieldCreatedBy,
	FieldUpdatedBy,
	FieldCreatedAt,
	FieldUpdatedAt,
}
|
||||||
|
|
||||||
|
// ValidColumn reports if the column name is valid (part of the table columns).
func ValidColumn(column string) bool {
	// Linear scan is fine here: the column set is small and fixed.
	for i := range Columns {
		if column == Columns[i] {
			return true
		}
	}
	return false
}
|
||||||
|
|
||||||
|
// The validator/default hooks below are populated at runtime from the schema
// descriptors (see runtime.go in generated ent code).
var (
	// TitleValidator is a validator for the "title" field. It is called by the builders before save.
	TitleValidator func(string) error
	// ContentValidator is a validator for the "content" field. It is called by the builders before save.
	ContentValidator func(string) error
	// DefaultStatus holds the default value on creation for the "status" field.
	DefaultStatus string
	// StatusValidator is a validator for the "status" field. It is called by the builders before save.
	StatusValidator func(string) error
	// DefaultNotifyMode holds the default value on creation for the "notify_mode" field.
	DefaultNotifyMode string
	// NotifyModeValidator is a validator for the "notify_mode" field. It is called by the builders before save.
	NotifyModeValidator func(string) error
	// DefaultCreatedAt holds the default value on creation for the "created_at" field.
	DefaultCreatedAt func() time.Time
	// DefaultUpdatedAt holds the default value on creation for the "updated_at" field.
	DefaultUpdatedAt func() time.Time
	// UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field.
	UpdateDefaultUpdatedAt func() time.Time
)
|
||||||
|
|
||||||
|
// OrderOption defines the ordering options for the Announcement queries.
type OrderOption func(*sql.Selector)

// ByID orders the results by the id field.
func ByID(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldID, opts...).ToFunc()
}

// ByTitle orders the results by the title field.
func ByTitle(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldTitle, opts...).ToFunc()
}

// ByContent orders the results by the content field.
func ByContent(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldContent, opts...).ToFunc()
}

// ByStatus orders the results by the status field.
func ByStatus(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldStatus, opts...).ToFunc()
}

// ByNotifyMode orders the results by the notify_mode field.
func ByNotifyMode(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldNotifyMode, opts...).ToFunc()
}

// ByStartsAt orders the results by the starts_at field.
func ByStartsAt(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldStartsAt, opts...).ToFunc()
}

// ByEndsAt orders the results by the ends_at field.
func ByEndsAt(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldEndsAt, opts...).ToFunc()
}

// ByCreatedBy orders the results by the created_by field.
func ByCreatedBy(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldCreatedBy, opts...).ToFunc()
}

// ByUpdatedBy orders the results by the updated_by field.
func ByUpdatedBy(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldUpdatedBy, opts...).ToFunc()
}

// ByCreatedAt orders the results by the created_at field.
func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
}

// ByUpdatedAt orders the results by the updated_at field.
func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc()
}

// ByReadsCount orders the results by reads count.
func ByReadsCount(opts ...sql.OrderTermOption) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborsCount(s, newReadsStep(), opts...)
	}
}

// ByReads orders the results by reads terms.
func ByReads(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborTerms(s, newReadsStep(), append([]sql.OrderTerm{term}, terms...)...)
	}
}
|
||||||
|
// newReadsStep builds the graph-traversal step for the O2M "reads" edge
// (announcements -> announcement_reads via announcement_id).
func newReadsStep() *sqlgraph.Step {
	return sqlgraph.NewStep(
		sqlgraph.From(Table, FieldID),
		sqlgraph.To(ReadsInverseTable, FieldID),
		sqlgraph.Edge(sqlgraph.O2M, false, ReadsTable, ReadsColumn),
	)
}
|
||||||
694
backend/ent/announcement/where.go
Normal file
@@ -0,0 +1,694 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package announcement
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ID filters vertices based on their ID field.
func ID(id int64) predicate.Announcement {
	return predicate.Announcement(sql.FieldEQ(FieldID, id))
}

// IDEQ applies the EQ predicate on the ID field.
func IDEQ(id int64) predicate.Announcement {
	return predicate.Announcement(sql.FieldEQ(FieldID, id))
}

// IDNEQ applies the NEQ predicate on the ID field.
func IDNEQ(id int64) predicate.Announcement {
	return predicate.Announcement(sql.FieldNEQ(FieldID, id))
}

// IDIn applies the In predicate on the ID field.
func IDIn(ids ...int64) predicate.Announcement {
	return predicate.Announcement(sql.FieldIn(FieldID, ids...))
}

// IDNotIn applies the NotIn predicate on the ID field.
func IDNotIn(ids ...int64) predicate.Announcement {
	return predicate.Announcement(sql.FieldNotIn(FieldID, ids...))
}

// IDGT applies the GT predicate on the ID field.
func IDGT(id int64) predicate.Announcement {
	return predicate.Announcement(sql.FieldGT(FieldID, id))
}

// IDGTE applies the GTE predicate on the ID field.
func IDGTE(id int64) predicate.Announcement {
	return predicate.Announcement(sql.FieldGTE(FieldID, id))
}

// IDLT applies the LT predicate on the ID field.
func IDLT(id int64) predicate.Announcement {
	return predicate.Announcement(sql.FieldLT(FieldID, id))
}

// IDLTE applies the LTE predicate on the ID field.
func IDLTE(id int64) predicate.Announcement {
	return predicate.Announcement(sql.FieldLTE(FieldID, id))
}
|
||||||
|
|
||||||
|
// Title applies equality check predicate on the "title" field. It's identical to TitleEQ.
func Title(v string) predicate.Announcement {
	return predicate.Announcement(sql.FieldEQ(FieldTitle, v))
}

// Content applies equality check predicate on the "content" field. It's identical to ContentEQ.
func Content(v string) predicate.Announcement {
	return predicate.Announcement(sql.FieldEQ(FieldContent, v))
}

// Status applies equality check predicate on the "status" field. It's identical to StatusEQ.
func Status(v string) predicate.Announcement {
	return predicate.Announcement(sql.FieldEQ(FieldStatus, v))
}

// NotifyMode applies equality check predicate on the "notify_mode" field. It's identical to NotifyModeEQ.
func NotifyMode(v string) predicate.Announcement {
	return predicate.Announcement(sql.FieldEQ(FieldNotifyMode, v))
}

// StartsAt applies equality check predicate on the "starts_at" field. It's identical to StartsAtEQ.
func StartsAt(v time.Time) predicate.Announcement {
	return predicate.Announcement(sql.FieldEQ(FieldStartsAt, v))
}

// EndsAt applies equality check predicate on the "ends_at" field. It's identical to EndsAtEQ.
func EndsAt(v time.Time) predicate.Announcement {
	return predicate.Announcement(sql.FieldEQ(FieldEndsAt, v))
}

// CreatedBy applies equality check predicate on the "created_by" field. It's identical to CreatedByEQ.
func CreatedBy(v int64) predicate.Announcement {
	return predicate.Announcement(sql.FieldEQ(FieldCreatedBy, v))
}

// UpdatedBy applies equality check predicate on the "updated_by" field. It's identical to UpdatedByEQ.
func UpdatedBy(v int64) predicate.Announcement {
	return predicate.Announcement(sql.FieldEQ(FieldUpdatedBy, v))
}

// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ.
func CreatedAt(v time.Time) predicate.Announcement {
	return predicate.Announcement(sql.FieldEQ(FieldCreatedAt, v))
}

// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ.
func UpdatedAt(v time.Time) predicate.Announcement {
	return predicate.Announcement(sql.FieldEQ(FieldUpdatedAt, v))
}
|
||||||
|
|
||||||
|
// TitleEQ applies the EQ predicate on the "title" field.
func TitleEQ(v string) predicate.Announcement {
	return predicate.Announcement(sql.FieldEQ(FieldTitle, v))
}

// TitleNEQ applies the NEQ predicate on the "title" field.
func TitleNEQ(v string) predicate.Announcement {
	return predicate.Announcement(sql.FieldNEQ(FieldTitle, v))
}

// TitleIn applies the In predicate on the "title" field.
func TitleIn(vs ...string) predicate.Announcement {
	return predicate.Announcement(sql.FieldIn(FieldTitle, vs...))
}

// TitleNotIn applies the NotIn predicate on the "title" field.
func TitleNotIn(vs ...string) predicate.Announcement {
	return predicate.Announcement(sql.FieldNotIn(FieldTitle, vs...))
}

// TitleGT applies the GT predicate on the "title" field.
func TitleGT(v string) predicate.Announcement {
	return predicate.Announcement(sql.FieldGT(FieldTitle, v))
}

// TitleGTE applies the GTE predicate on the "title" field.
func TitleGTE(v string) predicate.Announcement {
	return predicate.Announcement(sql.FieldGTE(FieldTitle, v))
}

// TitleLT applies the LT predicate on the "title" field.
func TitleLT(v string) predicate.Announcement {
	return predicate.Announcement(sql.FieldLT(FieldTitle, v))
}

// TitleLTE applies the LTE predicate on the "title" field.
func TitleLTE(v string) predicate.Announcement {
	return predicate.Announcement(sql.FieldLTE(FieldTitle, v))
}

// TitleContains applies the Contains predicate on the "title" field.
func TitleContains(v string) predicate.Announcement {
	return predicate.Announcement(sql.FieldContains(FieldTitle, v))
}

// TitleHasPrefix applies the HasPrefix predicate on the "title" field.
func TitleHasPrefix(v string) predicate.Announcement {
	return predicate.Announcement(sql.FieldHasPrefix(FieldTitle, v))
}

// TitleHasSuffix applies the HasSuffix predicate on the "title" field.
func TitleHasSuffix(v string) predicate.Announcement {
	return predicate.Announcement(sql.FieldHasSuffix(FieldTitle, v))
}

// TitleEqualFold applies the EqualFold predicate on the "title" field.
func TitleEqualFold(v string) predicate.Announcement {
	return predicate.Announcement(sql.FieldEqualFold(FieldTitle, v))
}

// TitleContainsFold applies the ContainsFold predicate on the "title" field.
func TitleContainsFold(v string) predicate.Announcement {
	return predicate.Announcement(sql.FieldContainsFold(FieldTitle, v))
}
|
||||||
|
|
||||||
|
// ContentEQ applies the EQ predicate on the "content" field.
|
||||||
|
func ContentEQ(v string) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldEQ(FieldContent, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ContentNEQ applies the NEQ predicate on the "content" field.
|
||||||
|
func ContentNEQ(v string) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldNEQ(FieldContent, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ContentIn applies the In predicate on the "content" field.
|
||||||
|
func ContentIn(vs ...string) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldIn(FieldContent, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ContentNotIn applies the NotIn predicate on the "content" field.
|
||||||
|
func ContentNotIn(vs ...string) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldNotIn(FieldContent, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ContentGT applies the GT predicate on the "content" field.
|
||||||
|
func ContentGT(v string) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldGT(FieldContent, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ContentGTE applies the GTE predicate on the "content" field.
|
||||||
|
func ContentGTE(v string) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldGTE(FieldContent, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ContentLT applies the LT predicate on the "content" field.
|
||||||
|
func ContentLT(v string) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldLT(FieldContent, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ContentLTE applies the LTE predicate on the "content" field.
|
||||||
|
func ContentLTE(v string) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldLTE(FieldContent, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ContentContains applies the Contains predicate on the "content" field.
|
||||||
|
func ContentContains(v string) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldContains(FieldContent, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ContentHasPrefix applies the HasPrefix predicate on the "content" field.
|
||||||
|
func ContentHasPrefix(v string) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldHasPrefix(FieldContent, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ContentHasSuffix applies the HasSuffix predicate on the "content" field.
|
||||||
|
func ContentHasSuffix(v string) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldHasSuffix(FieldContent, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ContentEqualFold applies the EqualFold predicate on the "content" field.
|
||||||
|
func ContentEqualFold(v string) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldEqualFold(FieldContent, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ContentContainsFold applies the ContainsFold predicate on the "content" field.
|
||||||
|
func ContentContainsFold(v string) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldContainsFold(FieldContent, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusEQ applies the EQ predicate on the "status" field.
|
||||||
|
func StatusEQ(v string) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldEQ(FieldStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusNEQ applies the NEQ predicate on the "status" field.
|
||||||
|
func StatusNEQ(v string) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldNEQ(FieldStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusIn applies the In predicate on the "status" field.
|
||||||
|
func StatusIn(vs ...string) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldIn(FieldStatus, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusNotIn applies the NotIn predicate on the "status" field.
|
||||||
|
func StatusNotIn(vs ...string) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldNotIn(FieldStatus, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusGT applies the GT predicate on the "status" field.
|
||||||
|
func StatusGT(v string) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldGT(FieldStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusGTE applies the GTE predicate on the "status" field.
|
||||||
|
func StatusGTE(v string) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldGTE(FieldStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusLT applies the LT predicate on the "status" field.
|
||||||
|
func StatusLT(v string) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldLT(FieldStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusLTE applies the LTE predicate on the "status" field.
|
||||||
|
func StatusLTE(v string) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldLTE(FieldStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusContains applies the Contains predicate on the "status" field.
|
||||||
|
func StatusContains(v string) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldContains(FieldStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusHasPrefix applies the HasPrefix predicate on the "status" field.
|
||||||
|
func StatusHasPrefix(v string) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldHasPrefix(FieldStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusHasSuffix applies the HasSuffix predicate on the "status" field.
|
||||||
|
func StatusHasSuffix(v string) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldHasSuffix(FieldStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusEqualFold applies the EqualFold predicate on the "status" field.
|
||||||
|
func StatusEqualFold(v string) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldEqualFold(FieldStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusContainsFold applies the ContainsFold predicate on the "status" field.
|
||||||
|
func StatusContainsFold(v string) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldContainsFold(FieldStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotifyModeEQ applies the EQ predicate on the "notify_mode" field.
|
||||||
|
func NotifyModeEQ(v string) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldEQ(FieldNotifyMode, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotifyModeNEQ applies the NEQ predicate on the "notify_mode" field.
|
||||||
|
func NotifyModeNEQ(v string) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldNEQ(FieldNotifyMode, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotifyModeIn applies the In predicate on the "notify_mode" field.
|
||||||
|
func NotifyModeIn(vs ...string) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldIn(FieldNotifyMode, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotifyModeNotIn applies the NotIn predicate on the "notify_mode" field.
|
||||||
|
func NotifyModeNotIn(vs ...string) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldNotIn(FieldNotifyMode, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotifyModeGT applies the GT predicate on the "notify_mode" field.
|
||||||
|
func NotifyModeGT(v string) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldGT(FieldNotifyMode, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotifyModeGTE applies the GTE predicate on the "notify_mode" field.
|
||||||
|
func NotifyModeGTE(v string) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldGTE(FieldNotifyMode, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotifyModeLT applies the LT predicate on the "notify_mode" field.
|
||||||
|
func NotifyModeLT(v string) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldLT(FieldNotifyMode, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotifyModeLTE applies the LTE predicate on the "notify_mode" field.
|
||||||
|
func NotifyModeLTE(v string) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldLTE(FieldNotifyMode, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotifyModeContains applies the Contains predicate on the "notify_mode" field.
|
||||||
|
func NotifyModeContains(v string) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldContains(FieldNotifyMode, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotifyModeHasPrefix applies the HasPrefix predicate on the "notify_mode" field.
|
||||||
|
func NotifyModeHasPrefix(v string) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldHasPrefix(FieldNotifyMode, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotifyModeHasSuffix applies the HasSuffix predicate on the "notify_mode" field.
|
||||||
|
func NotifyModeHasSuffix(v string) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldHasSuffix(FieldNotifyMode, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotifyModeEqualFold applies the EqualFold predicate on the "notify_mode" field.
|
||||||
|
func NotifyModeEqualFold(v string) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldEqualFold(FieldNotifyMode, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotifyModeContainsFold applies the ContainsFold predicate on the "notify_mode" field.
|
||||||
|
func NotifyModeContainsFold(v string) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldContainsFold(FieldNotifyMode, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// TargetingIsNil applies the IsNil predicate on the "targeting" field.
|
||||||
|
func TargetingIsNil() predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldIsNull(FieldTargeting))
|
||||||
|
}
|
||||||
|
|
||||||
|
// TargetingNotNil applies the NotNil predicate on the "targeting" field.
|
||||||
|
func TargetingNotNil() predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldNotNull(FieldTargeting))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StartsAtEQ applies the EQ predicate on the "starts_at" field.
|
||||||
|
func StartsAtEQ(v time.Time) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldEQ(FieldStartsAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StartsAtNEQ applies the NEQ predicate on the "starts_at" field.
|
||||||
|
func StartsAtNEQ(v time.Time) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldNEQ(FieldStartsAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StartsAtIn applies the In predicate on the "starts_at" field.
|
||||||
|
func StartsAtIn(vs ...time.Time) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldIn(FieldStartsAt, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StartsAtNotIn applies the NotIn predicate on the "starts_at" field.
|
||||||
|
func StartsAtNotIn(vs ...time.Time) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldNotIn(FieldStartsAt, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StartsAtGT applies the GT predicate on the "starts_at" field.
|
||||||
|
func StartsAtGT(v time.Time) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldGT(FieldStartsAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StartsAtGTE applies the GTE predicate on the "starts_at" field.
|
||||||
|
func StartsAtGTE(v time.Time) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldGTE(FieldStartsAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StartsAtLT applies the LT predicate on the "starts_at" field.
|
||||||
|
func StartsAtLT(v time.Time) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldLT(FieldStartsAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StartsAtLTE applies the LTE predicate on the "starts_at" field.
|
||||||
|
func StartsAtLTE(v time.Time) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldLTE(FieldStartsAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StartsAtIsNil applies the IsNil predicate on the "starts_at" field.
|
||||||
|
func StartsAtIsNil() predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldIsNull(FieldStartsAt))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StartsAtNotNil applies the NotNil predicate on the "starts_at" field.
|
||||||
|
func StartsAtNotNil() predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldNotNull(FieldStartsAt))
|
||||||
|
}
|
||||||
|
|
||||||
|
// EndsAtEQ applies the EQ predicate on the "ends_at" field.
|
||||||
|
func EndsAtEQ(v time.Time) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldEQ(FieldEndsAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// EndsAtNEQ applies the NEQ predicate on the "ends_at" field.
|
||||||
|
func EndsAtNEQ(v time.Time) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldNEQ(FieldEndsAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// EndsAtIn applies the In predicate on the "ends_at" field.
|
||||||
|
func EndsAtIn(vs ...time.Time) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldIn(FieldEndsAt, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// EndsAtNotIn applies the NotIn predicate on the "ends_at" field.
|
||||||
|
func EndsAtNotIn(vs ...time.Time) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldNotIn(FieldEndsAt, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// EndsAtGT applies the GT predicate on the "ends_at" field.
|
||||||
|
func EndsAtGT(v time.Time) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldGT(FieldEndsAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// EndsAtGTE applies the GTE predicate on the "ends_at" field.
|
||||||
|
func EndsAtGTE(v time.Time) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldGTE(FieldEndsAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// EndsAtLT applies the LT predicate on the "ends_at" field.
|
||||||
|
func EndsAtLT(v time.Time) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldLT(FieldEndsAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// EndsAtLTE applies the LTE predicate on the "ends_at" field.
|
||||||
|
func EndsAtLTE(v time.Time) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldLTE(FieldEndsAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// EndsAtIsNil applies the IsNil predicate on the "ends_at" field.
|
||||||
|
func EndsAtIsNil() predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldIsNull(FieldEndsAt))
|
||||||
|
}
|
||||||
|
|
||||||
|
// EndsAtNotNil applies the NotNil predicate on the "ends_at" field.
|
||||||
|
func EndsAtNotNil() predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldNotNull(FieldEndsAt))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedByEQ applies the EQ predicate on the "created_by" field.
|
||||||
|
func CreatedByEQ(v int64) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldEQ(FieldCreatedBy, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedByNEQ applies the NEQ predicate on the "created_by" field.
|
||||||
|
func CreatedByNEQ(v int64) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldNEQ(FieldCreatedBy, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedByIn applies the In predicate on the "created_by" field.
|
||||||
|
func CreatedByIn(vs ...int64) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldIn(FieldCreatedBy, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedByNotIn applies the NotIn predicate on the "created_by" field.
|
||||||
|
func CreatedByNotIn(vs ...int64) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldNotIn(FieldCreatedBy, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedByGT applies the GT predicate on the "created_by" field.
|
||||||
|
func CreatedByGT(v int64) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldGT(FieldCreatedBy, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedByGTE applies the GTE predicate on the "created_by" field.
|
||||||
|
func CreatedByGTE(v int64) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldGTE(FieldCreatedBy, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedByLT applies the LT predicate on the "created_by" field.
|
||||||
|
func CreatedByLT(v int64) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldLT(FieldCreatedBy, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedByLTE applies the LTE predicate on the "created_by" field.
|
||||||
|
func CreatedByLTE(v int64) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldLTE(FieldCreatedBy, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedByIsNil applies the IsNil predicate on the "created_by" field.
|
||||||
|
func CreatedByIsNil() predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldIsNull(FieldCreatedBy))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedByNotNil applies the NotNil predicate on the "created_by" field.
|
||||||
|
func CreatedByNotNil() predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldNotNull(FieldCreatedBy))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedByEQ applies the EQ predicate on the "updated_by" field.
|
||||||
|
func UpdatedByEQ(v int64) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldEQ(FieldUpdatedBy, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedByNEQ applies the NEQ predicate on the "updated_by" field.
|
||||||
|
func UpdatedByNEQ(v int64) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldNEQ(FieldUpdatedBy, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedByIn applies the In predicate on the "updated_by" field.
|
||||||
|
func UpdatedByIn(vs ...int64) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldIn(FieldUpdatedBy, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedByNotIn applies the NotIn predicate on the "updated_by" field.
|
||||||
|
func UpdatedByNotIn(vs ...int64) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldNotIn(FieldUpdatedBy, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedByGT applies the GT predicate on the "updated_by" field.
|
||||||
|
func UpdatedByGT(v int64) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldGT(FieldUpdatedBy, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedByGTE applies the GTE predicate on the "updated_by" field.
|
||||||
|
func UpdatedByGTE(v int64) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldGTE(FieldUpdatedBy, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedByLT applies the LT predicate on the "updated_by" field.
|
||||||
|
func UpdatedByLT(v int64) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldLT(FieldUpdatedBy, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedByLTE applies the LTE predicate on the "updated_by" field.
|
||||||
|
func UpdatedByLTE(v int64) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldLTE(FieldUpdatedBy, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedByIsNil applies the IsNil predicate on the "updated_by" field.
|
||||||
|
func UpdatedByIsNil() predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldIsNull(FieldUpdatedBy))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedByNotNil applies the NotNil predicate on the "updated_by" field.
|
||||||
|
func UpdatedByNotNil() predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldNotNull(FieldUpdatedBy))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAtEQ applies the EQ predicate on the "created_at" field.
|
||||||
|
func CreatedAtEQ(v time.Time) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldEQ(FieldCreatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAtNEQ applies the NEQ predicate on the "created_at" field.
|
||||||
|
func CreatedAtNEQ(v time.Time) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldNEQ(FieldCreatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAtIn applies the In predicate on the "created_at" field.
|
||||||
|
func CreatedAtIn(vs ...time.Time) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldIn(FieldCreatedAt, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAtNotIn applies the NotIn predicate on the "created_at" field.
|
||||||
|
func CreatedAtNotIn(vs ...time.Time) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldNotIn(FieldCreatedAt, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAtGT applies the GT predicate on the "created_at" field.
|
||||||
|
func CreatedAtGT(v time.Time) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldGT(FieldCreatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAtGTE applies the GTE predicate on the "created_at" field.
|
||||||
|
func CreatedAtGTE(v time.Time) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldGTE(FieldCreatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAtLT applies the LT predicate on the "created_at" field.
|
||||||
|
func CreatedAtLT(v time.Time) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldLT(FieldCreatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAtLTE applies the LTE predicate on the "created_at" field.
|
||||||
|
func CreatedAtLTE(v time.Time) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldLTE(FieldCreatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedAtEQ applies the EQ predicate on the "updated_at" field.
|
||||||
|
func UpdatedAtEQ(v time.Time) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldEQ(FieldUpdatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field.
|
||||||
|
func UpdatedAtNEQ(v time.Time) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldNEQ(FieldUpdatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedAtIn applies the In predicate on the "updated_at" field.
|
||||||
|
func UpdatedAtIn(vs ...time.Time) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldIn(FieldUpdatedAt, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field.
|
||||||
|
func UpdatedAtNotIn(vs ...time.Time) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldNotIn(FieldUpdatedAt, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedAtGT applies the GT predicate on the "updated_at" field.
|
||||||
|
func UpdatedAtGT(v time.Time) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldGT(FieldUpdatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedAtGTE applies the GTE predicate on the "updated_at" field.
|
||||||
|
func UpdatedAtGTE(v time.Time) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldGTE(FieldUpdatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedAtLT applies the LT predicate on the "updated_at" field.
|
||||||
|
func UpdatedAtLT(v time.Time) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldLT(FieldUpdatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedAtLTE applies the LTE predicate on the "updated_at" field.
|
||||||
|
func UpdatedAtLTE(v time.Time) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.FieldLTE(FieldUpdatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// HasReads applies the HasEdge predicate on the "reads" edge.
|
||||||
|
func HasReads() predicate.Announcement {
|
||||||
|
return predicate.Announcement(func(s *sql.Selector) {
|
||||||
|
step := sqlgraph.NewStep(
|
||||||
|
sqlgraph.From(Table, FieldID),
|
||||||
|
sqlgraph.Edge(sqlgraph.O2M, false, ReadsTable, ReadsColumn),
|
||||||
|
)
|
||||||
|
sqlgraph.HasNeighbors(s, step)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// HasReadsWith applies the HasEdge predicate on the "reads" edge with a given conditions (other predicates).
|
||||||
|
func HasReadsWith(preds ...predicate.AnnouncementRead) predicate.Announcement {
|
||||||
|
return predicate.Announcement(func(s *sql.Selector) {
|
||||||
|
step := newReadsStep()
|
||||||
|
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
|
||||||
|
for _, p := range preds {
|
||||||
|
p(s)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// And groups predicates with the AND operator between them.
|
||||||
|
func And(predicates ...predicate.Announcement) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.AndPredicates(predicates...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Or groups predicates with the OR operator between them.
|
||||||
|
func Or(predicates ...predicate.Announcement) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.OrPredicates(predicates...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Not applies the not operator on the given predicate.
|
||||||
|
func Not(p predicate.Announcement) predicate.Announcement {
|
||||||
|
return predicate.Announcement(sql.NotPredicates(p))
|
||||||
|
}
|
||||||
1229
backend/ent/announcement_create.go
Normal file
88
backend/ent/announcement_delete.go
Normal file
@@ -0,0 +1,88 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package ent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
"entgo.io/ent/schema/field"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/announcement"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||||
|
)
|
||||||
|
|
||||||
|
// AnnouncementDelete is the builder for deleting a Announcement entity.
|
||||||
|
type AnnouncementDelete struct {
|
||||||
|
config
|
||||||
|
hooks []Hook
|
||||||
|
mutation *AnnouncementMutation
|
||||||
|
}
|
||||||
|
|
||||||
|
// Where appends a list predicates to the AnnouncementDelete builder.
|
||||||
|
func (_d *AnnouncementDelete) Where(ps ...predicate.Announcement) *AnnouncementDelete {
|
||||||
|
_d.mutation.Where(ps...)
|
||||||
|
return _d
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exec executes the deletion query and returns how many vertices were deleted.
|
||||||
|
func (_d *AnnouncementDelete) Exec(ctx context.Context) (int, error) {
|
||||||
|
return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
|
||||||
|
func (_d *AnnouncementDelete) ExecX(ctx context.Context) int {
|
||||||
|
n, err := _d.Exec(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_d *AnnouncementDelete) sqlExec(ctx context.Context) (int, error) {
|
||||||
|
_spec := sqlgraph.NewDeleteSpec(announcement.Table, sqlgraph.NewFieldSpec(announcement.FieldID, field.TypeInt64))
|
||||||
|
if ps := _d.mutation.predicates; len(ps) > 0 {
|
||||||
|
_spec.Predicate = func(selector *sql.Selector) {
|
||||||
|
for i := range ps {
|
||||||
|
ps[i](selector)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec)
|
||||||
|
if err != nil && sqlgraph.IsConstraintError(err) {
|
||||||
|
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||||
|
}
|
||||||
|
_d.mutation.done = true
|
||||||
|
return affected, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// AnnouncementDeleteOne is the builder for deleting a single Announcement entity.
|
||||||
|
type AnnouncementDeleteOne struct {
|
||||||
|
_d *AnnouncementDelete
|
||||||
|
}
|
||||||
|
|
||||||
|
// Where appends a list predicates to the AnnouncementDelete builder.
|
||||||
|
func (_d *AnnouncementDeleteOne) Where(ps ...predicate.Announcement) *AnnouncementDeleteOne {
|
||||||
|
_d._d.mutation.Where(ps...)
|
||||||
|
return _d
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exec executes the deletion query.
|
||||||
|
func (_d *AnnouncementDeleteOne) Exec(ctx context.Context) error {
|
||||||
|
n, err := _d._d.Exec(ctx)
|
||||||
|
switch {
|
||||||
|
case err != nil:
|
||||||
|
return err
|
||||||
|
case n == 0:
|
||||||
|
return &NotFoundError{announcement.Label}
|
||||||
|
default:
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
|
||||||
|
func (_d *AnnouncementDeleteOne) ExecX(ctx context.Context) {
|
||||||
|
if err := _d.Exec(ctx); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
643
backend/ent/announcement_query.go
Normal file
@@ -0,0 +1,643 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package ent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"database/sql/driver"
|
||||||
|
"fmt"
|
||||||
|
"math"
|
||||||
|
|
||||||
|
"entgo.io/ent"
|
||||||
|
"entgo.io/ent/dialect"
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
"entgo.io/ent/schema/field"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/announcement"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/announcementread"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||||
|
)
|
||||||
|
|
||||||
|
// AnnouncementQuery is the builder for querying Announcement entities.
|
||||||
|
type AnnouncementQuery struct {
|
||||||
|
config
|
||||||
|
ctx *QueryContext
|
||||||
|
order []announcement.OrderOption
|
||||||
|
inters []Interceptor
|
||||||
|
predicates []predicate.Announcement
|
||||||
|
withReads *AnnouncementReadQuery
|
||||||
|
modifiers []func(*sql.Selector)
|
||||||
|
// intermediate query (i.e. traversal path).
|
||||||
|
sql *sql.Selector
|
||||||
|
path func(context.Context) (*sql.Selector, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Where adds a new predicate for the AnnouncementQuery builder.
|
||||||
|
func (_q *AnnouncementQuery) Where(ps ...predicate.Announcement) *AnnouncementQuery {
|
||||||
|
_q.predicates = append(_q.predicates, ps...)
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// Limit the number of records to be returned by this query.
|
||||||
|
func (_q *AnnouncementQuery) Limit(limit int) *AnnouncementQuery {
|
||||||
|
_q.ctx.Limit = &limit
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// Offset to start from.
|
||||||
|
func (_q *AnnouncementQuery) Offset(offset int) *AnnouncementQuery {
|
||||||
|
_q.ctx.Offset = &offset
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unique configures the query builder to filter duplicate records on query.
|
||||||
|
// By default, unique is set to true, and can be disabled using this method.
|
||||||
|
func (_q *AnnouncementQuery) Unique(unique bool) *AnnouncementQuery {
|
||||||
|
_q.ctx.Unique = &unique
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// Order specifies how the records should be ordered.
|
||||||
|
func (_q *AnnouncementQuery) Order(o ...announcement.OrderOption) *AnnouncementQuery {
|
||||||
|
_q.order = append(_q.order, o...)
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// QueryReads chains the current query on the "reads" edge.
|
||||||
|
func (_q *AnnouncementQuery) QueryReads() *AnnouncementReadQuery {
|
||||||
|
query := (&AnnouncementReadClient{config: _q.config}).Query()
|
||||||
|
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
|
||||||
|
if err := _q.prepareQuery(ctx); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
selector := _q.sqlQuery(ctx)
|
||||||
|
if err := selector.Err(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
step := sqlgraph.NewStep(
|
||||||
|
sqlgraph.From(announcement.Table, announcement.FieldID, selector),
|
||||||
|
sqlgraph.To(announcementread.Table, announcementread.FieldID),
|
||||||
|
sqlgraph.Edge(sqlgraph.O2M, false, announcement.ReadsTable, announcement.ReadsColumn),
|
||||||
|
)
|
||||||
|
fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step)
|
||||||
|
return fromU, nil
|
||||||
|
}
|
||||||
|
return query
|
||||||
|
}
|
||||||
|
|
||||||
|
// First returns the first Announcement entity from the query.
|
||||||
|
// Returns a *NotFoundError when no Announcement was found.
|
||||||
|
func (_q *AnnouncementQuery) First(ctx context.Context) (*Announcement, error) {
|
||||||
|
nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if len(nodes) == 0 {
|
||||||
|
return nil, &NotFoundError{announcement.Label}
|
||||||
|
}
|
||||||
|
return nodes[0], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// FirstX is like First, but panics if an error occurs.
|
||||||
|
func (_q *AnnouncementQuery) FirstX(ctx context.Context) *Announcement {
|
||||||
|
node, err := _q.First(ctx)
|
||||||
|
if err != nil && !IsNotFound(err) {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return node
|
||||||
|
}
|
||||||
|
|
||||||
|
// FirstID returns the first Announcement ID from the query.
|
||||||
|
// Returns a *NotFoundError when no Announcement ID was found.
|
||||||
|
func (_q *AnnouncementQuery) FirstID(ctx context.Context) (id int64, err error) {
|
||||||
|
var ids []int64
|
||||||
|
if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if len(ids) == 0 {
|
||||||
|
err = &NotFoundError{announcement.Label}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
return ids[0], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// FirstIDX is like FirstID, but panics if an error occurs.
|
||||||
|
func (_q *AnnouncementQuery) FirstIDX(ctx context.Context) int64 {
|
||||||
|
id, err := _q.FirstID(ctx)
|
||||||
|
if err != nil && !IsNotFound(err) {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return id
|
||||||
|
}
|
||||||
|
|
||||||
|
// Only returns a single Announcement entity found by the query, ensuring it only returns one.
|
||||||
|
// Returns a *NotSingularError when more than one Announcement entity is found.
|
||||||
|
// Returns a *NotFoundError when no Announcement entities are found.
|
||||||
|
func (_q *AnnouncementQuery) Only(ctx context.Context) (*Announcement, error) {
|
||||||
|
nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
switch len(nodes) {
|
||||||
|
case 1:
|
||||||
|
return nodes[0], nil
|
||||||
|
case 0:
|
||||||
|
return nil, &NotFoundError{announcement.Label}
|
||||||
|
default:
|
||||||
|
return nil, &NotSingularError{announcement.Label}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnlyX is like Only, but panics if an error occurs.
|
||||||
|
func (_q *AnnouncementQuery) OnlyX(ctx context.Context) *Announcement {
|
||||||
|
node, err := _q.Only(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return node
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnlyID is like Only, but returns the only Announcement ID in the query.
|
||||||
|
// Returns a *NotSingularError when more than one Announcement ID is found.
|
||||||
|
// Returns a *NotFoundError when no entities are found.
|
||||||
|
func (_q *AnnouncementQuery) OnlyID(ctx context.Context) (id int64, err error) {
|
||||||
|
var ids []int64
|
||||||
|
if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
switch len(ids) {
|
||||||
|
case 1:
|
||||||
|
id = ids[0]
|
||||||
|
case 0:
|
||||||
|
err = &NotFoundError{announcement.Label}
|
||||||
|
default:
|
||||||
|
err = &NotSingularError{announcement.Label}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnlyIDX is like OnlyID, but panics if an error occurs.
|
||||||
|
func (_q *AnnouncementQuery) OnlyIDX(ctx context.Context) int64 {
|
||||||
|
id, err := _q.OnlyID(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return id
|
||||||
|
}
|
||||||
|
|
||||||
|
// All executes the query and returns a list of Announcements.
|
||||||
|
func (_q *AnnouncementQuery) All(ctx context.Context) ([]*Announcement, error) {
|
||||||
|
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll)
|
||||||
|
if err := _q.prepareQuery(ctx); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
qr := querierAll[[]*Announcement, *AnnouncementQuery]()
|
||||||
|
return withInterceptors[[]*Announcement](ctx, _q, qr, _q.inters)
|
||||||
|
}
|
||||||
|
|
||||||
|
// AllX is like All, but panics if an error occurs.
|
||||||
|
func (_q *AnnouncementQuery) AllX(ctx context.Context) []*Announcement {
|
||||||
|
nodes, err := _q.All(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return nodes
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDs executes the query and returns a list of Announcement IDs.
|
||||||
|
func (_q *AnnouncementQuery) IDs(ctx context.Context) (ids []int64, err error) {
|
||||||
|
if _q.ctx.Unique == nil && _q.path != nil {
|
||||||
|
_q.Unique(true)
|
||||||
|
}
|
||||||
|
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs)
|
||||||
|
if err = _q.Select(announcement.FieldID).Scan(ctx, &ids); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return ids, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDsX is like IDs, but panics if an error occurs.
|
||||||
|
func (_q *AnnouncementQuery) IDsX(ctx context.Context) []int64 {
|
||||||
|
ids, err := _q.IDs(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return ids
|
||||||
|
}
|
||||||
|
|
||||||
|
// Count returns the count of the given query.
|
||||||
|
func (_q *AnnouncementQuery) Count(ctx context.Context) (int, error) {
|
||||||
|
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount)
|
||||||
|
if err := _q.prepareQuery(ctx); err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
return withInterceptors[int](ctx, _q, querierCount[*AnnouncementQuery](), _q.inters)
|
||||||
|
}
|
||||||
|
|
||||||
|
// CountX is like Count, but panics if an error occurs.
|
||||||
|
func (_q *AnnouncementQuery) CountX(ctx context.Context) int {
|
||||||
|
count, err := _q.Count(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return count
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exist returns true if the query has elements in the graph.
|
||||||
|
func (_q *AnnouncementQuery) Exist(ctx context.Context) (bool, error) {
|
||||||
|
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist)
|
||||||
|
switch _, err := _q.FirstID(ctx); {
|
||||||
|
case IsNotFound(err):
|
||||||
|
return false, nil
|
||||||
|
case err != nil:
|
||||||
|
return false, fmt.Errorf("ent: check existence: %w", err)
|
||||||
|
default:
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExistX is like Exist, but panics if an error occurs.
|
||||||
|
func (_q *AnnouncementQuery) ExistX(ctx context.Context) bool {
|
||||||
|
exist, err := _q.Exist(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return exist
|
||||||
|
}
|
||||||
|
|
||||||
|
// Clone returns a duplicate of the AnnouncementQuery builder, including all associated steps. It can be
|
||||||
|
// used to prepare common query builders and use them differently after the clone is made.
|
||||||
|
func (_q *AnnouncementQuery) Clone() *AnnouncementQuery {
|
||||||
|
if _q == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return &AnnouncementQuery{
|
||||||
|
config: _q.config,
|
||||||
|
ctx: _q.ctx.Clone(),
|
||||||
|
order: append([]announcement.OrderOption{}, _q.order...),
|
||||||
|
inters: append([]Interceptor{}, _q.inters...),
|
||||||
|
predicates: append([]predicate.Announcement{}, _q.predicates...),
|
||||||
|
withReads: _q.withReads.Clone(),
|
||||||
|
// clone intermediate query.
|
||||||
|
sql: _q.sql.Clone(),
|
||||||
|
path: _q.path,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithReads tells the query-builder to eager-load the nodes that are connected to
|
||||||
|
// the "reads" edge. The optional arguments are used to configure the query builder of the edge.
|
||||||
|
func (_q *AnnouncementQuery) WithReads(opts ...func(*AnnouncementReadQuery)) *AnnouncementQuery {
|
||||||
|
query := (&AnnouncementReadClient{config: _q.config}).Query()
|
||||||
|
for _, opt := range opts {
|
||||||
|
opt(query)
|
||||||
|
}
|
||||||
|
_q.withReads = query
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// GroupBy is used to group vertices by one or more fields/columns.
|
||||||
|
// It is often used with aggregate functions, like: count, max, mean, min, sum.
|
||||||
|
//
|
||||||
|
// Example:
|
||||||
|
//
|
||||||
|
// var v []struct {
|
||||||
|
// Title string `json:"title,omitempty"`
|
||||||
|
// Count int `json:"count,omitempty"`
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// client.Announcement.Query().
|
||||||
|
// GroupBy(announcement.FieldTitle).
|
||||||
|
// Aggregate(ent.Count()).
|
||||||
|
// Scan(ctx, &v)
|
||||||
|
func (_q *AnnouncementQuery) GroupBy(field string, fields ...string) *AnnouncementGroupBy {
|
||||||
|
_q.ctx.Fields = append([]string{field}, fields...)
|
||||||
|
grbuild := &AnnouncementGroupBy{build: _q}
|
||||||
|
grbuild.flds = &_q.ctx.Fields
|
||||||
|
grbuild.label = announcement.Label
|
||||||
|
grbuild.scan = grbuild.Scan
|
||||||
|
return grbuild
|
||||||
|
}
|
||||||
|
|
||||||
|
// Select allows the selection one or more fields/columns for the given query,
|
||||||
|
// instead of selecting all fields in the entity.
|
||||||
|
//
|
||||||
|
// Example:
|
||||||
|
//
|
||||||
|
// var v []struct {
|
||||||
|
// Title string `json:"title,omitempty"`
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// client.Announcement.Query().
|
||||||
|
// Select(announcement.FieldTitle).
|
||||||
|
// Scan(ctx, &v)
|
||||||
|
func (_q *AnnouncementQuery) Select(fields ...string) *AnnouncementSelect {
|
||||||
|
_q.ctx.Fields = append(_q.ctx.Fields, fields...)
|
||||||
|
sbuild := &AnnouncementSelect{AnnouncementQuery: _q}
|
||||||
|
sbuild.label = announcement.Label
|
||||||
|
sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan
|
||||||
|
return sbuild
|
||||||
|
}
|
||||||
|
|
||||||
|
// Aggregate returns a AnnouncementSelect configured with the given aggregations.
|
||||||
|
func (_q *AnnouncementQuery) Aggregate(fns ...AggregateFunc) *AnnouncementSelect {
|
||||||
|
return _q.Select().Aggregate(fns...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_q *AnnouncementQuery) prepareQuery(ctx context.Context) error {
|
||||||
|
for _, inter := range _q.inters {
|
||||||
|
if inter == nil {
|
||||||
|
return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
|
||||||
|
}
|
||||||
|
if trv, ok := inter.(Traverser); ok {
|
||||||
|
if err := trv.Traverse(ctx, _q); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for _, f := range _q.ctx.Fields {
|
||||||
|
if !announcement.ValidColumn(f) {
|
||||||
|
return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if _q.path != nil {
|
||||||
|
prev, err := _q.path(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
_q.sql = prev
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_q *AnnouncementQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Announcement, error) {
|
||||||
|
var (
|
||||||
|
nodes = []*Announcement{}
|
||||||
|
_spec = _q.querySpec()
|
||||||
|
loadedTypes = [1]bool{
|
||||||
|
_q.withReads != nil,
|
||||||
|
}
|
||||||
|
)
|
||||||
|
_spec.ScanValues = func(columns []string) ([]any, error) {
|
||||||
|
return (*Announcement).scanValues(nil, columns)
|
||||||
|
}
|
||||||
|
_spec.Assign = func(columns []string, values []any) error {
|
||||||
|
node := &Announcement{config: _q.config}
|
||||||
|
nodes = append(nodes, node)
|
||||||
|
node.Edges.loadedTypes = loadedTypes
|
||||||
|
return node.assignValues(columns, values)
|
||||||
|
}
|
||||||
|
if len(_q.modifiers) > 0 {
|
||||||
|
_spec.Modifiers = _q.modifiers
|
||||||
|
}
|
||||||
|
for i := range hooks {
|
||||||
|
hooks[i](ctx, _spec)
|
||||||
|
}
|
||||||
|
if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if len(nodes) == 0 {
|
||||||
|
return nodes, nil
|
||||||
|
}
|
||||||
|
if query := _q.withReads; query != nil {
|
||||||
|
if err := _q.loadReads(ctx, query, nodes,
|
||||||
|
func(n *Announcement) { n.Edges.Reads = []*AnnouncementRead{} },
|
||||||
|
func(n *Announcement, e *AnnouncementRead) { n.Edges.Reads = append(n.Edges.Reads, e) }); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nodes, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_q *AnnouncementQuery) loadReads(ctx context.Context, query *AnnouncementReadQuery, nodes []*Announcement, init func(*Announcement), assign func(*Announcement, *AnnouncementRead)) error {
|
||||||
|
fks := make([]driver.Value, 0, len(nodes))
|
||||||
|
nodeids := make(map[int64]*Announcement)
|
||||||
|
for i := range nodes {
|
||||||
|
fks = append(fks, nodes[i].ID)
|
||||||
|
nodeids[nodes[i].ID] = nodes[i]
|
||||||
|
if init != nil {
|
||||||
|
init(nodes[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if len(query.ctx.Fields) > 0 {
|
||||||
|
query.ctx.AppendFieldOnce(announcementread.FieldAnnouncementID)
|
||||||
|
}
|
||||||
|
query.Where(predicate.AnnouncementRead(func(s *sql.Selector) {
|
||||||
|
s.Where(sql.InValues(s.C(announcement.ReadsColumn), fks...))
|
||||||
|
}))
|
||||||
|
neighbors, err := query.All(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
for _, n := range neighbors {
|
||||||
|
fk := n.AnnouncementID
|
||||||
|
node, ok := nodeids[fk]
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf(`unexpected referenced foreign-key "announcement_id" returned %v for node %v`, fk, n.ID)
|
||||||
|
}
|
||||||
|
assign(node, n)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_q *AnnouncementQuery) sqlCount(ctx context.Context) (int, error) {
|
||||||
|
_spec := _q.querySpec()
|
||||||
|
if len(_q.modifiers) > 0 {
|
||||||
|
_spec.Modifiers = _q.modifiers
|
||||||
|
}
|
||||||
|
_spec.Node.Columns = _q.ctx.Fields
|
||||||
|
if len(_q.ctx.Fields) > 0 {
|
||||||
|
_spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique
|
||||||
|
}
|
||||||
|
return sqlgraph.CountNodes(ctx, _q.driver, _spec)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_q *AnnouncementQuery) querySpec() *sqlgraph.QuerySpec {
|
||||||
|
_spec := sqlgraph.NewQuerySpec(announcement.Table, announcement.Columns, sqlgraph.NewFieldSpec(announcement.FieldID, field.TypeInt64))
|
||||||
|
_spec.From = _q.sql
|
||||||
|
if unique := _q.ctx.Unique; unique != nil {
|
||||||
|
_spec.Unique = *unique
|
||||||
|
} else if _q.path != nil {
|
||||||
|
_spec.Unique = true
|
||||||
|
}
|
||||||
|
if fields := _q.ctx.Fields; len(fields) > 0 {
|
||||||
|
_spec.Node.Columns = make([]string, 0, len(fields))
|
||||||
|
_spec.Node.Columns = append(_spec.Node.Columns, announcement.FieldID)
|
||||||
|
for i := range fields {
|
||||||
|
if fields[i] != announcement.FieldID {
|
||||||
|
_spec.Node.Columns = append(_spec.Node.Columns, fields[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if ps := _q.predicates; len(ps) > 0 {
|
||||||
|
_spec.Predicate = func(selector *sql.Selector) {
|
||||||
|
for i := range ps {
|
||||||
|
ps[i](selector)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if limit := _q.ctx.Limit; limit != nil {
|
||||||
|
_spec.Limit = *limit
|
||||||
|
}
|
||||||
|
if offset := _q.ctx.Offset; offset != nil {
|
||||||
|
_spec.Offset = *offset
|
||||||
|
}
|
||||||
|
if ps := _q.order; len(ps) > 0 {
|
||||||
|
_spec.Order = func(selector *sql.Selector) {
|
||||||
|
for i := range ps {
|
||||||
|
ps[i](selector)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return _spec
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_q *AnnouncementQuery) sqlQuery(ctx context.Context) *sql.Selector {
|
||||||
|
builder := sql.Dialect(_q.driver.Dialect())
|
||||||
|
t1 := builder.Table(announcement.Table)
|
||||||
|
columns := _q.ctx.Fields
|
||||||
|
if len(columns) == 0 {
|
||||||
|
columns = announcement.Columns
|
||||||
|
}
|
||||||
|
selector := builder.Select(t1.Columns(columns...)...).From(t1)
|
||||||
|
if _q.sql != nil {
|
||||||
|
selector = _q.sql
|
||||||
|
selector.Select(selector.Columns(columns...)...)
|
||||||
|
}
|
||||||
|
if _q.ctx.Unique != nil && *_q.ctx.Unique {
|
||||||
|
selector.Distinct()
|
||||||
|
}
|
||||||
|
for _, m := range _q.modifiers {
|
||||||
|
m(selector)
|
||||||
|
}
|
||||||
|
for _, p := range _q.predicates {
|
||||||
|
p(selector)
|
||||||
|
}
|
||||||
|
for _, p := range _q.order {
|
||||||
|
p(selector)
|
||||||
|
}
|
||||||
|
if offset := _q.ctx.Offset; offset != nil {
|
||||||
|
// limit is mandatory for offset clause. We start
|
||||||
|
// with default value, and override it below if needed.
|
||||||
|
selector.Offset(*offset).Limit(math.MaxInt32)
|
||||||
|
}
|
||||||
|
if limit := _q.ctx.Limit; limit != nil {
|
||||||
|
selector.Limit(*limit)
|
||||||
|
}
|
||||||
|
return selector
|
||||||
|
}
|
||||||
|
|
||||||
|
// ForUpdate locks the selected rows against concurrent updates, and prevent them from being
|
||||||
|
// updated, deleted or "selected ... for update" by other sessions, until the transaction is
|
||||||
|
// either committed or rolled-back.
|
||||||
|
func (_q *AnnouncementQuery) ForUpdate(opts ...sql.LockOption) *AnnouncementQuery {
|
||||||
|
if _q.driver.Dialect() == dialect.Postgres {
|
||||||
|
_q.Unique(false)
|
||||||
|
}
|
||||||
|
_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
|
||||||
|
s.ForUpdate(opts...)
|
||||||
|
})
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock
|
||||||
|
// on any rows that are read. Other sessions can read the rows, but cannot modify them
|
||||||
|
// until your transaction commits.
|
||||||
|
func (_q *AnnouncementQuery) ForShare(opts ...sql.LockOption) *AnnouncementQuery {
|
||||||
|
if _q.driver.Dialect() == dialect.Postgres {
|
||||||
|
_q.Unique(false)
|
||||||
|
}
|
||||||
|
_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
|
||||||
|
s.ForShare(opts...)
|
||||||
|
})
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// AnnouncementGroupBy is the group-by builder for Announcement entities.
|
||||||
|
type AnnouncementGroupBy struct {
|
||||||
|
selector
|
||||||
|
build *AnnouncementQuery
|
||||||
|
}
|
||||||
|
|
||||||
|
// Aggregate adds the given aggregation functions to the group-by query.
|
||||||
|
func (_g *AnnouncementGroupBy) Aggregate(fns ...AggregateFunc) *AnnouncementGroupBy {
|
||||||
|
_g.fns = append(_g.fns, fns...)
|
||||||
|
return _g
|
||||||
|
}
|
||||||
|
|
||||||
|
// Scan applies the selector query and scans the result into the given value.
|
||||||
|
func (_g *AnnouncementGroupBy) Scan(ctx context.Context, v any) error {
|
||||||
|
ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy)
|
||||||
|
if err := _g.build.prepareQuery(ctx); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return scanWithInterceptors[*AnnouncementQuery, *AnnouncementGroupBy](ctx, _g.build, _g, _g.build.inters, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_g *AnnouncementGroupBy) sqlScan(ctx context.Context, root *AnnouncementQuery, v any) error {
|
||||||
|
selector := root.sqlQuery(ctx).Select()
|
||||||
|
aggregation := make([]string, 0, len(_g.fns))
|
||||||
|
for _, fn := range _g.fns {
|
||||||
|
aggregation = append(aggregation, fn(selector))
|
||||||
|
}
|
||||||
|
if len(selector.SelectedColumns()) == 0 {
|
||||||
|
columns := make([]string, 0, len(*_g.flds)+len(_g.fns))
|
||||||
|
for _, f := range *_g.flds {
|
||||||
|
columns = append(columns, selector.C(f))
|
||||||
|
}
|
||||||
|
columns = append(columns, aggregation...)
|
||||||
|
selector.Select(columns...)
|
||||||
|
}
|
||||||
|
selector.GroupBy(selector.Columns(*_g.flds...)...)
|
||||||
|
if err := selector.Err(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
rows := &sql.Rows{}
|
||||||
|
query, args := selector.Query()
|
||||||
|
if err := _g.build.driver.Query(ctx, query, args, rows); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer rows.Close()
|
||||||
|
return sql.ScanSlice(rows, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
// AnnouncementSelect is the builder for selecting fields of Announcement entities.
|
||||||
|
type AnnouncementSelect struct {
|
||||||
|
*AnnouncementQuery
|
||||||
|
selector
|
||||||
|
}
|
||||||
|
|
||||||
|
// Aggregate adds the given aggregation functions to the selector query.
|
||||||
|
func (_s *AnnouncementSelect) Aggregate(fns ...AggregateFunc) *AnnouncementSelect {
|
||||||
|
_s.fns = append(_s.fns, fns...)
|
||||||
|
return _s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Scan applies the selector query and scans the result into the given value.
|
||||||
|
func (_s *AnnouncementSelect) Scan(ctx context.Context, v any) error {
|
||||||
|
ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect)
|
||||||
|
if err := _s.prepareQuery(ctx); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return scanWithInterceptors[*AnnouncementQuery, *AnnouncementSelect](ctx, _s.AnnouncementQuery, _s, _s.inters, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_s *AnnouncementSelect) sqlScan(ctx context.Context, root *AnnouncementQuery, v any) error {
|
||||||
|
selector := root.sqlQuery(ctx)
|
||||||
|
aggregation := make([]string, 0, len(_s.fns))
|
||||||
|
for _, fn := range _s.fns {
|
||||||
|
aggregation = append(aggregation, fn(selector))
|
||||||
|
}
|
||||||
|
switch n := len(*_s.selector.flds); {
|
||||||
|
case n == 0 && len(aggregation) > 0:
|
||||||
|
selector.Select(aggregation...)
|
||||||
|
case n != 0 && len(aggregation) > 0:
|
||||||
|
selector.AppendSelect(aggregation...)
|
||||||
|
}
|
||||||
|
rows := &sql.Rows{}
|
||||||
|
query, args := selector.Query()
|
||||||
|
if err := _s.driver.Query(ctx, query, args, rows); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer rows.Close()
|
||||||
|
return sql.ScanSlice(rows, v)
|
||||||
|
}
|
||||||
868
backend/ent/announcement_update.go
Normal file
@@ -0,0 +1,868 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package ent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
"entgo.io/ent/schema/field"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/announcement"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/announcementread"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||||
|
"github.com/Wei-Shaw/sub2api/internal/domain"
|
||||||
|
)
|
||||||
|
|
||||||
|
// AnnouncementUpdate is the builder for updating Announcement entities.
|
||||||
|
type AnnouncementUpdate struct {
|
||||||
|
config
|
||||||
|
hooks []Hook
|
||||||
|
mutation *AnnouncementMutation
|
||||||
|
}
|
||||||
|
|
||||||
|
// Where appends a list predicates to the AnnouncementUpdate builder.
|
||||||
|
func (_u *AnnouncementUpdate) Where(ps ...predicate.Announcement) *AnnouncementUpdate {
|
||||||
|
_u.mutation.Where(ps...)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetTitle sets the "title" field.
|
||||||
|
func (_u *AnnouncementUpdate) SetTitle(v string) *AnnouncementUpdate {
|
||||||
|
_u.mutation.SetTitle(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableTitle sets the "title" field if the given value is not nil.
|
||||||
|
func (_u *AnnouncementUpdate) SetNillableTitle(v *string) *AnnouncementUpdate {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetTitle(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetContent sets the "content" field.
|
||||||
|
func (_u *AnnouncementUpdate) SetContent(v string) *AnnouncementUpdate {
|
||||||
|
_u.mutation.SetContent(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableContent sets the "content" field if the given value is not nil.
|
||||||
|
func (_u *AnnouncementUpdate) SetNillableContent(v *string) *AnnouncementUpdate {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetContent(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetStatus sets the "status" field.
|
||||||
|
func (_u *AnnouncementUpdate) SetStatus(v string) *AnnouncementUpdate {
|
||||||
|
_u.mutation.SetStatus(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableStatus sets the "status" field if the given value is not nil.
|
||||||
|
func (_u *AnnouncementUpdate) SetNillableStatus(v *string) *AnnouncementUpdate {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetStatus(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNotifyMode sets the "notify_mode" field.
|
||||||
|
func (_u *AnnouncementUpdate) SetNotifyMode(v string) *AnnouncementUpdate {
|
||||||
|
_u.mutation.SetNotifyMode(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableNotifyMode sets the "notify_mode" field if the given value is not nil.
|
||||||
|
func (_u *AnnouncementUpdate) SetNillableNotifyMode(v *string) *AnnouncementUpdate {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetNotifyMode(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetTargeting sets the "targeting" field.
|
||||||
|
func (_u *AnnouncementUpdate) SetTargeting(v domain.AnnouncementTargeting) *AnnouncementUpdate {
|
||||||
|
_u.mutation.SetTargeting(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableTargeting sets the "targeting" field if the given value is not nil.
|
||||||
|
func (_u *AnnouncementUpdate) SetNillableTargeting(v *domain.AnnouncementTargeting) *AnnouncementUpdate {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetTargeting(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearTargeting clears the value of the "targeting" field.
|
||||||
|
func (_u *AnnouncementUpdate) ClearTargeting() *AnnouncementUpdate {
|
||||||
|
_u.mutation.ClearTargeting()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetStartsAt sets the "starts_at" field.
|
||||||
|
func (_u *AnnouncementUpdate) SetStartsAt(v time.Time) *AnnouncementUpdate {
|
||||||
|
_u.mutation.SetStartsAt(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableStartsAt sets the "starts_at" field if the given value is not nil.
|
||||||
|
func (_u *AnnouncementUpdate) SetNillableStartsAt(v *time.Time) *AnnouncementUpdate {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetStartsAt(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearStartsAt clears the value of the "starts_at" field.
|
||||||
|
func (_u *AnnouncementUpdate) ClearStartsAt() *AnnouncementUpdate {
|
||||||
|
_u.mutation.ClearStartsAt()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetEndsAt sets the "ends_at" field.
|
||||||
|
func (_u *AnnouncementUpdate) SetEndsAt(v time.Time) *AnnouncementUpdate {
|
||||||
|
_u.mutation.SetEndsAt(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableEndsAt sets the "ends_at" field if the given value is not nil.
|
||||||
|
func (_u *AnnouncementUpdate) SetNillableEndsAt(v *time.Time) *AnnouncementUpdate {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetEndsAt(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearEndsAt clears the value of the "ends_at" field.
|
||||||
|
func (_u *AnnouncementUpdate) ClearEndsAt() *AnnouncementUpdate {
|
||||||
|
_u.mutation.ClearEndsAt()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetCreatedBy sets the "created_by" field.
|
||||||
|
func (_u *AnnouncementUpdate) SetCreatedBy(v int64) *AnnouncementUpdate {
|
||||||
|
_u.mutation.ResetCreatedBy()
|
||||||
|
_u.mutation.SetCreatedBy(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableCreatedBy sets the "created_by" field if the given value is not nil.
|
||||||
|
func (_u *AnnouncementUpdate) SetNillableCreatedBy(v *int64) *AnnouncementUpdate {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetCreatedBy(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddCreatedBy adds value to the "created_by" field.
|
||||||
|
func (_u *AnnouncementUpdate) AddCreatedBy(v int64) *AnnouncementUpdate {
|
||||||
|
_u.mutation.AddCreatedBy(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearCreatedBy clears the value of the "created_by" field.
|
||||||
|
func (_u *AnnouncementUpdate) ClearCreatedBy() *AnnouncementUpdate {
|
||||||
|
_u.mutation.ClearCreatedBy()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetUpdatedBy sets the "updated_by" field.
|
||||||
|
func (_u *AnnouncementUpdate) SetUpdatedBy(v int64) *AnnouncementUpdate {
|
||||||
|
_u.mutation.ResetUpdatedBy()
|
||||||
|
_u.mutation.SetUpdatedBy(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableUpdatedBy sets the "updated_by" field if the given value is not nil.
|
||||||
|
func (_u *AnnouncementUpdate) SetNillableUpdatedBy(v *int64) *AnnouncementUpdate {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetUpdatedBy(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddUpdatedBy adds value to the "updated_by" field.
|
||||||
|
func (_u *AnnouncementUpdate) AddUpdatedBy(v int64) *AnnouncementUpdate {
|
||||||
|
_u.mutation.AddUpdatedBy(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearUpdatedBy clears the value of the "updated_by" field.
|
||||||
|
func (_u *AnnouncementUpdate) ClearUpdatedBy() *AnnouncementUpdate {
|
||||||
|
_u.mutation.ClearUpdatedBy()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetUpdatedAt sets the "updated_at" field.
|
||||||
|
func (_u *AnnouncementUpdate) SetUpdatedAt(v time.Time) *AnnouncementUpdate {
|
||||||
|
_u.mutation.SetUpdatedAt(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddReadIDs adds the "reads" edge to the AnnouncementRead entity by IDs.
|
||||||
|
func (_u *AnnouncementUpdate) AddReadIDs(ids ...int64) *AnnouncementUpdate {
|
||||||
|
_u.mutation.AddReadIDs(ids...)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddReads adds the "reads" edges to the AnnouncementRead entity.
|
||||||
|
func (_u *AnnouncementUpdate) AddReads(v ...*AnnouncementRead) *AnnouncementUpdate {
|
||||||
|
ids := make([]int64, len(v))
|
||||||
|
for i := range v {
|
||||||
|
ids[i] = v[i].ID
|
||||||
|
}
|
||||||
|
return _u.AddReadIDs(ids...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Mutation returns the AnnouncementMutation object of the builder.
|
||||||
|
func (_u *AnnouncementUpdate) Mutation() *AnnouncementMutation {
|
||||||
|
return _u.mutation
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearReads clears all "reads" edges to the AnnouncementRead entity.
|
||||||
|
func (_u *AnnouncementUpdate) ClearReads() *AnnouncementUpdate {
|
||||||
|
_u.mutation.ClearReads()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// RemoveReadIDs removes the "reads" edge to AnnouncementRead entities by IDs.
|
||||||
|
func (_u *AnnouncementUpdate) RemoveReadIDs(ids ...int64) *AnnouncementUpdate {
|
||||||
|
_u.mutation.RemoveReadIDs(ids...)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// RemoveReads removes "reads" edges to AnnouncementRead entities.
|
||||||
|
func (_u *AnnouncementUpdate) RemoveReads(v ...*AnnouncementRead) *AnnouncementUpdate {
|
||||||
|
ids := make([]int64, len(v))
|
||||||
|
for i := range v {
|
||||||
|
ids[i] = v[i].ID
|
||||||
|
}
|
||||||
|
return _u.RemoveReadIDs(ids...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Save executes the query and returns the number of nodes affected by the update operation.
|
||||||
|
func (_u *AnnouncementUpdate) Save(ctx context.Context) (int, error) {
|
||||||
|
_u.defaults()
|
||||||
|
return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SaveX is like Save, but panics if an error occurs.
|
||||||
|
func (_u *AnnouncementUpdate) SaveX(ctx context.Context) int {
|
||||||
|
affected, err := _u.Save(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return affected
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exec executes the query.
|
||||||
|
func (_u *AnnouncementUpdate) Exec(ctx context.Context) error {
|
||||||
|
_, err := _u.Save(ctx)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
|
||||||
|
func (_u *AnnouncementUpdate) ExecX(ctx context.Context) {
|
||||||
|
if err := _u.Exec(ctx); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// defaults sets the default values of the builder before save.
|
||||||
|
func (_u *AnnouncementUpdate) defaults() {
|
||||||
|
if _, ok := _u.mutation.UpdatedAt(); !ok {
|
||||||
|
v := announcement.UpdateDefaultUpdatedAt()
|
||||||
|
_u.mutation.SetUpdatedAt(v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// check runs all checks and user-defined validators on the builder.
// Validators only run for fields that were explicitly set on this builder;
// untouched fields are skipped. The first failing validator short-circuits
// and is returned wrapped in a ValidationError carrying the field name.
func (_u *AnnouncementUpdate) check() error {
	if v, ok := _u.mutation.Title(); ok {
		if err := announcement.TitleValidator(v); err != nil {
			return &ValidationError{Name: "title", err: fmt.Errorf(`ent: validator failed for field "Announcement.title": %w`, err)}
		}
	}
	if v, ok := _u.mutation.Content(); ok {
		if err := announcement.ContentValidator(v); err != nil {
			return &ValidationError{Name: "content", err: fmt.Errorf(`ent: validator failed for field "Announcement.content": %w`, err)}
		}
	}
	if v, ok := _u.mutation.Status(); ok {
		if err := announcement.StatusValidator(v); err != nil {
			return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "Announcement.status": %w`, err)}
		}
	}
	if v, ok := _u.mutation.NotifyMode(); ok {
		if err := announcement.NotifyModeValidator(v); err != nil {
			return &ValidationError{Name: "notify_mode", err: fmt.Errorf(`ent: validator failed for field "Announcement.notify_mode": %w`, err)}
		}
	}
	return nil
}
// sqlSave validates the builder, translates the staged mutation into an
// sqlgraph update spec (fields, increments, clears, predicates and edge
// operations), executes it, and returns the number of affected rows.
func (_u *AnnouncementUpdate) sqlSave(ctx context.Context) (_node int, err error) {
	if err := _u.check(); err != nil {
		return _node, err
	}
	_spec := sqlgraph.NewUpdateSpec(announcement.Table, announcement.Columns, sqlgraph.NewFieldSpec(announcement.FieldID, field.TypeInt64))
	// Apply the builder's Where predicates to the UPDATE statement.
	if ps := _u.mutation.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	// Scalar field assignments: only fields that were explicitly set.
	if value, ok := _u.mutation.Title(); ok {
		_spec.SetField(announcement.FieldTitle, field.TypeString, value)
	}
	if value, ok := _u.mutation.Content(); ok {
		_spec.SetField(announcement.FieldContent, field.TypeString, value)
	}
	if value, ok := _u.mutation.Status(); ok {
		_spec.SetField(announcement.FieldStatus, field.TypeString, value)
	}
	if value, ok := _u.mutation.NotifyMode(); ok {
		_spec.SetField(announcement.FieldNotifyMode, field.TypeString, value)
	}
	if value, ok := _u.mutation.Targeting(); ok {
		_spec.SetField(announcement.FieldTargeting, field.TypeJSON, value)
	}
	if _u.mutation.TargetingCleared() {
		_spec.ClearField(announcement.FieldTargeting, field.TypeJSON)
	}
	if value, ok := _u.mutation.StartsAt(); ok {
		_spec.SetField(announcement.FieldStartsAt, field.TypeTime, value)
	}
	if _u.mutation.StartsAtCleared() {
		_spec.ClearField(announcement.FieldStartsAt, field.TypeTime)
	}
	if value, ok := _u.mutation.EndsAt(); ok {
		_spec.SetField(announcement.FieldEndsAt, field.TypeTime, value)
	}
	if _u.mutation.EndsAtCleared() {
		_spec.ClearField(announcement.FieldEndsAt, field.TypeTime)
	}
	if value, ok := _u.mutation.CreatedBy(); ok {
		_spec.SetField(announcement.FieldCreatedBy, field.TypeInt64, value)
	}
	// AddField translates to an arithmetic update (column = column + value).
	if value, ok := _u.mutation.AddedCreatedBy(); ok {
		_spec.AddField(announcement.FieldCreatedBy, field.TypeInt64, value)
	}
	if _u.mutation.CreatedByCleared() {
		_spec.ClearField(announcement.FieldCreatedBy, field.TypeInt64)
	}
	if value, ok := _u.mutation.UpdatedBy(); ok {
		_spec.SetField(announcement.FieldUpdatedBy, field.TypeInt64, value)
	}
	if value, ok := _u.mutation.AddedUpdatedBy(); ok {
		_spec.AddField(announcement.FieldUpdatedBy, field.TypeInt64, value)
	}
	if _u.mutation.UpdatedByCleared() {
		_spec.ClearField(announcement.FieldUpdatedBy, field.TypeInt64)
	}
	if value, ok := _u.mutation.UpdatedAt(); ok {
		_spec.SetField(announcement.FieldUpdatedAt, field.TypeTime, value)
	}
	// "reads" edge: ClearReads drops all rows; RemoveReadIDs drops specific
	// rows (skipped when a full clear is already queued); ReadsIDs attaches
	// new rows. All three are O2M edges keyed by announcementread.FieldID.
	if _u.mutation.ReadsCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.O2M,
			Inverse: false,
			Table:   announcement.ReadsTable,
			Columns: []string{announcement.ReadsColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(announcementread.FieldID, field.TypeInt64),
			},
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	if nodes := _u.mutation.RemovedReadsIDs(); len(nodes) > 0 && !_u.mutation.ReadsCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.O2M,
			Inverse: false,
			Table:   announcement.ReadsTable,
			Columns: []string{announcement.ReadsColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(announcementread.FieldID, field.TypeInt64),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	if nodes := _u.mutation.ReadsIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.O2M,
			Inverse: false,
			Table:   announcement.ReadsTable,
			Columns: []string{announcement.ReadsColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(announcementread.FieldID, field.TypeInt64),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Add = append(_spec.Edges.Add, edge)
	}
	// Execute and normalize driver errors into ent error types.
	if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil {
		if _, ok := err.(*sqlgraph.NotFoundError); ok {
			err = &NotFoundError{announcement.Label}
		} else if sqlgraph.IsConstraintError(err) {
			err = &ConstraintError{msg: err.Error(), wrap: err}
		}
		return 0, err
	}
	// Mark the mutation as consumed so hooks cannot re-run it.
	_u.mutation.done = true
	return _node, nil
}
// AnnouncementUpdateOne is the builder for updating a single Announcement entity.
// Unlike AnnouncementUpdate, Save returns the updated entity itself rather
// than an affected-rows count.
type AnnouncementUpdateOne struct {
	config
	// fields optionally restricts which columns are returned by Select.
	fields []string
	// hooks run around the save operation, outermost first.
	hooks []Hook
	// mutation accumulates the staged field and edge changes.
	mutation *AnnouncementMutation
}
// SetTitle sets the "title" field.
|
||||||
|
func (_u *AnnouncementUpdateOne) SetTitle(v string) *AnnouncementUpdateOne {
|
||||||
|
_u.mutation.SetTitle(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableTitle sets the "title" field if the given value is not nil.
|
||||||
|
func (_u *AnnouncementUpdateOne) SetNillableTitle(v *string) *AnnouncementUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetTitle(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetContent sets the "content" field.
|
||||||
|
func (_u *AnnouncementUpdateOne) SetContent(v string) *AnnouncementUpdateOne {
|
||||||
|
_u.mutation.SetContent(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableContent sets the "content" field if the given value is not nil.
|
||||||
|
func (_u *AnnouncementUpdateOne) SetNillableContent(v *string) *AnnouncementUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetContent(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetStatus sets the "status" field.
|
||||||
|
func (_u *AnnouncementUpdateOne) SetStatus(v string) *AnnouncementUpdateOne {
|
||||||
|
_u.mutation.SetStatus(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableStatus sets the "status" field if the given value is not nil.
|
||||||
|
func (_u *AnnouncementUpdateOne) SetNillableStatus(v *string) *AnnouncementUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetStatus(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNotifyMode sets the "notify_mode" field.
|
||||||
|
func (_u *AnnouncementUpdateOne) SetNotifyMode(v string) *AnnouncementUpdateOne {
|
||||||
|
_u.mutation.SetNotifyMode(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableNotifyMode sets the "notify_mode" field if the given value is not nil.
|
||||||
|
func (_u *AnnouncementUpdateOne) SetNillableNotifyMode(v *string) *AnnouncementUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetNotifyMode(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetTargeting sets the "targeting" field.
|
||||||
|
func (_u *AnnouncementUpdateOne) SetTargeting(v domain.AnnouncementTargeting) *AnnouncementUpdateOne {
|
||||||
|
_u.mutation.SetTargeting(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableTargeting sets the "targeting" field if the given value is not nil.
|
||||||
|
func (_u *AnnouncementUpdateOne) SetNillableTargeting(v *domain.AnnouncementTargeting) *AnnouncementUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetTargeting(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearTargeting clears the value of the "targeting" field.
|
||||||
|
func (_u *AnnouncementUpdateOne) ClearTargeting() *AnnouncementUpdateOne {
|
||||||
|
_u.mutation.ClearTargeting()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetStartsAt sets the "starts_at" field.
|
||||||
|
func (_u *AnnouncementUpdateOne) SetStartsAt(v time.Time) *AnnouncementUpdateOne {
|
||||||
|
_u.mutation.SetStartsAt(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableStartsAt sets the "starts_at" field if the given value is not nil.
|
||||||
|
func (_u *AnnouncementUpdateOne) SetNillableStartsAt(v *time.Time) *AnnouncementUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetStartsAt(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearStartsAt clears the value of the "starts_at" field.
|
||||||
|
func (_u *AnnouncementUpdateOne) ClearStartsAt() *AnnouncementUpdateOne {
|
||||||
|
_u.mutation.ClearStartsAt()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetEndsAt sets the "ends_at" field.
|
||||||
|
func (_u *AnnouncementUpdateOne) SetEndsAt(v time.Time) *AnnouncementUpdateOne {
|
||||||
|
_u.mutation.SetEndsAt(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableEndsAt sets the "ends_at" field if the given value is not nil.
|
||||||
|
func (_u *AnnouncementUpdateOne) SetNillableEndsAt(v *time.Time) *AnnouncementUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetEndsAt(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearEndsAt clears the value of the "ends_at" field.
|
||||||
|
func (_u *AnnouncementUpdateOne) ClearEndsAt() *AnnouncementUpdateOne {
|
||||||
|
_u.mutation.ClearEndsAt()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetCreatedBy sets the "created_by" field.
|
||||||
|
func (_u *AnnouncementUpdateOne) SetCreatedBy(v int64) *AnnouncementUpdateOne {
|
||||||
|
_u.mutation.ResetCreatedBy()
|
||||||
|
_u.mutation.SetCreatedBy(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableCreatedBy sets the "created_by" field if the given value is not nil.
|
||||||
|
func (_u *AnnouncementUpdateOne) SetNillableCreatedBy(v *int64) *AnnouncementUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetCreatedBy(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddCreatedBy adds value to the "created_by" field.
|
||||||
|
func (_u *AnnouncementUpdateOne) AddCreatedBy(v int64) *AnnouncementUpdateOne {
|
||||||
|
_u.mutation.AddCreatedBy(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearCreatedBy clears the value of the "created_by" field.
|
||||||
|
func (_u *AnnouncementUpdateOne) ClearCreatedBy() *AnnouncementUpdateOne {
|
||||||
|
_u.mutation.ClearCreatedBy()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetUpdatedBy sets the "updated_by" field.
|
||||||
|
func (_u *AnnouncementUpdateOne) SetUpdatedBy(v int64) *AnnouncementUpdateOne {
|
||||||
|
_u.mutation.ResetUpdatedBy()
|
||||||
|
_u.mutation.SetUpdatedBy(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableUpdatedBy sets the "updated_by" field if the given value is not nil.
|
||||||
|
func (_u *AnnouncementUpdateOne) SetNillableUpdatedBy(v *int64) *AnnouncementUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetUpdatedBy(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddUpdatedBy adds value to the "updated_by" field.
|
||||||
|
func (_u *AnnouncementUpdateOne) AddUpdatedBy(v int64) *AnnouncementUpdateOne {
|
||||||
|
_u.mutation.AddUpdatedBy(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearUpdatedBy clears the value of the "updated_by" field.
|
||||||
|
func (_u *AnnouncementUpdateOne) ClearUpdatedBy() *AnnouncementUpdateOne {
|
||||||
|
_u.mutation.ClearUpdatedBy()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetUpdatedAt sets the "updated_at" field.
|
||||||
|
func (_u *AnnouncementUpdateOne) SetUpdatedAt(v time.Time) *AnnouncementUpdateOne {
|
||||||
|
_u.mutation.SetUpdatedAt(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddReadIDs adds the "reads" edge to the AnnouncementRead entity by IDs.
|
||||||
|
func (_u *AnnouncementUpdateOne) AddReadIDs(ids ...int64) *AnnouncementUpdateOne {
|
||||||
|
_u.mutation.AddReadIDs(ids...)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddReads adds the "reads" edges to the AnnouncementRead entity.
|
||||||
|
func (_u *AnnouncementUpdateOne) AddReads(v ...*AnnouncementRead) *AnnouncementUpdateOne {
|
||||||
|
ids := make([]int64, len(v))
|
||||||
|
for i := range v {
|
||||||
|
ids[i] = v[i].ID
|
||||||
|
}
|
||||||
|
return _u.AddReadIDs(ids...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Mutation returns the AnnouncementMutation object of the builder.
|
||||||
|
func (_u *AnnouncementUpdateOne) Mutation() *AnnouncementMutation {
|
||||||
|
return _u.mutation
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearReads clears all "reads" edges to the AnnouncementRead entity.
|
||||||
|
func (_u *AnnouncementUpdateOne) ClearReads() *AnnouncementUpdateOne {
|
||||||
|
_u.mutation.ClearReads()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// RemoveReadIDs removes the "reads" edge to AnnouncementRead entities by IDs.
|
||||||
|
func (_u *AnnouncementUpdateOne) RemoveReadIDs(ids ...int64) *AnnouncementUpdateOne {
|
||||||
|
_u.mutation.RemoveReadIDs(ids...)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// RemoveReads removes "reads" edges to AnnouncementRead entities.
|
||||||
|
func (_u *AnnouncementUpdateOne) RemoveReads(v ...*AnnouncementRead) *AnnouncementUpdateOne {
|
||||||
|
ids := make([]int64, len(v))
|
||||||
|
for i := range v {
|
||||||
|
ids[i] = v[i].ID
|
||||||
|
}
|
||||||
|
return _u.RemoveReadIDs(ids...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Where appends a list predicates to the AnnouncementUpdate builder.
|
||||||
|
func (_u *AnnouncementUpdateOne) Where(ps ...predicate.Announcement) *AnnouncementUpdateOne {
|
||||||
|
_u.mutation.Where(ps...)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// Select allows selecting one or more fields (columns) of the returned entity.
|
||||||
|
// The default is selecting all fields defined in the entity schema.
|
||||||
|
func (_u *AnnouncementUpdateOne) Select(field string, fields ...string) *AnnouncementUpdateOne {
|
||||||
|
_u.fields = append([]string{field}, fields...)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// Save executes the query and returns the updated Announcement entity.
|
||||||
|
func (_u *AnnouncementUpdateOne) Save(ctx context.Context) (*Announcement, error) {
|
||||||
|
_u.defaults()
|
||||||
|
return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SaveX is like Save, but panics if an error occurs.
|
||||||
|
func (_u *AnnouncementUpdateOne) SaveX(ctx context.Context) *Announcement {
|
||||||
|
node, err := _u.Save(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return node
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exec executes the query on the entity.
|
||||||
|
func (_u *AnnouncementUpdateOne) Exec(ctx context.Context) error {
|
||||||
|
_, err := _u.Save(ctx)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
|
||||||
|
func (_u *AnnouncementUpdateOne) ExecX(ctx context.Context) {
|
||||||
|
if err := _u.Exec(ctx); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// defaults sets the default values of the builder before save.
|
||||||
|
func (_u *AnnouncementUpdateOne) defaults() {
|
||||||
|
if _, ok := _u.mutation.UpdatedAt(); !ok {
|
||||||
|
v := announcement.UpdateDefaultUpdatedAt()
|
||||||
|
_u.mutation.SetUpdatedAt(v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// check runs all checks and user-defined validators on the builder.
// Validators only run for fields that were explicitly set on this builder;
// untouched fields are skipped. The first failing validator short-circuits
// and is returned wrapped in a ValidationError carrying the field name.
func (_u *AnnouncementUpdateOne) check() error {
	if v, ok := _u.mutation.Title(); ok {
		if err := announcement.TitleValidator(v); err != nil {
			return &ValidationError{Name: "title", err: fmt.Errorf(`ent: validator failed for field "Announcement.title": %w`, err)}
		}
	}
	if v, ok := _u.mutation.Content(); ok {
		if err := announcement.ContentValidator(v); err != nil {
			return &ValidationError{Name: "content", err: fmt.Errorf(`ent: validator failed for field "Announcement.content": %w`, err)}
		}
	}
	if v, ok := _u.mutation.Status(); ok {
		if err := announcement.StatusValidator(v); err != nil {
			return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "Announcement.status": %w`, err)}
		}
	}
	if v, ok := _u.mutation.NotifyMode(); ok {
		if err := announcement.NotifyModeValidator(v); err != nil {
			return &ValidationError{Name: "notify_mode", err: fmt.Errorf(`ent: validator failed for field "Announcement.notify_mode": %w`, err)}
		}
	}
	return nil
}
// sqlSave validates the builder, translates the staged mutation into an
// sqlgraph update spec scoped to the entity ID, executes it, and returns the
// refreshed Announcement (honoring any Select column restriction).
func (_u *AnnouncementUpdateOne) sqlSave(ctx context.Context) (_node *Announcement, err error) {
	if err := _u.check(); err != nil {
		return _node, err
	}
	_spec := sqlgraph.NewUpdateSpec(announcement.Table, announcement.Columns, sqlgraph.NewFieldSpec(announcement.FieldID, field.TypeInt64))
	// UpdateOne requires the entity ID on the mutation (set by the client).
	id, ok := _u.mutation.ID()
	if !ok {
		return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Announcement.id" for update`)}
	}
	_spec.Node.ID.Value = id
	// Restrict the RETURNING columns when Select was used; the ID column is
	// always included so the entity can be identified.
	if fields := _u.fields; len(fields) > 0 {
		_spec.Node.Columns = make([]string, 0, len(fields))
		_spec.Node.Columns = append(_spec.Node.Columns, announcement.FieldID)
		for _, f := range fields {
			if !announcement.ValidColumn(f) {
				return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
			}
			if f != announcement.FieldID {
				_spec.Node.Columns = append(_spec.Node.Columns, f)
			}
		}
	}
	// Apply any additional Where predicates on top of the ID match.
	if ps := _u.mutation.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	// Scalar field assignments: only fields that were explicitly set.
	if value, ok := _u.mutation.Title(); ok {
		_spec.SetField(announcement.FieldTitle, field.TypeString, value)
	}
	if value, ok := _u.mutation.Content(); ok {
		_spec.SetField(announcement.FieldContent, field.TypeString, value)
	}
	if value, ok := _u.mutation.Status(); ok {
		_spec.SetField(announcement.FieldStatus, field.TypeString, value)
	}
	if value, ok := _u.mutation.NotifyMode(); ok {
		_spec.SetField(announcement.FieldNotifyMode, field.TypeString, value)
	}
	if value, ok := _u.mutation.Targeting(); ok {
		_spec.SetField(announcement.FieldTargeting, field.TypeJSON, value)
	}
	if _u.mutation.TargetingCleared() {
		_spec.ClearField(announcement.FieldTargeting, field.TypeJSON)
	}
	if value, ok := _u.mutation.StartsAt(); ok {
		_spec.SetField(announcement.FieldStartsAt, field.TypeTime, value)
	}
	if _u.mutation.StartsAtCleared() {
		_spec.ClearField(announcement.FieldStartsAt, field.TypeTime)
	}
	if value, ok := _u.mutation.EndsAt(); ok {
		_spec.SetField(announcement.FieldEndsAt, field.TypeTime, value)
	}
	if _u.mutation.EndsAtCleared() {
		_spec.ClearField(announcement.FieldEndsAt, field.TypeTime)
	}
	if value, ok := _u.mutation.CreatedBy(); ok {
		_spec.SetField(announcement.FieldCreatedBy, field.TypeInt64, value)
	}
	// AddField translates to an arithmetic update (column = column + value).
	if value, ok := _u.mutation.AddedCreatedBy(); ok {
		_spec.AddField(announcement.FieldCreatedBy, field.TypeInt64, value)
	}
	if _u.mutation.CreatedByCleared() {
		_spec.ClearField(announcement.FieldCreatedBy, field.TypeInt64)
	}
	if value, ok := _u.mutation.UpdatedBy(); ok {
		_spec.SetField(announcement.FieldUpdatedBy, field.TypeInt64, value)
	}
	if value, ok := _u.mutation.AddedUpdatedBy(); ok {
		_spec.AddField(announcement.FieldUpdatedBy, field.TypeInt64, value)
	}
	if _u.mutation.UpdatedByCleared() {
		_spec.ClearField(announcement.FieldUpdatedBy, field.TypeInt64)
	}
	if value, ok := _u.mutation.UpdatedAt(); ok {
		_spec.SetField(announcement.FieldUpdatedAt, field.TypeTime, value)
	}
	// "reads" edge: ClearReads drops all rows; RemoveReadIDs drops specific
	// rows (skipped when a full clear is already queued); ReadsIDs attaches
	// new rows. All three are O2M edges keyed by announcementread.FieldID.
	if _u.mutation.ReadsCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.O2M,
			Inverse: false,
			Table:   announcement.ReadsTable,
			Columns: []string{announcement.ReadsColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(announcementread.FieldID, field.TypeInt64),
			},
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	if nodes := _u.mutation.RemovedReadsIDs(); len(nodes) > 0 && !_u.mutation.ReadsCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.O2M,
			Inverse: false,
			Table:   announcement.ReadsTable,
			Columns: []string{announcement.ReadsColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(announcementread.FieldID, field.TypeInt64),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	if nodes := _u.mutation.ReadsIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.O2M,
			Inverse: false,
			Table:   announcement.ReadsTable,
			Columns: []string{announcement.ReadsColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(announcementread.FieldID, field.TypeInt64),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Add = append(_spec.Edges.Add, edge)
	}
	// Wire the result entity so UpdateNode can scan the refreshed row into it.
	_node = &Announcement{config: _u.config}
	_spec.Assign = _node.assignValues
	_spec.ScanValues = _node.scanValues
	if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil {
		if _, ok := err.(*sqlgraph.NotFoundError); ok {
			err = &NotFoundError{announcement.Label}
		} else if sqlgraph.IsConstraintError(err) {
			err = &ConstraintError{msg: err.Error(), wrap: err}
		}
		return nil, err
	}
	// Mark the mutation as consumed so hooks cannot re-run it.
	_u.mutation.done = true
	return _node, nil
}
185
backend/ent/announcementread.go
Normal file
@@ -0,0 +1,185 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package ent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"entgo.io/ent"
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/announcement"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/announcementread"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/user"
|
||||||
|
)
|
||||||
|
|
||||||
|
// AnnouncementRead is the model entity for the AnnouncementRead schema.
// It records that a given user has read a given announcement.
type AnnouncementRead struct {
	config `json:"-"`
	// ID of the ent.
	ID int64 `json:"id,omitempty"`
	// AnnouncementID holds the value of the "announcement_id" field.
	AnnouncementID int64 `json:"announcement_id,omitempty"`
	// UserID holds the value of the "user_id" field.
	UserID int64 `json:"user_id,omitempty"`
	// ReadAt is the time the user first marked the announcement as read.
	ReadAt time.Time `json:"read_at,omitempty"`
	// CreatedAt holds the value of the "created_at" field.
	CreatedAt time.Time `json:"created_at,omitempty"`
	// Edges holds the relations/edges for other nodes in the graph.
	// The values are being populated by the AnnouncementReadQuery when eager-loading is set.
	Edges AnnouncementReadEdges `json:"edges"`
	// selectValues stores columns selected dynamically (via modifiers/order
	// terms) that are not part of the static schema fields.
	selectValues sql.SelectValues
}
// AnnouncementReadEdges holds the relations/edges for other nodes in the graph.
type AnnouncementReadEdges struct {
	// Announcement holds the value of the announcement edge.
	Announcement *Announcement `json:"announcement,omitempty"`
	// User holds the value of the user edge.
	User *User `json:"user,omitempty"`
	// loadedTypes holds the information for reporting if a
	// type was loaded (or requested) in eager-loading or not.
	// Index 0 tracks the "announcement" edge, index 1 the "user" edge.
	loadedTypes [2]bool
}
// AnnouncementOrErr returns the Announcement value or an error if the edge
|
||||||
|
// was not loaded in eager-loading, or loaded but was not found.
|
||||||
|
func (e AnnouncementReadEdges) AnnouncementOrErr() (*Announcement, error) {
|
||||||
|
if e.Announcement != nil {
|
||||||
|
return e.Announcement, nil
|
||||||
|
} else if e.loadedTypes[0] {
|
||||||
|
return nil, &NotFoundError{label: announcement.Label}
|
||||||
|
}
|
||||||
|
return nil, &NotLoadedError{edge: "announcement"}
|
||||||
|
}
|
||||||
|
|
||||||
|
// UserOrErr returns the User value or an error if the edge
|
||||||
|
// was not loaded in eager-loading, or loaded but was not found.
|
||||||
|
func (e AnnouncementReadEdges) UserOrErr() (*User, error) {
|
||||||
|
if e.User != nil {
|
||||||
|
return e.User, nil
|
||||||
|
} else if e.loadedTypes[1] {
|
||||||
|
return nil, &NotFoundError{label: user.Label}
|
||||||
|
}
|
||||||
|
return nil, &NotLoadedError{edge: "user"}
|
||||||
|
}
|
||||||
|
|
||||||
|
// scanValues returns the types for scanning values from sql.Rows.
// One destination per requested column: Null* wrappers for known schema
// columns, sql.UnknownType for anything selected dynamically.
func (*AnnouncementRead) scanValues(columns []string) ([]any, error) {
	values := make([]any, len(columns))
	for i := range columns {
		switch columns[i] {
		case announcementread.FieldID, announcementread.FieldAnnouncementID, announcementread.FieldUserID:
			values[i] = new(sql.NullInt64)
		case announcementread.FieldReadAt, announcementread.FieldCreatedAt:
			values[i] = new(sql.NullTime)
		default:
			// Non-schema column (modifier/order term); defer typing to assignValues.
			values[i] = new(sql.UnknownType)
		}
	}
	return values, nil
}
// assignValues assigns the values that were returned from sql.Rows (after scanning)
// to the AnnouncementRead fields. The columns slice must align one-to-one
// with the values slice produced by scanValues.
func (_m *AnnouncementRead) assignValues(columns []string, values []any) error {
	if m, n := len(values), len(columns); m < n {
		return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
	}
	for i := range columns {
		switch columns[i] {
		case announcementread.FieldID:
			value, ok := values[i].(*sql.NullInt64)
			if !ok {
				return fmt.Errorf("unexpected type %T for field id", value)
			}
			_m.ID = int64(value.Int64)
		case announcementread.FieldAnnouncementID:
			// NULL columns leave the struct field at its zero value.
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field announcement_id", values[i])
			} else if value.Valid {
				_m.AnnouncementID = value.Int64
			}
		case announcementread.FieldUserID:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field user_id", values[i])
			} else if value.Valid {
				_m.UserID = value.Int64
			}
		case announcementread.FieldReadAt:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field read_at", values[i])
			} else if value.Valid {
				_m.ReadAt = value.Time
			}
		case announcementread.FieldCreatedAt:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field created_at", values[i])
			} else if value.Valid {
				_m.CreatedAt = value.Time
			}
		default:
			// Dynamically selected column: stash it for later retrieval via Value.
			_m.selectValues.Set(columns[i], values[i])
		}
	}
	return nil
}
// Value returns the ent.Value that was dynamically selected and assigned to the AnnouncementRead.
// This includes values selected through modifiers, order, etc.
func (_m *AnnouncementRead) Value(name string) (ent.Value, error) {
	return _m.selectValues.Get(name)
}

// QueryAnnouncement queries the "announcement" edge of the AnnouncementRead entity.
func (_m *AnnouncementRead) QueryAnnouncement() *AnnouncementQuery {
	return NewAnnouncementReadClient(_m.config).QueryAnnouncement(_m)
}

// QueryUser queries the "user" edge of the AnnouncementRead entity.
func (_m *AnnouncementRead) QueryUser() *UserQuery {
	return NewAnnouncementReadClient(_m.config).QueryUser(_m)
}

// Update returns a builder for updating this AnnouncementRead.
// Note that you need to call AnnouncementRead.Unwrap() before calling this method if this AnnouncementRead
// was returned from a transaction, and the transaction was committed or rolled back.
func (_m *AnnouncementRead) Update() *AnnouncementReadUpdateOne {
	return NewAnnouncementReadClient(_m.config).UpdateOne(_m)
}
|
||||||
|
|
||||||
|
// Unwrap unwraps the AnnouncementRead entity that was returned from a transaction after it was closed,
// so that all future queries will be executed through the driver which created the transaction.
// It panics if the entity was not loaded through a transaction.
func (_m *AnnouncementRead) Unwrap() *AnnouncementRead {
	_tx, ok := _m.config.driver.(*txDriver)
	if !ok {
		panic("ent: AnnouncementRead is not a transactional entity")
	}
	// Swap the tx-bound driver for the underlying one.
	_m.config.driver = _tx.drv
	return _m
}
|
||||||
|
|
||||||
|
// String implements the fmt.Stringer.
|
||||||
|
func (_m *AnnouncementRead) String() string {
|
||||||
|
var builder strings.Builder
|
||||||
|
builder.WriteString("AnnouncementRead(")
|
||||||
|
builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID))
|
||||||
|
builder.WriteString("announcement_id=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", _m.AnnouncementID))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("user_id=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", _m.UserID))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("read_at=")
|
||||||
|
builder.WriteString(_m.ReadAt.Format(time.ANSIC))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("created_at=")
|
||||||
|
builder.WriteString(_m.CreatedAt.Format(time.ANSIC))
|
||||||
|
builder.WriteByte(')')
|
||||||
|
return builder.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
// AnnouncementReads is a parsable slice of AnnouncementRead.
type AnnouncementReads []*AnnouncementRead
|
||||||
127
backend/ent/announcementread/announcementread.go
Normal file
@@ -0,0 +1,127 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package announcementread
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
	// Label holds the string label denoting the announcementread type in the database.
	Label = "announcement_read"
	// FieldID holds the string denoting the id field in the database.
	FieldID = "id"
	// FieldAnnouncementID holds the string denoting the announcement_id field in the database.
	FieldAnnouncementID = "announcement_id"
	// FieldUserID holds the string denoting the user_id field in the database.
	FieldUserID = "user_id"
	// FieldReadAt holds the string denoting the read_at field in the database.
	FieldReadAt = "read_at"
	// FieldCreatedAt holds the string denoting the created_at field in the database.
	FieldCreatedAt = "created_at"
	// EdgeAnnouncement holds the string denoting the announcement edge name in mutations.
	EdgeAnnouncement = "announcement"
	// EdgeUser holds the string denoting the user edge name in mutations.
	EdgeUser = "user"
	// Table holds the table name of the announcementread in the database.
	Table = "announcement_reads"
	// AnnouncementTable is the table that holds the announcement relation/edge.
	AnnouncementTable = "announcement_reads"
	// AnnouncementInverseTable is the table name for the Announcement entity.
	// It exists in this package in order to avoid circular dependency with the "announcement" package.
	AnnouncementInverseTable = "announcements"
	// AnnouncementColumn is the table column denoting the announcement relation/edge.
	AnnouncementColumn = "announcement_id"
	// UserTable is the table that holds the user relation/edge.
	UserTable = "announcement_reads"
	// UserInverseTable is the table name for the User entity.
	// It exists in this package in order to avoid circular dependency with the "user" package.
	UserInverseTable = "users"
	// UserColumn is the table column denoting the user relation/edge.
	UserColumn = "user_id"
)

// Columns holds all SQL columns for announcementread fields.
var Columns = []string{
	FieldID,
	FieldAnnouncementID,
	FieldUserID,
	FieldReadAt,
	FieldCreatedAt,
}

// ValidColumn reports if the column name is valid (part of the table columns).
func ValidColumn(column string) bool {
	for _, c := range Columns {
		if c == column {
			return true
		}
	}
	return false
}
|
||||||
|
|
||||||
|
var (
	// DefaultReadAt holds the default value on creation for the "read_at" field.
	// It is populated by the codegen runtime from the schema definition.
	DefaultReadAt func() time.Time
	// DefaultCreatedAt holds the default value on creation for the "created_at" field.
	// It is populated by the codegen runtime from the schema definition.
	DefaultCreatedAt func() time.Time
)

// OrderOption defines the ordering options for the AnnouncementRead queries.
type OrderOption func(*sql.Selector)
|
||||||
|
|
||||||
|
// ByID orders the results by the id field.
func ByID(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldID, opts...).ToFunc()
}

// ByAnnouncementID orders the results by the announcement_id field.
func ByAnnouncementID(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldAnnouncementID, opts...).ToFunc()
}

// ByUserID orders the results by the user_id field.
func ByUserID(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldUserID, opts...).ToFunc()
}

// ByReadAt orders the results by the read_at field.
func ByReadAt(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldReadAt, opts...).ToFunc()
}

// ByCreatedAt orders the results by the created_at field.
func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
}

// ByAnnouncementField orders the results by announcement field.
// Ordering is applied through the neighbor ("announcement") edge.
func ByAnnouncementField(field string, opts ...sql.OrderTermOption) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborTerms(s, newAnnouncementStep(), sql.OrderByField(field, opts...))
	}
}

// ByUserField orders the results by user field.
// Ordering is applied through the neighbor ("user") edge.
func ByUserField(field string, opts ...sql.OrderTermOption) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborTerms(s, newUserStep(), sql.OrderByField(field, opts...))
	}
}
|
||||||
|
// newAnnouncementStep builds the graph-traversal step from an
// announcement_read row to its owning Announcement (M2O, inverse edge).
func newAnnouncementStep() *sqlgraph.Step {
	return sqlgraph.NewStep(
		sqlgraph.From(Table, FieldID),
		sqlgraph.To(AnnouncementInverseTable, FieldID),
		sqlgraph.Edge(sqlgraph.M2O, true, AnnouncementTable, AnnouncementColumn),
	)
}

// newUserStep builds the graph-traversal step from an
// announcement_read row to its owning User (M2O, inverse edge).
func newUserStep() *sqlgraph.Step {
	return sqlgraph.NewStep(
		sqlgraph.From(Table, FieldID),
		sqlgraph.To(UserInverseTable, FieldID),
		sqlgraph.Edge(sqlgraph.M2O, true, UserTable, UserColumn),
	)
}
|
||||||
257
backend/ent/announcementread/where.go
Normal file
@@ -0,0 +1,257 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package announcementread
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ID filters vertices based on their ID field.
func ID(id int64) predicate.AnnouncementRead {
	return predicate.AnnouncementRead(sql.FieldEQ(FieldID, id))
}

// IDEQ applies the EQ predicate on the ID field.
func IDEQ(id int64) predicate.AnnouncementRead {
	return predicate.AnnouncementRead(sql.FieldEQ(FieldID, id))
}

// IDNEQ applies the NEQ predicate on the ID field.
func IDNEQ(id int64) predicate.AnnouncementRead {
	return predicate.AnnouncementRead(sql.FieldNEQ(FieldID, id))
}

// IDIn applies the In predicate on the ID field.
func IDIn(ids ...int64) predicate.AnnouncementRead {
	return predicate.AnnouncementRead(sql.FieldIn(FieldID, ids...))
}

// IDNotIn applies the NotIn predicate on the ID field.
func IDNotIn(ids ...int64) predicate.AnnouncementRead {
	return predicate.AnnouncementRead(sql.FieldNotIn(FieldID, ids...))
}

// IDGT applies the GT predicate on the ID field.
func IDGT(id int64) predicate.AnnouncementRead {
	return predicate.AnnouncementRead(sql.FieldGT(FieldID, id))
}

// IDGTE applies the GTE predicate on the ID field.
func IDGTE(id int64) predicate.AnnouncementRead {
	return predicate.AnnouncementRead(sql.FieldGTE(FieldID, id))
}

// IDLT applies the LT predicate on the ID field.
func IDLT(id int64) predicate.AnnouncementRead {
	return predicate.AnnouncementRead(sql.FieldLT(FieldID, id))
}

// IDLTE applies the LTE predicate on the ID field.
func IDLTE(id int64) predicate.AnnouncementRead {
	return predicate.AnnouncementRead(sql.FieldLTE(FieldID, id))
}

// AnnouncementID applies equality check predicate on the "announcement_id" field. It's identical to AnnouncementIDEQ.
func AnnouncementID(v int64) predicate.AnnouncementRead {
	return predicate.AnnouncementRead(sql.FieldEQ(FieldAnnouncementID, v))
}

// UserID applies equality check predicate on the "user_id" field. It's identical to UserIDEQ.
func UserID(v int64) predicate.AnnouncementRead {
	return predicate.AnnouncementRead(sql.FieldEQ(FieldUserID, v))
}

// ReadAt applies equality check predicate on the "read_at" field. It's identical to ReadAtEQ.
func ReadAt(v time.Time) predicate.AnnouncementRead {
	return predicate.AnnouncementRead(sql.FieldEQ(FieldReadAt, v))
}

// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ.
func CreatedAt(v time.Time) predicate.AnnouncementRead {
	return predicate.AnnouncementRead(sql.FieldEQ(FieldCreatedAt, v))
}
|
||||||
|
|
||||||
|
// AnnouncementIDEQ applies the EQ predicate on the "announcement_id" field.
func AnnouncementIDEQ(v int64) predicate.AnnouncementRead {
	return predicate.AnnouncementRead(sql.FieldEQ(FieldAnnouncementID, v))
}

// AnnouncementIDNEQ applies the NEQ predicate on the "announcement_id" field.
func AnnouncementIDNEQ(v int64) predicate.AnnouncementRead {
	return predicate.AnnouncementRead(sql.FieldNEQ(FieldAnnouncementID, v))
}

// AnnouncementIDIn applies the In predicate on the "announcement_id" field.
func AnnouncementIDIn(vs ...int64) predicate.AnnouncementRead {
	return predicate.AnnouncementRead(sql.FieldIn(FieldAnnouncementID, vs...))
}

// AnnouncementIDNotIn applies the NotIn predicate on the "announcement_id" field.
func AnnouncementIDNotIn(vs ...int64) predicate.AnnouncementRead {
	return predicate.AnnouncementRead(sql.FieldNotIn(FieldAnnouncementID, vs...))
}

// UserIDEQ applies the EQ predicate on the "user_id" field.
func UserIDEQ(v int64) predicate.AnnouncementRead {
	return predicate.AnnouncementRead(sql.FieldEQ(FieldUserID, v))
}

// UserIDNEQ applies the NEQ predicate on the "user_id" field.
func UserIDNEQ(v int64) predicate.AnnouncementRead {
	return predicate.AnnouncementRead(sql.FieldNEQ(FieldUserID, v))
}

// UserIDIn applies the In predicate on the "user_id" field.
func UserIDIn(vs ...int64) predicate.AnnouncementRead {
	return predicate.AnnouncementRead(sql.FieldIn(FieldUserID, vs...))
}

// UserIDNotIn applies the NotIn predicate on the "user_id" field.
func UserIDNotIn(vs ...int64) predicate.AnnouncementRead {
	return predicate.AnnouncementRead(sql.FieldNotIn(FieldUserID, vs...))
}

// ReadAtEQ applies the EQ predicate on the "read_at" field.
func ReadAtEQ(v time.Time) predicate.AnnouncementRead {
	return predicate.AnnouncementRead(sql.FieldEQ(FieldReadAt, v))
}

// ReadAtNEQ applies the NEQ predicate on the "read_at" field.
func ReadAtNEQ(v time.Time) predicate.AnnouncementRead {
	return predicate.AnnouncementRead(sql.FieldNEQ(FieldReadAt, v))
}

// ReadAtIn applies the In predicate on the "read_at" field.
func ReadAtIn(vs ...time.Time) predicate.AnnouncementRead {
	return predicate.AnnouncementRead(sql.FieldIn(FieldReadAt, vs...))
}

// ReadAtNotIn applies the NotIn predicate on the "read_at" field.
func ReadAtNotIn(vs ...time.Time) predicate.AnnouncementRead {
	return predicate.AnnouncementRead(sql.FieldNotIn(FieldReadAt, vs...))
}

// ReadAtGT applies the GT predicate on the "read_at" field.
func ReadAtGT(v time.Time) predicate.AnnouncementRead {
	return predicate.AnnouncementRead(sql.FieldGT(FieldReadAt, v))
}

// ReadAtGTE applies the GTE predicate on the "read_at" field.
func ReadAtGTE(v time.Time) predicate.AnnouncementRead {
	return predicate.AnnouncementRead(sql.FieldGTE(FieldReadAt, v))
}

// ReadAtLT applies the LT predicate on the "read_at" field.
func ReadAtLT(v time.Time) predicate.AnnouncementRead {
	return predicate.AnnouncementRead(sql.FieldLT(FieldReadAt, v))
}

// ReadAtLTE applies the LTE predicate on the "read_at" field.
func ReadAtLTE(v time.Time) predicate.AnnouncementRead {
	return predicate.AnnouncementRead(sql.FieldLTE(FieldReadAt, v))
}

// CreatedAtEQ applies the EQ predicate on the "created_at" field.
func CreatedAtEQ(v time.Time) predicate.AnnouncementRead {
	return predicate.AnnouncementRead(sql.FieldEQ(FieldCreatedAt, v))
}

// CreatedAtNEQ applies the NEQ predicate on the "created_at" field.
func CreatedAtNEQ(v time.Time) predicate.AnnouncementRead {
	return predicate.AnnouncementRead(sql.FieldNEQ(FieldCreatedAt, v))
}

// CreatedAtIn applies the In predicate on the "created_at" field.
func CreatedAtIn(vs ...time.Time) predicate.AnnouncementRead {
	return predicate.AnnouncementRead(sql.FieldIn(FieldCreatedAt, vs...))
}

// CreatedAtNotIn applies the NotIn predicate on the "created_at" field.
func CreatedAtNotIn(vs ...time.Time) predicate.AnnouncementRead {
	return predicate.AnnouncementRead(sql.FieldNotIn(FieldCreatedAt, vs...))
}

// CreatedAtGT applies the GT predicate on the "created_at" field.
func CreatedAtGT(v time.Time) predicate.AnnouncementRead {
	return predicate.AnnouncementRead(sql.FieldGT(FieldCreatedAt, v))
}

// CreatedAtGTE applies the GTE predicate on the "created_at" field.
func CreatedAtGTE(v time.Time) predicate.AnnouncementRead {
	return predicate.AnnouncementRead(sql.FieldGTE(FieldCreatedAt, v))
}

// CreatedAtLT applies the LT predicate on the "created_at" field.
func CreatedAtLT(v time.Time) predicate.AnnouncementRead {
	return predicate.AnnouncementRead(sql.FieldLT(FieldCreatedAt, v))
}

// CreatedAtLTE applies the LTE predicate on the "created_at" field.
func CreatedAtLTE(v time.Time) predicate.AnnouncementRead {
	return predicate.AnnouncementRead(sql.FieldLTE(FieldCreatedAt, v))
}
|
||||||
|
|
||||||
|
// HasAnnouncement applies the HasEdge predicate on the "announcement" edge.
func HasAnnouncement() predicate.AnnouncementRead {
	return predicate.AnnouncementRead(func(s *sql.Selector) {
		// No To() target is needed for a bare existence check.
		step := sqlgraph.NewStep(
			sqlgraph.From(Table, FieldID),
			sqlgraph.Edge(sqlgraph.M2O, true, AnnouncementTable, AnnouncementColumn),
		)
		sqlgraph.HasNeighbors(s, step)
	})
}

// HasAnnouncementWith applies the HasEdge predicate on the "announcement" edge with a given conditions (other predicates).
func HasAnnouncementWith(preds ...predicate.Announcement) predicate.AnnouncementRead {
	return predicate.AnnouncementRead(func(s *sql.Selector) {
		step := newAnnouncementStep()
		sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
			// Apply every caller-supplied predicate to the neighbor selector.
			for _, p := range preds {
				p(s)
			}
		})
	})
}

// HasUser applies the HasEdge predicate on the "user" edge.
func HasUser() predicate.AnnouncementRead {
	return predicate.AnnouncementRead(func(s *sql.Selector) {
		// No To() target is needed for a bare existence check.
		step := sqlgraph.NewStep(
			sqlgraph.From(Table, FieldID),
			sqlgraph.Edge(sqlgraph.M2O, true, UserTable, UserColumn),
		)
		sqlgraph.HasNeighbors(s, step)
	})
}

// HasUserWith applies the HasEdge predicate on the "user" edge with a given conditions (other predicates).
func HasUserWith(preds ...predicate.User) predicate.AnnouncementRead {
	return predicate.AnnouncementRead(func(s *sql.Selector) {
		step := newUserStep()
		sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
			// Apply every caller-supplied predicate to the neighbor selector.
			for _, p := range preds {
				p(s)
			}
		})
	})
}

// And groups predicates with the AND operator between them.
func And(predicates ...predicate.AnnouncementRead) predicate.AnnouncementRead {
	return predicate.AnnouncementRead(sql.AndPredicates(predicates...))
}

// Or groups predicates with the OR operator between them.
func Or(predicates ...predicate.AnnouncementRead) predicate.AnnouncementRead {
	return predicate.AnnouncementRead(sql.OrPredicates(predicates...))
}

// Not applies the not operator on the given predicate.
func Not(p predicate.AnnouncementRead) predicate.AnnouncementRead {
	return predicate.AnnouncementRead(sql.NotPredicates(p))
}
|
||||||
660
backend/ent/announcementread_create.go
Normal file
@@ -0,0 +1,660 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package ent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
"entgo.io/ent/schema/field"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/announcement"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/announcementread"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/user"
|
||||||
|
)
|
||||||
|
|
||||||
|
// AnnouncementReadCreate is the builder for creating a AnnouncementRead entity.
type AnnouncementReadCreate struct {
	config
	// mutation accumulates field/edge changes until Save is called.
	mutation *AnnouncementReadMutation
	// hooks run around the save operation.
	hooks []Hook
	// conflict holds the ON CONFLICT options set via OnConflict/OnConflictColumns.
	conflict []sql.ConflictOption
}
|
||||||
|
|
||||||
|
// SetAnnouncementID sets the "announcement_id" field. The builder is returned
// for chaining, as with all setters below.
func (_c *AnnouncementReadCreate) SetAnnouncementID(v int64) *AnnouncementReadCreate {
	_c.mutation.SetAnnouncementID(v)
	return _c
}

// SetUserID sets the "user_id" field.
func (_c *AnnouncementReadCreate) SetUserID(v int64) *AnnouncementReadCreate {
	_c.mutation.SetUserID(v)
	return _c
}

// SetReadAt sets the "read_at" field.
func (_c *AnnouncementReadCreate) SetReadAt(v time.Time) *AnnouncementReadCreate {
	_c.mutation.SetReadAt(v)
	return _c
}

// SetNillableReadAt sets the "read_at" field if the given value is not nil.
func (_c *AnnouncementReadCreate) SetNillableReadAt(v *time.Time) *AnnouncementReadCreate {
	if v != nil {
		_c.SetReadAt(*v)
	}
	return _c
}

// SetCreatedAt sets the "created_at" field.
func (_c *AnnouncementReadCreate) SetCreatedAt(v time.Time) *AnnouncementReadCreate {
	_c.mutation.SetCreatedAt(v)
	return _c
}

// SetNillableCreatedAt sets the "created_at" field if the given value is not nil.
func (_c *AnnouncementReadCreate) SetNillableCreatedAt(v *time.Time) *AnnouncementReadCreate {
	if v != nil {
		_c.SetCreatedAt(*v)
	}
	return _c
}

// SetAnnouncement sets the "announcement" edge to the Announcement entity.
func (_c *AnnouncementReadCreate) SetAnnouncement(v *Announcement) *AnnouncementReadCreate {
	return _c.SetAnnouncementID(v.ID)
}

// SetUser sets the "user" edge to the User entity.
func (_c *AnnouncementReadCreate) SetUser(v *User) *AnnouncementReadCreate {
	return _c.SetUserID(v.ID)
}
|
||||||
|
|
||||||
|
// Mutation returns the AnnouncementReadMutation object of the builder.
func (_c *AnnouncementReadCreate) Mutation() *AnnouncementReadMutation {
	return _c.mutation
}

// Save creates the AnnouncementRead in the database.
func (_c *AnnouncementReadCreate) Save(ctx context.Context) (*AnnouncementRead, error) {
	// Populate schema defaults before running hooks and the SQL save.
	_c.defaults()
	return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks)
}

// SaveX calls Save and panics if Save returns an error.
func (_c *AnnouncementReadCreate) SaveX(ctx context.Context) *AnnouncementRead {
	v, err := _c.Save(ctx)
	if err != nil {
		panic(err)
	}
	return v
}

// Exec executes the query.
func (_c *AnnouncementReadCreate) Exec(ctx context.Context) error {
	_, err := _c.Save(ctx)
	return err
}

// ExecX is like Exec, but panics if an error occurs.
func (_c *AnnouncementReadCreate) ExecX(ctx context.Context) {
	if err := _c.Exec(ctx); err != nil {
		panic(err)
	}
}
|
||||||
|
|
||||||
|
// defaults sets the default values of the builder before save.
// Only fields the caller did not set explicitly are filled in.
func (_c *AnnouncementReadCreate) defaults() {
	if _, ok := _c.mutation.ReadAt(); !ok {
		v := announcementread.DefaultReadAt()
		_c.mutation.SetReadAt(v)
	}
	if _, ok := _c.mutation.CreatedAt(); !ok {
		v := announcementread.DefaultCreatedAt()
		_c.mutation.SetCreatedAt(v)
	}
}
|
||||||
|
|
||||||
|
// check runs all checks and user-defined validators on the builder.
// It returns the first validation failure, or nil if the builder is complete.
func (_c *AnnouncementReadCreate) check() error {
	if _, ok := _c.mutation.AnnouncementID(); !ok {
		return &ValidationError{Name: "announcement_id", err: errors.New(`ent: missing required field "AnnouncementRead.announcement_id"`)}
	}
	if _, ok := _c.mutation.UserID(); !ok {
		return &ValidationError{Name: "user_id", err: errors.New(`ent: missing required field "AnnouncementRead.user_id"`)}
	}
	if _, ok := _c.mutation.ReadAt(); !ok {
		return &ValidationError{Name: "read_at", err: errors.New(`ent: missing required field "AnnouncementRead.read_at"`)}
	}
	if _, ok := _c.mutation.CreatedAt(); !ok {
		return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "AnnouncementRead.created_at"`)}
	}
	// Both edges are required: a read record must point at an announcement and a user.
	if len(_c.mutation.AnnouncementIDs()) == 0 {
		return &ValidationError{Name: "announcement", err: errors.New(`ent: missing required edge "AnnouncementRead.announcement"`)}
	}
	if len(_c.mutation.UserIDs()) == 0 {
		return &ValidationError{Name: "user", err: errors.New(`ent: missing required edge "AnnouncementRead.user"`)}
	}
	return nil
}
|
||||||
|
|
||||||
|
// sqlSave validates the builder and executes the INSERT, returning the
// created entity with its database-assigned ID.
func (_c *AnnouncementReadCreate) sqlSave(ctx context.Context) (*AnnouncementRead, error) {
	if err := _c.check(); err != nil {
		return nil, err
	}
	_node, _spec := _c.createSpec()
	if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil {
		if sqlgraph.IsConstraintError(err) {
			// Surface DB constraint violations as the package's ConstraintError.
			err = &ConstraintError{msg: err.Error(), wrap: err}
		}
		return nil, err
	}
	id := _spec.ID.Value.(int64)
	_node.ID = int64(id)
	_c.mutation.id = &_node.ID
	_c.mutation.done = true
	return _node, nil
}
|
||||||
|
|
||||||
|
// createSpec builds the sqlgraph specification describing the row and edges
// to insert, alongside the entity node the results will be written into.
func (_c *AnnouncementReadCreate) createSpec() (*AnnouncementRead, *sqlgraph.CreateSpec) {
	var (
		_node = &AnnouncementRead{config: _c.config}
		_spec = sqlgraph.NewCreateSpec(announcementread.Table, sqlgraph.NewFieldSpec(announcementread.FieldID, field.TypeInt64))
	)
	_spec.OnConflict = _c.conflict
	if value, ok := _c.mutation.ReadAt(); ok {
		_spec.SetField(announcementread.FieldReadAt, field.TypeTime, value)
		_node.ReadAt = value
	}
	if value, ok := _c.mutation.CreatedAt(); ok {
		_spec.SetField(announcementread.FieldCreatedAt, field.TypeTime, value)
		_node.CreatedAt = value
	}
	// announcement_id/user_id are written through their edges rather than
	// as plain fields, so the FK columns stay consistent with the graph.
	if nodes := _c.mutation.AnnouncementIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: true,
			Table:   announcementread.AnnouncementTable,
			Columns: []string{announcementread.AnnouncementColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(announcement.FieldID, field.TypeInt64),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_node.AnnouncementID = nodes[0]
		_spec.Edges = append(_spec.Edges, edge)
	}
	if nodes := _c.mutation.UserIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: true,
			Table:   announcementread.UserTable,
			Columns: []string{announcementread.UserColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_node.UserID = nodes[0]
		_spec.Edges = append(_spec.Edges, edge)
	}
	return _node, _spec
}
|
||||||
|
|
||||||
|
// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause
// of the `INSERT` statement. For example:
//
//	client.AnnouncementRead.Create().
//		SetAnnouncementID(v).
//		OnConflict(
//			// Update the row with the new values
//			// that were proposed for insertion.
//			sql.ResolveWithNewValues(),
//		).
//		// Override some of the fields with custom
//		// update values.
//		Update(func(u *ent.AnnouncementReadUpsert) {
//			SetAnnouncementID(v+v).
//		}).
//		Exec(ctx)
func (_c *AnnouncementReadCreate) OnConflict(opts ...sql.ConflictOption) *AnnouncementReadUpsertOne {
	_c.conflict = opts
	return &AnnouncementReadUpsertOne{
		create: _c,
	}
}
|
||||||
|
|
||||||
|
// OnConflictColumns calls `OnConflict` and configures the columns
|
||||||
|
// as conflict target. Using this option is equivalent to using:
|
||||||
|
//
|
||||||
|
// client.AnnouncementRead.Create().
|
||||||
|
// OnConflict(sql.ConflictColumns(columns...)).
|
||||||
|
// Exec(ctx)
|
||||||
|
func (_c *AnnouncementReadCreate) OnConflictColumns(columns ...string) *AnnouncementReadUpsertOne {
|
||||||
|
_c.conflict = append(_c.conflict, sql.ConflictColumns(columns...))
|
||||||
|
return &AnnouncementReadUpsertOne{
|
||||||
|
create: _c,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type (
	// AnnouncementReadUpsertOne is the builder for "upsert"-ing
	// one AnnouncementRead node.
	AnnouncementReadUpsertOne struct {
		// create is the underlying builder that carries the
		// mutation and the collected conflict options.
		create *AnnouncementReadCreate
	}

	// AnnouncementReadUpsert is the "OnConflict" setter.
	// It wraps the raw UPDATE SET clause of the conflict action.
	AnnouncementReadUpsert struct {
		*sql.UpdateSet
	}
)
|
||||||
|
|
||||||
|
// SetAnnouncementID sets the "announcement_id" field.
func (u *AnnouncementReadUpsert) SetAnnouncementID(v int64) *AnnouncementReadUpsert {
	// On conflict, overwrite the stored announcement_id with v.
	u.Set(announcementread.FieldAnnouncementID, v)
	return u
}
|
||||||
|
|
||||||
|
// UpdateAnnouncementID sets the "announcement_id" field to the value that was provided on create.
func (u *AnnouncementReadUpsert) UpdateAnnouncementID() *AnnouncementReadUpsert {
	// Uses the SQL "excluded" pseudo-table, i.e. the value proposed for insertion.
	u.SetExcluded(announcementread.FieldAnnouncementID)
	return u
}
|
||||||
|
|
||||||
|
// SetUserID sets the "user_id" field.
func (u *AnnouncementReadUpsert) SetUserID(v int64) *AnnouncementReadUpsert {
	// On conflict, overwrite the stored user_id with v.
	u.Set(announcementread.FieldUserID, v)
	return u
}
|
||||||
|
|
||||||
|
// UpdateUserID sets the "user_id" field to the value that was provided on create.
func (u *AnnouncementReadUpsert) UpdateUserID() *AnnouncementReadUpsert {
	// Uses the SQL "excluded" pseudo-table, i.e. the value proposed for insertion.
	u.SetExcluded(announcementread.FieldUserID)
	return u
}
|
||||||
|
|
||||||
|
// SetReadAt sets the "read_at" field.
func (u *AnnouncementReadUpsert) SetReadAt(v time.Time) *AnnouncementReadUpsert {
	// On conflict, overwrite the stored read_at with v.
	u.Set(announcementread.FieldReadAt, v)
	return u
}
|
||||||
|
|
||||||
|
// UpdateReadAt sets the "read_at" field to the value that was provided on create.
func (u *AnnouncementReadUpsert) UpdateReadAt() *AnnouncementReadUpsert {
	// Uses the SQL "excluded" pseudo-table, i.e. the value proposed for insertion.
	u.SetExcluded(announcementread.FieldReadAt)
	return u
}
|
||||||
|
|
||||||
|
// UpdateNewValues updates the mutable fields using the new values that were set on create.
// Using this option is equivalent to using:
//
//	client.AnnouncementRead.Create().
//		OnConflict(
//			sql.ResolveWithNewValues(),
//		).
//		Exec(ctx)
func (u *AnnouncementReadUpsertOne) UpdateNewValues() *AnnouncementReadUpsertOne {
	u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues())
	u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) {
		// created_at is immutable: keep the value already stored in the row
		// rather than the one proposed for insertion.
		if _, exists := u.create.mutation.CreatedAt(); exists {
			s.SetIgnore(announcementread.FieldCreatedAt)
		}
	}))
	return u
}
|
||||||
|
|
||||||
|
// Ignore sets each column to itself in case of conflict.
// Using this option is equivalent to using:
//
//	client.AnnouncementRead.Create().
//		OnConflict(sql.ResolveWithIgnore()).
//		Exec(ctx)
func (u *AnnouncementReadUpsertOne) Ignore() *AnnouncementReadUpsertOne {
	u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore())
	return u
}
|
||||||
|
|
||||||
|
// DoNothing configures the conflict_action to `DO NOTHING`.
// Supported only by SQLite and PostgreSQL.
func (u *AnnouncementReadUpsertOne) DoNothing() *AnnouncementReadUpsertOne {
	u.create.conflict = append(u.create.conflict, sql.DoNothing())
	return u
}
|
||||||
|
|
||||||
|
// Update allows overriding fields `UPDATE` values. See the AnnouncementReadCreate.OnConflict
|
||||||
|
// documentation for more info.
|
||||||
|
func (u *AnnouncementReadUpsertOne) Update(set func(*AnnouncementReadUpsert)) *AnnouncementReadUpsertOne {
|
||||||
|
u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) {
|
||||||
|
set(&AnnouncementReadUpsert{UpdateSet: update})
|
||||||
|
}))
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetAnnouncementID sets the "announcement_id" field.
|
||||||
|
func (u *AnnouncementReadUpsertOne) SetAnnouncementID(v int64) *AnnouncementReadUpsertOne {
|
||||||
|
return u.Update(func(s *AnnouncementReadUpsert) {
|
||||||
|
s.SetAnnouncementID(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateAnnouncementID sets the "announcement_id" field to the value that was provided on create.
|
||||||
|
func (u *AnnouncementReadUpsertOne) UpdateAnnouncementID() *AnnouncementReadUpsertOne {
|
||||||
|
return u.Update(func(s *AnnouncementReadUpsert) {
|
||||||
|
s.UpdateAnnouncementID()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetUserID sets the "user_id" field.
|
||||||
|
func (u *AnnouncementReadUpsertOne) SetUserID(v int64) *AnnouncementReadUpsertOne {
|
||||||
|
return u.Update(func(s *AnnouncementReadUpsert) {
|
||||||
|
s.SetUserID(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateUserID sets the "user_id" field to the value that was provided on create.
|
||||||
|
func (u *AnnouncementReadUpsertOne) UpdateUserID() *AnnouncementReadUpsertOne {
|
||||||
|
return u.Update(func(s *AnnouncementReadUpsert) {
|
||||||
|
s.UpdateUserID()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetReadAt sets the "read_at" field.
|
||||||
|
func (u *AnnouncementReadUpsertOne) SetReadAt(v time.Time) *AnnouncementReadUpsertOne {
|
||||||
|
return u.Update(func(s *AnnouncementReadUpsert) {
|
||||||
|
s.SetReadAt(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateReadAt sets the "read_at" field to the value that was provided on create.
|
||||||
|
func (u *AnnouncementReadUpsertOne) UpdateReadAt() *AnnouncementReadUpsertOne {
|
||||||
|
return u.Update(func(s *AnnouncementReadUpsert) {
|
||||||
|
s.UpdateReadAt()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exec executes the query.
func (u *AnnouncementReadUpsertOne) Exec(ctx context.Context) error {
	// Guard: Exec without OnConflict/OnConflictColumns is a programming error.
	if len(u.create.conflict) == 0 {
		return errors.New("ent: missing options for AnnouncementReadCreate.OnConflict")
	}
	return u.create.Exec(ctx)
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
func (u *AnnouncementReadUpsertOne) ExecX(ctx context.Context) {
	// NOTE(review): calls create.Exec directly, so it skips the
	// missing-conflict-options check performed by Exec above.
	if err := u.create.Exec(ctx); err != nil {
		panic(err)
	}
}
|
||||||
|
|
||||||
|
// ID executes the UPSERT query and returns the inserted/updated ID.
func (u *AnnouncementReadUpsertOne) ID(ctx context.Context) (id int64, err error) {
	node, err := u.create.Save(ctx)
	if err != nil {
		// id is its zero value here.
		return id, err
	}
	return node.ID, nil
}
|
||||||
|
|
||||||
|
// IDX is like ID, but panics if an error occurs.
func (u *AnnouncementReadUpsertOne) IDX(ctx context.Context) int64 {
	id, err := u.ID(ctx)
	if err != nil {
		panic(err)
	}
	return id
}
|
||||||
|
|
||||||
|
// AnnouncementReadCreateBulk is the builder for creating many AnnouncementRead entities in bulk.
type AnnouncementReadCreateBulk struct {
	config
	// err holds a deferred construction error, reported by Save.
	err error
	// builders are the per-entity create builders being batched.
	builders []*AnnouncementReadCreate
	// conflict holds the ON CONFLICT options for the batch insert.
	conflict []sql.ConflictOption
}
|
||||||
|
|
||||||
|
// Save creates the AnnouncementRead entities in the database.
func (_c *AnnouncementReadCreateBulk) Save(ctx context.Context) ([]*AnnouncementRead, error) {
	if _c.err != nil {
		return nil, _c.err
	}
	specs := make([]*sqlgraph.CreateSpec, len(_c.builders))
	nodes := make([]*AnnouncementRead, len(_c.builders))
	mutators := make([]Mutator, len(_c.builders))
	// Each builder contributes one mutator; the mutators are chained so that
	// builder i triggers builder i+1, and the last one performs the batch insert.
	for i := range _c.builders {
		func(i int, root context.Context) {
			builder := _c.builders[i]
			builder.defaults()
			var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
				mutation, ok := m.(*AnnouncementReadMutation)
				if !ok {
					return nil, fmt.Errorf("unexpected mutation type %T", m)
				}
				if err := builder.check(); err != nil {
					return nil, err
				}
				builder.mutation = mutation
				var err error
				nodes[i], specs[i] = builder.createSpec()
				if i < len(mutators)-1 {
					// Not the last builder: advance the chain to the next mutator.
					_, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation)
				} else {
					spec := &sqlgraph.BatchCreateSpec{Nodes: specs}
					spec.OnConflict = _c.conflict
					// Invoke the actual operation on the latest mutation in the chain.
					if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil {
						if sqlgraph.IsConstraintError(err) {
							err = &ConstraintError{msg: err.Error(), wrap: err}
						}
					}
				}
				if err != nil {
					return nil, err
				}
				mutation.id = &nodes[i].ID
				// If the database returned an ID (e.g. auto-increment), copy it back.
				if specs[i].ID.Value != nil {
					id := specs[i].ID.Value.(int64)
					nodes[i].ID = int64(id)
				}
				mutation.done = true
				return nodes[i], nil
			})
			// Wrap the mutator with the builder's hooks, outermost hook first.
			for i := len(builder.hooks) - 1; i >= 0; i-- {
				mut = builder.hooks[i](mut)
			}
			mutators[i] = mut
		}(i, ctx)
	}
	if len(mutators) > 0 {
		// Kick off the chain with the first mutator.
		if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil {
			return nil, err
		}
	}
	return nodes, nil
}
|
||||||
|
|
||||||
|
// SaveX is like Save, but panics if an error occurs.
|
||||||
|
func (_c *AnnouncementReadCreateBulk) SaveX(ctx context.Context) []*AnnouncementRead {
|
||||||
|
v, err := _c.Save(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exec executes the query.
|
||||||
|
func (_c *AnnouncementReadCreateBulk) Exec(ctx context.Context) error {
|
||||||
|
_, err := _c.Save(ctx)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
func (_c *AnnouncementReadCreateBulk) ExecX(ctx context.Context) {
	if err := _c.Exec(ctx); err != nil {
		panic(err)
	}
}
|
||||||
|
|
||||||
|
// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause
// of the `INSERT` statement. For example:
//
//	client.AnnouncementRead.CreateBulk(builders...).
//		OnConflict(
//			// Update the row with the new values
//			// that were proposed for insertion.
//			sql.ResolveWithNewValues(),
//		).
//		// Override some of the fields with custom
//		// update values.
//		Update(func(u *ent.AnnouncementReadUpsert) {
//			SetAnnouncementID(v+v).
//		}).
//		Exec(ctx)
func (_c *AnnouncementReadCreateBulk) OnConflict(opts ...sql.ConflictOption) *AnnouncementReadUpsertBulk {
	// The options are evaluated later, when the batch INSERT statement is built.
	_c.conflict = opts
	return &AnnouncementReadUpsertBulk{
		create: _c,
	}
}
|
||||||
|
|
||||||
|
// OnConflictColumns calls `OnConflict` and configures the columns
|
||||||
|
// as conflict target. Using this option is equivalent to using:
|
||||||
|
//
|
||||||
|
// client.AnnouncementRead.Create().
|
||||||
|
// OnConflict(sql.ConflictColumns(columns...)).
|
||||||
|
// Exec(ctx)
|
||||||
|
func (_c *AnnouncementReadCreateBulk) OnConflictColumns(columns ...string) *AnnouncementReadUpsertBulk {
|
||||||
|
_c.conflict = append(_c.conflict, sql.ConflictColumns(columns...))
|
||||||
|
return &AnnouncementReadUpsertBulk{
|
||||||
|
create: _c,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// AnnouncementReadUpsertBulk is the builder for "upsert"-ing
// a bulk of AnnouncementRead nodes.
type AnnouncementReadUpsertBulk struct {
	// create is the underlying bulk builder that carries the
	// per-entity mutations and the collected conflict options.
	create *AnnouncementReadCreateBulk
}
|
||||||
|
|
||||||
|
// UpdateNewValues updates the mutable fields using the new values that
// were set on create. Using this option is equivalent to using:
//
//	client.AnnouncementRead.Create().
//		OnConflict(
//			sql.ResolveWithNewValues(),
//		).
//		Exec(ctx)
func (u *AnnouncementReadUpsertBulk) UpdateNewValues() *AnnouncementReadUpsertBulk {
	u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues())
	u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) {
		// created_at is immutable: if any builder sets it, keep the stored value.
		for _, b := range u.create.builders {
			if _, exists := b.mutation.CreatedAt(); exists {
				s.SetIgnore(announcementread.FieldCreatedAt)
			}
		}
	}))
	return u
}
|
||||||
|
|
||||||
|
// Ignore sets each column to itself in case of conflict.
// Using this option is equivalent to using:
//
//	client.AnnouncementRead.Create().
//		OnConflict(sql.ResolveWithIgnore()).
//		Exec(ctx)
func (u *AnnouncementReadUpsertBulk) Ignore() *AnnouncementReadUpsertBulk {
	u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore())
	return u
}
|
||||||
|
|
||||||
|
// DoNothing configures the conflict_action to `DO NOTHING`.
// Supported only by SQLite and PostgreSQL.
func (u *AnnouncementReadUpsertBulk) DoNothing() *AnnouncementReadUpsertBulk {
	u.create.conflict = append(u.create.conflict, sql.DoNothing())
	return u
}
|
||||||
|
|
||||||
|
// Update allows overriding fields `UPDATE` values. See the AnnouncementReadCreateBulk.OnConflict
|
||||||
|
// documentation for more info.
|
||||||
|
func (u *AnnouncementReadUpsertBulk) Update(set func(*AnnouncementReadUpsert)) *AnnouncementReadUpsertBulk {
|
||||||
|
u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) {
|
||||||
|
set(&AnnouncementReadUpsert{UpdateSet: update})
|
||||||
|
}))
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetAnnouncementID sets the "announcement_id" field.
|
||||||
|
func (u *AnnouncementReadUpsertBulk) SetAnnouncementID(v int64) *AnnouncementReadUpsertBulk {
|
||||||
|
return u.Update(func(s *AnnouncementReadUpsert) {
|
||||||
|
s.SetAnnouncementID(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateAnnouncementID sets the "announcement_id" field to the value that was provided on create.
|
||||||
|
func (u *AnnouncementReadUpsertBulk) UpdateAnnouncementID() *AnnouncementReadUpsertBulk {
|
||||||
|
return u.Update(func(s *AnnouncementReadUpsert) {
|
||||||
|
s.UpdateAnnouncementID()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetUserID sets the "user_id" field.
|
||||||
|
func (u *AnnouncementReadUpsertBulk) SetUserID(v int64) *AnnouncementReadUpsertBulk {
|
||||||
|
return u.Update(func(s *AnnouncementReadUpsert) {
|
||||||
|
s.SetUserID(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateUserID sets the "user_id" field to the value that was provided on create.
|
||||||
|
func (u *AnnouncementReadUpsertBulk) UpdateUserID() *AnnouncementReadUpsertBulk {
|
||||||
|
return u.Update(func(s *AnnouncementReadUpsert) {
|
||||||
|
s.UpdateUserID()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetReadAt sets the "read_at" field.
|
||||||
|
func (u *AnnouncementReadUpsertBulk) SetReadAt(v time.Time) *AnnouncementReadUpsertBulk {
|
||||||
|
return u.Update(func(s *AnnouncementReadUpsert) {
|
||||||
|
s.SetReadAt(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateReadAt sets the "read_at" field to the value that was provided on create.
|
||||||
|
func (u *AnnouncementReadUpsertBulk) UpdateReadAt() *AnnouncementReadUpsertBulk {
|
||||||
|
return u.Update(func(s *AnnouncementReadUpsert) {
|
||||||
|
s.UpdateReadAt()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exec executes the query.
func (u *AnnouncementReadUpsertBulk) Exec(ctx context.Context) error {
	if u.create.err != nil {
		return u.create.err
	}
	// Conflict options must be set on the bulk builder, not on individual creates.
	for i, b := range u.create.builders {
		if len(b.conflict) != 0 {
			return fmt.Errorf("ent: OnConflict was set for builder %d. Set it on the AnnouncementReadCreateBulk instead", i)
		}
	}
	if len(u.create.conflict) == 0 {
		return errors.New("ent: missing options for AnnouncementReadCreateBulk.OnConflict")
	}
	return u.create.Exec(ctx)
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
func (u *AnnouncementReadUpsertBulk) ExecX(ctx context.Context) {
	// NOTE(review): calls create.Exec directly, so it skips the validation
	// performed by Exec above (per-builder conflict and missing-options checks).
	if err := u.create.Exec(ctx); err != nil {
		panic(err)
	}
}
|
||||||
88
backend/ent/announcementread_delete.go
Normal file
@@ -0,0 +1,88 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package ent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
"entgo.io/ent/schema/field"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/announcementread"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||||
|
)
|
||||||
|
|
||||||
|
// AnnouncementReadDelete is the builder for deleting a AnnouncementRead entity.
type AnnouncementReadDelete struct {
	config
	// hooks run around the delete mutation.
	hooks []Hook
	// mutation accumulates the predicates for the deletion.
	mutation *AnnouncementReadMutation
}
|
||||||
|
|
||||||
|
// Where appends a list of predicates to the AnnouncementReadDelete builder.
func (_d *AnnouncementReadDelete) Where(ps ...predicate.AnnouncementRead) *AnnouncementReadDelete {
	_d.mutation.Where(ps...)
	return _d
}
|
||||||
|
|
||||||
|
// Exec executes the deletion query and returns how many vertices were deleted.
func (_d *AnnouncementReadDelete) Exec(ctx context.Context) (int, error) {
	// Run sqlExec through the registered hooks.
	return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks)
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
|
||||||
|
func (_d *AnnouncementReadDelete) ExecX(ctx context.Context) int {
|
||||||
|
n, err := _d.Exec(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
// sqlExec builds and runs the actual SQL DELETE statement and reports
// the number of affected rows.
func (_d *AnnouncementReadDelete) sqlExec(ctx context.Context) (int, error) {
	_spec := sqlgraph.NewDeleteSpec(announcementread.Table, sqlgraph.NewFieldSpec(announcementread.FieldID, field.TypeInt64))
	// Apply any predicates collected via Where.
	if ps := _d.mutation.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec)
	// Translate low-level constraint violations into the typed ConstraintError.
	if err != nil && sqlgraph.IsConstraintError(err) {
		err = &ConstraintError{msg: err.Error(), wrap: err}
	}
	_d.mutation.done = true
	return affected, err
}
|
||||||
|
|
||||||
|
// AnnouncementReadDeleteOne is the builder for deleting a single AnnouncementRead entity.
type AnnouncementReadDeleteOne struct {
	// _d is the wrapped multi-delete builder; DeleteOne delegates to it.
	_d *AnnouncementReadDelete
}
|
||||||
|
|
||||||
|
// Where appends a list of predicates to the AnnouncementReadDelete builder.
func (_d *AnnouncementReadDeleteOne) Where(ps ...predicate.AnnouncementRead) *AnnouncementReadDeleteOne {
	_d._d.mutation.Where(ps...)
	return _d
}
|
||||||
|
|
||||||
|
// Exec executes the deletion query.
func (_d *AnnouncementReadDeleteOne) Exec(ctx context.Context) error {
	n, err := _d._d.Exec(ctx)
	switch {
	case err != nil:
		return err
	case n == 0:
		// Nothing matched: deleting a single entity requires it to exist.
		return &NotFoundError{announcementread.Label}
	default:
		return nil
	}
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
func (_d *AnnouncementReadDeleteOne) ExecX(ctx context.Context) {
	if err := _d.Exec(ctx); err != nil {
		panic(err)
	}
}
|
||||||
718
backend/ent/announcementread_query.go
Normal file
@@ -0,0 +1,718 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package ent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"math"
|
||||||
|
|
||||||
|
"entgo.io/ent"
|
||||||
|
"entgo.io/ent/dialect"
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
"entgo.io/ent/schema/field"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/announcement"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/announcementread"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/user"
|
||||||
|
)
|
||||||
|
|
||||||
|
// AnnouncementReadQuery is the builder for querying AnnouncementRead entities.
type AnnouncementReadQuery struct {
	config
	// ctx holds limit/offset/unique and other query-scoped options.
	ctx *QueryContext
	// order lists the ORDER BY options in application order.
	order []announcementread.OrderOption
	// inters are the interceptors applied around query execution.
	inters []Interceptor
	// predicates are the WHERE conditions.
	predicates []predicate.AnnouncementRead
	// withAnnouncement/withUser enable eager-loading of the edges.
	withAnnouncement *AnnouncementQuery
	withUser         *UserQuery
	// modifiers are raw selector mutations applied before execution.
	modifiers []func(*sql.Selector)
	// intermediate query (i.e. traversal path).
	sql  *sql.Selector
	path func(context.Context) (*sql.Selector, error)
}
|
||||||
|
|
||||||
|
// Where adds a new predicate for the AnnouncementReadQuery builder.
func (_q *AnnouncementReadQuery) Where(ps ...predicate.AnnouncementRead) *AnnouncementReadQuery {
	// Predicates are combined with AND at query-build time.
	_q.predicates = append(_q.predicates, ps...)
	return _q
}
|
||||||
|
|
||||||
|
// Limit the number of records to be returned by this query.
func (_q *AnnouncementReadQuery) Limit(limit int) *AnnouncementReadQuery {
	_q.ctx.Limit = &limit
	return _q
}
|
||||||
|
|
||||||
|
// Offset to start from.
func (_q *AnnouncementReadQuery) Offset(offset int) *AnnouncementReadQuery {
	_q.ctx.Offset = &offset
	return _q
}
|
||||||
|
|
||||||
|
// Unique configures the query builder to filter duplicate records on query.
// By default, unique is set to true, and can be disabled using this method.
func (_q *AnnouncementReadQuery) Unique(unique bool) *AnnouncementReadQuery {
	_q.ctx.Unique = &unique
	return _q
}
|
||||||
|
|
||||||
|
// Order specifies how the records should be ordered.
func (_q *AnnouncementReadQuery) Order(o ...announcementread.OrderOption) *AnnouncementReadQuery {
	// Orders accumulate; later calls append further ORDER BY terms.
	_q.order = append(_q.order, o...)
	return _q
}
|
||||||
|
|
||||||
|
// QueryAnnouncement chains the current query on the "announcement" edge.
func (_q *AnnouncementReadQuery) QueryAnnouncement() *AnnouncementQuery {
	query := (&AnnouncementClient{config: _q.config}).Query()
	// The traversal path is built lazily, when the chained query executes.
	query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
		if err := _q.prepareQuery(ctx); err != nil {
			return nil, err
		}
		selector := _q.sqlQuery(ctx)
		if err := selector.Err(); err != nil {
			return nil, err
		}
		// M2O inverse edge: announcement_read.announcement_id -> announcement.id.
		step := sqlgraph.NewStep(
			sqlgraph.From(announcementread.Table, announcementread.FieldID, selector),
			sqlgraph.To(announcement.Table, announcement.FieldID),
			sqlgraph.Edge(sqlgraph.M2O, true, announcementread.AnnouncementTable, announcementread.AnnouncementColumn),
		)
		fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step)
		return fromU, nil
	}
	return query
}
|
||||||
|
|
||||||
|
// QueryUser chains the current query on the "user" edge.
func (_q *AnnouncementReadQuery) QueryUser() *UserQuery {
	query := (&UserClient{config: _q.config}).Query()
	// The traversal path is built lazily, when the chained query executes.
	query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
		if err := _q.prepareQuery(ctx); err != nil {
			return nil, err
		}
		selector := _q.sqlQuery(ctx)
		if err := selector.Err(); err != nil {
			return nil, err
		}
		// M2O inverse edge: announcement_read.user_id -> user.id.
		step := sqlgraph.NewStep(
			sqlgraph.From(announcementread.Table, announcementread.FieldID, selector),
			sqlgraph.To(user.Table, user.FieldID),
			sqlgraph.Edge(sqlgraph.M2O, true, announcementread.UserTable, announcementread.UserColumn),
		)
		fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step)
		return fromU, nil
	}
	return query
}
|
||||||
|
|
||||||
|
// First returns the first AnnouncementRead entity from the query.
// Returns a *NotFoundError when no AnnouncementRead was found.
func (_q *AnnouncementReadQuery) First(ctx context.Context) (*AnnouncementRead, error) {
	nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst))
	if err != nil {
		return nil, err
	}
	if len(nodes) == 0 {
		return nil, &NotFoundError{announcementread.Label}
	}
	return nodes[0], nil
}
|
||||||
|
|
||||||
|
// FirstX is like First, but panics if an error occurs.
func (_q *AnnouncementReadQuery) FirstX(ctx context.Context) *AnnouncementRead {
	node, err := _q.First(ctx)
	// Not-found is tolerated: a nil node is returned instead of panicking.
	if err != nil && !IsNotFound(err) {
		panic(err)
	}
	return node
}
|
||||||
|
|
||||||
|
// FirstID returns the first AnnouncementRead ID from the query.
// Returns a *NotFoundError when no AnnouncementRead ID was found.
func (_q *AnnouncementReadQuery) FirstID(ctx context.Context) (id int64, err error) {
	var ids []int64
	if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil {
		return
	}
	if len(ids) == 0 {
		err = &NotFoundError{announcementread.Label}
		return
	}
	return ids[0], nil
}
|
||||||
|
|
||||||
|
// FirstIDX is like FirstID, but panics if an error occurs.
func (_q *AnnouncementReadQuery) FirstIDX(ctx context.Context) int64 {
	id, err := _q.FirstID(ctx)
	// Not-found is tolerated: the zero ID is returned instead of panicking.
	if err != nil && !IsNotFound(err) {
		panic(err)
	}
	return id
}
|
||||||
|
|
||||||
|
// Only returns a single AnnouncementRead entity found by the query, ensuring it only returns one.
// Returns a *NotSingularError when more than one AnnouncementRead entity is found.
// Returns a *NotFoundError when no AnnouncementRead entities are found.
func (_q *AnnouncementReadQuery) Only(ctx context.Context) (*AnnouncementRead, error) {
	// Limit(2) is enough to distinguish "exactly one" from "more than one".
	nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly))
	if err != nil {
		return nil, err
	}
	switch len(nodes) {
	case 1:
		return nodes[0], nil
	case 0:
		return nil, &NotFoundError{announcementread.Label}
	default:
		return nil, &NotSingularError{announcementread.Label}
	}
}
|
||||||
|
|
||||||
|
// OnlyX is like Only, but panics if an error occurs.
func (_q *AnnouncementReadQuery) OnlyX(ctx context.Context) *AnnouncementRead {
	node, err := _q.Only(ctx)
	if err != nil {
		panic(err)
	}
	return node
}
|
||||||
|
|
||||||
|
// OnlyID is like Only, but returns the only AnnouncementRead ID in the query.
// Returns a *NotSingularError when more than one AnnouncementRead ID is found.
// Returns a *NotFoundError when no entities are found.
func (_q *AnnouncementReadQuery) OnlyID(ctx context.Context) (id int64, err error) {
	var ids []int64
	// Limit(2) is enough to distinguish "exactly one" from "more than one".
	if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil {
		return
	}
	switch len(ids) {
	case 1:
		id = ids[0]
	case 0:
		err = &NotFoundError{announcementread.Label}
	default:
		err = &NotSingularError{announcementread.Label}
	}
	return
}
|
||||||
|
|
||||||
|
// OnlyIDX is like OnlyID, but panics if an error occurs.
func (_q *AnnouncementReadQuery) OnlyIDX(ctx context.Context) int64 {
	id, err := _q.OnlyID(ctx)
	if err != nil {
		panic(err)
	}
	return id
}
|
||||||
|
|
||||||
|
// All executes the query and returns a list of AnnouncementReads.
func (_q *AnnouncementReadQuery) All(ctx context.Context) ([]*AnnouncementRead, error) {
	ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll)
	if err := _q.prepareQuery(ctx); err != nil {
		return nil, err
	}
	// Run the generic querier through the registered interceptors.
	qr := querierAll[[]*AnnouncementRead, *AnnouncementReadQuery]()
	return withInterceptors[[]*AnnouncementRead](ctx, _q, qr, _q.inters)
}
|
||||||
|
|
||||||
|
// AllX is like All, but panics if an error occurs.
func (_q *AnnouncementReadQuery) AllX(ctx context.Context) []*AnnouncementRead {
	nodes, err := _q.All(ctx)
	if err != nil {
		panic(err)
	}
	return nodes
}
|
||||||
|
|
||||||
|
// IDs executes the query and returns a list of AnnouncementRead IDs.
func (_q *AnnouncementReadQuery) IDs(ctx context.Context) (ids []int64, err error) {
	// Default to unique results when this query is part of a graph traversal.
	if _q.ctx.Unique == nil && _q.path != nil {
		_q.Unique(true)
	}
	ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs)
	if err = _q.Select(announcementread.FieldID).Scan(ctx, &ids); err != nil {
		return nil, err
	}
	return ids, nil
}
|
||||||
|
|
||||||
|
// IDsX is like IDs, but panics if an error occurs.
func (_q *AnnouncementReadQuery) IDsX(ctx context.Context) []int64 {
	ids, err := _q.IDs(ctx)
	if err != nil {
		panic(err)
	}
	return ids
}
|
||||||
|
|
||||||
|
// Count returns the count of the given query.
func (_q *AnnouncementReadQuery) Count(ctx context.Context) (int, error) {
	ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount)
	if err := _q.prepareQuery(ctx); err != nil {
		return 0, err
	}
	// Run the generic count querier through the registered interceptors.
	return withInterceptors[int](ctx, _q, querierCount[*AnnouncementReadQuery](), _q.inters)
}
|
||||||
|
|
||||||
|
// CountX is like Count, but panics if an error occurs.
|
||||||
|
func (_q *AnnouncementReadQuery) CountX(ctx context.Context) int {
|
||||||
|
count, err := _q.Count(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return count
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exist returns true if the query has elements in the graph.
|
||||||
|
func (_q *AnnouncementReadQuery) Exist(ctx context.Context) (bool, error) {
|
||||||
|
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist)
|
||||||
|
switch _, err := _q.FirstID(ctx); {
|
||||||
|
case IsNotFound(err):
|
||||||
|
return false, nil
|
||||||
|
case err != nil:
|
||||||
|
return false, fmt.Errorf("ent: check existence: %w", err)
|
||||||
|
default:
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExistX is like Exist, but panics if an error occurs.
|
||||||
|
func (_q *AnnouncementReadQuery) ExistX(ctx context.Context) bool {
|
||||||
|
exist, err := _q.Exist(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return exist
|
||||||
|
}
|
||||||
|
|
||||||
|
// Clone returns a duplicate of the AnnouncementReadQuery builder, including all associated steps. It can be
|
||||||
|
// used to prepare common query builders and use them differently after the clone is made.
|
||||||
|
func (_q *AnnouncementReadQuery) Clone() *AnnouncementReadQuery {
|
||||||
|
if _q == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return &AnnouncementReadQuery{
|
||||||
|
config: _q.config,
|
||||||
|
ctx: _q.ctx.Clone(),
|
||||||
|
order: append([]announcementread.OrderOption{}, _q.order...),
|
||||||
|
inters: append([]Interceptor{}, _q.inters...),
|
||||||
|
predicates: append([]predicate.AnnouncementRead{}, _q.predicates...),
|
||||||
|
withAnnouncement: _q.withAnnouncement.Clone(),
|
||||||
|
withUser: _q.withUser.Clone(),
|
||||||
|
// clone intermediate query.
|
||||||
|
sql: _q.sql.Clone(),
|
||||||
|
path: _q.path,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithAnnouncement tells the query-builder to eager-load the nodes that are connected to
|
||||||
|
// the "announcement" edge. The optional arguments are used to configure the query builder of the edge.
|
||||||
|
func (_q *AnnouncementReadQuery) WithAnnouncement(opts ...func(*AnnouncementQuery)) *AnnouncementReadQuery {
|
||||||
|
query := (&AnnouncementClient{config: _q.config}).Query()
|
||||||
|
for _, opt := range opts {
|
||||||
|
opt(query)
|
||||||
|
}
|
||||||
|
_q.withAnnouncement = query
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithUser tells the query-builder to eager-load the nodes that are connected to
|
||||||
|
// the "user" edge. The optional arguments are used to configure the query builder of the edge.
|
||||||
|
func (_q *AnnouncementReadQuery) WithUser(opts ...func(*UserQuery)) *AnnouncementReadQuery {
|
||||||
|
query := (&UserClient{config: _q.config}).Query()
|
||||||
|
for _, opt := range opts {
|
||||||
|
opt(query)
|
||||||
|
}
|
||||||
|
_q.withUser = query
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// GroupBy is used to group vertices by one or more fields/columns.
|
||||||
|
// It is often used with aggregate functions, like: count, max, mean, min, sum.
|
||||||
|
//
|
||||||
|
// Example:
|
||||||
|
//
|
||||||
|
// var v []struct {
|
||||||
|
// AnnouncementID int64 `json:"announcement_id,omitempty"`
|
||||||
|
// Count int `json:"count,omitempty"`
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// client.AnnouncementRead.Query().
|
||||||
|
// GroupBy(announcementread.FieldAnnouncementID).
|
||||||
|
// Aggregate(ent.Count()).
|
||||||
|
// Scan(ctx, &v)
|
||||||
|
func (_q *AnnouncementReadQuery) GroupBy(field string, fields ...string) *AnnouncementReadGroupBy {
|
||||||
|
_q.ctx.Fields = append([]string{field}, fields...)
|
||||||
|
grbuild := &AnnouncementReadGroupBy{build: _q}
|
||||||
|
grbuild.flds = &_q.ctx.Fields
|
||||||
|
grbuild.label = announcementread.Label
|
||||||
|
grbuild.scan = grbuild.Scan
|
||||||
|
return grbuild
|
||||||
|
}
|
||||||
|
|
||||||
|
// Select allows the selection one or more fields/columns for the given query,
|
||||||
|
// instead of selecting all fields in the entity.
|
||||||
|
//
|
||||||
|
// Example:
|
||||||
|
//
|
||||||
|
// var v []struct {
|
||||||
|
// AnnouncementID int64 `json:"announcement_id,omitempty"`
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// client.AnnouncementRead.Query().
|
||||||
|
// Select(announcementread.FieldAnnouncementID).
|
||||||
|
// Scan(ctx, &v)
|
||||||
|
func (_q *AnnouncementReadQuery) Select(fields ...string) *AnnouncementReadSelect {
|
||||||
|
_q.ctx.Fields = append(_q.ctx.Fields, fields...)
|
||||||
|
sbuild := &AnnouncementReadSelect{AnnouncementReadQuery: _q}
|
||||||
|
sbuild.label = announcementread.Label
|
||||||
|
sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan
|
||||||
|
return sbuild
|
||||||
|
}
|
||||||
|
|
||||||
|
// Aggregate returns a AnnouncementReadSelect configured with the given aggregations.
|
||||||
|
func (_q *AnnouncementReadQuery) Aggregate(fns ...AggregateFunc) *AnnouncementReadSelect {
|
||||||
|
return _q.Select().Aggregate(fns...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_q *AnnouncementReadQuery) prepareQuery(ctx context.Context) error {
|
||||||
|
for _, inter := range _q.inters {
|
||||||
|
if inter == nil {
|
||||||
|
return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
|
||||||
|
}
|
||||||
|
if trv, ok := inter.(Traverser); ok {
|
||||||
|
if err := trv.Traverse(ctx, _q); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for _, f := range _q.ctx.Fields {
|
||||||
|
if !announcementread.ValidColumn(f) {
|
||||||
|
return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if _q.path != nil {
|
||||||
|
prev, err := _q.path(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
_q.sql = prev
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_q *AnnouncementReadQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*AnnouncementRead, error) {
|
||||||
|
var (
|
||||||
|
nodes = []*AnnouncementRead{}
|
||||||
|
_spec = _q.querySpec()
|
||||||
|
loadedTypes = [2]bool{
|
||||||
|
_q.withAnnouncement != nil,
|
||||||
|
_q.withUser != nil,
|
||||||
|
}
|
||||||
|
)
|
||||||
|
_spec.ScanValues = func(columns []string) ([]any, error) {
|
||||||
|
return (*AnnouncementRead).scanValues(nil, columns)
|
||||||
|
}
|
||||||
|
_spec.Assign = func(columns []string, values []any) error {
|
||||||
|
node := &AnnouncementRead{config: _q.config}
|
||||||
|
nodes = append(nodes, node)
|
||||||
|
node.Edges.loadedTypes = loadedTypes
|
||||||
|
return node.assignValues(columns, values)
|
||||||
|
}
|
||||||
|
if len(_q.modifiers) > 0 {
|
||||||
|
_spec.Modifiers = _q.modifiers
|
||||||
|
}
|
||||||
|
for i := range hooks {
|
||||||
|
hooks[i](ctx, _spec)
|
||||||
|
}
|
||||||
|
if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if len(nodes) == 0 {
|
||||||
|
return nodes, nil
|
||||||
|
}
|
||||||
|
if query := _q.withAnnouncement; query != nil {
|
||||||
|
if err := _q.loadAnnouncement(ctx, query, nodes, nil,
|
||||||
|
func(n *AnnouncementRead, e *Announcement) { n.Edges.Announcement = e }); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if query := _q.withUser; query != nil {
|
||||||
|
if err := _q.loadUser(ctx, query, nodes, nil,
|
||||||
|
func(n *AnnouncementRead, e *User) { n.Edges.User = e }); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nodes, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_q *AnnouncementReadQuery) loadAnnouncement(ctx context.Context, query *AnnouncementQuery, nodes []*AnnouncementRead, init func(*AnnouncementRead), assign func(*AnnouncementRead, *Announcement)) error {
|
||||||
|
ids := make([]int64, 0, len(nodes))
|
||||||
|
nodeids := make(map[int64][]*AnnouncementRead)
|
||||||
|
for i := range nodes {
|
||||||
|
fk := nodes[i].AnnouncementID
|
||||||
|
if _, ok := nodeids[fk]; !ok {
|
||||||
|
ids = append(ids, fk)
|
||||||
|
}
|
||||||
|
nodeids[fk] = append(nodeids[fk], nodes[i])
|
||||||
|
}
|
||||||
|
if len(ids) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
query.Where(announcement.IDIn(ids...))
|
||||||
|
neighbors, err := query.All(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
for _, n := range neighbors {
|
||||||
|
nodes, ok := nodeids[n.ID]
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf(`unexpected foreign-key "announcement_id" returned %v`, n.ID)
|
||||||
|
}
|
||||||
|
for i := range nodes {
|
||||||
|
assign(nodes[i], n)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func (_q *AnnouncementReadQuery) loadUser(ctx context.Context, query *UserQuery, nodes []*AnnouncementRead, init func(*AnnouncementRead), assign func(*AnnouncementRead, *User)) error {
|
||||||
|
ids := make([]int64, 0, len(nodes))
|
||||||
|
nodeids := make(map[int64][]*AnnouncementRead)
|
||||||
|
for i := range nodes {
|
||||||
|
fk := nodes[i].UserID
|
||||||
|
if _, ok := nodeids[fk]; !ok {
|
||||||
|
ids = append(ids, fk)
|
||||||
|
}
|
||||||
|
nodeids[fk] = append(nodeids[fk], nodes[i])
|
||||||
|
}
|
||||||
|
if len(ids) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
query.Where(user.IDIn(ids...))
|
||||||
|
neighbors, err := query.All(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
for _, n := range neighbors {
|
||||||
|
nodes, ok := nodeids[n.ID]
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf(`unexpected foreign-key "user_id" returned %v`, n.ID)
|
||||||
|
}
|
||||||
|
for i := range nodes {
|
||||||
|
assign(nodes[i], n)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_q *AnnouncementReadQuery) sqlCount(ctx context.Context) (int, error) {
|
||||||
|
_spec := _q.querySpec()
|
||||||
|
if len(_q.modifiers) > 0 {
|
||||||
|
_spec.Modifiers = _q.modifiers
|
||||||
|
}
|
||||||
|
_spec.Node.Columns = _q.ctx.Fields
|
||||||
|
if len(_q.ctx.Fields) > 0 {
|
||||||
|
_spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique
|
||||||
|
}
|
||||||
|
return sqlgraph.CountNodes(ctx, _q.driver, _spec)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_q *AnnouncementReadQuery) querySpec() *sqlgraph.QuerySpec {
|
||||||
|
_spec := sqlgraph.NewQuerySpec(announcementread.Table, announcementread.Columns, sqlgraph.NewFieldSpec(announcementread.FieldID, field.TypeInt64))
|
||||||
|
_spec.From = _q.sql
|
||||||
|
if unique := _q.ctx.Unique; unique != nil {
|
||||||
|
_spec.Unique = *unique
|
||||||
|
} else if _q.path != nil {
|
||||||
|
_spec.Unique = true
|
||||||
|
}
|
||||||
|
if fields := _q.ctx.Fields; len(fields) > 0 {
|
||||||
|
_spec.Node.Columns = make([]string, 0, len(fields))
|
||||||
|
_spec.Node.Columns = append(_spec.Node.Columns, announcementread.FieldID)
|
||||||
|
for i := range fields {
|
||||||
|
if fields[i] != announcementread.FieldID {
|
||||||
|
_spec.Node.Columns = append(_spec.Node.Columns, fields[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if _q.withAnnouncement != nil {
|
||||||
|
_spec.Node.AddColumnOnce(announcementread.FieldAnnouncementID)
|
||||||
|
}
|
||||||
|
if _q.withUser != nil {
|
||||||
|
_spec.Node.AddColumnOnce(announcementread.FieldUserID)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if ps := _q.predicates; len(ps) > 0 {
|
||||||
|
_spec.Predicate = func(selector *sql.Selector) {
|
||||||
|
for i := range ps {
|
||||||
|
ps[i](selector)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if limit := _q.ctx.Limit; limit != nil {
|
||||||
|
_spec.Limit = *limit
|
||||||
|
}
|
||||||
|
if offset := _q.ctx.Offset; offset != nil {
|
||||||
|
_spec.Offset = *offset
|
||||||
|
}
|
||||||
|
if ps := _q.order; len(ps) > 0 {
|
||||||
|
_spec.Order = func(selector *sql.Selector) {
|
||||||
|
for i := range ps {
|
||||||
|
ps[i](selector)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return _spec
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_q *AnnouncementReadQuery) sqlQuery(ctx context.Context) *sql.Selector {
|
||||||
|
builder := sql.Dialect(_q.driver.Dialect())
|
||||||
|
t1 := builder.Table(announcementread.Table)
|
||||||
|
columns := _q.ctx.Fields
|
||||||
|
if len(columns) == 0 {
|
||||||
|
columns = announcementread.Columns
|
||||||
|
}
|
||||||
|
selector := builder.Select(t1.Columns(columns...)...).From(t1)
|
||||||
|
if _q.sql != nil {
|
||||||
|
selector = _q.sql
|
||||||
|
selector.Select(selector.Columns(columns...)...)
|
||||||
|
}
|
||||||
|
if _q.ctx.Unique != nil && *_q.ctx.Unique {
|
||||||
|
selector.Distinct()
|
||||||
|
}
|
||||||
|
for _, m := range _q.modifiers {
|
||||||
|
m(selector)
|
||||||
|
}
|
||||||
|
for _, p := range _q.predicates {
|
||||||
|
p(selector)
|
||||||
|
}
|
||||||
|
for _, p := range _q.order {
|
||||||
|
p(selector)
|
||||||
|
}
|
||||||
|
if offset := _q.ctx.Offset; offset != nil {
|
||||||
|
// limit is mandatory for offset clause. We start
|
||||||
|
// with default value, and override it below if needed.
|
||||||
|
selector.Offset(*offset).Limit(math.MaxInt32)
|
||||||
|
}
|
||||||
|
if limit := _q.ctx.Limit; limit != nil {
|
||||||
|
selector.Limit(*limit)
|
||||||
|
}
|
||||||
|
return selector
|
||||||
|
}
|
||||||
|
|
||||||
|
// ForUpdate locks the selected rows against concurrent updates, and prevent them from being
|
||||||
|
// updated, deleted or "selected ... for update" by other sessions, until the transaction is
|
||||||
|
// either committed or rolled-back.
|
||||||
|
func (_q *AnnouncementReadQuery) ForUpdate(opts ...sql.LockOption) *AnnouncementReadQuery {
|
||||||
|
if _q.driver.Dialect() == dialect.Postgres {
|
||||||
|
_q.Unique(false)
|
||||||
|
}
|
||||||
|
_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
|
||||||
|
s.ForUpdate(opts...)
|
||||||
|
})
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock
|
||||||
|
// on any rows that are read. Other sessions can read the rows, but cannot modify them
|
||||||
|
// until your transaction commits.
|
||||||
|
func (_q *AnnouncementReadQuery) ForShare(opts ...sql.LockOption) *AnnouncementReadQuery {
|
||||||
|
if _q.driver.Dialect() == dialect.Postgres {
|
||||||
|
_q.Unique(false)
|
||||||
|
}
|
||||||
|
_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
|
||||||
|
s.ForShare(opts...)
|
||||||
|
})
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// AnnouncementReadGroupBy is the group-by builder for AnnouncementRead entities.
|
||||||
|
type AnnouncementReadGroupBy struct {
|
||||||
|
selector
|
||||||
|
build *AnnouncementReadQuery
|
||||||
|
}
|
||||||
|
|
||||||
|
// Aggregate adds the given aggregation functions to the group-by query.
|
||||||
|
func (_g *AnnouncementReadGroupBy) Aggregate(fns ...AggregateFunc) *AnnouncementReadGroupBy {
|
||||||
|
_g.fns = append(_g.fns, fns...)
|
||||||
|
return _g
|
||||||
|
}
|
||||||
|
|
||||||
|
// Scan applies the selector query and scans the result into the given value.
|
||||||
|
func (_g *AnnouncementReadGroupBy) Scan(ctx context.Context, v any) error {
|
||||||
|
ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy)
|
||||||
|
if err := _g.build.prepareQuery(ctx); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return scanWithInterceptors[*AnnouncementReadQuery, *AnnouncementReadGroupBy](ctx, _g.build, _g, _g.build.inters, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_g *AnnouncementReadGroupBy) sqlScan(ctx context.Context, root *AnnouncementReadQuery, v any) error {
|
||||||
|
selector := root.sqlQuery(ctx).Select()
|
||||||
|
aggregation := make([]string, 0, len(_g.fns))
|
||||||
|
for _, fn := range _g.fns {
|
||||||
|
aggregation = append(aggregation, fn(selector))
|
||||||
|
}
|
||||||
|
if len(selector.SelectedColumns()) == 0 {
|
||||||
|
columns := make([]string, 0, len(*_g.flds)+len(_g.fns))
|
||||||
|
for _, f := range *_g.flds {
|
||||||
|
columns = append(columns, selector.C(f))
|
||||||
|
}
|
||||||
|
columns = append(columns, aggregation...)
|
||||||
|
selector.Select(columns...)
|
||||||
|
}
|
||||||
|
selector.GroupBy(selector.Columns(*_g.flds...)...)
|
||||||
|
if err := selector.Err(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
rows := &sql.Rows{}
|
||||||
|
query, args := selector.Query()
|
||||||
|
if err := _g.build.driver.Query(ctx, query, args, rows); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer rows.Close()
|
||||||
|
return sql.ScanSlice(rows, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
// AnnouncementReadSelect is the builder for selecting fields of AnnouncementRead entities.
|
||||||
|
type AnnouncementReadSelect struct {
|
||||||
|
*AnnouncementReadQuery
|
||||||
|
selector
|
||||||
|
}
|
||||||
|
|
||||||
|
// Aggregate adds the given aggregation functions to the selector query.
|
||||||
|
func (_s *AnnouncementReadSelect) Aggregate(fns ...AggregateFunc) *AnnouncementReadSelect {
|
||||||
|
_s.fns = append(_s.fns, fns...)
|
||||||
|
return _s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Scan applies the selector query and scans the result into the given value.
|
||||||
|
func (_s *AnnouncementReadSelect) Scan(ctx context.Context, v any) error {
|
||||||
|
ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect)
|
||||||
|
if err := _s.prepareQuery(ctx); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return scanWithInterceptors[*AnnouncementReadQuery, *AnnouncementReadSelect](ctx, _s.AnnouncementReadQuery, _s, _s.inters, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_s *AnnouncementReadSelect) sqlScan(ctx context.Context, root *AnnouncementReadQuery, v any) error {
|
||||||
|
selector := root.sqlQuery(ctx)
|
||||||
|
aggregation := make([]string, 0, len(_s.fns))
|
||||||
|
for _, fn := range _s.fns {
|
||||||
|
aggregation = append(aggregation, fn(selector))
|
||||||
|
}
|
||||||
|
switch n := len(*_s.selector.flds); {
|
||||||
|
case n == 0 && len(aggregation) > 0:
|
||||||
|
selector.Select(aggregation...)
|
||||||
|
case n != 0 && len(aggregation) > 0:
|
||||||
|
selector.AppendSelect(aggregation...)
|
||||||
|
}
|
||||||
|
rows := &sql.Rows{}
|
||||||
|
query, args := selector.Query()
|
||||||
|
if err := _s.driver.Query(ctx, query, args, rows); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer rows.Close()
|
||||||
|
return sql.ScanSlice(rows, v)
|
||||||
|
}
|
||||||
456
backend/ent/announcementread_update.go
Normal file
@@ -0,0 +1,456 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package ent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
"entgo.io/ent/schema/field"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/announcement"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/announcementread"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/user"
|
||||||
|
)
|
||||||
|
|
||||||
|
// AnnouncementReadUpdate is the builder for updating AnnouncementRead entities.
|
||||||
|
type AnnouncementReadUpdate struct {
|
||||||
|
config
|
||||||
|
hooks []Hook
|
||||||
|
mutation *AnnouncementReadMutation
|
||||||
|
}
|
||||||
|
|
||||||
|
// Where appends a list predicates to the AnnouncementReadUpdate builder.
|
||||||
|
func (_u *AnnouncementReadUpdate) Where(ps ...predicate.AnnouncementRead) *AnnouncementReadUpdate {
|
||||||
|
_u.mutation.Where(ps...)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetAnnouncementID sets the "announcement_id" field.
|
||||||
|
func (_u *AnnouncementReadUpdate) SetAnnouncementID(v int64) *AnnouncementReadUpdate {
|
||||||
|
_u.mutation.SetAnnouncementID(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableAnnouncementID sets the "announcement_id" field if the given value is not nil.
|
||||||
|
func (_u *AnnouncementReadUpdate) SetNillableAnnouncementID(v *int64) *AnnouncementReadUpdate {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetAnnouncementID(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetUserID sets the "user_id" field.
|
||||||
|
func (_u *AnnouncementReadUpdate) SetUserID(v int64) *AnnouncementReadUpdate {
|
||||||
|
_u.mutation.SetUserID(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableUserID sets the "user_id" field if the given value is not nil.
|
||||||
|
func (_u *AnnouncementReadUpdate) SetNillableUserID(v *int64) *AnnouncementReadUpdate {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetUserID(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetReadAt sets the "read_at" field.
|
||||||
|
func (_u *AnnouncementReadUpdate) SetReadAt(v time.Time) *AnnouncementReadUpdate {
|
||||||
|
_u.mutation.SetReadAt(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableReadAt sets the "read_at" field if the given value is not nil.
|
||||||
|
func (_u *AnnouncementReadUpdate) SetNillableReadAt(v *time.Time) *AnnouncementReadUpdate {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetReadAt(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetAnnouncement sets the "announcement" edge to the Announcement entity.
|
||||||
|
func (_u *AnnouncementReadUpdate) SetAnnouncement(v *Announcement) *AnnouncementReadUpdate {
|
||||||
|
return _u.SetAnnouncementID(v.ID)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetUser sets the "user" edge to the User entity.
|
||||||
|
func (_u *AnnouncementReadUpdate) SetUser(v *User) *AnnouncementReadUpdate {
|
||||||
|
return _u.SetUserID(v.ID)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Mutation returns the AnnouncementReadMutation object of the builder.
|
||||||
|
func (_u *AnnouncementReadUpdate) Mutation() *AnnouncementReadMutation {
|
||||||
|
return _u.mutation
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearAnnouncement clears the "announcement" edge to the Announcement entity.
|
||||||
|
func (_u *AnnouncementReadUpdate) ClearAnnouncement() *AnnouncementReadUpdate {
|
||||||
|
_u.mutation.ClearAnnouncement()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearUser clears the "user" edge to the User entity.
|
||||||
|
func (_u *AnnouncementReadUpdate) ClearUser() *AnnouncementReadUpdate {
|
||||||
|
_u.mutation.ClearUser()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// Save executes the query and returns the number of nodes affected by the update operation.
|
||||||
|
func (_u *AnnouncementReadUpdate) Save(ctx context.Context) (int, error) {
|
||||||
|
return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SaveX is like Save, but panics if an error occurs.
|
||||||
|
func (_u *AnnouncementReadUpdate) SaveX(ctx context.Context) int {
|
||||||
|
affected, err := _u.Save(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return affected
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exec executes the query.
|
||||||
|
func (_u *AnnouncementReadUpdate) Exec(ctx context.Context) error {
|
||||||
|
_, err := _u.Save(ctx)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
|
||||||
|
func (_u *AnnouncementReadUpdate) ExecX(ctx context.Context) {
|
||||||
|
if err := _u.Exec(ctx); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// check runs all checks and user-defined validators on the builder.
|
||||||
|
func (_u *AnnouncementReadUpdate) check() error {
|
||||||
|
if _u.mutation.AnnouncementCleared() && len(_u.mutation.AnnouncementIDs()) > 0 {
|
||||||
|
return errors.New(`ent: clearing a required unique edge "AnnouncementRead.announcement"`)
|
||||||
|
}
|
||||||
|
if _u.mutation.UserCleared() && len(_u.mutation.UserIDs()) > 0 {
|
||||||
|
return errors.New(`ent: clearing a required unique edge "AnnouncementRead.user"`)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_u *AnnouncementReadUpdate) sqlSave(ctx context.Context) (_node int, err error) {
|
||||||
|
if err := _u.check(); err != nil {
|
||||||
|
return _node, err
|
||||||
|
}
|
||||||
|
_spec := sqlgraph.NewUpdateSpec(announcementread.Table, announcementread.Columns, sqlgraph.NewFieldSpec(announcementread.FieldID, field.TypeInt64))
|
||||||
|
if ps := _u.mutation.predicates; len(ps) > 0 {
|
||||||
|
_spec.Predicate = func(selector *sql.Selector) {
|
||||||
|
for i := range ps {
|
||||||
|
ps[i](selector)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.ReadAt(); ok {
|
||||||
|
_spec.SetField(announcementread.FieldReadAt, field.TypeTime, value)
|
||||||
|
}
|
||||||
|
if _u.mutation.AnnouncementCleared() {
|
||||||
|
edge := &sqlgraph.EdgeSpec{
|
||||||
|
Rel: sqlgraph.M2O,
|
||||||
|
Inverse: true,
|
||||||
|
Table: announcementread.AnnouncementTable,
|
||||||
|
Columns: []string{announcementread.AnnouncementColumn},
|
||||||
|
Bidi: false,
|
||||||
|
Target: &sqlgraph.EdgeTarget{
|
||||||
|
IDSpec: sqlgraph.NewFieldSpec(announcement.FieldID, field.TypeInt64),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||||
|
}
|
||||||
|
if nodes := _u.mutation.AnnouncementIDs(); len(nodes) > 0 {
|
||||||
|
edge := &sqlgraph.EdgeSpec{
|
||||||
|
Rel: sqlgraph.M2O,
|
||||||
|
Inverse: true,
|
||||||
|
Table: announcementread.AnnouncementTable,
|
||||||
|
Columns: []string{announcementread.AnnouncementColumn},
|
||||||
|
Bidi: false,
|
||||||
|
Target: &sqlgraph.EdgeTarget{
|
||||||
|
IDSpec: sqlgraph.NewFieldSpec(announcement.FieldID, field.TypeInt64),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, k := range nodes {
|
||||||
|
edge.Target.Nodes = append(edge.Target.Nodes, k)
|
||||||
|
}
|
||||||
|
_spec.Edges.Add = append(_spec.Edges.Add, edge)
|
||||||
|
}
|
||||||
|
if _u.mutation.UserCleared() {
|
||||||
|
edge := &sqlgraph.EdgeSpec{
|
||||||
|
Rel: sqlgraph.M2O,
|
||||||
|
Inverse: true,
|
||||||
|
Table: announcementread.UserTable,
|
||||||
|
Columns: []string{announcementread.UserColumn},
|
||||||
|
Bidi: false,
|
||||||
|
Target: &sqlgraph.EdgeTarget{
|
||||||
|
IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||||
|
}
|
||||||
|
if nodes := _u.mutation.UserIDs(); len(nodes) > 0 {
|
||||||
|
edge := &sqlgraph.EdgeSpec{
|
||||||
|
Rel: sqlgraph.M2O,
|
||||||
|
Inverse: true,
|
||||||
|
Table: announcementread.UserTable,
|
||||||
|
Columns: []string{announcementread.UserColumn},
|
||||||
|
Bidi: false,
|
||||||
|
Target: &sqlgraph.EdgeTarget{
|
||||||
|
IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, k := range nodes {
|
||||||
|
edge.Target.Nodes = append(edge.Target.Nodes, k)
|
||||||
|
}
|
||||||
|
_spec.Edges.Add = append(_spec.Edges.Add, edge)
|
||||||
|
}
|
||||||
|
if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil {
|
||||||
|
if _, ok := err.(*sqlgraph.NotFoundError); ok {
|
||||||
|
err = &NotFoundError{announcementread.Label}
|
||||||
|
} else if sqlgraph.IsConstraintError(err) {
|
||||||
|
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||||
|
}
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
_u.mutation.done = true
|
||||||
|
return _node, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// AnnouncementReadUpdateOne is the builder for updating a single AnnouncementRead entity.
|
||||||
|
type AnnouncementReadUpdateOne struct {
|
||||||
|
config
|
||||||
|
fields []string
|
||||||
|
hooks []Hook
|
||||||
|
mutation *AnnouncementReadMutation
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetAnnouncementID sets the "announcement_id" field.
|
||||||
|
func (_u *AnnouncementReadUpdateOne) SetAnnouncementID(v int64) *AnnouncementReadUpdateOne {
|
||||||
|
_u.mutation.SetAnnouncementID(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableAnnouncementID sets the "announcement_id" field if the given value is not nil.
|
||||||
|
func (_u *AnnouncementReadUpdateOne) SetNillableAnnouncementID(v *int64) *AnnouncementReadUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetAnnouncementID(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetUserID sets the "user_id" field.
|
||||||
|
func (_u *AnnouncementReadUpdateOne) SetUserID(v int64) *AnnouncementReadUpdateOne {
|
||||||
|
_u.mutation.SetUserID(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableUserID sets the "user_id" field if the given value is not nil.
|
||||||
|
func (_u *AnnouncementReadUpdateOne) SetNillableUserID(v *int64) *AnnouncementReadUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetUserID(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetReadAt sets the "read_at" field.
|
||||||
|
func (_u *AnnouncementReadUpdateOne) SetReadAt(v time.Time) *AnnouncementReadUpdateOne {
|
||||||
|
_u.mutation.SetReadAt(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableReadAt sets the "read_at" field if the given value is not nil.
|
||||||
|
func (_u *AnnouncementReadUpdateOne) SetNillableReadAt(v *time.Time) *AnnouncementReadUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetReadAt(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetAnnouncement sets the "announcement" edge to the Announcement entity.
|
||||||
|
func (_u *AnnouncementReadUpdateOne) SetAnnouncement(v *Announcement) *AnnouncementReadUpdateOne {
|
||||||
|
return _u.SetAnnouncementID(v.ID)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetUser sets the "user" edge to the User entity.
|
||||||
|
func (_u *AnnouncementReadUpdateOne) SetUser(v *User) *AnnouncementReadUpdateOne {
|
||||||
|
return _u.SetUserID(v.ID)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Mutation returns the AnnouncementReadMutation object of the builder.
|
||||||
|
func (_u *AnnouncementReadUpdateOne) Mutation() *AnnouncementReadMutation {
|
||||||
|
return _u.mutation
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearAnnouncement clears the "announcement" edge to the Announcement entity.
|
||||||
|
func (_u *AnnouncementReadUpdateOne) ClearAnnouncement() *AnnouncementReadUpdateOne {
|
||||||
|
_u.mutation.ClearAnnouncement()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearUser clears the "user" edge to the User entity.
|
||||||
|
func (_u *AnnouncementReadUpdateOne) ClearUser() *AnnouncementReadUpdateOne {
|
||||||
|
_u.mutation.ClearUser()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// Where appends a list predicates to the AnnouncementReadUpdate builder.
|
||||||
|
func (_u *AnnouncementReadUpdateOne) Where(ps ...predicate.AnnouncementRead) *AnnouncementReadUpdateOne {
|
||||||
|
_u.mutation.Where(ps...)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// Select allows selecting one or more fields (columns) of the returned entity.
|
||||||
|
// The default is selecting all fields defined in the entity schema.
|
||||||
|
func (_u *AnnouncementReadUpdateOne) Select(field string, fields ...string) *AnnouncementReadUpdateOne {
|
||||||
|
_u.fields = append([]string{field}, fields...)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// Save executes the query and returns the updated AnnouncementRead entity.
|
||||||
|
func (_u *AnnouncementReadUpdateOne) Save(ctx context.Context) (*AnnouncementRead, error) {
|
||||||
|
return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SaveX is like Save, but panics if an error occurs.
|
||||||
|
func (_u *AnnouncementReadUpdateOne) SaveX(ctx context.Context) *AnnouncementRead {
|
||||||
|
node, err := _u.Save(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return node
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exec executes the query on the entity.
|
||||||
|
func (_u *AnnouncementReadUpdateOne) Exec(ctx context.Context) error {
|
||||||
|
_, err := _u.Save(ctx)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
|
||||||
|
func (_u *AnnouncementReadUpdateOne) ExecX(ctx context.Context) {
|
||||||
|
if err := _u.Exec(ctx); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// check runs all checks and user-defined validators on the builder.
|
||||||
|
func (_u *AnnouncementReadUpdateOne) check() error {
|
||||||
|
if _u.mutation.AnnouncementCleared() && len(_u.mutation.AnnouncementIDs()) > 0 {
|
||||||
|
return errors.New(`ent: clearing a required unique edge "AnnouncementRead.announcement"`)
|
||||||
|
}
|
||||||
|
if _u.mutation.UserCleared() && len(_u.mutation.UserIDs()) > 0 {
|
||||||
|
return errors.New(`ent: clearing a required unique edge "AnnouncementRead.user"`)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_u *AnnouncementReadUpdateOne) sqlSave(ctx context.Context) (_node *AnnouncementRead, err error) {
|
||||||
|
if err := _u.check(); err != nil {
|
||||||
|
return _node, err
|
||||||
|
}
|
||||||
|
_spec := sqlgraph.NewUpdateSpec(announcementread.Table, announcementread.Columns, sqlgraph.NewFieldSpec(announcementread.FieldID, field.TypeInt64))
|
||||||
|
id, ok := _u.mutation.ID()
|
||||||
|
if !ok {
|
||||||
|
return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "AnnouncementRead.id" for update`)}
|
||||||
|
}
|
||||||
|
_spec.Node.ID.Value = id
|
||||||
|
if fields := _u.fields; len(fields) > 0 {
|
||||||
|
_spec.Node.Columns = make([]string, 0, len(fields))
|
||||||
|
_spec.Node.Columns = append(_spec.Node.Columns, announcementread.FieldID)
|
||||||
|
for _, f := range fields {
|
||||||
|
if !announcementread.ValidColumn(f) {
|
||||||
|
return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
|
||||||
|
}
|
||||||
|
if f != announcementread.FieldID {
|
||||||
|
_spec.Node.Columns = append(_spec.Node.Columns, f)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if ps := _u.mutation.predicates; len(ps) > 0 {
|
||||||
|
_spec.Predicate = func(selector *sql.Selector) {
|
||||||
|
for i := range ps {
|
||||||
|
ps[i](selector)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.ReadAt(); ok {
|
||||||
|
_spec.SetField(announcementread.FieldReadAt, field.TypeTime, value)
|
||||||
|
}
|
||||||
|
if _u.mutation.AnnouncementCleared() {
|
||||||
|
edge := &sqlgraph.EdgeSpec{
|
||||||
|
Rel: sqlgraph.M2O,
|
||||||
|
Inverse: true,
|
||||||
|
Table: announcementread.AnnouncementTable,
|
||||||
|
Columns: []string{announcementread.AnnouncementColumn},
|
||||||
|
Bidi: false,
|
||||||
|
Target: &sqlgraph.EdgeTarget{
|
||||||
|
IDSpec: sqlgraph.NewFieldSpec(announcement.FieldID, field.TypeInt64),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||||
|
}
|
||||||
|
if nodes := _u.mutation.AnnouncementIDs(); len(nodes) > 0 {
|
||||||
|
edge := &sqlgraph.EdgeSpec{
|
||||||
|
Rel: sqlgraph.M2O,
|
||||||
|
Inverse: true,
|
||||||
|
Table: announcementread.AnnouncementTable,
|
||||||
|
Columns: []string{announcementread.AnnouncementColumn},
|
||||||
|
Bidi: false,
|
||||||
|
Target: &sqlgraph.EdgeTarget{
|
||||||
|
IDSpec: sqlgraph.NewFieldSpec(announcement.FieldID, field.TypeInt64),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, k := range nodes {
|
||||||
|
edge.Target.Nodes = append(edge.Target.Nodes, k)
|
||||||
|
}
|
||||||
|
_spec.Edges.Add = append(_spec.Edges.Add, edge)
|
||||||
|
}
|
||||||
|
if _u.mutation.UserCleared() {
|
||||||
|
edge := &sqlgraph.EdgeSpec{
|
||||||
|
Rel: sqlgraph.M2O,
|
||||||
|
Inverse: true,
|
||||||
|
Table: announcementread.UserTable,
|
||||||
|
Columns: []string{announcementread.UserColumn},
|
||||||
|
Bidi: false,
|
||||||
|
Target: &sqlgraph.EdgeTarget{
|
||||||
|
IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||||
|
}
|
||||||
|
if nodes := _u.mutation.UserIDs(); len(nodes) > 0 {
|
||||||
|
edge := &sqlgraph.EdgeSpec{
|
||||||
|
Rel: sqlgraph.M2O,
|
||||||
|
Inverse: true,
|
||||||
|
Table: announcementread.UserTable,
|
||||||
|
Columns: []string{announcementread.UserColumn},
|
||||||
|
Bidi: false,
|
||||||
|
Target: &sqlgraph.EdgeTarget{
|
||||||
|
IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, k := range nodes {
|
||||||
|
edge.Target.Nodes = append(edge.Target.Nodes, k)
|
||||||
|
}
|
||||||
|
_spec.Edges.Add = append(_spec.Edges.Add, edge)
|
||||||
|
}
|
||||||
|
_node = &AnnouncementRead{config: _u.config}
|
||||||
|
_spec.Assign = _node.assignValues
|
||||||
|
_spec.ScanValues = _node.scanValues
|
||||||
|
if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil {
|
||||||
|
if _, ok := err.(*sqlgraph.NotFoundError); ok {
|
||||||
|
err = &NotFoundError{announcementread.Label}
|
||||||
|
} else if sqlgraph.IsConstraintError(err) {
|
||||||
|
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||||
|
}
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
_u.mutation.done = true
|
||||||
|
return _node, nil
|
||||||
|
}
|
||||||
442
backend/ent/apikey.go
Normal file
@@ -0,0 +1,442 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package ent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"entgo.io/ent"
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/apikey"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/group"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/user"
|
||||||
|
)
|
||||||
|
|
||||||
|
// APIKey is the model entity for the APIKey schema.
|
||||||
|
type APIKey struct {
|
||||||
|
config `json:"-"`
|
||||||
|
// ID of the ent.
|
||||||
|
ID int64 `json:"id,omitempty"`
|
||||||
|
// CreatedAt holds the value of the "created_at" field.
|
||||||
|
CreatedAt time.Time `json:"created_at,omitempty"`
|
||||||
|
// UpdatedAt holds the value of the "updated_at" field.
|
||||||
|
UpdatedAt time.Time `json:"updated_at,omitempty"`
|
||||||
|
// DeletedAt holds the value of the "deleted_at" field.
|
||||||
|
DeletedAt *time.Time `json:"deleted_at,omitempty"`
|
||||||
|
// UserID holds the value of the "user_id" field.
|
||||||
|
UserID int64 `json:"user_id,omitempty"`
|
||||||
|
// Key holds the value of the "key" field.
|
||||||
|
Key string `json:"key,omitempty"`
|
||||||
|
// Name holds the value of the "name" field.
|
||||||
|
Name string `json:"name,omitempty"`
|
||||||
|
// GroupID holds the value of the "group_id" field.
|
||||||
|
GroupID *int64 `json:"group_id,omitempty"`
|
||||||
|
// Status holds the value of the "status" field.
|
||||||
|
Status string `json:"status,omitempty"`
|
||||||
|
// Last usage time of this API key
|
||||||
|
LastUsedAt *time.Time `json:"last_used_at,omitempty"`
|
||||||
|
// Allowed IPs/CIDRs, e.g. ["192.168.1.100", "10.0.0.0/8"]
|
||||||
|
IPWhitelist []string `json:"ip_whitelist,omitempty"`
|
||||||
|
// Blocked IPs/CIDRs
|
||||||
|
IPBlacklist []string `json:"ip_blacklist,omitempty"`
|
||||||
|
// Quota limit in USD for this API key (0 = unlimited)
|
||||||
|
Quota float64 `json:"quota,omitempty"`
|
||||||
|
// Used quota amount in USD
|
||||||
|
QuotaUsed float64 `json:"quota_used,omitempty"`
|
||||||
|
// Expiration time for this API key (null = never expires)
|
||||||
|
ExpiresAt *time.Time `json:"expires_at,omitempty"`
|
||||||
|
// Rate limit in USD per 5 hours (0 = unlimited)
|
||||||
|
RateLimit5h float64 `json:"rate_limit_5h,omitempty"`
|
||||||
|
// Rate limit in USD per day (0 = unlimited)
|
||||||
|
RateLimit1d float64 `json:"rate_limit_1d,omitempty"`
|
||||||
|
// Rate limit in USD per 7 days (0 = unlimited)
|
||||||
|
RateLimit7d float64 `json:"rate_limit_7d,omitempty"`
|
||||||
|
// Used amount in USD for the current 5h window
|
||||||
|
Usage5h float64 `json:"usage_5h,omitempty"`
|
||||||
|
// Used amount in USD for the current 1d window
|
||||||
|
Usage1d float64 `json:"usage_1d,omitempty"`
|
||||||
|
// Used amount in USD for the current 7d window
|
||||||
|
Usage7d float64 `json:"usage_7d,omitempty"`
|
||||||
|
// Start time of the current 5h rate limit window
|
||||||
|
Window5hStart *time.Time `json:"window_5h_start,omitempty"`
|
||||||
|
// Start time of the current 1d rate limit window
|
||||||
|
Window1dStart *time.Time `json:"window_1d_start,omitempty"`
|
||||||
|
// Start time of the current 7d rate limit window
|
||||||
|
Window7dStart *time.Time `json:"window_7d_start,omitempty"`
|
||||||
|
// Edges holds the relations/edges for other nodes in the graph.
|
||||||
|
// The values are being populated by the APIKeyQuery when eager-loading is set.
|
||||||
|
Edges APIKeyEdges `json:"edges"`
|
||||||
|
selectValues sql.SelectValues
|
||||||
|
}
|
||||||
|
|
||||||
|
// APIKeyEdges holds the relations/edges for other nodes in the graph.
|
||||||
|
type APIKeyEdges struct {
|
||||||
|
// User holds the value of the user edge.
|
||||||
|
User *User `json:"user,omitempty"`
|
||||||
|
// Group holds the value of the group edge.
|
||||||
|
Group *Group `json:"group,omitempty"`
|
||||||
|
// UsageLogs holds the value of the usage_logs edge.
|
||||||
|
UsageLogs []*UsageLog `json:"usage_logs,omitempty"`
|
||||||
|
// loadedTypes holds the information for reporting if a
|
||||||
|
// type was loaded (or requested) in eager-loading or not.
|
||||||
|
loadedTypes [3]bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// UserOrErr returns the User value or an error if the edge
|
||||||
|
// was not loaded in eager-loading, or loaded but was not found.
|
||||||
|
func (e APIKeyEdges) UserOrErr() (*User, error) {
|
||||||
|
if e.User != nil {
|
||||||
|
return e.User, nil
|
||||||
|
} else if e.loadedTypes[0] {
|
||||||
|
return nil, &NotFoundError{label: user.Label}
|
||||||
|
}
|
||||||
|
return nil, &NotLoadedError{edge: "user"}
|
||||||
|
}
|
||||||
|
|
||||||
|
// GroupOrErr returns the Group value or an error if the edge
|
||||||
|
// was not loaded in eager-loading, or loaded but was not found.
|
||||||
|
func (e APIKeyEdges) GroupOrErr() (*Group, error) {
|
||||||
|
if e.Group != nil {
|
||||||
|
return e.Group, nil
|
||||||
|
} else if e.loadedTypes[1] {
|
||||||
|
return nil, &NotFoundError{label: group.Label}
|
||||||
|
}
|
||||||
|
return nil, &NotLoadedError{edge: "group"}
|
||||||
|
}
|
||||||
|
|
||||||
|
// UsageLogsOrErr returns the UsageLogs value or an error if the edge
|
||||||
|
// was not loaded in eager-loading.
|
||||||
|
func (e APIKeyEdges) UsageLogsOrErr() ([]*UsageLog, error) {
|
||||||
|
if e.loadedTypes[2] {
|
||||||
|
return e.UsageLogs, nil
|
||||||
|
}
|
||||||
|
return nil, &NotLoadedError{edge: "usage_logs"}
|
||||||
|
}
|
||||||
|
|
||||||
|
// scanValues returns the types for scanning values from sql.Rows.
|
||||||
|
func (*APIKey) scanValues(columns []string) ([]any, error) {
|
||||||
|
values := make([]any, len(columns))
|
||||||
|
for i := range columns {
|
||||||
|
switch columns[i] {
|
||||||
|
case apikey.FieldIPWhitelist, apikey.FieldIPBlacklist:
|
||||||
|
values[i] = new([]byte)
|
||||||
|
case apikey.FieldQuota, apikey.FieldQuotaUsed, apikey.FieldRateLimit5h, apikey.FieldRateLimit1d, apikey.FieldRateLimit7d, apikey.FieldUsage5h, apikey.FieldUsage1d, apikey.FieldUsage7d:
|
||||||
|
values[i] = new(sql.NullFloat64)
|
||||||
|
case apikey.FieldID, apikey.FieldUserID, apikey.FieldGroupID:
|
||||||
|
values[i] = new(sql.NullInt64)
|
||||||
|
case apikey.FieldKey, apikey.FieldName, apikey.FieldStatus:
|
||||||
|
values[i] = new(sql.NullString)
|
||||||
|
case apikey.FieldCreatedAt, apikey.FieldUpdatedAt, apikey.FieldDeletedAt, apikey.FieldLastUsedAt, apikey.FieldExpiresAt, apikey.FieldWindow5hStart, apikey.FieldWindow1dStart, apikey.FieldWindow7dStart:
|
||||||
|
values[i] = new(sql.NullTime)
|
||||||
|
default:
|
||||||
|
values[i] = new(sql.UnknownType)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return values, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// assignValues assigns the values that were returned from sql.Rows (after scanning)
|
||||||
|
// to the APIKey fields.
|
||||||
|
func (_m *APIKey) assignValues(columns []string, values []any) error {
|
||||||
|
if m, n := len(values), len(columns); m < n {
|
||||||
|
return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
|
||||||
|
}
|
||||||
|
for i := range columns {
|
||||||
|
switch columns[i] {
|
||||||
|
case apikey.FieldID:
|
||||||
|
value, ok := values[i].(*sql.NullInt64)
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field id", value)
|
||||||
|
}
|
||||||
|
_m.ID = int64(value.Int64)
|
||||||
|
case apikey.FieldCreatedAt:
|
||||||
|
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field created_at", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.CreatedAt = value.Time
|
||||||
|
}
|
||||||
|
case apikey.FieldUpdatedAt:
|
||||||
|
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field updated_at", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.UpdatedAt = value.Time
|
||||||
|
}
|
||||||
|
case apikey.FieldDeletedAt:
|
||||||
|
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field deleted_at", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.DeletedAt = new(time.Time)
|
||||||
|
*_m.DeletedAt = value.Time
|
||||||
|
}
|
||||||
|
case apikey.FieldUserID:
|
||||||
|
if value, ok := values[i].(*sql.NullInt64); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field user_id", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.UserID = value.Int64
|
||||||
|
}
|
||||||
|
case apikey.FieldKey:
|
||||||
|
if value, ok := values[i].(*sql.NullString); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field key", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.Key = value.String
|
||||||
|
}
|
||||||
|
case apikey.FieldName:
|
||||||
|
if value, ok := values[i].(*sql.NullString); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field name", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.Name = value.String
|
||||||
|
}
|
||||||
|
case apikey.FieldGroupID:
|
||||||
|
if value, ok := values[i].(*sql.NullInt64); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field group_id", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.GroupID = new(int64)
|
||||||
|
*_m.GroupID = value.Int64
|
||||||
|
}
|
||||||
|
case apikey.FieldStatus:
|
||||||
|
if value, ok := values[i].(*sql.NullString); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field status", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.Status = value.String
|
||||||
|
}
|
||||||
|
case apikey.FieldLastUsedAt:
|
||||||
|
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field last_used_at", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.LastUsedAt = new(time.Time)
|
||||||
|
*_m.LastUsedAt = value.Time
|
||||||
|
}
|
||||||
|
case apikey.FieldIPWhitelist:
|
||||||
|
if value, ok := values[i].(*[]byte); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field ip_whitelist", values[i])
|
||||||
|
} else if value != nil && len(*value) > 0 {
|
||||||
|
if err := json.Unmarshal(*value, &_m.IPWhitelist); err != nil {
|
||||||
|
return fmt.Errorf("unmarshal field ip_whitelist: %w", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case apikey.FieldIPBlacklist:
|
||||||
|
if value, ok := values[i].(*[]byte); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field ip_blacklist", values[i])
|
||||||
|
} else if value != nil && len(*value) > 0 {
|
||||||
|
if err := json.Unmarshal(*value, &_m.IPBlacklist); err != nil {
|
||||||
|
return fmt.Errorf("unmarshal field ip_blacklist: %w", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case apikey.FieldQuota:
|
||||||
|
if value, ok := values[i].(*sql.NullFloat64); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field quota", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.Quota = value.Float64
|
||||||
|
}
|
||||||
|
case apikey.FieldQuotaUsed:
|
||||||
|
if value, ok := values[i].(*sql.NullFloat64); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field quota_used", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.QuotaUsed = value.Float64
|
||||||
|
}
|
||||||
|
case apikey.FieldExpiresAt:
|
||||||
|
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field expires_at", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.ExpiresAt = new(time.Time)
|
||||||
|
*_m.ExpiresAt = value.Time
|
||||||
|
}
|
||||||
|
case apikey.FieldRateLimit5h:
|
||||||
|
if value, ok := values[i].(*sql.NullFloat64); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field rate_limit_5h", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.RateLimit5h = value.Float64
|
||||||
|
}
|
||||||
|
case apikey.FieldRateLimit1d:
|
||||||
|
if value, ok := values[i].(*sql.NullFloat64); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field rate_limit_1d", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.RateLimit1d = value.Float64
|
||||||
|
}
|
||||||
|
case apikey.FieldRateLimit7d:
|
||||||
|
if value, ok := values[i].(*sql.NullFloat64); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field rate_limit_7d", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.RateLimit7d = value.Float64
|
||||||
|
}
|
||||||
|
case apikey.FieldUsage5h:
|
||||||
|
if value, ok := values[i].(*sql.NullFloat64); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field usage_5h", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.Usage5h = value.Float64
|
||||||
|
}
|
||||||
|
case apikey.FieldUsage1d:
|
||||||
|
if value, ok := values[i].(*sql.NullFloat64); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field usage_1d", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.Usage1d = value.Float64
|
||||||
|
}
|
||||||
|
case apikey.FieldUsage7d:
|
||||||
|
if value, ok := values[i].(*sql.NullFloat64); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field usage_7d", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.Usage7d = value.Float64
|
||||||
|
}
|
||||||
|
case apikey.FieldWindow5hStart:
|
||||||
|
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field window_5h_start", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.Window5hStart = new(time.Time)
|
||||||
|
*_m.Window5hStart = value.Time
|
||||||
|
}
|
||||||
|
case apikey.FieldWindow1dStart:
|
||||||
|
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field window_1d_start", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.Window1dStart = new(time.Time)
|
||||||
|
*_m.Window1dStart = value.Time
|
||||||
|
}
|
||||||
|
case apikey.FieldWindow7dStart:
|
||||||
|
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field window_7d_start", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.Window7dStart = new(time.Time)
|
||||||
|
*_m.Window7dStart = value.Time
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
_m.selectValues.Set(columns[i], values[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Value returns the ent.Value that was dynamically selected and assigned to the APIKey.
|
||||||
|
// This includes values selected through modifiers, order, etc.
|
||||||
|
func (_m *APIKey) Value(name string) (ent.Value, error) {
|
||||||
|
return _m.selectValues.Get(name)
|
||||||
|
}
|
||||||
|
|
||||||
|
// QueryUser queries the "user" edge of the APIKey entity.
|
||||||
|
func (_m *APIKey) QueryUser() *UserQuery {
|
||||||
|
return NewAPIKeyClient(_m.config).QueryUser(_m)
|
||||||
|
}
|
||||||
|
|
||||||
|
// QueryGroup queries the "group" edge of the APIKey entity.
|
||||||
|
func (_m *APIKey) QueryGroup() *GroupQuery {
|
||||||
|
return NewAPIKeyClient(_m.config).QueryGroup(_m)
|
||||||
|
}
|
||||||
|
|
||||||
|
// QueryUsageLogs queries the "usage_logs" edge of the APIKey entity.
|
||||||
|
func (_m *APIKey) QueryUsageLogs() *UsageLogQuery {
|
||||||
|
return NewAPIKeyClient(_m.config).QueryUsageLogs(_m)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update returns a builder for updating this APIKey.
|
||||||
|
// Note that you need to call APIKey.Unwrap() before calling this method if this APIKey
|
||||||
|
// was returned from a transaction, and the transaction was committed or rolled back.
|
||||||
|
func (_m *APIKey) Update() *APIKeyUpdateOne {
|
||||||
|
return NewAPIKeyClient(_m.config).UpdateOne(_m)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unwrap unwraps the APIKey entity that was returned from a transaction after it was closed,
|
||||||
|
// so that all future queries will be executed through the driver which created the transaction.
|
||||||
|
func (_m *APIKey) Unwrap() *APIKey {
|
||||||
|
_tx, ok := _m.config.driver.(*txDriver)
|
||||||
|
if !ok {
|
||||||
|
panic("ent: APIKey is not a transactional entity")
|
||||||
|
}
|
||||||
|
_m.config.driver = _tx.drv
|
||||||
|
return _m
|
||||||
|
}
|
||||||
|
|
||||||
|
// String implements the fmt.Stringer.
|
||||||
|
func (_m *APIKey) String() string {
|
||||||
|
var builder strings.Builder
|
||||||
|
builder.WriteString("APIKey(")
|
||||||
|
builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID))
|
||||||
|
builder.WriteString("created_at=")
|
||||||
|
builder.WriteString(_m.CreatedAt.Format(time.ANSIC))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("updated_at=")
|
||||||
|
builder.WriteString(_m.UpdatedAt.Format(time.ANSIC))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
if v := _m.DeletedAt; v != nil {
|
||||||
|
builder.WriteString("deleted_at=")
|
||||||
|
builder.WriteString(v.Format(time.ANSIC))
|
||||||
|
}
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("user_id=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", _m.UserID))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("key=")
|
||||||
|
builder.WriteString(_m.Key)
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("name=")
|
||||||
|
builder.WriteString(_m.Name)
|
||||||
|
builder.WriteString(", ")
|
||||||
|
if v := _m.GroupID; v != nil {
|
||||||
|
builder.WriteString("group_id=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", *v))
|
||||||
|
}
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("status=")
|
||||||
|
builder.WriteString(_m.Status)
|
||||||
|
builder.WriteString(", ")
|
||||||
|
if v := _m.LastUsedAt; v != nil {
|
||||||
|
builder.WriteString("last_used_at=")
|
||||||
|
builder.WriteString(v.Format(time.ANSIC))
|
||||||
|
}
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("ip_whitelist=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", _m.IPWhitelist))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("ip_blacklist=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", _m.IPBlacklist))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("quota=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", _m.Quota))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("quota_used=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", _m.QuotaUsed))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
if v := _m.ExpiresAt; v != nil {
|
||||||
|
builder.WriteString("expires_at=")
|
||||||
|
builder.WriteString(v.Format(time.ANSIC))
|
||||||
|
}
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("rate_limit_5h=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", _m.RateLimit5h))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("rate_limit_1d=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", _m.RateLimit1d))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("rate_limit_7d=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", _m.RateLimit7d))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("usage_5h=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", _m.Usage5h))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("usage_1d=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", _m.Usage1d))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("usage_7d=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", _m.Usage7d))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
if v := _m.Window5hStart; v != nil {
|
||||||
|
builder.WriteString("window_5h_start=")
|
||||||
|
builder.WriteString(v.Format(time.ANSIC))
|
||||||
|
}
|
||||||
|
builder.WriteString(", ")
|
||||||
|
if v := _m.Window1dStart; v != nil {
|
||||||
|
builder.WriteString("window_1d_start=")
|
||||||
|
builder.WriteString(v.Format(time.ANSIC))
|
||||||
|
}
|
||||||
|
builder.WriteString(", ")
|
||||||
|
if v := _m.Window7dStart; v != nil {
|
||||||
|
builder.WriteString("window_7d_start=")
|
||||||
|
builder.WriteString(v.Format(time.ANSIC))
|
||||||
|
}
|
||||||
|
builder.WriteByte(')')
|
||||||
|
return builder.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
// APIKeys is a parsable slice of APIKey.
|
||||||
|
type APIKeys []*APIKey
|
||||||
333
backend/ent/apikey/apikey.go
Normal file
@@ -0,0 +1,333 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package apikey
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"entgo.io/ent"
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// Label holds the string label denoting the apikey type in the database.
|
||||||
|
Label = "api_key"
|
||||||
|
// FieldID holds the string denoting the id field in the database.
|
||||||
|
FieldID = "id"
|
||||||
|
// FieldCreatedAt holds the string denoting the created_at field in the database.
|
||||||
|
FieldCreatedAt = "created_at"
|
||||||
|
// FieldUpdatedAt holds the string denoting the updated_at field in the database.
|
||||||
|
FieldUpdatedAt = "updated_at"
|
||||||
|
// FieldDeletedAt holds the string denoting the deleted_at field in the database.
|
||||||
|
FieldDeletedAt = "deleted_at"
|
||||||
|
// FieldUserID holds the string denoting the user_id field in the database.
|
||||||
|
FieldUserID = "user_id"
|
||||||
|
// FieldKey holds the string denoting the key field in the database.
|
||||||
|
FieldKey = "key"
|
||||||
|
// FieldName holds the string denoting the name field in the database.
|
||||||
|
FieldName = "name"
|
||||||
|
// FieldGroupID holds the string denoting the group_id field in the database.
|
||||||
|
FieldGroupID = "group_id"
|
||||||
|
// FieldStatus holds the string denoting the status field in the database.
|
||||||
|
FieldStatus = "status"
|
||||||
|
// FieldLastUsedAt holds the string denoting the last_used_at field in the database.
|
||||||
|
FieldLastUsedAt = "last_used_at"
|
||||||
|
// FieldIPWhitelist holds the string denoting the ip_whitelist field in the database.
|
||||||
|
FieldIPWhitelist = "ip_whitelist"
|
||||||
|
// FieldIPBlacklist holds the string denoting the ip_blacklist field in the database.
|
||||||
|
FieldIPBlacklist = "ip_blacklist"
|
||||||
|
// FieldQuota holds the string denoting the quota field in the database.
|
||||||
|
FieldQuota = "quota"
|
||||||
|
// FieldQuotaUsed holds the string denoting the quota_used field in the database.
|
||||||
|
FieldQuotaUsed = "quota_used"
|
||||||
|
// FieldExpiresAt holds the string denoting the expires_at field in the database.
|
||||||
|
FieldExpiresAt = "expires_at"
|
||||||
|
// FieldRateLimit5h holds the string denoting the rate_limit_5h field in the database.
|
||||||
|
FieldRateLimit5h = "rate_limit_5h"
|
||||||
|
// FieldRateLimit1d holds the string denoting the rate_limit_1d field in the database.
|
||||||
|
FieldRateLimit1d = "rate_limit_1d"
|
||||||
|
// FieldRateLimit7d holds the string denoting the rate_limit_7d field in the database.
|
||||||
|
FieldRateLimit7d = "rate_limit_7d"
|
||||||
|
// FieldUsage5h holds the string denoting the usage_5h field in the database.
|
||||||
|
FieldUsage5h = "usage_5h"
|
||||||
|
// FieldUsage1d holds the string denoting the usage_1d field in the database.
|
||||||
|
FieldUsage1d = "usage_1d"
|
||||||
|
// FieldUsage7d holds the string denoting the usage_7d field in the database.
|
||||||
|
FieldUsage7d = "usage_7d"
|
||||||
|
// FieldWindow5hStart holds the string denoting the window_5h_start field in the database.
|
||||||
|
FieldWindow5hStart = "window_5h_start"
|
||||||
|
// FieldWindow1dStart holds the string denoting the window_1d_start field in the database.
|
||||||
|
FieldWindow1dStart = "window_1d_start"
|
||||||
|
// FieldWindow7dStart holds the string denoting the window_7d_start field in the database.
|
||||||
|
FieldWindow7dStart = "window_7d_start"
|
||||||
|
// EdgeUser holds the string denoting the user edge name in mutations.
|
||||||
|
EdgeUser = "user"
|
||||||
|
// EdgeGroup holds the string denoting the group edge name in mutations.
|
||||||
|
EdgeGroup = "group"
|
||||||
|
// EdgeUsageLogs holds the string denoting the usage_logs edge name in mutations.
|
||||||
|
EdgeUsageLogs = "usage_logs"
|
||||||
|
// Table holds the table name of the apikey in the database.
|
||||||
|
Table = "api_keys"
|
||||||
|
// UserTable is the table that holds the user relation/edge.
|
||||||
|
UserTable = "api_keys"
|
||||||
|
// UserInverseTable is the table name for the User entity.
|
||||||
|
// It exists in this package in order to avoid circular dependency with the "user" package.
|
||||||
|
UserInverseTable = "users"
|
||||||
|
// UserColumn is the table column denoting the user relation/edge.
|
||||||
|
UserColumn = "user_id"
|
||||||
|
// GroupTable is the table that holds the group relation/edge.
|
||||||
|
GroupTable = "api_keys"
|
||||||
|
// GroupInverseTable is the table name for the Group entity.
|
||||||
|
// It exists in this package in order to avoid circular dependency with the "group" package.
|
||||||
|
GroupInverseTable = "groups"
|
||||||
|
// GroupColumn is the table column denoting the group relation/edge.
|
||||||
|
GroupColumn = "group_id"
|
||||||
|
// UsageLogsTable is the table that holds the usage_logs relation/edge.
|
||||||
|
UsageLogsTable = "usage_logs"
|
||||||
|
// UsageLogsInverseTable is the table name for the UsageLog entity.
|
||||||
|
// It exists in this package in order to avoid circular dependency with the "usagelog" package.
|
||||||
|
UsageLogsInverseTable = "usage_logs"
|
||||||
|
// UsageLogsColumn is the table column denoting the usage_logs relation/edge.
|
||||||
|
UsageLogsColumn = "api_key_id"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Columns holds all SQL columns for apikey fields.
// The entries reference the Field* constants declared in this package and are
// consumed by ValidColumn below and by the query builders; do not reorder or
// edit by hand — this slice is regenerated by ent.
var Columns = []string{
	FieldID,
	FieldCreatedAt,
	FieldUpdatedAt,
	FieldDeletedAt,
	FieldUserID,
	FieldKey,
	FieldName,
	FieldGroupID,
	FieldStatus,
	FieldLastUsedAt,
	FieldIPWhitelist,
	FieldIPBlacklist,
	FieldQuota,
	FieldQuotaUsed,
	FieldExpiresAt,
	FieldRateLimit5h,
	FieldRateLimit1d,
	FieldRateLimit7d,
	FieldUsage5h,
	FieldUsage1d,
	FieldUsage7d,
	FieldWindow5hStart,
	FieldWindow1dStart,
	FieldWindow7dStart,
}
|
||||||
|
|
||||||
|
// ValidColumn reports if the column name is valid (part of the table columns).
|
||||||
|
func ValidColumn(column string) bool {
|
||||||
|
for i := range Columns {
|
||||||
|
if column == Columns[i] {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Note that the variables below are initialized by the runtime
// package on the initialization of the application. Therefore,
// it should be imported in the main as follows:
//
// import _ "github.com/Wei-Shaw/sub2api/ent/runtime"
var (
	// Hooks and Interceptors are populated by the runtime package noted above;
	// they remain zero-valued unless that package is imported.
	Hooks        [1]ent.Hook
	Interceptors [1]ent.Interceptor
	// DefaultCreatedAt holds the default value on creation for the "created_at" field.
	DefaultCreatedAt func() time.Time
	// DefaultUpdatedAt holds the default value on creation for the "updated_at" field.
	DefaultUpdatedAt func() time.Time
	// UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field.
	UpdateDefaultUpdatedAt func() time.Time
	// KeyValidator is a validator for the "key" field. It is called by the builders before save.
	KeyValidator func(string) error
	// NameValidator is a validator for the "name" field. It is called by the builders before save.
	NameValidator func(string) error
	// DefaultStatus holds the default value on creation for the "status" field.
	DefaultStatus string
	// StatusValidator is a validator for the "status" field. It is called by the builders before save.
	StatusValidator func(string) error
	// DefaultQuota holds the default value on creation for the "quota" field.
	DefaultQuota float64
	// DefaultQuotaUsed holds the default value on creation for the "quota_used" field.
	DefaultQuotaUsed float64
	// DefaultRateLimit5h holds the default value on creation for the "rate_limit_5h" field.
	DefaultRateLimit5h float64
	// DefaultRateLimit1d holds the default value on creation for the "rate_limit_1d" field.
	DefaultRateLimit1d float64
	// DefaultRateLimit7d holds the default value on creation for the "rate_limit_7d" field.
	DefaultRateLimit7d float64
	// DefaultUsage5h holds the default value on creation for the "usage_5h" field.
	DefaultUsage5h float64
	// DefaultUsage1d holds the default value on creation for the "usage_1d" field.
	DefaultUsage1d float64
	// DefaultUsage7d holds the default value on creation for the "usage_7d" field.
	DefaultUsage7d float64
)
|
||||||
|
|
||||||
|
// OrderOption defines the ordering options for the APIKey queries.
|
||||||
|
type OrderOption func(*sql.Selector)
|
||||||
|
|
||||||
|
// ByID orders the results by the id field.
|
||||||
|
func ByID(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldID, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByCreatedAt orders the results by the created_at field.
|
||||||
|
func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByUpdatedAt orders the results by the updated_at field.
|
||||||
|
func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByDeletedAt orders the results by the deleted_at field.
|
||||||
|
func ByDeletedAt(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldDeletedAt, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByUserID orders the results by the user_id field.
|
||||||
|
func ByUserID(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldUserID, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByKey orders the results by the key field.
|
||||||
|
func ByKey(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldKey, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByName orders the results by the name field.
|
||||||
|
func ByName(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldName, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByGroupID orders the results by the group_id field.
|
||||||
|
func ByGroupID(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldGroupID, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByStatus orders the results by the status field.
|
||||||
|
func ByStatus(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldStatus, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByLastUsedAt orders the results by the last_used_at field.
|
||||||
|
func ByLastUsedAt(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldLastUsedAt, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByQuota orders the results by the quota field.
|
||||||
|
func ByQuota(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldQuota, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByQuotaUsed orders the results by the quota_used field.
|
||||||
|
func ByQuotaUsed(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldQuotaUsed, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByExpiresAt orders the results by the expires_at field.
|
||||||
|
func ByExpiresAt(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldExpiresAt, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByRateLimit5h orders the results by the rate_limit_5h field.
|
||||||
|
func ByRateLimit5h(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldRateLimit5h, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByRateLimit1d orders the results by the rate_limit_1d field.
|
||||||
|
func ByRateLimit1d(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldRateLimit1d, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByRateLimit7d orders the results by the rate_limit_7d field.
|
||||||
|
func ByRateLimit7d(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldRateLimit7d, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByUsage5h orders the results by the usage_5h field.
|
||||||
|
func ByUsage5h(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldUsage5h, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByUsage1d orders the results by the usage_1d field.
|
||||||
|
func ByUsage1d(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldUsage1d, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByUsage7d orders the results by the usage_7d field.
|
||||||
|
func ByUsage7d(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldUsage7d, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByWindow5hStart orders the results by the window_5h_start field.
|
||||||
|
func ByWindow5hStart(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldWindow5hStart, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByWindow1dStart orders the results by the window_1d_start field.
|
||||||
|
func ByWindow1dStart(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldWindow1dStart, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByWindow7dStart orders the results by the window_7d_start field.
|
||||||
|
func ByWindow7dStart(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldWindow7dStart, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByUserField orders the results by user field.
|
||||||
|
func ByUserField(field string, opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return func(s *sql.Selector) {
|
||||||
|
sqlgraph.OrderByNeighborTerms(s, newUserStep(), sql.OrderByField(field, opts...))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByGroupField orders the results by group field.
|
||||||
|
func ByGroupField(field string, opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return func(s *sql.Selector) {
|
||||||
|
sqlgraph.OrderByNeighborTerms(s, newGroupStep(), sql.OrderByField(field, opts...))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByUsageLogsCount orders the results by usage_logs count.
|
||||||
|
func ByUsageLogsCount(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return func(s *sql.Selector) {
|
||||||
|
sqlgraph.OrderByNeighborsCount(s, newUsageLogsStep(), opts...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByUsageLogs orders the results by usage_logs terms.
|
||||||
|
func ByUsageLogs(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
|
||||||
|
return func(s *sql.Selector) {
|
||||||
|
sqlgraph.OrderByNeighborTerms(s, newUsageLogsStep(), append([]sql.OrderTerm{term}, terms...)...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func newUserStep() *sqlgraph.Step {
|
||||||
|
return sqlgraph.NewStep(
|
||||||
|
sqlgraph.From(Table, FieldID),
|
||||||
|
sqlgraph.To(UserInverseTable, FieldID),
|
||||||
|
sqlgraph.Edge(sqlgraph.M2O, true, UserTable, UserColumn),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
func newGroupStep() *sqlgraph.Step {
|
||||||
|
return sqlgraph.NewStep(
|
||||||
|
sqlgraph.From(Table, FieldID),
|
||||||
|
sqlgraph.To(GroupInverseTable, FieldID),
|
||||||
|
sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
func newUsageLogsStep() *sqlgraph.Step {
|
||||||
|
return sqlgraph.NewStep(
|
||||||
|
sqlgraph.From(Table, FieldID),
|
||||||
|
sqlgraph.To(UsageLogsInverseTable, FieldID),
|
||||||
|
sqlgraph.Edge(sqlgraph.O2M, false, UsageLogsTable, UsageLogsColumn),
|
||||||
|
)
|
||||||
|
}
|
||||||
1210
backend/ent/apikey/where.go
Normal file
2197
backend/ent/apikey_create.go
Normal file
88
backend/ent/apikey_delete.go
Normal file
@@ -0,0 +1,88 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package ent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
"entgo.io/ent/schema/field"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/apikey"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||||
|
)
|
||||||
|
|
||||||
|
// APIKeyDelete is the builder for deleting a APIKey entity.
type APIKeyDelete struct {
	config
	// hooks run around the delete mutation via withHooks (see Exec).
	hooks []Hook
	// mutation accumulates the predicates added through Where.
	mutation *APIKeyMutation
}
|
||||||
|
|
||||||
|
// Where appends a list predicates to the APIKeyDelete builder.
|
||||||
|
func (_d *APIKeyDelete) Where(ps ...predicate.APIKey) *APIKeyDelete {
|
||||||
|
_d.mutation.Where(ps...)
|
||||||
|
return _d
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exec executes the deletion query and returns how many vertices were deleted.
|
||||||
|
func (_d *APIKeyDelete) Exec(ctx context.Context) (int, error) {
|
||||||
|
return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
|
||||||
|
func (_d *APIKeyDelete) ExecX(ctx context.Context) int {
|
||||||
|
n, err := _d.Exec(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_d *APIKeyDelete) sqlExec(ctx context.Context) (int, error) {
|
||||||
|
_spec := sqlgraph.NewDeleteSpec(apikey.Table, sqlgraph.NewFieldSpec(apikey.FieldID, field.TypeInt64))
|
||||||
|
if ps := _d.mutation.predicates; len(ps) > 0 {
|
||||||
|
_spec.Predicate = func(selector *sql.Selector) {
|
||||||
|
for i := range ps {
|
||||||
|
ps[i](selector)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec)
|
||||||
|
if err != nil && sqlgraph.IsConstraintError(err) {
|
||||||
|
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||||
|
}
|
||||||
|
_d.mutation.done = true
|
||||||
|
return affected, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// APIKeyDeleteOne is the builder for deleting a single APIKey entity.
type APIKeyDeleteOne struct {
	// _d is the underlying APIKeyDelete builder this single-entity wrapper delegates to.
	_d *APIKeyDelete
}
|
||||||
|
|
||||||
|
// Where appends a list predicates to the APIKeyDelete builder.
|
||||||
|
func (_d *APIKeyDeleteOne) Where(ps ...predicate.APIKey) *APIKeyDeleteOne {
|
||||||
|
_d._d.mutation.Where(ps...)
|
||||||
|
return _d
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exec executes the deletion query.
|
||||||
|
func (_d *APIKeyDeleteOne) Exec(ctx context.Context) error {
|
||||||
|
n, err := _d._d.Exec(ctx)
|
||||||
|
switch {
|
||||||
|
case err != nil:
|
||||||
|
return err
|
||||||
|
case n == 0:
|
||||||
|
return &NotFoundError{apikey.Label}
|
||||||
|
default:
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
|
||||||
|
func (_d *APIKeyDeleteOne) ExecX(ctx context.Context) {
|
||||||
|
if err := _d.Exec(ctx); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
796
backend/ent/apikey_query.go
Normal file
@@ -0,0 +1,796 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package ent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"database/sql/driver"
|
||||||
|
"fmt"
|
||||||
|
"math"
|
||||||
|
|
||||||
|
"entgo.io/ent"
|
||||||
|
"entgo.io/ent/dialect"
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
"entgo.io/ent/schema/field"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/apikey"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/group"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/usagelog"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/user"
|
||||||
|
)
|
||||||
|
|
||||||
|
// APIKeyQuery is the builder for querying APIKey entities.
type APIKeyQuery struct {
	config
	ctx        *QueryContext
	order      []apikey.OrderOption
	inters     []Interceptor
	predicates []predicate.APIKey
	// Eager-loading configuration: a non-nil sub-query makes sqlAll fetch the
	// corresponding edge alongside the APIKey nodes (set via WithUser etc.).
	withUser      *UserQuery
	withGroup     *GroupQuery
	withUsageLogs *UsageLogQuery
	// modifiers are applied to the generated SQL spec before execution.
	modifiers []func(*sql.Selector)
	// intermediate query (i.e. traversal path).
	sql  *sql.Selector
	path func(context.Context) (*sql.Selector, error)
}
|
||||||
|
|
||||||
|
// Where adds a new predicate for the APIKeyQuery builder.
|
||||||
|
func (_q *APIKeyQuery) Where(ps ...predicate.APIKey) *APIKeyQuery {
|
||||||
|
_q.predicates = append(_q.predicates, ps...)
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// Limit the number of records to be returned by this query.
|
||||||
|
func (_q *APIKeyQuery) Limit(limit int) *APIKeyQuery {
|
||||||
|
_q.ctx.Limit = &limit
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// Offset to start from.
|
||||||
|
func (_q *APIKeyQuery) Offset(offset int) *APIKeyQuery {
|
||||||
|
_q.ctx.Offset = &offset
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unique configures the query builder to filter duplicate records on query.
|
||||||
|
// By default, unique is set to true, and can be disabled using this method.
|
||||||
|
func (_q *APIKeyQuery) Unique(unique bool) *APIKeyQuery {
|
||||||
|
_q.ctx.Unique = &unique
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// Order specifies how the records should be ordered.
|
||||||
|
func (_q *APIKeyQuery) Order(o ...apikey.OrderOption) *APIKeyQuery {
|
||||||
|
_q.order = append(_q.order, o...)
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// QueryUser chains the current query on the "user" edge.
func (_q *APIKeyQuery) QueryUser() *UserQuery {
	query := (&UserClient{config: _q.config}).Query()
	// The traversal is resolved lazily: the selector is built from this
	// query's state only when the returned query actually executes.
	query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
		if err := _q.prepareQuery(ctx); err != nil {
			return nil, err
		}
		selector := _q.sqlQuery(ctx)
		if err := selector.Err(); err != nil {
			return nil, err
		}
		step := sqlgraph.NewStep(
			sqlgraph.From(apikey.Table, apikey.FieldID, selector),
			sqlgraph.To(user.Table, user.FieldID),
			sqlgraph.Edge(sqlgraph.M2O, true, apikey.UserTable, apikey.UserColumn),
		)
		fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step)
		return fromU, nil
	}
	return query
}

// QueryGroup chains the current query on the "group" edge.
func (_q *APIKeyQuery) QueryGroup() *GroupQuery {
	query := (&GroupClient{config: _q.config}).Query()
	// Lazily resolved traversal; see QueryUser for the pattern.
	query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
		if err := _q.prepareQuery(ctx); err != nil {
			return nil, err
		}
		selector := _q.sqlQuery(ctx)
		if err := selector.Err(); err != nil {
			return nil, err
		}
		step := sqlgraph.NewStep(
			sqlgraph.From(apikey.Table, apikey.FieldID, selector),
			sqlgraph.To(group.Table, group.FieldID),
			sqlgraph.Edge(sqlgraph.M2O, true, apikey.GroupTable, apikey.GroupColumn),
		)
		fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step)
		return fromU, nil
	}
	return query
}

// QueryUsageLogs chains the current query on the "usage_logs" edge.
func (_q *APIKeyQuery) QueryUsageLogs() *UsageLogQuery {
	query := (&UsageLogClient{config: _q.config}).Query()
	// Lazily resolved traversal; see QueryUser for the pattern.
	query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
		if err := _q.prepareQuery(ctx); err != nil {
			return nil, err
		}
		selector := _q.sqlQuery(ctx)
		if err := selector.Err(); err != nil {
			return nil, err
		}
		step := sqlgraph.NewStep(
			sqlgraph.From(apikey.Table, apikey.FieldID, selector),
			sqlgraph.To(usagelog.Table, usagelog.FieldID),
			sqlgraph.Edge(sqlgraph.O2M, false, apikey.UsageLogsTable, apikey.UsageLogsColumn),
		)
		fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step)
		return fromU, nil
	}
	return query
}
|
||||||
|
|
||||||
|
// First returns the first APIKey entity from the query.
|
||||||
|
// Returns a *NotFoundError when no APIKey was found.
|
||||||
|
func (_q *APIKeyQuery) First(ctx context.Context) (*APIKey, error) {
|
||||||
|
nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if len(nodes) == 0 {
|
||||||
|
return nil, &NotFoundError{apikey.Label}
|
||||||
|
}
|
||||||
|
return nodes[0], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// FirstX is like First, but panics if an error occurs.
|
||||||
|
func (_q *APIKeyQuery) FirstX(ctx context.Context) *APIKey {
|
||||||
|
node, err := _q.First(ctx)
|
||||||
|
if err != nil && !IsNotFound(err) {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return node
|
||||||
|
}
|
||||||
|
|
||||||
|
// FirstID returns the first APIKey ID from the query.
|
||||||
|
// Returns a *NotFoundError when no APIKey ID was found.
|
||||||
|
func (_q *APIKeyQuery) FirstID(ctx context.Context) (id int64, err error) {
|
||||||
|
var ids []int64
|
||||||
|
if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if len(ids) == 0 {
|
||||||
|
err = &NotFoundError{apikey.Label}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
return ids[0], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// FirstIDX is like FirstID, but panics if an error occurs.
|
||||||
|
func (_q *APIKeyQuery) FirstIDX(ctx context.Context) int64 {
|
||||||
|
id, err := _q.FirstID(ctx)
|
||||||
|
if err != nil && !IsNotFound(err) {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return id
|
||||||
|
}
|
||||||
|
|
||||||
|
// Only returns a single APIKey entity found by the query, ensuring it only returns one.
|
||||||
|
// Returns a *NotSingularError when more than one APIKey entity is found.
|
||||||
|
// Returns a *NotFoundError when no APIKey entities are found.
|
||||||
|
func (_q *APIKeyQuery) Only(ctx context.Context) (*APIKey, error) {
|
||||||
|
nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
switch len(nodes) {
|
||||||
|
case 1:
|
||||||
|
return nodes[0], nil
|
||||||
|
case 0:
|
||||||
|
return nil, &NotFoundError{apikey.Label}
|
||||||
|
default:
|
||||||
|
return nil, &NotSingularError{apikey.Label}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnlyX is like Only, but panics if an error occurs.
|
||||||
|
func (_q *APIKeyQuery) OnlyX(ctx context.Context) *APIKey {
|
||||||
|
node, err := _q.Only(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return node
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnlyID is like Only, but returns the only APIKey ID in the query.
|
||||||
|
// Returns a *NotSingularError when more than one APIKey ID is found.
|
||||||
|
// Returns a *NotFoundError when no entities are found.
|
||||||
|
func (_q *APIKeyQuery) OnlyID(ctx context.Context) (id int64, err error) {
|
||||||
|
var ids []int64
|
||||||
|
if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
switch len(ids) {
|
||||||
|
case 1:
|
||||||
|
id = ids[0]
|
||||||
|
case 0:
|
||||||
|
err = &NotFoundError{apikey.Label}
|
||||||
|
default:
|
||||||
|
err = &NotSingularError{apikey.Label}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnlyIDX is like OnlyID, but panics if an error occurs.
|
||||||
|
func (_q *APIKeyQuery) OnlyIDX(ctx context.Context) int64 {
|
||||||
|
id, err := _q.OnlyID(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return id
|
||||||
|
}
|
||||||
|
|
||||||
|
// All executes the query and returns a list of APIKeys.
|
||||||
|
func (_q *APIKeyQuery) All(ctx context.Context) ([]*APIKey, error) {
|
||||||
|
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll)
|
||||||
|
if err := _q.prepareQuery(ctx); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
qr := querierAll[[]*APIKey, *APIKeyQuery]()
|
||||||
|
return withInterceptors[[]*APIKey](ctx, _q, qr, _q.inters)
|
||||||
|
}
|
||||||
|
|
||||||
|
// AllX is like All, but panics if an error occurs.
|
||||||
|
func (_q *APIKeyQuery) AllX(ctx context.Context) []*APIKey {
|
||||||
|
nodes, err := _q.All(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return nodes
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDs executes the query and returns a list of APIKey IDs.
|
||||||
|
func (_q *APIKeyQuery) IDs(ctx context.Context) (ids []int64, err error) {
|
||||||
|
if _q.ctx.Unique == nil && _q.path != nil {
|
||||||
|
_q.Unique(true)
|
||||||
|
}
|
||||||
|
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs)
|
||||||
|
if err = _q.Select(apikey.FieldID).Scan(ctx, &ids); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return ids, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDsX is like IDs, but panics if an error occurs.
|
||||||
|
func (_q *APIKeyQuery) IDsX(ctx context.Context) []int64 {
|
||||||
|
ids, err := _q.IDs(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return ids
|
||||||
|
}
|
||||||
|
|
||||||
|
// Count returns the count of the given query.
|
||||||
|
func (_q *APIKeyQuery) Count(ctx context.Context) (int, error) {
|
||||||
|
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount)
|
||||||
|
if err := _q.prepareQuery(ctx); err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
return withInterceptors[int](ctx, _q, querierCount[*APIKeyQuery](), _q.inters)
|
||||||
|
}
|
||||||
|
|
||||||
|
// CountX is like Count, but panics if an error occurs.
|
||||||
|
func (_q *APIKeyQuery) CountX(ctx context.Context) int {
|
||||||
|
count, err := _q.Count(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return count
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exist returns true if the query has elements in the graph.
|
||||||
|
func (_q *APIKeyQuery) Exist(ctx context.Context) (bool, error) {
|
||||||
|
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist)
|
||||||
|
switch _, err := _q.FirstID(ctx); {
|
||||||
|
case IsNotFound(err):
|
||||||
|
return false, nil
|
||||||
|
case err != nil:
|
||||||
|
return false, fmt.Errorf("ent: check existence: %w", err)
|
||||||
|
default:
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExistX is like Exist, but panics if an error occurs.
|
||||||
|
func (_q *APIKeyQuery) ExistX(ctx context.Context) bool {
|
||||||
|
exist, err := _q.Exist(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return exist
|
||||||
|
}
|
||||||
|
|
||||||
|
// Clone returns a duplicate of the APIKeyQuery builder, including all associated steps. It can be
// used to prepare common query builders and use them differently after the clone is made.
func (_q *APIKeyQuery) Clone() *APIKeyQuery {
	if _q == nil {
		return nil
	}
	// Slices are copied so mutations on the clone do not leak into the source.
	// NOTE(review): the modifiers slice is not copied here — confirm clones are
	// not expected to carry custom SQL modifiers before relying on that.
	return &APIKeyQuery{
		config:        _q.config,
		ctx:           _q.ctx.Clone(),
		order:         append([]apikey.OrderOption{}, _q.order...),
		inters:        append([]Interceptor{}, _q.inters...),
		predicates:    append([]predicate.APIKey{}, _q.predicates...),
		withUser:      _q.withUser.Clone(),
		withGroup:     _q.withGroup.Clone(),
		withUsageLogs: _q.withUsageLogs.Clone(),
		// clone intermediate query.
		sql:  _q.sql.Clone(),
		path: _q.path,
	}
}
|
||||||
|
|
||||||
|
// WithUser tells the query-builder to eager-load the nodes that are connected to
|
||||||
|
// the "user" edge. The optional arguments are used to configure the query builder of the edge.
|
||||||
|
func (_q *APIKeyQuery) WithUser(opts ...func(*UserQuery)) *APIKeyQuery {
|
||||||
|
query := (&UserClient{config: _q.config}).Query()
|
||||||
|
for _, opt := range opts {
|
||||||
|
opt(query)
|
||||||
|
}
|
||||||
|
_q.withUser = query
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithGroup tells the query-builder to eager-load the nodes that are connected to
|
||||||
|
// the "group" edge. The optional arguments are used to configure the query builder of the edge.
|
||||||
|
func (_q *APIKeyQuery) WithGroup(opts ...func(*GroupQuery)) *APIKeyQuery {
|
||||||
|
query := (&GroupClient{config: _q.config}).Query()
|
||||||
|
for _, opt := range opts {
|
||||||
|
opt(query)
|
||||||
|
}
|
||||||
|
_q.withGroup = query
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithUsageLogs tells the query-builder to eager-load the nodes that are connected to
|
||||||
|
// the "usage_logs" edge. The optional arguments are used to configure the query builder of the edge.
|
||||||
|
func (_q *APIKeyQuery) WithUsageLogs(opts ...func(*UsageLogQuery)) *APIKeyQuery {
|
||||||
|
query := (&UsageLogClient{config: _q.config}).Query()
|
||||||
|
for _, opt := range opts {
|
||||||
|
opt(query)
|
||||||
|
}
|
||||||
|
_q.withUsageLogs = query
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// GroupBy is used to group vertices by one or more fields/columns.
|
||||||
|
// It is often used with aggregate functions, like: count, max, mean, min, sum.
|
||||||
|
//
|
||||||
|
// Example:
|
||||||
|
//
|
||||||
|
// var v []struct {
|
||||||
|
// CreatedAt time.Time `json:"created_at,omitempty"`
|
||||||
|
// Count int `json:"count,omitempty"`
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// client.APIKey.Query().
|
||||||
|
// GroupBy(apikey.FieldCreatedAt).
|
||||||
|
// Aggregate(ent.Count()).
|
||||||
|
// Scan(ctx, &v)
|
||||||
|
func (_q *APIKeyQuery) GroupBy(field string, fields ...string) *APIKeyGroupBy {
|
||||||
|
_q.ctx.Fields = append([]string{field}, fields...)
|
||||||
|
grbuild := &APIKeyGroupBy{build: _q}
|
||||||
|
grbuild.flds = &_q.ctx.Fields
|
||||||
|
grbuild.label = apikey.Label
|
||||||
|
grbuild.scan = grbuild.Scan
|
||||||
|
return grbuild
|
||||||
|
}
|
||||||
|
|
||||||
|
// Select allows the selection one or more fields/columns for the given query,
|
||||||
|
// instead of selecting all fields in the entity.
|
||||||
|
//
|
||||||
|
// Example:
|
||||||
|
//
|
||||||
|
// var v []struct {
|
||||||
|
// CreatedAt time.Time `json:"created_at,omitempty"`
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// client.APIKey.Query().
|
||||||
|
// Select(apikey.FieldCreatedAt).
|
||||||
|
// Scan(ctx, &v)
|
||||||
|
func (_q *APIKeyQuery) Select(fields ...string) *APIKeySelect {
|
||||||
|
_q.ctx.Fields = append(_q.ctx.Fields, fields...)
|
||||||
|
sbuild := &APIKeySelect{APIKeyQuery: _q}
|
||||||
|
sbuild.label = apikey.Label
|
||||||
|
sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan
|
||||||
|
return sbuild
|
||||||
|
}
|
||||||
|
|
||||||
|
// Aggregate returns a APIKeySelect configured with the given aggregations.
|
||||||
|
func (_q *APIKeyQuery) Aggregate(fns ...AggregateFunc) *APIKeySelect {
|
||||||
|
return _q.Select().Aggregate(fns...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// prepareQuery validates the builder before execution: it runs all registered
// traverser interceptors, verifies every selected field is a valid apikey
// column, and materializes the lazy traversal path (if any) into _q.sql.
func (_q *APIKeyQuery) prepareQuery(ctx context.Context) error {
	for _, inter := range _q.inters {
		if inter == nil {
			return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
		}
		if trv, ok := inter.(Traverser); ok {
			if err := trv.Traverse(ctx, _q); err != nil {
				return err
			}
		}
	}
	for _, f := range _q.ctx.Fields {
		if !apikey.ValidColumn(f) {
			return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
		}
	}
	if _q.path != nil {
		// Resolve the deferred traversal selector built by Query* helpers.
		prev, err := _q.path(ctx)
		if err != nil {
			return err
		}
		_q.sql = prev
	}
	return nil
}
|
||||||
|
|
||||||
|
// sqlAll scans all matching APIKey rows into entities and, when configured via
// the with* fields, eager-loads the user, group, and usage_logs edges onto
// each node.
func (_q *APIKeyQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*APIKey, error) {
	var (
		nodes = []*APIKey{}
		_spec = _q.querySpec()
		// loadedTypes flags which edges were requested; it is attached to every
		// node so edge accessors can distinguish "not loaded" from "empty".
		loadedTypes = [3]bool{
			_q.withUser != nil,
			_q.withGroup != nil,
			_q.withUsageLogs != nil,
		}
	)
	_spec.ScanValues = func(columns []string) ([]any, error) {
		return (*APIKey).scanValues(nil, columns)
	}
	// Assign appends one node per scanned row and populates its columns.
	_spec.Assign = func(columns []string, values []any) error {
		node := &APIKey{config: _q.config}
		nodes = append(nodes, node)
		node.Edges.loadedTypes = loadedTypes
		return node.assignValues(columns, values)
	}
	if len(_q.modifiers) > 0 {
		_spec.Modifiers = _q.modifiers
	}
	for i := range hooks {
		hooks[i](ctx, _spec)
	}
	if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil {
		return nil, err
	}
	if len(nodes) == 0 {
		return nodes, nil
	}
	// Eager-load the requested edges after the base rows are in place.
	if query := _q.withUser; query != nil {
		if err := _q.loadUser(ctx, query, nodes, nil,
			func(n *APIKey, e *User) { n.Edges.User = e }); err != nil {
			return nil, err
		}
	}
	if query := _q.withGroup; query != nil {
		if err := _q.loadGroup(ctx, query, nodes, nil,
			func(n *APIKey, e *Group) { n.Edges.Group = e }); err != nil {
			return nil, err
		}
	}
	if query := _q.withUsageLogs; query != nil {
		if err := _q.loadUsageLogs(ctx, query, nodes,
			func(n *APIKey) { n.Edges.UsageLogs = []*UsageLog{} },
			func(n *APIKey, e *UsageLog) { n.Edges.UsageLogs = append(n.Edges.UsageLogs, e) }); err != nil {
			return nil, err
		}
	}
	return nodes, nil
}
|
||||||
|
|
||||||
|
// loadUser eager-loads the "user" edge for nodes via the user_id FK,
// issuing a single IN-query and assigning each neighbor to its owners.
func (_q *APIKeyQuery) loadUser(ctx context.Context, query *UserQuery, nodes []*APIKey, init func(*APIKey), assign func(*APIKey, *User)) error {
	ids := make([]int64, 0, len(nodes))
	nodeids := make(map[int64][]*APIKey)
	for i := range nodes {
		fk := nodes[i].UserID
		if _, ok := nodeids[fk]; !ok {
			// Collect each distinct FK only once for the IN clause.
			ids = append(ids, fk)
		}
		nodeids[fk] = append(nodeids[fk], nodes[i])
	}
	if len(ids) == 0 {
		return nil
	}
	query.Where(user.IDIn(ids...))
	neighbors, err := query.All(ctx)
	if err != nil {
		return err
	}
	for _, n := range neighbors {
		nodes, ok := nodeids[n.ID]
		if !ok {
			return fmt.Errorf(`unexpected foreign-key "user_id" returned %v`, n.ID)
		}
		for i := range nodes {
			assign(nodes[i], n)
		}
	}
	return nil
}
|
||||||
|
// loadGroup eager-loads the "group" edge for nodes via the nullable
// group_id FK, issuing a single IN-query over the distinct FK values.
func (_q *APIKeyQuery) loadGroup(ctx context.Context, query *GroupQuery, nodes []*APIKey, init func(*APIKey), assign func(*APIKey, *Group)) error {
	ids := make([]int64, 0, len(nodes))
	nodeids := make(map[int64][]*APIKey)
	for i := range nodes {
		// GroupID is optional; keys without a group are skipped.
		if nodes[i].GroupID == nil {
			continue
		}
		fk := *nodes[i].GroupID
		if _, ok := nodeids[fk]; !ok {
			ids = append(ids, fk)
		}
		nodeids[fk] = append(nodeids[fk], nodes[i])
	}
	if len(ids) == 0 {
		return nil
	}
	query.Where(group.IDIn(ids...))
	neighbors, err := query.All(ctx)
	if err != nil {
		return err
	}
	for _, n := range neighbors {
		nodes, ok := nodeids[n.ID]
		if !ok {
			return fmt.Errorf(`unexpected foreign-key "group_id" returned %v`, n.ID)
		}
		for i := range nodes {
			assign(nodes[i], n)
		}
	}
	return nil
}
|
||||||
|
// loadUsageLogs eager-loads the "usage_logs" edge (one-to-many, keyed by
// api_key_id) and groups the neighbors back onto their parent API keys.
func (_q *APIKeyQuery) loadUsageLogs(ctx context.Context, query *UsageLogQuery, nodes []*APIKey, init func(*APIKey), assign func(*APIKey, *UsageLog)) error {
	fks := make([]driver.Value, 0, len(nodes))
	nodeids := make(map[int64]*APIKey)
	for i := range nodes {
		fks = append(fks, nodes[i].ID)
		nodeids[nodes[i].ID] = nodes[i]
		if init != nil {
			init(nodes[i])
		}
	}
	// If a custom field-set was selected, make sure the FK column is
	// fetched too — it is needed below to map neighbors to parents.
	if len(query.ctx.Fields) > 0 {
		query.ctx.AppendFieldOnce(usagelog.FieldAPIKeyID)
	}
	query.Where(predicate.UsageLog(func(s *sql.Selector) {
		s.Where(sql.InValues(s.C(apikey.UsageLogsColumn), fks...))
	}))
	neighbors, err := query.All(ctx)
	if err != nil {
		return err
	}
	for _, n := range neighbors {
		fk := n.APIKeyID
		node, ok := nodeids[fk]
		if !ok {
			return fmt.Errorf(`unexpected referenced foreign-key "api_key_id" returned %v for node %v`, fk, n.ID)
		}
		assign(node, n)
	}
	return nil
}
|
||||||
|
|
||||||
|
func (_q *APIKeyQuery) sqlCount(ctx context.Context) (int, error) {
|
||||||
|
_spec := _q.querySpec()
|
||||||
|
if len(_q.modifiers) > 0 {
|
||||||
|
_spec.Modifiers = _q.modifiers
|
||||||
|
}
|
||||||
|
_spec.Node.Columns = _q.ctx.Fields
|
||||||
|
if len(_q.ctx.Fields) > 0 {
|
||||||
|
_spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique
|
||||||
|
}
|
||||||
|
return sqlgraph.CountNodes(ctx, _q.driver, _spec)
|
||||||
|
}
|
||||||
|
|
||||||
|
// querySpec builds the sqlgraph.QuerySpec describing this query: table,
// selected columns, uniqueness, predicates, ordering, and limit/offset.
func (_q *APIKeyQuery) querySpec() *sqlgraph.QuerySpec {
	_spec := sqlgraph.NewQuerySpec(apikey.Table, apikey.Columns, sqlgraph.NewFieldSpec(apikey.FieldID, field.TypeInt64))
	_spec.From = _q.sql
	if unique := _q.ctx.Unique; unique != nil {
		_spec.Unique = *unique
	} else if _q.path != nil {
		// Graph traversals may yield duplicate rows; default to unique.
		_spec.Unique = true
	}
	if fields := _q.ctx.Fields; len(fields) > 0 {
		_spec.Node.Columns = make([]string, 0, len(fields))
		// The ID column is always fetched first.
		_spec.Node.Columns = append(_spec.Node.Columns, apikey.FieldID)
		for i := range fields {
			if fields[i] != apikey.FieldID {
				_spec.Node.Columns = append(_spec.Node.Columns, fields[i])
			}
		}
		// FK columns are required when the corresponding edge is eager-loaded.
		if _q.withUser != nil {
			_spec.Node.AddColumnOnce(apikey.FieldUserID)
		}
		if _q.withGroup != nil {
			_spec.Node.AddColumnOnce(apikey.FieldGroupID)
		}
	}
	if ps := _q.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	if limit := _q.ctx.Limit; limit != nil {
		_spec.Limit = *limit
	}
	if offset := _q.ctx.Offset; offset != nil {
		_spec.Offset = *offset
	}
	if ps := _q.order; len(ps) > 0 {
		_spec.Order = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	return _spec
}
|
||||||
|
|
||||||
|
// sqlQuery builds the raw SQL selector for this query, applying selected
// columns, DISTINCT, modifiers, predicates, ordering, offset and limit.
func (_q *APIKeyQuery) sqlQuery(ctx context.Context) *sql.Selector {
	builder := sql.Dialect(_q.driver.Dialect())
	t1 := builder.Table(apikey.Table)
	columns := _q.ctx.Fields
	if len(columns) == 0 {
		columns = apikey.Columns
	}
	selector := builder.Select(t1.Columns(columns...)...).From(t1)
	if _q.sql != nil {
		// Continue from a selector produced by a previous traversal step.
		selector = _q.sql
		selector.Select(selector.Columns(columns...)...)
	}
	if _q.ctx.Unique != nil && *_q.ctx.Unique {
		selector.Distinct()
	}
	for _, m := range _q.modifiers {
		m(selector)
	}
	for _, p := range _q.predicates {
		p(selector)
	}
	for _, p := range _q.order {
		p(selector)
	}
	if offset := _q.ctx.Offset; offset != nil {
		// limit is mandatory for offset clause. We start
		// with default value, and override it below if needed.
		selector.Offset(*offset).Limit(math.MaxInt32)
	}
	if limit := _q.ctx.Limit; limit != nil {
		selector.Limit(*limit)
	}
	return selector
}
|
||||||
|
|
||||||
|
// ForUpdate locks the selected rows against concurrent updates, and prevent them from being
|
||||||
|
// updated, deleted or "selected ... for update" by other sessions, until the transaction is
|
||||||
|
// either committed or rolled-back.
|
||||||
|
func (_q *APIKeyQuery) ForUpdate(opts ...sql.LockOption) *APIKeyQuery {
|
||||||
|
if _q.driver.Dialect() == dialect.Postgres {
|
||||||
|
_q.Unique(false)
|
||||||
|
}
|
||||||
|
_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
|
||||||
|
s.ForUpdate(opts...)
|
||||||
|
})
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock
|
||||||
|
// on any rows that are read. Other sessions can read the rows, but cannot modify them
|
||||||
|
// until your transaction commits.
|
||||||
|
func (_q *APIKeyQuery) ForShare(opts ...sql.LockOption) *APIKeyQuery {
|
||||||
|
if _q.driver.Dialect() == dialect.Postgres {
|
||||||
|
_q.Unique(false)
|
||||||
|
}
|
||||||
|
_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
|
||||||
|
s.ForShare(opts...)
|
||||||
|
})
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// APIKeyGroupBy is the group-by builder for APIKey entities.
type APIKeyGroupBy struct {
	selector
	// build is the underlying query the group-by is executed on.
	build *APIKeyQuery
}
|
||||||
|
|
||||||
|
// Aggregate adds the given aggregation functions to the group-by query.
// It returns the builder to allow call chaining.
func (_g *APIKeyGroupBy) Aggregate(fns ...AggregateFunc) *APIKeyGroupBy {
	_g.fns = append(_g.fns, fns...)
	return _g
}
|
||||||
|
|
||||||
|
// Scan applies the selector query and scans the result into the given value.
|
||||||
|
func (_g *APIKeyGroupBy) Scan(ctx context.Context, v any) error {
|
||||||
|
ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy)
|
||||||
|
if err := _g.build.prepareQuery(ctx); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return scanWithInterceptors[*APIKeyQuery, *APIKeyGroupBy](ctx, _g.build, _g, _g.build.inters, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
// sqlScan executes the group-by query and scans the result rows into v.
func (_g *APIKeyGroupBy) sqlScan(ctx context.Context, root *APIKeyQuery, v any) error {
	selector := root.sqlQuery(ctx).Select()
	aggregation := make([]string, 0, len(_g.fns))
	for _, fn := range _g.fns {
		aggregation = append(aggregation, fn(selector))
	}
	if len(selector.SelectedColumns()) == 0 {
		// Nothing explicitly selected: select the group-by fields
		// followed by the aggregation expressions.
		columns := make([]string, 0, len(*_g.flds)+len(_g.fns))
		for _, f := range *_g.flds {
			columns = append(columns, selector.C(f))
		}
		columns = append(columns, aggregation...)
		selector.Select(columns...)
	}
	selector.GroupBy(selector.Columns(*_g.flds...)...)
	if err := selector.Err(); err != nil {
		return err
	}
	rows := &sql.Rows{}
	query, args := selector.Query()
	if err := _g.build.driver.Query(ctx, query, args, rows); err != nil {
		return err
	}
	defer rows.Close()
	return sql.ScanSlice(rows, v)
}
|
||||||
|
|
||||||
|
// APIKeySelect is the builder for selecting fields of APIKey entities.
type APIKeySelect struct {
	// Embeds the query so that Select can reuse its predicates and context.
	*APIKeyQuery
	selector
}
|
||||||
|
|
||||||
|
// Aggregate adds the given aggregation functions to the selector query.
// It returns the builder to allow call chaining.
func (_s *APIKeySelect) Aggregate(fns ...AggregateFunc) *APIKeySelect {
	_s.fns = append(_s.fns, fns...)
	return _s
}
|
||||||
|
|
||||||
|
// Scan applies the selector query and scans the result into the given value.
|
||||||
|
func (_s *APIKeySelect) Scan(ctx context.Context, v any) error {
|
||||||
|
ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect)
|
||||||
|
if err := _s.prepareQuery(ctx); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return scanWithInterceptors[*APIKeyQuery, *APIKeySelect](ctx, _s.APIKeyQuery, _s, _s.inters, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
// sqlScan executes the selection query and scans the result rows into v.
func (_s *APIKeySelect) sqlScan(ctx context.Context, root *APIKeyQuery, v any) error {
	selector := root.sqlQuery(ctx)
	aggregation := make([]string, 0, len(_s.fns))
	for _, fn := range _s.fns {
		aggregation = append(aggregation, fn(selector))
	}
	// Select aggregations alone when no fields were chosen, or append
	// them to the existing field selection otherwise.
	switch n := len(*_s.selector.flds); {
	case n == 0 && len(aggregation) > 0:
		selector.Select(aggregation...)
	case n != 0 && len(aggregation) > 0:
		selector.AppendSelect(aggregation...)
	}
	rows := &sql.Rows{}
	query, args := selector.Query()
	if err := _s.driver.Query(ctx, query, args, rows); err != nil {
		return err
	}
	defer rows.Close()
	return sql.ScanSlice(rows, v)
}
|
||||||
1632
backend/ent/apikey_update.go
Normal file
266
backend/ent/authidentity.go
Normal file
@@ -0,0 +1,266 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package ent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"entgo.io/ent"
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/authidentity"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/user"
|
||||||
|
)
|
||||||
|
|
||||||
|
// AuthIdentity is the model entity for the AuthIdentity schema.
type AuthIdentity struct {
	config `json:"-"`
	// ID of the ent.
	ID int64 `json:"id,omitempty"`
	// CreatedAt holds the value of the "created_at" field.
	CreatedAt time.Time `json:"created_at,omitempty"`
	// UpdatedAt holds the value of the "updated_at" field.
	UpdatedAt time.Time `json:"updated_at,omitempty"`
	// UserID holds the value of the "user_id" field.
	UserID int64 `json:"user_id,omitempty"`
	// ProviderType holds the value of the "provider_type" field.
	ProviderType string `json:"provider_type,omitempty"`
	// ProviderKey holds the value of the "provider_key" field.
	ProviderKey string `json:"provider_key,omitempty"`
	// ProviderSubject holds the value of the "provider_subject" field.
	ProviderSubject string `json:"provider_subject,omitempty"`
	// VerifiedAt holds the value of the "verified_at" field.
	// Nil when the identity has not been verified (nullable column).
	VerifiedAt *time.Time `json:"verified_at,omitempty"`
	// Issuer holds the value of the "issuer" field. Nil when unset.
	Issuer *string `json:"issuer,omitempty"`
	// Metadata holds the value of the "metadata" field (stored as JSON).
	Metadata map[string]interface{} `json:"metadata,omitempty"`
	// Edges holds the relations/edges for other nodes in the graph.
	// The values are being populated by the AuthIdentityQuery when eager-loading is set.
	Edges AuthIdentityEdges `json:"edges"`
	// selectValues holds values of columns selected dynamically
	// (via modifiers/order); retrieved with Value.
	selectValues sql.SelectValues
}
|
||||||
|
|
||||||
|
// AuthIdentityEdges holds the relations/edges for other nodes in the graph.
type AuthIdentityEdges struct {
	// User holds the value of the user edge.
	User *User `json:"user,omitempty"`
	// Channels holds the value of the channels edge.
	Channels []*AuthIdentityChannel `json:"channels,omitempty"`
	// AdoptionDecisions holds the value of the adoption_decisions edge.
	AdoptionDecisions []*IdentityAdoptionDecision `json:"adoption_decisions,omitempty"`
	// loadedTypes holds the information for reporting if a
	// type was loaded (or requested) in eager-loading or not.
	// Index order: 0=user, 1=channels, 2=adoption_decisions.
	loadedTypes [3]bool
}
|
||||||
|
|
||||||
|
// UserOrErr returns the User value or an error if the edge
|
||||||
|
// was not loaded in eager-loading, or loaded but was not found.
|
||||||
|
func (e AuthIdentityEdges) UserOrErr() (*User, error) {
|
||||||
|
if e.User != nil {
|
||||||
|
return e.User, nil
|
||||||
|
} else if e.loadedTypes[0] {
|
||||||
|
return nil, &NotFoundError{label: user.Label}
|
||||||
|
}
|
||||||
|
return nil, &NotLoadedError{edge: "user"}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ChannelsOrErr returns the Channels value or an error if the edge
|
||||||
|
// was not loaded in eager-loading.
|
||||||
|
func (e AuthIdentityEdges) ChannelsOrErr() ([]*AuthIdentityChannel, error) {
|
||||||
|
if e.loadedTypes[1] {
|
||||||
|
return e.Channels, nil
|
||||||
|
}
|
||||||
|
return nil, &NotLoadedError{edge: "channels"}
|
||||||
|
}
|
||||||
|
|
||||||
|
// AdoptionDecisionsOrErr returns the AdoptionDecisions value or an error if the edge
|
||||||
|
// was not loaded in eager-loading.
|
||||||
|
func (e AuthIdentityEdges) AdoptionDecisionsOrErr() ([]*IdentityAdoptionDecision, error) {
|
||||||
|
if e.loadedTypes[2] {
|
||||||
|
return e.AdoptionDecisions, nil
|
||||||
|
}
|
||||||
|
return nil, &NotLoadedError{edge: "adoption_decisions"}
|
||||||
|
}
|
||||||
|
|
||||||
|
// scanValues returns the types for scanning values from sql.Rows.
func (*AuthIdentity) scanValues(columns []string) ([]any, error) {
	values := make([]any, len(columns))
	for i := range columns {
		switch columns[i] {
		case authidentity.FieldMetadata:
			// Stored as JSON bytes; decoded later in assignValues.
			values[i] = new([]byte)
		case authidentity.FieldID, authidentity.FieldUserID:
			values[i] = new(sql.NullInt64)
		case authidentity.FieldProviderType, authidentity.FieldProviderKey, authidentity.FieldProviderSubject, authidentity.FieldIssuer:
			values[i] = new(sql.NullString)
		case authidentity.FieldCreatedAt, authidentity.FieldUpdatedAt, authidentity.FieldVerifiedAt:
			values[i] = new(sql.NullTime)
		default:
			// Unknown columns (e.g. dynamically selected expressions)
			// are captured raw and stored in selectValues.
			values[i] = new(sql.UnknownType)
		}
	}
	return values, nil
}
|
||||||
|
|
||||||
|
// assignValues assigns the values that were returned from sql.Rows (after scanning)
// to the AuthIdentity fields. The scanned value types must match those
// allocated by scanValues for the same column order.
func (_m *AuthIdentity) assignValues(columns []string, values []any) error {
	if m, n := len(values), len(columns); m < n {
		return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
	}
	for i := range columns {
		switch columns[i] {
		case authidentity.FieldID:
			value, ok := values[i].(*sql.NullInt64)
			if !ok {
				return fmt.Errorf("unexpected type %T for field id", value)
			}
			_m.ID = int64(value.Int64)
		case authidentity.FieldCreatedAt:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field created_at", values[i])
			} else if value.Valid {
				_m.CreatedAt = value.Time
			}
		case authidentity.FieldUpdatedAt:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field updated_at", values[i])
			} else if value.Valid {
				_m.UpdatedAt = value.Time
			}
		case authidentity.FieldUserID:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field user_id", values[i])
			} else if value.Valid {
				_m.UserID = value.Int64
			}
		case authidentity.FieldProviderType:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field provider_type", values[i])
			} else if value.Valid {
				_m.ProviderType = value.String
			}
		case authidentity.FieldProviderKey:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field provider_key", values[i])
			} else if value.Valid {
				_m.ProviderKey = value.String
			}
		case authidentity.FieldProviderSubject:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field provider_subject", values[i])
			} else if value.Valid {
				_m.ProviderSubject = value.String
			}
		case authidentity.FieldVerifiedAt:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field verified_at", values[i])
			} else if value.Valid {
				// Nullable field: only allocate the pointer when set.
				_m.VerifiedAt = new(time.Time)
				*_m.VerifiedAt = value.Time
			}
		case authidentity.FieldIssuer:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field issuer", values[i])
			} else if value.Valid {
				_m.Issuer = new(string)
				*_m.Issuer = value.String
			}
		case authidentity.FieldMetadata:
			if value, ok := values[i].(*[]byte); !ok {
				return fmt.Errorf("unexpected type %T for field metadata", values[i])
			} else if value != nil && len(*value) > 0 {
				// Metadata is stored as a JSON document.
				if err := json.Unmarshal(*value, &_m.Metadata); err != nil {
					return fmt.Errorf("unmarshal field metadata: %w", err)
				}
			}
		default:
			// Columns not part of the schema are kept for Value lookups.
			_m.selectValues.Set(columns[i], values[i])
		}
	}
	return nil
}
|
||||||
|
|
||||||
|
// Value returns the ent.Value that was dynamically selected and assigned to the AuthIdentity.
// This includes values selected through modifiers, order, etc.
func (_m *AuthIdentity) Value(name string) (ent.Value, error) {
	return _m.selectValues.Get(name)
}
|
||||||
|
|
||||||
|
// QueryUser queries the "user" edge of the AuthIdentity entity.
// It delegates to the entity's client using its own config.
func (_m *AuthIdentity) QueryUser() *UserQuery {
	return NewAuthIdentityClient(_m.config).QueryUser(_m)
}
|
||||||
|
|
||||||
|
// QueryChannels queries the "channels" edge of the AuthIdentity entity.
// It delegates to the entity's client using its own config.
func (_m *AuthIdentity) QueryChannels() *AuthIdentityChannelQuery {
	return NewAuthIdentityClient(_m.config).QueryChannels(_m)
}
|
||||||
|
|
||||||
|
// QueryAdoptionDecisions queries the "adoption_decisions" edge of the AuthIdentity entity.
// It delegates to the entity's client using its own config.
func (_m *AuthIdentity) QueryAdoptionDecisions() *IdentityAdoptionDecisionQuery {
	return NewAuthIdentityClient(_m.config).QueryAdoptionDecisions(_m)
}
|
||||||
|
|
||||||
|
// Update returns a builder for updating this AuthIdentity.
// Note that you need to call AuthIdentity.Unwrap() before calling this method if this AuthIdentity
// was returned from a transaction, and the transaction was committed or rolled back.
func (_m *AuthIdentity) Update() *AuthIdentityUpdateOne {
	return NewAuthIdentityClient(_m.config).UpdateOne(_m)
}
|
||||||
|
|
||||||
|
// Unwrap unwraps the AuthIdentity entity that was returned from a transaction after it was closed,
|
||||||
|
// so that all future queries will be executed through the driver which created the transaction.
|
||||||
|
func (_m *AuthIdentity) Unwrap() *AuthIdentity {
|
||||||
|
_tx, ok := _m.config.driver.(*txDriver)
|
||||||
|
if !ok {
|
||||||
|
panic("ent: AuthIdentity is not a transactional entity")
|
||||||
|
}
|
||||||
|
_m.config.driver = _tx.drv
|
||||||
|
return _m
|
||||||
|
}
|
||||||
|
|
||||||
|
// String implements the fmt.Stringer. Nullable fields (verified_at, issuer)
// are printed only when set; the field name still appears with no value.
func (_m *AuthIdentity) String() string {
	var builder strings.Builder
	builder.WriteString("AuthIdentity(")
	builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID))
	builder.WriteString("created_at=")
	builder.WriteString(_m.CreatedAt.Format(time.ANSIC))
	builder.WriteString(", ")
	builder.WriteString("updated_at=")
	builder.WriteString(_m.UpdatedAt.Format(time.ANSIC))
	builder.WriteString(", ")
	builder.WriteString("user_id=")
	builder.WriteString(fmt.Sprintf("%v", _m.UserID))
	builder.WriteString(", ")
	builder.WriteString("provider_type=")
	builder.WriteString(_m.ProviderType)
	builder.WriteString(", ")
	builder.WriteString("provider_key=")
	builder.WriteString(_m.ProviderKey)
	builder.WriteString(", ")
	builder.WriteString("provider_subject=")
	builder.WriteString(_m.ProviderSubject)
	builder.WriteString(", ")
	if v := _m.VerifiedAt; v != nil {
		builder.WriteString("verified_at=")
		builder.WriteString(v.Format(time.ANSIC))
	}
	builder.WriteString(", ")
	if v := _m.Issuer; v != nil {
		builder.WriteString("issuer=")
		builder.WriteString(*v)
	}
	builder.WriteString(", ")
	builder.WriteString("metadata=")
	builder.WriteString(fmt.Sprintf("%v", _m.Metadata))
	builder.WriteByte(')')
	return builder.String()
}
|
||||||
|
|
||||||
|
// AuthIdentities is a parsable slice of AuthIdentity.
type AuthIdentities []*AuthIdentity
|
||||||
209
backend/ent/authidentity/authidentity.go
Normal file
@@ -0,0 +1,209 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package authidentity
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
	// Label holds the string label denoting the authidentity type in the database.
	Label = "auth_identity"
	// FieldID holds the string denoting the id field in the database.
	FieldID = "id"
	// FieldCreatedAt holds the string denoting the created_at field in the database.
	FieldCreatedAt = "created_at"
	// FieldUpdatedAt holds the string denoting the updated_at field in the database.
	FieldUpdatedAt = "updated_at"
	// FieldUserID holds the string denoting the user_id field in the database.
	FieldUserID = "user_id"
	// FieldProviderType holds the string denoting the provider_type field in the database.
	FieldProviderType = "provider_type"
	// FieldProviderKey holds the string denoting the provider_key field in the database.
	FieldProviderKey = "provider_key"
	// FieldProviderSubject holds the string denoting the provider_subject field in the database.
	FieldProviderSubject = "provider_subject"
	// FieldVerifiedAt holds the string denoting the verified_at field in the database.
	FieldVerifiedAt = "verified_at"
	// FieldIssuer holds the string denoting the issuer field in the database.
	FieldIssuer = "issuer"
	// FieldMetadata holds the string denoting the metadata field in the database.
	FieldMetadata = "metadata"
	// EdgeUser holds the string denoting the user edge name in mutations.
	EdgeUser = "user"
	// EdgeChannels holds the string denoting the channels edge name in mutations.
	EdgeChannels = "channels"
	// EdgeAdoptionDecisions holds the string denoting the adoption_decisions edge name in mutations.
	EdgeAdoptionDecisions = "adoption_decisions"
	// Table holds the table name of the authidentity in the database.
	Table = "auth_identities"
	// UserTable is the table that holds the user relation/edge.
	// It equals Table because the FK lives on this entity's own table.
	UserTable = "auth_identities"
	// UserInverseTable is the table name for the User entity.
	// It exists in this package in order to avoid circular dependency with the "user" package.
	UserInverseTable = "users"
	// UserColumn is the table column denoting the user relation/edge.
	UserColumn = "user_id"
	// ChannelsTable is the table that holds the channels relation/edge.
	ChannelsTable = "auth_identity_channels"
	// ChannelsInverseTable is the table name for the AuthIdentityChannel entity.
	// It exists in this package in order to avoid circular dependency with the "authidentitychannel" package.
	ChannelsInverseTable = "auth_identity_channels"
	// ChannelsColumn is the table column denoting the channels relation/edge.
	ChannelsColumn = "identity_id"
	// AdoptionDecisionsTable is the table that holds the adoption_decisions relation/edge.
	AdoptionDecisionsTable = "identity_adoption_decisions"
	// AdoptionDecisionsInverseTable is the table name for the IdentityAdoptionDecision entity.
	// It exists in this package in order to avoid circular dependency with the "identityadoptiondecision" package.
	AdoptionDecisionsInverseTable = "identity_adoption_decisions"
	// AdoptionDecisionsColumn is the table column denoting the adoption_decisions relation/edge.
	AdoptionDecisionsColumn = "identity_id"
)
|
||||||
|
|
||||||
|
// Columns holds all SQL columns for authidentity fields.
// The ID column is listed first.
var Columns = []string{
	FieldID,
	FieldCreatedAt,
	FieldUpdatedAt,
	FieldUserID,
	FieldProviderType,
	FieldProviderKey,
	FieldProviderSubject,
	FieldVerifiedAt,
	FieldIssuer,
	FieldMetadata,
}
|
||||||
|
|
||||||
|
// ValidColumn reports if the column name is valid (part of the table columns).
|
||||||
|
func ValidColumn(column string) bool {
|
||||||
|
for i := range Columns {
|
||||||
|
if column == Columns[i] {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
	// DefaultCreatedAt holds the default value on creation for the "created_at" field.
	DefaultCreatedAt func() time.Time
	// DefaultUpdatedAt holds the default value on creation for the "updated_at" field.
	DefaultUpdatedAt func() time.Time
	// UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field.
	UpdateDefaultUpdatedAt func() time.Time
	// ProviderTypeValidator is a validator for the "provider_type" field. It is called by the builders before save.
	ProviderTypeValidator func(string) error
	// ProviderKeyValidator is a validator for the "provider_key" field. It is called by the builders before save.
	ProviderKeyValidator func(string) error
	// ProviderSubjectValidator is a validator for the "provider_subject" field. It is called by the builders before save.
	ProviderSubjectValidator func(string) error
	// DefaultMetadata holds the default value on creation for the "metadata" field.
	DefaultMetadata func() map[string]interface{}
	// NOTE(review): these vars are declared but not assigned here —
	// presumably populated by the generated runtime package; confirm
	// the ent/runtime import is present where the builders are used.
)
|
||||||
|
|
||||||
|
// OrderOption defines the ordering options for the AuthIdentity queries.
type OrderOption func(*sql.Selector)
|
||||||
|
|
||||||
|
// ByID orders the results by the id field.
// opts are forwarded to sql.OrderByField.
func ByID(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldID, opts...).ToFunc()
}
|
||||||
|
|
||||||
|
// ByCreatedAt orders the results by the created_at field.
|
||||||
|
func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByUpdatedAt orders the results by the updated_at field.
|
||||||
|
func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByUserID orders the results by the user_id field.
|
||||||
|
func ByUserID(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldUserID, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByProviderType orders the results by the provider_type field.
|
||||||
|
func ByProviderType(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldProviderType, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByProviderKey orders the results by the provider_key field.
|
||||||
|
func ByProviderKey(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldProviderKey, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByProviderSubject orders the results by the provider_subject field.
|
||||||
|
func ByProviderSubject(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldProviderSubject, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByVerifiedAt orders the results by the verified_at field.
|
||||||
|
func ByVerifiedAt(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldVerifiedAt, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByIssuer orders the results by the issuer field.
|
||||||
|
func ByIssuer(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldIssuer, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByUserField orders the results by user field.
|
||||||
|
func ByUserField(field string, opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return func(s *sql.Selector) {
|
||||||
|
sqlgraph.OrderByNeighborTerms(s, newUserStep(), sql.OrderByField(field, opts...))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByChannelsCount orders the results by channels count.
|
||||||
|
func ByChannelsCount(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return func(s *sql.Selector) {
|
||||||
|
sqlgraph.OrderByNeighborsCount(s, newChannelsStep(), opts...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByChannels orders the results by channels terms.
|
||||||
|
func ByChannels(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
|
||||||
|
return func(s *sql.Selector) {
|
||||||
|
sqlgraph.OrderByNeighborTerms(s, newChannelsStep(), append([]sql.OrderTerm{term}, terms...)...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByAdoptionDecisionsCount orders the results by adoption_decisions count.
|
||||||
|
func ByAdoptionDecisionsCount(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return func(s *sql.Selector) {
|
||||||
|
sqlgraph.OrderByNeighborsCount(s, newAdoptionDecisionsStep(), opts...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByAdoptionDecisions orders the results by adoption_decisions terms.
|
||||||
|
func ByAdoptionDecisions(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
|
||||||
|
return func(s *sql.Selector) {
|
||||||
|
sqlgraph.OrderByNeighborTerms(s, newAdoptionDecisionsStep(), append([]sql.OrderTerm{term}, terms...)...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func newUserStep() *sqlgraph.Step {
|
||||||
|
return sqlgraph.NewStep(
|
||||||
|
sqlgraph.From(Table, FieldID),
|
||||||
|
sqlgraph.To(UserInverseTable, FieldID),
|
||||||
|
sqlgraph.Edge(sqlgraph.M2O, true, UserTable, UserColumn),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
func newChannelsStep() *sqlgraph.Step {
|
||||||
|
return sqlgraph.NewStep(
|
||||||
|
sqlgraph.From(Table, FieldID),
|
||||||
|
sqlgraph.To(ChannelsInverseTable, FieldID),
|
||||||
|
sqlgraph.Edge(sqlgraph.O2M, false, ChannelsTable, ChannelsColumn),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
func newAdoptionDecisionsStep() *sqlgraph.Step {
|
||||||
|
return sqlgraph.NewStep(
|
||||||
|
sqlgraph.From(Table, FieldID),
|
||||||
|
sqlgraph.To(AdoptionDecisionsInverseTable, FieldID),
|
||||||
|
sqlgraph.Edge(sqlgraph.O2M, false, AdoptionDecisionsTable, AdoptionDecisionsColumn),
|
||||||
|
)
|
||||||
|
}
|
||||||
600
backend/ent/authidentity/where.go
Normal file
@@ -0,0 +1,600 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package authidentity
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ID filters vertices based on their ID field.
|
||||||
|
func ID(id int64) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldEQ(FieldID, id))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDEQ applies the EQ predicate on the ID field.
|
||||||
|
func IDEQ(id int64) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldEQ(FieldID, id))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDNEQ applies the NEQ predicate on the ID field.
|
||||||
|
func IDNEQ(id int64) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldNEQ(FieldID, id))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDIn applies the In predicate on the ID field.
|
||||||
|
func IDIn(ids ...int64) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldIn(FieldID, ids...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDNotIn applies the NotIn predicate on the ID field.
|
||||||
|
func IDNotIn(ids ...int64) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldNotIn(FieldID, ids...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDGT applies the GT predicate on the ID field.
|
||||||
|
func IDGT(id int64) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldGT(FieldID, id))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDGTE applies the GTE predicate on the ID field.
|
||||||
|
func IDGTE(id int64) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldGTE(FieldID, id))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDLT applies the LT predicate on the ID field.
|
||||||
|
func IDLT(id int64) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldLT(FieldID, id))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDLTE applies the LTE predicate on the ID field.
|
||||||
|
func IDLTE(id int64) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldLTE(FieldID, id))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ.
|
||||||
|
func CreatedAt(v time.Time) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldEQ(FieldCreatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ.
|
||||||
|
func UpdatedAt(v time.Time) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldEQ(FieldUpdatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UserID applies equality check predicate on the "user_id" field. It's identical to UserIDEQ.
|
||||||
|
func UserID(v int64) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldEQ(FieldUserID, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProviderType applies equality check predicate on the "provider_type" field. It's identical to ProviderTypeEQ.
|
||||||
|
func ProviderType(v string) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldEQ(FieldProviderType, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProviderKey applies equality check predicate on the "provider_key" field. It's identical to ProviderKeyEQ.
|
||||||
|
func ProviderKey(v string) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldEQ(FieldProviderKey, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProviderSubject applies equality check predicate on the "provider_subject" field. It's identical to ProviderSubjectEQ.
|
||||||
|
func ProviderSubject(v string) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldEQ(FieldProviderSubject, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// VerifiedAt applies equality check predicate on the "verified_at" field. It's identical to VerifiedAtEQ.
|
||||||
|
func VerifiedAt(v time.Time) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldEQ(FieldVerifiedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Issuer applies equality check predicate on the "issuer" field. It's identical to IssuerEQ.
|
||||||
|
func Issuer(v string) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldEQ(FieldIssuer, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAtEQ applies the EQ predicate on the "created_at" field.
|
||||||
|
func CreatedAtEQ(v time.Time) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldEQ(FieldCreatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAtNEQ applies the NEQ predicate on the "created_at" field.
|
||||||
|
func CreatedAtNEQ(v time.Time) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldNEQ(FieldCreatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAtIn applies the In predicate on the "created_at" field.
|
||||||
|
func CreatedAtIn(vs ...time.Time) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldIn(FieldCreatedAt, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAtNotIn applies the NotIn predicate on the "created_at" field.
|
||||||
|
func CreatedAtNotIn(vs ...time.Time) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldNotIn(FieldCreatedAt, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAtGT applies the GT predicate on the "created_at" field.
|
||||||
|
func CreatedAtGT(v time.Time) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldGT(FieldCreatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAtGTE applies the GTE predicate on the "created_at" field.
|
||||||
|
func CreatedAtGTE(v time.Time) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldGTE(FieldCreatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAtLT applies the LT predicate on the "created_at" field.
|
||||||
|
func CreatedAtLT(v time.Time) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldLT(FieldCreatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAtLTE applies the LTE predicate on the "created_at" field.
|
||||||
|
func CreatedAtLTE(v time.Time) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldLTE(FieldCreatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedAtEQ applies the EQ predicate on the "updated_at" field.
|
||||||
|
func UpdatedAtEQ(v time.Time) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldEQ(FieldUpdatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field.
|
||||||
|
func UpdatedAtNEQ(v time.Time) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldNEQ(FieldUpdatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedAtIn applies the In predicate on the "updated_at" field.
|
||||||
|
func UpdatedAtIn(vs ...time.Time) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldIn(FieldUpdatedAt, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field.
|
||||||
|
func UpdatedAtNotIn(vs ...time.Time) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldNotIn(FieldUpdatedAt, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedAtGT applies the GT predicate on the "updated_at" field.
|
||||||
|
func UpdatedAtGT(v time.Time) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldGT(FieldUpdatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedAtGTE applies the GTE predicate on the "updated_at" field.
|
||||||
|
func UpdatedAtGTE(v time.Time) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldGTE(FieldUpdatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedAtLT applies the LT predicate on the "updated_at" field.
|
||||||
|
func UpdatedAtLT(v time.Time) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldLT(FieldUpdatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedAtLTE applies the LTE predicate on the "updated_at" field.
|
||||||
|
func UpdatedAtLTE(v time.Time) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldLTE(FieldUpdatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UserIDEQ applies the EQ predicate on the "user_id" field.
|
||||||
|
func UserIDEQ(v int64) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldEQ(FieldUserID, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UserIDNEQ applies the NEQ predicate on the "user_id" field.
|
||||||
|
func UserIDNEQ(v int64) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldNEQ(FieldUserID, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UserIDIn applies the In predicate on the "user_id" field.
|
||||||
|
func UserIDIn(vs ...int64) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldIn(FieldUserID, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UserIDNotIn applies the NotIn predicate on the "user_id" field.
|
||||||
|
func UserIDNotIn(vs ...int64) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldNotIn(FieldUserID, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProviderTypeEQ applies the EQ predicate on the "provider_type" field.
|
||||||
|
func ProviderTypeEQ(v string) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldEQ(FieldProviderType, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProviderTypeNEQ applies the NEQ predicate on the "provider_type" field.
|
||||||
|
func ProviderTypeNEQ(v string) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldNEQ(FieldProviderType, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProviderTypeIn applies the In predicate on the "provider_type" field.
|
||||||
|
func ProviderTypeIn(vs ...string) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldIn(FieldProviderType, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProviderTypeNotIn applies the NotIn predicate on the "provider_type" field.
|
||||||
|
func ProviderTypeNotIn(vs ...string) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldNotIn(FieldProviderType, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProviderTypeGT applies the GT predicate on the "provider_type" field.
|
||||||
|
func ProviderTypeGT(v string) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldGT(FieldProviderType, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProviderTypeGTE applies the GTE predicate on the "provider_type" field.
|
||||||
|
func ProviderTypeGTE(v string) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldGTE(FieldProviderType, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProviderTypeLT applies the LT predicate on the "provider_type" field.
|
||||||
|
func ProviderTypeLT(v string) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldLT(FieldProviderType, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProviderTypeLTE applies the LTE predicate on the "provider_type" field.
|
||||||
|
func ProviderTypeLTE(v string) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldLTE(FieldProviderType, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProviderTypeContains applies the Contains predicate on the "provider_type" field.
|
||||||
|
func ProviderTypeContains(v string) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldContains(FieldProviderType, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProviderTypeHasPrefix applies the HasPrefix predicate on the "provider_type" field.
|
||||||
|
func ProviderTypeHasPrefix(v string) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldHasPrefix(FieldProviderType, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProviderTypeHasSuffix applies the HasSuffix predicate on the "provider_type" field.
|
||||||
|
func ProviderTypeHasSuffix(v string) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldHasSuffix(FieldProviderType, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProviderTypeEqualFold applies the EqualFold predicate on the "provider_type" field.
|
||||||
|
func ProviderTypeEqualFold(v string) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldEqualFold(FieldProviderType, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProviderTypeContainsFold applies the ContainsFold predicate on the "provider_type" field.
|
||||||
|
func ProviderTypeContainsFold(v string) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldContainsFold(FieldProviderType, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProviderKeyEQ applies the EQ predicate on the "provider_key" field.
|
||||||
|
func ProviderKeyEQ(v string) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldEQ(FieldProviderKey, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProviderKeyNEQ applies the NEQ predicate on the "provider_key" field.
|
||||||
|
func ProviderKeyNEQ(v string) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldNEQ(FieldProviderKey, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProviderKeyIn applies the In predicate on the "provider_key" field.
|
||||||
|
func ProviderKeyIn(vs ...string) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldIn(FieldProviderKey, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProviderKeyNotIn applies the NotIn predicate on the "provider_key" field.
|
||||||
|
func ProviderKeyNotIn(vs ...string) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldNotIn(FieldProviderKey, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProviderKeyGT applies the GT predicate on the "provider_key" field.
|
||||||
|
func ProviderKeyGT(v string) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldGT(FieldProviderKey, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProviderKeyGTE applies the GTE predicate on the "provider_key" field.
|
||||||
|
func ProviderKeyGTE(v string) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldGTE(FieldProviderKey, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProviderKeyLT applies the LT predicate on the "provider_key" field.
|
||||||
|
func ProviderKeyLT(v string) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldLT(FieldProviderKey, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProviderKeyLTE applies the LTE predicate on the "provider_key" field.
|
||||||
|
func ProviderKeyLTE(v string) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldLTE(FieldProviderKey, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProviderKeyContains applies the Contains predicate on the "provider_key" field.
|
||||||
|
func ProviderKeyContains(v string) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldContains(FieldProviderKey, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProviderKeyHasPrefix applies the HasPrefix predicate on the "provider_key" field.
|
||||||
|
func ProviderKeyHasPrefix(v string) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldHasPrefix(FieldProviderKey, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProviderKeyHasSuffix applies the HasSuffix predicate on the "provider_key" field.
|
||||||
|
func ProviderKeyHasSuffix(v string) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldHasSuffix(FieldProviderKey, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProviderKeyEqualFold applies the EqualFold predicate on the "provider_key" field.
|
||||||
|
func ProviderKeyEqualFold(v string) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldEqualFold(FieldProviderKey, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProviderKeyContainsFold applies the ContainsFold predicate on the "provider_key" field.
|
||||||
|
func ProviderKeyContainsFold(v string) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldContainsFold(FieldProviderKey, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProviderSubjectEQ applies the EQ predicate on the "provider_subject" field.
|
||||||
|
func ProviderSubjectEQ(v string) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldEQ(FieldProviderSubject, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProviderSubjectNEQ applies the NEQ predicate on the "provider_subject" field.
|
||||||
|
func ProviderSubjectNEQ(v string) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldNEQ(FieldProviderSubject, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProviderSubjectIn applies the In predicate on the "provider_subject" field.
|
||||||
|
func ProviderSubjectIn(vs ...string) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldIn(FieldProviderSubject, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProviderSubjectNotIn applies the NotIn predicate on the "provider_subject" field.
|
||||||
|
func ProviderSubjectNotIn(vs ...string) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldNotIn(FieldProviderSubject, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProviderSubjectGT applies the GT predicate on the "provider_subject" field.
|
||||||
|
func ProviderSubjectGT(v string) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldGT(FieldProviderSubject, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProviderSubjectGTE applies the GTE predicate on the "provider_subject" field.
|
||||||
|
func ProviderSubjectGTE(v string) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldGTE(FieldProviderSubject, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProviderSubjectLT applies the LT predicate on the "provider_subject" field.
|
||||||
|
func ProviderSubjectLT(v string) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldLT(FieldProviderSubject, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProviderSubjectLTE applies the LTE predicate on the "provider_subject" field.
|
||||||
|
func ProviderSubjectLTE(v string) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldLTE(FieldProviderSubject, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProviderSubjectContains applies the Contains predicate on the "provider_subject" field.
|
||||||
|
func ProviderSubjectContains(v string) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldContains(FieldProviderSubject, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProviderSubjectHasPrefix applies the HasPrefix predicate on the "provider_subject" field.
|
||||||
|
func ProviderSubjectHasPrefix(v string) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldHasPrefix(FieldProviderSubject, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProviderSubjectHasSuffix applies the HasSuffix predicate on the "provider_subject" field.
|
||||||
|
func ProviderSubjectHasSuffix(v string) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldHasSuffix(FieldProviderSubject, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProviderSubjectEqualFold applies the EqualFold predicate on the "provider_subject" field.
|
||||||
|
func ProviderSubjectEqualFold(v string) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldEqualFold(FieldProviderSubject, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProviderSubjectContainsFold applies the ContainsFold predicate on the "provider_subject" field.
|
||||||
|
func ProviderSubjectContainsFold(v string) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldContainsFold(FieldProviderSubject, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// VerifiedAtEQ applies the EQ predicate on the "verified_at" field.
|
||||||
|
func VerifiedAtEQ(v time.Time) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldEQ(FieldVerifiedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// VerifiedAtNEQ applies the NEQ predicate on the "verified_at" field.
|
||||||
|
func VerifiedAtNEQ(v time.Time) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldNEQ(FieldVerifiedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// VerifiedAtIn applies the In predicate on the "verified_at" field.
|
||||||
|
func VerifiedAtIn(vs ...time.Time) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldIn(FieldVerifiedAt, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// VerifiedAtNotIn applies the NotIn predicate on the "verified_at" field.
|
||||||
|
func VerifiedAtNotIn(vs ...time.Time) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldNotIn(FieldVerifiedAt, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// VerifiedAtGT applies the GT predicate on the "verified_at" field.
|
||||||
|
func VerifiedAtGT(v time.Time) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldGT(FieldVerifiedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// VerifiedAtGTE applies the GTE predicate on the "verified_at" field.
|
||||||
|
func VerifiedAtGTE(v time.Time) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldGTE(FieldVerifiedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// VerifiedAtLT applies the LT predicate on the "verified_at" field.
|
||||||
|
func VerifiedAtLT(v time.Time) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldLT(FieldVerifiedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// VerifiedAtLTE applies the LTE predicate on the "verified_at" field.
|
||||||
|
func VerifiedAtLTE(v time.Time) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldLTE(FieldVerifiedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// VerifiedAtIsNil applies the IsNil predicate on the "verified_at" field.
|
||||||
|
func VerifiedAtIsNil() predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldIsNull(FieldVerifiedAt))
|
||||||
|
}
|
||||||
|
|
||||||
|
// VerifiedAtNotNil applies the NotNil predicate on the "verified_at" field.
|
||||||
|
func VerifiedAtNotNil() predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldNotNull(FieldVerifiedAt))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IssuerEQ applies the EQ predicate on the "issuer" field.
|
||||||
|
func IssuerEQ(v string) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldEQ(FieldIssuer, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IssuerNEQ applies the NEQ predicate on the "issuer" field.
|
||||||
|
func IssuerNEQ(v string) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldNEQ(FieldIssuer, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IssuerIn applies the In predicate on the "issuer" field.
|
||||||
|
func IssuerIn(vs ...string) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldIn(FieldIssuer, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IssuerNotIn applies the NotIn predicate on the "issuer" field.
|
||||||
|
func IssuerNotIn(vs ...string) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldNotIn(FieldIssuer, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IssuerGT applies the GT predicate on the "issuer" field.
|
||||||
|
func IssuerGT(v string) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldGT(FieldIssuer, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IssuerGTE applies the GTE predicate on the "issuer" field.
|
||||||
|
func IssuerGTE(v string) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldGTE(FieldIssuer, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IssuerLT applies the LT predicate on the "issuer" field.
|
||||||
|
func IssuerLT(v string) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldLT(FieldIssuer, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IssuerLTE applies the LTE predicate on the "issuer" field.
|
||||||
|
func IssuerLTE(v string) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldLTE(FieldIssuer, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IssuerContains applies the Contains predicate on the "issuer" field.
|
||||||
|
func IssuerContains(v string) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldContains(FieldIssuer, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IssuerHasPrefix applies the HasPrefix predicate on the "issuer" field.
|
||||||
|
func IssuerHasPrefix(v string) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldHasPrefix(FieldIssuer, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IssuerHasSuffix applies the HasSuffix predicate on the "issuer" field.
|
||||||
|
func IssuerHasSuffix(v string) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldHasSuffix(FieldIssuer, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IssuerIsNil applies the IsNil predicate on the "issuer" field.
|
||||||
|
func IssuerIsNil() predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldIsNull(FieldIssuer))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IssuerNotNil applies the NotNil predicate on the "issuer" field.
|
||||||
|
func IssuerNotNil() predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldNotNull(FieldIssuer))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IssuerEqualFold applies the EqualFold predicate on the "issuer" field.
|
||||||
|
func IssuerEqualFold(v string) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldEqualFold(FieldIssuer, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IssuerContainsFold applies the ContainsFold predicate on the "issuer" field.
|
||||||
|
func IssuerContainsFold(v string) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.FieldContainsFold(FieldIssuer, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// HasUser applies the HasEdge predicate on the "user" edge.
|
||||||
|
func HasUser() predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(func(s *sql.Selector) {
|
||||||
|
step := sqlgraph.NewStep(
|
||||||
|
sqlgraph.From(Table, FieldID),
|
||||||
|
sqlgraph.Edge(sqlgraph.M2O, true, UserTable, UserColumn),
|
||||||
|
)
|
||||||
|
sqlgraph.HasNeighbors(s, step)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// HasUserWith applies the HasEdge predicate on the "user" edge with a given conditions (other predicates).
|
||||||
|
func HasUserWith(preds ...predicate.User) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(func(s *sql.Selector) {
|
||||||
|
step := newUserStep()
|
||||||
|
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
|
||||||
|
for _, p := range preds {
|
||||||
|
p(s)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// HasChannels applies the HasEdge predicate on the "channels" edge.
|
||||||
|
func HasChannels() predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(func(s *sql.Selector) {
|
||||||
|
step := sqlgraph.NewStep(
|
||||||
|
sqlgraph.From(Table, FieldID),
|
||||||
|
sqlgraph.Edge(sqlgraph.O2M, false, ChannelsTable, ChannelsColumn),
|
||||||
|
)
|
||||||
|
sqlgraph.HasNeighbors(s, step)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// HasChannelsWith applies the HasEdge predicate on the "channels" edge with a given conditions (other predicates).
|
||||||
|
func HasChannelsWith(preds ...predicate.AuthIdentityChannel) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(func(s *sql.Selector) {
|
||||||
|
step := newChannelsStep()
|
||||||
|
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
|
||||||
|
for _, p := range preds {
|
||||||
|
p(s)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// HasAdoptionDecisions applies the HasEdge predicate on the "adoption_decisions" edge.
|
||||||
|
func HasAdoptionDecisions() predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(func(s *sql.Selector) {
|
||||||
|
step := sqlgraph.NewStep(
|
||||||
|
sqlgraph.From(Table, FieldID),
|
||||||
|
sqlgraph.Edge(sqlgraph.O2M, false, AdoptionDecisionsTable, AdoptionDecisionsColumn),
|
||||||
|
)
|
||||||
|
sqlgraph.HasNeighbors(s, step)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// HasAdoptionDecisionsWith applies the HasEdge predicate on the "adoption_decisions" edge with a given conditions (other predicates).
|
||||||
|
func HasAdoptionDecisionsWith(preds ...predicate.IdentityAdoptionDecision) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(func(s *sql.Selector) {
|
||||||
|
step := newAdoptionDecisionsStep()
|
||||||
|
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
|
||||||
|
for _, p := range preds {
|
||||||
|
p(s)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// And groups predicates with the AND operator between them.
|
||||||
|
func And(predicates ...predicate.AuthIdentity) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.AndPredicates(predicates...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Or groups predicates with the OR operator between them.
|
||||||
|
func Or(predicates ...predicate.AuthIdentity) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.OrPredicates(predicates...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Not applies the not operator on the given predicate.
|
||||||
|
func Not(p predicate.AuthIdentity) predicate.AuthIdentity {
|
||||||
|
return predicate.AuthIdentity(sql.NotPredicates(p))
|
||||||
|
}
|
||||||
1036
backend/ent/authidentity_create.go
Normal file
88
backend/ent/authidentity_delete.go
Normal file
@@ -0,0 +1,88 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package ent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
"entgo.io/ent/schema/field"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/authidentity"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||||
|
)
|
||||||
|
|
||||||
|
// AuthIdentityDelete is the builder for deleting a AuthIdentity entity.
|
||||||
|
type AuthIdentityDelete struct {
|
||||||
|
config
|
||||||
|
hooks []Hook
|
||||||
|
mutation *AuthIdentityMutation
|
||||||
|
}
|
||||||
|
|
||||||
|
// Where appends a list predicates to the AuthIdentityDelete builder.
|
||||||
|
func (_d *AuthIdentityDelete) Where(ps ...predicate.AuthIdentity) *AuthIdentityDelete {
|
||||||
|
_d.mutation.Where(ps...)
|
||||||
|
return _d
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exec executes the deletion query and returns how many vertices were deleted.
|
||||||
|
func (_d *AuthIdentityDelete) Exec(ctx context.Context) (int, error) {
|
||||||
|
return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
|
||||||
|
func (_d *AuthIdentityDelete) ExecX(ctx context.Context) int {
|
||||||
|
n, err := _d.Exec(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_d *AuthIdentityDelete) sqlExec(ctx context.Context) (int, error) {
|
||||||
|
_spec := sqlgraph.NewDeleteSpec(authidentity.Table, sqlgraph.NewFieldSpec(authidentity.FieldID, field.TypeInt64))
|
||||||
|
if ps := _d.mutation.predicates; len(ps) > 0 {
|
||||||
|
_spec.Predicate = func(selector *sql.Selector) {
|
||||||
|
for i := range ps {
|
||||||
|
ps[i](selector)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec)
|
||||||
|
if err != nil && sqlgraph.IsConstraintError(err) {
|
||||||
|
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||||
|
}
|
||||||
|
_d.mutation.done = true
|
||||||
|
return affected, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// AuthIdentityDeleteOne is the builder for deleting a single AuthIdentity entity.
|
||||||
|
type AuthIdentityDeleteOne struct {
|
||||||
|
_d *AuthIdentityDelete
|
||||||
|
}
|
||||||
|
|
||||||
|
// Where appends a list predicates to the AuthIdentityDelete builder.
|
||||||
|
func (_d *AuthIdentityDeleteOne) Where(ps ...predicate.AuthIdentity) *AuthIdentityDeleteOne {
|
||||||
|
_d._d.mutation.Where(ps...)
|
||||||
|
return _d
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exec executes the deletion query.
|
||||||
|
func (_d *AuthIdentityDeleteOne) Exec(ctx context.Context) error {
|
||||||
|
n, err := _d._d.Exec(ctx)
|
||||||
|
switch {
|
||||||
|
case err != nil:
|
||||||
|
return err
|
||||||
|
case n == 0:
|
||||||
|
return &NotFoundError{authidentity.Label}
|
||||||
|
default:
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
|
||||||
|
func (_d *AuthIdentityDeleteOne) ExecX(ctx context.Context) {
|
||||||
|
if err := _d.Exec(ctx); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
797
backend/ent/authidentity_query.go
Normal file
@@ -0,0 +1,797 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package ent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"database/sql/driver"
|
||||||
|
"fmt"
|
||||||
|
"math"
|
||||||
|
|
||||||
|
"entgo.io/ent"
|
||||||
|
"entgo.io/ent/dialect"
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
"entgo.io/ent/schema/field"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/authidentity"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/authidentitychannel"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/identityadoptiondecision"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/user"
|
||||||
|
)
|
||||||
|
|
||||||
|
// AuthIdentityQuery is the builder for querying AuthIdentity entities.
|
||||||
|
type AuthIdentityQuery struct {
|
||||||
|
config
|
||||||
|
ctx *QueryContext
|
||||||
|
order []authidentity.OrderOption
|
||||||
|
inters []Interceptor
|
||||||
|
predicates []predicate.AuthIdentity
|
||||||
|
withUser *UserQuery
|
||||||
|
withChannels *AuthIdentityChannelQuery
|
||||||
|
withAdoptionDecisions *IdentityAdoptionDecisionQuery
|
||||||
|
modifiers []func(*sql.Selector)
|
||||||
|
// intermediate query (i.e. traversal path).
|
||||||
|
sql *sql.Selector
|
||||||
|
path func(context.Context) (*sql.Selector, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Where adds a new predicate for the AuthIdentityQuery builder.
|
||||||
|
func (_q *AuthIdentityQuery) Where(ps ...predicate.AuthIdentity) *AuthIdentityQuery {
|
||||||
|
_q.predicates = append(_q.predicates, ps...)
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// Limit the number of records to be returned by this query.
|
||||||
|
func (_q *AuthIdentityQuery) Limit(limit int) *AuthIdentityQuery {
|
||||||
|
_q.ctx.Limit = &limit
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// Offset to start from.
|
||||||
|
func (_q *AuthIdentityQuery) Offset(offset int) *AuthIdentityQuery {
|
||||||
|
_q.ctx.Offset = &offset
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unique configures the query builder to filter duplicate records on query.
|
||||||
|
// By default, unique is set to true, and can be disabled using this method.
|
||||||
|
func (_q *AuthIdentityQuery) Unique(unique bool) *AuthIdentityQuery {
|
||||||
|
_q.ctx.Unique = &unique
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// Order specifies how the records should be ordered.
|
||||||
|
func (_q *AuthIdentityQuery) Order(o ...authidentity.OrderOption) *AuthIdentityQuery {
|
||||||
|
_q.order = append(_q.order, o...)
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// QueryUser chains the current query on the "user" edge.
|
||||||
|
func (_q *AuthIdentityQuery) QueryUser() *UserQuery {
|
||||||
|
query := (&UserClient{config: _q.config}).Query()
|
||||||
|
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
|
||||||
|
if err := _q.prepareQuery(ctx); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
selector := _q.sqlQuery(ctx)
|
||||||
|
if err := selector.Err(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
step := sqlgraph.NewStep(
|
||||||
|
sqlgraph.From(authidentity.Table, authidentity.FieldID, selector),
|
||||||
|
sqlgraph.To(user.Table, user.FieldID),
|
||||||
|
sqlgraph.Edge(sqlgraph.M2O, true, authidentity.UserTable, authidentity.UserColumn),
|
||||||
|
)
|
||||||
|
fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step)
|
||||||
|
return fromU, nil
|
||||||
|
}
|
||||||
|
return query
|
||||||
|
}
|
||||||
|
|
||||||
|
// QueryChannels chains the current query on the "channels" edge.
|
||||||
|
func (_q *AuthIdentityQuery) QueryChannels() *AuthIdentityChannelQuery {
|
||||||
|
query := (&AuthIdentityChannelClient{config: _q.config}).Query()
|
||||||
|
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
|
||||||
|
if err := _q.prepareQuery(ctx); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
selector := _q.sqlQuery(ctx)
|
||||||
|
if err := selector.Err(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
step := sqlgraph.NewStep(
|
||||||
|
sqlgraph.From(authidentity.Table, authidentity.FieldID, selector),
|
||||||
|
sqlgraph.To(authidentitychannel.Table, authidentitychannel.FieldID),
|
||||||
|
sqlgraph.Edge(sqlgraph.O2M, false, authidentity.ChannelsTable, authidentity.ChannelsColumn),
|
||||||
|
)
|
||||||
|
fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step)
|
||||||
|
return fromU, nil
|
||||||
|
}
|
||||||
|
return query
|
||||||
|
}
|
||||||
|
|
||||||
|
// QueryAdoptionDecisions chains the current query on the "adoption_decisions" edge.
|
||||||
|
func (_q *AuthIdentityQuery) QueryAdoptionDecisions() *IdentityAdoptionDecisionQuery {
|
||||||
|
query := (&IdentityAdoptionDecisionClient{config: _q.config}).Query()
|
||||||
|
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
|
||||||
|
if err := _q.prepareQuery(ctx); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
selector := _q.sqlQuery(ctx)
|
||||||
|
if err := selector.Err(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
step := sqlgraph.NewStep(
|
||||||
|
sqlgraph.From(authidentity.Table, authidentity.FieldID, selector),
|
||||||
|
sqlgraph.To(identityadoptiondecision.Table, identityadoptiondecision.FieldID),
|
||||||
|
sqlgraph.Edge(sqlgraph.O2M, false, authidentity.AdoptionDecisionsTable, authidentity.AdoptionDecisionsColumn),
|
||||||
|
)
|
||||||
|
fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step)
|
||||||
|
return fromU, nil
|
||||||
|
}
|
||||||
|
return query
|
||||||
|
}
|
||||||
|
|
||||||
|
// First returns the first AuthIdentity entity from the query.
|
||||||
|
// Returns a *NotFoundError when no AuthIdentity was found.
|
||||||
|
func (_q *AuthIdentityQuery) First(ctx context.Context) (*AuthIdentity, error) {
|
||||||
|
nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if len(nodes) == 0 {
|
||||||
|
return nil, &NotFoundError{authidentity.Label}
|
||||||
|
}
|
||||||
|
return nodes[0], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// FirstX is like First, but panics if an error occurs.
|
||||||
|
func (_q *AuthIdentityQuery) FirstX(ctx context.Context) *AuthIdentity {
|
||||||
|
node, err := _q.First(ctx)
|
||||||
|
if err != nil && !IsNotFound(err) {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return node
|
||||||
|
}
|
||||||
|
|
||||||
|
// FirstID returns the first AuthIdentity ID from the query.
|
||||||
|
// Returns a *NotFoundError when no AuthIdentity ID was found.
|
||||||
|
func (_q *AuthIdentityQuery) FirstID(ctx context.Context) (id int64, err error) {
|
||||||
|
var ids []int64
|
||||||
|
if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if len(ids) == 0 {
|
||||||
|
err = &NotFoundError{authidentity.Label}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
return ids[0], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// FirstIDX is like FirstID, but panics if an error occurs.
|
||||||
|
func (_q *AuthIdentityQuery) FirstIDX(ctx context.Context) int64 {
|
||||||
|
id, err := _q.FirstID(ctx)
|
||||||
|
if err != nil && !IsNotFound(err) {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return id
|
||||||
|
}
|
||||||
|
|
||||||
|
// Only returns a single AuthIdentity entity found by the query, ensuring it only returns one.
|
||||||
|
// Returns a *NotSingularError when more than one AuthIdentity entity is found.
|
||||||
|
// Returns a *NotFoundError when no AuthIdentity entities are found.
|
||||||
|
func (_q *AuthIdentityQuery) Only(ctx context.Context) (*AuthIdentity, error) {
|
||||||
|
nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
switch len(nodes) {
|
||||||
|
case 1:
|
||||||
|
return nodes[0], nil
|
||||||
|
case 0:
|
||||||
|
return nil, &NotFoundError{authidentity.Label}
|
||||||
|
default:
|
||||||
|
return nil, &NotSingularError{authidentity.Label}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnlyX is like Only, but panics if an error occurs.
|
||||||
|
func (_q *AuthIdentityQuery) OnlyX(ctx context.Context) *AuthIdentity {
|
||||||
|
node, err := _q.Only(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return node
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnlyID is like Only, but returns the only AuthIdentity ID in the query.
|
||||||
|
// Returns a *NotSingularError when more than one AuthIdentity ID is found.
|
||||||
|
// Returns a *NotFoundError when no entities are found.
|
||||||
|
func (_q *AuthIdentityQuery) OnlyID(ctx context.Context) (id int64, err error) {
|
||||||
|
var ids []int64
|
||||||
|
if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
switch len(ids) {
|
||||||
|
case 1:
|
||||||
|
id = ids[0]
|
||||||
|
case 0:
|
||||||
|
err = &NotFoundError{authidentity.Label}
|
||||||
|
default:
|
||||||
|
err = &NotSingularError{authidentity.Label}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnlyIDX is like OnlyID, but panics if an error occurs.
|
||||||
|
func (_q *AuthIdentityQuery) OnlyIDX(ctx context.Context) int64 {
|
||||||
|
id, err := _q.OnlyID(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return id
|
||||||
|
}
|
||||||
|
|
||||||
|
// All executes the query and returns a list of AuthIdentities.
|
||||||
|
func (_q *AuthIdentityQuery) All(ctx context.Context) ([]*AuthIdentity, error) {
|
||||||
|
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll)
|
||||||
|
if err := _q.prepareQuery(ctx); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
qr := querierAll[[]*AuthIdentity, *AuthIdentityQuery]()
|
||||||
|
return withInterceptors[[]*AuthIdentity](ctx, _q, qr, _q.inters)
|
||||||
|
}
|
||||||
|
|
||||||
|
// AllX is like All, but panics if an error occurs.
|
||||||
|
func (_q *AuthIdentityQuery) AllX(ctx context.Context) []*AuthIdentity {
|
||||||
|
nodes, err := _q.All(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return nodes
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDs executes the query and returns a list of AuthIdentity IDs.
|
||||||
|
func (_q *AuthIdentityQuery) IDs(ctx context.Context) (ids []int64, err error) {
|
||||||
|
if _q.ctx.Unique == nil && _q.path != nil {
|
||||||
|
_q.Unique(true)
|
||||||
|
}
|
||||||
|
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs)
|
||||||
|
if err = _q.Select(authidentity.FieldID).Scan(ctx, &ids); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return ids, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDsX is like IDs, but panics if an error occurs.
|
||||||
|
func (_q *AuthIdentityQuery) IDsX(ctx context.Context) []int64 {
|
||||||
|
ids, err := _q.IDs(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return ids
|
||||||
|
}
|
||||||
|
|
||||||
|
// Count returns the count of the given query.
|
||||||
|
func (_q *AuthIdentityQuery) Count(ctx context.Context) (int, error) {
|
||||||
|
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount)
|
||||||
|
if err := _q.prepareQuery(ctx); err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
return withInterceptors[int](ctx, _q, querierCount[*AuthIdentityQuery](), _q.inters)
|
||||||
|
}
|
||||||
|
|
||||||
|
// CountX is like Count, but panics if an error occurs.
|
||||||
|
func (_q *AuthIdentityQuery) CountX(ctx context.Context) int {
|
||||||
|
count, err := _q.Count(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return count
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exist returns true if the query has elements in the graph.
|
||||||
|
func (_q *AuthIdentityQuery) Exist(ctx context.Context) (bool, error) {
|
||||||
|
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist)
|
||||||
|
switch _, err := _q.FirstID(ctx); {
|
||||||
|
case IsNotFound(err):
|
||||||
|
return false, nil
|
||||||
|
case err != nil:
|
||||||
|
return false, fmt.Errorf("ent: check existence: %w", err)
|
||||||
|
default:
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExistX is like Exist, but panics if an error occurs.
|
||||||
|
func (_q *AuthIdentityQuery) ExistX(ctx context.Context) bool {
|
||||||
|
exist, err := _q.Exist(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return exist
|
||||||
|
}
|
||||||
|
|
||||||
|
// Clone returns a duplicate of the AuthIdentityQuery builder, including all associated steps. It can be
|
||||||
|
// used to prepare common query builders and use them differently after the clone is made.
|
||||||
|
func (_q *AuthIdentityQuery) Clone() *AuthIdentityQuery {
|
||||||
|
if _q == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return &AuthIdentityQuery{
|
||||||
|
config: _q.config,
|
||||||
|
ctx: _q.ctx.Clone(),
|
||||||
|
order: append([]authidentity.OrderOption{}, _q.order...),
|
||||||
|
inters: append([]Interceptor{}, _q.inters...),
|
||||||
|
predicates: append([]predicate.AuthIdentity{}, _q.predicates...),
|
||||||
|
withUser: _q.withUser.Clone(),
|
||||||
|
withChannels: _q.withChannels.Clone(),
|
||||||
|
withAdoptionDecisions: _q.withAdoptionDecisions.Clone(),
|
||||||
|
// clone intermediate query.
|
||||||
|
sql: _q.sql.Clone(),
|
||||||
|
path: _q.path,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithUser tells the query-builder to eager-load the nodes that are connected to
|
||||||
|
// the "user" edge. The optional arguments are used to configure the query builder of the edge.
|
||||||
|
func (_q *AuthIdentityQuery) WithUser(opts ...func(*UserQuery)) *AuthIdentityQuery {
|
||||||
|
query := (&UserClient{config: _q.config}).Query()
|
||||||
|
for _, opt := range opts {
|
||||||
|
opt(query)
|
||||||
|
}
|
||||||
|
_q.withUser = query
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithChannels tells the query-builder to eager-load the nodes that are connected to
|
||||||
|
// the "channels" edge. The optional arguments are used to configure the query builder of the edge.
|
||||||
|
func (_q *AuthIdentityQuery) WithChannels(opts ...func(*AuthIdentityChannelQuery)) *AuthIdentityQuery {
|
||||||
|
query := (&AuthIdentityChannelClient{config: _q.config}).Query()
|
||||||
|
for _, opt := range opts {
|
||||||
|
opt(query)
|
||||||
|
}
|
||||||
|
_q.withChannels = query
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithAdoptionDecisions tells the query-builder to eager-load the nodes that are connected to
|
||||||
|
// the "adoption_decisions" edge. The optional arguments are used to configure the query builder of the edge.
|
||||||
|
func (_q *AuthIdentityQuery) WithAdoptionDecisions(opts ...func(*IdentityAdoptionDecisionQuery)) *AuthIdentityQuery {
|
||||||
|
query := (&IdentityAdoptionDecisionClient{config: _q.config}).Query()
|
||||||
|
for _, opt := range opts {
|
||||||
|
opt(query)
|
||||||
|
}
|
||||||
|
_q.withAdoptionDecisions = query
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// GroupBy is used to group vertices by one or more fields/columns.
|
||||||
|
// It is often used with aggregate functions, like: count, max, mean, min, sum.
|
||||||
|
//
|
||||||
|
// Example:
|
||||||
|
//
|
||||||
|
// var v []struct {
|
||||||
|
// CreatedAt time.Time `json:"created_at,omitempty"`
|
||||||
|
// Count int `json:"count,omitempty"`
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// client.AuthIdentity.Query().
|
||||||
|
// GroupBy(authidentity.FieldCreatedAt).
|
||||||
|
// Aggregate(ent.Count()).
|
||||||
|
// Scan(ctx, &v)
|
||||||
|
func (_q *AuthIdentityQuery) GroupBy(field string, fields ...string) *AuthIdentityGroupBy {
|
||||||
|
_q.ctx.Fields = append([]string{field}, fields...)
|
||||||
|
grbuild := &AuthIdentityGroupBy{build: _q}
|
||||||
|
grbuild.flds = &_q.ctx.Fields
|
||||||
|
grbuild.label = authidentity.Label
|
||||||
|
grbuild.scan = grbuild.Scan
|
||||||
|
return grbuild
|
||||||
|
}
|
||||||
|
|
||||||
|
// Select allows the selection one or more fields/columns for the given query,
|
||||||
|
// instead of selecting all fields in the entity.
|
||||||
|
//
|
||||||
|
// Example:
|
||||||
|
//
|
||||||
|
// var v []struct {
|
||||||
|
// CreatedAt time.Time `json:"created_at,omitempty"`
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// client.AuthIdentity.Query().
|
||||||
|
// Select(authidentity.FieldCreatedAt).
|
||||||
|
// Scan(ctx, &v)
|
||||||
|
func (_q *AuthIdentityQuery) Select(fields ...string) *AuthIdentitySelect {
|
||||||
|
_q.ctx.Fields = append(_q.ctx.Fields, fields...)
|
||||||
|
sbuild := &AuthIdentitySelect{AuthIdentityQuery: _q}
|
||||||
|
sbuild.label = authidentity.Label
|
||||||
|
sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan
|
||||||
|
return sbuild
|
||||||
|
}
|
||||||
|
|
||||||
|
// Aggregate returns a AuthIdentitySelect configured with the given aggregations.
|
||||||
|
func (_q *AuthIdentityQuery) Aggregate(fns ...AggregateFunc) *AuthIdentitySelect {
|
||||||
|
return _q.Select().Aggregate(fns...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_q *AuthIdentityQuery) prepareQuery(ctx context.Context) error {
|
||||||
|
for _, inter := range _q.inters {
|
||||||
|
if inter == nil {
|
||||||
|
return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
|
||||||
|
}
|
||||||
|
if trv, ok := inter.(Traverser); ok {
|
||||||
|
if err := trv.Traverse(ctx, _q); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for _, f := range _q.ctx.Fields {
|
||||||
|
if !authidentity.ValidColumn(f) {
|
||||||
|
return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if _q.path != nil {
|
||||||
|
prev, err := _q.path(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
_q.sql = prev
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_q *AuthIdentityQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*AuthIdentity, error) {
|
||||||
|
var (
|
||||||
|
nodes = []*AuthIdentity{}
|
||||||
|
_spec = _q.querySpec()
|
||||||
|
loadedTypes = [3]bool{
|
||||||
|
_q.withUser != nil,
|
||||||
|
_q.withChannels != nil,
|
||||||
|
_q.withAdoptionDecisions != nil,
|
||||||
|
}
|
||||||
|
)
|
||||||
|
_spec.ScanValues = func(columns []string) ([]any, error) {
|
||||||
|
return (*AuthIdentity).scanValues(nil, columns)
|
||||||
|
}
|
||||||
|
_spec.Assign = func(columns []string, values []any) error {
|
||||||
|
node := &AuthIdentity{config: _q.config}
|
||||||
|
nodes = append(nodes, node)
|
||||||
|
node.Edges.loadedTypes = loadedTypes
|
||||||
|
return node.assignValues(columns, values)
|
||||||
|
}
|
||||||
|
if len(_q.modifiers) > 0 {
|
||||||
|
_spec.Modifiers = _q.modifiers
|
||||||
|
}
|
||||||
|
for i := range hooks {
|
||||||
|
hooks[i](ctx, _spec)
|
||||||
|
}
|
||||||
|
if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if len(nodes) == 0 {
|
||||||
|
return nodes, nil
|
||||||
|
}
|
||||||
|
if query := _q.withUser; query != nil {
|
||||||
|
if err := _q.loadUser(ctx, query, nodes, nil,
|
||||||
|
func(n *AuthIdentity, e *User) { n.Edges.User = e }); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if query := _q.withChannels; query != nil {
|
||||||
|
if err := _q.loadChannels(ctx, query, nodes,
|
||||||
|
func(n *AuthIdentity) { n.Edges.Channels = []*AuthIdentityChannel{} },
|
||||||
|
func(n *AuthIdentity, e *AuthIdentityChannel) { n.Edges.Channels = append(n.Edges.Channels, e) }); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if query := _q.withAdoptionDecisions; query != nil {
|
||||||
|
if err := _q.loadAdoptionDecisions(ctx, query, nodes,
|
||||||
|
func(n *AuthIdentity) { n.Edges.AdoptionDecisions = []*IdentityAdoptionDecision{} },
|
||||||
|
func(n *AuthIdentity, e *IdentityAdoptionDecision) {
|
||||||
|
n.Edges.AdoptionDecisions = append(n.Edges.AdoptionDecisions, e)
|
||||||
|
}); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nodes, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_q *AuthIdentityQuery) loadUser(ctx context.Context, query *UserQuery, nodes []*AuthIdentity, init func(*AuthIdentity), assign func(*AuthIdentity, *User)) error {
|
||||||
|
ids := make([]int64, 0, len(nodes))
|
||||||
|
nodeids := make(map[int64][]*AuthIdentity)
|
||||||
|
for i := range nodes {
|
||||||
|
fk := nodes[i].UserID
|
||||||
|
if _, ok := nodeids[fk]; !ok {
|
||||||
|
ids = append(ids, fk)
|
||||||
|
}
|
||||||
|
nodeids[fk] = append(nodeids[fk], nodes[i])
|
||||||
|
}
|
||||||
|
if len(ids) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
query.Where(user.IDIn(ids...))
|
||||||
|
neighbors, err := query.All(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
for _, n := range neighbors {
|
||||||
|
nodes, ok := nodeids[n.ID]
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf(`unexpected foreign-key "user_id" returned %v`, n.ID)
|
||||||
|
}
|
||||||
|
for i := range nodes {
|
||||||
|
assign(nodes[i], n)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func (_q *AuthIdentityQuery) loadChannels(ctx context.Context, query *AuthIdentityChannelQuery, nodes []*AuthIdentity, init func(*AuthIdentity), assign func(*AuthIdentity, *AuthIdentityChannel)) error {
|
||||||
|
fks := make([]driver.Value, 0, len(nodes))
|
||||||
|
nodeids := make(map[int64]*AuthIdentity)
|
||||||
|
for i := range nodes {
|
||||||
|
fks = append(fks, nodes[i].ID)
|
||||||
|
nodeids[nodes[i].ID] = nodes[i]
|
||||||
|
if init != nil {
|
||||||
|
init(nodes[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if len(query.ctx.Fields) > 0 {
|
||||||
|
query.ctx.AppendFieldOnce(authidentitychannel.FieldIdentityID)
|
||||||
|
}
|
||||||
|
query.Where(predicate.AuthIdentityChannel(func(s *sql.Selector) {
|
||||||
|
s.Where(sql.InValues(s.C(authidentity.ChannelsColumn), fks...))
|
||||||
|
}))
|
||||||
|
neighbors, err := query.All(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
for _, n := range neighbors {
|
||||||
|
fk := n.IdentityID
|
||||||
|
node, ok := nodeids[fk]
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf(`unexpected referenced foreign-key "identity_id" returned %v for node %v`, fk, n.ID)
|
||||||
|
}
|
||||||
|
assign(node, n)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func (_q *AuthIdentityQuery) loadAdoptionDecisions(ctx context.Context, query *IdentityAdoptionDecisionQuery, nodes []*AuthIdentity, init func(*AuthIdentity), assign func(*AuthIdentity, *IdentityAdoptionDecision)) error {
|
||||||
|
fks := make([]driver.Value, 0, len(nodes))
|
||||||
|
nodeids := make(map[int64]*AuthIdentity)
|
||||||
|
for i := range nodes {
|
||||||
|
fks = append(fks, nodes[i].ID)
|
||||||
|
nodeids[nodes[i].ID] = nodes[i]
|
||||||
|
if init != nil {
|
||||||
|
init(nodes[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if len(query.ctx.Fields) > 0 {
|
||||||
|
query.ctx.AppendFieldOnce(identityadoptiondecision.FieldIdentityID)
|
||||||
|
}
|
||||||
|
query.Where(predicate.IdentityAdoptionDecision(func(s *sql.Selector) {
|
||||||
|
s.Where(sql.InValues(s.C(authidentity.AdoptionDecisionsColumn), fks...))
|
||||||
|
}))
|
||||||
|
neighbors, err := query.All(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
for _, n := range neighbors {
|
||||||
|
fk := n.IdentityID
|
||||||
|
if fk == nil {
|
||||||
|
return fmt.Errorf(`foreign-key "identity_id" is nil for node %v`, n.ID)
|
||||||
|
}
|
||||||
|
node, ok := nodeids[*fk]
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf(`unexpected referenced foreign-key "identity_id" returned %v for node %v`, *fk, n.ID)
|
||||||
|
}
|
||||||
|
assign(node, n)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_q *AuthIdentityQuery) sqlCount(ctx context.Context) (int, error) {
|
||||||
|
_spec := _q.querySpec()
|
||||||
|
if len(_q.modifiers) > 0 {
|
||||||
|
_spec.Modifiers = _q.modifiers
|
||||||
|
}
|
||||||
|
_spec.Node.Columns = _q.ctx.Fields
|
||||||
|
if len(_q.ctx.Fields) > 0 {
|
||||||
|
_spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique
|
||||||
|
}
|
||||||
|
return sqlgraph.CountNodes(ctx, _q.driver, _spec)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_q *AuthIdentityQuery) querySpec() *sqlgraph.QuerySpec {
|
||||||
|
_spec := sqlgraph.NewQuerySpec(authidentity.Table, authidentity.Columns, sqlgraph.NewFieldSpec(authidentity.FieldID, field.TypeInt64))
|
||||||
|
_spec.From = _q.sql
|
||||||
|
if unique := _q.ctx.Unique; unique != nil {
|
||||||
|
_spec.Unique = *unique
|
||||||
|
} else if _q.path != nil {
|
||||||
|
_spec.Unique = true
|
||||||
|
}
|
||||||
|
if fields := _q.ctx.Fields; len(fields) > 0 {
|
||||||
|
_spec.Node.Columns = make([]string, 0, len(fields))
|
||||||
|
_spec.Node.Columns = append(_spec.Node.Columns, authidentity.FieldID)
|
||||||
|
for i := range fields {
|
||||||
|
if fields[i] != authidentity.FieldID {
|
||||||
|
_spec.Node.Columns = append(_spec.Node.Columns, fields[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if _q.withUser != nil {
|
||||||
|
_spec.Node.AddColumnOnce(authidentity.FieldUserID)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if ps := _q.predicates; len(ps) > 0 {
|
||||||
|
_spec.Predicate = func(selector *sql.Selector) {
|
||||||
|
for i := range ps {
|
||||||
|
ps[i](selector)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if limit := _q.ctx.Limit; limit != nil {
|
||||||
|
_spec.Limit = *limit
|
||||||
|
}
|
||||||
|
if offset := _q.ctx.Offset; offset != nil {
|
||||||
|
_spec.Offset = *offset
|
||||||
|
}
|
||||||
|
if ps := _q.order; len(ps) > 0 {
|
||||||
|
_spec.Order = func(selector *sql.Selector) {
|
||||||
|
for i := range ps {
|
||||||
|
ps[i](selector)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return _spec
|
||||||
|
}
|
||||||
|
|
||||||
|
// sqlQuery builds the raw SQL selector for the query: SELECT columns FROM
// table, plus DISTINCT, modifiers, predicates, ordering and LIMIT/OFFSET.
func (_q *AuthIdentityQuery) sqlQuery(ctx context.Context) *sql.Selector {
	builder := sql.Dialect(_q.driver.Dialect())
	t1 := builder.Table(authidentity.Table)
	columns := _q.ctx.Fields
	if len(columns) == 0 {
		// No explicit selection: fetch all entity columns.
		columns = authidentity.Columns
	}
	selector := builder.Select(t1.Columns(columns...)...).From(t1)
	if _q.sql != nil {
		// A user-supplied selector replaces the generated one.
		selector = _q.sql
		selector.Select(selector.Columns(columns...)...)
	}
	if _q.ctx.Unique != nil && *_q.ctx.Unique {
		selector.Distinct()
	}
	for _, m := range _q.modifiers {
		m(selector)
	}
	for _, p := range _q.predicates {
		p(selector)
	}
	for _, p := range _q.order {
		p(selector)
	}
	if offset := _q.ctx.Offset; offset != nil {
		// limit is mandatory for offset clause. We start
		// with default value, and override it below if needed.
		selector.Offset(*offset).Limit(math.MaxInt32)
	}
	if limit := _q.ctx.Limit; limit != nil {
		selector.Limit(*limit)
	}
	return selector
}
|
||||||
|
|
||||||
|
// ForUpdate locks the selected rows against concurrent updates, and prevent them from being
// updated, deleted or "selected ... for update" by other sessions, until the transaction is
// either committed or rolled-back.
func (_q *AuthIdentityQuery) ForUpdate(opts ...sql.LockOption) *AuthIdentityQuery {
	if _q.driver.Dialect() == dialect.Postgres {
		// Postgres disallows SELECT DISTINCT ... FOR UPDATE.
		_q.Unique(false)
	}
	_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
		s.ForUpdate(opts...)
	})
	return _q
}
|
||||||
|
|
||||||
|
// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock
// on any rows that are read. Other sessions can read the rows, but cannot modify them
// until your transaction commits.
func (_q *AuthIdentityQuery) ForShare(opts ...sql.LockOption) *AuthIdentityQuery {
	if _q.driver.Dialect() == dialect.Postgres {
		// Postgres disallows SELECT DISTINCT ... FOR SHARE.
		_q.Unique(false)
	}
	_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
		s.ForShare(opts...)
	})
	return _q
}
|
||||||
|
|
||||||
|
// AuthIdentityGroupBy is the group-by builder for AuthIdentity entities.
type AuthIdentityGroupBy struct {
	selector
	// build is the underlying query the GROUP BY is applied to.
	build *AuthIdentityQuery
}
|
||||||
|
|
||||||
|
// Aggregate adds the given aggregation functions to the group-by query.
func (_g *AuthIdentityGroupBy) Aggregate(fns ...AggregateFunc) *AuthIdentityGroupBy {
	_g.fns = append(_g.fns, fns...)
	return _g
}
|
||||||
|
|
||||||
|
// Scan applies the selector query and scans the result into the given value.
func (_g *AuthIdentityGroupBy) Scan(ctx context.Context, v any) error {
	ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy)
	if err := _g.build.prepareQuery(ctx); err != nil {
		return err
	}
	return scanWithInterceptors[*AuthIdentityQuery, *AuthIdentityGroupBy](ctx, _g.build, _g, _g.build.inters, v)
}
|
||||||
|
|
||||||
|
// sqlScan executes the GROUP BY query built from root and scans all rows
// into v. Aggregation columns are appended after the grouped fields.
func (_g *AuthIdentityGroupBy) sqlScan(ctx context.Context, root *AuthIdentityQuery, v any) error {
	selector := root.sqlQuery(ctx).Select()
	aggregation := make([]string, 0, len(_g.fns))
	for _, fn := range _g.fns {
		aggregation = append(aggregation, fn(selector))
	}
	if len(selector.SelectedColumns()) == 0 {
		// No explicit selection: select the grouped fields plus aggregations.
		columns := make([]string, 0, len(*_g.flds)+len(_g.fns))
		for _, f := range *_g.flds {
			columns = append(columns, selector.C(f))
		}
		columns = append(columns, aggregation...)
		selector.Select(columns...)
	}
	selector.GroupBy(selector.Columns(*_g.flds...)...)
	if err := selector.Err(); err != nil {
		return err
	}
	rows := &sql.Rows{}
	query, args := selector.Query()
	if err := _g.build.driver.Query(ctx, query, args, rows); err != nil {
		return err
	}
	defer rows.Close()
	return sql.ScanSlice(rows, v)
}
|
||||||
|
|
||||||
|
// AuthIdentitySelect is the builder for selecting fields of AuthIdentity entities.
type AuthIdentitySelect struct {
	// Embeds the query so Select can reuse its predicates and context.
	*AuthIdentityQuery
	selector
}
|
||||||
|
|
||||||
|
// Aggregate adds the given aggregation functions to the selector query.
func (_s *AuthIdentitySelect) Aggregate(fns ...AggregateFunc) *AuthIdentitySelect {
	_s.fns = append(_s.fns, fns...)
	return _s
}
|
||||||
|
|
||||||
|
// Scan applies the selector query and scans the result into the given value.
func (_s *AuthIdentitySelect) Scan(ctx context.Context, v any) error {
	ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect)
	if err := _s.prepareQuery(ctx); err != nil {
		return err
	}
	return scanWithInterceptors[*AuthIdentityQuery, *AuthIdentitySelect](ctx, _s.AuthIdentityQuery, _s, _s.inters, v)
}
|
||||||
|
|
||||||
|
// sqlScan executes the SELECT built from root and scans all rows into v.
// Aggregations either replace the selection (no fields chosen) or are
// appended to it.
func (_s *AuthIdentitySelect) sqlScan(ctx context.Context, root *AuthIdentityQuery, v any) error {
	selector := root.sqlQuery(ctx)
	aggregation := make([]string, 0, len(_s.fns))
	for _, fn := range _s.fns {
		aggregation = append(aggregation, fn(selector))
	}
	switch n := len(*_s.selector.flds); {
	case n == 0 && len(aggregation) > 0:
		selector.Select(aggregation...)
	case n != 0 && len(aggregation) > 0:
		selector.AppendSelect(aggregation...)
	}
	rows := &sql.Rows{}
	query, args := selector.Query()
	if err := _s.driver.Query(ctx, query, args, rows); err != nil {
		return err
	}
	defer rows.Close()
	return sql.ScanSlice(rows, v)
}
|
||||||
923
backend/ent/authidentity_update.go
Normal file
@@ -0,0 +1,923 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package ent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
"entgo.io/ent/schema/field"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/authidentity"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/authidentitychannel"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/identityadoptiondecision"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/user"
|
||||||
|
)
|
||||||
|
|
||||||
|
// AuthIdentityUpdate is the builder for updating AuthIdentity entities.
type AuthIdentityUpdate struct {
	config
	// hooks run around the mutation when Save is called.
	hooks []Hook
	// mutation accumulates the field/edge changes to apply.
	mutation *AuthIdentityMutation
}
|
||||||
|
|
||||||
|
// Where appends a list predicates to the AuthIdentityUpdate builder.
func (_u *AuthIdentityUpdate) Where(ps ...predicate.AuthIdentity) *AuthIdentityUpdate {
	_u.mutation.Where(ps...)
	return _u
}
|
||||||
|
|
||||||
|
// SetUpdatedAt sets the "updated_at" field.
func (_u *AuthIdentityUpdate) SetUpdatedAt(v time.Time) *AuthIdentityUpdate {
	_u.mutation.SetUpdatedAt(v)
	return _u
}
|
||||||
|
|
||||||
|
// SetUserID sets the "user_id" field.
func (_u *AuthIdentityUpdate) SetUserID(v int64) *AuthIdentityUpdate {
	_u.mutation.SetUserID(v)
	return _u
}
|
||||||
|
|
||||||
|
// SetNillableUserID sets the "user_id" field if the given value is not nil.
// A nil value leaves the field untouched.
func (_u *AuthIdentityUpdate) SetNillableUserID(v *int64) *AuthIdentityUpdate {
	if v != nil {
		_u.SetUserID(*v)
	}
	return _u
}
|
||||||
|
|
||||||
|
// SetProviderType sets the "provider_type" field.
func (_u *AuthIdentityUpdate) SetProviderType(v string) *AuthIdentityUpdate {
	_u.mutation.SetProviderType(v)
	return _u
}
|
||||||
|
|
||||||
|
// SetNillableProviderType sets the "provider_type" field if the given value is not nil.
// A nil value leaves the field untouched.
func (_u *AuthIdentityUpdate) SetNillableProviderType(v *string) *AuthIdentityUpdate {
	if v != nil {
		_u.SetProviderType(*v)
	}
	return _u
}
|
||||||
|
|
||||||
|
// SetProviderKey sets the "provider_key" field.
func (_u *AuthIdentityUpdate) SetProviderKey(v string) *AuthIdentityUpdate {
	_u.mutation.SetProviderKey(v)
	return _u
}
|
||||||
|
|
||||||
|
// SetNillableProviderKey sets the "provider_key" field if the given value is not nil.
// A nil value leaves the field untouched.
func (_u *AuthIdentityUpdate) SetNillableProviderKey(v *string) *AuthIdentityUpdate {
	if v != nil {
		_u.SetProviderKey(*v)
	}
	return _u
}
|
||||||
|
|
||||||
|
// SetProviderSubject sets the "provider_subject" field.
func (_u *AuthIdentityUpdate) SetProviderSubject(v string) *AuthIdentityUpdate {
	_u.mutation.SetProviderSubject(v)
	return _u
}
|
||||||
|
|
||||||
|
// SetNillableProviderSubject sets the "provider_subject" field if the given value is not nil.
// A nil value leaves the field untouched.
func (_u *AuthIdentityUpdate) SetNillableProviderSubject(v *string) *AuthIdentityUpdate {
	if v != nil {
		_u.SetProviderSubject(*v)
	}
	return _u
}
|
||||||
|
|
||||||
|
// SetVerifiedAt sets the "verified_at" field.
func (_u *AuthIdentityUpdate) SetVerifiedAt(v time.Time) *AuthIdentityUpdate {
	_u.mutation.SetVerifiedAt(v)
	return _u
}
|
||||||
|
|
||||||
|
// SetNillableVerifiedAt sets the "verified_at" field if the given value is not nil.
// A nil value leaves the field untouched.
func (_u *AuthIdentityUpdate) SetNillableVerifiedAt(v *time.Time) *AuthIdentityUpdate {
	if v != nil {
		_u.SetVerifiedAt(*v)
	}
	return _u
}
|
||||||
|
|
||||||
|
// ClearVerifiedAt clears the value of the "verified_at" field.
func (_u *AuthIdentityUpdate) ClearVerifiedAt() *AuthIdentityUpdate {
	_u.mutation.ClearVerifiedAt()
	return _u
}
|
||||||
|
|
||||||
|
// SetIssuer sets the "issuer" field.
func (_u *AuthIdentityUpdate) SetIssuer(v string) *AuthIdentityUpdate {
	_u.mutation.SetIssuer(v)
	return _u
}
|
||||||
|
|
||||||
|
// SetNillableIssuer sets the "issuer" field if the given value is not nil.
// A nil value leaves the field untouched.
func (_u *AuthIdentityUpdate) SetNillableIssuer(v *string) *AuthIdentityUpdate {
	if v != nil {
		_u.SetIssuer(*v)
	}
	return _u
}
|
||||||
|
|
||||||
|
// ClearIssuer clears the value of the "issuer" field.
func (_u *AuthIdentityUpdate) ClearIssuer() *AuthIdentityUpdate {
	_u.mutation.ClearIssuer()
	return _u
}
|
||||||
|
|
||||||
|
// SetMetadata sets the "metadata" field.
func (_u *AuthIdentityUpdate) SetMetadata(v map[string]interface{}) *AuthIdentityUpdate {
	_u.mutation.SetMetadata(v)
	return _u
}
|
||||||
|
|
||||||
|
// SetUser sets the "user" edge to the User entity.
// Equivalent to SetUserID(v.ID).
func (_u *AuthIdentityUpdate) SetUser(v *User) *AuthIdentityUpdate {
	return _u.SetUserID(v.ID)
}
|
||||||
|
|
||||||
|
// AddChannelIDs adds the "channels" edge to the AuthIdentityChannel entity by IDs.
func (_u *AuthIdentityUpdate) AddChannelIDs(ids ...int64) *AuthIdentityUpdate {
	_u.mutation.AddChannelIDs(ids...)
	return _u
}
|
||||||
|
|
||||||
|
// AddChannels adds the "channels" edges to the AuthIdentityChannel entity.
func (_u *AuthIdentityUpdate) AddChannels(v ...*AuthIdentityChannel) *AuthIdentityUpdate {
	ids := make([]int64, len(v))
	for i := range v {
		ids[i] = v[i].ID
	}
	return _u.AddChannelIDs(ids...)
}
|
||||||
|
|
||||||
|
// AddAdoptionDecisionIDs adds the "adoption_decisions" edge to the IdentityAdoptionDecision entity by IDs.
func (_u *AuthIdentityUpdate) AddAdoptionDecisionIDs(ids ...int64) *AuthIdentityUpdate {
	_u.mutation.AddAdoptionDecisionIDs(ids...)
	return _u
}
|
||||||
|
|
||||||
|
// AddAdoptionDecisions adds the "adoption_decisions" edges to the IdentityAdoptionDecision entity.
func (_u *AuthIdentityUpdate) AddAdoptionDecisions(v ...*IdentityAdoptionDecision) *AuthIdentityUpdate {
	ids := make([]int64, len(v))
	for i := range v {
		ids[i] = v[i].ID
	}
	return _u.AddAdoptionDecisionIDs(ids...)
}
|
||||||
|
|
||||||
|
// Mutation returns the AuthIdentityMutation object of the builder.
func (_u *AuthIdentityUpdate) Mutation() *AuthIdentityMutation {
	return _u.mutation
}
|
||||||
|
|
||||||
|
// ClearUser clears the "user" edge to the User entity.
func (_u *AuthIdentityUpdate) ClearUser() *AuthIdentityUpdate {
	_u.mutation.ClearUser()
	return _u
}
|
||||||
|
|
||||||
|
// ClearChannels clears all "channels" edges to the AuthIdentityChannel entity.
func (_u *AuthIdentityUpdate) ClearChannels() *AuthIdentityUpdate {
	_u.mutation.ClearChannels()
	return _u
}
|
||||||
|
|
||||||
|
// RemoveChannelIDs removes the "channels" edge to AuthIdentityChannel entities by IDs.
func (_u *AuthIdentityUpdate) RemoveChannelIDs(ids ...int64) *AuthIdentityUpdate {
	_u.mutation.RemoveChannelIDs(ids...)
	return _u
}
|
||||||
|
|
||||||
|
// RemoveChannels removes "channels" edges to AuthIdentityChannel entities.
func (_u *AuthIdentityUpdate) RemoveChannels(v ...*AuthIdentityChannel) *AuthIdentityUpdate {
	ids := make([]int64, len(v))
	for i := range v {
		ids[i] = v[i].ID
	}
	return _u.RemoveChannelIDs(ids...)
}
|
||||||
|
|
||||||
|
// ClearAdoptionDecisions clears all "adoption_decisions" edges to the IdentityAdoptionDecision entity.
func (_u *AuthIdentityUpdate) ClearAdoptionDecisions() *AuthIdentityUpdate {
	_u.mutation.ClearAdoptionDecisions()
	return _u
}
|
||||||
|
|
||||||
|
// RemoveAdoptionDecisionIDs removes the "adoption_decisions" edge to IdentityAdoptionDecision entities by IDs.
func (_u *AuthIdentityUpdate) RemoveAdoptionDecisionIDs(ids ...int64) *AuthIdentityUpdate {
	_u.mutation.RemoveAdoptionDecisionIDs(ids...)
	return _u
}
|
||||||
|
|
||||||
|
// RemoveAdoptionDecisions removes "adoption_decisions" edges to IdentityAdoptionDecision entities.
func (_u *AuthIdentityUpdate) RemoveAdoptionDecisions(v ...*IdentityAdoptionDecision) *AuthIdentityUpdate {
	ids := make([]int64, len(v))
	for i := range v {
		ids[i] = v[i].ID
	}
	return _u.RemoveAdoptionDecisionIDs(ids...)
}
|
||||||
|
|
||||||
|
// Save executes the query and returns the number of nodes affected by the update operation.
func (_u *AuthIdentityUpdate) Save(ctx context.Context) (int, error) {
	// Populate generated defaults (e.g. updated_at) before running hooks.
	_u.defaults()
	return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks)
}
|
||||||
|
|
||||||
|
// SaveX is like Save, but panics if an error occurs.
func (_u *AuthIdentityUpdate) SaveX(ctx context.Context) int {
	affected, err := _u.Save(ctx)
	if err != nil {
		panic(err)
	}
	return affected
}
|
||||||
|
|
||||||
|
// Exec executes the query.
func (_u *AuthIdentityUpdate) Exec(ctx context.Context) error {
	_, err := _u.Save(ctx)
	return err
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
func (_u *AuthIdentityUpdate) ExecX(ctx context.Context) {
	if err := _u.Exec(ctx); err != nil {
		panic(err)
	}
}
|
||||||
|
|
||||||
|
// defaults sets the default values of the builder before save.
// Only fills updated_at when the caller has not set it explicitly.
func (_u *AuthIdentityUpdate) defaults() {
	if _, ok := _u.mutation.UpdatedAt(); !ok {
		v := authidentity.UpdateDefaultUpdatedAt()
		_u.mutation.SetUpdatedAt(v)
	}
}
|
||||||
|
|
||||||
|
// check runs all checks and user-defined validators on the builder.
// It validates changed string fields and rejects clearing the required
// "user" edge without also setting a replacement.
func (_u *AuthIdentityUpdate) check() error {
	if v, ok := _u.mutation.ProviderType(); ok {
		if err := authidentity.ProviderTypeValidator(v); err != nil {
			return &ValidationError{Name: "provider_type", err: fmt.Errorf(`ent: validator failed for field "AuthIdentity.provider_type": %w`, err)}
		}
	}
	if v, ok := _u.mutation.ProviderKey(); ok {
		if err := authidentity.ProviderKeyValidator(v); err != nil {
			return &ValidationError{Name: "provider_key", err: fmt.Errorf(`ent: validator failed for field "AuthIdentity.provider_key": %w`, err)}
		}
	}
	if v, ok := _u.mutation.ProviderSubject(); ok {
		if err := authidentity.ProviderSubjectValidator(v); err != nil {
			return &ValidationError{Name: "provider_subject", err: fmt.Errorf(`ent: validator failed for field "AuthIdentity.provider_subject": %w`, err)}
		}
	}
	if _u.mutation.UserCleared() && len(_u.mutation.UserIDs()) > 0 {
		return errors.New(`ent: clearing a required unique edge "AuthIdentity.user"`)
	}
	return nil
}
|
||||||
|
|
||||||
|
// sqlSave validates the builder, translates the mutation into an
// sqlgraph.UpdateSpec (field sets/clears plus edge add/clear specs for
// user, channels and adoption_decisions), executes it, and returns the
// number of affected rows.
func (_u *AuthIdentityUpdate) sqlSave(ctx context.Context) (_node int, err error) {
	if err := _u.check(); err != nil {
		return _node, err
	}
	_spec := sqlgraph.NewUpdateSpec(authidentity.Table, authidentity.Columns, sqlgraph.NewFieldSpec(authidentity.FieldID, field.TypeInt64))
	if ps := _u.mutation.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	if value, ok := _u.mutation.UpdatedAt(); ok {
		_spec.SetField(authidentity.FieldUpdatedAt, field.TypeTime, value)
	}
	if value, ok := _u.mutation.ProviderType(); ok {
		_spec.SetField(authidentity.FieldProviderType, field.TypeString, value)
	}
	if value, ok := _u.mutation.ProviderKey(); ok {
		_spec.SetField(authidentity.FieldProviderKey, field.TypeString, value)
	}
	if value, ok := _u.mutation.ProviderSubject(); ok {
		_spec.SetField(authidentity.FieldProviderSubject, field.TypeString, value)
	}
	if value, ok := _u.mutation.VerifiedAt(); ok {
		_spec.SetField(authidentity.FieldVerifiedAt, field.TypeTime, value)
	}
	if _u.mutation.VerifiedAtCleared() {
		_spec.ClearField(authidentity.FieldVerifiedAt, field.TypeTime)
	}
	if value, ok := _u.mutation.Issuer(); ok {
		_spec.SetField(authidentity.FieldIssuer, field.TypeString, value)
	}
	if _u.mutation.IssuerCleared() {
		_spec.ClearField(authidentity.FieldIssuer, field.TypeString)
	}
	if value, ok := _u.mutation.Metadata(); ok {
		_spec.SetField(authidentity.FieldMetadata, field.TypeJSON, value)
	}
	// user edge (M2O, inverse): clear then re-add when a new ID is set.
	if _u.mutation.UserCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: true,
			Table:   authidentity.UserTable,
			Columns: []string{authidentity.UserColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64),
			},
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	if nodes := _u.mutation.UserIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: true,
			Table:   authidentity.UserTable,
			Columns: []string{authidentity.UserColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Add = append(_spec.Edges.Add, edge)
	}
	// channels edge (O2M): full clear, partial removals, then additions.
	if _u.mutation.ChannelsCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.O2M,
			Inverse: false,
			Table:   authidentity.ChannelsTable,
			Columns: []string{authidentity.ChannelsColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(authidentitychannel.FieldID, field.TypeInt64),
			},
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	if nodes := _u.mutation.RemovedChannelsIDs(); len(nodes) > 0 && !_u.mutation.ChannelsCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.O2M,
			Inverse: false,
			Table:   authidentity.ChannelsTable,
			Columns: []string{authidentity.ChannelsColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(authidentitychannel.FieldID, field.TypeInt64),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	if nodes := _u.mutation.ChannelsIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.O2M,
			Inverse: false,
			Table:   authidentity.ChannelsTable,
			Columns: []string{authidentity.ChannelsColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(authidentitychannel.FieldID, field.TypeInt64),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Add = append(_spec.Edges.Add, edge)
	}
	// adoption_decisions edge (O2M): same clear/remove/add pattern.
	if _u.mutation.AdoptionDecisionsCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.O2M,
			Inverse: false,
			Table:   authidentity.AdoptionDecisionsTable,
			Columns: []string{authidentity.AdoptionDecisionsColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(identityadoptiondecision.FieldID, field.TypeInt64),
			},
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	if nodes := _u.mutation.RemovedAdoptionDecisionsIDs(); len(nodes) > 0 && !_u.mutation.AdoptionDecisionsCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.O2M,
			Inverse: false,
			Table:   authidentity.AdoptionDecisionsTable,
			Columns: []string{authidentity.AdoptionDecisionsColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(identityadoptiondecision.FieldID, field.TypeInt64),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	if nodes := _u.mutation.AdoptionDecisionsIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.O2M,
			Inverse: false,
			Table:   authidentity.AdoptionDecisionsTable,
			Columns: []string{authidentity.AdoptionDecisionsColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(identityadoptiondecision.FieldID, field.TypeInt64),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Add = append(_spec.Edges.Add, edge)
	}
	if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil {
		if _, ok := err.(*sqlgraph.NotFoundError); ok {
			err = &NotFoundError{authidentity.Label}
		} else if sqlgraph.IsConstraintError(err) {
			err = &ConstraintError{msg: err.Error(), wrap: err}
		}
		return 0, err
	}
	_u.mutation.done = true
	return _node, nil
}
|
||||||
|
|
||||||
|
// AuthIdentityUpdateOne is the builder for updating a single AuthIdentity entity.
type AuthIdentityUpdateOne struct {
	config
	// fields optionally restricts the columns returned after the update.
	fields []string
	// hooks run around the mutation when Save is called.
	hooks []Hook
	// mutation accumulates the field/edge changes to apply.
	mutation *AuthIdentityMutation
}
|
||||||
|
|
||||||
|
// SetUpdatedAt sets the "updated_at" field.
func (_u *AuthIdentityUpdateOne) SetUpdatedAt(v time.Time) *AuthIdentityUpdateOne {
	_u.mutation.SetUpdatedAt(v)
	return _u
}
|
||||||
|
|
||||||
|
// SetUserID sets the "user_id" field.
func (_u *AuthIdentityUpdateOne) SetUserID(v int64) *AuthIdentityUpdateOne {
	_u.mutation.SetUserID(v)
	return _u
}
|
||||||
|
|
||||||
|
// SetNillableUserID sets the "user_id" field if the given value is not nil.
// A nil value leaves the field untouched.
func (_u *AuthIdentityUpdateOne) SetNillableUserID(v *int64) *AuthIdentityUpdateOne {
	if v != nil {
		_u.SetUserID(*v)
	}
	return _u
}
|
||||||
|
|
||||||
|
// SetProviderType sets the "provider_type" field.
func (_u *AuthIdentityUpdateOne) SetProviderType(v string) *AuthIdentityUpdateOne {
	_u.mutation.SetProviderType(v)
	return _u
}
|
||||||
|
|
||||||
|
// SetNillableProviderType sets the "provider_type" field if the given value is not nil.
// A nil value leaves the field untouched.
func (_u *AuthIdentityUpdateOne) SetNillableProviderType(v *string) *AuthIdentityUpdateOne {
	if v != nil {
		_u.SetProviderType(*v)
	}
	return _u
}
|
||||||
|
|
||||||
|
// SetProviderKey sets the "provider_key" field.
func (_u *AuthIdentityUpdateOne) SetProviderKey(v string) *AuthIdentityUpdateOne {
	_u.mutation.SetProviderKey(v)
	return _u
}
|
||||||
|
|
||||||
|
// SetNillableProviderKey sets the "provider_key" field if the given value is not nil.
// A nil value leaves the field untouched.
func (_u *AuthIdentityUpdateOne) SetNillableProviderKey(v *string) *AuthIdentityUpdateOne {
	if v != nil {
		_u.SetProviderKey(*v)
	}
	return _u
}
|
||||||
|
|
||||||
|
// SetProviderSubject sets the "provider_subject" field.
func (_u *AuthIdentityUpdateOne) SetProviderSubject(v string) *AuthIdentityUpdateOne {
	_u.mutation.SetProviderSubject(v)
	return _u
}
|
||||||
|
|
||||||
|
// SetNillableProviderSubject sets the "provider_subject" field if the given value is not nil.
// A nil value leaves the field untouched.
func (_u *AuthIdentityUpdateOne) SetNillableProviderSubject(v *string) *AuthIdentityUpdateOne {
	if v != nil {
		_u.SetProviderSubject(*v)
	}
	return _u
}
|
||||||
|
|
||||||
|
// SetVerifiedAt sets the "verified_at" field.
func (_u *AuthIdentityUpdateOne) SetVerifiedAt(v time.Time) *AuthIdentityUpdateOne {
	_u.mutation.SetVerifiedAt(v)
	return _u
}
|
||||||
|
|
||||||
|
// SetNillableVerifiedAt sets the "verified_at" field if the given value is not nil.
// A nil value leaves the field untouched.
func (_u *AuthIdentityUpdateOne) SetNillableVerifiedAt(v *time.Time) *AuthIdentityUpdateOne {
	if v != nil {
		_u.SetVerifiedAt(*v)
	}
	return _u
}
|
||||||
|
|
||||||
|
// ClearVerifiedAt clears the value of the "verified_at" field.
func (_u *AuthIdentityUpdateOne) ClearVerifiedAt() *AuthIdentityUpdateOne {
	_u.mutation.ClearVerifiedAt()
	return _u
}
|
||||||
|
|
||||||
|
// SetIssuer sets the "issuer" field.
func (_u *AuthIdentityUpdateOne) SetIssuer(v string) *AuthIdentityUpdateOne {
	_u.mutation.SetIssuer(v)
	return _u
}
|
||||||
|
|
||||||
|
// SetNillableIssuer sets the "issuer" field if the given value is not nil.
// A nil value leaves the field untouched.
func (_u *AuthIdentityUpdateOne) SetNillableIssuer(v *string) *AuthIdentityUpdateOne {
	if v != nil {
		_u.SetIssuer(*v)
	}
	return _u
}
|
||||||
|
|
||||||
|
// ClearIssuer clears the value of the "issuer" field.
func (_u *AuthIdentityUpdateOne) ClearIssuer() *AuthIdentityUpdateOne {
	_u.mutation.ClearIssuer()
	return _u
}
|
||||||
|
|
||||||
|
// SetMetadata sets the "metadata" field.
func (_u *AuthIdentityUpdateOne) SetMetadata(v map[string]interface{}) *AuthIdentityUpdateOne {
	_u.mutation.SetMetadata(v)
	return _u
}
|
||||||
|
|
||||||
|
// SetUser sets the "user" edge to the User entity.
// Equivalent to SetUserID(v.ID).
func (_u *AuthIdentityUpdateOne) SetUser(v *User) *AuthIdentityUpdateOne {
	return _u.SetUserID(v.ID)
}
|
||||||
|
|
||||||
|
// AddChannelIDs adds the "channels" edge to the AuthIdentityChannel entity by IDs.
func (_u *AuthIdentityUpdateOne) AddChannelIDs(ids ...int64) *AuthIdentityUpdateOne {
	_u.mutation.AddChannelIDs(ids...)
	return _u
}
|
||||||
|
|
||||||
|
// AddChannels adds the "channels" edges to the AuthIdentityChannel entity.
func (_u *AuthIdentityUpdateOne) AddChannels(v ...*AuthIdentityChannel) *AuthIdentityUpdateOne {
	ids := make([]int64, len(v))
	for i := range v {
		ids[i] = v[i].ID
	}
	return _u.AddChannelIDs(ids...)
}
|
||||||
|
|
||||||
|
// AddAdoptionDecisionIDs adds the "adoption_decisions" edge to the IdentityAdoptionDecision entity by IDs.
func (_u *AuthIdentityUpdateOne) AddAdoptionDecisionIDs(ids ...int64) *AuthIdentityUpdateOne {
	_u.mutation.AddAdoptionDecisionIDs(ids...)
	return _u
}
|
||||||
|
|
||||||
|
// AddAdoptionDecisions adds the "adoption_decisions" edges to the IdentityAdoptionDecision entity.
func (_u *AuthIdentityUpdateOne) AddAdoptionDecisions(v ...*IdentityAdoptionDecision) *AuthIdentityUpdateOne {
	ids := make([]int64, len(v))
	for i := range v {
		ids[i] = v[i].ID
	}
	return _u.AddAdoptionDecisionIDs(ids...)
}
|
||||||
|
|
||||||
|
// Mutation returns the AuthIdentityMutation object of the builder.
func (_u *AuthIdentityUpdateOne) Mutation() *AuthIdentityMutation {
	return _u.mutation
}
|
||||||
|
|
||||||
|
// ClearUser clears the "user" edge to the User entity.
func (_u *AuthIdentityUpdateOne) ClearUser() *AuthIdentityUpdateOne {
	_u.mutation.ClearUser()
	return _u
}
|
||||||
|
|
||||||
|
// ClearChannels clears all "channels" edges to the AuthIdentityChannel entity.
|
||||||
|
func (_u *AuthIdentityUpdateOne) ClearChannels() *AuthIdentityUpdateOne {
|
||||||
|
_u.mutation.ClearChannels()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// RemoveChannelIDs removes the "channels" edge to AuthIdentityChannel entities by IDs.
|
||||||
|
func (_u *AuthIdentityUpdateOne) RemoveChannelIDs(ids ...int64) *AuthIdentityUpdateOne {
|
||||||
|
_u.mutation.RemoveChannelIDs(ids...)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// RemoveChannels removes "channels" edges to AuthIdentityChannel entities.
|
||||||
|
func (_u *AuthIdentityUpdateOne) RemoveChannels(v ...*AuthIdentityChannel) *AuthIdentityUpdateOne {
|
||||||
|
ids := make([]int64, len(v))
|
||||||
|
for i := range v {
|
||||||
|
ids[i] = v[i].ID
|
||||||
|
}
|
||||||
|
return _u.RemoveChannelIDs(ids...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearAdoptionDecisions clears all "adoption_decisions" edges to the IdentityAdoptionDecision entity.
|
||||||
|
func (_u *AuthIdentityUpdateOne) ClearAdoptionDecisions() *AuthIdentityUpdateOne {
|
||||||
|
_u.mutation.ClearAdoptionDecisions()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// RemoveAdoptionDecisionIDs removes the "adoption_decisions" edge to IdentityAdoptionDecision entities by IDs.
|
||||||
|
func (_u *AuthIdentityUpdateOne) RemoveAdoptionDecisionIDs(ids ...int64) *AuthIdentityUpdateOne {
|
||||||
|
_u.mutation.RemoveAdoptionDecisionIDs(ids...)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// RemoveAdoptionDecisions removes "adoption_decisions" edges to IdentityAdoptionDecision entities.
|
||||||
|
func (_u *AuthIdentityUpdateOne) RemoveAdoptionDecisions(v ...*IdentityAdoptionDecision) *AuthIdentityUpdateOne {
|
||||||
|
ids := make([]int64, len(v))
|
||||||
|
for i := range v {
|
||||||
|
ids[i] = v[i].ID
|
||||||
|
}
|
||||||
|
return _u.RemoveAdoptionDecisionIDs(ids...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Where appends a list predicates to the AuthIdentityUpdate builder.
func (_u *AuthIdentityUpdateOne) Where(ps ...predicate.AuthIdentity) *AuthIdentityUpdateOne {
	_u.mutation.Where(ps...)
	return _u
}

// Select allows selecting one or more fields (columns) of the returned entity.
// The default is selecting all fields defined in the entity schema.
func (_u *AuthIdentityUpdateOne) Select(field string, fields ...string) *AuthIdentityUpdateOne {
	_u.fields = append([]string{field}, fields...)
	return _u
}
|
||||||
|
|
||||||
|
// Save executes the query and returns the updated AuthIdentity entity.
func (_u *AuthIdentityUpdateOne) Save(ctx context.Context) (*AuthIdentity, error) {
	_u.defaults()
	return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks)
}

// SaveX is like Save, but panics if an error occurs.
func (_u *AuthIdentityUpdateOne) SaveX(ctx context.Context) *AuthIdentity {
	node, err := _u.Save(ctx)
	if err != nil {
		panic(err)
	}
	return node
}

// Exec executes the query on the entity.
func (_u *AuthIdentityUpdateOne) Exec(ctx context.Context) error {
	_, err := _u.Save(ctx)
	return err
}

// ExecX is like Exec, but panics if an error occurs.
func (_u *AuthIdentityUpdateOne) ExecX(ctx context.Context) {
	if err := _u.Exec(ctx); err != nil {
		panic(err)
	}
}
|
||||||
|
|
||||||
|
// defaults sets the default values of the builder before save.
func (_u *AuthIdentityUpdateOne) defaults() {
	// Refresh "updated_at" automatically unless the caller set it explicitly.
	if _, ok := _u.mutation.UpdatedAt(); !ok {
		v := authidentity.UpdateDefaultUpdatedAt()
		_u.mutation.SetUpdatedAt(v)
	}
}
|
||||||
|
|
||||||
|
// check runs all checks and user-defined validators on the builder.
func (_u *AuthIdentityUpdateOne) check() error {
	if v, ok := _u.mutation.ProviderType(); ok {
		if err := authidentity.ProviderTypeValidator(v); err != nil {
			return &ValidationError{Name: "provider_type", err: fmt.Errorf(`ent: validator failed for field "AuthIdentity.provider_type": %w`, err)}
		}
	}
	if v, ok := _u.mutation.ProviderKey(); ok {
		if err := authidentity.ProviderKeyValidator(v); err != nil {
			return &ValidationError{Name: "provider_key", err: fmt.Errorf(`ent: validator failed for field "AuthIdentity.provider_key": %w`, err)}
		}
	}
	if v, ok := _u.mutation.ProviderSubject(); ok {
		if err := authidentity.ProviderSubjectValidator(v); err != nil {
			return &ValidationError{Name: "provider_subject", err: fmt.Errorf(`ent: validator failed for field "AuthIdentity.provider_subject": %w`, err)}
		}
	}
	// The "user" edge is required and unique; it may be cleared only when not re-set.
	if _u.mutation.UserCleared() && len(_u.mutation.UserIDs()) > 0 {
		return errors.New(`ent: clearing a required unique edge "AuthIdentity.user"`)
	}
	return nil
}
|
||||||
|
|
||||||
|
// sqlSave validates the builder, translates the mutation into an update spec,
// and executes it against the underlying sqlgraph driver, returning the
// updated entity.
func (_u *AuthIdentityUpdateOne) sqlSave(ctx context.Context) (_node *AuthIdentity, err error) {
	if err := _u.check(); err != nil {
		return _node, err
	}
	_spec := sqlgraph.NewUpdateSpec(authidentity.Table, authidentity.Columns, sqlgraph.NewFieldSpec(authidentity.FieldID, field.TypeInt64))
	id, ok := _u.mutation.ID()
	if !ok {
		return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "AuthIdentity.id" for update`)}
	}
	_spec.Node.ID.Value = id
	// Restrict the returned columns when Select was used; the ID column is
	// always included so the node can be identified.
	if fields := _u.fields; len(fields) > 0 {
		_spec.Node.Columns = make([]string, 0, len(fields))
		_spec.Node.Columns = append(_spec.Node.Columns, authidentity.FieldID)
		for _, f := range fields {
			if !authidentity.ValidColumn(f) {
				return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
			}
			if f != authidentity.FieldID {
				_spec.Node.Columns = append(_spec.Node.Columns, f)
			}
		}
	}
	if ps := _u.mutation.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	// Scalar field updates and clears.
	if value, ok := _u.mutation.UpdatedAt(); ok {
		_spec.SetField(authidentity.FieldUpdatedAt, field.TypeTime, value)
	}
	if value, ok := _u.mutation.ProviderType(); ok {
		_spec.SetField(authidentity.FieldProviderType, field.TypeString, value)
	}
	if value, ok := _u.mutation.ProviderKey(); ok {
		_spec.SetField(authidentity.FieldProviderKey, field.TypeString, value)
	}
	if value, ok := _u.mutation.ProviderSubject(); ok {
		_spec.SetField(authidentity.FieldProviderSubject, field.TypeString, value)
	}
	if value, ok := _u.mutation.VerifiedAt(); ok {
		_spec.SetField(authidentity.FieldVerifiedAt, field.TypeTime, value)
	}
	if _u.mutation.VerifiedAtCleared() {
		_spec.ClearField(authidentity.FieldVerifiedAt, field.TypeTime)
	}
	if value, ok := _u.mutation.Issuer(); ok {
		_spec.SetField(authidentity.FieldIssuer, field.TypeString, value)
	}
	if _u.mutation.IssuerCleared() {
		_spec.ClearField(authidentity.FieldIssuer, field.TypeString)
	}
	if value, ok := _u.mutation.Metadata(); ok {
		_spec.SetField(authidentity.FieldMetadata, field.TypeJSON, value)
	}
	// "user" edge (M2O, inverse): clear then add as recorded in the mutation.
	if _u.mutation.UserCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: true,
			Table:   authidentity.UserTable,
			Columns: []string{authidentity.UserColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64),
			},
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	if nodes := _u.mutation.UserIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: true,
			Table:   authidentity.UserTable,
			Columns: []string{authidentity.UserColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Add = append(_spec.Edges.Add, edge)
	}
	// "channels" edge (O2M): full clear, selective removal, then additions.
	if _u.mutation.ChannelsCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.O2M,
			Inverse: false,
			Table:   authidentity.ChannelsTable,
			Columns: []string{authidentity.ChannelsColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(authidentitychannel.FieldID, field.TypeInt64),
			},
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	if nodes := _u.mutation.RemovedChannelsIDs(); len(nodes) > 0 && !_u.mutation.ChannelsCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.O2M,
			Inverse: false,
			Table:   authidentity.ChannelsTable,
			Columns: []string{authidentity.ChannelsColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(authidentitychannel.FieldID, field.TypeInt64),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	if nodes := _u.mutation.ChannelsIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.O2M,
			Inverse: false,
			Table:   authidentity.ChannelsTable,
			Columns: []string{authidentity.ChannelsColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(authidentitychannel.FieldID, field.TypeInt64),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Add = append(_spec.Edges.Add, edge)
	}
	// "adoption_decisions" edge (O2M): same clear/remove/add sequence.
	if _u.mutation.AdoptionDecisionsCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.O2M,
			Inverse: false,
			Table:   authidentity.AdoptionDecisionsTable,
			Columns: []string{authidentity.AdoptionDecisionsColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(identityadoptiondecision.FieldID, field.TypeInt64),
			},
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	if nodes := _u.mutation.RemovedAdoptionDecisionsIDs(); len(nodes) > 0 && !_u.mutation.AdoptionDecisionsCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.O2M,
			Inverse: false,
			Table:   authidentity.AdoptionDecisionsTable,
			Columns: []string{authidentity.AdoptionDecisionsColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(identityadoptiondecision.FieldID, field.TypeInt64),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	if nodes := _u.mutation.AdoptionDecisionsIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.O2M,
			Inverse: false,
			Table:   authidentity.AdoptionDecisionsTable,
			Columns: []string{authidentity.AdoptionDecisionsColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(identityadoptiondecision.FieldID, field.TypeInt64),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Add = append(_spec.Edges.Add, edge)
	}
	_node = &AuthIdentity{config: _u.config}
	_spec.Assign = _node.assignValues
	_spec.ScanValues = _node.scanValues
	if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil {
		if _, ok := err.(*sqlgraph.NotFoundError); ok {
			err = &NotFoundError{authidentity.Label}
		} else if sqlgraph.IsConstraintError(err) {
			err = &ConstraintError{msg: err.Error(), wrap: err}
		}
		return nil, err
	}
	_u.mutation.done = true
	return _node, nil
}
|
||||||
228
backend/ent/authidentitychannel.go
Normal file
@@ -0,0 +1,228 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package ent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"entgo.io/ent"
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/authidentity"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/authidentitychannel"
|
||||||
|
)
|
||||||
|
|
||||||
|
// AuthIdentityChannel is the model entity for the AuthIdentityChannel schema.
type AuthIdentityChannel struct {
	config `json:"-"`
	// ID of the ent.
	ID int64 `json:"id,omitempty"`
	// CreatedAt holds the value of the "created_at" field.
	CreatedAt time.Time `json:"created_at,omitempty"`
	// UpdatedAt holds the value of the "updated_at" field.
	UpdatedAt time.Time `json:"updated_at,omitempty"`
	// IdentityID holds the value of the "identity_id" field.
	IdentityID int64 `json:"identity_id,omitempty"`
	// ProviderType holds the value of the "provider_type" field.
	ProviderType string `json:"provider_type,omitempty"`
	// ProviderKey holds the value of the "provider_key" field.
	ProviderKey string `json:"provider_key,omitempty"`
	// Channel holds the value of the "channel" field.
	Channel string `json:"channel,omitempty"`
	// ChannelAppID holds the value of the "channel_app_id" field.
	ChannelAppID string `json:"channel_app_id,omitempty"`
	// ChannelSubject holds the value of the "channel_subject" field.
	ChannelSubject string `json:"channel_subject,omitempty"`
	// Metadata holds the value of the "metadata" field.
	Metadata map[string]interface{} `json:"metadata,omitempty"`
	// Edges holds the relations/edges for other nodes in the graph.
	// The values are being populated by the AuthIdentityChannelQuery when eager-loading is set.
	Edges        AuthIdentityChannelEdges `json:"edges"`
	selectValues sql.SelectValues
}
|
||||||
|
|
||||||
|
// AuthIdentityChannelEdges holds the relations/edges for other nodes in the graph.
type AuthIdentityChannelEdges struct {
	// Identity holds the value of the identity edge.
	Identity *AuthIdentity `json:"identity,omitempty"`
	// loadedTypes holds the information for reporting if a
	// type was loaded (or requested) in eager-loading or not.
	loadedTypes [1]bool
}
|
||||||
|
|
||||||
|
// IdentityOrErr returns the Identity value or an error if the edge
// was not loaded in eager-loading, or loaded but was not found.
func (e AuthIdentityChannelEdges) IdentityOrErr() (*AuthIdentity, error) {
	if e.Identity != nil {
		return e.Identity, nil
	} else if e.loadedTypes[0] {
		// The edge was eager-loaded but no related row exists.
		return nil, &NotFoundError{label: authidentity.Label}
	}
	return nil, &NotLoadedError{edge: "identity"}
}
|
||||||
|
|
||||||
|
// scanValues returns the types for scanning values from sql.Rows.
func (*AuthIdentityChannel) scanValues(columns []string) ([]any, error) {
	values := make([]any, len(columns))
	for i := range columns {
		switch columns[i] {
		case authidentitychannel.FieldMetadata:
			// JSON columns are scanned as raw bytes and decoded in assignValues.
			values[i] = new([]byte)
		case authidentitychannel.FieldID, authidentitychannel.FieldIdentityID:
			values[i] = new(sql.NullInt64)
		case authidentitychannel.FieldProviderType, authidentitychannel.FieldProviderKey, authidentitychannel.FieldChannel, authidentitychannel.FieldChannelAppID, authidentitychannel.FieldChannelSubject:
			values[i] = new(sql.NullString)
		case authidentitychannel.FieldCreatedAt, authidentitychannel.FieldUpdatedAt:
			values[i] = new(sql.NullTime)
		default:
			values[i] = new(sql.UnknownType)
		}
	}
	return values, nil
}
|
||||||
|
|
||||||
|
// assignValues assigns the values that were returned from sql.Rows (after scanning)
// to the AuthIdentityChannel fields.
func (_m *AuthIdentityChannel) assignValues(columns []string, values []any) error {
	if m, n := len(values), len(columns); m < n {
		return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
	}
	for i := range columns {
		switch columns[i] {
		case authidentitychannel.FieldID:
			value, ok := values[i].(*sql.NullInt64)
			if !ok {
				return fmt.Errorf("unexpected type %T for field id", value)
			}
			_m.ID = int64(value.Int64)
		case authidentitychannel.FieldCreatedAt:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field created_at", values[i])
			} else if value.Valid {
				_m.CreatedAt = value.Time
			}
		case authidentitychannel.FieldUpdatedAt:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field updated_at", values[i])
			} else if value.Valid {
				_m.UpdatedAt = value.Time
			}
		case authidentitychannel.FieldIdentityID:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field identity_id", values[i])
			} else if value.Valid {
				_m.IdentityID = value.Int64
			}
		case authidentitychannel.FieldProviderType:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field provider_type", values[i])
			} else if value.Valid {
				_m.ProviderType = value.String
			}
		case authidentitychannel.FieldProviderKey:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field provider_key", values[i])
			} else if value.Valid {
				_m.ProviderKey = value.String
			}
		case authidentitychannel.FieldChannel:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field channel", values[i])
			} else if value.Valid {
				_m.Channel = value.String
			}
		case authidentitychannel.FieldChannelAppID:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field channel_app_id", values[i])
			} else if value.Valid {
				_m.ChannelAppID = value.String
			}
		case authidentitychannel.FieldChannelSubject:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field channel_subject", values[i])
			} else if value.Valid {
				_m.ChannelSubject = value.String
			}
		case authidentitychannel.FieldMetadata:
			if value, ok := values[i].(*[]byte); !ok {
				return fmt.Errorf("unexpected type %T for field metadata", values[i])
			} else if value != nil && len(*value) > 0 {
				if err := json.Unmarshal(*value, &_m.Metadata); err != nil {
					return fmt.Errorf("unmarshal field metadata: %w", err)
				}
			}
		default:
			// Columns selected via modifiers/order are kept for Value lookups.
			_m.selectValues.Set(columns[i], values[i])
		}
	}
	return nil
}
|
||||||
|
|
||||||
|
// Value returns the ent.Value that was dynamically selected and assigned to the AuthIdentityChannel.
// This includes values selected through modifiers, order, etc.
func (_m *AuthIdentityChannel) Value(name string) (ent.Value, error) {
	return _m.selectValues.Get(name)
}

// QueryIdentity queries the "identity" edge of the AuthIdentityChannel entity.
func (_m *AuthIdentityChannel) QueryIdentity() *AuthIdentityQuery {
	return NewAuthIdentityChannelClient(_m.config).QueryIdentity(_m)
}

// Update returns a builder for updating this AuthIdentityChannel.
// Note that you need to call AuthIdentityChannel.Unwrap() before calling this method if this AuthIdentityChannel
// was returned from a transaction, and the transaction was committed or rolled back.
func (_m *AuthIdentityChannel) Update() *AuthIdentityChannelUpdateOne {
	return NewAuthIdentityChannelClient(_m.config).UpdateOne(_m)
}

// Unwrap unwraps the AuthIdentityChannel entity that was returned from a transaction after it was closed,
// so that all future queries will be executed through the driver which created the transaction.
func (_m *AuthIdentityChannel) Unwrap() *AuthIdentityChannel {
	_tx, ok := _m.config.driver.(*txDriver)
	if !ok {
		panic("ent: AuthIdentityChannel is not a transactional entity")
	}
	_m.config.driver = _tx.drv
	return _m
}
|
||||||
|
|
||||||
|
// String implements the fmt.Stringer.
func (_m *AuthIdentityChannel) String() string {
	var builder strings.Builder
	builder.WriteString("AuthIdentityChannel(")
	builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID))
	builder.WriteString("created_at=")
	builder.WriteString(_m.CreatedAt.Format(time.ANSIC))
	builder.WriteString(", ")
	builder.WriteString("updated_at=")
	builder.WriteString(_m.UpdatedAt.Format(time.ANSIC))
	builder.WriteString(", ")
	builder.WriteString("identity_id=")
	builder.WriteString(fmt.Sprintf("%v", _m.IdentityID))
	builder.WriteString(", ")
	builder.WriteString("provider_type=")
	builder.WriteString(_m.ProviderType)
	builder.WriteString(", ")
	builder.WriteString("provider_key=")
	builder.WriteString(_m.ProviderKey)
	builder.WriteString(", ")
	builder.WriteString("channel=")
	builder.WriteString(_m.Channel)
	builder.WriteString(", ")
	builder.WriteString("channel_app_id=")
	builder.WriteString(_m.ChannelAppID)
	builder.WriteString(", ")
	builder.WriteString("channel_subject=")
	builder.WriteString(_m.ChannelSubject)
	builder.WriteString(", ")
	builder.WriteString("metadata=")
	builder.WriteString(fmt.Sprintf("%v", _m.Metadata))
	builder.WriteByte(')')
	return builder.String()
}
|
||||||
|
|
||||||
|
// AuthIdentityChannels is a parsable slice of AuthIdentityChannel.
type AuthIdentityChannels []*AuthIdentityChannel
|
||||||
153
backend/ent/authidentitychannel/authidentitychannel.go
Normal file
@@ -0,0 +1,153 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package authidentitychannel
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
	// Label holds the string label denoting the authidentitychannel type in the database.
	Label = "auth_identity_channel"
	// FieldID holds the string denoting the id field in the database.
	FieldID = "id"
	// FieldCreatedAt holds the string denoting the created_at field in the database.
	FieldCreatedAt = "created_at"
	// FieldUpdatedAt holds the string denoting the updated_at field in the database.
	FieldUpdatedAt = "updated_at"
	// FieldIdentityID holds the string denoting the identity_id field in the database.
	FieldIdentityID = "identity_id"
	// FieldProviderType holds the string denoting the provider_type field in the database.
	FieldProviderType = "provider_type"
	// FieldProviderKey holds the string denoting the provider_key field in the database.
	FieldProviderKey = "provider_key"
	// FieldChannel holds the string denoting the channel field in the database.
	FieldChannel = "channel"
	// FieldChannelAppID holds the string denoting the channel_app_id field in the database.
	FieldChannelAppID = "channel_app_id"
	// FieldChannelSubject holds the string denoting the channel_subject field in the database.
	FieldChannelSubject = "channel_subject"
	// FieldMetadata holds the string denoting the metadata field in the database.
	FieldMetadata = "metadata"
	// EdgeIdentity holds the string denoting the identity edge name in mutations.
	EdgeIdentity = "identity"
	// Table holds the table name of the authidentitychannel in the database.
	Table = "auth_identity_channels"
	// IdentityTable is the table that holds the identity relation/edge.
	IdentityTable = "auth_identity_channels"
	// IdentityInverseTable is the table name for the AuthIdentity entity.
	// It exists in this package in order to avoid circular dependency with the "authidentity" package.
	IdentityInverseTable = "auth_identities"
	// IdentityColumn is the table column denoting the identity relation/edge.
	IdentityColumn = "identity_id"
)
|
||||||
|
|
||||||
|
// Columns holds all SQL columns for authidentitychannel fields.
var Columns = []string{
	FieldID,
	FieldCreatedAt,
	FieldUpdatedAt,
	FieldIdentityID,
	FieldProviderType,
	FieldProviderKey,
	FieldChannel,
	FieldChannelAppID,
	FieldChannelSubject,
	FieldMetadata,
}
|
||||||
|
|
||||||
|
// ValidColumn reports if the column name is valid (part of the table columns).
func ValidColumn(column string) bool {
	for i := range Columns {
		if column == Columns[i] {
			return true
		}
	}
	return false
}
|
||||||
|
|
||||||
|
// The hooks below are populated by the runtime package from the schema
// definitions (ent convention: assigned at init time, not declared here).
var (
	// DefaultCreatedAt holds the default value on creation for the "created_at" field.
	DefaultCreatedAt func() time.Time
	// DefaultUpdatedAt holds the default value on creation for the "updated_at" field.
	DefaultUpdatedAt func() time.Time
	// UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field.
	UpdateDefaultUpdatedAt func() time.Time
	// ProviderTypeValidator is a validator for the "provider_type" field. It is called by the builders before save.
	ProviderTypeValidator func(string) error
	// ProviderKeyValidator is a validator for the "provider_key" field. It is called by the builders before save.
	ProviderKeyValidator func(string) error
	// ChannelValidator is a validator for the "channel" field. It is called by the builders before save.
	ChannelValidator func(string) error
	// ChannelAppIDValidator is a validator for the "channel_app_id" field. It is called by the builders before save.
	ChannelAppIDValidator func(string) error
	// ChannelSubjectValidator is a validator for the "channel_subject" field. It is called by the builders before save.
	ChannelSubjectValidator func(string) error
	// DefaultMetadata holds the default value on creation for the "metadata" field.
	DefaultMetadata func() map[string]interface{}
)
|
||||||
|
|
||||||
|
// OrderOption defines the ordering options for the AuthIdentityChannel queries.
type OrderOption func(*sql.Selector)

// ByID orders the results by the id field.
func ByID(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldID, opts...).ToFunc()
}

// ByCreatedAt orders the results by the created_at field.
func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
}

// ByUpdatedAt orders the results by the updated_at field.
func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc()
}

// ByIdentityID orders the results by the identity_id field.
func ByIdentityID(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldIdentityID, opts...).ToFunc()
}

// ByProviderType orders the results by the provider_type field.
func ByProviderType(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldProviderType, opts...).ToFunc()
}

// ByProviderKey orders the results by the provider_key field.
func ByProviderKey(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldProviderKey, opts...).ToFunc()
}

// ByChannel orders the results by the channel field.
func ByChannel(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldChannel, opts...).ToFunc()
}

// ByChannelAppID orders the results by the channel_app_id field.
func ByChannelAppID(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldChannelAppID, opts...).ToFunc()
}

// ByChannelSubject orders the results by the channel_subject field.
func ByChannelSubject(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldChannelSubject, opts...).ToFunc()
}

// ByIdentityField orders the results by identity field.
func ByIdentityField(field string, opts ...sql.OrderTermOption) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborTerms(s, newIdentityStep(), sql.OrderByField(field, opts...))
	}
}

// newIdentityStep builds the graph traversal step for the "identity" edge
// (M2O, inverse side).
func newIdentityStep() *sqlgraph.Step {
	return sqlgraph.NewStep(
		sqlgraph.From(Table, FieldID),
		sqlgraph.To(IdentityInverseTable, FieldID),
		sqlgraph.Edge(sqlgraph.M2O, true, IdentityTable, IdentityColumn),
	)
}
|
||||||
559
backend/ent/authidentitychannel/where.go
Normal file
@@ -0,0 +1,559 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package authidentitychannel
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ID filters vertices based on their ID field.
|
||||||
|
func ID(id int64) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldEQ(FieldID, id))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDEQ applies the EQ predicate on the ID field.
|
||||||
|
func IDEQ(id int64) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldEQ(FieldID, id))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDNEQ applies the NEQ predicate on the ID field.
|
||||||
|
func IDNEQ(id int64) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldNEQ(FieldID, id))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDIn applies the In predicate on the ID field.
|
||||||
|
func IDIn(ids ...int64) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldIn(FieldID, ids...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDNotIn applies the NotIn predicate on the ID field.
|
||||||
|
func IDNotIn(ids ...int64) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldNotIn(FieldID, ids...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDGT applies the GT predicate on the ID field.
|
||||||
|
func IDGT(id int64) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldGT(FieldID, id))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDGTE applies the GTE predicate on the ID field.
|
||||||
|
func IDGTE(id int64) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldGTE(FieldID, id))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDLT applies the LT predicate on the ID field.
|
||||||
|
func IDLT(id int64) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldLT(FieldID, id))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDLTE applies the LTE predicate on the ID field.
|
||||||
|
func IDLTE(id int64) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldLTE(FieldID, id))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ.
|
||||||
|
func CreatedAt(v time.Time) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldEQ(FieldCreatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ.
|
||||||
|
func UpdatedAt(v time.Time) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldEQ(FieldUpdatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IdentityID applies equality check predicate on the "identity_id" field. It's identical to IdentityIDEQ.
|
||||||
|
func IdentityID(v int64) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldEQ(FieldIdentityID, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProviderType applies equality check predicate on the "provider_type" field. It's identical to ProviderTypeEQ.
|
||||||
|
func ProviderType(v string) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldEQ(FieldProviderType, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProviderKey applies equality check predicate on the "provider_key" field. It's identical to ProviderKeyEQ.
|
||||||
|
func ProviderKey(v string) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldEQ(FieldProviderKey, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Channel applies equality check predicate on the "channel" field. It's identical to ChannelEQ.
|
||||||
|
func Channel(v string) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldEQ(FieldChannel, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ChannelAppID applies equality check predicate on the "channel_app_id" field. It's identical to ChannelAppIDEQ.
|
||||||
|
func ChannelAppID(v string) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldEQ(FieldChannelAppID, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ChannelSubject applies equality check predicate on the "channel_subject" field. It's identical to ChannelSubjectEQ.
|
||||||
|
func ChannelSubject(v string) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldEQ(FieldChannelSubject, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAtEQ applies the EQ predicate on the "created_at" field.
|
||||||
|
func CreatedAtEQ(v time.Time) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldEQ(FieldCreatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAtNEQ applies the NEQ predicate on the "created_at" field.
|
||||||
|
func CreatedAtNEQ(v time.Time) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldNEQ(FieldCreatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAtIn applies the In predicate on the "created_at" field.
|
||||||
|
func CreatedAtIn(vs ...time.Time) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldIn(FieldCreatedAt, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAtNotIn applies the NotIn predicate on the "created_at" field.
|
||||||
|
func CreatedAtNotIn(vs ...time.Time) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldNotIn(FieldCreatedAt, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAtGT applies the GT predicate on the "created_at" field.
|
||||||
|
func CreatedAtGT(v time.Time) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldGT(FieldCreatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAtGTE applies the GTE predicate on the "created_at" field.
|
||||||
|
func CreatedAtGTE(v time.Time) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldGTE(FieldCreatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAtLT applies the LT predicate on the "created_at" field.
|
||||||
|
func CreatedAtLT(v time.Time) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldLT(FieldCreatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAtLTE applies the LTE predicate on the "created_at" field.
|
||||||
|
func CreatedAtLTE(v time.Time) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldLTE(FieldCreatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedAtEQ applies the EQ predicate on the "updated_at" field.
|
||||||
|
func UpdatedAtEQ(v time.Time) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldEQ(FieldUpdatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field.
|
||||||
|
func UpdatedAtNEQ(v time.Time) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldNEQ(FieldUpdatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedAtIn applies the In predicate on the "updated_at" field.
|
||||||
|
func UpdatedAtIn(vs ...time.Time) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldIn(FieldUpdatedAt, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field.
|
||||||
|
func UpdatedAtNotIn(vs ...time.Time) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldNotIn(FieldUpdatedAt, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedAtGT applies the GT predicate on the "updated_at" field.
|
||||||
|
func UpdatedAtGT(v time.Time) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldGT(FieldUpdatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedAtGTE applies the GTE predicate on the "updated_at" field.
|
||||||
|
func UpdatedAtGTE(v time.Time) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldGTE(FieldUpdatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedAtLT applies the LT predicate on the "updated_at" field.
|
||||||
|
func UpdatedAtLT(v time.Time) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldLT(FieldUpdatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedAtLTE applies the LTE predicate on the "updated_at" field.
|
||||||
|
func UpdatedAtLTE(v time.Time) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldLTE(FieldUpdatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IdentityIDEQ applies the EQ predicate on the "identity_id" field.
|
||||||
|
func IdentityIDEQ(v int64) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldEQ(FieldIdentityID, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IdentityIDNEQ applies the NEQ predicate on the "identity_id" field.
|
||||||
|
func IdentityIDNEQ(v int64) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldNEQ(FieldIdentityID, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IdentityIDIn applies the In predicate on the "identity_id" field.
|
||||||
|
func IdentityIDIn(vs ...int64) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldIn(FieldIdentityID, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IdentityIDNotIn applies the NotIn predicate on the "identity_id" field.
|
||||||
|
func IdentityIDNotIn(vs ...int64) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldNotIn(FieldIdentityID, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProviderTypeEQ applies the EQ predicate on the "provider_type" field.
|
||||||
|
func ProviderTypeEQ(v string) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldEQ(FieldProviderType, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProviderTypeNEQ applies the NEQ predicate on the "provider_type" field.
|
||||||
|
func ProviderTypeNEQ(v string) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldNEQ(FieldProviderType, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProviderTypeIn applies the In predicate on the "provider_type" field.
|
||||||
|
func ProviderTypeIn(vs ...string) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldIn(FieldProviderType, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProviderTypeNotIn applies the NotIn predicate on the "provider_type" field.
|
||||||
|
func ProviderTypeNotIn(vs ...string) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldNotIn(FieldProviderType, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProviderTypeGT applies the GT predicate on the "provider_type" field.
|
||||||
|
func ProviderTypeGT(v string) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldGT(FieldProviderType, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProviderTypeGTE applies the GTE predicate on the "provider_type" field.
|
||||||
|
func ProviderTypeGTE(v string) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldGTE(FieldProviderType, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProviderTypeLT applies the LT predicate on the "provider_type" field.
|
||||||
|
func ProviderTypeLT(v string) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldLT(FieldProviderType, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProviderTypeLTE applies the LTE predicate on the "provider_type" field.
|
||||||
|
func ProviderTypeLTE(v string) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldLTE(FieldProviderType, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProviderTypeContains applies the Contains predicate on the "provider_type" field.
|
||||||
|
func ProviderTypeContains(v string) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldContains(FieldProviderType, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProviderTypeHasPrefix applies the HasPrefix predicate on the "provider_type" field.
|
||||||
|
func ProviderTypeHasPrefix(v string) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldHasPrefix(FieldProviderType, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProviderTypeHasSuffix applies the HasSuffix predicate on the "provider_type" field.
|
||||||
|
func ProviderTypeHasSuffix(v string) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldHasSuffix(FieldProviderType, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProviderTypeEqualFold applies the EqualFold predicate on the "provider_type" field.
|
||||||
|
func ProviderTypeEqualFold(v string) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldEqualFold(FieldProviderType, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProviderTypeContainsFold applies the ContainsFold predicate on the "provider_type" field.
|
||||||
|
func ProviderTypeContainsFold(v string) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldContainsFold(FieldProviderType, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProviderKeyEQ applies the EQ predicate on the "provider_key" field.
|
||||||
|
func ProviderKeyEQ(v string) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldEQ(FieldProviderKey, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProviderKeyNEQ applies the NEQ predicate on the "provider_key" field.
|
||||||
|
func ProviderKeyNEQ(v string) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldNEQ(FieldProviderKey, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProviderKeyIn applies the In predicate on the "provider_key" field.
|
||||||
|
func ProviderKeyIn(vs ...string) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldIn(FieldProviderKey, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProviderKeyNotIn applies the NotIn predicate on the "provider_key" field.
|
||||||
|
func ProviderKeyNotIn(vs ...string) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldNotIn(FieldProviderKey, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProviderKeyGT applies the GT predicate on the "provider_key" field.
|
||||||
|
func ProviderKeyGT(v string) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldGT(FieldProviderKey, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProviderKeyGTE applies the GTE predicate on the "provider_key" field.
|
||||||
|
func ProviderKeyGTE(v string) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldGTE(FieldProviderKey, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProviderKeyLT applies the LT predicate on the "provider_key" field.
|
||||||
|
func ProviderKeyLT(v string) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldLT(FieldProviderKey, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProviderKeyLTE applies the LTE predicate on the "provider_key" field.
|
||||||
|
func ProviderKeyLTE(v string) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldLTE(FieldProviderKey, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProviderKeyContains applies the Contains predicate on the "provider_key" field.
|
||||||
|
func ProviderKeyContains(v string) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldContains(FieldProviderKey, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProviderKeyHasPrefix applies the HasPrefix predicate on the "provider_key" field.
|
||||||
|
func ProviderKeyHasPrefix(v string) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldHasPrefix(FieldProviderKey, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProviderKeyHasSuffix applies the HasSuffix predicate on the "provider_key" field.
|
||||||
|
func ProviderKeyHasSuffix(v string) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldHasSuffix(FieldProviderKey, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProviderKeyEqualFold applies the EqualFold predicate on the "provider_key" field.
|
||||||
|
func ProviderKeyEqualFold(v string) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldEqualFold(FieldProviderKey, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProviderKeyContainsFold applies the ContainsFold predicate on the "provider_key" field.
|
||||||
|
func ProviderKeyContainsFold(v string) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldContainsFold(FieldProviderKey, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ChannelEQ applies the EQ predicate on the "channel" field.
|
||||||
|
func ChannelEQ(v string) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldEQ(FieldChannel, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ChannelNEQ applies the NEQ predicate on the "channel" field.
|
||||||
|
func ChannelNEQ(v string) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldNEQ(FieldChannel, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ChannelIn applies the In predicate on the "channel" field.
|
||||||
|
func ChannelIn(vs ...string) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldIn(FieldChannel, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ChannelNotIn applies the NotIn predicate on the "channel" field.
|
||||||
|
func ChannelNotIn(vs ...string) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldNotIn(FieldChannel, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ChannelGT applies the GT predicate on the "channel" field.
|
||||||
|
func ChannelGT(v string) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldGT(FieldChannel, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ChannelGTE applies the GTE predicate on the "channel" field.
|
||||||
|
func ChannelGTE(v string) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldGTE(FieldChannel, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ChannelLT applies the LT predicate on the "channel" field.
|
||||||
|
func ChannelLT(v string) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldLT(FieldChannel, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ChannelLTE applies the LTE predicate on the "channel" field.
|
||||||
|
func ChannelLTE(v string) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldLTE(FieldChannel, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ChannelContains applies the Contains predicate on the "channel" field.
|
||||||
|
func ChannelContains(v string) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldContains(FieldChannel, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ChannelHasPrefix applies the HasPrefix predicate on the "channel" field.
|
||||||
|
func ChannelHasPrefix(v string) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldHasPrefix(FieldChannel, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ChannelHasSuffix applies the HasSuffix predicate on the "channel" field.
|
||||||
|
func ChannelHasSuffix(v string) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldHasSuffix(FieldChannel, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ChannelEqualFold applies the EqualFold predicate on the "channel" field.
|
||||||
|
func ChannelEqualFold(v string) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldEqualFold(FieldChannel, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ChannelContainsFold applies the ContainsFold predicate on the "channel" field.
|
||||||
|
func ChannelContainsFold(v string) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldContainsFold(FieldChannel, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ChannelAppIDEQ applies the EQ predicate on the "channel_app_id" field.
|
||||||
|
func ChannelAppIDEQ(v string) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldEQ(FieldChannelAppID, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ChannelAppIDNEQ applies the NEQ predicate on the "channel_app_id" field.
|
||||||
|
func ChannelAppIDNEQ(v string) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldNEQ(FieldChannelAppID, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ChannelAppIDIn applies the In predicate on the "channel_app_id" field.
|
||||||
|
func ChannelAppIDIn(vs ...string) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldIn(FieldChannelAppID, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ChannelAppIDNotIn applies the NotIn predicate on the "channel_app_id" field.
|
||||||
|
func ChannelAppIDNotIn(vs ...string) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldNotIn(FieldChannelAppID, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ChannelAppIDGT applies the GT predicate on the "channel_app_id" field.
|
||||||
|
func ChannelAppIDGT(v string) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldGT(FieldChannelAppID, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ChannelAppIDGTE applies the GTE predicate on the "channel_app_id" field.
|
||||||
|
func ChannelAppIDGTE(v string) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldGTE(FieldChannelAppID, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ChannelAppIDLT applies the LT predicate on the "channel_app_id" field.
|
||||||
|
func ChannelAppIDLT(v string) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldLT(FieldChannelAppID, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ChannelAppIDLTE applies the LTE predicate on the "channel_app_id" field.
|
||||||
|
func ChannelAppIDLTE(v string) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldLTE(FieldChannelAppID, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ChannelAppIDContains applies the Contains predicate on the "channel_app_id" field.
|
||||||
|
func ChannelAppIDContains(v string) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldContains(FieldChannelAppID, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ChannelAppIDHasPrefix applies the HasPrefix predicate on the "channel_app_id" field.
|
||||||
|
func ChannelAppIDHasPrefix(v string) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldHasPrefix(FieldChannelAppID, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ChannelAppIDHasSuffix applies the HasSuffix predicate on the "channel_app_id" field.
|
||||||
|
func ChannelAppIDHasSuffix(v string) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldHasSuffix(FieldChannelAppID, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ChannelAppIDEqualFold applies the EqualFold predicate on the "channel_app_id" field.
|
||||||
|
func ChannelAppIDEqualFold(v string) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldEqualFold(FieldChannelAppID, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ChannelAppIDContainsFold applies the ContainsFold predicate on the "channel_app_id" field.
|
||||||
|
func ChannelAppIDContainsFold(v string) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldContainsFold(FieldChannelAppID, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ChannelSubjectEQ applies the EQ predicate on the "channel_subject" field.
|
||||||
|
func ChannelSubjectEQ(v string) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldEQ(FieldChannelSubject, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ChannelSubjectNEQ applies the NEQ predicate on the "channel_subject" field.
|
||||||
|
func ChannelSubjectNEQ(v string) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldNEQ(FieldChannelSubject, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ChannelSubjectIn applies the In predicate on the "channel_subject" field.
|
||||||
|
func ChannelSubjectIn(vs ...string) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldIn(FieldChannelSubject, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ChannelSubjectNotIn applies the NotIn predicate on the "channel_subject" field.
|
||||||
|
func ChannelSubjectNotIn(vs ...string) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldNotIn(FieldChannelSubject, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ChannelSubjectGT applies the GT predicate on the "channel_subject" field.
|
||||||
|
func ChannelSubjectGT(v string) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldGT(FieldChannelSubject, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ChannelSubjectGTE applies the GTE predicate on the "channel_subject" field.
|
||||||
|
func ChannelSubjectGTE(v string) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldGTE(FieldChannelSubject, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ChannelSubjectLT applies the LT predicate on the "channel_subject" field.
|
||||||
|
func ChannelSubjectLT(v string) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldLT(FieldChannelSubject, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ChannelSubjectLTE applies the LTE predicate on the "channel_subject" field.
|
||||||
|
func ChannelSubjectLTE(v string) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldLTE(FieldChannelSubject, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ChannelSubjectContains applies the Contains predicate on the "channel_subject" field.
|
||||||
|
func ChannelSubjectContains(v string) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldContains(FieldChannelSubject, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ChannelSubjectHasPrefix applies the HasPrefix predicate on the "channel_subject" field.
|
||||||
|
func ChannelSubjectHasPrefix(v string) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldHasPrefix(FieldChannelSubject, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ChannelSubjectHasSuffix applies the HasSuffix predicate on the "channel_subject" field.
|
||||||
|
func ChannelSubjectHasSuffix(v string) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldHasSuffix(FieldChannelSubject, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ChannelSubjectEqualFold applies the EqualFold predicate on the "channel_subject" field.
|
||||||
|
func ChannelSubjectEqualFold(v string) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldEqualFold(FieldChannelSubject, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ChannelSubjectContainsFold applies the ContainsFold predicate on the "channel_subject" field.
|
||||||
|
func ChannelSubjectContainsFold(v string) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.FieldContainsFold(FieldChannelSubject, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// HasIdentity applies the HasEdge predicate on the "identity" edge.
|
||||||
|
func HasIdentity() predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(func(s *sql.Selector) {
|
||||||
|
step := sqlgraph.NewStep(
|
||||||
|
sqlgraph.From(Table, FieldID),
|
||||||
|
sqlgraph.Edge(sqlgraph.M2O, true, IdentityTable, IdentityColumn),
|
||||||
|
)
|
||||||
|
sqlgraph.HasNeighbors(s, step)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// HasIdentityWith applies the HasEdge predicate on the "identity" edge with a given conditions (other predicates).
|
||||||
|
func HasIdentityWith(preds ...predicate.AuthIdentity) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(func(s *sql.Selector) {
|
||||||
|
step := newIdentityStep()
|
||||||
|
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
|
||||||
|
for _, p := range preds {
|
||||||
|
p(s)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// And groups predicates with the AND operator between them.
|
||||||
|
func And(predicates ...predicate.AuthIdentityChannel) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.AndPredicates(predicates...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Or groups predicates with the OR operator between them.
|
||||||
|
func Or(predicates ...predicate.AuthIdentityChannel) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.OrPredicates(predicates...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Not applies the not operator on the given predicate.
|
||||||
|
func Not(p predicate.AuthIdentityChannel) predicate.AuthIdentityChannel {
|
||||||
|
return predicate.AuthIdentityChannel(sql.NotPredicates(p))
|
||||||
|
}
|
||||||
932
backend/ent/authidentitychannel_create.go
Normal file
@@ -0,0 +1,932 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package ent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
"entgo.io/ent/schema/field"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/authidentity"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/authidentitychannel"
|
||||||
|
)
|
||||||
|
|
||||||
|
// AuthIdentityChannelCreate is the builder for creating a AuthIdentityChannel entity.
|
||||||
|
type AuthIdentityChannelCreate struct {
|
||||||
|
config
|
||||||
|
mutation *AuthIdentityChannelMutation
|
||||||
|
hooks []Hook
|
||||||
|
conflict []sql.ConflictOption
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetCreatedAt sets the "created_at" field.
|
||||||
|
func (_c *AuthIdentityChannelCreate) SetCreatedAt(v time.Time) *AuthIdentityChannelCreate {
|
||||||
|
_c.mutation.SetCreatedAt(v)
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableCreatedAt sets the "created_at" field if the given value is not nil.
|
||||||
|
func (_c *AuthIdentityChannelCreate) SetNillableCreatedAt(v *time.Time) *AuthIdentityChannelCreate {
|
||||||
|
if v != nil {
|
||||||
|
_c.SetCreatedAt(*v)
|
||||||
|
}
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetUpdatedAt sets the "updated_at" field.
|
||||||
|
func (_c *AuthIdentityChannelCreate) SetUpdatedAt(v time.Time) *AuthIdentityChannelCreate {
|
||||||
|
_c.mutation.SetUpdatedAt(v)
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil.
|
||||||
|
func (_c *AuthIdentityChannelCreate) SetNillableUpdatedAt(v *time.Time) *AuthIdentityChannelCreate {
|
||||||
|
if v != nil {
|
||||||
|
_c.SetUpdatedAt(*v)
|
||||||
|
}
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetIdentityID sets the "identity_id" field.
|
||||||
|
func (_c *AuthIdentityChannelCreate) SetIdentityID(v int64) *AuthIdentityChannelCreate {
|
||||||
|
_c.mutation.SetIdentityID(v)
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetProviderType sets the "provider_type" field.
|
||||||
|
func (_c *AuthIdentityChannelCreate) SetProviderType(v string) *AuthIdentityChannelCreate {
|
||||||
|
_c.mutation.SetProviderType(v)
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetProviderKey sets the "provider_key" field.
|
||||||
|
func (_c *AuthIdentityChannelCreate) SetProviderKey(v string) *AuthIdentityChannelCreate {
|
||||||
|
_c.mutation.SetProviderKey(v)
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetChannel sets the "channel" field.
|
||||||
|
func (_c *AuthIdentityChannelCreate) SetChannel(v string) *AuthIdentityChannelCreate {
|
||||||
|
_c.mutation.SetChannel(v)
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetChannelAppID sets the "channel_app_id" field.
|
||||||
|
func (_c *AuthIdentityChannelCreate) SetChannelAppID(v string) *AuthIdentityChannelCreate {
|
||||||
|
_c.mutation.SetChannelAppID(v)
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetChannelSubject sets the "channel_subject" field.
|
||||||
|
func (_c *AuthIdentityChannelCreate) SetChannelSubject(v string) *AuthIdentityChannelCreate {
|
||||||
|
_c.mutation.SetChannelSubject(v)
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetMetadata sets the "metadata" field.
|
||||||
|
func (_c *AuthIdentityChannelCreate) SetMetadata(v map[string]interface{}) *AuthIdentityChannelCreate {
|
||||||
|
_c.mutation.SetMetadata(v)
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetIdentity sets the "identity" edge to the AuthIdentity entity.
|
||||||
|
func (_c *AuthIdentityChannelCreate) SetIdentity(v *AuthIdentity) *AuthIdentityChannelCreate {
|
||||||
|
return _c.SetIdentityID(v.ID)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Mutation returns the AuthIdentityChannelMutation object of the builder.
|
||||||
|
func (_c *AuthIdentityChannelCreate) Mutation() *AuthIdentityChannelMutation {
|
||||||
|
return _c.mutation
|
||||||
|
}
|
||||||
|
|
||||||
|
// Save creates the AuthIdentityChannel in the database.
|
||||||
|
func (_c *AuthIdentityChannelCreate) Save(ctx context.Context) (*AuthIdentityChannel, error) {
|
||||||
|
_c.defaults()
|
||||||
|
return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SaveX calls Save and panics if Save returns an error.
|
||||||
|
func (_c *AuthIdentityChannelCreate) SaveX(ctx context.Context) *AuthIdentityChannel {
|
||||||
|
v, err := _c.Save(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exec executes the query.
|
||||||
|
func (_c *AuthIdentityChannelCreate) Exec(ctx context.Context) error {
|
||||||
|
_, err := _c.Save(ctx)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
|
||||||
|
func (_c *AuthIdentityChannelCreate) ExecX(ctx context.Context) {
|
||||||
|
if err := _c.Exec(ctx); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// defaults sets the default values of the builder before save.
|
||||||
|
func (_c *AuthIdentityChannelCreate) defaults() {
|
||||||
|
if _, ok := _c.mutation.CreatedAt(); !ok {
|
||||||
|
v := authidentitychannel.DefaultCreatedAt()
|
||||||
|
_c.mutation.SetCreatedAt(v)
|
||||||
|
}
|
||||||
|
if _, ok := _c.mutation.UpdatedAt(); !ok {
|
||||||
|
v := authidentitychannel.DefaultUpdatedAt()
|
||||||
|
_c.mutation.SetUpdatedAt(v)
|
||||||
|
}
|
||||||
|
if _, ok := _c.mutation.Metadata(); !ok {
|
||||||
|
v := authidentitychannel.DefaultMetadata()
|
||||||
|
_c.mutation.SetMetadata(v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// check runs all checks and user-defined validators on the builder.
|
||||||
|
func (_c *AuthIdentityChannelCreate) check() error {
|
||||||
|
if _, ok := _c.mutation.CreatedAt(); !ok {
|
||||||
|
return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "AuthIdentityChannel.created_at"`)}
|
||||||
|
}
|
||||||
|
if _, ok := _c.mutation.UpdatedAt(); !ok {
|
||||||
|
return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "AuthIdentityChannel.updated_at"`)}
|
||||||
|
}
|
||||||
|
if _, ok := _c.mutation.IdentityID(); !ok {
|
||||||
|
return &ValidationError{Name: "identity_id", err: errors.New(`ent: missing required field "AuthIdentityChannel.identity_id"`)}
|
||||||
|
}
|
||||||
|
if _, ok := _c.mutation.ProviderType(); !ok {
|
||||||
|
return &ValidationError{Name: "provider_type", err: errors.New(`ent: missing required field "AuthIdentityChannel.provider_type"`)}
|
||||||
|
}
|
||||||
|
if v, ok := _c.mutation.ProviderType(); ok {
|
||||||
|
if err := authidentitychannel.ProviderTypeValidator(v); err != nil {
|
||||||
|
return &ValidationError{Name: "provider_type", err: fmt.Errorf(`ent: validator failed for field "AuthIdentityChannel.provider_type": %w`, err)}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if _, ok := _c.mutation.ProviderKey(); !ok {
|
||||||
|
return &ValidationError{Name: "provider_key", err: errors.New(`ent: missing required field "AuthIdentityChannel.provider_key"`)}
|
||||||
|
}
|
||||||
|
if v, ok := _c.mutation.ProviderKey(); ok {
|
||||||
|
if err := authidentitychannel.ProviderKeyValidator(v); err != nil {
|
||||||
|
return &ValidationError{Name: "provider_key", err: fmt.Errorf(`ent: validator failed for field "AuthIdentityChannel.provider_key": %w`, err)}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if _, ok := _c.mutation.Channel(); !ok {
|
||||||
|
return &ValidationError{Name: "channel", err: errors.New(`ent: missing required field "AuthIdentityChannel.channel"`)}
|
||||||
|
}
|
||||||
|
if v, ok := _c.mutation.Channel(); ok {
|
||||||
|
if err := authidentitychannel.ChannelValidator(v); err != nil {
|
||||||
|
return &ValidationError{Name: "channel", err: fmt.Errorf(`ent: validator failed for field "AuthIdentityChannel.channel": %w`, err)}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if _, ok := _c.mutation.ChannelAppID(); !ok {
|
||||||
|
return &ValidationError{Name: "channel_app_id", err: errors.New(`ent: missing required field "AuthIdentityChannel.channel_app_id"`)}
|
||||||
|
}
|
||||||
|
if v, ok := _c.mutation.ChannelAppID(); ok {
|
||||||
|
if err := authidentitychannel.ChannelAppIDValidator(v); err != nil {
|
||||||
|
return &ValidationError{Name: "channel_app_id", err: fmt.Errorf(`ent: validator failed for field "AuthIdentityChannel.channel_app_id": %w`, err)}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if _, ok := _c.mutation.ChannelSubject(); !ok {
|
||||||
|
return &ValidationError{Name: "channel_subject", err: errors.New(`ent: missing required field "AuthIdentityChannel.channel_subject"`)}
|
||||||
|
}
|
||||||
|
if v, ok := _c.mutation.ChannelSubject(); ok {
|
||||||
|
if err := authidentitychannel.ChannelSubjectValidator(v); err != nil {
|
||||||
|
return &ValidationError{Name: "channel_subject", err: fmt.Errorf(`ent: validator failed for field "AuthIdentityChannel.channel_subject": %w`, err)}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if _, ok := _c.mutation.Metadata(); !ok {
|
||||||
|
return &ValidationError{Name: "metadata", err: errors.New(`ent: missing required field "AuthIdentityChannel.metadata"`)}
|
||||||
|
}
|
||||||
|
if len(_c.mutation.IdentityIDs()) == 0 {
|
||||||
|
return &ValidationError{Name: "identity", err: errors.New(`ent: missing required edge "AuthIdentityChannel.identity"`)}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_c *AuthIdentityChannelCreate) sqlSave(ctx context.Context) (*AuthIdentityChannel, error) {
|
||||||
|
if err := _c.check(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
_node, _spec := _c.createSpec()
|
||||||
|
if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil {
|
||||||
|
if sqlgraph.IsConstraintError(err) {
|
||||||
|
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||||
|
}
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
id := _spec.ID.Value.(int64)
|
||||||
|
_node.ID = int64(id)
|
||||||
|
_c.mutation.id = &_node.ID
|
||||||
|
_c.mutation.done = true
|
||||||
|
return _node, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_c *AuthIdentityChannelCreate) createSpec() (*AuthIdentityChannel, *sqlgraph.CreateSpec) {
|
||||||
|
var (
|
||||||
|
_node = &AuthIdentityChannel{config: _c.config}
|
||||||
|
_spec = sqlgraph.NewCreateSpec(authidentitychannel.Table, sqlgraph.NewFieldSpec(authidentitychannel.FieldID, field.TypeInt64))
|
||||||
|
)
|
||||||
|
_spec.OnConflict = _c.conflict
|
||||||
|
if value, ok := _c.mutation.CreatedAt(); ok {
|
||||||
|
_spec.SetField(authidentitychannel.FieldCreatedAt, field.TypeTime, value)
|
||||||
|
_node.CreatedAt = value
|
||||||
|
}
|
||||||
|
if value, ok := _c.mutation.UpdatedAt(); ok {
|
||||||
|
_spec.SetField(authidentitychannel.FieldUpdatedAt, field.TypeTime, value)
|
||||||
|
_node.UpdatedAt = value
|
||||||
|
}
|
||||||
|
if value, ok := _c.mutation.ProviderType(); ok {
|
||||||
|
_spec.SetField(authidentitychannel.FieldProviderType, field.TypeString, value)
|
||||||
|
_node.ProviderType = value
|
||||||
|
}
|
||||||
|
if value, ok := _c.mutation.ProviderKey(); ok {
|
||||||
|
_spec.SetField(authidentitychannel.FieldProviderKey, field.TypeString, value)
|
||||||
|
_node.ProviderKey = value
|
||||||
|
}
|
||||||
|
if value, ok := _c.mutation.Channel(); ok {
|
||||||
|
_spec.SetField(authidentitychannel.FieldChannel, field.TypeString, value)
|
||||||
|
_node.Channel = value
|
||||||
|
}
|
||||||
|
if value, ok := _c.mutation.ChannelAppID(); ok {
|
||||||
|
_spec.SetField(authidentitychannel.FieldChannelAppID, field.TypeString, value)
|
||||||
|
_node.ChannelAppID = value
|
||||||
|
}
|
||||||
|
if value, ok := _c.mutation.ChannelSubject(); ok {
|
||||||
|
_spec.SetField(authidentitychannel.FieldChannelSubject, field.TypeString, value)
|
||||||
|
_node.ChannelSubject = value
|
||||||
|
}
|
||||||
|
if value, ok := _c.mutation.Metadata(); ok {
|
||||||
|
_spec.SetField(authidentitychannel.FieldMetadata, field.TypeJSON, value)
|
||||||
|
_node.Metadata = value
|
||||||
|
}
|
||||||
|
if nodes := _c.mutation.IdentityIDs(); len(nodes) > 0 {
|
||||||
|
edge := &sqlgraph.EdgeSpec{
|
||||||
|
Rel: sqlgraph.M2O,
|
||||||
|
Inverse: true,
|
||||||
|
Table: authidentitychannel.IdentityTable,
|
||||||
|
Columns: []string{authidentitychannel.IdentityColumn},
|
||||||
|
Bidi: false,
|
||||||
|
Target: &sqlgraph.EdgeTarget{
|
||||||
|
IDSpec: sqlgraph.NewFieldSpec(authidentity.FieldID, field.TypeInt64),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, k := range nodes {
|
||||||
|
edge.Target.Nodes = append(edge.Target.Nodes, k)
|
||||||
|
}
|
||||||
|
_node.IdentityID = nodes[0]
|
||||||
|
_spec.Edges = append(_spec.Edges, edge)
|
||||||
|
}
|
||||||
|
return _node, _spec
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause
|
||||||
|
// of the `INSERT` statement. For example:
|
||||||
|
//
|
||||||
|
// client.AuthIdentityChannel.Create().
|
||||||
|
// SetCreatedAt(v).
|
||||||
|
// OnConflict(
|
||||||
|
// // Update the row with the new values
|
||||||
|
// // the was proposed for insertion.
|
||||||
|
// sql.ResolveWithNewValues(),
|
||||||
|
// ).
|
||||||
|
// // Override some of the fields with custom
|
||||||
|
// // update values.
|
||||||
|
// Update(func(u *ent.AuthIdentityChannelUpsert) {
|
||||||
|
// SetCreatedAt(v+v).
|
||||||
|
// }).
|
||||||
|
// Exec(ctx)
|
||||||
|
func (_c *AuthIdentityChannelCreate) OnConflict(opts ...sql.ConflictOption) *AuthIdentityChannelUpsertOne {
|
||||||
|
_c.conflict = opts
|
||||||
|
return &AuthIdentityChannelUpsertOne{
|
||||||
|
create: _c,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnConflictColumns calls `OnConflict` and configures the columns
|
||||||
|
// as conflict target. Using this option is equivalent to using:
|
||||||
|
//
|
||||||
|
// client.AuthIdentityChannel.Create().
|
||||||
|
// OnConflict(sql.ConflictColumns(columns...)).
|
||||||
|
// Exec(ctx)
|
||||||
|
func (_c *AuthIdentityChannelCreate) OnConflictColumns(columns ...string) *AuthIdentityChannelUpsertOne {
|
||||||
|
_c.conflict = append(_c.conflict, sql.ConflictColumns(columns...))
|
||||||
|
return &AuthIdentityChannelUpsertOne{
|
||||||
|
create: _c,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type (
|
||||||
|
// AuthIdentityChannelUpsertOne is the builder for "upsert"-ing
|
||||||
|
// one AuthIdentityChannel node.
|
||||||
|
AuthIdentityChannelUpsertOne struct {
|
||||||
|
create *AuthIdentityChannelCreate
|
||||||
|
}
|
||||||
|
|
||||||
|
// AuthIdentityChannelUpsert is the "OnConflict" setter.
|
||||||
|
AuthIdentityChannelUpsert struct {
|
||||||
|
*sql.UpdateSet
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
// SetUpdatedAt sets the "updated_at" field.
|
||||||
|
func (u *AuthIdentityChannelUpsert) SetUpdatedAt(v time.Time) *AuthIdentityChannelUpsert {
|
||||||
|
u.Set(authidentitychannel.FieldUpdatedAt, v)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create.
|
||||||
|
func (u *AuthIdentityChannelUpsert) UpdateUpdatedAt() *AuthIdentityChannelUpsert {
|
||||||
|
u.SetExcluded(authidentitychannel.FieldUpdatedAt)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetIdentityID sets the "identity_id" field.
|
||||||
|
func (u *AuthIdentityChannelUpsert) SetIdentityID(v int64) *AuthIdentityChannelUpsert {
|
||||||
|
u.Set(authidentitychannel.FieldIdentityID, v)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateIdentityID sets the "identity_id" field to the value that was provided on create.
|
||||||
|
func (u *AuthIdentityChannelUpsert) UpdateIdentityID() *AuthIdentityChannelUpsert {
|
||||||
|
u.SetExcluded(authidentitychannel.FieldIdentityID)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetProviderType sets the "provider_type" field.
|
||||||
|
func (u *AuthIdentityChannelUpsert) SetProviderType(v string) *AuthIdentityChannelUpsert {
|
||||||
|
u.Set(authidentitychannel.FieldProviderType, v)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateProviderType sets the "provider_type" field to the value that was provided on create.
|
||||||
|
func (u *AuthIdentityChannelUpsert) UpdateProviderType() *AuthIdentityChannelUpsert {
|
||||||
|
u.SetExcluded(authidentitychannel.FieldProviderType)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetProviderKey sets the "provider_key" field.
|
||||||
|
func (u *AuthIdentityChannelUpsert) SetProviderKey(v string) *AuthIdentityChannelUpsert {
|
||||||
|
u.Set(authidentitychannel.FieldProviderKey, v)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateProviderKey sets the "provider_key" field to the value that was provided on create.
|
||||||
|
func (u *AuthIdentityChannelUpsert) UpdateProviderKey() *AuthIdentityChannelUpsert {
|
||||||
|
u.SetExcluded(authidentitychannel.FieldProviderKey)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetChannel sets the "channel" field.
|
||||||
|
func (u *AuthIdentityChannelUpsert) SetChannel(v string) *AuthIdentityChannelUpsert {
|
||||||
|
u.Set(authidentitychannel.FieldChannel, v)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateChannel sets the "channel" field to the value that was provided on create.
|
||||||
|
func (u *AuthIdentityChannelUpsert) UpdateChannel() *AuthIdentityChannelUpsert {
|
||||||
|
u.SetExcluded(authidentitychannel.FieldChannel)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetChannelAppID sets the "channel_app_id" field.
|
||||||
|
func (u *AuthIdentityChannelUpsert) SetChannelAppID(v string) *AuthIdentityChannelUpsert {
|
||||||
|
u.Set(authidentitychannel.FieldChannelAppID, v)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateChannelAppID sets the "channel_app_id" field to the value that was provided on create.
|
||||||
|
func (u *AuthIdentityChannelUpsert) UpdateChannelAppID() *AuthIdentityChannelUpsert {
|
||||||
|
u.SetExcluded(authidentitychannel.FieldChannelAppID)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetChannelSubject sets the "channel_subject" field.
|
||||||
|
func (u *AuthIdentityChannelUpsert) SetChannelSubject(v string) *AuthIdentityChannelUpsert {
|
||||||
|
u.Set(authidentitychannel.FieldChannelSubject, v)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateChannelSubject sets the "channel_subject" field to the value that was provided on create.
|
||||||
|
func (u *AuthIdentityChannelUpsert) UpdateChannelSubject() *AuthIdentityChannelUpsert {
|
||||||
|
u.SetExcluded(authidentitychannel.FieldChannelSubject)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetMetadata sets the "metadata" field.
|
||||||
|
func (u *AuthIdentityChannelUpsert) SetMetadata(v map[string]interface{}) *AuthIdentityChannelUpsert {
|
||||||
|
u.Set(authidentitychannel.FieldMetadata, v)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateMetadata sets the "metadata" field to the value that was provided on create.
|
||||||
|
func (u *AuthIdentityChannelUpsert) UpdateMetadata() *AuthIdentityChannelUpsert {
|
||||||
|
u.SetExcluded(authidentitychannel.FieldMetadata)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateNewValues updates the mutable fields using the new values that were set on create.
|
||||||
|
// Using this option is equivalent to using:
|
||||||
|
//
|
||||||
|
// client.AuthIdentityChannel.Create().
|
||||||
|
// OnConflict(
|
||||||
|
// sql.ResolveWithNewValues(),
|
||||||
|
// ).
|
||||||
|
// Exec(ctx)
|
||||||
|
func (u *AuthIdentityChannelUpsertOne) UpdateNewValues() *AuthIdentityChannelUpsertOne {
|
||||||
|
u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues())
|
||||||
|
u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) {
|
||||||
|
if _, exists := u.create.mutation.CreatedAt(); exists {
|
||||||
|
s.SetIgnore(authidentitychannel.FieldCreatedAt)
|
||||||
|
}
|
||||||
|
}))
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ignore sets each column to itself in case of conflict.
|
||||||
|
// Using this option is equivalent to using:
|
||||||
|
//
|
||||||
|
// client.AuthIdentityChannel.Create().
|
||||||
|
// OnConflict(sql.ResolveWithIgnore()).
|
||||||
|
// Exec(ctx)
|
||||||
|
func (u *AuthIdentityChannelUpsertOne) Ignore() *AuthIdentityChannelUpsertOne {
|
||||||
|
u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore())
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// DoNothing configures the conflict_action to `DO NOTHING`.
|
||||||
|
// Supported only by SQLite and PostgreSQL.
|
||||||
|
func (u *AuthIdentityChannelUpsertOne) DoNothing() *AuthIdentityChannelUpsertOne {
|
||||||
|
u.create.conflict = append(u.create.conflict, sql.DoNothing())
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update allows overriding fields `UPDATE` values. See the AuthIdentityChannelCreate.OnConflict
|
||||||
|
// documentation for more info.
|
||||||
|
func (u *AuthIdentityChannelUpsertOne) Update(set func(*AuthIdentityChannelUpsert)) *AuthIdentityChannelUpsertOne {
|
||||||
|
u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) {
|
||||||
|
set(&AuthIdentityChannelUpsert{UpdateSet: update})
|
||||||
|
}))
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetUpdatedAt sets the "updated_at" field.
|
||||||
|
func (u *AuthIdentityChannelUpsertOne) SetUpdatedAt(v time.Time) *AuthIdentityChannelUpsertOne {
|
||||||
|
return u.Update(func(s *AuthIdentityChannelUpsert) {
|
||||||
|
s.SetUpdatedAt(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create.
|
||||||
|
func (u *AuthIdentityChannelUpsertOne) UpdateUpdatedAt() *AuthIdentityChannelUpsertOne {
|
||||||
|
return u.Update(func(s *AuthIdentityChannelUpsert) {
|
||||||
|
s.UpdateUpdatedAt()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetIdentityID sets the "identity_id" field.
|
||||||
|
func (u *AuthIdentityChannelUpsertOne) SetIdentityID(v int64) *AuthIdentityChannelUpsertOne {
|
||||||
|
return u.Update(func(s *AuthIdentityChannelUpsert) {
|
||||||
|
s.SetIdentityID(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateIdentityID sets the "identity_id" field to the value that was provided on create.
|
||||||
|
func (u *AuthIdentityChannelUpsertOne) UpdateIdentityID() *AuthIdentityChannelUpsertOne {
|
||||||
|
return u.Update(func(s *AuthIdentityChannelUpsert) {
|
||||||
|
s.UpdateIdentityID()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetProviderType sets the "provider_type" field.
|
||||||
|
func (u *AuthIdentityChannelUpsertOne) SetProviderType(v string) *AuthIdentityChannelUpsertOne {
|
||||||
|
return u.Update(func(s *AuthIdentityChannelUpsert) {
|
||||||
|
s.SetProviderType(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateProviderType sets the "provider_type" field to the value that was provided on create.
|
||||||
|
func (u *AuthIdentityChannelUpsertOne) UpdateProviderType() *AuthIdentityChannelUpsertOne {
|
||||||
|
return u.Update(func(s *AuthIdentityChannelUpsert) {
|
||||||
|
s.UpdateProviderType()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetProviderKey sets the "provider_key" field.
|
||||||
|
func (u *AuthIdentityChannelUpsertOne) SetProviderKey(v string) *AuthIdentityChannelUpsertOne {
|
||||||
|
return u.Update(func(s *AuthIdentityChannelUpsert) {
|
||||||
|
s.SetProviderKey(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateProviderKey sets the "provider_key" field to the value that was provided on create.
|
||||||
|
func (u *AuthIdentityChannelUpsertOne) UpdateProviderKey() *AuthIdentityChannelUpsertOne {
|
||||||
|
return u.Update(func(s *AuthIdentityChannelUpsert) {
|
||||||
|
s.UpdateProviderKey()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetChannel sets the "channel" field.
|
||||||
|
func (u *AuthIdentityChannelUpsertOne) SetChannel(v string) *AuthIdentityChannelUpsertOne {
|
||||||
|
return u.Update(func(s *AuthIdentityChannelUpsert) {
|
||||||
|
s.SetChannel(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateChannel sets the "channel" field to the value that was provided on create.
|
||||||
|
func (u *AuthIdentityChannelUpsertOne) UpdateChannel() *AuthIdentityChannelUpsertOne {
|
||||||
|
return u.Update(func(s *AuthIdentityChannelUpsert) {
|
||||||
|
s.UpdateChannel()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetChannelAppID sets the "channel_app_id" field.
|
||||||
|
func (u *AuthIdentityChannelUpsertOne) SetChannelAppID(v string) *AuthIdentityChannelUpsertOne {
|
||||||
|
return u.Update(func(s *AuthIdentityChannelUpsert) {
|
||||||
|
s.SetChannelAppID(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateChannelAppID sets the "channel_app_id" field to the value that was provided on create.
|
||||||
|
func (u *AuthIdentityChannelUpsertOne) UpdateChannelAppID() *AuthIdentityChannelUpsertOne {
|
||||||
|
return u.Update(func(s *AuthIdentityChannelUpsert) {
|
||||||
|
s.UpdateChannelAppID()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetChannelSubject sets the "channel_subject" field.
|
||||||
|
func (u *AuthIdentityChannelUpsertOne) SetChannelSubject(v string) *AuthIdentityChannelUpsertOne {
|
||||||
|
return u.Update(func(s *AuthIdentityChannelUpsert) {
|
||||||
|
s.SetChannelSubject(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateChannelSubject sets the "channel_subject" field to the value that was provided on create.
|
||||||
|
func (u *AuthIdentityChannelUpsertOne) UpdateChannelSubject() *AuthIdentityChannelUpsertOne {
|
||||||
|
return u.Update(func(s *AuthIdentityChannelUpsert) {
|
||||||
|
s.UpdateChannelSubject()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetMetadata sets the "metadata" field.
|
||||||
|
func (u *AuthIdentityChannelUpsertOne) SetMetadata(v map[string]interface{}) *AuthIdentityChannelUpsertOne {
|
||||||
|
return u.Update(func(s *AuthIdentityChannelUpsert) {
|
||||||
|
s.SetMetadata(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateMetadata sets the "metadata" field to the value that was provided on create.
|
||||||
|
func (u *AuthIdentityChannelUpsertOne) UpdateMetadata() *AuthIdentityChannelUpsertOne {
|
||||||
|
return u.Update(func(s *AuthIdentityChannelUpsert) {
|
||||||
|
s.UpdateMetadata()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exec executes the query.
|
||||||
|
func (u *AuthIdentityChannelUpsertOne) Exec(ctx context.Context) error {
|
||||||
|
if len(u.create.conflict) == 0 {
|
||||||
|
return errors.New("ent: missing options for AuthIdentityChannelCreate.OnConflict")
|
||||||
|
}
|
||||||
|
return u.create.Exec(ctx)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
|
||||||
|
func (u *AuthIdentityChannelUpsertOne) ExecX(ctx context.Context) {
|
||||||
|
if err := u.create.Exec(ctx); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exec executes the UPSERT query and returns the inserted/updated ID.
|
||||||
|
func (u *AuthIdentityChannelUpsertOne) ID(ctx context.Context) (id int64, err error) {
|
||||||
|
node, err := u.create.Save(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return id, err
|
||||||
|
}
|
||||||
|
return node.ID, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDX is like ID, but panics if an error occurs.
|
||||||
|
func (u *AuthIdentityChannelUpsertOne) IDX(ctx context.Context) int64 {
|
||||||
|
id, err := u.ID(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return id
|
||||||
|
}
|
||||||
|
|
||||||
|
// AuthIdentityChannelCreateBulk is the builder for creating many AuthIdentityChannel entities in bulk.
|
||||||
|
type AuthIdentityChannelCreateBulk struct {
|
||||||
|
config
|
||||||
|
err error
|
||||||
|
builders []*AuthIdentityChannelCreate
|
||||||
|
conflict []sql.ConflictOption
|
||||||
|
}
|
||||||
|
|
||||||
|
// Save creates the AuthIdentityChannel entities in the database.
|
||||||
|
func (_c *AuthIdentityChannelCreateBulk) Save(ctx context.Context) ([]*AuthIdentityChannel, error) {
|
||||||
|
if _c.err != nil {
|
||||||
|
return nil, _c.err
|
||||||
|
}
|
||||||
|
specs := make([]*sqlgraph.CreateSpec, len(_c.builders))
|
||||||
|
nodes := make([]*AuthIdentityChannel, len(_c.builders))
|
||||||
|
mutators := make([]Mutator, len(_c.builders))
|
||||||
|
for i := range _c.builders {
|
||||||
|
func(i int, root context.Context) {
|
||||||
|
builder := _c.builders[i]
|
||||||
|
builder.defaults()
|
||||||
|
var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
|
||||||
|
mutation, ok := m.(*AuthIdentityChannelMutation)
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("unexpected mutation type %T", m)
|
||||||
|
}
|
||||||
|
if err := builder.check(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
builder.mutation = mutation
|
||||||
|
var err error
|
||||||
|
nodes[i], specs[i] = builder.createSpec()
|
||||||
|
if i < len(mutators)-1 {
|
||||||
|
_, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation)
|
||||||
|
} else {
|
||||||
|
spec := &sqlgraph.BatchCreateSpec{Nodes: specs}
|
||||||
|
spec.OnConflict = _c.conflict
|
||||||
|
// Invoke the actual operation on the latest mutation in the chain.
|
||||||
|
if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil {
|
||||||
|
if sqlgraph.IsConstraintError(err) {
|
||||||
|
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
mutation.id = &nodes[i].ID
|
||||||
|
if specs[i].ID.Value != nil {
|
||||||
|
id := specs[i].ID.Value.(int64)
|
||||||
|
nodes[i].ID = int64(id)
|
||||||
|
}
|
||||||
|
mutation.done = true
|
||||||
|
return nodes[i], nil
|
||||||
|
})
|
||||||
|
for i := len(builder.hooks) - 1; i >= 0; i-- {
|
||||||
|
mut = builder.hooks[i](mut)
|
||||||
|
}
|
||||||
|
mutators[i] = mut
|
||||||
|
}(i, ctx)
|
||||||
|
}
|
||||||
|
if len(mutators) > 0 {
|
||||||
|
if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nodes, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// SaveX is like Save, but panics if an error occurs.
|
||||||
|
func (_c *AuthIdentityChannelCreateBulk) SaveX(ctx context.Context) []*AuthIdentityChannel {
|
||||||
|
v, err := _c.Save(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exec executes the query.
|
||||||
|
func (_c *AuthIdentityChannelCreateBulk) Exec(ctx context.Context) error {
|
||||||
|
_, err := _c.Save(ctx)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
|
||||||
|
func (_c *AuthIdentityChannelCreateBulk) ExecX(ctx context.Context) {
|
||||||
|
if err := _c.Exec(ctx); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause
|
||||||
|
// of the `INSERT` statement. For example:
|
||||||
|
//
|
||||||
|
// client.AuthIdentityChannel.CreateBulk(builders...).
|
||||||
|
// OnConflict(
|
||||||
|
// // Update the row with the new values
|
||||||
|
// // the was proposed for insertion.
|
||||||
|
// sql.ResolveWithNewValues(),
|
||||||
|
// ).
|
||||||
|
// // Override some of the fields with custom
|
||||||
|
// // update values.
|
||||||
|
// Update(func(u *ent.AuthIdentityChannelUpsert) {
|
||||||
|
// SetCreatedAt(v+v).
|
||||||
|
// }).
|
||||||
|
// Exec(ctx)
|
||||||
|
func (_c *AuthIdentityChannelCreateBulk) OnConflict(opts ...sql.ConflictOption) *AuthIdentityChannelUpsertBulk {
|
||||||
|
_c.conflict = opts
|
||||||
|
return &AuthIdentityChannelUpsertBulk{
|
||||||
|
create: _c,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnConflictColumns calls `OnConflict` and configures the columns
|
||||||
|
// as conflict target. Using this option is equivalent to using:
|
||||||
|
//
|
||||||
|
// client.AuthIdentityChannel.Create().
|
||||||
|
// OnConflict(sql.ConflictColumns(columns...)).
|
||||||
|
// Exec(ctx)
|
||||||
|
func (_c *AuthIdentityChannelCreateBulk) OnConflictColumns(columns ...string) *AuthIdentityChannelUpsertBulk {
|
||||||
|
_c.conflict = append(_c.conflict, sql.ConflictColumns(columns...))
|
||||||
|
return &AuthIdentityChannelUpsertBulk{
|
||||||
|
create: _c,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// AuthIdentityChannelUpsertBulk is the builder for "upsert"-ing
|
||||||
|
// a bulk of AuthIdentityChannel nodes.
|
||||||
|
type AuthIdentityChannelUpsertBulk struct {
|
||||||
|
create *AuthIdentityChannelCreateBulk
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateNewValues updates the mutable fields using the new values that
|
||||||
|
// were set on create. Using this option is equivalent to using:
|
||||||
|
//
|
||||||
|
// client.AuthIdentityChannel.Create().
|
||||||
|
// OnConflict(
|
||||||
|
// sql.ResolveWithNewValues(),
|
||||||
|
// ).
|
||||||
|
// Exec(ctx)
|
||||||
|
func (u *AuthIdentityChannelUpsertBulk) UpdateNewValues() *AuthIdentityChannelUpsertBulk {
|
||||||
|
u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues())
|
||||||
|
u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) {
|
||||||
|
for _, b := range u.create.builders {
|
||||||
|
if _, exists := b.mutation.CreatedAt(); exists {
|
||||||
|
s.SetIgnore(authidentitychannel.FieldCreatedAt)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}))
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ignore sets each column to itself in case of conflict.
|
||||||
|
// Using this option is equivalent to using:
|
||||||
|
//
|
||||||
|
// client.AuthIdentityChannel.Create().
|
||||||
|
// OnConflict(sql.ResolveWithIgnore()).
|
||||||
|
// Exec(ctx)
|
||||||
|
func (u *AuthIdentityChannelUpsertBulk) Ignore() *AuthIdentityChannelUpsertBulk {
|
||||||
|
u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore())
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// DoNothing configures the conflict_action to `DO NOTHING`.
|
||||||
|
// Supported only by SQLite and PostgreSQL.
|
||||||
|
func (u *AuthIdentityChannelUpsertBulk) DoNothing() *AuthIdentityChannelUpsertBulk {
|
||||||
|
u.create.conflict = append(u.create.conflict, sql.DoNothing())
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update allows overriding fields `UPDATE` values. See the AuthIdentityChannelCreateBulk.OnConflict
|
||||||
|
// documentation for more info.
|
||||||
|
func (u *AuthIdentityChannelUpsertBulk) Update(set func(*AuthIdentityChannelUpsert)) *AuthIdentityChannelUpsertBulk {
|
||||||
|
u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) {
|
||||||
|
set(&AuthIdentityChannelUpsert{UpdateSet: update})
|
||||||
|
}))
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetUpdatedAt sets the "updated_at" field.
|
||||||
|
func (u *AuthIdentityChannelUpsertBulk) SetUpdatedAt(v time.Time) *AuthIdentityChannelUpsertBulk {
|
||||||
|
return u.Update(func(s *AuthIdentityChannelUpsert) {
|
||||||
|
s.SetUpdatedAt(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create.
|
||||||
|
func (u *AuthIdentityChannelUpsertBulk) UpdateUpdatedAt() *AuthIdentityChannelUpsertBulk {
|
||||||
|
return u.Update(func(s *AuthIdentityChannelUpsert) {
|
||||||
|
s.UpdateUpdatedAt()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetIdentityID sets the "identity_id" field.
|
||||||
|
func (u *AuthIdentityChannelUpsertBulk) SetIdentityID(v int64) *AuthIdentityChannelUpsertBulk {
|
||||||
|
return u.Update(func(s *AuthIdentityChannelUpsert) {
|
||||||
|
s.SetIdentityID(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateIdentityID sets the "identity_id" field to the value that was provided on create.
|
||||||
|
func (u *AuthIdentityChannelUpsertBulk) UpdateIdentityID() *AuthIdentityChannelUpsertBulk {
|
||||||
|
return u.Update(func(s *AuthIdentityChannelUpsert) {
|
||||||
|
s.UpdateIdentityID()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetProviderType sets the "provider_type" field.
|
||||||
|
func (u *AuthIdentityChannelUpsertBulk) SetProviderType(v string) *AuthIdentityChannelUpsertBulk {
|
||||||
|
return u.Update(func(s *AuthIdentityChannelUpsert) {
|
||||||
|
s.SetProviderType(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateProviderType sets the "provider_type" field to the value that was provided on create.
|
||||||
|
func (u *AuthIdentityChannelUpsertBulk) UpdateProviderType() *AuthIdentityChannelUpsertBulk {
|
||||||
|
return u.Update(func(s *AuthIdentityChannelUpsert) {
|
||||||
|
s.UpdateProviderType()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetProviderKey sets the "provider_key" field.
|
||||||
|
func (u *AuthIdentityChannelUpsertBulk) SetProviderKey(v string) *AuthIdentityChannelUpsertBulk {
|
||||||
|
return u.Update(func(s *AuthIdentityChannelUpsert) {
|
||||||
|
s.SetProviderKey(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateProviderKey sets the "provider_key" field to the value that was provided on create.
|
||||||
|
func (u *AuthIdentityChannelUpsertBulk) UpdateProviderKey() *AuthIdentityChannelUpsertBulk {
|
||||||
|
return u.Update(func(s *AuthIdentityChannelUpsert) {
|
||||||
|
s.UpdateProviderKey()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetChannel sets the "channel" field.
|
||||||
|
func (u *AuthIdentityChannelUpsertBulk) SetChannel(v string) *AuthIdentityChannelUpsertBulk {
|
||||||
|
return u.Update(func(s *AuthIdentityChannelUpsert) {
|
||||||
|
s.SetChannel(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateChannel sets the "channel" field to the value that was provided on create.
|
||||||
|
func (u *AuthIdentityChannelUpsertBulk) UpdateChannel() *AuthIdentityChannelUpsertBulk {
|
||||||
|
return u.Update(func(s *AuthIdentityChannelUpsert) {
|
||||||
|
s.UpdateChannel()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetChannelAppID sets the "channel_app_id" field.
|
||||||
|
func (u *AuthIdentityChannelUpsertBulk) SetChannelAppID(v string) *AuthIdentityChannelUpsertBulk {
|
||||||
|
return u.Update(func(s *AuthIdentityChannelUpsert) {
|
||||||
|
s.SetChannelAppID(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateChannelAppID sets the "channel_app_id" field to the value that was provided on create.
|
||||||
|
func (u *AuthIdentityChannelUpsertBulk) UpdateChannelAppID() *AuthIdentityChannelUpsertBulk {
|
||||||
|
return u.Update(func(s *AuthIdentityChannelUpsert) {
|
||||||
|
s.UpdateChannelAppID()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetChannelSubject sets the "channel_subject" field.
|
||||||
|
func (u *AuthIdentityChannelUpsertBulk) SetChannelSubject(v string) *AuthIdentityChannelUpsertBulk {
|
||||||
|
return u.Update(func(s *AuthIdentityChannelUpsert) {
|
||||||
|
s.SetChannelSubject(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateChannelSubject sets the "channel_subject" field to the value that was provided on create.
|
||||||
|
func (u *AuthIdentityChannelUpsertBulk) UpdateChannelSubject() *AuthIdentityChannelUpsertBulk {
|
||||||
|
return u.Update(func(s *AuthIdentityChannelUpsert) {
|
||||||
|
s.UpdateChannelSubject()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetMetadata sets the "metadata" field.
|
||||||
|
func (u *AuthIdentityChannelUpsertBulk) SetMetadata(v map[string]interface{}) *AuthIdentityChannelUpsertBulk {
|
||||||
|
return u.Update(func(s *AuthIdentityChannelUpsert) {
|
||||||
|
s.SetMetadata(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateMetadata sets the "metadata" field to the value that was provided on create.
|
||||||
|
func (u *AuthIdentityChannelUpsertBulk) UpdateMetadata() *AuthIdentityChannelUpsertBulk {
|
||||||
|
return u.Update(func(s *AuthIdentityChannelUpsert) {
|
||||||
|
s.UpdateMetadata()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exec executes the query.
|
||||||
|
func (u *AuthIdentityChannelUpsertBulk) Exec(ctx context.Context) error {
|
||||||
|
if u.create.err != nil {
|
||||||
|
return u.create.err
|
||||||
|
}
|
||||||
|
for i, b := range u.create.builders {
|
||||||
|
if len(b.conflict) != 0 {
|
||||||
|
return fmt.Errorf("ent: OnConflict was set for builder %d. Set it on the AuthIdentityChannelCreateBulk instead", i)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if len(u.create.conflict) == 0 {
|
||||||
|
return errors.New("ent: missing options for AuthIdentityChannelCreateBulk.OnConflict")
|
||||||
|
}
|
||||||
|
return u.create.Exec(ctx)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
|
||||||
|
func (u *AuthIdentityChannelUpsertBulk) ExecX(ctx context.Context) {
|
||||||
|
if err := u.create.Exec(ctx); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
88
backend/ent/authidentitychannel_delete.go
Normal file
@@ -0,0 +1,88 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package ent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
"entgo.io/ent/schema/field"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/authidentitychannel"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||||
|
)
|
||||||
|
|
||||||
|
// AuthIdentityChannelDelete is the builder for deleting a AuthIdentityChannel entity.
|
||||||
|
type AuthIdentityChannelDelete struct {
|
||||||
|
config
|
||||||
|
hooks []Hook
|
||||||
|
mutation *AuthIdentityChannelMutation
|
||||||
|
}
|
||||||
|
|
||||||
|
// Where appends a list predicates to the AuthIdentityChannelDelete builder.
|
||||||
|
func (_d *AuthIdentityChannelDelete) Where(ps ...predicate.AuthIdentityChannel) *AuthIdentityChannelDelete {
|
||||||
|
_d.mutation.Where(ps...)
|
||||||
|
return _d
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exec executes the deletion query and returns how many vertices were deleted.
|
||||||
|
func (_d *AuthIdentityChannelDelete) Exec(ctx context.Context) (int, error) {
|
||||||
|
return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
|
||||||
|
func (_d *AuthIdentityChannelDelete) ExecX(ctx context.Context) int {
|
||||||
|
n, err := _d.Exec(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_d *AuthIdentityChannelDelete) sqlExec(ctx context.Context) (int, error) {
|
||||||
|
_spec := sqlgraph.NewDeleteSpec(authidentitychannel.Table, sqlgraph.NewFieldSpec(authidentitychannel.FieldID, field.TypeInt64))
|
||||||
|
if ps := _d.mutation.predicates; len(ps) > 0 {
|
||||||
|
_spec.Predicate = func(selector *sql.Selector) {
|
||||||
|
for i := range ps {
|
||||||
|
ps[i](selector)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec)
|
||||||
|
if err != nil && sqlgraph.IsConstraintError(err) {
|
||||||
|
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||||
|
}
|
||||||
|
_d.mutation.done = true
|
||||||
|
return affected, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// AuthIdentityChannelDeleteOne is the builder for deleting a single AuthIdentityChannel entity.
|
||||||
|
type AuthIdentityChannelDeleteOne struct {
|
||||||
|
_d *AuthIdentityChannelDelete
|
||||||
|
}
|
||||||
|
|
||||||
|
// Where appends a list predicates to the AuthIdentityChannelDelete builder.
|
||||||
|
func (_d *AuthIdentityChannelDeleteOne) Where(ps ...predicate.AuthIdentityChannel) *AuthIdentityChannelDeleteOne {
|
||||||
|
_d._d.mutation.Where(ps...)
|
||||||
|
return _d
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exec executes the deletion query.
|
||||||
|
func (_d *AuthIdentityChannelDeleteOne) Exec(ctx context.Context) error {
|
||||||
|
n, err := _d._d.Exec(ctx)
|
||||||
|
switch {
|
||||||
|
case err != nil:
|
||||||
|
return err
|
||||||
|
case n == 0:
|
||||||
|
return &NotFoundError{authidentitychannel.Label}
|
||||||
|
default:
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
|
||||||
|
func (_d *AuthIdentityChannelDeleteOne) ExecX(ctx context.Context) {
|
||||||
|
if err := _d.Exec(ctx); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
643
backend/ent/authidentitychannel_query.go
Normal file
@@ -0,0 +1,643 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package ent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"math"
|
||||||
|
|
||||||
|
"entgo.io/ent"
|
||||||
|
"entgo.io/ent/dialect"
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
"entgo.io/ent/schema/field"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/authidentity"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/authidentitychannel"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||||
|
)
|
||||||
|
|
||||||
|
// AuthIdentityChannelQuery is the builder for querying AuthIdentityChannel entities.
|
||||||
|
type AuthIdentityChannelQuery struct {
|
||||||
|
config
|
||||||
|
ctx *QueryContext
|
||||||
|
order []authidentitychannel.OrderOption
|
||||||
|
inters []Interceptor
|
||||||
|
predicates []predicate.AuthIdentityChannel
|
||||||
|
withIdentity *AuthIdentityQuery
|
||||||
|
modifiers []func(*sql.Selector)
|
||||||
|
// intermediate query (i.e. traversal path).
|
||||||
|
sql *sql.Selector
|
||||||
|
path func(context.Context) (*sql.Selector, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Where adds a new predicate for the AuthIdentityChannelQuery builder.
|
||||||
|
func (_q *AuthIdentityChannelQuery) Where(ps ...predicate.AuthIdentityChannel) *AuthIdentityChannelQuery {
|
||||||
|
_q.predicates = append(_q.predicates, ps...)
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// Limit the number of records to be returned by this query.
|
||||||
|
func (_q *AuthIdentityChannelQuery) Limit(limit int) *AuthIdentityChannelQuery {
|
||||||
|
_q.ctx.Limit = &limit
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// Offset to start from.
|
||||||
|
func (_q *AuthIdentityChannelQuery) Offset(offset int) *AuthIdentityChannelQuery {
|
||||||
|
_q.ctx.Offset = &offset
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unique configures the query builder to filter duplicate records on query.
|
||||||
|
// By default, unique is set to true, and can be disabled using this method.
|
||||||
|
func (_q *AuthIdentityChannelQuery) Unique(unique bool) *AuthIdentityChannelQuery {
|
||||||
|
_q.ctx.Unique = &unique
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// Order specifies how the records should be ordered.
|
||||||
|
func (_q *AuthIdentityChannelQuery) Order(o ...authidentitychannel.OrderOption) *AuthIdentityChannelQuery {
|
||||||
|
_q.order = append(_q.order, o...)
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// QueryIdentity chains the current query on the "identity" edge.
|
||||||
|
func (_q *AuthIdentityChannelQuery) QueryIdentity() *AuthIdentityQuery {
|
||||||
|
query := (&AuthIdentityClient{config: _q.config}).Query()
|
||||||
|
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
|
||||||
|
if err := _q.prepareQuery(ctx); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
selector := _q.sqlQuery(ctx)
|
||||||
|
if err := selector.Err(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
step := sqlgraph.NewStep(
|
||||||
|
sqlgraph.From(authidentitychannel.Table, authidentitychannel.FieldID, selector),
|
||||||
|
sqlgraph.To(authidentity.Table, authidentity.FieldID),
|
||||||
|
sqlgraph.Edge(sqlgraph.M2O, true, authidentitychannel.IdentityTable, authidentitychannel.IdentityColumn),
|
||||||
|
)
|
||||||
|
fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step)
|
||||||
|
return fromU, nil
|
||||||
|
}
|
||||||
|
return query
|
||||||
|
}
|
||||||
|
|
||||||
|
// First returns the first AuthIdentityChannel entity from the query.
|
||||||
|
// Returns a *NotFoundError when no AuthIdentityChannel was found.
|
||||||
|
func (_q *AuthIdentityChannelQuery) First(ctx context.Context) (*AuthIdentityChannel, error) {
|
||||||
|
nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if len(nodes) == 0 {
|
||||||
|
return nil, &NotFoundError{authidentitychannel.Label}
|
||||||
|
}
|
||||||
|
return nodes[0], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// FirstX is like First, but panics if an error occurs.
|
||||||
|
func (_q *AuthIdentityChannelQuery) FirstX(ctx context.Context) *AuthIdentityChannel {
|
||||||
|
node, err := _q.First(ctx)
|
||||||
|
if err != nil && !IsNotFound(err) {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return node
|
||||||
|
}
|
||||||
|
|
||||||
|
// FirstID returns the first AuthIdentityChannel ID from the query.
|
||||||
|
// Returns a *NotFoundError when no AuthIdentityChannel ID was found.
|
||||||
|
func (_q *AuthIdentityChannelQuery) FirstID(ctx context.Context) (id int64, err error) {
|
||||||
|
var ids []int64
|
||||||
|
if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if len(ids) == 0 {
|
||||||
|
err = &NotFoundError{authidentitychannel.Label}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
return ids[0], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// FirstIDX is like FirstID, but panics if an error occurs.
|
||||||
|
func (_q *AuthIdentityChannelQuery) FirstIDX(ctx context.Context) int64 {
|
||||||
|
id, err := _q.FirstID(ctx)
|
||||||
|
if err != nil && !IsNotFound(err) {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return id
|
||||||
|
}
|
||||||
|
|
||||||
|
// Only returns a single AuthIdentityChannel entity found by the query, ensuring it only returns one.
|
||||||
|
// Returns a *NotSingularError when more than one AuthIdentityChannel entity is found.
|
||||||
|
// Returns a *NotFoundError when no AuthIdentityChannel entities are found.
|
||||||
|
func (_q *AuthIdentityChannelQuery) Only(ctx context.Context) (*AuthIdentityChannel, error) {
|
||||||
|
nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
switch len(nodes) {
|
||||||
|
case 1:
|
||||||
|
return nodes[0], nil
|
||||||
|
case 0:
|
||||||
|
return nil, &NotFoundError{authidentitychannel.Label}
|
||||||
|
default:
|
||||||
|
return nil, &NotSingularError{authidentitychannel.Label}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnlyX is like Only, but panics if an error occurs.
|
||||||
|
func (_q *AuthIdentityChannelQuery) OnlyX(ctx context.Context) *AuthIdentityChannel {
|
||||||
|
node, err := _q.Only(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return node
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnlyID is like Only, but returns the only AuthIdentityChannel ID in the query.
|
||||||
|
// Returns a *NotSingularError when more than one AuthIdentityChannel ID is found.
|
||||||
|
// Returns a *NotFoundError when no entities are found.
|
||||||
|
func (_q *AuthIdentityChannelQuery) OnlyID(ctx context.Context) (id int64, err error) {
|
||||||
|
var ids []int64
|
||||||
|
if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
switch len(ids) {
|
||||||
|
case 1:
|
||||||
|
id = ids[0]
|
||||||
|
case 0:
|
||||||
|
err = &NotFoundError{authidentitychannel.Label}
|
||||||
|
default:
|
||||||
|
err = &NotSingularError{authidentitychannel.Label}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnlyIDX is like OnlyID, but panics if an error occurs.
|
||||||
|
func (_q *AuthIdentityChannelQuery) OnlyIDX(ctx context.Context) int64 {
|
||||||
|
id, err := _q.OnlyID(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return id
|
||||||
|
}
|
||||||
|
|
||||||
|
// All executes the query and returns a list of AuthIdentityChannels.
|
||||||
|
func (_q *AuthIdentityChannelQuery) All(ctx context.Context) ([]*AuthIdentityChannel, error) {
|
||||||
|
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll)
|
||||||
|
if err := _q.prepareQuery(ctx); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
qr := querierAll[[]*AuthIdentityChannel, *AuthIdentityChannelQuery]()
|
||||||
|
return withInterceptors[[]*AuthIdentityChannel](ctx, _q, qr, _q.inters)
|
||||||
|
}
|
||||||
|
|
||||||
|
// AllX is like All, but panics if an error occurs.
|
||||||
|
func (_q *AuthIdentityChannelQuery) AllX(ctx context.Context) []*AuthIdentityChannel {
|
||||||
|
nodes, err := _q.All(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return nodes
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDs executes the query and returns a list of AuthIdentityChannel IDs.
|
||||||
|
func (_q *AuthIdentityChannelQuery) IDs(ctx context.Context) (ids []int64, err error) {
|
||||||
|
if _q.ctx.Unique == nil && _q.path != nil {
|
||||||
|
_q.Unique(true)
|
||||||
|
}
|
||||||
|
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs)
|
||||||
|
if err = _q.Select(authidentitychannel.FieldID).Scan(ctx, &ids); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return ids, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDsX is like IDs, but panics if an error occurs.
|
||||||
|
func (_q *AuthIdentityChannelQuery) IDsX(ctx context.Context) []int64 {
|
||||||
|
ids, err := _q.IDs(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return ids
|
||||||
|
}
|
||||||
|
|
||||||
|
// Count returns the count of the given query.
|
||||||
|
func (_q *AuthIdentityChannelQuery) Count(ctx context.Context) (int, error) {
|
||||||
|
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount)
|
||||||
|
if err := _q.prepareQuery(ctx); err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
return withInterceptors[int](ctx, _q, querierCount[*AuthIdentityChannelQuery](), _q.inters)
|
||||||
|
}
|
||||||
|
|
||||||
|
// CountX is like Count, but panics if an error occurs.
|
||||||
|
func (_q *AuthIdentityChannelQuery) CountX(ctx context.Context) int {
|
||||||
|
count, err := _q.Count(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return count
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exist returns true if the query has elements in the graph.
|
||||||
|
func (_q *AuthIdentityChannelQuery) Exist(ctx context.Context) (bool, error) {
|
||||||
|
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist)
|
||||||
|
switch _, err := _q.FirstID(ctx); {
|
||||||
|
case IsNotFound(err):
|
||||||
|
return false, nil
|
||||||
|
case err != nil:
|
||||||
|
return false, fmt.Errorf("ent: check existence: %w", err)
|
||||||
|
default:
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExistX is like Exist, but panics if an error occurs.
|
||||||
|
func (_q *AuthIdentityChannelQuery) ExistX(ctx context.Context) bool {
|
||||||
|
exist, err := _q.Exist(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return exist
|
||||||
|
}
|
||||||
|
|
||||||
|
// Clone returns a duplicate of the AuthIdentityChannelQuery builder, including all associated steps. It can be
|
||||||
|
// used to prepare common query builders and use them differently after the clone is made.
|
||||||
|
func (_q *AuthIdentityChannelQuery) Clone() *AuthIdentityChannelQuery {
|
||||||
|
if _q == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return &AuthIdentityChannelQuery{
|
||||||
|
config: _q.config,
|
||||||
|
ctx: _q.ctx.Clone(),
|
||||||
|
order: append([]authidentitychannel.OrderOption{}, _q.order...),
|
||||||
|
inters: append([]Interceptor{}, _q.inters...),
|
||||||
|
predicates: append([]predicate.AuthIdentityChannel{}, _q.predicates...),
|
||||||
|
withIdentity: _q.withIdentity.Clone(),
|
||||||
|
// clone intermediate query.
|
||||||
|
sql: _q.sql.Clone(),
|
||||||
|
path: _q.path,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithIdentity tells the query-builder to eager-load the nodes that are connected to
|
||||||
|
// the "identity" edge. The optional arguments are used to configure the query builder of the edge.
|
||||||
|
func (_q *AuthIdentityChannelQuery) WithIdentity(opts ...func(*AuthIdentityQuery)) *AuthIdentityChannelQuery {
|
||||||
|
query := (&AuthIdentityClient{config: _q.config}).Query()
|
||||||
|
for _, opt := range opts {
|
||||||
|
opt(query)
|
||||||
|
}
|
||||||
|
_q.withIdentity = query
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// GroupBy is used to group vertices by one or more fields/columns.
|
||||||
|
// It is often used with aggregate functions, like: count, max, mean, min, sum.
|
||||||
|
//
|
||||||
|
// Example:
|
||||||
|
//
|
||||||
|
// var v []struct {
|
||||||
|
// CreatedAt time.Time `json:"created_at,omitempty"`
|
||||||
|
// Count int `json:"count,omitempty"`
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// client.AuthIdentityChannel.Query().
|
||||||
|
// GroupBy(authidentitychannel.FieldCreatedAt).
|
||||||
|
// Aggregate(ent.Count()).
|
||||||
|
// Scan(ctx, &v)
|
||||||
|
func (_q *AuthIdentityChannelQuery) GroupBy(field string, fields ...string) *AuthIdentityChannelGroupBy {
|
||||||
|
_q.ctx.Fields = append([]string{field}, fields...)
|
||||||
|
grbuild := &AuthIdentityChannelGroupBy{build: _q}
|
||||||
|
grbuild.flds = &_q.ctx.Fields
|
||||||
|
grbuild.label = authidentitychannel.Label
|
||||||
|
grbuild.scan = grbuild.Scan
|
||||||
|
return grbuild
|
||||||
|
}
|
||||||
|
|
||||||
|
// Select allows the selection one or more fields/columns for the given query,
|
||||||
|
// instead of selecting all fields in the entity.
|
||||||
|
//
|
||||||
|
// Example:
|
||||||
|
//
|
||||||
|
// var v []struct {
|
||||||
|
// CreatedAt time.Time `json:"created_at,omitempty"`
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// client.AuthIdentityChannel.Query().
|
||||||
|
// Select(authidentitychannel.FieldCreatedAt).
|
||||||
|
// Scan(ctx, &v)
|
||||||
|
func (_q *AuthIdentityChannelQuery) Select(fields ...string) *AuthIdentityChannelSelect {
|
||||||
|
_q.ctx.Fields = append(_q.ctx.Fields, fields...)
|
||||||
|
sbuild := &AuthIdentityChannelSelect{AuthIdentityChannelQuery: _q}
|
||||||
|
sbuild.label = authidentitychannel.Label
|
||||||
|
sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan
|
||||||
|
return sbuild
|
||||||
|
}
|
||||||
|
|
||||||
|
// Aggregate returns a AuthIdentityChannelSelect configured with the given aggregations.
|
||||||
|
func (_q *AuthIdentityChannelQuery) Aggregate(fns ...AggregateFunc) *AuthIdentityChannelSelect {
|
||||||
|
return _q.Select().Aggregate(fns...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_q *AuthIdentityChannelQuery) prepareQuery(ctx context.Context) error {
|
||||||
|
for _, inter := range _q.inters {
|
||||||
|
if inter == nil {
|
||||||
|
return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
|
||||||
|
}
|
||||||
|
if trv, ok := inter.(Traverser); ok {
|
||||||
|
if err := trv.Traverse(ctx, _q); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for _, f := range _q.ctx.Fields {
|
||||||
|
if !authidentitychannel.ValidColumn(f) {
|
||||||
|
return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if _q.path != nil {
|
||||||
|
prev, err := _q.path(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
_q.sql = prev
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_q *AuthIdentityChannelQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*AuthIdentityChannel, error) {
|
||||||
|
var (
|
||||||
|
nodes = []*AuthIdentityChannel{}
|
||||||
|
_spec = _q.querySpec()
|
||||||
|
loadedTypes = [1]bool{
|
||||||
|
_q.withIdentity != nil,
|
||||||
|
}
|
||||||
|
)
|
||||||
|
_spec.ScanValues = func(columns []string) ([]any, error) {
|
||||||
|
return (*AuthIdentityChannel).scanValues(nil, columns)
|
||||||
|
}
|
||||||
|
_spec.Assign = func(columns []string, values []any) error {
|
||||||
|
node := &AuthIdentityChannel{config: _q.config}
|
||||||
|
nodes = append(nodes, node)
|
||||||
|
node.Edges.loadedTypes = loadedTypes
|
||||||
|
return node.assignValues(columns, values)
|
||||||
|
}
|
||||||
|
if len(_q.modifiers) > 0 {
|
||||||
|
_spec.Modifiers = _q.modifiers
|
||||||
|
}
|
||||||
|
for i := range hooks {
|
||||||
|
hooks[i](ctx, _spec)
|
||||||
|
}
|
||||||
|
if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if len(nodes) == 0 {
|
||||||
|
return nodes, nil
|
||||||
|
}
|
||||||
|
if query := _q.withIdentity; query != nil {
|
||||||
|
if err := _q.loadIdentity(ctx, query, nodes, nil,
|
||||||
|
func(n *AuthIdentityChannel, e *AuthIdentity) { n.Edges.Identity = e }); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nodes, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_q *AuthIdentityChannelQuery) loadIdentity(ctx context.Context, query *AuthIdentityQuery, nodes []*AuthIdentityChannel, init func(*AuthIdentityChannel), assign func(*AuthIdentityChannel, *AuthIdentity)) error {
|
||||||
|
ids := make([]int64, 0, len(nodes))
|
||||||
|
nodeids := make(map[int64][]*AuthIdentityChannel)
|
||||||
|
for i := range nodes {
|
||||||
|
fk := nodes[i].IdentityID
|
||||||
|
if _, ok := nodeids[fk]; !ok {
|
||||||
|
ids = append(ids, fk)
|
||||||
|
}
|
||||||
|
nodeids[fk] = append(nodeids[fk], nodes[i])
|
||||||
|
}
|
||||||
|
if len(ids) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
query.Where(authidentity.IDIn(ids...))
|
||||||
|
neighbors, err := query.All(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
for _, n := range neighbors {
|
||||||
|
nodes, ok := nodeids[n.ID]
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf(`unexpected foreign-key "identity_id" returned %v`, n.ID)
|
||||||
|
}
|
||||||
|
for i := range nodes {
|
||||||
|
assign(nodes[i], n)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_q *AuthIdentityChannelQuery) sqlCount(ctx context.Context) (int, error) {
|
||||||
|
_spec := _q.querySpec()
|
||||||
|
if len(_q.modifiers) > 0 {
|
||||||
|
_spec.Modifiers = _q.modifiers
|
||||||
|
}
|
||||||
|
_spec.Node.Columns = _q.ctx.Fields
|
||||||
|
if len(_q.ctx.Fields) > 0 {
|
||||||
|
_spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique
|
||||||
|
}
|
||||||
|
return sqlgraph.CountNodes(ctx, _q.driver, _spec)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_q *AuthIdentityChannelQuery) querySpec() *sqlgraph.QuerySpec {
|
||||||
|
_spec := sqlgraph.NewQuerySpec(authidentitychannel.Table, authidentitychannel.Columns, sqlgraph.NewFieldSpec(authidentitychannel.FieldID, field.TypeInt64))
|
||||||
|
_spec.From = _q.sql
|
||||||
|
if unique := _q.ctx.Unique; unique != nil {
|
||||||
|
_spec.Unique = *unique
|
||||||
|
} else if _q.path != nil {
|
||||||
|
_spec.Unique = true
|
||||||
|
}
|
||||||
|
if fields := _q.ctx.Fields; len(fields) > 0 {
|
||||||
|
_spec.Node.Columns = make([]string, 0, len(fields))
|
||||||
|
_spec.Node.Columns = append(_spec.Node.Columns, authidentitychannel.FieldID)
|
||||||
|
for i := range fields {
|
||||||
|
if fields[i] != authidentitychannel.FieldID {
|
||||||
|
_spec.Node.Columns = append(_spec.Node.Columns, fields[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if _q.withIdentity != nil {
|
||||||
|
_spec.Node.AddColumnOnce(authidentitychannel.FieldIdentityID)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if ps := _q.predicates; len(ps) > 0 {
|
||||||
|
_spec.Predicate = func(selector *sql.Selector) {
|
||||||
|
for i := range ps {
|
||||||
|
ps[i](selector)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if limit := _q.ctx.Limit; limit != nil {
|
||||||
|
_spec.Limit = *limit
|
||||||
|
}
|
||||||
|
if offset := _q.ctx.Offset; offset != nil {
|
||||||
|
_spec.Offset = *offset
|
||||||
|
}
|
||||||
|
if ps := _q.order; len(ps) > 0 {
|
||||||
|
_spec.Order = func(selector *sql.Selector) {
|
||||||
|
for i := range ps {
|
||||||
|
ps[i](selector)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return _spec
|
||||||
|
}
|
||||||
|
|
||||||
|
// sqlQuery builds the raw SQL selector for the query, applying column
// selection, user modifiers, predicates, ordering, and pagination in that
// order. The resulting selector is what Scan/All ultimately execute.
func (_q *AuthIdentityChannelQuery) sqlQuery(ctx context.Context) *sql.Selector {
	builder := sql.Dialect(_q.driver.Dialect())
	t1 := builder.Table(authidentitychannel.Table)
	columns := _q.ctx.Fields
	if len(columns) == 0 {
		// No explicit field selection: fall back to all entity columns.
		columns = authidentitychannel.Columns
	}
	selector := builder.Select(t1.Columns(columns...)...).From(t1)
	if _q.sql != nil {
		// A custom selector was installed (e.g. via a modifier); reuse it
		// and re-apply the column selection on top of it.
		selector = _q.sql
		selector.Select(selector.Columns(columns...)...)
	}
	if _q.ctx.Unique != nil && *_q.ctx.Unique {
		selector.Distinct()
	}
	for _, m := range _q.modifiers {
		m(selector)
	}
	for _, p := range _q.predicates {
		p(selector)
	}
	for _, p := range _q.order {
		p(selector)
	}
	if offset := _q.ctx.Offset; offset != nil {
		// limit is mandatory for offset clause. We start
		// with default value, and override it below if needed.
		selector.Offset(*offset).Limit(math.MaxInt32)
	}
	if limit := _q.ctx.Limit; limit != nil {
		selector.Limit(*limit)
	}
	return selector
}
|
||||||
|
|
||||||
|
// ForUpdate locks the selected rows against concurrent updates, and prevent them from being
|
||||||
|
// updated, deleted or "selected ... for update" by other sessions, until the transaction is
|
||||||
|
// either committed or rolled-back.
|
||||||
|
func (_q *AuthIdentityChannelQuery) ForUpdate(opts ...sql.LockOption) *AuthIdentityChannelQuery {
|
||||||
|
if _q.driver.Dialect() == dialect.Postgres {
|
||||||
|
_q.Unique(false)
|
||||||
|
}
|
||||||
|
_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
|
||||||
|
s.ForUpdate(opts...)
|
||||||
|
})
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock
|
||||||
|
// on any rows that are read. Other sessions can read the rows, but cannot modify them
|
||||||
|
// until your transaction commits.
|
||||||
|
func (_q *AuthIdentityChannelQuery) ForShare(opts ...sql.LockOption) *AuthIdentityChannelQuery {
|
||||||
|
if _q.driver.Dialect() == dialect.Postgres {
|
||||||
|
_q.Unique(false)
|
||||||
|
}
|
||||||
|
_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
|
||||||
|
s.ForShare(opts...)
|
||||||
|
})
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// AuthIdentityChannelGroupBy is the group-by builder for AuthIdentityChannel entities.
type AuthIdentityChannelGroupBy struct {
	// selector embeds the shared field/aggregation bookkeeping (flds, fns).
	selector
	// build is the underlying query whose results are grouped.
	build *AuthIdentityChannelQuery
}
|
||||||
|
|
||||||
|
// Aggregate adds the given aggregation functions to the group-by query.
|
||||||
|
func (_g *AuthIdentityChannelGroupBy) Aggregate(fns ...AggregateFunc) *AuthIdentityChannelGroupBy {
|
||||||
|
_g.fns = append(_g.fns, fns...)
|
||||||
|
return _g
|
||||||
|
}
|
||||||
|
|
||||||
|
// Scan applies the selector query and scans the result into the given value.
|
||||||
|
func (_g *AuthIdentityChannelGroupBy) Scan(ctx context.Context, v any) error {
|
||||||
|
ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy)
|
||||||
|
if err := _g.build.prepareQuery(ctx); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return scanWithInterceptors[*AuthIdentityChannelQuery, *AuthIdentityChannelGroupBy](ctx, _g.build, _g, _g.build.inters, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
// sqlScan executes the group-by query against the driver and scans the
// resulting rows into v. The selected columns are the group-by fields
// followed by any aggregation expressions.
func (_g *AuthIdentityChannelGroupBy) sqlScan(ctx context.Context, root *AuthIdentityChannelQuery, v any) error {
	selector := root.sqlQuery(ctx).Select()
	aggregation := make([]string, 0, len(_g.fns))
	for _, fn := range _g.fns {
		aggregation = append(aggregation, fn(selector))
	}
	if len(selector.SelectedColumns()) == 0 {
		// Nothing selected yet: select the group-by fields plus aggregations.
		columns := make([]string, 0, len(*_g.flds)+len(_g.fns))
		for _, f := range *_g.flds {
			columns = append(columns, selector.C(f))
		}
		columns = append(columns, aggregation...)
		selector.Select(columns...)
	}
	selector.GroupBy(selector.Columns(*_g.flds...)...)
	// Surface any selector-building error before hitting the database.
	if err := selector.Err(); err != nil {
		return err
	}
	rows := &sql.Rows{}
	query, args := selector.Query()
	if err := _g.build.driver.Query(ctx, query, args, rows); err != nil {
		return err
	}
	defer rows.Close()
	return sql.ScanSlice(rows, v)
}
|
||||||
|
|
||||||
|
// AuthIdentityChannelSelect is the builder for selecting fields of AuthIdentityChannel entities.
type AuthIdentityChannelSelect struct {
	// The underlying query whose selected fields are scanned.
	*AuthIdentityChannelQuery
	// selector embeds the shared field/aggregation bookkeeping.
	selector
}
|
||||||
|
|
||||||
|
// Aggregate adds the given aggregation functions to the selector query.
|
||||||
|
func (_s *AuthIdentityChannelSelect) Aggregate(fns ...AggregateFunc) *AuthIdentityChannelSelect {
|
||||||
|
_s.fns = append(_s.fns, fns...)
|
||||||
|
return _s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Scan applies the selector query and scans the result into the given value.
|
||||||
|
func (_s *AuthIdentityChannelSelect) Scan(ctx context.Context, v any) error {
|
||||||
|
ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect)
|
||||||
|
if err := _s.prepareQuery(ctx); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return scanWithInterceptors[*AuthIdentityChannelQuery, *AuthIdentityChannelSelect](ctx, _s.AuthIdentityChannelQuery, _s, _s.inters, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
// sqlScan executes the select query against the driver and scans the
// resulting rows into v, merging any aggregation expressions with the
// explicitly selected fields.
func (_s *AuthIdentityChannelSelect) sqlScan(ctx context.Context, root *AuthIdentityChannelQuery, v any) error {
	selector := root.sqlQuery(ctx)
	aggregation := make([]string, 0, len(_s.fns))
	for _, fn := range _s.fns {
		aggregation = append(aggregation, fn(selector))
	}
	switch n := len(*_s.selector.flds); {
	case n == 0 && len(aggregation) > 0:
		// Only aggregations requested: they become the whole projection.
		selector.Select(aggregation...)
	case n != 0 && len(aggregation) > 0:
		// Fields plus aggregations: append the latter to the projection.
		selector.AppendSelect(aggregation...)
	}
	rows := &sql.Rows{}
	query, args := selector.Query()
	if err := _s.driver.Query(ctx, query, args, rows); err != nil {
		return err
	}
	defer rows.Close()
	return sql.ScanSlice(rows, v)
}
|
||||||
581
backend/ent/authidentitychannel_update.go
Normal file
@@ -0,0 +1,581 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package ent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
"entgo.io/ent/schema/field"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/authidentity"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/authidentitychannel"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||||
|
)
|
||||||
|
|
||||||
|
// AuthIdentityChannelUpdate is the builder for updating AuthIdentityChannel entities.
type AuthIdentityChannelUpdate struct {
	config
	// hooks run around sqlSave via withHooks.
	hooks []Hook
	// mutation records the field/edge changes to apply on Save.
	mutation *AuthIdentityChannelMutation
}
|
||||||
|
|
||||||
|
// Where appends a list predicates to the AuthIdentityChannelUpdate builder.
|
||||||
|
func (_u *AuthIdentityChannelUpdate) Where(ps ...predicate.AuthIdentityChannel) *AuthIdentityChannelUpdate {
|
||||||
|
_u.mutation.Where(ps...)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetUpdatedAt sets the "updated_at" field.
|
||||||
|
func (_u *AuthIdentityChannelUpdate) SetUpdatedAt(v time.Time) *AuthIdentityChannelUpdate {
|
||||||
|
_u.mutation.SetUpdatedAt(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetIdentityID sets the "identity_id" field.
|
||||||
|
func (_u *AuthIdentityChannelUpdate) SetIdentityID(v int64) *AuthIdentityChannelUpdate {
|
||||||
|
_u.mutation.SetIdentityID(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableIdentityID sets the "identity_id" field if the given value is not nil.
|
||||||
|
func (_u *AuthIdentityChannelUpdate) SetNillableIdentityID(v *int64) *AuthIdentityChannelUpdate {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetIdentityID(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetProviderType sets the "provider_type" field.
|
||||||
|
func (_u *AuthIdentityChannelUpdate) SetProviderType(v string) *AuthIdentityChannelUpdate {
|
||||||
|
_u.mutation.SetProviderType(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableProviderType sets the "provider_type" field if the given value is not nil.
|
||||||
|
func (_u *AuthIdentityChannelUpdate) SetNillableProviderType(v *string) *AuthIdentityChannelUpdate {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetProviderType(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetProviderKey sets the "provider_key" field.
|
||||||
|
func (_u *AuthIdentityChannelUpdate) SetProviderKey(v string) *AuthIdentityChannelUpdate {
|
||||||
|
_u.mutation.SetProviderKey(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableProviderKey sets the "provider_key" field if the given value is not nil.
|
||||||
|
func (_u *AuthIdentityChannelUpdate) SetNillableProviderKey(v *string) *AuthIdentityChannelUpdate {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetProviderKey(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetChannel sets the "channel" field.
|
||||||
|
func (_u *AuthIdentityChannelUpdate) SetChannel(v string) *AuthIdentityChannelUpdate {
|
||||||
|
_u.mutation.SetChannel(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableChannel sets the "channel" field if the given value is not nil.
|
||||||
|
func (_u *AuthIdentityChannelUpdate) SetNillableChannel(v *string) *AuthIdentityChannelUpdate {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetChannel(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetChannelAppID sets the "channel_app_id" field.
|
||||||
|
func (_u *AuthIdentityChannelUpdate) SetChannelAppID(v string) *AuthIdentityChannelUpdate {
|
||||||
|
_u.mutation.SetChannelAppID(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableChannelAppID sets the "channel_app_id" field if the given value is not nil.
|
||||||
|
func (_u *AuthIdentityChannelUpdate) SetNillableChannelAppID(v *string) *AuthIdentityChannelUpdate {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetChannelAppID(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetChannelSubject sets the "channel_subject" field.
|
||||||
|
func (_u *AuthIdentityChannelUpdate) SetChannelSubject(v string) *AuthIdentityChannelUpdate {
|
||||||
|
_u.mutation.SetChannelSubject(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableChannelSubject sets the "channel_subject" field if the given value is not nil.
|
||||||
|
func (_u *AuthIdentityChannelUpdate) SetNillableChannelSubject(v *string) *AuthIdentityChannelUpdate {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetChannelSubject(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetMetadata sets the "metadata" field.
|
||||||
|
func (_u *AuthIdentityChannelUpdate) SetMetadata(v map[string]interface{}) *AuthIdentityChannelUpdate {
|
||||||
|
_u.mutation.SetMetadata(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetIdentity sets the "identity" edge to the AuthIdentity entity.
|
||||||
|
func (_u *AuthIdentityChannelUpdate) SetIdentity(v *AuthIdentity) *AuthIdentityChannelUpdate {
|
||||||
|
return _u.SetIdentityID(v.ID)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Mutation returns the AuthIdentityChannelMutation object of the builder.
|
||||||
|
func (_u *AuthIdentityChannelUpdate) Mutation() *AuthIdentityChannelMutation {
|
||||||
|
return _u.mutation
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearIdentity clears the "identity" edge to the AuthIdentity entity.
|
||||||
|
func (_u *AuthIdentityChannelUpdate) ClearIdentity() *AuthIdentityChannelUpdate {
|
||||||
|
_u.mutation.ClearIdentity()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// Save executes the query and returns the number of nodes affected by the update operation.
|
||||||
|
func (_u *AuthIdentityChannelUpdate) Save(ctx context.Context) (int, error) {
|
||||||
|
_u.defaults()
|
||||||
|
return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SaveX is like Save, but panics if an error occurs.
|
||||||
|
func (_u *AuthIdentityChannelUpdate) SaveX(ctx context.Context) int {
|
||||||
|
affected, err := _u.Save(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return affected
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exec executes the query.
|
||||||
|
func (_u *AuthIdentityChannelUpdate) Exec(ctx context.Context) error {
|
||||||
|
_, err := _u.Save(ctx)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
|
||||||
|
func (_u *AuthIdentityChannelUpdate) ExecX(ctx context.Context) {
|
||||||
|
if err := _u.Exec(ctx); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// defaults sets the default values of the builder before save.
|
||||||
|
func (_u *AuthIdentityChannelUpdate) defaults() {
|
||||||
|
if _, ok := _u.mutation.UpdatedAt(); !ok {
|
||||||
|
v := authidentitychannel.UpdateDefaultUpdatedAt()
|
||||||
|
_u.mutation.SetUpdatedAt(v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// check runs all checks and user-defined validators on the builder.
// Each scalar validator runs only if the corresponding field was set on
// the mutation; the final check guards against clearing the required
// "identity" edge while simultaneously assigning a new target.
func (_u *AuthIdentityChannelUpdate) check() error {
	if v, ok := _u.mutation.ProviderType(); ok {
		if err := authidentitychannel.ProviderTypeValidator(v); err != nil {
			return &ValidationError{Name: "provider_type", err: fmt.Errorf(`ent: validator failed for field "AuthIdentityChannel.provider_type": %w`, err)}
		}
	}
	if v, ok := _u.mutation.ProviderKey(); ok {
		if err := authidentitychannel.ProviderKeyValidator(v); err != nil {
			return &ValidationError{Name: "provider_key", err: fmt.Errorf(`ent: validator failed for field "AuthIdentityChannel.provider_key": %w`, err)}
		}
	}
	if v, ok := _u.mutation.Channel(); ok {
		if err := authidentitychannel.ChannelValidator(v); err != nil {
			return &ValidationError{Name: "channel", err: fmt.Errorf(`ent: validator failed for field "AuthIdentityChannel.channel": %w`, err)}
		}
	}
	if v, ok := _u.mutation.ChannelAppID(); ok {
		if err := authidentitychannel.ChannelAppIDValidator(v); err != nil {
			return &ValidationError{Name: "channel_app_id", err: fmt.Errorf(`ent: validator failed for field "AuthIdentityChannel.channel_app_id": %w`, err)}
		}
	}
	if v, ok := _u.mutation.ChannelSubject(); ok {
		if err := authidentitychannel.ChannelSubjectValidator(v); err != nil {
			return &ValidationError{Name: "channel_subject", err: fmt.Errorf(`ent: validator failed for field "AuthIdentityChannel.channel_subject": %w`, err)}
		}
	}
	// identity is a required unique edge: it may be replaced but not cleared.
	if _u.mutation.IdentityCleared() && len(_u.mutation.IdentityIDs()) > 0 {
		return errors.New(`ent: clearing a required unique edge "AuthIdentityChannel.identity"`)
	}
	return nil
}
|
||||||
|
|
||||||
|
// sqlSave builds and executes the SQL UPDATE for all rows matching the
// builder's predicates and returns the number of affected rows.
func (_u *AuthIdentityChannelUpdate) sqlSave(ctx context.Context) (_node int, err error) {
	if err := _u.check(); err != nil {
		return _node, err
	}
	_spec := sqlgraph.NewUpdateSpec(authidentitychannel.Table, authidentitychannel.Columns, sqlgraph.NewFieldSpec(authidentitychannel.FieldID, field.TypeInt64))
	if ps := _u.mutation.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	// Copy every field set on the mutation into the update spec.
	if value, ok := _u.mutation.UpdatedAt(); ok {
		_spec.SetField(authidentitychannel.FieldUpdatedAt, field.TypeTime, value)
	}
	if value, ok := _u.mutation.ProviderType(); ok {
		_spec.SetField(authidentitychannel.FieldProviderType, field.TypeString, value)
	}
	if value, ok := _u.mutation.ProviderKey(); ok {
		_spec.SetField(authidentitychannel.FieldProviderKey, field.TypeString, value)
	}
	if value, ok := _u.mutation.Channel(); ok {
		_spec.SetField(authidentitychannel.FieldChannel, field.TypeString, value)
	}
	if value, ok := _u.mutation.ChannelAppID(); ok {
		_spec.SetField(authidentitychannel.FieldChannelAppID, field.TypeString, value)
	}
	if value, ok := _u.mutation.ChannelSubject(); ok {
		_spec.SetField(authidentitychannel.FieldChannelSubject, field.TypeString, value)
	}
	if value, ok := _u.mutation.Metadata(); ok {
		_spec.SetField(authidentitychannel.FieldMetadata, field.TypeJSON, value)
	}
	// Edge handling: clearing drops the FK, adding assigns the new target.
	if _u.mutation.IdentityCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: true,
			Table:   authidentitychannel.IdentityTable,
			Columns: []string{authidentitychannel.IdentityColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(authidentity.FieldID, field.TypeInt64),
			},
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	if nodes := _u.mutation.IdentityIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: true,
			Table:   authidentitychannel.IdentityTable,
			Columns: []string{authidentitychannel.IdentityColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(authidentity.FieldID, field.TypeInt64),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Add = append(_spec.Edges.Add, edge)
	}
	if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil {
		// Translate sqlgraph errors into this package's error types.
		if _, ok := err.(*sqlgraph.NotFoundError); ok {
			err = &NotFoundError{authidentitychannel.Label}
		} else if sqlgraph.IsConstraintError(err) {
			err = &ConstraintError{msg: err.Error(), wrap: err}
		}
		return 0, err
	}
	_u.mutation.done = true
	return _node, nil
}
|
||||||
|
|
||||||
|
// AuthIdentityChannelUpdateOne is the builder for updating a single AuthIdentityChannel entity.
type AuthIdentityChannelUpdateOne struct {
	config
	// fields are the columns to return in the updated entity (see Select).
	fields []string
	// hooks run around sqlSave via withHooks.
	hooks []Hook
	// mutation records the field/edge changes to apply on Save.
	mutation *AuthIdentityChannelMutation
}
|
||||||
|
|
||||||
|
// SetUpdatedAt sets the "updated_at" field.
|
||||||
|
func (_u *AuthIdentityChannelUpdateOne) SetUpdatedAt(v time.Time) *AuthIdentityChannelUpdateOne {
|
||||||
|
_u.mutation.SetUpdatedAt(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetIdentityID sets the "identity_id" field.
|
||||||
|
func (_u *AuthIdentityChannelUpdateOne) SetIdentityID(v int64) *AuthIdentityChannelUpdateOne {
|
||||||
|
_u.mutation.SetIdentityID(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableIdentityID sets the "identity_id" field if the given value is not nil.
|
||||||
|
func (_u *AuthIdentityChannelUpdateOne) SetNillableIdentityID(v *int64) *AuthIdentityChannelUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetIdentityID(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetProviderType sets the "provider_type" field.
|
||||||
|
func (_u *AuthIdentityChannelUpdateOne) SetProviderType(v string) *AuthIdentityChannelUpdateOne {
|
||||||
|
_u.mutation.SetProviderType(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableProviderType sets the "provider_type" field if the given value is not nil.
|
||||||
|
func (_u *AuthIdentityChannelUpdateOne) SetNillableProviderType(v *string) *AuthIdentityChannelUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetProviderType(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetProviderKey sets the "provider_key" field.
|
||||||
|
func (_u *AuthIdentityChannelUpdateOne) SetProviderKey(v string) *AuthIdentityChannelUpdateOne {
|
||||||
|
_u.mutation.SetProviderKey(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableProviderKey sets the "provider_key" field if the given value is not nil.
|
||||||
|
func (_u *AuthIdentityChannelUpdateOne) SetNillableProviderKey(v *string) *AuthIdentityChannelUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetProviderKey(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetChannel sets the "channel" field.
|
||||||
|
func (_u *AuthIdentityChannelUpdateOne) SetChannel(v string) *AuthIdentityChannelUpdateOne {
|
||||||
|
_u.mutation.SetChannel(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableChannel sets the "channel" field if the given value is not nil.
|
||||||
|
func (_u *AuthIdentityChannelUpdateOne) SetNillableChannel(v *string) *AuthIdentityChannelUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetChannel(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetChannelAppID sets the "channel_app_id" field.
|
||||||
|
func (_u *AuthIdentityChannelUpdateOne) SetChannelAppID(v string) *AuthIdentityChannelUpdateOne {
|
||||||
|
_u.mutation.SetChannelAppID(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableChannelAppID sets the "channel_app_id" field if the given value is not nil.
|
||||||
|
func (_u *AuthIdentityChannelUpdateOne) SetNillableChannelAppID(v *string) *AuthIdentityChannelUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetChannelAppID(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetChannelSubject sets the "channel_subject" field.
|
||||||
|
func (_u *AuthIdentityChannelUpdateOne) SetChannelSubject(v string) *AuthIdentityChannelUpdateOne {
|
||||||
|
_u.mutation.SetChannelSubject(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableChannelSubject sets the "channel_subject" field if the given value is not nil.
|
||||||
|
func (_u *AuthIdentityChannelUpdateOne) SetNillableChannelSubject(v *string) *AuthIdentityChannelUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetChannelSubject(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetMetadata sets the "metadata" field.
|
||||||
|
func (_u *AuthIdentityChannelUpdateOne) SetMetadata(v map[string]interface{}) *AuthIdentityChannelUpdateOne {
|
||||||
|
_u.mutation.SetMetadata(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetIdentity sets the "identity" edge to the AuthIdentity entity.
|
||||||
|
func (_u *AuthIdentityChannelUpdateOne) SetIdentity(v *AuthIdentity) *AuthIdentityChannelUpdateOne {
|
||||||
|
return _u.SetIdentityID(v.ID)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Mutation returns the AuthIdentityChannelMutation object of the builder.
|
||||||
|
func (_u *AuthIdentityChannelUpdateOne) Mutation() *AuthIdentityChannelMutation {
|
||||||
|
return _u.mutation
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearIdentity clears the "identity" edge to the AuthIdentity entity.
|
||||||
|
func (_u *AuthIdentityChannelUpdateOne) ClearIdentity() *AuthIdentityChannelUpdateOne {
|
||||||
|
_u.mutation.ClearIdentity()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// Where appends a list predicates to the AuthIdentityChannelUpdate builder.
|
||||||
|
func (_u *AuthIdentityChannelUpdateOne) Where(ps ...predicate.AuthIdentityChannel) *AuthIdentityChannelUpdateOne {
|
||||||
|
_u.mutation.Where(ps...)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// Select allows selecting one or more fields (columns) of the returned entity.
|
||||||
|
// The default is selecting all fields defined in the entity schema.
|
||||||
|
func (_u *AuthIdentityChannelUpdateOne) Select(field string, fields ...string) *AuthIdentityChannelUpdateOne {
|
||||||
|
_u.fields = append([]string{field}, fields...)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// Save executes the query and returns the updated AuthIdentityChannel entity.
|
||||||
|
func (_u *AuthIdentityChannelUpdateOne) Save(ctx context.Context) (*AuthIdentityChannel, error) {
|
||||||
|
_u.defaults()
|
||||||
|
return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SaveX is like Save, but panics if an error occurs.
|
||||||
|
func (_u *AuthIdentityChannelUpdateOne) SaveX(ctx context.Context) *AuthIdentityChannel {
|
||||||
|
node, err := _u.Save(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return node
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exec executes the query on the entity.
|
||||||
|
func (_u *AuthIdentityChannelUpdateOne) Exec(ctx context.Context) error {
|
||||||
|
_, err := _u.Save(ctx)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
|
||||||
|
func (_u *AuthIdentityChannelUpdateOne) ExecX(ctx context.Context) {
|
||||||
|
if err := _u.Exec(ctx); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// defaults sets the default values of the builder before save.
|
||||||
|
func (_u *AuthIdentityChannelUpdateOne) defaults() {
|
||||||
|
if _, ok := _u.mutation.UpdatedAt(); !ok {
|
||||||
|
v := authidentitychannel.UpdateDefaultUpdatedAt()
|
||||||
|
_u.mutation.SetUpdatedAt(v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// check runs all checks and user-defined validators on the builder.
// Each scalar validator runs only if the corresponding field was set on
// the mutation; the final check guards against clearing the required
// "identity" edge while simultaneously assigning a new target.
func (_u *AuthIdentityChannelUpdateOne) check() error {
	if v, ok := _u.mutation.ProviderType(); ok {
		if err := authidentitychannel.ProviderTypeValidator(v); err != nil {
			return &ValidationError{Name: "provider_type", err: fmt.Errorf(`ent: validator failed for field "AuthIdentityChannel.provider_type": %w`, err)}
		}
	}
	if v, ok := _u.mutation.ProviderKey(); ok {
		if err := authidentitychannel.ProviderKeyValidator(v); err != nil {
			return &ValidationError{Name: "provider_key", err: fmt.Errorf(`ent: validator failed for field "AuthIdentityChannel.provider_key": %w`, err)}
		}
	}
	if v, ok := _u.mutation.Channel(); ok {
		if err := authidentitychannel.ChannelValidator(v); err != nil {
			return &ValidationError{Name: "channel", err: fmt.Errorf(`ent: validator failed for field "AuthIdentityChannel.channel": %w`, err)}
		}
	}
	if v, ok := _u.mutation.ChannelAppID(); ok {
		if err := authidentitychannel.ChannelAppIDValidator(v); err != nil {
			return &ValidationError{Name: "channel_app_id", err: fmt.Errorf(`ent: validator failed for field "AuthIdentityChannel.channel_app_id": %w`, err)}
		}
	}
	if v, ok := _u.mutation.ChannelSubject(); ok {
		if err := authidentitychannel.ChannelSubjectValidator(v); err != nil {
			return &ValidationError{Name: "channel_subject", err: fmt.Errorf(`ent: validator failed for field "AuthIdentityChannel.channel_subject": %w`, err)}
		}
	}
	// identity is a required unique edge: it may be replaced but not cleared.
	if _u.mutation.IdentityCleared() && len(_u.mutation.IdentityIDs()) > 0 {
		return errors.New(`ent: clearing a required unique edge "AuthIdentityChannel.identity"`)
	}
	return nil
}
|
||||||
|
|
||||||
|
// sqlSave builds and executes the SQL UPDATE for a single entity identified
// by the mutation's ID, then scans and returns the updated row.
func (_u *AuthIdentityChannelUpdateOne) sqlSave(ctx context.Context) (_node *AuthIdentityChannel, err error) {
	if err := _u.check(); err != nil {
		return _node, err
	}
	_spec := sqlgraph.NewUpdateSpec(authidentitychannel.Table, authidentitychannel.Columns, sqlgraph.NewFieldSpec(authidentitychannel.FieldID, field.TypeInt64))
	id, ok := _u.mutation.ID()
	if !ok {
		return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "AuthIdentityChannel.id" for update`)}
	}
	_spec.Node.ID.Value = id
	if fields := _u.fields; len(fields) > 0 {
		// An explicit Select was made: return only those columns, always
		// including the ID, and reject unknown column names.
		_spec.Node.Columns = make([]string, 0, len(fields))
		_spec.Node.Columns = append(_spec.Node.Columns, authidentitychannel.FieldID)
		for _, f := range fields {
			if !authidentitychannel.ValidColumn(f) {
				return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
			}
			if f != authidentitychannel.FieldID {
				_spec.Node.Columns = append(_spec.Node.Columns, f)
			}
		}
	}
	if ps := _u.mutation.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	// Copy every field set on the mutation into the update spec.
	if value, ok := _u.mutation.UpdatedAt(); ok {
		_spec.SetField(authidentitychannel.FieldUpdatedAt, field.TypeTime, value)
	}
	if value, ok := _u.mutation.ProviderType(); ok {
		_spec.SetField(authidentitychannel.FieldProviderType, field.TypeString, value)
	}
	if value, ok := _u.mutation.ProviderKey(); ok {
		_spec.SetField(authidentitychannel.FieldProviderKey, field.TypeString, value)
	}
	if value, ok := _u.mutation.Channel(); ok {
		_spec.SetField(authidentitychannel.FieldChannel, field.TypeString, value)
	}
	if value, ok := _u.mutation.ChannelAppID(); ok {
		_spec.SetField(authidentitychannel.FieldChannelAppID, field.TypeString, value)
	}
	if value, ok := _u.mutation.ChannelSubject(); ok {
		_spec.SetField(authidentitychannel.FieldChannelSubject, field.TypeString, value)
	}
	if value, ok := _u.mutation.Metadata(); ok {
		_spec.SetField(authidentitychannel.FieldMetadata, field.TypeJSON, value)
	}
	// Edge handling: clearing drops the FK, adding assigns the new target.
	if _u.mutation.IdentityCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: true,
			Table:   authidentitychannel.IdentityTable,
			Columns: []string{authidentitychannel.IdentityColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(authidentity.FieldID, field.TypeInt64),
			},
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	if nodes := _u.mutation.IdentityIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: true,
			Table:   authidentitychannel.IdentityTable,
			Columns: []string{authidentitychannel.IdentityColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(authidentity.FieldID, field.TypeInt64),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Add = append(_spec.Edges.Add, edge)
	}
	// Scan the updated row back into a fresh entity.
	_node = &AuthIdentityChannel{config: _u.config}
	_spec.Assign = _node.assignValues
	_spec.ScanValues = _node.scanValues
	if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil {
		// Translate sqlgraph errors into this package's error types.
		if _, ok := err.(*sqlgraph.NotFoundError); ok {
			err = &NotFoundError{authidentitychannel.Label}
		} else if sqlgraph.IsConstraintError(err) {
			err = &ConstraintError{msg: err.Error(), wrap: err}
		}
		return nil, err
	}
	_u.mutation.done = true
	return _node, nil
}
|
||||||
359
backend/ent/channelmonitor.go
Normal file
@@ -0,0 +1,359 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package ent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"entgo.io/ent"
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/channelmonitor"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/channelmonitorrequesttemplate"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ChannelMonitor is the model entity for the ChannelMonitor schema.
|
||||||
|
type ChannelMonitor struct {
|
||||||
|
config `json:"-"`
|
||||||
|
// ID of the ent.
|
||||||
|
ID int64 `json:"id,omitempty"`
|
||||||
|
// CreatedAt holds the value of the "created_at" field.
|
||||||
|
CreatedAt time.Time `json:"created_at,omitempty"`
|
||||||
|
// UpdatedAt holds the value of the "updated_at" field.
|
||||||
|
UpdatedAt time.Time `json:"updated_at,omitempty"`
|
||||||
|
// Name holds the value of the "name" field.
|
||||||
|
Name string `json:"name,omitempty"`
|
||||||
|
// Provider holds the value of the "provider" field.
|
||||||
|
Provider channelmonitor.Provider `json:"provider,omitempty"`
|
||||||
|
// Provider base origin, e.g. https://api.openai.com
|
||||||
|
Endpoint string `json:"endpoint,omitempty"`
|
||||||
|
// AES-256-GCM encrypted API key
|
||||||
|
APIKeyEncrypted string `json:"-"`
|
||||||
|
// PrimaryModel holds the value of the "primary_model" field.
|
||||||
|
PrimaryModel string `json:"primary_model,omitempty"`
|
||||||
|
// Additional model names to test alongside primary_model
|
||||||
|
ExtraModels []string `json:"extra_models,omitempty"`
|
||||||
|
// GroupName holds the value of the "group_name" field.
|
||||||
|
GroupName string `json:"group_name,omitempty"`
|
||||||
|
// Enabled holds the value of the "enabled" field.
|
||||||
|
Enabled bool `json:"enabled,omitempty"`
|
||||||
|
// IntervalSeconds holds the value of the "interval_seconds" field.
|
||||||
|
IntervalSeconds int `json:"interval_seconds,omitempty"`
|
||||||
|
// LastCheckedAt holds the value of the "last_checked_at" field.
|
||||||
|
LastCheckedAt *time.Time `json:"last_checked_at,omitempty"`
|
||||||
|
// CreatedBy holds the value of the "created_by" field.
|
||||||
|
CreatedBy int64 `json:"created_by,omitempty"`
|
||||||
|
// TemplateID holds the value of the "template_id" field.
|
||||||
|
TemplateID *int64 `json:"template_id,omitempty"`
|
||||||
|
// ExtraHeaders holds the value of the "extra_headers" field.
|
||||||
|
ExtraHeaders map[string]string `json:"extra_headers,omitempty"`
|
||||||
|
// BodyOverrideMode holds the value of the "body_override_mode" field.
|
||||||
|
BodyOverrideMode string `json:"body_override_mode,omitempty"`
|
||||||
|
// BodyOverride holds the value of the "body_override" field.
|
||||||
|
BodyOverride map[string]interface{} `json:"body_override,omitempty"`
|
||||||
|
// Edges holds the relations/edges for other nodes in the graph.
|
||||||
|
// The values are being populated by the ChannelMonitorQuery when eager-loading is set.
|
||||||
|
Edges ChannelMonitorEdges `json:"edges"`
|
||||||
|
selectValues sql.SelectValues
|
||||||
|
}
|
||||||
|
|
||||||
|
// ChannelMonitorEdges holds the relations/edges for other nodes in the graph.
|
||||||
|
type ChannelMonitorEdges struct {
|
||||||
|
// History holds the value of the history edge.
|
||||||
|
History []*ChannelMonitorHistory `json:"history,omitempty"`
|
||||||
|
// DailyRollups holds the value of the daily_rollups edge.
|
||||||
|
DailyRollups []*ChannelMonitorDailyRollup `json:"daily_rollups,omitempty"`
|
||||||
|
// RequestTemplate holds the value of the request_template edge.
|
||||||
|
RequestTemplate *ChannelMonitorRequestTemplate `json:"request_template,omitempty"`
|
||||||
|
// loadedTypes holds the information for reporting if a
|
||||||
|
// type was loaded (or requested) in eager-loading or not.
|
||||||
|
loadedTypes [3]bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// HistoryOrErr returns the History value or an error if the edge
|
||||||
|
// was not loaded in eager-loading.
|
||||||
|
func (e ChannelMonitorEdges) HistoryOrErr() ([]*ChannelMonitorHistory, error) {
|
||||||
|
if e.loadedTypes[0] {
|
||||||
|
return e.History, nil
|
||||||
|
}
|
||||||
|
return nil, &NotLoadedError{edge: "history"}
|
||||||
|
}
|
||||||
|
|
||||||
|
// DailyRollupsOrErr returns the DailyRollups value or an error if the edge
|
||||||
|
// was not loaded in eager-loading.
|
||||||
|
func (e ChannelMonitorEdges) DailyRollupsOrErr() ([]*ChannelMonitorDailyRollup, error) {
|
||||||
|
if e.loadedTypes[1] {
|
||||||
|
return e.DailyRollups, nil
|
||||||
|
}
|
||||||
|
return nil, &NotLoadedError{edge: "daily_rollups"}
|
||||||
|
}
|
||||||
|
|
||||||
|
// RequestTemplateOrErr returns the RequestTemplate value or an error if the edge
|
||||||
|
// was not loaded in eager-loading, or loaded but was not found.
|
||||||
|
func (e ChannelMonitorEdges) RequestTemplateOrErr() (*ChannelMonitorRequestTemplate, error) {
|
||||||
|
if e.RequestTemplate != nil {
|
||||||
|
return e.RequestTemplate, nil
|
||||||
|
} else if e.loadedTypes[2] {
|
||||||
|
return nil, &NotFoundError{label: channelmonitorrequesttemplate.Label}
|
||||||
|
}
|
||||||
|
return nil, &NotLoadedError{edge: "request_template"}
|
||||||
|
}
|
||||||
|
|
||||||
|
// scanValues returns the types for scanning values from sql.Rows.
|
||||||
|
func (*ChannelMonitor) scanValues(columns []string) ([]any, error) {
|
||||||
|
values := make([]any, len(columns))
|
||||||
|
for i := range columns {
|
||||||
|
switch columns[i] {
|
||||||
|
case channelmonitor.FieldExtraModels, channelmonitor.FieldExtraHeaders, channelmonitor.FieldBodyOverride:
|
||||||
|
values[i] = new([]byte)
|
||||||
|
case channelmonitor.FieldEnabled:
|
||||||
|
values[i] = new(sql.NullBool)
|
||||||
|
case channelmonitor.FieldID, channelmonitor.FieldIntervalSeconds, channelmonitor.FieldCreatedBy, channelmonitor.FieldTemplateID:
|
||||||
|
values[i] = new(sql.NullInt64)
|
||||||
|
case channelmonitor.FieldName, channelmonitor.FieldProvider, channelmonitor.FieldEndpoint, channelmonitor.FieldAPIKeyEncrypted, channelmonitor.FieldPrimaryModel, channelmonitor.FieldGroupName, channelmonitor.FieldBodyOverrideMode:
|
||||||
|
values[i] = new(sql.NullString)
|
||||||
|
case channelmonitor.FieldCreatedAt, channelmonitor.FieldUpdatedAt, channelmonitor.FieldLastCheckedAt:
|
||||||
|
values[i] = new(sql.NullTime)
|
||||||
|
default:
|
||||||
|
values[i] = new(sql.UnknownType)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return values, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// assignValues assigns the values that were returned from sql.Rows (after scanning)
|
||||||
|
// to the ChannelMonitor fields.
|
||||||
|
func (_m *ChannelMonitor) assignValues(columns []string, values []any) error {
|
||||||
|
if m, n := len(values), len(columns); m < n {
|
||||||
|
return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
|
||||||
|
}
|
||||||
|
for i := range columns {
|
||||||
|
switch columns[i] {
|
||||||
|
case channelmonitor.FieldID:
|
||||||
|
value, ok := values[i].(*sql.NullInt64)
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field id", value)
|
||||||
|
}
|
||||||
|
_m.ID = int64(value.Int64)
|
||||||
|
case channelmonitor.FieldCreatedAt:
|
||||||
|
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field created_at", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.CreatedAt = value.Time
|
||||||
|
}
|
||||||
|
case channelmonitor.FieldUpdatedAt:
|
||||||
|
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field updated_at", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.UpdatedAt = value.Time
|
||||||
|
}
|
||||||
|
case channelmonitor.FieldName:
|
||||||
|
if value, ok := values[i].(*sql.NullString); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field name", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.Name = value.String
|
||||||
|
}
|
||||||
|
case channelmonitor.FieldProvider:
|
||||||
|
if value, ok := values[i].(*sql.NullString); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field provider", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.Provider = channelmonitor.Provider(value.String)
|
||||||
|
}
|
||||||
|
case channelmonitor.FieldEndpoint:
|
||||||
|
if value, ok := values[i].(*sql.NullString); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field endpoint", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.Endpoint = value.String
|
||||||
|
}
|
||||||
|
case channelmonitor.FieldAPIKeyEncrypted:
|
||||||
|
if value, ok := values[i].(*sql.NullString); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field api_key_encrypted", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.APIKeyEncrypted = value.String
|
||||||
|
}
|
||||||
|
case channelmonitor.FieldPrimaryModel:
|
||||||
|
if value, ok := values[i].(*sql.NullString); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field primary_model", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.PrimaryModel = value.String
|
||||||
|
}
|
||||||
|
case channelmonitor.FieldExtraModels:
|
||||||
|
if value, ok := values[i].(*[]byte); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field extra_models", values[i])
|
||||||
|
} else if value != nil && len(*value) > 0 {
|
||||||
|
if err := json.Unmarshal(*value, &_m.ExtraModels); err != nil {
|
||||||
|
return fmt.Errorf("unmarshal field extra_models: %w", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case channelmonitor.FieldGroupName:
|
||||||
|
if value, ok := values[i].(*sql.NullString); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field group_name", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.GroupName = value.String
|
||||||
|
}
|
||||||
|
case channelmonitor.FieldEnabled:
|
||||||
|
if value, ok := values[i].(*sql.NullBool); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field enabled", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.Enabled = value.Bool
|
||||||
|
}
|
||||||
|
case channelmonitor.FieldIntervalSeconds:
|
||||||
|
if value, ok := values[i].(*sql.NullInt64); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field interval_seconds", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.IntervalSeconds = int(value.Int64)
|
||||||
|
}
|
||||||
|
case channelmonitor.FieldLastCheckedAt:
|
||||||
|
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field last_checked_at", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.LastCheckedAt = new(time.Time)
|
||||||
|
*_m.LastCheckedAt = value.Time
|
||||||
|
}
|
||||||
|
case channelmonitor.FieldCreatedBy:
|
||||||
|
if value, ok := values[i].(*sql.NullInt64); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field created_by", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.CreatedBy = value.Int64
|
||||||
|
}
|
||||||
|
case channelmonitor.FieldTemplateID:
|
||||||
|
if value, ok := values[i].(*sql.NullInt64); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field template_id", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.TemplateID = new(int64)
|
||||||
|
*_m.TemplateID = value.Int64
|
||||||
|
}
|
||||||
|
case channelmonitor.FieldExtraHeaders:
|
||||||
|
if value, ok := values[i].(*[]byte); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field extra_headers", values[i])
|
||||||
|
} else if value != nil && len(*value) > 0 {
|
||||||
|
if err := json.Unmarshal(*value, &_m.ExtraHeaders); err != nil {
|
||||||
|
return fmt.Errorf("unmarshal field extra_headers: %w", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case channelmonitor.FieldBodyOverrideMode:
|
||||||
|
if value, ok := values[i].(*sql.NullString); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field body_override_mode", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.BodyOverrideMode = value.String
|
||||||
|
}
|
||||||
|
case channelmonitor.FieldBodyOverride:
|
||||||
|
if value, ok := values[i].(*[]byte); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field body_override", values[i])
|
||||||
|
} else if value != nil && len(*value) > 0 {
|
||||||
|
if err := json.Unmarshal(*value, &_m.BodyOverride); err != nil {
|
||||||
|
return fmt.Errorf("unmarshal field body_override: %w", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
_m.selectValues.Set(columns[i], values[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Value returns the ent.Value that was dynamically selected and assigned to the ChannelMonitor.
|
||||||
|
// This includes values selected through modifiers, order, etc.
|
||||||
|
func (_m *ChannelMonitor) Value(name string) (ent.Value, error) {
|
||||||
|
return _m.selectValues.Get(name)
|
||||||
|
}
|
||||||
|
|
||||||
|
// QueryHistory queries the "history" edge of the ChannelMonitor entity.
|
||||||
|
func (_m *ChannelMonitor) QueryHistory() *ChannelMonitorHistoryQuery {
|
||||||
|
return NewChannelMonitorClient(_m.config).QueryHistory(_m)
|
||||||
|
}
|
||||||
|
|
||||||
|
// QueryDailyRollups queries the "daily_rollups" edge of the ChannelMonitor entity.
|
||||||
|
func (_m *ChannelMonitor) QueryDailyRollups() *ChannelMonitorDailyRollupQuery {
|
||||||
|
return NewChannelMonitorClient(_m.config).QueryDailyRollups(_m)
|
||||||
|
}
|
||||||
|
|
||||||
|
// QueryRequestTemplate queries the "request_template" edge of the ChannelMonitor entity.
|
||||||
|
func (_m *ChannelMonitor) QueryRequestTemplate() *ChannelMonitorRequestTemplateQuery {
|
||||||
|
return NewChannelMonitorClient(_m.config).QueryRequestTemplate(_m)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update returns a builder for updating this ChannelMonitor.
|
||||||
|
// Note that you need to call ChannelMonitor.Unwrap() before calling this method if this ChannelMonitor
|
||||||
|
// was returned from a transaction, and the transaction was committed or rolled back.
|
||||||
|
func (_m *ChannelMonitor) Update() *ChannelMonitorUpdateOne {
|
||||||
|
return NewChannelMonitorClient(_m.config).UpdateOne(_m)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unwrap unwraps the ChannelMonitor entity that was returned from a transaction after it was closed,
|
||||||
|
// so that all future queries will be executed through the driver which created the transaction.
|
||||||
|
func (_m *ChannelMonitor) Unwrap() *ChannelMonitor {
|
||||||
|
_tx, ok := _m.config.driver.(*txDriver)
|
||||||
|
if !ok {
|
||||||
|
panic("ent: ChannelMonitor is not a transactional entity")
|
||||||
|
}
|
||||||
|
_m.config.driver = _tx.drv
|
||||||
|
return _m
|
||||||
|
}
|
||||||
|
|
||||||
|
// String implements the fmt.Stringer.
|
||||||
|
func (_m *ChannelMonitor) String() string {
|
||||||
|
var builder strings.Builder
|
||||||
|
builder.WriteString("ChannelMonitor(")
|
||||||
|
builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID))
|
||||||
|
builder.WriteString("created_at=")
|
||||||
|
builder.WriteString(_m.CreatedAt.Format(time.ANSIC))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("updated_at=")
|
||||||
|
builder.WriteString(_m.UpdatedAt.Format(time.ANSIC))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("name=")
|
||||||
|
builder.WriteString(_m.Name)
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("provider=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", _m.Provider))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("endpoint=")
|
||||||
|
builder.WriteString(_m.Endpoint)
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("api_key_encrypted=<sensitive>")
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("primary_model=")
|
||||||
|
builder.WriteString(_m.PrimaryModel)
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("extra_models=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", _m.ExtraModels))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("group_name=")
|
||||||
|
builder.WriteString(_m.GroupName)
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("enabled=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", _m.Enabled))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("interval_seconds=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", _m.IntervalSeconds))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
if v := _m.LastCheckedAt; v != nil {
|
||||||
|
builder.WriteString("last_checked_at=")
|
||||||
|
builder.WriteString(v.Format(time.ANSIC))
|
||||||
|
}
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("created_by=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", _m.CreatedBy))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
if v := _m.TemplateID; v != nil {
|
||||||
|
builder.WriteString("template_id=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", *v))
|
||||||
|
}
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("extra_headers=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", _m.ExtraHeaders))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("body_override_mode=")
|
||||||
|
builder.WriteString(_m.BodyOverrideMode)
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("body_override=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", _m.BodyOverride))
|
||||||
|
builder.WriteByte(')')
|
||||||
|
return builder.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ChannelMonitors is a parsable slice of ChannelMonitor.
|
||||||
|
type ChannelMonitors []*ChannelMonitor
|
||||||
304
backend/ent/channelmonitor/channelmonitor.go
Normal file
@@ -0,0 +1,304 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package channelmonitor
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// Label holds the string label denoting the channelmonitor type in the database.
|
||||||
|
Label = "channel_monitor"
|
||||||
|
// FieldID holds the string denoting the id field in the database.
|
||||||
|
FieldID = "id"
|
||||||
|
// FieldCreatedAt holds the string denoting the created_at field in the database.
|
||||||
|
FieldCreatedAt = "created_at"
|
||||||
|
// FieldUpdatedAt holds the string denoting the updated_at field in the database.
|
||||||
|
FieldUpdatedAt = "updated_at"
|
||||||
|
// FieldName holds the string denoting the name field in the database.
|
||||||
|
FieldName = "name"
|
||||||
|
// FieldProvider holds the string denoting the provider field in the database.
|
||||||
|
FieldProvider = "provider"
|
||||||
|
// FieldEndpoint holds the string denoting the endpoint field in the database.
|
||||||
|
FieldEndpoint = "endpoint"
|
||||||
|
// FieldAPIKeyEncrypted holds the string denoting the api_key_encrypted field in the database.
|
||||||
|
FieldAPIKeyEncrypted = "api_key_encrypted"
|
||||||
|
// FieldPrimaryModel holds the string denoting the primary_model field in the database.
|
||||||
|
FieldPrimaryModel = "primary_model"
|
||||||
|
// FieldExtraModels holds the string denoting the extra_models field in the database.
|
||||||
|
FieldExtraModels = "extra_models"
|
||||||
|
// FieldGroupName holds the string denoting the group_name field in the database.
|
||||||
|
FieldGroupName = "group_name"
|
||||||
|
// FieldEnabled holds the string denoting the enabled field in the database.
|
||||||
|
FieldEnabled = "enabled"
|
||||||
|
// FieldIntervalSeconds holds the string denoting the interval_seconds field in the database.
|
||||||
|
FieldIntervalSeconds = "interval_seconds"
|
||||||
|
// FieldLastCheckedAt holds the string denoting the last_checked_at field in the database.
|
||||||
|
FieldLastCheckedAt = "last_checked_at"
|
||||||
|
// FieldCreatedBy holds the string denoting the created_by field in the database.
|
||||||
|
FieldCreatedBy = "created_by"
|
||||||
|
// FieldTemplateID holds the string denoting the template_id field in the database.
|
||||||
|
FieldTemplateID = "template_id"
|
||||||
|
// FieldExtraHeaders holds the string denoting the extra_headers field in the database.
|
||||||
|
FieldExtraHeaders = "extra_headers"
|
||||||
|
// FieldBodyOverrideMode holds the string denoting the body_override_mode field in the database.
|
||||||
|
FieldBodyOverrideMode = "body_override_mode"
|
||||||
|
// FieldBodyOverride holds the string denoting the body_override field in the database.
|
||||||
|
FieldBodyOverride = "body_override"
|
||||||
|
// EdgeHistory holds the string denoting the history edge name in mutations.
|
||||||
|
EdgeHistory = "history"
|
||||||
|
// EdgeDailyRollups holds the string denoting the daily_rollups edge name in mutations.
|
||||||
|
EdgeDailyRollups = "daily_rollups"
|
||||||
|
// EdgeRequestTemplate holds the string denoting the request_template edge name in mutations.
|
||||||
|
EdgeRequestTemplate = "request_template"
|
||||||
|
// Table holds the table name of the channelmonitor in the database.
|
||||||
|
Table = "channel_monitors"
|
||||||
|
// HistoryTable is the table that holds the history relation/edge.
|
||||||
|
HistoryTable = "channel_monitor_histories"
|
||||||
|
// HistoryInverseTable is the table name for the ChannelMonitorHistory entity.
|
||||||
|
// It exists in this package in order to avoid circular dependency with the "channelmonitorhistory" package.
|
||||||
|
HistoryInverseTable = "channel_monitor_histories"
|
||||||
|
// HistoryColumn is the table column denoting the history relation/edge.
|
||||||
|
HistoryColumn = "monitor_id"
|
||||||
|
// DailyRollupsTable is the table that holds the daily_rollups relation/edge.
|
||||||
|
DailyRollupsTable = "channel_monitor_daily_rollups"
|
||||||
|
// DailyRollupsInverseTable is the table name for the ChannelMonitorDailyRollup entity.
|
||||||
|
// It exists in this package in order to avoid circular dependency with the "channelmonitordailyrollup" package.
|
||||||
|
DailyRollupsInverseTable = "channel_monitor_daily_rollups"
|
||||||
|
// DailyRollupsColumn is the table column denoting the daily_rollups relation/edge.
|
||||||
|
DailyRollupsColumn = "monitor_id"
|
||||||
|
// RequestTemplateTable is the table that holds the request_template relation/edge.
|
||||||
|
RequestTemplateTable = "channel_monitors"
|
||||||
|
// RequestTemplateInverseTable is the table name for the ChannelMonitorRequestTemplate entity.
|
||||||
|
// It exists in this package in order to avoid circular dependency with the "channelmonitorrequesttemplate" package.
|
||||||
|
RequestTemplateInverseTable = "channel_monitor_request_templates"
|
||||||
|
// RequestTemplateColumn is the table column denoting the request_template relation/edge.
|
||||||
|
RequestTemplateColumn = "template_id"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Columns holds all SQL columns for channelmonitor fields.
|
||||||
|
var Columns = []string{
|
||||||
|
FieldID,
|
||||||
|
FieldCreatedAt,
|
||||||
|
FieldUpdatedAt,
|
||||||
|
FieldName,
|
||||||
|
FieldProvider,
|
||||||
|
FieldEndpoint,
|
||||||
|
FieldAPIKeyEncrypted,
|
||||||
|
FieldPrimaryModel,
|
||||||
|
FieldExtraModels,
|
||||||
|
FieldGroupName,
|
||||||
|
FieldEnabled,
|
||||||
|
FieldIntervalSeconds,
|
||||||
|
FieldLastCheckedAt,
|
||||||
|
FieldCreatedBy,
|
||||||
|
FieldTemplateID,
|
||||||
|
FieldExtraHeaders,
|
||||||
|
FieldBodyOverrideMode,
|
||||||
|
FieldBodyOverride,
|
||||||
|
}
|
||||||
|
|
||||||
|
// ValidColumn reports if the column name is valid (part of the table columns).
|
||||||
|
func ValidColumn(column string) bool {
|
||||||
|
for i := range Columns {
|
||||||
|
if column == Columns[i] {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
// DefaultCreatedAt holds the default value on creation for the "created_at" field.
|
||||||
|
DefaultCreatedAt func() time.Time
|
||||||
|
// DefaultUpdatedAt holds the default value on creation for the "updated_at" field.
|
||||||
|
DefaultUpdatedAt func() time.Time
|
||||||
|
// UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field.
|
||||||
|
UpdateDefaultUpdatedAt func() time.Time
|
||||||
|
// NameValidator is a validator for the "name" field. It is called by the builders before save.
|
||||||
|
NameValidator func(string) error
|
||||||
|
// EndpointValidator is a validator for the "endpoint" field. It is called by the builders before save.
|
||||||
|
EndpointValidator func(string) error
|
||||||
|
// APIKeyEncryptedValidator is a validator for the "api_key_encrypted" field. It is called by the builders before save.
|
||||||
|
APIKeyEncryptedValidator func(string) error
|
||||||
|
// PrimaryModelValidator is a validator for the "primary_model" field. It is called by the builders before save.
|
||||||
|
PrimaryModelValidator func(string) error
|
||||||
|
// DefaultExtraModels holds the default value on creation for the "extra_models" field.
|
||||||
|
DefaultExtraModels []string
|
||||||
|
// DefaultGroupName holds the default value on creation for the "group_name" field.
|
||||||
|
DefaultGroupName string
|
||||||
|
// GroupNameValidator is a validator for the "group_name" field. It is called by the builders before save.
|
||||||
|
GroupNameValidator func(string) error
|
||||||
|
// DefaultEnabled holds the default value on creation for the "enabled" field.
|
||||||
|
DefaultEnabled bool
|
||||||
|
// IntervalSecondsValidator is a validator for the "interval_seconds" field. It is called by the builders before save.
|
||||||
|
IntervalSecondsValidator func(int) error
|
||||||
|
// DefaultExtraHeaders holds the default value on creation for the "extra_headers" field.
|
||||||
|
DefaultExtraHeaders map[string]string
|
||||||
|
// DefaultBodyOverrideMode holds the default value on creation for the "body_override_mode" field.
|
||||||
|
DefaultBodyOverrideMode string
|
||||||
|
// BodyOverrideModeValidator is a validator for the "body_override_mode" field. It is called by the builders before save.
|
||||||
|
BodyOverrideModeValidator func(string) error
|
||||||
|
)
|
||||||
|
|
||||||
|
// Provider defines the type for the "provider" enum field.
|
||||||
|
type Provider string
|
||||||
|
|
||||||
|
// Provider values.
|
||||||
|
const (
|
||||||
|
ProviderOpenai Provider = "openai"
|
||||||
|
ProviderAnthropic Provider = "anthropic"
|
||||||
|
ProviderGemini Provider = "gemini"
|
||||||
|
)
|
||||||
|
|
||||||
|
func (pr Provider) String() string {
|
||||||
|
return string(pr)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProviderValidator is a validator for the "provider" field enum values. It is called by the builders before save.
|
||||||
|
func ProviderValidator(pr Provider) error {
|
||||||
|
switch pr {
|
||||||
|
case ProviderOpenai, ProviderAnthropic, ProviderGemini:
|
||||||
|
return nil
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("channelmonitor: invalid enum value for provider field: %q", pr)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// OrderOption defines the ordering options for the ChannelMonitor queries.
|
||||||
|
type OrderOption func(*sql.Selector)
|
||||||
|
|
||||||
|
// ByID orders the results by the id field.
|
||||||
|
func ByID(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldID, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByCreatedAt orders the results by the created_at field.
|
||||||
|
func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByUpdatedAt orders the results by the updated_at field.
|
||||||
|
func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByName orders the results by the name field.
|
||||||
|
func ByName(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldName, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByProvider orders the results by the provider field.
|
||||||
|
func ByProvider(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldProvider, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByEndpoint orders the results by the endpoint field.
|
||||||
|
func ByEndpoint(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldEndpoint, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByAPIKeyEncrypted orders the results by the api_key_encrypted field.
|
||||||
|
func ByAPIKeyEncrypted(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldAPIKeyEncrypted, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByPrimaryModel orders the results by the primary_model field.
|
||||||
|
func ByPrimaryModel(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldPrimaryModel, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByGroupName orders the results by the group_name field.
|
||||||
|
func ByGroupName(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldGroupName, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByEnabled orders the results by the enabled field.
|
||||||
|
func ByEnabled(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldEnabled, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByIntervalSeconds orders the results by the interval_seconds field.
|
||||||
|
func ByIntervalSeconds(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldIntervalSeconds, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByLastCheckedAt orders the results by the last_checked_at field.
|
||||||
|
func ByLastCheckedAt(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldLastCheckedAt, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByCreatedBy orders the results by the created_by field.
|
||||||
|
func ByCreatedBy(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldCreatedBy, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByTemplateID orders the results by the template_id field.
|
||||||
|
func ByTemplateID(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldTemplateID, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByBodyOverrideMode orders the results by the body_override_mode field.
|
||||||
|
func ByBodyOverrideMode(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldBodyOverrideMode, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Edge-ordering helpers (ent-generated — do not hand-edit).
// Each returns an OrderOption that sorts by a neighbor edge's count or fields.

// ByHistoryCount orders the results by history count.
func ByHistoryCount(opts ...sql.OrderTermOption) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborsCount(s, newHistoryStep(), opts...)
	}
}

// ByHistory orders the results by history terms.
func ByHistory(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborTerms(s, newHistoryStep(), append([]sql.OrderTerm{term}, terms...)...)
	}
}

// ByDailyRollupsCount orders the results by daily_rollups count.
func ByDailyRollupsCount(opts ...sql.OrderTermOption) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborsCount(s, newDailyRollupsStep(), opts...)
	}
}

// ByDailyRollups orders the results by daily_rollups terms.
func ByDailyRollups(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborTerms(s, newDailyRollupsStep(), append([]sql.OrderTerm{term}, terms...)...)
	}
}

// ByRequestTemplateField orders the results by request_template field.
func ByRequestTemplateField(field string, opts ...sql.OrderTermOption) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborTerms(s, newRequestTemplateStep(), sql.OrderByField(field, opts...))
	}
}
|
||||||
|
// newHistoryStep returns the sqlgraph traversal step for the history edge
// (O2M from this table to HistoryInverseTable). Ent-generated — do not hand-edit.
func newHistoryStep() *sqlgraph.Step {
	return sqlgraph.NewStep(
		sqlgraph.From(Table, FieldID),
		sqlgraph.To(HistoryInverseTable, FieldID),
		sqlgraph.Edge(sqlgraph.O2M, false, HistoryTable, HistoryColumn),
	)
}

// newDailyRollupsStep returns the sqlgraph traversal step for the daily_rollups
// edge (O2M from this table to DailyRollupsInverseTable).
func newDailyRollupsStep() *sqlgraph.Step {
	return sqlgraph.NewStep(
		sqlgraph.From(Table, FieldID),
		sqlgraph.To(DailyRollupsInverseTable, FieldID),
		sqlgraph.Edge(sqlgraph.O2M, false, DailyRollupsTable, DailyRollupsColumn),
	)
}

// newRequestTemplateStep returns the sqlgraph traversal step for the
// request_template edge (M2O from this table to RequestTemplateInverseTable).
func newRequestTemplateStep() *sqlgraph.Step {
	return sqlgraph.NewStep(
		sqlgraph.From(Table, FieldID),
		sqlgraph.To(RequestTemplateInverseTable, FieldID),
		sqlgraph.Edge(sqlgraph.M2O, false, RequestTemplateTable, RequestTemplateColumn),
	)
}
|
||||||
New file: backend/ent/channelmonitor/where.go (885 added lines, hunk @@ -0,0 +1,885 @@)
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package channelmonitor
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ID predicates (ent-generated — do not hand-edit).

// ID filters vertices based on their ID field.
func ID(id int64) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldEQ(FieldID, id))
}

// IDEQ applies the EQ predicate on the ID field.
func IDEQ(id int64) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldEQ(FieldID, id))
}

// IDNEQ applies the NEQ predicate on the ID field.
func IDNEQ(id int64) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldNEQ(FieldID, id))
}

// IDIn applies the In predicate on the ID field.
func IDIn(ids ...int64) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldIn(FieldID, ids...))
}

// IDNotIn applies the NotIn predicate on the ID field.
func IDNotIn(ids ...int64) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldNotIn(FieldID, ids...))
}

// IDGT applies the GT predicate on the ID field.
func IDGT(id int64) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldGT(FieldID, id))
}

// IDGTE applies the GTE predicate on the ID field.
func IDGTE(id int64) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldGTE(FieldID, id))
}

// IDLT applies the LT predicate on the ID field.
func IDLT(id int64) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldLT(FieldID, id))
}

// IDLTE applies the LTE predicate on the ID field.
func IDLTE(id int64) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldLTE(FieldID, id))
}
|
||||||
|
|
||||||
|
// Per-field equality shortcuts (ent-generated — do not hand-edit).
// Each is identical to the corresponding <Field>EQ predicate.

// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ.
func CreatedAt(v time.Time) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldEQ(FieldCreatedAt, v))
}

// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ.
func UpdatedAt(v time.Time) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldEQ(FieldUpdatedAt, v))
}

// Name applies equality check predicate on the "name" field. It's identical to NameEQ.
func Name(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldEQ(FieldName, v))
}

// Endpoint applies equality check predicate on the "endpoint" field. It's identical to EndpointEQ.
func Endpoint(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldEQ(FieldEndpoint, v))
}

// APIKeyEncrypted applies equality check predicate on the "api_key_encrypted" field. It's identical to APIKeyEncryptedEQ.
func APIKeyEncrypted(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldEQ(FieldAPIKeyEncrypted, v))
}

// PrimaryModel applies equality check predicate on the "primary_model" field. It's identical to PrimaryModelEQ.
func PrimaryModel(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldEQ(FieldPrimaryModel, v))
}

// GroupName applies equality check predicate on the "group_name" field. It's identical to GroupNameEQ.
func GroupName(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldEQ(FieldGroupName, v))
}

// Enabled applies equality check predicate on the "enabled" field. It's identical to EnabledEQ.
func Enabled(v bool) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldEQ(FieldEnabled, v))
}

// IntervalSeconds applies equality check predicate on the "interval_seconds" field. It's identical to IntervalSecondsEQ.
func IntervalSeconds(v int) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldEQ(FieldIntervalSeconds, v))
}

// LastCheckedAt applies equality check predicate on the "last_checked_at" field. It's identical to LastCheckedAtEQ.
func LastCheckedAt(v time.Time) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldEQ(FieldLastCheckedAt, v))
}

// CreatedBy applies equality check predicate on the "created_by" field. It's identical to CreatedByEQ.
func CreatedBy(v int64) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldEQ(FieldCreatedBy, v))
}

// TemplateID applies equality check predicate on the "template_id" field. It's identical to TemplateIDEQ.
func TemplateID(v int64) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldEQ(FieldTemplateID, v))
}

// BodyOverrideMode applies equality check predicate on the "body_override_mode" field. It's identical to BodyOverrideModeEQ.
func BodyOverrideMode(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldEQ(FieldBodyOverrideMode, v))
}
|
||||||
|
|
||||||
|
// created_at / updated_at predicates (ent-generated — do not hand-edit).

// CreatedAtEQ applies the EQ predicate on the "created_at" field.
func CreatedAtEQ(v time.Time) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldEQ(FieldCreatedAt, v))
}

// CreatedAtNEQ applies the NEQ predicate on the "created_at" field.
func CreatedAtNEQ(v time.Time) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldNEQ(FieldCreatedAt, v))
}

// CreatedAtIn applies the In predicate on the "created_at" field.
func CreatedAtIn(vs ...time.Time) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldIn(FieldCreatedAt, vs...))
}

// CreatedAtNotIn applies the NotIn predicate on the "created_at" field.
func CreatedAtNotIn(vs ...time.Time) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldNotIn(FieldCreatedAt, vs...))
}

// CreatedAtGT applies the GT predicate on the "created_at" field.
func CreatedAtGT(v time.Time) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldGT(FieldCreatedAt, v))
}

// CreatedAtGTE applies the GTE predicate on the "created_at" field.
func CreatedAtGTE(v time.Time) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldGTE(FieldCreatedAt, v))
}

// CreatedAtLT applies the LT predicate on the "created_at" field.
func CreatedAtLT(v time.Time) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldLT(FieldCreatedAt, v))
}

// CreatedAtLTE applies the LTE predicate on the "created_at" field.
func CreatedAtLTE(v time.Time) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldLTE(FieldCreatedAt, v))
}

// UpdatedAtEQ applies the EQ predicate on the "updated_at" field.
func UpdatedAtEQ(v time.Time) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldEQ(FieldUpdatedAt, v))
}

// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field.
func UpdatedAtNEQ(v time.Time) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldNEQ(FieldUpdatedAt, v))
}

// UpdatedAtIn applies the In predicate on the "updated_at" field.
func UpdatedAtIn(vs ...time.Time) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldIn(FieldUpdatedAt, vs...))
}

// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field.
func UpdatedAtNotIn(vs ...time.Time) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldNotIn(FieldUpdatedAt, vs...))
}

// UpdatedAtGT applies the GT predicate on the "updated_at" field.
func UpdatedAtGT(v time.Time) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldGT(FieldUpdatedAt, v))
}

// UpdatedAtGTE applies the GTE predicate on the "updated_at" field.
func UpdatedAtGTE(v time.Time) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldGTE(FieldUpdatedAt, v))
}

// UpdatedAtLT applies the LT predicate on the "updated_at" field.
func UpdatedAtLT(v time.Time) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldLT(FieldUpdatedAt, v))
}

// UpdatedAtLTE applies the LTE predicate on the "updated_at" field.
func UpdatedAtLTE(v time.Time) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldLTE(FieldUpdatedAt, v))
}
|
||||||
|
|
||||||
|
// name predicates (ent-generated — do not hand-edit).

// NameEQ applies the EQ predicate on the "name" field.
func NameEQ(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldEQ(FieldName, v))
}

// NameNEQ applies the NEQ predicate on the "name" field.
func NameNEQ(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldNEQ(FieldName, v))
}

// NameIn applies the In predicate on the "name" field.
func NameIn(vs ...string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldIn(FieldName, vs...))
}

// NameNotIn applies the NotIn predicate on the "name" field.
func NameNotIn(vs ...string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldNotIn(FieldName, vs...))
}

// NameGT applies the GT predicate on the "name" field.
func NameGT(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldGT(FieldName, v))
}

// NameGTE applies the GTE predicate on the "name" field.
func NameGTE(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldGTE(FieldName, v))
}

// NameLT applies the LT predicate on the "name" field.
func NameLT(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldLT(FieldName, v))
}

// NameLTE applies the LTE predicate on the "name" field.
func NameLTE(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldLTE(FieldName, v))
}

// NameContains applies the Contains predicate on the "name" field.
func NameContains(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldContains(FieldName, v))
}

// NameHasPrefix applies the HasPrefix predicate on the "name" field.
func NameHasPrefix(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldHasPrefix(FieldName, v))
}

// NameHasSuffix applies the HasSuffix predicate on the "name" field.
func NameHasSuffix(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldHasSuffix(FieldName, v))
}

// NameEqualFold applies the EqualFold predicate on the "name" field.
func NameEqualFold(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldEqualFold(FieldName, v))
}

// NameContainsFold applies the ContainsFold predicate on the "name" field.
func NameContainsFold(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldContainsFold(FieldName, v))
}
|
||||||
|
|
||||||
|
// provider (enum) predicates (ent-generated — do not hand-edit).

// ProviderEQ applies the EQ predicate on the "provider" field.
func ProviderEQ(v Provider) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldEQ(FieldProvider, v))
}

// ProviderNEQ applies the NEQ predicate on the "provider" field.
func ProviderNEQ(v Provider) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldNEQ(FieldProvider, v))
}

// ProviderIn applies the In predicate on the "provider" field.
func ProviderIn(vs ...Provider) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldIn(FieldProvider, vs...))
}

// ProviderNotIn applies the NotIn predicate on the "provider" field.
func ProviderNotIn(vs ...Provider) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldNotIn(FieldProvider, vs...))
}
|
||||||
|
|
||||||
|
// endpoint predicates (ent-generated — do not hand-edit).

// EndpointEQ applies the EQ predicate on the "endpoint" field.
func EndpointEQ(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldEQ(FieldEndpoint, v))
}

// EndpointNEQ applies the NEQ predicate on the "endpoint" field.
func EndpointNEQ(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldNEQ(FieldEndpoint, v))
}

// EndpointIn applies the In predicate on the "endpoint" field.
func EndpointIn(vs ...string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldIn(FieldEndpoint, vs...))
}

// EndpointNotIn applies the NotIn predicate on the "endpoint" field.
func EndpointNotIn(vs ...string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldNotIn(FieldEndpoint, vs...))
}

// EndpointGT applies the GT predicate on the "endpoint" field.
func EndpointGT(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldGT(FieldEndpoint, v))
}

// EndpointGTE applies the GTE predicate on the "endpoint" field.
func EndpointGTE(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldGTE(FieldEndpoint, v))
}

// EndpointLT applies the LT predicate on the "endpoint" field.
func EndpointLT(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldLT(FieldEndpoint, v))
}

// EndpointLTE applies the LTE predicate on the "endpoint" field.
func EndpointLTE(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldLTE(FieldEndpoint, v))
}

// EndpointContains applies the Contains predicate on the "endpoint" field.
func EndpointContains(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldContains(FieldEndpoint, v))
}

// EndpointHasPrefix applies the HasPrefix predicate on the "endpoint" field.
func EndpointHasPrefix(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldHasPrefix(FieldEndpoint, v))
}

// EndpointHasSuffix applies the HasSuffix predicate on the "endpoint" field.
func EndpointHasSuffix(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldHasSuffix(FieldEndpoint, v))
}

// EndpointEqualFold applies the EqualFold predicate on the "endpoint" field.
func EndpointEqualFold(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldEqualFold(FieldEndpoint, v))
}

// EndpointContainsFold applies the ContainsFold predicate on the "endpoint" field.
func EndpointContainsFold(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldContainsFold(FieldEndpoint, v))
}
|
||||||
|
|
||||||
|
// api_key_encrypted predicates (ent-generated — do not hand-edit).
// NOTE(review): these compare the *encrypted* value; they cannot match plaintext keys.

// APIKeyEncryptedEQ applies the EQ predicate on the "api_key_encrypted" field.
func APIKeyEncryptedEQ(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldEQ(FieldAPIKeyEncrypted, v))
}

// APIKeyEncryptedNEQ applies the NEQ predicate on the "api_key_encrypted" field.
func APIKeyEncryptedNEQ(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldNEQ(FieldAPIKeyEncrypted, v))
}

// APIKeyEncryptedIn applies the In predicate on the "api_key_encrypted" field.
func APIKeyEncryptedIn(vs ...string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldIn(FieldAPIKeyEncrypted, vs...))
}

// APIKeyEncryptedNotIn applies the NotIn predicate on the "api_key_encrypted" field.
func APIKeyEncryptedNotIn(vs ...string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldNotIn(FieldAPIKeyEncrypted, vs...))
}

// APIKeyEncryptedGT applies the GT predicate on the "api_key_encrypted" field.
func APIKeyEncryptedGT(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldGT(FieldAPIKeyEncrypted, v))
}

// APIKeyEncryptedGTE applies the GTE predicate on the "api_key_encrypted" field.
func APIKeyEncryptedGTE(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldGTE(FieldAPIKeyEncrypted, v))
}

// APIKeyEncryptedLT applies the LT predicate on the "api_key_encrypted" field.
func APIKeyEncryptedLT(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldLT(FieldAPIKeyEncrypted, v))
}

// APIKeyEncryptedLTE applies the LTE predicate on the "api_key_encrypted" field.
func APIKeyEncryptedLTE(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldLTE(FieldAPIKeyEncrypted, v))
}

// APIKeyEncryptedContains applies the Contains predicate on the "api_key_encrypted" field.
func APIKeyEncryptedContains(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldContains(FieldAPIKeyEncrypted, v))
}

// APIKeyEncryptedHasPrefix applies the HasPrefix predicate on the "api_key_encrypted" field.
func APIKeyEncryptedHasPrefix(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldHasPrefix(FieldAPIKeyEncrypted, v))
}

// APIKeyEncryptedHasSuffix applies the HasSuffix predicate on the "api_key_encrypted" field.
func APIKeyEncryptedHasSuffix(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldHasSuffix(FieldAPIKeyEncrypted, v))
}

// APIKeyEncryptedEqualFold applies the EqualFold predicate on the "api_key_encrypted" field.
func APIKeyEncryptedEqualFold(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldEqualFold(FieldAPIKeyEncrypted, v))
}

// APIKeyEncryptedContainsFold applies the ContainsFold predicate on the "api_key_encrypted" field.
func APIKeyEncryptedContainsFold(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldContainsFold(FieldAPIKeyEncrypted, v))
}
|
||||||
|
|
||||||
|
// primary_model predicates (ent-generated — do not hand-edit).

// PrimaryModelEQ applies the EQ predicate on the "primary_model" field.
func PrimaryModelEQ(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldEQ(FieldPrimaryModel, v))
}

// PrimaryModelNEQ applies the NEQ predicate on the "primary_model" field.
func PrimaryModelNEQ(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldNEQ(FieldPrimaryModel, v))
}

// PrimaryModelIn applies the In predicate on the "primary_model" field.
func PrimaryModelIn(vs ...string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldIn(FieldPrimaryModel, vs...))
}

// PrimaryModelNotIn applies the NotIn predicate on the "primary_model" field.
func PrimaryModelNotIn(vs ...string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldNotIn(FieldPrimaryModel, vs...))
}

// PrimaryModelGT applies the GT predicate on the "primary_model" field.
func PrimaryModelGT(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldGT(FieldPrimaryModel, v))
}

// PrimaryModelGTE applies the GTE predicate on the "primary_model" field.
func PrimaryModelGTE(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldGTE(FieldPrimaryModel, v))
}

// PrimaryModelLT applies the LT predicate on the "primary_model" field.
func PrimaryModelLT(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldLT(FieldPrimaryModel, v))
}

// PrimaryModelLTE applies the LTE predicate on the "primary_model" field.
func PrimaryModelLTE(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldLTE(FieldPrimaryModel, v))
}

// PrimaryModelContains applies the Contains predicate on the "primary_model" field.
func PrimaryModelContains(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldContains(FieldPrimaryModel, v))
}

// PrimaryModelHasPrefix applies the HasPrefix predicate on the "primary_model" field.
func PrimaryModelHasPrefix(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldHasPrefix(FieldPrimaryModel, v))
}

// PrimaryModelHasSuffix applies the HasSuffix predicate on the "primary_model" field.
func PrimaryModelHasSuffix(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldHasSuffix(FieldPrimaryModel, v))
}

// PrimaryModelEqualFold applies the EqualFold predicate on the "primary_model" field.
func PrimaryModelEqualFold(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldEqualFold(FieldPrimaryModel, v))
}

// PrimaryModelContainsFold applies the ContainsFold predicate on the "primary_model" field.
func PrimaryModelContainsFold(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldContainsFold(FieldPrimaryModel, v))
}
|
||||||
|
|
||||||
|
// group_name predicates (ent-generated — do not hand-edit).
// group_name is nullable, hence the extra IsNil/NotNil predicates.

// GroupNameEQ applies the EQ predicate on the "group_name" field.
func GroupNameEQ(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldEQ(FieldGroupName, v))
}

// GroupNameNEQ applies the NEQ predicate on the "group_name" field.
func GroupNameNEQ(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldNEQ(FieldGroupName, v))
}

// GroupNameIn applies the In predicate on the "group_name" field.
func GroupNameIn(vs ...string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldIn(FieldGroupName, vs...))
}

// GroupNameNotIn applies the NotIn predicate on the "group_name" field.
func GroupNameNotIn(vs ...string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldNotIn(FieldGroupName, vs...))
}

// GroupNameGT applies the GT predicate on the "group_name" field.
func GroupNameGT(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldGT(FieldGroupName, v))
}

// GroupNameGTE applies the GTE predicate on the "group_name" field.
func GroupNameGTE(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldGTE(FieldGroupName, v))
}

// GroupNameLT applies the LT predicate on the "group_name" field.
func GroupNameLT(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldLT(FieldGroupName, v))
}

// GroupNameLTE applies the LTE predicate on the "group_name" field.
func GroupNameLTE(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldLTE(FieldGroupName, v))
}

// GroupNameContains applies the Contains predicate on the "group_name" field.
func GroupNameContains(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldContains(FieldGroupName, v))
}

// GroupNameHasPrefix applies the HasPrefix predicate on the "group_name" field.
func GroupNameHasPrefix(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldHasPrefix(FieldGroupName, v))
}

// GroupNameHasSuffix applies the HasSuffix predicate on the "group_name" field.
func GroupNameHasSuffix(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldHasSuffix(FieldGroupName, v))
}

// GroupNameIsNil applies the IsNil predicate on the "group_name" field.
func GroupNameIsNil() predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldIsNull(FieldGroupName))
}

// GroupNameNotNil applies the NotNil predicate on the "group_name" field.
func GroupNameNotNil() predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldNotNull(FieldGroupName))
}

// GroupNameEqualFold applies the EqualFold predicate on the "group_name" field.
func GroupNameEqualFold(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldEqualFold(FieldGroupName, v))
}

// GroupNameContainsFold applies the ContainsFold predicate on the "group_name" field.
func GroupNameContainsFold(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldContainsFold(FieldGroupName, v))
}
|
||||||
|
|
||||||
|
// enabled (bool) predicates (ent-generated — do not hand-edit).

// EnabledEQ applies the EQ predicate on the "enabled" field.
func EnabledEQ(v bool) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldEQ(FieldEnabled, v))
}

// EnabledNEQ applies the NEQ predicate on the "enabled" field.
func EnabledNEQ(v bool) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldNEQ(FieldEnabled, v))
}
|
||||||
|
|
||||||
|
// interval_seconds predicates (ent-generated — do not hand-edit).

// IntervalSecondsEQ applies the EQ predicate on the "interval_seconds" field.
func IntervalSecondsEQ(v int) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldEQ(FieldIntervalSeconds, v))
}

// IntervalSecondsNEQ applies the NEQ predicate on the "interval_seconds" field.
func IntervalSecondsNEQ(v int) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldNEQ(FieldIntervalSeconds, v))
}

// IntervalSecondsIn applies the In predicate on the "interval_seconds" field.
func IntervalSecondsIn(vs ...int) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldIn(FieldIntervalSeconds, vs...))
}

// IntervalSecondsNotIn applies the NotIn predicate on the "interval_seconds" field.
func IntervalSecondsNotIn(vs ...int) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldNotIn(FieldIntervalSeconds, vs...))
}

// IntervalSecondsGT applies the GT predicate on the "interval_seconds" field.
func IntervalSecondsGT(v int) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldGT(FieldIntervalSeconds, v))
}

// IntervalSecondsGTE applies the GTE predicate on the "interval_seconds" field.
func IntervalSecondsGTE(v int) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldGTE(FieldIntervalSeconds, v))
}

// IntervalSecondsLT applies the LT predicate on the "interval_seconds" field.
func IntervalSecondsLT(v int) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldLT(FieldIntervalSeconds, v))
}

// IntervalSecondsLTE applies the LTE predicate on the "interval_seconds" field.
func IntervalSecondsLTE(v int) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldLTE(FieldIntervalSeconds, v))
}
|
||||||
|
|
||||||
|
// LastCheckedAtEQ applies the EQ predicate on the "last_checked_at" field.
|
||||||
|
func LastCheckedAtEQ(v time.Time) predicate.ChannelMonitor {
|
||||||
|
return predicate.ChannelMonitor(sql.FieldEQ(FieldLastCheckedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// LastCheckedAtNEQ applies the NEQ predicate on the "last_checked_at" field.
|
||||||
|
func LastCheckedAtNEQ(v time.Time) predicate.ChannelMonitor {
|
||||||
|
return predicate.ChannelMonitor(sql.FieldNEQ(FieldLastCheckedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// LastCheckedAtIn applies the In predicate on the "last_checked_at" field.
|
||||||
|
func LastCheckedAtIn(vs ...time.Time) predicate.ChannelMonitor {
|
||||||
|
return predicate.ChannelMonitor(sql.FieldIn(FieldLastCheckedAt, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// LastCheckedAtNotIn applies the NotIn predicate on the "last_checked_at" field.
|
||||||
|
func LastCheckedAtNotIn(vs ...time.Time) predicate.ChannelMonitor {
|
||||||
|
return predicate.ChannelMonitor(sql.FieldNotIn(FieldLastCheckedAt, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// LastCheckedAtGT applies the GT predicate on the "last_checked_at" field.
|
||||||
|
func LastCheckedAtGT(v time.Time) predicate.ChannelMonitor {
|
||||||
|
return predicate.ChannelMonitor(sql.FieldGT(FieldLastCheckedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// LastCheckedAtGTE applies the GTE predicate on the "last_checked_at" field.
|
||||||
|
func LastCheckedAtGTE(v time.Time) predicate.ChannelMonitor {
|
||||||
|
return predicate.ChannelMonitor(sql.FieldGTE(FieldLastCheckedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// LastCheckedAtLT applies the LT predicate on the "last_checked_at" field.
|
||||||
|
func LastCheckedAtLT(v time.Time) predicate.ChannelMonitor {
|
||||||
|
return predicate.ChannelMonitor(sql.FieldLT(FieldLastCheckedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// LastCheckedAtLTE applies the LTE predicate on the "last_checked_at" field.
|
||||||
|
func LastCheckedAtLTE(v time.Time) predicate.ChannelMonitor {
|
||||||
|
return predicate.ChannelMonitor(sql.FieldLTE(FieldLastCheckedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// LastCheckedAtIsNil applies the IsNil predicate on the "last_checked_at" field.
|
||||||
|
func LastCheckedAtIsNil() predicate.ChannelMonitor {
|
||||||
|
return predicate.ChannelMonitor(sql.FieldIsNull(FieldLastCheckedAt))
|
||||||
|
}
|
||||||
|
|
||||||
|
// LastCheckedAtNotNil applies the NotNil predicate on the "last_checked_at" field.
|
||||||
|
func LastCheckedAtNotNil() predicate.ChannelMonitor {
|
||||||
|
return predicate.ChannelMonitor(sql.FieldNotNull(FieldLastCheckedAt))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedByEQ applies the EQ predicate on the "created_by" field.
|
||||||
|
func CreatedByEQ(v int64) predicate.ChannelMonitor {
|
||||||
|
return predicate.ChannelMonitor(sql.FieldEQ(FieldCreatedBy, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedByNEQ applies the NEQ predicate on the "created_by" field.
|
||||||
|
func CreatedByNEQ(v int64) predicate.ChannelMonitor {
|
||||||
|
return predicate.ChannelMonitor(sql.FieldNEQ(FieldCreatedBy, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedByIn applies the In predicate on the "created_by" field.
|
||||||
|
func CreatedByIn(vs ...int64) predicate.ChannelMonitor {
|
||||||
|
return predicate.ChannelMonitor(sql.FieldIn(FieldCreatedBy, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedByNotIn applies the NotIn predicate on the "created_by" field.
|
||||||
|
func CreatedByNotIn(vs ...int64) predicate.ChannelMonitor {
|
||||||
|
return predicate.ChannelMonitor(sql.FieldNotIn(FieldCreatedBy, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedByGT applies the GT predicate on the "created_by" field.
|
||||||
|
func CreatedByGT(v int64) predicate.ChannelMonitor {
|
||||||
|
return predicate.ChannelMonitor(sql.FieldGT(FieldCreatedBy, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedByGTE applies the GTE predicate on the "created_by" field.
|
||||||
|
func CreatedByGTE(v int64) predicate.ChannelMonitor {
|
||||||
|
return predicate.ChannelMonitor(sql.FieldGTE(FieldCreatedBy, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedByLT applies the LT predicate on the "created_by" field.
|
||||||
|
func CreatedByLT(v int64) predicate.ChannelMonitor {
|
||||||
|
return predicate.ChannelMonitor(sql.FieldLT(FieldCreatedBy, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedByLTE applies the LTE predicate on the "created_by" field.
|
||||||
|
func CreatedByLTE(v int64) predicate.ChannelMonitor {
|
||||||
|
return predicate.ChannelMonitor(sql.FieldLTE(FieldCreatedBy, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// TemplateIDEQ applies the EQ predicate on the "template_id" field.
|
||||||
|
func TemplateIDEQ(v int64) predicate.ChannelMonitor {
|
||||||
|
return predicate.ChannelMonitor(sql.FieldEQ(FieldTemplateID, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// TemplateIDNEQ applies the NEQ predicate on the "template_id" field.
|
||||||
|
func TemplateIDNEQ(v int64) predicate.ChannelMonitor {
|
||||||
|
return predicate.ChannelMonitor(sql.FieldNEQ(FieldTemplateID, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// TemplateIDIn applies the In predicate on the "template_id" field.
|
||||||
|
func TemplateIDIn(vs ...int64) predicate.ChannelMonitor {
|
||||||
|
return predicate.ChannelMonitor(sql.FieldIn(FieldTemplateID, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// TemplateIDNotIn applies the NotIn predicate on the "template_id" field.
|
||||||
|
func TemplateIDNotIn(vs ...int64) predicate.ChannelMonitor {
|
||||||
|
return predicate.ChannelMonitor(sql.FieldNotIn(FieldTemplateID, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// TemplateIDIsNil applies the IsNil predicate on the "template_id" field.
|
||||||
|
func TemplateIDIsNil() predicate.ChannelMonitor {
|
||||||
|
return predicate.ChannelMonitor(sql.FieldIsNull(FieldTemplateID))
|
||||||
|
}
|
||||||
|
|
||||||
|
// TemplateIDNotNil applies the NotNil predicate on the "template_id" field.
|
||||||
|
func TemplateIDNotNil() predicate.ChannelMonitor {
|
||||||
|
return predicate.ChannelMonitor(sql.FieldNotNull(FieldTemplateID))
|
||||||
|
}
|
||||||
|
|
||||||
|
// BodyOverrideModeEQ applies the EQ predicate on the "body_override_mode" field.
|
||||||
|
func BodyOverrideModeEQ(v string) predicate.ChannelMonitor {
|
||||||
|
return predicate.ChannelMonitor(sql.FieldEQ(FieldBodyOverrideMode, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// BodyOverrideModeNEQ applies the NEQ predicate on the "body_override_mode" field.
|
||||||
|
func BodyOverrideModeNEQ(v string) predicate.ChannelMonitor {
|
||||||
|
return predicate.ChannelMonitor(sql.FieldNEQ(FieldBodyOverrideMode, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// BodyOverrideModeIn applies the In predicate on the "body_override_mode" field.
|
||||||
|
func BodyOverrideModeIn(vs ...string) predicate.ChannelMonitor {
|
||||||
|
return predicate.ChannelMonitor(sql.FieldIn(FieldBodyOverrideMode, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// BodyOverrideModeNotIn applies the NotIn predicate on the "body_override_mode" field.
|
||||||
|
func BodyOverrideModeNotIn(vs ...string) predicate.ChannelMonitor {
|
||||||
|
return predicate.ChannelMonitor(sql.FieldNotIn(FieldBodyOverrideMode, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// BodyOverrideModeGT applies the GT predicate on the "body_override_mode" field.
|
||||||
|
func BodyOverrideModeGT(v string) predicate.ChannelMonitor {
|
||||||
|
return predicate.ChannelMonitor(sql.FieldGT(FieldBodyOverrideMode, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// BodyOverrideModeGTE applies the GTE predicate on the "body_override_mode" field.
|
||||||
|
func BodyOverrideModeGTE(v string) predicate.ChannelMonitor {
|
||||||
|
return predicate.ChannelMonitor(sql.FieldGTE(FieldBodyOverrideMode, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// BodyOverrideModeLT applies the LT predicate on the "body_override_mode" field.
|
||||||
|
func BodyOverrideModeLT(v string) predicate.ChannelMonitor {
|
||||||
|
return predicate.ChannelMonitor(sql.FieldLT(FieldBodyOverrideMode, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// BodyOverrideModeLTE applies the LTE predicate on the "body_override_mode" field.
|
||||||
|
func BodyOverrideModeLTE(v string) predicate.ChannelMonitor {
|
||||||
|
return predicate.ChannelMonitor(sql.FieldLTE(FieldBodyOverrideMode, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// BodyOverrideModeContains applies the Contains predicate on the "body_override_mode" field.
|
||||||
|
func BodyOverrideModeContains(v string) predicate.ChannelMonitor {
|
||||||
|
return predicate.ChannelMonitor(sql.FieldContains(FieldBodyOverrideMode, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// BodyOverrideModeHasPrefix applies the HasPrefix predicate on the "body_override_mode" field.
|
||||||
|
func BodyOverrideModeHasPrefix(v string) predicate.ChannelMonitor {
|
||||||
|
return predicate.ChannelMonitor(sql.FieldHasPrefix(FieldBodyOverrideMode, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// BodyOverrideModeHasSuffix applies the HasSuffix predicate on the "body_override_mode" field.
|
||||||
|
func BodyOverrideModeHasSuffix(v string) predicate.ChannelMonitor {
|
||||||
|
return predicate.ChannelMonitor(sql.FieldHasSuffix(FieldBodyOverrideMode, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// BodyOverrideModeEqualFold applies the EqualFold predicate on the "body_override_mode" field.
|
||||||
|
func BodyOverrideModeEqualFold(v string) predicate.ChannelMonitor {
|
||||||
|
return predicate.ChannelMonitor(sql.FieldEqualFold(FieldBodyOverrideMode, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// BodyOverrideModeContainsFold applies the ContainsFold predicate on the "body_override_mode" field.
|
||||||
|
func BodyOverrideModeContainsFold(v string) predicate.ChannelMonitor {
|
||||||
|
return predicate.ChannelMonitor(sql.FieldContainsFold(FieldBodyOverrideMode, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// BodyOverrideIsNil applies the IsNil predicate on the "body_override" field.
|
||||||
|
func BodyOverrideIsNil() predicate.ChannelMonitor {
|
||||||
|
return predicate.ChannelMonitor(sql.FieldIsNull(FieldBodyOverride))
|
||||||
|
}
|
||||||
|
|
||||||
|
// BodyOverrideNotNil applies the NotNil predicate on the "body_override" field.
|
||||||
|
func BodyOverrideNotNil() predicate.ChannelMonitor {
|
||||||
|
return predicate.ChannelMonitor(sql.FieldNotNull(FieldBodyOverride))
|
||||||
|
}
|
||||||
|
|
||||||
|
// HasHistory applies the HasEdge predicate on the "history" edge.
|
||||||
|
func HasHistory() predicate.ChannelMonitor {
|
||||||
|
return predicate.ChannelMonitor(func(s *sql.Selector) {
|
||||||
|
step := sqlgraph.NewStep(
|
||||||
|
sqlgraph.From(Table, FieldID),
|
||||||
|
sqlgraph.Edge(sqlgraph.O2M, false, HistoryTable, HistoryColumn),
|
||||||
|
)
|
||||||
|
sqlgraph.HasNeighbors(s, step)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// HasHistoryWith applies the HasEdge predicate on the "history" edge with a given conditions (other predicates).
|
||||||
|
func HasHistoryWith(preds ...predicate.ChannelMonitorHistory) predicate.ChannelMonitor {
|
||||||
|
return predicate.ChannelMonitor(func(s *sql.Selector) {
|
||||||
|
step := newHistoryStep()
|
||||||
|
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
|
||||||
|
for _, p := range preds {
|
||||||
|
p(s)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// HasDailyRollups applies the HasEdge predicate on the "daily_rollups" edge.
|
||||||
|
func HasDailyRollups() predicate.ChannelMonitor {
|
||||||
|
return predicate.ChannelMonitor(func(s *sql.Selector) {
|
||||||
|
step := sqlgraph.NewStep(
|
||||||
|
sqlgraph.From(Table, FieldID),
|
||||||
|
sqlgraph.Edge(sqlgraph.O2M, false, DailyRollupsTable, DailyRollupsColumn),
|
||||||
|
)
|
||||||
|
sqlgraph.HasNeighbors(s, step)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// HasDailyRollupsWith applies the HasEdge predicate on the "daily_rollups" edge with a given conditions (other predicates).
|
||||||
|
func HasDailyRollupsWith(preds ...predicate.ChannelMonitorDailyRollup) predicate.ChannelMonitor {
|
||||||
|
return predicate.ChannelMonitor(func(s *sql.Selector) {
|
||||||
|
step := newDailyRollupsStep()
|
||||||
|
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
|
||||||
|
for _, p := range preds {
|
||||||
|
p(s)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// HasRequestTemplate applies the HasEdge predicate on the "request_template" edge.
|
||||||
|
func HasRequestTemplate() predicate.ChannelMonitor {
|
||||||
|
return predicate.ChannelMonitor(func(s *sql.Selector) {
|
||||||
|
step := sqlgraph.NewStep(
|
||||||
|
sqlgraph.From(Table, FieldID),
|
||||||
|
sqlgraph.Edge(sqlgraph.M2O, false, RequestTemplateTable, RequestTemplateColumn),
|
||||||
|
)
|
||||||
|
sqlgraph.HasNeighbors(s, step)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// HasRequestTemplateWith applies the HasEdge predicate on the "request_template" edge with a given conditions (other predicates).
|
||||||
|
func HasRequestTemplateWith(preds ...predicate.ChannelMonitorRequestTemplate) predicate.ChannelMonitor {
|
||||||
|
return predicate.ChannelMonitor(func(s *sql.Selector) {
|
||||||
|
step := newRequestTemplateStep()
|
||||||
|
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
|
||||||
|
for _, p := range preds {
|
||||||
|
p(s)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// And groups predicates with the AND operator between them.
|
||||||
|
func And(predicates ...predicate.ChannelMonitor) predicate.ChannelMonitor {
|
||||||
|
return predicate.ChannelMonitor(sql.AndPredicates(predicates...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Or groups predicates with the OR operator between them.
|
||||||
|
func Or(predicates ...predicate.ChannelMonitor) predicate.ChannelMonitor {
|
||||||
|
return predicate.ChannelMonitor(sql.OrPredicates(predicates...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Not applies the not operator on the given predicate.
|
||||||
|
func Not(p predicate.ChannelMonitor) predicate.ChannelMonitor {
|
||||||
|
return predicate.ChannelMonitor(sql.NotPredicates(p))
|
||||||
|
}
|
||||||
1610
backend/ent/channelmonitor_create.go
Normal file
88
backend/ent/channelmonitor_delete.go
Normal file
@@ -0,0 +1,88 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package ent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
"entgo.io/ent/schema/field"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/channelmonitor"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ChannelMonitorDelete is the builder for deleting a ChannelMonitor entity.
|
||||||
|
type ChannelMonitorDelete struct {
|
||||||
|
config
|
||||||
|
hooks []Hook
|
||||||
|
mutation *ChannelMonitorMutation
|
||||||
|
}
|
||||||
|
|
||||||
|
// Where appends a list predicates to the ChannelMonitorDelete builder.
|
||||||
|
func (_d *ChannelMonitorDelete) Where(ps ...predicate.ChannelMonitor) *ChannelMonitorDelete {
|
||||||
|
_d.mutation.Where(ps...)
|
||||||
|
return _d
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exec executes the deletion query and returns how many vertices were deleted.
|
||||||
|
func (_d *ChannelMonitorDelete) Exec(ctx context.Context) (int, error) {
|
||||||
|
return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
|
||||||
|
func (_d *ChannelMonitorDelete) ExecX(ctx context.Context) int {
|
||||||
|
n, err := _d.Exec(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_d *ChannelMonitorDelete) sqlExec(ctx context.Context) (int, error) {
|
||||||
|
_spec := sqlgraph.NewDeleteSpec(channelmonitor.Table, sqlgraph.NewFieldSpec(channelmonitor.FieldID, field.TypeInt64))
|
||||||
|
if ps := _d.mutation.predicates; len(ps) > 0 {
|
||||||
|
_spec.Predicate = func(selector *sql.Selector) {
|
||||||
|
for i := range ps {
|
||||||
|
ps[i](selector)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec)
|
||||||
|
if err != nil && sqlgraph.IsConstraintError(err) {
|
||||||
|
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||||
|
}
|
||||||
|
_d.mutation.done = true
|
||||||
|
return affected, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// ChannelMonitorDeleteOne is the builder for deleting a single ChannelMonitor entity.
|
||||||
|
type ChannelMonitorDeleteOne struct {
|
||||||
|
_d *ChannelMonitorDelete
|
||||||
|
}
|
||||||
|
|
||||||
|
// Where appends a list predicates to the ChannelMonitorDelete builder.
|
||||||
|
func (_d *ChannelMonitorDeleteOne) Where(ps ...predicate.ChannelMonitor) *ChannelMonitorDeleteOne {
|
||||||
|
_d._d.mutation.Where(ps...)
|
||||||
|
return _d
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exec executes the deletion query.
|
||||||
|
func (_d *ChannelMonitorDeleteOne) Exec(ctx context.Context) error {
|
||||||
|
n, err := _d._d.Exec(ctx)
|
||||||
|
switch {
|
||||||
|
case err != nil:
|
||||||
|
return err
|
||||||
|
case n == 0:
|
||||||
|
return &NotFoundError{channelmonitor.Label}
|
||||||
|
default:
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
|
||||||
|
func (_d *ChannelMonitorDeleteOne) ExecX(ctx context.Context) {
|
||||||
|
if err := _d.Exec(ctx); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
797
backend/ent/channelmonitor_query.go
Normal file
@@ -0,0 +1,797 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package ent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"database/sql/driver"
|
||||||
|
"fmt"
|
||||||
|
"math"
|
||||||
|
|
||||||
|
"entgo.io/ent"
|
||||||
|
"entgo.io/ent/dialect"
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
"entgo.io/ent/schema/field"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/channelmonitor"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/channelmonitordailyrollup"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/channelmonitorhistory"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/channelmonitorrequesttemplate"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ChannelMonitorQuery is the builder for querying ChannelMonitor entities.
|
||||||
|
type ChannelMonitorQuery struct {
|
||||||
|
config
|
||||||
|
ctx *QueryContext
|
||||||
|
order []channelmonitor.OrderOption
|
||||||
|
inters []Interceptor
|
||||||
|
predicates []predicate.ChannelMonitor
|
||||||
|
withHistory *ChannelMonitorHistoryQuery
|
||||||
|
withDailyRollups *ChannelMonitorDailyRollupQuery
|
||||||
|
withRequestTemplate *ChannelMonitorRequestTemplateQuery
|
||||||
|
modifiers []func(*sql.Selector)
|
||||||
|
// intermediate query (i.e. traversal path).
|
||||||
|
sql *sql.Selector
|
||||||
|
path func(context.Context) (*sql.Selector, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Where adds a new predicate for the ChannelMonitorQuery builder.
|
||||||
|
func (_q *ChannelMonitorQuery) Where(ps ...predicate.ChannelMonitor) *ChannelMonitorQuery {
|
||||||
|
_q.predicates = append(_q.predicates, ps...)
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// Limit the number of records to be returned by this query.
|
||||||
|
func (_q *ChannelMonitorQuery) Limit(limit int) *ChannelMonitorQuery {
|
||||||
|
_q.ctx.Limit = &limit
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// Offset to start from.
|
||||||
|
func (_q *ChannelMonitorQuery) Offset(offset int) *ChannelMonitorQuery {
|
||||||
|
_q.ctx.Offset = &offset
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unique configures the query builder to filter duplicate records on query.
|
||||||
|
// By default, unique is set to true, and can be disabled using this method.
|
||||||
|
func (_q *ChannelMonitorQuery) Unique(unique bool) *ChannelMonitorQuery {
|
||||||
|
_q.ctx.Unique = &unique
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// Order specifies how the records should be ordered.
|
||||||
|
func (_q *ChannelMonitorQuery) Order(o ...channelmonitor.OrderOption) *ChannelMonitorQuery {
|
||||||
|
_q.order = append(_q.order, o...)
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// QueryHistory chains the current query on the "history" edge.
|
||||||
|
func (_q *ChannelMonitorQuery) QueryHistory() *ChannelMonitorHistoryQuery {
|
||||||
|
query := (&ChannelMonitorHistoryClient{config: _q.config}).Query()
|
||||||
|
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
|
||||||
|
if err := _q.prepareQuery(ctx); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
selector := _q.sqlQuery(ctx)
|
||||||
|
if err := selector.Err(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
step := sqlgraph.NewStep(
|
||||||
|
sqlgraph.From(channelmonitor.Table, channelmonitor.FieldID, selector),
|
||||||
|
sqlgraph.To(channelmonitorhistory.Table, channelmonitorhistory.FieldID),
|
||||||
|
sqlgraph.Edge(sqlgraph.O2M, false, channelmonitor.HistoryTable, channelmonitor.HistoryColumn),
|
||||||
|
)
|
||||||
|
fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step)
|
||||||
|
return fromU, nil
|
||||||
|
}
|
||||||
|
return query
|
||||||
|
}
|
||||||
|
|
||||||
|
// QueryDailyRollups chains the current query on the "daily_rollups" edge.
|
||||||
|
func (_q *ChannelMonitorQuery) QueryDailyRollups() *ChannelMonitorDailyRollupQuery {
|
||||||
|
query := (&ChannelMonitorDailyRollupClient{config: _q.config}).Query()
|
||||||
|
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
|
||||||
|
if err := _q.prepareQuery(ctx); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
selector := _q.sqlQuery(ctx)
|
||||||
|
if err := selector.Err(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
step := sqlgraph.NewStep(
|
||||||
|
sqlgraph.From(channelmonitor.Table, channelmonitor.FieldID, selector),
|
||||||
|
sqlgraph.To(channelmonitordailyrollup.Table, channelmonitordailyrollup.FieldID),
|
||||||
|
sqlgraph.Edge(sqlgraph.O2M, false, channelmonitor.DailyRollupsTable, channelmonitor.DailyRollupsColumn),
|
||||||
|
)
|
||||||
|
fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step)
|
||||||
|
return fromU, nil
|
||||||
|
}
|
||||||
|
return query
|
||||||
|
}
|
||||||
|
|
||||||
|
// QueryRequestTemplate chains the current query on the "request_template" edge.
|
||||||
|
func (_q *ChannelMonitorQuery) QueryRequestTemplate() *ChannelMonitorRequestTemplateQuery {
|
||||||
|
query := (&ChannelMonitorRequestTemplateClient{config: _q.config}).Query()
|
||||||
|
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
|
||||||
|
if err := _q.prepareQuery(ctx); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
selector := _q.sqlQuery(ctx)
|
||||||
|
if err := selector.Err(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
step := sqlgraph.NewStep(
|
||||||
|
sqlgraph.From(channelmonitor.Table, channelmonitor.FieldID, selector),
|
||||||
|
sqlgraph.To(channelmonitorrequesttemplate.Table, channelmonitorrequesttemplate.FieldID),
|
||||||
|
sqlgraph.Edge(sqlgraph.M2O, false, channelmonitor.RequestTemplateTable, channelmonitor.RequestTemplateColumn),
|
||||||
|
)
|
||||||
|
fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step)
|
||||||
|
return fromU, nil
|
||||||
|
}
|
||||||
|
return query
|
||||||
|
}
|
||||||
|
|
||||||
|
// First returns the first ChannelMonitor entity from the query.
|
||||||
|
// Returns a *NotFoundError when no ChannelMonitor was found.
|
||||||
|
func (_q *ChannelMonitorQuery) First(ctx context.Context) (*ChannelMonitor, error) {
|
||||||
|
nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if len(nodes) == 0 {
|
||||||
|
return nil, &NotFoundError{channelmonitor.Label}
|
||||||
|
}
|
||||||
|
return nodes[0], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// FirstX is like First, but panics if an error occurs.
|
||||||
|
func (_q *ChannelMonitorQuery) FirstX(ctx context.Context) *ChannelMonitor {
|
||||||
|
node, err := _q.First(ctx)
|
||||||
|
if err != nil && !IsNotFound(err) {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return node
|
||||||
|
}
|
||||||
|
|
||||||
|
// FirstID returns the first ChannelMonitor ID from the query.
|
||||||
|
// Returns a *NotFoundError when no ChannelMonitor ID was found.
|
||||||
|
func (_q *ChannelMonitorQuery) FirstID(ctx context.Context) (id int64, err error) {
|
||||||
|
var ids []int64
|
||||||
|
if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if len(ids) == 0 {
|
||||||
|
err = &NotFoundError{channelmonitor.Label}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
return ids[0], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// FirstIDX is like FirstID, but panics if an error occurs.
|
||||||
|
func (_q *ChannelMonitorQuery) FirstIDX(ctx context.Context) int64 {
|
||||||
|
id, err := _q.FirstID(ctx)
|
||||||
|
if err != nil && !IsNotFound(err) {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return id
|
||||||
|
}
|
||||||
|
|
||||||
|
// Only returns a single ChannelMonitor entity found by the query, ensuring it only returns one.
|
||||||
|
// Returns a *NotSingularError when more than one ChannelMonitor entity is found.
|
||||||
|
// Returns a *NotFoundError when no ChannelMonitor entities are found.
|
||||||
|
func (_q *ChannelMonitorQuery) Only(ctx context.Context) (*ChannelMonitor, error) {
|
||||||
|
nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
switch len(nodes) {
|
||||||
|
case 1:
|
||||||
|
return nodes[0], nil
|
||||||
|
case 0:
|
||||||
|
return nil, &NotFoundError{channelmonitor.Label}
|
||||||
|
default:
|
||||||
|
return nil, &NotSingularError{channelmonitor.Label}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnlyX is like Only, but panics if an error occurs.
|
||||||
|
func (_q *ChannelMonitorQuery) OnlyX(ctx context.Context) *ChannelMonitor {
|
||||||
|
node, err := _q.Only(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return node
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnlyID is like Only, but returns the only ChannelMonitor ID in the query.
|
||||||
|
// Returns a *NotSingularError when more than one ChannelMonitor ID is found.
|
||||||
|
// Returns a *NotFoundError when no entities are found.
|
||||||
|
func (_q *ChannelMonitorQuery) OnlyID(ctx context.Context) (id int64, err error) {
|
||||||
|
var ids []int64
|
||||||
|
if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
switch len(ids) {
|
||||||
|
case 1:
|
||||||
|
id = ids[0]
|
||||||
|
case 0:
|
||||||
|
err = &NotFoundError{channelmonitor.Label}
|
||||||
|
default:
|
||||||
|
err = &NotSingularError{channelmonitor.Label}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnlyIDX is like OnlyID, but panics if an error occurs.
|
||||||
|
func (_q *ChannelMonitorQuery) OnlyIDX(ctx context.Context) int64 {
|
||||||
|
id, err := _q.OnlyID(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return id
|
||||||
|
}
|
||||||
|
|
||||||
|
// All executes the query and returns a list of ChannelMonitors.
|
||||||
|
func (_q *ChannelMonitorQuery) All(ctx context.Context) ([]*ChannelMonitor, error) {
|
||||||
|
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll)
|
||||||
|
if err := _q.prepareQuery(ctx); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
qr := querierAll[[]*ChannelMonitor, *ChannelMonitorQuery]()
|
||||||
|
return withInterceptors[[]*ChannelMonitor](ctx, _q, qr, _q.inters)
|
||||||
|
}
|
||||||
|
|
||||||
|
// AllX is like All, but panics if an error occurs.
|
||||||
|
func (_q *ChannelMonitorQuery) AllX(ctx context.Context) []*ChannelMonitor {
|
||||||
|
nodes, err := _q.All(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return nodes
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDs executes the query and returns a list of ChannelMonitor IDs.
|
||||||
|
func (_q *ChannelMonitorQuery) IDs(ctx context.Context) (ids []int64, err error) {
|
||||||
|
if _q.ctx.Unique == nil && _q.path != nil {
|
||||||
|
_q.Unique(true)
|
||||||
|
}
|
||||||
|
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs)
|
||||||
|
if err = _q.Select(channelmonitor.FieldID).Scan(ctx, &ids); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return ids, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDsX is like IDs, but panics if an error occurs.
|
||||||
|
func (_q *ChannelMonitorQuery) IDsX(ctx context.Context) []int64 {
|
||||||
|
ids, err := _q.IDs(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return ids
|
||||||
|
}
|
||||||
|
|
||||||
|
// Count returns the count of the given query.
|
||||||
|
func (_q *ChannelMonitorQuery) Count(ctx context.Context) (int, error) {
|
||||||
|
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount)
|
||||||
|
if err := _q.prepareQuery(ctx); err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
return withInterceptors[int](ctx, _q, querierCount[*ChannelMonitorQuery](), _q.inters)
|
||||||
|
}
|
||||||
|
|
||||||
|
// CountX is like Count, but panics if an error occurs.
|
||||||
|
func (_q *ChannelMonitorQuery) CountX(ctx context.Context) int {
|
||||||
|
count, err := _q.Count(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return count
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exist returns true if the query has elements in the graph.
|
||||||
|
func (_q *ChannelMonitorQuery) Exist(ctx context.Context) (bool, error) {
|
||||||
|
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist)
|
||||||
|
switch _, err := _q.FirstID(ctx); {
|
||||||
|
case IsNotFound(err):
|
||||||
|
return false, nil
|
||||||
|
case err != nil:
|
||||||
|
return false, fmt.Errorf("ent: check existence: %w", err)
|
||||||
|
default:
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExistX is like Exist, but panics if an error occurs.
|
||||||
|
func (_q *ChannelMonitorQuery) ExistX(ctx context.Context) bool {
|
||||||
|
exist, err := _q.Exist(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return exist
|
||||||
|
}
|
||||||
|
|
||||||
|
// Clone returns a duplicate of the ChannelMonitorQuery builder, including all associated steps. It can be
|
||||||
|
// used to prepare common query builders and use them differently after the clone is made.
|
||||||
|
func (_q *ChannelMonitorQuery) Clone() *ChannelMonitorQuery {
|
||||||
|
if _q == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return &ChannelMonitorQuery{
|
||||||
|
config: _q.config,
|
||||||
|
ctx: _q.ctx.Clone(),
|
||||||
|
order: append([]channelmonitor.OrderOption{}, _q.order...),
|
||||||
|
inters: append([]Interceptor{}, _q.inters...),
|
||||||
|
predicates: append([]predicate.ChannelMonitor{}, _q.predicates...),
|
||||||
|
withHistory: _q.withHistory.Clone(),
|
||||||
|
withDailyRollups: _q.withDailyRollups.Clone(),
|
||||||
|
withRequestTemplate: _q.withRequestTemplate.Clone(),
|
||||||
|
// clone intermediate query.
|
||||||
|
sql: _q.sql.Clone(),
|
||||||
|
path: _q.path,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithHistory tells the query-builder to eager-load the nodes that are connected to
|
||||||
|
// the "history" edge. The optional arguments are used to configure the query builder of the edge.
|
||||||
|
func (_q *ChannelMonitorQuery) WithHistory(opts ...func(*ChannelMonitorHistoryQuery)) *ChannelMonitorQuery {
|
||||||
|
query := (&ChannelMonitorHistoryClient{config: _q.config}).Query()
|
||||||
|
for _, opt := range opts {
|
||||||
|
opt(query)
|
||||||
|
}
|
||||||
|
_q.withHistory = query
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithDailyRollups tells the query-builder to eager-load the nodes that are connected to
|
||||||
|
// the "daily_rollups" edge. The optional arguments are used to configure the query builder of the edge.
|
||||||
|
func (_q *ChannelMonitorQuery) WithDailyRollups(opts ...func(*ChannelMonitorDailyRollupQuery)) *ChannelMonitorQuery {
|
||||||
|
query := (&ChannelMonitorDailyRollupClient{config: _q.config}).Query()
|
||||||
|
for _, opt := range opts {
|
||||||
|
opt(query)
|
||||||
|
}
|
||||||
|
_q.withDailyRollups = query
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithRequestTemplate tells the query-builder to eager-load the nodes that are connected to
|
||||||
|
// the "request_template" edge. The optional arguments are used to configure the query builder of the edge.
|
||||||
|
func (_q *ChannelMonitorQuery) WithRequestTemplate(opts ...func(*ChannelMonitorRequestTemplateQuery)) *ChannelMonitorQuery {
|
||||||
|
query := (&ChannelMonitorRequestTemplateClient{config: _q.config}).Query()
|
||||||
|
for _, opt := range opts {
|
||||||
|
opt(query)
|
||||||
|
}
|
||||||
|
_q.withRequestTemplate = query
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// GroupBy is used to group vertices by one or more fields/columns.
|
||||||
|
// It is often used with aggregate functions, like: count, max, mean, min, sum.
|
||||||
|
//
|
||||||
|
// Example:
|
||||||
|
//
|
||||||
|
// var v []struct {
|
||||||
|
// CreatedAt time.Time `json:"created_at,omitempty"`
|
||||||
|
// Count int `json:"count,omitempty"`
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// client.ChannelMonitor.Query().
|
||||||
|
// GroupBy(channelmonitor.FieldCreatedAt).
|
||||||
|
// Aggregate(ent.Count()).
|
||||||
|
// Scan(ctx, &v)
|
||||||
|
func (_q *ChannelMonitorQuery) GroupBy(field string, fields ...string) *ChannelMonitorGroupBy {
|
||||||
|
_q.ctx.Fields = append([]string{field}, fields...)
|
||||||
|
grbuild := &ChannelMonitorGroupBy{build: _q}
|
||||||
|
grbuild.flds = &_q.ctx.Fields
|
||||||
|
grbuild.label = channelmonitor.Label
|
||||||
|
grbuild.scan = grbuild.Scan
|
||||||
|
return grbuild
|
||||||
|
}
|
||||||
|
|
||||||
|
// Select allows the selection one or more fields/columns for the given query,
|
||||||
|
// instead of selecting all fields in the entity.
|
||||||
|
//
|
||||||
|
// Example:
|
||||||
|
//
|
||||||
|
// var v []struct {
|
||||||
|
// CreatedAt time.Time `json:"created_at,omitempty"`
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// client.ChannelMonitor.Query().
|
||||||
|
// Select(channelmonitor.FieldCreatedAt).
|
||||||
|
// Scan(ctx, &v)
|
||||||
|
func (_q *ChannelMonitorQuery) Select(fields ...string) *ChannelMonitorSelect {
|
||||||
|
_q.ctx.Fields = append(_q.ctx.Fields, fields...)
|
||||||
|
sbuild := &ChannelMonitorSelect{ChannelMonitorQuery: _q}
|
||||||
|
sbuild.label = channelmonitor.Label
|
||||||
|
sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan
|
||||||
|
return sbuild
|
||||||
|
}
|
||||||
|
|
||||||
|
// Aggregate returns a ChannelMonitorSelect configured with the given aggregations.
|
||||||
|
func (_q *ChannelMonitorQuery) Aggregate(fns ...AggregateFunc) *ChannelMonitorSelect {
|
||||||
|
return _q.Select().Aggregate(fns...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// prepareQuery validates the query before execution: it runs every registered
// Traverser interceptor, checks that all selected fields are valid
// ChannelMonitor columns, and resolves a deferred path function (if set)
// into the intermediate SQL query.
func (_q *ChannelMonitorQuery) prepareQuery(ctx context.Context) error {
	for _, inter := range _q.inters {
		if inter == nil {
			return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
		}
		if trv, ok := inter.(Traverser); ok {
			if err := trv.Traverse(ctx, _q); err != nil {
				return err
			}
		}
	}
	for _, f := range _q.ctx.Fields {
		if !channelmonitor.ValidColumn(f) {
			return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
		}
	}
	// A non-nil path lazily produces the base selector (e.g. from an edge
	// traversal); materialize it now so sqlAll/sqlCount can build on it.
	if _q.path != nil {
		prev, err := _q.path(ctx)
		if err != nil {
			return err
		}
		_q.sql = prev
	}
	return nil
}
|
||||||
|
|
||||||
|
// sqlAll executes the query and scans the rows into ChannelMonitor entities,
// then eager-loads any requested edges (history, daily_rollups,
// request_template) for the returned nodes.
func (_q *ChannelMonitorQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*ChannelMonitor, error) {
	var (
		nodes = []*ChannelMonitor{}
		_spec = _q.querySpec()
		// loadedTypes records which edges were requested so that the
		// generated Edges accessors can tell "not loaded" from "not found".
		loadedTypes = [3]bool{
			_q.withHistory != nil,
			_q.withDailyRollups != nil,
			_q.withRequestTemplate != nil,
		}
	)
	_spec.ScanValues = func(columns []string) ([]any, error) {
		return (*ChannelMonitor).scanValues(nil, columns)
	}
	_spec.Assign = func(columns []string, values []any) error {
		node := &ChannelMonitor{config: _q.config}
		nodes = append(nodes, node)
		node.Edges.loadedTypes = loadedTypes
		return node.assignValues(columns, values)
	}
	if len(_q.modifiers) > 0 {
		_spec.Modifiers = _q.modifiers
	}
	for i := range hooks {
		hooks[i](ctx, _spec)
	}
	if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil {
		return nil, err
	}
	if len(nodes) == 0 {
		return nodes, nil
	}
	if query := _q.withHistory; query != nil {
		if err := _q.loadHistory(ctx, query, nodes,
			func(n *ChannelMonitor) { n.Edges.History = []*ChannelMonitorHistory{} },
			func(n *ChannelMonitor, e *ChannelMonitorHistory) { n.Edges.History = append(n.Edges.History, e) }); err != nil {
			return nil, err
		}
	}
	if query := _q.withDailyRollups; query != nil {
		if err := _q.loadDailyRollups(ctx, query, nodes,
			func(n *ChannelMonitor) { n.Edges.DailyRollups = []*ChannelMonitorDailyRollup{} },
			func(n *ChannelMonitor, e *ChannelMonitorDailyRollup) {
				n.Edges.DailyRollups = append(n.Edges.DailyRollups, e)
			}); err != nil {
			return nil, err
		}
	}
	if query := _q.withRequestTemplate; query != nil {
		// Unique (to-one) edge: no init callback is needed.
		if err := _q.loadRequestTemplate(ctx, query, nodes, nil,
			func(n *ChannelMonitor, e *ChannelMonitorRequestTemplate) { n.Edges.RequestTemplate = e }); err != nil {
			return nil, err
		}
	}
	return nodes, nil
}
|
||||||
|
|
||||||
|
// loadHistory eager-loads the "history" edge for the given nodes: it fetches
// all ChannelMonitorHistory rows whose monitor_id matches one of the node IDs
// and assigns each to its parent via the assign callback.
func (_q *ChannelMonitorQuery) loadHistory(ctx context.Context, query *ChannelMonitorHistoryQuery, nodes []*ChannelMonitor, init func(*ChannelMonitor), assign func(*ChannelMonitor, *ChannelMonitorHistory)) error {
	fks := make([]driver.Value, 0, len(nodes))
	nodeids := make(map[int64]*ChannelMonitor)
	for i := range nodes {
		fks = append(fks, nodes[i].ID)
		nodeids[nodes[i].ID] = nodes[i]
		if init != nil {
			init(nodes[i])
		}
	}
	// Ensure the FK column is selected so results can be mapped back.
	if len(query.ctx.Fields) > 0 {
		query.ctx.AppendFieldOnce(channelmonitorhistory.FieldMonitorID)
	}
	query.Where(predicate.ChannelMonitorHistory(func(s *sql.Selector) {
		s.Where(sql.InValues(s.C(channelmonitor.HistoryColumn), fks...))
	}))
	neighbors, err := query.All(ctx)
	if err != nil {
		return err
	}
	for _, n := range neighbors {
		fk := n.MonitorID
		node, ok := nodeids[fk]
		if !ok {
			return fmt.Errorf(`unexpected referenced foreign-key "monitor_id" returned %v for node %v`, fk, n.ID)
		}
		assign(node, n)
	}
	return nil
}
|
||||||
|
// loadDailyRollups eager-loads the "daily_rollups" edge for the given nodes:
// it fetches all ChannelMonitorDailyRollup rows whose monitor_id matches one
// of the node IDs and assigns each to its parent via the assign callback.
func (_q *ChannelMonitorQuery) loadDailyRollups(ctx context.Context, query *ChannelMonitorDailyRollupQuery, nodes []*ChannelMonitor, init func(*ChannelMonitor), assign func(*ChannelMonitor, *ChannelMonitorDailyRollup)) error {
	fks := make([]driver.Value, 0, len(nodes))
	nodeids := make(map[int64]*ChannelMonitor)
	for i := range nodes {
		fks = append(fks, nodes[i].ID)
		nodeids[nodes[i].ID] = nodes[i]
		if init != nil {
			init(nodes[i])
		}
	}
	// Ensure the FK column is selected so results can be mapped back.
	if len(query.ctx.Fields) > 0 {
		query.ctx.AppendFieldOnce(channelmonitordailyrollup.FieldMonitorID)
	}
	query.Where(predicate.ChannelMonitorDailyRollup(func(s *sql.Selector) {
		s.Where(sql.InValues(s.C(channelmonitor.DailyRollupsColumn), fks...))
	}))
	neighbors, err := query.All(ctx)
	if err != nil {
		return err
	}
	for _, n := range neighbors {
		fk := n.MonitorID
		node, ok := nodeids[fk]
		if !ok {
			return fmt.Errorf(`unexpected referenced foreign-key "monitor_id" returned %v for node %v`, fk, n.ID)
		}
		assign(node, n)
	}
	return nil
}
|
||||||
|
// loadRequestTemplate eager-loads the to-one "request_template" edge: it
// collects the distinct non-nil template IDs referenced by the nodes, fetches
// the matching templates in one query, and assigns each template to every
// node that references it.
func (_q *ChannelMonitorQuery) loadRequestTemplate(ctx context.Context, query *ChannelMonitorRequestTemplateQuery, nodes []*ChannelMonitor, init func(*ChannelMonitor), assign func(*ChannelMonitor, *ChannelMonitorRequestTemplate)) error {
	ids := make([]int64, 0, len(nodes))
	// nodeids maps a template ID to all monitors that reference it.
	nodeids := make(map[int64][]*ChannelMonitor)
	for i := range nodes {
		// TemplateID is optional (nilable); skip monitors without a template.
		if nodes[i].TemplateID == nil {
			continue
		}
		fk := *nodes[i].TemplateID
		if _, ok := nodeids[fk]; !ok {
			ids = append(ids, fk)
		}
		nodeids[fk] = append(nodeids[fk], nodes[i])
	}
	if len(ids) == 0 {
		return nil
	}
	query.Where(channelmonitorrequesttemplate.IDIn(ids...))
	neighbors, err := query.All(ctx)
	if err != nil {
		return err
	}
	for _, n := range neighbors {
		// Shadows the outer slice: the monitors that reference this template.
		nodes, ok := nodeids[n.ID]
		if !ok {
			return fmt.Errorf(`unexpected foreign-key "template_id" returned %v`, n.ID)
		}
		for i := range nodes {
			assign(nodes[i], n)
		}
	}
	return nil
}
|
||||||
|
|
||||||
|
// sqlCount counts the rows matching the query, honoring any custom modifiers
// and a DISTINCT over explicitly selected fields when Unique is set.
func (_q *ChannelMonitorQuery) sqlCount(ctx context.Context) (int, error) {
	_spec := _q.querySpec()
	if len(_q.modifiers) > 0 {
		_spec.Modifiers = _q.modifiers
	}
	_spec.Node.Columns = _q.ctx.Fields
	if len(_q.ctx.Fields) > 0 {
		_spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique
	}
	return sqlgraph.CountNodes(ctx, _q.driver, _spec)
}
|
||||||
|
|
||||||
|
// querySpec translates the builder's state (selected fields, predicates,
// ordering, limit/offset, uniqueness) into a sqlgraph.QuerySpec used for
// execution.
func (_q *ChannelMonitorQuery) querySpec() *sqlgraph.QuerySpec {
	_spec := sqlgraph.NewQuerySpec(channelmonitor.Table, channelmonitor.Columns, sqlgraph.NewFieldSpec(channelmonitor.FieldID, field.TypeInt64))
	_spec.From = _q.sql
	if unique := _q.ctx.Unique; unique != nil {
		_spec.Unique = *unique
	} else if _q.path != nil {
		// Edge traversals default to unique results unless overridden.
		_spec.Unique = true
	}
	if fields := _q.ctx.Fields; len(fields) > 0 {
		_spec.Node.Columns = make([]string, 0, len(fields))
		// The ID column is always selected first; skip it below to avoid
		// selecting it twice.
		_spec.Node.Columns = append(_spec.Node.Columns, channelmonitor.FieldID)
		for i := range fields {
			if fields[i] != channelmonitor.FieldID {
				_spec.Node.Columns = append(_spec.Node.Columns, fields[i])
			}
		}
		// The template FK is required to wire up the request_template edge.
		if _q.withRequestTemplate != nil {
			_spec.Node.AddColumnOnce(channelmonitor.FieldTemplateID)
		}
	}
	if ps := _q.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	if limit := _q.ctx.Limit; limit != nil {
		_spec.Limit = *limit
	}
	if offset := _q.ctx.Offset; offset != nil {
		_spec.Offset = *offset
	}
	if ps := _q.order; len(ps) > 0 {
		_spec.Order = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	return _spec
}
|
||||||
|
|
||||||
|
// sqlQuery builds the raw SELECT statement for the query, applying selected
// columns, DISTINCT, custom modifiers, predicates, ordering, limit and offset.
func (_q *ChannelMonitorQuery) sqlQuery(ctx context.Context) *sql.Selector {
	builder := sql.Dialect(_q.driver.Dialect())
	t1 := builder.Table(channelmonitor.Table)
	columns := _q.ctx.Fields
	if len(columns) == 0 {
		columns = channelmonitor.Columns
	}
	selector := builder.Select(t1.Columns(columns...)...).From(t1)
	// A pre-built selector (e.g. from Clone or an edge path) takes precedence
	// over the fresh one; re-apply the column selection on it.
	if _q.sql != nil {
		selector = _q.sql
		selector.Select(selector.Columns(columns...)...)
	}
	if _q.ctx.Unique != nil && *_q.ctx.Unique {
		selector.Distinct()
	}
	for _, m := range _q.modifiers {
		m(selector)
	}
	for _, p := range _q.predicates {
		p(selector)
	}
	for _, p := range _q.order {
		p(selector)
	}
	if offset := _q.ctx.Offset; offset != nil {
		// limit is mandatory for offset clause. We start
		// with default value, and override it below if needed.
		selector.Offset(*offset).Limit(math.MaxInt32)
	}
	if limit := _q.ctx.Limit; limit != nil {
		selector.Limit(*limit)
	}
	return selector
}
|
||||||
|
|
||||||
|
// ForUpdate locks the selected rows against concurrent updates, and prevent them from being
|
||||||
|
// updated, deleted or "selected ... for update" by other sessions, until the transaction is
|
||||||
|
// either committed or rolled-back.
|
||||||
|
func (_q *ChannelMonitorQuery) ForUpdate(opts ...sql.LockOption) *ChannelMonitorQuery {
|
||||||
|
if _q.driver.Dialect() == dialect.Postgres {
|
||||||
|
_q.Unique(false)
|
||||||
|
}
|
||||||
|
_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
|
||||||
|
s.ForUpdate(opts...)
|
||||||
|
})
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock
|
||||||
|
// on any rows that are read. Other sessions can read the rows, but cannot modify them
|
||||||
|
// until your transaction commits.
|
||||||
|
func (_q *ChannelMonitorQuery) ForShare(opts ...sql.LockOption) *ChannelMonitorQuery {
|
||||||
|
if _q.driver.Dialect() == dialect.Postgres {
|
||||||
|
_q.Unique(false)
|
||||||
|
}
|
||||||
|
_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
|
||||||
|
s.ForShare(opts...)
|
||||||
|
})
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// ChannelMonitorGroupBy is the group-by builder for ChannelMonitor entities.
type ChannelMonitorGroupBy struct {
	// selector provides the shared field/aggregation plumbing.
	selector
	// build is the underlying query the group-by executes against.
	build *ChannelMonitorQuery
}
|
||||||
|
|
||||||
|
// Aggregate adds the given aggregation functions to the group-by query.
|
||||||
|
func (_g *ChannelMonitorGroupBy) Aggregate(fns ...AggregateFunc) *ChannelMonitorGroupBy {
|
||||||
|
_g.fns = append(_g.fns, fns...)
|
||||||
|
return _g
|
||||||
|
}
|
||||||
|
|
||||||
|
// Scan applies the selector query and scans the result into the given value.
|
||||||
|
func (_g *ChannelMonitorGroupBy) Scan(ctx context.Context, v any) error {
|
||||||
|
ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy)
|
||||||
|
if err := _g.build.prepareQuery(ctx); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return scanWithInterceptors[*ChannelMonitorQuery, *ChannelMonitorGroupBy](ctx, _g.build, _g, _g.build.inters, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
// sqlScan builds and executes the GROUP BY statement for the root query and
// scans the result rows into v.
func (_g *ChannelMonitorGroupBy) sqlScan(ctx context.Context, root *ChannelMonitorQuery, v any) error {
	selector := root.sqlQuery(ctx).Select()
	aggregation := make([]string, 0, len(_g.fns))
	for _, fn := range _g.fns {
		aggregation = append(aggregation, fn(selector))
	}
	// If no columns were selected explicitly, select the group-by fields
	// followed by the aggregation expressions.
	if len(selector.SelectedColumns()) == 0 {
		columns := make([]string, 0, len(*_g.flds)+len(_g.fns))
		for _, f := range *_g.flds {
			columns = append(columns, selector.C(f))
		}
		columns = append(columns, aggregation...)
		selector.Select(columns...)
	}
	selector.GroupBy(selector.Columns(*_g.flds...)...)
	if err := selector.Err(); err != nil {
		return err
	}
	rows := &sql.Rows{}
	query, args := selector.Query()
	if err := _g.build.driver.Query(ctx, query, args, rows); err != nil {
		return err
	}
	defer rows.Close()
	return sql.ScanSlice(rows, v)
}
|
||||||
|
|
||||||
|
// ChannelMonitorSelect is the builder for selecting fields of ChannelMonitor entities.
type ChannelMonitorSelect struct {
	// Embeds the full query so Select can reuse its configuration.
	*ChannelMonitorQuery
	// selector provides the shared field/aggregation plumbing.
	selector
}
|
||||||
|
|
||||||
|
// Aggregate adds the given aggregation functions to the selector query.
|
||||||
|
func (_s *ChannelMonitorSelect) Aggregate(fns ...AggregateFunc) *ChannelMonitorSelect {
|
||||||
|
_s.fns = append(_s.fns, fns...)
|
||||||
|
return _s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Scan applies the selector query and scans the result into the given value.
|
||||||
|
func (_s *ChannelMonitorSelect) Scan(ctx context.Context, v any) error {
|
||||||
|
ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect)
|
||||||
|
if err := _s.prepareQuery(ctx); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return scanWithInterceptors[*ChannelMonitorQuery, *ChannelMonitorSelect](ctx, _s.ChannelMonitorQuery, _s, _s.inters, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
// sqlScan executes the SELECT statement for the root query — appending or
// replacing the selection with any aggregation expressions — and scans the
// result rows into v.
func (_s *ChannelMonitorSelect) sqlScan(ctx context.Context, root *ChannelMonitorQuery, v any) error {
	selector := root.sqlQuery(ctx)
	aggregation := make([]string, 0, len(_s.fns))
	for _, fn := range _s.fns {
		aggregation = append(aggregation, fn(selector))
	}
	switch n := len(*_s.selector.flds); {
	case n == 0 && len(aggregation) > 0:
		// Aggregations only: they become the entire selection.
		selector.Select(aggregation...)
	case n != 0 && len(aggregation) > 0:
		// Fields plus aggregations: append the latter to the selection.
		selector.AppendSelect(aggregation...)
	}
	rows := &sql.Rows{}
	query, args := selector.Query()
	if err := _s.driver.Query(ctx, query, args, rows); err != nil {
		return err
	}
	defer rows.Close()
	return sql.ScanSlice(rows, v)
}
|
||||||
1328
backend/ent/channelmonitor_update.go
Normal file
278
backend/ent/channelmonitordailyrollup.go
Normal file
@@ -0,0 +1,278 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package ent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"entgo.io/ent"
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/channelmonitor"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/channelmonitordailyrollup"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ChannelMonitorDailyRollup is the model entity for the ChannelMonitorDailyRollup schema.
type ChannelMonitorDailyRollup struct {
	config `json:"-"`
	// ID of the ent.
	ID int64 `json:"id,omitempty"`
	// MonitorID holds the value of the "monitor_id" field.
	MonitorID int64 `json:"monitor_id,omitempty"`
	// Model holds the value of the "model" field.
	Model string `json:"model,omitempty"`
	// BucketDate holds the value of the "bucket_date" field.
	BucketDate time.Time `json:"bucket_date,omitempty"`
	// TotalChecks holds the value of the "total_checks" field.
	TotalChecks int `json:"total_checks,omitempty"`
	// OkCount holds the value of the "ok_count" field.
	OkCount int `json:"ok_count,omitempty"`
	// OperationalCount holds the value of the "operational_count" field.
	OperationalCount int `json:"operational_count,omitempty"`
	// DegradedCount holds the value of the "degraded_count" field.
	DegradedCount int `json:"degraded_count,omitempty"`
	// FailedCount holds the value of the "failed_count" field.
	FailedCount int `json:"failed_count,omitempty"`
	// ErrorCount holds the value of the "error_count" field.
	ErrorCount int `json:"error_count,omitempty"`
	// SumLatencyMs holds the value of the "sum_latency_ms" field.
	SumLatencyMs int64 `json:"sum_latency_ms,omitempty"`
	// CountLatency holds the value of the "count_latency" field.
	CountLatency int `json:"count_latency,omitempty"`
	// SumPingLatencyMs holds the value of the "sum_ping_latency_ms" field.
	SumPingLatencyMs int64 `json:"sum_ping_latency_ms,omitempty"`
	// CountPingLatency holds the value of the "count_ping_latency" field.
	CountPingLatency int `json:"count_ping_latency,omitempty"`
	// ComputedAt holds the value of the "computed_at" field.
	ComputedAt time.Time `json:"computed_at,omitempty"`
	// Edges holds the relations/edges for other nodes in the graph.
	// The values are being populated by the ChannelMonitorDailyRollupQuery when eager-loading is set.
	Edges ChannelMonitorDailyRollupEdges `json:"edges"`
	// selectValues stores values for columns selected dynamically (via
	// modifiers/order) that have no dedicated struct field; see Value.
	selectValues sql.SelectValues
}
|
||||||
|
|
||||||
|
// ChannelMonitorDailyRollupEdges holds the relations/edges for other nodes in the graph.
type ChannelMonitorDailyRollupEdges struct {
	// Monitor holds the value of the monitor edge.
	Monitor *ChannelMonitor `json:"monitor,omitempty"`
	// loadedTypes holds the information for reporting if a
	// type was loaded (or requested) in eager-loading or not.
	// Index 0 corresponds to the "monitor" edge.
	loadedTypes [1]bool
}
|
||||||
|
|
||||||
|
// MonitorOrErr returns the Monitor value or an error if the edge
|
||||||
|
// was not loaded in eager-loading, or loaded but was not found.
|
||||||
|
func (e ChannelMonitorDailyRollupEdges) MonitorOrErr() (*ChannelMonitor, error) {
|
||||||
|
if e.Monitor != nil {
|
||||||
|
return e.Monitor, nil
|
||||||
|
} else if e.loadedTypes[0] {
|
||||||
|
return nil, &NotFoundError{label: channelmonitor.Label}
|
||||||
|
}
|
||||||
|
return nil, &NotLoadedError{edge: "monitor"}
|
||||||
|
}
|
||||||
|
|
||||||
|
// scanValues returns the types for scanning values from sql.Rows.
func (*ChannelMonitorDailyRollup) scanValues(columns []string) ([]any, error) {
	values := make([]any, len(columns))
	for i := range columns {
		switch columns[i] {
		case channelmonitordailyrollup.FieldID, channelmonitordailyrollup.FieldMonitorID, channelmonitordailyrollup.FieldTotalChecks, channelmonitordailyrollup.FieldOkCount, channelmonitordailyrollup.FieldOperationalCount, channelmonitordailyrollup.FieldDegradedCount, channelmonitordailyrollup.FieldFailedCount, channelmonitordailyrollup.FieldErrorCount, channelmonitordailyrollup.FieldSumLatencyMs, channelmonitordailyrollup.FieldCountLatency, channelmonitordailyrollup.FieldSumPingLatencyMs, channelmonitordailyrollup.FieldCountPingLatency:
			values[i] = new(sql.NullInt64)
		case channelmonitordailyrollup.FieldModel:
			values[i] = new(sql.NullString)
		case channelmonitordailyrollup.FieldBucketDate, channelmonitordailyrollup.FieldComputedAt:
			values[i] = new(sql.NullTime)
		default:
			// Unknown columns (e.g. selected via modifiers) are scanned into
			// an UnknownType and routed to selectValues in assignValues.
			values[i] = new(sql.UnknownType)
		}
	}
	return values, nil
}
|
||||||
|
|
||||||
|
// assignValues assigns the values that were returned from sql.Rows (after scanning)
// to the ChannelMonitorDailyRollup fields.
func (_m *ChannelMonitorDailyRollup) assignValues(columns []string, values []any) error {
	if m, n := len(values), len(columns); m < n {
		return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
	}
	for i := range columns {
		switch columns[i] {
		case channelmonitordailyrollup.FieldID:
			value, ok := values[i].(*sql.NullInt64)
			if !ok {
				return fmt.Errorf("unexpected type %T for field id", value)
			}
			_m.ID = int64(value.Int64)
		case channelmonitordailyrollup.FieldMonitorID:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field monitor_id", values[i])
			} else if value.Valid {
				_m.MonitorID = value.Int64
			}
		case channelmonitordailyrollup.FieldModel:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field model", values[i])
			} else if value.Valid {
				_m.Model = value.String
			}
		case channelmonitordailyrollup.FieldBucketDate:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field bucket_date", values[i])
			} else if value.Valid {
				_m.BucketDate = value.Time
			}
		case channelmonitordailyrollup.FieldTotalChecks:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field total_checks", values[i])
			} else if value.Valid {
				_m.TotalChecks = int(value.Int64)
			}
		case channelmonitordailyrollup.FieldOkCount:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field ok_count", values[i])
			} else if value.Valid {
				_m.OkCount = int(value.Int64)
			}
		case channelmonitordailyrollup.FieldOperationalCount:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field operational_count", values[i])
			} else if value.Valid {
				_m.OperationalCount = int(value.Int64)
			}
		case channelmonitordailyrollup.FieldDegradedCount:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field degraded_count", values[i])
			} else if value.Valid {
				_m.DegradedCount = int(value.Int64)
			}
		case channelmonitordailyrollup.FieldFailedCount:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field failed_count", values[i])
			} else if value.Valid {
				_m.FailedCount = int(value.Int64)
			}
		case channelmonitordailyrollup.FieldErrorCount:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field error_count", values[i])
			} else if value.Valid {
				_m.ErrorCount = int(value.Int64)
			}
		case channelmonitordailyrollup.FieldSumLatencyMs:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field sum_latency_ms", values[i])
			} else if value.Valid {
				_m.SumLatencyMs = value.Int64
			}
		case channelmonitordailyrollup.FieldCountLatency:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field count_latency", values[i])
			} else if value.Valid {
				_m.CountLatency = int(value.Int64)
			}
		case channelmonitordailyrollup.FieldSumPingLatencyMs:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field sum_ping_latency_ms", values[i])
			} else if value.Valid {
				_m.SumPingLatencyMs = value.Int64
			}
		case channelmonitordailyrollup.FieldCountPingLatency:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field count_ping_latency", values[i])
			} else if value.Valid {
				_m.CountPingLatency = int(value.Int64)
			}
		case channelmonitordailyrollup.FieldComputedAt:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field computed_at", values[i])
			} else if value.Valid {
				_m.ComputedAt = value.Time
			}
		default:
			// Columns with no dedicated struct field are kept in selectValues
			// and exposed via Value.
			_m.selectValues.Set(columns[i], values[i])
		}
	}
	return nil
}
|
||||||
|
|
||||||
|
// Value returns the ent.Value that was dynamically selected and assigned to the ChannelMonitorDailyRollup.
|
||||||
|
// This includes values selected through modifiers, order, etc.
|
||||||
|
func (_m *ChannelMonitorDailyRollup) Value(name string) (ent.Value, error) {
|
||||||
|
return _m.selectValues.Get(name)
|
||||||
|
}
|
||||||
|
|
||||||
|
// QueryMonitor queries the "monitor" edge of the ChannelMonitorDailyRollup entity.
|
||||||
|
func (_m *ChannelMonitorDailyRollup) QueryMonitor() *ChannelMonitorQuery {
|
||||||
|
return NewChannelMonitorDailyRollupClient(_m.config).QueryMonitor(_m)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update returns a builder for updating this ChannelMonitorDailyRollup.
|
||||||
|
// Note that you need to call ChannelMonitorDailyRollup.Unwrap() before calling this method if this ChannelMonitorDailyRollup
|
||||||
|
// was returned from a transaction, and the transaction was committed or rolled back.
|
||||||
|
func (_m *ChannelMonitorDailyRollup) Update() *ChannelMonitorDailyRollupUpdateOne {
|
||||||
|
return NewChannelMonitorDailyRollupClient(_m.config).UpdateOne(_m)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unwrap unwraps the ChannelMonitorDailyRollup entity that was returned from a transaction after it was closed,
|
||||||
|
// so that all future queries will be executed through the driver which created the transaction.
|
||||||
|
func (_m *ChannelMonitorDailyRollup) Unwrap() *ChannelMonitorDailyRollup {
|
||||||
|
_tx, ok := _m.config.driver.(*txDriver)
|
||||||
|
if !ok {
|
||||||
|
panic("ent: ChannelMonitorDailyRollup is not a transactional entity")
|
||||||
|
}
|
||||||
|
_m.config.driver = _tx.drv
|
||||||
|
return _m
|
||||||
|
}
|
||||||
|
|
||||||
|
// String implements the fmt.Stringer.
|
||||||
|
func (_m *ChannelMonitorDailyRollup) String() string {
|
||||||
|
var builder strings.Builder
|
||||||
|
builder.WriteString("ChannelMonitorDailyRollup(")
|
||||||
|
builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID))
|
||||||
|
builder.WriteString("monitor_id=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", _m.MonitorID))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("model=")
|
||||||
|
builder.WriteString(_m.Model)
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("bucket_date=")
|
||||||
|
builder.WriteString(_m.BucketDate.Format(time.ANSIC))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("total_checks=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", _m.TotalChecks))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("ok_count=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", _m.OkCount))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("operational_count=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", _m.OperationalCount))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("degraded_count=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", _m.DegradedCount))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("failed_count=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", _m.FailedCount))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("error_count=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", _m.ErrorCount))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("sum_latency_ms=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", _m.SumLatencyMs))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("count_latency=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", _m.CountLatency))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("sum_ping_latency_ms=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", _m.SumPingLatencyMs))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("count_ping_latency=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", _m.CountPingLatency))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("computed_at=")
|
||||||
|
builder.WriteString(_m.ComputedAt.Format(time.ANSIC))
|
||||||
|
builder.WriteByte(')')
|
||||||
|
return builder.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ChannelMonitorDailyRollups is a parsable slice of ChannelMonitorDailyRollup.
|
||||||
|
type ChannelMonitorDailyRollups []*ChannelMonitorDailyRollup
|
||||||
@@ -0,0 +1,206 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package channelmonitordailyrollup
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// Label holds the string label denoting the channelmonitordailyrollup type in the database.
|
||||||
|
Label = "channel_monitor_daily_rollup"
|
||||||
|
// FieldID holds the string denoting the id field in the database.
|
||||||
|
FieldID = "id"
|
||||||
|
// FieldMonitorID holds the string denoting the monitor_id field in the database.
|
||||||
|
FieldMonitorID = "monitor_id"
|
||||||
|
// FieldModel holds the string denoting the model field in the database.
|
||||||
|
FieldModel = "model"
|
||||||
|
// FieldBucketDate holds the string denoting the bucket_date field in the database.
|
||||||
|
FieldBucketDate = "bucket_date"
|
||||||
|
// FieldTotalChecks holds the string denoting the total_checks field in the database.
|
||||||
|
FieldTotalChecks = "total_checks"
|
||||||
|
// FieldOkCount holds the string denoting the ok_count field in the database.
|
||||||
|
FieldOkCount = "ok_count"
|
||||||
|
// FieldOperationalCount holds the string denoting the operational_count field in the database.
|
||||||
|
FieldOperationalCount = "operational_count"
|
||||||
|
// FieldDegradedCount holds the string denoting the degraded_count field in the database.
|
||||||
|
FieldDegradedCount = "degraded_count"
|
||||||
|
// FieldFailedCount holds the string denoting the failed_count field in the database.
|
||||||
|
FieldFailedCount = "failed_count"
|
||||||
|
// FieldErrorCount holds the string denoting the error_count field in the database.
|
||||||
|
FieldErrorCount = "error_count"
|
||||||
|
// FieldSumLatencyMs holds the string denoting the sum_latency_ms field in the database.
|
||||||
|
FieldSumLatencyMs = "sum_latency_ms"
|
||||||
|
// FieldCountLatency holds the string denoting the count_latency field in the database.
|
||||||
|
FieldCountLatency = "count_latency"
|
||||||
|
// FieldSumPingLatencyMs holds the string denoting the sum_ping_latency_ms field in the database.
|
||||||
|
FieldSumPingLatencyMs = "sum_ping_latency_ms"
|
||||||
|
// FieldCountPingLatency holds the string denoting the count_ping_latency field in the database.
|
||||||
|
FieldCountPingLatency = "count_ping_latency"
|
||||||
|
// FieldComputedAt holds the string denoting the computed_at field in the database.
|
||||||
|
FieldComputedAt = "computed_at"
|
||||||
|
// EdgeMonitor holds the string denoting the monitor edge name in mutations.
|
||||||
|
EdgeMonitor = "monitor"
|
||||||
|
// Table holds the table name of the channelmonitordailyrollup in the database.
|
||||||
|
Table = "channel_monitor_daily_rollups"
|
||||||
|
// MonitorTable is the table that holds the monitor relation/edge.
|
||||||
|
MonitorTable = "channel_monitor_daily_rollups"
|
||||||
|
// MonitorInverseTable is the table name for the ChannelMonitor entity.
|
||||||
|
// It exists in this package in order to avoid circular dependency with the "channelmonitor" package.
|
||||||
|
MonitorInverseTable = "channel_monitors"
|
||||||
|
// MonitorColumn is the table column denoting the monitor relation/edge.
|
||||||
|
MonitorColumn = "monitor_id"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Columns holds all SQL columns for channelmonitordailyrollup fields.
|
||||||
|
var Columns = []string{
|
||||||
|
FieldID,
|
||||||
|
FieldMonitorID,
|
||||||
|
FieldModel,
|
||||||
|
FieldBucketDate,
|
||||||
|
FieldTotalChecks,
|
||||||
|
FieldOkCount,
|
||||||
|
FieldOperationalCount,
|
||||||
|
FieldDegradedCount,
|
||||||
|
FieldFailedCount,
|
||||||
|
FieldErrorCount,
|
||||||
|
FieldSumLatencyMs,
|
||||||
|
FieldCountLatency,
|
||||||
|
FieldSumPingLatencyMs,
|
||||||
|
FieldCountPingLatency,
|
||||||
|
FieldComputedAt,
|
||||||
|
}
|
||||||
|
|
||||||
|
// ValidColumn reports if the column name is valid (part of the table columns).
|
||||||
|
func ValidColumn(column string) bool {
|
||||||
|
for i := range Columns {
|
||||||
|
if column == Columns[i] {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
// ModelValidator is a validator for the "model" field. It is called by the builders before save.
|
||||||
|
ModelValidator func(string) error
|
||||||
|
// DefaultTotalChecks holds the default value on creation for the "total_checks" field.
|
||||||
|
DefaultTotalChecks int
|
||||||
|
// DefaultOkCount holds the default value on creation for the "ok_count" field.
|
||||||
|
DefaultOkCount int
|
||||||
|
// DefaultOperationalCount holds the default value on creation for the "operational_count" field.
|
||||||
|
DefaultOperationalCount int
|
||||||
|
// DefaultDegradedCount holds the default value on creation for the "degraded_count" field.
|
||||||
|
DefaultDegradedCount int
|
||||||
|
// DefaultFailedCount holds the default value on creation for the "failed_count" field.
|
||||||
|
DefaultFailedCount int
|
||||||
|
// DefaultErrorCount holds the default value on creation for the "error_count" field.
|
||||||
|
DefaultErrorCount int
|
||||||
|
// DefaultSumLatencyMs holds the default value on creation for the "sum_latency_ms" field.
|
||||||
|
DefaultSumLatencyMs int64
|
||||||
|
// DefaultCountLatency holds the default value on creation for the "count_latency" field.
|
||||||
|
DefaultCountLatency int
|
||||||
|
// DefaultSumPingLatencyMs holds the default value on creation for the "sum_ping_latency_ms" field.
|
||||||
|
DefaultSumPingLatencyMs int64
|
||||||
|
// DefaultCountPingLatency holds the default value on creation for the "count_ping_latency" field.
|
||||||
|
DefaultCountPingLatency int
|
||||||
|
// DefaultComputedAt holds the default value on creation for the "computed_at" field.
|
||||||
|
DefaultComputedAt func() time.Time
|
||||||
|
// UpdateDefaultComputedAt holds the default value on update for the "computed_at" field.
|
||||||
|
UpdateDefaultComputedAt func() time.Time
|
||||||
|
)
|
||||||
|
|
||||||
|
// OrderOption defines the ordering options for the ChannelMonitorDailyRollup queries.
|
||||||
|
type OrderOption func(*sql.Selector)
|
||||||
|
|
||||||
|
// ByID orders the results by the id field.
|
||||||
|
func ByID(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldID, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByMonitorID orders the results by the monitor_id field.
|
||||||
|
func ByMonitorID(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldMonitorID, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByModel orders the results by the model field.
|
||||||
|
func ByModel(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldModel, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByBucketDate orders the results by the bucket_date field.
|
||||||
|
func ByBucketDate(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldBucketDate, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByTotalChecks orders the results by the total_checks field.
|
||||||
|
func ByTotalChecks(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldTotalChecks, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByOkCount orders the results by the ok_count field.
|
||||||
|
func ByOkCount(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldOkCount, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByOperationalCount orders the results by the operational_count field.
|
||||||
|
func ByOperationalCount(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldOperationalCount, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByDegradedCount orders the results by the degraded_count field.
|
||||||
|
func ByDegradedCount(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldDegradedCount, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByFailedCount orders the results by the failed_count field.
|
||||||
|
func ByFailedCount(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldFailedCount, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByErrorCount orders the results by the error_count field.
|
||||||
|
func ByErrorCount(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldErrorCount, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// BySumLatencyMs orders the results by the sum_latency_ms field.
|
||||||
|
func BySumLatencyMs(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldSumLatencyMs, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByCountLatency orders the results by the count_latency field.
|
||||||
|
func ByCountLatency(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldCountLatency, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// BySumPingLatencyMs orders the results by the sum_ping_latency_ms field.
|
||||||
|
func BySumPingLatencyMs(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldSumPingLatencyMs, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByCountPingLatency orders the results by the count_ping_latency field.
|
||||||
|
func ByCountPingLatency(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldCountPingLatency, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByComputedAt orders the results by the computed_at field.
|
||||||
|
func ByComputedAt(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldComputedAt, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByMonitorField orders the results by monitor field.
|
||||||
|
func ByMonitorField(field string, opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return func(s *sql.Selector) {
|
||||||
|
sqlgraph.OrderByNeighborTerms(s, newMonitorStep(), sql.OrderByField(field, opts...))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func newMonitorStep() *sqlgraph.Step {
|
||||||
|
return sqlgraph.NewStep(
|
||||||
|
sqlgraph.From(Table, FieldID),
|
||||||
|
sqlgraph.To(MonitorInverseTable, FieldID),
|
||||||
|
sqlgraph.Edge(sqlgraph.M2O, true, MonitorTable, MonitorColumn),
|
||||||
|
)
|
||||||
|
}
|
||||||
729
backend/ent/channelmonitordailyrollup/where.go
Normal file
@@ -0,0 +1,729 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package channelmonitordailyrollup
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ID filters vertices based on their ID field.
|
||||||
|
func ID(id int64) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldID, id))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDEQ applies the EQ predicate on the ID field.
|
||||||
|
func IDEQ(id int64) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldID, id))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDNEQ applies the NEQ predicate on the ID field.
|
||||||
|
func IDNEQ(id int64) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldNEQ(FieldID, id))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDIn applies the In predicate on the ID field.
|
||||||
|
func IDIn(ids ...int64) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldIn(FieldID, ids...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDNotIn applies the NotIn predicate on the ID field.
|
||||||
|
func IDNotIn(ids ...int64) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldNotIn(FieldID, ids...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDGT applies the GT predicate on the ID field.
|
||||||
|
func IDGT(id int64) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldGT(FieldID, id))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDGTE applies the GTE predicate on the ID field.
|
||||||
|
func IDGTE(id int64) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldGTE(FieldID, id))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDLT applies the LT predicate on the ID field.
|
||||||
|
func IDLT(id int64) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldLT(FieldID, id))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDLTE applies the LTE predicate on the ID field.
|
||||||
|
func IDLTE(id int64) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldLTE(FieldID, id))
|
||||||
|
}
|
||||||
|
|
||||||
|
// MonitorID applies equality check predicate on the "monitor_id" field. It's identical to MonitorIDEQ.
|
||||||
|
func MonitorID(v int64) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldMonitorID, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Model applies equality check predicate on the "model" field. It's identical to ModelEQ.
|
||||||
|
func Model(v string) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldModel, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// BucketDate applies equality check predicate on the "bucket_date" field. It's identical to BucketDateEQ.
|
||||||
|
func BucketDate(v time.Time) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldBucketDate, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// TotalChecks applies equality check predicate on the "total_checks" field. It's identical to TotalChecksEQ.
|
||||||
|
func TotalChecks(v int) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldTotalChecks, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// OkCount applies equality check predicate on the "ok_count" field. It's identical to OkCountEQ.
|
||||||
|
func OkCount(v int) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldOkCount, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// OperationalCount applies equality check predicate on the "operational_count" field. It's identical to OperationalCountEQ.
|
||||||
|
func OperationalCount(v int) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldOperationalCount, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// DegradedCount applies equality check predicate on the "degraded_count" field. It's identical to DegradedCountEQ.
|
||||||
|
func DegradedCount(v int) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldDegradedCount, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// FailedCount applies equality check predicate on the "failed_count" field. It's identical to FailedCountEQ.
|
||||||
|
func FailedCount(v int) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldFailedCount, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ErrorCount applies equality check predicate on the "error_count" field. It's identical to ErrorCountEQ.
|
||||||
|
func ErrorCount(v int) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldErrorCount, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SumLatencyMs applies equality check predicate on the "sum_latency_ms" field. It's identical to SumLatencyMsEQ.
|
||||||
|
func SumLatencyMs(v int64) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldSumLatencyMs, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CountLatency applies equality check predicate on the "count_latency" field. It's identical to CountLatencyEQ.
|
||||||
|
func CountLatency(v int) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldCountLatency, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SumPingLatencyMs applies equality check predicate on the "sum_ping_latency_ms" field. It's identical to SumPingLatencyMsEQ.
|
||||||
|
func SumPingLatencyMs(v int64) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldSumPingLatencyMs, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CountPingLatency applies equality check predicate on the "count_ping_latency" field. It's identical to CountPingLatencyEQ.
|
||||||
|
func CountPingLatency(v int) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldCountPingLatency, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ComputedAt applies equality check predicate on the "computed_at" field. It's identical to ComputedAtEQ.
|
||||||
|
func ComputedAt(v time.Time) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldComputedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// MonitorIDEQ applies the EQ predicate on the "monitor_id" field.
|
||||||
|
func MonitorIDEQ(v int64) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldMonitorID, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// MonitorIDNEQ applies the NEQ predicate on the "monitor_id" field.
|
||||||
|
func MonitorIDNEQ(v int64) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldNEQ(FieldMonitorID, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// MonitorIDIn applies the In predicate on the "monitor_id" field.
|
||||||
|
func MonitorIDIn(vs ...int64) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldIn(FieldMonitorID, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// MonitorIDNotIn applies the NotIn predicate on the "monitor_id" field.
|
||||||
|
func MonitorIDNotIn(vs ...int64) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldNotIn(FieldMonitorID, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ModelEQ applies the EQ predicate on the "model" field.
|
||||||
|
func ModelEQ(v string) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldModel, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ModelNEQ applies the NEQ predicate on the "model" field.
|
||||||
|
func ModelNEQ(v string) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldNEQ(FieldModel, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ModelIn applies the In predicate on the "model" field.
|
||||||
|
func ModelIn(vs ...string) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldIn(FieldModel, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ModelNotIn applies the NotIn predicate on the "model" field.
|
||||||
|
func ModelNotIn(vs ...string) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldNotIn(FieldModel, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ModelGT applies the GT predicate on the "model" field.
|
||||||
|
func ModelGT(v string) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldGT(FieldModel, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ModelGTE applies the GTE predicate on the "model" field.
|
||||||
|
func ModelGTE(v string) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldGTE(FieldModel, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ModelLT applies the LT predicate on the "model" field.
|
||||||
|
func ModelLT(v string) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldLT(FieldModel, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ModelLTE applies the LTE predicate on the "model" field.
|
||||||
|
func ModelLTE(v string) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldLTE(FieldModel, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ModelContains applies the Contains predicate on the "model" field.
|
||||||
|
func ModelContains(v string) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldContains(FieldModel, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ModelHasPrefix applies the HasPrefix predicate on the "model" field.
|
||||||
|
func ModelHasPrefix(v string) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldHasPrefix(FieldModel, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ModelHasSuffix applies the HasSuffix predicate on the "model" field.
|
||||||
|
func ModelHasSuffix(v string) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldHasSuffix(FieldModel, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ModelEqualFold applies the EqualFold predicate on the "model" field.
|
||||||
|
func ModelEqualFold(v string) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldEqualFold(FieldModel, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ModelContainsFold applies the ContainsFold predicate on the "model" field.
|
||||||
|
func ModelContainsFold(v string) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldContainsFold(FieldModel, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// BucketDateEQ applies the EQ predicate on the "bucket_date" field.
|
||||||
|
func BucketDateEQ(v time.Time) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldBucketDate, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// BucketDateNEQ applies the NEQ predicate on the "bucket_date" field.
|
||||||
|
func BucketDateNEQ(v time.Time) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldNEQ(FieldBucketDate, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// BucketDateIn applies the In predicate on the "bucket_date" field.
|
||||||
|
func BucketDateIn(vs ...time.Time) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldIn(FieldBucketDate, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// BucketDateNotIn applies the NotIn predicate on the "bucket_date" field.
|
||||||
|
func BucketDateNotIn(vs ...time.Time) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldNotIn(FieldBucketDate, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// BucketDateGT applies the GT predicate on the "bucket_date" field.
|
||||||
|
func BucketDateGT(v time.Time) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldGT(FieldBucketDate, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// BucketDateGTE applies the GTE predicate on the "bucket_date" field.
|
||||||
|
func BucketDateGTE(v time.Time) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldGTE(FieldBucketDate, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// BucketDateLT applies the LT predicate on the "bucket_date" field.
|
||||||
|
func BucketDateLT(v time.Time) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldLT(FieldBucketDate, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// BucketDateLTE applies the LTE predicate on the "bucket_date" field.
|
||||||
|
func BucketDateLTE(v time.Time) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldLTE(FieldBucketDate, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// TotalChecksEQ applies the EQ predicate on the "total_checks" field.
|
||||||
|
func TotalChecksEQ(v int) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldTotalChecks, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// TotalChecksNEQ applies the NEQ predicate on the "total_checks" field.
|
||||||
|
func TotalChecksNEQ(v int) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldNEQ(FieldTotalChecks, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// TotalChecksIn applies the In predicate on the "total_checks" field.
|
||||||
|
func TotalChecksIn(vs ...int) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldIn(FieldTotalChecks, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// TotalChecksNotIn applies the NotIn predicate on the "total_checks" field.
|
||||||
|
func TotalChecksNotIn(vs ...int) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldNotIn(FieldTotalChecks, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// TotalChecksGT applies the GT predicate on the "total_checks" field.
|
||||||
|
func TotalChecksGT(v int) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldGT(FieldTotalChecks, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// TotalChecksGTE applies the GTE predicate on the "total_checks" field.
|
||||||
|
func TotalChecksGTE(v int) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldGTE(FieldTotalChecks, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// TotalChecksLT applies the LT predicate on the "total_checks" field.
|
||||||
|
func TotalChecksLT(v int) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldLT(FieldTotalChecks, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// TotalChecksLTE applies the LTE predicate on the "total_checks" field.
|
||||||
|
func TotalChecksLTE(v int) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldLTE(FieldTotalChecks, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// OkCountEQ applies the EQ predicate on the "ok_count" field.
|
||||||
|
func OkCountEQ(v int) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldOkCount, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// OkCountNEQ applies the NEQ predicate on the "ok_count" field.
|
||||||
|
func OkCountNEQ(v int) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldNEQ(FieldOkCount, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// OkCountIn applies the In predicate on the "ok_count" field.
|
||||||
|
func OkCountIn(vs ...int) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldIn(FieldOkCount, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// OkCountNotIn applies the NotIn predicate on the "ok_count" field.
|
||||||
|
func OkCountNotIn(vs ...int) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldNotIn(FieldOkCount, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// OkCountGT applies the GT predicate on the "ok_count" field.
|
||||||
|
func OkCountGT(v int) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldGT(FieldOkCount, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// OkCountGTE applies the GTE predicate on the "ok_count" field.
|
||||||
|
func OkCountGTE(v int) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldGTE(FieldOkCount, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// OkCountLT applies the LT predicate on the "ok_count" field.
|
||||||
|
func OkCountLT(v int) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldLT(FieldOkCount, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// OkCountLTE applies the LTE predicate on the "ok_count" field.
|
||||||
|
func OkCountLTE(v int) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldLTE(FieldOkCount, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// OperationalCountEQ applies the EQ predicate on the "operational_count" field.
|
||||||
|
func OperationalCountEQ(v int) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldOperationalCount, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// OperationalCountNEQ applies the NEQ predicate on the "operational_count" field.
|
||||||
|
func OperationalCountNEQ(v int) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldNEQ(FieldOperationalCount, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// OperationalCountIn applies the In predicate on the "operational_count" field.
|
||||||
|
func OperationalCountIn(vs ...int) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldIn(FieldOperationalCount, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// OperationalCountNotIn applies the NotIn predicate on the "operational_count" field.
|
||||||
|
func OperationalCountNotIn(vs ...int) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldNotIn(FieldOperationalCount, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// OperationalCountGT applies the GT predicate on the "operational_count" field.
|
||||||
|
func OperationalCountGT(v int) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldGT(FieldOperationalCount, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// OperationalCountGTE applies the GTE predicate on the "operational_count" field.
|
||||||
|
func OperationalCountGTE(v int) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldGTE(FieldOperationalCount, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// OperationalCountLT applies the LT predicate on the "operational_count" field.
|
||||||
|
func OperationalCountLT(v int) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldLT(FieldOperationalCount, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// OperationalCountLTE applies the LTE predicate on the "operational_count" field.
|
||||||
|
func OperationalCountLTE(v int) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldLTE(FieldOperationalCount, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// DegradedCountEQ applies the EQ predicate on the "degraded_count" field.
|
||||||
|
func DegradedCountEQ(v int) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldDegradedCount, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// DegradedCountNEQ applies the NEQ predicate on the "degraded_count" field.
|
||||||
|
func DegradedCountNEQ(v int) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldNEQ(FieldDegradedCount, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// DegradedCountIn applies the In predicate on the "degraded_count" field.
|
||||||
|
func DegradedCountIn(vs ...int) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldIn(FieldDegradedCount, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// DegradedCountNotIn applies the NotIn predicate on the "degraded_count" field.
|
||||||
|
func DegradedCountNotIn(vs ...int) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldNotIn(FieldDegradedCount, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// DegradedCountGT applies the GT predicate on the "degraded_count" field.
|
||||||
|
func DegradedCountGT(v int) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldGT(FieldDegradedCount, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// DegradedCountGTE applies the GTE predicate on the "degraded_count" field.
|
||||||
|
func DegradedCountGTE(v int) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldGTE(FieldDegradedCount, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// DegradedCountLT applies the LT predicate on the "degraded_count" field.
|
||||||
|
func DegradedCountLT(v int) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldLT(FieldDegradedCount, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// DegradedCountLTE applies the LTE predicate on the "degraded_count" field.
|
||||||
|
func DegradedCountLTE(v int) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldLTE(FieldDegradedCount, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// FailedCountEQ applies the EQ predicate on the "failed_count" field.
|
||||||
|
func FailedCountEQ(v int) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldFailedCount, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// FailedCountNEQ applies the NEQ predicate on the "failed_count" field.
|
||||||
|
func FailedCountNEQ(v int) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldNEQ(FieldFailedCount, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// FailedCountIn applies the In predicate on the "failed_count" field.
|
||||||
|
func FailedCountIn(vs ...int) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldIn(FieldFailedCount, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// FailedCountNotIn applies the NotIn predicate on the "failed_count" field.
|
||||||
|
func FailedCountNotIn(vs ...int) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldNotIn(FieldFailedCount, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// FailedCountGT applies the GT predicate on the "failed_count" field.
|
||||||
|
func FailedCountGT(v int) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldGT(FieldFailedCount, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// FailedCountGTE applies the GTE predicate on the "failed_count" field.
|
||||||
|
func FailedCountGTE(v int) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldGTE(FieldFailedCount, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// FailedCountLT applies the LT predicate on the "failed_count" field.
|
||||||
|
func FailedCountLT(v int) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldLT(FieldFailedCount, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// FailedCountLTE applies the LTE predicate on the "failed_count" field.
|
||||||
|
func FailedCountLTE(v int) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldLTE(FieldFailedCount, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ErrorCountEQ applies the EQ predicate on the "error_count" field.
|
||||||
|
func ErrorCountEQ(v int) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldErrorCount, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ErrorCountNEQ applies the NEQ predicate on the "error_count" field.
|
||||||
|
func ErrorCountNEQ(v int) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldNEQ(FieldErrorCount, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ErrorCountIn applies the In predicate on the "error_count" field.
|
||||||
|
func ErrorCountIn(vs ...int) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldIn(FieldErrorCount, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ErrorCountNotIn applies the NotIn predicate on the "error_count" field.
|
||||||
|
func ErrorCountNotIn(vs ...int) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldNotIn(FieldErrorCount, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ErrorCountGT applies the GT predicate on the "error_count" field.
|
||||||
|
func ErrorCountGT(v int) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldGT(FieldErrorCount, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ErrorCountGTE applies the GTE predicate on the "error_count" field.
|
||||||
|
func ErrorCountGTE(v int) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldGTE(FieldErrorCount, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ErrorCountLT applies the LT predicate on the "error_count" field.
|
||||||
|
func ErrorCountLT(v int) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldLT(FieldErrorCount, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ErrorCountLTE applies the LTE predicate on the "error_count" field.
|
||||||
|
func ErrorCountLTE(v int) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldLTE(FieldErrorCount, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SumLatencyMsEQ applies the EQ predicate on the "sum_latency_ms" field.
|
||||||
|
func SumLatencyMsEQ(v int64) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldSumLatencyMs, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SumLatencyMsNEQ applies the NEQ predicate on the "sum_latency_ms" field.
|
||||||
|
func SumLatencyMsNEQ(v int64) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldNEQ(FieldSumLatencyMs, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SumLatencyMsIn applies the In predicate on the "sum_latency_ms" field.
|
||||||
|
func SumLatencyMsIn(vs ...int64) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldIn(FieldSumLatencyMs, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SumLatencyMsNotIn applies the NotIn predicate on the "sum_latency_ms" field.
|
||||||
|
func SumLatencyMsNotIn(vs ...int64) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldNotIn(FieldSumLatencyMs, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SumLatencyMsGT applies the GT predicate on the "sum_latency_ms" field.
|
||||||
|
func SumLatencyMsGT(v int64) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldGT(FieldSumLatencyMs, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SumLatencyMsGTE applies the GTE predicate on the "sum_latency_ms" field.
|
||||||
|
func SumLatencyMsGTE(v int64) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldGTE(FieldSumLatencyMs, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SumLatencyMsLT applies the LT predicate on the "sum_latency_ms" field.
|
||||||
|
func SumLatencyMsLT(v int64) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldLT(FieldSumLatencyMs, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SumLatencyMsLTE applies the LTE predicate on the "sum_latency_ms" field.
|
||||||
|
func SumLatencyMsLTE(v int64) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldLTE(FieldSumLatencyMs, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CountLatencyEQ applies the EQ predicate on the "count_latency" field.
|
||||||
|
func CountLatencyEQ(v int) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldCountLatency, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CountLatencyNEQ applies the NEQ predicate on the "count_latency" field.
|
||||||
|
func CountLatencyNEQ(v int) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldNEQ(FieldCountLatency, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CountLatencyIn applies the In predicate on the "count_latency" field.
|
||||||
|
func CountLatencyIn(vs ...int) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldIn(FieldCountLatency, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CountLatencyNotIn applies the NotIn predicate on the "count_latency" field.
|
||||||
|
func CountLatencyNotIn(vs ...int) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldNotIn(FieldCountLatency, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CountLatencyGT applies the GT predicate on the "count_latency" field.
|
||||||
|
func CountLatencyGT(v int) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldGT(FieldCountLatency, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CountLatencyGTE applies the GTE predicate on the "count_latency" field.
|
||||||
|
func CountLatencyGTE(v int) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldGTE(FieldCountLatency, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CountLatencyLT applies the LT predicate on the "count_latency" field.
|
||||||
|
func CountLatencyLT(v int) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldLT(FieldCountLatency, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CountLatencyLTE applies the LTE predicate on the "count_latency" field.
|
||||||
|
func CountLatencyLTE(v int) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldLTE(FieldCountLatency, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SumPingLatencyMsEQ applies the EQ predicate on the "sum_ping_latency_ms" field.
|
||||||
|
func SumPingLatencyMsEQ(v int64) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldSumPingLatencyMs, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SumPingLatencyMsNEQ applies the NEQ predicate on the "sum_ping_latency_ms" field.
|
||||||
|
func SumPingLatencyMsNEQ(v int64) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldNEQ(FieldSumPingLatencyMs, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SumPingLatencyMsIn applies the In predicate on the "sum_ping_latency_ms" field.
|
||||||
|
func SumPingLatencyMsIn(vs ...int64) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldIn(FieldSumPingLatencyMs, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SumPingLatencyMsNotIn applies the NotIn predicate on the "sum_ping_latency_ms" field.
|
||||||
|
func SumPingLatencyMsNotIn(vs ...int64) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldNotIn(FieldSumPingLatencyMs, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SumPingLatencyMsGT applies the GT predicate on the "sum_ping_latency_ms" field.
|
||||||
|
func SumPingLatencyMsGT(v int64) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldGT(FieldSumPingLatencyMs, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SumPingLatencyMsGTE applies the GTE predicate on the "sum_ping_latency_ms" field.
|
||||||
|
func SumPingLatencyMsGTE(v int64) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldGTE(FieldSumPingLatencyMs, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SumPingLatencyMsLT applies the LT predicate on the "sum_ping_latency_ms" field.
|
||||||
|
func SumPingLatencyMsLT(v int64) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldLT(FieldSumPingLatencyMs, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// SumPingLatencyMsLTE applies the LTE predicate on the "sum_ping_latency_ms" field.
|
||||||
|
func SumPingLatencyMsLTE(v int64) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldLTE(FieldSumPingLatencyMs, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CountPingLatencyEQ applies the EQ predicate on the "count_ping_latency" field.
|
||||||
|
func CountPingLatencyEQ(v int) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldCountPingLatency, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CountPingLatencyNEQ applies the NEQ predicate on the "count_ping_latency" field.
|
||||||
|
func CountPingLatencyNEQ(v int) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldNEQ(FieldCountPingLatency, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CountPingLatencyIn applies the In predicate on the "count_ping_latency" field.
|
||||||
|
func CountPingLatencyIn(vs ...int) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldIn(FieldCountPingLatency, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CountPingLatencyNotIn applies the NotIn predicate on the "count_ping_latency" field.
|
||||||
|
func CountPingLatencyNotIn(vs ...int) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldNotIn(FieldCountPingLatency, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CountPingLatencyGT applies the GT predicate on the "count_ping_latency" field.
|
||||||
|
func CountPingLatencyGT(v int) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldGT(FieldCountPingLatency, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CountPingLatencyGTE applies the GTE predicate on the "count_ping_latency" field.
|
||||||
|
func CountPingLatencyGTE(v int) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldGTE(FieldCountPingLatency, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CountPingLatencyLT applies the LT predicate on the "count_ping_latency" field.
|
||||||
|
func CountPingLatencyLT(v int) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldLT(FieldCountPingLatency, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CountPingLatencyLTE applies the LTE predicate on the "count_ping_latency" field.
|
||||||
|
func CountPingLatencyLTE(v int) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldLTE(FieldCountPingLatency, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ComputedAtEQ applies the EQ predicate on the "computed_at" field.
|
||||||
|
func ComputedAtEQ(v time.Time) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldComputedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ComputedAtNEQ applies the NEQ predicate on the "computed_at" field.
|
||||||
|
func ComputedAtNEQ(v time.Time) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldNEQ(FieldComputedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ComputedAtIn applies the In predicate on the "computed_at" field.
|
||||||
|
func ComputedAtIn(vs ...time.Time) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldIn(FieldComputedAt, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ComputedAtNotIn applies the NotIn predicate on the "computed_at" field.
|
||||||
|
func ComputedAtNotIn(vs ...time.Time) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldNotIn(FieldComputedAt, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ComputedAtGT applies the GT predicate on the "computed_at" field.
|
||||||
|
func ComputedAtGT(v time.Time) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldGT(FieldComputedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ComputedAtGTE applies the GTE predicate on the "computed_at" field.
|
||||||
|
func ComputedAtGTE(v time.Time) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldGTE(FieldComputedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ComputedAtLT applies the LT predicate on the "computed_at" field.
|
||||||
|
func ComputedAtLT(v time.Time) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldLT(FieldComputedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ComputedAtLTE applies the LTE predicate on the "computed_at" field.
|
||||||
|
func ComputedAtLTE(v time.Time) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.FieldLTE(FieldComputedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// HasMonitor applies the HasEdge predicate on the "monitor" edge.
|
||||||
|
func HasMonitor() predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(func(s *sql.Selector) {
|
||||||
|
step := sqlgraph.NewStep(
|
||||||
|
sqlgraph.From(Table, FieldID),
|
||||||
|
sqlgraph.Edge(sqlgraph.M2O, true, MonitorTable, MonitorColumn),
|
||||||
|
)
|
||||||
|
sqlgraph.HasNeighbors(s, step)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// HasMonitorWith applies the HasEdge predicate on the "monitor" edge with a given conditions (other predicates).
|
||||||
|
func HasMonitorWith(preds ...predicate.ChannelMonitor) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(func(s *sql.Selector) {
|
||||||
|
step := newMonitorStep()
|
||||||
|
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
|
||||||
|
for _, p := range preds {
|
||||||
|
p(s)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// And groups predicates with the AND operator between them.
|
||||||
|
func And(predicates ...predicate.ChannelMonitorDailyRollup) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.AndPredicates(predicates...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Or groups predicates with the OR operator between them.
|
||||||
|
func Or(predicates ...predicate.ChannelMonitorDailyRollup) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.OrPredicates(predicates...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Not applies the not operator on the given predicate.
|
||||||
|
func Not(p predicate.ChannelMonitorDailyRollup) predicate.ChannelMonitorDailyRollup {
|
||||||
|
return predicate.ChannelMonitorDailyRollup(sql.NotPredicates(p))
|
||||||
|
}
|
||||||