Compare commits
928 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
9d801595c9 | ||
|
|
9c448f89a8 | ||
|
|
73b872998e | ||
|
|
094e1171ef | ||
|
|
733627cf9d | ||
|
|
f084d30d65 | ||
|
|
8ad099baa6 | ||
|
|
8bf2a7b88a | ||
|
|
40feb86ba4 | ||
|
|
f972a2faf2 | ||
|
|
55a7fa1e07 | ||
|
|
5e54d492be | ||
|
|
8d6d31545f | ||
|
|
17ced6b73a | ||
|
|
7f8f3fe0dd | ||
|
|
46f06b2498 | ||
|
|
7ce5b83215 | ||
|
|
27cad10d30 | ||
|
|
ff6fa0203d | ||
|
|
f7c13af11f | ||
|
|
28dc34b6a3 | ||
|
|
4d676dddd1 | ||
|
|
93d91e20b9 | ||
|
|
63ef23108c | ||
|
|
d78478e866 | ||
|
|
bf43fb4e38 | ||
|
|
a16c66500f | ||
|
|
4b6954f9f0 | ||
|
|
da4b078df2 | ||
|
|
7452fad820 | ||
|
|
4c474616b9 | ||
|
|
6327573534 | ||
|
|
04b2866f65 | ||
|
|
b0a2252ed1 | ||
|
|
30f55a1f72 | ||
|
|
3d4ca5e8d1 | ||
|
|
0537a490f0 | ||
|
|
ca5d029e7c | ||
|
|
1eca03432a | ||
|
|
53b24bc2d8 | ||
|
|
a161f9d045 | ||
|
|
c5a1a82223 | ||
|
|
2ab6b34fd1 | ||
|
|
764afbe37a | ||
|
|
25c7b0d9f4 | ||
|
|
f422ac6dcc | ||
|
|
54de4e008c | ||
|
|
65c27d2c69 | ||
|
|
53f919f8f0 | ||
|
|
c92b88e34a | ||
|
|
ed0c85a17e | ||
|
|
9fe02bba7e | ||
|
|
615557ec20 | ||
|
|
3f05ef2ae3 | ||
|
|
3022090365 | ||
|
|
798fd673e9 | ||
|
|
c056db740d | ||
|
|
a0b5e5bfa0 | ||
|
|
41d0657330 | ||
|
|
1a0cabbfd6 | ||
|
|
9b6dcc57bd | ||
|
|
6d11f9ed77 | ||
|
|
489a4d934e | ||
|
|
b17704d6ef | ||
|
|
496469ac4e | ||
|
|
c1b52615be | ||
|
|
3af9940b85 | ||
|
|
22b1277572 | ||
|
|
aff98d5ae1 | ||
|
|
4e1bb2b445 | ||
|
|
dac6e52091 | ||
|
|
8987e0ba67 | ||
|
|
9d1751ec57 | ||
|
|
5d1c12e60e | ||
|
|
5b63a9b02d | ||
|
|
641e61073f | ||
|
|
095f457c57 | ||
|
|
1e57e88e43 | ||
|
|
b95ffce244 | ||
|
|
8f28a834f8 | ||
|
|
7424c73b05 | ||
|
|
1afd81b019 | ||
|
|
732d6495ea | ||
|
|
6d20ab8082 | ||
|
|
aa8ee33b0a | ||
|
|
5f630fbb19 | ||
|
|
bdbd2916f5 | ||
|
|
6dc89765fd | ||
|
|
f3233db01f | ||
|
|
6e12578bc5 | ||
|
|
a25faecadd | ||
|
|
5862e2d8d9 | ||
|
|
66d6454535 | ||
|
|
165553cfb0 | ||
|
|
b5467d610a | ||
|
|
57ff97960d | ||
|
|
5b5db88550 | ||
|
|
f03de00cb9 | ||
|
|
76aae5aa74 | ||
|
|
27ee141c1e | ||
|
|
e65574dea9 | ||
|
|
1ce9dc03f9 | ||
|
|
15ce914a62 | ||
|
|
959af1c8f6 | ||
|
|
c4d496da18 | ||
|
|
f3ea878ba2 | ||
|
|
d80469ea35 | ||
|
|
5fc30ea964 | ||
|
|
f68909a68b | ||
|
|
d162604f32 | ||
|
|
a4e329c18b | ||
|
|
ca204ddd2f | ||
|
|
ff08f9d798 | ||
|
|
ac11473833 | ||
|
|
09fd83ab9b | ||
|
|
6699d33760 | ||
|
|
f7c8377abf | ||
|
|
0dcc0e0504 | ||
|
|
5f41899705 | ||
|
|
5e060b2222 | ||
|
|
6f04c25e3d | ||
|
|
375cce29c6 | ||
|
|
67518a59ac | ||
|
|
a3ea8ecac5 | ||
|
|
497872693f | ||
|
|
748a84d871 | ||
|
|
d5dac84e12 | ||
|
|
75e1b40fb4 | ||
|
|
5eedf782f4 | ||
|
|
1949425ab9 | ||
|
|
0a80ec80e3 | ||
|
|
a22a5b9e72 | ||
|
|
3fe4fd4c35 | ||
|
|
827a4498e0 | ||
|
|
8dbbd94299 | ||
|
|
6b0cf4663d | ||
|
|
dc5d42addc | ||
|
|
ef967d8f8a | ||
|
|
27ffc7f373 | ||
|
|
9e5a6351fc | ||
|
|
bcf4aedcde | ||
|
|
11cf23da7d | ||
|
|
eea6f38881 | ||
|
|
2489ea3699 | ||
|
|
0b85a8da88 | ||
|
|
327da8e260 | ||
|
|
00778dca31 | ||
|
|
79aff2df31 | ||
|
|
f35e967516 | ||
|
|
6449da6c8d | ||
|
|
755c7d5026 | ||
|
|
1da4bd72df | ||
|
|
5551349349 | ||
|
|
c6d25f69d5 | ||
|
|
45065c23d5 | ||
|
|
ddf80f5ea1 | ||
|
|
c048ca80a4 | ||
|
|
22385be515 | ||
|
|
4d0483f5b8 | ||
|
|
6b19490393 | ||
|
|
1e0d466002 | ||
|
|
9de7a72cce | ||
|
|
66b3acc274 | ||
|
|
0bc3a521b5 | ||
|
|
3419cb0112 | ||
|
|
a94d89efa7 | ||
|
|
66680a3056 | ||
|
|
ad4600964e | ||
|
|
82259d1380 | ||
|
|
ca4e38aa01 | ||
|
|
1aab084ecb | ||
|
|
36aed35957 | ||
|
|
32107b4f95 | ||
|
|
3d29f7c2fa | ||
|
|
01a991f56f | ||
|
|
6696e61c7b | ||
|
|
81c827ee51 | ||
|
|
83cad63ce0 | ||
|
|
06136af805 | ||
|
|
6ad333d6b2 | ||
|
|
29caf85104 | ||
|
|
d6a04bb772 | ||
|
|
c548021921 | ||
|
|
b2e0712190 | ||
|
|
767f2f2dfe | ||
|
|
1ffebbb568 | ||
|
|
be9df2bea7 | ||
|
|
9d5e9bbc18 | ||
|
|
454873221c | ||
|
|
18481a100b | ||
|
|
ca1f30a911 | ||
|
|
84628108fc | ||
|
|
dd314c41e3 | ||
|
|
c229f33e9e | ||
|
|
8eb3f9e789 | ||
|
|
7fbd5177c2 | ||
|
|
fdf72eb511 | ||
|
|
b13e34f831 | ||
|
|
6d51834a95 | ||
|
|
863258d782 | ||
|
|
287f2f56d6 | ||
|
|
525a320424 | ||
|
|
0f4a8d7be8 | ||
|
|
d4c0a99114 | ||
|
|
89d09838d8 | ||
|
|
0d87f94cb7 | ||
|
|
9bf8ab7048 | ||
|
|
dcbddef611 | ||
|
|
906802abe3 | ||
|
|
da1d26001f | ||
|
|
a13ae5a0da | ||
|
|
e4cfcae652 | ||
|
|
11db3989ce | ||
|
|
40f7e832b4 | ||
|
|
b22d00e541 | ||
|
|
54dc176725 | ||
|
|
d5819181ea | ||
|
|
c0371e9104 | ||
|
|
65d3bd728b | ||
|
|
20062b44dc | ||
|
|
a6b919eb53 | ||
|
|
1f81b77911 | ||
|
|
6cd7c60549 | ||
|
|
25a5035503 | ||
|
|
9dae6c7aee | ||
|
|
ff4ef1b574 | ||
|
|
84b03efa0b | ||
|
|
4c21320d1b | ||
|
|
2cebb0dc60 | ||
|
|
3cdd5754df | ||
|
|
800802b8aa | ||
|
|
17c6348b57 | ||
|
|
7309c02f0b | ||
|
|
ee3f158f4e | ||
|
|
d08757ce9e | ||
|
|
9ba42aa556 | ||
|
|
59290e39f9 | ||
|
|
c624cce88e | ||
|
|
78f691d2de | ||
|
|
49258dd3f6 | ||
|
|
ed01c59916 | ||
|
|
422f3449a2 | ||
|
|
4a3652ec09 | ||
|
|
147ed42ad3 | ||
|
|
62ff2d803f | ||
|
|
0fcddce69e | ||
|
|
ace082066a | ||
|
|
65efef1eee | ||
|
|
12f1e19d68 | ||
|
|
0934f737d5 | ||
|
|
267844ebe6 | ||
|
|
ebd053c87e | ||
|
|
64e401e224 | ||
|
|
276ce052a3 | ||
|
|
119f784d19 | ||
|
|
35aeeaa6e1 | ||
|
|
561405ab00 | ||
|
|
960b2bb8e6 | ||
|
|
440536a93d | ||
|
|
9742796ee7 | ||
|
|
375aefa209 | ||
|
|
33b208ab6f | ||
|
|
f398650166 | ||
|
|
7e89bca5e6 | ||
|
|
dcd5c43da4 | ||
|
|
6da08262d7 | ||
|
|
ffc9c38722 | ||
|
|
a8854947c0 | ||
|
|
07f23aaa7d | ||
|
|
2626e8f22c | ||
|
|
09351e9459 | ||
|
|
f11b7d5105 | ||
|
|
07bde2b665 | ||
|
|
ebe7524415 | ||
|
|
a27a7add3d | ||
|
|
e12599c1b9 | ||
|
|
cd0338fbae | ||
|
|
7c6491c2d3 | ||
|
|
88decb6e0c | ||
|
|
1d8432b8a4 | ||
|
|
0a461d8248 | ||
|
|
a70f7aca07 | ||
|
|
365ef1fdf7 | ||
|
|
ea27ac6fd7 | ||
|
|
7a9488ff37 | ||
|
|
c297d0112e | ||
|
|
067eb23d8e | ||
|
|
12f4af742f | ||
|
|
e4fe9fae2a | ||
|
|
030da8c2f6 | ||
|
|
55e8dd550a | ||
|
|
1521d50399 | ||
|
|
654cfb6480 | ||
|
|
c46744f366 | ||
|
|
c2f9ad7a21 | ||
|
|
e1193212b5 | ||
|
|
a7415d4d2e | ||
|
|
6925ac25c4 | ||
|
|
a296425994 | ||
|
|
0c48f08f5c | ||
|
|
b363bff1d8 | ||
|
|
ef6ec8a15a | ||
|
|
8cf83c984e | ||
|
|
ba98243cc2 | ||
|
|
0d01bd908e | ||
|
|
bf3ef2d19a | ||
|
|
7da5124067 | ||
|
|
beeab54ae3 | ||
|
|
b79052aaf2 | ||
|
|
16be82b959 | ||
|
|
5d58c7c6fb | ||
|
|
9204145746 | ||
|
|
f73117f9b1 | ||
|
|
85fc54b205 | ||
|
|
4f6966d7b3 | ||
|
|
9e84e2fd2b | ||
|
|
f83fd59dca | ||
|
|
4ebdfcd13a | ||
|
|
0fa47f18ed | ||
|
|
a1425b457d | ||
|
|
7ef7fd19e7 | ||
|
|
6f00efa350 | ||
|
|
e1a28848fa | ||
|
|
7fdede579a | ||
|
|
4d10ba4297 | ||
|
|
bffcc2042e | ||
|
|
724f8e89a1 | ||
|
|
452e55a53c | ||
|
|
3bd3027251 | ||
|
|
bbc4aed3d9 | ||
|
|
aaf4946b27 | ||
|
|
31d0183d45 | ||
|
|
b309822199 | ||
|
|
422f60a145 | ||
|
|
f65429145e | ||
|
|
5adefb466b | ||
|
|
bdcd3d87e5 | ||
|
|
32059ae9d5 | ||
|
|
9bebf1c1a6 | ||
|
|
c0b24aefba | ||
|
|
e3f69e0246 | ||
|
|
7c7924e9fa | ||
|
|
97c9b992cb | ||
|
|
40d4e167cd | ||
|
|
58b2cc380f | ||
|
|
20a4e41872 | ||
|
|
b51bc7ee24 | ||
|
|
7826e9880c | ||
|
|
fb6204ea8b | ||
|
|
79192cf65b | ||
|
|
6ea3f42e2f | ||
|
|
6a75bd77e3 | ||
|
|
d47580a144 | ||
|
|
0353c3870f | ||
|
|
4e0e691546 | ||
|
|
c6d8592484 | ||
|
|
13d9780df4 | ||
|
|
e9de839d87 | ||
|
|
fbd0a2e3c4 | ||
|
|
d3d4267731 | ||
|
|
584ded2182 | ||
|
|
b6751f7ebc | ||
|
|
721d7ab3ab | ||
|
|
e01c1eaceb | ||
|
|
23def40bc5 | ||
|
|
f5ee93796d | ||
|
|
e8be434498 | ||
|
|
061fd48df7 | ||
|
|
6579f28b64 | ||
|
|
258fd145ff | ||
|
|
6530776a62 | ||
|
|
51af8df31d | ||
|
|
235f710853 | ||
|
|
c3cb0280ef | ||
|
|
6c73b6212c | ||
|
|
0c538a584f | ||
|
|
6ae1cc8f3f | ||
|
|
37123cef8f | ||
|
|
61a008f7e4 | ||
|
|
bf0bbe0be7 | ||
|
|
df57d2776b | ||
|
|
948d8e6d02 | ||
|
|
44cdef7934 | ||
|
|
fd0c9a1305 | ||
|
|
6cfdf4ec05 | ||
|
|
358ff6a608 | ||
|
|
41fbdba104 | ||
|
|
c22d11cedd | ||
|
|
5d586a9f3a | ||
|
|
a789c8c4c7 | ||
|
|
697c41a3f6 | ||
|
|
e44baa1094 | ||
|
|
e6e73b4f52 | ||
|
|
7ea8e7e667 | ||
|
|
a55ead5ea8 | ||
|
|
836092a666 | ||
|
|
3944b3d216 | ||
|
|
10699eeb34 | ||
|
|
6c89d8d35c | ||
|
|
be7551b9f4 | ||
|
|
70d0569f08 | ||
|
|
1db32d692b | ||
|
|
8fd29082c0 | ||
|
|
9bf079b725 | ||
|
|
e180dd0710 | ||
|
|
a7dd535d47 | ||
|
|
db27e8f000 | ||
|
|
7451b6f9ae | ||
|
|
e0b12b7512 | ||
|
|
22680dc602 | ||
|
|
6ade6d30a8 | ||
|
|
38c00872e1 | ||
|
|
c2108421c2 | ||
|
|
342dbd2e19 | ||
|
|
21f22b5099 | ||
|
|
60614e6f74 | ||
|
|
3053c56cac | ||
|
|
d149dbc91f | ||
|
|
e761d38fd1 | ||
|
|
98140f6cac | ||
|
|
60a4b9316b | ||
|
|
7c671b5373 | ||
|
|
d402e722cf | ||
|
|
8548a130c7 | ||
|
|
3d2027227b | ||
|
|
3fa5b8bca5 | ||
|
|
5240b44452 | ||
|
|
a56151fec9 | ||
|
|
63f539b382 | ||
|
|
c14d739360 | ||
|
|
58677dd53f | ||
|
|
6ac8ccde46 | ||
|
|
f1297a3694 | ||
|
|
e8ee400a3f | ||
|
|
6a08efeef9 | ||
|
|
4aa0070e3d | ||
|
|
b42f34c359 | ||
|
|
24e16b7f59 | ||
|
|
d6965b0676 | ||
|
|
9028d2085f | ||
|
|
7c7292935e | ||
|
|
1e6912ea2e | ||
|
|
9e0d12d3b0 | ||
|
|
b402c367d3 | ||
|
|
0a4ece5f5b | ||
|
|
9c09bd19b4 | ||
|
|
a9880ee7b9 | ||
|
|
74f8a30f86 | ||
|
|
1b7c295199 | ||
|
|
594f0d17d1 | ||
|
|
9d319cfa2d | ||
|
|
ed8a9d975b | ||
|
|
ca673f9899 | ||
|
|
a43da62254 | ||
|
|
6e9146e746 | ||
|
|
f571d8ffad | ||
|
|
48b6c4811f | ||
|
|
c1eb79e4ba | ||
|
|
e27335acdd | ||
|
|
216bda58da | ||
|
|
7141dceee2 | ||
|
|
ac55443278 | ||
|
|
2066c478ab | ||
|
|
98c9d51791 | ||
|
|
42f8ef3315 | ||
|
|
245f47cebb | ||
|
|
48e8efe3e8 | ||
|
|
58c0f57647 | ||
|
|
b1875f0b82 | ||
|
|
b7fb2e4387 | ||
|
|
a68df457d8 | ||
|
|
1262654d97 | ||
|
|
11c4606874 | ||
|
|
95f9b27e70 | ||
|
|
31550a2c6a | ||
|
|
915b7a4a56 | ||
|
|
61aa197b0b | ||
|
|
422807514c | ||
|
|
81287e960c | ||
|
|
79d154ed73 | ||
|
|
49281bbe45 | ||
|
|
5df7309979 | ||
|
|
80fa484467 | ||
|
|
4e96a6faec | ||
|
|
eba289a7ff | ||
|
|
889b5b4f3b | ||
|
|
cef22c70ab | ||
|
|
9e33d0c4c0 | ||
|
|
f694afbbf4 | ||
|
|
d0674e0ff9 | ||
|
|
30b926add4 | ||
|
|
c3812ce1e3 | ||
|
|
b32d1a2c9f | ||
|
|
60b0fa81ec | ||
|
|
499159870c | ||
|
|
fda61b067c | ||
|
|
7535e312e0 | ||
|
|
7fad9f604f | ||
|
|
1b53ffcac7 | ||
|
|
c738cfec93 | ||
|
|
56e4a9a914 | ||
|
|
3c884f8e30 | ||
|
|
5bae3b0577 | ||
|
|
1c63ea1448 | ||
|
|
3d4d960d60 | ||
|
|
794e817208 | ||
|
|
37c23eccfe | ||
|
|
e374874125 | ||
|
|
160903fce7 | ||
|
|
2dce4306b4 | ||
|
|
3de7713017 | ||
|
|
1cd033e521 | ||
|
|
e534e9bae8 | ||
|
|
f9f57e9505 | ||
|
|
92f4a6bb94 | ||
|
|
66bea2b5ed | ||
|
|
ad6c328135 | ||
|
|
d949acb1f2 | ||
|
|
759088002d | ||
|
|
7d80b5ad28 | ||
|
|
e70812f03f | ||
|
|
b9b52e74c6 | ||
|
|
a1e299a355 | ||
|
|
24f0eebcf7 | ||
|
|
f498eb8fde | ||
|
|
abe4267553 | ||
|
|
3a11348119 | ||
|
|
ad64190bec | ||
|
|
9648c4323f | ||
|
|
a1a283688c | ||
|
|
82b840c16e | ||
|
|
16126a2c9c | ||
|
|
9b7b3755fe | ||
|
|
54490cf65e | ||
|
|
cb016ad861 | ||
|
|
c520de11de | ||
|
|
422e25c99f | ||
|
|
1def627443 | ||
|
|
64cee0b633 | ||
|
|
24f95767b4 | ||
|
|
ed001d94da | ||
|
|
f3e8e5e057 | ||
|
|
3b0dc92952 | ||
|
|
1a686239ed | ||
|
|
c777fe5471 | ||
|
|
b7edc3ed82 | ||
|
|
97f14b7a08 | ||
|
|
6793503ed0 | ||
|
|
fa833f7684 | ||
|
|
d67ecf893d | ||
|
|
faee59ee15 | ||
|
|
217b7ea68c | ||
|
|
a020fc52c9 | ||
|
|
1ef3782dd4 | ||
|
|
7515590324 | ||
|
|
e3a000e0d4 | ||
|
|
27cd2f8e96 | ||
|
|
e1547d7835 | ||
|
|
63d1860dc0 | ||
|
|
f480e57344 | ||
|
|
7dc7ff22d2 | ||
|
|
67a05dfccd | ||
|
|
b6bc042302 | ||
|
|
1312405966 | ||
|
|
07d2add674 | ||
|
|
57d0f9794f | ||
|
|
269c7a065c | ||
|
|
b6946e78a2 | ||
|
|
2b70d1d332 | ||
|
|
b37afd68ec | ||
|
|
00c08c574e | ||
|
|
bbc79796dc | ||
|
|
760cc7d6be | ||
|
|
9a72025afb | ||
|
|
74302f60ab | ||
|
|
62962c05f1 | ||
|
|
118ff85fbf | ||
|
|
5f8e60a1b7 | ||
|
|
66e15a54a4 | ||
|
|
ad80606a44 | ||
|
|
d8fa38d55a | ||
|
|
6401dd7cc7 | ||
|
|
fe211fc563 | ||
|
|
7d008bd5b6 | ||
|
|
66ff2def8c | ||
|
|
de9b9c9dfb | ||
|
|
d765359f4b | ||
|
|
4de4823a65 | ||
|
|
23c4d592f8 | ||
|
|
311f06745a | ||
|
|
1b79f6a7cf | ||
|
|
8e1a7bdfff | ||
|
|
02a66a01c3 | ||
|
|
ce833d91ce | ||
|
|
155d3474d6 | ||
|
|
265687b56d | ||
|
|
0d69c0cd64 | ||
|
|
f54e9d0b1c | ||
|
|
b982076e52 | ||
|
|
7060596a30 | ||
|
|
e51c9e50b5 | ||
|
|
5088e91566 | ||
|
|
276f499c82 | ||
|
|
5c203ce6c6 | ||
|
|
47cd1c5286 | ||
|
|
06e2756ee4 | ||
|
|
1c9a2128cf | ||
|
|
9e515ea7c4 | ||
|
|
00aaf0f796 | ||
|
|
3694811d06 | ||
|
|
81b96ae123 | ||
|
|
7c60ee3c85 | ||
|
|
b2e379cf7a | ||
|
|
08b454423b | ||
|
|
f3aa54b770 | ||
|
|
3a07e92b60 | ||
|
|
7eecc49c3a | ||
|
|
9ab2fd7f9e | ||
|
|
bf2b590273 | ||
|
|
9151d34d40 | ||
|
|
339d906e54 | ||
|
|
f47c865555 | ||
|
|
58df2f0bdc | ||
|
|
c71b1d63e5 | ||
|
|
a07296770c | ||
|
|
8154575d70 | ||
|
|
d757df8a4b | ||
|
|
c5688fef9a | ||
|
|
19655a15f1 | ||
|
|
f345b0f595 | ||
|
|
58707f8a2a | ||
|
|
c6089ccb33 | ||
|
|
f585a15eff | ||
|
|
08e69af572 | ||
|
|
294b4bcbac | ||
|
|
67008b5d15 | ||
|
|
a29f5a4849 | ||
|
|
1b1c08f7fb | ||
|
|
0c72be0403 | ||
|
|
b4bd89b96b | ||
|
|
5bb8b2add6 | ||
|
|
93b42ccfea | ||
|
|
ff86154a03 | ||
|
|
fcee67e317 | ||
|
|
155900e62f | ||
|
|
9c514c9808 | ||
|
|
62e80c602d | ||
|
|
dbb248df52 | ||
|
|
66779f1c5f | ||
|
|
2c856b67ca | ||
|
|
bf45581104 | ||
|
|
e88b2890d1 | ||
|
|
1b5ae71d1f | ||
|
|
d4ff835bf1 | ||
|
|
e27b0adbc8 | ||
|
|
e59fa8637a | ||
|
|
58f758c816 | ||
|
|
feb6999d9a | ||
|
|
71f61bbc47 | ||
|
|
6d3ea64a35 | ||
|
|
1fca2bfab1 | ||
|
|
ce41afb756 | ||
|
|
b4a42a640d | ||
|
|
58b26cb4c8 | ||
|
|
b453c32743 | ||
|
|
3cd398b098 | ||
|
|
d3127b8eb1 | ||
|
|
6de1d0cb33 | ||
|
|
6c718578a5 | ||
|
|
0d241d52eb | ||
|
|
212eaa3a05 | ||
|
|
f3ab3fe5e2 | ||
|
|
b8c56ff940 | ||
|
|
38da737e6c | ||
|
|
1b2ea7a1df | ||
|
|
a9e5fc8539 | ||
|
|
9b213115e7 | ||
|
|
5534347328 | ||
|
|
2355029dc1 | ||
|
|
8d25335b01 | ||
|
|
c0b5900a37 | ||
|
|
35a9290528 | ||
|
|
c9145ad4d8 | ||
|
|
3851628a43 | ||
|
|
d72ac92694 | ||
|
|
2555951be4 | ||
|
|
669bff78c4 | ||
|
|
c90d1f2527 | ||
|
|
40cebc250f | ||
|
|
ddd495fb48 | ||
|
|
58f2044637 | ||
|
|
dfe3fdc1cc | ||
|
|
705131e172 | ||
|
|
88759407c7 | ||
|
|
6c99cc611c | ||
|
|
3457bcbfcd | ||
|
|
eb385457b2 | ||
|
|
4ea8b4cb4f | ||
|
|
91bdcf8994 | ||
|
|
8d03c52e15 | ||
|
|
0fbc9a44d3 | ||
|
|
632035aabd | ||
|
|
a51e0047b7 | ||
|
|
726730bb0e | ||
|
|
faff1771c4 | ||
|
|
b06cd06ec1 | ||
|
|
95751d8009 | ||
|
|
14e565a004 | ||
|
|
ce694701a9 | ||
|
|
12d03e4030 | ||
|
|
0b1ce6be8f | ||
|
|
28a6adaaa4 | ||
|
|
36990a0514 | ||
|
|
ebac0dc628 | ||
|
|
29d58f2414 | ||
|
|
dca0054e93 | ||
|
|
983fe58959 | ||
|
|
91c9b8d062 | ||
|
|
b384570de3 | ||
|
|
0507852a34 | ||
|
|
7b6ff135fb | ||
|
|
bf24de88ed | ||
|
|
ff6d4ab39a | ||
|
|
66fde7a2e6 | ||
|
|
e8efaa4cd9 | ||
|
|
00947d6492 | ||
|
|
cf70fb1b4e | ||
|
|
ef1a992cf0 | ||
|
|
1f6a73f0db | ||
|
|
f2e596f6ec | ||
|
|
77ba9e728d | ||
|
|
cf9efefd96 | ||
|
|
4fb1603001 | ||
|
|
c5aac1251d | ||
|
|
b155bc564b | ||
|
|
055c48ab33 | ||
|
|
6663e1eda6 | ||
|
|
649afef512 | ||
|
|
4514f3fc11 | ||
|
|
095bef9554 | ||
|
|
f00351c106 | ||
|
|
936fce68d0 | ||
|
|
d978ac97f1 | ||
|
|
dd5978f222 | ||
|
|
0ebe0ce585 | ||
|
|
c8cfad7c00 | ||
|
|
83a16dec19 | ||
|
|
820c531814 | ||
|
|
1727b8df3b | ||
|
|
a025a15f5d | ||
|
|
72e5876c64 | ||
|
|
aeed2eb9ad | ||
|
|
46bc5ca73b | ||
|
|
0b3feb9d4c | ||
|
|
ca8692c747 | ||
|
|
a61d58716f | ||
|
|
6b646b6127 | ||
|
|
318aa5e0d3 | ||
|
|
49e99e9d51 | ||
|
|
1dfd974432 | ||
|
|
cc396f59cf | ||
|
|
ad2cd97618 | ||
|
|
aa8b9cc508 | ||
|
|
6a2cf09ee0 | ||
|
|
c6fd88116b | ||
|
|
8f0dbdeaba | ||
|
|
007c09b84e | ||
|
|
73f3c068ef | ||
|
|
9a92fa4a60 | ||
|
|
576af710be | ||
|
|
b5642bd068 | ||
|
|
128f322252 | ||
|
|
17d7e57a2e | ||
|
|
50288e6b01 | ||
|
|
ab3e44e4bd | ||
|
|
61607990c8 | ||
|
|
b65275235f | ||
|
|
e298a71834 | ||
|
|
3f6fa1e3db | ||
|
|
f2c2abe628 | ||
|
|
ff5b467fbe | ||
|
|
8c10941142 | ||
|
|
f5764d8dc6 | ||
|
|
81ca4f12dd | ||
|
|
941c469ab9 | ||
|
|
8fcd819e6f | ||
|
|
9abdaed20c | ||
|
|
eb94342f78 | ||
|
|
d563eb2336 | ||
|
|
3ee6f085db | ||
|
|
7cca69a136 | ||
|
|
093a5a260e | ||
|
|
b6d46fd52f | ||
|
|
2c072c0ed6 | ||
|
|
1f39bf8a78 | ||
|
|
fdd8499ffc | ||
|
|
9398ea7af5 | ||
|
|
29dce1a59c | ||
|
|
c729ee425f | ||
|
|
c489f23810 | ||
|
|
47a544230a | ||
|
|
c13c81f09d | ||
|
|
20544a4447 | ||
|
|
b688ebeefa | ||
|
|
1854050df3 | ||
|
|
c7f4a649df | ||
|
|
ef5c8e6839 | ||
|
|
d571f300e5 | ||
|
|
ce96527dd9 | ||
|
|
f8b8b53985 | ||
|
|
b20e142249 | ||
|
|
7c6dc9dda8 | ||
|
|
5875571215 | ||
|
|
975e6b1563 | ||
|
|
f6fd7c83e3 | ||
|
|
c2965c0fb0 | ||
|
|
fdad55956e | ||
|
|
bb399e56b0 | ||
|
|
fa68cbad1b | ||
|
|
995ef1348a | ||
|
|
0f03393010 | ||
|
|
4b1ffc23f5 | ||
|
|
c7137dffa8 | ||
|
|
5a3375ce52 | ||
|
|
8e834fd9f5 | ||
|
|
02046744eb | ||
|
|
68d7ec9155 | ||
|
|
7537dce0f0 | ||
|
|
5f41b74707 | ||
|
|
25d961d4e0 | ||
|
|
08c4e514f8 | ||
|
|
91b1d812ce | ||
|
|
1f05d9f79d | ||
|
|
9f8cffe887 | ||
|
|
995bee143a | ||
|
|
f10e56be7e | ||
|
|
2f8e10db46 | ||
|
|
5418e15e63 | ||
|
|
bcf84cc153 | ||
|
|
ce8520c9e6 | ||
|
|
0b3928c33e | ||
|
|
73d72651b4 | ||
|
|
adbedd488c | ||
|
|
13b72f6bc2 | ||
|
|
c5aa96a3aa | ||
|
|
d927c0e45f | ||
|
|
31660c4c5f | ||
|
|
4321adab71 | ||
|
|
68f151f5c0 | ||
|
|
ecad083ffc | ||
|
|
fee43e8474 | ||
|
|
4838ab74b3 | ||
|
|
fef9259aaa | ||
|
|
ad7c10727a | ||
|
|
ccd42c1d1a | ||
|
|
bd8eadb75b | ||
|
|
70a9d0d3a2 | ||
|
|
7cd3824863 | ||
|
|
db9021f9c1 | ||
|
|
a2418c6040 | ||
|
|
1fb29d59b7 | ||
|
|
8c4a217f03 | ||
|
|
bda7c39e55 | ||
|
|
3583283ebb | ||
|
|
4feacf2213 | ||
|
|
73eb731881 | ||
|
|
186e36752d | ||
|
|
421728a985 | ||
|
|
39a5701184 | ||
|
|
27948c777e | ||
|
|
c64ed46d05 | ||
|
|
c64465ff7e | ||
|
|
095200bd16 | ||
|
|
2c667a159c | ||
|
|
bac408044f | ||
|
|
4edcfe1f7c | ||
|
|
9259dcb6f5 | ||
|
|
7ef933c7cf | ||
|
|
7d312822c1 | ||
|
|
1b3e5c6ea6 | ||
|
|
efe8401e92 | ||
|
|
0b845c2532 | ||
|
|
fe60412a17 | ||
|
|
5c39e6f2fb | ||
|
|
a225a241d7 | ||
|
|
553a486d17 | ||
|
|
c73374a221 | ||
|
|
94e26dee4f | ||
|
|
4617ef2bb8 | ||
|
|
8afa8c1091 | ||
|
|
578608d301 | ||
|
|
0d45d8669e | ||
|
|
73708da60d | ||
|
|
c810cad7c8 | ||
|
|
94bba415b1 | ||
|
|
4f7629a4cb | ||
|
|
4015f31f28 | ||
|
|
9dccbe1b07 | ||
|
|
9a88df7f28 | ||
|
|
a47f622e7e | ||
|
|
3529148455 | ||
|
|
01d8286bd9 | ||
|
|
21b6f2d593 | ||
|
|
528ff5d28c | ||
|
|
ba7d2aecbb | ||
|
|
0236b97d49 | ||
|
|
26f6b1eeff | ||
|
|
dc447ccebe | ||
|
|
7ec29638f4 | ||
|
|
4c9562af20 | ||
|
|
71942fd322 | ||
|
|
550b979ac5 | ||
|
|
3878a5a46f | ||
|
|
e443a6a1ea | ||
|
|
963494ec6f | ||
|
|
42d73118fd | ||
|
|
f2f819d70f | ||
|
|
525cdb8830 | ||
|
|
a6764e82f2 | ||
|
|
1de18b89dd | ||
|
|
882518c111 | ||
|
|
8027531d07 | ||
|
|
30706355a4 | ||
|
|
dfe99507b8 | ||
|
|
c1717c9a6c | ||
|
|
1fd1a58a7a | ||
|
|
fad07507be | ||
|
|
a20c211162 |
@@ -61,6 +61,9 @@ temp/
|
||||
deploy/install.sh
|
||||
deploy/sub2api.service
|
||||
deploy/sub2api-sudoers
|
||||
deploy/data/
|
||||
deploy/postgres_data/
|
||||
deploy/redis_data/
|
||||
|
||||
# GoReleaser
|
||||
.goreleaser.yaml
|
||||
|
||||
25
.github/audit-exceptions.yml
vendored
@@ -5,12 +5,33 @@ exceptions:
|
||||
severity: high
|
||||
reason: "Admin export only; switched to dynamic import to reduce exposure (CVE-2023-30533)"
|
||||
mitigation: "Load only on export; restrict export permissions and data scope"
|
||||
expires_on: "2026-04-05"
|
||||
expires_on: "2026-07-06"
|
||||
owner: "security@your-domain"
|
||||
- package: xlsx
|
||||
advisory: "GHSA-5pgg-2g8v-p4x9"
|
||||
severity: high
|
||||
reason: "Admin export only; switched to dynamic import to reduce exposure (CVE-2024-22363)"
|
||||
mitigation: "Load only on export; restrict export permissions and data scope"
|
||||
expires_on: "2026-04-05"
|
||||
expires_on: "2026-07-06"
|
||||
owner: "security@your-domain"
|
||||
- package: lodash
|
||||
advisory: "GHSA-r5fr-rjxr-66jc"
|
||||
severity: high
|
||||
reason: "lodash _.template not used with untrusted input; only internal admin UI templates"
|
||||
mitigation: "No user-controlled template strings; plan to migrate to lodash-es tree-shaken imports"
|
||||
expires_on: "2026-07-02"
|
||||
owner: "security@your-domain"
|
||||
- package: lodash-es
|
||||
advisory: "GHSA-r5fr-rjxr-66jc"
|
||||
severity: high
|
||||
reason: "lodash-es _.template not used with untrusted input; only internal admin UI templates"
|
||||
mitigation: "No user-controlled template strings; plan to migrate to native JS alternatives"
|
||||
expires_on: "2026-07-02"
|
||||
owner: "security@your-domain"
|
||||
- package: axios
|
||||
advisory: "GHSA-3p68-rc4w-qgx5"
|
||||
severity: critical
|
||||
reason: "NO_PROXY bypass not exploitable; all API calls go to known endpoints via server-side proxy"
|
||||
mitigation: "Proxy configuration not user-controlled; upgrade when axios releases fix"
|
||||
expires_on: "2026-07-10"
|
||||
owner: "security@your-domain"
|
||||
|
||||
28
.github/workflows/backend-ci.yml
vendored
@@ -17,9 +17,10 @@ jobs:
|
||||
go-version-file: backend/go.mod
|
||||
check-latest: false
|
||||
cache: true
|
||||
cache-dependency-path: backend/go.sum
|
||||
- name: Verify Go version
|
||||
run: |
|
||||
go version | grep -q 'go1.26.1'
|
||||
go version | grep -q 'go1.26.2'
|
||||
- name: Unit tests
|
||||
working-directory: backend
|
||||
run: make test-unit
|
||||
@@ -27,6 +28,26 @@ jobs:
|
||||
working-directory: backend
|
||||
run: make test-integration
|
||||
|
||||
frontend:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v6
|
||||
- name: Setup pnpm
|
||||
uses: pnpm/action-setup@v4
|
||||
with:
|
||||
version: 9
|
||||
- name: Setup Node.js
|
||||
uses: actions/setup-node@v6
|
||||
with:
|
||||
node-version: '20'
|
||||
cache: 'pnpm'
|
||||
cache-dependency-path: frontend/pnpm-lock.yaml
|
||||
- name: Install frontend dependencies
|
||||
working-directory: frontend
|
||||
run: pnpm install --frozen-lockfile
|
||||
- name: Frontend typecheck and critical vitest
|
||||
run: make test-frontend
|
||||
|
||||
golangci-lint:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
@@ -36,12 +57,13 @@ jobs:
|
||||
go-version-file: backend/go.mod
|
||||
check-latest: false
|
||||
cache: true
|
||||
cache-dependency-path: backend/go.sum
|
||||
- name: Verify Go version
|
||||
run: |
|
||||
go version | grep -q 'go1.26.1'
|
||||
go version | grep -q 'go1.26.2'
|
||||
- name: golangci-lint
|
||||
uses: golangci/golangci-lint-action@v9
|
||||
with:
|
||||
version: v2.9
|
||||
args: --timeout=30m
|
||||
working-directory: backend
|
||||
working-directory: backend
|
||||
|
||||
59
.github/workflows/cla.yml
vendored
Normal file
@@ -0,0 +1,59 @@
|
||||
name: "CLA Assistant"
|
||||
|
||||
on:
|
||||
issue_comment:
|
||||
types: [created]
|
||||
pull_request_target:
|
||||
types: [opened, reopened, closed, synchronize]
|
||||
|
||||
permissions:
|
||||
actions: write
|
||||
contents: write
|
||||
pull-requests: write
|
||||
statuses: write
|
||||
|
||||
jobs:
|
||||
cla-check:
|
||||
if: |
|
||||
github.event_name == 'issue_comment' ||
|
||||
(github.event_name == 'pull_request_target' && github.event.action != 'closed')
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: "CLA Assistant"
|
||||
if: |
|
||||
(github.event.comment.body == 'recheck' ||
|
||||
github.event.comment.body == 'I have read the CLA Document and I hereby sign the CLA') ||
|
||||
github.event_name == 'pull_request_target'
|
||||
uses: contributor-assistant/github-action@v2.6.1
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
with:
|
||||
path-to-signatures: "cla.json"
|
||||
path-to-document: "https://github.com/Wei-Shaw/sub2api/blob/main/CLA.md"
|
||||
branch: "cla-signatures"
|
||||
allowlist: "dependabot[bot],renovate[bot],bot*"
|
||||
lock-pullrequest-aftermerge: false
|
||||
custom-notsigned-prcomment: |
|
||||
Thank you for your contribution! Before we can merge this PR, we need you to sign our [Contributor License Agreement (CLA)](https://github.com/Wei-Shaw/sub2api/blob/main/CLA.md).
|
||||
|
||||
**To sign**, please reply with the following comment:
|
||||
|
||||
> I have read the CLA Document and I hereby sign the CLA
|
||||
|
||||
You only need to sign once — it will be valid for all your future contributions to this project.
|
||||
custom-pr-sign-comment: "I have read the CLA Document and I hereby sign the CLA"
|
||||
custom-allsigned-prcomment: "All contributors have signed the CLA. ✅"
|
||||
|
||||
cla-lock:
|
||||
if: github.event_name == 'pull_request_target' && github.event.action == 'closed' && github.event.pull_request.merged == true
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: "Lock merged PR"
|
||||
uses: contributor-assistant/github-action@v2.6.1
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
with:
|
||||
path-to-signatures: "cla.json"
|
||||
path-to-document: "https://github.com/Wei-Shaw/sub2api/blob/main/CLA.md"
|
||||
branch: "cla-signatures"
|
||||
lock-pullrequest-aftermerge: true
|
||||
39
.github/workflows/release.yml
vendored
@@ -115,7 +115,7 @@ jobs:
|
||||
|
||||
- name: Verify Go version
|
||||
run: |
|
||||
go version | grep -q 'go1.26.1'
|
||||
go version | grep -q 'go1.26.2'
|
||||
|
||||
# Docker setup for GoReleaser
|
||||
- name: Set up QEMU
|
||||
@@ -246,10 +246,10 @@ jobs:
|
||||
if [ -n "$DOCKERHUB_USERNAME" ]; then
|
||||
DOCKER_IMAGE="${DOCKERHUB_USERNAME}/sub2api"
|
||||
MESSAGE+="# Docker Hub"$'\n'
|
||||
MESSAGE+="docker pull ${DOCKER_IMAGE}:${TAG_NAME}"$'\n'
|
||||
MESSAGE+="docker pull ${DOCKER_IMAGE}:${VERSION}"$'\n'
|
||||
MESSAGE+="# GitHub Container Registry"$'\n'
|
||||
fi
|
||||
MESSAGE+="docker pull ${GHCR_IMAGE}:${TAG_NAME}"$'\n'
|
||||
MESSAGE+="docker pull ${GHCR_IMAGE}:${VERSION}"$'\n'
|
||||
MESSAGE+="\`\`\`"$'\n'$'\n'
|
||||
MESSAGE+="🔗 *相关链接:*"$'\n'
|
||||
MESSAGE+="• [GitHub Release](https://github.com/${REPO}/releases/tag/${TAG_NAME})"$'\n'
|
||||
@@ -271,3 +271,36 @@ jobs:
|
||||
parse_mode: "Markdown",
|
||||
disable_web_page_preview: true
|
||||
}')"
|
||||
|
||||
sync-version-file:
|
||||
needs: [release]
|
||||
if: ${{ needs.release.result == 'success' }}
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout default branch
|
||||
uses: actions/checkout@v6
|
||||
with:
|
||||
ref: ${{ github.event.repository.default_branch }}
|
||||
|
||||
- name: Sync VERSION file to released tag
|
||||
run: |
|
||||
if [ "${{ github.event_name }}" = "workflow_dispatch" ]; then
|
||||
VERSION=${{ github.event.inputs.tag }}
|
||||
VERSION=${VERSION#v}
|
||||
else
|
||||
VERSION=${GITHUB_REF#refs/tags/v}
|
||||
fi
|
||||
|
||||
CURRENT_VERSION=$(tr -d '\r\n' < backend/cmd/server/VERSION || true)
|
||||
if [ "$CURRENT_VERSION" = "$VERSION" ]; then
|
||||
echo "VERSION file already matches $VERSION"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
echo "$VERSION" > backend/cmd/server/VERSION
|
||||
|
||||
git config user.name "github-actions[bot]"
|
||||
git config user.email "41898282+github-actions[bot]@users.noreply.github.com"
|
||||
git add backend/cmd/server/VERSION
|
||||
git commit -m "chore: sync VERSION to ${VERSION} [skip ci]"
|
||||
git push origin HEAD:${{ github.event.repository.default_branch }}
|
||||
|
||||
2
.github/workflows/security-scan.yml
vendored
@@ -23,7 +23,7 @@ jobs:
|
||||
cache-dependency-path: backend/go.sum
|
||||
- name: Verify Go version
|
||||
run: |
|
||||
go version | grep -q 'go1.26.1'
|
||||
go version | grep -q 'go1.26.2'
|
||||
- name: Run govulncheck
|
||||
working-directory: backend
|
||||
run: |
|
||||
|
||||
7
.gitignore
vendored
@@ -1,4 +1,5 @@
|
||||
docs/claude-relay-service/
|
||||
.codex
|
||||
|
||||
# ===================
|
||||
# Go 后端
|
||||
@@ -121,15 +122,17 @@ scripts
|
||||
.code-review-state
|
||||
#openspec/
|
||||
code-reviews/
|
||||
#AGENTS.md
|
||||
AGENTS.md
|
||||
backend/cmd/server/server
|
||||
deploy/docker-compose.override.yml
|
||||
.gocache/
|
||||
vite.config.js
|
||||
docs/*
|
||||
!docs/PAYMENT.md
|
||||
!docs/PAYMENT_CN.md
|
||||
!docs/ADMIN_PAYMENT_INTEGRATION_API.md
|
||||
.serena/
|
||||
.codex/
|
||||
frontend/coverage/
|
||||
aicodex
|
||||
output/
|
||||
|
||||
|
||||
73
CLA.md
Normal file
@@ -0,0 +1,73 @@
|
||||
# Sub2API Individual Contributor License Agreement (v1.0)
|
||||
|
||||
Thank you for your interest in contributing to Sub2API ("the Project"). This Contributor License Agreement ("Agreement") documents the rights granted by contributors to the Project.
|
||||
|
||||
By signing this Agreement, you accept and agree to the following terms and conditions for your present and future contributions submitted to the Project.
|
||||
|
||||
## 1. Definitions
|
||||
|
||||
- **"You" (or "Your")** means the copyright owner or legal entity authorized by the copyright owner that is making this Agreement.
|
||||
- **"Contribution"** means any original work of authorship, including any modifications or additions to an existing work, that is intentionally submitted by You to the Project for inclusion in, or documentation of, any of the products owned or managed by the Project. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Project or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Project for the purpose of discussing and improving the Project, but excluding communication that is conspicuously marked or otherwise designated in writing by You as "Not a Contribution."
|
||||
- **"Project Owner"** means Wesley Liddick, or any individual or legal entity to whom Wesley Liddick has explicitly assigned or transferred ownership of the Project in writing, and their respective successors and assigns.
|
||||
|
||||
## 2. Grant of Copyright License
|
||||
|
||||
Subject to the terms and conditions of this Agreement, You hereby grant to the Project Owner a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare derivative works of, publicly display, publicly perform, sublicense, and distribute Your Contributions and such derivative works. This license includes, without limitation, the right to sublicense, assign, and transfer these rights to any third party, including without limitation any successor, assignee, or acquiring entity of the Project or the Project Owner, and to use Your Contributions under any license, including proprietary or commercial licenses.
|
||||
|
||||
## 3. Moral Rights
|
||||
|
||||
To the fullest extent permitted by applicable law, You irrevocably waive and agree not to assert any moral rights (including rights of attribution and integrity) that You may have in Your Contributions, and agree that the Project Owner and its licensees may use, modify, and distribute Your Contributions without attribution or other obligations arising from moral rights.
|
||||
|
||||
## 4. Grant of Patent License
|
||||
|
||||
Subject to the terms and conditions of this Agreement, You hereby grant to the Project Owner a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer Your Contributions, where such license applies only to those patent claims licensable by You that are necessarily infringed by Your Contribution(s) alone or by combination of Your Contribution(s) with the Project to which such Contribution(s) was submitted.
|
||||
|
||||
## 5. Representations and Warranties
|
||||
|
||||
You represent and warrant that:
|
||||
|
||||
(a) You are legally entitled to grant the above licenses.
|
||||
|
||||
(b) If Your employer(s) has rights to intellectual property that You create that includes Your Contributions, You have received permission to make Contributions on behalf of that employer, or that Your employer has waived such rights for Your Contributions to the Project.
|
||||
|
||||
(c) Each of Your Contributions is Your original creation, or You have sufficient rights to submit it under the terms of this Agreement. You agree to provide, upon request, reasonable documentation or explanation of any third-party materials included in Your Contributions.
|
||||
|
||||
## 6. No Warranty
|
||||
|
||||
Your Contributions are provided on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are not expected to provide support for Your Contributions, except to the extent You desire to provide support.
|
||||
|
||||
## 7. No Obligation
|
||||
|
||||
You understand that the decision to include Your Contribution in any product or project is entirely at the discretion of the Project Owner, and this Agreement does not obligate the Project Owner to use Your Contribution.
|
||||
|
||||
## 8. Retention of Rights
|
||||
|
||||
You retain ownership of the copyright in Your Contributions. This Agreement does not transfer any copyright or other intellectual property rights from You to the Project Owner. This Agreement only grants the licenses described above.
|
||||
|
||||
## 9. Term and Termination
|
||||
|
||||
This Agreement shall remain in effect indefinitely. You may terminate this Agreement prospectively by providing written notice to the Project Owner, but such termination shall not affect the licenses granted for Contributions submitted prior to the effective date of termination. The licenses granted herein for Contributions submitted prior to termination are perpetual and irrevocable.
|
||||
|
||||
## 10. Electronic Signature
|
||||
|
||||
You agree that Your electronic signature (including but not limited to typing a specific phrase in a pull request, issue, or other electronic communication) is legally binding and has the same force and effect as a handwritten signature. You consent to the use of electronic means to enter into this Agreement and acknowledge that this Agreement is enforceable as if executed in a traditional written format.
|
||||
|
||||
## 11. General Provisions
|
||||
|
||||
**Entire Agreement.** This Agreement constitutes the entire agreement between You and the Project Owner with respect to Your Contributions and supersedes all prior or contemporaneous understandings regarding such subject matter.
|
||||
|
||||
**Severability.** If any provision of this Agreement is held to be unenforceable or invalid, that provision will be enforced to the maximum extent possible and the remaining provisions will remain in full force and effect.
|
||||
|
||||
**No Waiver.** The failure of the Project Owner to enforce any provision of this Agreement shall not constitute a waiver of that provision or any other provision.
|
||||
|
||||
**Amendment.** This Agreement may only be modified by a written instrument signed by both parties. Modifications to this Agreement apply only to Contributions submitted after the modified Agreement is published and accepted by You. Prior Contributions remain governed by the version of the Agreement in effect at the time of submission.
|
||||
|
||||
**Notification.** Notices under this Agreement shall be sent to the Project Owner via a GitHub issue on the Project repository. Notices are effective upon receipt.
|
||||
|
||||
---
|
||||
|
||||
**By signing this CLA, you acknowledge that you have read and understood this Agreement and agree to be bound by its terms.**
|
||||
|
||||
To sign, reply in the pull request with:
|
||||
|
||||
> I have read the CLA Document and I hereby sign the CLA
|
||||
@@ -7,7 +7,7 @@
|
||||
# =============================================================================
|
||||
|
||||
ARG NODE_IMAGE=node:24-alpine
|
||||
ARG GOLANG_IMAGE=golang:1.26.1-alpine
|
||||
ARG GOLANG_IMAGE=golang:1.26.2-alpine
|
||||
ARG ALPINE_IMAGE=alpine:3.21
|
||||
ARG POSTGRES_IMAGE=postgres:18-alpine
|
||||
ARG GOPROXY=https://goproxy.cn,direct
|
||||
|
||||
178
LICENSE
@@ -1,21 +1,165 @@
|
||||
MIT License
|
||||
GNU LESSER GENERAL PUBLIC LICENSE
|
||||
Version 3, 29 June 2007
|
||||
|
||||
Copyright (c) 2025 Wesley Liddick
|
||||
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
|
||||
Everyone is permitted to copy and distribute verbatim copies
|
||||
of this license document, but changing it is not allowed.
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
This version of the GNU Lesser General Public License incorporates
|
||||
the terms and conditions of version 3 of the GNU General Public
|
||||
License, supplemented by the additional permissions listed below.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
0. Additional Definitions.
|
||||
|
||||
As used herein, "this License" refers to version 3 of the GNU Lesser
|
||||
General Public License, and the "GNU GPL" refers to version 3 of the GNU
|
||||
General Public License.
|
||||
|
||||
"The Library" refers to a covered work governed by this License,
|
||||
other than an Application or a Combined Work as defined below.
|
||||
|
||||
An "Application" is any work that makes use of an interface provided
|
||||
by the Library, but which is not otherwise based on the Library.
|
||||
Defining a subclass of a class defined by the Library is deemed a mode
|
||||
of using an interface provided by the Library.
|
||||
|
||||
A "Combined Work" is a work produced by combining or linking an
|
||||
Application with the Library. The particular version of the Library
|
||||
with which the Combined Work was made is also called the "Linked
|
||||
Version".
|
||||
|
||||
The "Minimal Corresponding Source" for a Combined Work means the
|
||||
Corresponding Source for the Combined Work, excluding any source code
|
||||
for portions of the Combined Work that, considered in isolation, are
|
||||
based on the Application, and not on the Linked Version.
|
||||
|
||||
The "Corresponding Application Code" for a Combined Work means the
|
||||
object code and/or source code for the Application, including any data
|
||||
and utility programs needed for reproducing the Combined Work from the
|
||||
Application, but excluding the System Libraries of the Combined Work.
|
||||
|
||||
1. Exception to Section 3 of the GNU GPL.
|
||||
|
||||
You may convey a covered work under sections 3 and 4 of this License
|
||||
without being bound by section 3 of the GNU GPL.
|
||||
|
||||
2. Conveying Modified Versions.
|
||||
|
||||
If you modify a copy of the Library, and, in your modifications, a
|
||||
facility refers to a function or data to be supplied by an Application
|
||||
that uses the facility (other than as an argument passed when the
|
||||
facility is invoked), then you may convey a copy of the modified
|
||||
version:
|
||||
|
||||
a) under this License, provided that you make a good faith effort to
|
||||
ensure that, in the event an Application does not supply the
|
||||
function or data, the facility still operates, and performs
|
||||
whatever part of its purpose remains meaningful, or
|
||||
|
||||
b) under the GNU GPL, with none of the additional permissions of
|
||||
this License applicable to that copy.
|
||||
|
||||
3. Object Code Incorporating Material from Library Header Files.
|
||||
|
||||
The object code form of an Application may incorporate material from
|
||||
a header file that is part of the Library. You may convey such object
|
||||
code under terms of your choice, provided that, if the incorporated
|
||||
material is not limited to numerical parameters, data structure
|
||||
layouts and accessors, or small macros, inline functions and templates
|
||||
(ten or fewer lines in length), you do both of the following:
|
||||
|
||||
a) Give prominent notice with each copy of the object code that the
|
||||
Library is used in it and that the Library and its use are
|
||||
covered by this License.
|
||||
|
||||
b) Accompany the object code with a copy of the GNU GPL and this license
|
||||
document.
|
||||
|
||||
4. Combined Works.
|
||||
|
||||
You may convey a Combined Work under terms of your choice that,
|
||||
taken together, effectively do not restrict modification of the
|
||||
portions of the Library contained in the Combined Work and reverse
|
||||
engineering for debugging such modifications, if you also do each of
|
||||
the following:
|
||||
|
||||
a) Give prominent notice with each copy of the Combined Work that
|
||||
the Library is used in it and that the Library and its use are
|
||||
covered by this License.
|
||||
|
||||
b) Accompany the Combined Work with a copy of the GNU GPL and this license
|
||||
document.
|
||||
|
||||
c) For a Combined Work that displays copyright notices during
|
||||
execution, include the copyright notice for the Library among
|
||||
these notices, as well as a reference directing the user to the
|
||||
copies of the GNU GPL and this license document.
|
||||
|
||||
d) Do one of the following:
|
||||
|
||||
0) Convey the Minimal Corresponding Source under the terms of this
|
||||
License, and the Corresponding Application Code in a form
|
||||
suitable for, and under terms that permit, the user to
|
||||
recombine or relink the Application with a modified version of
|
||||
the Linked Version to produce a modified Combined Work, in the
|
||||
manner specified by section 6 of the GNU GPL for conveying
|
||||
Corresponding Source.
|
||||
|
||||
1) Use a suitable shared library mechanism for linking with the
|
||||
Library. A suitable mechanism is one that (a) uses at run time
|
||||
a copy of the Library already present on the user's computer
|
||||
system, and (b) will operate properly with a modified version
|
||||
of the Library that is interface-compatible with the Linked
|
||||
Version.
|
||||
|
||||
e) Provide Installation Information, but only if you would otherwise
|
||||
be required to provide such information under section 6 of the
|
||||
GNU GPL, and only to the extent that such information is
|
||||
necessary to install and execute a modified version of the
|
||||
Combined Work produced by recombining or relinking the
|
||||
Application with a modified version of the Linked Version. (If
|
||||
you use option 4d0, the Installation Information must accompany
|
||||
the Minimal Corresponding Source and Corresponding Application
|
||||
Code. If you use option 4d1, you must provide the Installation
|
||||
Information in the manner specified by section 6 of the GNU GPL
|
||||
for conveying Corresponding Source.)
|
||||
|
||||
5. Combined Libraries.
|
||||
|
||||
You may place library facilities that are a work based on the
|
||||
Library side by side in a single library together with other library
|
||||
facilities that are not Applications and are not covered by this
|
||||
License, and convey such a combined library under terms of your
|
||||
choice, if you do both of the following:
|
||||
|
||||
a) Accompany the combined library with a copy of the same work based
|
||||
on the Library, uncombined with any other library facilities,
|
||||
conveyed under the terms of this License.
|
||||
|
||||
b) Give prominent notice with the combined library that part of it
|
||||
is a work based on the Library, and explaining where to find the
|
||||
accompanying uncombined form of the same work.
|
||||
|
||||
6. Revised Versions of the GNU Lesser General Public License.
|
||||
|
||||
The Free Software Foundation may publish revised and/or new versions
|
||||
of the GNU Lesser General Public License from time to time. Such new
|
||||
versions will be similar in spirit to the present version, but may
|
||||
differ in detail to address new problems or concerns.
|
||||
|
||||
Each version is given a distinguishing version number. If the
|
||||
Library as you received it specifies that a certain numbered version
|
||||
of the GNU Lesser General Public License "or any later version"
|
||||
applies to it, you have the option of following the terms and
|
||||
conditions either of that published version or of any later version
|
||||
published by the Free Software Foundation. If the Library as you
|
||||
received it does not specify a version number of the GNU Lesser
|
||||
General Public License, you may choose any version of the GNU Lesser
|
||||
General Public License ever published by the Free Software Foundation.
|
||||
|
||||
If the Library as you received it specifies that a proxy can decide
|
||||
whether future versions of the GNU Lesser General Public License shall
|
||||
apply, that proxy's public statement of acceptance of any version is
|
||||
permanent authorization for you to choose that version for the
|
||||
Library.
|
||||
14
Makefile
@@ -1,4 +1,12 @@
|
||||
.PHONY: build build-backend build-frontend build-datamanagementd test test-backend test-frontend test-datamanagementd secret-scan
|
||||
.PHONY: build build-backend build-frontend build-datamanagementd test test-backend test-frontend test-frontend-critical test-datamanagementd secret-scan
|
||||
|
||||
FRONTEND_CRITICAL_VITEST := \
|
||||
src/views/auth/__tests__/LinuxDoCallbackView.spec.ts \
|
||||
src/views/auth/__tests__/WechatCallbackView.spec.ts \
|
||||
src/views/user/__tests__/PaymentView.spec.ts \
|
||||
src/views/user/__tests__/PaymentResultView.spec.ts \
|
||||
src/components/user/profile/__tests__/ProfileInfoCard.spec.ts \
|
||||
src/views/admin/__tests__/SettingsView.spec.ts
|
||||
|
||||
# 一键编译前后端
|
||||
build: build-backend build-frontend
|
||||
@@ -24,6 +32,10 @@ test-backend:
|
||||
test-frontend:
|
||||
@pnpm --dir frontend run lint:check
|
||||
@pnpm --dir frontend run typecheck
|
||||
@$(MAKE) test-frontend-critical
|
||||
|
||||
test-frontend-critical:
|
||||
@pnpm --dir frontend exec vitest run $(FRONTEND_CRITICAL_VITEST)
|
||||
|
||||
test-datamanagementd:
|
||||
@cd datamanagement && go test ./...
|
||||
|
||||
100
README.md
@@ -12,7 +12,7 @@
|
||||
|
||||
**AI API Gateway Platform for Subscription Quota Distribution**
|
||||
|
||||
English | [中文](README_CN.md)
|
||||
English | [中文](README_CN.md) | [日本語](README_JA.md)
|
||||
|
||||
</div>
|
||||
|
||||
@@ -42,16 +42,72 @@ Sub2API is an AI API gateway platform designed to distribute and manage API quot
|
||||
- **Smart Scheduling** - Intelligent account selection with sticky sessions
|
||||
- **Concurrency Control** - Per-user and per-account concurrency limits
|
||||
- **Rate Limiting** - Configurable request and token rate limits
|
||||
- **Built-in Payment System** - Supports EasyPay, Alipay, WeChat Pay, and Stripe for user self-service top-up, no separate payment service needed ([Configuration Guide](docs/PAYMENT.md))
|
||||
- **Admin Dashboard** - Web interface for monitoring and management
|
||||
- **External System Integration** - Embed external systems (e.g. payment, ticketing) via iframe to extend the admin dashboard
|
||||
- **External System Integration** - Embed external systems (e.g. ticketing) via iframe to extend the admin dashboard
|
||||
|
||||
## Don't Want to Self-Host?
|
||||
## ❤️ Sponsors
|
||||
|
||||
> [Want to appear here?](mailto:support@pincc.ai)
|
||||
|
||||
<table>
|
||||
<tr>
|
||||
<td width="180" align="center" valign="middle"><a href="https://shop.pincc.ai/"><img src="assets/partners/logos/pincc-logo.png" alt="pincc" width="120"></a></td>
|
||||
<td width="180" align="center" valign="middle"><a href="https://shop.pincc.ai/"><img src="assets/partners/logos/pincc-logo.png" alt="pincc" width="150"></a></td>
|
||||
<td valign="middle"><b><a href="https://shop.pincc.ai/">PinCC</a></b> is the official relay service built on Sub2API, offering stable access to Claude Code, Codex, Gemini and other popular models — ready to use, no deployment or maintenance required.</td>
|
||||
</tr>
|
||||
|
||||
<tr>
|
||||
<td width="180"><a href="https://www.packyapi.com/register?aff=sub2api"><img src="assets/partners/logos/packycode.png" alt="PackyCode" width="150"></a></td>
|
||||
<td>Thanks to PackyCode for sponsoring this project! PackyCode is a reliable and efficient API relay service provider, offering relay services for Claude Code, Codex, Gemini, and more. PackyCode provides special discounts for our software users: register using <a href="https://www.packyapi.com/register?aff=sub2api">this link</a> and enter the "sub2api" promo code during first recharge to get 10% off.</td>
|
||||
</tr>
|
||||
|
||||
<tr>
|
||||
<td width="180"><a href="https://poixe.com/i/sub2api"><img src="assets/partners/logos/poixe.png" alt="PoixeAi" width="150"></a></td>
|
||||
<td>Thanks to Poixe Ai for sponsoring this project! Poixe AI provides reliable LLM API services. You can leverage the platform's API endpoints to seamlessly build AI-powered products. Additionally, you can become a vendor by providing AI API resources to the platform and earn revenue. Register through the exclusive <a href="https://poixe.com/i/sub2api">sub2api</a> referral link and receive a bonus of $5 USD on your first top-up.</td>
|
||||
</tr>
|
||||
|
||||
<tr>
|
||||
<td width="180"><a href="https://ctok.ai"><img src="assets/partners/logos/ctok.png" alt="CTok" width="150"></a></td>
|
||||
<td>Thanks to CTok.ai for sponsoring this project! CTok.ai is dedicated to building a one-stop AI programming tool service platform. We offer professional Claude Code packages and technical community services, with support for Google Gemini and OpenAI Codex. Through carefully designed plans and a professional tech community, we provide developers with reliable service guarantees and continuous technical support, making AI-assisted programming a true productivity tool. Click <a href="https://ctok.ai">here</a> to register!</td>
|
||||
</tr>
|
||||
|
||||
<tr>
|
||||
<td width="180"><a href="https://code.silkapi.com/"><img src="assets/partners/logos/silkapi.png" alt="silkapi" width="150"></a></td>
|
||||
<td>Thanks to SilkAPI for sponsoring this project! <a href="https://code.silkapi.com/">SilkAPI</a> is a relay service built on Sub2API, specializing in providing high-speed and stable Codex API relay.</td>
|
||||
</tr>
|
||||
|
||||
<tr>
|
||||
<td width="180"><a href="https://ylscode.com/"><img src="assets/partners/logos/ylscode.png" alt="ylscode" width="150"></a></td>
|
||||
<td>Thanks to YLS Code for sponsoring this project! <a href="https://ylscode.com/">YLS Code</a> is dedicated to building secure enterprise-grade Coding Agent productivity services, offering stable and fast Codex / Claude / Gemini subscription services along with pay-as-you-go API options for flexible choices. Register now for a limited-time 3-day Codex trial bonus!</td>
|
||||
</tr>
|
||||
|
||||
<tr>
|
||||
<td width="180"><a href="https://www.aicodemirror.com/register?invitecode=KMVZQM"><img src="assets/partners/logos/AICodeMirror.jpg" alt="AICodeMirror" width="150"></a></td>
|
||||
<td>Thanks to AICodeMirror for sponsoring this project! AICodeMirror provides official high-stability relay services for Claude Code / Codex / Gemini CLI, with enterprise-grade concurrency, fast invoicing, and 24/7 dedicated technical support. Claude Code / Codex / Gemini official channels at 38% / 2% / 9% of original price, with extra discounts on top-ups! AICodeMirror offers special benefits for sub2api users: register via <a href="https://www.aicodemirror.com/register?invitecode=KMVZQM">this link</a> to enjoy 20% off your first top-up, and enterprise customers can get up to 25% off!</td>
|
||||
</tr>
|
||||
|
||||
<tr>
|
||||
<td width="180"><a href="https://aigocode.com/invite/SUB2API"><img src="assets/partners/logos/aigocode.png" alt="AIGoCode" width="150"></a></td>
|
||||
<td>Thanks to AIGoCode for sponsoring this project! AIGoCode is an all-in-one platform that integrates Claude Code, Codex, and the latest Gemini models, providing you with stable, efficient, and highly cost-effective AI coding services. The platform offers flexible subscription plans, zero risk of account suspension, direct access with no VPN required, and lightning-fast responses. AIGoCode has prepared a special benefit for sub2api users: if you register via <a href="https://aigocode.com/invite/SUB2API">this link</a>, you'll receive an extra 10% bonus credit on your first top-up!</td>
|
||||
</tr>
|
||||
|
||||
<tr>
|
||||
<td width="180"><a href="https://shop.bmoplus.com/?utm_source=github"><img src="assets/partners/logos/bmoplus.jpg" alt="bmoplus" width="150"></a></td>
|
||||
<td>Huge thanks to BmoPlus for sponsoring this project! BmoPlus is a highly reliable AI account provider built strictly for heavy AI users and developers. They offer rock-solid, ready-to-use accounts and official top-up services for ChatGPT Plus / ChatGPT Pro (Full Warranty) / Claude Pro / Super Grok / Gemini Pro. By registering and ordering through <a href="https://shop.bmoplus.com/?utm_source=github">BmoPlus - Premium AI Accounts & Top-ups</a>, users can unlock the mind-blowing rate of 10% of the official GPT subscription price (90% OFF)</td>
|
||||
</tr>
|
||||
|
||||
<tr>
|
||||
<td width="180"><a href="https://bestproxy.com/?keyword=a2e8iuol"><img src="assets/partners/logos/bestproxy.png" alt="bestproxy" width="150"></a></td>
|
||||
<td>Thanks to Bestproxy for sponsoring this project! <a href="https://bestproxy.com/?keyword=a2e8iuol">Bestproxy</a> provides high-purity residential IPs with dedicated one-IP-per-account support. By combining real home networks with fingerprint isolation, it enables link environment isolation and reduces the probability of association-based risk control.</td>
|
||||
</tr>
|
||||
|
||||
<tr>
|
||||
<td width="180"><a href="https://pateway.ai/?ch=1tsfr51"><img src="assets/partners/logos/pateway.png" alt="pateway" width="150"></a></td>
|
||||
<td>Thanks to PatewayAI for sponsoring this project! PatewayAI is a premium model API relay service provider built for heavy AI developers, focused on direct official connections. Offering the full Claude series and Codex series models, 100% sourced directly from official providers — no dilution, no substitution, open to verification. Billing is fully transparent with token-level invoices that can be audited line by line.
|
||||
Enterprise-grade high concurrency is also supported, with a dedicated management platform for enterprise clients. Enterprise customers can sign formal contracts and receive invoices. Visit the official website for more details and contact information.
|
||||
Register now via <a href="https://pateway.ai/?ch=1tsfr51">this link</a> to receive $3 in trial credits. User top-ups start as low as 60% off, and referring friends earns both parties rewards — referral bonuses up to $150.</td>
|
||||
</tr>
|
||||
|
||||
</table>
|
||||
|
||||
## Ecosystem
|
||||
@@ -60,7 +116,7 @@ Community projects that extend or integrate with Sub2API:
|
||||
|
||||
| Project | Description | Features |
|
||||
|---------|-------------|----------|
|
||||
| [Sub2ApiPay](https://github.com/touwaeriol/sub2apipay) | Self-service payment system | Self-service top-up and subscription purchase; supports YiPay protocol, WeChat Pay, Alipay, Stripe; embeddable via iframe |
|
||||
| ~~[Sub2ApiPay](https://github.com/touwaeriol/sub2apipay)~~ | ~~Self-service payment system~~ | **Now Built-in** — Payment is now integrated into Sub2API, no separate deployment needed. See [Payment Configuration Guide](docs/PAYMENT.md) |
|
||||
| [sub2api-mobile](https://github.com/ckken/sub2api-mobile) | Mobile admin console | Cross-platform app (iOS/Android/Web) for user management, account management, monitoring dashboard, and multi-backend switching; built with Expo + React Native |
|
||||
|
||||
## Tech Stack
|
||||
@@ -178,10 +234,10 @@ mkdir -p sub2api-deploy && cd sub2api-deploy
|
||||
curl -sSL https://raw.githubusercontent.com/Wei-Shaw/sub2api/main/deploy/docker-deploy.sh | bash
|
||||
|
||||
# Start services
|
||||
docker-compose up -d
|
||||
docker compose up -d
|
||||
|
||||
# View logs
|
||||
docker-compose logs -f sub2api
|
||||
docker compose logs -f sub2api
|
||||
```
|
||||
|
||||
**What the script does:**
|
||||
@@ -245,16 +301,16 @@ mkdir -p data postgres_data redis_data
|
||||
|
||||
# 5. Start all services
|
||||
# Option A: Local directory version (recommended - easy migration)
|
||||
docker-compose -f docker-compose.local.yml up -d
|
||||
docker compose -f docker-compose.local.yml up -d
|
||||
|
||||
# Option B: Named volumes version (simple setup)
|
||||
docker-compose up -d
|
||||
docker compose up -d
|
||||
|
||||
# 6. Check status
|
||||
docker-compose -f docker-compose.local.yml ps
|
||||
docker compose -f docker-compose.local.yml ps
|
||||
|
||||
# 7. View logs
|
||||
docker-compose -f docker-compose.local.yml logs -f sub2api
|
||||
docker compose -f docker-compose.local.yml logs -f sub2api
|
||||
```
|
||||
|
||||
#### Deployment Versions
|
||||
@@ -272,15 +328,15 @@ Open `http://YOUR_SERVER_IP:8080` in your browser.
|
||||
|
||||
If admin password was auto-generated, find it in logs:
|
||||
```bash
|
||||
docker-compose -f docker-compose.local.yml logs sub2api | grep "admin password"
|
||||
docker compose -f docker-compose.local.yml logs sub2api | grep "admin password"
|
||||
```
|
||||
|
||||
#### Upgrade
|
||||
|
||||
```bash
|
||||
# Pull latest image and recreate container
|
||||
docker-compose -f docker-compose.local.yml pull
|
||||
docker-compose -f docker-compose.local.yml up -d
|
||||
docker compose -f docker-compose.local.yml pull
|
||||
docker compose -f docker-compose.local.yml up -d
|
||||
```
|
||||
|
||||
#### Easy Migration (Local Directory Version)
|
||||
@@ -289,7 +345,7 @@ When using `docker-compose.local.yml`, migrate to a new server easily:
|
||||
|
||||
```bash
|
||||
# On source server
|
||||
docker-compose -f docker-compose.local.yml down
|
||||
docker compose -f docker-compose.local.yml down
|
||||
cd ..
|
||||
tar czf sub2api-complete.tar.gz sub2api-deploy/
|
||||
|
||||
@@ -299,23 +355,23 @@ scp sub2api-complete.tar.gz user@new-server:/path/
|
||||
# On new server
|
||||
tar xzf sub2api-complete.tar.gz
|
||||
cd sub2api-deploy/
|
||||
docker-compose -f docker-compose.local.yml up -d
|
||||
docker compose -f docker-compose.local.yml up -d
|
||||
```
|
||||
|
||||
#### Useful Commands
|
||||
|
||||
```bash
|
||||
# Stop all services
|
||||
docker-compose -f docker-compose.local.yml down
|
||||
docker compose -f docker-compose.local.yml down
|
||||
|
||||
# Restart
|
||||
docker-compose -f docker-compose.local.yml restart
|
||||
docker compose -f docker-compose.local.yml restart
|
||||
|
||||
# View all logs
|
||||
docker-compose -f docker-compose.local.yml logs -f
|
||||
docker compose -f docker-compose.local.yml logs -f
|
||||
|
||||
# Remove all data (caution!)
|
||||
docker-compose -f docker-compose.local.yml down
|
||||
docker compose -f docker-compose.local.yml down
|
||||
rm -rf data/ postgres_data/ redis_data/
|
||||
```
|
||||
|
||||
@@ -574,7 +630,9 @@ sub2api/
|
||||
|
||||
## License
|
||||
|
||||
MIT License
|
||||
This project is licensed under the [GNU Lesser General Public License v3.0](LICENSE) (or later).
|
||||
|
||||
Copyright (c) 2026 Wesley Liddick
|
||||
|
||||
---
|
||||
|
||||
|
||||
100
README_CN.md
@@ -12,7 +12,7 @@
|
||||
|
||||
**AI API 网关平台 - 订阅配额分发管理**
|
||||
|
||||
[English](README.md) | 中文
|
||||
[English](README.md) | 中文 | [日本語](README_JA.md)
|
||||
|
||||
</div>
|
||||
|
||||
@@ -41,16 +41,72 @@ Sub2API 是一个 AI API 网关平台,用于分发和管理 AI 产品订阅的
|
||||
- **智能调度** - 智能账号选择,支持粘性会话
|
||||
- **并发控制** - 用户级和账号级并发限制
|
||||
- **速率限制** - 可配置的请求和 Token 速率限制
|
||||
- **内置支付系统** - 支持 EasyPay 易支付、支付宝官方、微信官方、Stripe,用户自助充值,无需独立部署支付服务([配置指南](docs/PAYMENT_CN.md))
|
||||
- **管理后台** - Web 界面进行监控和管理
|
||||
- **外部系统集成** - 支持通过 iframe 嵌入外部系统(如支付、工单等),扩展管理后台功能
|
||||
- **外部系统集成** - 支持通过 iframe 嵌入外部系统(如工单等),扩展管理后台功能
|
||||
|
||||
## 不想自建?试试官方中转
|
||||
## ❤️ 赞助商
|
||||
|
||||
> [想出现在这里?](mailto:support@pincc.ai)
|
||||
|
||||
<table>
|
||||
<tr>
|
||||
<td width="180" align="center" valign="middle"><a href="https://shop.pincc.ai/"><img src="assets/partners/logos/pincc-logo.png" alt="pincc" width="120"></a></td>
|
||||
<td width="180" align="center" valign="middle"><a href="https://shop.pincc.ai/"><img src="assets/partners/logos/pincc-logo.png" alt="pincc" width="150"></a></td>
|
||||
<td valign="middle"><b><a href="https://shop.pincc.ai/">PinCC</a></b> 是基于 Sub2API 搭建的官方中转服务,提供 Claude Code、Codex、Gemini 等主流模型的稳定中转,开箱即用,免去自建部署与运维烦恼。</td>
|
||||
</tr>
|
||||
|
||||
<tr>
|
||||
<td width="180"><a href="https://www.packyapi.com/register?aff=sub2api"><img src="assets/partners/logos/packycode.png" alt="PackyCode" width="150"></a></td>
|
||||
<td>感谢 PackyCode 赞助了本项目!PackyCode 是一家稳定、高效的API中转服务商,提供 Claude Code、Codex、Gemini 等多种中转服务。PackyCode 为本软件的用户提供了特别优惠,使用<a href="https://www.packyapi.com/register?aff=sub2api">此链接</a>注册并在充值时填写"sub2api"优惠码,首次充值可以享受9折优惠!</td>
|
||||
</tr>
|
||||
|
||||
<tr>
|
||||
<td width="180"><a href="https://poixe.com/i/sub2api"><img src="assets/partners/logos/poixe.png" alt="PoixeAI" width="150"></a></td>
|
||||
<td>感谢 Poixe AI 赞助了本项目!Poixe AI 提供可靠的 AI 模型接口服务,您可以使用平台提供的 LLM API 接口轻松构建 AI 产品,同时也可以成为供应商,为平台提供大模型资源以赚取收益。通过 <a href="https://poixe.com/i/sub2api">此链接</a> 专属链接注册,充值额外赠送 $5 美金</td>
|
||||
</tr>
|
||||
|
||||
<tr>
|
||||
<td width="180"><a href="https://ctok.ai"><img src="assets/partners/logos/ctok.png" alt="CTok" width="150"></a></td>
|
||||
<td>感谢 CTok.ai 赞助了本项目!CTok.ai 致力于打造一站式 AI 编程工具服务平台。我们提供 Claude Code 专业套餐及技术社群服务,同时支持 Google Gemini 和 OpenAI Codex。通过精心设计的套餐方案和专业的技术社群,为开发者提供稳定的服务保障和持续的技术支持,让 AI 辅助编程真正成为开发者的生产力工具。点击<a href="https://ctok.ai">这里</a>注册!</td>
|
||||
</tr>
|
||||
|
||||
<tr>
|
||||
<td width="180"><a href="https://code.silkapi.com/"><img src="assets/partners/logos/silkapi.png" alt="silkapi" width="150"></a></td>
|
||||
<td>感谢 丝绸API 赞助了本项目! <a href="https://code.silkapi.com/">丝绸API</a> 是基于 Sub2API 搭建的中转服务,专注于提供 Codex 高速稳定API中转。</td>
|
||||
</tr>
|
||||
|
||||
<tr>
|
||||
<td width="180"><a href="https://ylscode.com/"><img src="assets/partners/logos/ylscode.png" alt="ylscode" width="150"></a></td>
|
||||
<td>感谢 伊莉思Code 赞助了本项目! <a href="https://ylscode.com/">伊莉思Code</a> 致力于构建安全的企业级Coding Agent生产力服务,提供稳定快速的 Codex / Claude / Gemini 订阅服务与即用即付API多种方案灵活选择,限时注册赠送 3 天 Codex 试用福利!</td>
|
||||
</tr>
|
||||
|
||||
<tr>
|
||||
<td width="180"><a href="https://www.aicodemirror.com/register?invitecode=KMVZQM"><img src="assets/partners/logos/AICodeMirror.jpg" alt="AICodeMirror" width="150"></a></td>
|
||||
<td>感谢 AICodeMirror 赞助了本项目!AICodeMirror 提供 Claude Code / Codex / Gemini CLI 官方高稳定性中转服务,企业级并发、快速开票、7×24 小时专属技术支持。Claude Code / Codex / Gemini 官方通道低至原价 38% / 2% / 9%,充值更享额外折扣!AICodeMirror 为 sub2api 用户提供专属福利:通过<a href="https://www.aicodemirror.com/register?invitecode=KMVZQM">此链接</a>注册,首次充值立享 8 折优惠,企业客户最高可享 75 折!</td>
|
||||
</tr>
|
||||
|
||||
<tr>
|
||||
<td width="180"><a href="https://aigocode.com/invite/SUB2API"><img src="assets/partners/logos/aigocode.png" alt="AIGoCode" width="150"></a></td>
|
||||
<td>感谢 AIGoCode 赞助了本项目!AIGoCode 是一站式集成 Claude Code、Codex 以及最新 Gemini 模型的综合平台,为您提供稳定、高效、高性价比的 AI 编程服务。平台提供灵活的订阅方案,零封号风险,免 VPN 直连,响应极速。AIGoCode 为 sub2api 用户准备了专属福利:通过<a href="https://aigocode.com/invite/SUB2API">此链接</a>注册,首次充值可额外获得 10% 赠送额度!</td>
|
||||
</tr>
|
||||
|
||||
<tr>
|
||||
<td width="180"><a href="https://shop.bmoplus.com/?utm_source=github"><img src="assets/partners/logos/bmoplus.jpg" alt="bmoplus" width="150"></a></td>
|
||||
<td>感谢 BmoPlus 赞助了本项目!BmoPlus 是一家专为AI订阅重度用户打造的可靠 AI 账号代充服务商,提供稳定的 ChatGPT Plus / ChatGPT Pro(全程质保) / Claude Pro / Super Grok / Gemini Pro 的官方代充&成品账号。 通过<a href="https://shop.bmoplus.com/?utm_source=github">BmoPlus AI成品号专卖/代充</a>注册下单的用户,可享GPT 官网订阅一折 的震撼价格!</td>
|
||||
</tr>
|
||||
|
||||
<tr>
|
||||
<td width="180"><a href="https://bestproxy.com/?keyword=a2e8iuol"><img src="assets/partners/logos/bestproxy.png" alt="bestproxy" width="150"></a></td>
|
||||
<td>感谢 Bestproxy 赞助了本项目!<a href="https://bestproxy.com/?keyword=a2e8iuol">Bestproxy</a> 是一家提供高纯度住宅IP,支持一号一IP独享,结合真实家庭网络与指纹隔离,可实现链路环境隔离,降低关联风控概率。</td>
|
||||
</tr>
|
||||
|
||||
<tr>
|
||||
<td width="180"><a href="https://pateway.ai/?ch=1tsfr51"><img src="assets/partners/logos/pateway.png" alt="pateway" width="150"></a></td>
|
||||
<td>感谢 PatewayAI 赞助了本项目!PatewayAI 是一家面向重度 AI 开发者、专注官方直连的高品质模型 API 中转服务商。提供 Claude 全系列与 Codex 系列模型,100% 官方源直供,不掺假不注水,欢迎检验。计费透明,Token 级账单可逐笔核验。
|
||||
同时支持企业级高并发,并为企业客户提供了专业的管理平台,企业客户可签订正式合同并开具发票,更多详情进入官网获取联系方式。
|
||||
现在通过 <a href="https://pateway.ai/?ch=1tsfr51">此链接</a> 注册即送 $3 试用额度,用户充值低至 6 折,邀请好友双向赠送,邀请奖励可达 $150。</td>
|
||||
</tr>
|
||||
|
||||
</table>
|
||||
|
||||
## 生态项目
|
||||
@@ -59,7 +115,7 @@ Sub2API 是一个 AI API 网关平台,用于分发和管理 AI 产品订阅的
|
||||
|
||||
| 项目 | 说明 | 功能 |
|
||||
|------|------|------|
|
||||
| [Sub2ApiPay](https://github.com/touwaeriol/sub2apipay) | 自助支付系统 | 用户自助充值、自助订阅购买;兼容易支付协议、微信官方支付、支付宝官方支付、Stripe;支持 iframe 嵌入管理后台 |
|
||||
| ~~[Sub2ApiPay](https://github.com/touwaeriol/sub2apipay)~~ | ~~自助支付系统~~ | **已内置** — 支付功能已集成到 Sub2API 中,无需独立部署。详见 [支付配置指南](docs/PAYMENT_CN.md) |
|
||||
| [sub2api-mobile](https://github.com/ckken/sub2api-mobile) | 移动端管理控制台 | 跨平台应用(iOS/Android/Web),支持用户管理、账号管理、监控看板、多后端切换;基于 Expo + React Native 构建 |
|
||||
|
||||
## 技术栈
|
||||
@@ -177,10 +233,10 @@ mkdir -p sub2api-deploy && cd sub2api-deploy
|
||||
curl -sSL https://raw.githubusercontent.com/Wei-Shaw/sub2api/main/deploy/docker-deploy.sh | bash
|
||||
|
||||
# 启动服务
|
||||
docker-compose up -d
|
||||
docker compose up -d
|
||||
|
||||
# 查看日志
|
||||
docker-compose logs -f sub2api
|
||||
docker compose logs -f sub2api
|
||||
```
|
||||
|
||||
**脚本功能:**
|
||||
@@ -244,16 +300,16 @@ mkdir -p data postgres_data redis_data
|
||||
|
||||
# 5. 启动所有服务
|
||||
# 选项 A:本地目录版(推荐 - 易于迁移)
|
||||
docker-compose -f docker-compose.local.yml up -d
|
||||
docker compose -f docker-compose.local.yml up -d
|
||||
|
||||
# 选项 B:命名卷版(简单设置)
|
||||
docker-compose up -d
|
||||
docker compose up -d
|
||||
|
||||
# 6. 查看状态
|
||||
docker-compose -f docker-compose.local.yml ps
|
||||
docker compose -f docker-compose.local.yml ps
|
||||
|
||||
# 7. 查看日志
|
||||
docker-compose -f docker-compose.local.yml logs -f sub2api
|
||||
docker compose -f docker-compose.local.yml logs -f sub2api
|
||||
```
|
||||
|
||||
#### 部署版本对比
|
||||
@@ -283,15 +339,15 @@ docker-compose -f docker-compose.local.yml logs -f sub2api
|
||||
|
||||
如果管理员密码是自动生成的,在日志中查找:
|
||||
```bash
|
||||
docker-compose -f docker-compose.local.yml logs sub2api | grep "admin password"
|
||||
docker compose -f docker-compose.local.yml logs sub2api | grep "admin password"
|
||||
```
|
||||
|
||||
#### 升级
|
||||
|
||||
```bash
|
||||
# 拉取最新镜像并重建容器
|
||||
docker-compose -f docker-compose.local.yml pull
|
||||
docker-compose -f docker-compose.local.yml up -d
|
||||
docker compose -f docker-compose.local.yml pull
|
||||
docker compose -f docker-compose.local.yml up -d
|
||||
```
|
||||
|
||||
#### 轻松迁移(本地目录版)
|
||||
@@ -300,7 +356,7 @@ docker-compose -f docker-compose.local.yml up -d
|
||||
|
||||
```bash
|
||||
# 源服务器
|
||||
docker-compose -f docker-compose.local.yml down
|
||||
docker compose -f docker-compose.local.yml down
|
||||
cd ..
|
||||
tar czf sub2api-complete.tar.gz sub2api-deploy/
|
||||
|
||||
@@ -310,23 +366,23 @@ scp sub2api-complete.tar.gz user@new-server:/path/
|
||||
# 新服务器
|
||||
tar xzf sub2api-complete.tar.gz
|
||||
cd sub2api-deploy/
|
||||
docker-compose -f docker-compose.local.yml up -d
|
||||
docker compose -f docker-compose.local.yml up -d
|
||||
```
|
||||
|
||||
#### 常用命令
|
||||
|
||||
```bash
|
||||
# 停止所有服务
|
||||
docker-compose -f docker-compose.local.yml down
|
||||
docker compose -f docker-compose.local.yml down
|
||||
|
||||
# 重启
|
||||
docker-compose -f docker-compose.local.yml restart
|
||||
docker compose -f docker-compose.local.yml restart
|
||||
|
||||
# 查看所有日志
|
||||
docker-compose -f docker-compose.local.yml logs -f
|
||||
docker compose -f docker-compose.local.yml logs -f
|
||||
|
||||
# 删除所有数据(谨慎!)
|
||||
docker-compose -f docker-compose.local.yml down
|
||||
docker compose -f docker-compose.local.yml down
|
||||
rm -rf data/ postgres_data/ redis_data/
|
||||
```
|
||||
|
||||
@@ -635,7 +691,9 @@ sub2api/
|
||||
|
||||
## 许可证
|
||||
|
||||
MIT License
|
||||
本项目基于 [GNU 宽通用公共许可证 v3.0](LICENSE)(或更高版本)授权。
|
||||
|
||||
Copyright (c) 2026 Wesley Liddick
|
||||
|
||||
---
|
||||
|
||||
|
||||
642
README_JA.md
Normal file
@@ -0,0 +1,642 @@
|
||||
# Sub2API
|
||||
|
||||
<div align="center">
|
||||
|
||||
[](https://golang.org/)
|
||||
[](https://vuejs.org/)
|
||||
[](https://www.postgresql.org/)
|
||||
[](https://redis.io/)
|
||||
[](https://www.docker.com/)
|
||||
|
||||
<a href="https://trendshift.io/repositories/21823" target="_blank"><img src="https://trendshift.io/api/badge/repositories/21823" alt="Wei-Shaw%2Fsub2api | Trendshift" width="250" height="55"/></a>
|
||||
|
||||
**サブスクリプションクォータ配分のための AI API ゲートウェイプラットフォーム**
|
||||
|
||||
[English](README.md) | [中文](README_CN.md) | 日本語
|
||||
|
||||
</div>
|
||||
|
||||
> **Sub2API が公式に使用しているドメインは `sub2api.org` と `pincc.ai` のみです。Sub2API の名称を使用している他のウェブサイトは、サードパーティによるデプロイやサービスであり、本プロジェクトとは一切関係がありません。ご利用の際はご自身で確認・判断をお願いします。**
|
||||
|
||||
---
|
||||
|
||||
## デモ
|
||||
|
||||
Sub2API をオンラインでお試しください: **[https://demo.sub2api.org/](https://demo.sub2api.org/)**
|
||||
|
||||
デモ用認証情報(共有デモ環境です。セルフホスト環境では**自動作成されません**):
|
||||
|
||||
| メールアドレス | パスワード |
|
||||
|-------|----------|
|
||||
| admin@sub2api.org | admin123 |
|
||||
|
||||
## 概要
|
||||
|
||||
Sub2API は、AI 製品のサブスクリプションから API クォータを配分・管理するために設計された AI API ゲートウェイプラットフォームです。ユーザーはプラットフォームが生成した API キーを通じて上流の AI サービスにアクセスでき、プラットフォームは認証、課金、負荷分散、リクエスト転送を処理します。
|
||||
|
||||
## 機能
|
||||
|
||||
- **マルチアカウント管理** - 複数の上流アカウントタイプ(OAuth、APIキー)をサポート
|
||||
- **APIキー配布** - ユーザー向けの APIキーの生成と管理
|
||||
- **精密な課金** - トークンレベルの使用量追跡とコスト計算
|
||||
- **スマートスケジューリング** - スティッキーセッション付きのインテリジェントなアカウント選択
|
||||
- **同時実行制御** - ユーザーごと・アカウントごとの同時実行数制限
|
||||
- **レート制限** - 設定可能なリクエスト数およびトークンレート制限
|
||||
- **内蔵決済システム** - EasyPay、Alipay、WeChat Pay、Stripe に対応。ユーザーのセルフサービスチャージが可能で、別途決済サービスのデプロイは不要([設定ガイド](docs/PAYMENT.md))
|
||||
- **管理ダッシュボード** - 監視・管理のための Web インターフェース
|
||||
- **外部システム連携** - 外部システム(チケット管理など)を iframe 経由で管理ダッシュボードに埋め込み可能
|
||||
|
||||
## ❤️ スポンサー
|
||||
|
||||
> [こちらに掲載しませんか?](mailto:support@pincc.ai)
|
||||
|
||||
<table>
|
||||
<tr>
|
||||
<td width="180" align="center" valign="middle"><a href="https://shop.pincc.ai/"><img src="assets/partners/logos/pincc-logo.png" alt="pincc" width="150"></a></td>
|
||||
<td valign="middle"><b><a href="https://shop.pincc.ai/">PinCC</a></b> は Sub2API 上に構築された公式リレーサービスで、Claude Code、Codex、Gemini などの人気モデルへの安定したアクセスを提供します。デプロイやメンテナンスは不要で、すぐにご利用いただけます。</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td width="180"><a href="https://www.packyapi.com/register?aff=sub2api"><img src="assets/partners/logos/packycode.png" alt="PackyCode" width="150"></a></td>
|
||||
<td>PackyCode のご支援に感謝します!PackyCode は Claude Code、Codex、Gemini などのリレーサービスを提供する信頼性の高い API 中継プラットフォームです。本ソフト利用者向けに特別割引があります:<a href="https://www.packyapi.com/register?aff=sub2api">このリンク</a>で登録し、チャージ時に「sub2api」クーポンを入力すると 10% オフになります。</td>
|
||||
</tr>
|
||||
|
||||
<tr>
|
||||
<td width="180"><a href="https://poixe.com/i/sub2api"><img src="assets/partners/logos/poixe.png" alt="PoixeAI" width="150"></a></td>
|
||||
<td>Poixe AI のご支援に感謝します!Poixe AI は信頼性の高い LLM API サービスを提供しています。プラットフォームの API エンドポイントを活用して、AI 搭載プロダクトをシームレスに構築できます。また、ベンダーとして AI API リソースをプラットフォームに提供し、収益を得ることも可能です。専用の <a href="https://poixe.com/i/sub2api">sub2api</a> 紹介リンクから登録すると、初回チャージ時に $5 USD のボーナスがもらえます。</td>
|
||||
</tr>
|
||||
|
||||
<tr>
|
||||
<td width="180"><a href="https://ctok.ai"><img src="assets/partners/logos/ctok.png" alt="CTok" width="150"></a></td>
|
||||
<td>CTok.ai のご支援に感謝します!CTok.ai はワンストップ AI プログラミングツールサービスプラットフォームの構築に取り組んでいます。Claude Code の専用プランと技術コミュニティサービスを提供し、Google Gemini や OpenAI Codex もサポートしています。丁寧に設計されたプランと専門的な技術コミュニティを通じて、開発者に安定したサービス保証と継続的な技術サポートを提供し、AI アシスト プログラミングを真の生産性向上ツールにします。<a href="https://ctok.ai">こちら</a>から登録!</td>
|
||||
</tr>
|
||||
|
||||
<tr>
|
||||
<td width="180"><a href="https://code.silkapi.com/"><img src="assets/partners/logos/silkapi.png" alt="silkapi" width="150"></a></td>
|
||||
<td>SilkAPI のご支援に感謝します!<a href="https://code.silkapi.com/">SilkAPI</a> は Sub2API をベースに構築された中継サービスで、高速かつ安定した Codex API 中継の提供に特化しています。</td>
|
||||
</tr>
|
||||
|
||||
<tr>
|
||||
<td width="180"><a href="https://ylscode.com/"><img src="assets/partners/logos/ylscode.png" alt="ylscode" width="150"></a></td>
|
||||
<td>YLS Code のご支援に感謝します!<a href="https://ylscode.com/">YLS Code</a> は安全なエンタープライズグレードの Coding Agent 生産性サービスの構築に取り組んでおり、安定かつ高速な Codex / Claude / Gemini サブスクリプションサービスと従量課金 API の柔軟なプランを提供しています。期間限定で新規登録者に 3 日間の Codex 試用特典をプレゼント中!</td>
|
||||
</tr>
|
||||
|
||||
<tr>
|
||||
<td width="180"><a href="https://www.aicodemirror.com/register?invitecode=KMVZQM"><img src="assets/partners/logos/AICodeMirror.jpg" alt="AICodeMirror" width="150"></a></td>
|
||||
<td>AICodeMirror のご支援に感謝します!AICodeMirror は Claude Code / Codex / Gemini CLI の公式高安定性リレーサービスを提供しており、エンタープライズグレードの同時実行、迅速な請求書発行、24時間年中無休の専属テクニカルサポートを備えています。Claude Code / Codex / Gemini の公式チャネルを定価の 38% / 2% / 9% で利用可能、チャージ時にはさらに追加割引!AICodeMirror は sub2api ユーザー向けに特別特典を提供中:<a href="https://www.aicodemirror.com/register?invitecode=KMVZQM">こちらのリンク</a>から登録すると、初回チャージが 20% オフ、法人のお客様は最大 25% オフ!</td>
|
||||
</tr>
|
||||
|
||||
<tr>
|
||||
<td width="180"><a href="https://aigocode.com/invite/SUB2API"><img src="assets/partners/logos/aigocode.png" alt="AIGoCode" width="150"></a></td>
|
||||
<td>AIGoCode のご支援に感謝します!AIGoCode は Claude Code、Codex、最新の Gemini モデルを統合したオールインワンプラットフォームで、安定的かつ効率的でコストパフォーマンスに優れた AI コーディングサービスを提供します。柔軟なサブスクリプションプラン、アカウント停止リスクゼロ、VPN 不要の直接アクセス、超高速レスポンスが特長です。AIGoCode は sub2api ユーザー向けに特別特典を用意しています:<a href="https://aigocode.com/invite/SUB2API">こちらのリンク</a>から登録すると、初回チャージ時に 10% のボーナスクレジットを追加プレゼント!</td>
|
||||
</tr>
|
||||
|
||||
<tr>
|
||||
<td width="180"><a href="https://shop.bmoplus.com/?utm_source=github"><img src="assets/partners/logos/bmoplus.jpg" alt="bmoplus" width="150"></a></td>
|
||||
<td>本プロジェクトにご支援いただいた BmoPlus に感謝いたします!BmoPlusは、AIサブスクリプションのヘビーユーザー向けに特化した信頼性の高いAIアカウントサービスプロバイダーであり、安定した ChatGPT Plus / ChatGPT Pro (完全保証) / Claude Pro / Super Grok / Gemini Pro の公式代行チャージおよび即納アカウントを提供しています。こちらの<a href="https://shop.bmoplus.com/?utm_source=github">BmoPlus AIアカウント専門店/代行チャージ</a>経由でご登録・ご注文いただいたユーザー様は、GPTを 公式サイト価格の約1割(90% OFF) という驚異的な価格でご利用いただけます!</td>
|
||||
</tr>
|
||||
|
||||
<tr>
|
||||
<td width="180"><a href="https://bestproxy.com/?keyword=a2e8iuol"><img src="assets/partners/logos/bestproxy.png" alt="bestproxy" width="150"></a></td>
|
||||
<td>Bestproxy のご支援に感謝します!<a href="https://bestproxy.com/?keyword=a2e8iuol">Bestproxy</a> は高純度の住宅IPを提供し、1アカウント1IP専有をサポートしています。実際の家庭ネットワークとフィンガープリント分離を組み合わせることで、リンク環境の分離を実現し、関連付けによるリスク管理の確率を低減します。</td>
|
||||
</tr>
|
||||
|
||||
<tr>
|
||||
<td width="180"><a href="https://pateway.ai/?ch=1tsfr51"><img src="assets/partners/logos/pateway.png" alt="pateway" width="150"></a></td>
|
||||
<td>PatewayAI のご支援に感謝します!PatewayAI は、ヘビーAI開発者向けに公式直結を重視した高品質モデルAPIリレーサービスプロバイダーです。Claude 全シリーズおよび Codex シリーズモデルを提供し、100%公式ソースから直接供給 — 偽りなし、水増しなし、検証歓迎。課金は完全透明で、トークン単位の請求書を1件ずつ監査可能です。
|
||||
エンタープライズ級の高同時接続にも対応し、法人顧客向けに専用管理プラットフォームを提供しています。法人顧客は正式な契約を締結し、請求書の発行が可能です。詳細は公式サイトでお問い合わせください。
|
||||
<a href="https://pateway.ai/?ch=1tsfr51">こちらのリンク</a>から登録すると、$3 のトライアルクレジットがもらえます。チャージは最大40%オフ、友達紹介で双方にボーナス付与 — 紹介報酬は最大 $150。</td>
|
||||
</tr>
|
||||
|
||||
</table>
|
||||
|
||||
## エコシステム
|
||||
|
||||
Sub2API を拡張・統合するコミュニティプロジェクト:
|
||||
|
||||
| プロジェクト | 説明 | 機能 |
|
||||
|---------|-------------|----------|
|
||||
| ~~[Sub2ApiPay](https://github.com/touwaeriol/sub2apipay)~~ | ~~セルフサービス決済システム~~ | **内蔵済み** — 決済機能は Sub2API に統合されました。別途デプロイは不要です。[決済設定ガイド](docs/PAYMENT.md)をご参照ください |
|
||||
| [sub2api-mobile](https://github.com/ckken/sub2api-mobile) | モバイル管理コンソール | ユーザー管理、アカウント管理、監視ダッシュボード、マルチバックエンド切り替えが可能なクロスプラットフォームアプリ(iOS/Android/Web)。Expo + React Native で構築 |
|
||||
|
||||
## 技術スタック
|
||||
|
||||
| コンポーネント | 技術 |
|
||||
|-----------|------------|
|
||||
| バックエンド | Go 1.25.7, Gin, Ent |
|
||||
| フロントエンド | Vue 3.4+, Vite 5+, TailwindCSS |
|
||||
| データベース | PostgreSQL 15+ |
|
||||
| キャッシュ/キュー | Redis 7+ |
|
||||
|
||||
---
|
||||
|
||||
## Nginx リバースプロキシに関する注意
|
||||
|
||||
Sub2API(または CRS)を Nginx でリバースプロキシし、Codex CLI と組み合わせて使用する場合、Nginx の `http` ブロックに以下の設定を追加してください:
|
||||
|
||||
```nginx
|
||||
underscores_in_headers on;
|
||||
```
|
||||
|
||||
Nginx はデフォルトでアンダースコアを含むヘッダー(例: `session_id`)を破棄するため、マルチアカウント構成でのスティッキーセッションルーティングに支障をきたします。
|
||||
|
||||
---
|
||||
|
||||
## デプロイ
|
||||
|
||||
### 方法1: スクリプトによるインストール(推奨)
|
||||
|
||||
GitHub Releases からビルド済みバイナリをダウンロードするワンクリックインストールスクリプトです。
|
||||
|
||||
#### 前提条件
|
||||
|
||||
- Linux サーバー(amd64 または arm64)
|
||||
- PostgreSQL 15+(インストール済みかつ稼働中)
|
||||
- Redis 7+(インストール済みかつ稼働中)
|
||||
- root 権限
|
||||
|
||||
#### インストール手順
|
||||
|
||||
```bash
|
||||
curl -sSL https://raw.githubusercontent.com/Wei-Shaw/sub2api/main/deploy/install.sh | sudo bash
|
||||
```
|
||||
|
||||
スクリプトは以下を実行します:
|
||||
1. システムアーキテクチャの検出
|
||||
2. 最新リリースのダウンロード
|
||||
3. バイナリを `/opt/sub2api` にインストール
|
||||
4. systemd サービスの作成
|
||||
5. システムユーザーと権限の設定
|
||||
|
||||
#### インストール後の作業
|
||||
|
||||
```bash
|
||||
# 1. サービスを起動
|
||||
sudo systemctl start sub2api
|
||||
|
||||
# 2. 起動時の自動起動を有効化
|
||||
sudo systemctl enable sub2api
|
||||
|
||||
# 3. ブラウザでセットアップウィザードを開く
|
||||
# http://YOUR_SERVER_IP:8080
|
||||
```
|
||||
|
||||
セットアップウィザードでは以下の設定を行います:
|
||||
- データベース設定
|
||||
- Redis 設定
|
||||
- 管理者アカウントの作成
|
||||
|
||||
#### アップグレード
|
||||
|
||||
**管理ダッシュボード**の左上にある**アップデートを確認**ボタンをクリックすることで、ダッシュボードから直接アップグレードできます。
|
||||
|
||||
Web インターフェースでは以下が可能です:
|
||||
- 新しいバージョンの自動確認
|
||||
- ワンクリックでのアップデートのダウンロードと適用
|
||||
- 必要に応じたロールバック
|
||||
|
||||
#### よく使うコマンド
|
||||
|
||||
```bash
|
||||
# ステータスを確認
|
||||
sudo systemctl status sub2api
|
||||
|
||||
# ログを表示
|
||||
sudo journalctl -u sub2api -f
|
||||
|
||||
# サービスを再起動
|
||||
sudo systemctl restart sub2api
|
||||
|
||||
# アンインストール
|
||||
curl -sSL https://raw.githubusercontent.com/Wei-Shaw/sub2api/main/deploy/install.sh | sudo bash -s -- uninstall -y
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### 方法2: Docker Compose(推奨)
|
||||
|
||||
PostgreSQL と Redis のコンテナを含む Docker Compose でデプロイします。
|
||||
|
||||
#### 前提条件
|
||||
|
||||
- Docker 20.10+
|
||||
- Docker Compose v2+
|
||||
|
||||
#### クイックスタート(ワンクリックデプロイ)
|
||||
|
||||
自動デプロイスクリプトを使用して簡単にセットアップできます:
|
||||
|
||||
```bash
|
||||
# デプロイ用ディレクトリを作成
|
||||
mkdir -p sub2api-deploy && cd sub2api-deploy
|
||||
|
||||
# デプロイ準備スクリプトをダウンロードして実行
|
||||
curl -sSL https://raw.githubusercontent.com/Wei-Shaw/sub2api/main/deploy/docker-deploy.sh | bash
|
||||
|
||||
# サービスを起動
|
||||
docker compose up -d
|
||||
|
||||
# ログを表示
|
||||
docker compose logs -f sub2api
|
||||
```
|
||||
|
||||
**スクリプトの動作内容:**
|
||||
- `docker-compose.local.yml`(`docker-compose.yml` として保存)と `.env.example` をダウンロード
|
||||
- セキュアな認証情報(JWT_SECRET、TOTP_ENCRYPTION_KEY、POSTGRES_PASSWORD)を自動生成
|
||||
- 自動生成されたシークレットで `.env` ファイルを作成
|
||||
- データディレクトリを作成(バックアップ・移行が容易なローカルディレクトリを使用)
|
||||
- 生成された認証情報を参照用に表示
|
||||
|
||||
#### 手動デプロイ
|
||||
|
||||
手動でセットアップする場合:
|
||||
|
||||
```bash
|
||||
# 1. リポジトリをクローン
|
||||
git clone https://github.com/Wei-Shaw/sub2api.git
|
||||
cd sub2api/deploy
|
||||
|
||||
# 2. 環境設定ファイルをコピー
|
||||
cp .env.example .env
|
||||
|
||||
# 3. 設定を編集(セキュアなパスワードを生成)
|
||||
nano .env
|
||||
```
|
||||
|
||||
**`.env` の必須設定:**
|
||||
|
||||
```bash
|
||||
# PostgreSQL パスワード(必須)
|
||||
POSTGRES_PASSWORD=your_secure_password_here
|
||||
|
||||
# JWT シークレット(推奨 - 再起動後もユーザーのログイン状態を保持)
|
||||
JWT_SECRET=your_jwt_secret_here
|
||||
|
||||
# TOTP 暗号化キー(推奨 - 再起動後も二要素認証を維持)
|
||||
TOTP_ENCRYPTION_KEY=your_totp_key_here
|
||||
|
||||
# オプション: 管理者アカウント
|
||||
ADMIN_EMAIL=admin@example.com
|
||||
ADMIN_PASSWORD=your_admin_password
|
||||
|
||||
# オプション: カスタムポート
|
||||
SERVER_PORT=8080
|
||||
```
|
||||
|
||||
**セキュアなシークレットの生成方法:**
|
||||
```bash
|
||||
# JWT_SECRET を生成
|
||||
openssl rand -hex 32
|
||||
|
||||
# TOTP_ENCRYPTION_KEY を生成
|
||||
openssl rand -hex 32
|
||||
|
||||
# POSTGRES_PASSWORD を生成
|
||||
openssl rand -hex 32
|
||||
```
|
||||
|
||||
```bash
|
||||
# 4. データディレクトリを作成(ローカルバージョンの場合)
|
||||
mkdir -p data postgres_data redis_data
|
||||
|
||||
# 5. すべてのサービスを起動
|
||||
# オプション A: ローカルディレクトリバージョン(推奨 - 移行が容易)
|
||||
docker compose -f docker-compose.local.yml up -d
|
||||
|
||||
# オプション B: 名前付きボリュームバージョン(シンプルなセットアップ)
|
||||
docker compose up -d
|
||||
|
||||
# 6. ステータスを確認
|
||||
docker compose -f docker-compose.local.yml ps
|
||||
|
||||
# 7. ログを表示
|
||||
docker compose -f docker-compose.local.yml logs -f sub2api
|
||||
```
|
||||
|
||||
#### デプロイバージョン
|
||||
|
||||
| バージョン | データストレージ | 移行 | 推奨用途 |
|
||||
|---------|-------------|-----------|----------|
|
||||
| **docker-compose.local.yml** | ローカルディレクトリ | ✅ 容易(ディレクトリ全体を tar) | 本番環境、頻繁なバックアップ |
|
||||
| **docker-compose.yml** | 名前付きボリューム | ⚠️ docker コマンドが必要 | シンプルなセットアップ |
|
||||
|
||||
**推奨:** データ管理が容易な `docker-compose.local.yml`(スクリプトによるデプロイ)を使用してください。
|
||||
|
||||
#### アクセス
|
||||
|
||||
ブラウザで `http://YOUR_SERVER_IP:8080` を開いてください。
|
||||
|
||||
管理者パスワードが自動生成された場合は、ログで確認できます:
|
||||
```bash
|
||||
docker compose -f docker-compose.local.yml logs sub2api | grep "admin password"
|
||||
```
|
||||
|
||||
#### アップグレード
|
||||
|
||||
```bash
|
||||
# 最新イメージをプルしてコンテナを再作成
|
||||
docker compose -f docker-compose.local.yml pull
|
||||
docker compose -f docker-compose.local.yml up -d
|
||||
```
|
||||
|
||||
#### 簡単な移行(ローカルディレクトリバージョン)
|
||||
|
||||
`docker-compose.local.yml` を使用している場合、新しいサーバーへの移行が簡単です:
|
||||
|
||||
```bash
|
||||
# 移行元サーバーにて
|
||||
docker compose -f docker-compose.local.yml down
|
||||
cd ..
|
||||
tar czf sub2api-complete.tar.gz sub2api-deploy/
|
||||
|
||||
# 新しいサーバーに転送
|
||||
scp sub2api-complete.tar.gz user@new-server:/path/
|
||||
|
||||
# 移行先サーバーにて
|
||||
tar xzf sub2api-complete.tar.gz
|
||||
cd sub2api-deploy/
|
||||
docker compose -f docker-compose.local.yml up -d
|
||||
```
|
||||
|
||||
#### よく使うコマンド
|
||||
|
||||
```bash
|
||||
# すべてのサービスを停止
|
||||
docker compose -f docker-compose.local.yml down
|
||||
|
||||
# 再起動
|
||||
docker compose -f docker-compose.local.yml restart
|
||||
|
||||
# すべてのログを表示
|
||||
docker compose -f docker-compose.local.yml logs -f
|
||||
|
||||
# すべてのデータを削除(注意!)
|
||||
docker compose -f docker-compose.local.yml down
|
||||
rm -rf data/ postgres_data/ redis_data/
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### 方法3: ソースからビルド
|
||||
|
||||
開発やカスタマイズのためにソースコードからビルドして実行します。
|
||||
|
||||
#### 前提条件
|
||||
|
||||
- Go 1.21+
|
||||
- Node.js 18+
|
||||
- PostgreSQL 15+
|
||||
- Redis 7+
|
||||
|
||||
#### ビルド手順
|
||||
|
||||
```bash
|
||||
# 1. リポジトリをクローン
|
||||
git clone https://github.com/Wei-Shaw/sub2api.git
|
||||
cd sub2api
|
||||
|
||||
# 2. pnpm をインストール(未インストールの場合)
|
||||
npm install -g pnpm
|
||||
|
||||
# 3. フロントエンドをビルド
|
||||
cd frontend
|
||||
pnpm install
|
||||
pnpm run build
|
||||
# 出力先: ../backend/internal/web/dist/
|
||||
|
||||
# 4. フロントエンドを組み込んだバックエンドをビルド
|
||||
cd ../backend
|
||||
go build -tags embed -o sub2api ./cmd/server
|
||||
|
||||
# 5. 設定ファイルを作成
|
||||
cp ../deploy/config.example.yaml ./config.yaml
|
||||
|
||||
# 6. 設定を編集
|
||||
nano config.yaml
|
||||
```
|
||||
|
||||
> **注意:** `-tags embed` フラグはフロントエンドをバイナリに組み込みます。このフラグがない場合、バイナリはフロントエンド UI を提供しません。
|
||||
|
||||
**`config.yaml` の主要設定:**
|
||||
|
||||
```yaml
|
||||
server:
|
||||
host: "0.0.0.0"
|
||||
port: 8080
|
||||
mode: "release"
|
||||
|
||||
database:
|
||||
host: "localhost"
|
||||
port: 5432
|
||||
user: "postgres"
|
||||
password: "your_password"
|
||||
dbname: "sub2api"
|
||||
|
||||
redis:
|
||||
host: "localhost"
|
||||
port: 6379
|
||||
password: ""
|
||||
|
||||
jwt:
|
||||
secret: "change-this-to-a-secure-random-string"
|
||||
expire_hour: 24
|
||||
|
||||
default:
|
||||
user_concurrency: 5
|
||||
user_balance: 0
|
||||
api_key_prefix: "sk-"
|
||||
rate_multiplier: 1.0
|
||||
```
|
||||
|
||||
### Sora ステータス(一時的に利用不可)
|
||||
|
||||
> ⚠️ Sora 関連の機能は、上流統合およびメディア配信の技術的問題により一時的に利用できません。
|
||||
> 現時点では本番環境で Sora に依存しないでください。
|
||||
> 既存の `gateway.sora_*` 設定キーは予約されていますが、これらの問題が解決されるまで有効にならない場合があります。
|
||||
|
||||
`config.yaml` では追加のセキュリティ関連オプションも利用できます:
|
||||
|
||||
- `cors.allowed_origins` - CORS 許可リスト
|
||||
- `security.url_allowlist` - 上流/価格/CRS ホストの許可リスト
|
||||
- `security.url_allowlist.enabled` - URL バリデーションの無効化(注意して使用)
|
||||
- `security.url_allowlist.allow_insecure_http` - バリデーション無効時に HTTP URL を許可
|
||||
- `security.url_allowlist.allow_private_hosts` - プライベート/ローカル IP アドレスを許可
|
||||
- `security.response_headers.enabled` - 設定可能なレスポンスヘッダーフィルタリングを有効化(無効時はデフォルトの許可リストを使用)
|
||||
- `security.csp` - Content-Security-Policy ヘッダーの制御
|
||||
- `billing.circuit_breaker` - 課金エラー時にフェイルクローズ
|
||||
- `server.trusted_proxies` - X-Forwarded-For パースの有効化
|
||||
- `turnstile.required` - リリースモードでの Turnstile 必須化
|
||||
|
||||
**⚠️ セキュリティ警告: HTTP URL 設定**
|
||||
|
||||
`security.url_allowlist.enabled=false` の場合、システムはデフォルトで最小限の URL バリデーションを行い、**HTTP URL を拒否**して HTTPS のみを許可します。HTTP URL を許可するには(開発環境や内部テスト用など)、以下を明示的に設定する必要があります:
|
||||
|
||||
```yaml
|
||||
security:
|
||||
url_allowlist:
|
||||
enabled: false # 許可リストチェックを無効化
|
||||
allow_insecure_http: true # HTTP URL を許可(⚠️ セキュリティリスクあり)
|
||||
```
|
||||
|
||||
**または環境変数で設定:**
|
||||
|
||||
```bash
|
||||
SECURITY_URL_ALLOWLIST_ENABLED=false
|
||||
SECURITY_URL_ALLOWLIST_ALLOW_INSECURE_HTTP=true
|
||||
```
|
||||
|
||||
**HTTP を許可するリスク:**
|
||||
- API キーとデータが**平文**で送信される(傍受の危険性)
|
||||
- **中間者攻撃(MITM)**を受けやすい
|
||||
- **本番環境には不適切**
|
||||
|
||||
**HTTP を使用すべき場面:**
|
||||
- ✅ ローカルサーバーでの開発・テスト(http://localhost)
|
||||
- ✅ 信頼できるエンドポイントを持つ内部ネットワーク
|
||||
- ✅ HTTPS 取得前のアカウント接続テスト
|
||||
- ❌ 本番環境(HTTPS のみを使用)
|
||||
|
||||
**この設定なしで表示されるエラー例:**
|
||||
```
|
||||
Invalid base URL: invalid url scheme: http
|
||||
```
|
||||
|
||||
URL バリデーションまたはレスポンスヘッダーフィルタリングを無効にする場合は、ネットワーク層を強化してください:
|
||||
- 上流ドメイン/IP のエグレス許可リストを適用
|
||||
- プライベート/ループバック/リンクローカル範囲をブロック
|
||||
- TLS のみのアウトバウンドトラフィックを強制
|
||||
- プロキシで機密性の高い上流レスポンスヘッダーを除去
|
||||
|
||||
```bash
|
||||
# 7. アプリケーションを実行
|
||||
./sub2api
|
||||
```
|
||||
|
||||
#### 開発モード
|
||||
|
||||
```bash
|
||||
# バックエンド(ホットリロード付き)
|
||||
cd backend
|
||||
go run ./cmd/server
|
||||
|
||||
# フロントエンド(ホットリロード付き)
|
||||
cd frontend
|
||||
pnpm run dev
|
||||
```
|
||||
|
||||
#### コード生成
|
||||
|
||||
`backend/ent/schema` を編集した場合、Ent + Wire を再生成してください:
|
||||
|
||||
```bash
|
||||
cd backend
|
||||
go generate ./ent
|
||||
go generate ./cmd/server
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## シンプルモード
|
||||
|
||||
シンプルモードは、フル SaaS 機能を必要とせず、素早くアクセスしたい個人開発者や社内チーム向けに設計されています。
|
||||
|
||||
- 有効化: 環境変数 `RUN_MODE=simple` を設定
|
||||
- 違い: SaaS 関連機能を非表示にし、課金プロセスをスキップ
|
||||
- セキュリティに関する注意: 本番環境では `SIMPLE_MODE_CONFIRM=true` も設定する必要があります
|
||||
|
||||
---
|
||||
|
||||
## Antigravity サポート
|
||||
|
||||
Sub2API は [Antigravity](https://antigravity.so/) アカウントをサポートしています。認証後、Claude および Gemini モデル用の専用エンドポイントが利用可能になります。
|
||||
|
||||
### 専用エンドポイント
|
||||
|
||||
| エンドポイント | モデル |
|
||||
|----------|-------|
|
||||
| `/antigravity/v1/messages` | Claude モデル |
|
||||
| `/antigravity/v1beta/` | Gemini モデル |
|
||||
|
||||
### Claude Code の設定
|
||||
|
||||
```bash
|
||||
export ANTHROPIC_BASE_URL="http://localhost:8080/antigravity"
|
||||
export ANTHROPIC_AUTH_TOKEN="sk-xxx"
|
||||
```
|
||||
|
||||
### ハイブリッドスケジューリングモード
|
||||
|
||||
Antigravity アカウントはオプションの**ハイブリッドスケジューリング**をサポートしています。有効にすると、汎用エンドポイント `/v1/messages` および `/v1beta/` も Antigravity アカウントにリクエストをルーティングします。
|
||||
|
||||
> **⚠️ 警告**: Anthropic Claude と Antigravity Claude は**同じ会話コンテキスト内で混在させることはできません**。グループを使用して適切に分離してください。
|
||||
|
||||
### 既知の問題
|
||||
|
||||
Claude Code では、Plan Mode を自動的に終了できません。(通常、ネイティブの Claude API を使用する場合、計画が完了すると Claude Code はユーザーに計画を承認または拒否するオプションをポップアップ表示します。)
|
||||
|
||||
**回避策**: `Shift + Tab` を押して手動で Plan Mode を終了し、計画を承認または拒否するためのレスポンスを入力してください。
|
||||
|
||||
---
|
||||
|
||||
## プロジェクト構成
|
||||
|
||||
```
|
||||
sub2api/
|
||||
├── backend/ # Go バックエンドサービス
|
||||
│ ├── cmd/server/ # アプリケーションエントリ
|
||||
│ ├── internal/ # 内部モジュール
|
||||
│ │ ├── config/ # 設定
|
||||
│ │ ├── model/ # データモデル
|
||||
│ │ ├── service/ # ビジネスロジック
|
||||
│ │ ├── handler/ # HTTP ハンドラー
|
||||
│ │ └── gateway/ # API ゲートウェイコア
|
||||
│ └── resources/ # 静的リソース
|
||||
│
|
||||
├── frontend/ # Vue 3 フロントエンド
|
||||
│ └── src/
|
||||
│ ├── api/ # API 呼び出し
|
||||
│ ├── stores/ # 状態管理
|
||||
│ ├── views/ # ページコンポーネント
|
||||
│ └── components/ # 再利用可能なコンポーネント
|
||||
│
|
||||
└── deploy/ # デプロイファイル
|
||||
├── docker-compose.yml # Docker Compose 設定
|
||||
├── .env.example # Docker Compose 用環境変数
|
||||
├── config.example.yaml # バイナリデプロイ用フル設定ファイル
|
||||
└── install.sh # ワンクリックインストールスクリプト
|
||||
```
|
||||
|
||||
## 免責事項
|
||||
|
||||
> **本プロジェクトをご利用の前に、以下をよくお読みください:**
|
||||
>
|
||||
> :rotating_light: **利用規約違反のリスク**: 本プロジェクトの使用は Anthropic の利用規約に違反する可能性があります。使用前に Anthropic のユーザー契約をよくお読みください。本プロジェクトの使用に起因するすべてのリスクは、ユーザー自身が負うものとします。
|
||||
>
|
||||
> :book: **免責事項**: 本プロジェクトは技術的な学習および研究目的のみで提供されています。作者は、本プロジェクトの使用によるアカウント停止、サービス中断、その他の損失について一切の責任を負いません。
|
||||
|
||||
---
|
||||
|
||||
## スター履歴
|
||||
|
||||
<a href="https://star-history.com/#Wei-Shaw/sub2api&Date">
|
||||
<picture>
|
||||
<source media="(prefers-color-scheme: dark)" srcset="https://api.star-history.com/svg?repos=Wei-Shaw/sub2api&type=Date&theme=dark" />
|
||||
<source media="(prefers-color-scheme: light)" srcset="https://api.star-history.com/svg?repos=Wei-Shaw/sub2api&type=Date" />
|
||||
<img alt="Star History Chart" src="https://api.star-history.com/svg?repos=Wei-Shaw/sub2api&type=Date" />
|
||||
</picture>
|
||||
</a>
|
||||
|
||||
---
|
||||
|
||||
## ライセンス
|
||||
|
||||
本プロジェクトは [GNU Lesser General Public License v3.0](LICENSE)(またはそれ以降のバージョン)の下でライセンスされています。
|
||||
|
||||
Copyright (c) 2026 Wesley Liddick
|
||||
|
||||
---
|
||||
|
||||
<div align="center">
|
||||
|
||||
**このプロジェクトが役に立ったら、ぜひスターをお願いします!**
|
||||
|
||||
</div>
|
||||
BIN
assets/partners/logos/AICodeMirror.jpg
Normal file
|
After Width: | Height: | Size: 83 KiB |
BIN
assets/partners/logos/aigocode.png
Normal file
|
After Width: | Height: | Size: 38 KiB |
BIN
assets/partners/logos/bestproxy.png
Normal file
|
After Width: | Height: | Size: 9.5 KiB |
BIN
assets/partners/logos/bmoplus.jpg
Normal file
|
After Width: | Height: | Size: 7.8 KiB |
BIN
assets/partners/logos/ctok.png
Normal file
|
After Width: | Height: | Size: 246 KiB |
BIN
assets/partners/logos/packycode.png
Normal file
|
After Width: | Height: | Size: 8.1 KiB |
BIN
assets/partners/logos/pateway.png
Normal file
|
After Width: | Height: | Size: 8.0 KiB |
BIN
assets/partners/logos/poixe.png
Normal file
|
After Width: | Height: | Size: 39 KiB |
BIN
assets/partners/logos/silkapi.png
Normal file
|
After Width: | Height: | Size: 2.8 KiB |
BIN
assets/partners/logos/ylscode.png
Normal file
|
After Width: | Height: | Size: 26 KiB |
@@ -33,7 +33,7 @@ func main() {
|
||||
}()
|
||||
|
||||
userRepo := repository.NewUserRepository(client, sqlDB)
|
||||
authService := service.NewAuthService(client, userRepo, nil, nil, cfg, nil, nil, nil, nil, nil, nil)
|
||||
authService := service.NewAuthService(client, userRepo, nil, nil, cfg, nil, nil, nil, nil, nil, nil, nil)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
@@ -1 +1 @@
|
||||
0.1.88
|
||||
0.1.120
|
||||
|
||||
@@ -13,6 +13,7 @@ import (
|
||||
"github.com/Wei-Shaw/sub2api/ent"
|
||||
"github.com/Wei-Shaw/sub2api/internal/config"
|
||||
"github.com/Wei-Shaw/sub2api/internal/handler"
|
||||
"github.com/Wei-Shaw/sub2api/internal/payment"
|
||||
"github.com/Wei-Shaw/sub2api/internal/repository"
|
||||
"github.com/Wei-Shaw/sub2api/internal/server"
|
||||
"github.com/Wei-Shaw/sub2api/internal/server/middleware"
|
||||
@@ -35,6 +36,7 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) {
|
||||
// Business layer ProviderSets
|
||||
repository.ProviderSet,
|
||||
service.ProviderSet,
|
||||
payment.ProviderSet,
|
||||
middleware.ProviderSet,
|
||||
handler.ProviderSet,
|
||||
|
||||
@@ -76,7 +78,6 @@ func provideCleanup(
|
||||
opsCleanup *service.OpsCleanupService,
|
||||
opsScheduledReport *service.OpsScheduledReportService,
|
||||
opsSystemLogSink *service.OpsSystemLogSink,
|
||||
soraMediaCleanup *service.SoraMediaCleanupService,
|
||||
schedulerSnapshot *service.SchedulerSnapshotService,
|
||||
tokenRefresh *service.TokenRefreshService,
|
||||
accountExpiry *service.AccountExpiryService,
|
||||
@@ -95,6 +96,8 @@ func provideCleanup(
|
||||
openAIGateway *service.OpenAIGatewayService,
|
||||
scheduledTestRunner *service.ScheduledTestRunnerService,
|
||||
backupSvc *service.BackupService,
|
||||
paymentOrderExpiry *service.PaymentOrderExpiryService,
|
||||
channelMonitorRunner *service.ChannelMonitorRunner,
|
||||
) func() {
|
||||
return func() {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
@@ -125,12 +128,6 @@ func provideCleanup(
|
||||
}
|
||||
return nil
|
||||
}},
|
||||
{"SoraMediaCleanupService", func() error {
|
||||
if soraMediaCleanup != nil {
|
||||
soraMediaCleanup.Stop()
|
||||
}
|
||||
return nil
|
||||
}},
|
||||
{"OpsAlertEvaluatorService", func() error {
|
||||
if opsAlertEvaluator != nil {
|
||||
opsAlertEvaluator.Stop()
|
||||
@@ -237,6 +234,18 @@ func provideCleanup(
|
||||
}
|
||||
return nil
|
||||
}},
|
||||
{"PaymentOrderExpiryService", func() error {
|
||||
if paymentOrderExpiry != nil {
|
||||
paymentOrderExpiry.Stop()
|
||||
}
|
||||
return nil
|
||||
}},
|
||||
{"ChannelMonitorRunner", func() error {
|
||||
if channelMonitorRunner != nil {
|
||||
channelMonitorRunner.Stop()
|
||||
}
|
||||
return nil
|
||||
}},
|
||||
}
|
||||
|
||||
infraSteps := []cleanupStep{
|
||||
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
"github.com/Wei-Shaw/sub2api/internal/config"
|
||||
"github.com/Wei-Shaw/sub2api/internal/handler"
|
||||
"github.com/Wei-Shaw/sub2api/internal/handler/admin"
|
||||
"github.com/Wei-Shaw/sub2api/internal/payment"
|
||||
"github.com/Wei-Shaw/sub2api/internal/repository"
|
||||
"github.com/Wei-Shaw/sub2api/internal/server"
|
||||
"github.com/Wei-Shaw/sub2api/internal/server/middleware"
|
||||
@@ -49,7 +50,8 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) {
|
||||
refreshTokenCache := repository.NewRefreshTokenCache(redisClient)
|
||||
settingRepository := repository.NewSettingRepository(client)
|
||||
groupRepository := repository.NewGroupRepository(client, db)
|
||||
settingService := service.ProvideSettingService(settingRepository, groupRepository, configConfig)
|
||||
proxyRepository := repository.NewProxyRepository(client, db)
|
||||
settingService := service.ProvideSettingService(settingRepository, groupRepository, proxyRepository, configConfig)
|
||||
emailCache := repository.NewEmailCache(redisClient)
|
||||
emailService := service.NewEmailService(settingRepository, emailCache)
|
||||
turnstileVerifier := repository.NewTurnstileVerifier()
|
||||
@@ -59,16 +61,18 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) {
|
||||
billingCache := repository.NewBillingCache(redisClient)
|
||||
userSubscriptionRepository := repository.NewUserSubscriptionRepository(client)
|
||||
apiKeyRepository := repository.NewAPIKeyRepository(client, db)
|
||||
billingCacheService := service.NewBillingCacheService(billingCache, userRepository, userSubscriptionRepository, apiKeyRepository, configConfig)
|
||||
userRPMCache := repository.NewUserRPMCache(redisClient)
|
||||
userGroupRateRepository := repository.NewUserGroupRateRepository(db)
|
||||
billingCacheService := service.ProvideBillingCacheService(billingCache, userRepository, userSubscriptionRepository, apiKeyRepository, userRPMCache, userGroupRateRepository, configConfig)
|
||||
apiKeyCache := repository.NewAPIKeyCache(redisClient)
|
||||
apiKeyService := service.NewAPIKeyService(apiKeyRepository, userRepository, groupRepository, userSubscriptionRepository, userGroupRateRepository, apiKeyCache, configConfig)
|
||||
apiKeyService.SetRateLimitCacheInvalidator(billingCache)
|
||||
apiKeyService := service.ProvideAPIKeyService(apiKeyRepository, userRepository, groupRepository, userSubscriptionRepository, userGroupRateRepository, apiKeyCache, configConfig, billingCacheService)
|
||||
apiKeyAuthCacheInvalidator := service.ProvideAPIKeyAuthCacheInvalidator(apiKeyService)
|
||||
promoService := service.NewPromoService(promoCodeRepository, userRepository, billingCacheService, client, apiKeyAuthCacheInvalidator)
|
||||
subscriptionService := service.NewSubscriptionService(groupRepository, userSubscriptionRepository, billingCacheService, client, configConfig)
|
||||
authService := service.NewAuthService(client, userRepository, redeemCodeRepository, refreshTokenCache, configConfig, settingService, emailService, turnstileService, emailQueueService, promoService, subscriptionService)
|
||||
userService := service.NewUserService(userRepository, apiKeyAuthCacheInvalidator, billingCache)
|
||||
affiliateRepository := repository.NewAffiliateRepository(client, db)
|
||||
affiliateService := service.NewAffiliateService(affiliateRepository, settingService, apiKeyAuthCacheInvalidator, billingCacheService)
|
||||
authService := service.NewAuthService(client, userRepository, redeemCodeRepository, refreshTokenCache, configConfig, settingService, emailService, turnstileService, emailQueueService, promoService, subscriptionService, affiliateService)
|
||||
userService := service.NewUserService(userRepository, settingRepository, apiKeyAuthCacheInvalidator, billingCache)
|
||||
redeemCache := repository.NewRedeemCache(redisClient)
|
||||
redeemService := service.NewRedeemService(redeemCodeRepository, userRepository, subscriptionService, redeemCache, billingCacheService, client, apiKeyAuthCacheInvalidator)
|
||||
secretEncryptor, err := repository.NewAESEncryptor(configConfig)
|
||||
@@ -78,10 +82,9 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) {
|
||||
totpCache := repository.NewTotpCache(redisClient)
|
||||
totpService := service.NewTotpService(userRepository, secretEncryptor, totpCache, settingService, emailService, emailQueueService)
|
||||
authHandler := handler.NewAuthHandler(configConfig, authService, userService, settingService, promoService, redeemService, totpService)
|
||||
userHandler := handler.NewUserHandler(userService)
|
||||
userHandler := handler.NewUserHandler(userService, authService, emailService, emailCache, affiliateService)
|
||||
apiKeyHandler := handler.NewAPIKeyHandler(apiKeyService)
|
||||
usageLogRepository := repository.NewUsageLogRepository(client, db)
|
||||
usageBillingRepository := repository.NewUsageBillingRepository(client, db)
|
||||
usageService := service.NewUsageService(usageLogRepository, userRepository, client, apiKeyAuthCacheInvalidator)
|
||||
usageHandler := handler.NewUsageHandler(usageService, apiKeyService)
|
||||
redeemHandler := handler.NewRedeemHandler(redeemService)
|
||||
@@ -90,6 +93,9 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) {
|
||||
announcementReadRepository := repository.NewAnnouncementReadRepository(client)
|
||||
announcementService := service.NewAnnouncementService(announcementRepository, announcementReadRepository, userRepository, userSubscriptionRepository)
|
||||
announcementHandler := handler.NewAnnouncementHandler(announcementService)
|
||||
channelMonitorRepository := repository.NewChannelMonitorRepository(client, db)
|
||||
channelMonitorService := service.ProvideChannelMonitorService(channelMonitorRepository, secretEncryptor)
|
||||
channelMonitorUserHandler := handler.NewChannelMonitorUserHandler(channelMonitorService, settingService)
|
||||
dashboardAggregationRepository := repository.NewDashboardAggregationRepository(db)
|
||||
dashboardStatsCache := repository.NewDashboardCache(redisClient, configConfig)
|
||||
dashboardService := service.NewDashboardService(usageLogRepository, dashboardAggregationRepository, dashboardStatsCache, configConfig)
|
||||
@@ -99,17 +105,19 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) {
|
||||
}
|
||||
dashboardAggregationService := service.ProvideDashboardAggregationService(dashboardAggregationRepository, timingWheelService, configConfig)
|
||||
dashboardHandler := admin.NewDashboardHandler(dashboardService, dashboardAggregationService)
|
||||
schedulerCache := repository.NewSchedulerCache(redisClient)
|
||||
schedulerCache := repository.ProvideSchedulerCache(redisClient, configConfig)
|
||||
accountRepository := repository.NewAccountRepository(client, db, schedulerCache)
|
||||
soraAccountRepository := repository.NewSoraAccountRepository(db)
|
||||
proxyRepository := repository.NewProxyRepository(client, db)
|
||||
proxyExitInfoProber := repository.NewProxyExitInfoProber(configConfig)
|
||||
proxyLatencyCache := repository.NewProxyLatencyCache(redisClient)
|
||||
privacyClientFactory := providePrivacyClientFactory()
|
||||
adminService := service.NewAdminService(userRepository, groupRepository, accountRepository, soraAccountRepository, proxyRepository, apiKeyRepository, redeemCodeRepository, userGroupRateRepository, billingCacheService, proxyExitInfoProber, proxyLatencyCache, apiKeyAuthCacheInvalidator, client, settingService, subscriptionService, userSubscriptionRepository, privacyClientFactory)
|
||||
adminService := service.NewAdminService(userRepository, groupRepository, accountRepository, proxyRepository, apiKeyRepository, redeemCodeRepository, userGroupRateRepository, userRPMCache, billingCacheService, proxyExitInfoProber, proxyLatencyCache, apiKeyAuthCacheInvalidator, client, settingService, subscriptionService, userSubscriptionRepository, privacyClientFactory)
|
||||
concurrencyCache := repository.ProvideConcurrencyCache(redisClient, configConfig)
|
||||
concurrencyService := service.ProvideConcurrencyService(concurrencyCache, accountRepository, configConfig)
|
||||
adminUserHandler := admin.NewUserHandler(adminService, concurrencyService)
|
||||
sessionLimitCache := repository.ProvideSessionLimitCache(redisClient, configConfig)
|
||||
rpmCache := repository.NewRPMCache(redisClient)
|
||||
groupCapacityService := service.NewGroupCapacityService(accountRepository, groupRepository, concurrencyService, sessionLimitCache, rpmCache)
|
||||
groupHandler := admin.NewGroupHandler(adminService, dashboardService, groupCapacityService)
|
||||
claudeOAuthClient := repository.NewClaudeOAuthClient()
|
||||
oAuthService := service.NewOAuthService(proxyRepository, claudeOAuthClient)
|
||||
openAIOAuthClient := repository.NewOpenAIOAuthClient()
|
||||
@@ -122,28 +130,30 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) {
|
||||
geminiQuotaService := service.NewGeminiQuotaService(configConfig, settingRepository)
|
||||
tempUnschedCache := repository.NewTempUnschedCache(redisClient)
|
||||
timeoutCounterCache := repository.NewTimeoutCounterCache(redisClient)
|
||||
openAI403CounterCache := repository.NewOpenAI403CounterCache(redisClient)
|
||||
geminiTokenCache := repository.NewGeminiTokenCache(redisClient)
|
||||
oauthRefreshAPI := service.NewOAuthRefreshAPI(accountRepository, geminiTokenCache)
|
||||
compositeTokenCacheInvalidator := service.NewCompositeTokenCacheInvalidator(geminiTokenCache)
|
||||
rateLimitService := service.ProvideRateLimitService(accountRepository, usageLogRepository, configConfig, geminiQuotaService, tempUnschedCache, timeoutCounterCache, settingService, compositeTokenCacheInvalidator)
|
||||
rateLimitService := service.ProvideRateLimitService(accountRepository, usageLogRepository, configConfig, geminiQuotaService, tempUnschedCache, timeoutCounterCache, openAI403CounterCache, settingService, compositeTokenCacheInvalidator)
|
||||
httpUpstream := repository.NewHTTPUpstream(configConfig)
|
||||
claudeUsageFetcher := repository.NewClaudeUsageFetcher(httpUpstream)
|
||||
antigravityQuotaFetcher := service.NewAntigravityQuotaFetcher(proxyRepository)
|
||||
usageCache := service.NewUsageCache()
|
||||
identityCache := repository.NewIdentityCache(redisClient)
|
||||
accountUsageService := service.NewAccountUsageService(accountRepository, usageLogRepository, claudeUsageFetcher, geminiQuotaService, antigravityQuotaFetcher, usageCache, identityCache)
|
||||
geminiTokenProvider := service.ProvideGeminiTokenProvider(accountRepository, geminiTokenCache, geminiOAuthService, oauthRefreshAPI)
|
||||
tlsFingerprintProfileRepository := repository.NewTLSFingerprintProfileRepository(client)
|
||||
tlsFingerprintProfileCache := repository.NewTLSFingerprintProfileCache(redisClient)
|
||||
tlsFingerprintProfileService := service.NewTLSFingerprintProfileService(tlsFingerprintProfileRepository, tlsFingerprintProfileCache)
|
||||
accountUsageService := service.NewAccountUsageService(accountRepository, usageLogRepository, claudeUsageFetcher, geminiQuotaService, antigravityQuotaFetcher, usageCache, identityCache, tlsFingerprintProfileService)
|
||||
oAuthRefreshAPI := service.ProvideOAuthRefreshAPI(accountRepository, geminiTokenCache)
|
||||
geminiTokenProvider := service.ProvideGeminiTokenProvider(accountRepository, geminiTokenCache, geminiOAuthService, oAuthRefreshAPI)
|
||||
claudeTokenProvider := service.ProvideClaudeTokenProvider(accountRepository, geminiTokenCache, oAuthService, oAuthRefreshAPI)
|
||||
gatewayCache := repository.NewGatewayCache(redisClient)
|
||||
schedulerOutboxRepository := repository.NewSchedulerOutboxRepository(db)
|
||||
schedulerSnapshotService := service.ProvideSchedulerSnapshotService(schedulerCache, schedulerOutboxRepository, accountRepository, groupRepository, configConfig)
|
||||
antigravityTokenProvider := service.ProvideAntigravityTokenProvider(accountRepository, geminiTokenCache, antigravityOAuthService, oauthRefreshAPI)
|
||||
antigravityGatewayService := service.NewAntigravityGatewayService(accountRepository, gatewayCache, schedulerSnapshotService, antigravityTokenProvider, rateLimitService, httpUpstream, settingService)
|
||||
accountTestService := service.NewAccountTestService(accountRepository, geminiTokenProvider, antigravityGatewayService, httpUpstream, configConfig)
|
||||
antigravityTokenProvider := service.ProvideAntigravityTokenProvider(accountRepository, geminiTokenCache, antigravityOAuthService, oAuthRefreshAPI, tempUnschedCache)
|
||||
internal500CounterCache := repository.NewInternal500CounterCache(redisClient)
|
||||
antigravityGatewayService := service.NewAntigravityGatewayService(accountRepository, gatewayCache, schedulerSnapshotService, antigravityTokenProvider, rateLimitService, httpUpstream, settingService, internal500CounterCache)
|
||||
accountTestService := service.NewAccountTestService(accountRepository, geminiTokenProvider, claudeTokenProvider, antigravityGatewayService, httpUpstream, configConfig, tlsFingerprintProfileService)
|
||||
crsSyncService := service.NewCRSSyncService(accountRepository, proxyRepository, oAuthService, openAIOAuthService, geminiOAuthService, configConfig)
|
||||
sessionLimitCache := repository.ProvideSessionLimitCache(redisClient, configConfig)
|
||||
rpmCache := repository.NewRPMCache(redisClient)
|
||||
groupCapacityService := service.NewGroupCapacityService(accountRepository, groupRepository, concurrencyService, sessionLimitCache, rpmCache)
|
||||
groupHandler := admin.NewGroupHandler(adminService, dashboardService, groupCapacityService)
|
||||
accountHandler := admin.NewAccountHandler(adminService, oAuthService, openAIOAuthService, geminiOAuthService, antigravityOAuthService, rateLimitService, accountUsageService, accountTestService, concurrencyService, crsSyncService, sessionLimitCache, rpmCache, compositeTokenCacheInvalidator)
|
||||
adminAnnouncementHandler := admin.NewAnnouncementHandler(announcementService)
|
||||
dataManagementService := service.NewDataManagementService()
|
||||
@@ -160,6 +170,7 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) {
|
||||
adminRedeemHandler := admin.NewRedeemHandler(adminService, redeemService)
|
||||
promoHandler := admin.NewPromoHandler(promoService)
|
||||
opsRepository := repository.NewOpsRepository(db)
|
||||
usageBillingRepository := repository.NewUsageBillingRepository(client, db)
|
||||
pricingRemoteClient := repository.ProvidePricingRemoteClient(configConfig)
|
||||
pricingService, err := service.ProvidePricingService(configConfig, pricingRemoteClient)
|
||||
if err != nil {
|
||||
@@ -168,20 +179,26 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) {
|
||||
billingService := service.NewBillingService(configConfig, pricingService)
|
||||
identityService := service.NewIdentityService(identityCache)
|
||||
deferredService := service.ProvideDeferredService(accountRepository, timingWheelService)
|
||||
claudeTokenProvider := service.ProvideClaudeTokenProvider(accountRepository, geminiTokenCache, oAuthService, oauthRefreshAPI)
|
||||
digestSessionStore := service.NewDigestSessionStore()
|
||||
gatewayService := service.NewGatewayService(accountRepository, groupRepository, usageLogRepository, usageBillingRepository, userRepository, userSubscriptionRepository, userGroupRateRepository, gatewayCache, configConfig, schedulerSnapshotService, concurrencyService, billingService, rateLimitService, billingCacheService, identityService, httpUpstream, deferredService, claudeTokenProvider, sessionLimitCache, rpmCache, digestSessionStore, settingService)
|
||||
openAITokenProvider := service.ProvideOpenAITokenProvider(accountRepository, geminiTokenCache, openAIOAuthService, oauthRefreshAPI)
|
||||
openAIGatewayService := service.NewOpenAIGatewayService(accountRepository, usageLogRepository, usageBillingRepository, userRepository, userSubscriptionRepository, userGroupRateRepository, gatewayCache, configConfig, schedulerSnapshotService, concurrencyService, billingService, rateLimitService, billingCacheService, httpUpstream, deferredService, openAITokenProvider)
|
||||
channelRepository := repository.NewChannelRepository(db)
|
||||
channelService := service.NewChannelService(channelRepository, groupRepository, apiKeyAuthCacheInvalidator, pricingService)
|
||||
modelPricingResolver := service.NewModelPricingResolver(channelService, billingService)
|
||||
balanceNotifyService := service.ProvideBalanceNotifyService(emailService, settingRepository, accountRepository)
|
||||
gatewayService := service.NewGatewayService(accountRepository, groupRepository, usageLogRepository, usageBillingRepository, userRepository, userSubscriptionRepository, userGroupRateRepository, gatewayCache, configConfig, schedulerSnapshotService, concurrencyService, billingService, rateLimitService, billingCacheService, identityService, httpUpstream, deferredService, claudeTokenProvider, sessionLimitCache, rpmCache, digestSessionStore, settingService, tlsFingerprintProfileService, channelService, modelPricingResolver, balanceNotifyService)
|
||||
openAITokenProvider := service.ProvideOpenAITokenProvider(accountRepository, geminiTokenCache, openAIOAuthService, oAuthRefreshAPI)
|
||||
openAIGatewayService := service.NewOpenAIGatewayService(accountRepository, usageLogRepository, usageBillingRepository, userRepository, userSubscriptionRepository, userGroupRateRepository, gatewayCache, configConfig, schedulerSnapshotService, concurrencyService, billingService, rateLimitService, billingCacheService, httpUpstream, deferredService, openAITokenProvider, modelPricingResolver, channelService, balanceNotifyService, settingService)
|
||||
geminiMessagesCompatService := service.NewGeminiMessagesCompatService(accountRepository, groupRepository, gatewayCache, schedulerSnapshotService, geminiTokenProvider, rateLimitService, httpUpstream, antigravityGatewayService, configConfig)
|
||||
opsSystemLogSink := service.ProvideOpsSystemLogSink(opsRepository)
|
||||
opsService := service.NewOpsService(opsRepository, settingRepository, configConfig, accountRepository, userRepository, concurrencyService, gatewayService, openAIGatewayService, geminiMessagesCompatService, antigravityGatewayService, opsSystemLogSink)
|
||||
soraS3Storage := service.NewSoraS3Storage(settingService)
|
||||
settingService.SetOnS3UpdateCallback(soraS3Storage.RefreshClient)
|
||||
soraGenerationRepository := repository.NewSoraGenerationRepository(db)
|
||||
soraQuotaService := service.NewSoraQuotaService(userRepository, groupRepository, settingService)
|
||||
soraGenerationService := service.NewSoraGenerationService(soraGenerationRepository, soraS3Storage, soraQuotaService)
|
||||
settingHandler := admin.NewSettingHandler(settingService, emailService, turnstileService, opsService, soraS3Storage)
|
||||
encryptionKey, err := payment.ProvideEncryptionKey(configConfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
paymentConfigService := service.ProvidePaymentConfigService(client, settingRepository, encryptionKey)
|
||||
registry := payment.ProvideRegistry()
|
||||
defaultLoadBalancer := payment.ProvideDefaultLoadBalancer(client, encryptionKey)
|
||||
paymentService := service.NewPaymentService(client, registry, defaultLoadBalancer, redeemService, subscriptionService, paymentConfigService, userRepository, groupRepository, affiliateService)
|
||||
settingHandler := admin.NewSettingHandler(settingService, emailService, turnstileService, opsService, paymentConfigService, paymentService)
|
||||
opsHandler := admin.NewOpsHandler(opsService)
|
||||
updateCache := repository.NewUpdateCache(redisClient)
|
||||
gitHubReleaseClient := repository.ProvideGitHubReleaseClient(configConfig)
|
||||
@@ -202,27 +219,33 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) {
|
||||
errorPassthroughCache := repository.NewErrorPassthroughCache(redisClient)
|
||||
errorPassthroughService := service.NewErrorPassthroughService(errorPassthroughRepository, errorPassthroughCache)
|
||||
errorPassthroughHandler := admin.NewErrorPassthroughHandler(errorPassthroughService)
|
||||
tlsFingerprintProfileHandler := admin.NewTLSFingerprintProfileHandler(tlsFingerprintProfileService)
|
||||
adminAPIKeyHandler := admin.NewAdminAPIKeyHandler(adminService)
|
||||
scheduledTestPlanRepository := repository.NewScheduledTestPlanRepository(db)
|
||||
scheduledTestResultRepository := repository.NewScheduledTestResultRepository(db)
|
||||
scheduledTestService := service.ProvideScheduledTestService(scheduledTestPlanRepository, scheduledTestResultRepository)
|
||||
scheduledTestHandler := admin.NewScheduledTestHandler(scheduledTestService)
|
||||
adminHandlers := handler.ProvideAdminHandlers(dashboardHandler, adminUserHandler, groupHandler, accountHandler, adminAnnouncementHandler, dataManagementHandler, backupHandler, oAuthHandler, openAIOAuthHandler, geminiOAuthHandler, antigravityOAuthHandler, proxyHandler, adminRedeemHandler, promoHandler, settingHandler, opsHandler, systemHandler, adminSubscriptionHandler, adminUsageHandler, userAttributeHandler, errorPassthroughHandler, adminAPIKeyHandler, scheduledTestHandler)
|
||||
channelHandler := admin.NewChannelHandler(channelService, billingService)
|
||||
channelMonitorHandler := admin.NewChannelMonitorHandler(channelMonitorService)
|
||||
channelMonitorRequestTemplateRepository := repository.NewChannelMonitorRequestTemplateRepository(client, db)
|
||||
channelMonitorRequestTemplateService := service.NewChannelMonitorRequestTemplateService(channelMonitorRequestTemplateRepository)
|
||||
channelMonitorRequestTemplateHandler := admin.NewChannelMonitorRequestTemplateHandler(channelMonitorRequestTemplateService)
|
||||
paymentHandler := admin.NewPaymentHandler(paymentService, paymentConfigService)
|
||||
affiliateHandler := admin.NewAffiliateHandler(affiliateService, adminService)
|
||||
adminHandlers := handler.ProvideAdminHandlers(dashboardHandler, adminUserHandler, groupHandler, accountHandler, adminAnnouncementHandler, dataManagementHandler, backupHandler, oAuthHandler, openAIOAuthHandler, geminiOAuthHandler, antigravityOAuthHandler, proxyHandler, adminRedeemHandler, promoHandler, settingHandler, opsHandler, systemHandler, adminSubscriptionHandler, adminUsageHandler, userAttributeHandler, errorPassthroughHandler, tlsFingerprintProfileHandler, adminAPIKeyHandler, scheduledTestHandler, channelHandler, channelMonitorHandler, channelMonitorRequestTemplateHandler, paymentHandler, affiliateHandler)
|
||||
usageRecordWorkerPool := service.NewUsageRecordWorkerPool(configConfig)
|
||||
userMsgQueueCache := repository.NewUserMsgQueueCache(redisClient)
|
||||
userMessageQueueService := service.ProvideUserMessageQueueService(userMsgQueueCache, rpmCache, configConfig)
|
||||
gatewayHandler := handler.NewGatewayHandler(gatewayService, geminiMessagesCompatService, antigravityGatewayService, userService, concurrencyService, billingCacheService, usageService, apiKeyService, usageRecordWorkerPool, errorPassthroughService, userMessageQueueService, configConfig, settingService)
|
||||
openAIGatewayHandler := handler.NewOpenAIGatewayHandler(openAIGatewayService, concurrencyService, billingCacheService, apiKeyService, usageRecordWorkerPool, errorPassthroughService, configConfig)
|
||||
soraSDKClient := service.ProvideSoraSDKClient(configConfig, httpUpstream, openAITokenProvider, accountRepository, soraAccountRepository)
|
||||
soraMediaStorage := service.ProvideSoraMediaStorage(configConfig)
|
||||
soraGatewayService := service.NewSoraGatewayService(soraSDKClient, rateLimitService, httpUpstream, configConfig)
|
||||
soraClientHandler := handler.NewSoraClientHandler(soraGenerationService, soraQuotaService, soraS3Storage, soraGatewayService, gatewayService, soraMediaStorage, apiKeyService)
|
||||
soraGatewayHandler := handler.NewSoraGatewayHandler(gatewayService, soraGatewayService, concurrencyService, billingCacheService, usageRecordWorkerPool, configConfig)
|
||||
handlerSettingHandler := handler.ProvideSettingHandler(settingService, buildInfo)
|
||||
totpHandler := handler.NewTotpHandler(totpService)
|
||||
handlerPaymentHandler := handler.NewPaymentHandler(paymentService, paymentConfigService, channelService)
|
||||
paymentWebhookHandler := handler.NewPaymentWebhookHandler(paymentService, registry)
|
||||
availableChannelHandler := handler.NewAvailableChannelHandler(channelService, apiKeyService, settingService)
|
||||
idempotencyCoordinator := service.ProvideIdempotencyCoordinator(idempotencyRepository, configConfig)
|
||||
idempotencyCleanupService := service.ProvideIdempotencyCleanupService(idempotencyRepository, configConfig)
|
||||
handlers := handler.ProvideHandlers(authHandler, userHandler, apiKeyHandler, usageHandler, redeemHandler, subscriptionHandler, announcementHandler, adminHandlers, gatewayHandler, openAIGatewayHandler, soraGatewayHandler, soraClientHandler, handlerSettingHandler, totpHandler, idempotencyCoordinator, idempotencyCleanupService)
|
||||
handlers := handler.ProvideHandlers(authHandler, userHandler, apiKeyHandler, usageHandler, redeemHandler, subscriptionHandler, announcementHandler, channelMonitorUserHandler, adminHandlers, gatewayHandler, openAIGatewayHandler, handlerSettingHandler, totpHandler, handlerPaymentHandler, paymentWebhookHandler, availableChannelHandler, idempotencyCoordinator, idempotencyCleanupService)
|
||||
jwtAuthMiddleware := middleware.NewJWTAuthMiddleware(authService, userService)
|
||||
adminAuthMiddleware := middleware.NewAdminAuthMiddleware(authService, userService, settingService)
|
||||
apiKeyAuthMiddleware := middleware.NewAPIKeyAuthMiddleware(apiKeyService, subscriptionService, configConfig)
|
||||
@@ -231,14 +254,15 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) {
|
||||
opsMetricsCollector := service.ProvideOpsMetricsCollector(opsRepository, settingRepository, accountRepository, concurrencyService, db, redisClient, configConfig)
|
||||
opsAggregationService := service.ProvideOpsAggregationService(opsRepository, settingRepository, db, redisClient, configConfig)
|
||||
opsAlertEvaluatorService := service.ProvideOpsAlertEvaluatorService(opsService, opsRepository, emailService, redisClient, configConfig)
|
||||
opsCleanupService := service.ProvideOpsCleanupService(opsRepository, db, redisClient, configConfig)
|
||||
opsCleanupService := service.ProvideOpsCleanupService(opsRepository, db, redisClient, configConfig, channelMonitorService)
|
||||
opsScheduledReportService := service.ProvideOpsScheduledReportService(opsService, userService, emailService, redisClient, configConfig)
|
||||
soraMediaCleanupService := service.ProvideSoraMediaCleanupService(soraMediaStorage, configConfig)
|
||||
tokenRefreshService := service.ProvideTokenRefreshService(accountRepository, soraAccountRepository, oAuthService, openAIOAuthService, geminiOAuthService, antigravityOAuthService, compositeTokenCacheInvalidator, schedulerCache, configConfig, tempUnschedCache, privacyClientFactory, proxyRepository, oauthRefreshAPI)
|
||||
tokenRefreshService := service.ProvideTokenRefreshService(accountRepository, oAuthService, openAIOAuthService, geminiOAuthService, antigravityOAuthService, compositeTokenCacheInvalidator, schedulerCache, configConfig, tempUnschedCache, privacyClientFactory, proxyRepository, oAuthRefreshAPI)
|
||||
accountExpiryService := service.ProvideAccountExpiryService(accountRepository)
|
||||
subscriptionExpiryService := service.ProvideSubscriptionExpiryService(userSubscriptionRepository)
|
||||
scheduledTestRunnerService := service.ProvideScheduledTestRunnerService(scheduledTestPlanRepository, scheduledTestService, accountTestService, rateLimitService, configConfig)
|
||||
v := provideCleanup(client, redisClient, opsMetricsCollector, opsAggregationService, opsAlertEvaluatorService, opsCleanupService, opsScheduledReportService, opsSystemLogSink, soraMediaCleanupService, schedulerSnapshotService, tokenRefreshService, accountExpiryService, subscriptionExpiryService, usageCleanupService, idempotencyCleanupService, pricingService, emailQueueService, billingCacheService, usageRecordWorkerPool, subscriptionService, oAuthService, openAIOAuthService, geminiOAuthService, antigravityOAuthService, openAIGatewayService, scheduledTestRunnerService, backupService)
|
||||
paymentOrderExpiryService := service.ProvidePaymentOrderExpiryService(paymentService)
|
||||
channelMonitorRunner := service.ProvideChannelMonitorRunner(channelMonitorService, settingService)
|
||||
v := provideCleanup(client, redisClient, opsMetricsCollector, opsAggregationService, opsAlertEvaluatorService, opsCleanupService, opsScheduledReportService, opsSystemLogSink, schedulerSnapshotService, tokenRefreshService, accountExpiryService, subscriptionExpiryService, usageCleanupService, idempotencyCleanupService, pricingService, emailQueueService, billingCacheService, usageRecordWorkerPool, subscriptionService, oAuthService, openAIOAuthService, geminiOAuthService, antigravityOAuthService, openAIGatewayService, scheduledTestRunnerService, backupService, paymentOrderExpiryService, channelMonitorRunner)
|
||||
application := &Application{
|
||||
Server: httpServer,
|
||||
Cleanup: v,
|
||||
@@ -273,7 +297,6 @@ func provideCleanup(
|
||||
opsCleanup *service.OpsCleanupService,
|
||||
opsScheduledReport *service.OpsScheduledReportService,
|
||||
opsSystemLogSink *service.OpsSystemLogSink,
|
||||
soraMediaCleanup *service.SoraMediaCleanupService,
|
||||
schedulerSnapshot *service.SchedulerSnapshotService,
|
||||
tokenRefresh *service.TokenRefreshService,
|
||||
accountExpiry *service.AccountExpiryService,
|
||||
@@ -292,6 +315,8 @@ func provideCleanup(
|
||||
openAIGateway *service.OpenAIGatewayService,
|
||||
scheduledTestRunner *service.ScheduledTestRunnerService,
|
||||
backupSvc *service.BackupService,
|
||||
paymentOrderExpiry *service.PaymentOrderExpiryService,
|
||||
channelMonitorRunner *service.ChannelMonitorRunner,
|
||||
) func() {
|
||||
return func() {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
@@ -321,12 +346,6 @@ func provideCleanup(
|
||||
}
|
||||
return nil
|
||||
}},
|
||||
{"SoraMediaCleanupService", func() error {
|
||||
if soraMediaCleanup != nil {
|
||||
soraMediaCleanup.Stop()
|
||||
}
|
||||
return nil
|
||||
}},
|
||||
{"OpsAlertEvaluatorService", func() error {
|
||||
if opsAlertEvaluator != nil {
|
||||
opsAlertEvaluator.Stop()
|
||||
@@ -433,6 +452,18 @@ func provideCleanup(
|
||||
}
|
||||
return nil
|
||||
}},
|
||||
{"PaymentOrderExpiryService", func() error {
|
||||
if paymentOrderExpiry != nil {
|
||||
paymentOrderExpiry.Stop()
|
||||
}
|
||||
return nil
|
||||
}},
|
||||
{"ChannelMonitorRunner", func() error {
|
||||
if channelMonitorRunner != nil {
|
||||
channelMonitorRunner.Stop()
|
||||
}
|
||||
return nil
|
||||
}},
|
||||
}
|
||||
|
||||
infraSteps := []cleanupStep{
|
||||
|
||||
@@ -43,7 +43,7 @@ func TestProvideCleanup_WithMinimalDependencies_NoPanic(t *testing.T) {
|
||||
subscriptionExpirySvc := service.NewSubscriptionExpiryService(nil, time.Second)
|
||||
pricingSvc := service.NewPricingService(cfg, nil)
|
||||
emailQueueSvc := service.NewEmailQueueService(nil, 1)
|
||||
billingCacheSvc := service.NewBillingCacheService(nil, nil, nil, nil, cfg)
|
||||
billingCacheSvc := service.NewBillingCacheService(nil, nil, nil, nil, nil, nil, cfg)
|
||||
idempotencyCleanupSvc := service.NewIdempotencyCleanupService(nil, cfg)
|
||||
schedulerSnapshotSvc := service.NewSchedulerSnapshotService(nil, nil, nil, nil, cfg)
|
||||
opsSystemLogSinkSvc := service.NewOpsSystemLogSink(nil)
|
||||
@@ -57,7 +57,6 @@ func TestProvideCleanup_WithMinimalDependencies_NoPanic(t *testing.T) {
|
||||
&service.OpsCleanupService{},
|
||||
&service.OpsScheduledReportService{},
|
||||
opsSystemLogSinkSvc,
|
||||
&service.SoraMediaCleanupService{},
|
||||
schedulerSnapshotSvc,
|
||||
tokenRefreshSvc,
|
||||
accountExpirySvc,
|
||||
@@ -76,6 +75,8 @@ func TestProvideCleanup_WithMinimalDependencies_NoPanic(t *testing.T) {
|
||||
nil, // openAIGateway
|
||||
nil, // scheduledTestRunner
|
||||
nil, // backupSvc
|
||||
nil, // paymentOrderExpiry
|
||||
nil, // channelMonitorRunner
|
||||
)
|
||||
|
||||
require.NotPanics(t, func() {
|
||||
|
||||
266
backend/ent/authidentity.go
Normal file
@@ -0,0 +1,266 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package ent
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"entgo.io/ent"
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"github.com/Wei-Shaw/sub2api/ent/authidentity"
|
||||
"github.com/Wei-Shaw/sub2api/ent/user"
|
||||
)
|
||||
|
||||
// AuthIdentity is the model entity for the AuthIdentity schema.
|
||||
type AuthIdentity struct {
|
||||
config `json:"-"`
|
||||
// ID of the ent.
|
||||
ID int64 `json:"id,omitempty"`
|
||||
// CreatedAt holds the value of the "created_at" field.
|
||||
CreatedAt time.Time `json:"created_at,omitempty"`
|
||||
// UpdatedAt holds the value of the "updated_at" field.
|
||||
UpdatedAt time.Time `json:"updated_at,omitempty"`
|
||||
// UserID holds the value of the "user_id" field.
|
||||
UserID int64 `json:"user_id,omitempty"`
|
||||
// ProviderType holds the value of the "provider_type" field.
|
||||
ProviderType string `json:"provider_type,omitempty"`
|
||||
// ProviderKey holds the value of the "provider_key" field.
|
||||
ProviderKey string `json:"provider_key,omitempty"`
|
||||
// ProviderSubject holds the value of the "provider_subject" field.
|
||||
ProviderSubject string `json:"provider_subject,omitempty"`
|
||||
// VerifiedAt holds the value of the "verified_at" field.
|
||||
VerifiedAt *time.Time `json:"verified_at,omitempty"`
|
||||
// Issuer holds the value of the "issuer" field.
|
||||
Issuer *string `json:"issuer,omitempty"`
|
||||
// Metadata holds the value of the "metadata" field.
|
||||
Metadata map[string]interface{} `json:"metadata,omitempty"`
|
||||
// Edges holds the relations/edges for other nodes in the graph.
|
||||
// The values are being populated by the AuthIdentityQuery when eager-loading is set.
|
||||
Edges AuthIdentityEdges `json:"edges"`
|
||||
selectValues sql.SelectValues
|
||||
}
|
||||
|
||||
// AuthIdentityEdges holds the relations/edges for other nodes in the graph.
|
||||
type AuthIdentityEdges struct {
|
||||
// User holds the value of the user edge.
|
||||
User *User `json:"user,omitempty"`
|
||||
// Channels holds the value of the channels edge.
|
||||
Channels []*AuthIdentityChannel `json:"channels,omitempty"`
|
||||
// AdoptionDecisions holds the value of the adoption_decisions edge.
|
||||
AdoptionDecisions []*IdentityAdoptionDecision `json:"adoption_decisions,omitempty"`
|
||||
// loadedTypes holds the information for reporting if a
|
||||
// type was loaded (or requested) in eager-loading or not.
|
||||
loadedTypes [3]bool
|
||||
}
|
||||
|
||||
// UserOrErr returns the User value or an error if the edge
|
||||
// was not loaded in eager-loading, or loaded but was not found.
|
||||
func (e AuthIdentityEdges) UserOrErr() (*User, error) {
|
||||
if e.User != nil {
|
||||
return e.User, nil
|
||||
} else if e.loadedTypes[0] {
|
||||
return nil, &NotFoundError{label: user.Label}
|
||||
}
|
||||
return nil, &NotLoadedError{edge: "user"}
|
||||
}
|
||||
|
||||
// ChannelsOrErr returns the Channels value or an error if the edge
|
||||
// was not loaded in eager-loading.
|
||||
func (e AuthIdentityEdges) ChannelsOrErr() ([]*AuthIdentityChannel, error) {
|
||||
if e.loadedTypes[1] {
|
||||
return e.Channels, nil
|
||||
}
|
||||
return nil, &NotLoadedError{edge: "channels"}
|
||||
}
|
||||
|
||||
// AdoptionDecisionsOrErr returns the AdoptionDecisions value or an error if the edge
|
||||
// was not loaded in eager-loading.
|
||||
func (e AuthIdentityEdges) AdoptionDecisionsOrErr() ([]*IdentityAdoptionDecision, error) {
|
||||
if e.loadedTypes[2] {
|
||||
return e.AdoptionDecisions, nil
|
||||
}
|
||||
return nil, &NotLoadedError{edge: "adoption_decisions"}
|
||||
}
|
||||
|
||||
// scanValues returns the types for scanning values from sql.Rows.
|
||||
func (*AuthIdentity) scanValues(columns []string) ([]any, error) {
|
||||
values := make([]any, len(columns))
|
||||
for i := range columns {
|
||||
switch columns[i] {
|
||||
case authidentity.FieldMetadata:
|
||||
values[i] = new([]byte)
|
||||
case authidentity.FieldID, authidentity.FieldUserID:
|
||||
values[i] = new(sql.NullInt64)
|
||||
case authidentity.FieldProviderType, authidentity.FieldProviderKey, authidentity.FieldProviderSubject, authidentity.FieldIssuer:
|
||||
values[i] = new(sql.NullString)
|
||||
case authidentity.FieldCreatedAt, authidentity.FieldUpdatedAt, authidentity.FieldVerifiedAt:
|
||||
values[i] = new(sql.NullTime)
|
||||
default:
|
||||
values[i] = new(sql.UnknownType)
|
||||
}
|
||||
}
|
||||
return values, nil
|
||||
}
|
||||
|
||||
// assignValues assigns the values that were returned from sql.Rows (after scanning)
|
||||
// to the AuthIdentity fields.
|
||||
func (_m *AuthIdentity) assignValues(columns []string, values []any) error {
|
||||
if m, n := len(values), len(columns); m < n {
|
||||
return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
|
||||
}
|
||||
for i := range columns {
|
||||
switch columns[i] {
|
||||
case authidentity.FieldID:
|
||||
value, ok := values[i].(*sql.NullInt64)
|
||||
if !ok {
|
||||
return fmt.Errorf("unexpected type %T for field id", value)
|
||||
}
|
||||
_m.ID = int64(value.Int64)
|
||||
case authidentity.FieldCreatedAt:
|
||||
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field created_at", values[i])
|
||||
} else if value.Valid {
|
||||
_m.CreatedAt = value.Time
|
||||
}
|
||||
case authidentity.FieldUpdatedAt:
|
||||
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field updated_at", values[i])
|
||||
} else if value.Valid {
|
||||
_m.UpdatedAt = value.Time
|
||||
}
|
||||
case authidentity.FieldUserID:
|
||||
if value, ok := values[i].(*sql.NullInt64); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field user_id", values[i])
|
||||
} else if value.Valid {
|
||||
_m.UserID = value.Int64
|
||||
}
|
||||
case authidentity.FieldProviderType:
|
||||
if value, ok := values[i].(*sql.NullString); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field provider_type", values[i])
|
||||
} else if value.Valid {
|
||||
_m.ProviderType = value.String
|
||||
}
|
||||
case authidentity.FieldProviderKey:
|
||||
if value, ok := values[i].(*sql.NullString); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field provider_key", values[i])
|
||||
} else if value.Valid {
|
||||
_m.ProviderKey = value.String
|
||||
}
|
||||
case authidentity.FieldProviderSubject:
|
||||
if value, ok := values[i].(*sql.NullString); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field provider_subject", values[i])
|
||||
} else if value.Valid {
|
||||
_m.ProviderSubject = value.String
|
||||
}
|
||||
case authidentity.FieldVerifiedAt:
|
||||
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field verified_at", values[i])
|
||||
} else if value.Valid {
|
||||
_m.VerifiedAt = new(time.Time)
|
||||
*_m.VerifiedAt = value.Time
|
||||
}
|
||||
case authidentity.FieldIssuer:
|
||||
if value, ok := values[i].(*sql.NullString); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field issuer", values[i])
|
||||
} else if value.Valid {
|
||||
_m.Issuer = new(string)
|
||||
*_m.Issuer = value.String
|
||||
}
|
||||
case authidentity.FieldMetadata:
|
||||
if value, ok := values[i].(*[]byte); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field metadata", values[i])
|
||||
} else if value != nil && len(*value) > 0 {
|
||||
if err := json.Unmarshal(*value, &_m.Metadata); err != nil {
|
||||
return fmt.Errorf("unmarshal field metadata: %w", err)
|
||||
}
|
||||
}
|
||||
default:
|
||||
_m.selectValues.Set(columns[i], values[i])
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Value returns the ent.Value that was dynamically selected and assigned to the AuthIdentity.
|
||||
// This includes values selected through modifiers, order, etc.
|
||||
func (_m *AuthIdentity) Value(name string) (ent.Value, error) {
|
||||
return _m.selectValues.Get(name)
|
||||
}
|
||||
|
||||
// QueryUser queries the "user" edge of the AuthIdentity entity.
|
||||
func (_m *AuthIdentity) QueryUser() *UserQuery {
|
||||
return NewAuthIdentityClient(_m.config).QueryUser(_m)
|
||||
}
|
||||
|
||||
// QueryChannels queries the "channels" edge of the AuthIdentity entity.
|
||||
func (_m *AuthIdentity) QueryChannels() *AuthIdentityChannelQuery {
|
||||
return NewAuthIdentityClient(_m.config).QueryChannels(_m)
|
||||
}
|
||||
|
||||
// QueryAdoptionDecisions queries the "adoption_decisions" edge of the AuthIdentity entity.
|
||||
func (_m *AuthIdentity) QueryAdoptionDecisions() *IdentityAdoptionDecisionQuery {
|
||||
return NewAuthIdentityClient(_m.config).QueryAdoptionDecisions(_m)
|
||||
}
|
||||
|
||||
// Update returns a builder for updating this AuthIdentity.
|
||||
// Note that you need to call AuthIdentity.Unwrap() before calling this method if this AuthIdentity
|
||||
// was returned from a transaction, and the transaction was committed or rolled back.
|
||||
func (_m *AuthIdentity) Update() *AuthIdentityUpdateOne {
|
||||
return NewAuthIdentityClient(_m.config).UpdateOne(_m)
|
||||
}
|
||||
|
||||
// Unwrap unwraps the AuthIdentity entity that was returned from a transaction after it was closed,
|
||||
// so that all future queries will be executed through the driver which created the transaction.
|
||||
func (_m *AuthIdentity) Unwrap() *AuthIdentity {
|
||||
_tx, ok := _m.config.driver.(*txDriver)
|
||||
if !ok {
|
||||
panic("ent: AuthIdentity is not a transactional entity")
|
||||
}
|
||||
_m.config.driver = _tx.drv
|
||||
return _m
|
||||
}
|
||||
|
||||
// String implements the fmt.Stringer.
|
||||
func (_m *AuthIdentity) String() string {
|
||||
var builder strings.Builder
|
||||
builder.WriteString("AuthIdentity(")
|
||||
builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID))
|
||||
builder.WriteString("created_at=")
|
||||
builder.WriteString(_m.CreatedAt.Format(time.ANSIC))
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("updated_at=")
|
||||
builder.WriteString(_m.UpdatedAt.Format(time.ANSIC))
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("user_id=")
|
||||
builder.WriteString(fmt.Sprintf("%v", _m.UserID))
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("provider_type=")
|
||||
builder.WriteString(_m.ProviderType)
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("provider_key=")
|
||||
builder.WriteString(_m.ProviderKey)
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("provider_subject=")
|
||||
builder.WriteString(_m.ProviderSubject)
|
||||
builder.WriteString(", ")
|
||||
if v := _m.VerifiedAt; v != nil {
|
||||
builder.WriteString("verified_at=")
|
||||
builder.WriteString(v.Format(time.ANSIC))
|
||||
}
|
||||
builder.WriteString(", ")
|
||||
if v := _m.Issuer; v != nil {
|
||||
builder.WriteString("issuer=")
|
||||
builder.WriteString(*v)
|
||||
}
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("metadata=")
|
||||
builder.WriteString(fmt.Sprintf("%v", _m.Metadata))
|
||||
builder.WriteByte(')')
|
||||
return builder.String()
|
||||
}
|
||||
|
||||
// AuthIdentities is a parsable slice of AuthIdentity.
|
||||
type AuthIdentities []*AuthIdentity
|
||||
209
backend/ent/authidentity/authidentity.go
Normal file
@@ -0,0 +1,209 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package authidentity
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
)
|
||||
|
||||
const (
|
||||
// Label holds the string label denoting the authidentity type in the database.
|
||||
Label = "auth_identity"
|
||||
// FieldID holds the string denoting the id field in the database.
|
||||
FieldID = "id"
|
||||
// FieldCreatedAt holds the string denoting the created_at field in the database.
|
||||
FieldCreatedAt = "created_at"
|
||||
// FieldUpdatedAt holds the string denoting the updated_at field in the database.
|
||||
FieldUpdatedAt = "updated_at"
|
||||
// FieldUserID holds the string denoting the user_id field in the database.
|
||||
FieldUserID = "user_id"
|
||||
// FieldProviderType holds the string denoting the provider_type field in the database.
|
||||
FieldProviderType = "provider_type"
|
||||
// FieldProviderKey holds the string denoting the provider_key field in the database.
|
||||
FieldProviderKey = "provider_key"
|
||||
// FieldProviderSubject holds the string denoting the provider_subject field in the database.
|
||||
FieldProviderSubject = "provider_subject"
|
||||
// FieldVerifiedAt holds the string denoting the verified_at field in the database.
|
||||
FieldVerifiedAt = "verified_at"
|
||||
// FieldIssuer holds the string denoting the issuer field in the database.
|
||||
FieldIssuer = "issuer"
|
||||
// FieldMetadata holds the string denoting the metadata field in the database.
|
||||
FieldMetadata = "metadata"
|
||||
// EdgeUser holds the string denoting the user edge name in mutations.
|
||||
EdgeUser = "user"
|
||||
// EdgeChannels holds the string denoting the channels edge name in mutations.
|
||||
EdgeChannels = "channels"
|
||||
// EdgeAdoptionDecisions holds the string denoting the adoption_decisions edge name in mutations.
|
||||
EdgeAdoptionDecisions = "adoption_decisions"
|
||||
// Table holds the table name of the authidentity in the database.
|
||||
Table = "auth_identities"
|
||||
// UserTable is the table that holds the user relation/edge.
|
||||
UserTable = "auth_identities"
|
||||
// UserInverseTable is the table name for the User entity.
|
||||
// It exists in this package in order to avoid circular dependency with the "user" package.
|
||||
UserInverseTable = "users"
|
||||
// UserColumn is the table column denoting the user relation/edge.
|
||||
UserColumn = "user_id"
|
||||
// ChannelsTable is the table that holds the channels relation/edge.
|
||||
ChannelsTable = "auth_identity_channels"
|
||||
// ChannelsInverseTable is the table name for the AuthIdentityChannel entity.
|
||||
// It exists in this package in order to avoid circular dependency with the "authidentitychannel" package.
|
||||
ChannelsInverseTable = "auth_identity_channels"
|
||||
// ChannelsColumn is the table column denoting the channels relation/edge.
|
||||
ChannelsColumn = "identity_id"
|
||||
// AdoptionDecisionsTable is the table that holds the adoption_decisions relation/edge.
|
||||
AdoptionDecisionsTable = "identity_adoption_decisions"
|
||||
// AdoptionDecisionsInverseTable is the table name for the IdentityAdoptionDecision entity.
|
||||
// It exists in this package in order to avoid circular dependency with the "identityadoptiondecision" package.
|
||||
AdoptionDecisionsInverseTable = "identity_adoption_decisions"
|
||||
// AdoptionDecisionsColumn is the table column denoting the adoption_decisions relation/edge.
|
||||
AdoptionDecisionsColumn = "identity_id"
|
||||
)
|
||||
|
||||
// Columns holds all SQL columns for authidentity fields.
|
||||
var Columns = []string{
|
||||
FieldID,
|
||||
FieldCreatedAt,
|
||||
FieldUpdatedAt,
|
||||
FieldUserID,
|
||||
FieldProviderType,
|
||||
FieldProviderKey,
|
||||
FieldProviderSubject,
|
||||
FieldVerifiedAt,
|
||||
FieldIssuer,
|
||||
FieldMetadata,
|
||||
}
|
||||
|
||||
// ValidColumn reports if the column name is valid (part of the table columns).
|
||||
func ValidColumn(column string) bool {
|
||||
for i := range Columns {
|
||||
if column == Columns[i] {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
var (
|
||||
// DefaultCreatedAt holds the default value on creation for the "created_at" field.
|
||||
DefaultCreatedAt func() time.Time
|
||||
// DefaultUpdatedAt holds the default value on creation for the "updated_at" field.
|
||||
DefaultUpdatedAt func() time.Time
|
||||
// UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field.
|
||||
UpdateDefaultUpdatedAt func() time.Time
|
||||
// ProviderTypeValidator is a validator for the "provider_type" field. It is called by the builders before save.
|
||||
ProviderTypeValidator func(string) error
|
||||
// ProviderKeyValidator is a validator for the "provider_key" field. It is called by the builders before save.
|
||||
ProviderKeyValidator func(string) error
|
||||
// ProviderSubjectValidator is a validator for the "provider_subject" field. It is called by the builders before save.
|
||||
ProviderSubjectValidator func(string) error
|
||||
// DefaultMetadata holds the default value on creation for the "metadata" field.
|
||||
DefaultMetadata func() map[string]interface{}
|
||||
)
|
||||
|
||||
// OrderOption defines the ordering options for the AuthIdentity queries.
|
||||
type OrderOption func(*sql.Selector)
|
||||
|
||||
// ByID orders the results by the id field.
|
||||
func ByID(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldID, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByCreatedAt orders the results by the created_at field.
|
||||
func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByUpdatedAt orders the results by the updated_at field.
|
||||
func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByUserID orders the results by the user_id field.
|
||||
func ByUserID(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldUserID, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByProviderType orders the results by the provider_type field.
|
||||
func ByProviderType(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldProviderType, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByProviderKey orders the results by the provider_key field.
|
||||
func ByProviderKey(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldProviderKey, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByProviderSubject orders the results by the provider_subject field.
|
||||
func ByProviderSubject(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldProviderSubject, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByVerifiedAt orders the results by the verified_at field.
|
||||
func ByVerifiedAt(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldVerifiedAt, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByIssuer orders the results by the issuer field.
|
||||
func ByIssuer(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldIssuer, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByUserField orders the results by user field.
|
||||
func ByUserField(field string, opts ...sql.OrderTermOption) OrderOption {
|
||||
return func(s *sql.Selector) {
|
||||
sqlgraph.OrderByNeighborTerms(s, newUserStep(), sql.OrderByField(field, opts...))
|
||||
}
|
||||
}
|
||||
|
||||
// ByChannelsCount orders the results by channels count.
|
||||
func ByChannelsCount(opts ...sql.OrderTermOption) OrderOption {
|
||||
return func(s *sql.Selector) {
|
||||
sqlgraph.OrderByNeighborsCount(s, newChannelsStep(), opts...)
|
||||
}
|
||||
}
|
||||
|
||||
// ByChannels orders the results by channels terms.
|
||||
func ByChannels(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
|
||||
return func(s *sql.Selector) {
|
||||
sqlgraph.OrderByNeighborTerms(s, newChannelsStep(), append([]sql.OrderTerm{term}, terms...)...)
|
||||
}
|
||||
}
|
||||
|
||||
// ByAdoptionDecisionsCount orders the results by adoption_decisions count.
|
||||
func ByAdoptionDecisionsCount(opts ...sql.OrderTermOption) OrderOption {
|
||||
return func(s *sql.Selector) {
|
||||
sqlgraph.OrderByNeighborsCount(s, newAdoptionDecisionsStep(), opts...)
|
||||
}
|
||||
}
|
||||
|
||||
// ByAdoptionDecisions orders the results by adoption_decisions terms.
|
||||
func ByAdoptionDecisions(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
|
||||
return func(s *sql.Selector) {
|
||||
sqlgraph.OrderByNeighborTerms(s, newAdoptionDecisionsStep(), append([]sql.OrderTerm{term}, terms...)...)
|
||||
}
|
||||
}
|
||||
func newUserStep() *sqlgraph.Step {
|
||||
return sqlgraph.NewStep(
|
||||
sqlgraph.From(Table, FieldID),
|
||||
sqlgraph.To(UserInverseTable, FieldID),
|
||||
sqlgraph.Edge(sqlgraph.M2O, true, UserTable, UserColumn),
|
||||
)
|
||||
}
|
||||
func newChannelsStep() *sqlgraph.Step {
|
||||
return sqlgraph.NewStep(
|
||||
sqlgraph.From(Table, FieldID),
|
||||
sqlgraph.To(ChannelsInverseTable, FieldID),
|
||||
sqlgraph.Edge(sqlgraph.O2M, false, ChannelsTable, ChannelsColumn),
|
||||
)
|
||||
}
|
||||
func newAdoptionDecisionsStep() *sqlgraph.Step {
|
||||
return sqlgraph.NewStep(
|
||||
sqlgraph.From(Table, FieldID),
|
||||
sqlgraph.To(AdoptionDecisionsInverseTable, FieldID),
|
||||
sqlgraph.Edge(sqlgraph.O2M, false, AdoptionDecisionsTable, AdoptionDecisionsColumn),
|
||||
)
|
||||
}
|
||||
600
backend/ent/authidentity/where.go
Normal file
@@ -0,0 +1,600 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package authidentity
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||
)
|
||||
|
||||
// ID filters vertices based on their ID field.
|
||||
func ID(id int64) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldEQ(FieldID, id))
|
||||
}
|
||||
|
||||
// IDEQ applies the EQ predicate on the ID field.
|
||||
func IDEQ(id int64) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldEQ(FieldID, id))
|
||||
}
|
||||
|
||||
// IDNEQ applies the NEQ predicate on the ID field.
|
||||
func IDNEQ(id int64) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldNEQ(FieldID, id))
|
||||
}
|
||||
|
||||
// IDIn applies the In predicate on the ID field.
|
||||
func IDIn(ids ...int64) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldIn(FieldID, ids...))
|
||||
}
|
||||
|
||||
// IDNotIn applies the NotIn predicate on the ID field.
|
||||
func IDNotIn(ids ...int64) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldNotIn(FieldID, ids...))
|
||||
}
|
||||
|
||||
// IDGT applies the GT predicate on the ID field.
|
||||
func IDGT(id int64) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldGT(FieldID, id))
|
||||
}
|
||||
|
||||
// IDGTE applies the GTE predicate on the ID field.
|
||||
func IDGTE(id int64) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldGTE(FieldID, id))
|
||||
}
|
||||
|
||||
// IDLT applies the LT predicate on the ID field.
|
||||
func IDLT(id int64) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldLT(FieldID, id))
|
||||
}
|
||||
|
||||
// IDLTE applies the LTE predicate on the ID field.
|
||||
func IDLTE(id int64) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldLTE(FieldID, id))
|
||||
}
|
||||
|
||||
// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ.
|
||||
func CreatedAt(v time.Time) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldEQ(FieldCreatedAt, v))
|
||||
}
|
||||
|
||||
// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ.
|
||||
func UpdatedAt(v time.Time) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldEQ(FieldUpdatedAt, v))
|
||||
}
|
||||
|
||||
// UserID applies equality check predicate on the "user_id" field. It's identical to UserIDEQ.
|
||||
func UserID(v int64) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldEQ(FieldUserID, v))
|
||||
}
|
||||
|
||||
// ProviderType applies equality check predicate on the "provider_type" field. It's identical to ProviderTypeEQ.
|
||||
func ProviderType(v string) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldEQ(FieldProviderType, v))
|
||||
}
|
||||
|
||||
// ProviderKey applies equality check predicate on the "provider_key" field. It's identical to ProviderKeyEQ.
|
||||
func ProviderKey(v string) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldEQ(FieldProviderKey, v))
|
||||
}
|
||||
|
||||
// ProviderSubject applies equality check predicate on the "provider_subject" field. It's identical to ProviderSubjectEQ.
|
||||
func ProviderSubject(v string) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldEQ(FieldProviderSubject, v))
|
||||
}
|
||||
|
||||
// VerifiedAt applies equality check predicate on the "verified_at" field. It's identical to VerifiedAtEQ.
|
||||
func VerifiedAt(v time.Time) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldEQ(FieldVerifiedAt, v))
|
||||
}
|
||||
|
||||
// Issuer applies equality check predicate on the "issuer" field. It's identical to IssuerEQ.
|
||||
func Issuer(v string) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldEQ(FieldIssuer, v))
|
||||
}
|
||||
|
||||
// CreatedAtEQ applies the EQ predicate on the "created_at" field.
|
||||
func CreatedAtEQ(v time.Time) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldEQ(FieldCreatedAt, v))
|
||||
}
|
||||
|
||||
// CreatedAtNEQ applies the NEQ predicate on the "created_at" field.
|
||||
func CreatedAtNEQ(v time.Time) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldNEQ(FieldCreatedAt, v))
|
||||
}
|
||||
|
||||
// CreatedAtIn applies the In predicate on the "created_at" field.
|
||||
func CreatedAtIn(vs ...time.Time) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldIn(FieldCreatedAt, vs...))
|
||||
}
|
||||
|
||||
// CreatedAtNotIn applies the NotIn predicate on the "created_at" field.
|
||||
func CreatedAtNotIn(vs ...time.Time) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldNotIn(FieldCreatedAt, vs...))
|
||||
}
|
||||
|
||||
// CreatedAtGT applies the GT predicate on the "created_at" field.
|
||||
func CreatedAtGT(v time.Time) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldGT(FieldCreatedAt, v))
|
||||
}
|
||||
|
||||
// CreatedAtGTE applies the GTE predicate on the "created_at" field.
|
||||
func CreatedAtGTE(v time.Time) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldGTE(FieldCreatedAt, v))
|
||||
}
|
||||
|
||||
// CreatedAtLT applies the LT predicate on the "created_at" field.
|
||||
func CreatedAtLT(v time.Time) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldLT(FieldCreatedAt, v))
|
||||
}
|
||||
|
||||
// CreatedAtLTE applies the LTE predicate on the "created_at" field.
|
||||
func CreatedAtLTE(v time.Time) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldLTE(FieldCreatedAt, v))
|
||||
}
|
||||
|
||||
// UpdatedAtEQ applies the EQ predicate on the "updated_at" field.
|
||||
func UpdatedAtEQ(v time.Time) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldEQ(FieldUpdatedAt, v))
|
||||
}
|
||||
|
||||
// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field.
|
||||
func UpdatedAtNEQ(v time.Time) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldNEQ(FieldUpdatedAt, v))
|
||||
}
|
||||
|
||||
// UpdatedAtIn applies the In predicate on the "updated_at" field.
|
||||
func UpdatedAtIn(vs ...time.Time) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldIn(FieldUpdatedAt, vs...))
|
||||
}
|
||||
|
||||
// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field.
|
||||
func UpdatedAtNotIn(vs ...time.Time) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldNotIn(FieldUpdatedAt, vs...))
|
||||
}
|
||||
|
||||
// UpdatedAtGT applies the GT predicate on the "updated_at" field.
|
||||
func UpdatedAtGT(v time.Time) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldGT(FieldUpdatedAt, v))
|
||||
}
|
||||
|
||||
// UpdatedAtGTE applies the GTE predicate on the "updated_at" field.
|
||||
func UpdatedAtGTE(v time.Time) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldGTE(FieldUpdatedAt, v))
|
||||
}
|
||||
|
||||
// UpdatedAtLT applies the LT predicate on the "updated_at" field.
|
||||
func UpdatedAtLT(v time.Time) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldLT(FieldUpdatedAt, v))
|
||||
}
|
||||
|
||||
// UpdatedAtLTE applies the LTE predicate on the "updated_at" field.
|
||||
func UpdatedAtLTE(v time.Time) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldLTE(FieldUpdatedAt, v))
|
||||
}
|
||||
|
||||
// UserIDEQ applies the EQ predicate on the "user_id" field.
|
||||
func UserIDEQ(v int64) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldEQ(FieldUserID, v))
|
||||
}
|
||||
|
||||
// UserIDNEQ applies the NEQ predicate on the "user_id" field.
|
||||
func UserIDNEQ(v int64) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldNEQ(FieldUserID, v))
|
||||
}
|
||||
|
||||
// UserIDIn applies the In predicate on the "user_id" field.
|
||||
func UserIDIn(vs ...int64) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldIn(FieldUserID, vs...))
|
||||
}
|
||||
|
||||
// UserIDNotIn applies the NotIn predicate on the "user_id" field.
|
||||
func UserIDNotIn(vs ...int64) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldNotIn(FieldUserID, vs...))
|
||||
}
|
||||
|
||||
// ProviderTypeEQ applies the EQ predicate on the "provider_type" field.
|
||||
func ProviderTypeEQ(v string) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldEQ(FieldProviderType, v))
|
||||
}
|
||||
|
||||
// ProviderTypeNEQ applies the NEQ predicate on the "provider_type" field.
|
||||
func ProviderTypeNEQ(v string) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldNEQ(FieldProviderType, v))
|
||||
}
|
||||
|
||||
// ProviderTypeIn applies the In predicate on the "provider_type" field.
|
||||
func ProviderTypeIn(vs ...string) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldIn(FieldProviderType, vs...))
|
||||
}
|
||||
|
||||
// ProviderTypeNotIn applies the NotIn predicate on the "provider_type" field.
|
||||
func ProviderTypeNotIn(vs ...string) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldNotIn(FieldProviderType, vs...))
|
||||
}
|
||||
|
||||
// ProviderTypeGT applies the GT predicate on the "provider_type" field.
|
||||
func ProviderTypeGT(v string) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldGT(FieldProviderType, v))
|
||||
}
|
||||
|
||||
// ProviderTypeGTE applies the GTE predicate on the "provider_type" field.
|
||||
func ProviderTypeGTE(v string) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldGTE(FieldProviderType, v))
|
||||
}
|
||||
|
||||
// ProviderTypeLT applies the LT predicate on the "provider_type" field.
|
||||
func ProviderTypeLT(v string) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldLT(FieldProviderType, v))
|
||||
}
|
||||
|
||||
// ProviderTypeLTE applies the LTE predicate on the "provider_type" field.
|
||||
func ProviderTypeLTE(v string) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldLTE(FieldProviderType, v))
|
||||
}
|
||||
|
||||
// ProviderTypeContains applies the Contains predicate on the "provider_type" field.
|
||||
func ProviderTypeContains(v string) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldContains(FieldProviderType, v))
|
||||
}
|
||||
|
||||
// ProviderTypeHasPrefix applies the HasPrefix predicate on the "provider_type" field.
|
||||
func ProviderTypeHasPrefix(v string) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldHasPrefix(FieldProviderType, v))
|
||||
}
|
||||
|
||||
// ProviderTypeHasSuffix applies the HasSuffix predicate on the "provider_type" field.
|
||||
func ProviderTypeHasSuffix(v string) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldHasSuffix(FieldProviderType, v))
|
||||
}
|
||||
|
||||
// ProviderTypeEqualFold applies the EqualFold predicate on the "provider_type" field.
|
||||
func ProviderTypeEqualFold(v string) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldEqualFold(FieldProviderType, v))
|
||||
}
|
||||
|
||||
// ProviderTypeContainsFold applies the ContainsFold predicate on the "provider_type" field.
|
||||
func ProviderTypeContainsFold(v string) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldContainsFold(FieldProviderType, v))
|
||||
}
|
||||
|
||||
// ProviderKeyEQ applies the EQ predicate on the "provider_key" field.
|
||||
func ProviderKeyEQ(v string) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldEQ(FieldProviderKey, v))
|
||||
}
|
||||
|
||||
// ProviderKeyNEQ applies the NEQ predicate on the "provider_key" field.
|
||||
func ProviderKeyNEQ(v string) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldNEQ(FieldProviderKey, v))
|
||||
}
|
||||
|
||||
// ProviderKeyIn applies the In predicate on the "provider_key" field.
|
||||
func ProviderKeyIn(vs ...string) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldIn(FieldProviderKey, vs...))
|
||||
}
|
||||
|
||||
// ProviderKeyNotIn applies the NotIn predicate on the "provider_key" field.
|
||||
func ProviderKeyNotIn(vs ...string) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldNotIn(FieldProviderKey, vs...))
|
||||
}
|
||||
|
||||
// ProviderKeyGT applies the GT predicate on the "provider_key" field.
|
||||
func ProviderKeyGT(v string) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldGT(FieldProviderKey, v))
|
||||
}
|
||||
|
||||
// ProviderKeyGTE applies the GTE predicate on the "provider_key" field.
|
||||
func ProviderKeyGTE(v string) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldGTE(FieldProviderKey, v))
|
||||
}
|
||||
|
||||
// ProviderKeyLT applies the LT predicate on the "provider_key" field.
|
||||
func ProviderKeyLT(v string) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldLT(FieldProviderKey, v))
|
||||
}
|
||||
|
||||
// ProviderKeyLTE applies the LTE predicate on the "provider_key" field.
|
||||
func ProviderKeyLTE(v string) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldLTE(FieldProviderKey, v))
|
||||
}
|
||||
|
||||
// ProviderKeyContains applies the Contains predicate on the "provider_key" field.
|
||||
func ProviderKeyContains(v string) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldContains(FieldProviderKey, v))
|
||||
}
|
||||
|
||||
// ProviderKeyHasPrefix applies the HasPrefix predicate on the "provider_key" field.
|
||||
func ProviderKeyHasPrefix(v string) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldHasPrefix(FieldProviderKey, v))
|
||||
}
|
||||
|
||||
// ProviderKeyHasSuffix applies the HasSuffix predicate on the "provider_key" field.
|
||||
func ProviderKeyHasSuffix(v string) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldHasSuffix(FieldProviderKey, v))
|
||||
}
|
||||
|
||||
// ProviderKeyEqualFold applies the EqualFold predicate on the "provider_key" field.
|
||||
func ProviderKeyEqualFold(v string) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldEqualFold(FieldProviderKey, v))
|
||||
}
|
||||
|
||||
// ProviderKeyContainsFold applies the ContainsFold predicate on the "provider_key" field.
|
||||
func ProviderKeyContainsFold(v string) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldContainsFold(FieldProviderKey, v))
|
||||
}
|
||||
|
||||
// ProviderSubjectEQ applies the EQ predicate on the "provider_subject" field.
|
||||
func ProviderSubjectEQ(v string) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldEQ(FieldProviderSubject, v))
|
||||
}
|
||||
|
||||
// ProviderSubjectNEQ applies the NEQ predicate on the "provider_subject" field.
|
||||
func ProviderSubjectNEQ(v string) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldNEQ(FieldProviderSubject, v))
|
||||
}
|
||||
|
||||
// ProviderSubjectIn applies the In predicate on the "provider_subject" field.
|
||||
func ProviderSubjectIn(vs ...string) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldIn(FieldProviderSubject, vs...))
|
||||
}
|
||||
|
||||
// ProviderSubjectNotIn applies the NotIn predicate on the "provider_subject" field.
|
||||
func ProviderSubjectNotIn(vs ...string) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldNotIn(FieldProviderSubject, vs...))
|
||||
}
|
||||
|
||||
// ProviderSubjectGT applies the GT predicate on the "provider_subject" field.
|
||||
func ProviderSubjectGT(v string) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldGT(FieldProviderSubject, v))
|
||||
}
|
||||
|
||||
// ProviderSubjectGTE applies the GTE predicate on the "provider_subject" field.
|
||||
func ProviderSubjectGTE(v string) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldGTE(FieldProviderSubject, v))
|
||||
}
|
||||
|
||||
// ProviderSubjectLT applies the LT predicate on the "provider_subject" field.
|
||||
func ProviderSubjectLT(v string) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldLT(FieldProviderSubject, v))
|
||||
}
|
||||
|
||||
// ProviderSubjectLTE applies the LTE predicate on the "provider_subject" field.
|
||||
func ProviderSubjectLTE(v string) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldLTE(FieldProviderSubject, v))
|
||||
}
|
||||
|
||||
// ProviderSubjectContains applies the Contains predicate on the "provider_subject" field.
|
||||
func ProviderSubjectContains(v string) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldContains(FieldProviderSubject, v))
|
||||
}
|
||||
|
||||
// ProviderSubjectHasPrefix applies the HasPrefix predicate on the "provider_subject" field.
|
||||
func ProviderSubjectHasPrefix(v string) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldHasPrefix(FieldProviderSubject, v))
|
||||
}
|
||||
|
||||
// ProviderSubjectHasSuffix applies the HasSuffix predicate on the "provider_subject" field.
|
||||
func ProviderSubjectHasSuffix(v string) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldHasSuffix(FieldProviderSubject, v))
|
||||
}
|
||||
|
||||
// ProviderSubjectEqualFold applies the EqualFold predicate on the "provider_subject" field.
|
||||
func ProviderSubjectEqualFold(v string) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldEqualFold(FieldProviderSubject, v))
|
||||
}
|
||||
|
||||
// ProviderSubjectContainsFold applies the ContainsFold predicate on the "provider_subject" field.
|
||||
func ProviderSubjectContainsFold(v string) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldContainsFold(FieldProviderSubject, v))
|
||||
}
|
||||
|
||||
// VerifiedAtEQ applies the EQ predicate on the "verified_at" field.
|
||||
func VerifiedAtEQ(v time.Time) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldEQ(FieldVerifiedAt, v))
|
||||
}
|
||||
|
||||
// VerifiedAtNEQ applies the NEQ predicate on the "verified_at" field.
|
||||
func VerifiedAtNEQ(v time.Time) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldNEQ(FieldVerifiedAt, v))
|
||||
}
|
||||
|
||||
// VerifiedAtIn applies the In predicate on the "verified_at" field.
|
||||
func VerifiedAtIn(vs ...time.Time) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldIn(FieldVerifiedAt, vs...))
|
||||
}
|
||||
|
||||
// VerifiedAtNotIn applies the NotIn predicate on the "verified_at" field.
|
||||
func VerifiedAtNotIn(vs ...time.Time) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldNotIn(FieldVerifiedAt, vs...))
|
||||
}
|
||||
|
||||
// VerifiedAtGT applies the GT predicate on the "verified_at" field.
|
||||
func VerifiedAtGT(v time.Time) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldGT(FieldVerifiedAt, v))
|
||||
}
|
||||
|
||||
// VerifiedAtGTE applies the GTE predicate on the "verified_at" field.
|
||||
func VerifiedAtGTE(v time.Time) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldGTE(FieldVerifiedAt, v))
|
||||
}
|
||||
|
||||
// VerifiedAtLT applies the LT predicate on the "verified_at" field.
|
||||
func VerifiedAtLT(v time.Time) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldLT(FieldVerifiedAt, v))
|
||||
}
|
||||
|
||||
// VerifiedAtLTE applies the LTE predicate on the "verified_at" field.
|
||||
func VerifiedAtLTE(v time.Time) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldLTE(FieldVerifiedAt, v))
|
||||
}
|
||||
|
||||
// VerifiedAtIsNil applies the IsNil predicate on the "verified_at" field.
|
||||
func VerifiedAtIsNil() predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldIsNull(FieldVerifiedAt))
|
||||
}
|
||||
|
||||
// VerifiedAtNotNil applies the NotNil predicate on the "verified_at" field.
|
||||
func VerifiedAtNotNil() predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldNotNull(FieldVerifiedAt))
|
||||
}
|
||||
|
||||
// IssuerEQ applies the EQ predicate on the "issuer" field.
|
||||
func IssuerEQ(v string) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldEQ(FieldIssuer, v))
|
||||
}
|
||||
|
||||
// IssuerNEQ applies the NEQ predicate on the "issuer" field.
|
||||
func IssuerNEQ(v string) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldNEQ(FieldIssuer, v))
|
||||
}
|
||||
|
||||
// IssuerIn applies the In predicate on the "issuer" field.
|
||||
func IssuerIn(vs ...string) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldIn(FieldIssuer, vs...))
|
||||
}
|
||||
|
||||
// IssuerNotIn applies the NotIn predicate on the "issuer" field.
|
||||
func IssuerNotIn(vs ...string) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldNotIn(FieldIssuer, vs...))
|
||||
}
|
||||
|
||||
// IssuerGT applies the GT predicate on the "issuer" field.
|
||||
func IssuerGT(v string) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldGT(FieldIssuer, v))
|
||||
}
|
||||
|
||||
// IssuerGTE applies the GTE predicate on the "issuer" field.
|
||||
func IssuerGTE(v string) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldGTE(FieldIssuer, v))
|
||||
}
|
||||
|
||||
// IssuerLT applies the LT predicate on the "issuer" field.
|
||||
func IssuerLT(v string) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldLT(FieldIssuer, v))
|
||||
}
|
||||
|
||||
// IssuerLTE applies the LTE predicate on the "issuer" field.
|
||||
func IssuerLTE(v string) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldLTE(FieldIssuer, v))
|
||||
}
|
||||
|
||||
// IssuerContains applies the Contains predicate on the "issuer" field.
|
||||
func IssuerContains(v string) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldContains(FieldIssuer, v))
|
||||
}
|
||||
|
||||
// IssuerHasPrefix applies the HasPrefix predicate on the "issuer" field.
|
||||
func IssuerHasPrefix(v string) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldHasPrefix(FieldIssuer, v))
|
||||
}
|
||||
|
||||
// IssuerHasSuffix applies the HasSuffix predicate on the "issuer" field.
|
||||
func IssuerHasSuffix(v string) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldHasSuffix(FieldIssuer, v))
|
||||
}
|
||||
|
||||
// IssuerIsNil applies the IsNil predicate on the "issuer" field.
|
||||
func IssuerIsNil() predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldIsNull(FieldIssuer))
|
||||
}
|
||||
|
||||
// IssuerNotNil applies the NotNil predicate on the "issuer" field.
|
||||
func IssuerNotNil() predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldNotNull(FieldIssuer))
|
||||
}
|
||||
|
||||
// IssuerEqualFold applies the EqualFold predicate on the "issuer" field.
|
||||
func IssuerEqualFold(v string) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldEqualFold(FieldIssuer, v))
|
||||
}
|
||||
|
||||
// IssuerContainsFold applies the ContainsFold predicate on the "issuer" field.
|
||||
func IssuerContainsFold(v string) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.FieldContainsFold(FieldIssuer, v))
|
||||
}
|
||||
|
||||
// HasUser applies the HasEdge predicate on the "user" edge.
func HasUser() predicate.AuthIdentity {
	return predicate.AuthIdentity(func(s *sql.Selector) {
		// Only existence of a neighbor is checked here, so the step omits a To() target.
		step := sqlgraph.NewStep(
			sqlgraph.From(Table, FieldID),
			// M2O edge; the `true` flag appears to mark it as the inverse side — confirm against sqlgraph.Edge docs.
			sqlgraph.Edge(sqlgraph.M2O, true, UserTable, UserColumn),
		)
		sqlgraph.HasNeighbors(s, step)
	})
}

// HasUserWith applies the HasEdge predicate on the "user" edge with a given conditions (other predicates).
func HasUserWith(preds ...predicate.User) predicate.AuthIdentity {
	return predicate.AuthIdentity(func(s *sql.Selector) {
		step := newUserStep()
		// Each supplied predicate is applied to the neighbor's selector.
		sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
			for _, p := range preds {
				p(s)
			}
		})
	})
}

// HasChannels applies the HasEdge predicate on the "channels" edge.
func HasChannels() predicate.AuthIdentity {
	return predicate.AuthIdentity(func(s *sql.Selector) {
		// Only existence of a neighbor is checked here, so the step omits a To() target.
		step := sqlgraph.NewStep(
			sqlgraph.From(Table, FieldID),
			sqlgraph.Edge(sqlgraph.O2M, false, ChannelsTable, ChannelsColumn),
		)
		sqlgraph.HasNeighbors(s, step)
	})
}

// HasChannelsWith applies the HasEdge predicate on the "channels" edge with a given conditions (other predicates).
func HasChannelsWith(preds ...predicate.AuthIdentityChannel) predicate.AuthIdentity {
	return predicate.AuthIdentity(func(s *sql.Selector) {
		step := newChannelsStep()
		// Each supplied predicate is applied to the neighbor's selector.
		sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
			for _, p := range preds {
				p(s)
			}
		})
	})
}

// HasAdoptionDecisions applies the HasEdge predicate on the "adoption_decisions" edge.
func HasAdoptionDecisions() predicate.AuthIdentity {
	return predicate.AuthIdentity(func(s *sql.Selector) {
		// Only existence of a neighbor is checked here, so the step omits a To() target.
		step := sqlgraph.NewStep(
			sqlgraph.From(Table, FieldID),
			sqlgraph.Edge(sqlgraph.O2M, false, AdoptionDecisionsTable, AdoptionDecisionsColumn),
		)
		sqlgraph.HasNeighbors(s, step)
	})
}

// HasAdoptionDecisionsWith applies the HasEdge predicate on the "adoption_decisions" edge with a given conditions (other predicates).
func HasAdoptionDecisionsWith(preds ...predicate.IdentityAdoptionDecision) predicate.AuthIdentity {
	return predicate.AuthIdentity(func(s *sql.Selector) {
		step := newAdoptionDecisionsStep()
		// Each supplied predicate is applied to the neighbor's selector.
		sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
			for _, p := range preds {
				p(s)
			}
		})
	})
}
|
||||
|
||||
// And groups predicates with the AND operator between them.
|
||||
func And(predicates ...predicate.AuthIdentity) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.AndPredicates(predicates...))
|
||||
}
|
||||
|
||||
// Or groups predicates with the OR operator between them.
|
||||
func Or(predicates ...predicate.AuthIdentity) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.OrPredicates(predicates...))
|
||||
}
|
||||
|
||||
// Not applies the not operator on the given predicate.
|
||||
func Not(p predicate.AuthIdentity) predicate.AuthIdentity {
|
||||
return predicate.AuthIdentity(sql.NotPredicates(p))
|
||||
}
|
||||
1036
backend/ent/authidentity_create.go
Normal file
88
backend/ent/authidentity_delete.go
Normal file
@@ -0,0 +1,88 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package ent
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"entgo.io/ent/schema/field"
|
||||
"github.com/Wei-Shaw/sub2api/ent/authidentity"
|
||||
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||
)
|
||||
|
||||
// AuthIdentityDelete is the builder for deleting a AuthIdentity entity.
type AuthIdentityDelete struct {
	config
	hooks    []Hook
	mutation *AuthIdentityMutation
}

// Where appends a list predicates to the AuthIdentityDelete builder.
func (_d *AuthIdentityDelete) Where(ps ...predicate.AuthIdentity) *AuthIdentityDelete {
	_d.mutation.Where(ps...)
	return _d
}

// Exec executes the deletion query and returns how many vertices were deleted.
func (_d *AuthIdentityDelete) Exec(ctx context.Context) (int, error) {
	// Hooks registered on the builder wrap sqlExec.
	return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks)
}

// ExecX is like Exec, but panics if an error occurs.
func (_d *AuthIdentityDelete) ExecX(ctx context.Context) int {
	n, err := _d.Exec(ctx)
	if err != nil {
		panic(err)
	}
	return n
}

// sqlExec builds the DELETE spec from the accumulated predicates and runs it
// against the underlying driver.
func (_d *AuthIdentityDelete) sqlExec(ctx context.Context) (int, error) {
	_spec := sqlgraph.NewDeleteSpec(authidentity.Table, sqlgraph.NewFieldSpec(authidentity.FieldID, field.TypeInt64))
	if ps := _d.mutation.predicates; len(ps) > 0 {
		// All predicates are applied to the same selector (AND semantics in SQL).
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec)
	if err != nil && sqlgraph.IsConstraintError(err) {
		// Surface database constraint violations as ent's typed ConstraintError,
		// keeping the original error wrapped for errors.Is/As.
		err = &ConstraintError{msg: err.Error(), wrap: err}
	}
	_d.mutation.done = true
	return affected, err
}
|
||||
|
||||
// AuthIdentityDeleteOne is the builder for deleting a single AuthIdentity entity.
|
||||
type AuthIdentityDeleteOne struct {
|
||||
_d *AuthIdentityDelete
|
||||
}
|
||||
|
||||
// Where appends a list predicates to the AuthIdentityDelete builder.
|
||||
func (_d *AuthIdentityDeleteOne) Where(ps ...predicate.AuthIdentity) *AuthIdentityDeleteOne {
|
||||
_d._d.mutation.Where(ps...)
|
||||
return _d
|
||||
}
|
||||
|
||||
// Exec executes the deletion query.
|
||||
func (_d *AuthIdentityDeleteOne) Exec(ctx context.Context) error {
|
||||
n, err := _d._d.Exec(ctx)
|
||||
switch {
|
||||
case err != nil:
|
||||
return err
|
||||
case n == 0:
|
||||
return &NotFoundError{authidentity.Label}
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (_d *AuthIdentityDeleteOne) ExecX(ctx context.Context) {
|
||||
if err := _d.Exec(ctx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
797
backend/ent/authidentity_query.go
Normal file
@@ -0,0 +1,797 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package ent
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql/driver"
|
||||
"fmt"
|
||||
"math"
|
||||
|
||||
"entgo.io/ent"
|
||||
"entgo.io/ent/dialect"
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"entgo.io/ent/schema/field"
|
||||
"github.com/Wei-Shaw/sub2api/ent/authidentity"
|
||||
"github.com/Wei-Shaw/sub2api/ent/authidentitychannel"
|
||||
"github.com/Wei-Shaw/sub2api/ent/identityadoptiondecision"
|
||||
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||
"github.com/Wei-Shaw/sub2api/ent/user"
|
||||
)
|
||||
|
||||
// AuthIdentityQuery is the builder for querying AuthIdentity entities.
type AuthIdentityQuery struct {
	config
	ctx        *QueryContext
	order      []authidentity.OrderOption
	inters     []Interceptor
	predicates []predicate.AuthIdentity
	// Optional sub-queries used to eager-load edges; nil when the edge is not
	// requested — presumably set by With* builder methods elsewhere in this file.
	withUser              *UserQuery
	withChannels          *AuthIdentityChannelQuery
	withAdoptionDecisions *IdentityAdoptionDecisionQuery
	// Raw selector modifiers applied before execution.
	modifiers []func(*sql.Selector)
	// intermediate query (i.e. traversal path).
	sql  *sql.Selector
	path func(context.Context) (*sql.Selector, error)
}
|
||||
|
||||
// Where adds a new predicate for the AuthIdentityQuery builder.
func (_q *AuthIdentityQuery) Where(ps ...predicate.AuthIdentity) *AuthIdentityQuery {
	_q.predicates = append(_q.predicates, ps...)
	return _q
}

// Limit the number of records to be returned by this query.
func (_q *AuthIdentityQuery) Limit(limit int) *AuthIdentityQuery {
	_q.ctx.Limit = &limit
	return _q
}

// Offset to start from.
func (_q *AuthIdentityQuery) Offset(offset int) *AuthIdentityQuery {
	_q.ctx.Offset = &offset
	return _q
}

// Unique configures the query builder to filter duplicate records on query.
// By default, unique is set to true, and can be disabled using this method.
func (_q *AuthIdentityQuery) Unique(unique bool) *AuthIdentityQuery {
	_q.ctx.Unique = &unique
	return _q
}

// Order specifies how the records should be ordered.
// Multiple calls accumulate; orderings are applied in the order given.
func (_q *AuthIdentityQuery) Order(o ...authidentity.OrderOption) *AuthIdentityQuery {
	_q.order = append(_q.order, o...)
	return _q
}
|
||||
|
||||
// QueryUser chains the current query on the "user" edge.
func (_q *AuthIdentityQuery) QueryUser() *UserQuery {
	query := (&UserClient{config: _q.config}).Query()
	// path is evaluated lazily at execution time, so predicates added to _q
	// after this call are still included in the traversal.
	query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
		if err := _q.prepareQuery(ctx); err != nil {
			return nil, err
		}
		selector := _q.sqlQuery(ctx)
		if err := selector.Err(); err != nil {
			return nil, err
		}
		step := sqlgraph.NewStep(
			sqlgraph.From(authidentity.Table, authidentity.FieldID, selector),
			sqlgraph.To(user.Table, user.FieldID),
			sqlgraph.Edge(sqlgraph.M2O, true, authidentity.UserTable, authidentity.UserColumn),
		)
		fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step)
		return fromU, nil
	}
	return query
}

// QueryChannels chains the current query on the "channels" edge.
func (_q *AuthIdentityQuery) QueryChannels() *AuthIdentityChannelQuery {
	query := (&AuthIdentityChannelClient{config: _q.config}).Query()
	// See QueryUser: the traversal selector is built lazily via query.path.
	query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
		if err := _q.prepareQuery(ctx); err != nil {
			return nil, err
		}
		selector := _q.sqlQuery(ctx)
		if err := selector.Err(); err != nil {
			return nil, err
		}
		step := sqlgraph.NewStep(
			sqlgraph.From(authidentity.Table, authidentity.FieldID, selector),
			sqlgraph.To(authidentitychannel.Table, authidentitychannel.FieldID),
			sqlgraph.Edge(sqlgraph.O2M, false, authidentity.ChannelsTable, authidentity.ChannelsColumn),
		)
		fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step)
		return fromU, nil
	}
	return query
}

// QueryAdoptionDecisions chains the current query on the "adoption_decisions" edge.
func (_q *AuthIdentityQuery) QueryAdoptionDecisions() *IdentityAdoptionDecisionQuery {
	query := (&IdentityAdoptionDecisionClient{config: _q.config}).Query()
	// See QueryUser: the traversal selector is built lazily via query.path.
	query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
		if err := _q.prepareQuery(ctx); err != nil {
			return nil, err
		}
		selector := _q.sqlQuery(ctx)
		if err := selector.Err(); err != nil {
			return nil, err
		}
		step := sqlgraph.NewStep(
			sqlgraph.From(authidentity.Table, authidentity.FieldID, selector),
			sqlgraph.To(identityadoptiondecision.Table, identityadoptiondecision.FieldID),
			sqlgraph.Edge(sqlgraph.O2M, false, authidentity.AdoptionDecisionsTable, authidentity.AdoptionDecisionsColumn),
		)
		fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step)
		return fromU, nil
	}
	return query
}
|
||||
|
||||
// First returns the first AuthIdentity entity from the query.
|
||||
// Returns a *NotFoundError when no AuthIdentity was found.
|
||||
func (_q *AuthIdentityQuery) First(ctx context.Context) (*AuthIdentity, error) {
|
||||
nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(nodes) == 0 {
|
||||
return nil, &NotFoundError{authidentity.Label}
|
||||
}
|
||||
return nodes[0], nil
|
||||
}
|
||||
|
||||
// FirstX is like First, but panics if an error occurs.
|
||||
func (_q *AuthIdentityQuery) FirstX(ctx context.Context) *AuthIdentity {
|
||||
node, err := _q.First(ctx)
|
||||
if err != nil && !IsNotFound(err) {
|
||||
panic(err)
|
||||
}
|
||||
return node
|
||||
}
|
||||
|
||||
// FirstID returns the first AuthIdentity ID from the query.
|
||||
// Returns a *NotFoundError when no AuthIdentity ID was found.
|
||||
func (_q *AuthIdentityQuery) FirstID(ctx context.Context) (id int64, err error) {
|
||||
var ids []int64
|
||||
if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil {
|
||||
return
|
||||
}
|
||||
if len(ids) == 0 {
|
||||
err = &NotFoundError{authidentity.Label}
|
||||
return
|
||||
}
|
||||
return ids[0], nil
|
||||
}
|
||||
|
||||
// FirstIDX is like FirstID, but panics if an error occurs.
|
||||
func (_q *AuthIdentityQuery) FirstIDX(ctx context.Context) int64 {
|
||||
id, err := _q.FirstID(ctx)
|
||||
if err != nil && !IsNotFound(err) {
|
||||
panic(err)
|
||||
}
|
||||
return id
|
||||
}
|
||||
|
||||
// Only returns a single AuthIdentity entity found by the query, ensuring it only returns one.
|
||||
// Returns a *NotSingularError when more than one AuthIdentity entity is found.
|
||||
// Returns a *NotFoundError when no AuthIdentity entities are found.
|
||||
func (_q *AuthIdentityQuery) Only(ctx context.Context) (*AuthIdentity, error) {
|
||||
nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
switch len(nodes) {
|
||||
case 1:
|
||||
return nodes[0], nil
|
||||
case 0:
|
||||
return nil, &NotFoundError{authidentity.Label}
|
||||
default:
|
||||
return nil, &NotSingularError{authidentity.Label}
|
||||
}
|
||||
}
|
||||
|
||||
// OnlyX is like Only, but panics if an error occurs.
|
||||
func (_q *AuthIdentityQuery) OnlyX(ctx context.Context) *AuthIdentity {
|
||||
node, err := _q.Only(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return node
|
||||
}
|
||||
|
||||
// OnlyID is like Only, but returns the only AuthIdentity ID in the query.
|
||||
// Returns a *NotSingularError when more than one AuthIdentity ID is found.
|
||||
// Returns a *NotFoundError when no entities are found.
|
||||
func (_q *AuthIdentityQuery) OnlyID(ctx context.Context) (id int64, err error) {
|
||||
var ids []int64
|
||||
if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil {
|
||||
return
|
||||
}
|
||||
switch len(ids) {
|
||||
case 1:
|
||||
id = ids[0]
|
||||
case 0:
|
||||
err = &NotFoundError{authidentity.Label}
|
||||
default:
|
||||
err = &NotSingularError{authidentity.Label}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// OnlyIDX is like OnlyID, but panics if an error occurs.
|
||||
func (_q *AuthIdentityQuery) OnlyIDX(ctx context.Context) int64 {
|
||||
id, err := _q.OnlyID(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return id
|
||||
}
|
||||
|
||||
// All executes the query and returns a list of AuthIdentities.
func (_q *AuthIdentityQuery) All(ctx context.Context) ([]*AuthIdentity, error) {
	ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll)
	if err := _q.prepareQuery(ctx); err != nil {
		return nil, err
	}
	// Interceptors may wrap or replace the underlying querier.
	qr := querierAll[[]*AuthIdentity, *AuthIdentityQuery]()
	return withInterceptors[[]*AuthIdentity](ctx, _q, qr, _q.inters)
}

// AllX is like All, but panics if an error occurs.
func (_q *AuthIdentityQuery) AllX(ctx context.Context) []*AuthIdentity {
	nodes, err := _q.All(ctx)
	if err != nil {
		panic(err)
	}
	return nodes
}

// IDs executes the query and returns a list of AuthIdentity IDs.
func (_q *AuthIdentityQuery) IDs(ctx context.Context) (ids []int64, err error) {
	// When used as a traversal step (path != nil) and the caller did not choose,
	// default to unique IDs to avoid duplicates introduced by the join.
	if _q.ctx.Unique == nil && _q.path != nil {
		_q.Unique(true)
	}
	ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs)
	if err = _q.Select(authidentity.FieldID).Scan(ctx, &ids); err != nil {
		return nil, err
	}
	return ids, nil
}

// IDsX is like IDs, but panics if an error occurs.
func (_q *AuthIdentityQuery) IDsX(ctx context.Context) []int64 {
	ids, err := _q.IDs(ctx)
	if err != nil {
		panic(err)
	}
	return ids
}
|
||||
|
||||
// Count returns the count of the given query.
func (_q *AuthIdentityQuery) Count(ctx context.Context) (int, error) {
	ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount)
	if err := _q.prepareQuery(ctx); err != nil {
		return 0, err
	}
	return withInterceptors[int](ctx, _q, querierCount[*AuthIdentityQuery](), _q.inters)
}

// CountX is like Count, but panics if an error occurs.
func (_q *AuthIdentityQuery) CountX(ctx context.Context) int {
	count, err := _q.Count(ctx)
	if err != nil {
		panic(err)
	}
	return count
}
|
||||
|
||||
// Exist returns true if the query has elements in the graph.
// It is implemented via FirstID, so only a single ID is fetched.
func (_q *AuthIdentityQuery) Exist(ctx context.Context) (bool, error) {
	ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist)
	switch _, err := _q.FirstID(ctx); {
	case IsNotFound(err):
		// "not found" is a valid answer, not an error.
		return false, nil
	case err != nil:
		return false, fmt.Errorf("ent: check existence: %w", err)
	default:
		return true, nil
	}
}

// ExistX is like Exist, but panics if an error occurs.
func (_q *AuthIdentityQuery) ExistX(ctx context.Context) bool {
	exist, err := _q.Exist(ctx)
	if err != nil {
		panic(err)
	}
	return exist
}
|
||||
|
||||
// Clone returns a duplicate of the AuthIdentityQuery builder, including all associated steps. It can be
// used to prepare common query builders and use them differently after the clone is made.
// Order options, interceptors and predicates are copied into fresh slices so
// appending to the clone does not mutate the receiver.
func (_q *AuthIdentityQuery) Clone() *AuthIdentityQuery {
	if _q == nil {
		return nil
	}
	return &AuthIdentityQuery{
		config:                _q.config,
		ctx:                   _q.ctx.Clone(),
		order:                 append([]authidentity.OrderOption{}, _q.order...),
		inters:                append([]Interceptor{}, _q.inters...),
		predicates:            append([]predicate.AuthIdentity{}, _q.predicates...),
		withUser:              _q.withUser.Clone(),
		withChannels:          _q.withChannels.Clone(),
		withAdoptionDecisions: _q.withAdoptionDecisions.Clone(),
		// clone intermediate query.
		sql:  _q.sql.Clone(),
		path: _q.path,
	}
}
|
||||
|
||||
// WithUser tells the query-builder to eager-load the nodes that are connected to
// the "user" edge. The optional arguments are used to configure the query builder of the edge.
func (_q *AuthIdentityQuery) WithUser(opts ...func(*UserQuery)) *AuthIdentityQuery {
	query := (&UserClient{config: _q.config}).Query()
	for _, opt := range opts {
		opt(query)
	}
	_q.withUser = query
	return _q
}

// WithChannels tells the query-builder to eager-load the nodes that are connected to
// the "channels" edge. The optional arguments are used to configure the query builder of the edge.
func (_q *AuthIdentityQuery) WithChannels(opts ...func(*AuthIdentityChannelQuery)) *AuthIdentityQuery {
	query := (&AuthIdentityChannelClient{config: _q.config}).Query()
	for _, opt := range opts {
		opt(query)
	}
	_q.withChannels = query
	return _q
}

// WithAdoptionDecisions tells the query-builder to eager-load the nodes that are connected to
// the "adoption_decisions" edge. The optional arguments are used to configure the query builder of the edge.
func (_q *AuthIdentityQuery) WithAdoptionDecisions(opts ...func(*IdentityAdoptionDecisionQuery)) *AuthIdentityQuery {
	query := (&IdentityAdoptionDecisionClient{config: _q.config}).Query()
	for _, opt := range opts {
		opt(query)
	}
	_q.withAdoptionDecisions = query
	return _q
}
|
||||
|
||||
// GroupBy is used to group vertices by one or more fields/columns.
// It is often used with aggregate functions, like: count, max, mean, min, sum.
//
// Example:
//
//	var v []struct {
//		CreatedAt time.Time `json:"created_at,omitempty"`
//		Count int `json:"count,omitempty"`
//	}
//
//	client.AuthIdentity.Query().
//		GroupBy(authidentity.FieldCreatedAt).
//		Aggregate(ent.Count()).
//		Scan(ctx, &v)
func (_q *AuthIdentityQuery) GroupBy(field string, fields ...string) *AuthIdentityGroupBy {
	_q.ctx.Fields = append([]string{field}, fields...)
	grbuild := &AuthIdentityGroupBy{build: _q}
	// The group-by builder shares the query's field list by pointer, so
	// later mutations of _q.ctx.Fields are visible to the builder.
	grbuild.flds = &_q.ctx.Fields
	grbuild.label = authidentity.Label
	grbuild.scan = grbuild.Scan
	return grbuild
}
|
||||
|
||||
// Select allows the selection one or more fields/columns for the given query,
// instead of selecting all fields in the entity.
//
// Example:
//
//	var v []struct {
//		CreatedAt time.Time `json:"created_at,omitempty"`
//	}
//
//	client.AuthIdentity.Query().
//		Select(authidentity.FieldCreatedAt).
//		Scan(ctx, &v)
func (_q *AuthIdentityQuery) Select(fields ...string) *AuthIdentitySelect {
	// Fields accumulate across calls; Select may be invoked more than once.
	_q.ctx.Fields = append(_q.ctx.Fields, fields...)
	sbuild := &AuthIdentitySelect{AuthIdentityQuery: _q}
	sbuild.label = authidentity.Label
	sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan
	return sbuild
}
|
||||
|
||||
// Aggregate returns a AuthIdentitySelect configured with the given aggregations.
// It is shorthand for Select() followed by Aggregate on the resulting builder.
func (_q *AuthIdentityQuery) Aggregate(fns ...AggregateFunc) *AuthIdentitySelect {
	return _q.Select().Aggregate(fns...)
}
|
||||
|
||||
// prepareQuery runs registered traversal interceptors, validates the selected
// columns, and resolves the graph-traversal path (if any) into the
// intermediate SQL selector stored on _q.sql.
func (_q *AuthIdentityQuery) prepareQuery(ctx context.Context) error {
	for _, inter := range _q.inters {
		if inter == nil {
			// A nil interceptor usually means the generated runtime package
			// was never imported, so schema hooks were not registered.
			return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
		}
		if trv, ok := inter.(Traverser); ok {
			if err := trv.Traverse(ctx, _q); err != nil {
				return err
			}
		}
	}
	// Reject unknown column names before touching the database.
	for _, f := range _q.ctx.Fields {
		if !authidentity.ValidColumn(f) {
			return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
		}
	}
	if _q.path != nil {
		prev, err := _q.path(ctx)
		if err != nil {
			return err
		}
		_q.sql = prev
	}
	return nil
}
|
||||
|
||||
// sqlAll scans all rows matching the query into AuthIdentity nodes and then
// eager-loads any edges requested via WithUser/WithChannels/WithAdoptionDecisions.
// The optional hooks may mutate the query spec before execution.
func (_q *AuthIdentityQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*AuthIdentity, error) {
	var (
		nodes = []*AuthIdentity{}
		_spec = _q.querySpec()
		// loadedTypes flags which edges were eager-loaded, so Edges accessors
		// can tell "not loaded" apart from "loaded but empty".
		loadedTypes = [3]bool{
			_q.withUser != nil,
			_q.withChannels != nil,
			_q.withAdoptionDecisions != nil,
		}
	)
	_spec.ScanValues = func(columns []string) ([]any, error) {
		return (*AuthIdentity).scanValues(nil, columns)
	}
	_spec.Assign = func(columns []string, values []any) error {
		node := &AuthIdentity{config: _q.config}
		nodes = append(nodes, node)
		node.Edges.loadedTypes = loadedTypes
		return node.assignValues(columns, values)
	}
	if len(_q.modifiers) > 0 {
		_spec.Modifiers = _q.modifiers
	}
	for i := range hooks {
		hooks[i](ctx, _spec)
	}
	if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil {
		return nil, err
	}
	if len(nodes) == 0 {
		// Nothing to eager-load.
		return nodes, nil
	}
	if query := _q.withUser; query != nil {
		if err := _q.loadUser(ctx, query, nodes, nil,
			func(n *AuthIdentity, e *User) { n.Edges.User = e }); err != nil {
			return nil, err
		}
	}
	if query := _q.withChannels; query != nil {
		if err := _q.loadChannels(ctx, query, nodes,
			func(n *AuthIdentity) { n.Edges.Channels = []*AuthIdentityChannel{} },
			func(n *AuthIdentity, e *AuthIdentityChannel) { n.Edges.Channels = append(n.Edges.Channels, e) }); err != nil {
			return nil, err
		}
	}
	if query := _q.withAdoptionDecisions; query != nil {
		if err := _q.loadAdoptionDecisions(ctx, query, nodes,
			func(n *AuthIdentity) { n.Edges.AdoptionDecisions = []*IdentityAdoptionDecision{} },
			func(n *AuthIdentity, e *IdentityAdoptionDecision) {
				n.Edges.AdoptionDecisions = append(n.Edges.AdoptionDecisions, e)
			}); err != nil {
			return nil, err
		}
	}
	return nodes, nil
}
|
||||
|
||||
// loadUser eager-loads the "user" edge (many-to-one via the user_id FK) for
// the given nodes: it collects the distinct FK values, fetches the matching
// users in a single query, and assigns each user back to its identities.
func (_q *AuthIdentityQuery) loadUser(ctx context.Context, query *UserQuery, nodes []*AuthIdentity, init func(*AuthIdentity), assign func(*AuthIdentity, *User)) error {
	ids := make([]int64, 0, len(nodes))
	nodeids := make(map[int64][]*AuthIdentity)
	for i := range nodes {
		fk := nodes[i].UserID
		// Deduplicate FKs; several identities may share one user.
		if _, ok := nodeids[fk]; !ok {
			ids = append(ids, fk)
		}
		nodeids[fk] = append(nodeids[fk], nodes[i])
	}
	if len(ids) == 0 {
		return nil
	}
	query.Where(user.IDIn(ids...))
	neighbors, err := query.All(ctx)
	if err != nil {
		return err
	}
	for _, n := range neighbors {
		nodes, ok := nodeids[n.ID]
		if !ok {
			return fmt.Errorf(`unexpected foreign-key "user_id" returned %v`, n.ID)
		}
		for i := range nodes {
			assign(nodes[i], n)
		}
	}
	return nil
}
|
||||
// loadChannels eager-loads the one-to-many "channels" edge: it queries
// AuthIdentityChannel rows whose identity_id is among the given nodes' IDs
// and appends each neighbor to its parent identity via assign.
func (_q *AuthIdentityQuery) loadChannels(ctx context.Context, query *AuthIdentityChannelQuery, nodes []*AuthIdentity, init func(*AuthIdentity), assign func(*AuthIdentity, *AuthIdentityChannel)) error {
	fks := make([]driver.Value, 0, len(nodes))
	nodeids := make(map[int64]*AuthIdentity)
	for i := range nodes {
		fks = append(fks, nodes[i].ID)
		nodeids[nodes[i].ID] = nodes[i]
		if init != nil {
			// Initialize the edge slice so "loaded but empty" is distinguishable.
			init(nodes[i])
		}
	}
	if len(query.ctx.Fields) > 0 {
		// The FK column must be selected to map each row back to its parent.
		query.ctx.AppendFieldOnce(authidentitychannel.FieldIdentityID)
	}
	query.Where(predicate.AuthIdentityChannel(func(s *sql.Selector) {
		s.Where(sql.InValues(s.C(authidentity.ChannelsColumn), fks...))
	}))
	neighbors, err := query.All(ctx)
	if err != nil {
		return err
	}
	for _, n := range neighbors {
		fk := n.IdentityID
		node, ok := nodeids[fk]
		if !ok {
			return fmt.Errorf(`unexpected referenced foreign-key "identity_id" returned %v for node %v`, fk, n.ID)
		}
		assign(node, n)
	}
	return nil
}
|
||||
// loadAdoptionDecisions eager-loads the one-to-many "adoption_decisions" edge.
// Unlike loadChannels, the identity_id FK on IdentityAdoptionDecision is
// nullable (*int64), so a nil FK on a returned row is reported as an error.
func (_q *AuthIdentityQuery) loadAdoptionDecisions(ctx context.Context, query *IdentityAdoptionDecisionQuery, nodes []*AuthIdentity, init func(*AuthIdentity), assign func(*AuthIdentity, *IdentityAdoptionDecision)) error {
	fks := make([]driver.Value, 0, len(nodes))
	nodeids := make(map[int64]*AuthIdentity)
	for i := range nodes {
		fks = append(fks, nodes[i].ID)
		nodeids[nodes[i].ID] = nodes[i]
		if init != nil {
			init(nodes[i])
		}
	}
	if len(query.ctx.Fields) > 0 {
		// Ensure the FK column is fetched even under a narrowed Select.
		query.ctx.AppendFieldOnce(identityadoptiondecision.FieldIdentityID)
	}
	query.Where(predicate.IdentityAdoptionDecision(func(s *sql.Selector) {
		s.Where(sql.InValues(s.C(authidentity.AdoptionDecisionsColumn), fks...))
	}))
	neighbors, err := query.All(ctx)
	if err != nil {
		return err
	}
	for _, n := range neighbors {
		fk := n.IdentityID
		if fk == nil {
			return fmt.Errorf(`foreign-key "identity_id" is nil for node %v`, n.ID)
		}
		node, ok := nodeids[*fk]
		if !ok {
			return fmt.Errorf(`unexpected referenced foreign-key "identity_id" returned %v for node %v`, *fk, n.ID)
		}
		assign(node, n)
	}
	return nil
}
|
||||
|
||||
// sqlCount executes a COUNT over the current query spec, honoring any
// registered modifiers and the caller's field/uniqueness selection.
func (_q *AuthIdentityQuery) sqlCount(ctx context.Context) (int, error) {
	_spec := _q.querySpec()
	if len(_q.modifiers) > 0 {
		_spec.Modifiers = _q.modifiers
	}
	_spec.Node.Columns = _q.ctx.Fields
	if len(_q.ctx.Fields) > 0 {
		// COUNT(DISTINCT ...) only when uniqueness was requested explicitly.
		_spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique
	}
	return sqlgraph.CountNodes(ctx, _q.driver, _spec)
}
|
||||
|
||||
// querySpec translates the builder's state (fields, predicates, ordering,
// limit/offset, uniqueness) into a sqlgraph.QuerySpec for execution.
func (_q *AuthIdentityQuery) querySpec() *sqlgraph.QuerySpec {
	_spec := sqlgraph.NewQuerySpec(authidentity.Table, authidentity.Columns, sqlgraph.NewFieldSpec(authidentity.FieldID, field.TypeInt64))
	_spec.From = _q.sql
	if unique := _q.ctx.Unique; unique != nil {
		_spec.Unique = *unique
	} else if _q.path != nil {
		// Graph traversals default to DISTINCT to avoid duplicate vertices.
		_spec.Unique = true
	}
	if fields := _q.ctx.Fields; len(fields) > 0 {
		_spec.Node.Columns = make([]string, 0, len(fields))
		// The ID column is always selected first.
		_spec.Node.Columns = append(_spec.Node.Columns, authidentity.FieldID)
		for i := range fields {
			if fields[i] != authidentity.FieldID {
				_spec.Node.Columns = append(_spec.Node.Columns, fields[i])
			}
		}
		if _q.withUser != nil {
			// The user_id FK is required for eager-loading the "user" edge.
			_spec.Node.AddColumnOnce(authidentity.FieldUserID)
		}
	}
	if ps := _q.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	if limit := _q.ctx.Limit; limit != nil {
		_spec.Limit = *limit
	}
	if offset := _q.ctx.Offset; offset != nil {
		_spec.Offset = *offset
	}
	if ps := _q.order; len(ps) > 0 {
		_spec.Order = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	return _spec
}
|
||||
|
||||
// sqlQuery builds the raw SQL selector for this query, applying column
// selection, DISTINCT, modifiers, predicates, ordering and limit/offset.
// It is used by the GroupBy/Select scanners rather than sqlgraph execution.
func (_q *AuthIdentityQuery) sqlQuery(ctx context.Context) *sql.Selector {
	builder := sql.Dialect(_q.driver.Dialect())
	t1 := builder.Table(authidentity.Table)
	columns := _q.ctx.Fields
	if len(columns) == 0 {
		columns = authidentity.Columns
	}
	selector := builder.Select(t1.Columns(columns...)...).From(t1)
	if _q.sql != nil {
		// Continue from the intermediate selector produced by a traversal.
		selector = _q.sql
		selector.Select(selector.Columns(columns...)...)
	}
	if _q.ctx.Unique != nil && *_q.ctx.Unique {
		selector.Distinct()
	}
	for _, m := range _q.modifiers {
		m(selector)
	}
	for _, p := range _q.predicates {
		p(selector)
	}
	for _, p := range _q.order {
		p(selector)
	}
	if offset := _q.ctx.Offset; offset != nil {
		// limit is mandatory for offset clause. We start
		// with default value, and override it below if needed.
		selector.Offset(*offset).Limit(math.MaxInt32)
	}
	if limit := _q.ctx.Limit; limit != nil {
		selector.Limit(*limit)
	}
	return selector
}
|
||||
|
||||
// ForUpdate locks the selected rows against concurrent updates, and prevent them from being
// updated, deleted or "selected ... for update" by other sessions, until the transaction is
// either committed or rolled-back.
func (_q *AuthIdentityQuery) ForUpdate(opts ...sql.LockOption) *AuthIdentityQuery {
	if _q.driver.Dialect() == dialect.Postgres {
		// Postgres disallows SELECT DISTINCT ... FOR UPDATE.
		_q.Unique(false)
	}
	_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
		s.ForUpdate(opts...)
	})
	return _q
}
|
||||
|
||||
// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock
// on any rows that are read. Other sessions can read the rows, but cannot modify them
// until your transaction commits.
func (_q *AuthIdentityQuery) ForShare(opts ...sql.LockOption) *AuthIdentityQuery {
	if _q.driver.Dialect() == dialect.Postgres {
		// Postgres disallows SELECT DISTINCT ... FOR SHARE.
		_q.Unique(false)
	}
	_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
		s.ForShare(opts...)
	})
	return _q
}
|
||||
|
||||
// AuthIdentityGroupBy is the group-by builder for AuthIdentity entities.
type AuthIdentityGroupBy struct {
	selector
	// build is the underlying query the group-by executes against.
	build *AuthIdentityQuery
}

// Aggregate adds the given aggregation functions to the group-by query.
func (_g *AuthIdentityGroupBy) Aggregate(fns ...AggregateFunc) *AuthIdentityGroupBy {
	_g.fns = append(_g.fns, fns...)
	return _g
}
|
||||
|
||||
// Scan applies the selector query and scans the result into the given value.
// v is typically a pointer to a slice of structs matching the grouped columns.
func (_g *AuthIdentityGroupBy) Scan(ctx context.Context, v any) error {
	ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy)
	if err := _g.build.prepareQuery(ctx); err != nil {
		return err
	}
	return scanWithInterceptors[*AuthIdentityQuery, *AuthIdentityGroupBy](ctx, _g.build, _g, _g.build.inters, v)
}
|
||||
|
||||
// sqlScan executes the GROUP BY query: it selects the grouped columns plus
// any aggregation expressions, runs the statement, and scans the rows into v.
func (_g *AuthIdentityGroupBy) sqlScan(ctx context.Context, root *AuthIdentityQuery, v any) error {
	selector := root.sqlQuery(ctx).Select()
	aggregation := make([]string, 0, len(_g.fns))
	for _, fn := range _g.fns {
		aggregation = append(aggregation, fn(selector))
	}
	if len(selector.SelectedColumns()) == 0 {
		// No explicit selection: select the group-by fields + aggregations.
		columns := make([]string, 0, len(*_g.flds)+len(_g.fns))
		for _, f := range *_g.flds {
			columns = append(columns, selector.C(f))
		}
		columns = append(columns, aggregation...)
		selector.Select(columns...)
	}
	selector.GroupBy(selector.Columns(*_g.flds...)...)
	if err := selector.Err(); err != nil {
		return err
	}
	rows := &sql.Rows{}
	query, args := selector.Query()
	if err := _g.build.driver.Query(ctx, query, args, rows); err != nil {
		return err
	}
	defer rows.Close()
	return sql.ScanSlice(rows, v)
}
|
||||
|
||||
// AuthIdentitySelect is the builder for selecting fields of AuthIdentity entities.
type AuthIdentitySelect struct {
	// Embeds the query so Select shares its configuration and state.
	*AuthIdentityQuery
	selector
}

// Aggregate adds the given aggregation functions to the selector query.
func (_s *AuthIdentitySelect) Aggregate(fns ...AggregateFunc) *AuthIdentitySelect {
	_s.fns = append(_s.fns, fns...)
	return _s
}
|
||||
|
||||
// Scan applies the selector query and scans the result into the given value.
// v is typically a pointer to a slice of structs matching the selected columns.
func (_s *AuthIdentitySelect) Scan(ctx context.Context, v any) error {
	ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect)
	if err := _s.prepareQuery(ctx); err != nil {
		return err
	}
	return scanWithInterceptors[*AuthIdentityQuery, *AuthIdentitySelect](ctx, _s.AuthIdentityQuery, _s, _s.inters, v)
}
|
||||
|
||||
// sqlScan executes the SELECT with any registered aggregations appended (or
// substituted, when no plain fields were chosen) and scans the rows into v.
func (_s *AuthIdentitySelect) sqlScan(ctx context.Context, root *AuthIdentityQuery, v any) error {
	selector := root.sqlQuery(ctx)
	aggregation := make([]string, 0, len(_s.fns))
	for _, fn := range _s.fns {
		aggregation = append(aggregation, fn(selector))
	}
	switch n := len(*_s.selector.flds); {
	case n == 0 && len(aggregation) > 0:
		// Aggregations only.
		selector.Select(aggregation...)
	case n != 0 && len(aggregation) > 0:
		// Fields plus aggregations.
		selector.AppendSelect(aggregation...)
	}
	rows := &sql.Rows{}
	query, args := selector.Query()
	if err := _s.driver.Query(ctx, query, args, rows); err != nil {
		return err
	}
	defer rows.Close()
	return sql.ScanSlice(rows, v)
}
|
||||
923
backend/ent/authidentity_update.go
Normal file
@@ -0,0 +1,923 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package ent
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"entgo.io/ent/schema/field"
|
||||
"github.com/Wei-Shaw/sub2api/ent/authidentity"
|
||||
"github.com/Wei-Shaw/sub2api/ent/authidentitychannel"
|
||||
"github.com/Wei-Shaw/sub2api/ent/identityadoptiondecision"
|
||||
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||
"github.com/Wei-Shaw/sub2api/ent/user"
|
||||
)
|
||||
|
||||
// AuthIdentityUpdate is the builder for updating AuthIdentity entities.
type AuthIdentityUpdate struct {
	config
	// hooks run around the mutation during Save.
	hooks []Hook
	// mutation accumulates the field/edge changes to apply.
	mutation *AuthIdentityMutation
}

// Where appends a list predicates to the AuthIdentityUpdate builder.
func (_u *AuthIdentityUpdate) Where(ps ...predicate.AuthIdentity) *AuthIdentityUpdate {
	_u.mutation.Where(ps...)
	return _u
}
|
||||
|
||||
// SetUpdatedAt sets the "updated_at" field.
func (_u *AuthIdentityUpdate) SetUpdatedAt(v time.Time) *AuthIdentityUpdate {
	_u.mutation.SetUpdatedAt(v)
	return _u
}

// SetUserID sets the "user_id" field.
func (_u *AuthIdentityUpdate) SetUserID(v int64) *AuthIdentityUpdate {
	_u.mutation.SetUserID(v)
	return _u
}

// SetNillableUserID sets the "user_id" field if the given value is not nil.
// A nil pointer leaves the field unchanged.
func (_u *AuthIdentityUpdate) SetNillableUserID(v *int64) *AuthIdentityUpdate {
	if v != nil {
		_u.SetUserID(*v)
	}
	return _u
}

// SetProviderType sets the "provider_type" field.
func (_u *AuthIdentityUpdate) SetProviderType(v string) *AuthIdentityUpdate {
	_u.mutation.SetProviderType(v)
	return _u
}

// SetNillableProviderType sets the "provider_type" field if the given value is not nil.
func (_u *AuthIdentityUpdate) SetNillableProviderType(v *string) *AuthIdentityUpdate {
	if v != nil {
		_u.SetProviderType(*v)
	}
	return _u
}

// SetProviderKey sets the "provider_key" field.
func (_u *AuthIdentityUpdate) SetProviderKey(v string) *AuthIdentityUpdate {
	_u.mutation.SetProviderKey(v)
	return _u
}

// SetNillableProviderKey sets the "provider_key" field if the given value is not nil.
func (_u *AuthIdentityUpdate) SetNillableProviderKey(v *string) *AuthIdentityUpdate {
	if v != nil {
		_u.SetProviderKey(*v)
	}
	return _u
}

// SetProviderSubject sets the "provider_subject" field.
func (_u *AuthIdentityUpdate) SetProviderSubject(v string) *AuthIdentityUpdate {
	_u.mutation.SetProviderSubject(v)
	return _u
}

// SetNillableProviderSubject sets the "provider_subject" field if the given value is not nil.
func (_u *AuthIdentityUpdate) SetNillableProviderSubject(v *string) *AuthIdentityUpdate {
	if v != nil {
		_u.SetProviderSubject(*v)
	}
	return _u
}

// SetVerifiedAt sets the "verified_at" field.
func (_u *AuthIdentityUpdate) SetVerifiedAt(v time.Time) *AuthIdentityUpdate {
	_u.mutation.SetVerifiedAt(v)
	return _u
}

// SetNillableVerifiedAt sets the "verified_at" field if the given value is not nil.
func (_u *AuthIdentityUpdate) SetNillableVerifiedAt(v *time.Time) *AuthIdentityUpdate {
	if v != nil {
		_u.SetVerifiedAt(*v)
	}
	return _u
}

// ClearVerifiedAt clears the value of the "verified_at" field.
func (_u *AuthIdentityUpdate) ClearVerifiedAt() *AuthIdentityUpdate {
	_u.mutation.ClearVerifiedAt()
	return _u
}

// SetIssuer sets the "issuer" field.
func (_u *AuthIdentityUpdate) SetIssuer(v string) *AuthIdentityUpdate {
	_u.mutation.SetIssuer(v)
	return _u
}

// SetNillableIssuer sets the "issuer" field if the given value is not nil.
func (_u *AuthIdentityUpdate) SetNillableIssuer(v *string) *AuthIdentityUpdate {
	if v != nil {
		_u.SetIssuer(*v)
	}
	return _u
}

// ClearIssuer clears the value of the "issuer" field.
func (_u *AuthIdentityUpdate) ClearIssuer() *AuthIdentityUpdate {
	_u.mutation.ClearIssuer()
	return _u
}

// SetMetadata sets the "metadata" field.
func (_u *AuthIdentityUpdate) SetMetadata(v map[string]interface{}) *AuthIdentityUpdate {
	_u.mutation.SetMetadata(v)
	return _u
}
|
||||
|
||||
// SetUser sets the "user" edge to the User entity.
// It is a convenience wrapper around SetUserID.
func (_u *AuthIdentityUpdate) SetUser(v *User) *AuthIdentityUpdate {
	return _u.SetUserID(v.ID)
}

// AddChannelIDs adds the "channels" edge to the AuthIdentityChannel entity by IDs.
func (_u *AuthIdentityUpdate) AddChannelIDs(ids ...int64) *AuthIdentityUpdate {
	_u.mutation.AddChannelIDs(ids...)
	return _u
}

// AddChannels adds the "channels" edges to the AuthIdentityChannel entity.
func (_u *AuthIdentityUpdate) AddChannels(v ...*AuthIdentityChannel) *AuthIdentityUpdate {
	ids := make([]int64, len(v))
	for i := range v {
		ids[i] = v[i].ID
	}
	return _u.AddChannelIDs(ids...)
}

// AddAdoptionDecisionIDs adds the "adoption_decisions" edge to the IdentityAdoptionDecision entity by IDs.
func (_u *AuthIdentityUpdate) AddAdoptionDecisionIDs(ids ...int64) *AuthIdentityUpdate {
	_u.mutation.AddAdoptionDecisionIDs(ids...)
	return _u
}

// AddAdoptionDecisions adds the "adoption_decisions" edges to the IdentityAdoptionDecision entity.
func (_u *AuthIdentityUpdate) AddAdoptionDecisions(v ...*IdentityAdoptionDecision) *AuthIdentityUpdate {
	ids := make([]int64, len(v))
	for i := range v {
		ids[i] = v[i].ID
	}
	return _u.AddAdoptionDecisionIDs(ids...)
}

// Mutation returns the AuthIdentityMutation object of the builder.
func (_u *AuthIdentityUpdate) Mutation() *AuthIdentityMutation {
	return _u.mutation
}
|
||||
|
||||
// ClearUser clears the "user" edge to the User entity.
// Note: "user" is a required edge; check() rejects clearing without a new ID.
func (_u *AuthIdentityUpdate) ClearUser() *AuthIdentityUpdate {
	_u.mutation.ClearUser()
	return _u
}

// ClearChannels clears all "channels" edges to the AuthIdentityChannel entity.
func (_u *AuthIdentityUpdate) ClearChannels() *AuthIdentityUpdate {
	_u.mutation.ClearChannels()
	return _u
}

// RemoveChannelIDs removes the "channels" edge to AuthIdentityChannel entities by IDs.
func (_u *AuthIdentityUpdate) RemoveChannelIDs(ids ...int64) *AuthIdentityUpdate {
	_u.mutation.RemoveChannelIDs(ids...)
	return _u
}

// RemoveChannels removes "channels" edges to AuthIdentityChannel entities.
func (_u *AuthIdentityUpdate) RemoveChannels(v ...*AuthIdentityChannel) *AuthIdentityUpdate {
	ids := make([]int64, len(v))
	for i := range v {
		ids[i] = v[i].ID
	}
	return _u.RemoveChannelIDs(ids...)
}

// ClearAdoptionDecisions clears all "adoption_decisions" edges to the IdentityAdoptionDecision entity.
func (_u *AuthIdentityUpdate) ClearAdoptionDecisions() *AuthIdentityUpdate {
	_u.mutation.ClearAdoptionDecisions()
	return _u
}

// RemoveAdoptionDecisionIDs removes the "adoption_decisions" edge to IdentityAdoptionDecision entities by IDs.
func (_u *AuthIdentityUpdate) RemoveAdoptionDecisionIDs(ids ...int64) *AuthIdentityUpdate {
	_u.mutation.RemoveAdoptionDecisionIDs(ids...)
	return _u
}

// RemoveAdoptionDecisions removes "adoption_decisions" edges to IdentityAdoptionDecision entities.
func (_u *AuthIdentityUpdate) RemoveAdoptionDecisions(v ...*IdentityAdoptionDecision) *AuthIdentityUpdate {
	ids := make([]int64, len(v))
	for i := range v {
		ids[i] = v[i].ID
	}
	return _u.RemoveAdoptionDecisionIDs(ids...)
}
|
||||
|
||||
// Save executes the query and returns the number of nodes affected by the update operation.
// Defaults (e.g. updated_at) are applied before hooks and the SQL save run.
func (_u *AuthIdentityUpdate) Save(ctx context.Context) (int, error) {
	_u.defaults()
	return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks)
}

// SaveX is like Save, but panics if an error occurs.
func (_u *AuthIdentityUpdate) SaveX(ctx context.Context) int {
	affected, err := _u.Save(ctx)
	if err != nil {
		panic(err)
	}
	return affected
}

// Exec executes the query.
func (_u *AuthIdentityUpdate) Exec(ctx context.Context) error {
	_, err := _u.Save(ctx)
	return err
}

// ExecX is like Exec, but panics if an error occurs.
func (_u *AuthIdentityUpdate) ExecX(ctx context.Context) {
	if err := _u.Exec(ctx); err != nil {
		panic(err)
	}
}
|
||||
|
||||
// defaults sets the default values of the builder before save.
// Currently this auto-populates "updated_at" when the caller did not set it.
func (_u *AuthIdentityUpdate) defaults() {
	if _, ok := _u.mutation.UpdatedAt(); !ok {
		v := authidentity.UpdateDefaultUpdatedAt()
		_u.mutation.SetUpdatedAt(v)
	}
}
|
||||
|
||||
// check runs all checks and user-defined validators on the builder.
// It validates changed string fields and rejects clearing the required
// "user" edge without providing a replacement.
func (_u *AuthIdentityUpdate) check() error {
	if v, ok := _u.mutation.ProviderType(); ok {
		if err := authidentity.ProviderTypeValidator(v); err != nil {
			return &ValidationError{Name: "provider_type", err: fmt.Errorf(`ent: validator failed for field "AuthIdentity.provider_type": %w`, err)}
		}
	}
	if v, ok := _u.mutation.ProviderKey(); ok {
		if err := authidentity.ProviderKeyValidator(v); err != nil {
			return &ValidationError{Name: "provider_key", err: fmt.Errorf(`ent: validator failed for field "AuthIdentity.provider_key": %w`, err)}
		}
	}
	if v, ok := _u.mutation.ProviderSubject(); ok {
		if err := authidentity.ProviderSubjectValidator(v); err != nil {
			return &ValidationError{Name: "provider_subject", err: fmt.Errorf(`ent: validator failed for field "AuthIdentity.provider_subject": %w`, err)}
		}
	}
	if _u.mutation.UserCleared() && len(_u.mutation.UserIDs()) > 0 {
		return errors.New(`ent: clearing a required unique edge "AuthIdentity.user"`)
	}
	return nil
}
|
||||
|
||||
// sqlSave translates the staged mutation into a sqlgraph update spec and
// executes it, returning the number of rows affected.
func (_u *AuthIdentityUpdate) sqlSave(ctx context.Context) (_node int, err error) {
	// Validate field values and edge invariants before touching the database.
	if err := _u.check(); err != nil {
		return _node, err
	}
	_spec := sqlgraph.NewUpdateSpec(authidentity.Table, authidentity.Columns, sqlgraph.NewFieldSpec(authidentity.FieldID, field.TypeInt64))
	// Apply the builder's Where predicates so only matching rows are updated.
	if ps := _u.mutation.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	// Copy staged field sets/clears into the update spec.
	if value, ok := _u.mutation.UpdatedAt(); ok {
		_spec.SetField(authidentity.FieldUpdatedAt, field.TypeTime, value)
	}
	if value, ok := _u.mutation.ProviderType(); ok {
		_spec.SetField(authidentity.FieldProviderType, field.TypeString, value)
	}
	if value, ok := _u.mutation.ProviderKey(); ok {
		_spec.SetField(authidentity.FieldProviderKey, field.TypeString, value)
	}
	if value, ok := _u.mutation.ProviderSubject(); ok {
		_spec.SetField(authidentity.FieldProviderSubject, field.TypeString, value)
	}
	if value, ok := _u.mutation.VerifiedAt(); ok {
		_spec.SetField(authidentity.FieldVerifiedAt, field.TypeTime, value)
	}
	if _u.mutation.VerifiedAtCleared() {
		_spec.ClearField(authidentity.FieldVerifiedAt, field.TypeTime)
	}
	if value, ok := _u.mutation.Issuer(); ok {
		_spec.SetField(authidentity.FieldIssuer, field.TypeString, value)
	}
	if _u.mutation.IssuerCleared() {
		_spec.ClearField(authidentity.FieldIssuer, field.TypeString)
	}
	if value, ok := _u.mutation.Metadata(); ok {
		_spec.SetField(authidentity.FieldMetadata, field.TypeJSON, value)
	}
	// user edge (M2O, inverse): clear the old link, then add the new target if one was set.
	if _u.mutation.UserCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: true,
			Table:   authidentity.UserTable,
			Columns: []string{authidentity.UserColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64),
			},
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	if nodes := _u.mutation.UserIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: true,
			Table:   authidentity.UserTable,
			Columns: []string{authidentity.UserColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Add = append(_spec.Edges.Add, edge)
	}
	// channels edge (O2M): clear-all, remove-selected (skipped when clear-all is set), then add.
	if _u.mutation.ChannelsCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.O2M,
			Inverse: false,
			Table:   authidentity.ChannelsTable,
			Columns: []string{authidentity.ChannelsColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(authidentitychannel.FieldID, field.TypeInt64),
			},
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	if nodes := _u.mutation.RemovedChannelsIDs(); len(nodes) > 0 && !_u.mutation.ChannelsCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.O2M,
			Inverse: false,
			Table:   authidentity.ChannelsTable,
			Columns: []string{authidentity.ChannelsColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(authidentitychannel.FieldID, field.TypeInt64),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	if nodes := _u.mutation.ChannelsIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.O2M,
			Inverse: false,
			Table:   authidentity.ChannelsTable,
			Columns: []string{authidentity.ChannelsColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(authidentitychannel.FieldID, field.TypeInt64),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Add = append(_spec.Edges.Add, edge)
	}
	// adoption_decisions edge (O2M): same clear/remove/add sequence as channels.
	if _u.mutation.AdoptionDecisionsCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.O2M,
			Inverse: false,
			Table:   authidentity.AdoptionDecisionsTable,
			Columns: []string{authidentity.AdoptionDecisionsColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(identityadoptiondecision.FieldID, field.TypeInt64),
			},
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	if nodes := _u.mutation.RemovedAdoptionDecisionsIDs(); len(nodes) > 0 && !_u.mutation.AdoptionDecisionsCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.O2M,
			Inverse: false,
			Table:   authidentity.AdoptionDecisionsTable,
			Columns: []string{authidentity.AdoptionDecisionsColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(identityadoptiondecision.FieldID, field.TypeInt64),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	if nodes := _u.mutation.AdoptionDecisionsIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.O2M,
			Inverse: false,
			Table:   authidentity.AdoptionDecisionsTable,
			Columns: []string{authidentity.AdoptionDecisionsColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(identityadoptiondecision.FieldID, field.TypeInt64),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Add = append(_spec.Edges.Add, edge)
	}
	// Execute the UPDATE; translate low-level sqlgraph errors into ent error types.
	if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil {
		if _, ok := err.(*sqlgraph.NotFoundError); ok {
			err = &NotFoundError{authidentity.Label}
		} else if sqlgraph.IsConstraintError(err) {
			err = &ConstraintError{msg: err.Error(), wrap: err}
		}
		return 0, err
	}
	_u.mutation.done = true
	return _node, nil
}
|
||||
|
||||
// AuthIdentityUpdateOne is the builder for updating a single AuthIdentity entity.
type AuthIdentityUpdateOne struct {
	config
	// fields restricts the columns returned by the update when set via Select.
	fields []string
	// hooks run around the save operation.
	hooks []Hook
	// mutation accumulates the staged field and edge changes.
	mutation *AuthIdentityMutation
}
|
||||
|
||||
// SetUpdatedAt sets the "updated_at" field.
// The value is only recorded on the mutation; the UPDATE happens at Save time.
func (_u *AuthIdentityUpdateOne) SetUpdatedAt(v time.Time) *AuthIdentityUpdateOne {
	_u.mutation.SetUpdatedAt(v)
	return _u
}

// SetUserID sets the "user_id" field.
func (_u *AuthIdentityUpdateOne) SetUserID(v int64) *AuthIdentityUpdateOne {
	_u.mutation.SetUserID(v)
	return _u
}

// SetNillableUserID sets the "user_id" field if the given value is not nil.
func (_u *AuthIdentityUpdateOne) SetNillableUserID(v *int64) *AuthIdentityUpdateOne {
	if v != nil {
		_u.SetUserID(*v)
	}
	return _u
}

// SetProviderType sets the "provider_type" field.
func (_u *AuthIdentityUpdateOne) SetProviderType(v string) *AuthIdentityUpdateOne {
	_u.mutation.SetProviderType(v)
	return _u
}

// SetNillableProviderType sets the "provider_type" field if the given value is not nil.
func (_u *AuthIdentityUpdateOne) SetNillableProviderType(v *string) *AuthIdentityUpdateOne {
	if v != nil {
		_u.SetProviderType(*v)
	}
	return _u
}

// SetProviderKey sets the "provider_key" field.
func (_u *AuthIdentityUpdateOne) SetProviderKey(v string) *AuthIdentityUpdateOne {
	_u.mutation.SetProviderKey(v)
	return _u
}

// SetNillableProviderKey sets the "provider_key" field if the given value is not nil.
func (_u *AuthIdentityUpdateOne) SetNillableProviderKey(v *string) *AuthIdentityUpdateOne {
	if v != nil {
		_u.SetProviderKey(*v)
	}
	return _u
}

// SetProviderSubject sets the "provider_subject" field.
func (_u *AuthIdentityUpdateOne) SetProviderSubject(v string) *AuthIdentityUpdateOne {
	_u.mutation.SetProviderSubject(v)
	return _u
}

// SetNillableProviderSubject sets the "provider_subject" field if the given value is not nil.
func (_u *AuthIdentityUpdateOne) SetNillableProviderSubject(v *string) *AuthIdentityUpdateOne {
	if v != nil {
		_u.SetProviderSubject(*v)
	}
	return _u
}

// SetVerifiedAt sets the "verified_at" field.
func (_u *AuthIdentityUpdateOne) SetVerifiedAt(v time.Time) *AuthIdentityUpdateOne {
	_u.mutation.SetVerifiedAt(v)
	return _u
}

// SetNillableVerifiedAt sets the "verified_at" field if the given value is not nil.
func (_u *AuthIdentityUpdateOne) SetNillableVerifiedAt(v *time.Time) *AuthIdentityUpdateOne {
	if v != nil {
		_u.SetVerifiedAt(*v)
	}
	return _u
}
|
||||
|
||||
// ClearVerifiedAt clears the value of the "verified_at" field.
func (_u *AuthIdentityUpdateOne) ClearVerifiedAt() *AuthIdentityUpdateOne {
	_u.mutation.ClearVerifiedAt()
	return _u
}

// SetIssuer sets the "issuer" field.
func (_u *AuthIdentityUpdateOne) SetIssuer(v string) *AuthIdentityUpdateOne {
	_u.mutation.SetIssuer(v)
	return _u
}

// SetNillableIssuer sets the "issuer" field if the given value is not nil.
func (_u *AuthIdentityUpdateOne) SetNillableIssuer(v *string) *AuthIdentityUpdateOne {
	if v != nil {
		_u.SetIssuer(*v)
	}
	return _u
}

// ClearIssuer clears the value of the "issuer" field.
func (_u *AuthIdentityUpdateOne) ClearIssuer() *AuthIdentityUpdateOne {
	_u.mutation.ClearIssuer()
	return _u
}

// SetMetadata sets the "metadata" field.
// The map replaces any previously stored metadata value as a whole.
func (_u *AuthIdentityUpdateOne) SetMetadata(v map[string]interface{}) *AuthIdentityUpdateOne {
	_u.mutation.SetMetadata(v)
	return _u
}
|
||||
|
||||
// SetUser sets the "user" edge to the User entity.
// Convenience wrapper over SetUserID using the entity's ID.
func (_u *AuthIdentityUpdateOne) SetUser(v *User) *AuthIdentityUpdateOne {
	return _u.SetUserID(v.ID)
}

// AddChannelIDs adds the "channels" edge to the AuthIdentityChannel entity by IDs.
func (_u *AuthIdentityUpdateOne) AddChannelIDs(ids ...int64) *AuthIdentityUpdateOne {
	_u.mutation.AddChannelIDs(ids...)
	return _u
}

// AddChannels adds the "channels" edges to the AuthIdentityChannel entity.
func (_u *AuthIdentityUpdateOne) AddChannels(v ...*AuthIdentityChannel) *AuthIdentityUpdateOne {
	ids := make([]int64, len(v))
	for i := range v {
		ids[i] = v[i].ID
	}
	return _u.AddChannelIDs(ids...)
}

// AddAdoptionDecisionIDs adds the "adoption_decisions" edge to the IdentityAdoptionDecision entity by IDs.
func (_u *AuthIdentityUpdateOne) AddAdoptionDecisionIDs(ids ...int64) *AuthIdentityUpdateOne {
	_u.mutation.AddAdoptionDecisionIDs(ids...)
	return _u
}

// AddAdoptionDecisions adds the "adoption_decisions" edges to the IdentityAdoptionDecision entity.
func (_u *AuthIdentityUpdateOne) AddAdoptionDecisions(v ...*IdentityAdoptionDecision) *AuthIdentityUpdateOne {
	ids := make([]int64, len(v))
	for i := range v {
		ids[i] = v[i].ID
	}
	return _u.AddAdoptionDecisionIDs(ids...)
}
|
||||
|
||||
// Mutation returns the AuthIdentityMutation object of the builder.
func (_u *AuthIdentityUpdateOne) Mutation() *AuthIdentityMutation {
	return _u.mutation
}

// ClearUser clears the "user" edge to the User entity.
// Note: "user" is a required edge; check() rejects clearing it while a new user is also set.
func (_u *AuthIdentityUpdateOne) ClearUser() *AuthIdentityUpdateOne {
	_u.mutation.ClearUser()
	return _u
}

// ClearChannels clears all "channels" edges to the AuthIdentityChannel entity.
func (_u *AuthIdentityUpdateOne) ClearChannels() *AuthIdentityUpdateOne {
	_u.mutation.ClearChannels()
	return _u
}

// RemoveChannelIDs removes the "channels" edge to AuthIdentityChannel entities by IDs.
func (_u *AuthIdentityUpdateOne) RemoveChannelIDs(ids ...int64) *AuthIdentityUpdateOne {
	_u.mutation.RemoveChannelIDs(ids...)
	return _u
}

// RemoveChannels removes "channels" edges to AuthIdentityChannel entities.
func (_u *AuthIdentityUpdateOne) RemoveChannels(v ...*AuthIdentityChannel) *AuthIdentityUpdateOne {
	ids := make([]int64, len(v))
	for i := range v {
		ids[i] = v[i].ID
	}
	return _u.RemoveChannelIDs(ids...)
}

// ClearAdoptionDecisions clears all "adoption_decisions" edges to the IdentityAdoptionDecision entity.
func (_u *AuthIdentityUpdateOne) ClearAdoptionDecisions() *AuthIdentityUpdateOne {
	_u.mutation.ClearAdoptionDecisions()
	return _u
}

// RemoveAdoptionDecisionIDs removes the "adoption_decisions" edge to IdentityAdoptionDecision entities by IDs.
func (_u *AuthIdentityUpdateOne) RemoveAdoptionDecisionIDs(ids ...int64) *AuthIdentityUpdateOne {
	_u.mutation.RemoveAdoptionDecisionIDs(ids...)
	return _u
}

// RemoveAdoptionDecisions removes "adoption_decisions" edges to IdentityAdoptionDecision entities.
func (_u *AuthIdentityUpdateOne) RemoveAdoptionDecisions(v ...*IdentityAdoptionDecision) *AuthIdentityUpdateOne {
	ids := make([]int64, len(v))
	for i := range v {
		ids[i] = v[i].ID
	}
	return _u.RemoveAdoptionDecisionIDs(ids...)
}
|
||||
|
||||
// Where appends a list of predicates to the AuthIdentityUpdateOne builder.
func (_u *AuthIdentityUpdateOne) Where(ps ...predicate.AuthIdentity) *AuthIdentityUpdateOne {
	_u.mutation.Where(ps...)
	return _u
}
|
||||
|
||||
// Select allows selecting one or more fields (columns) of the returned entity.
// The default is selecting all fields defined in the entity schema.
func (_u *AuthIdentityUpdateOne) Select(field string, fields ...string) *AuthIdentityUpdateOne {
	_u.fields = append([]string{field}, fields...)
	return _u
}

// Save executes the query and returns the updated AuthIdentity entity.
// Defaults are applied first, and registered hooks wrap the actual sqlSave call.
func (_u *AuthIdentityUpdateOne) Save(ctx context.Context) (*AuthIdentity, error) {
	_u.defaults()
	return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks)
}
|
||||
|
||||
// SaveX is like Save, but panics if an error occurs.
func (_u *AuthIdentityUpdateOne) SaveX(ctx context.Context) *AuthIdentity {
	node, err := _u.Save(ctx)
	if err != nil {
		panic(err)
	}
	return node
}

// Exec executes the query on the entity.
func (_u *AuthIdentityUpdateOne) Exec(ctx context.Context) error {
	// The updated entity is discarded; Exec reports only the error.
	_, err := _u.Save(ctx)
	return err
}

// ExecX is like Exec, but panics if an error occurs.
func (_u *AuthIdentityUpdateOne) ExecX(ctx context.Context) {
	if err := _u.Exec(ctx); err != nil {
		panic(err)
	}
}
|
||||
|
||||
// defaults sets the default values of the builder before save.
func (_u *AuthIdentityUpdateOne) defaults() {
	// Stamp updated_at with the schema's UpdateDefault unless the caller set it explicitly.
	if _, ok := _u.mutation.UpdatedAt(); !ok {
		v := authidentity.UpdateDefaultUpdatedAt()
		_u.mutation.SetUpdatedAt(v)
	}
}
|
||||
|
||||
// check runs all checks and user-defined validators on the builder.
// Each field validator only runs when the field was actually staged on the mutation.
func (_u *AuthIdentityUpdateOne) check() error {
	if v, ok := _u.mutation.ProviderType(); ok {
		if err := authidentity.ProviderTypeValidator(v); err != nil {
			return &ValidationError{Name: "provider_type", err: fmt.Errorf(`ent: validator failed for field "AuthIdentity.provider_type": %w`, err)}
		}
	}
	if v, ok := _u.mutation.ProviderKey(); ok {
		if err := authidentity.ProviderKeyValidator(v); err != nil {
			return &ValidationError{Name: "provider_key", err: fmt.Errorf(`ent: validator failed for field "AuthIdentity.provider_key": %w`, err)}
		}
	}
	if v, ok := _u.mutation.ProviderSubject(); ok {
		if err := authidentity.ProviderSubjectValidator(v); err != nil {
			return &ValidationError{Name: "provider_subject", err: fmt.Errorf(`ent: validator failed for field "AuthIdentity.provider_subject": %w`, err)}
		}
	}
	// The user edge is required and unique: reject clearing it while also assigning a new user.
	if _u.mutation.UserCleared() && len(_u.mutation.UserIDs()) > 0 {
		return errors.New(`ent: clearing a required unique edge "AuthIdentity.user"`)
	}
	return nil
}
|
||||
|
||||
// sqlSave translates the staged mutation into a sqlgraph update spec, executes
// the single-row update, and returns the refreshed entity.
func (_u *AuthIdentityUpdateOne) sqlSave(ctx context.Context) (_node *AuthIdentity, err error) {
	// Validate field values and edge invariants before touching the database.
	if err := _u.check(); err != nil {
		return _node, err
	}
	_spec := sqlgraph.NewUpdateSpec(authidentity.Table, authidentity.Columns, sqlgraph.NewFieldSpec(authidentity.FieldID, field.TypeInt64))
	// A single-row update requires the target ID on the mutation.
	id, ok := _u.mutation.ID()
	if !ok {
		return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "AuthIdentity.id" for update`)}
	}
	_spec.Node.ID.Value = id
	// Restrict the returned columns when Select was used; the ID column is always included.
	if fields := _u.fields; len(fields) > 0 {
		_spec.Node.Columns = make([]string, 0, len(fields))
		_spec.Node.Columns = append(_spec.Node.Columns, authidentity.FieldID)
		for _, f := range fields {
			if !authidentity.ValidColumn(f) {
				return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
			}
			if f != authidentity.FieldID {
				_spec.Node.Columns = append(_spec.Node.Columns, f)
			}
		}
	}
	// Apply the builder's Where predicates in addition to the ID match.
	if ps := _u.mutation.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	// Copy staged field sets/clears into the update spec.
	if value, ok := _u.mutation.UpdatedAt(); ok {
		_spec.SetField(authidentity.FieldUpdatedAt, field.TypeTime, value)
	}
	if value, ok := _u.mutation.ProviderType(); ok {
		_spec.SetField(authidentity.FieldProviderType, field.TypeString, value)
	}
	if value, ok := _u.mutation.ProviderKey(); ok {
		_spec.SetField(authidentity.FieldProviderKey, field.TypeString, value)
	}
	if value, ok := _u.mutation.ProviderSubject(); ok {
		_spec.SetField(authidentity.FieldProviderSubject, field.TypeString, value)
	}
	if value, ok := _u.mutation.VerifiedAt(); ok {
		_spec.SetField(authidentity.FieldVerifiedAt, field.TypeTime, value)
	}
	if _u.mutation.VerifiedAtCleared() {
		_spec.ClearField(authidentity.FieldVerifiedAt, field.TypeTime)
	}
	if value, ok := _u.mutation.Issuer(); ok {
		_spec.SetField(authidentity.FieldIssuer, field.TypeString, value)
	}
	if _u.mutation.IssuerCleared() {
		_spec.ClearField(authidentity.FieldIssuer, field.TypeString)
	}
	if value, ok := _u.mutation.Metadata(); ok {
		_spec.SetField(authidentity.FieldMetadata, field.TypeJSON, value)
	}
	// user edge (M2O, inverse): clear the old link, then add the new target if one was set.
	if _u.mutation.UserCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: true,
			Table:   authidentity.UserTable,
			Columns: []string{authidentity.UserColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64),
			},
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	if nodes := _u.mutation.UserIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: true,
			Table:   authidentity.UserTable,
			Columns: []string{authidentity.UserColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Add = append(_spec.Edges.Add, edge)
	}
	// channels edge (O2M): clear-all, remove-selected (skipped when clear-all is set), then add.
	if _u.mutation.ChannelsCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.O2M,
			Inverse: false,
			Table:   authidentity.ChannelsTable,
			Columns: []string{authidentity.ChannelsColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(authidentitychannel.FieldID, field.TypeInt64),
			},
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	if nodes := _u.mutation.RemovedChannelsIDs(); len(nodes) > 0 && !_u.mutation.ChannelsCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.O2M,
			Inverse: false,
			Table:   authidentity.ChannelsTable,
			Columns: []string{authidentity.ChannelsColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(authidentitychannel.FieldID, field.TypeInt64),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	if nodes := _u.mutation.ChannelsIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.O2M,
			Inverse: false,
			Table:   authidentity.ChannelsTable,
			Columns: []string{authidentity.ChannelsColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(authidentitychannel.FieldID, field.TypeInt64),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Add = append(_spec.Edges.Add, edge)
	}
	// adoption_decisions edge (O2M): same clear/remove/add sequence as channels.
	if _u.mutation.AdoptionDecisionsCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.O2M,
			Inverse: false,
			Table:   authidentity.AdoptionDecisionsTable,
			Columns: []string{authidentity.AdoptionDecisionsColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(identityadoptiondecision.FieldID, field.TypeInt64),
			},
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	if nodes := _u.mutation.RemovedAdoptionDecisionsIDs(); len(nodes) > 0 && !_u.mutation.AdoptionDecisionsCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.O2M,
			Inverse: false,
			Table:   authidentity.AdoptionDecisionsTable,
			Columns: []string{authidentity.AdoptionDecisionsColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(identityadoptiondecision.FieldID, field.TypeInt64),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	if nodes := _u.mutation.AdoptionDecisionsIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.O2M,
			Inverse: false,
			Table:   authidentity.AdoptionDecisionsTable,
			Columns: []string{authidentity.AdoptionDecisionsColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(identityadoptiondecision.FieldID, field.TypeInt64),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Add = append(_spec.Edges.Add, edge)
	}
	// Wire the returned row into a fresh entity so the caller gets the updated values.
	_node = &AuthIdentity{config: _u.config}
	_spec.Assign = _node.assignValues
	_spec.ScanValues = _node.scanValues
	// Execute the UPDATE; translate low-level sqlgraph errors into ent error types.
	if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil {
		if _, ok := err.(*sqlgraph.NotFoundError); ok {
			err = &NotFoundError{authidentity.Label}
		} else if sqlgraph.IsConstraintError(err) {
			err = &ConstraintError{msg: err.Error(), wrap: err}
		}
		return nil, err
	}
	_u.mutation.done = true
	return _node, nil
}
|
||||
228
backend/ent/authidentitychannel.go
Normal file
@@ -0,0 +1,228 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package ent
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"entgo.io/ent"
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"github.com/Wei-Shaw/sub2api/ent/authidentity"
|
||||
"github.com/Wei-Shaw/sub2api/ent/authidentitychannel"
|
||||
)
|
||||
|
||||
// AuthIdentityChannel is the model entity for the AuthIdentityChannel schema.
type AuthIdentityChannel struct {
	config `json:"-"`
	// ID of the ent.
	ID int64 `json:"id,omitempty"`
	// CreatedAt holds the value of the "created_at" field.
	CreatedAt time.Time `json:"created_at,omitempty"`
	// UpdatedAt holds the value of the "updated_at" field.
	UpdatedAt time.Time `json:"updated_at,omitempty"`
	// IdentityID holds the value of the "identity_id" field.
	IdentityID int64 `json:"identity_id,omitempty"`
	// ProviderType holds the value of the "provider_type" field.
	ProviderType string `json:"provider_type,omitempty"`
	// ProviderKey holds the value of the "provider_key" field.
	ProviderKey string `json:"provider_key,omitempty"`
	// Channel holds the value of the "channel" field.
	Channel string `json:"channel,omitempty"`
	// ChannelAppID holds the value of the "channel_app_id" field.
	ChannelAppID string `json:"channel_app_id,omitempty"`
	// ChannelSubject holds the value of the "channel_subject" field.
	ChannelSubject string `json:"channel_subject,omitempty"`
	// Metadata holds the value of the "metadata" field.
	Metadata map[string]interface{} `json:"metadata,omitempty"`
	// Edges holds the relations/edges for other nodes in the graph.
	// The values are being populated by the AuthIdentityChannelQuery when eager-loading is set.
	Edges AuthIdentityChannelEdges `json:"edges"`
	// selectValues stores dynamically selected values (modifiers, order terms); see Value.
	selectValues sql.SelectValues
}
|
||||
|
||||
// AuthIdentityChannelEdges holds the relations/edges for other nodes in the graph.
type AuthIdentityChannelEdges struct {
	// Identity holds the value of the identity edge.
	Identity *AuthIdentity `json:"identity,omitempty"`
	// loadedTypes holds the information for reporting if a
	// type was loaded (or requested) in eager-loading or not.
	loadedTypes [1]bool
}
|
||||
|
||||
// IdentityOrErr returns the Identity value or an error if the edge
|
||||
// was not loaded in eager-loading, or loaded but was not found.
|
||||
func (e AuthIdentityChannelEdges) IdentityOrErr() (*AuthIdentity, error) {
|
||||
if e.Identity != nil {
|
||||
return e.Identity, nil
|
||||
} else if e.loadedTypes[0] {
|
||||
return nil, &NotFoundError{label: authidentity.Label}
|
||||
}
|
||||
return nil, &NotLoadedError{edge: "identity"}
|
||||
}
|
||||
|
||||
// scanValues returns the types for scanning values from sql.Rows.
// One scan destination is allocated per requested column, matched by column name.
func (*AuthIdentityChannel) scanValues(columns []string) ([]any, error) {
	values := make([]any, len(columns))
	for i := range columns {
		switch columns[i] {
		case authidentitychannel.FieldMetadata:
			// JSON columns are scanned as raw bytes and decoded in assignValues.
			values[i] = new([]byte)
		case authidentitychannel.FieldID, authidentitychannel.FieldIdentityID:
			values[i] = new(sql.NullInt64)
		case authidentitychannel.FieldProviderType, authidentitychannel.FieldProviderKey, authidentitychannel.FieldChannel, authidentitychannel.FieldChannelAppID, authidentitychannel.FieldChannelSubject:
			values[i] = new(sql.NullString)
		case authidentitychannel.FieldCreatedAt, authidentitychannel.FieldUpdatedAt:
			values[i] = new(sql.NullTime)
		default:
			// Unknown columns (e.g. selected via modifiers) are captured generically.
			values[i] = new(sql.UnknownType)
		}
	}
	return values, nil
}
|
||||
|
||||
// assignValues assigns the values that were returned from sql.Rows (after scanning)
// to the AuthIdentityChannel fields.
func (_m *AuthIdentityChannel) assignValues(columns []string, values []any) error {
	if m, n := len(values), len(columns); m < n {
		return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
	}
	for i := range columns {
		switch columns[i] {
		case authidentitychannel.FieldID:
			value, ok := values[i].(*sql.NullInt64)
			if !ok {
				return fmt.Errorf("unexpected type %T for field id", value)
			}
			_m.ID = int64(value.Int64)
		case authidentitychannel.FieldCreatedAt:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field created_at", values[i])
			} else if value.Valid {
				_m.CreatedAt = value.Time
			}
		case authidentitychannel.FieldUpdatedAt:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field updated_at", values[i])
			} else if value.Valid {
				_m.UpdatedAt = value.Time
			}
		case authidentitychannel.FieldIdentityID:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field identity_id", values[i])
			} else if value.Valid {
				_m.IdentityID = value.Int64
			}
		case authidentitychannel.FieldProviderType:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field provider_type", values[i])
			} else if value.Valid {
				_m.ProviderType = value.String
			}
		case authidentitychannel.FieldProviderKey:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field provider_key", values[i])
			} else if value.Valid {
				_m.ProviderKey = value.String
			}
		case authidentitychannel.FieldChannel:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field channel", values[i])
			} else if value.Valid {
				_m.Channel = value.String
			}
		case authidentitychannel.FieldChannelAppID:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field channel_app_id", values[i])
			} else if value.Valid {
				_m.ChannelAppID = value.String
			}
		case authidentitychannel.FieldChannelSubject:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field channel_subject", values[i])
			} else if value.Valid {
				_m.ChannelSubject = value.String
			}
		case authidentitychannel.FieldMetadata:
			// Metadata arrives as raw JSON bytes; decode only non-empty payloads.
			if value, ok := values[i].(*[]byte); !ok {
				return fmt.Errorf("unexpected type %T for field metadata", values[i])
			} else if value != nil && len(*value) > 0 {
				if err := json.Unmarshal(*value, &_m.Metadata); err != nil {
					return fmt.Errorf("unmarshal field metadata: %w", err)
				}
			}
		default:
			// Columns outside the schema (modifier selections) are kept for Value lookups.
			_m.selectValues.Set(columns[i], values[i])
		}
	}
	return nil
}
|
||||
|
||||
// Value returns the ent.Value that was dynamically selected and assigned to the AuthIdentityChannel.
// This includes values selected through modifiers, order, etc.
func (_m *AuthIdentityChannel) Value(name string) (ent.Value, error) {
	return _m.selectValues.Get(name)
}

// QueryIdentity queries the "identity" edge of the AuthIdentityChannel entity.
func (_m *AuthIdentityChannel) QueryIdentity() *AuthIdentityQuery {
	return NewAuthIdentityChannelClient(_m.config).QueryIdentity(_m)
}

// Update returns a builder for updating this AuthIdentityChannel.
// Note that you need to call AuthIdentityChannel.Unwrap() before calling this method if this AuthIdentityChannel
// was returned from a transaction, and the transaction was committed or rolled back.
func (_m *AuthIdentityChannel) Update() *AuthIdentityChannelUpdateOne {
	return NewAuthIdentityChannelClient(_m.config).UpdateOne(_m)
}

// Unwrap unwraps the AuthIdentityChannel entity that was returned from a transaction after it was closed,
// so that all future queries will be executed through the driver which created the transaction.
func (_m *AuthIdentityChannel) Unwrap() *AuthIdentityChannel {
	_tx, ok := _m.config.driver.(*txDriver)
	if !ok {
		// Panics when the entity was not loaded through a transaction.
		panic("ent: AuthIdentityChannel is not a transactional entity")
	}
	_m.config.driver = _tx.drv
	return _m
}
|
||||
|
||||
// String implements the fmt.Stringer.
|
||||
func (_m *AuthIdentityChannel) String() string {
|
||||
var builder strings.Builder
|
||||
builder.WriteString("AuthIdentityChannel(")
|
||||
builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID))
|
||||
builder.WriteString("created_at=")
|
||||
builder.WriteString(_m.CreatedAt.Format(time.ANSIC))
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("updated_at=")
|
||||
builder.WriteString(_m.UpdatedAt.Format(time.ANSIC))
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("identity_id=")
|
||||
builder.WriteString(fmt.Sprintf("%v", _m.IdentityID))
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("provider_type=")
|
||||
builder.WriteString(_m.ProviderType)
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("provider_key=")
|
||||
builder.WriteString(_m.ProviderKey)
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("channel=")
|
||||
builder.WriteString(_m.Channel)
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("channel_app_id=")
|
||||
builder.WriteString(_m.ChannelAppID)
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("channel_subject=")
|
||||
builder.WriteString(_m.ChannelSubject)
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("metadata=")
|
||||
builder.WriteString(fmt.Sprintf("%v", _m.Metadata))
|
||||
builder.WriteByte(')')
|
||||
return builder.String()
|
||||
}
|
||||
|
||||
// AuthIdentityChannels is a parsable slice of AuthIdentityChannel.
// NOTE(review): presumably the plural result type used by the generated
// multi-row query code — confirm against the query builder file.
type AuthIdentityChannels []*AuthIdentityChannel
|
||||
153
backend/ent/authidentitychannel/authidentitychannel.go
Normal file
@@ -0,0 +1,153 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package authidentitychannel
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
)
|
||||
|
||||
const (
	// Label holds the string label denoting the authidentitychannel type in the database.
	Label = "auth_identity_channel"
	// FieldID holds the string denoting the id field in the database.
	FieldID = "id"
	// FieldCreatedAt holds the string denoting the created_at field in the database.
	FieldCreatedAt = "created_at"
	// FieldUpdatedAt holds the string denoting the updated_at field in the database.
	FieldUpdatedAt = "updated_at"
	// FieldIdentityID holds the string denoting the identity_id field in the database.
	FieldIdentityID = "identity_id"
	// FieldProviderType holds the string denoting the provider_type field in the database.
	FieldProviderType = "provider_type"
	// FieldProviderKey holds the string denoting the provider_key field in the database.
	FieldProviderKey = "provider_key"
	// FieldChannel holds the string denoting the channel field in the database.
	FieldChannel = "channel"
	// FieldChannelAppID holds the string denoting the channel_app_id field in the database.
	FieldChannelAppID = "channel_app_id"
	// FieldChannelSubject holds the string denoting the channel_subject field in the database.
	FieldChannelSubject = "channel_subject"
	// FieldMetadata holds the string denoting the metadata field in the database.
	FieldMetadata = "metadata"
	// EdgeIdentity holds the string denoting the identity edge name in mutations.
	EdgeIdentity = "identity"
	// Table holds the table name of the authidentitychannel in the database.
	Table = "auth_identity_channels"
	// IdentityTable is the table that holds the identity relation/edge.
	IdentityTable = "auth_identity_channels"
	// IdentityInverseTable is the table name for the AuthIdentity entity.
	// It exists in this package in order to avoid circular dependency with the "authidentity" package.
	IdentityInverseTable = "auth_identities"
	// IdentityColumn is the table column denoting the identity relation/edge.
	IdentityColumn = "identity_id"
)

// Columns holds all SQL columns for authidentitychannel fields.
var Columns = []string{
	FieldID,
	FieldCreatedAt,
	FieldUpdatedAt,
	FieldIdentityID,
	FieldProviderType,
	FieldProviderKey,
	FieldChannel,
	FieldChannelAppID,
	FieldChannelSubject,
	FieldMetadata,
}

// ValidColumn reports if the column name is valid (part of the table columns).
func ValidColumn(column string) bool {
	for _, c := range Columns {
		if c == column {
			return true
		}
	}
	return false
}
|
||||
|
||||
// NOTE(review): these hooks are nil here and are presumably assigned during
// package init by the generated runtime wiring — confirm against runtime.go.
var (
	// DefaultCreatedAt holds the default value on creation for the "created_at" field.
	DefaultCreatedAt func() time.Time
	// DefaultUpdatedAt holds the default value on creation for the "updated_at" field.
	DefaultUpdatedAt func() time.Time
	// UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field.
	UpdateDefaultUpdatedAt func() time.Time
	// ProviderTypeValidator is a validator for the "provider_type" field. It is called by the builders before save.
	ProviderTypeValidator func(string) error
	// ProviderKeyValidator is a validator for the "provider_key" field. It is called by the builders before save.
	ProviderKeyValidator func(string) error
	// ChannelValidator is a validator for the "channel" field. It is called by the builders before save.
	ChannelValidator func(string) error
	// ChannelAppIDValidator is a validator for the "channel_app_id" field. It is called by the builders before save.
	ChannelAppIDValidator func(string) error
	// ChannelSubjectValidator is a validator for the "channel_subject" field. It is called by the builders before save.
	ChannelSubjectValidator func(string) error
	// DefaultMetadata holds the default value on creation for the "metadata" field.
	DefaultMetadata func() map[string]interface{}
)
|
||||
|
||||
// OrderOption defines the ordering options for the AuthIdentityChannel queries.
// Each option receives the underlying SQL selector and appends its ordering
// terms to it (see the By* constructors below).
type OrderOption func(*sql.Selector)
|
||||
|
||||
// ByID orders the results by the id field.
|
||||
func ByID(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldID, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByCreatedAt orders the results by the created_at field.
|
||||
func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByUpdatedAt orders the results by the updated_at field.
|
||||
func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByIdentityID orders the results by the identity_id field.
|
||||
func ByIdentityID(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldIdentityID, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByProviderType orders the results by the provider_type field.
|
||||
func ByProviderType(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldProviderType, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByProviderKey orders the results by the provider_key field.
|
||||
func ByProviderKey(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldProviderKey, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByChannel orders the results by the channel field.
|
||||
func ByChannel(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldChannel, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByChannelAppID orders the results by the channel_app_id field.
|
||||
func ByChannelAppID(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldChannelAppID, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByChannelSubject orders the results by the channel_subject field.
|
||||
func ByChannelSubject(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldChannelSubject, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByIdentityField orders the results by identity field.
func ByIdentityField(field string, opts ...sql.OrderTermOption) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborTerms(s, newIdentityStep(), sql.OrderByField(field, opts...))
	}
}

// newIdentityStep builds the graph step for the "identity" edge: an M2O
// relation (inverse side) from this table's identity_id column to the
// auth_identities table. Used by the neighbor-ordering helper above.
func newIdentityStep() *sqlgraph.Step {
	return sqlgraph.NewStep(
		sqlgraph.From(Table, FieldID),
		sqlgraph.To(IdentityInverseTable, FieldID),
		sqlgraph.Edge(sqlgraph.M2O, true, IdentityTable, IdentityColumn),
	)
}
|
||||
559
backend/ent/authidentitychannel/where.go
Normal file
@@ -0,0 +1,559 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package authidentitychannel
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||
)
|
||||
|
||||
// Predicates on the entity ID (int64 primary key).

// ID filters vertices based on their ID field.
func ID(id int64) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldEQ(FieldID, id))
}

// IDEQ applies the EQ predicate on the ID field.
func IDEQ(id int64) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldEQ(FieldID, id))
}

// IDNEQ applies the NEQ predicate on the ID field.
func IDNEQ(id int64) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldNEQ(FieldID, id))
}

// IDIn applies the In predicate on the ID field.
func IDIn(ids ...int64) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldIn(FieldID, ids...))
}

// IDNotIn applies the NotIn predicate on the ID field.
func IDNotIn(ids ...int64) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldNotIn(FieldID, ids...))
}

// IDGT applies the GT predicate on the ID field.
func IDGT(id int64) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldGT(FieldID, id))
}

// IDGTE applies the GTE predicate on the ID field.
func IDGTE(id int64) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldGTE(FieldID, id))
}

// IDLT applies the LT predicate on the ID field.
func IDLT(id int64) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldLT(FieldID, id))
}

// IDLTE applies the LTE predicate on the ID field.
func IDLTE(id int64) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldLTE(FieldID, id))
}
|
||||
|
||||
// Shorthand equality predicates — each is identical to the corresponding
// <Field>EQ function below.

// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ.
func CreatedAt(v time.Time) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldEQ(FieldCreatedAt, v))
}

// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ.
func UpdatedAt(v time.Time) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldEQ(FieldUpdatedAt, v))
}

// IdentityID applies equality check predicate on the "identity_id" field. It's identical to IdentityIDEQ.
func IdentityID(v int64) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldEQ(FieldIdentityID, v))
}

// ProviderType applies equality check predicate on the "provider_type" field. It's identical to ProviderTypeEQ.
func ProviderType(v string) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldEQ(FieldProviderType, v))
}

// ProviderKey applies equality check predicate on the "provider_key" field. It's identical to ProviderKeyEQ.
func ProviderKey(v string) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldEQ(FieldProviderKey, v))
}

// Channel applies equality check predicate on the "channel" field. It's identical to ChannelEQ.
func Channel(v string) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldEQ(FieldChannel, v))
}

// ChannelAppID applies equality check predicate on the "channel_app_id" field. It's identical to ChannelAppIDEQ.
func ChannelAppID(v string) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldEQ(FieldChannelAppID, v))
}

// ChannelSubject applies equality check predicate on the "channel_subject" field. It's identical to ChannelSubjectEQ.
func ChannelSubject(v string) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldEQ(FieldChannelSubject, v))
}
|
||||
|
||||
// Predicates on the "created_at" and "updated_at" time fields.

// CreatedAtEQ applies the EQ predicate on the "created_at" field.
func CreatedAtEQ(v time.Time) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldEQ(FieldCreatedAt, v))
}

// CreatedAtNEQ applies the NEQ predicate on the "created_at" field.
func CreatedAtNEQ(v time.Time) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldNEQ(FieldCreatedAt, v))
}

// CreatedAtIn applies the In predicate on the "created_at" field.
func CreatedAtIn(vs ...time.Time) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldIn(FieldCreatedAt, vs...))
}

// CreatedAtNotIn applies the NotIn predicate on the "created_at" field.
func CreatedAtNotIn(vs ...time.Time) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldNotIn(FieldCreatedAt, vs...))
}

// CreatedAtGT applies the GT predicate on the "created_at" field.
func CreatedAtGT(v time.Time) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldGT(FieldCreatedAt, v))
}

// CreatedAtGTE applies the GTE predicate on the "created_at" field.
func CreatedAtGTE(v time.Time) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldGTE(FieldCreatedAt, v))
}

// CreatedAtLT applies the LT predicate on the "created_at" field.
func CreatedAtLT(v time.Time) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldLT(FieldCreatedAt, v))
}

// CreatedAtLTE applies the LTE predicate on the "created_at" field.
func CreatedAtLTE(v time.Time) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldLTE(FieldCreatedAt, v))
}

// UpdatedAtEQ applies the EQ predicate on the "updated_at" field.
func UpdatedAtEQ(v time.Time) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldEQ(FieldUpdatedAt, v))
}

// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field.
func UpdatedAtNEQ(v time.Time) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldNEQ(FieldUpdatedAt, v))
}

// UpdatedAtIn applies the In predicate on the "updated_at" field.
func UpdatedAtIn(vs ...time.Time) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldIn(FieldUpdatedAt, vs...))
}

// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field.
func UpdatedAtNotIn(vs ...time.Time) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldNotIn(FieldUpdatedAt, vs...))
}

// UpdatedAtGT applies the GT predicate on the "updated_at" field.
func UpdatedAtGT(v time.Time) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldGT(FieldUpdatedAt, v))
}

// UpdatedAtGTE applies the GTE predicate on the "updated_at" field.
func UpdatedAtGTE(v time.Time) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldGTE(FieldUpdatedAt, v))
}

// UpdatedAtLT applies the LT predicate on the "updated_at" field.
func UpdatedAtLT(v time.Time) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldLT(FieldUpdatedAt, v))
}

// UpdatedAtLTE applies the LTE predicate on the "updated_at" field.
func UpdatedAtLTE(v time.Time) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldLTE(FieldUpdatedAt, v))
}
|
||||
|
||||
// Predicates on the "identity_id" foreign-key field (equality/membership only).

// IdentityIDEQ applies the EQ predicate on the "identity_id" field.
func IdentityIDEQ(v int64) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldEQ(FieldIdentityID, v))
}

// IdentityIDNEQ applies the NEQ predicate on the "identity_id" field.
func IdentityIDNEQ(v int64) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldNEQ(FieldIdentityID, v))
}

// IdentityIDIn applies the In predicate on the "identity_id" field.
func IdentityIDIn(vs ...int64) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldIn(FieldIdentityID, vs...))
}

// IdentityIDNotIn applies the NotIn predicate on the "identity_id" field.
func IdentityIDNotIn(vs ...int64) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldNotIn(FieldIdentityID, vs...))
}
|
||||
|
||||
// Predicates on the "provider_type" and "provider_key" string fields.

// ProviderTypeEQ applies the EQ predicate on the "provider_type" field.
func ProviderTypeEQ(v string) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldEQ(FieldProviderType, v))
}

// ProviderTypeNEQ applies the NEQ predicate on the "provider_type" field.
func ProviderTypeNEQ(v string) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldNEQ(FieldProviderType, v))
}

// ProviderTypeIn applies the In predicate on the "provider_type" field.
func ProviderTypeIn(vs ...string) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldIn(FieldProviderType, vs...))
}

// ProviderTypeNotIn applies the NotIn predicate on the "provider_type" field.
func ProviderTypeNotIn(vs ...string) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldNotIn(FieldProviderType, vs...))
}

// ProviderTypeGT applies the GT predicate on the "provider_type" field.
func ProviderTypeGT(v string) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldGT(FieldProviderType, v))
}

// ProviderTypeGTE applies the GTE predicate on the "provider_type" field.
func ProviderTypeGTE(v string) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldGTE(FieldProviderType, v))
}

// ProviderTypeLT applies the LT predicate on the "provider_type" field.
func ProviderTypeLT(v string) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldLT(FieldProviderType, v))
}

// ProviderTypeLTE applies the LTE predicate on the "provider_type" field.
func ProviderTypeLTE(v string) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldLTE(FieldProviderType, v))
}

// ProviderTypeContains applies the Contains predicate on the "provider_type" field.
func ProviderTypeContains(v string) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldContains(FieldProviderType, v))
}

// ProviderTypeHasPrefix applies the HasPrefix predicate on the "provider_type" field.
func ProviderTypeHasPrefix(v string) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldHasPrefix(FieldProviderType, v))
}

// ProviderTypeHasSuffix applies the HasSuffix predicate on the "provider_type" field.
func ProviderTypeHasSuffix(v string) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldHasSuffix(FieldProviderType, v))
}

// ProviderTypeEqualFold applies the EqualFold predicate on the "provider_type" field.
func ProviderTypeEqualFold(v string) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldEqualFold(FieldProviderType, v))
}

// ProviderTypeContainsFold applies the ContainsFold predicate on the "provider_type" field.
func ProviderTypeContainsFold(v string) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldContainsFold(FieldProviderType, v))
}

// ProviderKeyEQ applies the EQ predicate on the "provider_key" field.
func ProviderKeyEQ(v string) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldEQ(FieldProviderKey, v))
}

// ProviderKeyNEQ applies the NEQ predicate on the "provider_key" field.
func ProviderKeyNEQ(v string) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldNEQ(FieldProviderKey, v))
}

// ProviderKeyIn applies the In predicate on the "provider_key" field.
func ProviderKeyIn(vs ...string) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldIn(FieldProviderKey, vs...))
}

// ProviderKeyNotIn applies the NotIn predicate on the "provider_key" field.
func ProviderKeyNotIn(vs ...string) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldNotIn(FieldProviderKey, vs...))
}

// ProviderKeyGT applies the GT predicate on the "provider_key" field.
func ProviderKeyGT(v string) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldGT(FieldProviderKey, v))
}

// ProviderKeyGTE applies the GTE predicate on the "provider_key" field.
func ProviderKeyGTE(v string) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldGTE(FieldProviderKey, v))
}

// ProviderKeyLT applies the LT predicate on the "provider_key" field.
func ProviderKeyLT(v string) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldLT(FieldProviderKey, v))
}

// ProviderKeyLTE applies the LTE predicate on the "provider_key" field.
func ProviderKeyLTE(v string) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldLTE(FieldProviderKey, v))
}

// ProviderKeyContains applies the Contains predicate on the "provider_key" field.
func ProviderKeyContains(v string) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldContains(FieldProviderKey, v))
}

// ProviderKeyHasPrefix applies the HasPrefix predicate on the "provider_key" field.
func ProviderKeyHasPrefix(v string) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldHasPrefix(FieldProviderKey, v))
}

// ProviderKeyHasSuffix applies the HasSuffix predicate on the "provider_key" field.
func ProviderKeyHasSuffix(v string) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldHasSuffix(FieldProviderKey, v))
}

// ProviderKeyEqualFold applies the EqualFold predicate on the "provider_key" field.
func ProviderKeyEqualFold(v string) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldEqualFold(FieldProviderKey, v))
}

// ProviderKeyContainsFold applies the ContainsFold predicate on the "provider_key" field.
func ProviderKeyContainsFold(v string) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldContainsFold(FieldProviderKey, v))
}
|
||||
|
||||
// Predicates on the "channel" and "channel_app_id" string fields.

// ChannelEQ applies the EQ predicate on the "channel" field.
func ChannelEQ(v string) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldEQ(FieldChannel, v))
}

// ChannelNEQ applies the NEQ predicate on the "channel" field.
func ChannelNEQ(v string) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldNEQ(FieldChannel, v))
}

// ChannelIn applies the In predicate on the "channel" field.
func ChannelIn(vs ...string) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldIn(FieldChannel, vs...))
}

// ChannelNotIn applies the NotIn predicate on the "channel" field.
func ChannelNotIn(vs ...string) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldNotIn(FieldChannel, vs...))
}

// ChannelGT applies the GT predicate on the "channel" field.
func ChannelGT(v string) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldGT(FieldChannel, v))
}

// ChannelGTE applies the GTE predicate on the "channel" field.
func ChannelGTE(v string) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldGTE(FieldChannel, v))
}

// ChannelLT applies the LT predicate on the "channel" field.
func ChannelLT(v string) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldLT(FieldChannel, v))
}

// ChannelLTE applies the LTE predicate on the "channel" field.
func ChannelLTE(v string) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldLTE(FieldChannel, v))
}

// ChannelContains applies the Contains predicate on the "channel" field.
func ChannelContains(v string) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldContains(FieldChannel, v))
}

// ChannelHasPrefix applies the HasPrefix predicate on the "channel" field.
func ChannelHasPrefix(v string) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldHasPrefix(FieldChannel, v))
}

// ChannelHasSuffix applies the HasSuffix predicate on the "channel" field.
func ChannelHasSuffix(v string) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldHasSuffix(FieldChannel, v))
}

// ChannelEqualFold applies the EqualFold predicate on the "channel" field.
func ChannelEqualFold(v string) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldEqualFold(FieldChannel, v))
}

// ChannelContainsFold applies the ContainsFold predicate on the "channel" field.
func ChannelContainsFold(v string) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldContainsFold(FieldChannel, v))
}

// ChannelAppIDEQ applies the EQ predicate on the "channel_app_id" field.
func ChannelAppIDEQ(v string) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldEQ(FieldChannelAppID, v))
}

// ChannelAppIDNEQ applies the NEQ predicate on the "channel_app_id" field.
func ChannelAppIDNEQ(v string) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldNEQ(FieldChannelAppID, v))
}

// ChannelAppIDIn applies the In predicate on the "channel_app_id" field.
func ChannelAppIDIn(vs ...string) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldIn(FieldChannelAppID, vs...))
}

// ChannelAppIDNotIn applies the NotIn predicate on the "channel_app_id" field.
func ChannelAppIDNotIn(vs ...string) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldNotIn(FieldChannelAppID, vs...))
}

// ChannelAppIDGT applies the GT predicate on the "channel_app_id" field.
func ChannelAppIDGT(v string) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldGT(FieldChannelAppID, v))
}

// ChannelAppIDGTE applies the GTE predicate on the "channel_app_id" field.
func ChannelAppIDGTE(v string) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldGTE(FieldChannelAppID, v))
}

// ChannelAppIDLT applies the LT predicate on the "channel_app_id" field.
func ChannelAppIDLT(v string) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldLT(FieldChannelAppID, v))
}

// ChannelAppIDLTE applies the LTE predicate on the "channel_app_id" field.
func ChannelAppIDLTE(v string) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldLTE(FieldChannelAppID, v))
}

// ChannelAppIDContains applies the Contains predicate on the "channel_app_id" field.
func ChannelAppIDContains(v string) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldContains(FieldChannelAppID, v))
}

// ChannelAppIDHasPrefix applies the HasPrefix predicate on the "channel_app_id" field.
func ChannelAppIDHasPrefix(v string) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldHasPrefix(FieldChannelAppID, v))
}

// ChannelAppIDHasSuffix applies the HasSuffix predicate on the "channel_app_id" field.
func ChannelAppIDHasSuffix(v string) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldHasSuffix(FieldChannelAppID, v))
}

// ChannelAppIDEqualFold applies the EqualFold predicate on the "channel_app_id" field.
func ChannelAppIDEqualFold(v string) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldEqualFold(FieldChannelAppID, v))
}

// ChannelAppIDContainsFold applies the ContainsFold predicate on the "channel_app_id" field.
func ChannelAppIDContainsFold(v string) predicate.AuthIdentityChannel {
	return predicate.AuthIdentityChannel(sql.FieldContainsFold(FieldChannelAppID, v))
}
|
||||
|
||||
// ChannelSubjectEQ applies the EQ predicate on the "channel_subject" field.
|
||||
func ChannelSubjectEQ(v string) predicate.AuthIdentityChannel {
|
||||
return predicate.AuthIdentityChannel(sql.FieldEQ(FieldChannelSubject, v))
|
||||
}
|
||||
|
||||
// ChannelSubjectNEQ applies the NEQ predicate on the "channel_subject" field.
|
||||
func ChannelSubjectNEQ(v string) predicate.AuthIdentityChannel {
|
||||
return predicate.AuthIdentityChannel(sql.FieldNEQ(FieldChannelSubject, v))
|
||||
}
|
||||
|
||||
// ChannelSubjectIn applies the In predicate on the "channel_subject" field.
|
||||
func ChannelSubjectIn(vs ...string) predicate.AuthIdentityChannel {
|
||||
return predicate.AuthIdentityChannel(sql.FieldIn(FieldChannelSubject, vs...))
|
||||
}
|
||||
|
||||
// ChannelSubjectNotIn applies the NotIn predicate on the "channel_subject" field.
|
||||
func ChannelSubjectNotIn(vs ...string) predicate.AuthIdentityChannel {
|
||||
return predicate.AuthIdentityChannel(sql.FieldNotIn(FieldChannelSubject, vs...))
|
||||
}
|
||||
|
||||
// ChannelSubjectGT applies the GT predicate on the "channel_subject" field.
|
||||
func ChannelSubjectGT(v string) predicate.AuthIdentityChannel {
|
||||
return predicate.AuthIdentityChannel(sql.FieldGT(FieldChannelSubject, v))
|
||||
}
|
||||
|
||||
// ChannelSubjectGTE applies the GTE predicate on the "channel_subject" field.
|
||||
func ChannelSubjectGTE(v string) predicate.AuthIdentityChannel {
|
||||
return predicate.AuthIdentityChannel(sql.FieldGTE(FieldChannelSubject, v))
|
||||
}
|
||||
|
||||
// ChannelSubjectLT applies the LT predicate on the "channel_subject" field.
|
||||
func ChannelSubjectLT(v string) predicate.AuthIdentityChannel {
|
||||
return predicate.AuthIdentityChannel(sql.FieldLT(FieldChannelSubject, v))
|
||||
}
|
||||
|
||||
// ChannelSubjectLTE applies the LTE predicate on the "channel_subject" field.
|
||||
func ChannelSubjectLTE(v string) predicate.AuthIdentityChannel {
|
||||
return predicate.AuthIdentityChannel(sql.FieldLTE(FieldChannelSubject, v))
|
||||
}
|
||||
|
||||
// ChannelSubjectContains applies the Contains predicate on the "channel_subject" field.
|
||||
func ChannelSubjectContains(v string) predicate.AuthIdentityChannel {
|
||||
return predicate.AuthIdentityChannel(sql.FieldContains(FieldChannelSubject, v))
|
||||
}
|
||||
|
||||
// ChannelSubjectHasPrefix applies the HasPrefix predicate on the "channel_subject" field.
|
||||
func ChannelSubjectHasPrefix(v string) predicate.AuthIdentityChannel {
|
||||
return predicate.AuthIdentityChannel(sql.FieldHasPrefix(FieldChannelSubject, v))
|
||||
}
|
||||
|
||||
// ChannelSubjectHasSuffix applies the HasSuffix predicate on the "channel_subject" field.
|
||||
func ChannelSubjectHasSuffix(v string) predicate.AuthIdentityChannel {
|
||||
return predicate.AuthIdentityChannel(sql.FieldHasSuffix(FieldChannelSubject, v))
|
||||
}
|
||||
|
||||
// ChannelSubjectEqualFold applies the EqualFold predicate on the "channel_subject" field.
|
||||
func ChannelSubjectEqualFold(v string) predicate.AuthIdentityChannel {
|
||||
return predicate.AuthIdentityChannel(sql.FieldEqualFold(FieldChannelSubject, v))
|
||||
}
|
||||
|
||||
// ChannelSubjectContainsFold applies the ContainsFold predicate on the "channel_subject" field.
|
||||
func ChannelSubjectContainsFold(v string) predicate.AuthIdentityChannel {
|
||||
return predicate.AuthIdentityChannel(sql.FieldContainsFold(FieldChannelSubject, v))
|
||||
}
|
||||
|
||||
// HasIdentity applies the HasEdge predicate on the "identity" edge.
|
||||
func HasIdentity() predicate.AuthIdentityChannel {
|
||||
return predicate.AuthIdentityChannel(func(s *sql.Selector) {
|
||||
step := sqlgraph.NewStep(
|
||||
sqlgraph.From(Table, FieldID),
|
||||
sqlgraph.Edge(sqlgraph.M2O, true, IdentityTable, IdentityColumn),
|
||||
)
|
||||
sqlgraph.HasNeighbors(s, step)
|
||||
})
|
||||
}
|
||||
|
||||
// HasIdentityWith applies the HasEdge predicate on the "identity" edge with a given conditions (other predicates).
|
||||
func HasIdentityWith(preds ...predicate.AuthIdentity) predicate.AuthIdentityChannel {
|
||||
return predicate.AuthIdentityChannel(func(s *sql.Selector) {
|
||||
step := newIdentityStep()
|
||||
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
|
||||
for _, p := range preds {
|
||||
p(s)
|
||||
}
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
// And groups predicates with the AND operator between them.
|
||||
func And(predicates ...predicate.AuthIdentityChannel) predicate.AuthIdentityChannel {
|
||||
return predicate.AuthIdentityChannel(sql.AndPredicates(predicates...))
|
||||
}
|
||||
|
||||
// Or groups predicates with the OR operator between them.
|
||||
func Or(predicates ...predicate.AuthIdentityChannel) predicate.AuthIdentityChannel {
|
||||
return predicate.AuthIdentityChannel(sql.OrPredicates(predicates...))
|
||||
}
|
||||
|
||||
// Not applies the not operator on the given predicate.
|
||||
func Not(p predicate.AuthIdentityChannel) predicate.AuthIdentityChannel {
|
||||
return predicate.AuthIdentityChannel(sql.NotPredicates(p))
|
||||
}
|
||||
932
backend/ent/authidentitychannel_create.go
Normal file
@@ -0,0 +1,932 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package ent
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"entgo.io/ent/schema/field"
|
||||
"github.com/Wei-Shaw/sub2api/ent/authidentity"
|
||||
"github.com/Wei-Shaw/sub2api/ent/authidentitychannel"
|
||||
)
|
||||
|
||||
// AuthIdentityChannelCreate is the builder for creating a AuthIdentityChannel entity.
|
||||
type AuthIdentityChannelCreate struct {
|
||||
config
|
||||
mutation *AuthIdentityChannelMutation
|
||||
hooks []Hook
|
||||
conflict []sql.ConflictOption
|
||||
}
|
||||
|
||||
// SetCreatedAt sets the "created_at" field.
|
||||
func (_c *AuthIdentityChannelCreate) SetCreatedAt(v time.Time) *AuthIdentityChannelCreate {
|
||||
_c.mutation.SetCreatedAt(v)
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetNillableCreatedAt sets the "created_at" field if the given value is not nil.
|
||||
func (_c *AuthIdentityChannelCreate) SetNillableCreatedAt(v *time.Time) *AuthIdentityChannelCreate {
|
||||
if v != nil {
|
||||
_c.SetCreatedAt(*v)
|
||||
}
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetUpdatedAt sets the "updated_at" field.
|
||||
func (_c *AuthIdentityChannelCreate) SetUpdatedAt(v time.Time) *AuthIdentityChannelCreate {
|
||||
_c.mutation.SetUpdatedAt(v)
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil.
|
||||
func (_c *AuthIdentityChannelCreate) SetNillableUpdatedAt(v *time.Time) *AuthIdentityChannelCreate {
|
||||
if v != nil {
|
||||
_c.SetUpdatedAt(*v)
|
||||
}
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetIdentityID sets the "identity_id" field.
|
||||
func (_c *AuthIdentityChannelCreate) SetIdentityID(v int64) *AuthIdentityChannelCreate {
|
||||
_c.mutation.SetIdentityID(v)
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetProviderType sets the "provider_type" field.
|
||||
func (_c *AuthIdentityChannelCreate) SetProviderType(v string) *AuthIdentityChannelCreate {
|
||||
_c.mutation.SetProviderType(v)
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetProviderKey sets the "provider_key" field.
|
||||
func (_c *AuthIdentityChannelCreate) SetProviderKey(v string) *AuthIdentityChannelCreate {
|
||||
_c.mutation.SetProviderKey(v)
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetChannel sets the "channel" field.
|
||||
func (_c *AuthIdentityChannelCreate) SetChannel(v string) *AuthIdentityChannelCreate {
|
||||
_c.mutation.SetChannel(v)
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetChannelAppID sets the "channel_app_id" field.
|
||||
func (_c *AuthIdentityChannelCreate) SetChannelAppID(v string) *AuthIdentityChannelCreate {
|
||||
_c.mutation.SetChannelAppID(v)
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetChannelSubject sets the "channel_subject" field.
|
||||
func (_c *AuthIdentityChannelCreate) SetChannelSubject(v string) *AuthIdentityChannelCreate {
|
||||
_c.mutation.SetChannelSubject(v)
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetMetadata sets the "metadata" field.
|
||||
func (_c *AuthIdentityChannelCreate) SetMetadata(v map[string]interface{}) *AuthIdentityChannelCreate {
|
||||
_c.mutation.SetMetadata(v)
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetIdentity sets the "identity" edge to the AuthIdentity entity.
|
||||
func (_c *AuthIdentityChannelCreate) SetIdentity(v *AuthIdentity) *AuthIdentityChannelCreate {
|
||||
return _c.SetIdentityID(v.ID)
|
||||
}
|
||||
|
||||
// Mutation returns the AuthIdentityChannelMutation object of the builder.
|
||||
func (_c *AuthIdentityChannelCreate) Mutation() *AuthIdentityChannelMutation {
|
||||
return _c.mutation
|
||||
}
|
||||
|
||||
// Save creates the AuthIdentityChannel in the database.
|
||||
func (_c *AuthIdentityChannelCreate) Save(ctx context.Context) (*AuthIdentityChannel, error) {
|
||||
_c.defaults()
|
||||
return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks)
|
||||
}
|
||||
|
||||
// SaveX calls Save and panics if Save returns an error.
|
||||
func (_c *AuthIdentityChannelCreate) SaveX(ctx context.Context) *AuthIdentityChannel {
|
||||
v, err := _c.Save(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// Exec executes the query.
|
||||
func (_c *AuthIdentityChannelCreate) Exec(ctx context.Context) error {
|
||||
_, err := _c.Save(ctx)
|
||||
return err
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (_c *AuthIdentityChannelCreate) ExecX(ctx context.Context) {
|
||||
if err := _c.Exec(ctx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// defaults sets the default values of the builder before save.
|
||||
func (_c *AuthIdentityChannelCreate) defaults() {
|
||||
if _, ok := _c.mutation.CreatedAt(); !ok {
|
||||
v := authidentitychannel.DefaultCreatedAt()
|
||||
_c.mutation.SetCreatedAt(v)
|
||||
}
|
||||
if _, ok := _c.mutation.UpdatedAt(); !ok {
|
||||
v := authidentitychannel.DefaultUpdatedAt()
|
||||
_c.mutation.SetUpdatedAt(v)
|
||||
}
|
||||
if _, ok := _c.mutation.Metadata(); !ok {
|
||||
v := authidentitychannel.DefaultMetadata()
|
||||
_c.mutation.SetMetadata(v)
|
||||
}
|
||||
}
|
||||
|
||||
// check runs all checks and user-defined validators on the builder.
|
||||
func (_c *AuthIdentityChannelCreate) check() error {
|
||||
if _, ok := _c.mutation.CreatedAt(); !ok {
|
||||
return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "AuthIdentityChannel.created_at"`)}
|
||||
}
|
||||
if _, ok := _c.mutation.UpdatedAt(); !ok {
|
||||
return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "AuthIdentityChannel.updated_at"`)}
|
||||
}
|
||||
if _, ok := _c.mutation.IdentityID(); !ok {
|
||||
return &ValidationError{Name: "identity_id", err: errors.New(`ent: missing required field "AuthIdentityChannel.identity_id"`)}
|
||||
}
|
||||
if _, ok := _c.mutation.ProviderType(); !ok {
|
||||
return &ValidationError{Name: "provider_type", err: errors.New(`ent: missing required field "AuthIdentityChannel.provider_type"`)}
|
||||
}
|
||||
if v, ok := _c.mutation.ProviderType(); ok {
|
||||
if err := authidentitychannel.ProviderTypeValidator(v); err != nil {
|
||||
return &ValidationError{Name: "provider_type", err: fmt.Errorf(`ent: validator failed for field "AuthIdentityChannel.provider_type": %w`, err)}
|
||||
}
|
||||
}
|
||||
if _, ok := _c.mutation.ProviderKey(); !ok {
|
||||
return &ValidationError{Name: "provider_key", err: errors.New(`ent: missing required field "AuthIdentityChannel.provider_key"`)}
|
||||
}
|
||||
if v, ok := _c.mutation.ProviderKey(); ok {
|
||||
if err := authidentitychannel.ProviderKeyValidator(v); err != nil {
|
||||
return &ValidationError{Name: "provider_key", err: fmt.Errorf(`ent: validator failed for field "AuthIdentityChannel.provider_key": %w`, err)}
|
||||
}
|
||||
}
|
||||
if _, ok := _c.mutation.Channel(); !ok {
|
||||
return &ValidationError{Name: "channel", err: errors.New(`ent: missing required field "AuthIdentityChannel.channel"`)}
|
||||
}
|
||||
if v, ok := _c.mutation.Channel(); ok {
|
||||
if err := authidentitychannel.ChannelValidator(v); err != nil {
|
||||
return &ValidationError{Name: "channel", err: fmt.Errorf(`ent: validator failed for field "AuthIdentityChannel.channel": %w`, err)}
|
||||
}
|
||||
}
|
||||
if _, ok := _c.mutation.ChannelAppID(); !ok {
|
||||
return &ValidationError{Name: "channel_app_id", err: errors.New(`ent: missing required field "AuthIdentityChannel.channel_app_id"`)}
|
||||
}
|
||||
if v, ok := _c.mutation.ChannelAppID(); ok {
|
||||
if err := authidentitychannel.ChannelAppIDValidator(v); err != nil {
|
||||
return &ValidationError{Name: "channel_app_id", err: fmt.Errorf(`ent: validator failed for field "AuthIdentityChannel.channel_app_id": %w`, err)}
|
||||
}
|
||||
}
|
||||
if _, ok := _c.mutation.ChannelSubject(); !ok {
|
||||
return &ValidationError{Name: "channel_subject", err: errors.New(`ent: missing required field "AuthIdentityChannel.channel_subject"`)}
|
||||
}
|
||||
if v, ok := _c.mutation.ChannelSubject(); ok {
|
||||
if err := authidentitychannel.ChannelSubjectValidator(v); err != nil {
|
||||
return &ValidationError{Name: "channel_subject", err: fmt.Errorf(`ent: validator failed for field "AuthIdentityChannel.channel_subject": %w`, err)}
|
||||
}
|
||||
}
|
||||
if _, ok := _c.mutation.Metadata(); !ok {
|
||||
return &ValidationError{Name: "metadata", err: errors.New(`ent: missing required field "AuthIdentityChannel.metadata"`)}
|
||||
}
|
||||
if len(_c.mutation.IdentityIDs()) == 0 {
|
||||
return &ValidationError{Name: "identity", err: errors.New(`ent: missing required edge "AuthIdentityChannel.identity"`)}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (_c *AuthIdentityChannelCreate) sqlSave(ctx context.Context) (*AuthIdentityChannel, error) {
|
||||
if err := _c.check(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
_node, _spec := _c.createSpec()
|
||||
if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil {
|
||||
if sqlgraph.IsConstraintError(err) {
|
||||
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
id := _spec.ID.Value.(int64)
|
||||
_node.ID = int64(id)
|
||||
_c.mutation.id = &_node.ID
|
||||
_c.mutation.done = true
|
||||
return _node, nil
|
||||
}
|
||||
|
||||
func (_c *AuthIdentityChannelCreate) createSpec() (*AuthIdentityChannel, *sqlgraph.CreateSpec) {
|
||||
var (
|
||||
_node = &AuthIdentityChannel{config: _c.config}
|
||||
_spec = sqlgraph.NewCreateSpec(authidentitychannel.Table, sqlgraph.NewFieldSpec(authidentitychannel.FieldID, field.TypeInt64))
|
||||
)
|
||||
_spec.OnConflict = _c.conflict
|
||||
if value, ok := _c.mutation.CreatedAt(); ok {
|
||||
_spec.SetField(authidentitychannel.FieldCreatedAt, field.TypeTime, value)
|
||||
_node.CreatedAt = value
|
||||
}
|
||||
if value, ok := _c.mutation.UpdatedAt(); ok {
|
||||
_spec.SetField(authidentitychannel.FieldUpdatedAt, field.TypeTime, value)
|
||||
_node.UpdatedAt = value
|
||||
}
|
||||
if value, ok := _c.mutation.ProviderType(); ok {
|
||||
_spec.SetField(authidentitychannel.FieldProviderType, field.TypeString, value)
|
||||
_node.ProviderType = value
|
||||
}
|
||||
if value, ok := _c.mutation.ProviderKey(); ok {
|
||||
_spec.SetField(authidentitychannel.FieldProviderKey, field.TypeString, value)
|
||||
_node.ProviderKey = value
|
||||
}
|
||||
if value, ok := _c.mutation.Channel(); ok {
|
||||
_spec.SetField(authidentitychannel.FieldChannel, field.TypeString, value)
|
||||
_node.Channel = value
|
||||
}
|
||||
if value, ok := _c.mutation.ChannelAppID(); ok {
|
||||
_spec.SetField(authidentitychannel.FieldChannelAppID, field.TypeString, value)
|
||||
_node.ChannelAppID = value
|
||||
}
|
||||
if value, ok := _c.mutation.ChannelSubject(); ok {
|
||||
_spec.SetField(authidentitychannel.FieldChannelSubject, field.TypeString, value)
|
||||
_node.ChannelSubject = value
|
||||
}
|
||||
if value, ok := _c.mutation.Metadata(); ok {
|
||||
_spec.SetField(authidentitychannel.FieldMetadata, field.TypeJSON, value)
|
||||
_node.Metadata = value
|
||||
}
|
||||
if nodes := _c.mutation.IdentityIDs(); len(nodes) > 0 {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
Rel: sqlgraph.M2O,
|
||||
Inverse: true,
|
||||
Table: authidentitychannel.IdentityTable,
|
||||
Columns: []string{authidentitychannel.IdentityColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: sqlgraph.NewFieldSpec(authidentity.FieldID, field.TypeInt64),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
edge.Target.Nodes = append(edge.Target.Nodes, k)
|
||||
}
|
||||
_node.IdentityID = nodes[0]
|
||||
_spec.Edges = append(_spec.Edges, edge)
|
||||
}
|
||||
return _node, _spec
|
||||
}
|
||||
|
||||
// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause
|
||||
// of the `INSERT` statement. For example:
|
||||
//
|
||||
// client.AuthIdentityChannel.Create().
|
||||
// SetCreatedAt(v).
|
||||
// OnConflict(
|
||||
// // Update the row with the new values
|
||||
// // the was proposed for insertion.
|
||||
// sql.ResolveWithNewValues(),
|
||||
// ).
|
||||
// // Override some of the fields with custom
|
||||
// // update values.
|
||||
// Update(func(u *ent.AuthIdentityChannelUpsert) {
|
||||
// SetCreatedAt(v+v).
|
||||
// }).
|
||||
// Exec(ctx)
|
||||
func (_c *AuthIdentityChannelCreate) OnConflict(opts ...sql.ConflictOption) *AuthIdentityChannelUpsertOne {
|
||||
_c.conflict = opts
|
||||
return &AuthIdentityChannelUpsertOne{
|
||||
create: _c,
|
||||
}
|
||||
}
|
||||
|
||||
// OnConflictColumns calls `OnConflict` and configures the columns
|
||||
// as conflict target. Using this option is equivalent to using:
|
||||
//
|
||||
// client.AuthIdentityChannel.Create().
|
||||
// OnConflict(sql.ConflictColumns(columns...)).
|
||||
// Exec(ctx)
|
||||
func (_c *AuthIdentityChannelCreate) OnConflictColumns(columns ...string) *AuthIdentityChannelUpsertOne {
|
||||
_c.conflict = append(_c.conflict, sql.ConflictColumns(columns...))
|
||||
return &AuthIdentityChannelUpsertOne{
|
||||
create: _c,
|
||||
}
|
||||
}
|
||||
|
||||
type (
|
||||
// AuthIdentityChannelUpsertOne is the builder for "upsert"-ing
|
||||
// one AuthIdentityChannel node.
|
||||
AuthIdentityChannelUpsertOne struct {
|
||||
create *AuthIdentityChannelCreate
|
||||
}
|
||||
|
||||
// AuthIdentityChannelUpsert is the "OnConflict" setter.
|
||||
AuthIdentityChannelUpsert struct {
|
||||
*sql.UpdateSet
|
||||
}
|
||||
)
|
||||
|
||||
// SetUpdatedAt sets the "updated_at" field.
|
||||
func (u *AuthIdentityChannelUpsert) SetUpdatedAt(v time.Time) *AuthIdentityChannelUpsert {
|
||||
u.Set(authidentitychannel.FieldUpdatedAt, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create.
|
||||
func (u *AuthIdentityChannelUpsert) UpdateUpdatedAt() *AuthIdentityChannelUpsert {
|
||||
u.SetExcluded(authidentitychannel.FieldUpdatedAt)
|
||||
return u
|
||||
}
|
||||
|
||||
// SetIdentityID sets the "identity_id" field.
|
||||
func (u *AuthIdentityChannelUpsert) SetIdentityID(v int64) *AuthIdentityChannelUpsert {
|
||||
u.Set(authidentitychannel.FieldIdentityID, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// UpdateIdentityID sets the "identity_id" field to the value that was provided on create.
|
||||
func (u *AuthIdentityChannelUpsert) UpdateIdentityID() *AuthIdentityChannelUpsert {
|
||||
u.SetExcluded(authidentitychannel.FieldIdentityID)
|
||||
return u
|
||||
}
|
||||
|
||||
// SetProviderType sets the "provider_type" field.
|
||||
func (u *AuthIdentityChannelUpsert) SetProviderType(v string) *AuthIdentityChannelUpsert {
|
||||
u.Set(authidentitychannel.FieldProviderType, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// UpdateProviderType sets the "provider_type" field to the value that was provided on create.
|
||||
func (u *AuthIdentityChannelUpsert) UpdateProviderType() *AuthIdentityChannelUpsert {
|
||||
u.SetExcluded(authidentitychannel.FieldProviderType)
|
||||
return u
|
||||
}
|
||||
|
||||
// SetProviderKey sets the "provider_key" field.
|
||||
func (u *AuthIdentityChannelUpsert) SetProviderKey(v string) *AuthIdentityChannelUpsert {
|
||||
u.Set(authidentitychannel.FieldProviderKey, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// UpdateProviderKey sets the "provider_key" field to the value that was provided on create.
|
||||
func (u *AuthIdentityChannelUpsert) UpdateProviderKey() *AuthIdentityChannelUpsert {
|
||||
u.SetExcluded(authidentitychannel.FieldProviderKey)
|
||||
return u
|
||||
}
|
||||
|
||||
// SetChannel sets the "channel" field.
|
||||
func (u *AuthIdentityChannelUpsert) SetChannel(v string) *AuthIdentityChannelUpsert {
|
||||
u.Set(authidentitychannel.FieldChannel, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// UpdateChannel sets the "channel" field to the value that was provided on create.
|
||||
func (u *AuthIdentityChannelUpsert) UpdateChannel() *AuthIdentityChannelUpsert {
|
||||
u.SetExcluded(authidentitychannel.FieldChannel)
|
||||
return u
|
||||
}
|
||||
|
||||
// SetChannelAppID sets the "channel_app_id" field.
|
||||
func (u *AuthIdentityChannelUpsert) SetChannelAppID(v string) *AuthIdentityChannelUpsert {
|
||||
u.Set(authidentitychannel.FieldChannelAppID, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// UpdateChannelAppID sets the "channel_app_id" field to the value that was provided on create.
|
||||
func (u *AuthIdentityChannelUpsert) UpdateChannelAppID() *AuthIdentityChannelUpsert {
|
||||
u.SetExcluded(authidentitychannel.FieldChannelAppID)
|
||||
return u
|
||||
}
|
||||
|
||||
// SetChannelSubject sets the "channel_subject" field.
|
||||
func (u *AuthIdentityChannelUpsert) SetChannelSubject(v string) *AuthIdentityChannelUpsert {
|
||||
u.Set(authidentitychannel.FieldChannelSubject, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// UpdateChannelSubject sets the "channel_subject" field to the value that was provided on create.
|
||||
func (u *AuthIdentityChannelUpsert) UpdateChannelSubject() *AuthIdentityChannelUpsert {
|
||||
u.SetExcluded(authidentitychannel.FieldChannelSubject)
|
||||
return u
|
||||
}
|
||||
|
||||
// SetMetadata sets the "metadata" field.
|
||||
func (u *AuthIdentityChannelUpsert) SetMetadata(v map[string]interface{}) *AuthIdentityChannelUpsert {
|
||||
u.Set(authidentitychannel.FieldMetadata, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// UpdateMetadata sets the "metadata" field to the value that was provided on create.
|
||||
func (u *AuthIdentityChannelUpsert) UpdateMetadata() *AuthIdentityChannelUpsert {
|
||||
u.SetExcluded(authidentitychannel.FieldMetadata)
|
||||
return u
|
||||
}
|
||||
|
||||
// UpdateNewValues updates the mutable fields using the new values that were set on create.
|
||||
// Using this option is equivalent to using:
|
||||
//
|
||||
// client.AuthIdentityChannel.Create().
|
||||
// OnConflict(
|
||||
// sql.ResolveWithNewValues(),
|
||||
// ).
|
||||
// Exec(ctx)
|
||||
func (u *AuthIdentityChannelUpsertOne) UpdateNewValues() *AuthIdentityChannelUpsertOne {
|
||||
u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues())
|
||||
u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) {
|
||||
if _, exists := u.create.mutation.CreatedAt(); exists {
|
||||
s.SetIgnore(authidentitychannel.FieldCreatedAt)
|
||||
}
|
||||
}))
|
||||
return u
|
||||
}
|
||||
|
||||
// Ignore sets each column to itself in case of conflict.
|
||||
// Using this option is equivalent to using:
|
||||
//
|
||||
// client.AuthIdentityChannel.Create().
|
||||
// OnConflict(sql.ResolveWithIgnore()).
|
||||
// Exec(ctx)
|
||||
func (u *AuthIdentityChannelUpsertOne) Ignore() *AuthIdentityChannelUpsertOne {
|
||||
u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore())
|
||||
return u
|
||||
}
|
||||
|
||||
// DoNothing configures the conflict_action to `DO NOTHING`.
|
||||
// Supported only by SQLite and PostgreSQL.
|
||||
func (u *AuthIdentityChannelUpsertOne) DoNothing() *AuthIdentityChannelUpsertOne {
|
||||
u.create.conflict = append(u.create.conflict, sql.DoNothing())
|
||||
return u
|
||||
}
|
||||
|
||||
// Update allows overriding fields `UPDATE` values. See the AuthIdentityChannelCreate.OnConflict
|
||||
// documentation for more info.
|
||||
func (u *AuthIdentityChannelUpsertOne) Update(set func(*AuthIdentityChannelUpsert)) *AuthIdentityChannelUpsertOne {
|
||||
u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) {
|
||||
set(&AuthIdentityChannelUpsert{UpdateSet: update})
|
||||
}))
|
||||
return u
|
||||
}
|
||||
|
||||
// SetUpdatedAt sets the "updated_at" field.
|
||||
func (u *AuthIdentityChannelUpsertOne) SetUpdatedAt(v time.Time) *AuthIdentityChannelUpsertOne {
|
||||
return u.Update(func(s *AuthIdentityChannelUpsert) {
|
||||
s.SetUpdatedAt(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create.
|
||||
func (u *AuthIdentityChannelUpsertOne) UpdateUpdatedAt() *AuthIdentityChannelUpsertOne {
|
||||
return u.Update(func(s *AuthIdentityChannelUpsert) {
|
||||
s.UpdateUpdatedAt()
|
||||
})
|
||||
}
|
||||
|
||||
// SetIdentityID sets the "identity_id" field.
|
||||
func (u *AuthIdentityChannelUpsertOne) SetIdentityID(v int64) *AuthIdentityChannelUpsertOne {
|
||||
return u.Update(func(s *AuthIdentityChannelUpsert) {
|
||||
s.SetIdentityID(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateIdentityID sets the "identity_id" field to the value that was provided on create.
|
||||
func (u *AuthIdentityChannelUpsertOne) UpdateIdentityID() *AuthIdentityChannelUpsertOne {
|
||||
return u.Update(func(s *AuthIdentityChannelUpsert) {
|
||||
s.UpdateIdentityID()
|
||||
})
|
||||
}
|
||||
|
||||
// SetProviderType sets the "provider_type" field.
|
||||
func (u *AuthIdentityChannelUpsertOne) SetProviderType(v string) *AuthIdentityChannelUpsertOne {
|
||||
return u.Update(func(s *AuthIdentityChannelUpsert) {
|
||||
s.SetProviderType(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateProviderType sets the "provider_type" field to the value that was provided on create.
|
||||
func (u *AuthIdentityChannelUpsertOne) UpdateProviderType() *AuthIdentityChannelUpsertOne {
|
||||
return u.Update(func(s *AuthIdentityChannelUpsert) {
|
||||
s.UpdateProviderType()
|
||||
})
|
||||
}
|
||||
|
||||
// SetProviderKey sets the "provider_key" field.
|
||||
func (u *AuthIdentityChannelUpsertOne) SetProviderKey(v string) *AuthIdentityChannelUpsertOne {
|
||||
return u.Update(func(s *AuthIdentityChannelUpsert) {
|
||||
s.SetProviderKey(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateProviderKey sets the "provider_key" field to the value that was provided on create.
|
||||
func (u *AuthIdentityChannelUpsertOne) UpdateProviderKey() *AuthIdentityChannelUpsertOne {
|
||||
return u.Update(func(s *AuthIdentityChannelUpsert) {
|
||||
s.UpdateProviderKey()
|
||||
})
|
||||
}
|
||||
|
||||
// SetChannel sets the "channel" field.
|
||||
func (u *AuthIdentityChannelUpsertOne) SetChannel(v string) *AuthIdentityChannelUpsertOne {
|
||||
return u.Update(func(s *AuthIdentityChannelUpsert) {
|
||||
s.SetChannel(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateChannel sets the "channel" field to the value that was provided on create.
|
||||
func (u *AuthIdentityChannelUpsertOne) UpdateChannel() *AuthIdentityChannelUpsertOne {
|
||||
return u.Update(func(s *AuthIdentityChannelUpsert) {
|
||||
s.UpdateChannel()
|
||||
})
|
||||
}
|
||||
|
||||
// SetChannelAppID sets the "channel_app_id" field.
|
||||
func (u *AuthIdentityChannelUpsertOne) SetChannelAppID(v string) *AuthIdentityChannelUpsertOne {
|
||||
return u.Update(func(s *AuthIdentityChannelUpsert) {
|
||||
s.SetChannelAppID(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateChannelAppID sets the "channel_app_id" field to the value that was provided on create.
|
||||
func (u *AuthIdentityChannelUpsertOne) UpdateChannelAppID() *AuthIdentityChannelUpsertOne {
|
||||
return u.Update(func(s *AuthIdentityChannelUpsert) {
|
||||
s.UpdateChannelAppID()
|
||||
})
|
||||
}
|
||||
|
||||
// SetChannelSubject sets the "channel_subject" field.
|
||||
func (u *AuthIdentityChannelUpsertOne) SetChannelSubject(v string) *AuthIdentityChannelUpsertOne {
|
||||
return u.Update(func(s *AuthIdentityChannelUpsert) {
|
||||
s.SetChannelSubject(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateChannelSubject sets the "channel_subject" field to the value that was provided on create.
|
||||
func (u *AuthIdentityChannelUpsertOne) UpdateChannelSubject() *AuthIdentityChannelUpsertOne {
|
||||
return u.Update(func(s *AuthIdentityChannelUpsert) {
|
||||
s.UpdateChannelSubject()
|
||||
})
|
||||
}
|
||||
|
||||
// SetMetadata sets the "metadata" field.
|
||||
func (u *AuthIdentityChannelUpsertOne) SetMetadata(v map[string]interface{}) *AuthIdentityChannelUpsertOne {
|
||||
return u.Update(func(s *AuthIdentityChannelUpsert) {
|
||||
s.SetMetadata(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateMetadata sets the "metadata" field to the value that was provided on create.
|
||||
func (u *AuthIdentityChannelUpsertOne) UpdateMetadata() *AuthIdentityChannelUpsertOne {
|
||||
return u.Update(func(s *AuthIdentityChannelUpsert) {
|
||||
s.UpdateMetadata()
|
||||
})
|
||||
}
|
||||
|
||||
// Exec executes the query.
|
||||
func (u *AuthIdentityChannelUpsertOne) Exec(ctx context.Context) error {
|
||||
if len(u.create.conflict) == 0 {
|
||||
return errors.New("ent: missing options for AuthIdentityChannelCreate.OnConflict")
|
||||
}
|
||||
return u.create.Exec(ctx)
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (u *AuthIdentityChannelUpsertOne) ExecX(ctx context.Context) {
|
||||
if err := u.create.Exec(ctx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Exec executes the UPSERT query and returns the inserted/updated ID.
|
||||
func (u *AuthIdentityChannelUpsertOne) ID(ctx context.Context) (id int64, err error) {
|
||||
node, err := u.create.Save(ctx)
|
||||
if err != nil {
|
||||
return id, err
|
||||
}
|
||||
return node.ID, nil
|
||||
}
|
||||
|
||||
// IDX is like ID, but panics if an error occurs.
|
||||
func (u *AuthIdentityChannelUpsertOne) IDX(ctx context.Context) int64 {
|
||||
id, err := u.ID(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return id
|
||||
}
|
||||
|
||||
// AuthIdentityChannelCreateBulk is the builder for creating many AuthIdentityChannel entities in bulk.
|
||||
type AuthIdentityChannelCreateBulk struct {
|
||||
config
|
||||
err error
|
||||
builders []*AuthIdentityChannelCreate
|
||||
conflict []sql.ConflictOption
|
||||
}
|
||||
|
||||
// Save creates the AuthIdentityChannel entities in the database.
// Each builder's hooks are chained into one mutator pipeline: invoking the
// first mutator walks the whole chain, and the last link performs a single
// batched INSERT (with any configured ON CONFLICT options).
func (_c *AuthIdentityChannelCreateBulk) Save(ctx context.Context) ([]*AuthIdentityChannel, error) {
	if _c.err != nil {
		return nil, _c.err
	}
	specs := make([]*sqlgraph.CreateSpec, len(_c.builders))
	nodes := make([]*AuthIdentityChannel, len(_c.builders))
	mutators := make([]Mutator, len(_c.builders))
	for i := range _c.builders {
		func(i int, root context.Context) {
			builder := _c.builders[i]
			builder.defaults()
			var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
				mutation, ok := m.(*AuthIdentityChannelMutation)
				if !ok {
					return nil, fmt.Errorf("unexpected mutation type %T", m)
				}
				if err := builder.check(); err != nil {
					return nil, err
				}
				builder.mutation = mutation
				var err error
				nodes[i], specs[i] = builder.createSpec()
				if i < len(mutators)-1 {
					// Not the last builder: hand control to the next mutator
					// in the chain so all specs are collected first.
					_, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation)
				} else {
					spec := &sqlgraph.BatchCreateSpec{Nodes: specs}
					spec.OnConflict = _c.conflict
					// Invoke the actual operation on the latest mutation in the chain.
					if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil {
						if sqlgraph.IsConstraintError(err) {
							err = &ConstraintError{msg: err.Error(), wrap: err}
						}
					}
				}
				if err != nil {
					return nil, err
				}
				mutation.id = &nodes[i].ID
				// If the database assigned an ID, copy it back onto the node.
				if specs[i].ID.Value != nil {
					id := specs[i].ID.Value.(int64)
					nodes[i].ID = int64(id)
				}
				mutation.done = true
				return nodes[i], nil
			})
			// Apply this builder's hooks in reverse registration order, so the
			// first-registered hook is the outermost wrapper.
			for i := len(builder.hooks) - 1; i >= 0; i-- {
				mut = builder.hooks[i](mut)
			}
			mutators[i] = mut
		}(i, ctx)
	}
	if len(mutators) > 0 {
		// Kick off the chain from the first mutator; it recursively drives
		// the rest and ultimately executes the batch insert.
		if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil {
			return nil, err
		}
	}
	return nodes, nil
}
|
||||
|
||||
// SaveX is like Save, but panics if an error occurs.
|
||||
func (_c *AuthIdentityChannelCreateBulk) SaveX(ctx context.Context) []*AuthIdentityChannel {
|
||||
v, err := _c.Save(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// Exec executes the query.
|
||||
func (_c *AuthIdentityChannelCreateBulk) Exec(ctx context.Context) error {
|
||||
_, err := _c.Save(ctx)
|
||||
return err
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (_c *AuthIdentityChannelCreateBulk) ExecX(ctx context.Context) {
|
||||
if err := _c.Exec(ctx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause
// of the `INSERT` statement. For example:
//
//	client.AuthIdentityChannel.CreateBulk(builders...).
//		OnConflict(
//			// Update the row with the new values
//			// that were proposed for insertion.
//			sql.ResolveWithNewValues(),
//		).
//		// Override some of the fields with custom
//		// update values.
//		Update(func(u *ent.AuthIdentityChannelUpsert) {
//			SetCreatedAt(v+v).
//		}).
//		Exec(ctx)
func (_c *AuthIdentityChannelCreateBulk) OnConflict(opts ...sql.ConflictOption) *AuthIdentityChannelUpsertBulk {
	_c.conflict = opts
	return &AuthIdentityChannelUpsertBulk{
		create: _c,
	}
}
|
||||
|
||||
// OnConflictColumns calls `OnConflict` and configures the columns
|
||||
// as conflict target. Using this option is equivalent to using:
|
||||
//
|
||||
// client.AuthIdentityChannel.Create().
|
||||
// OnConflict(sql.ConflictColumns(columns...)).
|
||||
// Exec(ctx)
|
||||
func (_c *AuthIdentityChannelCreateBulk) OnConflictColumns(columns ...string) *AuthIdentityChannelUpsertBulk {
|
||||
_c.conflict = append(_c.conflict, sql.ConflictColumns(columns...))
|
||||
return &AuthIdentityChannelUpsertBulk{
|
||||
create: _c,
|
||||
}
|
||||
}
|
||||
|
||||
// AuthIdentityChannelUpsertBulk is the builder for "upsert"-ing
// a bulk of AuthIdentityChannel nodes.
type AuthIdentityChannelUpsertBulk struct {
	// create is the underlying bulk-create builder whose conflict
	// options this upsert builder configures.
	create *AuthIdentityChannelCreateBulk
}
|
||||
|
||||
// UpdateNewValues updates the mutable fields using the new values that
// were set on create. Using this option is equivalent to using:
//
//	client.AuthIdentityChannel.Create().
//		OnConflict(
//			sql.ResolveWithNewValues(),
//		).
//		Exec(ctx)
func (u *AuthIdentityChannelUpsertBulk) UpdateNewValues() *AuthIdentityChannelUpsertBulk {
	u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues())
	u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) {
		// "created_at" is an immutable field: if any builder set it on
		// create, keep the stored value instead of overwriting on conflict.
		for _, b := range u.create.builders {
			if _, exists := b.mutation.CreatedAt(); exists {
				s.SetIgnore(authidentitychannel.FieldCreatedAt)
			}
		}
	}))
	return u
}
|
||||
|
||||
// Ignore sets each column to itself in case of conflict.
|
||||
// Using this option is equivalent to using:
|
||||
//
|
||||
// client.AuthIdentityChannel.Create().
|
||||
// OnConflict(sql.ResolveWithIgnore()).
|
||||
// Exec(ctx)
|
||||
func (u *AuthIdentityChannelUpsertBulk) Ignore() *AuthIdentityChannelUpsertBulk {
|
||||
u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore())
|
||||
return u
|
||||
}
|
||||
|
||||
// DoNothing configures the conflict_action to `DO NOTHING`.
|
||||
// Supported only by SQLite and PostgreSQL.
|
||||
func (u *AuthIdentityChannelUpsertBulk) DoNothing() *AuthIdentityChannelUpsertBulk {
|
||||
u.create.conflict = append(u.create.conflict, sql.DoNothing())
|
||||
return u
|
||||
}
|
||||
|
||||
// Update allows overriding fields `UPDATE` values. See the AuthIdentityChannelCreateBulk.OnConflict
|
||||
// documentation for more info.
|
||||
func (u *AuthIdentityChannelUpsertBulk) Update(set func(*AuthIdentityChannelUpsert)) *AuthIdentityChannelUpsertBulk {
|
||||
u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) {
|
||||
set(&AuthIdentityChannelUpsert{UpdateSet: update})
|
||||
}))
|
||||
return u
|
||||
}
|
||||
|
||||
// SetUpdatedAt sets the "updated_at" field.
|
||||
func (u *AuthIdentityChannelUpsertBulk) SetUpdatedAt(v time.Time) *AuthIdentityChannelUpsertBulk {
|
||||
return u.Update(func(s *AuthIdentityChannelUpsert) {
|
||||
s.SetUpdatedAt(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create.
|
||||
func (u *AuthIdentityChannelUpsertBulk) UpdateUpdatedAt() *AuthIdentityChannelUpsertBulk {
|
||||
return u.Update(func(s *AuthIdentityChannelUpsert) {
|
||||
s.UpdateUpdatedAt()
|
||||
})
|
||||
}
|
||||
|
||||
// SetIdentityID sets the "identity_id" field.
|
||||
func (u *AuthIdentityChannelUpsertBulk) SetIdentityID(v int64) *AuthIdentityChannelUpsertBulk {
|
||||
return u.Update(func(s *AuthIdentityChannelUpsert) {
|
||||
s.SetIdentityID(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateIdentityID sets the "identity_id" field to the value that was provided on create.
|
||||
func (u *AuthIdentityChannelUpsertBulk) UpdateIdentityID() *AuthIdentityChannelUpsertBulk {
|
||||
return u.Update(func(s *AuthIdentityChannelUpsert) {
|
||||
s.UpdateIdentityID()
|
||||
})
|
||||
}
|
||||
|
||||
// SetProviderType sets the "provider_type" field.
|
||||
func (u *AuthIdentityChannelUpsertBulk) SetProviderType(v string) *AuthIdentityChannelUpsertBulk {
|
||||
return u.Update(func(s *AuthIdentityChannelUpsert) {
|
||||
s.SetProviderType(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateProviderType sets the "provider_type" field to the value that was provided on create.
|
||||
func (u *AuthIdentityChannelUpsertBulk) UpdateProviderType() *AuthIdentityChannelUpsertBulk {
|
||||
return u.Update(func(s *AuthIdentityChannelUpsert) {
|
||||
s.UpdateProviderType()
|
||||
})
|
||||
}
|
||||
|
||||
// SetProviderKey sets the "provider_key" field.
|
||||
func (u *AuthIdentityChannelUpsertBulk) SetProviderKey(v string) *AuthIdentityChannelUpsertBulk {
|
||||
return u.Update(func(s *AuthIdentityChannelUpsert) {
|
||||
s.SetProviderKey(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateProviderKey sets the "provider_key" field to the value that was provided on create.
|
||||
func (u *AuthIdentityChannelUpsertBulk) UpdateProviderKey() *AuthIdentityChannelUpsertBulk {
|
||||
return u.Update(func(s *AuthIdentityChannelUpsert) {
|
||||
s.UpdateProviderKey()
|
||||
})
|
||||
}
|
||||
|
||||
// SetChannel sets the "channel" field.
|
||||
func (u *AuthIdentityChannelUpsertBulk) SetChannel(v string) *AuthIdentityChannelUpsertBulk {
|
||||
return u.Update(func(s *AuthIdentityChannelUpsert) {
|
||||
s.SetChannel(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateChannel sets the "channel" field to the value that was provided on create.
|
||||
func (u *AuthIdentityChannelUpsertBulk) UpdateChannel() *AuthIdentityChannelUpsertBulk {
|
||||
return u.Update(func(s *AuthIdentityChannelUpsert) {
|
||||
s.UpdateChannel()
|
||||
})
|
||||
}
|
||||
|
||||
// SetChannelAppID sets the "channel_app_id" field.
|
||||
func (u *AuthIdentityChannelUpsertBulk) SetChannelAppID(v string) *AuthIdentityChannelUpsertBulk {
|
||||
return u.Update(func(s *AuthIdentityChannelUpsert) {
|
||||
s.SetChannelAppID(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateChannelAppID sets the "channel_app_id" field to the value that was provided on create.
|
||||
func (u *AuthIdentityChannelUpsertBulk) UpdateChannelAppID() *AuthIdentityChannelUpsertBulk {
|
||||
return u.Update(func(s *AuthIdentityChannelUpsert) {
|
||||
s.UpdateChannelAppID()
|
||||
})
|
||||
}
|
||||
|
||||
// SetChannelSubject sets the "channel_subject" field.
|
||||
func (u *AuthIdentityChannelUpsertBulk) SetChannelSubject(v string) *AuthIdentityChannelUpsertBulk {
|
||||
return u.Update(func(s *AuthIdentityChannelUpsert) {
|
||||
s.SetChannelSubject(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateChannelSubject sets the "channel_subject" field to the value that was provided on create.
|
||||
func (u *AuthIdentityChannelUpsertBulk) UpdateChannelSubject() *AuthIdentityChannelUpsertBulk {
|
||||
return u.Update(func(s *AuthIdentityChannelUpsert) {
|
||||
s.UpdateChannelSubject()
|
||||
})
|
||||
}
|
||||
|
||||
// SetMetadata sets the "metadata" field.
|
||||
func (u *AuthIdentityChannelUpsertBulk) SetMetadata(v map[string]interface{}) *AuthIdentityChannelUpsertBulk {
|
||||
return u.Update(func(s *AuthIdentityChannelUpsert) {
|
||||
s.SetMetadata(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateMetadata sets the "metadata" field to the value that was provided on create.
|
||||
func (u *AuthIdentityChannelUpsertBulk) UpdateMetadata() *AuthIdentityChannelUpsertBulk {
|
||||
return u.Update(func(s *AuthIdentityChannelUpsert) {
|
||||
s.UpdateMetadata()
|
||||
})
|
||||
}
|
||||
|
||||
// Exec executes the query.
// It validates the builder configuration before delegating to the
// underlying bulk-create: OnConflict must be set on the bulk builder
// itself (not on individual create builders), and at least one
// conflict option must be present.
func (u *AuthIdentityChannelUpsertBulk) Exec(ctx context.Context) error {
	if u.create.err != nil {
		return u.create.err
	}
	for i, b := range u.create.builders {
		// Per-builder conflict options would silently diverge from the
		// bulk configuration; reject them explicitly.
		if len(b.conflict) != 0 {
			return fmt.Errorf("ent: OnConflict was set for builder %d. Set it on the AuthIdentityChannelCreateBulk instead", i)
		}
	}
	if len(u.create.conflict) == 0 {
		return errors.New("ent: missing options for AuthIdentityChannelCreateBulk.OnConflict")
	}
	return u.create.Exec(ctx)
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (u *AuthIdentityChannelUpsertBulk) ExecX(ctx context.Context) {
|
||||
if err := u.create.Exec(ctx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
88
backend/ent/authidentitychannel_delete.go
Normal file
@@ -0,0 +1,88 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package ent
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"entgo.io/ent/schema/field"
|
||||
"github.com/Wei-Shaw/sub2api/ent/authidentitychannel"
|
||||
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||
)
|
||||
|
||||
// AuthIdentityChannelDelete is the builder for deleting a AuthIdentityChannel entity.
type AuthIdentityChannelDelete struct {
	config
	// hooks run around the delete mutation.
	hooks []Hook
	// mutation accumulates the predicates applied via Where.
	mutation *AuthIdentityChannelMutation
}
|
||||
|
||||
// Where appends a list of predicates to the AuthIdentityChannelDelete builder.
func (_d *AuthIdentityChannelDelete) Where(ps ...predicate.AuthIdentityChannel) *AuthIdentityChannelDelete {
	_d.mutation.Where(ps...)
	return _d
}
|
||||
|
||||
// Exec executes the deletion query and returns how many vertices were deleted.
// The actual SQL execution (sqlExec) is wrapped with the registered hooks.
func (_d *AuthIdentityChannelDelete) Exec(ctx context.Context) (int, error) {
	return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks)
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (_d *AuthIdentityChannelDelete) ExecX(ctx context.Context) int {
|
||||
n, err := _d.Exec(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
// sqlExec builds and executes the DELETE statement, returning the number
// of rows removed. Constraint violations are wrapped in *ConstraintError.
func (_d *AuthIdentityChannelDelete) sqlExec(ctx context.Context) (int, error) {
	_spec := sqlgraph.NewDeleteSpec(authidentitychannel.Table, sqlgraph.NewFieldSpec(authidentitychannel.FieldID, field.TypeInt64))
	if ps := _d.mutation.predicates; len(ps) > 0 {
		// Apply all Where predicates to the DELETE selector.
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec)
	if err != nil && sqlgraph.IsConstraintError(err) {
		err = &ConstraintError{msg: err.Error(), wrap: err}
	}
	_d.mutation.done = true
	return affected, err
}
|
||||
|
||||
// AuthIdentityChannelDeleteOne is the builder for deleting a single AuthIdentityChannel entity.
type AuthIdentityChannelDeleteOne struct {
	// _d is the underlying delete builder this one-entity wrapper delegates to.
	_d *AuthIdentityChannelDelete
}
|
||||
|
||||
// Where appends a list of predicates to the underlying AuthIdentityChannelDelete builder.
func (_d *AuthIdentityChannelDeleteOne) Where(ps ...predicate.AuthIdentityChannel) *AuthIdentityChannelDeleteOne {
	_d._d.mutation.Where(ps...)
	return _d
}
|
||||
|
||||
// Exec executes the deletion query.
// Unlike the bulk Delete builder, deleting zero rows is reported as a
// *NotFoundError, since exactly one entity was expected to match.
func (_d *AuthIdentityChannelDeleteOne) Exec(ctx context.Context) error {
	n, err := _d._d.Exec(ctx)
	switch {
	case err != nil:
		return err
	case n == 0:
		return &NotFoundError{authidentitychannel.Label}
	default:
		return nil
	}
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (_d *AuthIdentityChannelDeleteOne) ExecX(ctx context.Context) {
|
||||
if err := _d.Exec(ctx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
643
backend/ent/authidentitychannel_query.go
Normal file
@@ -0,0 +1,643 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package ent
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math"
|
||||
|
||||
"entgo.io/ent"
|
||||
"entgo.io/ent/dialect"
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"entgo.io/ent/schema/field"
|
||||
"github.com/Wei-Shaw/sub2api/ent/authidentity"
|
||||
"github.com/Wei-Shaw/sub2api/ent/authidentitychannel"
|
||||
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||
)
|
||||
|
||||
// AuthIdentityChannelQuery is the builder for querying AuthIdentityChannel entities.
type AuthIdentityChannelQuery struct {
	config
	ctx        *QueryContext
	order      []authidentitychannel.OrderOption
	inters     []Interceptor
	predicates []predicate.AuthIdentityChannel
	// withIdentity, when non-nil, requests eager-loading of the "identity" edge.
	withIdentity *AuthIdentityQuery
	// modifiers are raw selector modifiers (e.g. row locks) applied to the query.
	modifiers []func(*sql.Selector)
	// intermediate query (i.e. traversal path).
	sql  *sql.Selector
	path func(context.Context) (*sql.Selector, error)
}

// Where adds a new predicate for the AuthIdentityChannelQuery builder.
func (_q *AuthIdentityChannelQuery) Where(ps ...predicate.AuthIdentityChannel) *AuthIdentityChannelQuery {
	_q.predicates = append(_q.predicates, ps...)
	return _q
}

// Limit the number of records to be returned by this query.
func (_q *AuthIdentityChannelQuery) Limit(limit int) *AuthIdentityChannelQuery {
	_q.ctx.Limit = &limit
	return _q
}

// Offset to start from.
func (_q *AuthIdentityChannelQuery) Offset(offset int) *AuthIdentityChannelQuery {
	_q.ctx.Offset = &offset
	return _q
}

// Unique configures the query builder to filter duplicate records on query.
// By default, unique is set to true, and can be disabled using this method.
func (_q *AuthIdentityChannelQuery) Unique(unique bool) *AuthIdentityChannelQuery {
	_q.ctx.Unique = &unique
	return _q
}

// Order specifies how the records should be ordered.
func (_q *AuthIdentityChannelQuery) Order(o ...authidentitychannel.OrderOption) *AuthIdentityChannelQuery {
	_q.order = append(_q.order, o...)
	return _q
}
|
||||
|
||||
// QueryIdentity chains the current query on the "identity" edge.
// The returned AuthIdentityQuery resolves lazily: its traversal path is
// built from this query's selector only when the new query executes.
func (_q *AuthIdentityChannelQuery) QueryIdentity() *AuthIdentityQuery {
	query := (&AuthIdentityClient{config: _q.config}).Query()
	query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
		if err := _q.prepareQuery(ctx); err != nil {
			return nil, err
		}
		selector := _q.sqlQuery(ctx)
		if err := selector.Err(); err != nil {
			return nil, err
		}
		// M2O edge: each AuthIdentityChannel points to one AuthIdentity.
		step := sqlgraph.NewStep(
			sqlgraph.From(authidentitychannel.Table, authidentitychannel.FieldID, selector),
			sqlgraph.To(authidentity.Table, authidentity.FieldID),
			sqlgraph.Edge(sqlgraph.M2O, true, authidentitychannel.IdentityTable, authidentitychannel.IdentityColumn),
		)
		fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step)
		return fromU, nil
	}
	return query
}
|
||||
|
||||
// First returns the first AuthIdentityChannel entity from the query.
// Returns a *NotFoundError when no AuthIdentityChannel was found.
func (_q *AuthIdentityChannelQuery) First(ctx context.Context) (*AuthIdentityChannel, error) {
	nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst))
	if err != nil {
		return nil, err
	}
	if len(nodes) == 0 {
		return nil, &NotFoundError{authidentitychannel.Label}
	}
	return nodes[0], nil
}

// FirstX is like First, but panics if an error occurs.
// A not-found result returns nil instead of panicking.
func (_q *AuthIdentityChannelQuery) FirstX(ctx context.Context) *AuthIdentityChannel {
	node, err := _q.First(ctx)
	if err != nil && !IsNotFound(err) {
		panic(err)
	}
	return node
}

// FirstID returns the first AuthIdentityChannel ID from the query.
// Returns a *NotFoundError when no AuthIdentityChannel ID was found.
func (_q *AuthIdentityChannelQuery) FirstID(ctx context.Context) (id int64, err error) {
	var ids []int64
	if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil {
		return
	}
	if len(ids) == 0 {
		err = &NotFoundError{authidentitychannel.Label}
		return
	}
	return ids[0], nil
}

// FirstIDX is like FirstID, but panics if an error occurs.
// A not-found result returns the zero ID instead of panicking.
func (_q *AuthIdentityChannelQuery) FirstIDX(ctx context.Context) int64 {
	id, err := _q.FirstID(ctx)
	if err != nil && !IsNotFound(err) {
		panic(err)
	}
	return id
}

// Only returns a single AuthIdentityChannel entity found by the query, ensuring it only returns one.
// Returns a *NotSingularError when more than one AuthIdentityChannel entity is found.
// Returns a *NotFoundError when no AuthIdentityChannel entities are found.
func (_q *AuthIdentityChannelQuery) Only(ctx context.Context) (*AuthIdentityChannel, error) {
	// Fetch up to two rows: two results proves the query is not singular.
	nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly))
	if err != nil {
		return nil, err
	}
	switch len(nodes) {
	case 1:
		return nodes[0], nil
	case 0:
		return nil, &NotFoundError{authidentitychannel.Label}
	default:
		return nil, &NotSingularError{authidentitychannel.Label}
	}
}

// OnlyX is like Only, but panics if an error occurs.
func (_q *AuthIdentityChannelQuery) OnlyX(ctx context.Context) *AuthIdentityChannel {
	node, err := _q.Only(ctx)
	if err != nil {
		panic(err)
	}
	return node
}

// OnlyID is like Only, but returns the only AuthIdentityChannel ID in the query.
// Returns a *NotSingularError when more than one AuthIdentityChannel ID is found.
// Returns a *NotFoundError when no entities are found.
func (_q *AuthIdentityChannelQuery) OnlyID(ctx context.Context) (id int64, err error) {
	var ids []int64
	if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil {
		return
	}
	switch len(ids) {
	case 1:
		id = ids[0]
	case 0:
		err = &NotFoundError{authidentitychannel.Label}
	default:
		err = &NotSingularError{authidentitychannel.Label}
	}
	return
}

// OnlyIDX is like OnlyID, but panics if an error occurs.
func (_q *AuthIdentityChannelQuery) OnlyIDX(ctx context.Context) int64 {
	id, err := _q.OnlyID(ctx)
	if err != nil {
		panic(err)
	}
	return id
}
|
||||
|
||||
// All executes the query and returns a list of AuthIdentityChannels.
func (_q *AuthIdentityChannelQuery) All(ctx context.Context) ([]*AuthIdentityChannel, error) {
	ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll)
	if err := _q.prepareQuery(ctx); err != nil {
		return nil, err
	}
	qr := querierAll[[]*AuthIdentityChannel, *AuthIdentityChannelQuery]()
	return withInterceptors[[]*AuthIdentityChannel](ctx, _q, qr, _q.inters)
}

// AllX is like All, but panics if an error occurs.
func (_q *AuthIdentityChannelQuery) AllX(ctx context.Context) []*AuthIdentityChannel {
	nodes, err := _q.All(ctx)
	if err != nil {
		panic(err)
	}
	return nodes
}

// IDs executes the query and returns a list of AuthIdentityChannel IDs.
func (_q *AuthIdentityChannelQuery) IDs(ctx context.Context) (ids []int64, err error) {
	// For graph traversals, default to unique results unless the caller
	// explicitly configured uniqueness.
	if _q.ctx.Unique == nil && _q.path != nil {
		_q.Unique(true)
	}
	ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs)
	if err = _q.Select(authidentitychannel.FieldID).Scan(ctx, &ids); err != nil {
		return nil, err
	}
	return ids, nil
}

// IDsX is like IDs, but panics if an error occurs.
func (_q *AuthIdentityChannelQuery) IDsX(ctx context.Context) []int64 {
	ids, err := _q.IDs(ctx)
	if err != nil {
		panic(err)
	}
	return ids
}

// Count returns the count of the given query.
func (_q *AuthIdentityChannelQuery) Count(ctx context.Context) (int, error) {
	ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount)
	if err := _q.prepareQuery(ctx); err != nil {
		return 0, err
	}
	return withInterceptors[int](ctx, _q, querierCount[*AuthIdentityChannelQuery](), _q.inters)
}

// CountX is like Count, but panics if an error occurs.
func (_q *AuthIdentityChannelQuery) CountX(ctx context.Context) int {
	count, err := _q.Count(ctx)
	if err != nil {
		panic(err)
	}
	return count
}

// Exist returns true if the query has elements in the graph.
func (_q *AuthIdentityChannelQuery) Exist(ctx context.Context) (bool, error) {
	ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist)
	// Existence is checked by attempting to fetch one ID.
	switch _, err := _q.FirstID(ctx); {
	case IsNotFound(err):
		return false, nil
	case err != nil:
		return false, fmt.Errorf("ent: check existence: %w", err)
	default:
		return true, nil
	}
}

// ExistX is like Exist, but panics if an error occurs.
func (_q *AuthIdentityChannelQuery) ExistX(ctx context.Context) bool {
	exist, err := _q.Exist(ctx)
	if err != nil {
		panic(err)
	}
	return exist
}
|
||||
|
||||
// Clone returns a duplicate of the AuthIdentityChannelQuery builder, including all associated steps. It can be
// used to prepare common query builders and use them differently after the clone is made.
func (_q *AuthIdentityChannelQuery) Clone() *AuthIdentityChannelQuery {
	if _q == nil {
		return nil
	}
	return &AuthIdentityChannelQuery{
		config:       _q.config,
		ctx:          _q.ctx.Clone(),
		order:        append([]authidentitychannel.OrderOption{}, _q.order...),
		inters:       append([]Interceptor{}, _q.inters...),
		predicates:   append([]predicate.AuthIdentityChannel{}, _q.predicates...),
		withIdentity: _q.withIdentity.Clone(),
		// clone intermediate query.
		sql:  _q.sql.Clone(),
		path: _q.path,
	}
}

// WithIdentity tells the query-builder to eager-load the nodes that are connected to
// the "identity" edge. The optional arguments are used to configure the query builder of the edge.
func (_q *AuthIdentityChannelQuery) WithIdentity(opts ...func(*AuthIdentityQuery)) *AuthIdentityChannelQuery {
	query := (&AuthIdentityClient{config: _q.config}).Query()
	for _, opt := range opts {
		opt(query)
	}
	_q.withIdentity = query
	return _q
}
|
||||
|
||||
// GroupBy is used to group vertices by one or more fields/columns.
// It is often used with aggregate functions, like: count, max, mean, min, sum.
//
// Example:
//
//	var v []struct {
//		CreatedAt time.Time `json:"created_at,omitempty"`
//		Count int `json:"count,omitempty"`
//	}
//
//	client.AuthIdentityChannel.Query().
//		GroupBy(authidentitychannel.FieldCreatedAt).
//		Aggregate(ent.Count()).
//		Scan(ctx, &v)
func (_q *AuthIdentityChannelQuery) GroupBy(field string, fields ...string) *AuthIdentityChannelGroupBy {
	_q.ctx.Fields = append([]string{field}, fields...)
	grbuild := &AuthIdentityChannelGroupBy{build: _q}
	grbuild.flds = &_q.ctx.Fields
	grbuild.label = authidentitychannel.Label
	grbuild.scan = grbuild.Scan
	return grbuild
}

// Select allows the selection one or more fields/columns for the given query,
// instead of selecting all fields in the entity.
//
// Example:
//
//	var v []struct {
//		CreatedAt time.Time `json:"created_at,omitempty"`
//	}
//
//	client.AuthIdentityChannel.Query().
//		Select(authidentitychannel.FieldCreatedAt).
//		Scan(ctx, &v)
func (_q *AuthIdentityChannelQuery) Select(fields ...string) *AuthIdentityChannelSelect {
	_q.ctx.Fields = append(_q.ctx.Fields, fields...)
	sbuild := &AuthIdentityChannelSelect{AuthIdentityChannelQuery: _q}
	sbuild.label = authidentitychannel.Label
	sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan
	return sbuild
}

// Aggregate returns a AuthIdentityChannelSelect configured with the given aggregations.
func (_q *AuthIdentityChannelQuery) Aggregate(fns ...AggregateFunc) *AuthIdentityChannelSelect {
	return _q.Select().Aggregate(fns...)
}
|
||||
|
||||
// prepareQuery runs traverser interceptors, validates the selected
// columns, and resolves the lazy traversal path (if any) into _q.sql.
func (_q *AuthIdentityChannelQuery) prepareQuery(ctx context.Context) error {
	for _, inter := range _q.inters {
		if inter == nil {
			return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
		}
		if trv, ok := inter.(Traverser); ok {
			if err := trv.Traverse(ctx, _q); err != nil {
				return err
			}
		}
	}
	for _, f := range _q.ctx.Fields {
		if !authidentitychannel.ValidColumn(f) {
			return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
		}
	}
	if _q.path != nil {
		// Materialize the traversal path built by a chained edge query.
		prev, err := _q.path(ctx)
		if err != nil {
			return err
		}
		_q.sql = prev
	}
	return nil
}
|
||||
|
||||
// sqlAll executes the query, scans all matching rows into entities, and
// eager-loads the "identity" edge when it was requested via WithIdentity.
func (_q *AuthIdentityChannelQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*AuthIdentityChannel, error) {
	var (
		nodes = []*AuthIdentityChannel{}
		_spec = _q.querySpec()
		// loadedTypes records which edges were eager-loaded (index 0: identity).
		loadedTypes = [1]bool{
			_q.withIdentity != nil,
		}
	)
	_spec.ScanValues = func(columns []string) ([]any, error) {
		return (*AuthIdentityChannel).scanValues(nil, columns)
	}
	_spec.Assign = func(columns []string, values []any) error {
		node := &AuthIdentityChannel{config: _q.config}
		nodes = append(nodes, node)
		node.Edges.loadedTypes = loadedTypes
		return node.assignValues(columns, values)
	}
	if len(_q.modifiers) > 0 {
		_spec.Modifiers = _q.modifiers
	}
	for i := range hooks {
		hooks[i](ctx, _spec)
	}
	if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil {
		return nil, err
	}
	if len(nodes) == 0 {
		return nodes, nil
	}
	if query := _q.withIdentity; query != nil {
		if err := _q.loadIdentity(ctx, query, nodes, nil,
			func(n *AuthIdentityChannel, e *AuthIdentity) { n.Edges.Identity = e }); err != nil {
			return nil, err
		}
	}
	return nodes, nil
}
|
||||
|
||||
// loadIdentity eager-loads the "identity" M2O edge for the given nodes in a
// single batched query: it collects the distinct identity_id foreign keys,
// fetches the matching AuthIdentity rows, and assigns each neighbor back to
// every node that references it.
func (_q *AuthIdentityChannelQuery) loadIdentity(ctx context.Context, query *AuthIdentityQuery, nodes []*AuthIdentityChannel, init func(*AuthIdentityChannel), assign func(*AuthIdentityChannel, *AuthIdentity)) error {
	ids := make([]int64, 0, len(nodes))
	// nodeids groups nodes by their foreign key, so shared identities are
	// queried once and fanned out to all referencing nodes.
	nodeids := make(map[int64][]*AuthIdentityChannel)
	for i := range nodes {
		fk := nodes[i].IdentityID
		if _, ok := nodeids[fk]; !ok {
			ids = append(ids, fk)
		}
		nodeids[fk] = append(nodeids[fk], nodes[i])
	}
	if len(ids) == 0 {
		return nil
	}
	query.Where(authidentity.IDIn(ids...))
	neighbors, err := query.All(ctx)
	if err != nil {
		return err
	}
	for _, n := range neighbors {
		nodes, ok := nodeids[n.ID]
		if !ok {
			return fmt.Errorf(`unexpected foreign-key "identity_id" returned %v`, n.ID)
		}
		for i := range nodes {
			assign(nodes[i], n)
		}
	}
	return nil
}
|
||||
|
||||
// sqlCount executes a COUNT over the current query configuration.
func (_q *AuthIdentityChannelQuery) sqlCount(ctx context.Context) (int, error) {
	_spec := _q.querySpec()
	if len(_q.modifiers) > 0 {
		_spec.Modifiers = _q.modifiers
	}
	_spec.Node.Columns = _q.ctx.Fields
	if len(_q.ctx.Fields) > 0 {
		_spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique
	}
	return sqlgraph.CountNodes(ctx, _q.driver, _spec)
}

// querySpec translates the builder state (fields, predicates, ordering,
// limit/offset, uniqueness) into a sqlgraph.QuerySpec for execution.
func (_q *AuthIdentityChannelQuery) querySpec() *sqlgraph.QuerySpec {
	_spec := sqlgraph.NewQuerySpec(authidentitychannel.Table, authidentitychannel.Columns, sqlgraph.NewFieldSpec(authidentitychannel.FieldID, field.TypeInt64))
	_spec.From = _q.sql
	if unique := _q.ctx.Unique; unique != nil {
		_spec.Unique = *unique
	} else if _q.path != nil {
		// Graph traversals default to unique results.
		_spec.Unique = true
	}
	if fields := _q.ctx.Fields; len(fields) > 0 {
		// Always select the ID column, then the requested fields.
		_spec.Node.Columns = make([]string, 0, len(fields))
		_spec.Node.Columns = append(_spec.Node.Columns, authidentitychannel.FieldID)
		for i := range fields {
			if fields[i] != authidentitychannel.FieldID {
				_spec.Node.Columns = append(_spec.Node.Columns, fields[i])
			}
		}
		if _q.withIdentity != nil {
			// The FK column is required to resolve the eager-loaded edge.
			_spec.Node.AddColumnOnce(authidentitychannel.FieldIdentityID)
		}
	}
	if ps := _q.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	if limit := _q.ctx.Limit; limit != nil {
		_spec.Limit = *limit
	}
	if offset := _q.ctx.Offset; offset != nil {
		_spec.Offset = *offset
	}
	if ps := _q.order; len(ps) > 0 {
		_spec.Order = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	return _spec
}
|
||||
|
||||
// sqlQuery builds the raw SQL selector for this query, applying distinct,
// modifiers, predicates, ordering, and limit/offset in that order.
func (_q *AuthIdentityChannelQuery) sqlQuery(ctx context.Context) *sql.Selector {
	builder := sql.Dialect(_q.driver.Dialect())
	t1 := builder.Table(authidentitychannel.Table)
	columns := _q.ctx.Fields
	if len(columns) == 0 {
		// No explicit selection: select every schema column.
		columns = authidentitychannel.Columns
	}
	selector := builder.Select(t1.Columns(columns...)...).From(t1)
	if _q.sql != nil {
		// Reuse the selector produced by an earlier traversal step and
		// re-apply the column selection on top of it.
		selector = _q.sql
		selector.Select(selector.Columns(columns...)...)
	}
	if _q.ctx.Unique != nil && *_q.ctx.Unique {
		selector.Distinct()
	}
	for _, m := range _q.modifiers {
		m(selector)
	}
	for _, p := range _q.predicates {
		p(selector)
	}
	for _, p := range _q.order {
		p(selector)
	}
	if offset := _q.ctx.Offset; offset != nil {
		// limit is mandatory for offset clause. We start
		// with default value, and override it below if needed.
		selector.Offset(*offset).Limit(math.MaxInt32)
	}
	if limit := _q.ctx.Limit; limit != nil {
		selector.Limit(*limit)
	}
	return selector
}
|
||||
|
||||
// ForUpdate locks the selected rows against concurrent updates, and prevent them from being
|
||||
// updated, deleted or "selected ... for update" by other sessions, until the transaction is
|
||||
// either committed or rolled-back.
|
||||
func (_q *AuthIdentityChannelQuery) ForUpdate(opts ...sql.LockOption) *AuthIdentityChannelQuery {
|
||||
if _q.driver.Dialect() == dialect.Postgres {
|
||||
_q.Unique(false)
|
||||
}
|
||||
_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
|
||||
s.ForUpdate(opts...)
|
||||
})
|
||||
return _q
|
||||
}
|
||||
|
||||
// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock
|
||||
// on any rows that are read. Other sessions can read the rows, but cannot modify them
|
||||
// until your transaction commits.
|
||||
func (_q *AuthIdentityChannelQuery) ForShare(opts ...sql.LockOption) *AuthIdentityChannelQuery {
|
||||
if _q.driver.Dialect() == dialect.Postgres {
|
||||
_q.Unique(false)
|
||||
}
|
||||
_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
|
||||
s.ForShare(opts...)
|
||||
})
|
||||
return _q
|
||||
}
|
||||
|
||||
// AuthIdentityChannelGroupBy is the group-by builder for AuthIdentityChannel entities.
type AuthIdentityChannelGroupBy struct {
	// selector embeds the shared field/aggregation bookkeeping (flds, fns).
	selector
	// build is the underlying query the GROUP BY is executed against.
	build *AuthIdentityChannelQuery
}
|
||||
|
||||
// Aggregate adds the given aggregation functions to the group-by query.
|
||||
func (_g *AuthIdentityChannelGroupBy) Aggregate(fns ...AggregateFunc) *AuthIdentityChannelGroupBy {
|
||||
_g.fns = append(_g.fns, fns...)
|
||||
return _g
|
||||
}
|
||||
|
||||
// Scan applies the selector query and scans the result into the given value.
func (_g *AuthIdentityChannelGroupBy) Scan(ctx context.Context, v any) error {
	// Tag the context with the group-by operation for interceptors/hooks.
	ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy)
	if err := _g.build.prepareQuery(ctx); err != nil {
		return err
	}
	// Run query interceptors around sqlScan before decoding into v.
	return scanWithInterceptors[*AuthIdentityChannelQuery, *AuthIdentityChannelGroupBy](ctx, _g.build, _g, _g.build.inters, v)
}
|
||||
|
||||
// sqlScan executes the GROUP BY query for root and scans the rows into v.
func (_g *AuthIdentityChannelGroupBy) sqlScan(ctx context.Context, root *AuthIdentityChannelQuery, v any) error {
	selector := root.sqlQuery(ctx).Select()
	aggregation := make([]string, 0, len(_g.fns))
	for _, fn := range _g.fns {
		aggregation = append(aggregation, fn(selector))
	}
	if len(selector.SelectedColumns()) == 0 {
		// Nothing selected yet: select the group-by fields first, followed by
		// the aggregation expressions.
		columns := make([]string, 0, len(*_g.flds)+len(_g.fns))
		for _, f := range *_g.flds {
			columns = append(columns, selector.C(f))
		}
		columns = append(columns, aggregation...)
		selector.Select(columns...)
	}
	selector.GroupBy(selector.Columns(*_g.flds...)...)
	if err := selector.Err(); err != nil {
		return err
	}
	rows := &sql.Rows{}
	query, args := selector.Query()
	if err := _g.build.driver.Query(ctx, query, args, rows); err != nil {
		return err
	}
	defer rows.Close()
	return sql.ScanSlice(rows, v)
}
|
||||
|
||||
// AuthIdentityChannelSelect is the builder for selecting fields of AuthIdentityChannel entities.
type AuthIdentityChannelSelect struct {
	// Embeds the full query so the selection reuses its filters and ordering.
	*AuthIdentityChannelQuery
	// selector carries the selected fields (flds) and aggregations (fns).
	selector
}
|
||||
|
||||
// Aggregate adds the given aggregation functions to the selector query.
|
||||
func (_s *AuthIdentityChannelSelect) Aggregate(fns ...AggregateFunc) *AuthIdentityChannelSelect {
|
||||
_s.fns = append(_s.fns, fns...)
|
||||
return _s
|
||||
}
|
||||
|
||||
// Scan applies the selector query and scans the result into the given value.
func (_s *AuthIdentityChannelSelect) Scan(ctx context.Context, v any) error {
	// Tag the context with the select operation for interceptors/hooks.
	ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect)
	if err := _s.prepareQuery(ctx); err != nil {
		return err
	}
	// Run query interceptors around sqlScan before decoding into v.
	return scanWithInterceptors[*AuthIdentityChannelQuery, *AuthIdentityChannelSelect](ctx, _s.AuthIdentityChannelQuery, _s, _s.inters, v)
}
|
||||
|
||||
// sqlScan executes the selection query for root and scans the rows into v.
func (_s *AuthIdentityChannelSelect) sqlScan(ctx context.Context, root *AuthIdentityChannelQuery, v any) error {
	selector := root.sqlQuery(ctx)
	aggregation := make([]string, 0, len(_s.fns))
	for _, fn := range _s.fns {
		aggregation = append(aggregation, fn(selector))
	}
	// With no selected fields, aggregations replace the selection; with
	// fields present, aggregations are appended after them.
	switch n := len(*_s.selector.flds); {
	case n == 0 && len(aggregation) > 0:
		selector.Select(aggregation...)
	case n != 0 && len(aggregation) > 0:
		selector.AppendSelect(aggregation...)
	}
	rows := &sql.Rows{}
	query, args := selector.Query()
	if err := _s.driver.Query(ctx, query, args, rows); err != nil {
		return err
	}
	defer rows.Close()
	return sql.ScanSlice(rows, v)
}
|
||||
581
backend/ent/authidentitychannel_update.go
Normal file
@@ -0,0 +1,581 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package ent
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"entgo.io/ent/schema/field"
|
||||
"github.com/Wei-Shaw/sub2api/ent/authidentity"
|
||||
"github.com/Wei-Shaw/sub2api/ent/authidentitychannel"
|
||||
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||
)
|
||||
|
||||
// AuthIdentityChannelUpdate is the builder for updating AuthIdentityChannel entities.
type AuthIdentityChannelUpdate struct {
	config
	// hooks run around the save operation (see withHooks in Save).
	hooks []Hook
	// mutation accumulates the staged field/edge changes and predicates.
	mutation *AuthIdentityChannelMutation
}
|
||||
|
||||
// Where appends a list of predicates to the AuthIdentityChannelUpdate builder.
func (_u *AuthIdentityChannelUpdate) Where(ps ...predicate.AuthIdentityChannel) *AuthIdentityChannelUpdate {
	_u.mutation.Where(ps...)
	return _u
}
|
||||
|
||||
// SetUpdatedAt sets the "updated_at" field.
// An explicit value here suppresses the automatic default applied by defaults().
func (_u *AuthIdentityChannelUpdate) SetUpdatedAt(v time.Time) *AuthIdentityChannelUpdate {
	_u.mutation.SetUpdatedAt(v)
	return _u
}
|
||||
|
||||
// SetIdentityID sets the "identity_id" field.
// identity_id also acts as the foreign key of the "identity" edge (see SetIdentity).
func (_u *AuthIdentityChannelUpdate) SetIdentityID(v int64) *AuthIdentityChannelUpdate {
	_u.mutation.SetIdentityID(v)
	return _u
}
|
||||
|
||||
// SetNillableIdentityID sets the "identity_id" field if the given value is not nil.
|
||||
func (_u *AuthIdentityChannelUpdate) SetNillableIdentityID(v *int64) *AuthIdentityChannelUpdate {
|
||||
if v != nil {
|
||||
_u.SetIdentityID(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetProviderType sets the "provider_type" field.
// The value is validated in check() at save time, not here.
func (_u *AuthIdentityChannelUpdate) SetProviderType(v string) *AuthIdentityChannelUpdate {
	_u.mutation.SetProviderType(v)
	return _u
}
|
||||
|
||||
// SetNillableProviderType sets the "provider_type" field if the given value is not nil.
|
||||
func (_u *AuthIdentityChannelUpdate) SetNillableProviderType(v *string) *AuthIdentityChannelUpdate {
|
||||
if v != nil {
|
||||
_u.SetProviderType(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetProviderKey sets the "provider_key" field.
// The value is validated in check() at save time, not here.
func (_u *AuthIdentityChannelUpdate) SetProviderKey(v string) *AuthIdentityChannelUpdate {
	_u.mutation.SetProviderKey(v)
	return _u
}
|
||||
|
||||
// SetNillableProviderKey sets the "provider_key" field if the given value is not nil.
|
||||
func (_u *AuthIdentityChannelUpdate) SetNillableProviderKey(v *string) *AuthIdentityChannelUpdate {
|
||||
if v != nil {
|
||||
_u.SetProviderKey(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetChannel sets the "channel" field.
// The value is validated in check() at save time, not here.
func (_u *AuthIdentityChannelUpdate) SetChannel(v string) *AuthIdentityChannelUpdate {
	_u.mutation.SetChannel(v)
	return _u
}
|
||||
|
||||
// SetNillableChannel sets the "channel" field if the given value is not nil.
|
||||
func (_u *AuthIdentityChannelUpdate) SetNillableChannel(v *string) *AuthIdentityChannelUpdate {
|
||||
if v != nil {
|
||||
_u.SetChannel(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetChannelAppID sets the "channel_app_id" field.
// The value is validated in check() at save time, not here.
func (_u *AuthIdentityChannelUpdate) SetChannelAppID(v string) *AuthIdentityChannelUpdate {
	_u.mutation.SetChannelAppID(v)
	return _u
}
|
||||
|
||||
// SetNillableChannelAppID sets the "channel_app_id" field if the given value is not nil.
|
||||
func (_u *AuthIdentityChannelUpdate) SetNillableChannelAppID(v *string) *AuthIdentityChannelUpdate {
|
||||
if v != nil {
|
||||
_u.SetChannelAppID(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetChannelSubject sets the "channel_subject" field.
// The value is validated in check() at save time, not here.
func (_u *AuthIdentityChannelUpdate) SetChannelSubject(v string) *AuthIdentityChannelUpdate {
	_u.mutation.SetChannelSubject(v)
	return _u
}
|
||||
|
||||
// SetNillableChannelSubject sets the "channel_subject" field if the given value is not nil.
|
||||
func (_u *AuthIdentityChannelUpdate) SetNillableChannelSubject(v *string) *AuthIdentityChannelUpdate {
|
||||
if v != nil {
|
||||
_u.SetChannelSubject(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetMetadata sets the "metadata" field.
// The map replaces the stored JSON value wholesale; it is not merged.
func (_u *AuthIdentityChannelUpdate) SetMetadata(v map[string]interface{}) *AuthIdentityChannelUpdate {
	_u.mutation.SetMetadata(v)
	return _u
}
|
||||
|
||||
// SetIdentity sets the "identity" edge to the AuthIdentity entity.
|
||||
func (_u *AuthIdentityChannelUpdate) SetIdentity(v *AuthIdentity) *AuthIdentityChannelUpdate {
|
||||
return _u.SetIdentityID(v.ID)
|
||||
}
|
||||
|
||||
// Mutation returns the AuthIdentityChannelMutation object of the builder.
func (_u *AuthIdentityChannelUpdate) Mutation() *AuthIdentityChannelMutation {
	return _u.mutation
}
|
||||
|
||||
// ClearIdentity clears the "identity" edge to the AuthIdentity entity.
// Note: "identity" is a required edge — check() rejects a save that clears it
// while new identity IDs are staged.
func (_u *AuthIdentityChannelUpdate) ClearIdentity() *AuthIdentityChannelUpdate {
	_u.mutation.ClearIdentity()
	return _u
}
|
||||
|
||||
// Save executes the query and returns the number of nodes affected by the update operation.
func (_u *AuthIdentityChannelUpdate) Save(ctx context.Context) (int, error) {
	// Apply builder defaults (e.g. updated_at) before running hooks and sqlSave.
	_u.defaults()
	return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks)
}
|
||||
|
||||
// SaveX is like Save, but panics if an error occurs.
|
||||
func (_u *AuthIdentityChannelUpdate) SaveX(ctx context.Context) int {
|
||||
affected, err := _u.Save(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return affected
|
||||
}
|
||||
|
||||
// Exec executes the query.
|
||||
func (_u *AuthIdentityChannelUpdate) Exec(ctx context.Context) error {
|
||||
_, err := _u.Save(ctx)
|
||||
return err
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (_u *AuthIdentityChannelUpdate) ExecX(ctx context.Context) {
|
||||
if err := _u.Exec(ctx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// defaults sets the default values of the builder before save.
func (_u *AuthIdentityChannelUpdate) defaults() {
	// Only fill updated_at when the caller did not set it explicitly.
	if _, ok := _u.mutation.UpdatedAt(); !ok {
		v := authidentitychannel.UpdateDefaultUpdatedAt()
		_u.mutation.SetUpdatedAt(v)
	}
}
|
||||
|
||||
// check runs all checks and user-defined validators on the builder.
// Only fields that were actually staged on the mutation are validated.
func (_u *AuthIdentityChannelUpdate) check() error {
	if v, ok := _u.mutation.ProviderType(); ok {
		if err := authidentitychannel.ProviderTypeValidator(v); err != nil {
			return &ValidationError{Name: "provider_type", err: fmt.Errorf(`ent: validator failed for field "AuthIdentityChannel.provider_type": %w`, err)}
		}
	}
	if v, ok := _u.mutation.ProviderKey(); ok {
		if err := authidentitychannel.ProviderKeyValidator(v); err != nil {
			return &ValidationError{Name: "provider_key", err: fmt.Errorf(`ent: validator failed for field "AuthIdentityChannel.provider_key": %w`, err)}
		}
	}
	if v, ok := _u.mutation.Channel(); ok {
		if err := authidentitychannel.ChannelValidator(v); err != nil {
			return &ValidationError{Name: "channel", err: fmt.Errorf(`ent: validator failed for field "AuthIdentityChannel.channel": %w`, err)}
		}
	}
	if v, ok := _u.mutation.ChannelAppID(); ok {
		if err := authidentitychannel.ChannelAppIDValidator(v); err != nil {
			return &ValidationError{Name: "channel_app_id", err: fmt.Errorf(`ent: validator failed for field "AuthIdentityChannel.channel_app_id": %w`, err)}
		}
	}
	if v, ok := _u.mutation.ChannelSubject(); ok {
		if err := authidentitychannel.ChannelSubjectValidator(v); err != nil {
			return &ValidationError{Name: "channel_subject", err: fmt.Errorf(`ent: validator failed for field "AuthIdentityChannel.channel_subject": %w`, err)}
		}
	}
	// "identity" is a required unique edge: clearing it while staging new IDs
	// in the same mutation is rejected.
	if _u.mutation.IdentityCleared() && len(_u.mutation.IdentityIDs()) > 0 {
		return errors.New(`ent: clearing a required unique edge "AuthIdentityChannel.identity"`)
	}
	return nil
}
|
||||
|
||||
// sqlSave validates the builder, compiles the staged changes into an
// UpdateSpec, and executes the multi-row UPDATE, returning the number of
// affected rows.
func (_u *AuthIdentityChannelUpdate) sqlSave(ctx context.Context) (_node int, err error) {
	if err := _u.check(); err != nil {
		return _node, err
	}
	_spec := sqlgraph.NewUpdateSpec(authidentitychannel.Table, authidentitychannel.Columns, sqlgraph.NewFieldSpec(authidentitychannel.FieldID, field.TypeInt64))
	// Predicates added via Where() restrict which rows are updated.
	if ps := _u.mutation.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	// Copy every staged field change into the spec.
	if value, ok := _u.mutation.UpdatedAt(); ok {
		_spec.SetField(authidentitychannel.FieldUpdatedAt, field.TypeTime, value)
	}
	if value, ok := _u.mutation.ProviderType(); ok {
		_spec.SetField(authidentitychannel.FieldProviderType, field.TypeString, value)
	}
	if value, ok := _u.mutation.ProviderKey(); ok {
		_spec.SetField(authidentitychannel.FieldProviderKey, field.TypeString, value)
	}
	if value, ok := _u.mutation.Channel(); ok {
		_spec.SetField(authidentitychannel.FieldChannel, field.TypeString, value)
	}
	if value, ok := _u.mutation.ChannelAppID(); ok {
		_spec.SetField(authidentitychannel.FieldChannelAppID, field.TypeString, value)
	}
	if value, ok := _u.mutation.ChannelSubject(); ok {
		_spec.SetField(authidentitychannel.FieldChannelSubject, field.TypeString, value)
	}
	if value, ok := _u.mutation.Metadata(); ok {
		_spec.SetField(authidentitychannel.FieldMetadata, field.TypeJSON, value)
	}
	// Clearing the identity edge nulls the M2O foreign key.
	if _u.mutation.IdentityCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: true,
			Table:   authidentitychannel.IdentityTable,
			Columns: []string{authidentitychannel.IdentityColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(authidentity.FieldID, field.TypeInt64),
			},
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	// Re-pointing the identity edge sets the foreign key to the staged ID(s).
	if nodes := _u.mutation.IdentityIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: true,
			Table:   authidentitychannel.IdentityTable,
			Columns: []string{authidentitychannel.IdentityColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(authidentity.FieldID, field.TypeInt64),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Add = append(_spec.Edges.Add, edge)
	}
	if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil {
		// Translate low-level sqlgraph errors into the package's error types.
		if _, ok := err.(*sqlgraph.NotFoundError); ok {
			err = &NotFoundError{authidentitychannel.Label}
		} else if sqlgraph.IsConstraintError(err) {
			err = &ConstraintError{msg: err.Error(), wrap: err}
		}
		return 0, err
	}
	_u.mutation.done = true
	return _node, nil
}
|
||||
|
||||
// AuthIdentityChannelUpdateOne is the builder for updating a single AuthIdentityChannel entity.
type AuthIdentityChannelUpdateOne struct {
	config
	// fields optionally restricts the columns returned after the update (see Select).
	fields []string
	// hooks run around the save operation.
	hooks []Hook
	// mutation accumulates the staged field/edge changes and the target entity ID.
	mutation *AuthIdentityChannelMutation
}
|
||||
|
||||
// SetUpdatedAt sets the "updated_at" field.
|
||||
func (_u *AuthIdentityChannelUpdateOne) SetUpdatedAt(v time.Time) *AuthIdentityChannelUpdateOne {
|
||||
_u.mutation.SetUpdatedAt(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetIdentityID sets the "identity_id" field.
|
||||
func (_u *AuthIdentityChannelUpdateOne) SetIdentityID(v int64) *AuthIdentityChannelUpdateOne {
|
||||
_u.mutation.SetIdentityID(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableIdentityID sets the "identity_id" field if the given value is not nil.
|
||||
func (_u *AuthIdentityChannelUpdateOne) SetNillableIdentityID(v *int64) *AuthIdentityChannelUpdateOne {
|
||||
if v != nil {
|
||||
_u.SetIdentityID(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetProviderType sets the "provider_type" field.
|
||||
func (_u *AuthIdentityChannelUpdateOne) SetProviderType(v string) *AuthIdentityChannelUpdateOne {
|
||||
_u.mutation.SetProviderType(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableProviderType sets the "provider_type" field if the given value is not nil.
|
||||
func (_u *AuthIdentityChannelUpdateOne) SetNillableProviderType(v *string) *AuthIdentityChannelUpdateOne {
|
||||
if v != nil {
|
||||
_u.SetProviderType(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetProviderKey sets the "provider_key" field.
|
||||
func (_u *AuthIdentityChannelUpdateOne) SetProviderKey(v string) *AuthIdentityChannelUpdateOne {
|
||||
_u.mutation.SetProviderKey(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableProviderKey sets the "provider_key" field if the given value is not nil.
|
||||
func (_u *AuthIdentityChannelUpdateOne) SetNillableProviderKey(v *string) *AuthIdentityChannelUpdateOne {
|
||||
if v != nil {
|
||||
_u.SetProviderKey(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetChannel sets the "channel" field.
|
||||
func (_u *AuthIdentityChannelUpdateOne) SetChannel(v string) *AuthIdentityChannelUpdateOne {
|
||||
_u.mutation.SetChannel(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableChannel sets the "channel" field if the given value is not nil.
|
||||
func (_u *AuthIdentityChannelUpdateOne) SetNillableChannel(v *string) *AuthIdentityChannelUpdateOne {
|
||||
if v != nil {
|
||||
_u.SetChannel(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetChannelAppID sets the "channel_app_id" field.
|
||||
func (_u *AuthIdentityChannelUpdateOne) SetChannelAppID(v string) *AuthIdentityChannelUpdateOne {
|
||||
_u.mutation.SetChannelAppID(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableChannelAppID sets the "channel_app_id" field if the given value is not nil.
|
||||
func (_u *AuthIdentityChannelUpdateOne) SetNillableChannelAppID(v *string) *AuthIdentityChannelUpdateOne {
|
||||
if v != nil {
|
||||
_u.SetChannelAppID(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetChannelSubject sets the "channel_subject" field.
|
||||
func (_u *AuthIdentityChannelUpdateOne) SetChannelSubject(v string) *AuthIdentityChannelUpdateOne {
|
||||
_u.mutation.SetChannelSubject(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableChannelSubject sets the "channel_subject" field if the given value is not nil.
|
||||
func (_u *AuthIdentityChannelUpdateOne) SetNillableChannelSubject(v *string) *AuthIdentityChannelUpdateOne {
|
||||
if v != nil {
|
||||
_u.SetChannelSubject(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetMetadata sets the "metadata" field.
|
||||
func (_u *AuthIdentityChannelUpdateOne) SetMetadata(v map[string]interface{}) *AuthIdentityChannelUpdateOne {
|
||||
_u.mutation.SetMetadata(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetIdentity sets the "identity" edge to the AuthIdentity entity.
|
||||
func (_u *AuthIdentityChannelUpdateOne) SetIdentity(v *AuthIdentity) *AuthIdentityChannelUpdateOne {
|
||||
return _u.SetIdentityID(v.ID)
|
||||
}
|
||||
|
||||
// Mutation returns the AuthIdentityChannelMutation object of the builder.
|
||||
func (_u *AuthIdentityChannelUpdateOne) Mutation() *AuthIdentityChannelMutation {
|
||||
return _u.mutation
|
||||
}
|
||||
|
||||
// ClearIdentity clears the "identity" edge to the AuthIdentity entity.
|
||||
func (_u *AuthIdentityChannelUpdateOne) ClearIdentity() *AuthIdentityChannelUpdateOne {
|
||||
_u.mutation.ClearIdentity()
|
||||
return _u
|
||||
}
|
||||
|
||||
// Where appends a list predicates to the AuthIdentityChannelUpdate builder.
|
||||
func (_u *AuthIdentityChannelUpdateOne) Where(ps ...predicate.AuthIdentityChannel) *AuthIdentityChannelUpdateOne {
|
||||
_u.mutation.Where(ps...)
|
||||
return _u
|
||||
}
|
||||
|
||||
// Select allows selecting one or more fields (columns) of the returned entity.
// The default is selecting all fields defined in the entity schema.
// Field names are validated later in sqlSave via ValidColumn.
func (_u *AuthIdentityChannelUpdateOne) Select(field string, fields ...string) *AuthIdentityChannelUpdateOne {
	_u.fields = append([]string{field}, fields...)
	return _u
}
|
||||
|
||||
// Save executes the query and returns the updated AuthIdentityChannel entity.
|
||||
func (_u *AuthIdentityChannelUpdateOne) Save(ctx context.Context) (*AuthIdentityChannel, error) {
|
||||
_u.defaults()
|
||||
return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks)
|
||||
}
|
||||
|
||||
// SaveX is like Save, but panics if an error occurs.
|
||||
func (_u *AuthIdentityChannelUpdateOne) SaveX(ctx context.Context) *AuthIdentityChannel {
|
||||
node, err := _u.Save(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return node
|
||||
}
|
||||
|
||||
// Exec executes the query on the entity.
|
||||
func (_u *AuthIdentityChannelUpdateOne) Exec(ctx context.Context) error {
|
||||
_, err := _u.Save(ctx)
|
||||
return err
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (_u *AuthIdentityChannelUpdateOne) ExecX(ctx context.Context) {
|
||||
if err := _u.Exec(ctx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// defaults sets the default values of the builder before save.
|
||||
func (_u *AuthIdentityChannelUpdateOne) defaults() {
|
||||
if _, ok := _u.mutation.UpdatedAt(); !ok {
|
||||
v := authidentitychannel.UpdateDefaultUpdatedAt()
|
||||
_u.mutation.SetUpdatedAt(v)
|
||||
}
|
||||
}
|
||||
|
||||
// check runs all checks and user-defined validators on the builder.
|
||||
func (_u *AuthIdentityChannelUpdateOne) check() error {
|
||||
if v, ok := _u.mutation.ProviderType(); ok {
|
||||
if err := authidentitychannel.ProviderTypeValidator(v); err != nil {
|
||||
return &ValidationError{Name: "provider_type", err: fmt.Errorf(`ent: validator failed for field "AuthIdentityChannel.provider_type": %w`, err)}
|
||||
}
|
||||
}
|
||||
if v, ok := _u.mutation.ProviderKey(); ok {
|
||||
if err := authidentitychannel.ProviderKeyValidator(v); err != nil {
|
||||
return &ValidationError{Name: "provider_key", err: fmt.Errorf(`ent: validator failed for field "AuthIdentityChannel.provider_key": %w`, err)}
|
||||
}
|
||||
}
|
||||
if v, ok := _u.mutation.Channel(); ok {
|
||||
if err := authidentitychannel.ChannelValidator(v); err != nil {
|
||||
return &ValidationError{Name: "channel", err: fmt.Errorf(`ent: validator failed for field "AuthIdentityChannel.channel": %w`, err)}
|
||||
}
|
||||
}
|
||||
if v, ok := _u.mutation.ChannelAppID(); ok {
|
||||
if err := authidentitychannel.ChannelAppIDValidator(v); err != nil {
|
||||
return &ValidationError{Name: "channel_app_id", err: fmt.Errorf(`ent: validator failed for field "AuthIdentityChannel.channel_app_id": %w`, err)}
|
||||
}
|
||||
}
|
||||
if v, ok := _u.mutation.ChannelSubject(); ok {
|
||||
if err := authidentitychannel.ChannelSubjectValidator(v); err != nil {
|
||||
return &ValidationError{Name: "channel_subject", err: fmt.Errorf(`ent: validator failed for field "AuthIdentityChannel.channel_subject": %w`, err)}
|
||||
}
|
||||
}
|
||||
if _u.mutation.IdentityCleared() && len(_u.mutation.IdentityIDs()) > 0 {
|
||||
return errors.New(`ent: clearing a required unique edge "AuthIdentityChannel.identity"`)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// sqlSave validates the builder, compiles the staged changes into an
// UpdateSpec targeting the mutation's ID, executes the single-row UPDATE,
// and returns the refreshed entity.
func (_u *AuthIdentityChannelUpdateOne) sqlSave(ctx context.Context) (_node *AuthIdentityChannel, err error) {
	if err := _u.check(); err != nil {
		return _node, err
	}
	_spec := sqlgraph.NewUpdateSpec(authidentitychannel.Table, authidentitychannel.Columns, sqlgraph.NewFieldSpec(authidentitychannel.FieldID, field.TypeInt64))
	// Updating a single entity requires the target ID on the mutation.
	id, ok := _u.mutation.ID()
	if !ok {
		return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "AuthIdentityChannel.id" for update`)}
	}
	_spec.Node.ID.Value = id
	if fields := _u.fields; len(fields) > 0 {
		// Select() was used: return the ID plus the requested columns,
		// rejecting unknown column names and skipping a duplicate ID.
		_spec.Node.Columns = make([]string, 0, len(fields))
		_spec.Node.Columns = append(_spec.Node.Columns, authidentitychannel.FieldID)
		for _, f := range fields {
			if !authidentitychannel.ValidColumn(f) {
				return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
			}
			if f != authidentitychannel.FieldID {
				_spec.Node.Columns = append(_spec.Node.Columns, f)
			}
		}
	}
	if ps := _u.mutation.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	// Copy every staged field change into the spec.
	if value, ok := _u.mutation.UpdatedAt(); ok {
		_spec.SetField(authidentitychannel.FieldUpdatedAt, field.TypeTime, value)
	}
	if value, ok := _u.mutation.ProviderType(); ok {
		_spec.SetField(authidentitychannel.FieldProviderType, field.TypeString, value)
	}
	if value, ok := _u.mutation.ProviderKey(); ok {
		_spec.SetField(authidentitychannel.FieldProviderKey, field.TypeString, value)
	}
	if value, ok := _u.mutation.Channel(); ok {
		_spec.SetField(authidentitychannel.FieldChannel, field.TypeString, value)
	}
	if value, ok := _u.mutation.ChannelAppID(); ok {
		_spec.SetField(authidentitychannel.FieldChannelAppID, field.TypeString, value)
	}
	if value, ok := _u.mutation.ChannelSubject(); ok {
		_spec.SetField(authidentitychannel.FieldChannelSubject, field.TypeString, value)
	}
	if value, ok := _u.mutation.Metadata(); ok {
		_spec.SetField(authidentitychannel.FieldMetadata, field.TypeJSON, value)
	}
	// Clearing the identity edge nulls the M2O foreign key.
	if _u.mutation.IdentityCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: true,
			Table:   authidentitychannel.IdentityTable,
			Columns: []string{authidentitychannel.IdentityColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(authidentity.FieldID, field.TypeInt64),
			},
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	// Re-pointing the identity edge sets the foreign key to the staged ID(s).
	if nodes := _u.mutation.IdentityIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: true,
			Table:   authidentitychannel.IdentityTable,
			Columns: []string{authidentitychannel.IdentityColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(authidentity.FieldID, field.TypeInt64),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Add = append(_spec.Edges.Add, edge)
	}
	// Scan the updated row back into a fresh entity value.
	_node = &AuthIdentityChannel{config: _u.config}
	_spec.Assign = _node.assignValues
	_spec.ScanValues = _node.scanValues
	if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil {
		// Translate low-level sqlgraph errors into the package's error types.
		if _, ok := err.(*sqlgraph.NotFoundError); ok {
			err = &NotFoundError{authidentitychannel.Label}
		} else if sqlgraph.IsConstraintError(err) {
			err = &ConstraintError{msg: err.Error(), wrap: err}
		}
		return nil, err
	}
	_u.mutation.done = true
	return _node, nil
}
|
||||
359
backend/ent/channelmonitor.go
Normal file
@@ -0,0 +1,359 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package ent
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"entgo.io/ent"
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitor"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitorrequesttemplate"
|
||||
)
|
||||
|
||||
// ChannelMonitor is the model entity for the ChannelMonitor schema.
type ChannelMonitor struct {
	// config is embedded and excluded from JSON output.
	config `json:"-"`
	// ID of the ent.
	ID int64 `json:"id,omitempty"`
	// CreatedAt holds the value of the "created_at" field.
	CreatedAt time.Time `json:"created_at,omitempty"`
	// UpdatedAt holds the value of the "updated_at" field.
	UpdatedAt time.Time `json:"updated_at,omitempty"`
	// Name holds the value of the "name" field.
	Name string `json:"name,omitempty"`
	// Provider holds the value of the "provider" field.
	Provider channelmonitor.Provider `json:"provider,omitempty"`
	// Provider base origin, e.g. https://api.openai.com
	Endpoint string `json:"endpoint,omitempty"`
	// AES-256-GCM encrypted API key
	APIKeyEncrypted string `json:"-"`
	// PrimaryModel holds the value of the "primary_model" field.
	PrimaryModel string `json:"primary_model,omitempty"`
	// Additional model names to test alongside primary_model
	ExtraModels []string `json:"extra_models,omitempty"`
	// GroupName holds the value of the "group_name" field.
	GroupName string `json:"group_name,omitempty"`
	// Enabled holds the value of the "enabled" field.
	Enabled bool `json:"enabled,omitempty"`
	// IntervalSeconds holds the value of the "interval_seconds" field.
	IntervalSeconds int `json:"interval_seconds,omitempty"`
	// LastCheckedAt holds the value of the "last_checked_at" field.
	LastCheckedAt *time.Time `json:"last_checked_at,omitempty"`
	// CreatedBy holds the value of the "created_by" field.
	CreatedBy int64 `json:"created_by,omitempty"`
	// TemplateID holds the value of the "template_id" field.
	TemplateID *int64 `json:"template_id,omitempty"`
	// ExtraHeaders holds the value of the "extra_headers" field.
	ExtraHeaders map[string]string `json:"extra_headers,omitempty"`
	// BodyOverrideMode holds the value of the "body_override_mode" field.
	BodyOverrideMode string `json:"body_override_mode,omitempty"`
	// BodyOverride holds the value of the "body_override" field.
	BodyOverride map[string]interface{} `json:"body_override,omitempty"`
	// Edges holds the relations/edges for other nodes in the graph.
	// The values are being populated by the ChannelMonitorQuery when eager-loading is set.
	Edges ChannelMonitorEdges `json:"edges"`
	// selectValues holds column values outside the schema fields
	// (NOTE(review): inferred from ent codegen conventions — confirm).
	selectValues sql.SelectValues
}
|
||||
|
||||
// ChannelMonitorEdges holds the relations/edges for other nodes in the graph.
type ChannelMonitorEdges struct {
	// History holds the value of the history edge.
	History []*ChannelMonitorHistory `json:"history,omitempty"`
	// DailyRollups holds the value of the daily_rollups edge.
	DailyRollups []*ChannelMonitorDailyRollup `json:"daily_rollups,omitempty"`
	// RequestTemplate holds the value of the request_template edge.
	RequestTemplate *ChannelMonitorRequestTemplate `json:"request_template,omitempty"`
	// loadedTypes holds the information for reporting if a
	// type was loaded (or requested) in eager-loading or not.
	// Index order follows the OrErr accessors below:
	// 0 = history, 1 = daily_rollups, 2 = request_template.
	loadedTypes [3]bool
}
|
||||
|
||||
// HistoryOrErr returns the History value or an error if the edge
|
||||
// was not loaded in eager-loading.
|
||||
func (e ChannelMonitorEdges) HistoryOrErr() ([]*ChannelMonitorHistory, error) {
|
||||
if e.loadedTypes[0] {
|
||||
return e.History, nil
|
||||
}
|
||||
return nil, &NotLoadedError{edge: "history"}
|
||||
}
|
||||
|
||||
// DailyRollupsOrErr returns the DailyRollups value or an error if the edge
|
||||
// was not loaded in eager-loading.
|
||||
func (e ChannelMonitorEdges) DailyRollupsOrErr() ([]*ChannelMonitorDailyRollup, error) {
|
||||
if e.loadedTypes[1] {
|
||||
return e.DailyRollups, nil
|
||||
}
|
||||
return nil, &NotLoadedError{edge: "daily_rollups"}
|
||||
}
|
||||
|
||||
// RequestTemplateOrErr returns the RequestTemplate value or an error if the edge
|
||||
// was not loaded in eager-loading, or loaded but was not found.
|
||||
func (e ChannelMonitorEdges) RequestTemplateOrErr() (*ChannelMonitorRequestTemplate, error) {
|
||||
if e.RequestTemplate != nil {
|
||||
return e.RequestTemplate, nil
|
||||
} else if e.loadedTypes[2] {
|
||||
return nil, &NotFoundError{label: channelmonitorrequesttemplate.Label}
|
||||
}
|
||||
return nil, &NotLoadedError{edge: "request_template"}
|
||||
}
|
||||
|
||||
// scanValues returns the types for scanning values from sql.Rows.
// It allocates one sql.Scanner per requested column, chosen by the
// column's schema type; assignValues later converts them to struct fields.
func (*ChannelMonitor) scanValues(columns []string) ([]any, error) {
	values := make([]any, len(columns))
	for i := range columns {
		switch columns[i] {
		// JSON-encoded columns are scanned as raw bytes and unmarshaled later.
		case channelmonitor.FieldExtraModels, channelmonitor.FieldExtraHeaders, channelmonitor.FieldBodyOverride:
			values[i] = new([]byte)
		case channelmonitor.FieldEnabled:
			values[i] = new(sql.NullBool)
		case channelmonitor.FieldID, channelmonitor.FieldIntervalSeconds, channelmonitor.FieldCreatedBy, channelmonitor.FieldTemplateID:
			values[i] = new(sql.NullInt64)
		case channelmonitor.FieldName, channelmonitor.FieldProvider, channelmonitor.FieldEndpoint, channelmonitor.FieldAPIKeyEncrypted, channelmonitor.FieldPrimaryModel, channelmonitor.FieldGroupName, channelmonitor.FieldBodyOverrideMode:
			values[i] = new(sql.NullString)
		case channelmonitor.FieldCreatedAt, channelmonitor.FieldUpdatedAt, channelmonitor.FieldLastCheckedAt:
			values[i] = new(sql.NullTime)
		default:
			// Columns selected via modifiers (not part of the schema) are
			// captured generically and exposed through Value().
			values[i] = new(sql.UnknownType)
		}
	}
	return values, nil
}
|
||||
|
||||
// assignValues assigns the values that were returned from sql.Rows (after scanning)
// to the ChannelMonitor fields.
// values must be the slice produced by scanValues for the same columns;
// a type mismatch on any entry is reported as an error.
func (_m *ChannelMonitor) assignValues(columns []string, values []any) error {
	if m, n := len(values), len(columns); m < n {
		return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
	}
	for i := range columns {
		switch columns[i] {
		case channelmonitor.FieldID:
			value, ok := values[i].(*sql.NullInt64)
			if !ok {
				return fmt.Errorf("unexpected type %T for field id", value)
			}
			_m.ID = int64(value.Int64)
		case channelmonitor.FieldCreatedAt:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field created_at", values[i])
			} else if value.Valid {
				_m.CreatedAt = value.Time
			}
		case channelmonitor.FieldUpdatedAt:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field updated_at", values[i])
			} else if value.Valid {
				_m.UpdatedAt = value.Time
			}
		case channelmonitor.FieldName:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field name", values[i])
			} else if value.Valid {
				_m.Name = value.String
			}
		case channelmonitor.FieldProvider:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field provider", values[i])
			} else if value.Valid {
				// Stored as a plain string; converted to the enum type here.
				_m.Provider = channelmonitor.Provider(value.String)
			}
		case channelmonitor.FieldEndpoint:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field endpoint", values[i])
			} else if value.Valid {
				_m.Endpoint = value.String
			}
		case channelmonitor.FieldAPIKeyEncrypted:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field api_key_encrypted", values[i])
			} else if value.Valid {
				_m.APIKeyEncrypted = value.String
			}
		case channelmonitor.FieldPrimaryModel:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field primary_model", values[i])
			} else if value.Valid {
				_m.PrimaryModel = value.String
			}
		case channelmonitor.FieldExtraModels:
			// JSON column: decoded from raw bytes; empty/NULL leaves the zero value.
			if value, ok := values[i].(*[]byte); !ok {
				return fmt.Errorf("unexpected type %T for field extra_models", values[i])
			} else if value != nil && len(*value) > 0 {
				if err := json.Unmarshal(*value, &_m.ExtraModels); err != nil {
					return fmt.Errorf("unmarshal field extra_models: %w", err)
				}
			}
		case channelmonitor.FieldGroupName:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field group_name", values[i])
			} else if value.Valid {
				_m.GroupName = value.String
			}
		case channelmonitor.FieldEnabled:
			if value, ok := values[i].(*sql.NullBool); !ok {
				return fmt.Errorf("unexpected type %T for field enabled", values[i])
			} else if value.Valid {
				_m.Enabled = value.Bool
			}
		case channelmonitor.FieldIntervalSeconds:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field interval_seconds", values[i])
			} else if value.Valid {
				_m.IntervalSeconds = int(value.Int64)
			}
		case channelmonitor.FieldLastCheckedAt:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field last_checked_at", values[i])
			} else if value.Valid {
				// Nillable field: NULL in the DB keeps the pointer nil.
				_m.LastCheckedAt = new(time.Time)
				*_m.LastCheckedAt = value.Time
			}
		case channelmonitor.FieldCreatedBy:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field created_by", values[i])
			} else if value.Valid {
				_m.CreatedBy = value.Int64
			}
		case channelmonitor.FieldTemplateID:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field template_id", values[i])
			} else if value.Valid {
				// Nillable FK: NULL in the DB keeps the pointer nil.
				_m.TemplateID = new(int64)
				*_m.TemplateID = value.Int64
			}
		case channelmonitor.FieldExtraHeaders:
			if value, ok := values[i].(*[]byte); !ok {
				return fmt.Errorf("unexpected type %T for field extra_headers", values[i])
			} else if value != nil && len(*value) > 0 {
				if err := json.Unmarshal(*value, &_m.ExtraHeaders); err != nil {
					return fmt.Errorf("unmarshal field extra_headers: %w", err)
				}
			}
		case channelmonitor.FieldBodyOverrideMode:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field body_override_mode", values[i])
			} else if value.Valid {
				_m.BodyOverrideMode = value.String
			}
		case channelmonitor.FieldBodyOverride:
			if value, ok := values[i].(*[]byte); !ok {
				return fmt.Errorf("unexpected type %T for field body_override", values[i])
			} else if value != nil && len(*value) > 0 {
				if err := json.Unmarshal(*value, &_m.BodyOverride); err != nil {
					return fmt.Errorf("unmarshal field body_override: %w", err)
				}
			}
		default:
			// Non-schema columns (modifiers, custom selects) are kept aside
			// and retrieved via Value(name).
			_m.selectValues.Set(columns[i], values[i])
		}
	}
	return nil
}
|
||||
|
||||
// Value returns the ent.Value that was dynamically selected and assigned to the ChannelMonitor.
// This includes values selected through modifiers, order, etc.
// These are the values captured by assignValues' default branch.
func (_m *ChannelMonitor) Value(name string) (ent.Value, error) {
	return _m.selectValues.Get(name)
}
|
||||
|
||||
// QueryHistory queries the "history" edge of the ChannelMonitor entity.
func (_m *ChannelMonitor) QueryHistory() *ChannelMonitorHistoryQuery {
	return NewChannelMonitorClient(_m.config).QueryHistory(_m)
}

// QueryDailyRollups queries the "daily_rollups" edge of the ChannelMonitor entity.
func (_m *ChannelMonitor) QueryDailyRollups() *ChannelMonitorDailyRollupQuery {
	return NewChannelMonitorClient(_m.config).QueryDailyRollups(_m)
}

// QueryRequestTemplate queries the "request_template" edge of the ChannelMonitor entity.
func (_m *ChannelMonitor) QueryRequestTemplate() *ChannelMonitorRequestTemplateQuery {
	return NewChannelMonitorClient(_m.config).QueryRequestTemplate(_m)
}

// Update returns a builder for updating this ChannelMonitor.
// Note that you need to call ChannelMonitor.Unwrap() before calling this method if this ChannelMonitor
// was returned from a transaction, and the transaction was committed or rolled back.
func (_m *ChannelMonitor) Update() *ChannelMonitorUpdateOne {
	return NewChannelMonitorClient(_m.config).UpdateOne(_m)
}
|
||||
|
||||
// Unwrap unwraps the ChannelMonitor entity that was returned from a transaction after it was closed,
// so that all future queries will be executed through the driver which created the transaction.
// It panics if the entity's driver is not transactional (programmer error, not a runtime condition).
func (_m *ChannelMonitor) Unwrap() *ChannelMonitor {
	_tx, ok := _m.config.driver.(*txDriver)
	if !ok {
		panic("ent: ChannelMonitor is not a transactional entity")
	}
	// Replace the tx-scoped driver with the underlying one.
	_m.config.driver = _tx.drv
	return _m
}
|
||||
|
||||
// String implements the fmt.Stringer.
// Note: api_key_encrypted is deliberately redacted as "<sensitive>";
// nillable fields (last_checked_at, template_id) print nothing when nil,
// though their trailing separator is still emitted.
func (_m *ChannelMonitor) String() string {
	var builder strings.Builder
	builder.WriteString("ChannelMonitor(")
	builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID))
	builder.WriteString("created_at=")
	builder.WriteString(_m.CreatedAt.Format(time.ANSIC))
	builder.WriteString(", ")
	builder.WriteString("updated_at=")
	builder.WriteString(_m.UpdatedAt.Format(time.ANSIC))
	builder.WriteString(", ")
	builder.WriteString("name=")
	builder.WriteString(_m.Name)
	builder.WriteString(", ")
	builder.WriteString("provider=")
	builder.WriteString(fmt.Sprintf("%v", _m.Provider))
	builder.WriteString(", ")
	builder.WriteString("endpoint=")
	builder.WriteString(_m.Endpoint)
	builder.WriteString(", ")
	builder.WriteString("api_key_encrypted=<sensitive>")
	builder.WriteString(", ")
	builder.WriteString("primary_model=")
	builder.WriteString(_m.PrimaryModel)
	builder.WriteString(", ")
	builder.WriteString("extra_models=")
	builder.WriteString(fmt.Sprintf("%v", _m.ExtraModels))
	builder.WriteString(", ")
	builder.WriteString("group_name=")
	builder.WriteString(_m.GroupName)
	builder.WriteString(", ")
	builder.WriteString("enabled=")
	builder.WriteString(fmt.Sprintf("%v", _m.Enabled))
	builder.WriteString(", ")
	builder.WriteString("interval_seconds=")
	builder.WriteString(fmt.Sprintf("%v", _m.IntervalSeconds))
	builder.WriteString(", ")
	if v := _m.LastCheckedAt; v != nil {
		builder.WriteString("last_checked_at=")
		builder.WriteString(v.Format(time.ANSIC))
	}
	builder.WriteString(", ")
	builder.WriteString("created_by=")
	builder.WriteString(fmt.Sprintf("%v", _m.CreatedBy))
	builder.WriteString(", ")
	if v := _m.TemplateID; v != nil {
		builder.WriteString("template_id=")
		builder.WriteString(fmt.Sprintf("%v", *v))
	}
	builder.WriteString(", ")
	builder.WriteString("extra_headers=")
	builder.WriteString(fmt.Sprintf("%v", _m.ExtraHeaders))
	builder.WriteString(", ")
	builder.WriteString("body_override_mode=")
	builder.WriteString(_m.BodyOverrideMode)
	builder.WriteString(", ")
	builder.WriteString("body_override=")
	builder.WriteString(fmt.Sprintf("%v", _m.BodyOverride))
	builder.WriteByte(')')
	return builder.String()
}
|
||||
|
||||
// ChannelMonitors is a parsable slice of ChannelMonitor.
type ChannelMonitors []*ChannelMonitor
|
||||
304
backend/ent/channelmonitor/channelmonitor.go
Normal file
@@ -0,0 +1,304 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package channelmonitor
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
)
|
||||
|
||||
const (
	// Label holds the string label denoting the channelmonitor type in the database.
	Label = "channel_monitor"
	// FieldID holds the string denoting the id field in the database.
	FieldID = "id"
	// FieldCreatedAt holds the string denoting the created_at field in the database.
	FieldCreatedAt = "created_at"
	// FieldUpdatedAt holds the string denoting the updated_at field in the database.
	FieldUpdatedAt = "updated_at"
	// FieldName holds the string denoting the name field in the database.
	FieldName = "name"
	// FieldProvider holds the string denoting the provider field in the database.
	FieldProvider = "provider"
	// FieldEndpoint holds the string denoting the endpoint field in the database.
	FieldEndpoint = "endpoint"
	// FieldAPIKeyEncrypted holds the string denoting the api_key_encrypted field in the database.
	FieldAPIKeyEncrypted = "api_key_encrypted"
	// FieldPrimaryModel holds the string denoting the primary_model field in the database.
	FieldPrimaryModel = "primary_model"
	// FieldExtraModels holds the string denoting the extra_models field in the database.
	FieldExtraModels = "extra_models"
	// FieldGroupName holds the string denoting the group_name field in the database.
	FieldGroupName = "group_name"
	// FieldEnabled holds the string denoting the enabled field in the database.
	FieldEnabled = "enabled"
	// FieldIntervalSeconds holds the string denoting the interval_seconds field in the database.
	FieldIntervalSeconds = "interval_seconds"
	// FieldLastCheckedAt holds the string denoting the last_checked_at field in the database.
	FieldLastCheckedAt = "last_checked_at"
	// FieldCreatedBy holds the string denoting the created_by field in the database.
	FieldCreatedBy = "created_by"
	// FieldTemplateID holds the string denoting the template_id field in the database.
	FieldTemplateID = "template_id"
	// FieldExtraHeaders holds the string denoting the extra_headers field in the database.
	FieldExtraHeaders = "extra_headers"
	// FieldBodyOverrideMode holds the string denoting the body_override_mode field in the database.
	FieldBodyOverrideMode = "body_override_mode"
	// FieldBodyOverride holds the string denoting the body_override field in the database.
	FieldBodyOverride = "body_override"
	// EdgeHistory holds the string denoting the history edge name in mutations.
	EdgeHistory = "history"
	// EdgeDailyRollups holds the string denoting the daily_rollups edge name in mutations.
	EdgeDailyRollups = "daily_rollups"
	// EdgeRequestTemplate holds the string denoting the request_template edge name in mutations.
	EdgeRequestTemplate = "request_template"
	// Table holds the table name of the channelmonitor in the database.
	Table = "channel_monitors"
	// HistoryTable is the table that holds the history relation/edge.
	HistoryTable = "channel_monitor_histories"
	// HistoryInverseTable is the table name for the ChannelMonitorHistory entity.
	// It exists in this package in order to avoid circular dependency with the "channelmonitorhistory" package.
	HistoryInverseTable = "channel_monitor_histories"
	// HistoryColumn is the table column denoting the history relation/edge.
	HistoryColumn = "monitor_id"
	// DailyRollupsTable is the table that holds the daily_rollups relation/edge.
	DailyRollupsTable = "channel_monitor_daily_rollups"
	// DailyRollupsInverseTable is the table name for the ChannelMonitorDailyRollup entity.
	// It exists in this package in order to avoid circular dependency with the "channelmonitordailyrollup" package.
	DailyRollupsInverseTable = "channel_monitor_daily_rollups"
	// DailyRollupsColumn is the table column denoting the daily_rollups relation/edge.
	DailyRollupsColumn = "monitor_id"
	// RequestTemplateTable is the table that holds the request_template relation/edge.
	// It equals Table because the M2O foreign key (template_id) lives on
	// the channel_monitors table itself.
	RequestTemplateTable = "channel_monitors"
	// RequestTemplateInverseTable is the table name for the ChannelMonitorRequestTemplate entity.
	// It exists in this package in order to avoid circular dependency with the "channelmonitorrequesttemplate" package.
	RequestTemplateInverseTable = "channel_monitor_request_templates"
	// RequestTemplateColumn is the table column denoting the request_template relation/edge.
	RequestTemplateColumn = "template_id"
)
|
||||
|
||||
// Columns holds all SQL columns for channelmonitor fields.
// Order matches the scanValues/assignValues dispatch in the entity file.
var Columns = []string{
	FieldID,
	FieldCreatedAt,
	FieldUpdatedAt,
	FieldName,
	FieldProvider,
	FieldEndpoint,
	FieldAPIKeyEncrypted,
	FieldPrimaryModel,
	FieldExtraModels,
	FieldGroupName,
	FieldEnabled,
	FieldIntervalSeconds,
	FieldLastCheckedAt,
	FieldCreatedBy,
	FieldTemplateID,
	FieldExtraHeaders,
	FieldBodyOverrideMode,
	FieldBodyOverride,
}
|
||||
|
||||
// ValidColumn reports if the column name is valid (part of the table columns).
|
||||
func ValidColumn(column string) bool {
|
||||
for i := range Columns {
|
||||
if column == Columns[i] {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Hooks for defaults and validators declared by the schema.
// NOTE(review): these are nil here; presumably assigned during package
// initialization by the generated runtime — confirm against ent/runtime.
var (
	// DefaultCreatedAt holds the default value on creation for the "created_at" field.
	DefaultCreatedAt func() time.Time
	// DefaultUpdatedAt holds the default value on creation for the "updated_at" field.
	DefaultUpdatedAt func() time.Time
	// UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field.
	UpdateDefaultUpdatedAt func() time.Time
	// NameValidator is a validator for the "name" field. It is called by the builders before save.
	NameValidator func(string) error
	// EndpointValidator is a validator for the "endpoint" field. It is called by the builders before save.
	EndpointValidator func(string) error
	// APIKeyEncryptedValidator is a validator for the "api_key_encrypted" field. It is called by the builders before save.
	APIKeyEncryptedValidator func(string) error
	// PrimaryModelValidator is a validator for the "primary_model" field. It is called by the builders before save.
	PrimaryModelValidator func(string) error
	// DefaultExtraModels holds the default value on creation for the "extra_models" field.
	DefaultExtraModels []string
	// DefaultGroupName holds the default value on creation for the "group_name" field.
	DefaultGroupName string
	// GroupNameValidator is a validator for the "group_name" field. It is called by the builders before save.
	GroupNameValidator func(string) error
	// DefaultEnabled holds the default value on creation for the "enabled" field.
	DefaultEnabled bool
	// IntervalSecondsValidator is a validator for the "interval_seconds" field. It is called by the builders before save.
	IntervalSecondsValidator func(int) error
	// DefaultExtraHeaders holds the default value on creation for the "extra_headers" field.
	DefaultExtraHeaders map[string]string
	// DefaultBodyOverrideMode holds the default value on creation for the "body_override_mode" field.
	DefaultBodyOverrideMode string
	// BodyOverrideModeValidator is a validator for the "body_override_mode" field. It is called by the builders before save.
	BodyOverrideModeValidator func(string) error
)
|
||||
|
||||
// Provider defines the type for the "provider" enum field.
type Provider string

// Provider values.
const (
	ProviderOpenai    Provider = "openai"
	ProviderAnthropic Provider = "anthropic"
	ProviderGemini    Provider = "gemini"
)

// String returns the provider's underlying string value.
func (pr Provider) String() string {
	return string(pr)
}

// ProviderValidator is a validator for the "provider" field enum values. It is called by the builders before save.
func ProviderValidator(pr Provider) error {
	// Accept only declared enum values; anything else is rejected.
	switch pr {
	case ProviderOpenai, ProviderAnthropic, ProviderGemini:
		return nil
	}
	return fmt.Errorf("channelmonitor: invalid enum value for provider field: %q", pr)
}
|
||||
|
||||
// OrderOption defines the ordering options for the ChannelMonitor queries.
// Each option mutates the query's SQL selector to append an ORDER BY term.
type OrderOption func(*sql.Selector)
|
||||
|
||||
// ByID orders the results by the id field.
func ByID(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldID, opts...).ToFunc()
}

// ByCreatedAt orders the results by the created_at field.
func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
}

// ByUpdatedAt orders the results by the updated_at field.
func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc()
}

// ByName orders the results by the name field.
func ByName(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldName, opts...).ToFunc()
}

// ByProvider orders the results by the provider field.
func ByProvider(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldProvider, opts...).ToFunc()
}

// ByEndpoint orders the results by the endpoint field.
func ByEndpoint(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldEndpoint, opts...).ToFunc()
}

// ByAPIKeyEncrypted orders the results by the api_key_encrypted field.
func ByAPIKeyEncrypted(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldAPIKeyEncrypted, opts...).ToFunc()
}

// ByPrimaryModel orders the results by the primary_model field.
func ByPrimaryModel(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldPrimaryModel, opts...).ToFunc()
}

// ByGroupName orders the results by the group_name field.
func ByGroupName(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldGroupName, opts...).ToFunc()
}

// ByEnabled orders the results by the enabled field.
func ByEnabled(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldEnabled, opts...).ToFunc()
}

// ByIntervalSeconds orders the results by the interval_seconds field.
func ByIntervalSeconds(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldIntervalSeconds, opts...).ToFunc()
}

// ByLastCheckedAt orders the results by the last_checked_at field.
func ByLastCheckedAt(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldLastCheckedAt, opts...).ToFunc()
}

// ByCreatedBy orders the results by the created_by field.
func ByCreatedBy(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldCreatedBy, opts...).ToFunc()
}

// ByTemplateID orders the results by the template_id field.
func ByTemplateID(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldTemplateID, opts...).ToFunc()
}

// ByBodyOverrideMode orders the results by the body_override_mode field.
// Note: the JSON fields (extra_models, extra_headers, body_override) have
// no order option generated for them.
func ByBodyOverrideMode(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldBodyOverrideMode, opts...).ToFunc()
}
|
||||
|
||||
// ByHistoryCount orders the results by history count.
func ByHistoryCount(opts ...sql.OrderTermOption) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborsCount(s, newHistoryStep(), opts...)
	}
}

// ByHistory orders the results by history terms.
func ByHistory(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborTerms(s, newHistoryStep(), append([]sql.OrderTerm{term}, terms...)...)
	}
}

// ByDailyRollupsCount orders the results by daily_rollups count.
func ByDailyRollupsCount(opts ...sql.OrderTermOption) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborsCount(s, newDailyRollupsStep(), opts...)
	}
}

// ByDailyRollups orders the results by daily_rollups terms.
func ByDailyRollups(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborTerms(s, newDailyRollupsStep(), append([]sql.OrderTerm{term}, terms...)...)
	}
}

// ByRequestTemplateField orders the results by request_template field.
func ByRequestTemplateField(field string, opts ...sql.OrderTermOption) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborTerms(s, newRequestTemplateStep(), sql.OrderByField(field, opts...))
	}
}
|
||||
// newHistoryStep builds the graph step for the O2M history edge
// (channel_monitors.id -> channel_monitor_histories.monitor_id).
func newHistoryStep() *sqlgraph.Step {
	return sqlgraph.NewStep(
		sqlgraph.From(Table, FieldID),
		sqlgraph.To(HistoryInverseTable, FieldID),
		sqlgraph.Edge(sqlgraph.O2M, false, HistoryTable, HistoryColumn),
	)
}

// newDailyRollupsStep builds the graph step for the O2M daily_rollups edge
// (channel_monitors.id -> channel_monitor_daily_rollups.monitor_id).
func newDailyRollupsStep() *sqlgraph.Step {
	return sqlgraph.NewStep(
		sqlgraph.From(Table, FieldID),
		sqlgraph.To(DailyRollupsInverseTable, FieldID),
		sqlgraph.Edge(sqlgraph.O2M, false, DailyRollupsTable, DailyRollupsColumn),
	)
}

// newRequestTemplateStep builds the graph step for the M2O request_template
// edge; the FK (template_id) lives on the channel_monitors table itself.
func newRequestTemplateStep() *sqlgraph.Step {
	return sqlgraph.NewStep(
		sqlgraph.From(Table, FieldID),
		sqlgraph.To(RequestTemplateInverseTable, FieldID),
		sqlgraph.Edge(sqlgraph.M2O, false, RequestTemplateTable, RequestTemplateColumn),
	)
}
|
||||
885
backend/ent/channelmonitor/where.go
Normal file
@@ -0,0 +1,885 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package channelmonitor
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||
)
|
||||
|
||||
// ID filters vertices based on their ID field.
func ID(id int64) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldEQ(FieldID, id))
}

// IDEQ applies the EQ predicate on the ID field.
func IDEQ(id int64) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldEQ(FieldID, id))
}

// IDNEQ applies the NEQ predicate on the ID field.
func IDNEQ(id int64) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldNEQ(FieldID, id))
}

// IDIn applies the In predicate on the ID field.
func IDIn(ids ...int64) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldIn(FieldID, ids...))
}

// IDNotIn applies the NotIn predicate on the ID field.
func IDNotIn(ids ...int64) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldNotIn(FieldID, ids...))
}

// IDGT applies the GT predicate on the ID field.
func IDGT(id int64) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldGT(FieldID, id))
}

// IDGTE applies the GTE predicate on the ID field.
func IDGTE(id int64) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldGTE(FieldID, id))
}

// IDLT applies the LT predicate on the ID field.
func IDLT(id int64) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldLT(FieldID, id))
}

// IDLTE applies the LTE predicate on the ID field.
func IDLTE(id int64) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldLTE(FieldID, id))
}
|
||||
|
||||
// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ.
func CreatedAt(v time.Time) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldEQ(FieldCreatedAt, v))
}

// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ.
func UpdatedAt(v time.Time) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldEQ(FieldUpdatedAt, v))
}

// Name applies equality check predicate on the "name" field. It's identical to NameEQ.
func Name(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldEQ(FieldName, v))
}

// Endpoint applies equality check predicate on the "endpoint" field. It's identical to EndpointEQ.
// NOTE(review): no shorthand for "provider" appears here — presumably the
// enum field's predicates are generated separately; verify further down the file.
func Endpoint(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldEQ(FieldEndpoint, v))
}

// APIKeyEncrypted applies equality check predicate on the "api_key_encrypted" field. It's identical to APIKeyEncryptedEQ.
func APIKeyEncrypted(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldEQ(FieldAPIKeyEncrypted, v))
}

// PrimaryModel applies equality check predicate on the "primary_model" field. It's identical to PrimaryModelEQ.
func PrimaryModel(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldEQ(FieldPrimaryModel, v))
}

// GroupName applies equality check predicate on the "group_name" field. It's identical to GroupNameEQ.
func GroupName(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldEQ(FieldGroupName, v))
}

// Enabled applies equality check predicate on the "enabled" field. It's identical to EnabledEQ.
func Enabled(v bool) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldEQ(FieldEnabled, v))
}

// IntervalSeconds applies equality check predicate on the "interval_seconds" field. It's identical to IntervalSecondsEQ.
func IntervalSeconds(v int) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldEQ(FieldIntervalSeconds, v))
}

// LastCheckedAt applies equality check predicate on the "last_checked_at" field. It's identical to LastCheckedAtEQ.
func LastCheckedAt(v time.Time) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldEQ(FieldLastCheckedAt, v))
}

// CreatedBy applies equality check predicate on the "created_by" field. It's identical to CreatedByEQ.
func CreatedBy(v int64) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldEQ(FieldCreatedBy, v))
}

// TemplateID applies equality check predicate on the "template_id" field. It's identical to TemplateIDEQ.
func TemplateID(v int64) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldEQ(FieldTemplateID, v))
}

// BodyOverrideMode applies equality check predicate on the "body_override_mode" field. It's identical to BodyOverrideModeEQ.
func BodyOverrideMode(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldEQ(FieldBodyOverrideMode, v))
}
|
||||
|
||||
// CreatedAtEQ applies the EQ predicate on the "created_at" field.
|
||||
func CreatedAtEQ(v time.Time) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldEQ(FieldCreatedAt, v))
|
||||
}
|
||||
|
||||
// CreatedAtNEQ applies the NEQ predicate on the "created_at" field.
|
||||
func CreatedAtNEQ(v time.Time) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldNEQ(FieldCreatedAt, v))
|
||||
}
|
||||
|
||||
// CreatedAtIn applies the In predicate on the "created_at" field.
|
||||
func CreatedAtIn(vs ...time.Time) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldIn(FieldCreatedAt, vs...))
|
||||
}
|
||||
|
||||
// CreatedAtNotIn applies the NotIn predicate on the "created_at" field.
|
||||
func CreatedAtNotIn(vs ...time.Time) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldNotIn(FieldCreatedAt, vs...))
|
||||
}
|
||||
|
||||
// CreatedAtGT applies the GT predicate on the "created_at" field.
|
||||
func CreatedAtGT(v time.Time) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldGT(FieldCreatedAt, v))
|
||||
}
|
||||
|
||||
// CreatedAtGTE applies the GTE predicate on the "created_at" field.
|
||||
func CreatedAtGTE(v time.Time) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldGTE(FieldCreatedAt, v))
|
||||
}
|
||||
|
||||
// CreatedAtLT applies the LT predicate on the "created_at" field.
|
||||
func CreatedAtLT(v time.Time) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldLT(FieldCreatedAt, v))
|
||||
}
|
||||
|
||||
// CreatedAtLTE applies the LTE predicate on the "created_at" field.
|
||||
func CreatedAtLTE(v time.Time) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldLTE(FieldCreatedAt, v))
|
||||
}
|
||||
|
||||
// UpdatedAtEQ applies the EQ predicate on the "updated_at" field.
|
||||
func UpdatedAtEQ(v time.Time) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldEQ(FieldUpdatedAt, v))
|
||||
}
|
||||
|
||||
// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field.
|
||||
func UpdatedAtNEQ(v time.Time) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldNEQ(FieldUpdatedAt, v))
|
||||
}
|
||||
|
||||
// UpdatedAtIn applies the In predicate on the "updated_at" field.
|
||||
func UpdatedAtIn(vs ...time.Time) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldIn(FieldUpdatedAt, vs...))
|
||||
}
|
||||
|
||||
// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field.
|
||||
func UpdatedAtNotIn(vs ...time.Time) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldNotIn(FieldUpdatedAt, vs...))
|
||||
}
|
||||
|
||||
// UpdatedAtGT applies the GT predicate on the "updated_at" field.
|
||||
func UpdatedAtGT(v time.Time) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldGT(FieldUpdatedAt, v))
|
||||
}
|
||||
|
||||
// UpdatedAtGTE applies the GTE predicate on the "updated_at" field.
|
||||
func UpdatedAtGTE(v time.Time) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldGTE(FieldUpdatedAt, v))
|
||||
}
|
||||
|
||||
// UpdatedAtLT applies the LT predicate on the "updated_at" field.
|
||||
func UpdatedAtLT(v time.Time) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldLT(FieldUpdatedAt, v))
|
||||
}
|
||||
|
||||
// UpdatedAtLTE applies the LTE predicate on the "updated_at" field.
|
||||
func UpdatedAtLTE(v time.Time) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldLTE(FieldUpdatedAt, v))
|
||||
}
|
||||
|
||||
// NameEQ applies the EQ predicate on the "name" field.
|
||||
func NameEQ(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldEQ(FieldName, v))
|
||||
}
|
||||
|
||||
// NameNEQ applies the NEQ predicate on the "name" field.
|
||||
func NameNEQ(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldNEQ(FieldName, v))
|
||||
}
|
||||
|
||||
// NameIn applies the In predicate on the "name" field.
|
||||
func NameIn(vs ...string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldIn(FieldName, vs...))
|
||||
}
|
||||
|
||||
// NameNotIn applies the NotIn predicate on the "name" field.
|
||||
func NameNotIn(vs ...string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldNotIn(FieldName, vs...))
|
||||
}
|
||||
|
||||
// NameGT applies the GT predicate on the "name" field.
|
||||
func NameGT(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldGT(FieldName, v))
|
||||
}
|
||||
|
||||
// NameGTE applies the GTE predicate on the "name" field.
|
||||
func NameGTE(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldGTE(FieldName, v))
|
||||
}
|
||||
|
||||
// NameLT applies the LT predicate on the "name" field.
|
||||
func NameLT(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldLT(FieldName, v))
|
||||
}
|
||||
|
||||
// NameLTE applies the LTE predicate on the "name" field.
|
||||
func NameLTE(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldLTE(FieldName, v))
|
||||
}
|
||||
|
||||
// NameContains applies the Contains predicate on the "name" field.
|
||||
func NameContains(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldContains(FieldName, v))
|
||||
}
|
||||
|
||||
// NameHasPrefix applies the HasPrefix predicate on the "name" field.
|
||||
func NameHasPrefix(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldHasPrefix(FieldName, v))
|
||||
}
|
||||
|
||||
// NameHasSuffix applies the HasSuffix predicate on the "name" field.
|
||||
func NameHasSuffix(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldHasSuffix(FieldName, v))
|
||||
}
|
||||
|
||||
// NameEqualFold applies the EqualFold predicate on the "name" field.
|
||||
func NameEqualFold(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldEqualFold(FieldName, v))
|
||||
}
|
||||
|
||||
// NameContainsFold applies the ContainsFold predicate on the "name" field.
|
||||
func NameContainsFold(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldContainsFold(FieldName, v))
|
||||
}
|
||||
|
||||
// ProviderEQ applies the EQ predicate on the "provider" field.
|
||||
func ProviderEQ(v Provider) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldEQ(FieldProvider, v))
|
||||
}
|
||||
|
||||
// ProviderNEQ applies the NEQ predicate on the "provider" field.
|
||||
func ProviderNEQ(v Provider) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldNEQ(FieldProvider, v))
|
||||
}
|
||||
|
||||
// ProviderIn applies the In predicate on the "provider" field.
|
||||
func ProviderIn(vs ...Provider) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldIn(FieldProvider, vs...))
|
||||
}
|
||||
|
||||
// ProviderNotIn applies the NotIn predicate on the "provider" field.
|
||||
func ProviderNotIn(vs ...Provider) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldNotIn(FieldProvider, vs...))
|
||||
}
|
||||
|
||||
// EndpointEQ applies the EQ predicate on the "endpoint" field.
|
||||
func EndpointEQ(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldEQ(FieldEndpoint, v))
|
||||
}
|
||||
|
||||
// EndpointNEQ applies the NEQ predicate on the "endpoint" field.
|
||||
func EndpointNEQ(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldNEQ(FieldEndpoint, v))
|
||||
}
|
||||
|
||||
// EndpointIn applies the In predicate on the "endpoint" field.
|
||||
func EndpointIn(vs ...string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldIn(FieldEndpoint, vs...))
|
||||
}
|
||||
|
||||
// EndpointNotIn applies the NotIn predicate on the "endpoint" field.
|
||||
func EndpointNotIn(vs ...string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldNotIn(FieldEndpoint, vs...))
|
||||
}
|
||||
|
||||
// EndpointGT applies the GT predicate on the "endpoint" field.
|
||||
func EndpointGT(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldGT(FieldEndpoint, v))
|
||||
}
|
||||
|
||||
// EndpointGTE applies the GTE predicate on the "endpoint" field.
|
||||
func EndpointGTE(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldGTE(FieldEndpoint, v))
|
||||
}
|
||||
|
||||
// EndpointLT applies the LT predicate on the "endpoint" field.
|
||||
func EndpointLT(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldLT(FieldEndpoint, v))
|
||||
}
|
||||
|
||||
// EndpointLTE applies the LTE predicate on the "endpoint" field.
|
||||
func EndpointLTE(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldLTE(FieldEndpoint, v))
|
||||
}
|
||||
|
||||
// EndpointContains applies the Contains predicate on the "endpoint" field.
|
||||
func EndpointContains(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldContains(FieldEndpoint, v))
|
||||
}
|
||||
|
||||
// EndpointHasPrefix applies the HasPrefix predicate on the "endpoint" field.
|
||||
func EndpointHasPrefix(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldHasPrefix(FieldEndpoint, v))
|
||||
}
|
||||
|
||||
// EndpointHasSuffix applies the HasSuffix predicate on the "endpoint" field.
|
||||
func EndpointHasSuffix(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldHasSuffix(FieldEndpoint, v))
|
||||
}
|
||||
|
||||
// EndpointEqualFold applies the EqualFold predicate on the "endpoint" field.
|
||||
func EndpointEqualFold(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldEqualFold(FieldEndpoint, v))
|
||||
}
|
||||
|
||||
// EndpointContainsFold applies the ContainsFold predicate on the "endpoint" field.
|
||||
func EndpointContainsFold(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldContainsFold(FieldEndpoint, v))
|
||||
}
|
||||
|
||||
// APIKeyEncryptedEQ applies the EQ predicate on the "api_key_encrypted" field.
|
||||
func APIKeyEncryptedEQ(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldEQ(FieldAPIKeyEncrypted, v))
|
||||
}
|
||||
|
||||
// APIKeyEncryptedNEQ applies the NEQ predicate on the "api_key_encrypted" field.
|
||||
func APIKeyEncryptedNEQ(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldNEQ(FieldAPIKeyEncrypted, v))
|
||||
}
|
||||
|
||||
// APIKeyEncryptedIn applies the In predicate on the "api_key_encrypted" field.
|
||||
func APIKeyEncryptedIn(vs ...string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldIn(FieldAPIKeyEncrypted, vs...))
|
||||
}
|
||||
|
||||
// APIKeyEncryptedNotIn applies the NotIn predicate on the "api_key_encrypted" field.
|
||||
func APIKeyEncryptedNotIn(vs ...string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldNotIn(FieldAPIKeyEncrypted, vs...))
|
||||
}
|
||||
|
||||
// APIKeyEncryptedGT applies the GT predicate on the "api_key_encrypted" field.
|
||||
func APIKeyEncryptedGT(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldGT(FieldAPIKeyEncrypted, v))
|
||||
}
|
||||
|
||||
// APIKeyEncryptedGTE applies the GTE predicate on the "api_key_encrypted" field.
|
||||
func APIKeyEncryptedGTE(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldGTE(FieldAPIKeyEncrypted, v))
|
||||
}
|
||||
|
||||
// APIKeyEncryptedLT applies the LT predicate on the "api_key_encrypted" field.
|
||||
func APIKeyEncryptedLT(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldLT(FieldAPIKeyEncrypted, v))
|
||||
}
|
||||
|
||||
// APIKeyEncryptedLTE applies the LTE predicate on the "api_key_encrypted" field.
|
||||
func APIKeyEncryptedLTE(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldLTE(FieldAPIKeyEncrypted, v))
|
||||
}
|
||||
|
||||
// APIKeyEncryptedContains applies the Contains predicate on the "api_key_encrypted" field.
|
||||
func APIKeyEncryptedContains(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldContains(FieldAPIKeyEncrypted, v))
|
||||
}
|
||||
|
||||
// APIKeyEncryptedHasPrefix applies the HasPrefix predicate on the "api_key_encrypted" field.
|
||||
func APIKeyEncryptedHasPrefix(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldHasPrefix(FieldAPIKeyEncrypted, v))
|
||||
}
|
||||
|
||||
// APIKeyEncryptedHasSuffix applies the HasSuffix predicate on the "api_key_encrypted" field.
|
||||
func APIKeyEncryptedHasSuffix(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldHasSuffix(FieldAPIKeyEncrypted, v))
|
||||
}
|
||||
|
||||
// APIKeyEncryptedEqualFold applies the EqualFold predicate on the "api_key_encrypted" field.
|
||||
func APIKeyEncryptedEqualFold(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldEqualFold(FieldAPIKeyEncrypted, v))
|
||||
}
|
||||
|
||||
// APIKeyEncryptedContainsFold applies the ContainsFold predicate on the "api_key_encrypted" field.
|
||||
func APIKeyEncryptedContainsFold(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldContainsFold(FieldAPIKeyEncrypted, v))
|
||||
}
|
||||
|
||||
// PrimaryModelEQ applies the EQ predicate on the "primary_model" field.
|
||||
func PrimaryModelEQ(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldEQ(FieldPrimaryModel, v))
|
||||
}
|
||||
|
||||
// PrimaryModelNEQ applies the NEQ predicate on the "primary_model" field.
|
||||
func PrimaryModelNEQ(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldNEQ(FieldPrimaryModel, v))
|
||||
}
|
||||
|
||||
// PrimaryModelIn applies the In predicate on the "primary_model" field.
|
||||
func PrimaryModelIn(vs ...string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldIn(FieldPrimaryModel, vs...))
|
||||
}
|
||||
|
||||
// PrimaryModelNotIn applies the NotIn predicate on the "primary_model" field.
|
||||
func PrimaryModelNotIn(vs ...string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldNotIn(FieldPrimaryModel, vs...))
|
||||
}
|
||||
|
||||
// PrimaryModelGT applies the GT predicate on the "primary_model" field.
|
||||
func PrimaryModelGT(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldGT(FieldPrimaryModel, v))
|
||||
}
|
||||
|
||||
// PrimaryModelGTE applies the GTE predicate on the "primary_model" field.
|
||||
func PrimaryModelGTE(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldGTE(FieldPrimaryModel, v))
|
||||
}
|
||||
|
||||
// PrimaryModelLT applies the LT predicate on the "primary_model" field.
|
||||
func PrimaryModelLT(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldLT(FieldPrimaryModel, v))
|
||||
}
|
||||
|
||||
// PrimaryModelLTE applies the LTE predicate on the "primary_model" field.
|
||||
func PrimaryModelLTE(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldLTE(FieldPrimaryModel, v))
|
||||
}
|
||||
|
||||
// PrimaryModelContains applies the Contains predicate on the "primary_model" field.
|
||||
func PrimaryModelContains(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldContains(FieldPrimaryModel, v))
|
||||
}
|
||||
|
||||
// PrimaryModelHasPrefix applies the HasPrefix predicate on the "primary_model" field.
|
||||
func PrimaryModelHasPrefix(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldHasPrefix(FieldPrimaryModel, v))
|
||||
}
|
||||
|
||||
// PrimaryModelHasSuffix applies the HasSuffix predicate on the "primary_model" field.
|
||||
func PrimaryModelHasSuffix(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldHasSuffix(FieldPrimaryModel, v))
|
||||
}
|
||||
|
||||
// PrimaryModelEqualFold applies the EqualFold predicate on the "primary_model" field.
|
||||
func PrimaryModelEqualFold(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldEqualFold(FieldPrimaryModel, v))
|
||||
}
|
||||
|
||||
// PrimaryModelContainsFold applies the ContainsFold predicate on the "primary_model" field.
|
||||
func PrimaryModelContainsFold(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldContainsFold(FieldPrimaryModel, v))
|
||||
}
|
||||
|
||||
// GroupNameEQ applies the EQ predicate on the "group_name" field.
|
||||
func GroupNameEQ(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldEQ(FieldGroupName, v))
|
||||
}
|
||||
|
||||
// GroupNameNEQ applies the NEQ predicate on the "group_name" field.
|
||||
func GroupNameNEQ(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldNEQ(FieldGroupName, v))
|
||||
}
|
||||
|
||||
// GroupNameIn applies the In predicate on the "group_name" field.
|
||||
func GroupNameIn(vs ...string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldIn(FieldGroupName, vs...))
|
||||
}
|
||||
|
||||
// GroupNameNotIn applies the NotIn predicate on the "group_name" field.
|
||||
func GroupNameNotIn(vs ...string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldNotIn(FieldGroupName, vs...))
|
||||
}
|
||||
|
||||
// GroupNameGT applies the GT predicate on the "group_name" field.
|
||||
func GroupNameGT(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldGT(FieldGroupName, v))
|
||||
}
|
||||
|
||||
// GroupNameGTE applies the GTE predicate on the "group_name" field.
|
||||
func GroupNameGTE(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldGTE(FieldGroupName, v))
|
||||
}
|
||||
|
||||
// GroupNameLT applies the LT predicate on the "group_name" field.
|
||||
func GroupNameLT(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldLT(FieldGroupName, v))
|
||||
}
|
||||
|
||||
// GroupNameLTE applies the LTE predicate on the "group_name" field.
|
||||
func GroupNameLTE(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldLTE(FieldGroupName, v))
|
||||
}
|
||||
|
||||
// GroupNameContains applies the Contains predicate on the "group_name" field.
|
||||
func GroupNameContains(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldContains(FieldGroupName, v))
|
||||
}
|
||||
|
||||
// GroupNameHasPrefix applies the HasPrefix predicate on the "group_name" field.
|
||||
func GroupNameHasPrefix(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldHasPrefix(FieldGroupName, v))
|
||||
}
|
||||
|
||||
// GroupNameHasSuffix applies the HasSuffix predicate on the "group_name" field.
|
||||
func GroupNameHasSuffix(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldHasSuffix(FieldGroupName, v))
|
||||
}
|
||||
|
||||
// GroupNameIsNil applies the IsNil predicate on the "group_name" field.
|
||||
func GroupNameIsNil() predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldIsNull(FieldGroupName))
|
||||
}
|
||||
|
||||
// GroupNameNotNil applies the NotNil predicate on the "group_name" field.
|
||||
func GroupNameNotNil() predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldNotNull(FieldGroupName))
|
||||
}
|
||||
|
||||
// GroupNameEqualFold applies the EqualFold predicate on the "group_name" field.
|
||||
func GroupNameEqualFold(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldEqualFold(FieldGroupName, v))
|
||||
}
|
||||
|
||||
// GroupNameContainsFold applies the ContainsFold predicate on the "group_name" field.
|
||||
func GroupNameContainsFold(v string) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldContainsFold(FieldGroupName, v))
|
||||
}
|
||||
|
||||
// EnabledEQ applies the EQ predicate on the "enabled" field.
|
||||
func EnabledEQ(v bool) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldEQ(FieldEnabled, v))
|
||||
}
|
||||
|
||||
// EnabledNEQ applies the NEQ predicate on the "enabled" field.
|
||||
func EnabledNEQ(v bool) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldNEQ(FieldEnabled, v))
|
||||
}
|
||||
|
||||
// IntervalSecondsEQ applies the EQ predicate on the "interval_seconds" field.
|
||||
func IntervalSecondsEQ(v int) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldEQ(FieldIntervalSeconds, v))
|
||||
}
|
||||
|
||||
// IntervalSecondsNEQ applies the NEQ predicate on the "interval_seconds" field.
|
||||
func IntervalSecondsNEQ(v int) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldNEQ(FieldIntervalSeconds, v))
|
||||
}
|
||||
|
||||
// IntervalSecondsIn applies the In predicate on the "interval_seconds" field.
|
||||
func IntervalSecondsIn(vs ...int) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldIn(FieldIntervalSeconds, vs...))
|
||||
}
|
||||
|
||||
// IntervalSecondsNotIn applies the NotIn predicate on the "interval_seconds" field.
|
||||
func IntervalSecondsNotIn(vs ...int) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldNotIn(FieldIntervalSeconds, vs...))
|
||||
}
|
||||
|
||||
// IntervalSecondsGT applies the GT predicate on the "interval_seconds" field.
|
||||
func IntervalSecondsGT(v int) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldGT(FieldIntervalSeconds, v))
|
||||
}
|
||||
|
||||
// IntervalSecondsGTE applies the GTE predicate on the "interval_seconds" field.
|
||||
func IntervalSecondsGTE(v int) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldGTE(FieldIntervalSeconds, v))
|
||||
}
|
||||
|
||||
// IntervalSecondsLT applies the LT predicate on the "interval_seconds" field.
|
||||
func IntervalSecondsLT(v int) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldLT(FieldIntervalSeconds, v))
|
||||
}
|
||||
|
||||
// IntervalSecondsLTE applies the LTE predicate on the "interval_seconds" field.
|
||||
func IntervalSecondsLTE(v int) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldLTE(FieldIntervalSeconds, v))
|
||||
}
|
||||
|
||||
// LastCheckedAtEQ applies the EQ predicate on the "last_checked_at" field.
|
||||
func LastCheckedAtEQ(v time.Time) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldEQ(FieldLastCheckedAt, v))
|
||||
}
|
||||
|
||||
// LastCheckedAtNEQ applies the NEQ predicate on the "last_checked_at" field.
|
||||
func LastCheckedAtNEQ(v time.Time) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldNEQ(FieldLastCheckedAt, v))
|
||||
}
|
||||
|
||||
// LastCheckedAtIn applies the In predicate on the "last_checked_at" field.
|
||||
func LastCheckedAtIn(vs ...time.Time) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldIn(FieldLastCheckedAt, vs...))
|
||||
}
|
||||
|
||||
// LastCheckedAtNotIn applies the NotIn predicate on the "last_checked_at" field.
|
||||
func LastCheckedAtNotIn(vs ...time.Time) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldNotIn(FieldLastCheckedAt, vs...))
|
||||
}
|
||||
|
||||
// LastCheckedAtGT applies the GT predicate on the "last_checked_at" field.
|
||||
func LastCheckedAtGT(v time.Time) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldGT(FieldLastCheckedAt, v))
|
||||
}
|
||||
|
||||
// LastCheckedAtGTE applies the GTE predicate on the "last_checked_at" field.
|
||||
func LastCheckedAtGTE(v time.Time) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldGTE(FieldLastCheckedAt, v))
|
||||
}
|
||||
|
||||
// LastCheckedAtLT applies the LT predicate on the "last_checked_at" field.
|
||||
func LastCheckedAtLT(v time.Time) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldLT(FieldLastCheckedAt, v))
|
||||
}
|
||||
|
||||
// LastCheckedAtLTE applies the LTE predicate on the "last_checked_at" field.
|
||||
func LastCheckedAtLTE(v time.Time) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldLTE(FieldLastCheckedAt, v))
|
||||
}
|
||||
|
||||
// LastCheckedAtIsNil applies the IsNil predicate on the "last_checked_at" field.
|
||||
func LastCheckedAtIsNil() predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldIsNull(FieldLastCheckedAt))
|
||||
}
|
||||
|
||||
// LastCheckedAtNotNil applies the NotNil predicate on the "last_checked_at" field.
|
||||
func LastCheckedAtNotNil() predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldNotNull(FieldLastCheckedAt))
|
||||
}
|
||||
|
||||
// CreatedByEQ applies the EQ predicate on the "created_by" field.
|
||||
func CreatedByEQ(v int64) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldEQ(FieldCreatedBy, v))
|
||||
}
|
||||
|
||||
// CreatedByNEQ applies the NEQ predicate on the "created_by" field.
|
||||
func CreatedByNEQ(v int64) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldNEQ(FieldCreatedBy, v))
|
||||
}
|
||||
|
||||
// CreatedByIn applies the In predicate on the "created_by" field.
|
||||
func CreatedByIn(vs ...int64) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldIn(FieldCreatedBy, vs...))
|
||||
}
|
||||
|
||||
// CreatedByNotIn applies the NotIn predicate on the "created_by" field.
|
||||
func CreatedByNotIn(vs ...int64) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldNotIn(FieldCreatedBy, vs...))
|
||||
}
|
||||
|
||||
// CreatedByGT applies the GT predicate on the "created_by" field.
|
||||
func CreatedByGT(v int64) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldGT(FieldCreatedBy, v))
|
||||
}
|
||||
|
||||
// CreatedByGTE applies the GTE predicate on the "created_by" field.
|
||||
func CreatedByGTE(v int64) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldGTE(FieldCreatedBy, v))
|
||||
}
|
||||
|
||||
// CreatedByLT applies the LT predicate on the "created_by" field.
|
||||
func CreatedByLT(v int64) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldLT(FieldCreatedBy, v))
|
||||
}
|
||||
|
||||
// CreatedByLTE applies the LTE predicate on the "created_by" field.
|
||||
func CreatedByLTE(v int64) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldLTE(FieldCreatedBy, v))
|
||||
}
|
||||
|
||||
// TemplateIDEQ applies the EQ predicate on the "template_id" field.
|
||||
func TemplateIDEQ(v int64) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldEQ(FieldTemplateID, v))
|
||||
}
|
||||
|
||||
// TemplateIDNEQ applies the NEQ predicate on the "template_id" field.
|
||||
func TemplateIDNEQ(v int64) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldNEQ(FieldTemplateID, v))
|
||||
}
|
||||
|
||||
// TemplateIDIn applies the In predicate on the "template_id" field.
|
||||
func TemplateIDIn(vs ...int64) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldIn(FieldTemplateID, vs...))
|
||||
}
|
||||
|
||||
// TemplateIDNotIn applies the NotIn predicate on the "template_id" field.
|
||||
func TemplateIDNotIn(vs ...int64) predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldNotIn(FieldTemplateID, vs...))
|
||||
}
|
||||
|
||||
// TemplateIDIsNil applies the IsNil predicate on the "template_id" field.
|
||||
func TemplateIDIsNil() predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldIsNull(FieldTemplateID))
|
||||
}
|
||||
|
||||
// TemplateIDNotNil applies the NotNil predicate on the "template_id" field.
|
||||
func TemplateIDNotNil() predicate.ChannelMonitor {
|
||||
return predicate.ChannelMonitor(sql.FieldNotNull(FieldTemplateID))
|
||||
}
|
||||
|
||||
// BodyOverrideModeEQ applies the EQ predicate on the "body_override_mode" field.
func BodyOverrideModeEQ(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldEQ(FieldBodyOverrideMode, v))
}

// BodyOverrideModeNEQ applies the NEQ predicate on the "body_override_mode" field.
func BodyOverrideModeNEQ(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldNEQ(FieldBodyOverrideMode, v))
}

// BodyOverrideModeIn applies the In predicate on the "body_override_mode" field.
func BodyOverrideModeIn(vs ...string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldIn(FieldBodyOverrideMode, vs...))
}

// BodyOverrideModeNotIn applies the NotIn predicate on the "body_override_mode" field.
func BodyOverrideModeNotIn(vs ...string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldNotIn(FieldBodyOverrideMode, vs...))
}

// NOTE(review): the ordering predicates below (GT/GTE/LT/LTE) compare the string
// column per the database's collation rules — verify that is intended for a mode flag.

// BodyOverrideModeGT applies the GT predicate on the "body_override_mode" field.
func BodyOverrideModeGT(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldGT(FieldBodyOverrideMode, v))
}

// BodyOverrideModeGTE applies the GTE predicate on the "body_override_mode" field.
func BodyOverrideModeGTE(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldGTE(FieldBodyOverrideMode, v))
}

// BodyOverrideModeLT applies the LT predicate on the "body_override_mode" field.
func BodyOverrideModeLT(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldLT(FieldBodyOverrideMode, v))
}

// BodyOverrideModeLTE applies the LTE predicate on the "body_override_mode" field.
func BodyOverrideModeLTE(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldLTE(FieldBodyOverrideMode, v))
}

// BodyOverrideModeContains applies the Contains predicate on the "body_override_mode" field.
func BodyOverrideModeContains(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldContains(FieldBodyOverrideMode, v))
}

// BodyOverrideModeHasPrefix applies the HasPrefix predicate on the "body_override_mode" field.
func BodyOverrideModeHasPrefix(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldHasPrefix(FieldBodyOverrideMode, v))
}

// BodyOverrideModeHasSuffix applies the HasSuffix predicate on the "body_override_mode" field.
func BodyOverrideModeHasSuffix(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldHasSuffix(FieldBodyOverrideMode, v))
}

// BodyOverrideModeEqualFold applies the EqualFold predicate on the "body_override_mode" field.
func BodyOverrideModeEqualFold(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldEqualFold(FieldBodyOverrideMode, v))
}

// BodyOverrideModeContainsFold applies the ContainsFold predicate on the "body_override_mode" field.
func BodyOverrideModeContainsFold(v string) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldContainsFold(FieldBodyOverrideMode, v))
}

// BodyOverrideIsNil applies the IsNil predicate on the "body_override" field.
func BodyOverrideIsNil() predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldIsNull(FieldBodyOverride))
}

// BodyOverrideNotNil applies the NotNil predicate on the "body_override" field.
func BodyOverrideNotNil() predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.FieldNotNull(FieldBodyOverride))
}
|
||||
|
||||
// HasHistory applies the HasEdge predicate on the "history" edge.
// It delegates to sqlgraph to emit the edge-existence condition for the
// O2M "history" relation (ChannelMonitor -> ChannelMonitorHistory).
func HasHistory() predicate.ChannelMonitor {
	return predicate.ChannelMonitor(func(s *sql.Selector) {
		step := sqlgraph.NewStep(
			sqlgraph.From(Table, FieldID),
			sqlgraph.Edge(sqlgraph.O2M, false, HistoryTable, HistoryColumn),
		)
		sqlgraph.HasNeighbors(s, step)
	})
}

// HasHistoryWith applies the HasEdge predicate on the "history" edge with a given conditions (other predicates).
// All preds are AND-ed together inside the neighbor sub-query.
func HasHistoryWith(preds ...predicate.ChannelMonitorHistory) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(func(s *sql.Selector) {
		step := newHistoryStep()
		sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
			for _, p := range preds {
				p(s)
			}
		})
	})
}

// HasDailyRollups applies the HasEdge predicate on the "daily_rollups" edge.
func HasDailyRollups() predicate.ChannelMonitor {
	return predicate.ChannelMonitor(func(s *sql.Selector) {
		step := sqlgraph.NewStep(
			sqlgraph.From(Table, FieldID),
			sqlgraph.Edge(sqlgraph.O2M, false, DailyRollupsTable, DailyRollupsColumn),
		)
		sqlgraph.HasNeighbors(s, step)
	})
}

// HasDailyRollupsWith applies the HasEdge predicate on the "daily_rollups" edge with a given conditions (other predicates).
func HasDailyRollupsWith(preds ...predicate.ChannelMonitorDailyRollup) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(func(s *sql.Selector) {
		step := newDailyRollupsStep()
		sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
			for _, p := range preds {
				p(s)
			}
		})
	})
}

// HasRequestTemplate applies the HasEdge predicate on the "request_template" edge.
// Note this edge is M2O (the monitor's template_id points at one template),
// unlike the O2M history/daily_rollups edges above.
func HasRequestTemplate() predicate.ChannelMonitor {
	return predicate.ChannelMonitor(func(s *sql.Selector) {
		step := sqlgraph.NewStep(
			sqlgraph.From(Table, FieldID),
			sqlgraph.Edge(sqlgraph.M2O, false, RequestTemplateTable, RequestTemplateColumn),
		)
		sqlgraph.HasNeighbors(s, step)
	})
}

// HasRequestTemplateWith applies the HasEdge predicate on the "request_template" edge with a given conditions (other predicates).
func HasRequestTemplateWith(preds ...predicate.ChannelMonitorRequestTemplate) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(func(s *sql.Selector) {
		step := newRequestTemplateStep()
		sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
			for _, p := range preds {
				p(s)
			}
		})
	})
}
|
||||
|
||||
// And groups predicates with the AND operator between them.
func And(predicates ...predicate.ChannelMonitor) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.AndPredicates(predicates...))
}

// Or groups predicates with the OR operator between them.
func Or(predicates ...predicate.ChannelMonitor) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.OrPredicates(predicates...))
}

// Not applies the not operator on the given predicate.
func Not(p predicate.ChannelMonitor) predicate.ChannelMonitor {
	return predicate.ChannelMonitor(sql.NotPredicates(p))
}
|
||||
1610
backend/ent/channelmonitor_create.go
Normal file
88
backend/ent/channelmonitor_delete.go
Normal file
@@ -0,0 +1,88 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package ent
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"entgo.io/ent/schema/field"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitor"
|
||||
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||
)
|
||||
|
||||
// ChannelMonitorDelete is the builder for deleting a ChannelMonitor entity.
// Despite the name it deletes every entity matching its predicates (bulk delete);
// use ChannelMonitorDeleteOne for single-entity semantics.
type ChannelMonitorDelete struct {
	config
	hooks    []Hook                  // mutation hooks applied around Exec
	mutation *ChannelMonitorMutation // accumulates the delete predicates
}
|
||||
|
||||
// Where appends a list predicates to the ChannelMonitorDelete builder.
func (_d *ChannelMonitorDelete) Where(ps ...predicate.ChannelMonitor) *ChannelMonitorDelete {
	_d.mutation.Where(ps...)
	return _d
}

// Exec executes the deletion query and returns how many vertices were deleted.
// The actual SQL runs in sqlExec; withHooks wraps it with the builder's hooks.
func (_d *ChannelMonitorDelete) Exec(ctx context.Context) (int, error) {
	return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks)
}

// ExecX is like Exec, but panics if an error occurs.
func (_d *ChannelMonitorDelete) ExecX(ctx context.Context) int {
	n, err := _d.Exec(ctx)
	if err != nil {
		panic(err)
	}
	return n
}
|
||||
|
||||
// sqlExec builds the DELETE spec from the accumulated mutation predicates,
// executes it via sqlgraph, and returns the number of deleted rows.
func (_d *ChannelMonitorDelete) sqlExec(ctx context.Context) (int, error) {
	_spec := sqlgraph.NewDeleteSpec(channelmonitor.Table, sqlgraph.NewFieldSpec(channelmonitor.FieldID, field.TypeInt64))
	if ps := _d.mutation.predicates; len(ps) > 0 {
		// Apply every accumulated predicate to the DELETE's selector.
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec)
	if err != nil && sqlgraph.IsConstraintError(err) {
		// Surface FK/unique violations as ent's typed ConstraintError while
		// preserving the original error for errors.Unwrap/Is/As.
		err = &ConstraintError{msg: err.Error(), wrap: err}
	}
	_d.mutation.done = true
	return affected, err
}
|
||||
|
||||
// ChannelMonitorDeleteOne is the builder for deleting a single ChannelMonitor entity.
// It wraps a bulk ChannelMonitorDelete and converts "zero rows deleted" into NotFoundError.
type ChannelMonitorDeleteOne struct {
	_d *ChannelMonitorDelete // underlying bulk-delete builder
}

// Where appends a list predicates to the ChannelMonitorDelete builder.
func (_d *ChannelMonitorDeleteOne) Where(ps ...predicate.ChannelMonitor) *ChannelMonitorDeleteOne {
	_d._d.mutation.Where(ps...)
	return _d
}

// Exec executes the deletion query.
// Returns *NotFoundError when no row matched; errors from the underlying
// bulk delete are returned as-is.
func (_d *ChannelMonitorDeleteOne) Exec(ctx context.Context) error {
	n, err := _d._d.Exec(ctx)
	switch {
	case err != nil:
		return err
	case n == 0:
		return &NotFoundError{channelmonitor.Label}
	default:
		return nil
	}
}

// ExecX is like Exec, but panics if an error occurs.
func (_d *ChannelMonitorDeleteOne) ExecX(ctx context.Context) {
	if err := _d.Exec(ctx); err != nil {
		panic(err)
	}
}
|
||||
797
backend/ent/channelmonitor_query.go
Normal file
@@ -0,0 +1,797 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package ent
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql/driver"
|
||||
"fmt"
|
||||
"math"
|
||||
|
||||
"entgo.io/ent"
|
||||
"entgo.io/ent/dialect"
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"entgo.io/ent/schema/field"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitor"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitordailyrollup"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitorhistory"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitorrequesttemplate"
|
||||
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||
)
|
||||
|
||||
// ChannelMonitorQuery is the builder for querying ChannelMonitor entities.
type ChannelMonitorQuery struct {
	config
	ctx        *QueryContext                // limit/offset/unique/fields state
	order      []channelmonitor.OrderOption // ORDER BY clauses, applied in order
	inters     []Interceptor
	predicates []predicate.ChannelMonitor
	// Non-nil with* fields trigger eager-loading of the matching edge in sqlAll.
	withHistory         *ChannelMonitorHistoryQuery
	withDailyRollups    *ChannelMonitorDailyRollupQuery
	withRequestTemplate *ChannelMonitorRequestTemplateQuery
	modifiers           []func(*sql.Selector)
	// intermediate query (i.e. traversal path).
	sql  *sql.Selector
	path func(context.Context) (*sql.Selector, error)
}
|
||||
|
||||
// Where adds a new predicate for the ChannelMonitorQuery builder.
func (_q *ChannelMonitorQuery) Where(ps ...predicate.ChannelMonitor) *ChannelMonitorQuery {
	_q.predicates = append(_q.predicates, ps...)
	return _q
}

// Limit the number of records to be returned by this query.
func (_q *ChannelMonitorQuery) Limit(limit int) *ChannelMonitorQuery {
	_q.ctx.Limit = &limit
	return _q
}

// Offset to start from.
func (_q *ChannelMonitorQuery) Offset(offset int) *ChannelMonitorQuery {
	_q.ctx.Offset = &offset
	return _q
}

// Unique configures the query builder to filter duplicate records on query.
// By default, unique is set to true, and can be disabled using this method.
func (_q *ChannelMonitorQuery) Unique(unique bool) *ChannelMonitorQuery {
	_q.ctx.Unique = &unique
	return _q
}

// Order specifies how the records should be ordered.
func (_q *ChannelMonitorQuery) Order(o ...channelmonitor.OrderOption) *ChannelMonitorQuery {
	_q.order = append(_q.order, o...)
	return _q
}
|
||||
|
||||
// QueryHistory chains the current query on the "history" edge.
// The returned builder's path closure defers the traversal: the current
// query's selector is only built (and validated) when the new query executes.
func (_q *ChannelMonitorQuery) QueryHistory() *ChannelMonitorHistoryQuery {
	query := (&ChannelMonitorHistoryClient{config: _q.config}).Query()
	query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
		if err := _q.prepareQuery(ctx); err != nil {
			return nil, err
		}
		selector := _q.sqlQuery(ctx)
		if err := selector.Err(); err != nil {
			return nil, err
		}
		step := sqlgraph.NewStep(
			sqlgraph.From(channelmonitor.Table, channelmonitor.FieldID, selector),
			sqlgraph.To(channelmonitorhistory.Table, channelmonitorhistory.FieldID),
			sqlgraph.Edge(sqlgraph.O2M, false, channelmonitor.HistoryTable, channelmonitor.HistoryColumn),
		)
		fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step)
		return fromU, nil
	}
	return query
}

// QueryDailyRollups chains the current query on the "daily_rollups" edge.
func (_q *ChannelMonitorQuery) QueryDailyRollups() *ChannelMonitorDailyRollupQuery {
	query := (&ChannelMonitorDailyRollupClient{config: _q.config}).Query()
	query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
		if err := _q.prepareQuery(ctx); err != nil {
			return nil, err
		}
		selector := _q.sqlQuery(ctx)
		if err := selector.Err(); err != nil {
			return nil, err
		}
		step := sqlgraph.NewStep(
			sqlgraph.From(channelmonitor.Table, channelmonitor.FieldID, selector),
			sqlgraph.To(channelmonitordailyrollup.Table, channelmonitordailyrollup.FieldID),
			sqlgraph.Edge(sqlgraph.O2M, false, channelmonitor.DailyRollupsTable, channelmonitor.DailyRollupsColumn),
		)
		fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step)
		return fromU, nil
	}
	return query
}

// QueryRequestTemplate chains the current query on the "request_template" edge.
// This edge is M2O (monitor -> its single template), unlike the two O2M edges above.
func (_q *ChannelMonitorQuery) QueryRequestTemplate() *ChannelMonitorRequestTemplateQuery {
	query := (&ChannelMonitorRequestTemplateClient{config: _q.config}).Query()
	query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
		if err := _q.prepareQuery(ctx); err != nil {
			return nil, err
		}
		selector := _q.sqlQuery(ctx)
		if err := selector.Err(); err != nil {
			return nil, err
		}
		step := sqlgraph.NewStep(
			sqlgraph.From(channelmonitor.Table, channelmonitor.FieldID, selector),
			sqlgraph.To(channelmonitorrequesttemplate.Table, channelmonitorrequesttemplate.FieldID),
			sqlgraph.Edge(sqlgraph.M2O, false, channelmonitor.RequestTemplateTable, channelmonitor.RequestTemplateColumn),
		)
		fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step)
		return fromU, nil
	}
	return query
}
|
||||
|
||||
// First returns the first ChannelMonitor entity from the query.
// Returns a *NotFoundError when no ChannelMonitor was found.
func (_q *ChannelMonitorQuery) First(ctx context.Context) (*ChannelMonitor, error) {
	nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst))
	if err != nil {
		return nil, err
	}
	if len(nodes) == 0 {
		return nil, &NotFoundError{channelmonitor.Label}
	}
	return nodes[0], nil
}

// FirstX is like First, but panics if an error occurs.
// A not-found result returns nil instead of panicking.
func (_q *ChannelMonitorQuery) FirstX(ctx context.Context) *ChannelMonitor {
	node, err := _q.First(ctx)
	if err != nil && !IsNotFound(err) {
		panic(err)
	}
	return node
}

// FirstID returns the first ChannelMonitor ID from the query.
// Returns a *NotFoundError when no ChannelMonitor ID was found.
func (_q *ChannelMonitorQuery) FirstID(ctx context.Context) (id int64, err error) {
	var ids []int64
	if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil {
		return
	}
	if len(ids) == 0 {
		err = &NotFoundError{channelmonitor.Label}
		return
	}
	return ids[0], nil
}

// FirstIDX is like FirstID, but panics if an error occurs.
func (_q *ChannelMonitorQuery) FirstIDX(ctx context.Context) int64 {
	id, err := _q.FirstID(ctx)
	if err != nil && !IsNotFound(err) {
		panic(err)
	}
	return id
}

// Only returns a single ChannelMonitor entity found by the query, ensuring it only returns one.
// Returns a *NotSingularError when more than one ChannelMonitor entity is found.
// Returns a *NotFoundError when no ChannelMonitor entities are found.
func (_q *ChannelMonitorQuery) Only(ctx context.Context) (*ChannelMonitor, error) {
	// Limit(2) is enough to distinguish "exactly one" from "more than one".
	nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly))
	if err != nil {
		return nil, err
	}
	switch len(nodes) {
	case 1:
		return nodes[0], nil
	case 0:
		return nil, &NotFoundError{channelmonitor.Label}
	default:
		return nil, &NotSingularError{channelmonitor.Label}
	}
}

// OnlyX is like Only, but panics if an error occurs.
func (_q *ChannelMonitorQuery) OnlyX(ctx context.Context) *ChannelMonitor {
	node, err := _q.Only(ctx)
	if err != nil {
		panic(err)
	}
	return node
}

// OnlyID is like Only, but returns the only ChannelMonitor ID in the query.
// Returns a *NotSingularError when more than one ChannelMonitor ID is found.
// Returns a *NotFoundError when no entities are found.
func (_q *ChannelMonitorQuery) OnlyID(ctx context.Context) (id int64, err error) {
	var ids []int64
	if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil {
		return
	}
	switch len(ids) {
	case 1:
		id = ids[0]
	case 0:
		err = &NotFoundError{channelmonitor.Label}
	default:
		err = &NotSingularError{channelmonitor.Label}
	}
	return
}

// OnlyIDX is like OnlyID, but panics if an error occurs.
func (_q *ChannelMonitorQuery) OnlyIDX(ctx context.Context) int64 {
	id, err := _q.OnlyID(ctx)
	if err != nil {
		panic(err)
	}
	return id
}

// All executes the query and returns a list of ChannelMonitors.
func (_q *ChannelMonitorQuery) All(ctx context.Context) ([]*ChannelMonitor, error) {
	ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll)
	if err := _q.prepareQuery(ctx); err != nil {
		return nil, err
	}
	qr := querierAll[[]*ChannelMonitor, *ChannelMonitorQuery]()
	return withInterceptors[[]*ChannelMonitor](ctx, _q, qr, _q.inters)
}

// AllX is like All, but panics if an error occurs.
func (_q *ChannelMonitorQuery) AllX(ctx context.Context) []*ChannelMonitor {
	nodes, err := _q.All(ctx)
	if err != nil {
		panic(err)
	}
	return nodes
}

// IDs executes the query and returns a list of ChannelMonitor IDs.
func (_q *ChannelMonitorQuery) IDs(ctx context.Context) (ids []int64, err error) {
	// Traversal paths may produce duplicate IDs; default to DISTINCT unless
	// the caller set Unique explicitly.
	if _q.ctx.Unique == nil && _q.path != nil {
		_q.Unique(true)
	}
	ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs)
	if err = _q.Select(channelmonitor.FieldID).Scan(ctx, &ids); err != nil {
		return nil, err
	}
	return ids, nil
}

// IDsX is like IDs, but panics if an error occurs.
func (_q *ChannelMonitorQuery) IDsX(ctx context.Context) []int64 {
	ids, err := _q.IDs(ctx)
	if err != nil {
		panic(err)
	}
	return ids
}

// Count returns the count of the given query.
func (_q *ChannelMonitorQuery) Count(ctx context.Context) (int, error) {
	ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount)
	if err := _q.prepareQuery(ctx); err != nil {
		return 0, err
	}
	return withInterceptors[int](ctx, _q, querierCount[*ChannelMonitorQuery](), _q.inters)
}

// CountX is like Count, but panics if an error occurs.
func (_q *ChannelMonitorQuery) CountX(ctx context.Context) int {
	count, err := _q.Count(ctx)
	if err != nil {
		panic(err)
	}
	return count
}

// Exist returns true if the query has elements in the graph.
func (_q *ChannelMonitorQuery) Exist(ctx context.Context) (bool, error) {
	ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist)
	switch _, err := _q.FirstID(ctx); {
	case IsNotFound(err):
		return false, nil
	case err != nil:
		return false, fmt.Errorf("ent: check existence: %w", err)
	default:
		return true, nil
	}
}

// ExistX is like Exist, but panics if an error occurs.
func (_q *ChannelMonitorQuery) ExistX(ctx context.Context) bool {
	exist, err := _q.Exist(ctx)
	if err != nil {
		panic(err)
	}
	return exist
}
|
||||
|
||||
// Clone returns a duplicate of the ChannelMonitorQuery builder, including all associated steps. It can be
// used to prepare common query builders and use them differently after the clone is made.
// Safe on a nil receiver (returns nil). Slices are copied so later appends on
// either builder do not affect the other.
func (_q *ChannelMonitorQuery) Clone() *ChannelMonitorQuery {
	if _q == nil {
		return nil
	}
	return &ChannelMonitorQuery{
		config:              _q.config,
		ctx:                 _q.ctx.Clone(),
		order:               append([]channelmonitor.OrderOption{}, _q.order...),
		inters:              append([]Interceptor{}, _q.inters...),
		predicates:          append([]predicate.ChannelMonitor{}, _q.predicates...),
		withHistory:         _q.withHistory.Clone(),
		withDailyRollups:    _q.withDailyRollups.Clone(),
		withRequestTemplate: _q.withRequestTemplate.Clone(),
		// clone intermediate query.
		sql:  _q.sql.Clone(),
		path: _q.path,
	}
}
|
||||
|
||||
// WithHistory tells the query-builder to eager-load the nodes that are connected to
// the "history" edge. The optional arguments are used to configure the query builder of the edge.
func (_q *ChannelMonitorQuery) WithHistory(opts ...func(*ChannelMonitorHistoryQuery)) *ChannelMonitorQuery {
	query := (&ChannelMonitorHistoryClient{config: _q.config}).Query()
	for _, opt := range opts {
		opt(query)
	}
	_q.withHistory = query
	return _q
}

// WithDailyRollups tells the query-builder to eager-load the nodes that are connected to
// the "daily_rollups" edge. The optional arguments are used to configure the query builder of the edge.
func (_q *ChannelMonitorQuery) WithDailyRollups(opts ...func(*ChannelMonitorDailyRollupQuery)) *ChannelMonitorQuery {
	query := (&ChannelMonitorDailyRollupClient{config: _q.config}).Query()
	for _, opt := range opts {
		opt(query)
	}
	_q.withDailyRollups = query
	return _q
}

// WithRequestTemplate tells the query-builder to eager-load the nodes that are connected to
// the "request_template" edge. The optional arguments are used to configure the query builder of the edge.
func (_q *ChannelMonitorQuery) WithRequestTemplate(opts ...func(*ChannelMonitorRequestTemplateQuery)) *ChannelMonitorQuery {
	query := (&ChannelMonitorRequestTemplateClient{config: _q.config}).Query()
	for _, opt := range opts {
		opt(query)
	}
	_q.withRequestTemplate = query
	return _q
}
|
||||
|
||||
// GroupBy is used to group vertices by one or more fields/columns.
// It is often used with aggregate functions, like: count, max, mean, min, sum.
//
// Example:
//
//	var v []struct {
//		CreatedAt time.Time `json:"created_at,omitempty"`
//		Count     int       `json:"count,omitempty"`
//	}
//
//	client.ChannelMonitor.Query().
//		GroupBy(channelmonitor.FieldCreatedAt).
//		Aggregate(ent.Count()).
//		Scan(ctx, &v)
func (_q *ChannelMonitorQuery) GroupBy(field string, fields ...string) *ChannelMonitorGroupBy {
	_q.ctx.Fields = append([]string{field}, fields...)
	grbuild := &ChannelMonitorGroupBy{build: _q}
	grbuild.flds = &_q.ctx.Fields
	grbuild.label = channelmonitor.Label
	grbuild.scan = grbuild.Scan
	return grbuild
}

// Select allows the selection one or more fields/columns for the given query,
// instead of selecting all fields in the entity.
//
// Example:
//
//	var v []struct {
//		CreatedAt time.Time `json:"created_at,omitempty"`
//	}
//
//	client.ChannelMonitor.Query().
//		Select(channelmonitor.FieldCreatedAt).
//		Scan(ctx, &v)
func (_q *ChannelMonitorQuery) Select(fields ...string) *ChannelMonitorSelect {
	_q.ctx.Fields = append(_q.ctx.Fields, fields...)
	sbuild := &ChannelMonitorSelect{ChannelMonitorQuery: _q}
	sbuild.label = channelmonitor.Label
	sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan
	return sbuild
}

// Aggregate returns a ChannelMonitorSelect configured with the given aggregations.
func (_q *ChannelMonitorQuery) Aggregate(fns ...AggregateFunc) *ChannelMonitorSelect {
	return _q.Select().Aggregate(fns...)
}
|
||||
|
||||
// prepareQuery runs interceptor traversals, validates any selected columns,
// and resolves a deferred traversal path (if present) into _q.sql.
// Called by every terminal method before SQL generation.
func (_q *ChannelMonitorQuery) prepareQuery(ctx context.Context) error {
	for _, inter := range _q.inters {
		if inter == nil {
			return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
		}
		if trv, ok := inter.(Traverser); ok {
			if err := trv.Traverse(ctx, _q); err != nil {
				return err
			}
		}
	}
	// Reject field names that are not columns of the channelmonitor table.
	for _, f := range _q.ctx.Fields {
		if !channelmonitor.ValidColumn(f) {
			return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
		}
	}
	if _q.path != nil {
		prev, err := _q.path(ctx)
		if err != nil {
			return err
		}
		_q.sql = prev
	}
	return nil
}
|
||||
|
||||
// sqlAll scans all matching ChannelMonitor rows and then eager-loads any
// requested edges (history, daily_rollups, request_template) in follow-up queries.
func (_q *ChannelMonitorQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*ChannelMonitor, error) {
	var (
		nodes = []*ChannelMonitor{}
		_spec = _q.querySpec()
		// Index order must match the Edges.loadedTypes layout of ChannelMonitor.
		loadedTypes = [3]bool{
			_q.withHistory != nil,
			_q.withDailyRollups != nil,
			_q.withRequestTemplate != nil,
		}
	)
	_spec.ScanValues = func(columns []string) ([]any, error) {
		return (*ChannelMonitor).scanValues(nil, columns)
	}
	_spec.Assign = func(columns []string, values []any) error {
		node := &ChannelMonitor{config: _q.config}
		nodes = append(nodes, node)
		node.Edges.loadedTypes = loadedTypes
		return node.assignValues(columns, values)
	}
	if len(_q.modifiers) > 0 {
		_spec.Modifiers = _q.modifiers
	}
	for i := range hooks {
		hooks[i](ctx, _spec)
	}
	if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil {
		return nil, err
	}
	if len(nodes) == 0 {
		return nodes, nil
	}
	if query := _q.withHistory; query != nil {
		if err := _q.loadHistory(ctx, query, nodes,
			func(n *ChannelMonitor) { n.Edges.History = []*ChannelMonitorHistory{} },
			func(n *ChannelMonitor, e *ChannelMonitorHistory) { n.Edges.History = append(n.Edges.History, e) }); err != nil {
			return nil, err
		}
	}
	if query := _q.withDailyRollups; query != nil {
		if err := _q.loadDailyRollups(ctx, query, nodes,
			func(n *ChannelMonitor) { n.Edges.DailyRollups = []*ChannelMonitorDailyRollup{} },
			func(n *ChannelMonitor, e *ChannelMonitorDailyRollup) {
				n.Edges.DailyRollups = append(n.Edges.DailyRollups, e)
			}); err != nil {
			return nil, err
		}
	}
	if query := _q.withRequestTemplate; query != nil {
		// M2O edge: no init callback — the single neighbor is assigned directly.
		if err := _q.loadRequestTemplate(ctx, query, nodes, nil,
			func(n *ChannelMonitor, e *ChannelMonitorRequestTemplate) { n.Edges.RequestTemplate = e }); err != nil {
			return nil, err
		}
	}
	return nodes, nil
}
|
||||
|
||||
// loadHistory eager-loads the O2M "history" edge: it queries all history rows
// whose monitor_id FK is in the loaded node set and attaches each to its parent.
func (_q *ChannelMonitorQuery) loadHistory(ctx context.Context, query *ChannelMonitorHistoryQuery, nodes []*ChannelMonitor, init func(*ChannelMonitor), assign func(*ChannelMonitor, *ChannelMonitorHistory)) error {
	fks := make([]driver.Value, 0, len(nodes))
	nodeids := make(map[int64]*ChannelMonitor)
	for i := range nodes {
		fks = append(fks, nodes[i].ID)
		nodeids[nodes[i].ID] = nodes[i]
		if init != nil {
			init(nodes[i])
		}
	}
	// If the caller selected specific columns, the FK column must be included
	// so the results can be mapped back to their parents.
	if len(query.ctx.Fields) > 0 {
		query.ctx.AppendFieldOnce(channelmonitorhistory.FieldMonitorID)
	}
	query.Where(predicate.ChannelMonitorHistory(func(s *sql.Selector) {
		s.Where(sql.InValues(s.C(channelmonitor.HistoryColumn), fks...))
	}))
	neighbors, err := query.All(ctx)
	if err != nil {
		return err
	}
	for _, n := range neighbors {
		fk := n.MonitorID
		node, ok := nodeids[fk]
		if !ok {
			return fmt.Errorf(`unexpected referenced foreign-key "monitor_id" returned %v for node %v`, fk, n.ID)
		}
		assign(node, n)
	}
	return nil
}

// loadDailyRollups eager-loads the O2M "daily_rollups" edge; same strategy as loadHistory.
func (_q *ChannelMonitorQuery) loadDailyRollups(ctx context.Context, query *ChannelMonitorDailyRollupQuery, nodes []*ChannelMonitor, init func(*ChannelMonitor), assign func(*ChannelMonitor, *ChannelMonitorDailyRollup)) error {
	fks := make([]driver.Value, 0, len(nodes))
	nodeids := make(map[int64]*ChannelMonitor)
	for i := range nodes {
		fks = append(fks, nodes[i].ID)
		nodeids[nodes[i].ID] = nodes[i]
		if init != nil {
			init(nodes[i])
		}
	}
	if len(query.ctx.Fields) > 0 {
		query.ctx.AppendFieldOnce(channelmonitordailyrollup.FieldMonitorID)
	}
	query.Where(predicate.ChannelMonitorDailyRollup(func(s *sql.Selector) {
		s.Where(sql.InValues(s.C(channelmonitor.DailyRollupsColumn), fks...))
	}))
	neighbors, err := query.All(ctx)
	if err != nil {
		return err
	}
	for _, n := range neighbors {
		fk := n.MonitorID
		node, ok := nodeids[fk]
		if !ok {
			return fmt.Errorf(`unexpected referenced foreign-key "monitor_id" returned %v for node %v`, fk, n.ID)
		}
		assign(node, n)
	}
	return nil
}

// loadRequestTemplate eager-loads the M2O "request_template" edge: it gathers the
// distinct non-nil template_id FKs, fetches those templates once, and assigns each
// template to every monitor that references it.
func (_q *ChannelMonitorQuery) loadRequestTemplate(ctx context.Context, query *ChannelMonitorRequestTemplateQuery, nodes []*ChannelMonitor, init func(*ChannelMonitor), assign func(*ChannelMonitor, *ChannelMonitorRequestTemplate)) error {
	ids := make([]int64, 0, len(nodes))
	nodeids := make(map[int64][]*ChannelMonitor)
	for i := range nodes {
		// Nullable FK: monitors without a template are skipped entirely.
		if nodes[i].TemplateID == nil {
			continue
		}
		fk := *nodes[i].TemplateID
		if _, ok := nodeids[fk]; !ok {
			ids = append(ids, fk)
		}
		nodeids[fk] = append(nodeids[fk], nodes[i])
	}
	if len(ids) == 0 {
		return nil
	}
	query.Where(channelmonitorrequesttemplate.IDIn(ids...))
	neighbors, err := query.All(ctx)
	if err != nil {
		return err
	}
	for _, n := range neighbors {
		nodes, ok := nodeids[n.ID]
		if !ok {
			return fmt.Errorf(`unexpected foreign-key "template_id" returned %v`, n.ID)
		}
		for i := range nodes {
			assign(nodes[i], n)
		}
	}
	return nil
}
|
||||
|
||||
// sqlCount builds the query spec and executes a COUNT over the matching rows.
func (_q *ChannelMonitorQuery) sqlCount(ctx context.Context) (int, error) {
	_spec := _q.querySpec()
	if len(_q.modifiers) > 0 {
		_spec.Modifiers = _q.modifiers
	}
	_spec.Node.Columns = _q.ctx.Fields
	if len(_q.ctx.Fields) > 0 {
		_spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique
	}
	return sqlgraph.CountNodes(ctx, _q.driver, _spec)
}

// querySpec translates the builder state (predicates, ordering, limit/offset,
// selected columns, uniqueness) into a sqlgraph.QuerySpec.
func (_q *ChannelMonitorQuery) querySpec() *sqlgraph.QuerySpec {
	_spec := sqlgraph.NewQuerySpec(channelmonitor.Table, channelmonitor.Columns, sqlgraph.NewFieldSpec(channelmonitor.FieldID, field.TypeInt64))
	_spec.From = _q.sql
	if unique := _q.ctx.Unique; unique != nil {
		_spec.Unique = *unique
	} else if _q.path != nil {
		// Graph traversals default to DISTINCT to drop duplicate neighbors.
		_spec.Unique = true
	}
	if fields := _q.ctx.Fields; len(fields) > 0 {
		// The ID column is always selected first; the remaining requested
		// columns follow (skipping a duplicate ID entry).
		_spec.Node.Columns = make([]string, 0, len(fields))
		_spec.Node.Columns = append(_spec.Node.Columns, channelmonitor.FieldID)
		for i := range fields {
			if fields[i] != channelmonitor.FieldID {
				_spec.Node.Columns = append(_spec.Node.Columns, fields[i])
			}
		}
		// Eager-loading request_template needs the template_id FK even when
		// the caller did not select it.
		if _q.withRequestTemplate != nil {
			_spec.Node.AddColumnOnce(channelmonitor.FieldTemplateID)
		}
	}
	if ps := _q.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	if limit := _q.ctx.Limit; limit != nil {
		_spec.Limit = *limit
	}
	if offset := _q.ctx.Offset; offset != nil {
		_spec.Offset = *offset
	}
	if ps := _q.order; len(ps) > 0 {
		_spec.Order = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	return _spec
}
|
||||
|
||||
func (_q *ChannelMonitorQuery) sqlQuery(ctx context.Context) *sql.Selector {
|
||||
builder := sql.Dialect(_q.driver.Dialect())
|
||||
t1 := builder.Table(channelmonitor.Table)
|
||||
columns := _q.ctx.Fields
|
||||
if len(columns) == 0 {
|
||||
columns = channelmonitor.Columns
|
||||
}
|
||||
selector := builder.Select(t1.Columns(columns...)...).From(t1)
|
||||
if _q.sql != nil {
|
||||
selector = _q.sql
|
||||
selector.Select(selector.Columns(columns...)...)
|
||||
}
|
||||
if _q.ctx.Unique != nil && *_q.ctx.Unique {
|
||||
selector.Distinct()
|
||||
}
|
||||
for _, m := range _q.modifiers {
|
||||
m(selector)
|
||||
}
|
||||
for _, p := range _q.predicates {
|
||||
p(selector)
|
||||
}
|
||||
for _, p := range _q.order {
|
||||
p(selector)
|
||||
}
|
||||
if offset := _q.ctx.Offset; offset != nil {
|
||||
// limit is mandatory for offset clause. We start
|
||||
// with default value, and override it below if needed.
|
||||
selector.Offset(*offset).Limit(math.MaxInt32)
|
||||
}
|
||||
if limit := _q.ctx.Limit; limit != nil {
|
||||
selector.Limit(*limit)
|
||||
}
|
||||
return selector
|
||||
}
|
||||
|
||||
// ForUpdate locks the selected rows against concurrent updates, and prevent them from being
|
||||
// updated, deleted or "selected ... for update" by other sessions, until the transaction is
|
||||
// either committed or rolled-back.
|
||||
func (_q *ChannelMonitorQuery) ForUpdate(opts ...sql.LockOption) *ChannelMonitorQuery {
|
||||
if _q.driver.Dialect() == dialect.Postgres {
|
||||
_q.Unique(false)
|
||||
}
|
||||
_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
|
||||
s.ForUpdate(opts...)
|
||||
})
|
||||
return _q
|
||||
}
|
||||
|
||||
// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock
|
||||
// on any rows that are read. Other sessions can read the rows, but cannot modify them
|
||||
// until your transaction commits.
|
||||
func (_q *ChannelMonitorQuery) ForShare(opts ...sql.LockOption) *ChannelMonitorQuery {
|
||||
if _q.driver.Dialect() == dialect.Postgres {
|
||||
_q.Unique(false)
|
||||
}
|
||||
_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
|
||||
s.ForShare(opts...)
|
||||
})
|
||||
return _q
|
||||
}
|
||||
|
||||
// ChannelMonitorGroupBy is the group-by builder for ChannelMonitor entities.
type ChannelMonitorGroupBy struct {
	selector
	// build is the underlying query the group-by statement is executed on.
	build *ChannelMonitorQuery
}
|
||||
|
||||
// Aggregate adds the given aggregation functions to the group-by query.
|
||||
func (_g *ChannelMonitorGroupBy) Aggregate(fns ...AggregateFunc) *ChannelMonitorGroupBy {
|
||||
_g.fns = append(_g.fns, fns...)
|
||||
return _g
|
||||
}
|
||||
|
||||
// Scan applies the selector query and scans the result into the given value.
|
||||
func (_g *ChannelMonitorGroupBy) Scan(ctx context.Context, v any) error {
|
||||
ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy)
|
||||
if err := _g.build.prepareQuery(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
return scanWithInterceptors[*ChannelMonitorQuery, *ChannelMonitorGroupBy](ctx, _g.build, _g, _g.build.inters, v)
|
||||
}
|
||||
|
||||
func (_g *ChannelMonitorGroupBy) sqlScan(ctx context.Context, root *ChannelMonitorQuery, v any) error {
|
||||
selector := root.sqlQuery(ctx).Select()
|
||||
aggregation := make([]string, 0, len(_g.fns))
|
||||
for _, fn := range _g.fns {
|
||||
aggregation = append(aggregation, fn(selector))
|
||||
}
|
||||
if len(selector.SelectedColumns()) == 0 {
|
||||
columns := make([]string, 0, len(*_g.flds)+len(_g.fns))
|
||||
for _, f := range *_g.flds {
|
||||
columns = append(columns, selector.C(f))
|
||||
}
|
||||
columns = append(columns, aggregation...)
|
||||
selector.Select(columns...)
|
||||
}
|
||||
selector.GroupBy(selector.Columns(*_g.flds...)...)
|
||||
if err := selector.Err(); err != nil {
|
||||
return err
|
||||
}
|
||||
rows := &sql.Rows{}
|
||||
query, args := selector.Query()
|
||||
if err := _g.build.driver.Query(ctx, query, args, rows); err != nil {
|
||||
return err
|
||||
}
|
||||
defer rows.Close()
|
||||
return sql.ScanSlice(rows, v)
|
||||
}
|
||||
|
||||
// ChannelMonitorSelect is the builder for selecting fields of ChannelMonitor entities.
// It embeds the query it was created from, so all query state is reused.
type ChannelMonitorSelect struct {
	*ChannelMonitorQuery
	selector
}
|
||||
|
||||
// Aggregate adds the given aggregation functions to the selector query.
|
||||
func (_s *ChannelMonitorSelect) Aggregate(fns ...AggregateFunc) *ChannelMonitorSelect {
|
||||
_s.fns = append(_s.fns, fns...)
|
||||
return _s
|
||||
}
|
||||
|
||||
// Scan applies the selector query and scans the result into the given value.
|
||||
func (_s *ChannelMonitorSelect) Scan(ctx context.Context, v any) error {
|
||||
ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect)
|
||||
if err := _s.prepareQuery(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
return scanWithInterceptors[*ChannelMonitorQuery, *ChannelMonitorSelect](ctx, _s.ChannelMonitorQuery, _s, _s.inters, v)
|
||||
}
|
||||
|
||||
func (_s *ChannelMonitorSelect) sqlScan(ctx context.Context, root *ChannelMonitorQuery, v any) error {
|
||||
selector := root.sqlQuery(ctx)
|
||||
aggregation := make([]string, 0, len(_s.fns))
|
||||
for _, fn := range _s.fns {
|
||||
aggregation = append(aggregation, fn(selector))
|
||||
}
|
||||
switch n := len(*_s.selector.flds); {
|
||||
case n == 0 && len(aggregation) > 0:
|
||||
selector.Select(aggregation...)
|
||||
case n != 0 && len(aggregation) > 0:
|
||||
selector.AppendSelect(aggregation...)
|
||||
}
|
||||
rows := &sql.Rows{}
|
||||
query, args := selector.Query()
|
||||
if err := _s.driver.Query(ctx, query, args, rows); err != nil {
|
||||
return err
|
||||
}
|
||||
defer rows.Close()
|
||||
return sql.ScanSlice(rows, v)
|
||||
}
|
||||
1328
backend/ent/channelmonitor_update.go
Normal file
278
backend/ent/channelmonitordailyrollup.go
Normal file
@@ -0,0 +1,278 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package ent
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"entgo.io/ent"
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitor"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitordailyrollup"
|
||||
)
|
||||
|
||||
// ChannelMonitorDailyRollup is the model entity for the ChannelMonitorDailyRollup schema.
// Each row holds per-day aggregated check counters (status counts and latency
// sums) for a monitor/model pair, bucketed by BucketDate.
type ChannelMonitorDailyRollup struct {
	config `json:"-"`
	// ID of the ent.
	ID int64 `json:"id,omitempty"`
	// MonitorID holds the value of the "monitor_id" field.
	MonitorID int64 `json:"monitor_id,omitempty"`
	// Model holds the value of the "model" field.
	Model string `json:"model,omitempty"`
	// BucketDate holds the value of the "bucket_date" field.
	BucketDate time.Time `json:"bucket_date,omitempty"`
	// TotalChecks holds the value of the "total_checks" field.
	TotalChecks int `json:"total_checks,omitempty"`
	// OkCount holds the value of the "ok_count" field.
	OkCount int `json:"ok_count,omitempty"`
	// OperationalCount holds the value of the "operational_count" field.
	OperationalCount int `json:"operational_count,omitempty"`
	// DegradedCount holds the value of the "degraded_count" field.
	DegradedCount int `json:"degraded_count,omitempty"`
	// FailedCount holds the value of the "failed_count" field.
	FailedCount int `json:"failed_count,omitempty"`
	// ErrorCount holds the value of the "error_count" field.
	ErrorCount int `json:"error_count,omitempty"`
	// SumLatencyMs holds the value of the "sum_latency_ms" field.
	SumLatencyMs int64 `json:"sum_latency_ms,omitempty"`
	// CountLatency holds the value of the "count_latency" field.
	CountLatency int `json:"count_latency,omitempty"`
	// SumPingLatencyMs holds the value of the "sum_ping_latency_ms" field.
	SumPingLatencyMs int64 `json:"sum_ping_latency_ms,omitempty"`
	// CountPingLatency holds the value of the "count_ping_latency" field.
	CountPingLatency int `json:"count_ping_latency,omitempty"`
	// ComputedAt holds the value of the "computed_at" field.
	ComputedAt time.Time `json:"computed_at,omitempty"`
	// Edges holds the relations/edges for other nodes in the graph.
	// The values are being populated by the ChannelMonitorDailyRollupQuery when eager-loading is set.
	Edges ChannelMonitorDailyRollupEdges `json:"edges"`
	// selectValues stores values of columns selected dynamically (via
	// modifiers); they are retrieved with Value.
	selectValues sql.SelectValues
}
|
||||
|
||||
// ChannelMonitorDailyRollupEdges holds the relations/edges for other nodes in the graph.
type ChannelMonitorDailyRollupEdges struct {
	// Monitor holds the value of the monitor edge.
	Monitor *ChannelMonitor `json:"monitor,omitempty"`
	// loadedTypes holds the information for reporting if a
	// type was loaded (or requested) in eager-loading or not.
	// Index 0 corresponds to the "monitor" edge (see MonitorOrErr).
	loadedTypes [1]bool
}
|
||||
|
||||
// MonitorOrErr returns the Monitor value or an error if the edge
|
||||
// was not loaded in eager-loading, or loaded but was not found.
|
||||
func (e ChannelMonitorDailyRollupEdges) MonitorOrErr() (*ChannelMonitor, error) {
|
||||
if e.Monitor != nil {
|
||||
return e.Monitor, nil
|
||||
} else if e.loadedTypes[0] {
|
||||
return nil, &NotFoundError{label: channelmonitor.Label}
|
||||
}
|
||||
return nil, &NotLoadedError{edge: "monitor"}
|
||||
}
|
||||
|
||||
// scanValues returns the types for scanning values from sql.Rows.
|
||||
func (*ChannelMonitorDailyRollup) scanValues(columns []string) ([]any, error) {
|
||||
values := make([]any, len(columns))
|
||||
for i := range columns {
|
||||
switch columns[i] {
|
||||
case channelmonitordailyrollup.FieldID, channelmonitordailyrollup.FieldMonitorID, channelmonitordailyrollup.FieldTotalChecks, channelmonitordailyrollup.FieldOkCount, channelmonitordailyrollup.FieldOperationalCount, channelmonitordailyrollup.FieldDegradedCount, channelmonitordailyrollup.FieldFailedCount, channelmonitordailyrollup.FieldErrorCount, channelmonitordailyrollup.FieldSumLatencyMs, channelmonitordailyrollup.FieldCountLatency, channelmonitordailyrollup.FieldSumPingLatencyMs, channelmonitordailyrollup.FieldCountPingLatency:
|
||||
values[i] = new(sql.NullInt64)
|
||||
case channelmonitordailyrollup.FieldModel:
|
||||
values[i] = new(sql.NullString)
|
||||
case channelmonitordailyrollup.FieldBucketDate, channelmonitordailyrollup.FieldComputedAt:
|
||||
values[i] = new(sql.NullTime)
|
||||
default:
|
||||
values[i] = new(sql.UnknownType)
|
||||
}
|
||||
}
|
||||
return values, nil
|
||||
}
|
||||
|
||||
// assignValues assigns the values that were returned from sql.Rows (after scanning)
// to the ChannelMonitorDailyRollup fields.
// The values slice is positionally aligned with columns; each entry is the
// nullable scan target allocated by scanValues for that column.
func (_m *ChannelMonitorDailyRollup) assignValues(columns []string, values []any) error {
	if m, n := len(values), len(columns); m < n {
		return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
	}
	for i := range columns {
		switch columns[i] {
		case channelmonitordailyrollup.FieldID:
			value, ok := values[i].(*sql.NullInt64)
			if !ok {
				return fmt.Errorf("unexpected type %T for field id", value)
			}
			_m.ID = int64(value.Int64)
		case channelmonitordailyrollup.FieldMonitorID:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field monitor_id", values[i])
			} else if value.Valid {
				_m.MonitorID = value.Int64
			}
		case channelmonitordailyrollup.FieldModel:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field model", values[i])
			} else if value.Valid {
				_m.Model = value.String
			}
		case channelmonitordailyrollup.FieldBucketDate:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field bucket_date", values[i])
			} else if value.Valid {
				_m.BucketDate = value.Time
			}
		case channelmonitordailyrollup.FieldTotalChecks:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field total_checks", values[i])
			} else if value.Valid {
				_m.TotalChecks = int(value.Int64)
			}
		case channelmonitordailyrollup.FieldOkCount:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field ok_count", values[i])
			} else if value.Valid {
				_m.OkCount = int(value.Int64)
			}
		case channelmonitordailyrollup.FieldOperationalCount:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field operational_count", values[i])
			} else if value.Valid {
				_m.OperationalCount = int(value.Int64)
			}
		case channelmonitordailyrollup.FieldDegradedCount:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field degraded_count", values[i])
			} else if value.Valid {
				_m.DegradedCount = int(value.Int64)
			}
		case channelmonitordailyrollup.FieldFailedCount:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field failed_count", values[i])
			} else if value.Valid {
				_m.FailedCount = int(value.Int64)
			}
		case channelmonitordailyrollup.FieldErrorCount:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field error_count", values[i])
			} else if value.Valid {
				_m.ErrorCount = int(value.Int64)
			}
		case channelmonitordailyrollup.FieldSumLatencyMs:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field sum_latency_ms", values[i])
			} else if value.Valid {
				_m.SumLatencyMs = value.Int64
			}
		case channelmonitordailyrollup.FieldCountLatency:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field count_latency", values[i])
			} else if value.Valid {
				_m.CountLatency = int(value.Int64)
			}
		case channelmonitordailyrollup.FieldSumPingLatencyMs:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field sum_ping_latency_ms", values[i])
			} else if value.Valid {
				_m.SumPingLatencyMs = value.Int64
			}
		case channelmonitordailyrollup.FieldCountPingLatency:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field count_ping_latency", values[i])
			} else if value.Valid {
				_m.CountPingLatency = int(value.Int64)
			}
		case channelmonitordailyrollup.FieldComputedAt:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field computed_at", values[i])
			} else if value.Valid {
				_m.ComputedAt = value.Time
			}
		default:
			// Columns not part of the schema (selected via modifiers)
			// are stored for retrieval through Value.
			_m.selectValues.Set(columns[i], values[i])
		}
	}
	return nil
}
|
||||
|
||||
// Value returns the ent.Value that was dynamically selected and assigned to the ChannelMonitorDailyRollup.
|
||||
// This includes values selected through modifiers, order, etc.
|
||||
func (_m *ChannelMonitorDailyRollup) Value(name string) (ent.Value, error) {
|
||||
return _m.selectValues.Get(name)
|
||||
}
|
||||
|
||||
// QueryMonitor queries the "monitor" edge of the ChannelMonitorDailyRollup entity.
|
||||
func (_m *ChannelMonitorDailyRollup) QueryMonitor() *ChannelMonitorQuery {
|
||||
return NewChannelMonitorDailyRollupClient(_m.config).QueryMonitor(_m)
|
||||
}
|
||||
|
||||
// Update returns a builder for updating this ChannelMonitorDailyRollup.
|
||||
// Note that you need to call ChannelMonitorDailyRollup.Unwrap() before calling this method if this ChannelMonitorDailyRollup
|
||||
// was returned from a transaction, and the transaction was committed or rolled back.
|
||||
func (_m *ChannelMonitorDailyRollup) Update() *ChannelMonitorDailyRollupUpdateOne {
|
||||
return NewChannelMonitorDailyRollupClient(_m.config).UpdateOne(_m)
|
||||
}
|
||||
|
||||
// Unwrap unwraps the ChannelMonitorDailyRollup entity that was returned from a transaction after it was closed,
|
||||
// so that all future queries will be executed through the driver which created the transaction.
|
||||
func (_m *ChannelMonitorDailyRollup) Unwrap() *ChannelMonitorDailyRollup {
|
||||
_tx, ok := _m.config.driver.(*txDriver)
|
||||
if !ok {
|
||||
panic("ent: ChannelMonitorDailyRollup is not a transactional entity")
|
||||
}
|
||||
_m.config.driver = _tx.drv
|
||||
return _m
|
||||
}
|
||||
|
||||
// String implements the fmt.Stringer.
|
||||
func (_m *ChannelMonitorDailyRollup) String() string {
|
||||
var builder strings.Builder
|
||||
builder.WriteString("ChannelMonitorDailyRollup(")
|
||||
builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID))
|
||||
builder.WriteString("monitor_id=")
|
||||
builder.WriteString(fmt.Sprintf("%v", _m.MonitorID))
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("model=")
|
||||
builder.WriteString(_m.Model)
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("bucket_date=")
|
||||
builder.WriteString(_m.BucketDate.Format(time.ANSIC))
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("total_checks=")
|
||||
builder.WriteString(fmt.Sprintf("%v", _m.TotalChecks))
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("ok_count=")
|
||||
builder.WriteString(fmt.Sprintf("%v", _m.OkCount))
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("operational_count=")
|
||||
builder.WriteString(fmt.Sprintf("%v", _m.OperationalCount))
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("degraded_count=")
|
||||
builder.WriteString(fmt.Sprintf("%v", _m.DegradedCount))
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("failed_count=")
|
||||
builder.WriteString(fmt.Sprintf("%v", _m.FailedCount))
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("error_count=")
|
||||
builder.WriteString(fmt.Sprintf("%v", _m.ErrorCount))
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("sum_latency_ms=")
|
||||
builder.WriteString(fmt.Sprintf("%v", _m.SumLatencyMs))
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("count_latency=")
|
||||
builder.WriteString(fmt.Sprintf("%v", _m.CountLatency))
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("sum_ping_latency_ms=")
|
||||
builder.WriteString(fmt.Sprintf("%v", _m.SumPingLatencyMs))
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("count_ping_latency=")
|
||||
builder.WriteString(fmt.Sprintf("%v", _m.CountPingLatency))
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("computed_at=")
|
||||
builder.WriteString(_m.ComputedAt.Format(time.ANSIC))
|
||||
builder.WriteByte(')')
|
||||
return builder.String()
|
||||
}
|
||||
|
||||
// ChannelMonitorDailyRollups is a parsable slice of ChannelMonitorDailyRollup.
type ChannelMonitorDailyRollups []*ChannelMonitorDailyRollup
|
||||
@@ -0,0 +1,206 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package channelmonitordailyrollup
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
)
|
||||
|
||||
const (
	// Label holds the string label denoting the channelmonitordailyrollup type in the database.
	Label = "channel_monitor_daily_rollup"
	// FieldID holds the string denoting the id field in the database.
	FieldID = "id"
	// FieldMonitorID holds the string denoting the monitor_id field in the database.
	FieldMonitorID = "monitor_id"
	// FieldModel holds the string denoting the model field in the database.
	FieldModel = "model"
	// FieldBucketDate holds the string denoting the bucket_date field in the database.
	FieldBucketDate = "bucket_date"
	// FieldTotalChecks holds the string denoting the total_checks field in the database.
	FieldTotalChecks = "total_checks"
	// FieldOkCount holds the string denoting the ok_count field in the database.
	FieldOkCount = "ok_count"
	// FieldOperationalCount holds the string denoting the operational_count field in the database.
	FieldOperationalCount = "operational_count"
	// FieldDegradedCount holds the string denoting the degraded_count field in the database.
	FieldDegradedCount = "degraded_count"
	// FieldFailedCount holds the string denoting the failed_count field in the database.
	FieldFailedCount = "failed_count"
	// FieldErrorCount holds the string denoting the error_count field in the database.
	FieldErrorCount = "error_count"
	// FieldSumLatencyMs holds the string denoting the sum_latency_ms field in the database.
	FieldSumLatencyMs = "sum_latency_ms"
	// FieldCountLatency holds the string denoting the count_latency field in the database.
	FieldCountLatency = "count_latency"
	// FieldSumPingLatencyMs holds the string denoting the sum_ping_latency_ms field in the database.
	FieldSumPingLatencyMs = "sum_ping_latency_ms"
	// FieldCountPingLatency holds the string denoting the count_ping_latency field in the database.
	FieldCountPingLatency = "count_ping_latency"
	// FieldComputedAt holds the string denoting the computed_at field in the database.
	FieldComputedAt = "computed_at"
	// EdgeMonitor holds the string denoting the monitor edge name in mutations.
	EdgeMonitor = "monitor"
	// Table holds the table name of the channelmonitordailyrollup in the database.
	Table = "channel_monitor_daily_rollups"
	// MonitorTable is the table that holds the monitor relation/edge.
	// It equals Table because the edge's foreign key (MonitorColumn) lives
	// in the rollup table itself (M2O edge).
	MonitorTable = "channel_monitor_daily_rollups"
	// MonitorInverseTable is the table name for the ChannelMonitor entity.
	// It exists in this package in order to avoid circular dependency with the "channelmonitor" package.
	MonitorInverseTable = "channel_monitors"
	// MonitorColumn is the table column denoting the monitor relation/edge.
	MonitorColumn = "monitor_id"
)

// Columns holds all SQL columns for channelmonitordailyrollup fields.
var Columns = []string{
	FieldID,
	FieldMonitorID,
	FieldModel,
	FieldBucketDate,
	FieldTotalChecks,
	FieldOkCount,
	FieldOperationalCount,
	FieldDegradedCount,
	FieldFailedCount,
	FieldErrorCount,
	FieldSumLatencyMs,
	FieldCountLatency,
	FieldSumPingLatencyMs,
	FieldCountPingLatency,
	FieldComputedAt,
}
|
||||
|
||||
// ValidColumn reports if the column name is valid (part of the table columns).
|
||||
func ValidColumn(column string) bool {
|
||||
for i := range Columns {
|
||||
if column == Columns[i] {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
var (
	// ModelValidator is a validator for the "model" field. It is called by the builders before save.
	ModelValidator func(string) error
	// DefaultTotalChecks holds the default value on creation for the "total_checks" field.
	DefaultTotalChecks int
	// DefaultOkCount holds the default value on creation for the "ok_count" field.
	DefaultOkCount int
	// DefaultOperationalCount holds the default value on creation for the "operational_count" field.
	DefaultOperationalCount int
	// DefaultDegradedCount holds the default value on creation for the "degraded_count" field.
	DefaultDegradedCount int
	// DefaultFailedCount holds the default value on creation for the "failed_count" field.
	DefaultFailedCount int
	// DefaultErrorCount holds the default value on creation for the "error_count" field.
	DefaultErrorCount int
	// DefaultSumLatencyMs holds the default value on creation for the "sum_latency_ms" field.
	DefaultSumLatencyMs int64
	// DefaultCountLatency holds the default value on creation for the "count_latency" field.
	DefaultCountLatency int
	// DefaultSumPingLatencyMs holds the default value on creation for the "sum_ping_latency_ms" field.
	DefaultSumPingLatencyMs int64
	// DefaultCountPingLatency holds the default value on creation for the "count_ping_latency" field.
	DefaultCountPingLatency int
	// DefaultComputedAt holds the default value on creation for the "computed_at" field.
	DefaultComputedAt func() time.Time
	// UpdateDefaultComputedAt holds the default value on update for the "computed_at" field.
	UpdateDefaultComputedAt func() time.Time
)

// OrderOption defines the ordering options for the ChannelMonitorDailyRollup queries.
type OrderOption func(*sql.Selector)
|
||||
|
||||
// ByID orders the results by the id field.
func ByID(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldID, opts...).ToFunc()
}

// ByMonitorID orders the results by the monitor_id field.
func ByMonitorID(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldMonitorID, opts...).ToFunc()
}

// ByModel orders the results by the model field.
func ByModel(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldModel, opts...).ToFunc()
}

// ByBucketDate orders the results by the bucket_date field.
func ByBucketDate(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldBucketDate, opts...).ToFunc()
}

// ByTotalChecks orders the results by the total_checks field.
func ByTotalChecks(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldTotalChecks, opts...).ToFunc()
}

// ByOkCount orders the results by the ok_count field.
func ByOkCount(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldOkCount, opts...).ToFunc()
}

// ByOperationalCount orders the results by the operational_count field.
func ByOperationalCount(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldOperationalCount, opts...).ToFunc()
}

// ByDegradedCount orders the results by the degraded_count field.
func ByDegradedCount(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldDegradedCount, opts...).ToFunc()
}

// ByFailedCount orders the results by the failed_count field.
func ByFailedCount(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldFailedCount, opts...).ToFunc()
}

// ByErrorCount orders the results by the error_count field.
func ByErrorCount(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldErrorCount, opts...).ToFunc()
}

// BySumLatencyMs orders the results by the sum_latency_ms field.
func BySumLatencyMs(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldSumLatencyMs, opts...).ToFunc()
}

// ByCountLatency orders the results by the count_latency field.
func ByCountLatency(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldCountLatency, opts...).ToFunc()
}

// BySumPingLatencyMs orders the results by the sum_ping_latency_ms field.
func BySumPingLatencyMs(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldSumPingLatencyMs, opts...).ToFunc()
}

// ByCountPingLatency orders the results by the count_ping_latency field.
func ByCountPingLatency(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldCountPingLatency, opts...).ToFunc()
}

// ByComputedAt orders the results by the computed_at field.
func ByComputedAt(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldComputedAt, opts...).ToFunc()
}

// ByMonitorField orders the results by monitor field.
// The ordering term is applied to the joined neighbor row of the "monitor" edge.
func ByMonitorField(field string, opts ...sql.OrderTermOption) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborTerms(s, newMonitorStep(), sql.OrderByField(field, opts...))
	}
}
|
||||
// newMonitorStep returns the sqlgraph step describing the "monitor" edge:
// an inverse M2O relation from this table's MonitorColumn to the
// ChannelMonitor table's id.
func newMonitorStep() *sqlgraph.Step {
	return sqlgraph.NewStep(
		sqlgraph.From(Table, FieldID),
		sqlgraph.To(MonitorInverseTable, FieldID),
		sqlgraph.Edge(sqlgraph.M2O, true, MonitorTable, MonitorColumn),
	)
}
|
||||
729
backend/ent/channelmonitordailyrollup/where.go
Normal file
@@ -0,0 +1,729 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package channelmonitordailyrollup
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||
)
|
||||
|
||||
// ID filters vertices based on their ID field.
func ID(id int64) predicate.ChannelMonitorDailyRollup {
	return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldID, id))
}

// IDEQ applies the EQ predicate on the ID field.
func IDEQ(id int64) predicate.ChannelMonitorDailyRollup {
	return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldID, id))
}

// IDNEQ applies the NEQ predicate on the ID field.
func IDNEQ(id int64) predicate.ChannelMonitorDailyRollup {
	return predicate.ChannelMonitorDailyRollup(sql.FieldNEQ(FieldID, id))
}

// IDIn applies the In predicate on the ID field.
func IDIn(ids ...int64) predicate.ChannelMonitorDailyRollup {
	return predicate.ChannelMonitorDailyRollup(sql.FieldIn(FieldID, ids...))
}

// IDNotIn applies the NotIn predicate on the ID field.
func IDNotIn(ids ...int64) predicate.ChannelMonitorDailyRollup {
	return predicate.ChannelMonitorDailyRollup(sql.FieldNotIn(FieldID, ids...))
}

// IDGT applies the GT predicate on the ID field.
func IDGT(id int64) predicate.ChannelMonitorDailyRollup {
	return predicate.ChannelMonitorDailyRollup(sql.FieldGT(FieldID, id))
}

// IDGTE applies the GTE predicate on the ID field.
func IDGTE(id int64) predicate.ChannelMonitorDailyRollup {
	return predicate.ChannelMonitorDailyRollup(sql.FieldGTE(FieldID, id))
}

// IDLT applies the LT predicate on the ID field.
func IDLT(id int64) predicate.ChannelMonitorDailyRollup {
	return predicate.ChannelMonitorDailyRollup(sql.FieldLT(FieldID, id))
}

// IDLTE applies the LTE predicate on the ID field.
func IDLTE(id int64) predicate.ChannelMonitorDailyRollup {
	return predicate.ChannelMonitorDailyRollup(sql.FieldLTE(FieldID, id))
}

// MonitorID applies equality check predicate on the "monitor_id" field. It's identical to MonitorIDEQ.
func MonitorID(v int64) predicate.ChannelMonitorDailyRollup {
	return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldMonitorID, v))
}

// Model applies equality check predicate on the "model" field. It's identical to ModelEQ.
func Model(v string) predicate.ChannelMonitorDailyRollup {
	return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldModel, v))
}

// BucketDate applies equality check predicate on the "bucket_date" field. It's identical to BucketDateEQ.
func BucketDate(v time.Time) predicate.ChannelMonitorDailyRollup {
	return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldBucketDate, v))
}

// TotalChecks applies equality check predicate on the "total_checks" field. It's identical to TotalChecksEQ.
func TotalChecks(v int) predicate.ChannelMonitorDailyRollup {
	return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldTotalChecks, v))
}
|
||||
|
||||
// OkCount applies equality check predicate on the "ok_count" field. It's identical to OkCountEQ.
|
||||
func OkCount(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldOkCount, v))
|
||||
}
|
||||
|
||||
// OperationalCount applies equality check predicate on the "operational_count" field. It's identical to OperationalCountEQ.
|
||||
func OperationalCount(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldOperationalCount, v))
|
||||
}
|
||||
|
||||
// DegradedCount applies equality check predicate on the "degraded_count" field. It's identical to DegradedCountEQ.
|
||||
func DegradedCount(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldDegradedCount, v))
|
||||
}
|
||||
|
||||
// FailedCount applies equality check predicate on the "failed_count" field. It's identical to FailedCountEQ.
|
||||
func FailedCount(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldFailedCount, v))
|
||||
}
|
||||
|
||||
// ErrorCount applies equality check predicate on the "error_count" field. It's identical to ErrorCountEQ.
|
||||
func ErrorCount(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldErrorCount, v))
|
||||
}
|
||||
|
||||
// SumLatencyMs applies equality check predicate on the "sum_latency_ms" field. It's identical to SumLatencyMsEQ.
|
||||
func SumLatencyMs(v int64) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldSumLatencyMs, v))
|
||||
}
|
||||
|
||||
// CountLatency applies equality check predicate on the "count_latency" field. It's identical to CountLatencyEQ.
|
||||
func CountLatency(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldCountLatency, v))
|
||||
}
|
||||
|
||||
// SumPingLatencyMs applies equality check predicate on the "sum_ping_latency_ms" field. It's identical to SumPingLatencyMsEQ.
|
||||
func SumPingLatencyMs(v int64) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldSumPingLatencyMs, v))
|
||||
}
|
||||
|
||||
// CountPingLatency applies equality check predicate on the "count_ping_latency" field. It's identical to CountPingLatencyEQ.
|
||||
func CountPingLatency(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldCountPingLatency, v))
|
||||
}
|
||||
|
||||
// ComputedAt applies equality check predicate on the "computed_at" field. It's identical to ComputedAtEQ.
|
||||
func ComputedAt(v time.Time) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldComputedAt, v))
|
||||
}
|
||||
|
||||
// MonitorIDEQ applies the EQ predicate on the "monitor_id" field.
|
||||
func MonitorIDEQ(v int64) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldMonitorID, v))
|
||||
}
|
||||
|
||||
// MonitorIDNEQ applies the NEQ predicate on the "monitor_id" field.
|
||||
func MonitorIDNEQ(v int64) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldNEQ(FieldMonitorID, v))
|
||||
}
|
||||
|
||||
// MonitorIDIn applies the In predicate on the "monitor_id" field.
|
||||
func MonitorIDIn(vs ...int64) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldIn(FieldMonitorID, vs...))
|
||||
}
|
||||
|
||||
// MonitorIDNotIn applies the NotIn predicate on the "monitor_id" field.
|
||||
func MonitorIDNotIn(vs ...int64) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldNotIn(FieldMonitorID, vs...))
|
||||
}
|
||||
|
||||
// ModelEQ applies the EQ predicate on the "model" field.
|
||||
func ModelEQ(v string) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldModel, v))
|
||||
}
|
||||
|
||||
// ModelNEQ applies the NEQ predicate on the "model" field.
|
||||
func ModelNEQ(v string) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldNEQ(FieldModel, v))
|
||||
}
|
||||
|
||||
// ModelIn applies the In predicate on the "model" field.
|
||||
func ModelIn(vs ...string) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldIn(FieldModel, vs...))
|
||||
}
|
||||
|
||||
// ModelNotIn applies the NotIn predicate on the "model" field.
|
||||
func ModelNotIn(vs ...string) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldNotIn(FieldModel, vs...))
|
||||
}
|
||||
|
||||
// ModelGT applies the GT predicate on the "model" field.
|
||||
func ModelGT(v string) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldGT(FieldModel, v))
|
||||
}
|
||||
|
||||
// ModelGTE applies the GTE predicate on the "model" field.
|
||||
func ModelGTE(v string) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldGTE(FieldModel, v))
|
||||
}
|
||||
|
||||
// ModelLT applies the LT predicate on the "model" field.
|
||||
func ModelLT(v string) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldLT(FieldModel, v))
|
||||
}
|
||||
|
||||
// ModelLTE applies the LTE predicate on the "model" field.
|
||||
func ModelLTE(v string) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldLTE(FieldModel, v))
|
||||
}
|
||||
|
||||
// ModelContains applies the Contains predicate on the "model" field.
|
||||
func ModelContains(v string) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldContains(FieldModel, v))
|
||||
}
|
||||
|
||||
// ModelHasPrefix applies the HasPrefix predicate on the "model" field.
|
||||
func ModelHasPrefix(v string) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldHasPrefix(FieldModel, v))
|
||||
}
|
||||
|
||||
// ModelHasSuffix applies the HasSuffix predicate on the "model" field.
|
||||
func ModelHasSuffix(v string) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldHasSuffix(FieldModel, v))
|
||||
}
|
||||
|
||||
// ModelEqualFold applies the EqualFold predicate on the "model" field.
|
||||
func ModelEqualFold(v string) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldEqualFold(FieldModel, v))
|
||||
}
|
||||
|
||||
// ModelContainsFold applies the ContainsFold predicate on the "model" field.
|
||||
func ModelContainsFold(v string) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldContainsFold(FieldModel, v))
|
||||
}
|
||||
|
||||
// BucketDateEQ applies the EQ predicate on the "bucket_date" field.
|
||||
func BucketDateEQ(v time.Time) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldBucketDate, v))
|
||||
}
|
||||
|
||||
// BucketDateNEQ applies the NEQ predicate on the "bucket_date" field.
|
||||
func BucketDateNEQ(v time.Time) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldNEQ(FieldBucketDate, v))
|
||||
}
|
||||
|
||||
// BucketDateIn applies the In predicate on the "bucket_date" field.
|
||||
func BucketDateIn(vs ...time.Time) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldIn(FieldBucketDate, vs...))
|
||||
}
|
||||
|
||||
// BucketDateNotIn applies the NotIn predicate on the "bucket_date" field.
|
||||
func BucketDateNotIn(vs ...time.Time) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldNotIn(FieldBucketDate, vs...))
|
||||
}
|
||||
|
||||
// BucketDateGT applies the GT predicate on the "bucket_date" field.
|
||||
func BucketDateGT(v time.Time) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldGT(FieldBucketDate, v))
|
||||
}
|
||||
|
||||
// BucketDateGTE applies the GTE predicate on the "bucket_date" field.
|
||||
func BucketDateGTE(v time.Time) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldGTE(FieldBucketDate, v))
|
||||
}
|
||||
|
||||
// BucketDateLT applies the LT predicate on the "bucket_date" field.
|
||||
func BucketDateLT(v time.Time) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldLT(FieldBucketDate, v))
|
||||
}
|
||||
|
||||
// BucketDateLTE applies the LTE predicate on the "bucket_date" field.
|
||||
func BucketDateLTE(v time.Time) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldLTE(FieldBucketDate, v))
|
||||
}
|
||||
|
||||
// TotalChecksEQ applies the EQ predicate on the "total_checks" field.
|
||||
func TotalChecksEQ(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldTotalChecks, v))
|
||||
}
|
||||
|
||||
// TotalChecksNEQ applies the NEQ predicate on the "total_checks" field.
|
||||
func TotalChecksNEQ(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldNEQ(FieldTotalChecks, v))
|
||||
}
|
||||
|
||||
// TotalChecksIn applies the In predicate on the "total_checks" field.
|
||||
func TotalChecksIn(vs ...int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldIn(FieldTotalChecks, vs...))
|
||||
}
|
||||
|
||||
// TotalChecksNotIn applies the NotIn predicate on the "total_checks" field.
|
||||
func TotalChecksNotIn(vs ...int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldNotIn(FieldTotalChecks, vs...))
|
||||
}
|
||||
|
||||
// TotalChecksGT applies the GT predicate on the "total_checks" field.
|
||||
func TotalChecksGT(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldGT(FieldTotalChecks, v))
|
||||
}
|
||||
|
||||
// TotalChecksGTE applies the GTE predicate on the "total_checks" field.
|
||||
func TotalChecksGTE(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldGTE(FieldTotalChecks, v))
|
||||
}
|
||||
|
||||
// TotalChecksLT applies the LT predicate on the "total_checks" field.
|
||||
func TotalChecksLT(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldLT(FieldTotalChecks, v))
|
||||
}
|
||||
|
||||
// TotalChecksLTE applies the LTE predicate on the "total_checks" field.
|
||||
func TotalChecksLTE(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldLTE(FieldTotalChecks, v))
|
||||
}
|
||||
|
||||
// OkCountEQ applies the EQ predicate on the "ok_count" field.
|
||||
func OkCountEQ(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldOkCount, v))
|
||||
}
|
||||
|
||||
// OkCountNEQ applies the NEQ predicate on the "ok_count" field.
|
||||
func OkCountNEQ(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldNEQ(FieldOkCount, v))
|
||||
}
|
||||
|
||||
// OkCountIn applies the In predicate on the "ok_count" field.
|
||||
func OkCountIn(vs ...int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldIn(FieldOkCount, vs...))
|
||||
}
|
||||
|
||||
// OkCountNotIn applies the NotIn predicate on the "ok_count" field.
|
||||
func OkCountNotIn(vs ...int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldNotIn(FieldOkCount, vs...))
|
||||
}
|
||||
|
||||
// OkCountGT applies the GT predicate on the "ok_count" field.
|
||||
func OkCountGT(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldGT(FieldOkCount, v))
|
||||
}
|
||||
|
||||
// OkCountGTE applies the GTE predicate on the "ok_count" field.
|
||||
func OkCountGTE(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldGTE(FieldOkCount, v))
|
||||
}
|
||||
|
||||
// OkCountLT applies the LT predicate on the "ok_count" field.
|
||||
func OkCountLT(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldLT(FieldOkCount, v))
|
||||
}
|
||||
|
||||
// OkCountLTE applies the LTE predicate on the "ok_count" field.
|
||||
func OkCountLTE(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldLTE(FieldOkCount, v))
|
||||
}
|
||||
|
||||
// OperationalCountEQ applies the EQ predicate on the "operational_count" field.
|
||||
func OperationalCountEQ(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldOperationalCount, v))
|
||||
}
|
||||
|
||||
// OperationalCountNEQ applies the NEQ predicate on the "operational_count" field.
|
||||
func OperationalCountNEQ(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldNEQ(FieldOperationalCount, v))
|
||||
}
|
||||
|
||||
// OperationalCountIn applies the In predicate on the "operational_count" field.
|
||||
func OperationalCountIn(vs ...int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldIn(FieldOperationalCount, vs...))
|
||||
}
|
||||
|
||||
// OperationalCountNotIn applies the NotIn predicate on the "operational_count" field.
|
||||
func OperationalCountNotIn(vs ...int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldNotIn(FieldOperationalCount, vs...))
|
||||
}
|
||||
|
||||
// OperationalCountGT applies the GT predicate on the "operational_count" field.
|
||||
func OperationalCountGT(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldGT(FieldOperationalCount, v))
|
||||
}
|
||||
|
||||
// OperationalCountGTE applies the GTE predicate on the "operational_count" field.
|
||||
func OperationalCountGTE(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldGTE(FieldOperationalCount, v))
|
||||
}
|
||||
|
||||
// OperationalCountLT applies the LT predicate on the "operational_count" field.
|
||||
func OperationalCountLT(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldLT(FieldOperationalCount, v))
|
||||
}
|
||||
|
||||
// OperationalCountLTE applies the LTE predicate on the "operational_count" field.
|
||||
func OperationalCountLTE(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldLTE(FieldOperationalCount, v))
|
||||
}
|
||||
|
||||
// DegradedCountEQ applies the EQ predicate on the "degraded_count" field.
|
||||
func DegradedCountEQ(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldDegradedCount, v))
|
||||
}
|
||||
|
||||
// DegradedCountNEQ applies the NEQ predicate on the "degraded_count" field.
|
||||
func DegradedCountNEQ(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldNEQ(FieldDegradedCount, v))
|
||||
}
|
||||
|
||||
// DegradedCountIn applies the In predicate on the "degraded_count" field.
|
||||
func DegradedCountIn(vs ...int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldIn(FieldDegradedCount, vs...))
|
||||
}
|
||||
|
||||
// DegradedCountNotIn applies the NotIn predicate on the "degraded_count" field.
|
||||
func DegradedCountNotIn(vs ...int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldNotIn(FieldDegradedCount, vs...))
|
||||
}
|
||||
|
||||
// DegradedCountGT applies the GT predicate on the "degraded_count" field.
|
||||
func DegradedCountGT(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldGT(FieldDegradedCount, v))
|
||||
}
|
||||
|
||||
// DegradedCountGTE applies the GTE predicate on the "degraded_count" field.
|
||||
func DegradedCountGTE(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldGTE(FieldDegradedCount, v))
|
||||
}
|
||||
|
||||
// DegradedCountLT applies the LT predicate on the "degraded_count" field.
|
||||
func DegradedCountLT(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldLT(FieldDegradedCount, v))
|
||||
}
|
||||
|
||||
// DegradedCountLTE applies the LTE predicate on the "degraded_count" field.
|
||||
func DegradedCountLTE(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldLTE(FieldDegradedCount, v))
|
||||
}
|
||||
|
||||
// FailedCountEQ applies the EQ predicate on the "failed_count" field.
|
||||
func FailedCountEQ(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldFailedCount, v))
|
||||
}
|
||||
|
||||
// FailedCountNEQ applies the NEQ predicate on the "failed_count" field.
|
||||
func FailedCountNEQ(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldNEQ(FieldFailedCount, v))
|
||||
}
|
||||
|
||||
// FailedCountIn applies the In predicate on the "failed_count" field.
|
||||
func FailedCountIn(vs ...int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldIn(FieldFailedCount, vs...))
|
||||
}
|
||||
|
||||
// FailedCountNotIn applies the NotIn predicate on the "failed_count" field.
|
||||
func FailedCountNotIn(vs ...int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldNotIn(FieldFailedCount, vs...))
|
||||
}
|
||||
|
||||
// FailedCountGT applies the GT predicate on the "failed_count" field.
|
||||
func FailedCountGT(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldGT(FieldFailedCount, v))
|
||||
}
|
||||
|
||||
// FailedCountGTE applies the GTE predicate on the "failed_count" field.
|
||||
func FailedCountGTE(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldGTE(FieldFailedCount, v))
|
||||
}
|
||||
|
||||
// FailedCountLT applies the LT predicate on the "failed_count" field.
|
||||
func FailedCountLT(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldLT(FieldFailedCount, v))
|
||||
}
|
||||
|
||||
// FailedCountLTE applies the LTE predicate on the "failed_count" field.
|
||||
func FailedCountLTE(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldLTE(FieldFailedCount, v))
|
||||
}
|
||||
|
||||
// ErrorCountEQ applies the EQ predicate on the "error_count" field.
|
||||
func ErrorCountEQ(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldErrorCount, v))
|
||||
}
|
||||
|
||||
// ErrorCountNEQ applies the NEQ predicate on the "error_count" field.
|
||||
func ErrorCountNEQ(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldNEQ(FieldErrorCount, v))
|
||||
}
|
||||
|
||||
// ErrorCountIn applies the In predicate on the "error_count" field.
|
||||
func ErrorCountIn(vs ...int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldIn(FieldErrorCount, vs...))
|
||||
}
|
||||
|
||||
// ErrorCountNotIn applies the NotIn predicate on the "error_count" field.
|
||||
func ErrorCountNotIn(vs ...int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldNotIn(FieldErrorCount, vs...))
|
||||
}
|
||||
|
||||
// ErrorCountGT applies the GT predicate on the "error_count" field.
|
||||
func ErrorCountGT(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldGT(FieldErrorCount, v))
|
||||
}
|
||||
|
||||
// ErrorCountGTE applies the GTE predicate on the "error_count" field.
|
||||
func ErrorCountGTE(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldGTE(FieldErrorCount, v))
|
||||
}
|
||||
|
||||
// ErrorCountLT applies the LT predicate on the "error_count" field.
|
||||
func ErrorCountLT(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldLT(FieldErrorCount, v))
|
||||
}
|
||||
|
||||
// ErrorCountLTE applies the LTE predicate on the "error_count" field.
|
||||
func ErrorCountLTE(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldLTE(FieldErrorCount, v))
|
||||
}
|
||||
|
||||
// SumLatencyMsEQ applies the EQ predicate on the "sum_latency_ms" field.
|
||||
func SumLatencyMsEQ(v int64) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldSumLatencyMs, v))
|
||||
}
|
||||
|
||||
// SumLatencyMsNEQ applies the NEQ predicate on the "sum_latency_ms" field.
|
||||
func SumLatencyMsNEQ(v int64) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldNEQ(FieldSumLatencyMs, v))
|
||||
}
|
||||
|
||||
// SumLatencyMsIn applies the In predicate on the "sum_latency_ms" field.
|
||||
func SumLatencyMsIn(vs ...int64) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldIn(FieldSumLatencyMs, vs...))
|
||||
}
|
||||
|
||||
// SumLatencyMsNotIn applies the NotIn predicate on the "sum_latency_ms" field.
|
||||
func SumLatencyMsNotIn(vs ...int64) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldNotIn(FieldSumLatencyMs, vs...))
|
||||
}
|
||||
|
||||
// SumLatencyMsGT applies the GT predicate on the "sum_latency_ms" field.
|
||||
func SumLatencyMsGT(v int64) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldGT(FieldSumLatencyMs, v))
|
||||
}
|
||||
|
||||
// SumLatencyMsGTE applies the GTE predicate on the "sum_latency_ms" field.
|
||||
func SumLatencyMsGTE(v int64) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldGTE(FieldSumLatencyMs, v))
|
||||
}
|
||||
|
||||
// SumLatencyMsLT applies the LT predicate on the "sum_latency_ms" field.
|
||||
func SumLatencyMsLT(v int64) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldLT(FieldSumLatencyMs, v))
|
||||
}
|
||||
|
||||
// SumLatencyMsLTE applies the LTE predicate on the "sum_latency_ms" field.
|
||||
func SumLatencyMsLTE(v int64) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldLTE(FieldSumLatencyMs, v))
|
||||
}
|
||||
|
||||
// CountLatencyEQ applies the EQ predicate on the "count_latency" field.
|
||||
func CountLatencyEQ(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldCountLatency, v))
|
||||
}
|
||||
|
||||
// CountLatencyNEQ applies the NEQ predicate on the "count_latency" field.
|
||||
func CountLatencyNEQ(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldNEQ(FieldCountLatency, v))
|
||||
}
|
||||
|
||||
// CountLatencyIn applies the In predicate on the "count_latency" field.
|
||||
func CountLatencyIn(vs ...int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldIn(FieldCountLatency, vs...))
|
||||
}
|
||||
|
||||
// CountLatencyNotIn applies the NotIn predicate on the "count_latency" field.
|
||||
func CountLatencyNotIn(vs ...int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldNotIn(FieldCountLatency, vs...))
|
||||
}
|
||||
|
||||
// CountLatencyGT applies the GT predicate on the "count_latency" field.
|
||||
func CountLatencyGT(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldGT(FieldCountLatency, v))
|
||||
}
|
||||
|
||||
// CountLatencyGTE applies the GTE predicate on the "count_latency" field.
|
||||
func CountLatencyGTE(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldGTE(FieldCountLatency, v))
|
||||
}
|
||||
|
||||
// CountLatencyLT applies the LT predicate on the "count_latency" field.
|
||||
func CountLatencyLT(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldLT(FieldCountLatency, v))
|
||||
}
|
||||
|
||||
// CountLatencyLTE applies the LTE predicate on the "count_latency" field.
|
||||
func CountLatencyLTE(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldLTE(FieldCountLatency, v))
|
||||
}
|
||||
|
||||
// SumPingLatencyMsEQ applies the EQ predicate on the "sum_ping_latency_ms" field.
|
||||
func SumPingLatencyMsEQ(v int64) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldSumPingLatencyMs, v))
|
||||
}
|
||||
|
||||
// SumPingLatencyMsNEQ applies the NEQ predicate on the "sum_ping_latency_ms" field.
|
||||
func SumPingLatencyMsNEQ(v int64) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldNEQ(FieldSumPingLatencyMs, v))
|
||||
}
|
||||
|
||||
// SumPingLatencyMsIn applies the In predicate on the "sum_ping_latency_ms" field.
|
||||
func SumPingLatencyMsIn(vs ...int64) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldIn(FieldSumPingLatencyMs, vs...))
|
||||
}
|
||||
|
||||
// SumPingLatencyMsNotIn applies the NotIn predicate on the "sum_ping_latency_ms" field.
|
||||
func SumPingLatencyMsNotIn(vs ...int64) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldNotIn(FieldSumPingLatencyMs, vs...))
|
||||
}
|
||||
|
||||
// SumPingLatencyMsGT applies the GT predicate on the "sum_ping_latency_ms" field.
|
||||
func SumPingLatencyMsGT(v int64) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldGT(FieldSumPingLatencyMs, v))
|
||||
}
|
||||
|
||||
// SumPingLatencyMsGTE applies the GTE predicate on the "sum_ping_latency_ms" field.
|
||||
func SumPingLatencyMsGTE(v int64) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldGTE(FieldSumPingLatencyMs, v))
|
||||
}
|
||||
|
||||
// SumPingLatencyMsLT applies the LT predicate on the "sum_ping_latency_ms" field.
|
||||
func SumPingLatencyMsLT(v int64) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldLT(FieldSumPingLatencyMs, v))
|
||||
}
|
||||
|
||||
// SumPingLatencyMsLTE applies the LTE predicate on the "sum_ping_latency_ms" field.
|
||||
func SumPingLatencyMsLTE(v int64) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldLTE(FieldSumPingLatencyMs, v))
|
||||
}
|
||||
|
||||
// CountPingLatencyEQ applies the EQ predicate on the "count_ping_latency" field.
|
||||
func CountPingLatencyEQ(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldCountPingLatency, v))
|
||||
}
|
||||
|
||||
// CountPingLatencyNEQ applies the NEQ predicate on the "count_ping_latency" field.
|
||||
func CountPingLatencyNEQ(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldNEQ(FieldCountPingLatency, v))
|
||||
}
|
||||
|
||||
// CountPingLatencyIn applies the In predicate on the "count_ping_latency" field.
|
||||
func CountPingLatencyIn(vs ...int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldIn(FieldCountPingLatency, vs...))
|
||||
}
|
||||
|
||||
// CountPingLatencyNotIn applies the NotIn predicate on the "count_ping_latency" field.
|
||||
func CountPingLatencyNotIn(vs ...int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldNotIn(FieldCountPingLatency, vs...))
|
||||
}
|
||||
|
||||
// CountPingLatencyGT applies the GT predicate on the "count_ping_latency" field.
|
||||
func CountPingLatencyGT(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldGT(FieldCountPingLatency, v))
|
||||
}
|
||||
|
||||
// CountPingLatencyGTE applies the GTE predicate on the "count_ping_latency" field.
|
||||
func CountPingLatencyGTE(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldGTE(FieldCountPingLatency, v))
|
||||
}
|
||||
|
||||
// CountPingLatencyLT applies the LT predicate on the "count_ping_latency" field.
|
||||
func CountPingLatencyLT(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldLT(FieldCountPingLatency, v))
|
||||
}
|
||||
|
||||
// CountPingLatencyLTE applies the LTE predicate on the "count_ping_latency" field.
|
||||
func CountPingLatencyLTE(v int) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldLTE(FieldCountPingLatency, v))
|
||||
}
|
||||
|
||||
// ComputedAtEQ applies the EQ predicate on the "computed_at" field.
|
||||
func ComputedAtEQ(v time.Time) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldComputedAt, v))
|
||||
}
|
||||
|
||||
// ComputedAtNEQ applies the NEQ predicate on the "computed_at" field.
|
||||
func ComputedAtNEQ(v time.Time) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldNEQ(FieldComputedAt, v))
|
||||
}
|
||||
|
||||
// ComputedAtIn applies the In predicate on the "computed_at" field.
|
||||
func ComputedAtIn(vs ...time.Time) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldIn(FieldComputedAt, vs...))
|
||||
}
|
||||
|
||||
// ComputedAtNotIn applies the NotIn predicate on the "computed_at" field.
|
||||
func ComputedAtNotIn(vs ...time.Time) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldNotIn(FieldComputedAt, vs...))
|
||||
}
|
||||
|
||||
// ComputedAtGT applies the GT predicate on the "computed_at" field.
|
||||
func ComputedAtGT(v time.Time) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldGT(FieldComputedAt, v))
|
||||
}
|
||||
|
||||
// ComputedAtGTE applies the GTE predicate on the "computed_at" field.
|
||||
func ComputedAtGTE(v time.Time) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldGTE(FieldComputedAt, v))
|
||||
}
|
||||
|
||||
// ComputedAtLT applies the LT predicate on the "computed_at" field.
|
||||
func ComputedAtLT(v time.Time) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldLT(FieldComputedAt, v))
|
||||
}
|
||||
|
||||
// ComputedAtLTE applies the LTE predicate on the "computed_at" field.
|
||||
func ComputedAtLTE(v time.Time) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.FieldLTE(FieldComputedAt, v))
|
||||
}
|
||||
|
||||
// HasMonitor applies the HasEdge predicate on the "monitor" edge.
|
||||
func HasMonitor() predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(func(s *sql.Selector) {
|
||||
step := sqlgraph.NewStep(
|
||||
sqlgraph.From(Table, FieldID),
|
||||
sqlgraph.Edge(sqlgraph.M2O, true, MonitorTable, MonitorColumn),
|
||||
)
|
||||
sqlgraph.HasNeighbors(s, step)
|
||||
})
|
||||
}
|
||||
|
||||
// HasMonitorWith applies the HasEdge predicate on the "monitor" edge with a given conditions (other predicates).
|
||||
func HasMonitorWith(preds ...predicate.ChannelMonitor) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(func(s *sql.Selector) {
|
||||
step := newMonitorStep()
|
||||
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
|
||||
for _, p := range preds {
|
||||
p(s)
|
||||
}
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
// And groups predicates with the AND operator between them.
|
||||
func And(predicates ...predicate.ChannelMonitorDailyRollup) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.AndPredicates(predicates...))
|
||||
}
|
||||
|
||||
// Or groups predicates with the OR operator between them.
|
||||
func Or(predicates ...predicate.ChannelMonitorDailyRollup) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.OrPredicates(predicates...))
|
||||
}
|
||||
|
||||
// Not applies the not operator on the given predicate.
|
||||
func Not(p predicate.ChannelMonitorDailyRollup) predicate.ChannelMonitorDailyRollup {
|
||||
return predicate.ChannelMonitorDailyRollup(sql.NotPredicates(p))
|
||||
}
|
||||
1509
backend/ent/channelmonitordailyrollup_create.go
Normal file
88
backend/ent/channelmonitordailyrollup_delete.go
Normal file
@@ -0,0 +1,88 @@
|
||||
// Code generated by ent, DO NOT EDIT.

package ent

import (
	"context"

	"entgo.io/ent/dialect/sql"
	"entgo.io/ent/dialect/sql/sqlgraph"
	"entgo.io/ent/schema/field"
	"github.com/Wei-Shaw/sub2api/ent/channelmonitordailyrollup"
	"github.com/Wei-Shaw/sub2api/ent/predicate"
)

// ChannelMonitorDailyRollupDelete is the builder for deleting a ChannelMonitorDailyRollup entity.
type ChannelMonitorDailyRollupDelete struct {
	config
	hooks    []Hook
	mutation *ChannelMonitorDailyRollupMutation
}

// Where appends a list predicates to the ChannelMonitorDailyRollupDelete builder.
func (d *ChannelMonitorDailyRollupDelete) Where(ps ...predicate.ChannelMonitorDailyRollup) *ChannelMonitorDailyRollupDelete {
	d.mutation.Where(ps...)
	return d
}

// Exec executes the deletion query and returns how many vertices were deleted.
func (d *ChannelMonitorDailyRollupDelete) Exec(ctx context.Context) (int, error) {
	return withHooks(ctx, d.sqlExec, d.mutation, d.hooks)
}

// ExecX is like Exec, but panics if an error occurs.
func (d *ChannelMonitorDailyRollupDelete) ExecX(ctx context.Context) int {
	n, err := d.Exec(ctx)
	if err != nil {
		panic(err)
	}
	return n
}

// sqlExec builds the delete spec from the accumulated predicates and runs it.
func (d *ChannelMonitorDailyRollupDelete) sqlExec(ctx context.Context) (int, error) {
	spec := sqlgraph.NewDeleteSpec(channelmonitordailyrollup.Table, sqlgraph.NewFieldSpec(channelmonitordailyrollup.FieldID, field.TypeInt64))
	if ps := d.mutation.predicates; len(ps) > 0 {
		spec.Predicate = func(selector *sql.Selector) {
			for _, p := range ps {
				p(selector)
			}
		}
	}
	affected, err := sqlgraph.DeleteNodes(ctx, d.driver, spec)
	if err != nil && sqlgraph.IsConstraintError(err) {
		err = &ConstraintError{msg: err.Error(), wrap: err}
	}
	d.mutation.done = true
	return affected, err
}

// ChannelMonitorDailyRollupDeleteOne is the builder for deleting a single ChannelMonitorDailyRollup entity.
type ChannelMonitorDailyRollupDeleteOne struct {
	_d *ChannelMonitorDailyRollupDelete
}

// Where appends a list predicates to the ChannelMonitorDailyRollupDelete builder.
func (d *ChannelMonitorDailyRollupDeleteOne) Where(ps ...predicate.ChannelMonitorDailyRollup) *ChannelMonitorDailyRollupDeleteOne {
	d._d.mutation.Where(ps...)
	return d
}

// Exec executes the deletion query.
func (d *ChannelMonitorDailyRollupDeleteOne) Exec(ctx context.Context) error {
	n, err := d._d.Exec(ctx)
	switch {
	case err != nil:
		return err
	case n == 0:
		// Nothing matched: surface a typed not-found error for the single-entity API.
		return &NotFoundError{channelmonitordailyrollup.Label}
	default:
		return nil
	}
}

// ExecX is like Exec, but panics if an error occurs.
func (d *ChannelMonitorDailyRollupDeleteOne) ExecX(ctx context.Context) {
	if err := d.Exec(ctx); err != nil {
		panic(err)
	}
}
|
||||
643
backend/ent/channelmonitordailyrollup_query.go
Normal file
@@ -0,0 +1,643 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package ent
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math"
|
||||
|
||||
"entgo.io/ent"
|
||||
"entgo.io/ent/dialect"
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"entgo.io/ent/schema/field"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitor"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitordailyrollup"
|
||||
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||
)
|
||||
|
||||
// ChannelMonitorDailyRollupQuery is the builder for querying ChannelMonitorDailyRollup entities.
|
||||
type ChannelMonitorDailyRollupQuery struct {
|
||||
config
|
||||
ctx *QueryContext
|
||||
order []channelmonitordailyrollup.OrderOption
|
||||
inters []Interceptor
|
||||
predicates []predicate.ChannelMonitorDailyRollup
|
||||
withMonitor *ChannelMonitorQuery
|
||||
modifiers []func(*sql.Selector)
|
||||
// intermediate query (i.e. traversal path).
|
||||
sql *sql.Selector
|
||||
path func(context.Context) (*sql.Selector, error)
|
||||
}
|
||||
|
||||
// Where adds a new predicate for the ChannelMonitorDailyRollupQuery builder.
|
||||
func (_q *ChannelMonitorDailyRollupQuery) Where(ps ...predicate.ChannelMonitorDailyRollup) *ChannelMonitorDailyRollupQuery {
|
||||
_q.predicates = append(_q.predicates, ps...)
|
||||
return _q
|
||||
}
|
||||
|
||||
// Limit the number of records to be returned by this query.
|
||||
func (_q *ChannelMonitorDailyRollupQuery) Limit(limit int) *ChannelMonitorDailyRollupQuery {
|
||||
_q.ctx.Limit = &limit
|
||||
return _q
|
||||
}
|
||||
|
||||
// Offset to start from.
|
||||
func (_q *ChannelMonitorDailyRollupQuery) Offset(offset int) *ChannelMonitorDailyRollupQuery {
|
||||
_q.ctx.Offset = &offset
|
||||
return _q
|
||||
}
|
||||
|
||||
// Unique configures the query builder to filter duplicate records on query.
|
||||
// By default, unique is set to true, and can be disabled using this method.
|
||||
func (_q *ChannelMonitorDailyRollupQuery) Unique(unique bool) *ChannelMonitorDailyRollupQuery {
|
||||
_q.ctx.Unique = &unique
|
||||
return _q
|
||||
}
|
||||
|
||||
// Order specifies how the records should be ordered.
|
||||
func (_q *ChannelMonitorDailyRollupQuery) Order(o ...channelmonitordailyrollup.OrderOption) *ChannelMonitorDailyRollupQuery {
|
||||
_q.order = append(_q.order, o...)
|
||||
return _q
|
||||
}
|
||||
|
||||
// QueryMonitor chains the current query on the "monitor" edge.
|
||||
func (_q *ChannelMonitorDailyRollupQuery) QueryMonitor() *ChannelMonitorQuery {
|
||||
query := (&ChannelMonitorClient{config: _q.config}).Query()
|
||||
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
|
||||
if err := _q.prepareQuery(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
selector := _q.sqlQuery(ctx)
|
||||
if err := selector.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
step := sqlgraph.NewStep(
|
||||
sqlgraph.From(channelmonitordailyrollup.Table, channelmonitordailyrollup.FieldID, selector),
|
||||
sqlgraph.To(channelmonitor.Table, channelmonitor.FieldID),
|
||||
sqlgraph.Edge(sqlgraph.M2O, true, channelmonitordailyrollup.MonitorTable, channelmonitordailyrollup.MonitorColumn),
|
||||
)
|
||||
fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step)
|
||||
return fromU, nil
|
||||
}
|
||||
return query
|
||||
}
|
||||
|
||||
// First returns the first ChannelMonitorDailyRollup entity from the query.
|
||||
// Returns a *NotFoundError when no ChannelMonitorDailyRollup was found.
|
||||
func (_q *ChannelMonitorDailyRollupQuery) First(ctx context.Context) (*ChannelMonitorDailyRollup, error) {
|
||||
nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(nodes) == 0 {
|
||||
return nil, &NotFoundError{channelmonitordailyrollup.Label}
|
||||
}
|
||||
return nodes[0], nil
|
||||
}
|
||||
|
||||
// FirstX is like First, but panics if an error occurs.
|
||||
func (_q *ChannelMonitorDailyRollupQuery) FirstX(ctx context.Context) *ChannelMonitorDailyRollup {
|
||||
node, err := _q.First(ctx)
|
||||
if err != nil && !IsNotFound(err) {
|
||||
panic(err)
|
||||
}
|
||||
return node
|
||||
}
|
||||
|
||||
// FirstID returns the first ChannelMonitorDailyRollup ID from the query.
|
||||
// Returns a *NotFoundError when no ChannelMonitorDailyRollup ID was found.
|
||||
func (_q *ChannelMonitorDailyRollupQuery) FirstID(ctx context.Context) (id int64, err error) {
|
||||
var ids []int64
|
||||
if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil {
|
||||
return
|
||||
}
|
||||
if len(ids) == 0 {
|
||||
err = &NotFoundError{channelmonitordailyrollup.Label}
|
||||
return
|
||||
}
|
||||
return ids[0], nil
|
||||
}
|
||||
|
||||
// FirstIDX is like FirstID, but panics if an error occurs.
|
||||
func (_q *ChannelMonitorDailyRollupQuery) FirstIDX(ctx context.Context) int64 {
|
||||
id, err := _q.FirstID(ctx)
|
||||
if err != nil && !IsNotFound(err) {
|
||||
panic(err)
|
||||
}
|
||||
return id
|
||||
}
|
||||
|
||||
// Only returns a single ChannelMonitorDailyRollup entity found by the query, ensuring it only returns one.
|
||||
// Returns a *NotSingularError when more than one ChannelMonitorDailyRollup entity is found.
|
||||
// Returns a *NotFoundError when no ChannelMonitorDailyRollup entities are found.
|
||||
func (_q *ChannelMonitorDailyRollupQuery) Only(ctx context.Context) (*ChannelMonitorDailyRollup, error) {
|
||||
nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
switch len(nodes) {
|
||||
case 1:
|
||||
return nodes[0], nil
|
||||
case 0:
|
||||
return nil, &NotFoundError{channelmonitordailyrollup.Label}
|
||||
default:
|
||||
return nil, &NotSingularError{channelmonitordailyrollup.Label}
|
||||
}
|
||||
}
|
||||
|
||||
// OnlyX is like Only, but panics if an error occurs.
|
||||
func (_q *ChannelMonitorDailyRollupQuery) OnlyX(ctx context.Context) *ChannelMonitorDailyRollup {
|
||||
node, err := _q.Only(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return node
|
||||
}
|
||||
|
||||
// OnlyID is like Only, but returns the only ChannelMonitorDailyRollup ID in the query.
|
||||
// Returns a *NotSingularError when more than one ChannelMonitorDailyRollup ID is found.
|
||||
// Returns a *NotFoundError when no entities are found.
|
||||
func (_q *ChannelMonitorDailyRollupQuery) OnlyID(ctx context.Context) (id int64, err error) {
|
||||
var ids []int64
|
||||
if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil {
|
||||
return
|
||||
}
|
||||
switch len(ids) {
|
||||
case 1:
|
||||
id = ids[0]
|
||||
case 0:
|
||||
err = &NotFoundError{channelmonitordailyrollup.Label}
|
||||
default:
|
||||
err = &NotSingularError{channelmonitordailyrollup.Label}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// OnlyIDX is like OnlyID, but panics if an error occurs.
|
||||
func (_q *ChannelMonitorDailyRollupQuery) OnlyIDX(ctx context.Context) int64 {
|
||||
id, err := _q.OnlyID(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return id
|
||||
}
|
||||
|
||||
// All executes the query and returns a list of ChannelMonitorDailyRollups.
|
||||
func (_q *ChannelMonitorDailyRollupQuery) All(ctx context.Context) ([]*ChannelMonitorDailyRollup, error) {
|
||||
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll)
|
||||
if err := _q.prepareQuery(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
qr := querierAll[[]*ChannelMonitorDailyRollup, *ChannelMonitorDailyRollupQuery]()
|
||||
return withInterceptors[[]*ChannelMonitorDailyRollup](ctx, _q, qr, _q.inters)
|
||||
}
|
||||
|
||||
// AllX is like All, but panics if an error occurs.
|
||||
func (_q *ChannelMonitorDailyRollupQuery) AllX(ctx context.Context) []*ChannelMonitorDailyRollup {
|
||||
nodes, err := _q.All(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return nodes
|
||||
}
|
||||
|
||||
// IDs executes the query and returns a list of ChannelMonitorDailyRollup IDs.
|
||||
func (_q *ChannelMonitorDailyRollupQuery) IDs(ctx context.Context) (ids []int64, err error) {
|
||||
if _q.ctx.Unique == nil && _q.path != nil {
|
||||
_q.Unique(true)
|
||||
}
|
||||
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs)
|
||||
if err = _q.Select(channelmonitordailyrollup.FieldID).Scan(ctx, &ids); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ids, nil
|
||||
}
|
||||
|
||||
// IDsX is like IDs, but panics if an error occurs.
|
||||
func (_q *ChannelMonitorDailyRollupQuery) IDsX(ctx context.Context) []int64 {
|
||||
ids, err := _q.IDs(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return ids
|
||||
}
|
||||
|
||||
// Count returns the count of the given query.
|
||||
func (_q *ChannelMonitorDailyRollupQuery) Count(ctx context.Context) (int, error) {
|
||||
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount)
|
||||
if err := _q.prepareQuery(ctx); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return withInterceptors[int](ctx, _q, querierCount[*ChannelMonitorDailyRollupQuery](), _q.inters)
|
||||
}
|
||||
|
||||
// CountX is like Count, but panics if an error occurs.
|
||||
func (_q *ChannelMonitorDailyRollupQuery) CountX(ctx context.Context) int {
|
||||
count, err := _q.Count(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return count
|
||||
}
|
||||
|
||||
// Exist returns true if the query has elements in the graph.
|
||||
func (_q *ChannelMonitorDailyRollupQuery) Exist(ctx context.Context) (bool, error) {
|
||||
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist)
|
||||
switch _, err := _q.FirstID(ctx); {
|
||||
case IsNotFound(err):
|
||||
return false, nil
|
||||
case err != nil:
|
||||
return false, fmt.Errorf("ent: check existence: %w", err)
|
||||
default:
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
|
||||
// ExistX is like Exist, but panics if an error occurs.
|
||||
func (_q *ChannelMonitorDailyRollupQuery) ExistX(ctx context.Context) bool {
|
||||
exist, err := _q.Exist(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return exist
|
||||
}
|
||||
|
||||
// Clone returns a duplicate of the ChannelMonitorDailyRollupQuery builder, including all associated steps. It can be
|
||||
// used to prepare common query builders and use them differently after the clone is made.
|
||||
func (_q *ChannelMonitorDailyRollupQuery) Clone() *ChannelMonitorDailyRollupQuery {
|
||||
if _q == nil {
|
||||
return nil
|
||||
}
|
||||
return &ChannelMonitorDailyRollupQuery{
|
||||
config: _q.config,
|
||||
ctx: _q.ctx.Clone(),
|
||||
order: append([]channelmonitordailyrollup.OrderOption{}, _q.order...),
|
||||
inters: append([]Interceptor{}, _q.inters...),
|
||||
predicates: append([]predicate.ChannelMonitorDailyRollup{}, _q.predicates...),
|
||||
withMonitor: _q.withMonitor.Clone(),
|
||||
// clone intermediate query.
|
||||
sql: _q.sql.Clone(),
|
||||
path: _q.path,
|
||||
}
|
||||
}
|
||||
|
||||
// WithMonitor tells the query-builder to eager-load the nodes that are connected to
|
||||
// the "monitor" edge. The optional arguments are used to configure the query builder of the edge.
|
||||
func (_q *ChannelMonitorDailyRollupQuery) WithMonitor(opts ...func(*ChannelMonitorQuery)) *ChannelMonitorDailyRollupQuery {
|
||||
query := (&ChannelMonitorClient{config: _q.config}).Query()
|
||||
for _, opt := range opts {
|
||||
opt(query)
|
||||
}
|
||||
_q.withMonitor = query
|
||||
return _q
|
||||
}
|
||||
|
||||
// GroupBy is used to group vertices by one or more fields/columns.
|
||||
// It is often used with aggregate functions, like: count, max, mean, min, sum.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// var v []struct {
|
||||
// MonitorID int64 `json:"monitor_id,omitempty"`
|
||||
// Count int `json:"count,omitempty"`
|
||||
// }
|
||||
//
|
||||
// client.ChannelMonitorDailyRollup.Query().
|
||||
// GroupBy(channelmonitordailyrollup.FieldMonitorID).
|
||||
// Aggregate(ent.Count()).
|
||||
// Scan(ctx, &v)
|
||||
func (_q *ChannelMonitorDailyRollupQuery) GroupBy(field string, fields ...string) *ChannelMonitorDailyRollupGroupBy {
|
||||
_q.ctx.Fields = append([]string{field}, fields...)
|
||||
grbuild := &ChannelMonitorDailyRollupGroupBy{build: _q}
|
||||
grbuild.flds = &_q.ctx.Fields
|
||||
grbuild.label = channelmonitordailyrollup.Label
|
||||
grbuild.scan = grbuild.Scan
|
||||
return grbuild
|
||||
}
|
||||
|
||||
// Select allows the selection one or more fields/columns for the given query,
|
||||
// instead of selecting all fields in the entity.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// var v []struct {
|
||||
// MonitorID int64 `json:"monitor_id,omitempty"`
|
||||
// }
|
||||
//
|
||||
// client.ChannelMonitorDailyRollup.Query().
|
||||
// Select(channelmonitordailyrollup.FieldMonitorID).
|
||||
// Scan(ctx, &v)
|
||||
func (_q *ChannelMonitorDailyRollupQuery) Select(fields ...string) *ChannelMonitorDailyRollupSelect {
|
||||
_q.ctx.Fields = append(_q.ctx.Fields, fields...)
|
||||
sbuild := &ChannelMonitorDailyRollupSelect{ChannelMonitorDailyRollupQuery: _q}
|
||||
sbuild.label = channelmonitordailyrollup.Label
|
||||
sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan
|
||||
return sbuild
|
||||
}
|
||||
|
||||
// Aggregate returns a ChannelMonitorDailyRollupSelect configured with the given aggregations.
|
||||
func (_q *ChannelMonitorDailyRollupQuery) Aggregate(fns ...AggregateFunc) *ChannelMonitorDailyRollupSelect {
|
||||
return _q.Select().Aggregate(fns...)
|
||||
}
|
||||
|
||||
func (_q *ChannelMonitorDailyRollupQuery) prepareQuery(ctx context.Context) error {
|
||||
for _, inter := range _q.inters {
|
||||
if inter == nil {
|
||||
return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
|
||||
}
|
||||
if trv, ok := inter.(Traverser); ok {
|
||||
if err := trv.Traverse(ctx, _q); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
for _, f := range _q.ctx.Fields {
|
||||
if !channelmonitordailyrollup.ValidColumn(f) {
|
||||
return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
|
||||
}
|
||||
}
|
||||
if _q.path != nil {
|
||||
prev, err := _q.path(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_q.sql = prev
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (_q *ChannelMonitorDailyRollupQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*ChannelMonitorDailyRollup, error) {
|
||||
var (
|
||||
nodes = []*ChannelMonitorDailyRollup{}
|
||||
_spec = _q.querySpec()
|
||||
loadedTypes = [1]bool{
|
||||
_q.withMonitor != nil,
|
||||
}
|
||||
)
|
||||
_spec.ScanValues = func(columns []string) ([]any, error) {
|
||||
return (*ChannelMonitorDailyRollup).scanValues(nil, columns)
|
||||
}
|
||||
_spec.Assign = func(columns []string, values []any) error {
|
||||
node := &ChannelMonitorDailyRollup{config: _q.config}
|
||||
nodes = append(nodes, node)
|
||||
node.Edges.loadedTypes = loadedTypes
|
||||
return node.assignValues(columns, values)
|
||||
}
|
||||
if len(_q.modifiers) > 0 {
|
||||
_spec.Modifiers = _q.modifiers
|
||||
}
|
||||
for i := range hooks {
|
||||
hooks[i](ctx, _spec)
|
||||
}
|
||||
if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(nodes) == 0 {
|
||||
return nodes, nil
|
||||
}
|
||||
if query := _q.withMonitor; query != nil {
|
||||
if err := _q.loadMonitor(ctx, query, nodes, nil,
|
||||
func(n *ChannelMonitorDailyRollup, e *ChannelMonitor) { n.Edges.Monitor = e }); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return nodes, nil
|
||||
}
|
||||
|
||||
func (_q *ChannelMonitorDailyRollupQuery) loadMonitor(ctx context.Context, query *ChannelMonitorQuery, nodes []*ChannelMonitorDailyRollup, init func(*ChannelMonitorDailyRollup), assign func(*ChannelMonitorDailyRollup, *ChannelMonitor)) error {
|
||||
ids := make([]int64, 0, len(nodes))
|
||||
nodeids := make(map[int64][]*ChannelMonitorDailyRollup)
|
||||
for i := range nodes {
|
||||
fk := nodes[i].MonitorID
|
||||
if _, ok := nodeids[fk]; !ok {
|
||||
ids = append(ids, fk)
|
||||
}
|
||||
nodeids[fk] = append(nodeids[fk], nodes[i])
|
||||
}
|
||||
if len(ids) == 0 {
|
||||
return nil
|
||||
}
|
||||
query.Where(channelmonitor.IDIn(ids...))
|
||||
neighbors, err := query.All(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, n := range neighbors {
|
||||
nodes, ok := nodeids[n.ID]
|
||||
if !ok {
|
||||
return fmt.Errorf(`unexpected foreign-key "monitor_id" returned %v`, n.ID)
|
||||
}
|
||||
for i := range nodes {
|
||||
assign(nodes[i], n)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (_q *ChannelMonitorDailyRollupQuery) sqlCount(ctx context.Context) (int, error) {
|
||||
_spec := _q.querySpec()
|
||||
if len(_q.modifiers) > 0 {
|
||||
_spec.Modifiers = _q.modifiers
|
||||
}
|
||||
_spec.Node.Columns = _q.ctx.Fields
|
||||
if len(_q.ctx.Fields) > 0 {
|
||||
_spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique
|
||||
}
|
||||
return sqlgraph.CountNodes(ctx, _q.driver, _spec)
|
||||
}
|
||||
|
||||
func (_q *ChannelMonitorDailyRollupQuery) querySpec() *sqlgraph.QuerySpec {
|
||||
_spec := sqlgraph.NewQuerySpec(channelmonitordailyrollup.Table, channelmonitordailyrollup.Columns, sqlgraph.NewFieldSpec(channelmonitordailyrollup.FieldID, field.TypeInt64))
|
||||
_spec.From = _q.sql
|
||||
if unique := _q.ctx.Unique; unique != nil {
|
||||
_spec.Unique = *unique
|
||||
} else if _q.path != nil {
|
||||
_spec.Unique = true
|
||||
}
|
||||
if fields := _q.ctx.Fields; len(fields) > 0 {
|
||||
_spec.Node.Columns = make([]string, 0, len(fields))
|
||||
_spec.Node.Columns = append(_spec.Node.Columns, channelmonitordailyrollup.FieldID)
|
||||
for i := range fields {
|
||||
if fields[i] != channelmonitordailyrollup.FieldID {
|
||||
_spec.Node.Columns = append(_spec.Node.Columns, fields[i])
|
||||
}
|
||||
}
|
||||
if _q.withMonitor != nil {
|
||||
_spec.Node.AddColumnOnce(channelmonitordailyrollup.FieldMonitorID)
|
||||
}
|
||||
}
|
||||
if ps := _q.predicates; len(ps) > 0 {
|
||||
_spec.Predicate = func(selector *sql.Selector) {
|
||||
for i := range ps {
|
||||
ps[i](selector)
|
||||
}
|
||||
}
|
||||
}
|
||||
if limit := _q.ctx.Limit; limit != nil {
|
||||
_spec.Limit = *limit
|
||||
}
|
||||
if offset := _q.ctx.Offset; offset != nil {
|
||||
_spec.Offset = *offset
|
||||
}
|
||||
if ps := _q.order; len(ps) > 0 {
|
||||
_spec.Order = func(selector *sql.Selector) {
|
||||
for i := range ps {
|
||||
ps[i](selector)
|
||||
}
|
||||
}
|
||||
}
|
||||
return _spec
|
||||
}
|
||||
|
||||
func (_q *ChannelMonitorDailyRollupQuery) sqlQuery(ctx context.Context) *sql.Selector {
|
||||
builder := sql.Dialect(_q.driver.Dialect())
|
||||
t1 := builder.Table(channelmonitordailyrollup.Table)
|
||||
columns := _q.ctx.Fields
|
||||
if len(columns) == 0 {
|
||||
columns = channelmonitordailyrollup.Columns
|
||||
}
|
||||
selector := builder.Select(t1.Columns(columns...)...).From(t1)
|
||||
if _q.sql != nil {
|
||||
selector = _q.sql
|
||||
selector.Select(selector.Columns(columns...)...)
|
||||
}
|
||||
if _q.ctx.Unique != nil && *_q.ctx.Unique {
|
||||
selector.Distinct()
|
||||
}
|
||||
for _, m := range _q.modifiers {
|
||||
m(selector)
|
||||
}
|
||||
for _, p := range _q.predicates {
|
||||
p(selector)
|
||||
}
|
||||
for _, p := range _q.order {
|
||||
p(selector)
|
||||
}
|
||||
if offset := _q.ctx.Offset; offset != nil {
|
||||
// limit is mandatory for offset clause. We start
|
||||
// with default value, and override it below if needed.
|
||||
selector.Offset(*offset).Limit(math.MaxInt32)
|
||||
}
|
||||
if limit := _q.ctx.Limit; limit != nil {
|
||||
selector.Limit(*limit)
|
||||
}
|
||||
return selector
|
||||
}
|
||||
|
||||
// ForUpdate locks the selected rows against concurrent updates, and prevent them from being
|
||||
// updated, deleted or "selected ... for update" by other sessions, until the transaction is
|
||||
// either committed or rolled-back.
|
||||
func (_q *ChannelMonitorDailyRollupQuery) ForUpdate(opts ...sql.LockOption) *ChannelMonitorDailyRollupQuery {
|
||||
if _q.driver.Dialect() == dialect.Postgres {
|
||||
_q.Unique(false)
|
||||
}
|
||||
_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
|
||||
s.ForUpdate(opts...)
|
||||
})
|
||||
return _q
|
||||
}
|
||||
|
||||
// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock
|
||||
// on any rows that are read. Other sessions can read the rows, but cannot modify them
|
||||
// until your transaction commits.
|
||||
func (_q *ChannelMonitorDailyRollupQuery) ForShare(opts ...sql.LockOption) *ChannelMonitorDailyRollupQuery {
|
||||
if _q.driver.Dialect() == dialect.Postgres {
|
||||
_q.Unique(false)
|
||||
}
|
||||
_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
|
||||
s.ForShare(opts...)
|
||||
})
|
||||
return _q
|
||||
}
|
||||
|
||||
// ChannelMonitorDailyRollupGroupBy is the group-by builder for ChannelMonitorDailyRollup entities.
|
||||
type ChannelMonitorDailyRollupGroupBy struct {
|
||||
selector
|
||||
build *ChannelMonitorDailyRollupQuery
|
||||
}
|
||||
|
||||
// Aggregate adds the given aggregation functions to the group-by query.
|
||||
func (_g *ChannelMonitorDailyRollupGroupBy) Aggregate(fns ...AggregateFunc) *ChannelMonitorDailyRollupGroupBy {
|
||||
_g.fns = append(_g.fns, fns...)
|
||||
return _g
|
||||
}
|
||||
|
||||
// Scan applies the selector query and scans the result into the given value.
|
||||
func (_g *ChannelMonitorDailyRollupGroupBy) Scan(ctx context.Context, v any) error {
|
||||
ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy)
|
||||
if err := _g.build.prepareQuery(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
return scanWithInterceptors[*ChannelMonitorDailyRollupQuery, *ChannelMonitorDailyRollupGroupBy](ctx, _g.build, _g, _g.build.inters, v)
|
||||
}
|
||||
|
||||
func (_g *ChannelMonitorDailyRollupGroupBy) sqlScan(ctx context.Context, root *ChannelMonitorDailyRollupQuery, v any) error {
|
||||
selector := root.sqlQuery(ctx).Select()
|
||||
aggregation := make([]string, 0, len(_g.fns))
|
||||
for _, fn := range _g.fns {
|
||||
aggregation = append(aggregation, fn(selector))
|
||||
}
|
||||
if len(selector.SelectedColumns()) == 0 {
|
||||
columns := make([]string, 0, len(*_g.flds)+len(_g.fns))
|
||||
for _, f := range *_g.flds {
|
||||
columns = append(columns, selector.C(f))
|
||||
}
|
||||
columns = append(columns, aggregation...)
|
||||
selector.Select(columns...)
|
||||
}
|
||||
selector.GroupBy(selector.Columns(*_g.flds...)...)
|
||||
if err := selector.Err(); err != nil {
|
||||
return err
|
||||
}
|
||||
rows := &sql.Rows{}
|
||||
query, args := selector.Query()
|
||||
if err := _g.build.driver.Query(ctx, query, args, rows); err != nil {
|
||||
return err
|
||||
}
|
||||
defer rows.Close()
|
||||
return sql.ScanSlice(rows, v)
|
||||
}
|
||||
|
||||
// ChannelMonitorDailyRollupSelect is the builder for selecting fields of ChannelMonitorDailyRollup entities.
|
||||
type ChannelMonitorDailyRollupSelect struct {
|
||||
*ChannelMonitorDailyRollupQuery
|
||||
selector
|
||||
}
|
||||
|
||||
// Aggregate adds the given aggregation functions to the selector query.
|
||||
func (_s *ChannelMonitorDailyRollupSelect) Aggregate(fns ...AggregateFunc) *ChannelMonitorDailyRollupSelect {
|
||||
_s.fns = append(_s.fns, fns...)
|
||||
return _s
|
||||
}
|
||||
|
||||
// Scan applies the selector query and scans the result into the given value.
|
||||
func (_s *ChannelMonitorDailyRollupSelect) Scan(ctx context.Context, v any) error {
|
||||
ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect)
|
||||
if err := _s.prepareQuery(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
return scanWithInterceptors[*ChannelMonitorDailyRollupQuery, *ChannelMonitorDailyRollupSelect](ctx, _s.ChannelMonitorDailyRollupQuery, _s, _s.inters, v)
|
||||
}
|
||||
|
||||
func (_s *ChannelMonitorDailyRollupSelect) sqlScan(ctx context.Context, root *ChannelMonitorDailyRollupQuery, v any) error {
|
||||
selector := root.sqlQuery(ctx)
|
||||
aggregation := make([]string, 0, len(_s.fns))
|
||||
for _, fn := range _s.fns {
|
||||
aggregation = append(aggregation, fn(selector))
|
||||
}
|
||||
switch n := len(*_s.selector.flds); {
|
||||
case n == 0 && len(aggregation) > 0:
|
||||
selector.Select(aggregation...)
|
||||
case n != 0 && len(aggregation) > 0:
|
||||
selector.AppendSelect(aggregation...)
|
||||
}
|
||||
rows := &sql.Rows{}
|
||||
query, args := selector.Query()
|
||||
if err := _s.driver.Query(ctx, query, args, rows); err != nil {
|
||||
return err
|
||||
}
|
||||
defer rows.Close()
|
||||
return sql.ScanSlice(rows, v)
|
||||
}
|
||||
961
backend/ent/channelmonitordailyrollup_update.go
Normal file
@@ -0,0 +1,961 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package ent
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"entgo.io/ent/schema/field"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitor"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitordailyrollup"
|
||||
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||
)
|
||||
|
||||
// ChannelMonitorDailyRollupUpdate is the builder for updating ChannelMonitorDailyRollup entities.
|
||||
type ChannelMonitorDailyRollupUpdate struct {
|
||||
config
|
||||
hooks []Hook
|
||||
mutation *ChannelMonitorDailyRollupMutation
|
||||
}
|
||||
|
||||
// Where appends a list predicates to the ChannelMonitorDailyRollupUpdate builder.
|
||||
func (_u *ChannelMonitorDailyRollupUpdate) Where(ps ...predicate.ChannelMonitorDailyRollup) *ChannelMonitorDailyRollupUpdate {
|
||||
_u.mutation.Where(ps...)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetMonitorID sets the "monitor_id" field.
|
||||
func (_u *ChannelMonitorDailyRollupUpdate) SetMonitorID(v int64) *ChannelMonitorDailyRollupUpdate {
|
||||
_u.mutation.SetMonitorID(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableMonitorID sets the "monitor_id" field if the given value is not nil.
|
||||
func (_u *ChannelMonitorDailyRollupUpdate) SetNillableMonitorID(v *int64) *ChannelMonitorDailyRollupUpdate {
|
||||
if v != nil {
|
||||
_u.SetMonitorID(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetModel sets the "model" field.
|
||||
func (_u *ChannelMonitorDailyRollupUpdate) SetModel(v string) *ChannelMonitorDailyRollupUpdate {
|
||||
_u.mutation.SetModel(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableModel sets the "model" field if the given value is not nil.
|
||||
func (_u *ChannelMonitorDailyRollupUpdate) SetNillableModel(v *string) *ChannelMonitorDailyRollupUpdate {
|
||||
if v != nil {
|
||||
_u.SetModel(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetBucketDate sets the "bucket_date" field.
|
||||
func (_u *ChannelMonitorDailyRollupUpdate) SetBucketDate(v time.Time) *ChannelMonitorDailyRollupUpdate {
|
||||
_u.mutation.SetBucketDate(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableBucketDate sets the "bucket_date" field if the given value is not nil.
|
||||
func (_u *ChannelMonitorDailyRollupUpdate) SetNillableBucketDate(v *time.Time) *ChannelMonitorDailyRollupUpdate {
|
||||
if v != nil {
|
||||
_u.SetBucketDate(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetTotalChecks sets the "total_checks" field.
|
||||
func (_u *ChannelMonitorDailyRollupUpdate) SetTotalChecks(v int) *ChannelMonitorDailyRollupUpdate {
|
||||
_u.mutation.ResetTotalChecks()
|
||||
_u.mutation.SetTotalChecks(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableTotalChecks sets the "total_checks" field if the given value is not nil.
|
||||
func (_u *ChannelMonitorDailyRollupUpdate) SetNillableTotalChecks(v *int) *ChannelMonitorDailyRollupUpdate {
|
||||
if v != nil {
|
||||
_u.SetTotalChecks(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// AddTotalChecks adds value to the "total_checks" field.
|
||||
func (_u *ChannelMonitorDailyRollupUpdate) AddTotalChecks(v int) *ChannelMonitorDailyRollupUpdate {
|
||||
_u.mutation.AddTotalChecks(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetOkCount sets the "ok_count" field.
|
||||
func (_u *ChannelMonitorDailyRollupUpdate) SetOkCount(v int) *ChannelMonitorDailyRollupUpdate {
|
||||
_u.mutation.ResetOkCount()
|
||||
_u.mutation.SetOkCount(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableOkCount sets the "ok_count" field if the given value is not nil.
|
||||
func (_u *ChannelMonitorDailyRollupUpdate) SetNillableOkCount(v *int) *ChannelMonitorDailyRollupUpdate {
|
||||
if v != nil {
|
||||
_u.SetOkCount(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// AddOkCount adds value to the "ok_count" field.
|
||||
func (_u *ChannelMonitorDailyRollupUpdate) AddOkCount(v int) *ChannelMonitorDailyRollupUpdate {
|
||||
_u.mutation.AddOkCount(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetOperationalCount sets the "operational_count" field.
|
||||
func (_u *ChannelMonitorDailyRollupUpdate) SetOperationalCount(v int) *ChannelMonitorDailyRollupUpdate {
|
||||
_u.mutation.ResetOperationalCount()
|
||||
_u.mutation.SetOperationalCount(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableOperationalCount sets the "operational_count" field if the given value is not nil.
|
||||
func (_u *ChannelMonitorDailyRollupUpdate) SetNillableOperationalCount(v *int) *ChannelMonitorDailyRollupUpdate {
|
||||
if v != nil {
|
||||
_u.SetOperationalCount(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// AddOperationalCount adds value to the "operational_count" field.
|
||||
func (_u *ChannelMonitorDailyRollupUpdate) AddOperationalCount(v int) *ChannelMonitorDailyRollupUpdate {
|
||||
_u.mutation.AddOperationalCount(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetDegradedCount sets the "degraded_count" field.
|
||||
func (_u *ChannelMonitorDailyRollupUpdate) SetDegradedCount(v int) *ChannelMonitorDailyRollupUpdate {
|
||||
_u.mutation.ResetDegradedCount()
|
||||
_u.mutation.SetDegradedCount(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableDegradedCount sets the "degraded_count" field if the given value is not nil.
|
||||
func (_u *ChannelMonitorDailyRollupUpdate) SetNillableDegradedCount(v *int) *ChannelMonitorDailyRollupUpdate {
|
||||
if v != nil {
|
||||
_u.SetDegradedCount(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// AddDegradedCount adds value to the "degraded_count" field.
|
||||
func (_u *ChannelMonitorDailyRollupUpdate) AddDegradedCount(v int) *ChannelMonitorDailyRollupUpdate {
|
||||
_u.mutation.AddDegradedCount(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetFailedCount sets the "failed_count" field.
|
||||
func (_u *ChannelMonitorDailyRollupUpdate) SetFailedCount(v int) *ChannelMonitorDailyRollupUpdate {
|
||||
_u.mutation.ResetFailedCount()
|
||||
_u.mutation.SetFailedCount(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableFailedCount sets the "failed_count" field if the given value is not nil.
|
||||
func (_u *ChannelMonitorDailyRollupUpdate) SetNillableFailedCount(v *int) *ChannelMonitorDailyRollupUpdate {
|
||||
if v != nil {
|
||||
_u.SetFailedCount(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// AddFailedCount adds value to the "failed_count" field.
|
||||
func (_u *ChannelMonitorDailyRollupUpdate) AddFailedCount(v int) *ChannelMonitorDailyRollupUpdate {
|
||||
_u.mutation.AddFailedCount(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetErrorCount sets the "error_count" field.
|
||||
func (_u *ChannelMonitorDailyRollupUpdate) SetErrorCount(v int) *ChannelMonitorDailyRollupUpdate {
|
||||
_u.mutation.ResetErrorCount()
|
||||
_u.mutation.SetErrorCount(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableErrorCount sets the "error_count" field if the given value is not nil.
|
||||
func (_u *ChannelMonitorDailyRollupUpdate) SetNillableErrorCount(v *int) *ChannelMonitorDailyRollupUpdate {
|
||||
if v != nil {
|
||||
_u.SetErrorCount(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// AddErrorCount adds value to the "error_count" field.
|
||||
func (_u *ChannelMonitorDailyRollupUpdate) AddErrorCount(v int) *ChannelMonitorDailyRollupUpdate {
|
||||
_u.mutation.AddErrorCount(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetSumLatencyMs sets the "sum_latency_ms" field.
|
||||
func (_u *ChannelMonitorDailyRollupUpdate) SetSumLatencyMs(v int64) *ChannelMonitorDailyRollupUpdate {
|
||||
_u.mutation.ResetSumLatencyMs()
|
||||
_u.mutation.SetSumLatencyMs(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableSumLatencyMs sets the "sum_latency_ms" field if the given value is not nil.
|
||||
func (_u *ChannelMonitorDailyRollupUpdate) SetNillableSumLatencyMs(v *int64) *ChannelMonitorDailyRollupUpdate {
|
||||
if v != nil {
|
||||
_u.SetSumLatencyMs(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// AddSumLatencyMs adds value to the "sum_latency_ms" field.
|
||||
func (_u *ChannelMonitorDailyRollupUpdate) AddSumLatencyMs(v int64) *ChannelMonitorDailyRollupUpdate {
|
||||
_u.mutation.AddSumLatencyMs(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetCountLatency sets the "count_latency" field.
|
||||
func (_u *ChannelMonitorDailyRollupUpdate) SetCountLatency(v int) *ChannelMonitorDailyRollupUpdate {
|
||||
_u.mutation.ResetCountLatency()
|
||||
_u.mutation.SetCountLatency(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableCountLatency sets the "count_latency" field if the given value is not nil.
|
||||
func (_u *ChannelMonitorDailyRollupUpdate) SetNillableCountLatency(v *int) *ChannelMonitorDailyRollupUpdate {
|
||||
if v != nil {
|
||||
_u.SetCountLatency(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// AddCountLatency adds value to the "count_latency" field.
|
||||
func (_u *ChannelMonitorDailyRollupUpdate) AddCountLatency(v int) *ChannelMonitorDailyRollupUpdate {
|
||||
_u.mutation.AddCountLatency(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetSumPingLatencyMs sets the "sum_ping_latency_ms" field.
|
||||
func (_u *ChannelMonitorDailyRollupUpdate) SetSumPingLatencyMs(v int64) *ChannelMonitorDailyRollupUpdate {
|
||||
_u.mutation.ResetSumPingLatencyMs()
|
||||
_u.mutation.SetSumPingLatencyMs(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableSumPingLatencyMs sets the "sum_ping_latency_ms" field if the given value is not nil.
|
||||
func (_u *ChannelMonitorDailyRollupUpdate) SetNillableSumPingLatencyMs(v *int64) *ChannelMonitorDailyRollupUpdate {
|
||||
if v != nil {
|
||||
_u.SetSumPingLatencyMs(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// AddSumPingLatencyMs adds value to the "sum_ping_latency_ms" field.
|
||||
func (_u *ChannelMonitorDailyRollupUpdate) AddSumPingLatencyMs(v int64) *ChannelMonitorDailyRollupUpdate {
|
||||
_u.mutation.AddSumPingLatencyMs(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetCountPingLatency sets the "count_ping_latency" field.
|
||||
func (_u *ChannelMonitorDailyRollupUpdate) SetCountPingLatency(v int) *ChannelMonitorDailyRollupUpdate {
|
||||
_u.mutation.ResetCountPingLatency()
|
||||
_u.mutation.SetCountPingLatency(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableCountPingLatency sets the "count_ping_latency" field if the given value is not nil.
|
||||
func (_u *ChannelMonitorDailyRollupUpdate) SetNillableCountPingLatency(v *int) *ChannelMonitorDailyRollupUpdate {
|
||||
if v != nil {
|
||||
_u.SetCountPingLatency(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// AddCountPingLatency adds value to the "count_ping_latency" field.
|
||||
func (_u *ChannelMonitorDailyRollupUpdate) AddCountPingLatency(v int) *ChannelMonitorDailyRollupUpdate {
|
||||
_u.mutation.AddCountPingLatency(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetComputedAt sets the "computed_at" field.
|
||||
func (_u *ChannelMonitorDailyRollupUpdate) SetComputedAt(v time.Time) *ChannelMonitorDailyRollupUpdate {
|
||||
_u.mutation.SetComputedAt(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetMonitor sets the "monitor" edge to the ChannelMonitor entity.
|
||||
func (_u *ChannelMonitorDailyRollupUpdate) SetMonitor(v *ChannelMonitor) *ChannelMonitorDailyRollupUpdate {
|
||||
return _u.SetMonitorID(v.ID)
|
||||
}
|
||||
|
||||
// Mutation returns the ChannelMonitorDailyRollupMutation object of the builder.
|
||||
func (_u *ChannelMonitorDailyRollupUpdate) Mutation() *ChannelMonitorDailyRollupMutation {
|
||||
return _u.mutation
|
||||
}
|
||||
|
||||
// ClearMonitor clears the "monitor" edge to the ChannelMonitor entity.
|
||||
func (_u *ChannelMonitorDailyRollupUpdate) ClearMonitor() *ChannelMonitorDailyRollupUpdate {
|
||||
_u.mutation.ClearMonitor()
|
||||
return _u
|
||||
}
|
||||
|
||||
// Save executes the query and returns the number of nodes affected by the update operation.
|
||||
func (_u *ChannelMonitorDailyRollupUpdate) Save(ctx context.Context) (int, error) {
|
||||
_u.defaults()
|
||||
return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks)
|
||||
}
|
||||
|
||||
// SaveX is like Save, but panics if an error occurs.
|
||||
func (_u *ChannelMonitorDailyRollupUpdate) SaveX(ctx context.Context) int {
|
||||
affected, err := _u.Save(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return affected
|
||||
}
|
||||
|
||||
// Exec executes the query.
|
||||
func (_u *ChannelMonitorDailyRollupUpdate) Exec(ctx context.Context) error {
|
||||
_, err := _u.Save(ctx)
|
||||
return err
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (_u *ChannelMonitorDailyRollupUpdate) ExecX(ctx context.Context) {
|
||||
if err := _u.Exec(ctx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// defaults sets the default values of the builder before save.
|
||||
func (_u *ChannelMonitorDailyRollupUpdate) defaults() {
|
||||
if _, ok := _u.mutation.ComputedAt(); !ok {
|
||||
v := channelmonitordailyrollup.UpdateDefaultComputedAt()
|
||||
_u.mutation.SetComputedAt(v)
|
||||
}
|
||||
}
|
||||
|
||||
// check runs all checks and user-defined validators on the builder.
|
||||
func (_u *ChannelMonitorDailyRollupUpdate) check() error {
|
||||
if v, ok := _u.mutation.Model(); ok {
|
||||
if err := channelmonitordailyrollup.ModelValidator(v); err != nil {
|
||||
return &ValidationError{Name: "model", err: fmt.Errorf(`ent: validator failed for field "ChannelMonitorDailyRollup.model": %w`, err)}
|
||||
}
|
||||
}
|
||||
if _u.mutation.MonitorCleared() && len(_u.mutation.MonitorIDs()) > 0 {
|
||||
return errors.New(`ent: clearing a required unique edge "ChannelMonitorDailyRollup.monitor"`)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (_u *ChannelMonitorDailyRollupUpdate) sqlSave(ctx context.Context) (_node int, err error) {
|
||||
if err := _u.check(); err != nil {
|
||||
return _node, err
|
||||
}
|
||||
_spec := sqlgraph.NewUpdateSpec(channelmonitordailyrollup.Table, channelmonitordailyrollup.Columns, sqlgraph.NewFieldSpec(channelmonitordailyrollup.FieldID, field.TypeInt64))
|
||||
if ps := _u.mutation.predicates; len(ps) > 0 {
|
||||
_spec.Predicate = func(selector *sql.Selector) {
|
||||
for i := range ps {
|
||||
ps[i](selector)
|
||||
}
|
||||
}
|
||||
}
|
||||
if value, ok := _u.mutation.Model(); ok {
|
||||
_spec.SetField(channelmonitordailyrollup.FieldModel, field.TypeString, value)
|
||||
}
|
||||
if value, ok := _u.mutation.BucketDate(); ok {
|
||||
_spec.SetField(channelmonitordailyrollup.FieldBucketDate, field.TypeTime, value)
|
||||
}
|
||||
if value, ok := _u.mutation.TotalChecks(); ok {
|
||||
_spec.SetField(channelmonitordailyrollup.FieldTotalChecks, field.TypeInt, value)
|
||||
}
|
||||
if value, ok := _u.mutation.AddedTotalChecks(); ok {
|
||||
_spec.AddField(channelmonitordailyrollup.FieldTotalChecks, field.TypeInt, value)
|
||||
}
|
||||
if value, ok := _u.mutation.OkCount(); ok {
|
||||
_spec.SetField(channelmonitordailyrollup.FieldOkCount, field.TypeInt, value)
|
||||
}
|
||||
if value, ok := _u.mutation.AddedOkCount(); ok {
|
||||
_spec.AddField(channelmonitordailyrollup.FieldOkCount, field.TypeInt, value)
|
||||
}
|
||||
if value, ok := _u.mutation.OperationalCount(); ok {
|
||||
_spec.SetField(channelmonitordailyrollup.FieldOperationalCount, field.TypeInt, value)
|
||||
}
|
||||
if value, ok := _u.mutation.AddedOperationalCount(); ok {
|
||||
_spec.AddField(channelmonitordailyrollup.FieldOperationalCount, field.TypeInt, value)
|
||||
}
|
||||
if value, ok := _u.mutation.DegradedCount(); ok {
|
||||
_spec.SetField(channelmonitordailyrollup.FieldDegradedCount, field.TypeInt, value)
|
||||
}
|
||||
if value, ok := _u.mutation.AddedDegradedCount(); ok {
|
||||
_spec.AddField(channelmonitordailyrollup.FieldDegradedCount, field.TypeInt, value)
|
||||
}
|
||||
if value, ok := _u.mutation.FailedCount(); ok {
|
||||
_spec.SetField(channelmonitordailyrollup.FieldFailedCount, field.TypeInt, value)
|
||||
}
|
||||
if value, ok := _u.mutation.AddedFailedCount(); ok {
|
||||
_spec.AddField(channelmonitordailyrollup.FieldFailedCount, field.TypeInt, value)
|
||||
}
|
||||
if value, ok := _u.mutation.ErrorCount(); ok {
|
||||
_spec.SetField(channelmonitordailyrollup.FieldErrorCount, field.TypeInt, value)
|
||||
}
|
||||
if value, ok := _u.mutation.AddedErrorCount(); ok {
|
||||
_spec.AddField(channelmonitordailyrollup.FieldErrorCount, field.TypeInt, value)
|
||||
}
|
||||
if value, ok := _u.mutation.SumLatencyMs(); ok {
|
||||
_spec.SetField(channelmonitordailyrollup.FieldSumLatencyMs, field.TypeInt64, value)
|
||||
}
|
||||
if value, ok := _u.mutation.AddedSumLatencyMs(); ok {
|
||||
_spec.AddField(channelmonitordailyrollup.FieldSumLatencyMs, field.TypeInt64, value)
|
||||
}
|
||||
if value, ok := _u.mutation.CountLatency(); ok {
|
||||
_spec.SetField(channelmonitordailyrollup.FieldCountLatency, field.TypeInt, value)
|
||||
}
|
||||
if value, ok := _u.mutation.AddedCountLatency(); ok {
|
||||
_spec.AddField(channelmonitordailyrollup.FieldCountLatency, field.TypeInt, value)
|
||||
}
|
||||
if value, ok := _u.mutation.SumPingLatencyMs(); ok {
|
||||
_spec.SetField(channelmonitordailyrollup.FieldSumPingLatencyMs, field.TypeInt64, value)
|
||||
}
|
||||
if value, ok := _u.mutation.AddedSumPingLatencyMs(); ok {
|
||||
_spec.AddField(channelmonitordailyrollup.FieldSumPingLatencyMs, field.TypeInt64, value)
|
||||
}
|
||||
if value, ok := _u.mutation.CountPingLatency(); ok {
|
||||
_spec.SetField(channelmonitordailyrollup.FieldCountPingLatency, field.TypeInt, value)
|
||||
}
|
||||
if value, ok := _u.mutation.AddedCountPingLatency(); ok {
|
||||
_spec.AddField(channelmonitordailyrollup.FieldCountPingLatency, field.TypeInt, value)
|
||||
}
|
||||
if value, ok := _u.mutation.ComputedAt(); ok {
|
||||
_spec.SetField(channelmonitordailyrollup.FieldComputedAt, field.TypeTime, value)
|
||||
}
|
||||
if _u.mutation.MonitorCleared() {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
Rel: sqlgraph.M2O,
|
||||
Inverse: true,
|
||||
Table: channelmonitordailyrollup.MonitorTable,
|
||||
Columns: []string{channelmonitordailyrollup.MonitorColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: sqlgraph.NewFieldSpec(channelmonitor.FieldID, field.TypeInt64),
|
||||
},
|
||||
}
|
||||
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||
}
|
||||
if nodes := _u.mutation.MonitorIDs(); len(nodes) > 0 {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
Rel: sqlgraph.M2O,
|
||||
Inverse: true,
|
||||
Table: channelmonitordailyrollup.MonitorTable,
|
||||
Columns: []string{channelmonitordailyrollup.MonitorColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: sqlgraph.NewFieldSpec(channelmonitor.FieldID, field.TypeInt64),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
edge.Target.Nodes = append(edge.Target.Nodes, k)
|
||||
}
|
||||
_spec.Edges.Add = append(_spec.Edges.Add, edge)
|
||||
}
|
||||
if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil {
|
||||
if _, ok := err.(*sqlgraph.NotFoundError); ok {
|
||||
err = &NotFoundError{channelmonitordailyrollup.Label}
|
||||
} else if sqlgraph.IsConstraintError(err) {
|
||||
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||
}
|
||||
return 0, err
|
||||
}
|
||||
_u.mutation.done = true
|
||||
return _node, nil
|
||||
}
|
||||
|
||||
// ChannelMonitorDailyRollupUpdateOne is the builder for updating a single ChannelMonitorDailyRollup entity.
|
||||
type ChannelMonitorDailyRollupUpdateOne struct {
|
||||
config
|
||||
fields []string
|
||||
hooks []Hook
|
||||
mutation *ChannelMonitorDailyRollupMutation
|
||||
}
|
||||
|
||||
// SetMonitorID sets the "monitor_id" field.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) SetMonitorID(v int64) *ChannelMonitorDailyRollupUpdateOne {
|
||||
_u.mutation.SetMonitorID(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableMonitorID sets the "monitor_id" field if the given value is not nil.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) SetNillableMonitorID(v *int64) *ChannelMonitorDailyRollupUpdateOne {
|
||||
if v != nil {
|
||||
_u.SetMonitorID(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetModel sets the "model" field.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) SetModel(v string) *ChannelMonitorDailyRollupUpdateOne {
|
||||
_u.mutation.SetModel(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableModel sets the "model" field if the given value is not nil.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) SetNillableModel(v *string) *ChannelMonitorDailyRollupUpdateOne {
|
||||
if v != nil {
|
||||
_u.SetModel(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetBucketDate sets the "bucket_date" field.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) SetBucketDate(v time.Time) *ChannelMonitorDailyRollupUpdateOne {
|
||||
_u.mutation.SetBucketDate(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableBucketDate sets the "bucket_date" field if the given value is not nil.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) SetNillableBucketDate(v *time.Time) *ChannelMonitorDailyRollupUpdateOne {
|
||||
if v != nil {
|
||||
_u.SetBucketDate(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetTotalChecks sets the "total_checks" field.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) SetTotalChecks(v int) *ChannelMonitorDailyRollupUpdateOne {
|
||||
_u.mutation.ResetTotalChecks()
|
||||
_u.mutation.SetTotalChecks(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableTotalChecks sets the "total_checks" field if the given value is not nil.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) SetNillableTotalChecks(v *int) *ChannelMonitorDailyRollupUpdateOne {
|
||||
if v != nil {
|
||||
_u.SetTotalChecks(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// AddTotalChecks adds value to the "total_checks" field.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) AddTotalChecks(v int) *ChannelMonitorDailyRollupUpdateOne {
|
||||
_u.mutation.AddTotalChecks(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetOkCount sets the "ok_count" field.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) SetOkCount(v int) *ChannelMonitorDailyRollupUpdateOne {
|
||||
_u.mutation.ResetOkCount()
|
||||
_u.mutation.SetOkCount(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableOkCount sets the "ok_count" field if the given value is not nil.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) SetNillableOkCount(v *int) *ChannelMonitorDailyRollupUpdateOne {
|
||||
if v != nil {
|
||||
_u.SetOkCount(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// AddOkCount adds value to the "ok_count" field.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) AddOkCount(v int) *ChannelMonitorDailyRollupUpdateOne {
|
||||
_u.mutation.AddOkCount(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetOperationalCount sets the "operational_count" field.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) SetOperationalCount(v int) *ChannelMonitorDailyRollupUpdateOne {
|
||||
_u.mutation.ResetOperationalCount()
|
||||
_u.mutation.SetOperationalCount(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableOperationalCount sets the "operational_count" field if the given value is not nil.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) SetNillableOperationalCount(v *int) *ChannelMonitorDailyRollupUpdateOne {
|
||||
if v != nil {
|
||||
_u.SetOperationalCount(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// AddOperationalCount adds value to the "operational_count" field.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) AddOperationalCount(v int) *ChannelMonitorDailyRollupUpdateOne {
|
||||
_u.mutation.AddOperationalCount(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetDegradedCount sets the "degraded_count" field.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) SetDegradedCount(v int) *ChannelMonitorDailyRollupUpdateOne {
|
||||
_u.mutation.ResetDegradedCount()
|
||||
_u.mutation.SetDegradedCount(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableDegradedCount sets the "degraded_count" field if the given value is not nil.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) SetNillableDegradedCount(v *int) *ChannelMonitorDailyRollupUpdateOne {
|
||||
if v != nil {
|
||||
_u.SetDegradedCount(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// AddDegradedCount adds value to the "degraded_count" field.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) AddDegradedCount(v int) *ChannelMonitorDailyRollupUpdateOne {
|
||||
_u.mutation.AddDegradedCount(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetFailedCount sets the "failed_count" field.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) SetFailedCount(v int) *ChannelMonitorDailyRollupUpdateOne {
|
||||
_u.mutation.ResetFailedCount()
|
||||
_u.mutation.SetFailedCount(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableFailedCount sets the "failed_count" field if the given value is not nil.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) SetNillableFailedCount(v *int) *ChannelMonitorDailyRollupUpdateOne {
|
||||
if v != nil {
|
||||
_u.SetFailedCount(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// AddFailedCount adds value to the "failed_count" field.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) AddFailedCount(v int) *ChannelMonitorDailyRollupUpdateOne {
|
||||
_u.mutation.AddFailedCount(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetErrorCount sets the "error_count" field.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) SetErrorCount(v int) *ChannelMonitorDailyRollupUpdateOne {
|
||||
_u.mutation.ResetErrorCount()
|
||||
_u.mutation.SetErrorCount(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableErrorCount sets the "error_count" field if the given value is not nil.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) SetNillableErrorCount(v *int) *ChannelMonitorDailyRollupUpdateOne {
|
||||
if v != nil {
|
||||
_u.SetErrorCount(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// AddErrorCount adds value to the "error_count" field.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) AddErrorCount(v int) *ChannelMonitorDailyRollupUpdateOne {
|
||||
_u.mutation.AddErrorCount(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetSumLatencyMs sets the "sum_latency_ms" field.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) SetSumLatencyMs(v int64) *ChannelMonitorDailyRollupUpdateOne {
|
||||
_u.mutation.ResetSumLatencyMs()
|
||||
_u.mutation.SetSumLatencyMs(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableSumLatencyMs sets the "sum_latency_ms" field if the given value is not nil.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) SetNillableSumLatencyMs(v *int64) *ChannelMonitorDailyRollupUpdateOne {
|
||||
if v != nil {
|
||||
_u.SetSumLatencyMs(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// AddSumLatencyMs adds value to the "sum_latency_ms" field.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) AddSumLatencyMs(v int64) *ChannelMonitorDailyRollupUpdateOne {
|
||||
_u.mutation.AddSumLatencyMs(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetCountLatency sets the "count_latency" field.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) SetCountLatency(v int) *ChannelMonitorDailyRollupUpdateOne {
|
||||
_u.mutation.ResetCountLatency()
|
||||
_u.mutation.SetCountLatency(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableCountLatency sets the "count_latency" field if the given value is not nil.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) SetNillableCountLatency(v *int) *ChannelMonitorDailyRollupUpdateOne {
|
||||
if v != nil {
|
||||
_u.SetCountLatency(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// AddCountLatency adds value to the "count_latency" field.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) AddCountLatency(v int) *ChannelMonitorDailyRollupUpdateOne {
|
||||
_u.mutation.AddCountLatency(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetSumPingLatencyMs sets the "sum_ping_latency_ms" field.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) SetSumPingLatencyMs(v int64) *ChannelMonitorDailyRollupUpdateOne {
|
||||
_u.mutation.ResetSumPingLatencyMs()
|
||||
_u.mutation.SetSumPingLatencyMs(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableSumPingLatencyMs sets the "sum_ping_latency_ms" field if the given value is not nil.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) SetNillableSumPingLatencyMs(v *int64) *ChannelMonitorDailyRollupUpdateOne {
|
||||
if v != nil {
|
||||
_u.SetSumPingLatencyMs(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// AddSumPingLatencyMs adds value to the "sum_ping_latency_ms" field.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) AddSumPingLatencyMs(v int64) *ChannelMonitorDailyRollupUpdateOne {
|
||||
_u.mutation.AddSumPingLatencyMs(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetCountPingLatency sets the "count_ping_latency" field.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) SetCountPingLatency(v int) *ChannelMonitorDailyRollupUpdateOne {
|
||||
_u.mutation.ResetCountPingLatency()
|
||||
_u.mutation.SetCountPingLatency(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableCountPingLatency sets the "count_ping_latency" field if the given value is not nil.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) SetNillableCountPingLatency(v *int) *ChannelMonitorDailyRollupUpdateOne {
|
||||
if v != nil {
|
||||
_u.SetCountPingLatency(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// AddCountPingLatency adds value to the "count_ping_latency" field.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) AddCountPingLatency(v int) *ChannelMonitorDailyRollupUpdateOne {
|
||||
_u.mutation.AddCountPingLatency(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetComputedAt sets the "computed_at" field.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) SetComputedAt(v time.Time) *ChannelMonitorDailyRollupUpdateOne {
|
||||
_u.mutation.SetComputedAt(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetMonitor sets the "monitor" edge to the ChannelMonitor entity.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) SetMonitor(v *ChannelMonitor) *ChannelMonitorDailyRollupUpdateOne {
|
||||
return _u.SetMonitorID(v.ID)
|
||||
}
|
||||
|
||||
// Mutation returns the ChannelMonitorDailyRollupMutation object of the builder.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) Mutation() *ChannelMonitorDailyRollupMutation {
|
||||
return _u.mutation
|
||||
}
|
||||
|
||||
// ClearMonitor clears the "monitor" edge to the ChannelMonitor entity.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) ClearMonitor() *ChannelMonitorDailyRollupUpdateOne {
|
||||
_u.mutation.ClearMonitor()
|
||||
return _u
|
||||
}
|
||||
|
||||
// Where appends a list predicates to the ChannelMonitorDailyRollupUpdate builder.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) Where(ps ...predicate.ChannelMonitorDailyRollup) *ChannelMonitorDailyRollupUpdateOne {
|
||||
_u.mutation.Where(ps...)
|
||||
return _u
|
||||
}
|
||||
|
||||
// Select allows selecting one or more fields (columns) of the returned entity.
|
||||
// The default is selecting all fields defined in the entity schema.
|
||||
func (_u *ChannelMonitorDailyRollupUpdateOne) Select(field string, fields ...string) *ChannelMonitorDailyRollupUpdateOne {
|
||||
_u.fields = append([]string{field}, fields...)
|
||||
return _u
|
||||
}
|
||||
|
||||
// Save executes the query and returns the updated ChannelMonitorDailyRollup entity.
// It first applies schema update-defaults, then runs the mutation through any
// registered hooks before hitting the database via sqlSave.
func (_u *ChannelMonitorDailyRollupUpdateOne) Save(ctx context.Context) (*ChannelMonitorDailyRollup, error) {
	_u.defaults()
	return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks)
}

// SaveX is like Save, but panics if an error occurs.
func (_u *ChannelMonitorDailyRollupUpdateOne) SaveX(ctx context.Context) *ChannelMonitorDailyRollup {
	node, err := _u.Save(ctx)
	if err != nil {
		panic(err)
	}
	return node
}

// Exec executes the query on the entity.
// It is Save with the returned entity discarded.
func (_u *ChannelMonitorDailyRollupUpdateOne) Exec(ctx context.Context) error {
	_, err := _u.Save(ctx)
	return err
}

// ExecX is like Exec, but panics if an error occurs.
func (_u *ChannelMonitorDailyRollupUpdateOne) ExecX(ctx context.Context) {
	if err := _u.Exec(ctx); err != nil {
		panic(err)
	}
}
|
||||
|
||||
// defaults sets the default values of the builder before save.
// If the caller did not set computed_at explicitly, it is stamped with the
// schema's UpdateDefault (presumably a "now" function — defined in the
// channelmonitordailyrollup package) so every save refreshes the timestamp.
func (_u *ChannelMonitorDailyRollupUpdateOne) defaults() {
	if _, ok := _u.mutation.ComputedAt(); !ok {
		v := channelmonitordailyrollup.UpdateDefaultComputedAt()
		_u.mutation.SetComputedAt(v)
	}
}
|
||||
|
||||
// check runs all checks and user-defined validators on the builder.
// It validates the "model" field (when set) and rejects clearing the
// required unique "monitor" edge unless a replacement is assigned.
func (_u *ChannelMonitorDailyRollupUpdateOne) check() error {
	if v, ok := _u.mutation.Model(); ok {
		if err := channelmonitordailyrollup.ModelValidator(v); err != nil {
			return &ValidationError{Name: "model", err: fmt.Errorf(`ent: validator failed for field "ChannelMonitorDailyRollup.model": %w`, err)}
		}
	}
	// Clearing the edge is only legal when no new monitor IDs are being set;
	// the mutation carries the IDs that would re-link the edge.
	if _u.mutation.MonitorCleared() && len(_u.mutation.MonitorIDs()) > 0 {
		return errors.New(`ent: clearing a required unique edge "ChannelMonitorDailyRollup.monitor"`)
	}
	return nil
}
|
||||
|
||||
// sqlSave translates the accumulated mutation into an UPDATE spec and executes
// it through sqlgraph, returning the updated entity. The flow is:
// validate -> build update spec (id, selected columns, predicates, field
// set/add ops, edge clear/add) -> UpdateNode -> map driver errors to ent errors.
func (_u *ChannelMonitorDailyRollupUpdateOne) sqlSave(ctx context.Context) (_node *ChannelMonitorDailyRollup, err error) {
	if err := _u.check(); err != nil {
		return _node, err
	}
	_spec := sqlgraph.NewUpdateSpec(channelmonitordailyrollup.Table, channelmonitordailyrollup.Columns, sqlgraph.NewFieldSpec(channelmonitordailyrollup.FieldID, field.TypeInt64))
	// A one-row update requires the target id to be present on the mutation.
	id, ok := _u.mutation.ID()
	if !ok {
		return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "ChannelMonitorDailyRollup.id" for update`)}
	}
	_spec.Node.ID.Value = id
	// Honor Select(): restrict returned columns, always keeping the id column
	// and rejecting unknown column names.
	if fields := _u.fields; len(fields) > 0 {
		_spec.Node.Columns = make([]string, 0, len(fields))
		_spec.Node.Columns = append(_spec.Node.Columns, channelmonitordailyrollup.FieldID)
		for _, f := range fields {
			if !channelmonitordailyrollup.ValidColumn(f) {
				return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
			}
			if f != channelmonitordailyrollup.FieldID {
				_spec.Node.Columns = append(_spec.Node.Columns, f)
			}
		}
	}
	// Apply any Where() predicates on top of the id match.
	if ps := _u.mutation.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	// Scalar field mutations: SetField overwrites, AddField emits "col = col + v"
	// for the numeric counters below.
	if value, ok := _u.mutation.Model(); ok {
		_spec.SetField(channelmonitordailyrollup.FieldModel, field.TypeString, value)
	}
	if value, ok := _u.mutation.BucketDate(); ok {
		_spec.SetField(channelmonitordailyrollup.FieldBucketDate, field.TypeTime, value)
	}
	if value, ok := _u.mutation.TotalChecks(); ok {
		_spec.SetField(channelmonitordailyrollup.FieldTotalChecks, field.TypeInt, value)
	}
	if value, ok := _u.mutation.AddedTotalChecks(); ok {
		_spec.AddField(channelmonitordailyrollup.FieldTotalChecks, field.TypeInt, value)
	}
	if value, ok := _u.mutation.OkCount(); ok {
		_spec.SetField(channelmonitordailyrollup.FieldOkCount, field.TypeInt, value)
	}
	if value, ok := _u.mutation.AddedOkCount(); ok {
		_spec.AddField(channelmonitordailyrollup.FieldOkCount, field.TypeInt, value)
	}
	if value, ok := _u.mutation.OperationalCount(); ok {
		_spec.SetField(channelmonitordailyrollup.FieldOperationalCount, field.TypeInt, value)
	}
	if value, ok := _u.mutation.AddedOperationalCount(); ok {
		_spec.AddField(channelmonitordailyrollup.FieldOperationalCount, field.TypeInt, value)
	}
	if value, ok := _u.mutation.DegradedCount(); ok {
		_spec.SetField(channelmonitordailyrollup.FieldDegradedCount, field.TypeInt, value)
	}
	if value, ok := _u.mutation.AddedDegradedCount(); ok {
		_spec.AddField(channelmonitordailyrollup.FieldDegradedCount, field.TypeInt, value)
	}
	if value, ok := _u.mutation.FailedCount(); ok {
		_spec.SetField(channelmonitordailyrollup.FieldFailedCount, field.TypeInt, value)
	}
	if value, ok := _u.mutation.AddedFailedCount(); ok {
		_spec.AddField(channelmonitordailyrollup.FieldFailedCount, field.TypeInt, value)
	}
	if value, ok := _u.mutation.ErrorCount(); ok {
		_spec.SetField(channelmonitordailyrollup.FieldErrorCount, field.TypeInt, value)
	}
	if value, ok := _u.mutation.AddedErrorCount(); ok {
		_spec.AddField(channelmonitordailyrollup.FieldErrorCount, field.TypeInt, value)
	}
	if value, ok := _u.mutation.SumLatencyMs(); ok {
		_spec.SetField(channelmonitordailyrollup.FieldSumLatencyMs, field.TypeInt64, value)
	}
	if value, ok := _u.mutation.AddedSumLatencyMs(); ok {
		_spec.AddField(channelmonitordailyrollup.FieldSumLatencyMs, field.TypeInt64, value)
	}
	if value, ok := _u.mutation.CountLatency(); ok {
		_spec.SetField(channelmonitordailyrollup.FieldCountLatency, field.TypeInt, value)
	}
	if value, ok := _u.mutation.AddedCountLatency(); ok {
		_spec.AddField(channelmonitordailyrollup.FieldCountLatency, field.TypeInt, value)
	}
	if value, ok := _u.mutation.SumPingLatencyMs(); ok {
		_spec.SetField(channelmonitordailyrollup.FieldSumPingLatencyMs, field.TypeInt64, value)
	}
	if value, ok := _u.mutation.AddedSumPingLatencyMs(); ok {
		_spec.AddField(channelmonitordailyrollup.FieldSumPingLatencyMs, field.TypeInt64, value)
	}
	if value, ok := _u.mutation.CountPingLatency(); ok {
		_spec.SetField(channelmonitordailyrollup.FieldCountPingLatency, field.TypeInt, value)
	}
	if value, ok := _u.mutation.AddedCountPingLatency(); ok {
		_spec.AddField(channelmonitordailyrollup.FieldCountPingLatency, field.TypeInt, value)
	}
	if value, ok := _u.mutation.ComputedAt(); ok {
		_spec.SetField(channelmonitordailyrollup.FieldComputedAt, field.TypeTime, value)
	}
	// Edge mutations: a cleared "monitor" edge goes into Edges.Clear, a newly
	// assigned one into Edges.Add. Both use the same M2O inverse edge shape.
	if _u.mutation.MonitorCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: true,
			Table:   channelmonitordailyrollup.MonitorTable,
			Columns: []string{channelmonitordailyrollup.MonitorColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(channelmonitor.FieldID, field.TypeInt64),
			},
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	if nodes := _u.mutation.MonitorIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: true,
			Table:   channelmonitordailyrollup.MonitorTable,
			Columns: []string{channelmonitordailyrollup.MonitorColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(channelmonitor.FieldID, field.TypeInt64),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Add = append(_spec.Edges.Add, edge)
	}
	// Wire the returned row back into an entity via its scan/assign hooks.
	_node = &ChannelMonitorDailyRollup{config: _u.config}
	_spec.Assign = _node.assignValues
	_spec.ScanValues = _node.scanValues
	if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil {
		if _, ok := err.(*sqlgraph.NotFoundError); ok {
			err = &NotFoundError{channelmonitordailyrollup.Label}
		} else if sqlgraph.IsConstraintError(err) {
			err = &ConstraintError{msg: err.Error(), wrap: err}
		}
		return nil, err
	}
	// Mark the mutation as consumed so it cannot be replayed by hooks.
	_u.mutation.done = true
	return _node, nil
}
|
||||
207
backend/ent/channelmonitorhistory.go
Normal file
@@ -0,0 +1,207 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package ent
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"entgo.io/ent"
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitor"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitorhistory"
|
||||
)
|
||||
|
||||
// ChannelMonitorHistory is the model entity for the ChannelMonitorHistory schema.
// It records a single health-check observation for a channel monitor.
type ChannelMonitorHistory struct {
	config `json:"-"`
	// ID of the ent.
	ID int64 `json:"id,omitempty"`
	// MonitorID holds the value of the "monitor_id" field.
	MonitorID int64 `json:"monitor_id,omitempty"`
	// Model holds the value of the "model" field.
	Model string `json:"model,omitempty"`
	// Status holds the value of the "status" field.
	Status channelmonitorhistory.Status `json:"status,omitempty"`
	// LatencyMs holds the value of the "latency_ms" field.
	// Pointer because the column is nullable; nil means no latency recorded.
	LatencyMs *int `json:"latency_ms,omitempty"`
	// PingLatencyMs holds the value of the "ping_latency_ms" field.
	// Pointer because the column is nullable; nil means no ping latency recorded.
	PingLatencyMs *int `json:"ping_latency_ms,omitempty"`
	// Message holds the value of the "message" field.
	Message string `json:"message,omitempty"`
	// CheckedAt holds the value of the "checked_at" field.
	CheckedAt time.Time `json:"checked_at,omitempty"`
	// Edges holds the relations/edges for other nodes in the graph.
	// The values are being populated by the ChannelMonitorHistoryQuery when eager-loading is set.
	Edges ChannelMonitorHistoryEdges `json:"edges"`
	// selectValues stores any extra columns selected via modifiers/order,
	// retrievable through Value(name).
	selectValues sql.SelectValues
}
|
||||
|
||||
// ChannelMonitorHistoryEdges holds the relations/edges for other nodes in the graph.
type ChannelMonitorHistoryEdges struct {
	// Monitor holds the value of the monitor edge.
	Monitor *ChannelMonitor `json:"monitor,omitempty"`
	// loadedTypes holds the information for reporting if a
	// type was loaded (or requested) in eager-loading or not.
	loadedTypes [1]bool
}

// MonitorOrErr returns the Monitor value or an error if the edge
// was not loaded in eager-loading, or loaded but was not found.
// The two error cases are distinguished: NotFoundError means the edge
// was eagerly requested but no row matched; NotLoadedError means the
// caller never requested eager-loading for this edge.
func (e ChannelMonitorHistoryEdges) MonitorOrErr() (*ChannelMonitor, error) {
	if e.Monitor != nil {
		return e.Monitor, nil
	} else if e.loadedTypes[0] {
		return nil, &NotFoundError{label: channelmonitor.Label}
	}
	return nil, &NotLoadedError{edge: "monitor"}
}
|
||||
|
||||
// scanValues returns the types for scanning values from sql.Rows.
// Each column is mapped to the matching sql.Null* scan target; columns not
// declared in the schema fall back to sql.UnknownType and are later routed
// into selectValues by assignValues.
func (*ChannelMonitorHistory) scanValues(columns []string) ([]any, error) {
	values := make([]any, len(columns))
	for i := range columns {
		switch columns[i] {
		case channelmonitorhistory.FieldID, channelmonitorhistory.FieldMonitorID, channelmonitorhistory.FieldLatencyMs, channelmonitorhistory.FieldPingLatencyMs:
			values[i] = new(sql.NullInt64)
		case channelmonitorhistory.FieldModel, channelmonitorhistory.FieldStatus, channelmonitorhistory.FieldMessage:
			values[i] = new(sql.NullString)
		case channelmonitorhistory.FieldCheckedAt:
			values[i] = new(sql.NullTime)
		default:
			values[i] = new(sql.UnknownType)
		}
	}
	return values, nil
}
|
||||
|
||||
// assignValues assigns the values that were returned from sql.Rows (after scanning)
// to the ChannelMonitorHistory fields. columns and values are parallel slices;
// nullable int columns (latency_ms, ping_latency_ms) become *int fields, and
// unknown columns are stored in selectValues for retrieval via Value.
func (_m *ChannelMonitorHistory) assignValues(columns []string, values []any) error {
	if m, n := len(values), len(columns); m < n {
		return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
	}
	for i := range columns {
		switch columns[i] {
		case channelmonitorhistory.FieldID:
			value, ok := values[i].(*sql.NullInt64)
			if !ok {
				return fmt.Errorf("unexpected type %T for field id", value)
			}
			_m.ID = int64(value.Int64)
		case channelmonitorhistory.FieldMonitorID:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field monitor_id", values[i])
			} else if value.Valid {
				_m.MonitorID = value.Int64
			}
		case channelmonitorhistory.FieldModel:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field model", values[i])
			} else if value.Valid {
				_m.Model = value.String
			}
		case channelmonitorhistory.FieldStatus:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field status", values[i])
			} else if value.Valid {
				_m.Status = channelmonitorhistory.Status(value.String)
			}
		case channelmonitorhistory.FieldLatencyMs:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field latency_ms", values[i])
			} else if value.Valid {
				// Nullable column: allocate only when a value is present.
				_m.LatencyMs = new(int)
				*_m.LatencyMs = int(value.Int64)
			}
		case channelmonitorhistory.FieldPingLatencyMs:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field ping_latency_ms", values[i])
			} else if value.Valid {
				// Nullable column: allocate only when a value is present.
				_m.PingLatencyMs = new(int)
				*_m.PingLatencyMs = int(value.Int64)
			}
		case channelmonitorhistory.FieldMessage:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field message", values[i])
			} else if value.Valid {
				_m.Message = value.String
			}
		case channelmonitorhistory.FieldCheckedAt:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field checked_at", values[i])
			} else if value.Valid {
				_m.CheckedAt = value.Time
			}
		default:
			_m.selectValues.Set(columns[i], values[i])
		}
	}
	return nil
}
|
||||
|
||||
// Value returns the ent.Value that was dynamically selected and assigned to the ChannelMonitorHistory.
// This includes values selected through modifiers, order, etc.
func (_m *ChannelMonitorHistory) Value(name string) (ent.Value, error) {
	return _m.selectValues.Get(name)
}
|
||||
|
||||
// QueryMonitor queries the "monitor" edge of the ChannelMonitorHistory entity.
func (_m *ChannelMonitorHistory) QueryMonitor() *ChannelMonitorQuery {
	return NewChannelMonitorHistoryClient(_m.config).QueryMonitor(_m)
}

// Update returns a builder for updating this ChannelMonitorHistory.
// Note that you need to call ChannelMonitorHistory.Unwrap() before calling this method if this ChannelMonitorHistory
// was returned from a transaction, and the transaction was committed or rolled back.
func (_m *ChannelMonitorHistory) Update() *ChannelMonitorHistoryUpdateOne {
	return NewChannelMonitorHistoryClient(_m.config).UpdateOne(_m)
}
|
||||
|
||||
// Unwrap unwraps the ChannelMonitorHistory entity that was returned from a transaction after it was closed,
// so that all future queries will be executed through the driver which created the transaction.
// Panics if the entity was not loaded through a transaction (programmer error).
func (_m *ChannelMonitorHistory) Unwrap() *ChannelMonitorHistory {
	_tx, ok := _m.config.driver.(*txDriver)
	if !ok {
		panic("ent: ChannelMonitorHistory is not a transactional entity")
	}
	_m.config.driver = _tx.drv
	return _m
}
|
||||
|
||||
// String implements the fmt.Stringer.
// Nil pointer fields (latency_ms, ping_latency_ms) print no value but their
// trailing separators are still written, matching ent's generated format.
func (_m *ChannelMonitorHistory) String() string {
	var builder strings.Builder
	builder.WriteString("ChannelMonitorHistory(")
	builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID))
	builder.WriteString("monitor_id=")
	builder.WriteString(fmt.Sprintf("%v", _m.MonitorID))
	builder.WriteString(", ")
	builder.WriteString("model=")
	builder.WriteString(_m.Model)
	builder.WriteString(", ")
	builder.WriteString("status=")
	builder.WriteString(fmt.Sprintf("%v", _m.Status))
	builder.WriteString(", ")
	if v := _m.LatencyMs; v != nil {
		builder.WriteString("latency_ms=")
		builder.WriteString(fmt.Sprintf("%v", *v))
	}
	builder.WriteString(", ")
	if v := _m.PingLatencyMs; v != nil {
		builder.WriteString("ping_latency_ms=")
		builder.WriteString(fmt.Sprintf("%v", *v))
	}
	builder.WriteString(", ")
	builder.WriteString("message=")
	builder.WriteString(_m.Message)
	builder.WriteString(", ")
	builder.WriteString("checked_at=")
	builder.WriteString(_m.CheckedAt.Format(time.ANSIC))
	builder.WriteByte(')')
	return builder.String()
}

// ChannelMonitorHistories is a parsable slice of ChannelMonitorHistory.
type ChannelMonitorHistories []*ChannelMonitorHistory
|
||||
158
backend/ent/channelmonitorhistory/channelmonitorhistory.go
Normal file
@@ -0,0 +1,158 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package channelmonitorhistory
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
)
|
||||
|
||||
// Schema metadata for the channel_monitor_histories table: column names,
// edge names, and the table/column wiring for the "monitor" M2O edge.
const (
	// Label holds the string label denoting the channelmonitorhistory type in the database.
	Label = "channel_monitor_history"
	// FieldID holds the string denoting the id field in the database.
	FieldID = "id"
	// FieldMonitorID holds the string denoting the monitor_id field in the database.
	FieldMonitorID = "monitor_id"
	// FieldModel holds the string denoting the model field in the database.
	FieldModel = "model"
	// FieldStatus holds the string denoting the status field in the database.
	FieldStatus = "status"
	// FieldLatencyMs holds the string denoting the latency_ms field in the database.
	FieldLatencyMs = "latency_ms"
	// FieldPingLatencyMs holds the string denoting the ping_latency_ms field in the database.
	FieldPingLatencyMs = "ping_latency_ms"
	// FieldMessage holds the string denoting the message field in the database.
	FieldMessage = "message"
	// FieldCheckedAt holds the string denoting the checked_at field in the database.
	FieldCheckedAt = "checked_at"
	// EdgeMonitor holds the string denoting the monitor edge name in mutations.
	EdgeMonitor = "monitor"
	// Table holds the table name of the channelmonitorhistory in the database.
	Table = "channel_monitor_histories"
	// MonitorTable is the table that holds the monitor relation/edge.
	MonitorTable = "channel_monitor_histories"
	// MonitorInverseTable is the table name for the ChannelMonitor entity.
	// It exists in this package in order to avoid circular dependency with the "channelmonitor" package.
	MonitorInverseTable = "channel_monitors"
	// MonitorColumn is the table column denoting the monitor relation/edge.
	MonitorColumn = "monitor_id"
)

// Columns holds all SQL columns for channelmonitorhistory fields.
var Columns = []string{
	FieldID,
	FieldMonitorID,
	FieldModel,
	FieldStatus,
	FieldLatencyMs,
	FieldPingLatencyMs,
	FieldMessage,
	FieldCheckedAt,
}
|
||||
|
||||
// ValidColumn reports if the column name is valid (part of the table columns).
|
||||
func ValidColumn(column string) bool {
|
||||
for i := range Columns {
|
||||
if column == Columns[i] {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Hooks populated by the runtime package from the schema definition; the
// builders consult them before save.
var (
	// ModelValidator is a validator for the "model" field. It is called by the builders before save.
	ModelValidator func(string) error
	// DefaultMessage holds the default value on creation for the "message" field.
	DefaultMessage string
	// MessageValidator is a validator for the "message" field. It is called by the builders before save.
	MessageValidator func(string) error
	// DefaultCheckedAt holds the default value on creation for the "checked_at" field.
	DefaultCheckedAt func() time.Time
)
|
||||
|
||||
// Status defines the type for the "status" enum field.
type Status string

// Status values.
const (
	StatusOperational Status = "operational"
	StatusDegraded    Status = "degraded"
	StatusFailed      Status = "failed"
	StatusError       Status = "error"
)

// String returns the raw string form of the status.
func (s Status) String() string {
	return string(s)
}

// StatusValidator is a validator for the "status" field enum values. It is called by the builders before save.
func StatusValidator(s Status) error {
	allowed := map[Status]struct{}{
		StatusOperational: {},
		StatusDegraded:    {},
		StatusFailed:      {},
		StatusError:       {},
	}
	if _, ok := allowed[s]; ok {
		return nil
	}
	return fmt.Errorf("channelmonitorhistory: invalid enum value for status field: %q", s)
}
|
||||
|
||||
// OrderOption defines the ordering options for the ChannelMonitorHistory queries.
type OrderOption func(*sql.Selector)

// ByID orders the results by the id field.
func ByID(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldID, opts...).ToFunc()
}

// ByMonitorID orders the results by the monitor_id field.
func ByMonitorID(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldMonitorID, opts...).ToFunc()
}

// ByModel orders the results by the model field.
func ByModel(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldModel, opts...).ToFunc()
}

// ByStatus orders the results by the status field.
func ByStatus(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldStatus, opts...).ToFunc()
}

// ByLatencyMs orders the results by the latency_ms field.
func ByLatencyMs(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldLatencyMs, opts...).ToFunc()
}

// ByPingLatencyMs orders the results by the ping_latency_ms field.
func ByPingLatencyMs(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldPingLatencyMs, opts...).ToFunc()
}

// ByMessage orders the results by the message field.
func ByMessage(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldMessage, opts...).ToFunc()
}

// ByCheckedAt orders the results by the checked_at field.
func ByCheckedAt(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldCheckedAt, opts...).ToFunc()
}

// ByMonitorField orders the results by monitor field.
// The ordering term is applied to a column of the joined ChannelMonitor row.
func ByMonitorField(field string, opts ...sql.OrderTermOption) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborTerms(s, newMonitorStep(), sql.OrderByField(field, opts...))
	}
}
|
||||
// newMonitorStep builds the sqlgraph traversal step for the "monitor" edge:
// an inverse many-to-one hop from this table's monitor_id to channel_monitors.id.
func newMonitorStep() *sqlgraph.Step {
	return sqlgraph.NewStep(
		sqlgraph.From(Table, FieldID),
		sqlgraph.To(MonitorInverseTable, FieldID),
		sqlgraph.Edge(sqlgraph.M2O, true, MonitorTable, MonitorColumn),
	)
}
|
||||
444
backend/ent/channelmonitorhistory/where.go
Normal file
@@ -0,0 +1,444 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package channelmonitorhistory
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||
)
|
||||
|
||||
// Predicates over the "id" column. Each returns a predicate that appends a
// single comparison to the query's WHERE clause.

// ID filters vertices based on their ID field.
func ID(id int64) predicate.ChannelMonitorHistory {
	return predicate.ChannelMonitorHistory(sql.FieldEQ(FieldID, id))
}

// IDEQ applies the EQ predicate on the ID field.
func IDEQ(id int64) predicate.ChannelMonitorHistory {
	return predicate.ChannelMonitorHistory(sql.FieldEQ(FieldID, id))
}

// IDNEQ applies the NEQ predicate on the ID field.
func IDNEQ(id int64) predicate.ChannelMonitorHistory {
	return predicate.ChannelMonitorHistory(sql.FieldNEQ(FieldID, id))
}

// IDIn applies the In predicate on the ID field.
func IDIn(ids ...int64) predicate.ChannelMonitorHistory {
	return predicate.ChannelMonitorHistory(sql.FieldIn(FieldID, ids...))
}

// IDNotIn applies the NotIn predicate on the ID field.
func IDNotIn(ids ...int64) predicate.ChannelMonitorHistory {
	return predicate.ChannelMonitorHistory(sql.FieldNotIn(FieldID, ids...))
}

// IDGT applies the GT predicate on the ID field.
func IDGT(id int64) predicate.ChannelMonitorHistory {
	return predicate.ChannelMonitorHistory(sql.FieldGT(FieldID, id))
}

// IDGTE applies the GTE predicate on the ID field.
func IDGTE(id int64) predicate.ChannelMonitorHistory {
	return predicate.ChannelMonitorHistory(sql.FieldGTE(FieldID, id))
}

// IDLT applies the LT predicate on the ID field.
func IDLT(id int64) predicate.ChannelMonitorHistory {
	return predicate.ChannelMonitorHistory(sql.FieldLT(FieldID, id))
}

// IDLTE applies the LTE predicate on the ID field.
func IDLTE(id int64) predicate.ChannelMonitorHistory {
	return predicate.ChannelMonitorHistory(sql.FieldLTE(FieldID, id))
}
|
||||
|
||||
// Shorthand equality predicates, one per scalar field; each is identical to
// the corresponding *EQ function.

// MonitorID applies equality check predicate on the "monitor_id" field. It's identical to MonitorIDEQ.
func MonitorID(v int64) predicate.ChannelMonitorHistory {
	return predicate.ChannelMonitorHistory(sql.FieldEQ(FieldMonitorID, v))
}

// Model applies equality check predicate on the "model" field. It's identical to ModelEQ.
func Model(v string) predicate.ChannelMonitorHistory {
	return predicate.ChannelMonitorHistory(sql.FieldEQ(FieldModel, v))
}

// LatencyMs applies equality check predicate on the "latency_ms" field. It's identical to LatencyMsEQ.
func LatencyMs(v int) predicate.ChannelMonitorHistory {
	return predicate.ChannelMonitorHistory(sql.FieldEQ(FieldLatencyMs, v))
}

// PingLatencyMs applies equality check predicate on the "ping_latency_ms" field. It's identical to PingLatencyMsEQ.
func PingLatencyMs(v int) predicate.ChannelMonitorHistory {
	return predicate.ChannelMonitorHistory(sql.FieldEQ(FieldPingLatencyMs, v))
}

// Message applies equality check predicate on the "message" field. It's identical to MessageEQ.
func Message(v string) predicate.ChannelMonitorHistory {
	return predicate.ChannelMonitorHistory(sql.FieldEQ(FieldMessage, v))
}

// CheckedAt applies equality check predicate on the "checked_at" field. It's identical to CheckedAtEQ.
func CheckedAt(v time.Time) predicate.ChannelMonitorHistory {
	return predicate.ChannelMonitorHistory(sql.FieldEQ(FieldCheckedAt, v))
}
|
||||
|
||||
// Predicates over the "monitor_id" foreign-key column.

// MonitorIDEQ applies the EQ predicate on the "monitor_id" field.
func MonitorIDEQ(v int64) predicate.ChannelMonitorHistory {
	return predicate.ChannelMonitorHistory(sql.FieldEQ(FieldMonitorID, v))
}

// MonitorIDNEQ applies the NEQ predicate on the "monitor_id" field.
func MonitorIDNEQ(v int64) predicate.ChannelMonitorHistory {
	return predicate.ChannelMonitorHistory(sql.FieldNEQ(FieldMonitorID, v))
}

// MonitorIDIn applies the In predicate on the "monitor_id" field.
func MonitorIDIn(vs ...int64) predicate.ChannelMonitorHistory {
	return predicate.ChannelMonitorHistory(sql.FieldIn(FieldMonitorID, vs...))
}

// MonitorIDNotIn applies the NotIn predicate on the "monitor_id" field.
func MonitorIDNotIn(vs ...int64) predicate.ChannelMonitorHistory {
	return predicate.ChannelMonitorHistory(sql.FieldNotIn(FieldMonitorID, vs...))
}
|
||||
|
||||
// Predicates over the "model" string column, including substring and
// case-insensitive variants.

// ModelEQ applies the EQ predicate on the "model" field.
func ModelEQ(v string) predicate.ChannelMonitorHistory {
	return predicate.ChannelMonitorHistory(sql.FieldEQ(FieldModel, v))
}

// ModelNEQ applies the NEQ predicate on the "model" field.
func ModelNEQ(v string) predicate.ChannelMonitorHistory {
	return predicate.ChannelMonitorHistory(sql.FieldNEQ(FieldModel, v))
}

// ModelIn applies the In predicate on the "model" field.
func ModelIn(vs ...string) predicate.ChannelMonitorHistory {
	return predicate.ChannelMonitorHistory(sql.FieldIn(FieldModel, vs...))
}

// ModelNotIn applies the NotIn predicate on the "model" field.
func ModelNotIn(vs ...string) predicate.ChannelMonitorHistory {
	return predicate.ChannelMonitorHistory(sql.FieldNotIn(FieldModel, vs...))
}

// ModelGT applies the GT predicate on the "model" field.
func ModelGT(v string) predicate.ChannelMonitorHistory {
	return predicate.ChannelMonitorHistory(sql.FieldGT(FieldModel, v))
}

// ModelGTE applies the GTE predicate on the "model" field.
func ModelGTE(v string) predicate.ChannelMonitorHistory {
	return predicate.ChannelMonitorHistory(sql.FieldGTE(FieldModel, v))
}

// ModelLT applies the LT predicate on the "model" field.
func ModelLT(v string) predicate.ChannelMonitorHistory {
	return predicate.ChannelMonitorHistory(sql.FieldLT(FieldModel, v))
}

// ModelLTE applies the LTE predicate on the "model" field.
func ModelLTE(v string) predicate.ChannelMonitorHistory {
	return predicate.ChannelMonitorHistory(sql.FieldLTE(FieldModel, v))
}

// ModelContains applies the Contains predicate on the "model" field.
func ModelContains(v string) predicate.ChannelMonitorHistory {
	return predicate.ChannelMonitorHistory(sql.FieldContains(FieldModel, v))
}

// ModelHasPrefix applies the HasPrefix predicate on the "model" field.
func ModelHasPrefix(v string) predicate.ChannelMonitorHistory {
	return predicate.ChannelMonitorHistory(sql.FieldHasPrefix(FieldModel, v))
}

// ModelHasSuffix applies the HasSuffix predicate on the "model" field.
func ModelHasSuffix(v string) predicate.ChannelMonitorHistory {
	return predicate.ChannelMonitorHistory(sql.FieldHasSuffix(FieldModel, v))
}

// ModelEqualFold applies the EqualFold predicate on the "model" field.
func ModelEqualFold(v string) predicate.ChannelMonitorHistory {
	return predicate.ChannelMonitorHistory(sql.FieldEqualFold(FieldModel, v))
}

// ModelContainsFold applies the ContainsFold predicate on the "model" field.
func ModelContainsFold(v string) predicate.ChannelMonitorHistory {
	return predicate.ChannelMonitorHistory(sql.FieldContainsFold(FieldModel, v))
}
|
||||
|
||||
// Predicates over the "status" enum column.

// StatusEQ applies the EQ predicate on the "status" field.
func StatusEQ(v Status) predicate.ChannelMonitorHistory {
	return predicate.ChannelMonitorHistory(sql.FieldEQ(FieldStatus, v))
}

// StatusNEQ applies the NEQ predicate on the "status" field.
func StatusNEQ(v Status) predicate.ChannelMonitorHistory {
	return predicate.ChannelMonitorHistory(sql.FieldNEQ(FieldStatus, v))
}

// StatusIn applies the In predicate on the "status" field.
func StatusIn(vs ...Status) predicate.ChannelMonitorHistory {
	return predicate.ChannelMonitorHistory(sql.FieldIn(FieldStatus, vs...))
}

// StatusNotIn applies the NotIn predicate on the "status" field.
func StatusNotIn(vs ...Status) predicate.ChannelMonitorHistory {
	return predicate.ChannelMonitorHistory(sql.FieldNotIn(FieldStatus, vs...))
}
|
||||
|
||||
// LatencyMsEQ applies the EQ predicate on the "latency_ms" field.
|
||||
func LatencyMsEQ(v int) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldEQ(FieldLatencyMs, v))
|
||||
}
|
||||
|
||||
// LatencyMsNEQ applies the NEQ predicate on the "latency_ms" field.
|
||||
func LatencyMsNEQ(v int) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldNEQ(FieldLatencyMs, v))
|
||||
}
|
||||
|
||||
// LatencyMsIn applies the In predicate on the "latency_ms" field.
|
||||
func LatencyMsIn(vs ...int) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldIn(FieldLatencyMs, vs...))
|
||||
}
|
||||
|
||||
// LatencyMsNotIn applies the NotIn predicate on the "latency_ms" field.
|
||||
func LatencyMsNotIn(vs ...int) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldNotIn(FieldLatencyMs, vs...))
|
||||
}
|
||||
|
||||
// LatencyMsGT applies the GT predicate on the "latency_ms" field.
|
||||
func LatencyMsGT(v int) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldGT(FieldLatencyMs, v))
|
||||
}
|
||||
|
||||
// LatencyMsGTE applies the GTE predicate on the "latency_ms" field.
|
||||
func LatencyMsGTE(v int) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldGTE(FieldLatencyMs, v))
|
||||
}
|
||||
|
||||
// LatencyMsLT applies the LT predicate on the "latency_ms" field.
|
||||
func LatencyMsLT(v int) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldLT(FieldLatencyMs, v))
|
||||
}
|
||||
|
||||
// LatencyMsLTE applies the LTE predicate on the "latency_ms" field.
|
||||
func LatencyMsLTE(v int) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldLTE(FieldLatencyMs, v))
|
||||
}
|
||||
|
||||
// LatencyMsIsNil applies the IsNil predicate on the "latency_ms" field.
|
||||
func LatencyMsIsNil() predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldIsNull(FieldLatencyMs))
|
||||
}
|
||||
|
||||
// LatencyMsNotNil applies the NotNil predicate on the "latency_ms" field.
|
||||
func LatencyMsNotNil() predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldNotNull(FieldLatencyMs))
|
||||
}
|
||||
|
||||
// PingLatencyMsEQ applies the EQ predicate on the "ping_latency_ms" field.
|
||||
func PingLatencyMsEQ(v int) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldEQ(FieldPingLatencyMs, v))
|
||||
}
|
||||
|
||||
// PingLatencyMsNEQ applies the NEQ predicate on the "ping_latency_ms" field.
|
||||
func PingLatencyMsNEQ(v int) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldNEQ(FieldPingLatencyMs, v))
|
||||
}
|
||||
|
||||
// PingLatencyMsIn applies the In predicate on the "ping_latency_ms" field.
|
||||
func PingLatencyMsIn(vs ...int) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldIn(FieldPingLatencyMs, vs...))
|
||||
}
|
||||
|
||||
// PingLatencyMsNotIn applies the NotIn predicate on the "ping_latency_ms" field.
|
||||
func PingLatencyMsNotIn(vs ...int) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldNotIn(FieldPingLatencyMs, vs...))
|
||||
}
|
||||
|
||||
// PingLatencyMsGT applies the GT predicate on the "ping_latency_ms" field.
|
||||
func PingLatencyMsGT(v int) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldGT(FieldPingLatencyMs, v))
|
||||
}
|
||||
|
||||
// PingLatencyMsGTE applies the GTE predicate on the "ping_latency_ms" field.
|
||||
func PingLatencyMsGTE(v int) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldGTE(FieldPingLatencyMs, v))
|
||||
}
|
||||
|
||||
// PingLatencyMsLT applies the LT predicate on the "ping_latency_ms" field.
|
||||
func PingLatencyMsLT(v int) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldLT(FieldPingLatencyMs, v))
|
||||
}
|
||||
|
||||
// PingLatencyMsLTE applies the LTE predicate on the "ping_latency_ms" field.
|
||||
func PingLatencyMsLTE(v int) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldLTE(FieldPingLatencyMs, v))
|
||||
}
|
||||
|
||||
// PingLatencyMsIsNil applies the IsNil predicate on the "ping_latency_ms" field.
|
||||
func PingLatencyMsIsNil() predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldIsNull(FieldPingLatencyMs))
|
||||
}
|
||||
|
||||
// PingLatencyMsNotNil applies the NotNil predicate on the "ping_latency_ms" field.
|
||||
func PingLatencyMsNotNil() predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldNotNull(FieldPingLatencyMs))
|
||||
}
|
||||
|
||||
// MessageEQ applies the EQ predicate on the "message" field.
|
||||
func MessageEQ(v string) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldEQ(FieldMessage, v))
|
||||
}
|
||||
|
||||
// MessageNEQ applies the NEQ predicate on the "message" field.
|
||||
func MessageNEQ(v string) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldNEQ(FieldMessage, v))
|
||||
}
|
||||
|
||||
// MessageIn applies the In predicate on the "message" field.
|
||||
func MessageIn(vs ...string) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldIn(FieldMessage, vs...))
|
||||
}
|
||||
|
||||
// MessageNotIn applies the NotIn predicate on the "message" field.
|
||||
func MessageNotIn(vs ...string) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldNotIn(FieldMessage, vs...))
|
||||
}
|
||||
|
||||
// MessageGT applies the GT predicate on the "message" field.
|
||||
func MessageGT(v string) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldGT(FieldMessage, v))
|
||||
}
|
||||
|
||||
// MessageGTE applies the GTE predicate on the "message" field.
|
||||
func MessageGTE(v string) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldGTE(FieldMessage, v))
|
||||
}
|
||||
|
||||
// MessageLT applies the LT predicate on the "message" field.
|
||||
func MessageLT(v string) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldLT(FieldMessage, v))
|
||||
}
|
||||
|
||||
// MessageLTE applies the LTE predicate on the "message" field.
|
||||
func MessageLTE(v string) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldLTE(FieldMessage, v))
|
||||
}
|
||||
|
||||
// MessageContains applies the Contains predicate on the "message" field.
|
||||
func MessageContains(v string) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldContains(FieldMessage, v))
|
||||
}
|
||||
|
||||
// MessageHasPrefix applies the HasPrefix predicate on the "message" field.
|
||||
func MessageHasPrefix(v string) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldHasPrefix(FieldMessage, v))
|
||||
}
|
||||
|
||||
// MessageHasSuffix applies the HasSuffix predicate on the "message" field.
|
||||
func MessageHasSuffix(v string) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldHasSuffix(FieldMessage, v))
|
||||
}
|
||||
|
||||
// MessageIsNil applies the IsNil predicate on the "message" field.
|
||||
func MessageIsNil() predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldIsNull(FieldMessage))
|
||||
}
|
||||
|
||||
// MessageNotNil applies the NotNil predicate on the "message" field.
|
||||
func MessageNotNil() predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldNotNull(FieldMessage))
|
||||
}
|
||||
|
||||
// MessageEqualFold applies the EqualFold predicate on the "message" field.
|
||||
func MessageEqualFold(v string) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldEqualFold(FieldMessage, v))
|
||||
}
|
||||
|
||||
// MessageContainsFold applies the ContainsFold predicate on the "message" field.
|
||||
func MessageContainsFold(v string) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldContainsFold(FieldMessage, v))
|
||||
}
|
||||
|
||||
// CheckedAtEQ applies the EQ predicate on the "checked_at" field.
|
||||
func CheckedAtEQ(v time.Time) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldEQ(FieldCheckedAt, v))
|
||||
}
|
||||
|
||||
// CheckedAtNEQ applies the NEQ predicate on the "checked_at" field.
|
||||
func CheckedAtNEQ(v time.Time) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldNEQ(FieldCheckedAt, v))
|
||||
}
|
||||
|
||||
// CheckedAtIn applies the In predicate on the "checked_at" field.
|
||||
func CheckedAtIn(vs ...time.Time) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldIn(FieldCheckedAt, vs...))
|
||||
}
|
||||
|
||||
// CheckedAtNotIn applies the NotIn predicate on the "checked_at" field.
|
||||
func CheckedAtNotIn(vs ...time.Time) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldNotIn(FieldCheckedAt, vs...))
|
||||
}
|
||||
|
||||
// CheckedAtGT applies the GT predicate on the "checked_at" field.
|
||||
func CheckedAtGT(v time.Time) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldGT(FieldCheckedAt, v))
|
||||
}
|
||||
|
||||
// CheckedAtGTE applies the GTE predicate on the "checked_at" field.
|
||||
func CheckedAtGTE(v time.Time) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldGTE(FieldCheckedAt, v))
|
||||
}
|
||||
|
||||
// CheckedAtLT applies the LT predicate on the "checked_at" field.
|
||||
func CheckedAtLT(v time.Time) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldLT(FieldCheckedAt, v))
|
||||
}
|
||||
|
||||
// CheckedAtLTE applies the LTE predicate on the "checked_at" field.
|
||||
func CheckedAtLTE(v time.Time) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.FieldLTE(FieldCheckedAt, v))
|
||||
}
|
||||
|
||||
// HasMonitor applies the HasEdge predicate on the "monitor" edge.
|
||||
func HasMonitor() predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(func(s *sql.Selector) {
|
||||
step := sqlgraph.NewStep(
|
||||
sqlgraph.From(Table, FieldID),
|
||||
sqlgraph.Edge(sqlgraph.M2O, true, MonitorTable, MonitorColumn),
|
||||
)
|
||||
sqlgraph.HasNeighbors(s, step)
|
||||
})
|
||||
}
|
||||
|
||||
// HasMonitorWith applies the HasEdge predicate on the "monitor" edge with a given conditions (other predicates).
|
||||
func HasMonitorWith(preds ...predicate.ChannelMonitor) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(func(s *sql.Selector) {
|
||||
step := newMonitorStep()
|
||||
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
|
||||
for _, p := range preds {
|
||||
p(s)
|
||||
}
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
// And groups predicates with the AND operator between them.
|
||||
func And(predicates ...predicate.ChannelMonitorHistory) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.AndPredicates(predicates...))
|
||||
}
|
||||
|
||||
// Or groups predicates with the OR operator between them.
|
||||
func Or(predicates ...predicate.ChannelMonitorHistory) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.OrPredicates(predicates...))
|
||||
}
|
||||
|
||||
// Not applies the not operator on the given predicate.
|
||||
func Not(p predicate.ChannelMonitorHistory) predicate.ChannelMonitorHistory {
|
||||
return predicate.ChannelMonitorHistory(sql.NotPredicates(p))
|
||||
}
|
||||
947
backend/ent/channelmonitorhistory_create.go
Normal file
@@ -0,0 +1,947 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package ent
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"entgo.io/ent/schema/field"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitor"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitorhistory"
|
||||
)
|
||||
|
||||
// ChannelMonitorHistoryCreate is the builder for creating a ChannelMonitorHistory entity.
|
||||
type ChannelMonitorHistoryCreate struct {
|
||||
config
|
||||
mutation *ChannelMonitorHistoryMutation
|
||||
hooks []Hook
|
||||
conflict []sql.ConflictOption
|
||||
}
|
||||
|
||||
// SetMonitorID sets the "monitor_id" field.
|
||||
func (_c *ChannelMonitorHistoryCreate) SetMonitorID(v int64) *ChannelMonitorHistoryCreate {
|
||||
_c.mutation.SetMonitorID(v)
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetModel sets the "model" field.
|
||||
func (_c *ChannelMonitorHistoryCreate) SetModel(v string) *ChannelMonitorHistoryCreate {
|
||||
_c.mutation.SetModel(v)
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetStatus sets the "status" field.
|
||||
func (_c *ChannelMonitorHistoryCreate) SetStatus(v channelmonitorhistory.Status) *ChannelMonitorHistoryCreate {
|
||||
_c.mutation.SetStatus(v)
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetLatencyMs sets the "latency_ms" field.
|
||||
func (_c *ChannelMonitorHistoryCreate) SetLatencyMs(v int) *ChannelMonitorHistoryCreate {
|
||||
_c.mutation.SetLatencyMs(v)
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetNillableLatencyMs sets the "latency_ms" field if the given value is not nil.
|
||||
func (_c *ChannelMonitorHistoryCreate) SetNillableLatencyMs(v *int) *ChannelMonitorHistoryCreate {
|
||||
if v != nil {
|
||||
_c.SetLatencyMs(*v)
|
||||
}
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetPingLatencyMs sets the "ping_latency_ms" field.
|
||||
func (_c *ChannelMonitorHistoryCreate) SetPingLatencyMs(v int) *ChannelMonitorHistoryCreate {
|
||||
_c.mutation.SetPingLatencyMs(v)
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetNillablePingLatencyMs sets the "ping_latency_ms" field if the given value is not nil.
|
||||
func (_c *ChannelMonitorHistoryCreate) SetNillablePingLatencyMs(v *int) *ChannelMonitorHistoryCreate {
|
||||
if v != nil {
|
||||
_c.SetPingLatencyMs(*v)
|
||||
}
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetMessage sets the "message" field.
|
||||
func (_c *ChannelMonitorHistoryCreate) SetMessage(v string) *ChannelMonitorHistoryCreate {
|
||||
_c.mutation.SetMessage(v)
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetNillableMessage sets the "message" field if the given value is not nil.
|
||||
func (_c *ChannelMonitorHistoryCreate) SetNillableMessage(v *string) *ChannelMonitorHistoryCreate {
|
||||
if v != nil {
|
||||
_c.SetMessage(*v)
|
||||
}
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetCheckedAt sets the "checked_at" field.
|
||||
func (_c *ChannelMonitorHistoryCreate) SetCheckedAt(v time.Time) *ChannelMonitorHistoryCreate {
|
||||
_c.mutation.SetCheckedAt(v)
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetNillableCheckedAt sets the "checked_at" field if the given value is not nil.
|
||||
func (_c *ChannelMonitorHistoryCreate) SetNillableCheckedAt(v *time.Time) *ChannelMonitorHistoryCreate {
|
||||
if v != nil {
|
||||
_c.SetCheckedAt(*v)
|
||||
}
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetMonitor sets the "monitor" edge to the ChannelMonitor entity.
|
||||
func (_c *ChannelMonitorHistoryCreate) SetMonitor(v *ChannelMonitor) *ChannelMonitorHistoryCreate {
|
||||
return _c.SetMonitorID(v.ID)
|
||||
}
|
||||
|
||||
// Mutation returns the ChannelMonitorHistoryMutation object of the builder.
|
||||
func (_c *ChannelMonitorHistoryCreate) Mutation() *ChannelMonitorHistoryMutation {
|
||||
return _c.mutation
|
||||
}
|
||||
|
||||
// Save creates the ChannelMonitorHistory in the database.
|
||||
func (_c *ChannelMonitorHistoryCreate) Save(ctx context.Context) (*ChannelMonitorHistory, error) {
|
||||
_c.defaults()
|
||||
return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks)
|
||||
}
|
||||
|
||||
// SaveX calls Save and panics if Save returns an error.
|
||||
func (_c *ChannelMonitorHistoryCreate) SaveX(ctx context.Context) *ChannelMonitorHistory {
|
||||
v, err := _c.Save(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// Exec executes the query.
|
||||
func (_c *ChannelMonitorHistoryCreate) Exec(ctx context.Context) error {
|
||||
_, err := _c.Save(ctx)
|
||||
return err
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (_c *ChannelMonitorHistoryCreate) ExecX(ctx context.Context) {
|
||||
if err := _c.Exec(ctx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// defaults sets the default values of the builder before save.
|
||||
func (_c *ChannelMonitorHistoryCreate) defaults() {
|
||||
if _, ok := _c.mutation.Message(); !ok {
|
||||
v := channelmonitorhistory.DefaultMessage
|
||||
_c.mutation.SetMessage(v)
|
||||
}
|
||||
if _, ok := _c.mutation.CheckedAt(); !ok {
|
||||
v := channelmonitorhistory.DefaultCheckedAt()
|
||||
_c.mutation.SetCheckedAt(v)
|
||||
}
|
||||
}
|
||||
|
||||
// check runs all checks and user-defined validators on the builder.
|
||||
func (_c *ChannelMonitorHistoryCreate) check() error {
|
||||
if _, ok := _c.mutation.MonitorID(); !ok {
|
||||
return &ValidationError{Name: "monitor_id", err: errors.New(`ent: missing required field "ChannelMonitorHistory.monitor_id"`)}
|
||||
}
|
||||
if _, ok := _c.mutation.Model(); !ok {
|
||||
return &ValidationError{Name: "model", err: errors.New(`ent: missing required field "ChannelMonitorHistory.model"`)}
|
||||
}
|
||||
if v, ok := _c.mutation.Model(); ok {
|
||||
if err := channelmonitorhistory.ModelValidator(v); err != nil {
|
||||
return &ValidationError{Name: "model", err: fmt.Errorf(`ent: validator failed for field "ChannelMonitorHistory.model": %w`, err)}
|
||||
}
|
||||
}
|
||||
if _, ok := _c.mutation.Status(); !ok {
|
||||
return &ValidationError{Name: "status", err: errors.New(`ent: missing required field "ChannelMonitorHistory.status"`)}
|
||||
}
|
||||
if v, ok := _c.mutation.Status(); ok {
|
||||
if err := channelmonitorhistory.StatusValidator(v); err != nil {
|
||||
return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "ChannelMonitorHistory.status": %w`, err)}
|
||||
}
|
||||
}
|
||||
if v, ok := _c.mutation.Message(); ok {
|
||||
if err := channelmonitorhistory.MessageValidator(v); err != nil {
|
||||
return &ValidationError{Name: "message", err: fmt.Errorf(`ent: validator failed for field "ChannelMonitorHistory.message": %w`, err)}
|
||||
}
|
||||
}
|
||||
if _, ok := _c.mutation.CheckedAt(); !ok {
|
||||
return &ValidationError{Name: "checked_at", err: errors.New(`ent: missing required field "ChannelMonitorHistory.checked_at"`)}
|
||||
}
|
||||
if len(_c.mutation.MonitorIDs()) == 0 {
|
||||
return &ValidationError{Name: "monitor", err: errors.New(`ent: missing required edge "ChannelMonitorHistory.monitor"`)}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (_c *ChannelMonitorHistoryCreate) sqlSave(ctx context.Context) (*ChannelMonitorHistory, error) {
|
||||
if err := _c.check(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
_node, _spec := _c.createSpec()
|
||||
if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil {
|
||||
if sqlgraph.IsConstraintError(err) {
|
||||
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
id := _spec.ID.Value.(int64)
|
||||
_node.ID = int64(id)
|
||||
_c.mutation.id = &_node.ID
|
||||
_c.mutation.done = true
|
||||
return _node, nil
|
||||
}
|
||||
|
||||
func (_c *ChannelMonitorHistoryCreate) createSpec() (*ChannelMonitorHistory, *sqlgraph.CreateSpec) {
|
||||
var (
|
||||
_node = &ChannelMonitorHistory{config: _c.config}
|
||||
_spec = sqlgraph.NewCreateSpec(channelmonitorhistory.Table, sqlgraph.NewFieldSpec(channelmonitorhistory.FieldID, field.TypeInt64))
|
||||
)
|
||||
_spec.OnConflict = _c.conflict
|
||||
if value, ok := _c.mutation.Model(); ok {
|
||||
_spec.SetField(channelmonitorhistory.FieldModel, field.TypeString, value)
|
||||
_node.Model = value
|
||||
}
|
||||
if value, ok := _c.mutation.Status(); ok {
|
||||
_spec.SetField(channelmonitorhistory.FieldStatus, field.TypeEnum, value)
|
||||
_node.Status = value
|
||||
}
|
||||
if value, ok := _c.mutation.LatencyMs(); ok {
|
||||
_spec.SetField(channelmonitorhistory.FieldLatencyMs, field.TypeInt, value)
|
||||
_node.LatencyMs = &value
|
||||
}
|
||||
if value, ok := _c.mutation.PingLatencyMs(); ok {
|
||||
_spec.SetField(channelmonitorhistory.FieldPingLatencyMs, field.TypeInt, value)
|
||||
_node.PingLatencyMs = &value
|
||||
}
|
||||
if value, ok := _c.mutation.Message(); ok {
|
||||
_spec.SetField(channelmonitorhistory.FieldMessage, field.TypeString, value)
|
||||
_node.Message = value
|
||||
}
|
||||
if value, ok := _c.mutation.CheckedAt(); ok {
|
||||
_spec.SetField(channelmonitorhistory.FieldCheckedAt, field.TypeTime, value)
|
||||
_node.CheckedAt = value
|
||||
}
|
||||
if nodes := _c.mutation.MonitorIDs(); len(nodes) > 0 {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
Rel: sqlgraph.M2O,
|
||||
Inverse: true,
|
||||
Table: channelmonitorhistory.MonitorTable,
|
||||
Columns: []string{channelmonitorhistory.MonitorColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: sqlgraph.NewFieldSpec(channelmonitor.FieldID, field.TypeInt64),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
edge.Target.Nodes = append(edge.Target.Nodes, k)
|
||||
}
|
||||
_node.MonitorID = nodes[0]
|
||||
_spec.Edges = append(_spec.Edges, edge)
|
||||
}
|
||||
return _node, _spec
|
||||
}
|
||||
|
||||
// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause
|
||||
// of the `INSERT` statement. For example:
|
||||
//
|
||||
// client.ChannelMonitorHistory.Create().
|
||||
// SetMonitorID(v).
|
||||
// OnConflict(
|
||||
// // Update the row with the new values
|
||||
// // the was proposed for insertion.
|
||||
// sql.ResolveWithNewValues(),
|
||||
// ).
|
||||
// // Override some of the fields with custom
|
||||
// // update values.
|
||||
// Update(func(u *ent.ChannelMonitorHistoryUpsert) {
|
||||
// SetMonitorID(v+v).
|
||||
// }).
|
||||
// Exec(ctx)
|
||||
func (_c *ChannelMonitorHistoryCreate) OnConflict(opts ...sql.ConflictOption) *ChannelMonitorHistoryUpsertOne {
|
||||
_c.conflict = opts
|
||||
return &ChannelMonitorHistoryUpsertOne{
|
||||
create: _c,
|
||||
}
|
||||
}
|
||||
|
||||
// OnConflictColumns calls `OnConflict` and configures the columns
|
||||
// as conflict target. Using this option is equivalent to using:
|
||||
//
|
||||
// client.ChannelMonitorHistory.Create().
|
||||
// OnConflict(sql.ConflictColumns(columns...)).
|
||||
// Exec(ctx)
|
||||
func (_c *ChannelMonitorHistoryCreate) OnConflictColumns(columns ...string) *ChannelMonitorHistoryUpsertOne {
|
||||
_c.conflict = append(_c.conflict, sql.ConflictColumns(columns...))
|
||||
return &ChannelMonitorHistoryUpsertOne{
|
||||
create: _c,
|
||||
}
|
||||
}
|
||||
|
||||
type (
|
||||
// ChannelMonitorHistoryUpsertOne is the builder for "upsert"-ing
|
||||
// one ChannelMonitorHistory node.
|
||||
ChannelMonitorHistoryUpsertOne struct {
|
||||
create *ChannelMonitorHistoryCreate
|
||||
}
|
||||
|
||||
// ChannelMonitorHistoryUpsert is the "OnConflict" setter.
|
||||
ChannelMonitorHistoryUpsert struct {
|
||||
*sql.UpdateSet
|
||||
}
|
||||
)
|
||||
|
||||
// SetMonitorID sets the "monitor_id" field.
|
||||
func (u *ChannelMonitorHistoryUpsert) SetMonitorID(v int64) *ChannelMonitorHistoryUpsert {
|
||||
u.Set(channelmonitorhistory.FieldMonitorID, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// UpdateMonitorID sets the "monitor_id" field to the value that was provided on create.
|
||||
func (u *ChannelMonitorHistoryUpsert) UpdateMonitorID() *ChannelMonitorHistoryUpsert {
|
||||
u.SetExcluded(channelmonitorhistory.FieldMonitorID)
|
||||
return u
|
||||
}
|
||||
|
||||
// SetModel sets the "model" field.
|
||||
func (u *ChannelMonitorHistoryUpsert) SetModel(v string) *ChannelMonitorHistoryUpsert {
|
||||
u.Set(channelmonitorhistory.FieldModel, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// UpdateModel sets the "model" field to the value that was provided on create.
|
||||
func (u *ChannelMonitorHistoryUpsert) UpdateModel() *ChannelMonitorHistoryUpsert {
|
||||
u.SetExcluded(channelmonitorhistory.FieldModel)
|
||||
return u
|
||||
}
|
||||
|
||||
// SetStatus sets the "status" field.
|
||||
func (u *ChannelMonitorHistoryUpsert) SetStatus(v channelmonitorhistory.Status) *ChannelMonitorHistoryUpsert {
|
||||
u.Set(channelmonitorhistory.FieldStatus, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// UpdateStatus sets the "status" field to the value that was provided on create.
|
||||
func (u *ChannelMonitorHistoryUpsert) UpdateStatus() *ChannelMonitorHistoryUpsert {
|
||||
u.SetExcluded(channelmonitorhistory.FieldStatus)
|
||||
return u
|
||||
}
|
||||
|
||||
// SetLatencyMs sets the "latency_ms" field.
|
||||
func (u *ChannelMonitorHistoryUpsert) SetLatencyMs(v int) *ChannelMonitorHistoryUpsert {
|
||||
u.Set(channelmonitorhistory.FieldLatencyMs, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// UpdateLatencyMs sets the "latency_ms" field to the value that was provided on create.
|
||||
func (u *ChannelMonitorHistoryUpsert) UpdateLatencyMs() *ChannelMonitorHistoryUpsert {
|
||||
u.SetExcluded(channelmonitorhistory.FieldLatencyMs)
|
||||
return u
|
||||
}
|
||||
|
||||
// AddLatencyMs adds v to the "latency_ms" field.
|
||||
func (u *ChannelMonitorHistoryUpsert) AddLatencyMs(v int) *ChannelMonitorHistoryUpsert {
|
||||
u.Add(channelmonitorhistory.FieldLatencyMs, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// ClearLatencyMs clears the value of the "latency_ms" field.
|
||||
func (u *ChannelMonitorHistoryUpsert) ClearLatencyMs() *ChannelMonitorHistoryUpsert {
|
||||
u.SetNull(channelmonitorhistory.FieldLatencyMs)
|
||||
return u
|
||||
}
|
||||
|
||||
// SetPingLatencyMs sets the "ping_latency_ms" field.
|
||||
func (u *ChannelMonitorHistoryUpsert) SetPingLatencyMs(v int) *ChannelMonitorHistoryUpsert {
|
||||
u.Set(channelmonitorhistory.FieldPingLatencyMs, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// UpdatePingLatencyMs sets the "ping_latency_ms" field to the value that was provided on create.
|
||||
func (u *ChannelMonitorHistoryUpsert) UpdatePingLatencyMs() *ChannelMonitorHistoryUpsert {
|
||||
u.SetExcluded(channelmonitorhistory.FieldPingLatencyMs)
|
||||
return u
|
||||
}
|
||||
|
||||
// AddPingLatencyMs adds v to the "ping_latency_ms" field.
|
||||
func (u *ChannelMonitorHistoryUpsert) AddPingLatencyMs(v int) *ChannelMonitorHistoryUpsert {
|
||||
u.Add(channelmonitorhistory.FieldPingLatencyMs, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// ClearPingLatencyMs clears the value of the "ping_latency_ms" field.
|
||||
func (u *ChannelMonitorHistoryUpsert) ClearPingLatencyMs() *ChannelMonitorHistoryUpsert {
|
||||
u.SetNull(channelmonitorhistory.FieldPingLatencyMs)
|
||||
return u
|
||||
}
|
||||
|
||||
// SetMessage sets the "message" field.
|
||||
func (u *ChannelMonitorHistoryUpsert) SetMessage(v string) *ChannelMonitorHistoryUpsert {
|
||||
u.Set(channelmonitorhistory.FieldMessage, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// UpdateMessage sets the "message" field to the value that was provided on create.
|
||||
func (u *ChannelMonitorHistoryUpsert) UpdateMessage() *ChannelMonitorHistoryUpsert {
|
||||
u.SetExcluded(channelmonitorhistory.FieldMessage)
|
||||
return u
|
||||
}
|
||||
|
||||
// ClearMessage clears the value of the "message" field.
|
||||
func (u *ChannelMonitorHistoryUpsert) ClearMessage() *ChannelMonitorHistoryUpsert {
|
||||
u.SetNull(channelmonitorhistory.FieldMessage)
|
||||
return u
|
||||
}
|
||||
|
||||
// SetCheckedAt sets the "checked_at" field.
|
||||
func (u *ChannelMonitorHistoryUpsert) SetCheckedAt(v time.Time) *ChannelMonitorHistoryUpsert {
|
||||
u.Set(channelmonitorhistory.FieldCheckedAt, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// UpdateCheckedAt sets the "checked_at" field to the value that was provided on create.
|
||||
func (u *ChannelMonitorHistoryUpsert) UpdateCheckedAt() *ChannelMonitorHistoryUpsert {
|
||||
u.SetExcluded(channelmonitorhistory.FieldCheckedAt)
|
||||
return u
|
||||
}
|
||||
|
||||
// UpdateNewValues updates the mutable fields using the new values that were set on create.
|
||||
// Using this option is equivalent to using:
|
||||
//
|
||||
// client.ChannelMonitorHistory.Create().
|
||||
// OnConflict(
|
||||
// sql.ResolveWithNewValues(),
|
||||
// ).
|
||||
// Exec(ctx)
|
||||
func (u *ChannelMonitorHistoryUpsertOne) UpdateNewValues() *ChannelMonitorHistoryUpsertOne {
|
||||
u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues())
|
||||
return u
|
||||
}
|
||||
|
||||
// Ignore sets each column to itself in case of conflict.
|
||||
// Using this option is equivalent to using:
|
||||
//
|
||||
// client.ChannelMonitorHistory.Create().
|
||||
// OnConflict(sql.ResolveWithIgnore()).
|
||||
// Exec(ctx)
|
||||
func (u *ChannelMonitorHistoryUpsertOne) Ignore() *ChannelMonitorHistoryUpsertOne {
|
||||
u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore())
|
||||
return u
|
||||
}
|
||||
|
||||
// DoNothing configures the conflict_action to `DO NOTHING`.
|
||||
// Supported only by SQLite and PostgreSQL.
|
||||
func (u *ChannelMonitorHistoryUpsertOne) DoNothing() *ChannelMonitorHistoryUpsertOne {
|
||||
u.create.conflict = append(u.create.conflict, sql.DoNothing())
|
||||
return u
|
||||
}
|
||||
|
||||
// Update allows overriding fields `UPDATE` values. See the ChannelMonitorHistoryCreate.OnConflict
|
||||
// documentation for more info.
|
||||
func (u *ChannelMonitorHistoryUpsertOne) Update(set func(*ChannelMonitorHistoryUpsert)) *ChannelMonitorHistoryUpsertOne {
|
||||
u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) {
|
||||
set(&ChannelMonitorHistoryUpsert{UpdateSet: update})
|
||||
}))
|
||||
return u
|
||||
}
|
||||
|
||||
// The helpers below are generated per schema field. Each one registers an
// UPDATE-clause resolver via Update: Set* assigns an explicit value, Update*
// re-applies the value proposed on create (the "excluded"/"VALUES" row),
// Add* increments a numeric column, and Clear* sets the column to NULL.

// SetMonitorID sets the "monitor_id" field.
func (u *ChannelMonitorHistoryUpsertOne) SetMonitorID(v int64) *ChannelMonitorHistoryUpsertOne {
	return u.Update(func(s *ChannelMonitorHistoryUpsert) {
		s.SetMonitorID(v)
	})
}

// UpdateMonitorID sets the "monitor_id" field to the value that was provided on create.
func (u *ChannelMonitorHistoryUpsertOne) UpdateMonitorID() *ChannelMonitorHistoryUpsertOne {
	return u.Update(func(s *ChannelMonitorHistoryUpsert) {
		s.UpdateMonitorID()
	})
}

// SetModel sets the "model" field.
func (u *ChannelMonitorHistoryUpsertOne) SetModel(v string) *ChannelMonitorHistoryUpsertOne {
	return u.Update(func(s *ChannelMonitorHistoryUpsert) {
		s.SetModel(v)
	})
}

// UpdateModel sets the "model" field to the value that was provided on create.
func (u *ChannelMonitorHistoryUpsertOne) UpdateModel() *ChannelMonitorHistoryUpsertOne {
	return u.Update(func(s *ChannelMonitorHistoryUpsert) {
		s.UpdateModel()
	})
}

// SetStatus sets the "status" field.
func (u *ChannelMonitorHistoryUpsertOne) SetStatus(v channelmonitorhistory.Status) *ChannelMonitorHistoryUpsertOne {
	return u.Update(func(s *ChannelMonitorHistoryUpsert) {
		s.SetStatus(v)
	})
}

// UpdateStatus sets the "status" field to the value that was provided on create.
func (u *ChannelMonitorHistoryUpsertOne) UpdateStatus() *ChannelMonitorHistoryUpsertOne {
	return u.Update(func(s *ChannelMonitorHistoryUpsert) {
		s.UpdateStatus()
	})
}

// SetLatencyMs sets the "latency_ms" field.
func (u *ChannelMonitorHistoryUpsertOne) SetLatencyMs(v int) *ChannelMonitorHistoryUpsertOne {
	return u.Update(func(s *ChannelMonitorHistoryUpsert) {
		s.SetLatencyMs(v)
	})
}

// AddLatencyMs adds v to the "latency_ms" field.
func (u *ChannelMonitorHistoryUpsertOne) AddLatencyMs(v int) *ChannelMonitorHistoryUpsertOne {
	return u.Update(func(s *ChannelMonitorHistoryUpsert) {
		s.AddLatencyMs(v)
	})
}

// UpdateLatencyMs sets the "latency_ms" field to the value that was provided on create.
func (u *ChannelMonitorHistoryUpsertOne) UpdateLatencyMs() *ChannelMonitorHistoryUpsertOne {
	return u.Update(func(s *ChannelMonitorHistoryUpsert) {
		s.UpdateLatencyMs()
	})
}

// ClearLatencyMs clears the value of the "latency_ms" field.
func (u *ChannelMonitorHistoryUpsertOne) ClearLatencyMs() *ChannelMonitorHistoryUpsertOne {
	return u.Update(func(s *ChannelMonitorHistoryUpsert) {
		s.ClearLatencyMs()
	})
}

// SetPingLatencyMs sets the "ping_latency_ms" field.
func (u *ChannelMonitorHistoryUpsertOne) SetPingLatencyMs(v int) *ChannelMonitorHistoryUpsertOne {
	return u.Update(func(s *ChannelMonitorHistoryUpsert) {
		s.SetPingLatencyMs(v)
	})
}

// AddPingLatencyMs adds v to the "ping_latency_ms" field.
func (u *ChannelMonitorHistoryUpsertOne) AddPingLatencyMs(v int) *ChannelMonitorHistoryUpsertOne {
	return u.Update(func(s *ChannelMonitorHistoryUpsert) {
		s.AddPingLatencyMs(v)
	})
}

// UpdatePingLatencyMs sets the "ping_latency_ms" field to the value that was provided on create.
func (u *ChannelMonitorHistoryUpsertOne) UpdatePingLatencyMs() *ChannelMonitorHistoryUpsertOne {
	return u.Update(func(s *ChannelMonitorHistoryUpsert) {
		s.UpdatePingLatencyMs()
	})
}

// ClearPingLatencyMs clears the value of the "ping_latency_ms" field.
func (u *ChannelMonitorHistoryUpsertOne) ClearPingLatencyMs() *ChannelMonitorHistoryUpsertOne {
	return u.Update(func(s *ChannelMonitorHistoryUpsert) {
		s.ClearPingLatencyMs()
	})
}

// SetMessage sets the "message" field.
func (u *ChannelMonitorHistoryUpsertOne) SetMessage(v string) *ChannelMonitorHistoryUpsertOne {
	return u.Update(func(s *ChannelMonitorHistoryUpsert) {
		s.SetMessage(v)
	})
}

// UpdateMessage sets the "message" field to the value that was provided on create.
func (u *ChannelMonitorHistoryUpsertOne) UpdateMessage() *ChannelMonitorHistoryUpsertOne {
	return u.Update(func(s *ChannelMonitorHistoryUpsert) {
		s.UpdateMessage()
	})
}

// ClearMessage clears the value of the "message" field.
func (u *ChannelMonitorHistoryUpsertOne) ClearMessage() *ChannelMonitorHistoryUpsertOne {
	return u.Update(func(s *ChannelMonitorHistoryUpsert) {
		s.ClearMessage()
	})
}

// SetCheckedAt sets the "checked_at" field.
func (u *ChannelMonitorHistoryUpsertOne) SetCheckedAt(v time.Time) *ChannelMonitorHistoryUpsertOne {
	return u.Update(func(s *ChannelMonitorHistoryUpsert) {
		s.SetCheckedAt(v)
	})
}

// UpdateCheckedAt sets the "checked_at" field to the value that was provided on create.
func (u *ChannelMonitorHistoryUpsertOne) UpdateCheckedAt() *ChannelMonitorHistoryUpsertOne {
	return u.Update(func(s *ChannelMonitorHistoryUpsert) {
		s.UpdateCheckedAt()
	})
}
|
||||
|
||||
// Exec executes the query.
func (u *ChannelMonitorHistoryUpsertOne) Exec(ctx context.Context) error {
	// Guard: OnConflict/OnConflictColumns must have been configured; without
	// conflict options the statement would degrade to a plain INSERT.
	if len(u.create.conflict) == 0 {
		return errors.New("ent: missing options for ChannelMonitorHistoryCreate.OnConflict")
	}
	return u.create.Exec(ctx)
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (u *ChannelMonitorHistoryUpsertOne) ExecX(ctx context.Context) {
|
||||
if err := u.create.Exec(ctx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// ID executes the UPSERT query and returns the inserted/updated ID.
// (Doc comment renamed to start with the method name per Go convention.)
func (u *ChannelMonitorHistoryUpsertOne) ID(ctx context.Context) (id int64, err error) {
	node, err := u.create.Save(ctx)
	if err != nil {
		// id is the zero value here; callers must check err first.
		return id, err
	}
	return node.ID, nil
}
|
||||
|
||||
// IDX is like ID, but panics if an error occurs.
func (u *ChannelMonitorHistoryUpsertOne) IDX(ctx context.Context) int64 {
	id, err := u.ID(ctx)
	if err != nil {
		panic(err)
	}
	return id
}
|
||||
|
||||
// ChannelMonitorHistoryCreateBulk is the builder for creating many ChannelMonitorHistory entities in bulk.
type ChannelMonitorHistoryCreateBulk struct {
	config
	// err holds a deferred construction error, surfaced by Save.
	err error
	// builders holds one create builder per entity in the batch.
	builders []*ChannelMonitorHistoryCreate
	// conflict holds the ON CONFLICT options shared by the whole batch.
	conflict []sql.ConflictOption
}
|
||||
|
||||
// Save creates the ChannelMonitorHistory entities in the database.
// It builds one mutator per builder and chains them so that every builder's
// hooks run, with the actual batch INSERT executed by the last mutator.
func (_c *ChannelMonitorHistoryCreateBulk) Save(ctx context.Context) ([]*ChannelMonitorHistory, error) {
	if _c.err != nil {
		return nil, _c.err
	}
	specs := make([]*sqlgraph.CreateSpec, len(_c.builders))
	nodes := make([]*ChannelMonitorHistory, len(_c.builders))
	mutators := make([]Mutator, len(_c.builders))
	for i := range _c.builders {
		func(i int, root context.Context) {
			builder := _c.builders[i]
			builder.defaults()
			var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
				mutation, ok := m.(*ChannelMonitorHistoryMutation)
				if !ok {
					return nil, fmt.Errorf("unexpected mutation type %T", m)
				}
				if err := builder.check(); err != nil {
					return nil, err
				}
				builder.mutation = mutation
				var err error
				nodes[i], specs[i] = builder.createSpec()
				if i < len(mutators)-1 {
					// Not the last builder: delegate to the next mutator in the chain.
					_, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation)
				} else {
					spec := &sqlgraph.BatchCreateSpec{Nodes: specs}
					spec.OnConflict = _c.conflict
					// Invoke the actual operation on the latest mutation in the chain.
					if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil {
						if sqlgraph.IsConstraintError(err) {
							err = &ConstraintError{msg: err.Error(), wrap: err}
						}
					}
				}
				if err != nil {
					return nil, err
				}
				// Propagate the database-assigned ID back into the node and mutation.
				mutation.id = &nodes[i].ID
				if specs[i].ID.Value != nil {
					id := specs[i].ID.Value.(int64)
					nodes[i].ID = int64(id)
				}
				mutation.done = true
				return nodes[i], nil
			})
			// Apply this builder's hooks in reverse registration order.
			for i := len(builder.hooks) - 1; i >= 0; i-- {
				mut = builder.hooks[i](mut)
			}
			mutators[i] = mut
		}(i, ctx)
	}
	if len(mutators) > 0 {
		// Kick off the chain from the first mutator.
		if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil {
			return nil, err
		}
	}
	return nodes, nil
}
|
||||
|
||||
// SaveX is like Save, but panics if an error occurs.
func (_c *ChannelMonitorHistoryCreateBulk) SaveX(ctx context.Context) []*ChannelMonitorHistory {
	v, err := _c.Save(ctx)
	if err != nil {
		panic(err)
	}
	return v
}

// Exec executes the query.
func (_c *ChannelMonitorHistoryCreateBulk) Exec(ctx context.Context) error {
	_, err := _c.Save(ctx)
	return err
}

// ExecX is like Exec, but panics if an error occurs.
func (_c *ChannelMonitorHistoryCreateBulk) ExecX(ctx context.Context) {
	if err := _c.Exec(ctx); err != nil {
		panic(err)
	}
}
|
||||
|
||||
// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause
// of the `INSERT` statement. For example:
//
//	client.ChannelMonitorHistory.CreateBulk(builders...).
//		OnConflict(
//			// Update the row with the new values
//			// the was proposed for insertion.
//			sql.ResolveWithNewValues(),
//		).
//		// Override some of the fields with custom
//		// update values.
//		Update(func(u *ent.ChannelMonitorHistoryUpsert) {
//			SetMonitorID(v+v).
//		}).
//		Exec(ctx)
func (_c *ChannelMonitorHistoryCreateBulk) OnConflict(opts ...sql.ConflictOption) *ChannelMonitorHistoryUpsertBulk {
	_c.conflict = opts
	return &ChannelMonitorHistoryUpsertBulk{
		create: _c,
	}
}

// OnConflictColumns calls `OnConflict` and configures the columns
// as conflict target. Using this option is equivalent to using:
//
//	client.ChannelMonitorHistory.Create().
//		OnConflict(sql.ConflictColumns(columns...)).
//		Exec(ctx)
func (_c *ChannelMonitorHistoryCreateBulk) OnConflictColumns(columns ...string) *ChannelMonitorHistoryUpsertBulk {
	_c.conflict = append(_c.conflict, sql.ConflictColumns(columns...))
	return &ChannelMonitorHistoryUpsertBulk{
		create: _c,
	}
}
|
||||
|
||||
// ChannelMonitorHistoryUpsertBulk is the builder for "upsert"-ing
// a bulk of ChannelMonitorHistory nodes.
type ChannelMonitorHistoryUpsertBulk struct {
	// create is the underlying bulk-create builder whose conflict
	// options this wrapper configures.
	create *ChannelMonitorHistoryCreateBulk
}
|
||||
|
||||
// UpdateNewValues updates the mutable fields using the new values that
// were set on create. Using this option is equivalent to using:
//
//	client.ChannelMonitorHistory.Create().
//		OnConflict(
//			sql.ResolveWithNewValues(),
//		).
//		Exec(ctx)
func (u *ChannelMonitorHistoryUpsertBulk) UpdateNewValues() *ChannelMonitorHistoryUpsertBulk {
	u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues())
	return u
}

// Ignore sets each column to itself in case of conflict.
// Using this option is equivalent to using:
//
//	client.ChannelMonitorHistory.Create().
//		OnConflict(sql.ResolveWithIgnore()).
//		Exec(ctx)
func (u *ChannelMonitorHistoryUpsertBulk) Ignore() *ChannelMonitorHistoryUpsertBulk {
	u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore())
	return u
}

// DoNothing configures the conflict_action to `DO NOTHING`.
// Supported only by SQLite and PostgreSQL.
func (u *ChannelMonitorHistoryUpsertBulk) DoNothing() *ChannelMonitorHistoryUpsertBulk {
	u.create.conflict = append(u.create.conflict, sql.DoNothing())
	return u
}

// Update allows overriding fields `UPDATE` values. See the ChannelMonitorHistoryCreateBulk.OnConflict
// documentation for more info.
func (u *ChannelMonitorHistoryUpsertBulk) Update(set func(*ChannelMonitorHistoryUpsert)) *ChannelMonitorHistoryUpsertBulk {
	// Wrap the user callback so it receives the typed upsert helper instead of
	// the raw sql.UpdateSet; the resolver runs when the conflict clause is built.
	u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) {
		set(&ChannelMonitorHistoryUpsert{UpdateSet: update})
	}))
	return u
}
|
||||
|
||||
// The helpers below mirror the UpsertOne per-field helpers but apply to the
// whole batch: Set* assigns an explicit value, Update* re-applies the value
// proposed on create, Add* increments a numeric column, Clear* sets NULL.

// SetMonitorID sets the "monitor_id" field.
func (u *ChannelMonitorHistoryUpsertBulk) SetMonitorID(v int64) *ChannelMonitorHistoryUpsertBulk {
	return u.Update(func(s *ChannelMonitorHistoryUpsert) {
		s.SetMonitorID(v)
	})
}

// UpdateMonitorID sets the "monitor_id" field to the value that was provided on create.
func (u *ChannelMonitorHistoryUpsertBulk) UpdateMonitorID() *ChannelMonitorHistoryUpsertBulk {
	return u.Update(func(s *ChannelMonitorHistoryUpsert) {
		s.UpdateMonitorID()
	})
}

// SetModel sets the "model" field.
func (u *ChannelMonitorHistoryUpsertBulk) SetModel(v string) *ChannelMonitorHistoryUpsertBulk {
	return u.Update(func(s *ChannelMonitorHistoryUpsert) {
		s.SetModel(v)
	})
}

// UpdateModel sets the "model" field to the value that was provided on create.
func (u *ChannelMonitorHistoryUpsertBulk) UpdateModel() *ChannelMonitorHistoryUpsertBulk {
	return u.Update(func(s *ChannelMonitorHistoryUpsert) {
		s.UpdateModel()
	})
}

// SetStatus sets the "status" field.
func (u *ChannelMonitorHistoryUpsertBulk) SetStatus(v channelmonitorhistory.Status) *ChannelMonitorHistoryUpsertBulk {
	return u.Update(func(s *ChannelMonitorHistoryUpsert) {
		s.SetStatus(v)
	})
}

// UpdateStatus sets the "status" field to the value that was provided on create.
func (u *ChannelMonitorHistoryUpsertBulk) UpdateStatus() *ChannelMonitorHistoryUpsertBulk {
	return u.Update(func(s *ChannelMonitorHistoryUpsert) {
		s.UpdateStatus()
	})
}

// SetLatencyMs sets the "latency_ms" field.
func (u *ChannelMonitorHistoryUpsertBulk) SetLatencyMs(v int) *ChannelMonitorHistoryUpsertBulk {
	return u.Update(func(s *ChannelMonitorHistoryUpsert) {
		s.SetLatencyMs(v)
	})
}

// AddLatencyMs adds v to the "latency_ms" field.
func (u *ChannelMonitorHistoryUpsertBulk) AddLatencyMs(v int) *ChannelMonitorHistoryUpsertBulk {
	return u.Update(func(s *ChannelMonitorHistoryUpsert) {
		s.AddLatencyMs(v)
	})
}

// UpdateLatencyMs sets the "latency_ms" field to the value that was provided on create.
func (u *ChannelMonitorHistoryUpsertBulk) UpdateLatencyMs() *ChannelMonitorHistoryUpsertBulk {
	return u.Update(func(s *ChannelMonitorHistoryUpsert) {
		s.UpdateLatencyMs()
	})
}

// ClearLatencyMs clears the value of the "latency_ms" field.
func (u *ChannelMonitorHistoryUpsertBulk) ClearLatencyMs() *ChannelMonitorHistoryUpsertBulk {
	return u.Update(func(s *ChannelMonitorHistoryUpsert) {
		s.ClearLatencyMs()
	})
}

// SetPingLatencyMs sets the "ping_latency_ms" field.
func (u *ChannelMonitorHistoryUpsertBulk) SetPingLatencyMs(v int) *ChannelMonitorHistoryUpsertBulk {
	return u.Update(func(s *ChannelMonitorHistoryUpsert) {
		s.SetPingLatencyMs(v)
	})
}

// AddPingLatencyMs adds v to the "ping_latency_ms" field.
func (u *ChannelMonitorHistoryUpsertBulk) AddPingLatencyMs(v int) *ChannelMonitorHistoryUpsertBulk {
	return u.Update(func(s *ChannelMonitorHistoryUpsert) {
		s.AddPingLatencyMs(v)
	})
}

// UpdatePingLatencyMs sets the "ping_latency_ms" field to the value that was provided on create.
func (u *ChannelMonitorHistoryUpsertBulk) UpdatePingLatencyMs() *ChannelMonitorHistoryUpsertBulk {
	return u.Update(func(s *ChannelMonitorHistoryUpsert) {
		s.UpdatePingLatencyMs()
	})
}

// ClearPingLatencyMs clears the value of the "ping_latency_ms" field.
func (u *ChannelMonitorHistoryUpsertBulk) ClearPingLatencyMs() *ChannelMonitorHistoryUpsertBulk {
	return u.Update(func(s *ChannelMonitorHistoryUpsert) {
		s.ClearPingLatencyMs()
	})
}

// SetMessage sets the "message" field.
func (u *ChannelMonitorHistoryUpsertBulk) SetMessage(v string) *ChannelMonitorHistoryUpsertBulk {
	return u.Update(func(s *ChannelMonitorHistoryUpsert) {
		s.SetMessage(v)
	})
}

// UpdateMessage sets the "message" field to the value that was provided on create.
func (u *ChannelMonitorHistoryUpsertBulk) UpdateMessage() *ChannelMonitorHistoryUpsertBulk {
	return u.Update(func(s *ChannelMonitorHistoryUpsert) {
		s.UpdateMessage()
	})
}

// ClearMessage clears the value of the "message" field.
func (u *ChannelMonitorHistoryUpsertBulk) ClearMessage() *ChannelMonitorHistoryUpsertBulk {
	return u.Update(func(s *ChannelMonitorHistoryUpsert) {
		s.ClearMessage()
	})
}

// SetCheckedAt sets the "checked_at" field.
func (u *ChannelMonitorHistoryUpsertBulk) SetCheckedAt(v time.Time) *ChannelMonitorHistoryUpsertBulk {
	return u.Update(func(s *ChannelMonitorHistoryUpsert) {
		s.SetCheckedAt(v)
	})
}

// UpdateCheckedAt sets the "checked_at" field to the value that was provided on create.
func (u *ChannelMonitorHistoryUpsertBulk) UpdateCheckedAt() *ChannelMonitorHistoryUpsertBulk {
	return u.Update(func(s *ChannelMonitorHistoryUpsert) {
		s.UpdateCheckedAt()
	})
}
|
||||
|
||||
// Exec executes the query.
func (u *ChannelMonitorHistoryUpsertBulk) Exec(ctx context.Context) error {
	if u.create.err != nil {
		return u.create.err
	}
	// Conflict options must be set on the bulk builder, not on the
	// individual create builders inside it.
	for i, b := range u.create.builders {
		if len(b.conflict) != 0 {
			return fmt.Errorf("ent: OnConflict was set for builder %d. Set it on the ChannelMonitorHistoryCreateBulk instead", i)
		}
	}
	// Guard: without conflict options the statement would be a plain INSERT.
	if len(u.create.conflict) == 0 {
		return errors.New("ent: missing options for ChannelMonitorHistoryCreateBulk.OnConflict")
	}
	return u.create.Exec(ctx)
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (u *ChannelMonitorHistoryUpsertBulk) ExecX(ctx context.Context) {
|
||||
if err := u.create.Exec(ctx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
// ===== backend/ent/channelmonitorhistory_delete.go (new file, 88 lines) =====
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package ent
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"entgo.io/ent/schema/field"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitorhistory"
|
||||
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||
)
|
||||
|
||||
// ChannelMonitorHistoryDelete is the builder for deleting a ChannelMonitorHistory entity.
type ChannelMonitorHistoryDelete struct {
	config
	// hooks run around the delete mutation, outermost first.
	hooks []Hook
	// mutation accumulates the predicates applied to this delete.
	mutation *ChannelMonitorHistoryMutation
}
|
||||
|
||||
// Where appends a list predicates to the ChannelMonitorHistoryDelete builder.
func (_d *ChannelMonitorHistoryDelete) Where(ps ...predicate.ChannelMonitorHistory) *ChannelMonitorHistoryDelete {
	_d.mutation.Where(ps...)
	return _d
}

// Exec executes the deletion query and returns how many vertices were deleted.
func (_d *ChannelMonitorHistoryDelete) Exec(ctx context.Context) (int, error) {
	return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks)
}

// ExecX is like Exec, but panics if an error occurs.
func (_d *ChannelMonitorHistoryDelete) ExecX(ctx context.Context) int {
	n, err := _d.Exec(ctx)
	if err != nil {
		panic(err)
	}
	return n
}
|
||||
|
||||
// sqlExec builds and runs the DELETE statement, returning the number of
// deleted rows. Constraint violations are wrapped as *ConstraintError.
func (_d *ChannelMonitorHistoryDelete) sqlExec(ctx context.Context) (int, error) {
	_spec := sqlgraph.NewDeleteSpec(channelmonitorhistory.Table, sqlgraph.NewFieldSpec(channelmonitorhistory.FieldID, field.TypeInt64))
	if ps := _d.mutation.predicates; len(ps) > 0 {
		// Apply all accumulated predicates to the DELETE's WHERE clause.
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec)
	if err != nil && sqlgraph.IsConstraintError(err) {
		err = &ConstraintError{msg: err.Error(), wrap: err}
	}
	_d.mutation.done = true
	return affected, err
}
|
||||
|
||||
// ChannelMonitorHistoryDeleteOne is the builder for deleting a single ChannelMonitorHistory entity.
type ChannelMonitorHistoryDeleteOne struct {
	// _d is the underlying delete builder this wrapper delegates to.
	_d *ChannelMonitorHistoryDelete
}

// Where appends a list predicates to the ChannelMonitorHistoryDelete builder.
func (_d *ChannelMonitorHistoryDeleteOne) Where(ps ...predicate.ChannelMonitorHistory) *ChannelMonitorHistoryDeleteOne {
	_d._d.mutation.Where(ps...)
	return _d
}

// Exec executes the deletion query.
// It returns *NotFoundError when no row matched the predicates.
func (_d *ChannelMonitorHistoryDeleteOne) Exec(ctx context.Context) error {
	n, err := _d._d.Exec(ctx)
	switch {
	case err != nil:
		return err
	case n == 0:
		return &NotFoundError{channelmonitorhistory.Label}
	default:
		return nil
	}
}

// ExecX is like Exec, but panics if an error occurs.
func (_d *ChannelMonitorHistoryDeleteOne) ExecX(ctx context.Context) {
	if err := _d.Exec(ctx); err != nil {
		panic(err)
	}
}
|
||||
// ===== backend/ent/channelmonitorhistory_query.go (new file, 643 lines) =====
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package ent
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math"
|
||||
|
||||
"entgo.io/ent"
|
||||
"entgo.io/ent/dialect"
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"entgo.io/ent/schema/field"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitor"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitorhistory"
|
||||
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||
)
|
||||
|
||||
// ChannelMonitorHistoryQuery is the builder for querying ChannelMonitorHistory entities.
type ChannelMonitorHistoryQuery struct {
	config
	// ctx holds limit/offset/unique/field options for this query.
	ctx *QueryContext
	// order holds the ORDER BY options in application order.
	order []channelmonitorhistory.OrderOption
	// inters are query interceptors applied around execution.
	inters []Interceptor
	// predicates are the WHERE conditions.
	predicates []predicate.ChannelMonitorHistory
	// withMonitor, when non-nil, eager-loads the "monitor" edge.
	withMonitor *ChannelMonitorQuery
	// modifiers are raw selector modifiers applied before execution.
	modifiers []func(*sql.Selector)
	// intermediate query (i.e. traversal path).
	sql  *sql.Selector
	path func(context.Context) (*sql.Selector, error)
}
|
||||
|
||||
// Where adds a new predicate for the ChannelMonitorHistoryQuery builder.
func (_q *ChannelMonitorHistoryQuery) Where(ps ...predicate.ChannelMonitorHistory) *ChannelMonitorHistoryQuery {
	_q.predicates = append(_q.predicates, ps...)
	return _q
}

// Limit the number of records to be returned by this query.
func (_q *ChannelMonitorHistoryQuery) Limit(limit int) *ChannelMonitorHistoryQuery {
	_q.ctx.Limit = &limit
	return _q
}

// Offset to start from.
func (_q *ChannelMonitorHistoryQuery) Offset(offset int) *ChannelMonitorHistoryQuery {
	_q.ctx.Offset = &offset
	return _q
}

// Unique configures the query builder to filter duplicate records on query.
// By default, unique is set to true, and can be disabled using this method.
func (_q *ChannelMonitorHistoryQuery) Unique(unique bool) *ChannelMonitorHistoryQuery {
	_q.ctx.Unique = &unique
	return _q
}

// Order specifies how the records should be ordered.
func (_q *ChannelMonitorHistoryQuery) Order(o ...channelmonitorhistory.OrderOption) *ChannelMonitorHistoryQuery {
	_q.order = append(_q.order, o...)
	return _q
}
|
||||
|
||||
// QueryMonitor chains the current query on the "monitor" edge.
// The returned ChannelMonitorQuery resolves its FROM clause lazily, once the
// current query's traversal path is known.
func (_q *ChannelMonitorHistoryQuery) QueryMonitor() *ChannelMonitorQuery {
	query := (&ChannelMonitorClient{config: _q.config}).Query()
	query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
		if err := _q.prepareQuery(ctx); err != nil {
			return nil, err
		}
		selector := _q.sqlQuery(ctx)
		if err := selector.Err(); err != nil {
			return nil, err
		}
		// M2O step from history rows to their owning monitor.
		step := sqlgraph.NewStep(
			sqlgraph.From(channelmonitorhistory.Table, channelmonitorhistory.FieldID, selector),
			sqlgraph.To(channelmonitor.Table, channelmonitor.FieldID),
			sqlgraph.Edge(sqlgraph.M2O, true, channelmonitorhistory.MonitorTable, channelmonitorhistory.MonitorColumn),
		)
		fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step)
		return fromU, nil
	}
	return query
}
|
||||
|
||||
// First returns the first ChannelMonitorHistory entity from the query.
// Returns a *NotFoundError when no ChannelMonitorHistory was found.
func (_q *ChannelMonitorHistoryQuery) First(ctx context.Context) (*ChannelMonitorHistory, error) {
	nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst))
	if err != nil {
		return nil, err
	}
	if len(nodes) == 0 {
		return nil, &NotFoundError{channelmonitorhistory.Label}
	}
	return nodes[0], nil
}

// FirstX is like First, but panics if an error occurs.
// A not-found result returns nil rather than panicking.
func (_q *ChannelMonitorHistoryQuery) FirstX(ctx context.Context) *ChannelMonitorHistory {
	node, err := _q.First(ctx)
	if err != nil && !IsNotFound(err) {
		panic(err)
	}
	return node
}

// FirstID returns the first ChannelMonitorHistory ID from the query.
// Returns a *NotFoundError when no ChannelMonitorHistory ID was found.
func (_q *ChannelMonitorHistoryQuery) FirstID(ctx context.Context) (id int64, err error) {
	var ids []int64
	if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil {
		return
	}
	if len(ids) == 0 {
		err = &NotFoundError{channelmonitorhistory.Label}
		return
	}
	return ids[0], nil
}

// FirstIDX is like FirstID, but panics if an error occurs.
// A not-found result returns the zero ID rather than panicking.
func (_q *ChannelMonitorHistoryQuery) FirstIDX(ctx context.Context) int64 {
	id, err := _q.FirstID(ctx)
	if err != nil && !IsNotFound(err) {
		panic(err)
	}
	return id
}
|
||||
|
||||
// Only returns a single ChannelMonitorHistory entity found by the query, ensuring it only returns one.
// Returns a *NotSingularError when more than one ChannelMonitorHistory entity is found.
// Returns a *NotFoundError when no ChannelMonitorHistory entities are found.
func (_q *ChannelMonitorHistoryQuery) Only(ctx context.Context) (*ChannelMonitorHistory, error) {
	// Fetch up to two rows: one proves existence, two proves non-singularity.
	nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly))
	if err != nil {
		return nil, err
	}
	switch len(nodes) {
	case 1:
		return nodes[0], nil
	case 0:
		return nil, &NotFoundError{channelmonitorhistory.Label}
	default:
		return nil, &NotSingularError{channelmonitorhistory.Label}
	}
}

// OnlyX is like Only, but panics if an error occurs.
func (_q *ChannelMonitorHistoryQuery) OnlyX(ctx context.Context) *ChannelMonitorHistory {
	node, err := _q.Only(ctx)
	if err != nil {
		panic(err)
	}
	return node
}

// OnlyID is like Only, but returns the only ChannelMonitorHistory ID in the query.
// Returns a *NotSingularError when more than one ChannelMonitorHistory ID is found.
// Returns a *NotFoundError when no entities are found.
func (_q *ChannelMonitorHistoryQuery) OnlyID(ctx context.Context) (id int64, err error) {
	var ids []int64
	if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil {
		return
	}
	switch len(ids) {
	case 1:
		id = ids[0]
	case 0:
		err = &NotFoundError{channelmonitorhistory.Label}
	default:
		err = &NotSingularError{channelmonitorhistory.Label}
	}
	return
}

// OnlyIDX is like OnlyID, but panics if an error occurs.
func (_q *ChannelMonitorHistoryQuery) OnlyIDX(ctx context.Context) int64 {
	id, err := _q.OnlyID(ctx)
	if err != nil {
		panic(err)
	}
	return id
}
|
||||
|
||||
// All executes the query and returns a list of ChannelMonitorHistories.
func (_q *ChannelMonitorHistoryQuery) All(ctx context.Context) ([]*ChannelMonitorHistory, error) {
	ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll)
	if err := _q.prepareQuery(ctx); err != nil {
		return nil, err
	}
	qr := querierAll[[]*ChannelMonitorHistory, *ChannelMonitorHistoryQuery]()
	return withInterceptors[[]*ChannelMonitorHistory](ctx, _q, qr, _q.inters)
}

// AllX is like All, but panics if an error occurs.
func (_q *ChannelMonitorHistoryQuery) AllX(ctx context.Context) []*ChannelMonitorHistory {
	nodes, err := _q.All(ctx)
	if err != nil {
		panic(err)
	}
	return nodes
}

// IDs executes the query and returns a list of ChannelMonitorHistory IDs.
func (_q *ChannelMonitorHistoryQuery) IDs(ctx context.Context) (ids []int64, err error) {
	// Default to unique IDs when this query is part of a graph traversal.
	if _q.ctx.Unique == nil && _q.path != nil {
		_q.Unique(true)
	}
	ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs)
	if err = _q.Select(channelmonitorhistory.FieldID).Scan(ctx, &ids); err != nil {
		return nil, err
	}
	return ids, nil
}

// IDsX is like IDs, but panics if an error occurs.
func (_q *ChannelMonitorHistoryQuery) IDsX(ctx context.Context) []int64 {
	ids, err := _q.IDs(ctx)
	if err != nil {
		panic(err)
	}
	return ids
}
|
||||
|
||||
// Count returns the count of the given query.
func (_q *ChannelMonitorHistoryQuery) Count(ctx context.Context) (int, error) {
	ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount)
	if err := _q.prepareQuery(ctx); err != nil {
		return 0, err
	}
	return withInterceptors[int](ctx, _q, querierCount[*ChannelMonitorHistoryQuery](), _q.inters)
}

// CountX is like Count, but panics if an error occurs.
func (_q *ChannelMonitorHistoryQuery) CountX(ctx context.Context) int {
	count, err := _q.Count(ctx)
	if err != nil {
		panic(err)
	}
	return count
}

// Exist returns true if the query has elements in the graph.
func (_q *ChannelMonitorHistoryQuery) Exist(ctx context.Context) (bool, error) {
	ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist)
	// Existence is probed via FirstID; not-found maps to (false, nil).
	switch _, err := _q.FirstID(ctx); {
	case IsNotFound(err):
		return false, nil
	case err != nil:
		return false, fmt.Errorf("ent: check existence: %w", err)
	default:
		return true, nil
	}
}

// ExistX is like Exist, but panics if an error occurs.
func (_q *ChannelMonitorHistoryQuery) ExistX(ctx context.Context) bool {
	exist, err := _q.Exist(ctx)
	if err != nil {
		panic(err)
	}
	return exist
}
|
||||
|
||||
// Clone returns a duplicate of the ChannelMonitorHistoryQuery builder, including all associated steps. It can be
// used to prepare common query builders and use them differently after the clone is made.
func (_q *ChannelMonitorHistoryQuery) Clone() *ChannelMonitorHistoryQuery {
	if _q == nil {
		return nil
	}
	// Slices are copied so mutations on the clone don't affect the original.
	return &ChannelMonitorHistoryQuery{
		config:      _q.config,
		ctx:         _q.ctx.Clone(),
		order:       append([]channelmonitorhistory.OrderOption{}, _q.order...),
		inters:      append([]Interceptor{}, _q.inters...),
		predicates:  append([]predicate.ChannelMonitorHistory{}, _q.predicates...),
		withMonitor: _q.withMonitor.Clone(),
		// clone intermediate query.
		sql:  _q.sql.Clone(),
		path: _q.path,
	}
}
|
||||
|
||||
// WithMonitor tells the query-builder to eager-load the nodes that are connected to
// the "monitor" edge. The optional arguments are used to configure the query builder of the edge.
func (_q *ChannelMonitorHistoryQuery) WithMonitor(opts ...func(*ChannelMonitorQuery)) *ChannelMonitorHistoryQuery {
	query := (&ChannelMonitorClient{config: _q.config}).Query()
	for _, opt := range opts {
		opt(query)
	}
	_q.withMonitor = query
	return _q
}
|
||||
|
||||
// GroupBy is used to group vertices by one or more fields/columns.
|
||||
// It is often used with aggregate functions, like: count, max, mean, min, sum.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// var v []struct {
|
||||
// MonitorID int64 `json:"monitor_id,omitempty"`
|
||||
// Count int `json:"count,omitempty"`
|
||||
// }
|
||||
//
|
||||
// client.ChannelMonitorHistory.Query().
|
||||
// GroupBy(channelmonitorhistory.FieldMonitorID).
|
||||
// Aggregate(ent.Count()).
|
||||
// Scan(ctx, &v)
|
||||
func (_q *ChannelMonitorHistoryQuery) GroupBy(field string, fields ...string) *ChannelMonitorHistoryGroupBy {
|
||||
_q.ctx.Fields = append([]string{field}, fields...)
|
||||
grbuild := &ChannelMonitorHistoryGroupBy{build: _q}
|
||||
grbuild.flds = &_q.ctx.Fields
|
||||
grbuild.label = channelmonitorhistory.Label
|
||||
grbuild.scan = grbuild.Scan
|
||||
return grbuild
|
||||
}
|
||||
|
||||
// Select allows the selection one or more fields/columns for the given query,
|
||||
// instead of selecting all fields in the entity.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// var v []struct {
|
||||
// MonitorID int64 `json:"monitor_id,omitempty"`
|
||||
// }
|
||||
//
|
||||
// client.ChannelMonitorHistory.Query().
|
||||
// Select(channelmonitorhistory.FieldMonitorID).
|
||||
// Scan(ctx, &v)
|
||||
func (_q *ChannelMonitorHistoryQuery) Select(fields ...string) *ChannelMonitorHistorySelect {
|
||||
_q.ctx.Fields = append(_q.ctx.Fields, fields...)
|
||||
sbuild := &ChannelMonitorHistorySelect{ChannelMonitorHistoryQuery: _q}
|
||||
sbuild.label = channelmonitorhistory.Label
|
||||
sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan
|
||||
return sbuild
|
||||
}
|
||||
|
||||
// Aggregate returns a ChannelMonitorHistorySelect configured with the given aggregations.
|
||||
func (_q *ChannelMonitorHistoryQuery) Aggregate(fns ...AggregateFunc) *ChannelMonitorHistorySelect {
|
||||
return _q.Select().Aggregate(fns...)
|
||||
}
|
||||
|
||||
func (_q *ChannelMonitorHistoryQuery) prepareQuery(ctx context.Context) error {
|
||||
for _, inter := range _q.inters {
|
||||
if inter == nil {
|
||||
return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
|
||||
}
|
||||
if trv, ok := inter.(Traverser); ok {
|
||||
if err := trv.Traverse(ctx, _q); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
for _, f := range _q.ctx.Fields {
|
||||
if !channelmonitorhistory.ValidColumn(f) {
|
||||
return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
|
||||
}
|
||||
}
|
||||
if _q.path != nil {
|
||||
prev, err := _q.path(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_q.sql = prev
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (_q *ChannelMonitorHistoryQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*ChannelMonitorHistory, error) {
|
||||
var (
|
||||
nodes = []*ChannelMonitorHistory{}
|
||||
_spec = _q.querySpec()
|
||||
loadedTypes = [1]bool{
|
||||
_q.withMonitor != nil,
|
||||
}
|
||||
)
|
||||
_spec.ScanValues = func(columns []string) ([]any, error) {
|
||||
return (*ChannelMonitorHistory).scanValues(nil, columns)
|
||||
}
|
||||
_spec.Assign = func(columns []string, values []any) error {
|
||||
node := &ChannelMonitorHistory{config: _q.config}
|
||||
nodes = append(nodes, node)
|
||||
node.Edges.loadedTypes = loadedTypes
|
||||
return node.assignValues(columns, values)
|
||||
}
|
||||
if len(_q.modifiers) > 0 {
|
||||
_spec.Modifiers = _q.modifiers
|
||||
}
|
||||
for i := range hooks {
|
||||
hooks[i](ctx, _spec)
|
||||
}
|
||||
if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(nodes) == 0 {
|
||||
return nodes, nil
|
||||
}
|
||||
if query := _q.withMonitor; query != nil {
|
||||
if err := _q.loadMonitor(ctx, query, nodes, nil,
|
||||
func(n *ChannelMonitorHistory, e *ChannelMonitor) { n.Edges.Monitor = e }); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return nodes, nil
|
||||
}
|
||||
|
||||
func (_q *ChannelMonitorHistoryQuery) loadMonitor(ctx context.Context, query *ChannelMonitorQuery, nodes []*ChannelMonitorHistory, init func(*ChannelMonitorHistory), assign func(*ChannelMonitorHistory, *ChannelMonitor)) error {
|
||||
ids := make([]int64, 0, len(nodes))
|
||||
nodeids := make(map[int64][]*ChannelMonitorHistory)
|
||||
for i := range nodes {
|
||||
fk := nodes[i].MonitorID
|
||||
if _, ok := nodeids[fk]; !ok {
|
||||
ids = append(ids, fk)
|
||||
}
|
||||
nodeids[fk] = append(nodeids[fk], nodes[i])
|
||||
}
|
||||
if len(ids) == 0 {
|
||||
return nil
|
||||
}
|
||||
query.Where(channelmonitor.IDIn(ids...))
|
||||
neighbors, err := query.All(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, n := range neighbors {
|
||||
nodes, ok := nodeids[n.ID]
|
||||
if !ok {
|
||||
return fmt.Errorf(`unexpected foreign-key "monitor_id" returned %v`, n.ID)
|
||||
}
|
||||
for i := range nodes {
|
||||
assign(nodes[i], n)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (_q *ChannelMonitorHistoryQuery) sqlCount(ctx context.Context) (int, error) {
|
||||
_spec := _q.querySpec()
|
||||
if len(_q.modifiers) > 0 {
|
||||
_spec.Modifiers = _q.modifiers
|
||||
}
|
||||
_spec.Node.Columns = _q.ctx.Fields
|
||||
if len(_q.ctx.Fields) > 0 {
|
||||
_spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique
|
||||
}
|
||||
return sqlgraph.CountNodes(ctx, _q.driver, _spec)
|
||||
}
|
||||
|
||||
func (_q *ChannelMonitorHistoryQuery) querySpec() *sqlgraph.QuerySpec {
|
||||
_spec := sqlgraph.NewQuerySpec(channelmonitorhistory.Table, channelmonitorhistory.Columns, sqlgraph.NewFieldSpec(channelmonitorhistory.FieldID, field.TypeInt64))
|
||||
_spec.From = _q.sql
|
||||
if unique := _q.ctx.Unique; unique != nil {
|
||||
_spec.Unique = *unique
|
||||
} else if _q.path != nil {
|
||||
_spec.Unique = true
|
||||
}
|
||||
if fields := _q.ctx.Fields; len(fields) > 0 {
|
||||
_spec.Node.Columns = make([]string, 0, len(fields))
|
||||
_spec.Node.Columns = append(_spec.Node.Columns, channelmonitorhistory.FieldID)
|
||||
for i := range fields {
|
||||
if fields[i] != channelmonitorhistory.FieldID {
|
||||
_spec.Node.Columns = append(_spec.Node.Columns, fields[i])
|
||||
}
|
||||
}
|
||||
if _q.withMonitor != nil {
|
||||
_spec.Node.AddColumnOnce(channelmonitorhistory.FieldMonitorID)
|
||||
}
|
||||
}
|
||||
if ps := _q.predicates; len(ps) > 0 {
|
||||
_spec.Predicate = func(selector *sql.Selector) {
|
||||
for i := range ps {
|
||||
ps[i](selector)
|
||||
}
|
||||
}
|
||||
}
|
||||
if limit := _q.ctx.Limit; limit != nil {
|
||||
_spec.Limit = *limit
|
||||
}
|
||||
if offset := _q.ctx.Offset; offset != nil {
|
||||
_spec.Offset = *offset
|
||||
}
|
||||
if ps := _q.order; len(ps) > 0 {
|
||||
_spec.Order = func(selector *sql.Selector) {
|
||||
for i := range ps {
|
||||
ps[i](selector)
|
||||
}
|
||||
}
|
||||
}
|
||||
return _spec
|
||||
}
|
||||
|
||||
func (_q *ChannelMonitorHistoryQuery) sqlQuery(ctx context.Context) *sql.Selector {
|
||||
builder := sql.Dialect(_q.driver.Dialect())
|
||||
t1 := builder.Table(channelmonitorhistory.Table)
|
||||
columns := _q.ctx.Fields
|
||||
if len(columns) == 0 {
|
||||
columns = channelmonitorhistory.Columns
|
||||
}
|
||||
selector := builder.Select(t1.Columns(columns...)...).From(t1)
|
||||
if _q.sql != nil {
|
||||
selector = _q.sql
|
||||
selector.Select(selector.Columns(columns...)...)
|
||||
}
|
||||
if _q.ctx.Unique != nil && *_q.ctx.Unique {
|
||||
selector.Distinct()
|
||||
}
|
||||
for _, m := range _q.modifiers {
|
||||
m(selector)
|
||||
}
|
||||
for _, p := range _q.predicates {
|
||||
p(selector)
|
||||
}
|
||||
for _, p := range _q.order {
|
||||
p(selector)
|
||||
}
|
||||
if offset := _q.ctx.Offset; offset != nil {
|
||||
// limit is mandatory for offset clause. We start
|
||||
// with default value, and override it below if needed.
|
||||
selector.Offset(*offset).Limit(math.MaxInt32)
|
||||
}
|
||||
if limit := _q.ctx.Limit; limit != nil {
|
||||
selector.Limit(*limit)
|
||||
}
|
||||
return selector
|
||||
}
|
||||
|
||||
// ForUpdate locks the selected rows against concurrent updates, and prevent them from being
|
||||
// updated, deleted or "selected ... for update" by other sessions, until the transaction is
|
||||
// either committed or rolled-back.
|
||||
func (_q *ChannelMonitorHistoryQuery) ForUpdate(opts ...sql.LockOption) *ChannelMonitorHistoryQuery {
|
||||
if _q.driver.Dialect() == dialect.Postgres {
|
||||
_q.Unique(false)
|
||||
}
|
||||
_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
|
||||
s.ForUpdate(opts...)
|
||||
})
|
||||
return _q
|
||||
}
|
||||
|
||||
// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock
|
||||
// on any rows that are read. Other sessions can read the rows, but cannot modify them
|
||||
// until your transaction commits.
|
||||
func (_q *ChannelMonitorHistoryQuery) ForShare(opts ...sql.LockOption) *ChannelMonitorHistoryQuery {
|
||||
if _q.driver.Dialect() == dialect.Postgres {
|
||||
_q.Unique(false)
|
||||
}
|
||||
_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
|
||||
s.ForShare(opts...)
|
||||
})
|
||||
return _q
|
||||
}
|
||||
|
||||
// ChannelMonitorHistoryGroupBy is the group-by builder for ChannelMonitorHistory entities.
|
||||
type ChannelMonitorHistoryGroupBy struct {
|
||||
selector
|
||||
build *ChannelMonitorHistoryQuery
|
||||
}
|
||||
|
||||
// Aggregate adds the given aggregation functions to the group-by query.
|
||||
func (_g *ChannelMonitorHistoryGroupBy) Aggregate(fns ...AggregateFunc) *ChannelMonitorHistoryGroupBy {
|
||||
_g.fns = append(_g.fns, fns...)
|
||||
return _g
|
||||
}
|
||||
|
||||
// Scan applies the selector query and scans the result into the given value.
|
||||
func (_g *ChannelMonitorHistoryGroupBy) Scan(ctx context.Context, v any) error {
|
||||
ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy)
|
||||
if err := _g.build.prepareQuery(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
return scanWithInterceptors[*ChannelMonitorHistoryQuery, *ChannelMonitorHistoryGroupBy](ctx, _g.build, _g, _g.build.inters, v)
|
||||
}
|
||||
|
||||
func (_g *ChannelMonitorHistoryGroupBy) sqlScan(ctx context.Context, root *ChannelMonitorHistoryQuery, v any) error {
|
||||
selector := root.sqlQuery(ctx).Select()
|
||||
aggregation := make([]string, 0, len(_g.fns))
|
||||
for _, fn := range _g.fns {
|
||||
aggregation = append(aggregation, fn(selector))
|
||||
}
|
||||
if len(selector.SelectedColumns()) == 0 {
|
||||
columns := make([]string, 0, len(*_g.flds)+len(_g.fns))
|
||||
for _, f := range *_g.flds {
|
||||
columns = append(columns, selector.C(f))
|
||||
}
|
||||
columns = append(columns, aggregation...)
|
||||
selector.Select(columns...)
|
||||
}
|
||||
selector.GroupBy(selector.Columns(*_g.flds...)...)
|
||||
if err := selector.Err(); err != nil {
|
||||
return err
|
||||
}
|
||||
rows := &sql.Rows{}
|
||||
query, args := selector.Query()
|
||||
if err := _g.build.driver.Query(ctx, query, args, rows); err != nil {
|
||||
return err
|
||||
}
|
||||
defer rows.Close()
|
||||
return sql.ScanSlice(rows, v)
|
||||
}
|
||||
|
||||
// ChannelMonitorHistorySelect is the builder for selecting fields of ChannelMonitorHistory entities.
|
||||
type ChannelMonitorHistorySelect struct {
|
||||
*ChannelMonitorHistoryQuery
|
||||
selector
|
||||
}
|
||||
|
||||
// Aggregate adds the given aggregation functions to the selector query.
|
||||
func (_s *ChannelMonitorHistorySelect) Aggregate(fns ...AggregateFunc) *ChannelMonitorHistorySelect {
|
||||
_s.fns = append(_s.fns, fns...)
|
||||
return _s
|
||||
}
|
||||
|
||||
// Scan applies the selector query and scans the result into the given value.
|
||||
func (_s *ChannelMonitorHistorySelect) Scan(ctx context.Context, v any) error {
|
||||
ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect)
|
||||
if err := _s.prepareQuery(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
return scanWithInterceptors[*ChannelMonitorHistoryQuery, *ChannelMonitorHistorySelect](ctx, _s.ChannelMonitorHistoryQuery, _s, _s.inters, v)
|
||||
}
|
||||
|
||||
func (_s *ChannelMonitorHistorySelect) sqlScan(ctx context.Context, root *ChannelMonitorHistoryQuery, v any) error {
|
||||
selector := root.sqlQuery(ctx)
|
||||
aggregation := make([]string, 0, len(_s.fns))
|
||||
for _, fn := range _s.fns {
|
||||
aggregation = append(aggregation, fn(selector))
|
||||
}
|
||||
switch n := len(*_s.selector.flds); {
|
||||
case n == 0 && len(aggregation) > 0:
|
||||
selector.Select(aggregation...)
|
||||
case n != 0 && len(aggregation) > 0:
|
||||
selector.AppendSelect(aggregation...)
|
||||
}
|
||||
rows := &sql.Rows{}
|
||||
query, args := selector.Query()
|
||||
if err := _s.driver.Query(ctx, query, args, rows); err != nil {
|
||||
return err
|
||||
}
|
||||
defer rows.Close()
|
||||
return sql.ScanSlice(rows, v)
|
||||
}
|
||||
635
backend/ent/channelmonitorhistory_update.go
Normal file
@@ -0,0 +1,635 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package ent
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"entgo.io/ent/schema/field"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitor"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitorhistory"
|
||||
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||
)
|
||||
|
||||
// ChannelMonitorHistoryUpdate is the builder for updating ChannelMonitorHistory entities.
|
||||
type ChannelMonitorHistoryUpdate struct {
|
||||
config
|
||||
hooks []Hook
|
||||
mutation *ChannelMonitorHistoryMutation
|
||||
}
|
||||
|
||||
// Where appends a list predicates to the ChannelMonitorHistoryUpdate builder.
|
||||
func (_u *ChannelMonitorHistoryUpdate) Where(ps ...predicate.ChannelMonitorHistory) *ChannelMonitorHistoryUpdate {
|
||||
_u.mutation.Where(ps...)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetMonitorID sets the "monitor_id" field.
|
||||
func (_u *ChannelMonitorHistoryUpdate) SetMonitorID(v int64) *ChannelMonitorHistoryUpdate {
|
||||
_u.mutation.SetMonitorID(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableMonitorID sets the "monitor_id" field if the given value is not nil.
|
||||
func (_u *ChannelMonitorHistoryUpdate) SetNillableMonitorID(v *int64) *ChannelMonitorHistoryUpdate {
|
||||
if v != nil {
|
||||
_u.SetMonitorID(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetModel sets the "model" field.
|
||||
func (_u *ChannelMonitorHistoryUpdate) SetModel(v string) *ChannelMonitorHistoryUpdate {
|
||||
_u.mutation.SetModel(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableModel sets the "model" field if the given value is not nil.
|
||||
func (_u *ChannelMonitorHistoryUpdate) SetNillableModel(v *string) *ChannelMonitorHistoryUpdate {
|
||||
if v != nil {
|
||||
_u.SetModel(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetStatus sets the "status" field.
|
||||
func (_u *ChannelMonitorHistoryUpdate) SetStatus(v channelmonitorhistory.Status) *ChannelMonitorHistoryUpdate {
|
||||
_u.mutation.SetStatus(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableStatus sets the "status" field if the given value is not nil.
|
||||
func (_u *ChannelMonitorHistoryUpdate) SetNillableStatus(v *channelmonitorhistory.Status) *ChannelMonitorHistoryUpdate {
|
||||
if v != nil {
|
||||
_u.SetStatus(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetLatencyMs sets the "latency_ms" field.
|
||||
func (_u *ChannelMonitorHistoryUpdate) SetLatencyMs(v int) *ChannelMonitorHistoryUpdate {
|
||||
_u.mutation.ResetLatencyMs()
|
||||
_u.mutation.SetLatencyMs(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableLatencyMs sets the "latency_ms" field if the given value is not nil.
|
||||
func (_u *ChannelMonitorHistoryUpdate) SetNillableLatencyMs(v *int) *ChannelMonitorHistoryUpdate {
|
||||
if v != nil {
|
||||
_u.SetLatencyMs(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// AddLatencyMs adds value to the "latency_ms" field.
|
||||
func (_u *ChannelMonitorHistoryUpdate) AddLatencyMs(v int) *ChannelMonitorHistoryUpdate {
|
||||
_u.mutation.AddLatencyMs(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// ClearLatencyMs clears the value of the "latency_ms" field.
|
||||
func (_u *ChannelMonitorHistoryUpdate) ClearLatencyMs() *ChannelMonitorHistoryUpdate {
|
||||
_u.mutation.ClearLatencyMs()
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetPingLatencyMs sets the "ping_latency_ms" field.
|
||||
func (_u *ChannelMonitorHistoryUpdate) SetPingLatencyMs(v int) *ChannelMonitorHistoryUpdate {
|
||||
_u.mutation.ResetPingLatencyMs()
|
||||
_u.mutation.SetPingLatencyMs(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillablePingLatencyMs sets the "ping_latency_ms" field if the given value is not nil.
|
||||
func (_u *ChannelMonitorHistoryUpdate) SetNillablePingLatencyMs(v *int) *ChannelMonitorHistoryUpdate {
|
||||
if v != nil {
|
||||
_u.SetPingLatencyMs(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// AddPingLatencyMs adds value to the "ping_latency_ms" field.
|
||||
func (_u *ChannelMonitorHistoryUpdate) AddPingLatencyMs(v int) *ChannelMonitorHistoryUpdate {
|
||||
_u.mutation.AddPingLatencyMs(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// ClearPingLatencyMs clears the value of the "ping_latency_ms" field.
|
||||
func (_u *ChannelMonitorHistoryUpdate) ClearPingLatencyMs() *ChannelMonitorHistoryUpdate {
|
||||
_u.mutation.ClearPingLatencyMs()
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetMessage sets the "message" field.
|
||||
func (_u *ChannelMonitorHistoryUpdate) SetMessage(v string) *ChannelMonitorHistoryUpdate {
|
||||
_u.mutation.SetMessage(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableMessage sets the "message" field if the given value is not nil.
|
||||
func (_u *ChannelMonitorHistoryUpdate) SetNillableMessage(v *string) *ChannelMonitorHistoryUpdate {
|
||||
if v != nil {
|
||||
_u.SetMessage(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// ClearMessage clears the value of the "message" field.
|
||||
func (_u *ChannelMonitorHistoryUpdate) ClearMessage() *ChannelMonitorHistoryUpdate {
|
||||
_u.mutation.ClearMessage()
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetCheckedAt sets the "checked_at" field.
|
||||
func (_u *ChannelMonitorHistoryUpdate) SetCheckedAt(v time.Time) *ChannelMonitorHistoryUpdate {
|
||||
_u.mutation.SetCheckedAt(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableCheckedAt sets the "checked_at" field if the given value is not nil.
|
||||
func (_u *ChannelMonitorHistoryUpdate) SetNillableCheckedAt(v *time.Time) *ChannelMonitorHistoryUpdate {
|
||||
if v != nil {
|
||||
_u.SetCheckedAt(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetMonitor sets the "monitor" edge to the ChannelMonitor entity.
|
||||
func (_u *ChannelMonitorHistoryUpdate) SetMonitor(v *ChannelMonitor) *ChannelMonitorHistoryUpdate {
|
||||
return _u.SetMonitorID(v.ID)
|
||||
}
|
||||
|
||||
// Mutation returns the ChannelMonitorHistoryMutation object of the builder.
|
||||
func (_u *ChannelMonitorHistoryUpdate) Mutation() *ChannelMonitorHistoryMutation {
|
||||
return _u.mutation
|
||||
}
|
||||
|
||||
// ClearMonitor clears the "monitor" edge to the ChannelMonitor entity.
|
||||
func (_u *ChannelMonitorHistoryUpdate) ClearMonitor() *ChannelMonitorHistoryUpdate {
|
||||
_u.mutation.ClearMonitor()
|
||||
return _u
|
||||
}
|
||||
|
||||
// Save executes the query and returns the number of nodes affected by the update operation.
|
||||
func (_u *ChannelMonitorHistoryUpdate) Save(ctx context.Context) (int, error) {
|
||||
return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks)
|
||||
}
|
||||
|
||||
// SaveX is like Save, but panics if an error occurs.
|
||||
func (_u *ChannelMonitorHistoryUpdate) SaveX(ctx context.Context) int {
|
||||
affected, err := _u.Save(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return affected
|
||||
}
|
||||
|
||||
// Exec executes the query.
|
||||
func (_u *ChannelMonitorHistoryUpdate) Exec(ctx context.Context) error {
|
||||
_, err := _u.Save(ctx)
|
||||
return err
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (_u *ChannelMonitorHistoryUpdate) ExecX(ctx context.Context) {
|
||||
if err := _u.Exec(ctx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// check runs all checks and user-defined validators on the builder.
|
||||
func (_u *ChannelMonitorHistoryUpdate) check() error {
|
||||
if v, ok := _u.mutation.Model(); ok {
|
||||
if err := channelmonitorhistory.ModelValidator(v); err != nil {
|
||||
return &ValidationError{Name: "model", err: fmt.Errorf(`ent: validator failed for field "ChannelMonitorHistory.model": %w`, err)}
|
||||
}
|
||||
}
|
||||
if v, ok := _u.mutation.Status(); ok {
|
||||
if err := channelmonitorhistory.StatusValidator(v); err != nil {
|
||||
return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "ChannelMonitorHistory.status": %w`, err)}
|
||||
}
|
||||
}
|
||||
if v, ok := _u.mutation.Message(); ok {
|
||||
if err := channelmonitorhistory.MessageValidator(v); err != nil {
|
||||
return &ValidationError{Name: "message", err: fmt.Errorf(`ent: validator failed for field "ChannelMonitorHistory.message": %w`, err)}
|
||||
}
|
||||
}
|
||||
if _u.mutation.MonitorCleared() && len(_u.mutation.MonitorIDs()) > 0 {
|
||||
return errors.New(`ent: clearing a required unique edge "ChannelMonitorHistory.monitor"`)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (_u *ChannelMonitorHistoryUpdate) sqlSave(ctx context.Context) (_node int, err error) {
|
||||
if err := _u.check(); err != nil {
|
||||
return _node, err
|
||||
}
|
||||
_spec := sqlgraph.NewUpdateSpec(channelmonitorhistory.Table, channelmonitorhistory.Columns, sqlgraph.NewFieldSpec(channelmonitorhistory.FieldID, field.TypeInt64))
|
||||
if ps := _u.mutation.predicates; len(ps) > 0 {
|
||||
_spec.Predicate = func(selector *sql.Selector) {
|
||||
for i := range ps {
|
||||
ps[i](selector)
|
||||
}
|
||||
}
|
||||
}
|
||||
if value, ok := _u.mutation.Model(); ok {
|
||||
_spec.SetField(channelmonitorhistory.FieldModel, field.TypeString, value)
|
||||
}
|
||||
if value, ok := _u.mutation.Status(); ok {
|
||||
_spec.SetField(channelmonitorhistory.FieldStatus, field.TypeEnum, value)
|
||||
}
|
||||
if value, ok := _u.mutation.LatencyMs(); ok {
|
||||
_spec.SetField(channelmonitorhistory.FieldLatencyMs, field.TypeInt, value)
|
||||
}
|
||||
if value, ok := _u.mutation.AddedLatencyMs(); ok {
|
||||
_spec.AddField(channelmonitorhistory.FieldLatencyMs, field.TypeInt, value)
|
||||
}
|
||||
if _u.mutation.LatencyMsCleared() {
|
||||
_spec.ClearField(channelmonitorhistory.FieldLatencyMs, field.TypeInt)
|
||||
}
|
||||
if value, ok := _u.mutation.PingLatencyMs(); ok {
|
||||
_spec.SetField(channelmonitorhistory.FieldPingLatencyMs, field.TypeInt, value)
|
||||
}
|
||||
if value, ok := _u.mutation.AddedPingLatencyMs(); ok {
|
||||
_spec.AddField(channelmonitorhistory.FieldPingLatencyMs, field.TypeInt, value)
|
||||
}
|
||||
if _u.mutation.PingLatencyMsCleared() {
|
||||
_spec.ClearField(channelmonitorhistory.FieldPingLatencyMs, field.TypeInt)
|
||||
}
|
||||
if value, ok := _u.mutation.Message(); ok {
|
||||
_spec.SetField(channelmonitorhistory.FieldMessage, field.TypeString, value)
|
||||
}
|
||||
if _u.mutation.MessageCleared() {
|
||||
_spec.ClearField(channelmonitorhistory.FieldMessage, field.TypeString)
|
||||
}
|
||||
if value, ok := _u.mutation.CheckedAt(); ok {
|
||||
_spec.SetField(channelmonitorhistory.FieldCheckedAt, field.TypeTime, value)
|
||||
}
|
||||
if _u.mutation.MonitorCleared() {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
Rel: sqlgraph.M2O,
|
||||
Inverse: true,
|
||||
Table: channelmonitorhistory.MonitorTable,
|
||||
Columns: []string{channelmonitorhistory.MonitorColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: sqlgraph.NewFieldSpec(channelmonitor.FieldID, field.TypeInt64),
|
||||
},
|
||||
}
|
||||
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||
}
|
||||
if nodes := _u.mutation.MonitorIDs(); len(nodes) > 0 {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
Rel: sqlgraph.M2O,
|
||||
Inverse: true,
|
||||
Table: channelmonitorhistory.MonitorTable,
|
||||
Columns: []string{channelmonitorhistory.MonitorColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: sqlgraph.NewFieldSpec(channelmonitor.FieldID, field.TypeInt64),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
edge.Target.Nodes = append(edge.Target.Nodes, k)
|
||||
}
|
||||
_spec.Edges.Add = append(_spec.Edges.Add, edge)
|
||||
}
|
||||
if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil {
|
||||
if _, ok := err.(*sqlgraph.NotFoundError); ok {
|
||||
err = &NotFoundError{channelmonitorhistory.Label}
|
||||
} else if sqlgraph.IsConstraintError(err) {
|
||||
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||
}
|
||||
return 0, err
|
||||
}
|
||||
_u.mutation.done = true
|
||||
return _node, nil
|
||||
}
|
||||
|
||||
// ChannelMonitorHistoryUpdateOne is the builder for updating a single ChannelMonitorHistory entity.
|
||||
type ChannelMonitorHistoryUpdateOne struct {
|
||||
config
|
||||
fields []string
|
||||
hooks []Hook
|
||||
mutation *ChannelMonitorHistoryMutation
|
||||
}
|
||||
|
||||
// SetMonitorID sets the "monitor_id" field.
|
||||
func (_u *ChannelMonitorHistoryUpdateOne) SetMonitorID(v int64) *ChannelMonitorHistoryUpdateOne {
|
||||
_u.mutation.SetMonitorID(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableMonitorID sets the "monitor_id" field if the given value is not nil.
|
||||
func (_u *ChannelMonitorHistoryUpdateOne) SetNillableMonitorID(v *int64) *ChannelMonitorHistoryUpdateOne {
|
||||
if v != nil {
|
||||
_u.SetMonitorID(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetModel sets the "model" field.
|
||||
func (_u *ChannelMonitorHistoryUpdateOne) SetModel(v string) *ChannelMonitorHistoryUpdateOne {
|
||||
_u.mutation.SetModel(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableModel sets the "model" field if the given value is not nil.
|
||||
func (_u *ChannelMonitorHistoryUpdateOne) SetNillableModel(v *string) *ChannelMonitorHistoryUpdateOne {
|
||||
if v != nil {
|
||||
_u.SetModel(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetStatus sets the "status" field.
|
||||
func (_u *ChannelMonitorHistoryUpdateOne) SetStatus(v channelmonitorhistory.Status) *ChannelMonitorHistoryUpdateOne {
|
||||
_u.mutation.SetStatus(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableStatus sets the "status" field if the given value is not nil.
|
||||
func (_u *ChannelMonitorHistoryUpdateOne) SetNillableStatus(v *channelmonitorhistory.Status) *ChannelMonitorHistoryUpdateOne {
|
||||
if v != nil {
|
||||
_u.SetStatus(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetLatencyMs sets the "latency_ms" field.
|
||||
func (_u *ChannelMonitorHistoryUpdateOne) SetLatencyMs(v int) *ChannelMonitorHistoryUpdateOne {
|
||||
_u.mutation.ResetLatencyMs()
|
||||
_u.mutation.SetLatencyMs(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableLatencyMs sets the "latency_ms" field if the given value is not nil.
|
||||
func (_u *ChannelMonitorHistoryUpdateOne) SetNillableLatencyMs(v *int) *ChannelMonitorHistoryUpdateOne {
|
||||
if v != nil {
|
||||
_u.SetLatencyMs(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// AddLatencyMs adds value to the "latency_ms" field.
|
||||
func (_u *ChannelMonitorHistoryUpdateOne) AddLatencyMs(v int) *ChannelMonitorHistoryUpdateOne {
|
||||
_u.mutation.AddLatencyMs(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// ClearLatencyMs clears the value of the "latency_ms" field.
|
||||
func (_u *ChannelMonitorHistoryUpdateOne) ClearLatencyMs() *ChannelMonitorHistoryUpdateOne {
|
||||
_u.mutation.ClearLatencyMs()
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetPingLatencyMs sets the "ping_latency_ms" field.
|
||||
func (_u *ChannelMonitorHistoryUpdateOne) SetPingLatencyMs(v int) *ChannelMonitorHistoryUpdateOne {
|
||||
_u.mutation.ResetPingLatencyMs()
|
||||
_u.mutation.SetPingLatencyMs(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillablePingLatencyMs sets the "ping_latency_ms" field if the given value is not nil.
|
||||
func (_u *ChannelMonitorHistoryUpdateOne) SetNillablePingLatencyMs(v *int) *ChannelMonitorHistoryUpdateOne {
|
||||
if v != nil {
|
||||
_u.SetPingLatencyMs(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// AddPingLatencyMs adds value to the "ping_latency_ms" field.
|
||||
func (_u *ChannelMonitorHistoryUpdateOne) AddPingLatencyMs(v int) *ChannelMonitorHistoryUpdateOne {
|
||||
_u.mutation.AddPingLatencyMs(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// ClearPingLatencyMs clears the value of the "ping_latency_ms" field.
|
||||
func (_u *ChannelMonitorHistoryUpdateOne) ClearPingLatencyMs() *ChannelMonitorHistoryUpdateOne {
|
||||
_u.mutation.ClearPingLatencyMs()
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetMessage sets the "message" field.
|
||||
func (_u *ChannelMonitorHistoryUpdateOne) SetMessage(v string) *ChannelMonitorHistoryUpdateOne {
|
||||
_u.mutation.SetMessage(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableMessage sets the "message" field if the given value is not nil.
|
||||
func (_u *ChannelMonitorHistoryUpdateOne) SetNillableMessage(v *string) *ChannelMonitorHistoryUpdateOne {
|
||||
if v != nil {
|
||||
_u.SetMessage(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// ClearMessage clears the value of the "message" field.
|
||||
func (_u *ChannelMonitorHistoryUpdateOne) ClearMessage() *ChannelMonitorHistoryUpdateOne {
|
||||
_u.mutation.ClearMessage()
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetCheckedAt sets the "checked_at" field.
|
||||
func (_u *ChannelMonitorHistoryUpdateOne) SetCheckedAt(v time.Time) *ChannelMonitorHistoryUpdateOne {
|
||||
_u.mutation.SetCheckedAt(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableCheckedAt sets the "checked_at" field if the given value is not nil.
|
||||
func (_u *ChannelMonitorHistoryUpdateOne) SetNillableCheckedAt(v *time.Time) *ChannelMonitorHistoryUpdateOne {
|
||||
if v != nil {
|
||||
_u.SetCheckedAt(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetMonitor sets the "monitor" edge to the ChannelMonitor entity.
|
||||
func (_u *ChannelMonitorHistoryUpdateOne) SetMonitor(v *ChannelMonitor) *ChannelMonitorHistoryUpdateOne {
|
||||
return _u.SetMonitorID(v.ID)
|
||||
}
|
||||
|
||||
// Mutation returns the ChannelMonitorHistoryMutation object of the builder.
|
||||
func (_u *ChannelMonitorHistoryUpdateOne) Mutation() *ChannelMonitorHistoryMutation {
|
||||
return _u.mutation
|
||||
}
|
||||
|
||||
// ClearMonitor clears the "monitor" edge to the ChannelMonitor entity.
|
||||
func (_u *ChannelMonitorHistoryUpdateOne) ClearMonitor() *ChannelMonitorHistoryUpdateOne {
|
||||
_u.mutation.ClearMonitor()
|
||||
return _u
|
||||
}
|
||||
|
||||
// Where appends a list predicates to the ChannelMonitorHistoryUpdate builder.
|
||||
func (_u *ChannelMonitorHistoryUpdateOne) Where(ps ...predicate.ChannelMonitorHistory) *ChannelMonitorHistoryUpdateOne {
|
||||
_u.mutation.Where(ps...)
|
||||
return _u
|
||||
}
|
||||
|
||||
// Select allows selecting one or more fields (columns) of the returned entity.
// The default is selecting all fields defined in the entity schema.
func (_u *ChannelMonitorHistoryUpdateOne) Select(field string, fields ...string) *ChannelMonitorHistoryUpdateOne {
	// At least one field is required by the signature; the rest are variadic.
	_u.fields = append([]string{field}, fields...)
	return _u
}
|
||||
|
||||
// Save executes the query and returns the updated ChannelMonitorHistory entity.
// Registered hooks run around sqlSave via withHooks.
func (_u *ChannelMonitorHistoryUpdateOne) Save(ctx context.Context) (*ChannelMonitorHistory, error) {
	return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks)
}
|
||||
|
||||
// SaveX is like Save, but panics if an error occurs.
// Intended for code paths where an update failure is a programming error.
func (_u *ChannelMonitorHistoryUpdateOne) SaveX(ctx context.Context) *ChannelMonitorHistory {
	node, err := _u.Save(ctx)
	if err != nil {
		panic(err)
	}
	return node
}
|
||||
|
||||
// Exec executes the query on the entity.
// Like Save, but discards the updated entity and returns only the error.
func (_u *ChannelMonitorHistoryUpdateOne) Exec(ctx context.Context) error {
	_, err := _u.Save(ctx)
	return err
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
func (_u *ChannelMonitorHistoryUpdateOne) ExecX(ctx context.Context) {
	if err := _u.Exec(ctx); err != nil {
		panic(err)
	}
}
|
||||
|
||||
// check runs all checks and user-defined validators on the builder.
// Field validators run only for fields that were actually set on the
// mutation; a zero mutation passes trivially.
func (_u *ChannelMonitorHistoryUpdateOne) check() error {
	if v, ok := _u.mutation.Model(); ok {
		if err := channelmonitorhistory.ModelValidator(v); err != nil {
			return &ValidationError{Name: "model", err: fmt.Errorf(`ent: validator failed for field "ChannelMonitorHistory.model": %w`, err)}
		}
	}
	if v, ok := _u.mutation.Status(); ok {
		if err := channelmonitorhistory.StatusValidator(v); err != nil {
			return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "ChannelMonitorHistory.status": %w`, err)}
		}
	}
	if v, ok := _u.mutation.Message(); ok {
		if err := channelmonitorhistory.MessageValidator(v); err != nil {
			return &ValidationError{Name: "message", err: fmt.Errorf(`ent: validator failed for field "ChannelMonitorHistory.message": %w`, err)}
		}
	}
	// "monitor" is a required unique edge: it may be cleared only as part of
	// re-assignment (i.e. when new monitor IDs are staged in the same mutation).
	if _u.mutation.MonitorCleared() && len(_u.mutation.MonitorIDs()) > 0 {
		return errors.New(`ent: clearing a required unique edge "ChannelMonitorHistory.monitor"`)
	}
	return nil
}
|
||||
|
||||
// sqlSave validates the builder, assembles an sqlgraph update spec from the
// staged mutation (fields, predicates, and the "monitor" edge), executes the
// UPDATE, and returns the refreshed entity.
func (_u *ChannelMonitorHistoryUpdateOne) sqlSave(ctx context.Context) (_node *ChannelMonitorHistory, err error) {
	if err := _u.check(); err != nil {
		return _node, err
	}
	_spec := sqlgraph.NewUpdateSpec(channelmonitorhistory.Table, channelmonitorhistory.Columns, sqlgraph.NewFieldSpec(channelmonitorhistory.FieldID, field.TypeInt64))
	// UpdateOne requires a concrete entity ID on the mutation.
	id, ok := _u.mutation.ID()
	if !ok {
		return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "ChannelMonitorHistory.id" for update`)}
	}
	_spec.Node.ID.Value = id
	// Honor Select(): restrict returned columns, always keeping the ID column.
	if fields := _u.fields; len(fields) > 0 {
		_spec.Node.Columns = make([]string, 0, len(fields))
		_spec.Node.Columns = append(_spec.Node.Columns, channelmonitorhistory.FieldID)
		for _, f := range fields {
			if !channelmonitorhistory.ValidColumn(f) {
				return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
			}
			if f != channelmonitorhistory.FieldID {
				_spec.Node.Columns = append(_spec.Node.Columns, f)
			}
		}
	}
	// Apply Where() predicates to the UPDATE statement.
	if ps := _u.mutation.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	// Scalar field updates: Set / Add (numeric increment) / Clear per field.
	if value, ok := _u.mutation.Model(); ok {
		_spec.SetField(channelmonitorhistory.FieldModel, field.TypeString, value)
	}
	if value, ok := _u.mutation.Status(); ok {
		_spec.SetField(channelmonitorhistory.FieldStatus, field.TypeEnum, value)
	}
	if value, ok := _u.mutation.LatencyMs(); ok {
		_spec.SetField(channelmonitorhistory.FieldLatencyMs, field.TypeInt, value)
	}
	if value, ok := _u.mutation.AddedLatencyMs(); ok {
		_spec.AddField(channelmonitorhistory.FieldLatencyMs, field.TypeInt, value)
	}
	if _u.mutation.LatencyMsCleared() {
		_spec.ClearField(channelmonitorhistory.FieldLatencyMs, field.TypeInt)
	}
	if value, ok := _u.mutation.PingLatencyMs(); ok {
		_spec.SetField(channelmonitorhistory.FieldPingLatencyMs, field.TypeInt, value)
	}
	if value, ok := _u.mutation.AddedPingLatencyMs(); ok {
		_spec.AddField(channelmonitorhistory.FieldPingLatencyMs, field.TypeInt, value)
	}
	if _u.mutation.PingLatencyMsCleared() {
		_spec.ClearField(channelmonitorhistory.FieldPingLatencyMs, field.TypeInt)
	}
	if value, ok := _u.mutation.Message(); ok {
		_spec.SetField(channelmonitorhistory.FieldMessage, field.TypeString, value)
	}
	if _u.mutation.MessageCleared() {
		_spec.ClearField(channelmonitorhistory.FieldMessage, field.TypeString)
	}
	if value, ok := _u.mutation.CheckedAt(); ok {
		_spec.SetField(channelmonitorhistory.FieldCheckedAt, field.TypeTime, value)
	}
	// "monitor" edge (M2O, inverse): clear the old link, then add the new one.
	if _u.mutation.MonitorCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: true,
			Table:   channelmonitorhistory.MonitorTable,
			Columns: []string{channelmonitorhistory.MonitorColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(channelmonitor.FieldID, field.TypeInt64),
			},
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	if nodes := _u.mutation.MonitorIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: true,
			Table:   channelmonitorhistory.MonitorTable,
			Columns: []string{channelmonitorhistory.MonitorColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(channelmonitor.FieldID, field.TypeInt64),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Add = append(_spec.Edges.Add, edge)
	}
	// The returned entity is scanned back from the database via the spec's
	// ScanValues/Assign callbacks.
	_node = &ChannelMonitorHistory{config: _u.config}
	_spec.Assign = _node.assignValues
	_spec.ScanValues = _node.scanValues
	if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil {
		// Translate sqlgraph errors into this package's error types.
		if _, ok := err.(*sqlgraph.NotFoundError); ok {
			err = &NotFoundError{channelmonitorhistory.Label}
		} else if sqlgraph.IsConstraintError(err) {
			err = &ConstraintError{msg: err.Error(), wrap: err}
		}
		return nil, err
	}
	_u.mutation.done = true
	return _node, nil
}
|
||||
216
backend/ent/channelmonitorrequesttemplate.go
Normal file
@@ -0,0 +1,216 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package ent
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"entgo.io/ent"
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitorrequesttemplate"
|
||||
)
|
||||
|
||||
// ChannelMonitorRequestTemplate is the model entity for the ChannelMonitorRequestTemplate schema.
type ChannelMonitorRequestTemplate struct {
	config `json:"-"`
	// ID of the ent.
	ID int64 `json:"id,omitempty"`
	// CreatedAt holds the value of the "created_at" field.
	CreatedAt time.Time `json:"created_at,omitempty"`
	// UpdatedAt holds the value of the "updated_at" field.
	UpdatedAt time.Time `json:"updated_at,omitempty"`
	// Name holds the value of the "name" field.
	Name string `json:"name,omitempty"`
	// Provider holds the value of the "provider" field.
	Provider channelmonitorrequesttemplate.Provider `json:"provider,omitempty"`
	// Description holds the value of the "description" field.
	Description string `json:"description,omitempty"`
	// ExtraHeaders holds the value of the "extra_headers" field.
	// Stored as JSON in the database (see scanValues/assignValues).
	ExtraHeaders map[string]string `json:"extra_headers,omitempty"`
	// BodyOverrideMode holds the value of the "body_override_mode" field.
	BodyOverrideMode string `json:"body_override_mode,omitempty"`
	// BodyOverride holds the value of the "body_override" field.
	// Stored as JSON in the database (see scanValues/assignValues).
	BodyOverride map[string]interface{} `json:"body_override,omitempty"`
	// Edges holds the relations/edges for other nodes in the graph.
	// The values are being populated by the ChannelMonitorRequestTemplateQuery when eager-loading is set.
	Edges ChannelMonitorRequestTemplateEdges `json:"edges"`
	// selectValues holds values selected via modifiers/custom columns.
	selectValues sql.SelectValues
}
|
||||
|
||||
// ChannelMonitorRequestTemplateEdges holds the relations/edges for other nodes in the graph.
type ChannelMonitorRequestTemplateEdges struct {
	// Monitors holds the value of the monitors edge.
	Monitors []*ChannelMonitor `json:"monitors,omitempty"`
	// loadedTypes holds the information for reporting if a
	// type was loaded (or requested) in eager-loading or not.
	// Index 0 corresponds to the "monitors" edge.
	loadedTypes [1]bool
}
|
||||
|
||||
// MonitorsOrErr returns the Monitors value or an error if the edge
// was not loaded in eager-loading.
func (e ChannelMonitorRequestTemplateEdges) MonitorsOrErr() ([]*ChannelMonitor, error) {
	// loadedTypes[0] is set when the "monitors" edge was eager-loaded.
	if e.loadedTypes[0] {
		return e.Monitors, nil
	}
	return nil, &NotLoadedError{edge: "monitors"}
}
|
||||
|
||||
// scanValues returns the types for scanning values from sql.Rows.
// Each column gets a destination matching its database representation:
// JSON columns scan into *[]byte, scalars into the sql.Null* wrappers, and
// unknown columns into sql.UnknownType (resolved later via selectValues).
func (*ChannelMonitorRequestTemplate) scanValues(columns []string) ([]any, error) {
	values := make([]any, len(columns))
	for i := range columns {
		switch columns[i] {
		case channelmonitorrequesttemplate.FieldExtraHeaders, channelmonitorrequesttemplate.FieldBodyOverride:
			values[i] = new([]byte)
		case channelmonitorrequesttemplate.FieldID:
			values[i] = new(sql.NullInt64)
		case channelmonitorrequesttemplate.FieldName, channelmonitorrequesttemplate.FieldProvider, channelmonitorrequesttemplate.FieldDescription, channelmonitorrequesttemplate.FieldBodyOverrideMode:
			values[i] = new(sql.NullString)
		case channelmonitorrequesttemplate.FieldCreatedAt, channelmonitorrequesttemplate.FieldUpdatedAt:
			values[i] = new(sql.NullTime)
		default:
			values[i] = new(sql.UnknownType)
		}
	}
	return values, nil
}
|
||||
|
||||
// assignValues assigns the values that were returned from sql.Rows (after scanning)
// to the ChannelMonitorRequestTemplate fields.
// columns and values are parallel slices; values holds the destinations
// produced by scanValues for the same column order.
func (_m *ChannelMonitorRequestTemplate) assignValues(columns []string, values []any) error {
	if m, n := len(values), len(columns); m < n {
		return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
	}
	for i := range columns {
		switch columns[i] {
		case channelmonitorrequesttemplate.FieldID:
			value, ok := values[i].(*sql.NullInt64)
			if !ok {
				return fmt.Errorf("unexpected type %T for field id", value)
			}
			_m.ID = int64(value.Int64)
		case channelmonitorrequesttemplate.FieldCreatedAt:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field created_at", values[i])
			} else if value.Valid {
				_m.CreatedAt = value.Time
			}
		case channelmonitorrequesttemplate.FieldUpdatedAt:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field updated_at", values[i])
			} else if value.Valid {
				_m.UpdatedAt = value.Time
			}
		case channelmonitorrequesttemplate.FieldName:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field name", values[i])
			} else if value.Valid {
				_m.Name = value.String
			}
		case channelmonitorrequesttemplate.FieldProvider:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field provider", values[i])
			} else if value.Valid {
				_m.Provider = channelmonitorrequesttemplate.Provider(value.String)
			}
		case channelmonitorrequesttemplate.FieldDescription:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field description", values[i])
			} else if value.Valid {
				_m.Description = value.String
			}
		case channelmonitorrequesttemplate.FieldExtraHeaders:
			// JSON column: empty/NULL payloads leave the map nil.
			if value, ok := values[i].(*[]byte); !ok {
				return fmt.Errorf("unexpected type %T for field extra_headers", values[i])
			} else if value != nil && len(*value) > 0 {
				if err := json.Unmarshal(*value, &_m.ExtraHeaders); err != nil {
					return fmt.Errorf("unmarshal field extra_headers: %w", err)
				}
			}
		case channelmonitorrequesttemplate.FieldBodyOverrideMode:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field body_override_mode", values[i])
			} else if value.Valid {
				_m.BodyOverrideMode = value.String
			}
		case channelmonitorrequesttemplate.FieldBodyOverride:
			// JSON column: empty/NULL payloads leave the map nil.
			if value, ok := values[i].(*[]byte); !ok {
				return fmt.Errorf("unexpected type %T for field body_override", values[i])
			} else if value != nil && len(*value) > 0 {
				if err := json.Unmarshal(*value, &_m.BodyOverride); err != nil {
					return fmt.Errorf("unmarshal field body_override: %w", err)
				}
			}
		default:
			// Columns selected via modifiers land in selectValues (see Value).
			_m.selectValues.Set(columns[i], values[i])
		}
	}
	return nil
}
|
||||
|
||||
// Value returns the ent.Value that was dynamically selected and assigned to the ChannelMonitorRequestTemplate.
// This includes values selected through modifiers, order, etc.
func (_m *ChannelMonitorRequestTemplate) Value(name string) (ent.Value, error) {
	return _m.selectValues.Get(name)
}
|
||||
|
||||
// QueryMonitors queries the "monitors" edge of the ChannelMonitorRequestTemplate entity.
func (_m *ChannelMonitorRequestTemplate) QueryMonitors() *ChannelMonitorQuery {
	return NewChannelMonitorRequestTemplateClient(_m.config).QueryMonitors(_m)
}
|
||||
|
||||
// Update returns a builder for updating this ChannelMonitorRequestTemplate.
// Note that you need to call ChannelMonitorRequestTemplate.Unwrap() before calling this method if this ChannelMonitorRequestTemplate
// was returned from a transaction, and the transaction was committed or rolled back.
func (_m *ChannelMonitorRequestTemplate) Update() *ChannelMonitorRequestTemplateUpdateOne {
	return NewChannelMonitorRequestTemplateClient(_m.config).UpdateOne(_m)
}
|
||||
|
||||
// Unwrap unwraps the ChannelMonitorRequestTemplate entity that was returned from a transaction after it was closed,
// so that all future queries will be executed through the driver which created the transaction.
func (_m *ChannelMonitorRequestTemplate) Unwrap() *ChannelMonitorRequestTemplate {
	_tx, ok := _m.config.driver.(*txDriver)
	if !ok {
		// Calling Unwrap on a non-transactional entity is a programmer error.
		panic("ent: ChannelMonitorRequestTemplate is not a transactional entity")
	}
	_m.config.driver = _tx.drv
	return _m
}
|
||||
|
||||
// String implements the fmt.Stringer.
// It renders all scalar fields in schema order for debugging/logging.
func (_m *ChannelMonitorRequestTemplate) String() string {
	var builder strings.Builder
	builder.WriteString("ChannelMonitorRequestTemplate(")
	builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID))
	builder.WriteString("created_at=")
	builder.WriteString(_m.CreatedAt.Format(time.ANSIC))
	builder.WriteString(", ")
	builder.WriteString("updated_at=")
	builder.WriteString(_m.UpdatedAt.Format(time.ANSIC))
	builder.WriteString(", ")
	builder.WriteString("name=")
	builder.WriteString(_m.Name)
	builder.WriteString(", ")
	builder.WriteString("provider=")
	builder.WriteString(fmt.Sprintf("%v", _m.Provider))
	builder.WriteString(", ")
	builder.WriteString("description=")
	builder.WriteString(_m.Description)
	builder.WriteString(", ")
	builder.WriteString("extra_headers=")
	builder.WriteString(fmt.Sprintf("%v", _m.ExtraHeaders))
	builder.WriteString(", ")
	builder.WriteString("body_override_mode=")
	builder.WriteString(_m.BodyOverrideMode)
	builder.WriteString(", ")
	builder.WriteString("body_override=")
	builder.WriteString(fmt.Sprintf("%v", _m.BodyOverride))
	builder.WriteByte(')')
	return builder.String()
}
|
||||
|
||||
// ChannelMonitorRequestTemplates is a parsable slice of ChannelMonitorRequestTemplate.
type ChannelMonitorRequestTemplates []*ChannelMonitorRequestTemplate
|
||||
@@ -0,0 +1,172 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package channelmonitorrequesttemplate
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
)
|
||||
|
||||
// Schema metadata for the channelmonitorrequesttemplate table: field/edge
// names as they appear in the database, plus edge table/column mappings.
const (
	// Label holds the string label denoting the channelmonitorrequesttemplate type in the database.
	Label = "channel_monitor_request_template"
	// FieldID holds the string denoting the id field in the database.
	FieldID = "id"
	// FieldCreatedAt holds the string denoting the created_at field in the database.
	FieldCreatedAt = "created_at"
	// FieldUpdatedAt holds the string denoting the updated_at field in the database.
	FieldUpdatedAt = "updated_at"
	// FieldName holds the string denoting the name field in the database.
	FieldName = "name"
	// FieldProvider holds the string denoting the provider field in the database.
	FieldProvider = "provider"
	// FieldDescription holds the string denoting the description field in the database.
	FieldDescription = "description"
	// FieldExtraHeaders holds the string denoting the extra_headers field in the database.
	FieldExtraHeaders = "extra_headers"
	// FieldBodyOverrideMode holds the string denoting the body_override_mode field in the database.
	FieldBodyOverrideMode = "body_override_mode"
	// FieldBodyOverride holds the string denoting the body_override field in the database.
	FieldBodyOverride = "body_override"
	// EdgeMonitors holds the string denoting the monitors edge name in mutations.
	EdgeMonitors = "monitors"
	// Table holds the table name of the channelmonitorrequesttemplate in the database.
	Table = "channel_monitor_request_templates"
	// MonitorsTable is the table that holds the monitors relation/edge.
	// The O2M edge is stored on the channel_monitors side (FK column below).
	MonitorsTable = "channel_monitors"
	// MonitorsInverseTable is the table name for the ChannelMonitor entity.
	// It exists in this package in order to avoid circular dependency with the "channelmonitor" package.
	MonitorsInverseTable = "channel_monitors"
	// MonitorsColumn is the table column denoting the monitors relation/edge.
	MonitorsColumn = "template_id"
)
|
||||
|
||||
// Columns holds all SQL columns for channelmonitorrequesttemplate fields.
var Columns = []string{
	FieldID,
	FieldCreatedAt,
	FieldUpdatedAt,
	FieldName,
	FieldProvider,
	FieldDescription,
	FieldExtraHeaders,
	FieldBodyOverrideMode,
	FieldBodyOverride,
}
|
||||
|
||||
// ValidColumn reports if the column name is valid (part of the table columns).
// Linear scan is fine here: the column set is small and fixed.
func ValidColumn(column string) bool {
	for i := range Columns {
		if column == Columns[i] {
			return true
		}
	}
	return false
}
|
||||
|
||||
// Runtime hooks populated from the schema by the generated runtime package;
// builders consult these defaults/validators before save.
var (
	// DefaultCreatedAt holds the default value on creation for the "created_at" field.
	DefaultCreatedAt func() time.Time
	// DefaultUpdatedAt holds the default value on creation for the "updated_at" field.
	DefaultUpdatedAt func() time.Time
	// UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field.
	UpdateDefaultUpdatedAt func() time.Time
	// NameValidator is a validator for the "name" field. It is called by the builders before save.
	NameValidator func(string) error
	// DefaultDescription holds the default value on creation for the "description" field.
	DefaultDescription string
	// DescriptionValidator is a validator for the "description" field. It is called by the builders before save.
	DescriptionValidator func(string) error
	// DefaultExtraHeaders holds the default value on creation for the "extra_headers" field.
	DefaultExtraHeaders map[string]string
	// DefaultBodyOverrideMode holds the default value on creation for the "body_override_mode" field.
	DefaultBodyOverrideMode string
	// BodyOverrideModeValidator is a validator for the "body_override_mode" field. It is called by the builders before save.
	BodyOverrideModeValidator func(string) error
)
|
||||
|
||||
// Provider defines the type for the "provider" enum field.
type Provider string

// Provider values.
const (
	ProviderOpenai    Provider = "openai"
	ProviderAnthropic Provider = "anthropic"
	ProviderGemini    Provider = "gemini"
)

// String returns the raw enum value, satisfying fmt.Stringer.
func (pr Provider) String() string {
	return string(pr)
}
|
||||
|
||||
// ProviderValidator is a validator for the "provider" field enum values. It is called by the builders before save.
func ProviderValidator(pr Provider) error {
	switch pr {
	case ProviderOpenai, ProviderAnthropic, ProviderGemini:
		return nil
	default:
		return fmt.Errorf("channelmonitorrequesttemplate: invalid enum value for provider field: %q", pr)
	}
}
|
||||
|
||||
// OrderOption defines the ordering options for the ChannelMonitorRequestTemplate queries.
type OrderOption func(*sql.Selector)

// ByID orders the results by the id field.
func ByID(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldID, opts...).ToFunc()
}

// ByCreatedAt orders the results by the created_at field.
func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
}

// ByUpdatedAt orders the results by the updated_at field.
func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc()
}

// ByName orders the results by the name field.
func ByName(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldName, opts...).ToFunc()
}

// ByProvider orders the results by the provider field.
func ByProvider(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldProvider, opts...).ToFunc()
}

// ByDescription orders the results by the description field.
func ByDescription(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldDescription, opts...).ToFunc()
}

// ByBodyOverrideMode orders the results by the body_override_mode field.
func ByBodyOverrideMode(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldBodyOverrideMode, opts...).ToFunc()
}

// ByMonitorsCount orders the results by monitors count.
func ByMonitorsCount(opts ...sql.OrderTermOption) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborsCount(s, newMonitorsStep(), opts...)
	}
}

// ByMonitors orders the results by monitors terms.
func ByMonitors(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborTerms(s, newMonitorsStep(), append([]sql.OrderTerm{term}, terms...)...)
	}
}

// newMonitorsStep builds the graph-traversal step for the "monitors" edge
// (O2M, inverse) used by the neighbor-ordering helpers above.
func newMonitorsStep() *sqlgraph.Step {
	return sqlgraph.NewStep(
		sqlgraph.From(Table, FieldID),
		sqlgraph.To(MonitorsInverseTable, FieldID),
		sqlgraph.Edge(sqlgraph.O2M, true, MonitorsTable, MonitorsColumn),
	)
}
|
||||
434
backend/ent/channelmonitorrequesttemplate/where.go
Normal file
@@ -0,0 +1,434 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package channelmonitorrequesttemplate
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||
)
|
||||
|
||||
// Predicates on the ID field. Each wraps an entsql field predicate in the
// typed predicate.ChannelMonitorRequestTemplate for use with Where().

// ID filters vertices based on their ID field.
func ID(id int64) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldEQ(FieldID, id))
}

// IDEQ applies the EQ predicate on the ID field.
func IDEQ(id int64) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldEQ(FieldID, id))
}

// IDNEQ applies the NEQ predicate on the ID field.
func IDNEQ(id int64) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldNEQ(FieldID, id))
}

// IDIn applies the In predicate on the ID field.
func IDIn(ids ...int64) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldIn(FieldID, ids...))
}

// IDNotIn applies the NotIn predicate on the ID field.
func IDNotIn(ids ...int64) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldNotIn(FieldID, ids...))
}

// IDGT applies the GT predicate on the ID field.
func IDGT(id int64) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldGT(FieldID, id))
}

// IDGTE applies the GTE predicate on the ID field.
func IDGTE(id int64) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldGTE(FieldID, id))
}

// IDLT applies the LT predicate on the ID field.
func IDLT(id int64) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldLT(FieldID, id))
}

// IDLTE applies the LTE predicate on the ID field.
func IDLTE(id int64) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldLTE(FieldID, id))
}
|
||||
|
||||
// Shorthand equality predicates: Field(v) is identical to FieldEQ(v).

// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ.
func CreatedAt(v time.Time) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldEQ(FieldCreatedAt, v))
}

// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ.
func UpdatedAt(v time.Time) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldEQ(FieldUpdatedAt, v))
}

// Name applies equality check predicate on the "name" field. It's identical to NameEQ.
func Name(v string) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldEQ(FieldName, v))
}

// Description applies equality check predicate on the "description" field. It's identical to DescriptionEQ.
func Description(v string) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldEQ(FieldDescription, v))
}

// BodyOverrideMode applies equality check predicate on the "body_override_mode" field. It's identical to BodyOverrideModeEQ.
func BodyOverrideMode(v string) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldEQ(FieldBodyOverrideMode, v))
}
|
||||
|
||||
// Comparison predicates for the "created_at" and "updated_at" timestamp fields.

// CreatedAtEQ applies the EQ predicate on the "created_at" field.
func CreatedAtEQ(v time.Time) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldEQ(FieldCreatedAt, v))
}

// CreatedAtNEQ applies the NEQ predicate on the "created_at" field.
func CreatedAtNEQ(v time.Time) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldNEQ(FieldCreatedAt, v))
}

// CreatedAtIn applies the In predicate on the "created_at" field.
func CreatedAtIn(vs ...time.Time) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldIn(FieldCreatedAt, vs...))
}

// CreatedAtNotIn applies the NotIn predicate on the "created_at" field.
func CreatedAtNotIn(vs ...time.Time) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldNotIn(FieldCreatedAt, vs...))
}

// CreatedAtGT applies the GT predicate on the "created_at" field.
func CreatedAtGT(v time.Time) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldGT(FieldCreatedAt, v))
}

// CreatedAtGTE applies the GTE predicate on the "created_at" field.
func CreatedAtGTE(v time.Time) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldGTE(FieldCreatedAt, v))
}

// CreatedAtLT applies the LT predicate on the "created_at" field.
func CreatedAtLT(v time.Time) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldLT(FieldCreatedAt, v))
}

// CreatedAtLTE applies the LTE predicate on the "created_at" field.
func CreatedAtLTE(v time.Time) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldLTE(FieldCreatedAt, v))
}

// UpdatedAtEQ applies the EQ predicate on the "updated_at" field.
func UpdatedAtEQ(v time.Time) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldEQ(FieldUpdatedAt, v))
}

// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field.
func UpdatedAtNEQ(v time.Time) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldNEQ(FieldUpdatedAt, v))
}

// UpdatedAtIn applies the In predicate on the "updated_at" field.
func UpdatedAtIn(vs ...time.Time) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldIn(FieldUpdatedAt, vs...))
}

// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field.
func UpdatedAtNotIn(vs ...time.Time) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldNotIn(FieldUpdatedAt, vs...))
}

// UpdatedAtGT applies the GT predicate on the "updated_at" field.
func UpdatedAtGT(v time.Time) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldGT(FieldUpdatedAt, v))
}

// UpdatedAtGTE applies the GTE predicate on the "updated_at" field.
func UpdatedAtGTE(v time.Time) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldGTE(FieldUpdatedAt, v))
}

// UpdatedAtLT applies the LT predicate on the "updated_at" field.
func UpdatedAtLT(v time.Time) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldLT(FieldUpdatedAt, v))
}

// UpdatedAtLTE applies the LTE predicate on the "updated_at" field.
func UpdatedAtLTE(v time.Time) predicate.ChannelMonitorRequestTemplate {
	return predicate.ChannelMonitorRequestTemplate(sql.FieldLTE(FieldUpdatedAt, v))
}
|
||||
|
||||
// NameEQ applies the EQ predicate on the "name" field.
|
||||
func NameEQ(v string) predicate.ChannelMonitorRequestTemplate {
|
||||
return predicate.ChannelMonitorRequestTemplate(sql.FieldEQ(FieldName, v))
|
||||
}
|
||||
|
||||
// NameNEQ applies the NEQ predicate on the "name" field.
|
||||
func NameNEQ(v string) predicate.ChannelMonitorRequestTemplate {
|
||||
return predicate.ChannelMonitorRequestTemplate(sql.FieldNEQ(FieldName, v))
|
||||
}
|
||||
|
||||
// NameIn applies the In predicate on the "name" field.
|
||||
func NameIn(vs ...string) predicate.ChannelMonitorRequestTemplate {
|
||||
return predicate.ChannelMonitorRequestTemplate(sql.FieldIn(FieldName, vs...))
|
||||
}
|
||||
|
||||
// NameNotIn applies the NotIn predicate on the "name" field.
|
||||
func NameNotIn(vs ...string) predicate.ChannelMonitorRequestTemplate {
|
||||
return predicate.ChannelMonitorRequestTemplate(sql.FieldNotIn(FieldName, vs...))
|
||||
}
|
||||
|
||||
// NameGT applies the GT predicate on the "name" field.
|
||||
func NameGT(v string) predicate.ChannelMonitorRequestTemplate {
|
||||
return predicate.ChannelMonitorRequestTemplate(sql.FieldGT(FieldName, v))
|
||||
}
|
||||
|
||||
// NameGTE applies the GTE predicate on the "name" field.
|
||||
func NameGTE(v string) predicate.ChannelMonitorRequestTemplate {
|
||||
return predicate.ChannelMonitorRequestTemplate(sql.FieldGTE(FieldName, v))
|
||||
}
|
||||
|
||||
// NameLT applies the LT predicate on the "name" field.
|
||||
func NameLT(v string) predicate.ChannelMonitorRequestTemplate {
|
||||
return predicate.ChannelMonitorRequestTemplate(sql.FieldLT(FieldName, v))
|
||||
}
|
||||
|
||||
// NameLTE applies the LTE predicate on the "name" field.
|
||||
func NameLTE(v string) predicate.ChannelMonitorRequestTemplate {
|
||||
return predicate.ChannelMonitorRequestTemplate(sql.FieldLTE(FieldName, v))
|
||||
}
|
||||
|
||||
// NameContains applies the Contains predicate on the "name" field.
|
||||
func NameContains(v string) predicate.ChannelMonitorRequestTemplate {
|
||||
return predicate.ChannelMonitorRequestTemplate(sql.FieldContains(FieldName, v))
|
||||
}
|
||||
|
||||
// NameHasPrefix applies the HasPrefix predicate on the "name" field.
|
||||
func NameHasPrefix(v string) predicate.ChannelMonitorRequestTemplate {
|
||||
return predicate.ChannelMonitorRequestTemplate(sql.FieldHasPrefix(FieldName, v))
|
||||
}
|
||||
|
||||
// NameHasSuffix applies the HasSuffix predicate on the "name" field.
|
||||
func NameHasSuffix(v string) predicate.ChannelMonitorRequestTemplate {
|
||||
return predicate.ChannelMonitorRequestTemplate(sql.FieldHasSuffix(FieldName, v))
|
||||
}
|
||||
|
||||
// NameEqualFold applies the EqualFold predicate on the "name" field.
|
||||
func NameEqualFold(v string) predicate.ChannelMonitorRequestTemplate {
|
||||
return predicate.ChannelMonitorRequestTemplate(sql.FieldEqualFold(FieldName, v))
|
||||
}
|
||||
|
||||
// NameContainsFold applies the ContainsFold predicate on the "name" field.
|
||||
func NameContainsFold(v string) predicate.ChannelMonitorRequestTemplate {
|
||||
return predicate.ChannelMonitorRequestTemplate(sql.FieldContainsFold(FieldName, v))
|
||||
}
|
||||
|
||||
// ProviderEQ applies the EQ predicate on the "provider" field.
|
||||
func ProviderEQ(v Provider) predicate.ChannelMonitorRequestTemplate {
|
||||
return predicate.ChannelMonitorRequestTemplate(sql.FieldEQ(FieldProvider, v))
|
||||
}
|
||||
|
||||
// ProviderNEQ applies the NEQ predicate on the "provider" field.
|
||||
func ProviderNEQ(v Provider) predicate.ChannelMonitorRequestTemplate {
|
||||
return predicate.ChannelMonitorRequestTemplate(sql.FieldNEQ(FieldProvider, v))
|
||||
}
|
||||
|
||||
// ProviderIn applies the In predicate on the "provider" field.
|
||||
func ProviderIn(vs ...Provider) predicate.ChannelMonitorRequestTemplate {
|
||||
return predicate.ChannelMonitorRequestTemplate(sql.FieldIn(FieldProvider, vs...))
|
||||
}
|
||||
|
||||
// ProviderNotIn applies the NotIn predicate on the "provider" field.
|
||||
func ProviderNotIn(vs ...Provider) predicate.ChannelMonitorRequestTemplate {
|
||||
return predicate.ChannelMonitorRequestTemplate(sql.FieldNotIn(FieldProvider, vs...))
|
||||
}
|
||||
|
||||
// DescriptionEQ applies the EQ predicate on the "description" field.
|
||||
func DescriptionEQ(v string) predicate.ChannelMonitorRequestTemplate {
|
||||
return predicate.ChannelMonitorRequestTemplate(sql.FieldEQ(FieldDescription, v))
|
||||
}
|
||||
|
||||
// DescriptionNEQ applies the NEQ predicate on the "description" field.
|
||||
func DescriptionNEQ(v string) predicate.ChannelMonitorRequestTemplate {
|
||||
return predicate.ChannelMonitorRequestTemplate(sql.FieldNEQ(FieldDescription, v))
|
||||
}
|
||||
|
||||
// DescriptionIn applies the In predicate on the "description" field.
|
||||
func DescriptionIn(vs ...string) predicate.ChannelMonitorRequestTemplate {
|
||||
return predicate.ChannelMonitorRequestTemplate(sql.FieldIn(FieldDescription, vs...))
|
||||
}
|
||||
|
||||
// DescriptionNotIn applies the NotIn predicate on the "description" field.
|
||||
func DescriptionNotIn(vs ...string) predicate.ChannelMonitorRequestTemplate {
|
||||
return predicate.ChannelMonitorRequestTemplate(sql.FieldNotIn(FieldDescription, vs...))
|
||||
}
|
||||
|
||||
// DescriptionGT applies the GT predicate on the "description" field.
|
||||
func DescriptionGT(v string) predicate.ChannelMonitorRequestTemplate {
|
||||
return predicate.ChannelMonitorRequestTemplate(sql.FieldGT(FieldDescription, v))
|
||||
}
|
||||
|
||||
// DescriptionGTE applies the GTE predicate on the "description" field.
|
||||
func DescriptionGTE(v string) predicate.ChannelMonitorRequestTemplate {
|
||||
return predicate.ChannelMonitorRequestTemplate(sql.FieldGTE(FieldDescription, v))
|
||||
}
|
||||
|
||||
// DescriptionLT applies the LT predicate on the "description" field.
|
||||
func DescriptionLT(v string) predicate.ChannelMonitorRequestTemplate {
|
||||
return predicate.ChannelMonitorRequestTemplate(sql.FieldLT(FieldDescription, v))
|
||||
}
|
||||
|
||||
// DescriptionLTE applies the LTE predicate on the "description" field.
|
||||
func DescriptionLTE(v string) predicate.ChannelMonitorRequestTemplate {
|
||||
return predicate.ChannelMonitorRequestTemplate(sql.FieldLTE(FieldDescription, v))
|
||||
}
|
||||
|
||||
// DescriptionContains applies the Contains predicate on the "description" field.
|
||||
func DescriptionContains(v string) predicate.ChannelMonitorRequestTemplate {
|
||||
return predicate.ChannelMonitorRequestTemplate(sql.FieldContains(FieldDescription, v))
|
||||
}
|
||||
|
||||
// DescriptionHasPrefix applies the HasPrefix predicate on the "description" field.
|
||||
func DescriptionHasPrefix(v string) predicate.ChannelMonitorRequestTemplate {
|
||||
return predicate.ChannelMonitorRequestTemplate(sql.FieldHasPrefix(FieldDescription, v))
|
||||
}
|
||||
|
||||
// DescriptionHasSuffix applies the HasSuffix predicate on the "description" field.
|
||||
func DescriptionHasSuffix(v string) predicate.ChannelMonitorRequestTemplate {
|
||||
return predicate.ChannelMonitorRequestTemplate(sql.FieldHasSuffix(FieldDescription, v))
|
||||
}
|
||||
|
||||
// DescriptionIsNil applies the IsNil predicate on the "description" field.
|
||||
func DescriptionIsNil() predicate.ChannelMonitorRequestTemplate {
|
||||
return predicate.ChannelMonitorRequestTemplate(sql.FieldIsNull(FieldDescription))
|
||||
}
|
||||
|
||||
// DescriptionNotNil applies the NotNil predicate on the "description" field.
|
||||
func DescriptionNotNil() predicate.ChannelMonitorRequestTemplate {
|
||||
return predicate.ChannelMonitorRequestTemplate(sql.FieldNotNull(FieldDescription))
|
||||
}
|
||||
|
||||
// DescriptionEqualFold applies the EqualFold predicate on the "description" field.
|
||||
func DescriptionEqualFold(v string) predicate.ChannelMonitorRequestTemplate {
|
||||
return predicate.ChannelMonitorRequestTemplate(sql.FieldEqualFold(FieldDescription, v))
|
||||
}
|
||||
|
||||
// DescriptionContainsFold applies the ContainsFold predicate on the "description" field.
|
||||
func DescriptionContainsFold(v string) predicate.ChannelMonitorRequestTemplate {
|
||||
return predicate.ChannelMonitorRequestTemplate(sql.FieldContainsFold(FieldDescription, v))
|
||||
}
|
||||
|
||||
// BodyOverrideModeEQ applies the EQ predicate on the "body_override_mode" field.
|
||||
func BodyOverrideModeEQ(v string) predicate.ChannelMonitorRequestTemplate {
|
||||
return predicate.ChannelMonitorRequestTemplate(sql.FieldEQ(FieldBodyOverrideMode, v))
|
||||
}
|
||||
|
||||
// BodyOverrideModeNEQ applies the NEQ predicate on the "body_override_mode" field.
|
||||
func BodyOverrideModeNEQ(v string) predicate.ChannelMonitorRequestTemplate {
|
||||
return predicate.ChannelMonitorRequestTemplate(sql.FieldNEQ(FieldBodyOverrideMode, v))
|
||||
}
|
||||
|
||||
// BodyOverrideModeIn applies the In predicate on the "body_override_mode" field.
|
||||
func BodyOverrideModeIn(vs ...string) predicate.ChannelMonitorRequestTemplate {
|
||||
return predicate.ChannelMonitorRequestTemplate(sql.FieldIn(FieldBodyOverrideMode, vs...))
|
||||
}
|
||||
|
||||
// BodyOverrideModeNotIn applies the NotIn predicate on the "body_override_mode" field.
|
||||
func BodyOverrideModeNotIn(vs ...string) predicate.ChannelMonitorRequestTemplate {
|
||||
return predicate.ChannelMonitorRequestTemplate(sql.FieldNotIn(FieldBodyOverrideMode, vs...))
|
||||
}
|
||||
|
||||
// BodyOverrideModeGT applies the GT predicate on the "body_override_mode" field.
|
||||
func BodyOverrideModeGT(v string) predicate.ChannelMonitorRequestTemplate {
|
||||
return predicate.ChannelMonitorRequestTemplate(sql.FieldGT(FieldBodyOverrideMode, v))
|
||||
}
|
||||
|
||||
// BodyOverrideModeGTE applies the GTE predicate on the "body_override_mode" field.
|
||||
func BodyOverrideModeGTE(v string) predicate.ChannelMonitorRequestTemplate {
|
||||
return predicate.ChannelMonitorRequestTemplate(sql.FieldGTE(FieldBodyOverrideMode, v))
|
||||
}
|
||||
|
||||
// BodyOverrideModeLT applies the LT predicate on the "body_override_mode" field.
|
||||
func BodyOverrideModeLT(v string) predicate.ChannelMonitorRequestTemplate {
|
||||
return predicate.ChannelMonitorRequestTemplate(sql.FieldLT(FieldBodyOverrideMode, v))
|
||||
}
|
||||
|
||||
// BodyOverrideModeLTE applies the LTE predicate on the "body_override_mode" field.
|
||||
func BodyOverrideModeLTE(v string) predicate.ChannelMonitorRequestTemplate {
|
||||
return predicate.ChannelMonitorRequestTemplate(sql.FieldLTE(FieldBodyOverrideMode, v))
|
||||
}
|
||||
|
||||
// BodyOverrideModeContains applies the Contains predicate on the "body_override_mode" field.
|
||||
func BodyOverrideModeContains(v string) predicate.ChannelMonitorRequestTemplate {
|
||||
return predicate.ChannelMonitorRequestTemplate(sql.FieldContains(FieldBodyOverrideMode, v))
|
||||
}
|
||||
|
||||
// BodyOverrideModeHasPrefix applies the HasPrefix predicate on the "body_override_mode" field.
|
||||
func BodyOverrideModeHasPrefix(v string) predicate.ChannelMonitorRequestTemplate {
|
||||
return predicate.ChannelMonitorRequestTemplate(sql.FieldHasPrefix(FieldBodyOverrideMode, v))
|
||||
}
|
||||
|
||||
// BodyOverrideModeHasSuffix applies the HasSuffix predicate on the "body_override_mode" field.
|
||||
func BodyOverrideModeHasSuffix(v string) predicate.ChannelMonitorRequestTemplate {
|
||||
return predicate.ChannelMonitorRequestTemplate(sql.FieldHasSuffix(FieldBodyOverrideMode, v))
|
||||
}
|
||||
|
||||
// BodyOverrideModeEqualFold applies the EqualFold predicate on the "body_override_mode" field.
|
||||
func BodyOverrideModeEqualFold(v string) predicate.ChannelMonitorRequestTemplate {
|
||||
return predicate.ChannelMonitorRequestTemplate(sql.FieldEqualFold(FieldBodyOverrideMode, v))
|
||||
}
|
||||
|
||||
// BodyOverrideModeContainsFold applies the ContainsFold predicate on the "body_override_mode" field.
|
||||
func BodyOverrideModeContainsFold(v string) predicate.ChannelMonitorRequestTemplate {
|
||||
return predicate.ChannelMonitorRequestTemplate(sql.FieldContainsFold(FieldBodyOverrideMode, v))
|
||||
}
|
||||
|
||||
// BodyOverrideIsNil applies the IsNil predicate on the "body_override" field.
|
||||
func BodyOverrideIsNil() predicate.ChannelMonitorRequestTemplate {
|
||||
return predicate.ChannelMonitorRequestTemplate(sql.FieldIsNull(FieldBodyOverride))
|
||||
}
|
||||
|
||||
// BodyOverrideNotNil applies the NotNil predicate on the "body_override" field.
|
||||
func BodyOverrideNotNil() predicate.ChannelMonitorRequestTemplate {
|
||||
return predicate.ChannelMonitorRequestTemplate(sql.FieldNotNull(FieldBodyOverride))
|
||||
}
|
||||
|
||||
// HasMonitors applies the HasEdge predicate on the "monitors" edge.
|
||||
func HasMonitors() predicate.ChannelMonitorRequestTemplate {
|
||||
return predicate.ChannelMonitorRequestTemplate(func(s *sql.Selector) {
|
||||
step := sqlgraph.NewStep(
|
||||
sqlgraph.From(Table, FieldID),
|
||||
sqlgraph.Edge(sqlgraph.O2M, true, MonitorsTable, MonitorsColumn),
|
||||
)
|
||||
sqlgraph.HasNeighbors(s, step)
|
||||
})
|
||||
}
|
||||
|
||||
// HasMonitorsWith applies the HasEdge predicate on the "monitors" edge with a given conditions (other predicates).
|
||||
func HasMonitorsWith(preds ...predicate.ChannelMonitor) predicate.ChannelMonitorRequestTemplate {
|
||||
return predicate.ChannelMonitorRequestTemplate(func(s *sql.Selector) {
|
||||
step := newMonitorsStep()
|
||||
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
|
||||
for _, p := range preds {
|
||||
p(s)
|
||||
}
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
// And groups predicates with the AND operator between them.
|
||||
func And(predicates ...predicate.ChannelMonitorRequestTemplate) predicate.ChannelMonitorRequestTemplate {
|
||||
return predicate.ChannelMonitorRequestTemplate(sql.AndPredicates(predicates...))
|
||||
}
|
||||
|
||||
// Or groups predicates with the OR operator between them.
|
||||
func Or(predicates ...predicate.ChannelMonitorRequestTemplate) predicate.ChannelMonitorRequestTemplate {
|
||||
return predicate.ChannelMonitorRequestTemplate(sql.OrPredicates(predicates...))
|
||||
}
|
||||
|
||||
// Not applies the not operator on the given predicate.
|
||||
func Not(p predicate.ChannelMonitorRequestTemplate) predicate.ChannelMonitorRequestTemplate {
|
||||
return predicate.ChannelMonitorRequestTemplate(sql.NotPredicates(p))
|
||||
}
|
||||
942
backend/ent/channelmonitorrequesttemplate_create.go
Normal file
@@ -0,0 +1,942 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package ent
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"entgo.io/ent/schema/field"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitor"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitorrequesttemplate"
|
||||
)
|
||||
|
||||
// ChannelMonitorRequestTemplateCreate is the builder for creating a ChannelMonitorRequestTemplate entity.
|
||||
type ChannelMonitorRequestTemplateCreate struct {
|
||||
config
|
||||
mutation *ChannelMonitorRequestTemplateMutation
|
||||
hooks []Hook
|
||||
conflict []sql.ConflictOption
|
||||
}
|
||||
|
||||
// SetCreatedAt sets the "created_at" field.
|
||||
func (_c *ChannelMonitorRequestTemplateCreate) SetCreatedAt(v time.Time) *ChannelMonitorRequestTemplateCreate {
|
||||
_c.mutation.SetCreatedAt(v)
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetNillableCreatedAt sets the "created_at" field if the given value is not nil.
|
||||
func (_c *ChannelMonitorRequestTemplateCreate) SetNillableCreatedAt(v *time.Time) *ChannelMonitorRequestTemplateCreate {
|
||||
if v != nil {
|
||||
_c.SetCreatedAt(*v)
|
||||
}
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetUpdatedAt sets the "updated_at" field.
|
||||
func (_c *ChannelMonitorRequestTemplateCreate) SetUpdatedAt(v time.Time) *ChannelMonitorRequestTemplateCreate {
|
||||
_c.mutation.SetUpdatedAt(v)
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil.
|
||||
func (_c *ChannelMonitorRequestTemplateCreate) SetNillableUpdatedAt(v *time.Time) *ChannelMonitorRequestTemplateCreate {
|
||||
if v != nil {
|
||||
_c.SetUpdatedAt(*v)
|
||||
}
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetName sets the "name" field.
|
||||
func (_c *ChannelMonitorRequestTemplateCreate) SetName(v string) *ChannelMonitorRequestTemplateCreate {
|
||||
_c.mutation.SetName(v)
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetProvider sets the "provider" field.
|
||||
func (_c *ChannelMonitorRequestTemplateCreate) SetProvider(v channelmonitorrequesttemplate.Provider) *ChannelMonitorRequestTemplateCreate {
|
||||
_c.mutation.SetProvider(v)
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetDescription sets the "description" field.
|
||||
func (_c *ChannelMonitorRequestTemplateCreate) SetDescription(v string) *ChannelMonitorRequestTemplateCreate {
|
||||
_c.mutation.SetDescription(v)
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetNillableDescription sets the "description" field if the given value is not nil.
|
||||
func (_c *ChannelMonitorRequestTemplateCreate) SetNillableDescription(v *string) *ChannelMonitorRequestTemplateCreate {
|
||||
if v != nil {
|
||||
_c.SetDescription(*v)
|
||||
}
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetExtraHeaders sets the "extra_headers" field.
|
||||
func (_c *ChannelMonitorRequestTemplateCreate) SetExtraHeaders(v map[string]string) *ChannelMonitorRequestTemplateCreate {
|
||||
_c.mutation.SetExtraHeaders(v)
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetBodyOverrideMode sets the "body_override_mode" field.
|
||||
func (_c *ChannelMonitorRequestTemplateCreate) SetBodyOverrideMode(v string) *ChannelMonitorRequestTemplateCreate {
|
||||
_c.mutation.SetBodyOverrideMode(v)
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetNillableBodyOverrideMode sets the "body_override_mode" field if the given value is not nil.
|
||||
func (_c *ChannelMonitorRequestTemplateCreate) SetNillableBodyOverrideMode(v *string) *ChannelMonitorRequestTemplateCreate {
|
||||
if v != nil {
|
||||
_c.SetBodyOverrideMode(*v)
|
||||
}
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetBodyOverride sets the "body_override" field.
|
||||
func (_c *ChannelMonitorRequestTemplateCreate) SetBodyOverride(v map[string]interface{}) *ChannelMonitorRequestTemplateCreate {
|
||||
_c.mutation.SetBodyOverride(v)
|
||||
return _c
|
||||
}
|
||||
|
||||
// AddMonitorIDs adds the "monitors" edge to the ChannelMonitor entity by IDs.
|
||||
func (_c *ChannelMonitorRequestTemplateCreate) AddMonitorIDs(ids ...int64) *ChannelMonitorRequestTemplateCreate {
|
||||
_c.mutation.AddMonitorIDs(ids...)
|
||||
return _c
|
||||
}
|
||||
|
||||
// AddMonitors adds the "monitors" edges to the ChannelMonitor entity.
|
||||
func (_c *ChannelMonitorRequestTemplateCreate) AddMonitors(v ...*ChannelMonitor) *ChannelMonitorRequestTemplateCreate {
|
||||
ids := make([]int64, len(v))
|
||||
for i := range v {
|
||||
ids[i] = v[i].ID
|
||||
}
|
||||
return _c.AddMonitorIDs(ids...)
|
||||
}
|
||||
|
||||
// Mutation returns the ChannelMonitorRequestTemplateMutation object of the builder.
|
||||
func (_c *ChannelMonitorRequestTemplateCreate) Mutation() *ChannelMonitorRequestTemplateMutation {
|
||||
return _c.mutation
|
||||
}
|
||||
|
||||
// Save creates the ChannelMonitorRequestTemplate in the database.
|
||||
func (_c *ChannelMonitorRequestTemplateCreate) Save(ctx context.Context) (*ChannelMonitorRequestTemplate, error) {
|
||||
_c.defaults()
|
||||
return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks)
|
||||
}
|
||||
|
||||
// SaveX calls Save and panics if Save returns an error.
|
||||
func (_c *ChannelMonitorRequestTemplateCreate) SaveX(ctx context.Context) *ChannelMonitorRequestTemplate {
|
||||
v, err := _c.Save(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// Exec executes the query.
|
||||
func (_c *ChannelMonitorRequestTemplateCreate) Exec(ctx context.Context) error {
|
||||
_, err := _c.Save(ctx)
|
||||
return err
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (_c *ChannelMonitorRequestTemplateCreate) ExecX(ctx context.Context) {
|
||||
if err := _c.Exec(ctx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// defaults sets the default values of the builder before save.
|
||||
func (_c *ChannelMonitorRequestTemplateCreate) defaults() {
|
||||
if _, ok := _c.mutation.CreatedAt(); !ok {
|
||||
v := channelmonitorrequesttemplate.DefaultCreatedAt()
|
||||
_c.mutation.SetCreatedAt(v)
|
||||
}
|
||||
if _, ok := _c.mutation.UpdatedAt(); !ok {
|
||||
v := channelmonitorrequesttemplate.DefaultUpdatedAt()
|
||||
_c.mutation.SetUpdatedAt(v)
|
||||
}
|
||||
if _, ok := _c.mutation.Description(); !ok {
|
||||
v := channelmonitorrequesttemplate.DefaultDescription
|
||||
_c.mutation.SetDescription(v)
|
||||
}
|
||||
if _, ok := _c.mutation.ExtraHeaders(); !ok {
|
||||
v := channelmonitorrequesttemplate.DefaultExtraHeaders
|
||||
_c.mutation.SetExtraHeaders(v)
|
||||
}
|
||||
if _, ok := _c.mutation.BodyOverrideMode(); !ok {
|
||||
v := channelmonitorrequesttemplate.DefaultBodyOverrideMode
|
||||
_c.mutation.SetBodyOverrideMode(v)
|
||||
}
|
||||
}
|
||||
|
||||
// check runs all checks and user-defined validators on the builder.
|
||||
func (_c *ChannelMonitorRequestTemplateCreate) check() error {
|
||||
if _, ok := _c.mutation.CreatedAt(); !ok {
|
||||
return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "ChannelMonitorRequestTemplate.created_at"`)}
|
||||
}
|
||||
if _, ok := _c.mutation.UpdatedAt(); !ok {
|
||||
return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "ChannelMonitorRequestTemplate.updated_at"`)}
|
||||
}
|
||||
if _, ok := _c.mutation.Name(); !ok {
|
||||
return &ValidationError{Name: "name", err: errors.New(`ent: missing required field "ChannelMonitorRequestTemplate.name"`)}
|
||||
}
|
||||
if v, ok := _c.mutation.Name(); ok {
|
||||
if err := channelmonitorrequesttemplate.NameValidator(v); err != nil {
|
||||
return &ValidationError{Name: "name", err: fmt.Errorf(`ent: validator failed for field "ChannelMonitorRequestTemplate.name": %w`, err)}
|
||||
}
|
||||
}
|
||||
if _, ok := _c.mutation.Provider(); !ok {
|
||||
return &ValidationError{Name: "provider", err: errors.New(`ent: missing required field "ChannelMonitorRequestTemplate.provider"`)}
|
||||
}
|
||||
if v, ok := _c.mutation.Provider(); ok {
|
||||
if err := channelmonitorrequesttemplate.ProviderValidator(v); err != nil {
|
||||
return &ValidationError{Name: "provider", err: fmt.Errorf(`ent: validator failed for field "ChannelMonitorRequestTemplate.provider": %w`, err)}
|
||||
}
|
||||
}
|
||||
if v, ok := _c.mutation.Description(); ok {
|
||||
if err := channelmonitorrequesttemplate.DescriptionValidator(v); err != nil {
|
||||
return &ValidationError{Name: "description", err: fmt.Errorf(`ent: validator failed for field "ChannelMonitorRequestTemplate.description": %w`, err)}
|
||||
}
|
||||
}
|
||||
if _, ok := _c.mutation.ExtraHeaders(); !ok {
|
||||
return &ValidationError{Name: "extra_headers", err: errors.New(`ent: missing required field "ChannelMonitorRequestTemplate.extra_headers"`)}
|
||||
}
|
||||
if _, ok := _c.mutation.BodyOverrideMode(); !ok {
|
||||
return &ValidationError{Name: "body_override_mode", err: errors.New(`ent: missing required field "ChannelMonitorRequestTemplate.body_override_mode"`)}
|
||||
}
|
||||
if v, ok := _c.mutation.BodyOverrideMode(); ok {
|
||||
if err := channelmonitorrequesttemplate.BodyOverrideModeValidator(v); err != nil {
|
||||
return &ValidationError{Name: "body_override_mode", err: fmt.Errorf(`ent: validator failed for field "ChannelMonitorRequestTemplate.body_override_mode": %w`, err)}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (_c *ChannelMonitorRequestTemplateCreate) sqlSave(ctx context.Context) (*ChannelMonitorRequestTemplate, error) {
|
||||
if err := _c.check(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
_node, _spec := _c.createSpec()
|
||||
if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil {
|
||||
if sqlgraph.IsConstraintError(err) {
|
||||
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
id := _spec.ID.Value.(int64)
|
||||
_node.ID = int64(id)
|
||||
_c.mutation.id = &_node.ID
|
||||
_c.mutation.done = true
|
||||
return _node, nil
|
||||
}
|
||||
|
||||
func (_c *ChannelMonitorRequestTemplateCreate) createSpec() (*ChannelMonitorRequestTemplate, *sqlgraph.CreateSpec) {
|
||||
var (
|
||||
_node = &ChannelMonitorRequestTemplate{config: _c.config}
|
||||
_spec = sqlgraph.NewCreateSpec(channelmonitorrequesttemplate.Table, sqlgraph.NewFieldSpec(channelmonitorrequesttemplate.FieldID, field.TypeInt64))
|
||||
)
|
||||
_spec.OnConflict = _c.conflict
|
||||
if value, ok := _c.mutation.CreatedAt(); ok {
|
||||
_spec.SetField(channelmonitorrequesttemplate.FieldCreatedAt, field.TypeTime, value)
|
||||
_node.CreatedAt = value
|
||||
}
|
||||
if value, ok := _c.mutation.UpdatedAt(); ok {
|
||||
_spec.SetField(channelmonitorrequesttemplate.FieldUpdatedAt, field.TypeTime, value)
|
||||
_node.UpdatedAt = value
|
||||
}
|
||||
if value, ok := _c.mutation.Name(); ok {
|
||||
_spec.SetField(channelmonitorrequesttemplate.FieldName, field.TypeString, value)
|
||||
_node.Name = value
|
||||
}
|
||||
if value, ok := _c.mutation.Provider(); ok {
|
||||
_spec.SetField(channelmonitorrequesttemplate.FieldProvider, field.TypeEnum, value)
|
||||
_node.Provider = value
|
||||
}
|
||||
if value, ok := _c.mutation.Description(); ok {
|
||||
_spec.SetField(channelmonitorrequesttemplate.FieldDescription, field.TypeString, value)
|
||||
_node.Description = value
|
||||
}
|
||||
if value, ok := _c.mutation.ExtraHeaders(); ok {
|
||||
_spec.SetField(channelmonitorrequesttemplate.FieldExtraHeaders, field.TypeJSON, value)
|
||||
_node.ExtraHeaders = value
|
||||
}
|
||||
if value, ok := _c.mutation.BodyOverrideMode(); ok {
|
||||
_spec.SetField(channelmonitorrequesttemplate.FieldBodyOverrideMode, field.TypeString, value)
|
||||
_node.BodyOverrideMode = value
|
||||
}
|
||||
if value, ok := _c.mutation.BodyOverride(); ok {
|
||||
_spec.SetField(channelmonitorrequesttemplate.FieldBodyOverride, field.TypeJSON, value)
|
||||
_node.BodyOverride = value
|
||||
}
|
||||
if nodes := _c.mutation.MonitorsIDs(); len(nodes) > 0 {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
Rel: sqlgraph.O2M,
|
||||
Inverse: true,
|
||||
Table: channelmonitorrequesttemplate.MonitorsTable,
|
||||
Columns: []string{channelmonitorrequesttemplate.MonitorsColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: sqlgraph.NewFieldSpec(channelmonitor.FieldID, field.TypeInt64),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
edge.Target.Nodes = append(edge.Target.Nodes, k)
|
||||
}
|
||||
_spec.Edges = append(_spec.Edges, edge)
|
||||
}
|
||||
return _node, _spec
|
||||
}
|
||||
|
||||
// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause
|
||||
// of the `INSERT` statement. For example:
|
||||
//
|
||||
// client.ChannelMonitorRequestTemplate.Create().
|
||||
// SetCreatedAt(v).
|
||||
// OnConflict(
|
||||
// // Update the row with the new values
|
||||
// // the was proposed for insertion.
|
||||
// sql.ResolveWithNewValues(),
|
||||
// ).
|
||||
// // Override some of the fields with custom
|
||||
// // update values.
|
||||
// Update(func(u *ent.ChannelMonitorRequestTemplateUpsert) {
|
||||
// SetCreatedAt(v+v).
|
||||
// }).
|
||||
// Exec(ctx)
|
||||
func (_c *ChannelMonitorRequestTemplateCreate) OnConflict(opts ...sql.ConflictOption) *ChannelMonitorRequestTemplateUpsertOne {
|
||||
_c.conflict = opts
|
||||
return &ChannelMonitorRequestTemplateUpsertOne{
|
||||
create: _c,
|
||||
}
|
||||
}
|
||||
|
||||
// OnConflictColumns calls `OnConflict` and configures the columns
|
||||
// as conflict target. Using this option is equivalent to using:
|
||||
//
|
||||
// client.ChannelMonitorRequestTemplate.Create().
|
||||
// OnConflict(sql.ConflictColumns(columns...)).
|
||||
// Exec(ctx)
|
||||
func (_c *ChannelMonitorRequestTemplateCreate) OnConflictColumns(columns ...string) *ChannelMonitorRequestTemplateUpsertOne {
|
||||
_c.conflict = append(_c.conflict, sql.ConflictColumns(columns...))
|
||||
return &ChannelMonitorRequestTemplateUpsertOne{
|
||||
create: _c,
|
||||
}
|
||||
}
|
||||
|
||||
type (
|
||||
// ChannelMonitorRequestTemplateUpsertOne is the builder for "upsert"-ing
|
||||
// one ChannelMonitorRequestTemplate node.
|
||||
ChannelMonitorRequestTemplateUpsertOne struct {
|
||||
create *ChannelMonitorRequestTemplateCreate
|
||||
}
|
||||
|
||||
// ChannelMonitorRequestTemplateUpsert is the "OnConflict" setter.
|
||||
ChannelMonitorRequestTemplateUpsert struct {
|
||||
*sql.UpdateSet
|
||||
}
|
||||
)
|
||||
|
||||
// SetUpdatedAt sets the "updated_at" field.
|
||||
func (u *ChannelMonitorRequestTemplateUpsert) SetUpdatedAt(v time.Time) *ChannelMonitorRequestTemplateUpsert {
|
||||
u.Set(channelmonitorrequesttemplate.FieldUpdatedAt, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// The methods below configure the `DO UPDATE SET` clause of the upsert,
// one column at a time. Set* assigns an explicit value, Update* takes the
// value that was proposed for insertion (SQL EXCLUDED), and Clear* sets
// the column to NULL.

// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create.
func (u *ChannelMonitorRequestTemplateUpsert) UpdateUpdatedAt() *ChannelMonitorRequestTemplateUpsert {
	u.SetExcluded(channelmonitorrequesttemplate.FieldUpdatedAt)
	return u
}

// SetName sets the "name" field.
func (u *ChannelMonitorRequestTemplateUpsert) SetName(v string) *ChannelMonitorRequestTemplateUpsert {
	u.Set(channelmonitorrequesttemplate.FieldName, v)
	return u
}

// UpdateName sets the "name" field to the value that was provided on create.
func (u *ChannelMonitorRequestTemplateUpsert) UpdateName() *ChannelMonitorRequestTemplateUpsert {
	u.SetExcluded(channelmonitorrequesttemplate.FieldName)
	return u
}

// SetProvider sets the "provider" field.
func (u *ChannelMonitorRequestTemplateUpsert) SetProvider(v channelmonitorrequesttemplate.Provider) *ChannelMonitorRequestTemplateUpsert {
	u.Set(channelmonitorrequesttemplate.FieldProvider, v)
	return u
}

// UpdateProvider sets the "provider" field to the value that was provided on create.
func (u *ChannelMonitorRequestTemplateUpsert) UpdateProvider() *ChannelMonitorRequestTemplateUpsert {
	u.SetExcluded(channelmonitorrequesttemplate.FieldProvider)
	return u
}

// SetDescription sets the "description" field.
func (u *ChannelMonitorRequestTemplateUpsert) SetDescription(v string) *ChannelMonitorRequestTemplateUpsert {
	u.Set(channelmonitorrequesttemplate.FieldDescription, v)
	return u
}

// UpdateDescription sets the "description" field to the value that was provided on create.
func (u *ChannelMonitorRequestTemplateUpsert) UpdateDescription() *ChannelMonitorRequestTemplateUpsert {
	u.SetExcluded(channelmonitorrequesttemplate.FieldDescription)
	return u
}

// ClearDescription clears the value of the "description" field.
func (u *ChannelMonitorRequestTemplateUpsert) ClearDescription() *ChannelMonitorRequestTemplateUpsert {
	u.SetNull(channelmonitorrequesttemplate.FieldDescription)
	return u
}

// SetExtraHeaders sets the "extra_headers" field.
func (u *ChannelMonitorRequestTemplateUpsert) SetExtraHeaders(v map[string]string) *ChannelMonitorRequestTemplateUpsert {
	u.Set(channelmonitorrequesttemplate.FieldExtraHeaders, v)
	return u
}

// UpdateExtraHeaders sets the "extra_headers" field to the value that was provided on create.
func (u *ChannelMonitorRequestTemplateUpsert) UpdateExtraHeaders() *ChannelMonitorRequestTemplateUpsert {
	u.SetExcluded(channelmonitorrequesttemplate.FieldExtraHeaders)
	return u
}

// SetBodyOverrideMode sets the "body_override_mode" field.
func (u *ChannelMonitorRequestTemplateUpsert) SetBodyOverrideMode(v string) *ChannelMonitorRequestTemplateUpsert {
	u.Set(channelmonitorrequesttemplate.FieldBodyOverrideMode, v)
	return u
}

// UpdateBodyOverrideMode sets the "body_override_mode" field to the value that was provided on create.
func (u *ChannelMonitorRequestTemplateUpsert) UpdateBodyOverrideMode() *ChannelMonitorRequestTemplateUpsert {
	u.SetExcluded(channelmonitorrequesttemplate.FieldBodyOverrideMode)
	return u
}

// SetBodyOverride sets the "body_override" field.
func (u *ChannelMonitorRequestTemplateUpsert) SetBodyOverride(v map[string]interface{}) *ChannelMonitorRequestTemplateUpsert {
	u.Set(channelmonitorrequesttemplate.FieldBodyOverride, v)
	return u
}

// UpdateBodyOverride sets the "body_override" field to the value that was provided on create.
func (u *ChannelMonitorRequestTemplateUpsert) UpdateBodyOverride() *ChannelMonitorRequestTemplateUpsert {
	u.SetExcluded(channelmonitorrequesttemplate.FieldBodyOverride)
	return u
}

// ClearBodyOverride clears the value of the "body_override" field.
func (u *ChannelMonitorRequestTemplateUpsert) ClearBodyOverride() *ChannelMonitorRequestTemplateUpsert {
	u.SetNull(channelmonitorrequesttemplate.FieldBodyOverride)
	return u
}
|
||||
|
||||
// UpdateNewValues updates the mutable fields using the new values that were set on create.
// Using this option is equivalent to using:
//
//	client.ChannelMonitorRequestTemplate.Create().
//		OnConflict(
//			sql.ResolveWithNewValues(),
//		).
//		Exec(ctx)
func (u *ChannelMonitorRequestTemplateUpsertOne) UpdateNewValues() *ChannelMonitorRequestTemplateUpsertOne {
	u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues())
	u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) {
		// If created_at was explicitly set on create, keep the existing
		// row's value on conflict instead of overwriting it.
		if _, exists := u.create.mutation.CreatedAt(); exists {
			s.SetIgnore(channelmonitorrequesttemplate.FieldCreatedAt)
		}
	}))
	return u
}

// Ignore sets each column to itself in case of conflict.
// Using this option is equivalent to using:
//
//	client.ChannelMonitorRequestTemplate.Create().
//		OnConflict(sql.ResolveWithIgnore()).
//		Exec(ctx)
func (u *ChannelMonitorRequestTemplateUpsertOne) Ignore() *ChannelMonitorRequestTemplateUpsertOne {
	u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore())
	return u
}

// DoNothing configures the conflict_action to `DO NOTHING`.
// Supported only by SQLite and PostgreSQL.
func (u *ChannelMonitorRequestTemplateUpsertOne) DoNothing() *ChannelMonitorRequestTemplateUpsertOne {
	u.create.conflict = append(u.create.conflict, sql.DoNothing())
	return u
}

// Update allows overriding fields `UPDATE` values. See the ChannelMonitorRequestTemplateCreate.OnConflict
// documentation for more info.
func (u *ChannelMonitorRequestTemplateUpsertOne) Update(set func(*ChannelMonitorRequestTemplateUpsert)) *ChannelMonitorRequestTemplateUpsertOne {
	u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) {
		set(&ChannelMonitorRequestTemplateUpsert{UpdateSet: update})
	}))
	return u
}
|
||||
|
||||
// The methods below are thin per-field wrappers that forward to the
// corresponding ChannelMonitorRequestTemplateUpsert method via Update.

// SetUpdatedAt sets the "updated_at" field.
func (u *ChannelMonitorRequestTemplateUpsertOne) SetUpdatedAt(v time.Time) *ChannelMonitorRequestTemplateUpsertOne {
	return u.Update(func(s *ChannelMonitorRequestTemplateUpsert) {
		s.SetUpdatedAt(v)
	})
}

// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create.
func (u *ChannelMonitorRequestTemplateUpsertOne) UpdateUpdatedAt() *ChannelMonitorRequestTemplateUpsertOne {
	return u.Update(func(s *ChannelMonitorRequestTemplateUpsert) {
		s.UpdateUpdatedAt()
	})
}

// SetName sets the "name" field.
func (u *ChannelMonitorRequestTemplateUpsertOne) SetName(v string) *ChannelMonitorRequestTemplateUpsertOne {
	return u.Update(func(s *ChannelMonitorRequestTemplateUpsert) {
		s.SetName(v)
	})
}

// UpdateName sets the "name" field to the value that was provided on create.
func (u *ChannelMonitorRequestTemplateUpsertOne) UpdateName() *ChannelMonitorRequestTemplateUpsertOne {
	return u.Update(func(s *ChannelMonitorRequestTemplateUpsert) {
		s.UpdateName()
	})
}

// SetProvider sets the "provider" field.
func (u *ChannelMonitorRequestTemplateUpsertOne) SetProvider(v channelmonitorrequesttemplate.Provider) *ChannelMonitorRequestTemplateUpsertOne {
	return u.Update(func(s *ChannelMonitorRequestTemplateUpsert) {
		s.SetProvider(v)
	})
}

// UpdateProvider sets the "provider" field to the value that was provided on create.
func (u *ChannelMonitorRequestTemplateUpsertOne) UpdateProvider() *ChannelMonitorRequestTemplateUpsertOne {
	return u.Update(func(s *ChannelMonitorRequestTemplateUpsert) {
		s.UpdateProvider()
	})
}

// SetDescription sets the "description" field.
func (u *ChannelMonitorRequestTemplateUpsertOne) SetDescription(v string) *ChannelMonitorRequestTemplateUpsertOne {
	return u.Update(func(s *ChannelMonitorRequestTemplateUpsert) {
		s.SetDescription(v)
	})
}

// UpdateDescription sets the "description" field to the value that was provided on create.
func (u *ChannelMonitorRequestTemplateUpsertOne) UpdateDescription() *ChannelMonitorRequestTemplateUpsertOne {
	return u.Update(func(s *ChannelMonitorRequestTemplateUpsert) {
		s.UpdateDescription()
	})
}

// ClearDescription clears the value of the "description" field.
func (u *ChannelMonitorRequestTemplateUpsertOne) ClearDescription() *ChannelMonitorRequestTemplateUpsertOne {
	return u.Update(func(s *ChannelMonitorRequestTemplateUpsert) {
		s.ClearDescription()
	})
}

// SetExtraHeaders sets the "extra_headers" field.
func (u *ChannelMonitorRequestTemplateUpsertOne) SetExtraHeaders(v map[string]string) *ChannelMonitorRequestTemplateUpsertOne {
	return u.Update(func(s *ChannelMonitorRequestTemplateUpsert) {
		s.SetExtraHeaders(v)
	})
}

// UpdateExtraHeaders sets the "extra_headers" field to the value that was provided on create.
func (u *ChannelMonitorRequestTemplateUpsertOne) UpdateExtraHeaders() *ChannelMonitorRequestTemplateUpsertOne {
	return u.Update(func(s *ChannelMonitorRequestTemplateUpsert) {
		s.UpdateExtraHeaders()
	})
}

// SetBodyOverrideMode sets the "body_override_mode" field.
func (u *ChannelMonitorRequestTemplateUpsertOne) SetBodyOverrideMode(v string) *ChannelMonitorRequestTemplateUpsertOne {
	return u.Update(func(s *ChannelMonitorRequestTemplateUpsert) {
		s.SetBodyOverrideMode(v)
	})
}

// UpdateBodyOverrideMode sets the "body_override_mode" field to the value that was provided on create.
func (u *ChannelMonitorRequestTemplateUpsertOne) UpdateBodyOverrideMode() *ChannelMonitorRequestTemplateUpsertOne {
	return u.Update(func(s *ChannelMonitorRequestTemplateUpsert) {
		s.UpdateBodyOverrideMode()
	})
}

// SetBodyOverride sets the "body_override" field.
func (u *ChannelMonitorRequestTemplateUpsertOne) SetBodyOverride(v map[string]interface{}) *ChannelMonitorRequestTemplateUpsertOne {
	return u.Update(func(s *ChannelMonitorRequestTemplateUpsert) {
		s.SetBodyOverride(v)
	})
}

// UpdateBodyOverride sets the "body_override" field to the value that was provided on create.
func (u *ChannelMonitorRequestTemplateUpsertOne) UpdateBodyOverride() *ChannelMonitorRequestTemplateUpsertOne {
	return u.Update(func(s *ChannelMonitorRequestTemplateUpsert) {
		s.UpdateBodyOverride()
	})
}

// ClearBodyOverride clears the value of the "body_override" field.
func (u *ChannelMonitorRequestTemplateUpsertOne) ClearBodyOverride() *ChannelMonitorRequestTemplateUpsertOne {
	return u.Update(func(s *ChannelMonitorRequestTemplateUpsert) {
		s.ClearBodyOverride()
	})
}
|
||||
|
||||
// Exec executes the query.
|
||||
func (u *ChannelMonitorRequestTemplateUpsertOne) Exec(ctx context.Context) error {
|
||||
if len(u.create.conflict) == 0 {
|
||||
return errors.New("ent: missing options for ChannelMonitorRequestTemplateCreate.OnConflict")
|
||||
}
|
||||
return u.create.Exec(ctx)
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (u *ChannelMonitorRequestTemplateUpsertOne) ExecX(ctx context.Context) {
|
||||
if err := u.create.Exec(ctx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Exec executes the UPSERT query and returns the inserted/updated ID.
|
||||
func (u *ChannelMonitorRequestTemplateUpsertOne) ID(ctx context.Context) (id int64, err error) {
|
||||
node, err := u.create.Save(ctx)
|
||||
if err != nil {
|
||||
return id, err
|
||||
}
|
||||
return node.ID, nil
|
||||
}
|
||||
|
||||
// IDX is like ID, but panics if an error occurs.
|
||||
func (u *ChannelMonitorRequestTemplateUpsertOne) IDX(ctx context.Context) int64 {
|
||||
id, err := u.ID(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return id
|
||||
}
|
||||
|
||||
// ChannelMonitorRequestTemplateCreateBulk is the builder for creating many ChannelMonitorRequestTemplate entities in bulk.
type ChannelMonitorRequestTemplateCreateBulk struct {
	config
	// err holds a builder-construction error that is surfaced on Save/Exec.
	err error
	// builders holds one Create builder per entity to be inserted.
	builders []*ChannelMonitorRequestTemplateCreate
	// conflict holds the ON CONFLICT options shared by the whole batch.
	conflict []sql.ConflictOption
}
|
||||
|
||||
// Save creates the ChannelMonitorRequestTemplate entities in the database.
// Each builder's hooks run around its own mutator; the mutators are chained
// so that the last one in the chain performs the single batch INSERT.
func (_c *ChannelMonitorRequestTemplateCreateBulk) Save(ctx context.Context) ([]*ChannelMonitorRequestTemplate, error) {
	if _c.err != nil {
		return nil, _c.err
	}
	// One spec, node and mutator slot per builder.
	specs := make([]*sqlgraph.CreateSpec, len(_c.builders))
	nodes := make([]*ChannelMonitorRequestTemplate, len(_c.builders))
	mutators := make([]Mutator, len(_c.builders))
	for i := range _c.builders {
		func(i int, root context.Context) {
			builder := _c.builders[i]
			builder.defaults()
			var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
				mutation, ok := m.(*ChannelMonitorRequestTemplateMutation)
				if !ok {
					return nil, fmt.Errorf("unexpected mutation type %T", m)
				}
				if err := builder.check(); err != nil {
					return nil, err
				}
				builder.mutation = mutation
				var err error
				nodes[i], specs[i] = builder.createSpec()
				if i < len(mutators)-1 {
					// Not the last builder: trigger the next builder's
					// mutator (and its hooks) before the batch runs.
					_, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation)
				} else {
					spec := &sqlgraph.BatchCreateSpec{Nodes: specs}
					spec.OnConflict = _c.conflict
					// Invoke the actual operation on the latest mutation in the chain.
					if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil {
						if sqlgraph.IsConstraintError(err) {
							err = &ConstraintError{msg: err.Error(), wrap: err}
						}
					}
				}
				if err != nil {
					return nil, err
				}
				mutation.id = &nodes[i].ID
				// Propagate the database-assigned ID back onto the node.
				if specs[i].ID.Value != nil {
					id := specs[i].ID.Value.(int64)
					nodes[i].ID = int64(id)
				}
				mutation.done = true
				return nodes[i], nil
			})
			// Wrap the base mutator with the builder's hooks in reverse
			// order, so the first registered hook runs outermost.
			for i := len(builder.hooks) - 1; i >= 0; i-- {
				mut = builder.hooks[i](mut)
			}
			mutators[i] = mut
		}(i, ctx)
	}
	if len(mutators) > 0 {
		// Kick off the chain from the first builder.
		if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil {
			return nil, err
		}
	}
	return nodes, nil
}
|
||||
|
||||
// SaveX is like Save, but panics if an error occurs.
func (_c *ChannelMonitorRequestTemplateCreateBulk) SaveX(ctx context.Context) []*ChannelMonitorRequestTemplate {
	v, err := _c.Save(ctx)
	if err != nil {
		panic(err)
	}
	return v
}

// Exec executes the query.
func (_c *ChannelMonitorRequestTemplateCreateBulk) Exec(ctx context.Context) error {
	_, err := _c.Save(ctx)
	return err
}

// ExecX is like Exec, but panics if an error occurs.
func (_c *ChannelMonitorRequestTemplateCreateBulk) ExecX(ctx context.Context) {
	if err := _c.Exec(ctx); err != nil {
		panic(err)
	}
}
|
||||
|
||||
// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause
// of the `INSERT` statement. For example:
//
//	client.ChannelMonitorRequestTemplate.CreateBulk(builders...).
//		OnConflict(
//			// Update the row with the new values
//			// that were proposed for insertion.
//			sql.ResolveWithNewValues(),
//		).
//		// Override some of the fields with custom
//		// update values.
//		Update(func(u *ent.ChannelMonitorRequestTemplateUpsert) {
//			SetCreatedAt(v+v).
//		}).
//		Exec(ctx)
func (_c *ChannelMonitorRequestTemplateCreateBulk) OnConflict(opts ...sql.ConflictOption) *ChannelMonitorRequestTemplateUpsertBulk {
	_c.conflict = opts
	return &ChannelMonitorRequestTemplateUpsertBulk{
		create: _c,
	}
}

// OnConflictColumns calls `OnConflict` and configures the columns
// as conflict target. Using this option is equivalent to using:
//
//	client.ChannelMonitorRequestTemplate.Create().
//		OnConflict(sql.ConflictColumns(columns...)).
//		Exec(ctx)
func (_c *ChannelMonitorRequestTemplateCreateBulk) OnConflictColumns(columns ...string) *ChannelMonitorRequestTemplateUpsertBulk {
	_c.conflict = append(_c.conflict, sql.ConflictColumns(columns...))
	return &ChannelMonitorRequestTemplateUpsertBulk{
		create: _c,
	}
}
|
||||
|
||||
// ChannelMonitorRequestTemplateUpsertBulk is the builder for "upsert"-ing
// a bulk of ChannelMonitorRequestTemplate nodes.
type ChannelMonitorRequestTemplateUpsertBulk struct {
	create *ChannelMonitorRequestTemplateCreateBulk
}

// UpdateNewValues updates the mutable fields using the new values that
// were set on create. Using this option is equivalent to using:
//
//	client.ChannelMonitorRequestTemplate.Create().
//		OnConflict(
//			sql.ResolveWithNewValues(),
//		).
//		Exec(ctx)
func (u *ChannelMonitorRequestTemplateUpsertBulk) UpdateNewValues() *ChannelMonitorRequestTemplateUpsertBulk {
	u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues())
	u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) {
		// If any builder set created_at explicitly on create, keep the
		// existing row's value on conflict instead of overwriting it.
		for _, b := range u.create.builders {
			if _, exists := b.mutation.CreatedAt(); exists {
				s.SetIgnore(channelmonitorrequesttemplate.FieldCreatedAt)
			}
		}
	}))
	return u
}
|
||||
|
||||
// Ignore sets each column to itself in case of conflict.
// Using this option is equivalent to using:
//
//	client.ChannelMonitorRequestTemplate.Create().
//		OnConflict(sql.ResolveWithIgnore()).
//		Exec(ctx)
func (u *ChannelMonitorRequestTemplateUpsertBulk) Ignore() *ChannelMonitorRequestTemplateUpsertBulk {
	u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore())
	return u
}

// DoNothing configures the conflict_action to `DO NOTHING`.
// Supported only by SQLite and PostgreSQL.
func (u *ChannelMonitorRequestTemplateUpsertBulk) DoNothing() *ChannelMonitorRequestTemplateUpsertBulk {
	u.create.conflict = append(u.create.conflict, sql.DoNothing())
	return u
}

// Update allows overriding fields `UPDATE` values. See the ChannelMonitorRequestTemplateCreateBulk.OnConflict
// documentation for more info.
func (u *ChannelMonitorRequestTemplateUpsertBulk) Update(set func(*ChannelMonitorRequestTemplateUpsert)) *ChannelMonitorRequestTemplateUpsertBulk {
	u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) {
		set(&ChannelMonitorRequestTemplateUpsert{UpdateSet: update})
	}))
	return u
}
|
||||
|
||||
// The methods below are thin per-field wrappers that forward to the
// corresponding ChannelMonitorRequestTemplateUpsert method via Update.

// SetUpdatedAt sets the "updated_at" field.
func (u *ChannelMonitorRequestTemplateUpsertBulk) SetUpdatedAt(v time.Time) *ChannelMonitorRequestTemplateUpsertBulk {
	return u.Update(func(s *ChannelMonitorRequestTemplateUpsert) {
		s.SetUpdatedAt(v)
	})
}

// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create.
func (u *ChannelMonitorRequestTemplateUpsertBulk) UpdateUpdatedAt() *ChannelMonitorRequestTemplateUpsertBulk {
	return u.Update(func(s *ChannelMonitorRequestTemplateUpsert) {
		s.UpdateUpdatedAt()
	})
}

// SetName sets the "name" field.
func (u *ChannelMonitorRequestTemplateUpsertBulk) SetName(v string) *ChannelMonitorRequestTemplateUpsertBulk {
	return u.Update(func(s *ChannelMonitorRequestTemplateUpsert) {
		s.SetName(v)
	})
}

// UpdateName sets the "name" field to the value that was provided on create.
func (u *ChannelMonitorRequestTemplateUpsertBulk) UpdateName() *ChannelMonitorRequestTemplateUpsertBulk {
	return u.Update(func(s *ChannelMonitorRequestTemplateUpsert) {
		s.UpdateName()
	})
}

// SetProvider sets the "provider" field.
func (u *ChannelMonitorRequestTemplateUpsertBulk) SetProvider(v channelmonitorrequesttemplate.Provider) *ChannelMonitorRequestTemplateUpsertBulk {
	return u.Update(func(s *ChannelMonitorRequestTemplateUpsert) {
		s.SetProvider(v)
	})
}

// UpdateProvider sets the "provider" field to the value that was provided on create.
func (u *ChannelMonitorRequestTemplateUpsertBulk) UpdateProvider() *ChannelMonitorRequestTemplateUpsertBulk {
	return u.Update(func(s *ChannelMonitorRequestTemplateUpsert) {
		s.UpdateProvider()
	})
}

// SetDescription sets the "description" field.
func (u *ChannelMonitorRequestTemplateUpsertBulk) SetDescription(v string) *ChannelMonitorRequestTemplateUpsertBulk {
	return u.Update(func(s *ChannelMonitorRequestTemplateUpsert) {
		s.SetDescription(v)
	})
}

// UpdateDescription sets the "description" field to the value that was provided on create.
func (u *ChannelMonitorRequestTemplateUpsertBulk) UpdateDescription() *ChannelMonitorRequestTemplateUpsertBulk {
	return u.Update(func(s *ChannelMonitorRequestTemplateUpsert) {
		s.UpdateDescription()
	})
}

// ClearDescription clears the value of the "description" field.
func (u *ChannelMonitorRequestTemplateUpsertBulk) ClearDescription() *ChannelMonitorRequestTemplateUpsertBulk {
	return u.Update(func(s *ChannelMonitorRequestTemplateUpsert) {
		s.ClearDescription()
	})
}

// SetExtraHeaders sets the "extra_headers" field.
func (u *ChannelMonitorRequestTemplateUpsertBulk) SetExtraHeaders(v map[string]string) *ChannelMonitorRequestTemplateUpsertBulk {
	return u.Update(func(s *ChannelMonitorRequestTemplateUpsert) {
		s.SetExtraHeaders(v)
	})
}

// UpdateExtraHeaders sets the "extra_headers" field to the value that was provided on create.
func (u *ChannelMonitorRequestTemplateUpsertBulk) UpdateExtraHeaders() *ChannelMonitorRequestTemplateUpsertBulk {
	return u.Update(func(s *ChannelMonitorRequestTemplateUpsert) {
		s.UpdateExtraHeaders()
	})
}

// SetBodyOverrideMode sets the "body_override_mode" field.
func (u *ChannelMonitorRequestTemplateUpsertBulk) SetBodyOverrideMode(v string) *ChannelMonitorRequestTemplateUpsertBulk {
	return u.Update(func(s *ChannelMonitorRequestTemplateUpsert) {
		s.SetBodyOverrideMode(v)
	})
}

// UpdateBodyOverrideMode sets the "body_override_mode" field to the value that was provided on create.
func (u *ChannelMonitorRequestTemplateUpsertBulk) UpdateBodyOverrideMode() *ChannelMonitorRequestTemplateUpsertBulk {
	return u.Update(func(s *ChannelMonitorRequestTemplateUpsert) {
		s.UpdateBodyOverrideMode()
	})
}

// SetBodyOverride sets the "body_override" field.
func (u *ChannelMonitorRequestTemplateUpsertBulk) SetBodyOverride(v map[string]interface{}) *ChannelMonitorRequestTemplateUpsertBulk {
	return u.Update(func(s *ChannelMonitorRequestTemplateUpsert) {
		s.SetBodyOverride(v)
	})
}

// UpdateBodyOverride sets the "body_override" field to the value that was provided on create.
func (u *ChannelMonitorRequestTemplateUpsertBulk) UpdateBodyOverride() *ChannelMonitorRequestTemplateUpsertBulk {
	return u.Update(func(s *ChannelMonitorRequestTemplateUpsert) {
		s.UpdateBodyOverride()
	})
}

// ClearBodyOverride clears the value of the "body_override" field.
func (u *ChannelMonitorRequestTemplateUpsertBulk) ClearBodyOverride() *ChannelMonitorRequestTemplateUpsertBulk {
	return u.Update(func(s *ChannelMonitorRequestTemplateUpsert) {
		s.ClearBodyOverride()
	})
}
|
||||
|
||||
// Exec executes the query.
|
||||
func (u *ChannelMonitorRequestTemplateUpsertBulk) Exec(ctx context.Context) error {
|
||||
if u.create.err != nil {
|
||||
return u.create.err
|
||||
}
|
||||
for i, b := range u.create.builders {
|
||||
if len(b.conflict) != 0 {
|
||||
return fmt.Errorf("ent: OnConflict was set for builder %d. Set it on the ChannelMonitorRequestTemplateCreateBulk instead", i)
|
||||
}
|
||||
}
|
||||
if len(u.create.conflict) == 0 {
|
||||
return errors.New("ent: missing options for ChannelMonitorRequestTemplateCreateBulk.OnConflict")
|
||||
}
|
||||
return u.create.Exec(ctx)
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (u *ChannelMonitorRequestTemplateUpsertBulk) ExecX(ctx context.Context) {
|
||||
if err := u.create.Exec(ctx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
88
backend/ent/channelmonitorrequesttemplate_delete.go
Normal file
@@ -0,0 +1,88 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package ent
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"entgo.io/ent/schema/field"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitorrequesttemplate"
|
||||
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||
)
|
||||
|
||||
// ChannelMonitorRequestTemplateDelete is the builder for deleting a ChannelMonitorRequestTemplate entity.
type ChannelMonitorRequestTemplateDelete struct {
	config
	hooks    []Hook
	mutation *ChannelMonitorRequestTemplateMutation
}

// Where appends a list of predicates to the ChannelMonitorRequestTemplateDelete builder.
func (_d *ChannelMonitorRequestTemplateDelete) Where(ps ...predicate.ChannelMonitorRequestTemplate) *ChannelMonitorRequestTemplateDelete {
	_d.mutation.Where(ps...)
	return _d
}

// Exec executes the deletion query and returns how many vertices were deleted.
func (_d *ChannelMonitorRequestTemplateDelete) Exec(ctx context.Context) (int, error) {
	return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks)
}

// ExecX is like Exec, but panics if an error occurs.
func (_d *ChannelMonitorRequestTemplateDelete) ExecX(ctx context.Context) int {
	n, err := _d.Exec(ctx)
	if err != nil {
		panic(err)
	}
	return n
}

// sqlExec builds the DELETE spec from the accumulated predicates, runs it
// against the driver, and returns the number of deleted rows. Constraint
// violations are wrapped in a typed *ConstraintError.
func (_d *ChannelMonitorRequestTemplateDelete) sqlExec(ctx context.Context) (int, error) {
	_spec := sqlgraph.NewDeleteSpec(channelmonitorrequesttemplate.Table, sqlgraph.NewFieldSpec(channelmonitorrequesttemplate.FieldID, field.TypeInt64))
	if ps := _d.mutation.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec)
	if err != nil && sqlgraph.IsConstraintError(err) {
		err = &ConstraintError{msg: err.Error(), wrap: err}
	}
	_d.mutation.done = true
	return affected, err
}
|
||||
|
||||
// ChannelMonitorRequestTemplateDeleteOne is the builder for deleting a single ChannelMonitorRequestTemplate entity.
type ChannelMonitorRequestTemplateDeleteOne struct {
	// _d is the underlying multi-row delete builder this wrapper delegates to.
	_d *ChannelMonitorRequestTemplateDelete
}

// Where appends a list of predicates to the ChannelMonitorRequestTemplateDelete builder.
func (_d *ChannelMonitorRequestTemplateDeleteOne) Where(ps ...predicate.ChannelMonitorRequestTemplate) *ChannelMonitorRequestTemplateDeleteOne {
	_d._d.mutation.Where(ps...)
	return _d
}

// Exec executes the deletion query.
func (_d *ChannelMonitorRequestTemplateDeleteOne) Exec(ctx context.Context) error {
	n, err := _d._d.Exec(ctx)
	switch {
	case err != nil:
		return err
	case n == 0:
		// No rows matched: the single-entity API reports a typed not-found error.
		return &NotFoundError{channelmonitorrequesttemplate.Label}
	default:
		return nil
	}
}

// ExecX is like Exec, but panics if an error occurs.
func (_d *ChannelMonitorRequestTemplateDeleteOne) ExecX(ctx context.Context) {
	if err := _d.Exec(ctx); err != nil {
		panic(err)
	}
}
|
||||
648
backend/ent/channelmonitorrequesttemplate_query.go
Normal file
@@ -0,0 +1,648 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package ent
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql/driver"
|
||||
"fmt"
|
||||
"math"
|
||||
|
||||
"entgo.io/ent"
|
||||
"entgo.io/ent/dialect"
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"entgo.io/ent/schema/field"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitor"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitorrequesttemplate"
|
||||
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||
)
|
||||
|
||||
// ChannelMonitorRequestTemplateQuery is the builder for querying ChannelMonitorRequestTemplate entities.
type ChannelMonitorRequestTemplateQuery struct {
	config
	ctx        *QueryContext
	order      []channelmonitorrequesttemplate.OrderOption
	inters     []Interceptor
	predicates []predicate.ChannelMonitorRequestTemplate
	// withMonitors, when non-nil, eager-loads the "monitors" edge.
	withMonitors *ChannelMonitorQuery
	modifiers    []func(*sql.Selector)
	// intermediate query (i.e. traversal path).
	sql  *sql.Selector
	path func(context.Context) (*sql.Selector, error)
}
|
||||
|
||||
// Where adds a new predicate for the ChannelMonitorRequestTemplateQuery builder.
func (_q *ChannelMonitorRequestTemplateQuery) Where(ps ...predicate.ChannelMonitorRequestTemplate) *ChannelMonitorRequestTemplateQuery {
	_q.predicates = append(_q.predicates, ps...)
	return _q
}

// Limit the number of records to be returned by this query.
func (_q *ChannelMonitorRequestTemplateQuery) Limit(limit int) *ChannelMonitorRequestTemplateQuery {
	_q.ctx.Limit = &limit
	return _q
}

// Offset to start from.
func (_q *ChannelMonitorRequestTemplateQuery) Offset(offset int) *ChannelMonitorRequestTemplateQuery {
	_q.ctx.Offset = &offset
	return _q
}

// Unique configures the query builder to filter duplicate records on query.
// By default, unique is set to true, and can be disabled using this method.
func (_q *ChannelMonitorRequestTemplateQuery) Unique(unique bool) *ChannelMonitorRequestTemplateQuery {
	_q.ctx.Unique = &unique
	return _q
}

// Order specifies how the records should be ordered.
func (_q *ChannelMonitorRequestTemplateQuery) Order(o ...channelmonitorrequesttemplate.OrderOption) *ChannelMonitorRequestTemplateQuery {
	_q.order = append(_q.order, o...)
	return _q
}
|
||||
|
||||
// QueryMonitors chains the current query on the "monitors" edge.
// The returned ChannelMonitorQuery resolves lazily: the traversal step is
// only built (via query.path) when the chained query executes.
func (_q *ChannelMonitorRequestTemplateQuery) QueryMonitors() *ChannelMonitorQuery {
	query := (&ChannelMonitorClient{config: _q.config}).Query()
	query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
		if err := _q.prepareQuery(ctx); err != nil {
			return nil, err
		}
		selector := _q.sqlQuery(ctx)
		if err := selector.Err(); err != nil {
			return nil, err
		}
		step := sqlgraph.NewStep(
			sqlgraph.From(channelmonitorrequesttemplate.Table, channelmonitorrequesttemplate.FieldID, selector),
			sqlgraph.To(channelmonitor.Table, channelmonitor.FieldID),
			sqlgraph.Edge(sqlgraph.O2M, true, channelmonitorrequesttemplate.MonitorsTable, channelmonitorrequesttemplate.MonitorsColumn),
		)
		fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step)
		return fromU, nil
	}
	return query
}
|
||||
|
||||
// First returns the first ChannelMonitorRequestTemplate entity from the query.
|
||||
// Returns a *NotFoundError when no ChannelMonitorRequestTemplate was found.
|
||||
func (_q *ChannelMonitorRequestTemplateQuery) First(ctx context.Context) (*ChannelMonitorRequestTemplate, error) {
|
||||
nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(nodes) == 0 {
|
||||
return nil, &NotFoundError{channelmonitorrequesttemplate.Label}
|
||||
}
|
||||
return nodes[0], nil
|
||||
}
|
||||
|
||||
// FirstX is like First, but panics if an error occurs.
|
||||
func (_q *ChannelMonitorRequestTemplateQuery) FirstX(ctx context.Context) *ChannelMonitorRequestTemplate {
|
||||
node, err := _q.First(ctx)
|
||||
if err != nil && !IsNotFound(err) {
|
||||
panic(err)
|
||||
}
|
||||
return node
|
||||
}
|
||||
|
||||
// FirstID returns the first ChannelMonitorRequestTemplate ID from the query.
// Returns a *NotFoundError when no ChannelMonitorRequestTemplate ID was found.
func (_q *ChannelMonitorRequestTemplateQuery) FirstID(ctx context.Context) (id int64, err error) {
	var ids []int64
	if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil {
		return
	}
	if len(ids) == 0 {
		err = &NotFoundError{channelmonitorrequesttemplate.Label}
		return
	}
	return ids[0], nil
}
|
||||
|
||||
// FirstIDX is like FirstID, but panics if an error occurs.
// A not-found result does not panic; the zero ID is returned instead.
func (_q *ChannelMonitorRequestTemplateQuery) FirstIDX(ctx context.Context) int64 {
	id, err := _q.FirstID(ctx)
	if err != nil && !IsNotFound(err) {
		panic(err)
	}
	return id
}
|
||||
|
||||
// Only returns a single ChannelMonitorRequestTemplate entity found by the query, ensuring it only returns one.
// Returns a *NotSingularError when more than one ChannelMonitorRequestTemplate entity is found.
// Returns a *NotFoundError when no ChannelMonitorRequestTemplate entities are found.
func (_q *ChannelMonitorRequestTemplateQuery) Only(ctx context.Context) (*ChannelMonitorRequestTemplate, error) {
	// Fetch up to two rows: two results is enough to detect non-singularity.
	nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly))
	if err != nil {
		return nil, err
	}
	switch len(nodes) {
	case 1:
		return nodes[0], nil
	case 0:
		return nil, &NotFoundError{channelmonitorrequesttemplate.Label}
	default:
		return nil, &NotSingularError{channelmonitorrequesttemplate.Label}
	}
}
|
||||
|
||||
// OnlyX is like Only, but panics if an error occurs.
// Unlike FirstX, a not-found result also panics.
func (_q *ChannelMonitorRequestTemplateQuery) OnlyX(ctx context.Context) *ChannelMonitorRequestTemplate {
	node, err := _q.Only(ctx)
	if err != nil {
		panic(err)
	}
	return node
}
|
||||
|
||||
// OnlyID is like Only, but returns the only ChannelMonitorRequestTemplate ID in the query.
// Returns a *NotSingularError when more than one ChannelMonitorRequestTemplate ID is found.
// Returns a *NotFoundError when no entities are found.
func (_q *ChannelMonitorRequestTemplateQuery) OnlyID(ctx context.Context) (id int64, err error) {
	var ids []int64
	// Fetch up to two IDs: two results is enough to detect non-singularity.
	if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil {
		return
	}
	switch len(ids) {
	case 1:
		id = ids[0]
	case 0:
		err = &NotFoundError{channelmonitorrequesttemplate.Label}
	default:
		err = &NotSingularError{channelmonitorrequesttemplate.Label}
	}
	return
}
|
||||
|
||||
// OnlyIDX is like OnlyID, but panics if an error occurs.
func (_q *ChannelMonitorRequestTemplateQuery) OnlyIDX(ctx context.Context) int64 {
	id, err := _q.OnlyID(ctx)
	if err != nil {
		panic(err)
	}
	return id
}
|
||||
|
||||
// All executes the query and returns a list of ChannelMonitorRequestTemplates.
func (_q *ChannelMonitorRequestTemplateQuery) All(ctx context.Context) ([]*ChannelMonitorRequestTemplate, error) {
	ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll)
	if err := _q.prepareQuery(ctx); err != nil {
		return nil, err
	}
	// Run the underlying querier through any registered interceptors.
	qr := querierAll[[]*ChannelMonitorRequestTemplate, *ChannelMonitorRequestTemplateQuery]()
	return withInterceptors[[]*ChannelMonitorRequestTemplate](ctx, _q, qr, _q.inters)
}
|
||||
|
||||
// AllX is like All, but panics if an error occurs.
func (_q *ChannelMonitorRequestTemplateQuery) AllX(ctx context.Context) []*ChannelMonitorRequestTemplate {
	nodes, err := _q.All(ctx)
	if err != nil {
		panic(err)
	}
	return nodes
}
|
||||
|
||||
// IDs executes the query and returns a list of ChannelMonitorRequestTemplate IDs.
func (_q *ChannelMonitorRequestTemplateQuery) IDs(ctx context.Context) (ids []int64, err error) {
	// When the query traverses an edge (path != nil) and uniqueness was not set
	// explicitly, default to unique to avoid duplicate IDs from the join.
	if _q.ctx.Unique == nil && _q.path != nil {
		_q.Unique(true)
	}
	ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs)
	if err = _q.Select(channelmonitorrequesttemplate.FieldID).Scan(ctx, &ids); err != nil {
		return nil, err
	}
	return ids, nil
}
|
||||
|
||||
// IDsX is like IDs, but panics if an error occurs.
func (_q *ChannelMonitorRequestTemplateQuery) IDsX(ctx context.Context) []int64 {
	ids, err := _q.IDs(ctx)
	if err != nil {
		panic(err)
	}
	return ids
}
|
||||
|
||||
// Count returns the count of the given query.
func (_q *ChannelMonitorRequestTemplateQuery) Count(ctx context.Context) (int, error) {
	ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount)
	if err := _q.prepareQuery(ctx); err != nil {
		return 0, err
	}
	// Route the count through registered interceptors as well.
	return withInterceptors[int](ctx, _q, querierCount[*ChannelMonitorRequestTemplateQuery](), _q.inters)
}
|
||||
|
||||
// CountX is like Count, but panics if an error occurs.
func (_q *ChannelMonitorRequestTemplateQuery) CountX(ctx context.Context) int {
	count, err := _q.Count(ctx)
	if err != nil {
		panic(err)
	}
	return count
}
|
||||
|
||||
// Exist returns true if the query has elements in the graph.
func (_q *ChannelMonitorRequestTemplateQuery) Exist(ctx context.Context) (bool, error) {
	ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist)
	// Existence is implemented as "fetch the first ID"; a NotFoundError means false.
	switch _, err := _q.FirstID(ctx); {
	case IsNotFound(err):
		return false, nil
	case err != nil:
		return false, fmt.Errorf("ent: check existence: %w", err)
	default:
		return true, nil
	}
}
|
||||
|
||||
// ExistX is like Exist, but panics if an error occurs.
func (_q *ChannelMonitorRequestTemplateQuery) ExistX(ctx context.Context) bool {
	exist, err := _q.Exist(ctx)
	if err != nil {
		panic(err)
	}
	return exist
}
|
||||
|
||||
// Clone returns a duplicate of the ChannelMonitorRequestTemplateQuery builder, including all associated steps. It can be
// used to prepare common query builders and use them differently after the clone is made.
func (_q *ChannelMonitorRequestTemplateQuery) Clone() *ChannelMonitorRequestTemplateQuery {
	if _q == nil {
		return nil
	}
	// Slices are copied via append so the clone does not share backing arrays
	// with the original builder.
	return &ChannelMonitorRequestTemplateQuery{
		config:       _q.config,
		ctx:          _q.ctx.Clone(),
		order:        append([]channelmonitorrequesttemplate.OrderOption{}, _q.order...),
		inters:       append([]Interceptor{}, _q.inters...),
		predicates:   append([]predicate.ChannelMonitorRequestTemplate{}, _q.predicates...),
		withMonitors: _q.withMonitors.Clone(),
		// clone intermediate query.
		sql:  _q.sql.Clone(),
		path: _q.path,
	}
}
|
||||
|
||||
// WithMonitors tells the query-builder to eager-load the nodes that are connected to
// the "monitors" edge. The optional arguments are used to configure the query builder of the edge.
func (_q *ChannelMonitorRequestTemplateQuery) WithMonitors(opts ...func(*ChannelMonitorQuery)) *ChannelMonitorRequestTemplateQuery {
	query := (&ChannelMonitorClient{config: _q.config}).Query()
	for _, opt := range opts {
		opt(query)
	}
	_q.withMonitors = query
	return _q
}
|
||||
|
||||
// GroupBy is used to group vertices by one or more fields/columns.
// It is often used with aggregate functions, like: count, max, mean, min, sum.
//
// Example:
//
//	var v []struct {
//		CreatedAt time.Time `json:"created_at,omitempty"`
//		Count int `json:"count,omitempty"`
//	}
//
//	client.ChannelMonitorRequestTemplate.Query().
//		GroupBy(channelmonitorrequesttemplate.FieldCreatedAt).
//		Aggregate(ent.Count()).
//		Scan(ctx, &v)
func (_q *ChannelMonitorRequestTemplateQuery) GroupBy(field string, fields ...string) *ChannelMonitorRequestTemplateGroupBy {
	_q.ctx.Fields = append([]string{field}, fields...)
	grbuild := &ChannelMonitorRequestTemplateGroupBy{build: _q}
	// The group-by builder shares the query's field list by pointer so later
	// mutations on either side stay in sync.
	grbuild.flds = &_q.ctx.Fields
	grbuild.label = channelmonitorrequesttemplate.Label
	grbuild.scan = grbuild.Scan
	return grbuild
}
|
||||
|
||||
// Select allows the selection one or more fields/columns for the given query,
// instead of selecting all fields in the entity.
//
// Example:
//
//	var v []struct {
//		CreatedAt time.Time `json:"created_at,omitempty"`
//	}
//
//	client.ChannelMonitorRequestTemplate.Query().
//		Select(channelmonitorrequesttemplate.FieldCreatedAt).
//		Scan(ctx, &v)
func (_q *ChannelMonitorRequestTemplateQuery) Select(fields ...string) *ChannelMonitorRequestTemplateSelect {
	_q.ctx.Fields = append(_q.ctx.Fields, fields...)
	sbuild := &ChannelMonitorRequestTemplateSelect{ChannelMonitorRequestTemplateQuery: _q}
	sbuild.label = channelmonitorrequesttemplate.Label
	sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan
	return sbuild
}
|
||||
|
||||
// Aggregate returns a ChannelMonitorRequestTemplateSelect configured with the given aggregations.
func (_q *ChannelMonitorRequestTemplateQuery) Aggregate(fns ...AggregateFunc) *ChannelMonitorRequestTemplateSelect {
	return _q.Select().Aggregate(fns...)
}
|
||||
|
||||
// prepareQuery runs traverser interceptors, validates the selected field
// names, and resolves any pending edge-traversal path into _q.sql. It must
// succeed before the query can be executed.
func (_q *ChannelMonitorRequestTemplateQuery) prepareQuery(ctx context.Context) error {
	for _, inter := range _q.inters {
		if inter == nil {
			return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
		}
		if trv, ok := inter.(Traverser); ok {
			if err := trv.Traverse(ctx, _q); err != nil {
				return err
			}
		}
	}
	// Reject unknown column names early with a validation error.
	for _, f := range _q.ctx.Fields {
		if !channelmonitorrequesttemplate.ValidColumn(f) {
			return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
		}
	}
	// A non-nil path means this query was chained from another entity's edge.
	if _q.path != nil {
		prev, err := _q.path(ctx)
		if err != nil {
			return err
		}
		_q.sql = prev
	}
	return nil
}
|
||||
|
||||
// sqlAll executes the query against the database, scans all rows into
// ChannelMonitorRequestTemplate nodes, and eager-loads the "monitors" edge
// when requested via WithMonitors.
func (_q *ChannelMonitorRequestTemplateQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*ChannelMonitorRequestTemplate, error) {
	var (
		nodes = []*ChannelMonitorRequestTemplate{}
		_spec = _q.querySpec()
		// loadedTypes flags which edges were eager-loaded; index 0 = monitors.
		loadedTypes = [1]bool{
			_q.withMonitors != nil,
		}
	)
	_spec.ScanValues = func(columns []string) ([]any, error) {
		return (*ChannelMonitorRequestTemplate).scanValues(nil, columns)
	}
	_spec.Assign = func(columns []string, values []any) error {
		node := &ChannelMonitorRequestTemplate{config: _q.config}
		nodes = append(nodes, node)
		node.Edges.loadedTypes = loadedTypes
		return node.assignValues(columns, values)
	}
	if len(_q.modifiers) > 0 {
		_spec.Modifiers = _q.modifiers
	}
	for i := range hooks {
		hooks[i](ctx, _spec)
	}
	if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil {
		return nil, err
	}
	if len(nodes) == 0 {
		return nodes, nil
	}
	// Eager-load the monitors edge in a single follow-up query.
	if query := _q.withMonitors; query != nil {
		if err := _q.loadMonitors(ctx, query, nodes,
			func(n *ChannelMonitorRequestTemplate) { n.Edges.Monitors = []*ChannelMonitor{} },
			func(n *ChannelMonitorRequestTemplate, e *ChannelMonitor) {
				n.Edges.Monitors = append(n.Edges.Monitors, e)
			}); err != nil {
			return nil, err
		}
	}
	return nodes, nil
}
|
||||
|
||||
// loadMonitors fetches all ChannelMonitor rows whose foreign key references
// one of the given template nodes and attaches each neighbor to its parent
// via assign. init, when non-nil, is called once per node to initialize the
// edge slice before any neighbors are appended.
func (_q *ChannelMonitorRequestTemplateQuery) loadMonitors(ctx context.Context, query *ChannelMonitorQuery, nodes []*ChannelMonitorRequestTemplate, init func(*ChannelMonitorRequestTemplate), assign func(*ChannelMonitorRequestTemplate, *ChannelMonitor)) error {
	fks := make([]driver.Value, 0, len(nodes))
	nodeids := make(map[int64]*ChannelMonitorRequestTemplate)
	for i := range nodes {
		fks = append(fks, nodes[i].ID)
		nodeids[nodes[i].ID] = nodes[i]
		if init != nil {
			init(nodes[i])
		}
	}
	// If the caller restricted the neighbor query's columns, make sure the FK
	// column is still selected so the results can be mapped back to parents.
	if len(query.ctx.Fields) > 0 {
		query.ctx.AppendFieldOnce(channelmonitor.FieldTemplateID)
	}
	query.Where(predicate.ChannelMonitor(func(s *sql.Selector) {
		s.Where(sql.InValues(s.C(channelmonitorrequesttemplate.MonitorsColumn), fks...))
	}))
	neighbors, err := query.All(ctx)
	if err != nil {
		return err
	}
	for _, n := range neighbors {
		fk := n.TemplateID
		if fk == nil {
			return fmt.Errorf(`foreign-key "template_id" is nil for node %v`, n.ID)
		}
		node, ok := nodeids[*fk]
		if !ok {
			return fmt.Errorf(`unexpected referenced foreign-key "template_id" returned %v for node %v`, *fk, n.ID)
		}
		assign(node, n)
	}
	return nil
}
|
||||
|
||||
// sqlCount executes a COUNT over the current query spec.
func (_q *ChannelMonitorRequestTemplateQuery) sqlCount(ctx context.Context) (int, error) {
	_spec := _q.querySpec()
	if len(_q.modifiers) > 0 {
		_spec.Modifiers = _q.modifiers
	}
	_spec.Node.Columns = _q.ctx.Fields
	// With an explicit field selection, honor the caller's Unique setting
	// (COUNT DISTINCT vs plain COUNT).
	if len(_q.ctx.Fields) > 0 {
		_spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique
	}
	return sqlgraph.CountNodes(ctx, _q.driver, _spec)
}
|
||||
|
||||
// querySpec assembles the sqlgraph.QuerySpec (columns, predicates, ordering,
// limit/offset, uniqueness) describing this query for execution.
func (_q *ChannelMonitorRequestTemplateQuery) querySpec() *sqlgraph.QuerySpec {
	_spec := sqlgraph.NewQuerySpec(channelmonitorrequesttemplate.Table, channelmonitorrequesttemplate.Columns, sqlgraph.NewFieldSpec(channelmonitorrequesttemplate.FieldID, field.TypeInt64))
	_spec.From = _q.sql
	if unique := _q.ctx.Unique; unique != nil {
		_spec.Unique = *unique
	} else if _q.path != nil {
		// Edge traversals default to unique results to suppress join duplicates.
		_spec.Unique = true
	}
	if fields := _q.ctx.Fields; len(fields) > 0 {
		// The ID column is always selected; skip it if the caller listed it too.
		_spec.Node.Columns = make([]string, 0, len(fields))
		_spec.Node.Columns = append(_spec.Node.Columns, channelmonitorrequesttemplate.FieldID)
		for i := range fields {
			if fields[i] != channelmonitorrequesttemplate.FieldID {
				_spec.Node.Columns = append(_spec.Node.Columns, fields[i])
			}
		}
	}
	if ps := _q.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	if limit := _q.ctx.Limit; limit != nil {
		_spec.Limit = *limit
	}
	if offset := _q.ctx.Offset; offset != nil {
		_spec.Offset = *offset
	}
	if ps := _q.order; len(ps) > 0 {
		_spec.Order = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	return _spec
}
|
||||
|
||||
// sqlQuery builds the raw SQL selector for this query, applying column
// selection, distinct, modifiers, predicates, ordering, and limit/offset.
func (_q *ChannelMonitorRequestTemplateQuery) sqlQuery(ctx context.Context) *sql.Selector {
	builder := sql.Dialect(_q.driver.Dialect())
	t1 := builder.Table(channelmonitorrequesttemplate.Table)
	columns := _q.ctx.Fields
	if len(columns) == 0 {
		columns = channelmonitorrequesttemplate.Columns
	}
	selector := builder.Select(t1.Columns(columns...)...).From(t1)
	// An existing selector (from an edge traversal) takes precedence over a
	// fresh SELECT; re-apply the column list on top of it.
	if _q.sql != nil {
		selector = _q.sql
		selector.Select(selector.Columns(columns...)...)
	}
	if _q.ctx.Unique != nil && *_q.ctx.Unique {
		selector.Distinct()
	}
	for _, m := range _q.modifiers {
		m(selector)
	}
	for _, p := range _q.predicates {
		p(selector)
	}
	for _, p := range _q.order {
		p(selector)
	}
	if offset := _q.ctx.Offset; offset != nil {
		// limit is mandatory for offset clause. We start
		// with default value, and override it below if needed.
		selector.Offset(*offset).Limit(math.MaxInt32)
	}
	if limit := _q.ctx.Limit; limit != nil {
		selector.Limit(*limit)
	}
	return selector
}
|
||||
|
||||
// ForUpdate locks the selected rows against concurrent updates, and prevent them from being
// updated, deleted or "selected ... for update" by other sessions, until the transaction is
// either committed or rolled-back.
func (_q *ChannelMonitorRequestTemplateQuery) ForUpdate(opts ...sql.LockOption) *ChannelMonitorRequestTemplateQuery {
	// Postgres disallows SELECT DISTINCT ... FOR UPDATE; drop uniqueness there.
	if _q.driver.Dialect() == dialect.Postgres {
		_q.Unique(false)
	}
	_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
		s.ForUpdate(opts...)
	})
	return _q
}
|
||||
|
||||
// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock
// on any rows that are read. Other sessions can read the rows, but cannot modify them
// until your transaction commits.
func (_q *ChannelMonitorRequestTemplateQuery) ForShare(opts ...sql.LockOption) *ChannelMonitorRequestTemplateQuery {
	// Postgres disallows SELECT DISTINCT ... FOR SHARE; drop uniqueness there.
	if _q.driver.Dialect() == dialect.Postgres {
		_q.Unique(false)
	}
	_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
		s.ForShare(opts...)
	})
	return _q
}
|
||||
|
||||
// ChannelMonitorRequestTemplateGroupBy is the group-by builder for ChannelMonitorRequestTemplate entities.
type ChannelMonitorRequestTemplateGroupBy struct {
	selector                                      // embedded: shared field/aggregation/scan state
	build *ChannelMonitorRequestTemplateQuery     // the underlying query being grouped
}
|
||||
|
||||
// Aggregate adds the given aggregation functions to the group-by query.
func (_g *ChannelMonitorRequestTemplateGroupBy) Aggregate(fns ...AggregateFunc) *ChannelMonitorRequestTemplateGroupBy {
	_g.fns = append(_g.fns, fns...)
	return _g
}
|
||||
|
||||
// Scan applies the selector query and scans the result into the given value.
func (_g *ChannelMonitorRequestTemplateGroupBy) Scan(ctx context.Context, v any) error {
	ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy)
	if err := _g.build.prepareQuery(ctx); err != nil {
		return err
	}
	return scanWithInterceptors[*ChannelMonitorRequestTemplateQuery, *ChannelMonitorRequestTemplateGroupBy](ctx, _g.build, _g, _g.build.inters, v)
}
|
||||
|
||||
// sqlScan builds the GROUP BY SELECT (grouped columns plus aggregations),
// executes it, and scans all rows into v.
func (_g *ChannelMonitorRequestTemplateGroupBy) sqlScan(ctx context.Context, root *ChannelMonitorRequestTemplateQuery, v any) error {
	selector := root.sqlQuery(ctx).Select()
	aggregation := make([]string, 0, len(_g.fns))
	for _, fn := range _g.fns {
		aggregation = append(aggregation, fn(selector))
	}
	// Only build the column list if nothing was selected yet (e.g. by a modifier).
	if len(selector.SelectedColumns()) == 0 {
		columns := make([]string, 0, len(*_g.flds)+len(_g.fns))
		for _, f := range *_g.flds {
			columns = append(columns, selector.C(f))
		}
		columns = append(columns, aggregation...)
		selector.Select(columns...)
	}
	selector.GroupBy(selector.Columns(*_g.flds...)...)
	if err := selector.Err(); err != nil {
		return err
	}
	rows := &sql.Rows{}
	query, args := selector.Query()
	if err := _g.build.driver.Query(ctx, query, args, rows); err != nil {
		return err
	}
	defer rows.Close()
	return sql.ScanSlice(rows, v)
}
|
||||
|
||||
// ChannelMonitorRequestTemplateSelect is the builder for selecting fields of ChannelMonitorRequestTemplate entities.
type ChannelMonitorRequestTemplateSelect struct {
	*ChannelMonitorRequestTemplateQuery          // embedded: the query whose fields are selected
	selector                                     // embedded: shared field/aggregation/scan state
}
|
||||
|
||||
// Aggregate adds the given aggregation functions to the selector query.
func (_s *ChannelMonitorRequestTemplateSelect) Aggregate(fns ...AggregateFunc) *ChannelMonitorRequestTemplateSelect {
	_s.fns = append(_s.fns, fns...)
	return _s
}
|
||||
|
||||
// Scan applies the selector query and scans the result into the given value.
func (_s *ChannelMonitorRequestTemplateSelect) Scan(ctx context.Context, v any) error {
	ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect)
	if err := _s.prepareQuery(ctx); err != nil {
		return err
	}
	return scanWithInterceptors[*ChannelMonitorRequestTemplateQuery, *ChannelMonitorRequestTemplateSelect](ctx, _s.ChannelMonitorRequestTemplateQuery, _s, _s.inters, v)
}
|
||||
|
||||
// sqlScan executes the SELECT (fields plus any aggregations) and scans all
// rows into v.
func (_s *ChannelMonitorRequestTemplateSelect) sqlScan(ctx context.Context, root *ChannelMonitorRequestTemplateQuery, v any) error {
	selector := root.sqlQuery(ctx)
	aggregation := make([]string, 0, len(_s.fns))
	for _, fn := range _s.fns {
		aggregation = append(aggregation, fn(selector))
	}
	// Aggregations replace the column list when no fields were selected,
	// and are appended to it otherwise.
	switch n := len(*_s.selector.flds); {
	case n == 0 && len(aggregation) > 0:
		selector.Select(aggregation...)
	case n != 0 && len(aggregation) > 0:
		selector.AppendSelect(aggregation...)
	}
	rows := &sql.Rows{}
	query, args := selector.Query()
	if err := _s.driver.Query(ctx, query, args, rows); err != nil {
		return err
	}
	defer rows.Close()
	return sql.ScanSlice(rows, v)
}
|
||||
639
backend/ent/channelmonitorrequesttemplate_update.go
Normal file
@@ -0,0 +1,639 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package ent
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"entgo.io/ent/schema/field"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitor"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitorrequesttemplate"
|
||||
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||
)
|
||||
|
||||
// ChannelMonitorRequestTemplateUpdate is the builder for updating ChannelMonitorRequestTemplate entities.
type ChannelMonitorRequestTemplateUpdate struct {
	config                                           // embedded: driver and client configuration
	hooks    []Hook                                  // mutation hooks applied around Save
	mutation *ChannelMonitorRequestTemplateMutation  // accumulated field/edge changes
}
|
||||
|
||||
// Where appends a list predicates to the ChannelMonitorRequestTemplateUpdate builder.
func (_u *ChannelMonitorRequestTemplateUpdate) Where(ps ...predicate.ChannelMonitorRequestTemplate) *ChannelMonitorRequestTemplateUpdate {
	_u.mutation.Where(ps...)
	return _u
}
|
||||
|
||||
// SetUpdatedAt sets the "updated_at" field.
func (_u *ChannelMonitorRequestTemplateUpdate) SetUpdatedAt(v time.Time) *ChannelMonitorRequestTemplateUpdate {
	_u.mutation.SetUpdatedAt(v)
	return _u
}
|
||||
|
||||
// SetName sets the "name" field.
func (_u *ChannelMonitorRequestTemplateUpdate) SetName(v string) *ChannelMonitorRequestTemplateUpdate {
	_u.mutation.SetName(v)
	return _u
}
|
||||
|
||||
// SetNillableName sets the "name" field if the given value is not nil.
// A nil pointer leaves the field untouched (useful for optional API inputs).
func (_u *ChannelMonitorRequestTemplateUpdate) SetNillableName(v *string) *ChannelMonitorRequestTemplateUpdate {
	if v != nil {
		_u.SetName(*v)
	}
	return _u
}
|
||||
|
||||
// SetProvider sets the "provider" field.
func (_u *ChannelMonitorRequestTemplateUpdate) SetProvider(v channelmonitorrequesttemplate.Provider) *ChannelMonitorRequestTemplateUpdate {
	_u.mutation.SetProvider(v)
	return _u
}
|
||||
|
||||
// SetNillableProvider sets the "provider" field if the given value is not nil.
// A nil pointer leaves the field untouched.
func (_u *ChannelMonitorRequestTemplateUpdate) SetNillableProvider(v *channelmonitorrequesttemplate.Provider) *ChannelMonitorRequestTemplateUpdate {
	if v != nil {
		_u.SetProvider(*v)
	}
	return _u
}
|
||||
|
||||
// SetDescription sets the "description" field.
func (_u *ChannelMonitorRequestTemplateUpdate) SetDescription(v string) *ChannelMonitorRequestTemplateUpdate {
	_u.mutation.SetDescription(v)
	return _u
}
|
||||
|
||||
// SetNillableDescription sets the "description" field if the given value is not nil.
// A nil pointer leaves the field untouched.
func (_u *ChannelMonitorRequestTemplateUpdate) SetNillableDescription(v *string) *ChannelMonitorRequestTemplateUpdate {
	if v != nil {
		_u.SetDescription(*v)
	}
	return _u
}
|
||||
|
||||
// ClearDescription clears the value of the "description" field.
func (_u *ChannelMonitorRequestTemplateUpdate) ClearDescription() *ChannelMonitorRequestTemplateUpdate {
	_u.mutation.ClearDescription()
	return _u
}
|
||||
|
||||
// SetExtraHeaders sets the "extra_headers" field.
// The whole map replaces the stored value; it is not merged.
func (_u *ChannelMonitorRequestTemplateUpdate) SetExtraHeaders(v map[string]string) *ChannelMonitorRequestTemplateUpdate {
	_u.mutation.SetExtraHeaders(v)
	return _u
}
|
||||
|
||||
// SetBodyOverrideMode sets the "body_override_mode" field.
func (_u *ChannelMonitorRequestTemplateUpdate) SetBodyOverrideMode(v string) *ChannelMonitorRequestTemplateUpdate {
	_u.mutation.SetBodyOverrideMode(v)
	return _u
}
|
||||
|
||||
// SetNillableBodyOverrideMode sets the "body_override_mode" field if the given value is not nil.
// A nil pointer leaves the field untouched.
func (_u *ChannelMonitorRequestTemplateUpdate) SetNillableBodyOverrideMode(v *string) *ChannelMonitorRequestTemplateUpdate {
	if v != nil {
		_u.SetBodyOverrideMode(*v)
	}
	return _u
}
|
||||
|
||||
// SetBodyOverride sets the "body_override" field.
// The whole map replaces the stored value; it is not merged.
func (_u *ChannelMonitorRequestTemplateUpdate) SetBodyOverride(v map[string]interface{}) *ChannelMonitorRequestTemplateUpdate {
	_u.mutation.SetBodyOverride(v)
	return _u
}
|
||||
|
||||
// ClearBodyOverride clears the value of the "body_override" field.
func (_u *ChannelMonitorRequestTemplateUpdate) ClearBodyOverride() *ChannelMonitorRequestTemplateUpdate {
	_u.mutation.ClearBodyOverride()
	return _u
}
|
||||
|
||||
// AddMonitorIDs adds the "monitors" edge to the ChannelMonitor entity by IDs.
func (_u *ChannelMonitorRequestTemplateUpdate) AddMonitorIDs(ids ...int64) *ChannelMonitorRequestTemplateUpdate {
	_u.mutation.AddMonitorIDs(ids...)
	return _u
}
|
||||
|
||||
// AddMonitors adds the "monitors" edges to the ChannelMonitor entity.
// Convenience wrapper around AddMonitorIDs for already-loaded entities.
func (_u *ChannelMonitorRequestTemplateUpdate) AddMonitors(v ...*ChannelMonitor) *ChannelMonitorRequestTemplateUpdate {
	ids := make([]int64, len(v))
	for i := range v {
		ids[i] = v[i].ID
	}
	return _u.AddMonitorIDs(ids...)
}
|
||||
|
||||
// Mutation returns the ChannelMonitorRequestTemplateMutation object of the builder.
func (_u *ChannelMonitorRequestTemplateUpdate) Mutation() *ChannelMonitorRequestTemplateMutation {
	return _u.mutation
}
|
||||
|
||||
// ClearMonitors clears all "monitors" edges to the ChannelMonitor entity.
func (_u *ChannelMonitorRequestTemplateUpdate) ClearMonitors() *ChannelMonitorRequestTemplateUpdate {
	_u.mutation.ClearMonitors()
	return _u
}
|
||||
|
||||
// RemoveMonitorIDs removes the "monitors" edge to ChannelMonitor entities by IDs.
func (_u *ChannelMonitorRequestTemplateUpdate) RemoveMonitorIDs(ids ...int64) *ChannelMonitorRequestTemplateUpdate {
	_u.mutation.RemoveMonitorIDs(ids...)
	return _u
}
|
||||
|
||||
// RemoveMonitors removes "monitors" edges to ChannelMonitor entities.
// Convenience wrapper around RemoveMonitorIDs for already-loaded entities.
func (_u *ChannelMonitorRequestTemplateUpdate) RemoveMonitors(v ...*ChannelMonitor) *ChannelMonitorRequestTemplateUpdate {
	ids := make([]int64, len(v))
	for i := range v {
		ids[i] = v[i].ID
	}
	return _u.RemoveMonitorIDs(ids...)
}
|
||||
|
||||
// Save executes the query and returns the number of nodes affected by the update operation.
func (_u *ChannelMonitorRequestTemplateUpdate) Save(ctx context.Context) (int, error) {
	// Populate defaulted fields (e.g. updated_at) before running hooks and SQL.
	_u.defaults()
	return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks)
}
|
||||
|
||||
// SaveX is like Save, but panics if an error occurs.
func (_u *ChannelMonitorRequestTemplateUpdate) SaveX(ctx context.Context) int {
	affected, err := _u.Save(ctx)
	if err != nil {
		panic(err)
	}
	return affected
}
|
||||
|
||||
// Exec executes the query, discarding the affected-rows count.
func (_u *ChannelMonitorRequestTemplateUpdate) Exec(ctx context.Context) error {
	_, err := _u.Save(ctx)
	return err
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
func (_u *ChannelMonitorRequestTemplateUpdate) ExecX(ctx context.Context) {
	if err := _u.Exec(ctx); err != nil {
		panic(err)
	}
}
|
||||
|
||||
// defaults sets the default values of the builder before save.
// Currently this only fills updated_at with the schema's update-time default
// when the caller did not set it explicitly.
func (_u *ChannelMonitorRequestTemplateUpdate) defaults() {
	if _, ok := _u.mutation.UpdatedAt(); !ok {
		v := channelmonitorrequesttemplate.UpdateDefaultUpdatedAt()
		_u.mutation.SetUpdatedAt(v)
	}
}
|
||||
|
||||
// check runs all checks and user-defined validators on the builder.
// Only fields that were actually set on the mutation are validated.
func (_u *ChannelMonitorRequestTemplateUpdate) check() error {
	if v, ok := _u.mutation.Name(); ok {
		if err := channelmonitorrequesttemplate.NameValidator(v); err != nil {
			return &ValidationError{Name: "name", err: fmt.Errorf(`ent: validator failed for field "ChannelMonitorRequestTemplate.name": %w`, err)}
		}
	}
	if v, ok := _u.mutation.Provider(); ok {
		if err := channelmonitorrequesttemplate.ProviderValidator(v); err != nil {
			return &ValidationError{Name: "provider", err: fmt.Errorf(`ent: validator failed for field "ChannelMonitorRequestTemplate.provider": %w`, err)}
		}
	}
	if v, ok := _u.mutation.Description(); ok {
		if err := channelmonitorrequesttemplate.DescriptionValidator(v); err != nil {
			return &ValidationError{Name: "description", err: fmt.Errorf(`ent: validator failed for field "ChannelMonitorRequestTemplate.description": %w`, err)}
		}
	}
	if v, ok := _u.mutation.BodyOverrideMode(); ok {
		if err := channelmonitorrequesttemplate.BodyOverrideModeValidator(v); err != nil {
			return &ValidationError{Name: "body_override_mode", err: fmt.Errorf(`ent: validator failed for field "ChannelMonitorRequestTemplate.body_override_mode": %w`, err)}
		}
	}
	return nil
}
|
||||
|
||||
// sqlSave translates the staged mutation into an SQL UPDATE statement and
// executes it, returning the number of rows affected.
func (_u *ChannelMonitorRequestTemplateUpdate) sqlSave(ctx context.Context) (_node int, err error) {
	// Run field validators before touching the database.
	if err := _u.check(); err != nil {
		return _node, err
	}
	_spec := sqlgraph.NewUpdateSpec(channelmonitorrequesttemplate.Table, channelmonitorrequesttemplate.Columns, sqlgraph.NewFieldSpec(channelmonitorrequesttemplate.FieldID, field.TypeInt64))
	// Apply any WHERE predicates accumulated on the builder.
	if ps := _u.mutation.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	// Stage scalar field updates and clears recorded on the mutation.
	if value, ok := _u.mutation.UpdatedAt(); ok {
		_spec.SetField(channelmonitorrequesttemplate.FieldUpdatedAt, field.TypeTime, value)
	}
	if value, ok := _u.mutation.Name(); ok {
		_spec.SetField(channelmonitorrequesttemplate.FieldName, field.TypeString, value)
	}
	if value, ok := _u.mutation.Provider(); ok {
		_spec.SetField(channelmonitorrequesttemplate.FieldProvider, field.TypeEnum, value)
	}
	if value, ok := _u.mutation.Description(); ok {
		_spec.SetField(channelmonitorrequesttemplate.FieldDescription, field.TypeString, value)
	}
	if _u.mutation.DescriptionCleared() {
		_spec.ClearField(channelmonitorrequesttemplate.FieldDescription, field.TypeString)
	}
	if value, ok := _u.mutation.ExtraHeaders(); ok {
		_spec.SetField(channelmonitorrequesttemplate.FieldExtraHeaders, field.TypeJSON, value)
	}
	if value, ok := _u.mutation.BodyOverrideMode(); ok {
		_spec.SetField(channelmonitorrequesttemplate.FieldBodyOverrideMode, field.TypeString, value)
	}
	if value, ok := _u.mutation.BodyOverride(); ok {
		_spec.SetField(channelmonitorrequesttemplate.FieldBodyOverride, field.TypeJSON, value)
	}
	if _u.mutation.BodyOverrideCleared() {
		_spec.ClearField(channelmonitorrequesttemplate.FieldBodyOverride, field.TypeJSON)
	}
	// Edge maintenance for "monitors": clear-all first, then removals of
	// selected IDs (skipped when everything is cleared), then additions.
	if _u.mutation.MonitorsCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.O2M,
			Inverse: true,
			Table:   channelmonitorrequesttemplate.MonitorsTable,
			Columns: []string{channelmonitorrequesttemplate.MonitorsColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(channelmonitor.FieldID, field.TypeInt64),
			},
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	if nodes := _u.mutation.RemovedMonitorsIDs(); len(nodes) > 0 && !_u.mutation.MonitorsCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.O2M,
			Inverse: true,
			Table:   channelmonitorrequesttemplate.MonitorsTable,
			Columns: []string{channelmonitorrequesttemplate.MonitorsColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(channelmonitor.FieldID, field.TypeInt64),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	if nodes := _u.mutation.MonitorsIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.O2M,
			Inverse: true,
			Table:   channelmonitorrequesttemplate.MonitorsTable,
			Columns: []string{channelmonitorrequesttemplate.MonitorsColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(channelmonitor.FieldID, field.TypeInt64),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Add = append(_spec.Edges.Add, edge)
	}
	// Execute the UPDATE, translating driver errors into ent error types.
	if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil {
		if _, ok := err.(*sqlgraph.NotFoundError); ok {
			err = &NotFoundError{channelmonitorrequesttemplate.Label}
		} else if sqlgraph.IsConstraintError(err) {
			err = &ConstraintError{msg: err.Error(), wrap: err}
		}
		return 0, err
	}
	// Mark the mutation as applied so the hook chain does not re-run it.
	_u.mutation.done = true
	return _node, nil
}
|
||||
|
||||
// ChannelMonitorRequestTemplateUpdateOne is the builder for updating a single ChannelMonitorRequestTemplate entity.
type ChannelMonitorRequestTemplateUpdateOne struct {
	config
	// fields optionally restricts which columns of the updated entity are returned.
	fields []string
	// hooks are run around the save operation.
	hooks []Hook
	// mutation accumulates the staged field and edge changes.
	mutation *ChannelMonitorRequestTemplateMutation
}
|
||||
|
||||
// SetUpdatedAt sets the "updated_at" field.
|
||||
func (_u *ChannelMonitorRequestTemplateUpdateOne) SetUpdatedAt(v time.Time) *ChannelMonitorRequestTemplateUpdateOne {
|
||||
_u.mutation.SetUpdatedAt(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetName sets the "name" field.
|
||||
func (_u *ChannelMonitorRequestTemplateUpdateOne) SetName(v string) *ChannelMonitorRequestTemplateUpdateOne {
|
||||
_u.mutation.SetName(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableName sets the "name" field if the given value is not nil.
|
||||
func (_u *ChannelMonitorRequestTemplateUpdateOne) SetNillableName(v *string) *ChannelMonitorRequestTemplateUpdateOne {
|
||||
if v != nil {
|
||||
_u.SetName(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetProvider sets the "provider" field.
|
||||
func (_u *ChannelMonitorRequestTemplateUpdateOne) SetProvider(v channelmonitorrequesttemplate.Provider) *ChannelMonitorRequestTemplateUpdateOne {
|
||||
_u.mutation.SetProvider(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableProvider sets the "provider" field if the given value is not nil.
|
||||
func (_u *ChannelMonitorRequestTemplateUpdateOne) SetNillableProvider(v *channelmonitorrequesttemplate.Provider) *ChannelMonitorRequestTemplateUpdateOne {
|
||||
if v != nil {
|
||||
_u.SetProvider(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetDescription sets the "description" field.
|
||||
func (_u *ChannelMonitorRequestTemplateUpdateOne) SetDescription(v string) *ChannelMonitorRequestTemplateUpdateOne {
|
||||
_u.mutation.SetDescription(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableDescription sets the "description" field if the given value is not nil.
|
||||
func (_u *ChannelMonitorRequestTemplateUpdateOne) SetNillableDescription(v *string) *ChannelMonitorRequestTemplateUpdateOne {
|
||||
if v != nil {
|
||||
_u.SetDescription(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// ClearDescription clears the value of the "description" field.
|
||||
func (_u *ChannelMonitorRequestTemplateUpdateOne) ClearDescription() *ChannelMonitorRequestTemplateUpdateOne {
|
||||
_u.mutation.ClearDescription()
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetExtraHeaders sets the "extra_headers" field.
|
||||
func (_u *ChannelMonitorRequestTemplateUpdateOne) SetExtraHeaders(v map[string]string) *ChannelMonitorRequestTemplateUpdateOne {
|
||||
_u.mutation.SetExtraHeaders(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetBodyOverrideMode sets the "body_override_mode" field.
|
||||
func (_u *ChannelMonitorRequestTemplateUpdateOne) SetBodyOverrideMode(v string) *ChannelMonitorRequestTemplateUpdateOne {
|
||||
_u.mutation.SetBodyOverrideMode(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableBodyOverrideMode sets the "body_override_mode" field if the given value is not nil.
|
||||
func (_u *ChannelMonitorRequestTemplateUpdateOne) SetNillableBodyOverrideMode(v *string) *ChannelMonitorRequestTemplateUpdateOne {
|
||||
if v != nil {
|
||||
_u.SetBodyOverrideMode(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetBodyOverride sets the "body_override" field.
|
||||
func (_u *ChannelMonitorRequestTemplateUpdateOne) SetBodyOverride(v map[string]interface{}) *ChannelMonitorRequestTemplateUpdateOne {
|
||||
_u.mutation.SetBodyOverride(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// ClearBodyOverride clears the value of the "body_override" field.
|
||||
func (_u *ChannelMonitorRequestTemplateUpdateOne) ClearBodyOverride() *ChannelMonitorRequestTemplateUpdateOne {
|
||||
_u.mutation.ClearBodyOverride()
|
||||
return _u
|
||||
}
|
||||
|
||||
// AddMonitorIDs adds the "monitors" edge to the ChannelMonitor entity by IDs.
|
||||
func (_u *ChannelMonitorRequestTemplateUpdateOne) AddMonitorIDs(ids ...int64) *ChannelMonitorRequestTemplateUpdateOne {
|
||||
_u.mutation.AddMonitorIDs(ids...)
|
||||
return _u
|
||||
}
|
||||
|
||||
// AddMonitors adds the "monitors" edges to the ChannelMonitor entity.
|
||||
func (_u *ChannelMonitorRequestTemplateUpdateOne) AddMonitors(v ...*ChannelMonitor) *ChannelMonitorRequestTemplateUpdateOne {
|
||||
ids := make([]int64, len(v))
|
||||
for i := range v {
|
||||
ids[i] = v[i].ID
|
||||
}
|
||||
return _u.AddMonitorIDs(ids...)
|
||||
}
|
||||
|
||||
// Mutation returns the ChannelMonitorRequestTemplateMutation object of the builder.
// The mutation carries all field and edge changes staged so far.
func (_u *ChannelMonitorRequestTemplateUpdateOne) Mutation() *ChannelMonitorRequestTemplateMutation {
	return _u.mutation
}
|
||||
|
||||
// ClearMonitors clears all "monitors" edges to the ChannelMonitor entity.
|
||||
func (_u *ChannelMonitorRequestTemplateUpdateOne) ClearMonitors() *ChannelMonitorRequestTemplateUpdateOne {
|
||||
_u.mutation.ClearMonitors()
|
||||
return _u
|
||||
}
|
||||
|
||||
// RemoveMonitorIDs removes the "monitors" edge to ChannelMonitor entities by IDs.
|
||||
func (_u *ChannelMonitorRequestTemplateUpdateOne) RemoveMonitorIDs(ids ...int64) *ChannelMonitorRequestTemplateUpdateOne {
|
||||
_u.mutation.RemoveMonitorIDs(ids...)
|
||||
return _u
|
||||
}
|
||||
|
||||
// RemoveMonitors removes "monitors" edges to ChannelMonitor entities.
|
||||
func (_u *ChannelMonitorRequestTemplateUpdateOne) RemoveMonitors(v ...*ChannelMonitor) *ChannelMonitorRequestTemplateUpdateOne {
|
||||
ids := make([]int64, len(v))
|
||||
for i := range v {
|
||||
ids[i] = v[i].ID
|
||||
}
|
||||
return _u.RemoveMonitorIDs(ids...)
|
||||
}
|
||||
|
||||
// Where appends a list of predicates to the ChannelMonitorRequestTemplateUpdateOne builder.
func (_u *ChannelMonitorRequestTemplateUpdateOne) Where(ps ...predicate.ChannelMonitorRequestTemplate) *ChannelMonitorRequestTemplateUpdateOne {
	_u.mutation.Where(ps...)
	return _u
}
|
||||
|
||||
// Select allows selecting one or more fields (columns) of the returned entity.
|
||||
// The default is selecting all fields defined in the entity schema.
|
||||
func (_u *ChannelMonitorRequestTemplateUpdateOne) Select(field string, fields ...string) *ChannelMonitorRequestTemplateUpdateOne {
|
||||
_u.fields = append([]string{field}, fields...)
|
||||
return _u
|
||||
}
|
||||
|
||||
// Save executes the query and returns the updated ChannelMonitorRequestTemplate entity.
|
||||
func (_u *ChannelMonitorRequestTemplateUpdateOne) Save(ctx context.Context) (*ChannelMonitorRequestTemplate, error) {
|
||||
_u.defaults()
|
||||
return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks)
|
||||
}
|
||||
|
||||
// SaveX is like Save, but panics if an error occurs.
|
||||
func (_u *ChannelMonitorRequestTemplateUpdateOne) SaveX(ctx context.Context) *ChannelMonitorRequestTemplate {
|
||||
node, err := _u.Save(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return node
|
||||
}
|
||||
|
||||
// Exec executes the query on the entity.
|
||||
func (_u *ChannelMonitorRequestTemplateUpdateOne) Exec(ctx context.Context) error {
|
||||
_, err := _u.Save(ctx)
|
||||
return err
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (_u *ChannelMonitorRequestTemplateUpdateOne) ExecX(ctx context.Context) {
|
||||
if err := _u.Exec(ctx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// defaults sets the default values of the builder before save.
|
||||
func (_u *ChannelMonitorRequestTemplateUpdateOne) defaults() {
|
||||
if _, ok := _u.mutation.UpdatedAt(); !ok {
|
||||
v := channelmonitorrequesttemplate.UpdateDefaultUpdatedAt()
|
||||
_u.mutation.SetUpdatedAt(v)
|
||||
}
|
||||
}
|
||||
|
||||
// check runs all checks and user-defined validators on the builder.
|
||||
func (_u *ChannelMonitorRequestTemplateUpdateOne) check() error {
|
||||
if v, ok := _u.mutation.Name(); ok {
|
||||
if err := channelmonitorrequesttemplate.NameValidator(v); err != nil {
|
||||
return &ValidationError{Name: "name", err: fmt.Errorf(`ent: validator failed for field "ChannelMonitorRequestTemplate.name": %w`, err)}
|
||||
}
|
||||
}
|
||||
if v, ok := _u.mutation.Provider(); ok {
|
||||
if err := channelmonitorrequesttemplate.ProviderValidator(v); err != nil {
|
||||
return &ValidationError{Name: "provider", err: fmt.Errorf(`ent: validator failed for field "ChannelMonitorRequestTemplate.provider": %w`, err)}
|
||||
}
|
||||
}
|
||||
if v, ok := _u.mutation.Description(); ok {
|
||||
if err := channelmonitorrequesttemplate.DescriptionValidator(v); err != nil {
|
||||
return &ValidationError{Name: "description", err: fmt.Errorf(`ent: validator failed for field "ChannelMonitorRequestTemplate.description": %w`, err)}
|
||||
}
|
||||
}
|
||||
if v, ok := _u.mutation.BodyOverrideMode(); ok {
|
||||
if err := channelmonitorrequesttemplate.BodyOverrideModeValidator(v); err != nil {
|
||||
return &ValidationError{Name: "body_override_mode", err: fmt.Errorf(`ent: validator failed for field "ChannelMonitorRequestTemplate.body_override_mode": %w`, err)}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// sqlSave translates the staged mutation into an SQL UPDATE for a single row
// (selected by the mutation's ID) and returns the refreshed entity.
func (_u *ChannelMonitorRequestTemplateUpdateOne) sqlSave(ctx context.Context) (_node *ChannelMonitorRequestTemplate, err error) {
	// Run field validators before touching the database.
	if err := _u.check(); err != nil {
		return _node, err
	}
	_spec := sqlgraph.NewUpdateSpec(channelmonitorrequesttemplate.Table, channelmonitorrequesttemplate.Columns, sqlgraph.NewFieldSpec(channelmonitorrequesttemplate.FieldID, field.TypeInt64))
	// A single-row update requires the target ID on the mutation.
	id, ok := _u.mutation.ID()
	if !ok {
		return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "ChannelMonitorRequestTemplate.id" for update`)}
	}
	_spec.Node.ID.Value = id
	// Honor a Select(...) projection: always include the ID column, validate
	// each requested column, and skip a redundant ID entry.
	if fields := _u.fields; len(fields) > 0 {
		_spec.Node.Columns = make([]string, 0, len(fields))
		_spec.Node.Columns = append(_spec.Node.Columns, channelmonitorrequesttemplate.FieldID)
		for _, f := range fields {
			if !channelmonitorrequesttemplate.ValidColumn(f) {
				return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
			}
			if f != channelmonitorrequesttemplate.FieldID {
				_spec.Node.Columns = append(_spec.Node.Columns, f)
			}
		}
	}
	// Apply any WHERE predicates accumulated on the builder.
	if ps := _u.mutation.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	// Stage scalar field updates and clears recorded on the mutation.
	if value, ok := _u.mutation.UpdatedAt(); ok {
		_spec.SetField(channelmonitorrequesttemplate.FieldUpdatedAt, field.TypeTime, value)
	}
	if value, ok := _u.mutation.Name(); ok {
		_spec.SetField(channelmonitorrequesttemplate.FieldName, field.TypeString, value)
	}
	if value, ok := _u.mutation.Provider(); ok {
		_spec.SetField(channelmonitorrequesttemplate.FieldProvider, field.TypeEnum, value)
	}
	if value, ok := _u.mutation.Description(); ok {
		_spec.SetField(channelmonitorrequesttemplate.FieldDescription, field.TypeString, value)
	}
	if _u.mutation.DescriptionCleared() {
		_spec.ClearField(channelmonitorrequesttemplate.FieldDescription, field.TypeString)
	}
	if value, ok := _u.mutation.ExtraHeaders(); ok {
		_spec.SetField(channelmonitorrequesttemplate.FieldExtraHeaders, field.TypeJSON, value)
	}
	if value, ok := _u.mutation.BodyOverrideMode(); ok {
		_spec.SetField(channelmonitorrequesttemplate.FieldBodyOverrideMode, field.TypeString, value)
	}
	if value, ok := _u.mutation.BodyOverride(); ok {
		_spec.SetField(channelmonitorrequesttemplate.FieldBodyOverride, field.TypeJSON, value)
	}
	if _u.mutation.BodyOverrideCleared() {
		_spec.ClearField(channelmonitorrequesttemplate.FieldBodyOverride, field.TypeJSON)
	}
	// Edge maintenance for "monitors": clear-all first, then removals of
	// selected IDs (skipped when everything is cleared), then additions.
	if _u.mutation.MonitorsCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.O2M,
			Inverse: true,
			Table:   channelmonitorrequesttemplate.MonitorsTable,
			Columns: []string{channelmonitorrequesttemplate.MonitorsColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(channelmonitor.FieldID, field.TypeInt64),
			},
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	if nodes := _u.mutation.RemovedMonitorsIDs(); len(nodes) > 0 && !_u.mutation.MonitorsCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.O2M,
			Inverse: true,
			Table:   channelmonitorrequesttemplate.MonitorsTable,
			Columns: []string{channelmonitorrequesttemplate.MonitorsColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(channelmonitor.FieldID, field.TypeInt64),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	if nodes := _u.mutation.MonitorsIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.O2M,
			Inverse: true,
			Table:   channelmonitorrequesttemplate.MonitorsTable,
			Columns: []string{channelmonitorrequesttemplate.MonitorsColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(channelmonitor.FieldID, field.TypeInt64),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Add = append(_spec.Edges.Add, edge)
	}
	// Wire up scan/assign so the updated row is decoded back into _node.
	_node = &ChannelMonitorRequestTemplate{config: _u.config}
	_spec.Assign = _node.assignValues
	_spec.ScanValues = _node.scanValues
	// Execute the UPDATE, translating driver errors into ent error types.
	if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil {
		if _, ok := err.(*sqlgraph.NotFoundError); ok {
			err = &NotFoundError{channelmonitorrequesttemplate.Label}
		} else if sqlgraph.IsConstraintError(err) {
			err = &ConstraintError{msg: err.Error(), wrap: err}
		}
		return nil, err
	}
	// Mark the mutation as applied so the hook chain does not re-run it.
	_u.mutation.done = true
	return _node, nil
}
|
||||
@@ -17,15 +17,28 @@ import (
|
||||
"github.com/Wei-Shaw/sub2api/ent/announcement"
|
||||
"github.com/Wei-Shaw/sub2api/ent/announcementread"
|
||||
"github.com/Wei-Shaw/sub2api/ent/apikey"
|
||||
"github.com/Wei-Shaw/sub2api/ent/authidentity"
|
||||
"github.com/Wei-Shaw/sub2api/ent/authidentitychannel"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitor"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitordailyrollup"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitorhistory"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitorrequesttemplate"
|
||||
"github.com/Wei-Shaw/sub2api/ent/errorpassthroughrule"
|
||||
"github.com/Wei-Shaw/sub2api/ent/group"
|
||||
"github.com/Wei-Shaw/sub2api/ent/idempotencyrecord"
|
||||
"github.com/Wei-Shaw/sub2api/ent/identityadoptiondecision"
|
||||
"github.com/Wei-Shaw/sub2api/ent/paymentauditlog"
|
||||
"github.com/Wei-Shaw/sub2api/ent/paymentorder"
|
||||
"github.com/Wei-Shaw/sub2api/ent/paymentproviderinstance"
|
||||
"github.com/Wei-Shaw/sub2api/ent/pendingauthsession"
|
||||
"github.com/Wei-Shaw/sub2api/ent/promocode"
|
||||
"github.com/Wei-Shaw/sub2api/ent/promocodeusage"
|
||||
"github.com/Wei-Shaw/sub2api/ent/proxy"
|
||||
"github.com/Wei-Shaw/sub2api/ent/redeemcode"
|
||||
"github.com/Wei-Shaw/sub2api/ent/securitysecret"
|
||||
"github.com/Wei-Shaw/sub2api/ent/setting"
|
||||
"github.com/Wei-Shaw/sub2api/ent/subscriptionplan"
|
||||
"github.com/Wei-Shaw/sub2api/ent/tlsfingerprintprofile"
|
||||
"github.com/Wei-Shaw/sub2api/ent/usagecleanuptask"
|
||||
"github.com/Wei-Shaw/sub2api/ent/usagelog"
|
||||
"github.com/Wei-Shaw/sub2api/ent/user"
|
||||
@@ -93,27 +106,40 @@ var (
|
||||
func checkColumn(t, c string) error {
|
||||
initCheck.Do(func() {
|
||||
columnCheck = sql.NewColumnCheck(map[string]func(string) bool{
|
||||
apikey.Table: apikey.ValidColumn,
|
||||
account.Table: account.ValidColumn,
|
||||
accountgroup.Table: accountgroup.ValidColumn,
|
||||
announcement.Table: announcement.ValidColumn,
|
||||
announcementread.Table: announcementread.ValidColumn,
|
||||
errorpassthroughrule.Table: errorpassthroughrule.ValidColumn,
|
||||
group.Table: group.ValidColumn,
|
||||
idempotencyrecord.Table: idempotencyrecord.ValidColumn,
|
||||
promocode.Table: promocode.ValidColumn,
|
||||
promocodeusage.Table: promocodeusage.ValidColumn,
|
||||
proxy.Table: proxy.ValidColumn,
|
||||
redeemcode.Table: redeemcode.ValidColumn,
|
||||
securitysecret.Table: securitysecret.ValidColumn,
|
||||
setting.Table: setting.ValidColumn,
|
||||
usagecleanuptask.Table: usagecleanuptask.ValidColumn,
|
||||
usagelog.Table: usagelog.ValidColumn,
|
||||
user.Table: user.ValidColumn,
|
||||
userallowedgroup.Table: userallowedgroup.ValidColumn,
|
||||
userattributedefinition.Table: userattributedefinition.ValidColumn,
|
||||
userattributevalue.Table: userattributevalue.ValidColumn,
|
||||
usersubscription.Table: usersubscription.ValidColumn,
|
||||
apikey.Table: apikey.ValidColumn,
|
||||
account.Table: account.ValidColumn,
|
||||
accountgroup.Table: accountgroup.ValidColumn,
|
||||
announcement.Table: announcement.ValidColumn,
|
||||
announcementread.Table: announcementread.ValidColumn,
|
||||
authidentity.Table: authidentity.ValidColumn,
|
||||
authidentitychannel.Table: authidentitychannel.ValidColumn,
|
||||
channelmonitor.Table: channelmonitor.ValidColumn,
|
||||
channelmonitordailyrollup.Table: channelmonitordailyrollup.ValidColumn,
|
||||
channelmonitorhistory.Table: channelmonitorhistory.ValidColumn,
|
||||
channelmonitorrequesttemplate.Table: channelmonitorrequesttemplate.ValidColumn,
|
||||
errorpassthroughrule.Table: errorpassthroughrule.ValidColumn,
|
||||
group.Table: group.ValidColumn,
|
||||
idempotencyrecord.Table: idempotencyrecord.ValidColumn,
|
||||
identityadoptiondecision.Table: identityadoptiondecision.ValidColumn,
|
||||
paymentauditlog.Table: paymentauditlog.ValidColumn,
|
||||
paymentorder.Table: paymentorder.ValidColumn,
|
||||
paymentproviderinstance.Table: paymentproviderinstance.ValidColumn,
|
||||
pendingauthsession.Table: pendingauthsession.ValidColumn,
|
||||
promocode.Table: promocode.ValidColumn,
|
||||
promocodeusage.Table: promocodeusage.ValidColumn,
|
||||
proxy.Table: proxy.ValidColumn,
|
||||
redeemcode.Table: redeemcode.ValidColumn,
|
||||
securitysecret.Table: securitysecret.ValidColumn,
|
||||
setting.Table: setting.ValidColumn,
|
||||
subscriptionplan.Table: subscriptionplan.ValidColumn,
|
||||
tlsfingerprintprofile.Table: tlsfingerprintprofile.ValidColumn,
|
||||
usagecleanuptask.Table: usagecleanuptask.ValidColumn,
|
||||
usagelog.Table: usagelog.ValidColumn,
|
||||
user.Table: user.ValidColumn,
|
||||
userallowedgroup.Table: userallowedgroup.ValidColumn,
|
||||
userattributedefinition.Table: userattributedefinition.ValidColumn,
|
||||
userattributevalue.Table: userattributevalue.ValidColumn,
|
||||
usersubscription.Table: usersubscription.ValidColumn,
|
||||
})
|
||||
})
|
||||
return columnCheck(t, c)
|
||||
|
||||
@@ -11,6 +11,7 @@ import (
|
||||
"entgo.io/ent"
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"github.com/Wei-Shaw/sub2api/ent/group"
|
||||
"github.com/Wei-Shaw/sub2api/internal/domain"
|
||||
)
|
||||
|
||||
// Group is the model entity for the Group schema.
|
||||
@@ -52,16 +53,6 @@ type Group struct {
|
||||
ImagePrice2k *float64 `json:"image_price_2k,omitempty"`
|
||||
// ImagePrice4k holds the value of the "image_price_4k" field.
|
||||
ImagePrice4k *float64 `json:"image_price_4k,omitempty"`
|
||||
// SoraImagePrice360 holds the value of the "sora_image_price_360" field.
|
||||
SoraImagePrice360 *float64 `json:"sora_image_price_360,omitempty"`
|
||||
// SoraImagePrice540 holds the value of the "sora_image_price_540" field.
|
||||
SoraImagePrice540 *float64 `json:"sora_image_price_540,omitempty"`
|
||||
// SoraVideoPricePerRequest holds the value of the "sora_video_price_per_request" field.
|
||||
SoraVideoPricePerRequest *float64 `json:"sora_video_price_per_request,omitempty"`
|
||||
// SoraVideoPricePerRequestHd holds the value of the "sora_video_price_per_request_hd" field.
|
||||
SoraVideoPricePerRequestHd *float64 `json:"sora_video_price_per_request_hd,omitempty"`
|
||||
// SoraStorageQuotaBytes holds the value of the "sora_storage_quota_bytes" field.
|
||||
SoraStorageQuotaBytes int64 `json:"sora_storage_quota_bytes,omitempty"`
|
||||
// 是否仅允许 Claude Code 客户端
|
||||
ClaudeCodeOnly bool `json:"claude_code_only,omitempty"`
|
||||
// 非 Claude Code 请求降级使用的分组 ID
|
||||
@@ -80,8 +71,16 @@ type Group struct {
|
||||
SortOrder int `json:"sort_order,omitempty"`
|
||||
// 是否允许 /v1/messages 调度到此 OpenAI 分组
|
||||
AllowMessagesDispatch bool `json:"allow_messages_dispatch,omitempty"`
|
||||
// 仅允许非 apikey 类型账号关联到此分组
|
||||
RequireOauthOnly bool `json:"require_oauth_only,omitempty"`
|
||||
// 调度时仅允许 privacy 已成功设置的账号
|
||||
RequirePrivacySet bool `json:"require_privacy_set,omitempty"`
|
||||
// 默认映射模型 ID,当账号级映射找不到时使用此值
|
||||
DefaultMappedModel string `json:"default_mapped_model,omitempty"`
|
||||
// OpenAI Messages 调度模型配置:按 Claude 系列/精确模型映射到目标 GPT 模型
|
||||
MessagesDispatchModelConfig domain.OpenAIMessagesDispatchModelConfig `json:"messages_dispatch_model_config,omitempty"`
|
||||
// 分组 RPM 上限,0 表示不限制;设置后接管该分组用户的限流
|
||||
RpmLimit int `json:"rpm_limit,omitempty"`
|
||||
// Edges holds the relations/edges for other nodes in the graph.
|
||||
// The values are being populated by the GroupQuery when eager-loading is set.
|
||||
Edges GroupEdges `json:"edges"`
|
||||
@@ -188,13 +187,13 @@ func (*Group) scanValues(columns []string) ([]any, error) {
|
||||
values := make([]any, len(columns))
|
||||
for i := range columns {
|
||||
switch columns[i] {
|
||||
case group.FieldModelRouting, group.FieldSupportedModelScopes:
|
||||
case group.FieldModelRouting, group.FieldSupportedModelScopes, group.FieldMessagesDispatchModelConfig:
|
||||
values[i] = new([]byte)
|
||||
case group.FieldIsExclusive, group.FieldClaudeCodeOnly, group.FieldModelRoutingEnabled, group.FieldMcpXMLInject, group.FieldAllowMessagesDispatch:
|
||||
case group.FieldIsExclusive, group.FieldClaudeCodeOnly, group.FieldModelRoutingEnabled, group.FieldMcpXMLInject, group.FieldAllowMessagesDispatch, group.FieldRequireOauthOnly, group.FieldRequirePrivacySet:
|
||||
values[i] = new(sql.NullBool)
|
||||
case group.FieldRateMultiplier, group.FieldDailyLimitUsd, group.FieldWeeklyLimitUsd, group.FieldMonthlyLimitUsd, group.FieldImagePrice1k, group.FieldImagePrice2k, group.FieldImagePrice4k, group.FieldSoraImagePrice360, group.FieldSoraImagePrice540, group.FieldSoraVideoPricePerRequest, group.FieldSoraVideoPricePerRequestHd:
|
||||
case group.FieldRateMultiplier, group.FieldDailyLimitUsd, group.FieldWeeklyLimitUsd, group.FieldMonthlyLimitUsd, group.FieldImagePrice1k, group.FieldImagePrice2k, group.FieldImagePrice4k:
|
||||
values[i] = new(sql.NullFloat64)
|
||||
case group.FieldID, group.FieldDefaultValidityDays, group.FieldSoraStorageQuotaBytes, group.FieldFallbackGroupID, group.FieldFallbackGroupIDOnInvalidRequest, group.FieldSortOrder:
|
||||
case group.FieldID, group.FieldDefaultValidityDays, group.FieldFallbackGroupID, group.FieldFallbackGroupIDOnInvalidRequest, group.FieldSortOrder, group.FieldRpmLimit:
|
||||
values[i] = new(sql.NullInt64)
|
||||
case group.FieldName, group.FieldDescription, group.FieldStatus, group.FieldPlatform, group.FieldSubscriptionType, group.FieldDefaultMappedModel:
|
||||
values[i] = new(sql.NullString)
|
||||
@@ -331,40 +330,6 @@ func (_m *Group) assignValues(columns []string, values []any) error {
|
||||
_m.ImagePrice4k = new(float64)
|
||||
*_m.ImagePrice4k = value.Float64
|
||||
}
|
||||
case group.FieldSoraImagePrice360:
|
||||
if value, ok := values[i].(*sql.NullFloat64); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field sora_image_price_360", values[i])
|
||||
} else if value.Valid {
|
||||
_m.SoraImagePrice360 = new(float64)
|
||||
*_m.SoraImagePrice360 = value.Float64
|
||||
}
|
||||
case group.FieldSoraImagePrice540:
|
||||
if value, ok := values[i].(*sql.NullFloat64); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field sora_image_price_540", values[i])
|
||||
} else if value.Valid {
|
||||
_m.SoraImagePrice540 = new(float64)
|
||||
*_m.SoraImagePrice540 = value.Float64
|
||||
}
|
||||
case group.FieldSoraVideoPricePerRequest:
|
||||
if value, ok := values[i].(*sql.NullFloat64); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field sora_video_price_per_request", values[i])
|
||||
} else if value.Valid {
|
||||
_m.SoraVideoPricePerRequest = new(float64)
|
||||
*_m.SoraVideoPricePerRequest = value.Float64
|
||||
}
|
||||
case group.FieldSoraVideoPricePerRequestHd:
|
||||
if value, ok := values[i].(*sql.NullFloat64); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field sora_video_price_per_request_hd", values[i])
|
||||
} else if value.Valid {
|
||||
_m.SoraVideoPricePerRequestHd = new(float64)
|
||||
*_m.SoraVideoPricePerRequestHd = value.Float64
|
||||
}
|
||||
case group.FieldSoraStorageQuotaBytes:
|
||||
if value, ok := values[i].(*sql.NullInt64); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field sora_storage_quota_bytes", values[i])
|
||||
} else if value.Valid {
|
||||
_m.SoraStorageQuotaBytes = value.Int64
|
||||
}
|
||||
case group.FieldClaudeCodeOnly:
|
||||
if value, ok := values[i].(*sql.NullBool); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field claude_code_only", values[i])
|
||||
@@ -425,12 +390,38 @@ func (_m *Group) assignValues(columns []string, values []any) error {
|
||||
} else if value.Valid {
|
||||
_m.AllowMessagesDispatch = value.Bool
|
||||
}
|
||||
case group.FieldRequireOauthOnly:
|
||||
if value, ok := values[i].(*sql.NullBool); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field require_oauth_only", values[i])
|
||||
} else if value.Valid {
|
||||
_m.RequireOauthOnly = value.Bool
|
||||
}
|
||||
case group.FieldRequirePrivacySet:
|
||||
if value, ok := values[i].(*sql.NullBool); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field require_privacy_set", values[i])
|
||||
} else if value.Valid {
|
||||
_m.RequirePrivacySet = value.Bool
|
||||
}
|
||||
case group.FieldDefaultMappedModel:
|
||||
if value, ok := values[i].(*sql.NullString); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field default_mapped_model", values[i])
|
||||
} else if value.Valid {
|
||||
_m.DefaultMappedModel = value.String
|
||||
}
|
||||
case group.FieldMessagesDispatchModelConfig:
|
||||
if value, ok := values[i].(*[]byte); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field messages_dispatch_model_config", values[i])
|
||||
} else if value != nil && len(*value) > 0 {
|
||||
if err := json.Unmarshal(*value, &_m.MessagesDispatchModelConfig); err != nil {
|
||||
return fmt.Errorf("unmarshal field messages_dispatch_model_config: %w", err)
|
||||
}
|
||||
}
|
||||
case group.FieldRpmLimit:
|
||||
if value, ok := values[i].(*sql.NullInt64); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field rpm_limit", values[i])
|
||||
} else if value.Valid {
|
||||
_m.RpmLimit = int(value.Int64)
|
||||
}
|
||||
default:
|
||||
_m.selectValues.Set(columns[i], values[i])
|
||||
}
|
||||
@@ -574,29 +565,6 @@ func (_m *Group) String() string {
|
||||
builder.WriteString(fmt.Sprintf("%v", *v))
|
||||
}
|
||||
builder.WriteString(", ")
|
||||
if v := _m.SoraImagePrice360; v != nil {
|
||||
builder.WriteString("sora_image_price_360=")
|
||||
builder.WriteString(fmt.Sprintf("%v", *v))
|
||||
}
|
||||
builder.WriteString(", ")
|
||||
if v := _m.SoraImagePrice540; v != nil {
|
||||
builder.WriteString("sora_image_price_540=")
|
||||
builder.WriteString(fmt.Sprintf("%v", *v))
|
||||
}
|
||||
builder.WriteString(", ")
|
||||
if v := _m.SoraVideoPricePerRequest; v != nil {
|
||||
builder.WriteString("sora_video_price_per_request=")
|
||||
builder.WriteString(fmt.Sprintf("%v", *v))
|
||||
}
|
||||
builder.WriteString(", ")
|
||||
if v := _m.SoraVideoPricePerRequestHd; v != nil {
|
||||
builder.WriteString("sora_video_price_per_request_hd=")
|
||||
builder.WriteString(fmt.Sprintf("%v", *v))
|
||||
}
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("sora_storage_quota_bytes=")
|
||||
builder.WriteString(fmt.Sprintf("%v", _m.SoraStorageQuotaBytes))
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("claude_code_only=")
|
||||
builder.WriteString(fmt.Sprintf("%v", _m.ClaudeCodeOnly))
|
||||
builder.WriteString(", ")
|
||||
@@ -628,8 +596,20 @@ func (_m *Group) String() string {
|
||||
builder.WriteString("allow_messages_dispatch=")
|
||||
builder.WriteString(fmt.Sprintf("%v", _m.AllowMessagesDispatch))
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("require_oauth_only=")
|
||||
builder.WriteString(fmt.Sprintf("%v", _m.RequireOauthOnly))
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("require_privacy_set=")
|
||||
builder.WriteString(fmt.Sprintf("%v", _m.RequirePrivacySet))
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("default_mapped_model=")
|
||||
builder.WriteString(_m.DefaultMappedModel)
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("messages_dispatch_model_config=")
|
||||
builder.WriteString(fmt.Sprintf("%v", _m.MessagesDispatchModelConfig))
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("rpm_limit=")
|
||||
builder.WriteString(fmt.Sprintf("%v", _m.RpmLimit))
|
||||
builder.WriteByte(')')
|
||||
return builder.String()
|
||||
}
|
||||
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
"entgo.io/ent"
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"github.com/Wei-Shaw/sub2api/internal/domain"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -49,16 +50,6 @@ const (
|
||||
FieldImagePrice2k = "image_price_2k"
|
||||
// FieldImagePrice4k holds the string denoting the image_price_4k field in the database.
|
||||
FieldImagePrice4k = "image_price_4k"
|
||||
// FieldSoraImagePrice360 holds the string denoting the sora_image_price_360 field in the database.
|
||||
FieldSoraImagePrice360 = "sora_image_price_360"
|
||||
// FieldSoraImagePrice540 holds the string denoting the sora_image_price_540 field in the database.
|
||||
FieldSoraImagePrice540 = "sora_image_price_540"
|
||||
// FieldSoraVideoPricePerRequest holds the string denoting the sora_video_price_per_request field in the database.
|
||||
FieldSoraVideoPricePerRequest = "sora_video_price_per_request"
|
||||
// FieldSoraVideoPricePerRequestHd holds the string denoting the sora_video_price_per_request_hd field in the database.
|
||||
FieldSoraVideoPricePerRequestHd = "sora_video_price_per_request_hd"
|
||||
// FieldSoraStorageQuotaBytes holds the string denoting the sora_storage_quota_bytes field in the database.
|
||||
FieldSoraStorageQuotaBytes = "sora_storage_quota_bytes"
|
||||
// FieldClaudeCodeOnly holds the string denoting the claude_code_only field in the database.
|
||||
FieldClaudeCodeOnly = "claude_code_only"
|
||||
// FieldFallbackGroupID holds the string denoting the fallback_group_id field in the database.
|
||||
@@ -77,8 +68,16 @@ const (
|
||||
FieldSortOrder = "sort_order"
|
||||
// FieldAllowMessagesDispatch holds the string denoting the allow_messages_dispatch field in the database.
|
||||
FieldAllowMessagesDispatch = "allow_messages_dispatch"
|
||||
// FieldRequireOauthOnly holds the string denoting the require_oauth_only field in the database.
|
||||
FieldRequireOauthOnly = "require_oauth_only"
|
||||
// FieldRequirePrivacySet holds the string denoting the require_privacy_set field in the database.
|
||||
FieldRequirePrivacySet = "require_privacy_set"
|
||||
// FieldDefaultMappedModel holds the string denoting the default_mapped_model field in the database.
|
||||
FieldDefaultMappedModel = "default_mapped_model"
|
||||
// FieldMessagesDispatchModelConfig holds the string denoting the messages_dispatch_model_config field in the database.
|
||||
FieldMessagesDispatchModelConfig = "messages_dispatch_model_config"
|
||||
// FieldRpmLimit holds the string denoting the rpm_limit field in the database.
|
||||
FieldRpmLimit = "rpm_limit"
|
||||
// EdgeAPIKeys holds the string denoting the api_keys edge name in mutations.
|
||||
EdgeAPIKeys = "api_keys"
|
||||
// EdgeRedeemCodes holds the string denoting the redeem_codes edge name in mutations.
|
||||
@@ -171,11 +170,6 @@ var Columns = []string{
|
||||
FieldImagePrice1k,
|
||||
FieldImagePrice2k,
|
||||
FieldImagePrice4k,
|
||||
FieldSoraImagePrice360,
|
||||
FieldSoraImagePrice540,
|
||||
FieldSoraVideoPricePerRequest,
|
||||
FieldSoraVideoPricePerRequestHd,
|
||||
FieldSoraStorageQuotaBytes,
|
||||
FieldClaudeCodeOnly,
|
||||
FieldFallbackGroupID,
|
||||
FieldFallbackGroupIDOnInvalidRequest,
|
||||
@@ -185,7 +179,11 @@ var Columns = []string{
|
||||
FieldSupportedModelScopes,
|
||||
FieldSortOrder,
|
||||
FieldAllowMessagesDispatch,
|
||||
FieldRequireOauthOnly,
|
||||
FieldRequirePrivacySet,
|
||||
FieldDefaultMappedModel,
|
||||
FieldMessagesDispatchModelConfig,
|
||||
FieldRpmLimit,
|
||||
}
|
||||
|
||||
var (
|
||||
@@ -241,8 +239,6 @@ var (
|
||||
SubscriptionTypeValidator func(string) error
|
||||
// DefaultDefaultValidityDays holds the default value on creation for the "default_validity_days" field.
|
||||
DefaultDefaultValidityDays int
|
||||
// DefaultSoraStorageQuotaBytes holds the default value on creation for the "sora_storage_quota_bytes" field.
|
||||
DefaultSoraStorageQuotaBytes int64
|
||||
// DefaultClaudeCodeOnly holds the default value on creation for the "claude_code_only" field.
|
||||
DefaultClaudeCodeOnly bool
|
||||
// DefaultModelRoutingEnabled holds the default value on creation for the "model_routing_enabled" field.
|
||||
@@ -255,10 +251,18 @@ var (
|
||||
DefaultSortOrder int
|
||||
// DefaultAllowMessagesDispatch holds the default value on creation for the "allow_messages_dispatch" field.
|
||||
DefaultAllowMessagesDispatch bool
|
||||
// DefaultRequireOauthOnly holds the default value on creation for the "require_oauth_only" field.
|
||||
DefaultRequireOauthOnly bool
|
||||
// DefaultRequirePrivacySet holds the default value on creation for the "require_privacy_set" field.
|
||||
DefaultRequirePrivacySet bool
|
||||
// DefaultDefaultMappedModel holds the default value on creation for the "default_mapped_model" field.
|
||||
DefaultDefaultMappedModel string
|
||||
// DefaultMappedModelValidator is a validator for the "default_mapped_model" field. It is called by the builders before save.
|
||||
DefaultMappedModelValidator func(string) error
|
||||
// DefaultMessagesDispatchModelConfig holds the default value on creation for the "messages_dispatch_model_config" field.
|
||||
DefaultMessagesDispatchModelConfig domain.OpenAIMessagesDispatchModelConfig
|
||||
// DefaultRpmLimit holds the default value on creation for the "rpm_limit" field.
|
||||
DefaultRpmLimit int
|
||||
)
|
||||
|
||||
// OrderOption defines the ordering options for the Group queries.
|
||||
@@ -354,31 +358,6 @@ func ByImagePrice4k(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldImagePrice4k, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// BySoraImagePrice360 orders the results by the sora_image_price_360 field.
|
||||
func BySoraImagePrice360(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldSoraImagePrice360, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// BySoraImagePrice540 orders the results by the sora_image_price_540 field.
|
||||
func BySoraImagePrice540(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldSoraImagePrice540, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// BySoraVideoPricePerRequest orders the results by the sora_video_price_per_request field.
|
||||
func BySoraVideoPricePerRequest(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldSoraVideoPricePerRequest, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// BySoraVideoPricePerRequestHd orders the results by the sora_video_price_per_request_hd field.
|
||||
func BySoraVideoPricePerRequestHd(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldSoraVideoPricePerRequestHd, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// BySoraStorageQuotaBytes orders the results by the sora_storage_quota_bytes field.
|
||||
func BySoraStorageQuotaBytes(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldSoraStorageQuotaBytes, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByClaudeCodeOnly orders the results by the claude_code_only field.
|
||||
func ByClaudeCodeOnly(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldClaudeCodeOnly, opts...).ToFunc()
|
||||
@@ -414,11 +393,26 @@ func ByAllowMessagesDispatch(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldAllowMessagesDispatch, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByRequireOauthOnly orders the results by the require_oauth_only field.
|
||||
func ByRequireOauthOnly(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldRequireOauthOnly, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByRequirePrivacySet orders the results by the require_privacy_set field.
|
||||
func ByRequirePrivacySet(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldRequirePrivacySet, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByDefaultMappedModel orders the results by the default_mapped_model field.
|
||||
func ByDefaultMappedModel(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldDefaultMappedModel, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByRpmLimit orders the results by the rpm_limit field.
|
||||
func ByRpmLimit(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldRpmLimit, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByAPIKeysCount orders the results by api_keys count.
|
||||
func ByAPIKeysCount(opts ...sql.OrderTermOption) OrderOption {
|
||||
return func(s *sql.Selector) {
|
||||
|
||||
@@ -140,31 +140,6 @@ func ImagePrice4k(v float64) predicate.Group {
|
||||
return predicate.Group(sql.FieldEQ(FieldImagePrice4k, v))
|
||||
}
|
||||
|
||||
// SoraImagePrice360 applies equality check predicate on the "sora_image_price_360" field. It's identical to SoraImagePrice360EQ.
|
||||
func SoraImagePrice360(v float64) predicate.Group {
|
||||
return predicate.Group(sql.FieldEQ(FieldSoraImagePrice360, v))
|
||||
}
|
||||
|
||||
// SoraImagePrice540 applies equality check predicate on the "sora_image_price_540" field. It's identical to SoraImagePrice540EQ.
|
||||
func SoraImagePrice540(v float64) predicate.Group {
|
||||
return predicate.Group(sql.FieldEQ(FieldSoraImagePrice540, v))
|
||||
}
|
||||
|
||||
// SoraVideoPricePerRequest applies equality check predicate on the "sora_video_price_per_request" field. It's identical to SoraVideoPricePerRequestEQ.
|
||||
func SoraVideoPricePerRequest(v float64) predicate.Group {
|
||||
return predicate.Group(sql.FieldEQ(FieldSoraVideoPricePerRequest, v))
|
||||
}
|
||||
|
||||
// SoraVideoPricePerRequestHd applies equality check predicate on the "sora_video_price_per_request_hd" field. It's identical to SoraVideoPricePerRequestHdEQ.
|
||||
func SoraVideoPricePerRequestHd(v float64) predicate.Group {
|
||||
return predicate.Group(sql.FieldEQ(FieldSoraVideoPricePerRequestHd, v))
|
||||
}
|
||||
|
||||
// SoraStorageQuotaBytes applies equality check predicate on the "sora_storage_quota_bytes" field. It's identical to SoraStorageQuotaBytesEQ.
|
||||
func SoraStorageQuotaBytes(v int64) predicate.Group {
|
||||
return predicate.Group(sql.FieldEQ(FieldSoraStorageQuotaBytes, v))
|
||||
}
|
||||
|
||||
// ClaudeCodeOnly applies equality check predicate on the "claude_code_only" field. It's identical to ClaudeCodeOnlyEQ.
|
||||
func ClaudeCodeOnly(v bool) predicate.Group {
|
||||
return predicate.Group(sql.FieldEQ(FieldClaudeCodeOnly, v))
|
||||
@@ -200,11 +175,26 @@ func AllowMessagesDispatch(v bool) predicate.Group {
|
||||
return predicate.Group(sql.FieldEQ(FieldAllowMessagesDispatch, v))
|
||||
}
|
||||
|
||||
// RequireOauthOnly applies equality check predicate on the "require_oauth_only" field. It's identical to RequireOauthOnlyEQ.
|
||||
func RequireOauthOnly(v bool) predicate.Group {
|
||||
return predicate.Group(sql.FieldEQ(FieldRequireOauthOnly, v))
|
||||
}
|
||||
|
||||
// RequirePrivacySet applies equality check predicate on the "require_privacy_set" field. It's identical to RequirePrivacySetEQ.
|
||||
func RequirePrivacySet(v bool) predicate.Group {
|
||||
return predicate.Group(sql.FieldEQ(FieldRequirePrivacySet, v))
|
||||
}
|
||||
|
||||
// DefaultMappedModel applies equality check predicate on the "default_mapped_model" field. It's identical to DefaultMappedModelEQ.
|
||||
func DefaultMappedModel(v string) predicate.Group {
|
||||
return predicate.Group(sql.FieldEQ(FieldDefaultMappedModel, v))
|
||||
}
|
||||
|
||||
// RpmLimit applies equality check predicate on the "rpm_limit" field. It's identical to RpmLimitEQ.
|
||||
func RpmLimit(v int) predicate.Group {
|
||||
return predicate.Group(sql.FieldEQ(FieldRpmLimit, v))
|
||||
}
|
||||
|
||||
// CreatedAtEQ applies the EQ predicate on the "created_at" field.
|
||||
func CreatedAtEQ(v time.Time) predicate.Group {
|
||||
return predicate.Group(sql.FieldEQ(FieldCreatedAt, v))
|
||||
@@ -1060,246 +1050,6 @@ func ImagePrice4kNotNil() predicate.Group {
|
||||
return predicate.Group(sql.FieldNotNull(FieldImagePrice4k))
|
||||
}
|
||||
|
||||
// SoraImagePrice360EQ applies the EQ predicate on the "sora_image_price_360" field.
|
||||
func SoraImagePrice360EQ(v float64) predicate.Group {
|
||||
return predicate.Group(sql.FieldEQ(FieldSoraImagePrice360, v))
|
||||
}
|
||||
|
||||
// SoraImagePrice360NEQ applies the NEQ predicate on the "sora_image_price_360" field.
|
||||
func SoraImagePrice360NEQ(v float64) predicate.Group {
|
||||
return predicate.Group(sql.FieldNEQ(FieldSoraImagePrice360, v))
|
||||
}
|
||||
|
||||
// SoraImagePrice360In applies the In predicate on the "sora_image_price_360" field.
|
||||
func SoraImagePrice360In(vs ...float64) predicate.Group {
|
||||
return predicate.Group(sql.FieldIn(FieldSoraImagePrice360, vs...))
|
||||
}
|
||||
|
||||
// SoraImagePrice360NotIn applies the NotIn predicate on the "sora_image_price_360" field.
|
||||
func SoraImagePrice360NotIn(vs ...float64) predicate.Group {
|
||||
return predicate.Group(sql.FieldNotIn(FieldSoraImagePrice360, vs...))
|
||||
}
|
||||
|
||||
// SoraImagePrice360GT applies the GT predicate on the "sora_image_price_360" field.
|
||||
func SoraImagePrice360GT(v float64) predicate.Group {
|
||||
return predicate.Group(sql.FieldGT(FieldSoraImagePrice360, v))
|
||||
}
|
||||
|
||||
// SoraImagePrice360GTE applies the GTE predicate on the "sora_image_price_360" field.
|
||||
func SoraImagePrice360GTE(v float64) predicate.Group {
|
||||
return predicate.Group(sql.FieldGTE(FieldSoraImagePrice360, v))
|
||||
}
|
||||
|
||||
// SoraImagePrice360LT applies the LT predicate on the "sora_image_price_360" field.
|
||||
func SoraImagePrice360LT(v float64) predicate.Group {
|
||||
return predicate.Group(sql.FieldLT(FieldSoraImagePrice360, v))
|
||||
}
|
||||
|
||||
// SoraImagePrice360LTE applies the LTE predicate on the "sora_image_price_360" field.
|
||||
func SoraImagePrice360LTE(v float64) predicate.Group {
|
||||
return predicate.Group(sql.FieldLTE(FieldSoraImagePrice360, v))
|
||||
}
|
||||
|
||||
// SoraImagePrice360IsNil applies the IsNil predicate on the "sora_image_price_360" field.
|
||||
func SoraImagePrice360IsNil() predicate.Group {
|
||||
return predicate.Group(sql.FieldIsNull(FieldSoraImagePrice360))
|
||||
}
|
||||
|
||||
// SoraImagePrice360NotNil applies the NotNil predicate on the "sora_image_price_360" field.
|
||||
func SoraImagePrice360NotNil() predicate.Group {
|
||||
return predicate.Group(sql.FieldNotNull(FieldSoraImagePrice360))
|
||||
}
|
||||
|
||||
// SoraImagePrice540EQ applies the EQ predicate on the "sora_image_price_540" field.
|
||||
func SoraImagePrice540EQ(v float64) predicate.Group {
|
||||
return predicate.Group(sql.FieldEQ(FieldSoraImagePrice540, v))
|
||||
}
|
||||
|
||||
// SoraImagePrice540NEQ applies the NEQ predicate on the "sora_image_price_540" field.
|
||||
func SoraImagePrice540NEQ(v float64) predicate.Group {
|
||||
return predicate.Group(sql.FieldNEQ(FieldSoraImagePrice540, v))
|
||||
}
|
||||
|
||||
// SoraImagePrice540In applies the In predicate on the "sora_image_price_540" field.
|
||||
func SoraImagePrice540In(vs ...float64) predicate.Group {
|
||||
return predicate.Group(sql.FieldIn(FieldSoraImagePrice540, vs...))
|
||||
}
|
||||
|
||||
// SoraImagePrice540NotIn applies the NotIn predicate on the "sora_image_price_540" field.
|
||||
func SoraImagePrice540NotIn(vs ...float64) predicate.Group {
|
||||
return predicate.Group(sql.FieldNotIn(FieldSoraImagePrice540, vs...))
|
||||
}
|
||||
|
||||
// SoraImagePrice540GT applies the GT predicate on the "sora_image_price_540" field.
|
||||
func SoraImagePrice540GT(v float64) predicate.Group {
|
||||
return predicate.Group(sql.FieldGT(FieldSoraImagePrice540, v))
|
||||
}
|
||||
|
||||
// SoraImagePrice540GTE applies the GTE predicate on the "sora_image_price_540" field.
|
||||
func SoraImagePrice540GTE(v float64) predicate.Group {
|
||||
return predicate.Group(sql.FieldGTE(FieldSoraImagePrice540, v))
|
||||
}
|
||||
|
||||
// SoraImagePrice540LT applies the LT predicate on the "sora_image_price_540" field.
|
||||
func SoraImagePrice540LT(v float64) predicate.Group {
|
||||
return predicate.Group(sql.FieldLT(FieldSoraImagePrice540, v))
|
||||
}
|
||||
|
||||
// SoraImagePrice540LTE applies the LTE predicate on the "sora_image_price_540" field.
|
||||
func SoraImagePrice540LTE(v float64) predicate.Group {
|
||||
return predicate.Group(sql.FieldLTE(FieldSoraImagePrice540, v))
|
||||
}
|
||||
|
||||
// SoraImagePrice540IsNil applies the IsNil predicate on the "sora_image_price_540" field.
|
||||
func SoraImagePrice540IsNil() predicate.Group {
|
||||
return predicate.Group(sql.FieldIsNull(FieldSoraImagePrice540))
|
||||
}
|
||||
|
||||
// SoraImagePrice540NotNil applies the NotNil predicate on the "sora_image_price_540" field.
|
||||
func SoraImagePrice540NotNil() predicate.Group {
|
||||
return predicate.Group(sql.FieldNotNull(FieldSoraImagePrice540))
|
||||
}
|
||||
|
||||
// SoraVideoPricePerRequestEQ applies the EQ predicate on the "sora_video_price_per_request" field.
|
||||
func SoraVideoPricePerRequestEQ(v float64) predicate.Group {
|
||||
return predicate.Group(sql.FieldEQ(FieldSoraVideoPricePerRequest, v))
|
||||
}
|
||||
|
||||
// SoraVideoPricePerRequestNEQ applies the NEQ predicate on the "sora_video_price_per_request" field.
|
||||
func SoraVideoPricePerRequestNEQ(v float64) predicate.Group {
|
||||
return predicate.Group(sql.FieldNEQ(FieldSoraVideoPricePerRequest, v))
|
||||
}
|
||||
|
||||
// SoraVideoPricePerRequestIn applies the In predicate on the "sora_video_price_per_request" field.
|
||||
func SoraVideoPricePerRequestIn(vs ...float64) predicate.Group {
|
||||
return predicate.Group(sql.FieldIn(FieldSoraVideoPricePerRequest, vs...))
|
||||
}
|
||||
|
||||
// SoraVideoPricePerRequestNotIn applies the NotIn predicate on the "sora_video_price_per_request" field.
|
||||
func SoraVideoPricePerRequestNotIn(vs ...float64) predicate.Group {
|
||||
return predicate.Group(sql.FieldNotIn(FieldSoraVideoPricePerRequest, vs...))
|
||||
}
|
||||
|
||||
// SoraVideoPricePerRequestGT applies the GT predicate on the "sora_video_price_per_request" field.
|
||||
func SoraVideoPricePerRequestGT(v float64) predicate.Group {
|
||||
return predicate.Group(sql.FieldGT(FieldSoraVideoPricePerRequest, v))
|
||||
}
|
||||
|
||||
// SoraVideoPricePerRequestGTE applies the GTE predicate on the "sora_video_price_per_request" field.
|
||||
func SoraVideoPricePerRequestGTE(v float64) predicate.Group {
|
||||
return predicate.Group(sql.FieldGTE(FieldSoraVideoPricePerRequest, v))
|
||||
}
|
||||
|
||||
// SoraVideoPricePerRequestLT applies the LT predicate on the "sora_video_price_per_request" field.
|
||||
func SoraVideoPricePerRequestLT(v float64) predicate.Group {
|
||||
return predicate.Group(sql.FieldLT(FieldSoraVideoPricePerRequest, v))
|
||||
}
|
||||
|
||||
// SoraVideoPricePerRequestLTE applies the LTE predicate on the "sora_video_price_per_request" field.
|
||||
func SoraVideoPricePerRequestLTE(v float64) predicate.Group {
|
||||
return predicate.Group(sql.FieldLTE(FieldSoraVideoPricePerRequest, v))
|
||||
}
|
||||
|
||||
// SoraVideoPricePerRequestIsNil applies the IsNil predicate on the "sora_video_price_per_request" field.
|
||||
func SoraVideoPricePerRequestIsNil() predicate.Group {
|
||||
return predicate.Group(sql.FieldIsNull(FieldSoraVideoPricePerRequest))
|
||||
}
|
||||
|
||||
// SoraVideoPricePerRequestNotNil applies the NotNil predicate on the "sora_video_price_per_request" field.
|
||||
func SoraVideoPricePerRequestNotNil() predicate.Group {
|
||||
return predicate.Group(sql.FieldNotNull(FieldSoraVideoPricePerRequest))
|
||||
}
|
||||
|
||||
// SoraVideoPricePerRequestHdEQ applies the EQ predicate on the "sora_video_price_per_request_hd" field.
|
||||
func SoraVideoPricePerRequestHdEQ(v float64) predicate.Group {
|
||||
return predicate.Group(sql.FieldEQ(FieldSoraVideoPricePerRequestHd, v))
|
||||
}
|
||||
|
||||
// SoraVideoPricePerRequestHdNEQ applies the NEQ predicate on the "sora_video_price_per_request_hd" field.
|
||||
func SoraVideoPricePerRequestHdNEQ(v float64) predicate.Group {
|
||||
return predicate.Group(sql.FieldNEQ(FieldSoraVideoPricePerRequestHd, v))
|
||||
}
|
||||
|
||||
// SoraVideoPricePerRequestHdIn applies the In predicate on the "sora_video_price_per_request_hd" field.
|
||||
func SoraVideoPricePerRequestHdIn(vs ...float64) predicate.Group {
|
||||
return predicate.Group(sql.FieldIn(FieldSoraVideoPricePerRequestHd, vs...))
|
||||
}
|
||||
|
||||
// SoraVideoPricePerRequestHdNotIn applies the NotIn predicate on the "sora_video_price_per_request_hd" field.
|
||||
func SoraVideoPricePerRequestHdNotIn(vs ...float64) predicate.Group {
|
||||
return predicate.Group(sql.FieldNotIn(FieldSoraVideoPricePerRequestHd, vs...))
|
||||
}
|
||||
|
||||
// SoraVideoPricePerRequestHdGT applies the GT predicate on the "sora_video_price_per_request_hd" field.
|
||||
func SoraVideoPricePerRequestHdGT(v float64) predicate.Group {
|
||||
return predicate.Group(sql.FieldGT(FieldSoraVideoPricePerRequestHd, v))
|
||||
}
|
||||
|
||||
// SoraVideoPricePerRequestHdGTE applies the GTE predicate on the "sora_video_price_per_request_hd" field.
|
||||
func SoraVideoPricePerRequestHdGTE(v float64) predicate.Group {
|
||||
return predicate.Group(sql.FieldGTE(FieldSoraVideoPricePerRequestHd, v))
|
||||
}
|
||||
|
||||
// SoraVideoPricePerRequestHdLT applies the LT predicate on the "sora_video_price_per_request_hd" field.
|
||||
func SoraVideoPricePerRequestHdLT(v float64) predicate.Group {
|
||||
return predicate.Group(sql.FieldLT(FieldSoraVideoPricePerRequestHd, v))
|
||||
}
|
||||
|
||||
// SoraVideoPricePerRequestHdLTE applies the LTE predicate on the "sora_video_price_per_request_hd" field.
|
||||
func SoraVideoPricePerRequestHdLTE(v float64) predicate.Group {
|
||||
return predicate.Group(sql.FieldLTE(FieldSoraVideoPricePerRequestHd, v))
|
||||
}
|
||||
|
||||
// SoraVideoPricePerRequestHdIsNil applies the IsNil predicate on the "sora_video_price_per_request_hd" field.
|
||||
func SoraVideoPricePerRequestHdIsNil() predicate.Group {
|
||||
return predicate.Group(sql.FieldIsNull(FieldSoraVideoPricePerRequestHd))
|
||||
}
|
||||
|
||||
// SoraVideoPricePerRequestHdNotNil applies the NotNil predicate on the "sora_video_price_per_request_hd" field.
|
||||
func SoraVideoPricePerRequestHdNotNil() predicate.Group {
|
||||
return predicate.Group(sql.FieldNotNull(FieldSoraVideoPricePerRequestHd))
|
||||
}
|
||||
|
||||
// SoraStorageQuotaBytesEQ applies the EQ predicate on the "sora_storage_quota_bytes" field.
|
||||
func SoraStorageQuotaBytesEQ(v int64) predicate.Group {
|
||||
return predicate.Group(sql.FieldEQ(FieldSoraStorageQuotaBytes, v))
|
||||
}
|
||||
|
||||
// SoraStorageQuotaBytesNEQ applies the NEQ predicate on the "sora_storage_quota_bytes" field.
|
||||
func SoraStorageQuotaBytesNEQ(v int64) predicate.Group {
|
||||
return predicate.Group(sql.FieldNEQ(FieldSoraStorageQuotaBytes, v))
|
||||
}
|
||||
|
||||
// SoraStorageQuotaBytesIn applies the In predicate on the "sora_storage_quota_bytes" field.
|
||||
func SoraStorageQuotaBytesIn(vs ...int64) predicate.Group {
|
||||
return predicate.Group(sql.FieldIn(FieldSoraStorageQuotaBytes, vs...))
|
||||
}
|
||||
|
||||
// SoraStorageQuotaBytesNotIn applies the NotIn predicate on the "sora_storage_quota_bytes" field.
|
||||
func SoraStorageQuotaBytesNotIn(vs ...int64) predicate.Group {
|
||||
return predicate.Group(sql.FieldNotIn(FieldSoraStorageQuotaBytes, vs...))
|
||||
}
|
||||
|
||||
// SoraStorageQuotaBytesGT applies the GT predicate on the "sora_storage_quota_bytes" field.
|
||||
func SoraStorageQuotaBytesGT(v int64) predicate.Group {
|
||||
return predicate.Group(sql.FieldGT(FieldSoraStorageQuotaBytes, v))
|
||||
}
|
||||
|
||||
// SoraStorageQuotaBytesGTE applies the GTE predicate on the "sora_storage_quota_bytes" field.
|
||||
func SoraStorageQuotaBytesGTE(v int64) predicate.Group {
|
||||
return predicate.Group(sql.FieldGTE(FieldSoraStorageQuotaBytes, v))
|
||||
}
|
||||
|
||||
// SoraStorageQuotaBytesLT applies the LT predicate on the "sora_storage_quota_bytes" field.
|
||||
func SoraStorageQuotaBytesLT(v int64) predicate.Group {
|
||||
return predicate.Group(sql.FieldLT(FieldSoraStorageQuotaBytes, v))
|
||||
}
|
||||
|
||||
// SoraStorageQuotaBytesLTE applies the LTE predicate on the "sora_storage_quota_bytes" field.
|
||||
func SoraStorageQuotaBytesLTE(v int64) predicate.Group {
|
||||
return predicate.Group(sql.FieldLTE(FieldSoraStorageQuotaBytes, v))
|
||||
}
|
||||
|
||||
// ClaudeCodeOnlyEQ applies the EQ predicate on the "claude_code_only" field.
|
||||
func ClaudeCodeOnlyEQ(v bool) predicate.Group {
|
||||
return predicate.Group(sql.FieldEQ(FieldClaudeCodeOnly, v))
|
||||
@@ -1490,6 +1240,26 @@ func AllowMessagesDispatchNEQ(v bool) predicate.Group {
|
||||
return predicate.Group(sql.FieldNEQ(FieldAllowMessagesDispatch, v))
|
||||
}
|
||||
|
||||
// RequireOauthOnlyEQ applies the EQ predicate on the "require_oauth_only" field.
|
||||
func RequireOauthOnlyEQ(v bool) predicate.Group {
|
||||
return predicate.Group(sql.FieldEQ(FieldRequireOauthOnly, v))
|
||||
}
|
||||
|
||||
// RequireOauthOnlyNEQ applies the NEQ predicate on the "require_oauth_only" field.
|
||||
func RequireOauthOnlyNEQ(v bool) predicate.Group {
|
||||
return predicate.Group(sql.FieldNEQ(FieldRequireOauthOnly, v))
|
||||
}
|
||||
|
||||
// RequirePrivacySetEQ applies the EQ predicate on the "require_privacy_set" field.
|
||||
func RequirePrivacySetEQ(v bool) predicate.Group {
|
||||
return predicate.Group(sql.FieldEQ(FieldRequirePrivacySet, v))
|
||||
}
|
||||
|
||||
// RequirePrivacySetNEQ applies the NEQ predicate on the "require_privacy_set" field.
|
||||
func RequirePrivacySetNEQ(v bool) predicate.Group {
|
||||
return predicate.Group(sql.FieldNEQ(FieldRequirePrivacySet, v))
|
||||
}
|
||||
|
||||
// DefaultMappedModelEQ applies the EQ predicate on the "default_mapped_model" field.
|
||||
func DefaultMappedModelEQ(v string) predicate.Group {
|
||||
return predicate.Group(sql.FieldEQ(FieldDefaultMappedModel, v))
|
||||
@@ -1555,6 +1325,46 @@ func DefaultMappedModelContainsFold(v string) predicate.Group {
|
||||
return predicate.Group(sql.FieldContainsFold(FieldDefaultMappedModel, v))
|
||||
}
|
||||
|
||||
// RpmLimitEQ applies the EQ predicate on the "rpm_limit" field.
|
||||
func RpmLimitEQ(v int) predicate.Group {
|
||||
return predicate.Group(sql.FieldEQ(FieldRpmLimit, v))
|
||||
}
|
||||
|
||||
// RpmLimitNEQ applies the NEQ predicate on the "rpm_limit" field.
|
||||
func RpmLimitNEQ(v int) predicate.Group {
|
||||
return predicate.Group(sql.FieldNEQ(FieldRpmLimit, v))
|
||||
}
|
||||
|
||||
// RpmLimitIn applies the In predicate on the "rpm_limit" field.
|
||||
func RpmLimitIn(vs ...int) predicate.Group {
|
||||
return predicate.Group(sql.FieldIn(FieldRpmLimit, vs...))
|
||||
}
|
||||
|
||||
// RpmLimitNotIn applies the NotIn predicate on the "rpm_limit" field.
|
||||
func RpmLimitNotIn(vs ...int) predicate.Group {
|
||||
return predicate.Group(sql.FieldNotIn(FieldRpmLimit, vs...))
|
||||
}
|
||||
|
||||
// RpmLimitGT applies the GT predicate on the "rpm_limit" field.
|
||||
func RpmLimitGT(v int) predicate.Group {
|
||||
return predicate.Group(sql.FieldGT(FieldRpmLimit, v))
|
||||
}
|
||||
|
||||
// RpmLimitGTE applies the GTE predicate on the "rpm_limit" field.
|
||||
func RpmLimitGTE(v int) predicate.Group {
|
||||
return predicate.Group(sql.FieldGTE(FieldRpmLimit, v))
|
||||
}
|
||||
|
||||
// RpmLimitLT applies the LT predicate on the "rpm_limit" field.
|
||||
func RpmLimitLT(v int) predicate.Group {
|
||||
return predicate.Group(sql.FieldLT(FieldRpmLimit, v))
|
||||
}
|
||||
|
||||
// RpmLimitLTE applies the LTE predicate on the "rpm_limit" field.
|
||||
func RpmLimitLTE(v int) predicate.Group {
|
||||
return predicate.Group(sql.FieldLTE(FieldRpmLimit, v))
|
||||
}
|
||||
|
||||
// HasAPIKeys applies the HasEdge predicate on the "api_keys" edge.
|
||||
func HasAPIKeys() predicate.Group {
|
||||
return predicate.Group(func(s *sql.Selector) {
|
||||
|
||||
@@ -18,6 +18,7 @@ import (
|
||||
"github.com/Wei-Shaw/sub2api/ent/usagelog"
|
||||
"github.com/Wei-Shaw/sub2api/ent/user"
|
||||
"github.com/Wei-Shaw/sub2api/ent/usersubscription"
|
||||
"github.com/Wei-Shaw/sub2api/internal/domain"
|
||||
)
|
||||
|
||||
// GroupCreate is the builder for creating a Group entity.
|
||||
@@ -258,76 +259,6 @@ func (_c *GroupCreate) SetNillableImagePrice4k(v *float64) *GroupCreate {
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetSoraImagePrice360 sets the "sora_image_price_360" field.
|
||||
func (_c *GroupCreate) SetSoraImagePrice360(v float64) *GroupCreate {
|
||||
_c.mutation.SetSoraImagePrice360(v)
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetNillableSoraImagePrice360 sets the "sora_image_price_360" field if the given value is not nil.
|
||||
func (_c *GroupCreate) SetNillableSoraImagePrice360(v *float64) *GroupCreate {
|
||||
if v != nil {
|
||||
_c.SetSoraImagePrice360(*v)
|
||||
}
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetSoraImagePrice540 sets the "sora_image_price_540" field.
|
||||
func (_c *GroupCreate) SetSoraImagePrice540(v float64) *GroupCreate {
|
||||
_c.mutation.SetSoraImagePrice540(v)
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetNillableSoraImagePrice540 sets the "sora_image_price_540" field if the given value is not nil.
|
||||
func (_c *GroupCreate) SetNillableSoraImagePrice540(v *float64) *GroupCreate {
|
||||
if v != nil {
|
||||
_c.SetSoraImagePrice540(*v)
|
||||
}
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetSoraVideoPricePerRequest sets the "sora_video_price_per_request" field.
|
||||
func (_c *GroupCreate) SetSoraVideoPricePerRequest(v float64) *GroupCreate {
|
||||
_c.mutation.SetSoraVideoPricePerRequest(v)
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetNillableSoraVideoPricePerRequest sets the "sora_video_price_per_request" field if the given value is not nil.
|
||||
func (_c *GroupCreate) SetNillableSoraVideoPricePerRequest(v *float64) *GroupCreate {
|
||||
if v != nil {
|
||||
_c.SetSoraVideoPricePerRequest(*v)
|
||||
}
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetSoraVideoPricePerRequestHd sets the "sora_video_price_per_request_hd" field.
|
||||
func (_c *GroupCreate) SetSoraVideoPricePerRequestHd(v float64) *GroupCreate {
|
||||
_c.mutation.SetSoraVideoPricePerRequestHd(v)
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetNillableSoraVideoPricePerRequestHd sets the "sora_video_price_per_request_hd" field if the given value is not nil.
|
||||
func (_c *GroupCreate) SetNillableSoraVideoPricePerRequestHd(v *float64) *GroupCreate {
|
||||
if v != nil {
|
||||
_c.SetSoraVideoPricePerRequestHd(*v)
|
||||
}
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetSoraStorageQuotaBytes sets the "sora_storage_quota_bytes" field.
|
||||
func (_c *GroupCreate) SetSoraStorageQuotaBytes(v int64) *GroupCreate {
|
||||
_c.mutation.SetSoraStorageQuotaBytes(v)
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetNillableSoraStorageQuotaBytes sets the "sora_storage_quota_bytes" field if the given value is not nil.
|
||||
func (_c *GroupCreate) SetNillableSoraStorageQuotaBytes(v *int64) *GroupCreate {
|
||||
if v != nil {
|
||||
_c.SetSoraStorageQuotaBytes(*v)
|
||||
}
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetClaudeCodeOnly sets the "claude_code_only" field.
|
||||
func (_c *GroupCreate) SetClaudeCodeOnly(v bool) *GroupCreate {
|
||||
_c.mutation.SetClaudeCodeOnly(v)
|
||||
@@ -438,6 +369,34 @@ func (_c *GroupCreate) SetNillableAllowMessagesDispatch(v *bool) *GroupCreate {
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetRequireOauthOnly sets the "require_oauth_only" field.
|
||||
func (_c *GroupCreate) SetRequireOauthOnly(v bool) *GroupCreate {
|
||||
_c.mutation.SetRequireOauthOnly(v)
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetNillableRequireOauthOnly sets the "require_oauth_only" field if the given value is not nil.
|
||||
func (_c *GroupCreate) SetNillableRequireOauthOnly(v *bool) *GroupCreate {
|
||||
if v != nil {
|
||||
_c.SetRequireOauthOnly(*v)
|
||||
}
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetRequirePrivacySet sets the "require_privacy_set" field.
|
||||
func (_c *GroupCreate) SetRequirePrivacySet(v bool) *GroupCreate {
|
||||
_c.mutation.SetRequirePrivacySet(v)
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetNillableRequirePrivacySet sets the "require_privacy_set" field if the given value is not nil.
|
||||
func (_c *GroupCreate) SetNillableRequirePrivacySet(v *bool) *GroupCreate {
|
||||
if v != nil {
|
||||
_c.SetRequirePrivacySet(*v)
|
||||
}
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetDefaultMappedModel sets the "default_mapped_model" field.
|
||||
func (_c *GroupCreate) SetDefaultMappedModel(v string) *GroupCreate {
|
||||
_c.mutation.SetDefaultMappedModel(v)
|
||||
@@ -452,6 +411,34 @@ func (_c *GroupCreate) SetNillableDefaultMappedModel(v *string) *GroupCreate {
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetMessagesDispatchModelConfig sets the "messages_dispatch_model_config" field.
|
||||
func (_c *GroupCreate) SetMessagesDispatchModelConfig(v domain.OpenAIMessagesDispatchModelConfig) *GroupCreate {
|
||||
_c.mutation.SetMessagesDispatchModelConfig(v)
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetNillableMessagesDispatchModelConfig sets the "messages_dispatch_model_config" field if the given value is not nil.
|
||||
func (_c *GroupCreate) SetNillableMessagesDispatchModelConfig(v *domain.OpenAIMessagesDispatchModelConfig) *GroupCreate {
|
||||
if v != nil {
|
||||
_c.SetMessagesDispatchModelConfig(*v)
|
||||
}
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetRpmLimit sets the "rpm_limit" field.
|
||||
func (_c *GroupCreate) SetRpmLimit(v int) *GroupCreate {
|
||||
_c.mutation.SetRpmLimit(v)
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetNillableRpmLimit sets the "rpm_limit" field if the given value is not nil.
|
||||
func (_c *GroupCreate) SetNillableRpmLimit(v *int) *GroupCreate {
|
||||
if v != nil {
|
||||
_c.SetRpmLimit(*v)
|
||||
}
|
||||
return _c
|
||||
}
|
||||
|
||||
// AddAPIKeyIDs adds the "api_keys" edge to the APIKey entity by IDs.
|
||||
func (_c *GroupCreate) AddAPIKeyIDs(ids ...int64) *GroupCreate {
|
||||
_c.mutation.AddAPIKeyIDs(ids...)
|
||||
@@ -617,10 +604,6 @@ func (_c *GroupCreate) defaults() error {
|
||||
v := group.DefaultDefaultValidityDays
|
||||
_c.mutation.SetDefaultValidityDays(v)
|
||||
}
|
||||
if _, ok := _c.mutation.SoraStorageQuotaBytes(); !ok {
|
||||
v := group.DefaultSoraStorageQuotaBytes
|
||||
_c.mutation.SetSoraStorageQuotaBytes(v)
|
||||
}
|
||||
if _, ok := _c.mutation.ClaudeCodeOnly(); !ok {
|
||||
v := group.DefaultClaudeCodeOnly
|
||||
_c.mutation.SetClaudeCodeOnly(v)
|
||||
@@ -645,10 +628,26 @@ func (_c *GroupCreate) defaults() error {
|
||||
v := group.DefaultAllowMessagesDispatch
|
||||
_c.mutation.SetAllowMessagesDispatch(v)
|
||||
}
|
||||
if _, ok := _c.mutation.RequireOauthOnly(); !ok {
|
||||
v := group.DefaultRequireOauthOnly
|
||||
_c.mutation.SetRequireOauthOnly(v)
|
||||
}
|
||||
if _, ok := _c.mutation.RequirePrivacySet(); !ok {
|
||||
v := group.DefaultRequirePrivacySet
|
||||
_c.mutation.SetRequirePrivacySet(v)
|
||||
}
|
||||
if _, ok := _c.mutation.DefaultMappedModel(); !ok {
|
||||
v := group.DefaultDefaultMappedModel
|
||||
_c.mutation.SetDefaultMappedModel(v)
|
||||
}
|
||||
if _, ok := _c.mutation.MessagesDispatchModelConfig(); !ok {
|
||||
v := group.DefaultMessagesDispatchModelConfig
|
||||
_c.mutation.SetMessagesDispatchModelConfig(v)
|
||||
}
|
||||
if _, ok := _c.mutation.RpmLimit(); !ok {
|
||||
v := group.DefaultRpmLimit
|
||||
_c.mutation.SetRpmLimit(v)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -701,9 +700,6 @@ func (_c *GroupCreate) check() error {
|
||||
if _, ok := _c.mutation.DefaultValidityDays(); !ok {
|
||||
return &ValidationError{Name: "default_validity_days", err: errors.New(`ent: missing required field "Group.default_validity_days"`)}
|
||||
}
|
||||
if _, ok := _c.mutation.SoraStorageQuotaBytes(); !ok {
|
||||
return &ValidationError{Name: "sora_storage_quota_bytes", err: errors.New(`ent: missing required field "Group.sora_storage_quota_bytes"`)}
|
||||
}
|
||||
if _, ok := _c.mutation.ClaudeCodeOnly(); !ok {
|
||||
return &ValidationError{Name: "claude_code_only", err: errors.New(`ent: missing required field "Group.claude_code_only"`)}
|
||||
}
|
||||
@@ -722,6 +718,12 @@ func (_c *GroupCreate) check() error {
|
||||
if _, ok := _c.mutation.AllowMessagesDispatch(); !ok {
|
||||
return &ValidationError{Name: "allow_messages_dispatch", err: errors.New(`ent: missing required field "Group.allow_messages_dispatch"`)}
|
||||
}
|
||||
if _, ok := _c.mutation.RequireOauthOnly(); !ok {
|
||||
return &ValidationError{Name: "require_oauth_only", err: errors.New(`ent: missing required field "Group.require_oauth_only"`)}
|
||||
}
|
||||
if _, ok := _c.mutation.RequirePrivacySet(); !ok {
|
||||
return &ValidationError{Name: "require_privacy_set", err: errors.New(`ent: missing required field "Group.require_privacy_set"`)}
|
||||
}
|
||||
if _, ok := _c.mutation.DefaultMappedModel(); !ok {
|
||||
return &ValidationError{Name: "default_mapped_model", err: errors.New(`ent: missing required field "Group.default_mapped_model"`)}
|
||||
}
|
||||
@@ -730,6 +732,12 @@ func (_c *GroupCreate) check() error {
|
||||
return &ValidationError{Name: "default_mapped_model", err: fmt.Errorf(`ent: validator failed for field "Group.default_mapped_model": %w`, err)}
|
||||
}
|
||||
}
|
||||
if _, ok := _c.mutation.MessagesDispatchModelConfig(); !ok {
|
||||
return &ValidationError{Name: "messages_dispatch_model_config", err: errors.New(`ent: missing required field "Group.messages_dispatch_model_config"`)}
|
||||
}
|
||||
if _, ok := _c.mutation.RpmLimit(); !ok {
|
||||
return &ValidationError{Name: "rpm_limit", err: errors.New(`ent: missing required field "Group.rpm_limit"`)}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -825,26 +833,6 @@ func (_c *GroupCreate) createSpec() (*Group, *sqlgraph.CreateSpec) {
|
||||
_spec.SetField(group.FieldImagePrice4k, field.TypeFloat64, value)
|
||||
_node.ImagePrice4k = &value
|
||||
}
|
||||
if value, ok := _c.mutation.SoraImagePrice360(); ok {
|
||||
_spec.SetField(group.FieldSoraImagePrice360, field.TypeFloat64, value)
|
||||
_node.SoraImagePrice360 = &value
|
||||
}
|
||||
if value, ok := _c.mutation.SoraImagePrice540(); ok {
|
||||
_spec.SetField(group.FieldSoraImagePrice540, field.TypeFloat64, value)
|
||||
_node.SoraImagePrice540 = &value
|
||||
}
|
||||
if value, ok := _c.mutation.SoraVideoPricePerRequest(); ok {
|
||||
_spec.SetField(group.FieldSoraVideoPricePerRequest, field.TypeFloat64, value)
|
||||
_node.SoraVideoPricePerRequest = &value
|
||||
}
|
||||
if value, ok := _c.mutation.SoraVideoPricePerRequestHd(); ok {
|
||||
_spec.SetField(group.FieldSoraVideoPricePerRequestHd, field.TypeFloat64, value)
|
||||
_node.SoraVideoPricePerRequestHd = &value
|
||||
}
|
||||
if value, ok := _c.mutation.SoraStorageQuotaBytes(); ok {
|
||||
_spec.SetField(group.FieldSoraStorageQuotaBytes, field.TypeInt64, value)
|
||||
_node.SoraStorageQuotaBytes = value
|
||||
}
|
||||
if value, ok := _c.mutation.ClaudeCodeOnly(); ok {
|
||||
_spec.SetField(group.FieldClaudeCodeOnly, field.TypeBool, value)
|
||||
_node.ClaudeCodeOnly = value
|
||||
@@ -881,10 +869,26 @@ func (_c *GroupCreate) createSpec() (*Group, *sqlgraph.CreateSpec) {
|
||||
_spec.SetField(group.FieldAllowMessagesDispatch, field.TypeBool, value)
|
||||
_node.AllowMessagesDispatch = value
|
||||
}
|
||||
if value, ok := _c.mutation.RequireOauthOnly(); ok {
|
||||
_spec.SetField(group.FieldRequireOauthOnly, field.TypeBool, value)
|
||||
_node.RequireOauthOnly = value
|
||||
}
|
||||
if value, ok := _c.mutation.RequirePrivacySet(); ok {
|
||||
_spec.SetField(group.FieldRequirePrivacySet, field.TypeBool, value)
|
||||
_node.RequirePrivacySet = value
|
||||
}
|
||||
if value, ok := _c.mutation.DefaultMappedModel(); ok {
|
||||
_spec.SetField(group.FieldDefaultMappedModel, field.TypeString, value)
|
||||
_node.DefaultMappedModel = value
|
||||
}
|
||||
if value, ok := _c.mutation.MessagesDispatchModelConfig(); ok {
|
||||
_spec.SetField(group.FieldMessagesDispatchModelConfig, field.TypeJSON, value)
|
||||
_node.MessagesDispatchModelConfig = value
|
||||
}
|
||||
if value, ok := _c.mutation.RpmLimit(); ok {
|
||||
_spec.SetField(group.FieldRpmLimit, field.TypeInt, value)
|
||||
_node.RpmLimit = value
|
||||
}
|
||||
if nodes := _c.mutation.APIKeysIDs(); len(nodes) > 0 {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
Rel: sqlgraph.O2M,
|
||||
@@ -1329,120 +1333,6 @@ func (u *GroupUpsert) ClearImagePrice4k() *GroupUpsert {
|
||||
return u
|
||||
}
|
||||
|
||||
// SetSoraImagePrice360 sets the "sora_image_price_360" field.
|
||||
func (u *GroupUpsert) SetSoraImagePrice360(v float64) *GroupUpsert {
|
||||
u.Set(group.FieldSoraImagePrice360, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// UpdateSoraImagePrice360 sets the "sora_image_price_360" field to the value that was provided on create.
|
||||
func (u *GroupUpsert) UpdateSoraImagePrice360() *GroupUpsert {
|
||||
u.SetExcluded(group.FieldSoraImagePrice360)
|
||||
return u
|
||||
}
|
||||
|
||||
// AddSoraImagePrice360 adds v to the "sora_image_price_360" field.
|
||||
func (u *GroupUpsert) AddSoraImagePrice360(v float64) *GroupUpsert {
|
||||
u.Add(group.FieldSoraImagePrice360, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// ClearSoraImagePrice360 clears the value of the "sora_image_price_360" field.
|
||||
func (u *GroupUpsert) ClearSoraImagePrice360() *GroupUpsert {
|
||||
u.SetNull(group.FieldSoraImagePrice360)
|
||||
return u
|
||||
}
|
||||
|
||||
// SetSoraImagePrice540 sets the "sora_image_price_540" field.
|
||||
func (u *GroupUpsert) SetSoraImagePrice540(v float64) *GroupUpsert {
|
||||
u.Set(group.FieldSoraImagePrice540, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// UpdateSoraImagePrice540 sets the "sora_image_price_540" field to the value that was provided on create.
|
||||
func (u *GroupUpsert) UpdateSoraImagePrice540() *GroupUpsert {
|
||||
u.SetExcluded(group.FieldSoraImagePrice540)
|
||||
return u
|
||||
}
|
||||
|
||||
// AddSoraImagePrice540 adds v to the "sora_image_price_540" field.
|
||||
func (u *GroupUpsert) AddSoraImagePrice540(v float64) *GroupUpsert {
|
||||
u.Add(group.FieldSoraImagePrice540, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// ClearSoraImagePrice540 clears the value of the "sora_image_price_540" field.
|
||||
func (u *GroupUpsert) ClearSoraImagePrice540() *GroupUpsert {
|
||||
u.SetNull(group.FieldSoraImagePrice540)
|
||||
return u
|
||||
}
|
||||
|
||||
// SetSoraVideoPricePerRequest sets the "sora_video_price_per_request" field.
|
||||
func (u *GroupUpsert) SetSoraVideoPricePerRequest(v float64) *GroupUpsert {
|
||||
u.Set(group.FieldSoraVideoPricePerRequest, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// UpdateSoraVideoPricePerRequest sets the "sora_video_price_per_request" field to the value that was provided on create.
|
||||
func (u *GroupUpsert) UpdateSoraVideoPricePerRequest() *GroupUpsert {
|
||||
u.SetExcluded(group.FieldSoraVideoPricePerRequest)
|
||||
return u
|
||||
}
|
||||
|
||||
// AddSoraVideoPricePerRequest adds v to the "sora_video_price_per_request" field.
|
||||
func (u *GroupUpsert) AddSoraVideoPricePerRequest(v float64) *GroupUpsert {
|
||||
u.Add(group.FieldSoraVideoPricePerRequest, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// ClearSoraVideoPricePerRequest clears the value of the "sora_video_price_per_request" field.
|
||||
func (u *GroupUpsert) ClearSoraVideoPricePerRequest() *GroupUpsert {
|
||||
u.SetNull(group.FieldSoraVideoPricePerRequest)
|
||||
return u
|
||||
}
|
||||
|
||||
// SetSoraVideoPricePerRequestHd sets the "sora_video_price_per_request_hd" field.
|
||||
func (u *GroupUpsert) SetSoraVideoPricePerRequestHd(v float64) *GroupUpsert {
|
||||
u.Set(group.FieldSoraVideoPricePerRequestHd, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// UpdateSoraVideoPricePerRequestHd sets the "sora_video_price_per_request_hd" field to the value that was provided on create.
|
||||
func (u *GroupUpsert) UpdateSoraVideoPricePerRequestHd() *GroupUpsert {
|
||||
u.SetExcluded(group.FieldSoraVideoPricePerRequestHd)
|
||||
return u
|
||||
}
|
||||
|
||||
// AddSoraVideoPricePerRequestHd adds v to the "sora_video_price_per_request_hd" field.
|
||||
func (u *GroupUpsert) AddSoraVideoPricePerRequestHd(v float64) *GroupUpsert {
|
||||
u.Add(group.FieldSoraVideoPricePerRequestHd, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// ClearSoraVideoPricePerRequestHd clears the value of the "sora_video_price_per_request_hd" field.
|
||||
func (u *GroupUpsert) ClearSoraVideoPricePerRequestHd() *GroupUpsert {
|
||||
u.SetNull(group.FieldSoraVideoPricePerRequestHd)
|
||||
return u
|
||||
}
|
||||
|
||||
// SetSoraStorageQuotaBytes sets the "sora_storage_quota_bytes" field.
|
||||
func (u *GroupUpsert) SetSoraStorageQuotaBytes(v int64) *GroupUpsert {
|
||||
u.Set(group.FieldSoraStorageQuotaBytes, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// UpdateSoraStorageQuotaBytes sets the "sora_storage_quota_bytes" field to the value that was provided on create.
|
||||
func (u *GroupUpsert) UpdateSoraStorageQuotaBytes() *GroupUpsert {
|
||||
u.SetExcluded(group.FieldSoraStorageQuotaBytes)
|
||||
return u
|
||||
}
|
||||
|
||||
// AddSoraStorageQuotaBytes adds v to the "sora_storage_quota_bytes" field.
|
||||
func (u *GroupUpsert) AddSoraStorageQuotaBytes(v int64) *GroupUpsert {
|
||||
u.Add(group.FieldSoraStorageQuotaBytes, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// SetClaudeCodeOnly sets the "claude_code_only" field.
|
||||
func (u *GroupUpsert) SetClaudeCodeOnly(v bool) *GroupUpsert {
|
||||
u.Set(group.FieldClaudeCodeOnly, v)
|
||||
@@ -1587,6 +1477,30 @@ func (u *GroupUpsert) UpdateAllowMessagesDispatch() *GroupUpsert {
|
||||
return u
|
||||
}
|
||||
|
||||
// SetRequireOauthOnly sets the "require_oauth_only" field.
|
||||
func (u *GroupUpsert) SetRequireOauthOnly(v bool) *GroupUpsert {
|
||||
u.Set(group.FieldRequireOauthOnly, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// UpdateRequireOauthOnly sets the "require_oauth_only" field to the value that was provided on create.
|
||||
func (u *GroupUpsert) UpdateRequireOauthOnly() *GroupUpsert {
|
||||
u.SetExcluded(group.FieldRequireOauthOnly)
|
||||
return u
|
||||
}
|
||||
|
||||
// SetRequirePrivacySet sets the "require_privacy_set" field.
|
||||
func (u *GroupUpsert) SetRequirePrivacySet(v bool) *GroupUpsert {
|
||||
u.Set(group.FieldRequirePrivacySet, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// UpdateRequirePrivacySet sets the "require_privacy_set" field to the value that was provided on create.
|
||||
func (u *GroupUpsert) UpdateRequirePrivacySet() *GroupUpsert {
|
||||
u.SetExcluded(group.FieldRequirePrivacySet)
|
||||
return u
|
||||
}
|
||||
|
||||
// SetDefaultMappedModel sets the "default_mapped_model" field.
|
||||
func (u *GroupUpsert) SetDefaultMappedModel(v string) *GroupUpsert {
|
||||
u.Set(group.FieldDefaultMappedModel, v)
|
||||
@@ -1599,6 +1513,36 @@ func (u *GroupUpsert) UpdateDefaultMappedModel() *GroupUpsert {
|
||||
return u
|
||||
}
|
||||
|
||||
// SetMessagesDispatchModelConfig sets the "messages_dispatch_model_config" field.
|
||||
func (u *GroupUpsert) SetMessagesDispatchModelConfig(v domain.OpenAIMessagesDispatchModelConfig) *GroupUpsert {
|
||||
u.Set(group.FieldMessagesDispatchModelConfig, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// UpdateMessagesDispatchModelConfig sets the "messages_dispatch_model_config" field to the value that was provided on create.
|
||||
func (u *GroupUpsert) UpdateMessagesDispatchModelConfig() *GroupUpsert {
|
||||
u.SetExcluded(group.FieldMessagesDispatchModelConfig)
|
||||
return u
|
||||
}
|
||||
|
||||
// SetRpmLimit sets the "rpm_limit" field.
|
||||
func (u *GroupUpsert) SetRpmLimit(v int) *GroupUpsert {
|
||||
u.Set(group.FieldRpmLimit, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// UpdateRpmLimit sets the "rpm_limit" field to the value that was provided on create.
|
||||
func (u *GroupUpsert) UpdateRpmLimit() *GroupUpsert {
|
||||
u.SetExcluded(group.FieldRpmLimit)
|
||||
return u
|
||||
}
|
||||
|
||||
// AddRpmLimit adds v to the "rpm_limit" field.
|
||||
func (u *GroupUpsert) AddRpmLimit(v int) *GroupUpsert {
|
||||
u.Add(group.FieldRpmLimit, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// UpdateNewValues updates the mutable fields using the new values that were set on create.
|
||||
// Using this option is equivalent to using:
|
||||
//
|
||||
@@ -1980,139 +1924,6 @@ func (u *GroupUpsertOne) ClearImagePrice4k() *GroupUpsertOne {
|
||||
})
|
||||
}
|
||||
|
||||
// SetSoraImagePrice360 sets the "sora_image_price_360" field.
|
||||
func (u *GroupUpsertOne) SetSoraImagePrice360(v float64) *GroupUpsertOne {
|
||||
return u.Update(func(s *GroupUpsert) {
|
||||
s.SetSoraImagePrice360(v)
|
||||
})
|
||||
}
|
||||
|
||||
// AddSoraImagePrice360 adds v to the "sora_image_price_360" field.
|
||||
func (u *GroupUpsertOne) AddSoraImagePrice360(v float64) *GroupUpsertOne {
|
||||
return u.Update(func(s *GroupUpsert) {
|
||||
s.AddSoraImagePrice360(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateSoraImagePrice360 sets the "sora_image_price_360" field to the value that was provided on create.
|
||||
func (u *GroupUpsertOne) UpdateSoraImagePrice360() *GroupUpsertOne {
|
||||
return u.Update(func(s *GroupUpsert) {
|
||||
s.UpdateSoraImagePrice360()
|
||||
})
|
||||
}
|
||||
|
||||
// ClearSoraImagePrice360 clears the value of the "sora_image_price_360" field.
|
||||
func (u *GroupUpsertOne) ClearSoraImagePrice360() *GroupUpsertOne {
|
||||
return u.Update(func(s *GroupUpsert) {
|
||||
s.ClearSoraImagePrice360()
|
||||
})
|
||||
}
|
||||
|
||||
// SetSoraImagePrice540 sets the "sora_image_price_540" field.
|
||||
func (u *GroupUpsertOne) SetSoraImagePrice540(v float64) *GroupUpsertOne {
|
||||
return u.Update(func(s *GroupUpsert) {
|
||||
s.SetSoraImagePrice540(v)
|
||||
})
|
||||
}
|
||||
|
||||
// AddSoraImagePrice540 adds v to the "sora_image_price_540" field.
|
||||
func (u *GroupUpsertOne) AddSoraImagePrice540(v float64) *GroupUpsertOne {
|
||||
return u.Update(func(s *GroupUpsert) {
|
||||
s.AddSoraImagePrice540(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateSoraImagePrice540 sets the "sora_image_price_540" field to the value that was provided on create.
|
||||
func (u *GroupUpsertOne) UpdateSoraImagePrice540() *GroupUpsertOne {
|
||||
return u.Update(func(s *GroupUpsert) {
|
||||
s.UpdateSoraImagePrice540()
|
||||
})
|
||||
}
|
||||
|
||||
// ClearSoraImagePrice540 clears the value of the "sora_image_price_540" field.
|
||||
func (u *GroupUpsertOne) ClearSoraImagePrice540() *GroupUpsertOne {
|
||||
return u.Update(func(s *GroupUpsert) {
|
||||
s.ClearSoraImagePrice540()
|
||||
})
|
||||
}
|
||||
|
||||
// SetSoraVideoPricePerRequest sets the "sora_video_price_per_request" field.
|
||||
func (u *GroupUpsertOne) SetSoraVideoPricePerRequest(v float64) *GroupUpsertOne {
|
||||
return u.Update(func(s *GroupUpsert) {
|
||||
s.SetSoraVideoPricePerRequest(v)
|
||||
})
|
||||
}
|
||||
|
||||
// AddSoraVideoPricePerRequest adds v to the "sora_video_price_per_request" field.
|
||||
func (u *GroupUpsertOne) AddSoraVideoPricePerRequest(v float64) *GroupUpsertOne {
|
||||
return u.Update(func(s *GroupUpsert) {
|
||||
s.AddSoraVideoPricePerRequest(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateSoraVideoPricePerRequest sets the "sora_video_price_per_request" field to the value that was provided on create.
|
||||
func (u *GroupUpsertOne) UpdateSoraVideoPricePerRequest() *GroupUpsertOne {
|
||||
return u.Update(func(s *GroupUpsert) {
|
||||
s.UpdateSoraVideoPricePerRequest()
|
||||
})
|
||||
}
|
||||
|
||||
// ClearSoraVideoPricePerRequest clears the value of the "sora_video_price_per_request" field.
|
||||
func (u *GroupUpsertOne) ClearSoraVideoPricePerRequest() *GroupUpsertOne {
|
||||
return u.Update(func(s *GroupUpsert) {
|
||||
s.ClearSoraVideoPricePerRequest()
|
||||
})
|
||||
}
|
||||
|
||||
// SetSoraVideoPricePerRequestHd sets the "sora_video_price_per_request_hd" field.
|
||||
func (u *GroupUpsertOne) SetSoraVideoPricePerRequestHd(v float64) *GroupUpsertOne {
|
||||
return u.Update(func(s *GroupUpsert) {
|
||||
s.SetSoraVideoPricePerRequestHd(v)
|
||||
})
|
||||
}
|
||||
|
||||
// AddSoraVideoPricePerRequestHd adds v to the "sora_video_price_per_request_hd" field.
|
||||
func (u *GroupUpsertOne) AddSoraVideoPricePerRequestHd(v float64) *GroupUpsertOne {
|
||||
return u.Update(func(s *GroupUpsert) {
|
||||
s.AddSoraVideoPricePerRequestHd(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateSoraVideoPricePerRequestHd sets the "sora_video_price_per_request_hd" field to the value that was provided on create.
|
||||
func (u *GroupUpsertOne) UpdateSoraVideoPricePerRequestHd() *GroupUpsertOne {
|
||||
return u.Update(func(s *GroupUpsert) {
|
||||
s.UpdateSoraVideoPricePerRequestHd()
|
||||
})
|
||||
}
|
||||
|
||||
// ClearSoraVideoPricePerRequestHd clears the value of the "sora_video_price_per_request_hd" field.
|
||||
func (u *GroupUpsertOne) ClearSoraVideoPricePerRequestHd() *GroupUpsertOne {
|
||||
return u.Update(func(s *GroupUpsert) {
|
||||
s.ClearSoraVideoPricePerRequestHd()
|
||||
})
|
||||
}
|
||||
|
||||
// SetSoraStorageQuotaBytes sets the "sora_storage_quota_bytes" field.
|
||||
func (u *GroupUpsertOne) SetSoraStorageQuotaBytes(v int64) *GroupUpsertOne {
|
||||
return u.Update(func(s *GroupUpsert) {
|
||||
s.SetSoraStorageQuotaBytes(v)
|
||||
})
|
||||
}
|
||||
|
||||
// AddSoraStorageQuotaBytes adds v to the "sora_storage_quota_bytes" field.
|
||||
func (u *GroupUpsertOne) AddSoraStorageQuotaBytes(v int64) *GroupUpsertOne {
|
||||
return u.Update(func(s *GroupUpsert) {
|
||||
s.AddSoraStorageQuotaBytes(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateSoraStorageQuotaBytes sets the "sora_storage_quota_bytes" field to the value that was provided on create.
|
||||
func (u *GroupUpsertOne) UpdateSoraStorageQuotaBytes() *GroupUpsertOne {
|
||||
return u.Update(func(s *GroupUpsert) {
|
||||
s.UpdateSoraStorageQuotaBytes()
|
||||
})
|
||||
}
|
||||
|
||||
// SetClaudeCodeOnly sets the "claude_code_only" field.
|
||||
func (u *GroupUpsertOne) SetClaudeCodeOnly(v bool) *GroupUpsertOne {
|
||||
return u.Update(func(s *GroupUpsert) {
|
||||
@@ -2281,6 +2092,34 @@ func (u *GroupUpsertOne) UpdateAllowMessagesDispatch() *GroupUpsertOne {
|
||||
})
|
||||
}
|
||||
|
||||
// SetRequireOauthOnly sets the "require_oauth_only" field.
|
||||
func (u *GroupUpsertOne) SetRequireOauthOnly(v bool) *GroupUpsertOne {
|
||||
return u.Update(func(s *GroupUpsert) {
|
||||
s.SetRequireOauthOnly(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateRequireOauthOnly sets the "require_oauth_only" field to the value that was provided on create.
|
||||
func (u *GroupUpsertOne) UpdateRequireOauthOnly() *GroupUpsertOne {
|
||||
return u.Update(func(s *GroupUpsert) {
|
||||
s.UpdateRequireOauthOnly()
|
||||
})
|
||||
}
|
||||
|
||||
// SetRequirePrivacySet sets the "require_privacy_set" field.
|
||||
func (u *GroupUpsertOne) SetRequirePrivacySet(v bool) *GroupUpsertOne {
|
||||
return u.Update(func(s *GroupUpsert) {
|
||||
s.SetRequirePrivacySet(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateRequirePrivacySet sets the "require_privacy_set" field to the value that was provided on create.
|
||||
func (u *GroupUpsertOne) UpdateRequirePrivacySet() *GroupUpsertOne {
|
||||
return u.Update(func(s *GroupUpsert) {
|
||||
s.UpdateRequirePrivacySet()
|
||||
})
|
||||
}
|
||||
|
||||
// SetDefaultMappedModel sets the "default_mapped_model" field.
|
||||
func (u *GroupUpsertOne) SetDefaultMappedModel(v string) *GroupUpsertOne {
|
||||
return u.Update(func(s *GroupUpsert) {
|
||||
@@ -2295,6 +2134,41 @@ func (u *GroupUpsertOne) UpdateDefaultMappedModel() *GroupUpsertOne {
|
||||
})
|
||||
}
|
||||
|
||||
// SetMessagesDispatchModelConfig sets the "messages_dispatch_model_config" field.
|
||||
func (u *GroupUpsertOne) SetMessagesDispatchModelConfig(v domain.OpenAIMessagesDispatchModelConfig) *GroupUpsertOne {
|
||||
return u.Update(func(s *GroupUpsert) {
|
||||
s.SetMessagesDispatchModelConfig(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateMessagesDispatchModelConfig sets the "messages_dispatch_model_config" field to the value that was provided on create.
|
||||
func (u *GroupUpsertOne) UpdateMessagesDispatchModelConfig() *GroupUpsertOne {
|
||||
return u.Update(func(s *GroupUpsert) {
|
||||
s.UpdateMessagesDispatchModelConfig()
|
||||
})
|
||||
}
|
||||
|
||||
// SetRpmLimit sets the "rpm_limit" field.
|
||||
func (u *GroupUpsertOne) SetRpmLimit(v int) *GroupUpsertOne {
|
||||
return u.Update(func(s *GroupUpsert) {
|
||||
s.SetRpmLimit(v)
|
||||
})
|
||||
}
|
||||
|
||||
// AddRpmLimit adds v to the "rpm_limit" field.
|
||||
func (u *GroupUpsertOne) AddRpmLimit(v int) *GroupUpsertOne {
|
||||
return u.Update(func(s *GroupUpsert) {
|
||||
s.AddRpmLimit(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateRpmLimit sets the "rpm_limit" field to the value that was provided on create.
|
||||
func (u *GroupUpsertOne) UpdateRpmLimit() *GroupUpsertOne {
|
||||
return u.Update(func(s *GroupUpsert) {
|
||||
s.UpdateRpmLimit()
|
||||
})
|
||||
}
|
||||
|
||||
// Exec executes the query.
|
||||
func (u *GroupUpsertOne) Exec(ctx context.Context) error {
|
||||
if len(u.create.conflict) == 0 {
|
||||
@@ -2842,139 +2716,6 @@ func (u *GroupUpsertBulk) ClearImagePrice4k() *GroupUpsertBulk {
|
||||
})
|
||||
}
|
||||
|
||||
// SetSoraImagePrice360 sets the "sora_image_price_360" field.
|
||||
func (u *GroupUpsertBulk) SetSoraImagePrice360(v float64) *GroupUpsertBulk {
|
||||
return u.Update(func(s *GroupUpsert) {
|
||||
s.SetSoraImagePrice360(v)
|
||||
})
|
||||
}
|
||||
|
||||
// AddSoraImagePrice360 adds v to the "sora_image_price_360" field.
|
||||
func (u *GroupUpsertBulk) AddSoraImagePrice360(v float64) *GroupUpsertBulk {
|
||||
return u.Update(func(s *GroupUpsert) {
|
||||
s.AddSoraImagePrice360(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateSoraImagePrice360 sets the "sora_image_price_360" field to the value that was provided on create.
|
||||
func (u *GroupUpsertBulk) UpdateSoraImagePrice360() *GroupUpsertBulk {
|
||||
return u.Update(func(s *GroupUpsert) {
|
||||
s.UpdateSoraImagePrice360()
|
||||
})
|
||||
}
|
||||
|
||||
// ClearSoraImagePrice360 clears the value of the "sora_image_price_360" field.
|
||||
func (u *GroupUpsertBulk) ClearSoraImagePrice360() *GroupUpsertBulk {
|
||||
return u.Update(func(s *GroupUpsert) {
|
||||
s.ClearSoraImagePrice360()
|
||||
})
|
||||
}
|
||||
|
||||
// SetSoraImagePrice540 sets the "sora_image_price_540" field.
|
||||
func (u *GroupUpsertBulk) SetSoraImagePrice540(v float64) *GroupUpsertBulk {
|
||||
return u.Update(func(s *GroupUpsert) {
|
||||
s.SetSoraImagePrice540(v)
|
||||
})
|
||||
}
|
||||
|
||||
// AddSoraImagePrice540 adds v to the "sora_image_price_540" field.
|
||||
func (u *GroupUpsertBulk) AddSoraImagePrice540(v float64) *GroupUpsertBulk {
|
||||
return u.Update(func(s *GroupUpsert) {
|
||||
s.AddSoraImagePrice540(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateSoraImagePrice540 sets the "sora_image_price_540" field to the value that was provided on create.
|
||||
func (u *GroupUpsertBulk) UpdateSoraImagePrice540() *GroupUpsertBulk {
|
||||
return u.Update(func(s *GroupUpsert) {
|
||||
s.UpdateSoraImagePrice540()
|
||||
})
|
||||
}
|
||||
|
||||
// ClearSoraImagePrice540 clears the value of the "sora_image_price_540" field.
|
||||
func (u *GroupUpsertBulk) ClearSoraImagePrice540() *GroupUpsertBulk {
|
||||
return u.Update(func(s *GroupUpsert) {
|
||||
s.ClearSoraImagePrice540()
|
||||
})
|
||||
}
|
||||
|
||||
// SetSoraVideoPricePerRequest sets the "sora_video_price_per_request" field.
|
||||
func (u *GroupUpsertBulk) SetSoraVideoPricePerRequest(v float64) *GroupUpsertBulk {
|
||||
return u.Update(func(s *GroupUpsert) {
|
||||
s.SetSoraVideoPricePerRequest(v)
|
||||
})
|
||||
}
|
||||
|
||||
// AddSoraVideoPricePerRequest adds v to the "sora_video_price_per_request" field.
|
||||
func (u *GroupUpsertBulk) AddSoraVideoPricePerRequest(v float64) *GroupUpsertBulk {
|
||||
return u.Update(func(s *GroupUpsert) {
|
||||
s.AddSoraVideoPricePerRequest(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateSoraVideoPricePerRequest sets the "sora_video_price_per_request" field to the value that was provided on create.
|
||||
func (u *GroupUpsertBulk) UpdateSoraVideoPricePerRequest() *GroupUpsertBulk {
|
||||
return u.Update(func(s *GroupUpsert) {
|
||||
s.UpdateSoraVideoPricePerRequest()
|
||||
})
|
||||
}
|
||||
|
||||
// ClearSoraVideoPricePerRequest clears the value of the "sora_video_price_per_request" field.
|
||||
func (u *GroupUpsertBulk) ClearSoraVideoPricePerRequest() *GroupUpsertBulk {
|
||||
return u.Update(func(s *GroupUpsert) {
|
||||
s.ClearSoraVideoPricePerRequest()
|
||||
})
|
||||
}
|
||||
|
||||
// SetSoraVideoPricePerRequestHd sets the "sora_video_price_per_request_hd" field.
|
||||
func (u *GroupUpsertBulk) SetSoraVideoPricePerRequestHd(v float64) *GroupUpsertBulk {
|
||||
return u.Update(func(s *GroupUpsert) {
|
||||
s.SetSoraVideoPricePerRequestHd(v)
|
||||
})
|
||||
}
|
||||
|
||||
// AddSoraVideoPricePerRequestHd adds v to the "sora_video_price_per_request_hd" field.
|
||||
func (u *GroupUpsertBulk) AddSoraVideoPricePerRequestHd(v float64) *GroupUpsertBulk {
|
||||
return u.Update(func(s *GroupUpsert) {
|
||||
s.AddSoraVideoPricePerRequestHd(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateSoraVideoPricePerRequestHd sets the "sora_video_price_per_request_hd" field to the value that was provided on create.
|
||||
func (u *GroupUpsertBulk) UpdateSoraVideoPricePerRequestHd() *GroupUpsertBulk {
|
||||
return u.Update(func(s *GroupUpsert) {
|
||||
s.UpdateSoraVideoPricePerRequestHd()
|
||||
})
|
||||
}
|
||||
|
||||
// ClearSoraVideoPricePerRequestHd clears the value of the "sora_video_price_per_request_hd" field.
|
||||
func (u *GroupUpsertBulk) ClearSoraVideoPricePerRequestHd() *GroupUpsertBulk {
|
||||
return u.Update(func(s *GroupUpsert) {
|
||||
s.ClearSoraVideoPricePerRequestHd()
|
||||
})
|
||||
}
|
||||
|
||||
// SetSoraStorageQuotaBytes sets the "sora_storage_quota_bytes" field.
|
||||
func (u *GroupUpsertBulk) SetSoraStorageQuotaBytes(v int64) *GroupUpsertBulk {
|
||||
return u.Update(func(s *GroupUpsert) {
|
||||
s.SetSoraStorageQuotaBytes(v)
|
||||
})
|
||||
}
|
||||
|
||||
// AddSoraStorageQuotaBytes adds v to the "sora_storage_quota_bytes" field.
|
||||
func (u *GroupUpsertBulk) AddSoraStorageQuotaBytes(v int64) *GroupUpsertBulk {
|
||||
return u.Update(func(s *GroupUpsert) {
|
||||
s.AddSoraStorageQuotaBytes(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateSoraStorageQuotaBytes sets the "sora_storage_quota_bytes" field to the value that was provided on create.
|
||||
func (u *GroupUpsertBulk) UpdateSoraStorageQuotaBytes() *GroupUpsertBulk {
|
||||
return u.Update(func(s *GroupUpsert) {
|
||||
s.UpdateSoraStorageQuotaBytes()
|
||||
})
|
||||
}
|
||||
|
||||
// SetClaudeCodeOnly sets the "claude_code_only" field.
|
||||
func (u *GroupUpsertBulk) SetClaudeCodeOnly(v bool) *GroupUpsertBulk {
|
||||
return u.Update(func(s *GroupUpsert) {
|
||||
@@ -3143,6 +2884,34 @@ func (u *GroupUpsertBulk) UpdateAllowMessagesDispatch() *GroupUpsertBulk {
|
||||
})
|
||||
}
|
||||
|
||||
// SetRequireOauthOnly sets the "require_oauth_only" field.
|
||||
func (u *GroupUpsertBulk) SetRequireOauthOnly(v bool) *GroupUpsertBulk {
|
||||
return u.Update(func(s *GroupUpsert) {
|
||||
s.SetRequireOauthOnly(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateRequireOauthOnly sets the "require_oauth_only" field to the value that was provided on create.
|
||||
func (u *GroupUpsertBulk) UpdateRequireOauthOnly() *GroupUpsertBulk {
|
||||
return u.Update(func(s *GroupUpsert) {
|
||||
s.UpdateRequireOauthOnly()
|
||||
})
|
||||
}
|
||||
|
||||
// SetRequirePrivacySet sets the "require_privacy_set" field.
|
||||
func (u *GroupUpsertBulk) SetRequirePrivacySet(v bool) *GroupUpsertBulk {
|
||||
return u.Update(func(s *GroupUpsert) {
|
||||
s.SetRequirePrivacySet(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateRequirePrivacySet sets the "require_privacy_set" field to the value that was provided on create.
|
||||
func (u *GroupUpsertBulk) UpdateRequirePrivacySet() *GroupUpsertBulk {
|
||||
return u.Update(func(s *GroupUpsert) {
|
||||
s.UpdateRequirePrivacySet()
|
||||
})
|
||||
}
|
||||
|
||||
// SetDefaultMappedModel sets the "default_mapped_model" field.
|
||||
func (u *GroupUpsertBulk) SetDefaultMappedModel(v string) *GroupUpsertBulk {
|
||||
return u.Update(func(s *GroupUpsert) {
|
||||
@@ -3157,6 +2926,41 @@ func (u *GroupUpsertBulk) UpdateDefaultMappedModel() *GroupUpsertBulk {
|
||||
})
|
||||
}
|
||||
|
||||
// SetMessagesDispatchModelConfig sets the "messages_dispatch_model_config" field.
|
||||
func (u *GroupUpsertBulk) SetMessagesDispatchModelConfig(v domain.OpenAIMessagesDispatchModelConfig) *GroupUpsertBulk {
|
||||
return u.Update(func(s *GroupUpsert) {
|
||||
s.SetMessagesDispatchModelConfig(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateMessagesDispatchModelConfig sets the "messages_dispatch_model_config" field to the value that was provided on create.
|
||||
func (u *GroupUpsertBulk) UpdateMessagesDispatchModelConfig() *GroupUpsertBulk {
|
||||
return u.Update(func(s *GroupUpsert) {
|
||||
s.UpdateMessagesDispatchModelConfig()
|
||||
})
|
||||
}
|
||||
|
||||
// SetRpmLimit sets the "rpm_limit" field.
|
||||
func (u *GroupUpsertBulk) SetRpmLimit(v int) *GroupUpsertBulk {
|
||||
return u.Update(func(s *GroupUpsert) {
|
||||
s.SetRpmLimit(v)
|
||||
})
|
||||
}
|
||||
|
||||
// AddRpmLimit adds v to the "rpm_limit" field.
|
||||
func (u *GroupUpsertBulk) AddRpmLimit(v int) *GroupUpsertBulk {
|
||||
return u.Update(func(s *GroupUpsert) {
|
||||
s.AddRpmLimit(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateRpmLimit sets the "rpm_limit" field to the value that was provided on create.
|
||||
func (u *GroupUpsertBulk) UpdateRpmLimit() *GroupUpsertBulk {
|
||||
return u.Update(func(s *GroupUpsert) {
|
||||
s.UpdateRpmLimit()
|
||||
})
|
||||
}
|
||||
|
||||
// Exec executes the query.
|
||||
func (u *GroupUpsertBulk) Exec(ctx context.Context) error {
|
||||
if u.create.err != nil {
|
||||
|
||||
@@ -20,6 +20,7 @@ import (
|
||||
"github.com/Wei-Shaw/sub2api/ent/usagelog"
|
||||
"github.com/Wei-Shaw/sub2api/ent/user"
|
||||
"github.com/Wei-Shaw/sub2api/ent/usersubscription"
|
||||
"github.com/Wei-Shaw/sub2api/internal/domain"
|
||||
)
|
||||
|
||||
// GroupUpdate is the builder for updating Group entities.
|
||||
@@ -355,135 +356,6 @@ func (_u *GroupUpdate) ClearImagePrice4k() *GroupUpdate {
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetSoraImagePrice360 sets the "sora_image_price_360" field.
|
||||
func (_u *GroupUpdate) SetSoraImagePrice360(v float64) *GroupUpdate {
|
||||
_u.mutation.ResetSoraImagePrice360()
|
||||
_u.mutation.SetSoraImagePrice360(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableSoraImagePrice360 sets the "sora_image_price_360" field if the given value is not nil.
|
||||
func (_u *GroupUpdate) SetNillableSoraImagePrice360(v *float64) *GroupUpdate {
|
||||
if v != nil {
|
||||
_u.SetSoraImagePrice360(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// AddSoraImagePrice360 adds value to the "sora_image_price_360" field.
|
||||
func (_u *GroupUpdate) AddSoraImagePrice360(v float64) *GroupUpdate {
|
||||
_u.mutation.AddSoraImagePrice360(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// ClearSoraImagePrice360 clears the value of the "sora_image_price_360" field.
|
||||
func (_u *GroupUpdate) ClearSoraImagePrice360() *GroupUpdate {
|
||||
_u.mutation.ClearSoraImagePrice360()
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetSoraImagePrice540 sets the "sora_image_price_540" field.
|
||||
func (_u *GroupUpdate) SetSoraImagePrice540(v float64) *GroupUpdate {
|
||||
_u.mutation.ResetSoraImagePrice540()
|
||||
_u.mutation.SetSoraImagePrice540(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableSoraImagePrice540 sets the "sora_image_price_540" field if the given value is not nil.
|
||||
func (_u *GroupUpdate) SetNillableSoraImagePrice540(v *float64) *GroupUpdate {
|
||||
if v != nil {
|
||||
_u.SetSoraImagePrice540(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// AddSoraImagePrice540 adds value to the "sora_image_price_540" field.
|
||||
func (_u *GroupUpdate) AddSoraImagePrice540(v float64) *GroupUpdate {
|
||||
_u.mutation.AddSoraImagePrice540(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// ClearSoraImagePrice540 clears the value of the "sora_image_price_540" field.
|
||||
func (_u *GroupUpdate) ClearSoraImagePrice540() *GroupUpdate {
|
||||
_u.mutation.ClearSoraImagePrice540()
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetSoraVideoPricePerRequest sets the "sora_video_price_per_request" field.
|
||||
func (_u *GroupUpdate) SetSoraVideoPricePerRequest(v float64) *GroupUpdate {
|
||||
_u.mutation.ResetSoraVideoPricePerRequest()
|
||||
_u.mutation.SetSoraVideoPricePerRequest(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableSoraVideoPricePerRequest sets the "sora_video_price_per_request" field if the given value is not nil.
|
||||
func (_u *GroupUpdate) SetNillableSoraVideoPricePerRequest(v *float64) *GroupUpdate {
|
||||
if v != nil {
|
||||
_u.SetSoraVideoPricePerRequest(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// AddSoraVideoPricePerRequest adds value to the "sora_video_price_per_request" field.
|
||||
func (_u *GroupUpdate) AddSoraVideoPricePerRequest(v float64) *GroupUpdate {
|
||||
_u.mutation.AddSoraVideoPricePerRequest(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// ClearSoraVideoPricePerRequest clears the value of the "sora_video_price_per_request" field.
|
||||
func (_u *GroupUpdate) ClearSoraVideoPricePerRequest() *GroupUpdate {
|
||||
_u.mutation.ClearSoraVideoPricePerRequest()
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetSoraVideoPricePerRequestHd sets the "sora_video_price_per_request_hd" field.
|
||||
func (_u *GroupUpdate) SetSoraVideoPricePerRequestHd(v float64) *GroupUpdate {
|
||||
_u.mutation.ResetSoraVideoPricePerRequestHd()
|
||||
_u.mutation.SetSoraVideoPricePerRequestHd(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableSoraVideoPricePerRequestHd sets the "sora_video_price_per_request_hd" field if the given value is not nil.
|
||||
func (_u *GroupUpdate) SetNillableSoraVideoPricePerRequestHd(v *float64) *GroupUpdate {
|
||||
if v != nil {
|
||||
_u.SetSoraVideoPricePerRequestHd(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// AddSoraVideoPricePerRequestHd adds value to the "sora_video_price_per_request_hd" field.
|
||||
func (_u *GroupUpdate) AddSoraVideoPricePerRequestHd(v float64) *GroupUpdate {
|
||||
_u.mutation.AddSoraVideoPricePerRequestHd(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// ClearSoraVideoPricePerRequestHd clears the value of the "sora_video_price_per_request_hd" field.
|
||||
func (_u *GroupUpdate) ClearSoraVideoPricePerRequestHd() *GroupUpdate {
|
||||
_u.mutation.ClearSoraVideoPricePerRequestHd()
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetSoraStorageQuotaBytes sets the "sora_storage_quota_bytes" field.
|
||||
func (_u *GroupUpdate) SetSoraStorageQuotaBytes(v int64) *GroupUpdate {
|
||||
_u.mutation.ResetSoraStorageQuotaBytes()
|
||||
_u.mutation.SetSoraStorageQuotaBytes(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableSoraStorageQuotaBytes sets the "sora_storage_quota_bytes" field if the given value is not nil.
|
||||
func (_u *GroupUpdate) SetNillableSoraStorageQuotaBytes(v *int64) *GroupUpdate {
|
||||
if v != nil {
|
||||
_u.SetSoraStorageQuotaBytes(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// AddSoraStorageQuotaBytes adds value to the "sora_storage_quota_bytes" field.
|
||||
func (_u *GroupUpdate) AddSoraStorageQuotaBytes(v int64) *GroupUpdate {
|
||||
_u.mutation.AddSoraStorageQuotaBytes(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetClaudeCodeOnly sets the "claude_code_only" field.
|
||||
func (_u *GroupUpdate) SetClaudeCodeOnly(v bool) *GroupUpdate {
|
||||
_u.mutation.SetClaudeCodeOnly(v)
|
||||
@@ -639,6 +511,34 @@ func (_u *GroupUpdate) SetNillableAllowMessagesDispatch(v *bool) *GroupUpdate {
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetRequireOauthOnly sets the "require_oauth_only" field.
|
||||
func (_u *GroupUpdate) SetRequireOauthOnly(v bool) *GroupUpdate {
|
||||
_u.mutation.SetRequireOauthOnly(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableRequireOauthOnly sets the "require_oauth_only" field if the given value is not nil.
|
||||
func (_u *GroupUpdate) SetNillableRequireOauthOnly(v *bool) *GroupUpdate {
|
||||
if v != nil {
|
||||
_u.SetRequireOauthOnly(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetRequirePrivacySet sets the "require_privacy_set" field.
|
||||
func (_u *GroupUpdate) SetRequirePrivacySet(v bool) *GroupUpdate {
|
||||
_u.mutation.SetRequirePrivacySet(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableRequirePrivacySet sets the "require_privacy_set" field if the given value is not nil.
|
||||
func (_u *GroupUpdate) SetNillableRequirePrivacySet(v *bool) *GroupUpdate {
|
||||
if v != nil {
|
||||
_u.SetRequirePrivacySet(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetDefaultMappedModel sets the "default_mapped_model" field.
|
||||
func (_u *GroupUpdate) SetDefaultMappedModel(v string) *GroupUpdate {
|
||||
_u.mutation.SetDefaultMappedModel(v)
|
||||
@@ -653,6 +553,41 @@ func (_u *GroupUpdate) SetNillableDefaultMappedModel(v *string) *GroupUpdate {
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetMessagesDispatchModelConfig sets the "messages_dispatch_model_config" field.
|
||||
func (_u *GroupUpdate) SetMessagesDispatchModelConfig(v domain.OpenAIMessagesDispatchModelConfig) *GroupUpdate {
|
||||
_u.mutation.SetMessagesDispatchModelConfig(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableMessagesDispatchModelConfig sets the "messages_dispatch_model_config" field if the given value is not nil.
|
||||
func (_u *GroupUpdate) SetNillableMessagesDispatchModelConfig(v *domain.OpenAIMessagesDispatchModelConfig) *GroupUpdate {
|
||||
if v != nil {
|
||||
_u.SetMessagesDispatchModelConfig(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetRpmLimit sets the "rpm_limit" field.
|
||||
func (_u *GroupUpdate) SetRpmLimit(v int) *GroupUpdate {
|
||||
_u.mutation.ResetRpmLimit()
|
||||
_u.mutation.SetRpmLimit(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableRpmLimit sets the "rpm_limit" field if the given value is not nil.
|
||||
func (_u *GroupUpdate) SetNillableRpmLimit(v *int) *GroupUpdate {
|
||||
if v != nil {
|
||||
_u.SetRpmLimit(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// AddRpmLimit adds value to the "rpm_limit" field.
|
||||
func (_u *GroupUpdate) AddRpmLimit(v int) *GroupUpdate {
|
||||
_u.mutation.AddRpmLimit(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// AddAPIKeyIDs adds the "api_keys" edge to the APIKey entity by IDs.
|
||||
func (_u *GroupUpdate) AddAPIKeyIDs(ids ...int64) *GroupUpdate {
|
||||
_u.mutation.AddAPIKeyIDs(ids...)
|
||||
@@ -1054,48 +989,6 @@ func (_u *GroupUpdate) sqlSave(ctx context.Context) (_node int, err error) {
|
||||
if _u.mutation.ImagePrice4kCleared() {
|
||||
_spec.ClearField(group.FieldImagePrice4k, field.TypeFloat64)
|
||||
}
|
||||
if value, ok := _u.mutation.SoraImagePrice360(); ok {
|
||||
_spec.SetField(group.FieldSoraImagePrice360, field.TypeFloat64, value)
|
||||
}
|
||||
if value, ok := _u.mutation.AddedSoraImagePrice360(); ok {
|
||||
_spec.AddField(group.FieldSoraImagePrice360, field.TypeFloat64, value)
|
||||
}
|
||||
if _u.mutation.SoraImagePrice360Cleared() {
|
||||
_spec.ClearField(group.FieldSoraImagePrice360, field.TypeFloat64)
|
||||
}
|
||||
if value, ok := _u.mutation.SoraImagePrice540(); ok {
|
||||
_spec.SetField(group.FieldSoraImagePrice540, field.TypeFloat64, value)
|
||||
}
|
||||
if value, ok := _u.mutation.AddedSoraImagePrice540(); ok {
|
||||
_spec.AddField(group.FieldSoraImagePrice540, field.TypeFloat64, value)
|
||||
}
|
||||
if _u.mutation.SoraImagePrice540Cleared() {
|
||||
_spec.ClearField(group.FieldSoraImagePrice540, field.TypeFloat64)
|
||||
}
|
||||
if value, ok := _u.mutation.SoraVideoPricePerRequest(); ok {
|
||||
_spec.SetField(group.FieldSoraVideoPricePerRequest, field.TypeFloat64, value)
|
||||
}
|
||||
if value, ok := _u.mutation.AddedSoraVideoPricePerRequest(); ok {
|
||||
_spec.AddField(group.FieldSoraVideoPricePerRequest, field.TypeFloat64, value)
|
||||
}
|
||||
if _u.mutation.SoraVideoPricePerRequestCleared() {
|
||||
_spec.ClearField(group.FieldSoraVideoPricePerRequest, field.TypeFloat64)
|
||||
}
|
||||
if value, ok := _u.mutation.SoraVideoPricePerRequestHd(); ok {
|
||||
_spec.SetField(group.FieldSoraVideoPricePerRequestHd, field.TypeFloat64, value)
|
||||
}
|
||||
if value, ok := _u.mutation.AddedSoraVideoPricePerRequestHd(); ok {
|
||||
_spec.AddField(group.FieldSoraVideoPricePerRequestHd, field.TypeFloat64, value)
|
||||
}
|
||||
if _u.mutation.SoraVideoPricePerRequestHdCleared() {
|
||||
_spec.ClearField(group.FieldSoraVideoPricePerRequestHd, field.TypeFloat64)
|
||||
}
|
||||
if value, ok := _u.mutation.SoraStorageQuotaBytes(); ok {
|
||||
_spec.SetField(group.FieldSoraStorageQuotaBytes, field.TypeInt64, value)
|
||||
}
|
||||
if value, ok := _u.mutation.AddedSoraStorageQuotaBytes(); ok {
|
||||
_spec.AddField(group.FieldSoraStorageQuotaBytes, field.TypeInt64, value)
|
||||
}
|
||||
if value, ok := _u.mutation.ClaudeCodeOnly(); ok {
|
||||
_spec.SetField(group.FieldClaudeCodeOnly, field.TypeBool, value)
|
||||
}
|
||||
@@ -1146,9 +1039,24 @@ func (_u *GroupUpdate) sqlSave(ctx context.Context) (_node int, err error) {
|
||||
if value, ok := _u.mutation.AllowMessagesDispatch(); ok {
|
||||
_spec.SetField(group.FieldAllowMessagesDispatch, field.TypeBool, value)
|
||||
}
|
||||
if value, ok := _u.mutation.RequireOauthOnly(); ok {
|
||||
_spec.SetField(group.FieldRequireOauthOnly, field.TypeBool, value)
|
||||
}
|
||||
if value, ok := _u.mutation.RequirePrivacySet(); ok {
|
||||
_spec.SetField(group.FieldRequirePrivacySet, field.TypeBool, value)
|
||||
}
|
||||
if value, ok := _u.mutation.DefaultMappedModel(); ok {
|
||||
_spec.SetField(group.FieldDefaultMappedModel, field.TypeString, value)
|
||||
}
|
||||
if value, ok := _u.mutation.MessagesDispatchModelConfig(); ok {
|
||||
_spec.SetField(group.FieldMessagesDispatchModelConfig, field.TypeJSON, value)
|
||||
}
|
||||
if value, ok := _u.mutation.RpmLimit(); ok {
|
||||
_spec.SetField(group.FieldRpmLimit, field.TypeInt, value)
|
||||
}
|
||||
if value, ok := _u.mutation.AddedRpmLimit(); ok {
|
||||
_spec.AddField(group.FieldRpmLimit, field.TypeInt, value)
|
||||
}
|
||||
if _u.mutation.APIKeysCleared() {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
Rel: sqlgraph.O2M,
|
||||
@@ -1783,135 +1691,6 @@ func (_u *GroupUpdateOne) ClearImagePrice4k() *GroupUpdateOne {
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetSoraImagePrice360 sets the "sora_image_price_360" field.
|
||||
func (_u *GroupUpdateOne) SetSoraImagePrice360(v float64) *GroupUpdateOne {
|
||||
_u.mutation.ResetSoraImagePrice360()
|
||||
_u.mutation.SetSoraImagePrice360(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableSoraImagePrice360 sets the "sora_image_price_360" field if the given value is not nil.
|
||||
func (_u *GroupUpdateOne) SetNillableSoraImagePrice360(v *float64) *GroupUpdateOne {
|
||||
if v != nil {
|
||||
_u.SetSoraImagePrice360(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// AddSoraImagePrice360 adds value to the "sora_image_price_360" field.
|
||||
func (_u *GroupUpdateOne) AddSoraImagePrice360(v float64) *GroupUpdateOne {
|
||||
_u.mutation.AddSoraImagePrice360(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// ClearSoraImagePrice360 clears the value of the "sora_image_price_360" field.
|
||||
func (_u *GroupUpdateOne) ClearSoraImagePrice360() *GroupUpdateOne {
|
||||
_u.mutation.ClearSoraImagePrice360()
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetSoraImagePrice540 sets the "sora_image_price_540" field.
|
||||
func (_u *GroupUpdateOne) SetSoraImagePrice540(v float64) *GroupUpdateOne {
|
||||
_u.mutation.ResetSoraImagePrice540()
|
||||
_u.mutation.SetSoraImagePrice540(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableSoraImagePrice540 sets the "sora_image_price_540" field if the given value is not nil.
|
||||
func (_u *GroupUpdateOne) SetNillableSoraImagePrice540(v *float64) *GroupUpdateOne {
|
||||
if v != nil {
|
||||
_u.SetSoraImagePrice540(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// AddSoraImagePrice540 adds value to the "sora_image_price_540" field.
|
||||
func (_u *GroupUpdateOne) AddSoraImagePrice540(v float64) *GroupUpdateOne {
|
||||
_u.mutation.AddSoraImagePrice540(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// ClearSoraImagePrice540 clears the value of the "sora_image_price_540" field.
|
||||
func (_u *GroupUpdateOne) ClearSoraImagePrice540() *GroupUpdateOne {
|
||||
_u.mutation.ClearSoraImagePrice540()
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetSoraVideoPricePerRequest sets the "sora_video_price_per_request" field.
|
||||
func (_u *GroupUpdateOne) SetSoraVideoPricePerRequest(v float64) *GroupUpdateOne {
|
||||
_u.mutation.ResetSoraVideoPricePerRequest()
|
||||
_u.mutation.SetSoraVideoPricePerRequest(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableSoraVideoPricePerRequest sets the "sora_video_price_per_request" field if the given value is not nil.
|
||||
func (_u *GroupUpdateOne) SetNillableSoraVideoPricePerRequest(v *float64) *GroupUpdateOne {
|
||||
if v != nil {
|
||||
_u.SetSoraVideoPricePerRequest(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// AddSoraVideoPricePerRequest adds value to the "sora_video_price_per_request" field.
|
||||
func (_u *GroupUpdateOne) AddSoraVideoPricePerRequest(v float64) *GroupUpdateOne {
|
||||
_u.mutation.AddSoraVideoPricePerRequest(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// ClearSoraVideoPricePerRequest clears the value of the "sora_video_price_per_request" field.
|
||||
func (_u *GroupUpdateOne) ClearSoraVideoPricePerRequest() *GroupUpdateOne {
|
||||
_u.mutation.ClearSoraVideoPricePerRequest()
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetSoraVideoPricePerRequestHd sets the "sora_video_price_per_request_hd" field.
|
||||
func (_u *GroupUpdateOne) SetSoraVideoPricePerRequestHd(v float64) *GroupUpdateOne {
|
||||
_u.mutation.ResetSoraVideoPricePerRequestHd()
|
||||
_u.mutation.SetSoraVideoPricePerRequestHd(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableSoraVideoPricePerRequestHd sets the "sora_video_price_per_request_hd" field if the given value is not nil.
|
||||
func (_u *GroupUpdateOne) SetNillableSoraVideoPricePerRequestHd(v *float64) *GroupUpdateOne {
|
||||
if v != nil {
|
||||
_u.SetSoraVideoPricePerRequestHd(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// AddSoraVideoPricePerRequestHd adds value to the "sora_video_price_per_request_hd" field.
|
||||
func (_u *GroupUpdateOne) AddSoraVideoPricePerRequestHd(v float64) *GroupUpdateOne {
|
||||
_u.mutation.AddSoraVideoPricePerRequestHd(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// ClearSoraVideoPricePerRequestHd clears the value of the "sora_video_price_per_request_hd" field.
|
||||
func (_u *GroupUpdateOne) ClearSoraVideoPricePerRequestHd() *GroupUpdateOne {
|
||||
_u.mutation.ClearSoraVideoPricePerRequestHd()
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetSoraStorageQuotaBytes sets the "sora_storage_quota_bytes" field.
|
||||
func (_u *GroupUpdateOne) SetSoraStorageQuotaBytes(v int64) *GroupUpdateOne {
|
||||
_u.mutation.ResetSoraStorageQuotaBytes()
|
||||
_u.mutation.SetSoraStorageQuotaBytes(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableSoraStorageQuotaBytes sets the "sora_storage_quota_bytes" field if the given value is not nil.
|
||||
func (_u *GroupUpdateOne) SetNillableSoraStorageQuotaBytes(v *int64) *GroupUpdateOne {
|
||||
if v != nil {
|
||||
_u.SetSoraStorageQuotaBytes(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// AddSoraStorageQuotaBytes adds value to the "sora_storage_quota_bytes" field.
|
||||
func (_u *GroupUpdateOne) AddSoraStorageQuotaBytes(v int64) *GroupUpdateOne {
|
||||
_u.mutation.AddSoraStorageQuotaBytes(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetClaudeCodeOnly sets the "claude_code_only" field.
|
||||
func (_u *GroupUpdateOne) SetClaudeCodeOnly(v bool) *GroupUpdateOne {
|
||||
_u.mutation.SetClaudeCodeOnly(v)
|
||||
@@ -2067,6 +1846,34 @@ func (_u *GroupUpdateOne) SetNillableAllowMessagesDispatch(v *bool) *GroupUpdate
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetRequireOauthOnly sets the "require_oauth_only" field.
|
||||
func (_u *GroupUpdateOne) SetRequireOauthOnly(v bool) *GroupUpdateOne {
|
||||
_u.mutation.SetRequireOauthOnly(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableRequireOauthOnly sets the "require_oauth_only" field if the given value is not nil.
|
||||
func (_u *GroupUpdateOne) SetNillableRequireOauthOnly(v *bool) *GroupUpdateOne {
|
||||
if v != nil {
|
||||
_u.SetRequireOauthOnly(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetRequirePrivacySet sets the "require_privacy_set" field.
|
||||
func (_u *GroupUpdateOne) SetRequirePrivacySet(v bool) *GroupUpdateOne {
|
||||
_u.mutation.SetRequirePrivacySet(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableRequirePrivacySet sets the "require_privacy_set" field if the given value is not nil.
|
||||
func (_u *GroupUpdateOne) SetNillableRequirePrivacySet(v *bool) *GroupUpdateOne {
|
||||
if v != nil {
|
||||
_u.SetRequirePrivacySet(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetDefaultMappedModel sets the "default_mapped_model" field.
|
||||
func (_u *GroupUpdateOne) SetDefaultMappedModel(v string) *GroupUpdateOne {
|
||||
_u.mutation.SetDefaultMappedModel(v)
|
||||
@@ -2081,6 +1888,41 @@ func (_u *GroupUpdateOne) SetNillableDefaultMappedModel(v *string) *GroupUpdateO
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetMessagesDispatchModelConfig sets the "messages_dispatch_model_config" field.
|
||||
func (_u *GroupUpdateOne) SetMessagesDispatchModelConfig(v domain.OpenAIMessagesDispatchModelConfig) *GroupUpdateOne {
|
||||
_u.mutation.SetMessagesDispatchModelConfig(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableMessagesDispatchModelConfig sets the "messages_dispatch_model_config" field if the given value is not nil.
|
||||
func (_u *GroupUpdateOne) SetNillableMessagesDispatchModelConfig(v *domain.OpenAIMessagesDispatchModelConfig) *GroupUpdateOne {
|
||||
if v != nil {
|
||||
_u.SetMessagesDispatchModelConfig(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetRpmLimit sets the "rpm_limit" field.
|
||||
func (_u *GroupUpdateOne) SetRpmLimit(v int) *GroupUpdateOne {
|
||||
_u.mutation.ResetRpmLimit()
|
||||
_u.mutation.SetRpmLimit(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableRpmLimit sets the "rpm_limit" field if the given value is not nil.
|
||||
func (_u *GroupUpdateOne) SetNillableRpmLimit(v *int) *GroupUpdateOne {
|
||||
if v != nil {
|
||||
_u.SetRpmLimit(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// AddRpmLimit adds value to the "rpm_limit" field.
|
||||
func (_u *GroupUpdateOne) AddRpmLimit(v int) *GroupUpdateOne {
|
||||
_u.mutation.AddRpmLimit(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// AddAPIKeyIDs adds the "api_keys" edge to the APIKey entity by IDs.
|
||||
func (_u *GroupUpdateOne) AddAPIKeyIDs(ids ...int64) *GroupUpdateOne {
|
||||
_u.mutation.AddAPIKeyIDs(ids...)
|
||||
@@ -2512,48 +2354,6 @@ func (_u *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error)
|
||||
if _u.mutation.ImagePrice4kCleared() {
|
||||
_spec.ClearField(group.FieldImagePrice4k, field.TypeFloat64)
|
||||
}
|
||||
if value, ok := _u.mutation.SoraImagePrice360(); ok {
|
||||
_spec.SetField(group.FieldSoraImagePrice360, field.TypeFloat64, value)
|
||||
}
|
||||
if value, ok := _u.mutation.AddedSoraImagePrice360(); ok {
|
||||
_spec.AddField(group.FieldSoraImagePrice360, field.TypeFloat64, value)
|
||||
}
|
||||
if _u.mutation.SoraImagePrice360Cleared() {
|
||||
_spec.ClearField(group.FieldSoraImagePrice360, field.TypeFloat64)
|
||||
}
|
||||
if value, ok := _u.mutation.SoraImagePrice540(); ok {
|
||||
_spec.SetField(group.FieldSoraImagePrice540, field.TypeFloat64, value)
|
||||
}
|
||||
if value, ok := _u.mutation.AddedSoraImagePrice540(); ok {
|
||||
_spec.AddField(group.FieldSoraImagePrice540, field.TypeFloat64, value)
|
||||
}
|
||||
if _u.mutation.SoraImagePrice540Cleared() {
|
||||
_spec.ClearField(group.FieldSoraImagePrice540, field.TypeFloat64)
|
||||
}
|
||||
if value, ok := _u.mutation.SoraVideoPricePerRequest(); ok {
|
||||
_spec.SetField(group.FieldSoraVideoPricePerRequest, field.TypeFloat64, value)
|
||||
}
|
||||
if value, ok := _u.mutation.AddedSoraVideoPricePerRequest(); ok {
|
||||
_spec.AddField(group.FieldSoraVideoPricePerRequest, field.TypeFloat64, value)
|
||||
}
|
||||
if _u.mutation.SoraVideoPricePerRequestCleared() {
|
||||
_spec.ClearField(group.FieldSoraVideoPricePerRequest, field.TypeFloat64)
|
||||
}
|
||||
if value, ok := _u.mutation.SoraVideoPricePerRequestHd(); ok {
|
||||
_spec.SetField(group.FieldSoraVideoPricePerRequestHd, field.TypeFloat64, value)
|
||||
}
|
||||
if value, ok := _u.mutation.AddedSoraVideoPricePerRequestHd(); ok {
|
||||
_spec.AddField(group.FieldSoraVideoPricePerRequestHd, field.TypeFloat64, value)
|
||||
}
|
||||
if _u.mutation.SoraVideoPricePerRequestHdCleared() {
|
||||
_spec.ClearField(group.FieldSoraVideoPricePerRequestHd, field.TypeFloat64)
|
||||
}
|
||||
if value, ok := _u.mutation.SoraStorageQuotaBytes(); ok {
|
||||
_spec.SetField(group.FieldSoraStorageQuotaBytes, field.TypeInt64, value)
|
||||
}
|
||||
if value, ok := _u.mutation.AddedSoraStorageQuotaBytes(); ok {
|
||||
_spec.AddField(group.FieldSoraStorageQuotaBytes, field.TypeInt64, value)
|
||||
}
|
||||
if value, ok := _u.mutation.ClaudeCodeOnly(); ok {
|
||||
_spec.SetField(group.FieldClaudeCodeOnly, field.TypeBool, value)
|
||||
}
|
||||
@@ -2604,9 +2404,24 @@ func (_u *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error)
|
||||
if value, ok := _u.mutation.AllowMessagesDispatch(); ok {
|
||||
_spec.SetField(group.FieldAllowMessagesDispatch, field.TypeBool, value)
|
||||
}
|
||||
if value, ok := _u.mutation.RequireOauthOnly(); ok {
|
||||
_spec.SetField(group.FieldRequireOauthOnly, field.TypeBool, value)
|
||||
}
|
||||
if value, ok := _u.mutation.RequirePrivacySet(); ok {
|
||||
_spec.SetField(group.FieldRequirePrivacySet, field.TypeBool, value)
|
||||
}
|
||||
if value, ok := _u.mutation.DefaultMappedModel(); ok {
|
||||
_spec.SetField(group.FieldDefaultMappedModel, field.TypeString, value)
|
||||
}
|
||||
if value, ok := _u.mutation.MessagesDispatchModelConfig(); ok {
|
||||
_spec.SetField(group.FieldMessagesDispatchModelConfig, field.TypeJSON, value)
|
||||
}
|
||||
if value, ok := _u.mutation.RpmLimit(); ok {
|
||||
_spec.SetField(group.FieldRpmLimit, field.TypeInt, value)
|
||||
}
|
||||
if value, ok := _u.mutation.AddedRpmLimit(); ok {
|
||||
_spec.AddField(group.FieldRpmLimit, field.TypeInt, value)
|
||||
}
|
||||
if _u.mutation.APIKeysCleared() {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
Rel: sqlgraph.O2M,
|
||||
|
||||
@@ -69,6 +69,78 @@ func (f AnnouncementReadFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.V
|
||||
return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.AnnouncementReadMutation", m)
|
||||
}
|
||||
|
||||
// The AuthIdentityFunc type is an adapter to allow the use of ordinary
|
||||
// function as AuthIdentity mutator.
|
||||
type AuthIdentityFunc func(context.Context, *ent.AuthIdentityMutation) (ent.Value, error)
|
||||
|
||||
// Mutate calls f(ctx, m).
|
||||
func (f AuthIdentityFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
|
||||
if mv, ok := m.(*ent.AuthIdentityMutation); ok {
|
||||
return f(ctx, mv)
|
||||
}
|
||||
return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.AuthIdentityMutation", m)
|
||||
}
|
||||
|
||||
// The AuthIdentityChannelFunc type is an adapter to allow the use of ordinary
|
||||
// function as AuthIdentityChannel mutator.
|
||||
type AuthIdentityChannelFunc func(context.Context, *ent.AuthIdentityChannelMutation) (ent.Value, error)
|
||||
|
||||
// Mutate calls f(ctx, m).
|
||||
func (f AuthIdentityChannelFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
|
||||
if mv, ok := m.(*ent.AuthIdentityChannelMutation); ok {
|
||||
return f(ctx, mv)
|
||||
}
|
||||
return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.AuthIdentityChannelMutation", m)
|
||||
}
|
||||
|
||||
// The ChannelMonitorFunc type is an adapter to allow the use of ordinary
|
||||
// function as ChannelMonitor mutator.
|
||||
type ChannelMonitorFunc func(context.Context, *ent.ChannelMonitorMutation) (ent.Value, error)
|
||||
|
||||
// Mutate calls f(ctx, m).
|
||||
func (f ChannelMonitorFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
|
||||
if mv, ok := m.(*ent.ChannelMonitorMutation); ok {
|
||||
return f(ctx, mv)
|
||||
}
|
||||
return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.ChannelMonitorMutation", m)
|
||||
}
|
||||
|
||||
// The ChannelMonitorDailyRollupFunc type is an adapter to allow the use of ordinary
|
||||
// function as ChannelMonitorDailyRollup mutator.
|
||||
type ChannelMonitorDailyRollupFunc func(context.Context, *ent.ChannelMonitorDailyRollupMutation) (ent.Value, error)
|
||||
|
||||
// Mutate calls f(ctx, m).
|
||||
func (f ChannelMonitorDailyRollupFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
|
||||
if mv, ok := m.(*ent.ChannelMonitorDailyRollupMutation); ok {
|
||||
return f(ctx, mv)
|
||||
}
|
||||
return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.ChannelMonitorDailyRollupMutation", m)
|
||||
}
|
||||
|
||||
// The ChannelMonitorHistoryFunc type is an adapter to allow the use of ordinary
|
||||
// function as ChannelMonitorHistory mutator.
|
||||
type ChannelMonitorHistoryFunc func(context.Context, *ent.ChannelMonitorHistoryMutation) (ent.Value, error)
|
||||
|
||||
// Mutate calls f(ctx, m).
|
||||
func (f ChannelMonitorHistoryFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
|
||||
if mv, ok := m.(*ent.ChannelMonitorHistoryMutation); ok {
|
||||
return f(ctx, mv)
|
||||
}
|
||||
return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.ChannelMonitorHistoryMutation", m)
|
||||
}
|
||||
|
||||
// The ChannelMonitorRequestTemplateFunc type is an adapter to allow the use of ordinary
|
||||
// function as ChannelMonitorRequestTemplate mutator.
|
||||
type ChannelMonitorRequestTemplateFunc func(context.Context, *ent.ChannelMonitorRequestTemplateMutation) (ent.Value, error)
|
||||
|
||||
// Mutate calls f(ctx, m).
|
||||
func (f ChannelMonitorRequestTemplateFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
|
||||
if mv, ok := m.(*ent.ChannelMonitorRequestTemplateMutation); ok {
|
||||
return f(ctx, mv)
|
||||
}
|
||||
return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.ChannelMonitorRequestTemplateMutation", m)
|
||||
}
|
||||
|
||||
// The ErrorPassthroughRuleFunc type is an adapter to allow the use of an
// ordinary function as an ErrorPassthroughRule mutator.
type ErrorPassthroughRuleFunc func(context.Context, *ent.ErrorPassthroughRuleMutation) (ent.Value, error)
|
||||
@@ -105,6 +177,66 @@ func (f IdempotencyRecordFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.
|
||||
return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.IdempotencyRecordMutation", m)
|
||||
}
|
||||
|
||||
// The IdentityAdoptionDecisionFunc type is an adapter to allow the use of ordinary
|
||||
// function as IdentityAdoptionDecision mutator.
|
||||
type IdentityAdoptionDecisionFunc func(context.Context, *ent.IdentityAdoptionDecisionMutation) (ent.Value, error)
|
||||
|
||||
// Mutate calls f(ctx, m).
|
||||
func (f IdentityAdoptionDecisionFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
|
||||
if mv, ok := m.(*ent.IdentityAdoptionDecisionMutation); ok {
|
||||
return f(ctx, mv)
|
||||
}
|
||||
return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.IdentityAdoptionDecisionMutation", m)
|
||||
}
|
||||
|
||||
// The PaymentAuditLogFunc type is an adapter to allow the use of ordinary
|
||||
// function as PaymentAuditLog mutator.
|
||||
type PaymentAuditLogFunc func(context.Context, *ent.PaymentAuditLogMutation) (ent.Value, error)
|
||||
|
||||
// Mutate calls f(ctx, m).
|
||||
func (f PaymentAuditLogFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
|
||||
if mv, ok := m.(*ent.PaymentAuditLogMutation); ok {
|
||||
return f(ctx, mv)
|
||||
}
|
||||
return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.PaymentAuditLogMutation", m)
|
||||
}
|
||||
|
||||
// The PaymentOrderFunc type is an adapter to allow the use of ordinary
|
||||
// function as PaymentOrder mutator.
|
||||
type PaymentOrderFunc func(context.Context, *ent.PaymentOrderMutation) (ent.Value, error)
|
||||
|
||||
// Mutate calls f(ctx, m).
|
||||
func (f PaymentOrderFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
|
||||
if mv, ok := m.(*ent.PaymentOrderMutation); ok {
|
||||
return f(ctx, mv)
|
||||
}
|
||||
return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.PaymentOrderMutation", m)
|
||||
}
|
||||
|
||||
// The PaymentProviderInstanceFunc type is an adapter to allow the use of ordinary
|
||||
// function as PaymentProviderInstance mutator.
|
||||
type PaymentProviderInstanceFunc func(context.Context, *ent.PaymentProviderInstanceMutation) (ent.Value, error)
|
||||
|
||||
// Mutate calls f(ctx, m).
|
||||
func (f PaymentProviderInstanceFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
|
||||
if mv, ok := m.(*ent.PaymentProviderInstanceMutation); ok {
|
||||
return f(ctx, mv)
|
||||
}
|
||||
return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.PaymentProviderInstanceMutation", m)
|
||||
}
|
||||
|
||||
// The PendingAuthSessionFunc type is an adapter to allow the use of ordinary
|
||||
// function as PendingAuthSession mutator.
|
||||
type PendingAuthSessionFunc func(context.Context, *ent.PendingAuthSessionMutation) (ent.Value, error)
|
||||
|
||||
// Mutate calls f(ctx, m).
|
||||
func (f PendingAuthSessionFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
|
||||
if mv, ok := m.(*ent.PendingAuthSessionMutation); ok {
|
||||
return f(ctx, mv)
|
||||
}
|
||||
return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.PendingAuthSessionMutation", m)
|
||||
}
|
||||
|
||||
// The PromoCodeFunc type is an adapter to allow the use of an ordinary
// function as a PromoCode mutator.
type PromoCodeFunc func(context.Context, *ent.PromoCodeMutation) (ent.Value, error)
|
||||
@@ -177,6 +309,30 @@ func (f SettingFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, err
|
||||
return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.SettingMutation", m)
|
||||
}
|
||||
|
||||
// The SubscriptionPlanFunc type is an adapter to allow the use of ordinary
|
||||
// function as SubscriptionPlan mutator.
|
||||
type SubscriptionPlanFunc func(context.Context, *ent.SubscriptionPlanMutation) (ent.Value, error)
|
||||
|
||||
// Mutate calls f(ctx, m).
|
||||
func (f SubscriptionPlanFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
|
||||
if mv, ok := m.(*ent.SubscriptionPlanMutation); ok {
|
||||
return f(ctx, mv)
|
||||
}
|
||||
return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.SubscriptionPlanMutation", m)
|
||||
}
|
||||
|
||||
// The TLSFingerprintProfileFunc type is an adapter to allow the use of ordinary
|
||||
// function as TLSFingerprintProfile mutator.
|
||||
type TLSFingerprintProfileFunc func(context.Context, *ent.TLSFingerprintProfileMutation) (ent.Value, error)
|
||||
|
||||
// Mutate calls f(ctx, m).
|
||||
func (f TLSFingerprintProfileFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
|
||||
if mv, ok := m.(*ent.TLSFingerprintProfileMutation); ok {
|
||||
return f(ctx, mv)
|
||||
}
|
||||
return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.TLSFingerprintProfileMutation", m)
|
||||
}
|
||||
|
||||
// The UsageCleanupTaskFunc type is an adapter to allow the use of an ordinary
// function as a UsageCleanupTask mutator.
type UsageCleanupTaskFunc func(context.Context, *ent.UsageCleanupTaskMutation) (ent.Value, error)
|
||||
|
||||
223
backend/ent/identityadoptiondecision.go
Normal file
@@ -0,0 +1,223 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package ent
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"entgo.io/ent"
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"github.com/Wei-Shaw/sub2api/ent/authidentity"
|
||||
"github.com/Wei-Shaw/sub2api/ent/identityadoptiondecision"
|
||||
"github.com/Wei-Shaw/sub2api/ent/pendingauthsession"
|
||||
)
|
||||
|
||||
// IdentityAdoptionDecision is the model entity for the IdentityAdoptionDecision schema.
type IdentityAdoptionDecision struct {
	// Embedded client config; excluded from JSON output.
	config `json:"-"`
	// ID of the ent.
	ID int64 `json:"id,omitempty"`
	// CreatedAt holds the value of the "created_at" field.
	CreatedAt time.Time `json:"created_at,omitempty"`
	// UpdatedAt holds the value of the "updated_at" field.
	UpdatedAt time.Time `json:"updated_at,omitempty"`
	// PendingAuthSessionID holds the value of the "pending_auth_session_id" field.
	PendingAuthSessionID int64 `json:"pending_auth_session_id,omitempty"`
	// IdentityID holds the value of the "identity_id" field.
	// Pointer type: left nil when the column is NULL (see assignValues).
	IdentityID *int64 `json:"identity_id,omitempty"`
	// AdoptDisplayName holds the value of the "adopt_display_name" field.
	AdoptDisplayName bool `json:"adopt_display_name,omitempty"`
	// AdoptAvatar holds the value of the "adopt_avatar" field.
	AdoptAvatar bool `json:"adopt_avatar,omitempty"`
	// DecidedAt holds the value of the "decided_at" field.
	DecidedAt time.Time `json:"decided_at,omitempty"`
	// Edges holds the relations/edges for other nodes in the graph.
	// The values are being populated by the IdentityAdoptionDecisionQuery when eager-loading is set.
	Edges IdentityAdoptionDecisionEdges `json:"edges"`
	// selectValues stores extra columns selected via modifiers; exposed
	// through the Value method.
	selectValues sql.SelectValues
}
|
||||
|
||||
// IdentityAdoptionDecisionEdges holds the relations/edges for other nodes in the graph.
type IdentityAdoptionDecisionEdges struct {
	// PendingAuthSession holds the value of the pending_auth_session edge.
	PendingAuthSession *PendingAuthSession `json:"pending_auth_session,omitempty"`
	// Identity holds the value of the identity edge.
	Identity *AuthIdentity `json:"identity,omitempty"`
	// loadedTypes holds the information for reporting if a
	// type was loaded (or requested) in eager-loading or not.
	// Index 0 tracks pending_auth_session, index 1 tracks identity
	// (see the corresponding OrErr methods).
	loadedTypes [2]bool
}
|
||||
|
||||
// PendingAuthSessionOrErr returns the PendingAuthSession value or an error if the edge
|
||||
// was not loaded in eager-loading, or loaded but was not found.
|
||||
func (e IdentityAdoptionDecisionEdges) PendingAuthSessionOrErr() (*PendingAuthSession, error) {
|
||||
if e.PendingAuthSession != nil {
|
||||
return e.PendingAuthSession, nil
|
||||
} else if e.loadedTypes[0] {
|
||||
return nil, &NotFoundError{label: pendingauthsession.Label}
|
||||
}
|
||||
return nil, &NotLoadedError{edge: "pending_auth_session"}
|
||||
}
|
||||
|
||||
// IdentityOrErr returns the Identity value or an error if the edge
|
||||
// was not loaded in eager-loading, or loaded but was not found.
|
||||
func (e IdentityAdoptionDecisionEdges) IdentityOrErr() (*AuthIdentity, error) {
|
||||
if e.Identity != nil {
|
||||
return e.Identity, nil
|
||||
} else if e.loadedTypes[1] {
|
||||
return nil, &NotFoundError{label: authidentity.Label}
|
||||
}
|
||||
return nil, &NotLoadedError{edge: "identity"}
|
||||
}
|
||||
|
||||
// scanValues returns the types for scanning values from sql.Rows.
|
||||
func (*IdentityAdoptionDecision) scanValues(columns []string) ([]any, error) {
|
||||
values := make([]any, len(columns))
|
||||
for i := range columns {
|
||||
switch columns[i] {
|
||||
case identityadoptiondecision.FieldAdoptDisplayName, identityadoptiondecision.FieldAdoptAvatar:
|
||||
values[i] = new(sql.NullBool)
|
||||
case identityadoptiondecision.FieldID, identityadoptiondecision.FieldPendingAuthSessionID, identityadoptiondecision.FieldIdentityID:
|
||||
values[i] = new(sql.NullInt64)
|
||||
case identityadoptiondecision.FieldCreatedAt, identityadoptiondecision.FieldUpdatedAt, identityadoptiondecision.FieldDecidedAt:
|
||||
values[i] = new(sql.NullTime)
|
||||
default:
|
||||
values[i] = new(sql.UnknownType)
|
||||
}
|
||||
}
|
||||
return values, nil
|
||||
}
|
||||
|
||||
// assignValues assigns the values that were returned from sql.Rows (after scanning)
|
||||
// to the IdentityAdoptionDecision fields.
|
||||
func (_m *IdentityAdoptionDecision) assignValues(columns []string, values []any) error {
|
||||
if m, n := len(values), len(columns); m < n {
|
||||
return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
|
||||
}
|
||||
for i := range columns {
|
||||
switch columns[i] {
|
||||
case identityadoptiondecision.FieldID:
|
||||
value, ok := values[i].(*sql.NullInt64)
|
||||
if !ok {
|
||||
return fmt.Errorf("unexpected type %T for field id", value)
|
||||
}
|
||||
_m.ID = int64(value.Int64)
|
||||
case identityadoptiondecision.FieldCreatedAt:
|
||||
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field created_at", values[i])
|
||||
} else if value.Valid {
|
||||
_m.CreatedAt = value.Time
|
||||
}
|
||||
case identityadoptiondecision.FieldUpdatedAt:
|
||||
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field updated_at", values[i])
|
||||
} else if value.Valid {
|
||||
_m.UpdatedAt = value.Time
|
||||
}
|
||||
case identityadoptiondecision.FieldPendingAuthSessionID:
|
||||
if value, ok := values[i].(*sql.NullInt64); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field pending_auth_session_id", values[i])
|
||||
} else if value.Valid {
|
||||
_m.PendingAuthSessionID = value.Int64
|
||||
}
|
||||
case identityadoptiondecision.FieldIdentityID:
|
||||
if value, ok := values[i].(*sql.NullInt64); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field identity_id", values[i])
|
||||
} else if value.Valid {
|
||||
_m.IdentityID = new(int64)
|
||||
*_m.IdentityID = value.Int64
|
||||
}
|
||||
case identityadoptiondecision.FieldAdoptDisplayName:
|
||||
if value, ok := values[i].(*sql.NullBool); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field adopt_display_name", values[i])
|
||||
} else if value.Valid {
|
||||
_m.AdoptDisplayName = value.Bool
|
||||
}
|
||||
case identityadoptiondecision.FieldAdoptAvatar:
|
||||
if value, ok := values[i].(*sql.NullBool); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field adopt_avatar", values[i])
|
||||
} else if value.Valid {
|
||||
_m.AdoptAvatar = value.Bool
|
||||
}
|
||||
case identityadoptiondecision.FieldDecidedAt:
|
||||
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field decided_at", values[i])
|
||||
} else if value.Valid {
|
||||
_m.DecidedAt = value.Time
|
||||
}
|
||||
default:
|
||||
_m.selectValues.Set(columns[i], values[i])
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Value returns the ent.Value that was dynamically selected and assigned to the IdentityAdoptionDecision.
// This includes values selected through modifiers, order, etc. The lookup key
// is the raw column name.
func (_m *IdentityAdoptionDecision) Value(name string) (ent.Value, error) {
	return _m.selectValues.Get(name)
}
|
||||
|
||||
// QueryPendingAuthSession queries the "pending_auth_session" edge of the IdentityAdoptionDecision entity.
func (_m *IdentityAdoptionDecision) QueryPendingAuthSession() *PendingAuthSessionQuery {
	return NewIdentityAdoptionDecisionClient(_m.config).QueryPendingAuthSession(_m)
}
|
||||
|
||||
// QueryIdentity queries the "identity" edge of the IdentityAdoptionDecision entity.
func (_m *IdentityAdoptionDecision) QueryIdentity() *AuthIdentityQuery {
	return NewIdentityAdoptionDecisionClient(_m.config).QueryIdentity(_m)
}
|
||||
|
||||
// Update returns a builder for updating this IdentityAdoptionDecision.
// Note that you need to call IdentityAdoptionDecision.Unwrap() before calling this method if this IdentityAdoptionDecision
// was returned from a transaction, and the transaction was committed or rolled back.
func (_m *IdentityAdoptionDecision) Update() *IdentityAdoptionDecisionUpdateOne {
	return NewIdentityAdoptionDecisionClient(_m.config).UpdateOne(_m)
}
|
||||
|
||||
// Unwrap unwraps the IdentityAdoptionDecision entity that was returned from a transaction after it was closed,
|
||||
// so that all future queries will be executed through the driver which created the transaction.
|
||||
func (_m *IdentityAdoptionDecision) Unwrap() *IdentityAdoptionDecision {
|
||||
_tx, ok := _m.config.driver.(*txDriver)
|
||||
if !ok {
|
||||
panic("ent: IdentityAdoptionDecision is not a transactional entity")
|
||||
}
|
||||
_m.config.driver = _tx.drv
|
||||
return _m
|
||||
}
|
||||
|
||||
// String implements the fmt.Stringer.
|
||||
func (_m *IdentityAdoptionDecision) String() string {
|
||||
var builder strings.Builder
|
||||
builder.WriteString("IdentityAdoptionDecision(")
|
||||
builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID))
|
||||
builder.WriteString("created_at=")
|
||||
builder.WriteString(_m.CreatedAt.Format(time.ANSIC))
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("updated_at=")
|
||||
builder.WriteString(_m.UpdatedAt.Format(time.ANSIC))
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("pending_auth_session_id=")
|
||||
builder.WriteString(fmt.Sprintf("%v", _m.PendingAuthSessionID))
|
||||
builder.WriteString(", ")
|
||||
if v := _m.IdentityID; v != nil {
|
||||
builder.WriteString("identity_id=")
|
||||
builder.WriteString(fmt.Sprintf("%v", *v))
|
||||
}
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("adopt_display_name=")
|
||||
builder.WriteString(fmt.Sprintf("%v", _m.AdoptDisplayName))
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("adopt_avatar=")
|
||||
builder.WriteString(fmt.Sprintf("%v", _m.AdoptAvatar))
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("decided_at=")
|
||||
builder.WriteString(_m.DecidedAt.Format(time.ANSIC))
|
||||
builder.WriteByte(')')
|
||||
return builder.String()
|
||||
}
|
||||
|
||||
// IdentityAdoptionDecisions is a parsable slice of IdentityAdoptionDecision.
type IdentityAdoptionDecisions []*IdentityAdoptionDecision
|
||||
159
backend/ent/identityadoptiondecision/identityadoptiondecision.go
Normal file
@@ -0,0 +1,159 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package identityadoptiondecision
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
)
|
||||
|
||||
// Field, edge, and table name constants for the identityadoptiondecision schema.
const (
	// Label holds the string label denoting the identityadoptiondecision type in the database.
	Label = "identity_adoption_decision"
	// FieldID holds the string denoting the id field in the database.
	FieldID = "id"
	// FieldCreatedAt holds the string denoting the created_at field in the database.
	FieldCreatedAt = "created_at"
	// FieldUpdatedAt holds the string denoting the updated_at field in the database.
	FieldUpdatedAt = "updated_at"
	// FieldPendingAuthSessionID holds the string denoting the pending_auth_session_id field in the database.
	FieldPendingAuthSessionID = "pending_auth_session_id"
	// FieldIdentityID holds the string denoting the identity_id field in the database.
	FieldIdentityID = "identity_id"
	// FieldAdoptDisplayName holds the string denoting the adopt_display_name field in the database.
	FieldAdoptDisplayName = "adopt_display_name"
	// FieldAdoptAvatar holds the string denoting the adopt_avatar field in the database.
	FieldAdoptAvatar = "adopt_avatar"
	// FieldDecidedAt holds the string denoting the decided_at field in the database.
	FieldDecidedAt = "decided_at"
	// EdgePendingAuthSession holds the string denoting the pending_auth_session edge name in mutations.
	EdgePendingAuthSession = "pending_auth_session"
	// EdgeIdentity holds the string denoting the identity edge name in mutations.
	EdgeIdentity = "identity"
	// Table holds the table name of the identityadoptiondecision in the database.
	Table = "identity_adoption_decisions"
	// PendingAuthSessionTable is the table that holds the pending_auth_session relation/edge.
	// Both edges live on this entity's own table (the foreign keys are local columns).
	PendingAuthSessionTable = "identity_adoption_decisions"
	// PendingAuthSessionInverseTable is the table name for the PendingAuthSession entity.
	// It exists in this package in order to avoid circular dependency with the "pendingauthsession" package.
	PendingAuthSessionInverseTable = "pending_auth_sessions"
	// PendingAuthSessionColumn is the table column denoting the pending_auth_session relation/edge.
	PendingAuthSessionColumn = "pending_auth_session_id"
	// IdentityTable is the table that holds the identity relation/edge.
	IdentityTable = "identity_adoption_decisions"
	// IdentityInverseTable is the table name for the AuthIdentity entity.
	// It exists in this package in order to avoid circular dependency with the "authidentity" package.
	IdentityInverseTable = "auth_identities"
	// IdentityColumn is the table column denoting the identity relation/edge.
	IdentityColumn = "identity_id"
)
|
||||
|
||||
// Columns holds all SQL columns for identityadoptiondecision fields.
var Columns = []string{
	FieldID,
	FieldCreatedAt,
	FieldUpdatedAt,
	FieldPendingAuthSessionID,
	FieldIdentityID,
	FieldAdoptDisplayName,
	FieldAdoptAvatar,
	FieldDecidedAt,
}
|
||||
|
||||
// ValidColumn reports if the column name is valid (part of the table columns).
|
||||
func ValidColumn(column string) bool {
|
||||
for i := range Columns {
|
||||
if column == Columns[i] {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Default-value hooks for the schema's fields.
// NOTE(review): the func vars below are declared but not assigned here —
// presumably populated elsewhere in the package at init time; confirm in
// the generated runtime file.
var (
	// DefaultCreatedAt holds the default value on creation for the "created_at" field.
	DefaultCreatedAt func() time.Time
	// DefaultUpdatedAt holds the default value on creation for the "updated_at" field.
	DefaultUpdatedAt func() time.Time
	// UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field.
	UpdateDefaultUpdatedAt func() time.Time
	// DefaultAdoptDisplayName holds the default value on creation for the "adopt_display_name" field.
	DefaultAdoptDisplayName bool
	// DefaultAdoptAvatar holds the default value on creation for the "adopt_avatar" field.
	DefaultAdoptAvatar bool
	// DefaultDecidedAt holds the default value on creation for the "decided_at" field.
	DefaultDecidedAt func() time.Time
)
|
||||
|
||||
// OrderOption defines the ordering options for the IdentityAdoptionDecision queries.
type OrderOption func(*sql.Selector)
|
||||
|
||||
// ByID orders the results by the id field.
func ByID(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldID, opts...).ToFunc()
}

// ByCreatedAt orders the results by the created_at field.
func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
}

// ByUpdatedAt orders the results by the updated_at field.
func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc()
}

// ByPendingAuthSessionID orders the results by the pending_auth_session_id field.
func ByPendingAuthSessionID(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldPendingAuthSessionID, opts...).ToFunc()
}

// ByIdentityID orders the results by the identity_id field.
func ByIdentityID(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldIdentityID, opts...).ToFunc()
}

// ByAdoptDisplayName orders the results by the adopt_display_name field.
func ByAdoptDisplayName(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldAdoptDisplayName, opts...).ToFunc()
}

// ByAdoptAvatar orders the results by the adopt_avatar field.
func ByAdoptAvatar(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldAdoptAvatar, opts...).ToFunc()
}

// ByDecidedAt orders the results by the decided_at field.
func ByDecidedAt(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldDecidedAt, opts...).ToFunc()
}
|
||||
|
||||
// ByPendingAuthSessionField orders the results by pending_auth_session field.
func ByPendingAuthSessionField(field string, opts ...sql.OrderTermOption) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborTerms(s, newPendingAuthSessionStep(), sql.OrderByField(field, opts...))
	}
}

// ByIdentityField orders the results by identity field.
func ByIdentityField(field string, opts ...sql.OrderTermOption) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborTerms(s, newIdentityStep(), sql.OrderByField(field, opts...))
	}
}
|
||||
// newPendingAuthSessionStep builds the graph-traversal step for the
// pending_auth_session edge (O2O edge keyed by pending_auth_session_id,
// pointing at the pending_auth_sessions table).
func newPendingAuthSessionStep() *sqlgraph.Step {
	return sqlgraph.NewStep(
		sqlgraph.From(Table, FieldID),
		sqlgraph.To(PendingAuthSessionInverseTable, FieldID),
		sqlgraph.Edge(sqlgraph.O2O, true, PendingAuthSessionTable, PendingAuthSessionColumn),
	)
}

// newIdentityStep builds the graph-traversal step for the identity edge
// (M2O edge keyed by identity_id, pointing at the auth_identities table).
func newIdentityStep() *sqlgraph.Step {
	return sqlgraph.NewStep(
		sqlgraph.From(Table, FieldID),
		sqlgraph.To(IdentityInverseTable, FieldID),
		sqlgraph.Edge(sqlgraph.M2O, true, IdentityTable, IdentityColumn),
	)
}
|
||||
342
backend/ent/identityadoptiondecision/where.go
Normal file
@@ -0,0 +1,342 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package identityadoptiondecision
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||
)
|
||||
|
||||
// ID filters vertices based on their ID field.
func ID(id int64) predicate.IdentityAdoptionDecision {
	return predicate.IdentityAdoptionDecision(sql.FieldEQ(FieldID, id))
}

// IDEQ applies the EQ predicate on the ID field.
func IDEQ(id int64) predicate.IdentityAdoptionDecision {
	return predicate.IdentityAdoptionDecision(sql.FieldEQ(FieldID, id))
}

// IDNEQ applies the NEQ predicate on the ID field.
func IDNEQ(id int64) predicate.IdentityAdoptionDecision {
	return predicate.IdentityAdoptionDecision(sql.FieldNEQ(FieldID, id))
}

// IDIn applies the In predicate on the ID field.
func IDIn(ids ...int64) predicate.IdentityAdoptionDecision {
	return predicate.IdentityAdoptionDecision(sql.FieldIn(FieldID, ids...))
}

// IDNotIn applies the NotIn predicate on the ID field.
func IDNotIn(ids ...int64) predicate.IdentityAdoptionDecision {
	return predicate.IdentityAdoptionDecision(sql.FieldNotIn(FieldID, ids...))
}

// IDGT applies the GT predicate on the ID field.
func IDGT(id int64) predicate.IdentityAdoptionDecision {
	return predicate.IdentityAdoptionDecision(sql.FieldGT(FieldID, id))
}

// IDGTE applies the GTE predicate on the ID field.
func IDGTE(id int64) predicate.IdentityAdoptionDecision {
	return predicate.IdentityAdoptionDecision(sql.FieldGTE(FieldID, id))
}

// IDLT applies the LT predicate on the ID field.
func IDLT(id int64) predicate.IdentityAdoptionDecision {
	return predicate.IdentityAdoptionDecision(sql.FieldLT(FieldID, id))
}

// IDLTE applies the LTE predicate on the ID field.
func IDLTE(id int64) predicate.IdentityAdoptionDecision {
	return predicate.IdentityAdoptionDecision(sql.FieldLTE(FieldID, id))
}
|
||||
|
||||
// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ.
func CreatedAt(v time.Time) predicate.IdentityAdoptionDecision {
	return predicate.IdentityAdoptionDecision(sql.FieldEQ(FieldCreatedAt, v))
}

// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ.
func UpdatedAt(v time.Time) predicate.IdentityAdoptionDecision {
	return predicate.IdentityAdoptionDecision(sql.FieldEQ(FieldUpdatedAt, v))
}

// PendingAuthSessionID applies equality check predicate on the "pending_auth_session_id" field. It's identical to PendingAuthSessionIDEQ.
func PendingAuthSessionID(v int64) predicate.IdentityAdoptionDecision {
	return predicate.IdentityAdoptionDecision(sql.FieldEQ(FieldPendingAuthSessionID, v))
}

// IdentityID applies equality check predicate on the "identity_id" field. It's identical to IdentityIDEQ.
func IdentityID(v int64) predicate.IdentityAdoptionDecision {
	return predicate.IdentityAdoptionDecision(sql.FieldEQ(FieldIdentityID, v))
}

// AdoptDisplayName applies equality check predicate on the "adopt_display_name" field. It's identical to AdoptDisplayNameEQ.
func AdoptDisplayName(v bool) predicate.IdentityAdoptionDecision {
	return predicate.IdentityAdoptionDecision(sql.FieldEQ(FieldAdoptDisplayName, v))
}

// AdoptAvatar applies equality check predicate on the "adopt_avatar" field. It's identical to AdoptAvatarEQ.
func AdoptAvatar(v bool) predicate.IdentityAdoptionDecision {
	return predicate.IdentityAdoptionDecision(sql.FieldEQ(FieldAdoptAvatar, v))
}

// DecidedAt applies equality check predicate on the "decided_at" field. It's identical to DecidedAtEQ.
func DecidedAt(v time.Time) predicate.IdentityAdoptionDecision {
	return predicate.IdentityAdoptionDecision(sql.FieldEQ(FieldDecidedAt, v))
}
|
||||
|
||||
// CreatedAtEQ applies the EQ predicate on the "created_at" field.
func CreatedAtEQ(v time.Time) predicate.IdentityAdoptionDecision {
	return predicate.IdentityAdoptionDecision(sql.FieldEQ(FieldCreatedAt, v))
}

// CreatedAtNEQ applies the NEQ predicate on the "created_at" field.
func CreatedAtNEQ(v time.Time) predicate.IdentityAdoptionDecision {
	return predicate.IdentityAdoptionDecision(sql.FieldNEQ(FieldCreatedAt, v))
}

// CreatedAtIn applies the In predicate on the "created_at" field.
func CreatedAtIn(vs ...time.Time) predicate.IdentityAdoptionDecision {
	return predicate.IdentityAdoptionDecision(sql.FieldIn(FieldCreatedAt, vs...))
}

// CreatedAtNotIn applies the NotIn predicate on the "created_at" field.
func CreatedAtNotIn(vs ...time.Time) predicate.IdentityAdoptionDecision {
	return predicate.IdentityAdoptionDecision(sql.FieldNotIn(FieldCreatedAt, vs...))
}

// CreatedAtGT applies the GT predicate on the "created_at" field.
func CreatedAtGT(v time.Time) predicate.IdentityAdoptionDecision {
	return predicate.IdentityAdoptionDecision(sql.FieldGT(FieldCreatedAt, v))
}

// CreatedAtGTE applies the GTE predicate on the "created_at" field.
func CreatedAtGTE(v time.Time) predicate.IdentityAdoptionDecision {
	return predicate.IdentityAdoptionDecision(sql.FieldGTE(FieldCreatedAt, v))
}

// CreatedAtLT applies the LT predicate on the "created_at" field.
func CreatedAtLT(v time.Time) predicate.IdentityAdoptionDecision {
	return predicate.IdentityAdoptionDecision(sql.FieldLT(FieldCreatedAt, v))
}

// CreatedAtLTE applies the LTE predicate on the "created_at" field.
func CreatedAtLTE(v time.Time) predicate.IdentityAdoptionDecision {
	return predicate.IdentityAdoptionDecision(sql.FieldLTE(FieldCreatedAt, v))
}
|
||||
|
||||
// UpdatedAtEQ applies the EQ predicate on the "updated_at" field.
func UpdatedAtEQ(v time.Time) predicate.IdentityAdoptionDecision {
	return predicate.IdentityAdoptionDecision(sql.FieldEQ(FieldUpdatedAt, v))
}

// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field.
func UpdatedAtNEQ(v time.Time) predicate.IdentityAdoptionDecision {
	return predicate.IdentityAdoptionDecision(sql.FieldNEQ(FieldUpdatedAt, v))
}

// UpdatedAtIn applies the In predicate on the "updated_at" field.
func UpdatedAtIn(vs ...time.Time) predicate.IdentityAdoptionDecision {
	return predicate.IdentityAdoptionDecision(sql.FieldIn(FieldUpdatedAt, vs...))
}

// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field.
func UpdatedAtNotIn(vs ...time.Time) predicate.IdentityAdoptionDecision {
	return predicate.IdentityAdoptionDecision(sql.FieldNotIn(FieldUpdatedAt, vs...))
}

// UpdatedAtGT applies the GT predicate on the "updated_at" field.
func UpdatedAtGT(v time.Time) predicate.IdentityAdoptionDecision {
	return predicate.IdentityAdoptionDecision(sql.FieldGT(FieldUpdatedAt, v))
}

// UpdatedAtGTE applies the GTE predicate on the "updated_at" field.
func UpdatedAtGTE(v time.Time) predicate.IdentityAdoptionDecision {
	return predicate.IdentityAdoptionDecision(sql.FieldGTE(FieldUpdatedAt, v))
}

// UpdatedAtLT applies the LT predicate on the "updated_at" field.
func UpdatedAtLT(v time.Time) predicate.IdentityAdoptionDecision {
	return predicate.IdentityAdoptionDecision(sql.FieldLT(FieldUpdatedAt, v))
}

// UpdatedAtLTE applies the LTE predicate on the "updated_at" field.
func UpdatedAtLTE(v time.Time) predicate.IdentityAdoptionDecision {
	return predicate.IdentityAdoptionDecision(sql.FieldLTE(FieldUpdatedAt, v))
}
|
||||
|
||||
// PendingAuthSessionIDEQ applies the EQ predicate on the "pending_auth_session_id" field.
|
||||
func PendingAuthSessionIDEQ(v int64) predicate.IdentityAdoptionDecision {
|
||||
return predicate.IdentityAdoptionDecision(sql.FieldEQ(FieldPendingAuthSessionID, v))
|
||||
}
|
||||
|
||||
// PendingAuthSessionIDNEQ applies the NEQ predicate on the "pending_auth_session_id" field.
|
||||
func PendingAuthSessionIDNEQ(v int64) predicate.IdentityAdoptionDecision {
|
||||
return predicate.IdentityAdoptionDecision(sql.FieldNEQ(FieldPendingAuthSessionID, v))
|
||||
}
|
||||
|
||||
// PendingAuthSessionIDIn applies the In predicate on the "pending_auth_session_id" field.
|
||||
func PendingAuthSessionIDIn(vs ...int64) predicate.IdentityAdoptionDecision {
|
||||
return predicate.IdentityAdoptionDecision(sql.FieldIn(FieldPendingAuthSessionID, vs...))
|
||||
}
|
||||
|
||||
// PendingAuthSessionIDNotIn applies the NotIn predicate on the "pending_auth_session_id" field.
|
||||
func PendingAuthSessionIDNotIn(vs ...int64) predicate.IdentityAdoptionDecision {
|
||||
return predicate.IdentityAdoptionDecision(sql.FieldNotIn(FieldPendingAuthSessionID, vs...))
|
||||
}
|
||||
|
||||
// IdentityIDEQ applies the EQ predicate on the "identity_id" field.
|
||||
func IdentityIDEQ(v int64) predicate.IdentityAdoptionDecision {
|
||||
return predicate.IdentityAdoptionDecision(sql.FieldEQ(FieldIdentityID, v))
|
||||
}
|
||||
|
||||
// IdentityIDNEQ applies the NEQ predicate on the "identity_id" field.
|
||||
func IdentityIDNEQ(v int64) predicate.IdentityAdoptionDecision {
|
||||
return predicate.IdentityAdoptionDecision(sql.FieldNEQ(FieldIdentityID, v))
|
||||
}
|
||||
|
||||
// IdentityIDIn applies the In predicate on the "identity_id" field.
|
||||
func IdentityIDIn(vs ...int64) predicate.IdentityAdoptionDecision {
|
||||
return predicate.IdentityAdoptionDecision(sql.FieldIn(FieldIdentityID, vs...))
|
||||
}
|
||||
|
||||
// IdentityIDNotIn applies the NotIn predicate on the "identity_id" field.
|
||||
func IdentityIDNotIn(vs ...int64) predicate.IdentityAdoptionDecision {
|
||||
return predicate.IdentityAdoptionDecision(sql.FieldNotIn(FieldIdentityID, vs...))
|
||||
}
|
||||
|
||||
// IdentityIDIsNil applies the IsNil predicate on the "identity_id" field.
|
||||
func IdentityIDIsNil() predicate.IdentityAdoptionDecision {
|
||||
return predicate.IdentityAdoptionDecision(sql.FieldIsNull(FieldIdentityID))
|
||||
}
|
||||
|
||||
// IdentityIDNotNil applies the NotNil predicate on the "identity_id" field.
|
||||
func IdentityIDNotNil() predicate.IdentityAdoptionDecision {
|
||||
return predicate.IdentityAdoptionDecision(sql.FieldNotNull(FieldIdentityID))
|
||||
}
|
||||
|
||||
// AdoptDisplayNameEQ applies the EQ predicate on the "adopt_display_name" field.
|
||||
func AdoptDisplayNameEQ(v bool) predicate.IdentityAdoptionDecision {
|
||||
return predicate.IdentityAdoptionDecision(sql.FieldEQ(FieldAdoptDisplayName, v))
|
||||
}
|
||||
|
||||
// AdoptDisplayNameNEQ applies the NEQ predicate on the "adopt_display_name" field.
|
||||
func AdoptDisplayNameNEQ(v bool) predicate.IdentityAdoptionDecision {
|
||||
return predicate.IdentityAdoptionDecision(sql.FieldNEQ(FieldAdoptDisplayName, v))
|
||||
}
|
||||
|
||||
// AdoptAvatarEQ applies the EQ predicate on the "adopt_avatar" field.
|
||||
func AdoptAvatarEQ(v bool) predicate.IdentityAdoptionDecision {
|
||||
return predicate.IdentityAdoptionDecision(sql.FieldEQ(FieldAdoptAvatar, v))
|
||||
}
|
||||
|
||||
// AdoptAvatarNEQ applies the NEQ predicate on the "adopt_avatar" field.
|
||||
func AdoptAvatarNEQ(v bool) predicate.IdentityAdoptionDecision {
|
||||
return predicate.IdentityAdoptionDecision(sql.FieldNEQ(FieldAdoptAvatar, v))
|
||||
}
|
||||
|
||||
// DecidedAtEQ applies the EQ predicate on the "decided_at" field.
|
||||
func DecidedAtEQ(v time.Time) predicate.IdentityAdoptionDecision {
|
||||
return predicate.IdentityAdoptionDecision(sql.FieldEQ(FieldDecidedAt, v))
|
||||
}
|
||||
|
||||
// DecidedAtNEQ applies the NEQ predicate on the "decided_at" field.
|
||||
func DecidedAtNEQ(v time.Time) predicate.IdentityAdoptionDecision {
|
||||
return predicate.IdentityAdoptionDecision(sql.FieldNEQ(FieldDecidedAt, v))
|
||||
}
|
||||
|
||||
// DecidedAtIn applies the In predicate on the "decided_at" field.
|
||||
func DecidedAtIn(vs ...time.Time) predicate.IdentityAdoptionDecision {
|
||||
return predicate.IdentityAdoptionDecision(sql.FieldIn(FieldDecidedAt, vs...))
|
||||
}
|
||||
|
||||
// DecidedAtNotIn applies the NotIn predicate on the "decided_at" field.
|
||||
func DecidedAtNotIn(vs ...time.Time) predicate.IdentityAdoptionDecision {
|
||||
return predicate.IdentityAdoptionDecision(sql.FieldNotIn(FieldDecidedAt, vs...))
|
||||
}
|
||||
|
||||
// DecidedAtGT applies the GT predicate on the "decided_at" field.
|
||||
func DecidedAtGT(v time.Time) predicate.IdentityAdoptionDecision {
|
||||
return predicate.IdentityAdoptionDecision(sql.FieldGT(FieldDecidedAt, v))
|
||||
}
|
||||
|
||||
// DecidedAtGTE applies the GTE predicate on the "decided_at" field.
|
||||
func DecidedAtGTE(v time.Time) predicate.IdentityAdoptionDecision {
|
||||
return predicate.IdentityAdoptionDecision(sql.FieldGTE(FieldDecidedAt, v))
|
||||
}
|
||||
|
||||
// DecidedAtLT applies the LT predicate on the "decided_at" field.
|
||||
func DecidedAtLT(v time.Time) predicate.IdentityAdoptionDecision {
|
||||
return predicate.IdentityAdoptionDecision(sql.FieldLT(FieldDecidedAt, v))
|
||||
}
|
||||
|
||||
// DecidedAtLTE applies the LTE predicate on the "decided_at" field.
|
||||
func DecidedAtLTE(v time.Time) predicate.IdentityAdoptionDecision {
|
||||
return predicate.IdentityAdoptionDecision(sql.FieldLTE(FieldDecidedAt, v))
|
||||
}
|
||||
|
||||
// HasPendingAuthSession applies the HasEdge predicate on the "pending_auth_session" edge.
|
||||
func HasPendingAuthSession() predicate.IdentityAdoptionDecision {
|
||||
return predicate.IdentityAdoptionDecision(func(s *sql.Selector) {
|
||||
step := sqlgraph.NewStep(
|
||||
sqlgraph.From(Table, FieldID),
|
||||
sqlgraph.Edge(sqlgraph.O2O, true, PendingAuthSessionTable, PendingAuthSessionColumn),
|
||||
)
|
||||
sqlgraph.HasNeighbors(s, step)
|
||||
})
|
||||
}
|
||||
|
||||
// HasPendingAuthSessionWith applies the HasEdge predicate on the "pending_auth_session" edge with a given conditions (other predicates).
|
||||
func HasPendingAuthSessionWith(preds ...predicate.PendingAuthSession) predicate.IdentityAdoptionDecision {
|
||||
return predicate.IdentityAdoptionDecision(func(s *sql.Selector) {
|
||||
step := newPendingAuthSessionStep()
|
||||
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
|
||||
for _, p := range preds {
|
||||
p(s)
|
||||
}
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
// HasIdentity applies the HasEdge predicate on the "identity" edge.
|
||||
func HasIdentity() predicate.IdentityAdoptionDecision {
|
||||
return predicate.IdentityAdoptionDecision(func(s *sql.Selector) {
|
||||
step := sqlgraph.NewStep(
|
||||
sqlgraph.From(Table, FieldID),
|
||||
sqlgraph.Edge(sqlgraph.M2O, true, IdentityTable, IdentityColumn),
|
||||
)
|
||||
sqlgraph.HasNeighbors(s, step)
|
||||
})
|
||||
}
|
||||
|
||||
// HasIdentityWith applies the HasEdge predicate on the "identity" edge with a given conditions (other predicates).
|
||||
func HasIdentityWith(preds ...predicate.AuthIdentity) predicate.IdentityAdoptionDecision {
|
||||
return predicate.IdentityAdoptionDecision(func(s *sql.Selector) {
|
||||
step := newIdentityStep()
|
||||
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
|
||||
for _, p := range preds {
|
||||
p(s)
|
||||
}
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
// And groups predicates with the AND operator between them.
|
||||
func And(predicates ...predicate.IdentityAdoptionDecision) predicate.IdentityAdoptionDecision {
|
||||
return predicate.IdentityAdoptionDecision(sql.AndPredicates(predicates...))
|
||||
}
|
||||
|
||||
// Or groups predicates with the OR operator between them.
|
||||
func Or(predicates ...predicate.IdentityAdoptionDecision) predicate.IdentityAdoptionDecision {
|
||||
return predicate.IdentityAdoptionDecision(sql.OrPredicates(predicates...))
|
||||
}
|
||||
|
||||
// Not applies the not operator on the given predicate.
|
||||
func Not(p predicate.IdentityAdoptionDecision) predicate.IdentityAdoptionDecision {
|
||||
return predicate.IdentityAdoptionDecision(sql.NotPredicates(p))
|
||||
}
|
||||
843
backend/ent/identityadoptiondecision_create.go
Normal file
@@ -0,0 +1,843 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package ent
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"entgo.io/ent/schema/field"
|
||||
"github.com/Wei-Shaw/sub2api/ent/authidentity"
|
||||
"github.com/Wei-Shaw/sub2api/ent/identityadoptiondecision"
|
||||
"github.com/Wei-Shaw/sub2api/ent/pendingauthsession"
|
||||
)
|
||||
|
||||
// IdentityAdoptionDecisionCreate is the builder for creating a IdentityAdoptionDecision entity.
|
||||
type IdentityAdoptionDecisionCreate struct {
|
||||
config
|
||||
mutation *IdentityAdoptionDecisionMutation
|
||||
hooks []Hook
|
||||
conflict []sql.ConflictOption
|
||||
}
|
||||
|
||||
// SetCreatedAt sets the "created_at" field.
|
||||
func (_c *IdentityAdoptionDecisionCreate) SetCreatedAt(v time.Time) *IdentityAdoptionDecisionCreate {
|
||||
_c.mutation.SetCreatedAt(v)
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetNillableCreatedAt sets the "created_at" field if the given value is not nil.
|
||||
func (_c *IdentityAdoptionDecisionCreate) SetNillableCreatedAt(v *time.Time) *IdentityAdoptionDecisionCreate {
|
||||
if v != nil {
|
||||
_c.SetCreatedAt(*v)
|
||||
}
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetUpdatedAt sets the "updated_at" field.
|
||||
func (_c *IdentityAdoptionDecisionCreate) SetUpdatedAt(v time.Time) *IdentityAdoptionDecisionCreate {
|
||||
_c.mutation.SetUpdatedAt(v)
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil.
|
||||
func (_c *IdentityAdoptionDecisionCreate) SetNillableUpdatedAt(v *time.Time) *IdentityAdoptionDecisionCreate {
|
||||
if v != nil {
|
||||
_c.SetUpdatedAt(*v)
|
||||
}
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetPendingAuthSessionID sets the "pending_auth_session_id" field.
|
||||
func (_c *IdentityAdoptionDecisionCreate) SetPendingAuthSessionID(v int64) *IdentityAdoptionDecisionCreate {
|
||||
_c.mutation.SetPendingAuthSessionID(v)
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetIdentityID sets the "identity_id" field.
|
||||
func (_c *IdentityAdoptionDecisionCreate) SetIdentityID(v int64) *IdentityAdoptionDecisionCreate {
|
||||
_c.mutation.SetIdentityID(v)
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetNillableIdentityID sets the "identity_id" field if the given value is not nil.
|
||||
func (_c *IdentityAdoptionDecisionCreate) SetNillableIdentityID(v *int64) *IdentityAdoptionDecisionCreate {
|
||||
if v != nil {
|
||||
_c.SetIdentityID(*v)
|
||||
}
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetAdoptDisplayName sets the "adopt_display_name" field.
|
||||
func (_c *IdentityAdoptionDecisionCreate) SetAdoptDisplayName(v bool) *IdentityAdoptionDecisionCreate {
|
||||
_c.mutation.SetAdoptDisplayName(v)
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetNillableAdoptDisplayName sets the "adopt_display_name" field if the given value is not nil.
|
||||
func (_c *IdentityAdoptionDecisionCreate) SetNillableAdoptDisplayName(v *bool) *IdentityAdoptionDecisionCreate {
|
||||
if v != nil {
|
||||
_c.SetAdoptDisplayName(*v)
|
||||
}
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetAdoptAvatar sets the "adopt_avatar" field.
|
||||
func (_c *IdentityAdoptionDecisionCreate) SetAdoptAvatar(v bool) *IdentityAdoptionDecisionCreate {
|
||||
_c.mutation.SetAdoptAvatar(v)
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetNillableAdoptAvatar sets the "adopt_avatar" field if the given value is not nil.
|
||||
func (_c *IdentityAdoptionDecisionCreate) SetNillableAdoptAvatar(v *bool) *IdentityAdoptionDecisionCreate {
|
||||
if v != nil {
|
||||
_c.SetAdoptAvatar(*v)
|
||||
}
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetDecidedAt sets the "decided_at" field.
|
||||
func (_c *IdentityAdoptionDecisionCreate) SetDecidedAt(v time.Time) *IdentityAdoptionDecisionCreate {
|
||||
_c.mutation.SetDecidedAt(v)
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetNillableDecidedAt sets the "decided_at" field if the given value is not nil.
|
||||
func (_c *IdentityAdoptionDecisionCreate) SetNillableDecidedAt(v *time.Time) *IdentityAdoptionDecisionCreate {
|
||||
if v != nil {
|
||||
_c.SetDecidedAt(*v)
|
||||
}
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetPendingAuthSession sets the "pending_auth_session" edge to the PendingAuthSession entity.
|
||||
func (_c *IdentityAdoptionDecisionCreate) SetPendingAuthSession(v *PendingAuthSession) *IdentityAdoptionDecisionCreate {
|
||||
return _c.SetPendingAuthSessionID(v.ID)
|
||||
}
|
||||
|
||||
// SetIdentity sets the "identity" edge to the AuthIdentity entity.
|
||||
func (_c *IdentityAdoptionDecisionCreate) SetIdentity(v *AuthIdentity) *IdentityAdoptionDecisionCreate {
|
||||
return _c.SetIdentityID(v.ID)
|
||||
}
|
||||
|
||||
// Mutation returns the IdentityAdoptionDecisionMutation object of the builder.
|
||||
func (_c *IdentityAdoptionDecisionCreate) Mutation() *IdentityAdoptionDecisionMutation {
|
||||
return _c.mutation
|
||||
}
|
||||
|
||||
// Save creates the IdentityAdoptionDecision in the database.
|
||||
func (_c *IdentityAdoptionDecisionCreate) Save(ctx context.Context) (*IdentityAdoptionDecision, error) {
|
||||
_c.defaults()
|
||||
return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks)
|
||||
}
|
||||
|
||||
// SaveX calls Save and panics if Save returns an error.
|
||||
func (_c *IdentityAdoptionDecisionCreate) SaveX(ctx context.Context) *IdentityAdoptionDecision {
|
||||
v, err := _c.Save(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// Exec executes the query.
|
||||
func (_c *IdentityAdoptionDecisionCreate) Exec(ctx context.Context) error {
|
||||
_, err := _c.Save(ctx)
|
||||
return err
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (_c *IdentityAdoptionDecisionCreate) ExecX(ctx context.Context) {
|
||||
if err := _c.Exec(ctx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// defaults sets the default values of the builder before save.
|
||||
func (_c *IdentityAdoptionDecisionCreate) defaults() {
|
||||
if _, ok := _c.mutation.CreatedAt(); !ok {
|
||||
v := identityadoptiondecision.DefaultCreatedAt()
|
||||
_c.mutation.SetCreatedAt(v)
|
||||
}
|
||||
if _, ok := _c.mutation.UpdatedAt(); !ok {
|
||||
v := identityadoptiondecision.DefaultUpdatedAt()
|
||||
_c.mutation.SetUpdatedAt(v)
|
||||
}
|
||||
if _, ok := _c.mutation.AdoptDisplayName(); !ok {
|
||||
v := identityadoptiondecision.DefaultAdoptDisplayName
|
||||
_c.mutation.SetAdoptDisplayName(v)
|
||||
}
|
||||
if _, ok := _c.mutation.AdoptAvatar(); !ok {
|
||||
v := identityadoptiondecision.DefaultAdoptAvatar
|
||||
_c.mutation.SetAdoptAvatar(v)
|
||||
}
|
||||
if _, ok := _c.mutation.DecidedAt(); !ok {
|
||||
v := identityadoptiondecision.DefaultDecidedAt()
|
||||
_c.mutation.SetDecidedAt(v)
|
||||
}
|
||||
}
|
||||
|
||||
// check runs all checks and user-defined validators on the builder.
|
||||
func (_c *IdentityAdoptionDecisionCreate) check() error {
|
||||
if _, ok := _c.mutation.CreatedAt(); !ok {
|
||||
return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "IdentityAdoptionDecision.created_at"`)}
|
||||
}
|
||||
if _, ok := _c.mutation.UpdatedAt(); !ok {
|
||||
return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "IdentityAdoptionDecision.updated_at"`)}
|
||||
}
|
||||
if _, ok := _c.mutation.PendingAuthSessionID(); !ok {
|
||||
return &ValidationError{Name: "pending_auth_session_id", err: errors.New(`ent: missing required field "IdentityAdoptionDecision.pending_auth_session_id"`)}
|
||||
}
|
||||
if _, ok := _c.mutation.AdoptDisplayName(); !ok {
|
||||
return &ValidationError{Name: "adopt_display_name", err: errors.New(`ent: missing required field "IdentityAdoptionDecision.adopt_display_name"`)}
|
||||
}
|
||||
if _, ok := _c.mutation.AdoptAvatar(); !ok {
|
||||
return &ValidationError{Name: "adopt_avatar", err: errors.New(`ent: missing required field "IdentityAdoptionDecision.adopt_avatar"`)}
|
||||
}
|
||||
if _, ok := _c.mutation.DecidedAt(); !ok {
|
||||
return &ValidationError{Name: "decided_at", err: errors.New(`ent: missing required field "IdentityAdoptionDecision.decided_at"`)}
|
||||
}
|
||||
if len(_c.mutation.PendingAuthSessionIDs()) == 0 {
|
||||
return &ValidationError{Name: "pending_auth_session", err: errors.New(`ent: missing required edge "IdentityAdoptionDecision.pending_auth_session"`)}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (_c *IdentityAdoptionDecisionCreate) sqlSave(ctx context.Context) (*IdentityAdoptionDecision, error) {
|
||||
if err := _c.check(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
_node, _spec := _c.createSpec()
|
||||
if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil {
|
||||
if sqlgraph.IsConstraintError(err) {
|
||||
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
id := _spec.ID.Value.(int64)
|
||||
_node.ID = int64(id)
|
||||
_c.mutation.id = &_node.ID
|
||||
_c.mutation.done = true
|
||||
return _node, nil
|
||||
}
|
||||
|
||||
func (_c *IdentityAdoptionDecisionCreate) createSpec() (*IdentityAdoptionDecision, *sqlgraph.CreateSpec) {
|
||||
var (
|
||||
_node = &IdentityAdoptionDecision{config: _c.config}
|
||||
_spec = sqlgraph.NewCreateSpec(identityadoptiondecision.Table, sqlgraph.NewFieldSpec(identityadoptiondecision.FieldID, field.TypeInt64))
|
||||
)
|
||||
_spec.OnConflict = _c.conflict
|
||||
if value, ok := _c.mutation.CreatedAt(); ok {
|
||||
_spec.SetField(identityadoptiondecision.FieldCreatedAt, field.TypeTime, value)
|
||||
_node.CreatedAt = value
|
||||
}
|
||||
if value, ok := _c.mutation.UpdatedAt(); ok {
|
||||
_spec.SetField(identityadoptiondecision.FieldUpdatedAt, field.TypeTime, value)
|
||||
_node.UpdatedAt = value
|
||||
}
|
||||
if value, ok := _c.mutation.AdoptDisplayName(); ok {
|
||||
_spec.SetField(identityadoptiondecision.FieldAdoptDisplayName, field.TypeBool, value)
|
||||
_node.AdoptDisplayName = value
|
||||
}
|
||||
if value, ok := _c.mutation.AdoptAvatar(); ok {
|
||||
_spec.SetField(identityadoptiondecision.FieldAdoptAvatar, field.TypeBool, value)
|
||||
_node.AdoptAvatar = value
|
||||
}
|
||||
if value, ok := _c.mutation.DecidedAt(); ok {
|
||||
_spec.SetField(identityadoptiondecision.FieldDecidedAt, field.TypeTime, value)
|
||||
_node.DecidedAt = value
|
||||
}
|
||||
if nodes := _c.mutation.PendingAuthSessionIDs(); len(nodes) > 0 {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
Rel: sqlgraph.O2O,
|
||||
Inverse: true,
|
||||
Table: identityadoptiondecision.PendingAuthSessionTable,
|
||||
Columns: []string{identityadoptiondecision.PendingAuthSessionColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: sqlgraph.NewFieldSpec(pendingauthsession.FieldID, field.TypeInt64),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
edge.Target.Nodes = append(edge.Target.Nodes, k)
|
||||
}
|
||||
_node.PendingAuthSessionID = nodes[0]
|
||||
_spec.Edges = append(_spec.Edges, edge)
|
||||
}
|
||||
if nodes := _c.mutation.IdentityIDs(); len(nodes) > 0 {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
Rel: sqlgraph.M2O,
|
||||
Inverse: true,
|
||||
Table: identityadoptiondecision.IdentityTable,
|
||||
Columns: []string{identityadoptiondecision.IdentityColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: sqlgraph.NewFieldSpec(authidentity.FieldID, field.TypeInt64),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
edge.Target.Nodes = append(edge.Target.Nodes, k)
|
||||
}
|
||||
_node.IdentityID = &nodes[0]
|
||||
_spec.Edges = append(_spec.Edges, edge)
|
||||
}
|
||||
return _node, _spec
|
||||
}
|
||||
|
||||
// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause
|
||||
// of the `INSERT` statement. For example:
|
||||
//
|
||||
// client.IdentityAdoptionDecision.Create().
|
||||
// SetCreatedAt(v).
|
||||
// OnConflict(
|
||||
// // Update the row with the new values
|
||||
// // the was proposed for insertion.
|
||||
// sql.ResolveWithNewValues(),
|
||||
// ).
|
||||
// // Override some of the fields with custom
|
||||
// // update values.
|
||||
// Update(func(u *ent.IdentityAdoptionDecisionUpsert) {
|
||||
// SetCreatedAt(v+v).
|
||||
// }).
|
||||
// Exec(ctx)
|
||||
func (_c *IdentityAdoptionDecisionCreate) OnConflict(opts ...sql.ConflictOption) *IdentityAdoptionDecisionUpsertOne {
|
||||
_c.conflict = opts
|
||||
return &IdentityAdoptionDecisionUpsertOne{
|
||||
create: _c,
|
||||
}
|
||||
}
|
||||
|
||||
// OnConflictColumns calls `OnConflict` and configures the columns
|
||||
// as conflict target. Using this option is equivalent to using:
|
||||
//
|
||||
// client.IdentityAdoptionDecision.Create().
|
||||
// OnConflict(sql.ConflictColumns(columns...)).
|
||||
// Exec(ctx)
|
||||
func (_c *IdentityAdoptionDecisionCreate) OnConflictColumns(columns ...string) *IdentityAdoptionDecisionUpsertOne {
|
||||
_c.conflict = append(_c.conflict, sql.ConflictColumns(columns...))
|
||||
return &IdentityAdoptionDecisionUpsertOne{
|
||||
create: _c,
|
||||
}
|
||||
}
|
||||
|
||||
type (
|
||||
// IdentityAdoptionDecisionUpsertOne is the builder for "upsert"-ing
|
||||
// one IdentityAdoptionDecision node.
|
||||
IdentityAdoptionDecisionUpsertOne struct {
|
||||
create *IdentityAdoptionDecisionCreate
|
||||
}
|
||||
|
||||
// IdentityAdoptionDecisionUpsert is the "OnConflict" setter.
|
||||
IdentityAdoptionDecisionUpsert struct {
|
||||
*sql.UpdateSet
|
||||
}
|
||||
)
|
||||
|
||||
// SetUpdatedAt sets the "updated_at" field.
|
||||
func (u *IdentityAdoptionDecisionUpsert) SetUpdatedAt(v time.Time) *IdentityAdoptionDecisionUpsert {
|
||||
u.Set(identityadoptiondecision.FieldUpdatedAt, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create.
|
||||
func (u *IdentityAdoptionDecisionUpsert) UpdateUpdatedAt() *IdentityAdoptionDecisionUpsert {
|
||||
u.SetExcluded(identityadoptiondecision.FieldUpdatedAt)
|
||||
return u
|
||||
}
|
||||
|
||||
// SetPendingAuthSessionID sets the "pending_auth_session_id" field.
|
||||
func (u *IdentityAdoptionDecisionUpsert) SetPendingAuthSessionID(v int64) *IdentityAdoptionDecisionUpsert {
|
||||
u.Set(identityadoptiondecision.FieldPendingAuthSessionID, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// UpdatePendingAuthSessionID sets the "pending_auth_session_id" field to the value that was provided on create.
|
||||
func (u *IdentityAdoptionDecisionUpsert) UpdatePendingAuthSessionID() *IdentityAdoptionDecisionUpsert {
|
||||
u.SetExcluded(identityadoptiondecision.FieldPendingAuthSessionID)
|
||||
return u
|
||||
}
|
||||
|
||||
// SetIdentityID sets the "identity_id" field.
|
||||
func (u *IdentityAdoptionDecisionUpsert) SetIdentityID(v int64) *IdentityAdoptionDecisionUpsert {
|
||||
u.Set(identityadoptiondecision.FieldIdentityID, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// UpdateIdentityID sets the "identity_id" field to the value that was provided on create.
|
||||
func (u *IdentityAdoptionDecisionUpsert) UpdateIdentityID() *IdentityAdoptionDecisionUpsert {
|
||||
u.SetExcluded(identityadoptiondecision.FieldIdentityID)
|
||||
return u
|
||||
}
|
||||
|
||||
// ClearIdentityID clears the value of the "identity_id" field.
|
||||
func (u *IdentityAdoptionDecisionUpsert) ClearIdentityID() *IdentityAdoptionDecisionUpsert {
|
||||
u.SetNull(identityadoptiondecision.FieldIdentityID)
|
||||
return u
|
||||
}
|
||||
|
||||
// SetAdoptDisplayName sets the "adopt_display_name" field.
|
||||
func (u *IdentityAdoptionDecisionUpsert) SetAdoptDisplayName(v bool) *IdentityAdoptionDecisionUpsert {
|
||||
u.Set(identityadoptiondecision.FieldAdoptDisplayName, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// UpdateAdoptDisplayName sets the "adopt_display_name" field to the value that was provided on create.
|
||||
func (u *IdentityAdoptionDecisionUpsert) UpdateAdoptDisplayName() *IdentityAdoptionDecisionUpsert {
|
||||
u.SetExcluded(identityadoptiondecision.FieldAdoptDisplayName)
|
||||
return u
|
||||
}
|
||||
|
||||
// SetAdoptAvatar sets the "adopt_avatar" field.
|
||||
func (u *IdentityAdoptionDecisionUpsert) SetAdoptAvatar(v bool) *IdentityAdoptionDecisionUpsert {
|
||||
u.Set(identityadoptiondecision.FieldAdoptAvatar, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// UpdateAdoptAvatar sets the "adopt_avatar" field to the value that was provided on create.
|
||||
func (u *IdentityAdoptionDecisionUpsert) UpdateAdoptAvatar() *IdentityAdoptionDecisionUpsert {
|
||||
u.SetExcluded(identityadoptiondecision.FieldAdoptAvatar)
|
||||
return u
|
||||
}
|
||||
|
||||
// UpdateNewValues updates the mutable fields using the new values that were set on create.
|
||||
// Using this option is equivalent to using:
|
||||
//
|
||||
// client.IdentityAdoptionDecision.Create().
|
||||
// OnConflict(
|
||||
// sql.ResolveWithNewValues(),
|
||||
// ).
|
||||
// Exec(ctx)
|
||||
func (u *IdentityAdoptionDecisionUpsertOne) UpdateNewValues() *IdentityAdoptionDecisionUpsertOne {
|
||||
u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues())
|
||||
u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) {
|
||||
if _, exists := u.create.mutation.CreatedAt(); exists {
|
||||
s.SetIgnore(identityadoptiondecision.FieldCreatedAt)
|
||||
}
|
||||
if _, exists := u.create.mutation.DecidedAt(); exists {
|
||||
s.SetIgnore(identityadoptiondecision.FieldDecidedAt)
|
||||
}
|
||||
}))
|
||||
return u
|
||||
}
|
||||
|
||||
// Ignore sets each column to itself in case of conflict.
|
||||
// Using this option is equivalent to using:
|
||||
//
|
||||
// client.IdentityAdoptionDecision.Create().
|
||||
// OnConflict(sql.ResolveWithIgnore()).
|
||||
// Exec(ctx)
|
||||
func (u *IdentityAdoptionDecisionUpsertOne) Ignore() *IdentityAdoptionDecisionUpsertOne {
|
||||
u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore())
|
||||
return u
|
||||
}
|
||||
|
||||
// DoNothing configures the conflict_action to `DO NOTHING`.
|
||||
// Supported only by SQLite and PostgreSQL.
|
||||
func (u *IdentityAdoptionDecisionUpsertOne) DoNothing() *IdentityAdoptionDecisionUpsertOne {
|
||||
u.create.conflict = append(u.create.conflict, sql.DoNothing())
|
||||
return u
|
||||
}
|
||||
|
||||
// Update allows overriding fields `UPDATE` values. See the IdentityAdoptionDecisionCreate.OnConflict
|
||||
// documentation for more info.
|
||||
func (u *IdentityAdoptionDecisionUpsertOne) Update(set func(*IdentityAdoptionDecisionUpsert)) *IdentityAdoptionDecisionUpsertOne {
|
||||
u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) {
|
||||
set(&IdentityAdoptionDecisionUpsert{UpdateSet: update})
|
||||
}))
|
||||
return u
|
||||
}
|
||||
|
||||
// SetUpdatedAt sets the "updated_at" field.
|
||||
func (u *IdentityAdoptionDecisionUpsertOne) SetUpdatedAt(v time.Time) *IdentityAdoptionDecisionUpsertOne {
|
||||
return u.Update(func(s *IdentityAdoptionDecisionUpsert) {
|
||||
s.SetUpdatedAt(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create.
|
||||
func (u *IdentityAdoptionDecisionUpsertOne) UpdateUpdatedAt() *IdentityAdoptionDecisionUpsertOne {
|
||||
return u.Update(func(s *IdentityAdoptionDecisionUpsert) {
|
||||
s.UpdateUpdatedAt()
|
||||
})
|
||||
}
|
||||
|
||||
// SetPendingAuthSessionID sets the "pending_auth_session_id" field.
|
||||
func (u *IdentityAdoptionDecisionUpsertOne) SetPendingAuthSessionID(v int64) *IdentityAdoptionDecisionUpsertOne {
|
||||
return u.Update(func(s *IdentityAdoptionDecisionUpsert) {
|
||||
s.SetPendingAuthSessionID(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdatePendingAuthSessionID sets the "pending_auth_session_id" field to the value that was provided on create.
|
||||
func (u *IdentityAdoptionDecisionUpsertOne) UpdatePendingAuthSessionID() *IdentityAdoptionDecisionUpsertOne {
|
||||
return u.Update(func(s *IdentityAdoptionDecisionUpsert) {
|
||||
s.UpdatePendingAuthSessionID()
|
||||
})
|
||||
}
|
||||
|
||||
// SetIdentityID sets the "identity_id" field.
|
||||
func (u *IdentityAdoptionDecisionUpsertOne) SetIdentityID(v int64) *IdentityAdoptionDecisionUpsertOne {
|
||||
return u.Update(func(s *IdentityAdoptionDecisionUpsert) {
|
||||
s.SetIdentityID(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateIdentityID sets the "identity_id" field to the value that was provided on create.
|
||||
func (u *IdentityAdoptionDecisionUpsertOne) UpdateIdentityID() *IdentityAdoptionDecisionUpsertOne {
|
||||
return u.Update(func(s *IdentityAdoptionDecisionUpsert) {
|
||||
s.UpdateIdentityID()
|
||||
})
|
||||
}
|
||||
|
||||
// ClearIdentityID clears the value of the "identity_id" field.
|
||||
func (u *IdentityAdoptionDecisionUpsertOne) ClearIdentityID() *IdentityAdoptionDecisionUpsertOne {
|
||||
return u.Update(func(s *IdentityAdoptionDecisionUpsert) {
|
||||
s.ClearIdentityID()
|
||||
})
|
||||
}
|
||||
|
||||
// SetAdoptDisplayName sets the "adopt_display_name" field.
|
||||
func (u *IdentityAdoptionDecisionUpsertOne) SetAdoptDisplayName(v bool) *IdentityAdoptionDecisionUpsertOne {
|
||||
return u.Update(func(s *IdentityAdoptionDecisionUpsert) {
|
||||
s.SetAdoptDisplayName(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateAdoptDisplayName sets the "adopt_display_name" field to the value that was provided on create.
|
||||
func (u *IdentityAdoptionDecisionUpsertOne) UpdateAdoptDisplayName() *IdentityAdoptionDecisionUpsertOne {
|
||||
return u.Update(func(s *IdentityAdoptionDecisionUpsert) {
|
||||
s.UpdateAdoptDisplayName()
|
||||
})
|
||||
}
|
||||
|
||||
// SetAdoptAvatar sets the "adopt_avatar" field.
|
||||
func (u *IdentityAdoptionDecisionUpsertOne) SetAdoptAvatar(v bool) *IdentityAdoptionDecisionUpsertOne {
|
||||
return u.Update(func(s *IdentityAdoptionDecisionUpsert) {
|
||||
s.SetAdoptAvatar(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateAdoptAvatar sets the "adopt_avatar" field to the value that was provided on create.
|
||||
func (u *IdentityAdoptionDecisionUpsertOne) UpdateAdoptAvatar() *IdentityAdoptionDecisionUpsertOne {
|
||||
return u.Update(func(s *IdentityAdoptionDecisionUpsert) {
|
||||
s.UpdateAdoptAvatar()
|
||||
})
|
||||
}
|
||||
|
||||
// Exec executes the query.
|
||||
func (u *IdentityAdoptionDecisionUpsertOne) Exec(ctx context.Context) error {
|
||||
if len(u.create.conflict) == 0 {
|
||||
return errors.New("ent: missing options for IdentityAdoptionDecisionCreate.OnConflict")
|
||||
}
|
||||
return u.create.Exec(ctx)
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (u *IdentityAdoptionDecisionUpsertOne) ExecX(ctx context.Context) {
|
||||
if err := u.create.Exec(ctx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// ID executes the UPSERT query and returns the inserted/updated ID.
func (u *IdentityAdoptionDecisionUpsertOne) ID(ctx context.Context) (id int64, err error) {
	node, err := u.create.Save(ctx)
	if err != nil {
		// id is the zero value here; the caller must check err first.
		return id, err
	}
	return node.ID, nil
}
|
||||
|
||||
// IDX is like ID, but panics if an error occurs.
|
||||
func (u *IdentityAdoptionDecisionUpsertOne) IDX(ctx context.Context) int64 {
|
||||
id, err := u.ID(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return id
|
||||
}
|
||||
|
||||
// IdentityAdoptionDecisionCreateBulk is the builder for creating many IdentityAdoptionDecision entities in bulk.
|
||||
type IdentityAdoptionDecisionCreateBulk struct {
|
||||
config
|
||||
err error
|
||||
builders []*IdentityAdoptionDecisionCreate
|
||||
conflict []sql.ConflictOption
|
||||
}
|
||||
|
||||
// Save creates the IdentityAdoptionDecision entities in the database.
// Each builder's hooks run around its own mutator; the mutators are chained
// so that the last one in the chain performs a single batch INSERT for all
// builders at once.
func (_c *IdentityAdoptionDecisionCreateBulk) Save(ctx context.Context) ([]*IdentityAdoptionDecision, error) {
	if _c.err != nil {
		return nil, _c.err
	}
	specs := make([]*sqlgraph.CreateSpec, len(_c.builders))
	nodes := make([]*IdentityAdoptionDecision, len(_c.builders))
	mutators := make([]Mutator, len(_c.builders))
	for i := range _c.builders {
		// Capture i and the root context per iteration for the closures below.
		func(i int, root context.Context) {
			builder := _c.builders[i]
			builder.defaults()
			var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
				mutation, ok := m.(*IdentityAdoptionDecisionMutation)
				if !ok {
					return nil, fmt.Errorf("unexpected mutation type %T", m)
				}
				if err := builder.check(); err != nil {
					return nil, err
				}
				builder.mutation = mutation
				var err error
				nodes[i], specs[i] = builder.createSpec()
				if i < len(mutators)-1 {
					// Not the last builder: hand off to the next mutator in
					// the chain so every builder's hooks run before the write.
					_, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation)
				} else {
					spec := &sqlgraph.BatchCreateSpec{Nodes: specs}
					spec.OnConflict = _c.conflict
					// Invoke the actual operation on the latest mutation in the chain.
					if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil {
						if sqlgraph.IsConstraintError(err) {
							err = &ConstraintError{msg: err.Error(), wrap: err}
						}
					}
				}
				if err != nil {
					return nil, err
				}
				mutation.id = &nodes[i].ID
				// Propagate the database-assigned ID back onto the node.
				if specs[i].ID.Value != nil {
					id := specs[i].ID.Value.(int64)
					nodes[i].ID = int64(id)
				}
				mutation.done = true
				return nodes[i], nil
			})
			// Wrap the mutator with this builder's hooks, outermost first.
			for i := len(builder.hooks) - 1; i >= 0; i-- {
				mut = builder.hooks[i](mut)
			}
			mutators[i] = mut
		}(i, ctx)
	}
	if len(mutators) > 0 {
		// Kick off the chain at the first mutator; it recurses through the rest.
		if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil {
			return nil, err
		}
	}
	return nodes, nil
}
|
||||
|
||||
// SaveX is like Save, but panics if an error occurs.
|
||||
func (_c *IdentityAdoptionDecisionCreateBulk) SaveX(ctx context.Context) []*IdentityAdoptionDecision {
|
||||
v, err := _c.Save(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// Exec executes the query.
|
||||
func (_c *IdentityAdoptionDecisionCreateBulk) Exec(ctx context.Context) error {
|
||||
_, err := _c.Save(ctx)
|
||||
return err
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (_c *IdentityAdoptionDecisionCreateBulk) ExecX(ctx context.Context) {
|
||||
if err := _c.Exec(ctx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause
// of the `INSERT` statement. For example:
//
//	client.IdentityAdoptionDecision.CreateBulk(builders...).
//		OnConflict(
//			// Update the row with the new values
//			// that were proposed for insertion.
//			sql.ResolveWithNewValues(),
//		).
//		// Override some of the fields with custom
//		// update values.
//		Update(func(u *ent.IdentityAdoptionDecisionUpsert) {
//			SetCreatedAt(v+v).
//		}).
//		Exec(ctx)
func (_c *IdentityAdoptionDecisionCreateBulk) OnConflict(opts ...sql.ConflictOption) *IdentityAdoptionDecisionUpsertBulk {
	// Note: this replaces any previously configured conflict options.
	_c.conflict = opts
	return &IdentityAdoptionDecisionUpsertBulk{
		create: _c,
	}
}
|
||||
|
||||
// OnConflictColumns calls `OnConflict` and configures the columns
|
||||
// as conflict target. Using this option is equivalent to using:
|
||||
//
|
||||
// client.IdentityAdoptionDecision.Create().
|
||||
// OnConflict(sql.ConflictColumns(columns...)).
|
||||
// Exec(ctx)
|
||||
func (_c *IdentityAdoptionDecisionCreateBulk) OnConflictColumns(columns ...string) *IdentityAdoptionDecisionUpsertBulk {
|
||||
_c.conflict = append(_c.conflict, sql.ConflictColumns(columns...))
|
||||
return &IdentityAdoptionDecisionUpsertBulk{
|
||||
create: _c,
|
||||
}
|
||||
}
|
||||
|
||||
// IdentityAdoptionDecisionUpsertBulk is the builder for "upsert"-ing
|
||||
// a bulk of IdentityAdoptionDecision nodes.
|
||||
type IdentityAdoptionDecisionUpsertBulk struct {
|
||||
create *IdentityAdoptionDecisionCreateBulk
|
||||
}
|
||||
|
||||
// UpdateNewValues updates the mutable fields using the new values that
// were set on create. Using this option is equivalent to using:
//
//	client.IdentityAdoptionDecision.Create().
//		OnConflict(
//			sql.ResolveWithNewValues(),
//		).
//		Exec(ctx)
func (u *IdentityAdoptionDecisionUpsertBulk) UpdateNewValues() *IdentityAdoptionDecisionUpsertBulk {
	u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues())
	// Appended after ResolveWithNewValues so it can override it: fields that
	// were explicitly set on create are excluded from the UPDATE set, keeping
	// their original stored values on conflict.
	u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) {
		for _, b := range u.create.builders {
			if _, exists := b.mutation.CreatedAt(); exists {
				s.SetIgnore(identityadoptiondecision.FieldCreatedAt)
			}
			if _, exists := b.mutation.DecidedAt(); exists {
				s.SetIgnore(identityadoptiondecision.FieldDecidedAt)
			}
		}
	}))
	return u
}
|
||||
|
||||
// Ignore sets each column to itself in case of conflict.
|
||||
// Using this option is equivalent to using:
|
||||
//
|
||||
// client.IdentityAdoptionDecision.Create().
|
||||
// OnConflict(sql.ResolveWithIgnore()).
|
||||
// Exec(ctx)
|
||||
func (u *IdentityAdoptionDecisionUpsertBulk) Ignore() *IdentityAdoptionDecisionUpsertBulk {
|
||||
u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore())
|
||||
return u
|
||||
}
|
||||
|
||||
// DoNothing configures the conflict_action to `DO NOTHING`.
|
||||
// Supported only by SQLite and PostgreSQL.
|
||||
func (u *IdentityAdoptionDecisionUpsertBulk) DoNothing() *IdentityAdoptionDecisionUpsertBulk {
|
||||
u.create.conflict = append(u.create.conflict, sql.DoNothing())
|
||||
return u
|
||||
}
|
||||
|
||||
// Update allows overriding fields `UPDATE` values. See the IdentityAdoptionDecisionCreateBulk.OnConflict
|
||||
// documentation for more info.
|
||||
func (u *IdentityAdoptionDecisionUpsertBulk) Update(set func(*IdentityAdoptionDecisionUpsert)) *IdentityAdoptionDecisionUpsertBulk {
|
||||
u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) {
|
||||
set(&IdentityAdoptionDecisionUpsert{UpdateSet: update})
|
||||
}))
|
||||
return u
|
||||
}
|
||||
|
||||
// SetUpdatedAt sets the "updated_at" field.
|
||||
func (u *IdentityAdoptionDecisionUpsertBulk) SetUpdatedAt(v time.Time) *IdentityAdoptionDecisionUpsertBulk {
|
||||
return u.Update(func(s *IdentityAdoptionDecisionUpsert) {
|
||||
s.SetUpdatedAt(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create.
|
||||
func (u *IdentityAdoptionDecisionUpsertBulk) UpdateUpdatedAt() *IdentityAdoptionDecisionUpsertBulk {
|
||||
return u.Update(func(s *IdentityAdoptionDecisionUpsert) {
|
||||
s.UpdateUpdatedAt()
|
||||
})
|
||||
}
|
||||
|
||||
// SetPendingAuthSessionID sets the "pending_auth_session_id" field.
|
||||
func (u *IdentityAdoptionDecisionUpsertBulk) SetPendingAuthSessionID(v int64) *IdentityAdoptionDecisionUpsertBulk {
|
||||
return u.Update(func(s *IdentityAdoptionDecisionUpsert) {
|
||||
s.SetPendingAuthSessionID(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdatePendingAuthSessionID sets the "pending_auth_session_id" field to the value that was provided on create.
|
||||
func (u *IdentityAdoptionDecisionUpsertBulk) UpdatePendingAuthSessionID() *IdentityAdoptionDecisionUpsertBulk {
|
||||
return u.Update(func(s *IdentityAdoptionDecisionUpsert) {
|
||||
s.UpdatePendingAuthSessionID()
|
||||
})
|
||||
}
|
||||
|
||||
// SetIdentityID sets the "identity_id" field.
|
||||
func (u *IdentityAdoptionDecisionUpsertBulk) SetIdentityID(v int64) *IdentityAdoptionDecisionUpsertBulk {
|
||||
return u.Update(func(s *IdentityAdoptionDecisionUpsert) {
|
||||
s.SetIdentityID(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateIdentityID sets the "identity_id" field to the value that was provided on create.
|
||||
func (u *IdentityAdoptionDecisionUpsertBulk) UpdateIdentityID() *IdentityAdoptionDecisionUpsertBulk {
|
||||
return u.Update(func(s *IdentityAdoptionDecisionUpsert) {
|
||||
s.UpdateIdentityID()
|
||||
})
|
||||
}
|
||||
|
||||
// ClearIdentityID clears the value of the "identity_id" field.
|
||||
func (u *IdentityAdoptionDecisionUpsertBulk) ClearIdentityID() *IdentityAdoptionDecisionUpsertBulk {
|
||||
return u.Update(func(s *IdentityAdoptionDecisionUpsert) {
|
||||
s.ClearIdentityID()
|
||||
})
|
||||
}
|
||||
|
||||
// SetAdoptDisplayName sets the "adopt_display_name" field.
|
||||
func (u *IdentityAdoptionDecisionUpsertBulk) SetAdoptDisplayName(v bool) *IdentityAdoptionDecisionUpsertBulk {
|
||||
return u.Update(func(s *IdentityAdoptionDecisionUpsert) {
|
||||
s.SetAdoptDisplayName(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateAdoptDisplayName sets the "adopt_display_name" field to the value that was provided on create.
|
||||
func (u *IdentityAdoptionDecisionUpsertBulk) UpdateAdoptDisplayName() *IdentityAdoptionDecisionUpsertBulk {
|
||||
return u.Update(func(s *IdentityAdoptionDecisionUpsert) {
|
||||
s.UpdateAdoptDisplayName()
|
||||
})
|
||||
}
|
||||
|
||||
// SetAdoptAvatar sets the "adopt_avatar" field.
|
||||
func (u *IdentityAdoptionDecisionUpsertBulk) SetAdoptAvatar(v bool) *IdentityAdoptionDecisionUpsertBulk {
|
||||
return u.Update(func(s *IdentityAdoptionDecisionUpsert) {
|
||||
s.SetAdoptAvatar(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateAdoptAvatar sets the "adopt_avatar" field to the value that was provided on create.
|
||||
func (u *IdentityAdoptionDecisionUpsertBulk) UpdateAdoptAvatar() *IdentityAdoptionDecisionUpsertBulk {
|
||||
return u.Update(func(s *IdentityAdoptionDecisionUpsert) {
|
||||
s.UpdateAdoptAvatar()
|
||||
})
|
||||
}
|
||||
|
||||
// Exec executes the query.
|
||||
func (u *IdentityAdoptionDecisionUpsertBulk) Exec(ctx context.Context) error {
|
||||
if u.create.err != nil {
|
||||
return u.create.err
|
||||
}
|
||||
for i, b := range u.create.builders {
|
||||
if len(b.conflict) != 0 {
|
||||
return fmt.Errorf("ent: OnConflict was set for builder %d. Set it on the IdentityAdoptionDecisionCreateBulk instead", i)
|
||||
}
|
||||
}
|
||||
if len(u.create.conflict) == 0 {
|
||||
return errors.New("ent: missing options for IdentityAdoptionDecisionCreateBulk.OnConflict")
|
||||
}
|
||||
return u.create.Exec(ctx)
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (u *IdentityAdoptionDecisionUpsertBulk) ExecX(ctx context.Context) {
|
||||
if err := u.create.Exec(ctx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
88
backend/ent/identityadoptiondecision_delete.go
Normal file
@@ -0,0 +1,88 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package ent
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"entgo.io/ent/schema/field"
|
||||
"github.com/Wei-Shaw/sub2api/ent/identityadoptiondecision"
|
||||
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||
)
|
||||
|
||||
// IdentityAdoptionDecisionDelete is the builder for deleting a IdentityAdoptionDecision entity.
|
||||
type IdentityAdoptionDecisionDelete struct {
|
||||
config
|
||||
hooks []Hook
|
||||
mutation *IdentityAdoptionDecisionMutation
|
||||
}
|
||||
|
||||
// Where appends a list predicates to the IdentityAdoptionDecisionDelete builder.
|
||||
func (_d *IdentityAdoptionDecisionDelete) Where(ps ...predicate.IdentityAdoptionDecision) *IdentityAdoptionDecisionDelete {
|
||||
_d.mutation.Where(ps...)
|
||||
return _d
|
||||
}
|
||||
|
||||
// Exec executes the deletion query and returns how many vertices were deleted.
|
||||
func (_d *IdentityAdoptionDecisionDelete) Exec(ctx context.Context) (int, error) {
|
||||
return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks)
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (_d *IdentityAdoptionDecisionDelete) ExecX(ctx context.Context) int {
|
||||
n, err := _d.Exec(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
// sqlExec builds and executes the DELETE statement for the builder's
// predicates and reports how many rows were removed.
func (_d *IdentityAdoptionDecisionDelete) sqlExec(ctx context.Context) (int, error) {
	_spec := sqlgraph.NewDeleteSpec(identityadoptiondecision.Table, sqlgraph.NewFieldSpec(identityadoptiondecision.FieldID, field.TypeInt64))
	if ps := _d.mutation.predicates; len(ps) > 0 {
		// Apply all accumulated predicates to the DELETE's WHERE clause.
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec)
	if err != nil && sqlgraph.IsConstraintError(err) {
		// Surface FK/unique violations as the typed ConstraintError.
		err = &ConstraintError{msg: err.Error(), wrap: err}
	}
	_d.mutation.done = true
	return affected, err
}
|
||||
|
||||
// IdentityAdoptionDecisionDeleteOne is the builder for deleting a single IdentityAdoptionDecision entity.
|
||||
type IdentityAdoptionDecisionDeleteOne struct {
|
||||
_d *IdentityAdoptionDecisionDelete
|
||||
}
|
||||
|
||||
// Where appends a list predicates to the IdentityAdoptionDecisionDelete builder.
|
||||
func (_d *IdentityAdoptionDecisionDeleteOne) Where(ps ...predicate.IdentityAdoptionDecision) *IdentityAdoptionDecisionDeleteOne {
|
||||
_d._d.mutation.Where(ps...)
|
||||
return _d
|
||||
}
|
||||
|
||||
// Exec executes the deletion query.
|
||||
func (_d *IdentityAdoptionDecisionDeleteOne) Exec(ctx context.Context) error {
|
||||
n, err := _d._d.Exec(ctx)
|
||||
switch {
|
||||
case err != nil:
|
||||
return err
|
||||
case n == 0:
|
||||
return &NotFoundError{identityadoptiondecision.Label}
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (_d *IdentityAdoptionDecisionDeleteOne) ExecX(ctx context.Context) {
|
||||
if err := _d.Exec(ctx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
721
backend/ent/identityadoptiondecision_query.go
Normal file
@@ -0,0 +1,721 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package ent
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math"
|
||||
|
||||
"entgo.io/ent"
|
||||
"entgo.io/ent/dialect"
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"entgo.io/ent/schema/field"
|
||||
"github.com/Wei-Shaw/sub2api/ent/authidentity"
|
||||
"github.com/Wei-Shaw/sub2api/ent/identityadoptiondecision"
|
||||
"github.com/Wei-Shaw/sub2api/ent/pendingauthsession"
|
||||
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||
)
|
||||
|
||||
// IdentityAdoptionDecisionQuery is the builder for querying IdentityAdoptionDecision entities.
|
||||
type IdentityAdoptionDecisionQuery struct {
|
||||
config
|
||||
ctx *QueryContext
|
||||
order []identityadoptiondecision.OrderOption
|
||||
inters []Interceptor
|
||||
predicates []predicate.IdentityAdoptionDecision
|
||||
withPendingAuthSession *PendingAuthSessionQuery
|
||||
withIdentity *AuthIdentityQuery
|
||||
modifiers []func(*sql.Selector)
|
||||
// intermediate query (i.e. traversal path).
|
||||
sql *sql.Selector
|
||||
path func(context.Context) (*sql.Selector, error)
|
||||
}
|
||||
|
||||
// Where adds a new predicate for the IdentityAdoptionDecisionQuery builder.
|
||||
func (_q *IdentityAdoptionDecisionQuery) Where(ps ...predicate.IdentityAdoptionDecision) *IdentityAdoptionDecisionQuery {
|
||||
_q.predicates = append(_q.predicates, ps...)
|
||||
return _q
|
||||
}
|
||||
|
||||
// Limit the number of records to be returned by this query.
|
||||
func (_q *IdentityAdoptionDecisionQuery) Limit(limit int) *IdentityAdoptionDecisionQuery {
|
||||
_q.ctx.Limit = &limit
|
||||
return _q
|
||||
}
|
||||
|
||||
// Offset to start from.
|
||||
func (_q *IdentityAdoptionDecisionQuery) Offset(offset int) *IdentityAdoptionDecisionQuery {
|
||||
_q.ctx.Offset = &offset
|
||||
return _q
|
||||
}
|
||||
|
||||
// Unique configures the query builder to filter duplicate records on query.
|
||||
// By default, unique is set to true, and can be disabled using this method.
|
||||
func (_q *IdentityAdoptionDecisionQuery) Unique(unique bool) *IdentityAdoptionDecisionQuery {
|
||||
_q.ctx.Unique = &unique
|
||||
return _q
|
||||
}
|
||||
|
||||
// Order specifies how the records should be ordered.
|
||||
func (_q *IdentityAdoptionDecisionQuery) Order(o ...identityadoptiondecision.OrderOption) *IdentityAdoptionDecisionQuery {
|
||||
_q.order = append(_q.order, o...)
|
||||
return _q
|
||||
}
|
||||
|
||||
// QueryPendingAuthSession chains the current query on the "pending_auth_session" edge.
|
||||
func (_q *IdentityAdoptionDecisionQuery) QueryPendingAuthSession() *PendingAuthSessionQuery {
|
||||
query := (&PendingAuthSessionClient{config: _q.config}).Query()
|
||||
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
|
||||
if err := _q.prepareQuery(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
selector := _q.sqlQuery(ctx)
|
||||
if err := selector.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
step := sqlgraph.NewStep(
|
||||
sqlgraph.From(identityadoptiondecision.Table, identityadoptiondecision.FieldID, selector),
|
||||
sqlgraph.To(pendingauthsession.Table, pendingauthsession.FieldID),
|
||||
sqlgraph.Edge(sqlgraph.O2O, true, identityadoptiondecision.PendingAuthSessionTable, identityadoptiondecision.PendingAuthSessionColumn),
|
||||
)
|
||||
fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step)
|
||||
return fromU, nil
|
||||
}
|
||||
return query
|
||||
}
|
||||
|
||||
// QueryIdentity chains the current query on the "identity" edge.
|
||||
func (_q *IdentityAdoptionDecisionQuery) QueryIdentity() *AuthIdentityQuery {
|
||||
query := (&AuthIdentityClient{config: _q.config}).Query()
|
||||
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
|
||||
if err := _q.prepareQuery(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
selector := _q.sqlQuery(ctx)
|
||||
if err := selector.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
step := sqlgraph.NewStep(
|
||||
sqlgraph.From(identityadoptiondecision.Table, identityadoptiondecision.FieldID, selector),
|
||||
sqlgraph.To(authidentity.Table, authidentity.FieldID),
|
||||
sqlgraph.Edge(sqlgraph.M2O, true, identityadoptiondecision.IdentityTable, identityadoptiondecision.IdentityColumn),
|
||||
)
|
||||
fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step)
|
||||
return fromU, nil
|
||||
}
|
||||
return query
|
||||
}
|
||||
|
||||
// First returns the first IdentityAdoptionDecision entity from the query.
|
||||
// Returns a *NotFoundError when no IdentityAdoptionDecision was found.
|
||||
func (_q *IdentityAdoptionDecisionQuery) First(ctx context.Context) (*IdentityAdoptionDecision, error) {
|
||||
nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(nodes) == 0 {
|
||||
return nil, &NotFoundError{identityadoptiondecision.Label}
|
||||
}
|
||||
return nodes[0], nil
|
||||
}
|
||||
|
||||
// FirstX is like First, but panics if an error occurs.
|
||||
func (_q *IdentityAdoptionDecisionQuery) FirstX(ctx context.Context) *IdentityAdoptionDecision {
|
||||
node, err := _q.First(ctx)
|
||||
if err != nil && !IsNotFound(err) {
|
||||
panic(err)
|
||||
}
|
||||
return node
|
||||
}
|
||||
|
||||
// FirstID returns the first IdentityAdoptionDecision ID from the query.
|
||||
// Returns a *NotFoundError when no IdentityAdoptionDecision ID was found.
|
||||
func (_q *IdentityAdoptionDecisionQuery) FirstID(ctx context.Context) (id int64, err error) {
|
||||
var ids []int64
|
||||
if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil {
|
||||
return
|
||||
}
|
||||
if len(ids) == 0 {
|
||||
err = &NotFoundError{identityadoptiondecision.Label}
|
||||
return
|
||||
}
|
||||
return ids[0], nil
|
||||
}
|
||||
|
||||
// FirstIDX is like FirstID, but panics if an error occurs.
|
||||
func (_q *IdentityAdoptionDecisionQuery) FirstIDX(ctx context.Context) int64 {
|
||||
id, err := _q.FirstID(ctx)
|
||||
if err != nil && !IsNotFound(err) {
|
||||
panic(err)
|
||||
}
|
||||
return id
|
||||
}
|
||||
|
||||
// Only returns a single IdentityAdoptionDecision entity found by the query, ensuring it only returns one.
|
||||
// Returns a *NotSingularError when more than one IdentityAdoptionDecision entity is found.
|
||||
// Returns a *NotFoundError when no IdentityAdoptionDecision entities are found.
|
||||
func (_q *IdentityAdoptionDecisionQuery) Only(ctx context.Context) (*IdentityAdoptionDecision, error) {
|
||||
nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
switch len(nodes) {
|
||||
case 1:
|
||||
return nodes[0], nil
|
||||
case 0:
|
||||
return nil, &NotFoundError{identityadoptiondecision.Label}
|
||||
default:
|
||||
return nil, &NotSingularError{identityadoptiondecision.Label}
|
||||
}
|
||||
}
|
||||
|
||||
// OnlyX is like Only, but panics if an error occurs.
|
||||
func (_q *IdentityAdoptionDecisionQuery) OnlyX(ctx context.Context) *IdentityAdoptionDecision {
|
||||
node, err := _q.Only(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return node
|
||||
}
|
||||
|
||||
// OnlyID is like Only, but returns the only IdentityAdoptionDecision ID in the query.
|
||||
// Returns a *NotSingularError when more than one IdentityAdoptionDecision ID is found.
|
||||
// Returns a *NotFoundError when no entities are found.
|
||||
func (_q *IdentityAdoptionDecisionQuery) OnlyID(ctx context.Context) (id int64, err error) {
|
||||
var ids []int64
|
||||
if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil {
|
||||
return
|
||||
}
|
||||
switch len(ids) {
|
||||
case 1:
|
||||
id = ids[0]
|
||||
case 0:
|
||||
err = &NotFoundError{identityadoptiondecision.Label}
|
||||
default:
|
||||
err = &NotSingularError{identityadoptiondecision.Label}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// OnlyIDX is like OnlyID, but panics if an error occurs.
|
||||
func (_q *IdentityAdoptionDecisionQuery) OnlyIDX(ctx context.Context) int64 {
|
||||
id, err := _q.OnlyID(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return id
|
||||
}
|
||||
|
||||
// All executes the query and returns a list of IdentityAdoptionDecisions.
|
||||
func (_q *IdentityAdoptionDecisionQuery) All(ctx context.Context) ([]*IdentityAdoptionDecision, error) {
|
||||
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll)
|
||||
if err := _q.prepareQuery(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
qr := querierAll[[]*IdentityAdoptionDecision, *IdentityAdoptionDecisionQuery]()
|
||||
return withInterceptors[[]*IdentityAdoptionDecision](ctx, _q, qr, _q.inters)
|
||||
}
|
||||
|
||||
// AllX is like All, but panics if an error occurs.
|
||||
func (_q *IdentityAdoptionDecisionQuery) AllX(ctx context.Context) []*IdentityAdoptionDecision {
|
||||
nodes, err := _q.All(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return nodes
|
||||
}
|
||||
|
||||
// IDs executes the query and returns a list of IdentityAdoptionDecision IDs.
|
||||
func (_q *IdentityAdoptionDecisionQuery) IDs(ctx context.Context) (ids []int64, err error) {
|
||||
if _q.ctx.Unique == nil && _q.path != nil {
|
||||
_q.Unique(true)
|
||||
}
|
||||
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs)
|
||||
if err = _q.Select(identityadoptiondecision.FieldID).Scan(ctx, &ids); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ids, nil
|
||||
}
|
||||
|
||||
// IDsX is like IDs, but panics if an error occurs.
|
||||
func (_q *IdentityAdoptionDecisionQuery) IDsX(ctx context.Context) []int64 {
|
||||
ids, err := _q.IDs(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return ids
|
||||
}
|
||||
|
||||
// Count returns the count of the given query.
|
||||
func (_q *IdentityAdoptionDecisionQuery) Count(ctx context.Context) (int, error) {
|
||||
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount)
|
||||
if err := _q.prepareQuery(ctx); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return withInterceptors[int](ctx, _q, querierCount[*IdentityAdoptionDecisionQuery](), _q.inters)
|
||||
}
|
||||
|
||||
// CountX is like Count, but panics if an error occurs.
|
||||
func (_q *IdentityAdoptionDecisionQuery) CountX(ctx context.Context) int {
|
||||
count, err := _q.Count(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return count
|
||||
}
|
||||
|
||||
// Exist returns true if the query has elements in the graph.
|
||||
func (_q *IdentityAdoptionDecisionQuery) Exist(ctx context.Context) (bool, error) {
|
||||
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist)
|
||||
switch _, err := _q.FirstID(ctx); {
|
||||
case IsNotFound(err):
|
||||
return false, nil
|
||||
case err != nil:
|
||||
return false, fmt.Errorf("ent: check existence: %w", err)
|
||||
default:
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
|
||||
// ExistX is like Exist, but panics if an error occurs.
|
||||
func (_q *IdentityAdoptionDecisionQuery) ExistX(ctx context.Context) bool {
|
||||
exist, err := _q.Exist(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return exist
|
||||
}
|
||||
|
||||
// Clone returns a duplicate of the IdentityAdoptionDecisionQuery builder, including all associated steps. It can be
// used to prepare common query builders and use them differently after the clone is made.
func (_q *IdentityAdoptionDecisionQuery) Clone() *IdentityAdoptionDecisionQuery {
	// Nil-safe: cloning a nil query yields nil rather than panicking.
	if _q == nil {
		return nil
	}
	// NOTE(review): the modifiers slice is not copied into the clone —
	// confirm this is intentional.
	return &IdentityAdoptionDecisionQuery{
		config:                 _q.config,
		ctx:                    _q.ctx.Clone(),
		order:                  append([]identityadoptiondecision.OrderOption{}, _q.order...),
		inters:                 append([]Interceptor{}, _q.inters...),
		predicates:             append([]predicate.IdentityAdoptionDecision{}, _q.predicates...),
		withPendingAuthSession: _q.withPendingAuthSession.Clone(),
		withIdentity:           _q.withIdentity.Clone(),
		// clone intermediate query.
		sql:  _q.sql.Clone(),
		path: _q.path,
	}
}
|
||||
|
||||
// WithPendingAuthSession tells the query-builder to eager-load the nodes that are connected to
|
||||
// the "pending_auth_session" edge. The optional arguments are used to configure the query builder of the edge.
|
||||
func (_q *IdentityAdoptionDecisionQuery) WithPendingAuthSession(opts ...func(*PendingAuthSessionQuery)) *IdentityAdoptionDecisionQuery {
|
||||
query := (&PendingAuthSessionClient{config: _q.config}).Query()
|
||||
for _, opt := range opts {
|
||||
opt(query)
|
||||
}
|
||||
_q.withPendingAuthSession = query
|
||||
return _q
|
||||
}
|
||||
|
||||
// WithIdentity tells the query-builder to eager-load the nodes that are connected to
|
||||
// the "identity" edge. The optional arguments are used to configure the query builder of the edge.
|
||||
func (_q *IdentityAdoptionDecisionQuery) WithIdentity(opts ...func(*AuthIdentityQuery)) *IdentityAdoptionDecisionQuery {
|
||||
query := (&AuthIdentityClient{config: _q.config}).Query()
|
||||
for _, opt := range opts {
|
||||
opt(query)
|
||||
}
|
||||
_q.withIdentity = query
|
||||
return _q
|
||||
}
|
||||
|
||||
// GroupBy is used to group vertices by one or more fields/columns.
|
||||
// It is often used with aggregate functions, like: count, max, mean, min, sum.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// var v []struct {
|
||||
// CreatedAt time.Time `json:"created_at,omitempty"`
|
||||
// Count int `json:"count,omitempty"`
|
||||
// }
|
||||
//
|
||||
// client.IdentityAdoptionDecision.Query().
|
||||
// GroupBy(identityadoptiondecision.FieldCreatedAt).
|
||||
// Aggregate(ent.Count()).
|
||||
// Scan(ctx, &v)
|
||||
func (_q *IdentityAdoptionDecisionQuery) GroupBy(field string, fields ...string) *IdentityAdoptionDecisionGroupBy {
|
||||
_q.ctx.Fields = append([]string{field}, fields...)
|
||||
grbuild := &IdentityAdoptionDecisionGroupBy{build: _q}
|
||||
grbuild.flds = &_q.ctx.Fields
|
||||
grbuild.label = identityadoptiondecision.Label
|
||||
grbuild.scan = grbuild.Scan
|
||||
return grbuild
|
||||
}
|
||||
|
||||
// Select allows the selection one or more fields/columns for the given query,
|
||||
// instead of selecting all fields in the entity.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// var v []struct {
|
||||
// CreatedAt time.Time `json:"created_at,omitempty"`
|
||||
// }
|
||||
//
|
||||
// client.IdentityAdoptionDecision.Query().
|
||||
// Select(identityadoptiondecision.FieldCreatedAt).
|
||||
// Scan(ctx, &v)
|
||||
func (_q *IdentityAdoptionDecisionQuery) Select(fields ...string) *IdentityAdoptionDecisionSelect {
|
||||
_q.ctx.Fields = append(_q.ctx.Fields, fields...)
|
||||
sbuild := &IdentityAdoptionDecisionSelect{IdentityAdoptionDecisionQuery: _q}
|
||||
sbuild.label = identityadoptiondecision.Label
|
||||
sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan
|
||||
return sbuild
|
||||
}
|
||||
|
||||
// Aggregate returns a IdentityAdoptionDecisionSelect configured with the given aggregations.
|
||||
func (_q *IdentityAdoptionDecisionQuery) Aggregate(fns ...AggregateFunc) *IdentityAdoptionDecisionSelect {
|
||||
return _q.Select().Aggregate(fns...)
|
||||
}
|
||||
|
||||
// prepareQuery validates the query before execution: it runs registered
// traverser interceptors, checks that every selected field is a valid column,
// and resolves the intermediate traversal path (if any) into the builder's
// selector.
func (_q *IdentityAdoptionDecisionQuery) prepareQuery(ctx context.Context) error {
	for _, inter := range _q.inters {
		if inter == nil {
			return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
		}
		if trv, ok := inter.(Traverser); ok {
			if err := trv.Traverse(ctx, _q); err != nil {
				return err
			}
		}
	}
	for _, f := range _q.ctx.Fields {
		if !identityadoptiondecision.ValidColumn(f) {
			return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
		}
	}
	if _q.path != nil {
		// The query was reached via a graph traversal; materialize the
		// traversal into the selector this query builds on.
		prev, err := _q.path(ctx)
		if err != nil {
			return err
		}
		_q.sql = prev
	}
	return nil
}
|
||||
|
||||
func (_q *IdentityAdoptionDecisionQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*IdentityAdoptionDecision, error) {
|
||||
var (
|
||||
nodes = []*IdentityAdoptionDecision{}
|
||||
_spec = _q.querySpec()
|
||||
loadedTypes = [2]bool{
|
||||
_q.withPendingAuthSession != nil,
|
||||
_q.withIdentity != nil,
|
||||
}
|
||||
)
|
||||
_spec.ScanValues = func(columns []string) ([]any, error) {
|
||||
return (*IdentityAdoptionDecision).scanValues(nil, columns)
|
||||
}
|
||||
_spec.Assign = func(columns []string, values []any) error {
|
||||
node := &IdentityAdoptionDecision{config: _q.config}
|
||||
nodes = append(nodes, node)
|
||||
node.Edges.loadedTypes = loadedTypes
|
||||
return node.assignValues(columns, values)
|
||||
}
|
||||
if len(_q.modifiers) > 0 {
|
||||
_spec.Modifiers = _q.modifiers
|
||||
}
|
||||
for i := range hooks {
|
||||
hooks[i](ctx, _spec)
|
||||
}
|
||||
if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(nodes) == 0 {
|
||||
return nodes, nil
|
||||
}
|
||||
if query := _q.withPendingAuthSession; query != nil {
|
||||
if err := _q.loadPendingAuthSession(ctx, query, nodes, nil,
|
||||
func(n *IdentityAdoptionDecision, e *PendingAuthSession) { n.Edges.PendingAuthSession = e }); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if query := _q.withIdentity; query != nil {
|
||||
if err := _q.loadIdentity(ctx, query, nodes, nil,
|
||||
func(n *IdentityAdoptionDecision, e *AuthIdentity) { n.Edges.Identity = e }); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return nodes, nil
|
||||
}
|
||||
|
||||
func (_q *IdentityAdoptionDecisionQuery) loadPendingAuthSession(ctx context.Context, query *PendingAuthSessionQuery, nodes []*IdentityAdoptionDecision, init func(*IdentityAdoptionDecision), assign func(*IdentityAdoptionDecision, *PendingAuthSession)) error {
|
||||
ids := make([]int64, 0, len(nodes))
|
||||
nodeids := make(map[int64][]*IdentityAdoptionDecision)
|
||||
for i := range nodes {
|
||||
fk := nodes[i].PendingAuthSessionID
|
||||
if _, ok := nodeids[fk]; !ok {
|
||||
ids = append(ids, fk)
|
||||
}
|
||||
nodeids[fk] = append(nodeids[fk], nodes[i])
|
||||
}
|
||||
if len(ids) == 0 {
|
||||
return nil
|
||||
}
|
||||
query.Where(pendingauthsession.IDIn(ids...))
|
||||
neighbors, err := query.All(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, n := range neighbors {
|
||||
nodes, ok := nodeids[n.ID]
|
||||
if !ok {
|
||||
return fmt.Errorf(`unexpected foreign-key "pending_auth_session_id" returned %v`, n.ID)
|
||||
}
|
||||
for i := range nodes {
|
||||
assign(nodes[i], n)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (_q *IdentityAdoptionDecisionQuery) loadIdentity(ctx context.Context, query *AuthIdentityQuery, nodes []*IdentityAdoptionDecision, init func(*IdentityAdoptionDecision), assign func(*IdentityAdoptionDecision, *AuthIdentity)) error {
|
||||
ids := make([]int64, 0, len(nodes))
|
||||
nodeids := make(map[int64][]*IdentityAdoptionDecision)
|
||||
for i := range nodes {
|
||||
if nodes[i].IdentityID == nil {
|
||||
continue
|
||||
}
|
||||
fk := *nodes[i].IdentityID
|
||||
if _, ok := nodeids[fk]; !ok {
|
||||
ids = append(ids, fk)
|
||||
}
|
||||
nodeids[fk] = append(nodeids[fk], nodes[i])
|
||||
}
|
||||
if len(ids) == 0 {
|
||||
return nil
|
||||
}
|
||||
query.Where(authidentity.IDIn(ids...))
|
||||
neighbors, err := query.All(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, n := range neighbors {
|
||||
nodes, ok := nodeids[n.ID]
|
||||
if !ok {
|
||||
return fmt.Errorf(`unexpected foreign-key "identity_id" returned %v`, n.ID)
|
||||
}
|
||||
for i := range nodes {
|
||||
assign(nodes[i], n)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (_q *IdentityAdoptionDecisionQuery) sqlCount(ctx context.Context) (int, error) {
|
||||
_spec := _q.querySpec()
|
||||
if len(_q.modifiers) > 0 {
|
||||
_spec.Modifiers = _q.modifiers
|
||||
}
|
||||
_spec.Node.Columns = _q.ctx.Fields
|
||||
if len(_q.ctx.Fields) > 0 {
|
||||
_spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique
|
||||
}
|
||||
return sqlgraph.CountNodes(ctx, _q.driver, _spec)
|
||||
}
|
||||
|
||||
func (_q *IdentityAdoptionDecisionQuery) querySpec() *sqlgraph.QuerySpec {
|
||||
_spec := sqlgraph.NewQuerySpec(identityadoptiondecision.Table, identityadoptiondecision.Columns, sqlgraph.NewFieldSpec(identityadoptiondecision.FieldID, field.TypeInt64))
|
||||
_spec.From = _q.sql
|
||||
if unique := _q.ctx.Unique; unique != nil {
|
||||
_spec.Unique = *unique
|
||||
} else if _q.path != nil {
|
||||
_spec.Unique = true
|
||||
}
|
||||
if fields := _q.ctx.Fields; len(fields) > 0 {
|
||||
_spec.Node.Columns = make([]string, 0, len(fields))
|
||||
_spec.Node.Columns = append(_spec.Node.Columns, identityadoptiondecision.FieldID)
|
||||
for i := range fields {
|
||||
if fields[i] != identityadoptiondecision.FieldID {
|
||||
_spec.Node.Columns = append(_spec.Node.Columns, fields[i])
|
||||
}
|
||||
}
|
||||
if _q.withPendingAuthSession != nil {
|
||||
_spec.Node.AddColumnOnce(identityadoptiondecision.FieldPendingAuthSessionID)
|
||||
}
|
||||
if _q.withIdentity != nil {
|
||||
_spec.Node.AddColumnOnce(identityadoptiondecision.FieldIdentityID)
|
||||
}
|
||||
}
|
||||
if ps := _q.predicates; len(ps) > 0 {
|
||||
_spec.Predicate = func(selector *sql.Selector) {
|
||||
for i := range ps {
|
||||
ps[i](selector)
|
||||
}
|
||||
}
|
||||
}
|
||||
if limit := _q.ctx.Limit; limit != nil {
|
||||
_spec.Limit = *limit
|
||||
}
|
||||
if offset := _q.ctx.Offset; offset != nil {
|
||||
_spec.Offset = *offset
|
||||
}
|
||||
if ps := _q.order; len(ps) > 0 {
|
||||
_spec.Order = func(selector *sql.Selector) {
|
||||
for i := range ps {
|
||||
ps[i](selector)
|
||||
}
|
||||
}
|
||||
}
|
||||
return _spec
|
||||
}
|
||||
|
||||
func (_q *IdentityAdoptionDecisionQuery) sqlQuery(ctx context.Context) *sql.Selector {
|
||||
builder := sql.Dialect(_q.driver.Dialect())
|
||||
t1 := builder.Table(identityadoptiondecision.Table)
|
||||
columns := _q.ctx.Fields
|
||||
if len(columns) == 0 {
|
||||
columns = identityadoptiondecision.Columns
|
||||
}
|
||||
selector := builder.Select(t1.Columns(columns...)...).From(t1)
|
||||
if _q.sql != nil {
|
||||
selector = _q.sql
|
||||
selector.Select(selector.Columns(columns...)...)
|
||||
}
|
||||
if _q.ctx.Unique != nil && *_q.ctx.Unique {
|
||||
selector.Distinct()
|
||||
}
|
||||
for _, m := range _q.modifiers {
|
||||
m(selector)
|
||||
}
|
||||
for _, p := range _q.predicates {
|
||||
p(selector)
|
||||
}
|
||||
for _, p := range _q.order {
|
||||
p(selector)
|
||||
}
|
||||
if offset := _q.ctx.Offset; offset != nil {
|
||||
// limit is mandatory for offset clause. We start
|
||||
// with default value, and override it below if needed.
|
||||
selector.Offset(*offset).Limit(math.MaxInt32)
|
||||
}
|
||||
if limit := _q.ctx.Limit; limit != nil {
|
||||
selector.Limit(*limit)
|
||||
}
|
||||
return selector
|
||||
}
|
||||
|
||||
// ForUpdate locks the selected rows against concurrent updates, and prevent them from being
|
||||
// updated, deleted or "selected ... for update" by other sessions, until the transaction is
|
||||
// either committed or rolled-back.
|
||||
func (_q *IdentityAdoptionDecisionQuery) ForUpdate(opts ...sql.LockOption) *IdentityAdoptionDecisionQuery {
|
||||
if _q.driver.Dialect() == dialect.Postgres {
|
||||
_q.Unique(false)
|
||||
}
|
||||
_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
|
||||
s.ForUpdate(opts...)
|
||||
})
|
||||
return _q
|
||||
}
|
||||
|
||||
// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock
|
||||
// on any rows that are read. Other sessions can read the rows, but cannot modify them
|
||||
// until your transaction commits.
|
||||
func (_q *IdentityAdoptionDecisionQuery) ForShare(opts ...sql.LockOption) *IdentityAdoptionDecisionQuery {
|
||||
if _q.driver.Dialect() == dialect.Postgres {
|
||||
_q.Unique(false)
|
||||
}
|
||||
_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
|
||||
s.ForShare(opts...)
|
||||
})
|
||||
return _q
|
||||
}
|
||||
|
||||
// IdentityAdoptionDecisionGroupBy is the group-by builder for IdentityAdoptionDecision entities.
|
||||
type IdentityAdoptionDecisionGroupBy struct {
|
||||
selector
|
||||
build *IdentityAdoptionDecisionQuery
|
||||
}
|
||||
|
||||
// Aggregate adds the given aggregation functions to the group-by query.
|
||||
func (_g *IdentityAdoptionDecisionGroupBy) Aggregate(fns ...AggregateFunc) *IdentityAdoptionDecisionGroupBy {
|
||||
_g.fns = append(_g.fns, fns...)
|
||||
return _g
|
||||
}
|
||||
|
||||
// Scan applies the selector query and scans the result into the given value.
|
||||
func (_g *IdentityAdoptionDecisionGroupBy) Scan(ctx context.Context, v any) error {
|
||||
ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy)
|
||||
if err := _g.build.prepareQuery(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
return scanWithInterceptors[*IdentityAdoptionDecisionQuery, *IdentityAdoptionDecisionGroupBy](ctx, _g.build, _g, _g.build.inters, v)
|
||||
}
|
||||
|
||||
func (_g *IdentityAdoptionDecisionGroupBy) sqlScan(ctx context.Context, root *IdentityAdoptionDecisionQuery, v any) error {
|
||||
selector := root.sqlQuery(ctx).Select()
|
||||
aggregation := make([]string, 0, len(_g.fns))
|
||||
for _, fn := range _g.fns {
|
||||
aggregation = append(aggregation, fn(selector))
|
||||
}
|
||||
if len(selector.SelectedColumns()) == 0 {
|
||||
columns := make([]string, 0, len(*_g.flds)+len(_g.fns))
|
||||
for _, f := range *_g.flds {
|
||||
columns = append(columns, selector.C(f))
|
||||
}
|
||||
columns = append(columns, aggregation...)
|
||||
selector.Select(columns...)
|
||||
}
|
||||
selector.GroupBy(selector.Columns(*_g.flds...)...)
|
||||
if err := selector.Err(); err != nil {
|
||||
return err
|
||||
}
|
||||
rows := &sql.Rows{}
|
||||
query, args := selector.Query()
|
||||
if err := _g.build.driver.Query(ctx, query, args, rows); err != nil {
|
||||
return err
|
||||
}
|
||||
defer rows.Close()
|
||||
return sql.ScanSlice(rows, v)
|
||||
}
|
||||
|
||||
// IdentityAdoptionDecisionSelect is the builder for selecting fields of IdentityAdoptionDecision entities.
|
||||
type IdentityAdoptionDecisionSelect struct {
|
||||
*IdentityAdoptionDecisionQuery
|
||||
selector
|
||||
}
|
||||
|
||||
// Aggregate adds the given aggregation functions to the selector query.
|
||||
func (_s *IdentityAdoptionDecisionSelect) Aggregate(fns ...AggregateFunc) *IdentityAdoptionDecisionSelect {
|
||||
_s.fns = append(_s.fns, fns...)
|
||||
return _s
|
||||
}
|
||||
|
||||
// Scan applies the selector query and scans the result into the given value.
|
||||
func (_s *IdentityAdoptionDecisionSelect) Scan(ctx context.Context, v any) error {
|
||||
ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect)
|
||||
if err := _s.prepareQuery(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
return scanWithInterceptors[*IdentityAdoptionDecisionQuery, *IdentityAdoptionDecisionSelect](ctx, _s.IdentityAdoptionDecisionQuery, _s, _s.inters, v)
|
||||
}
|
||||
|
||||
func (_s *IdentityAdoptionDecisionSelect) sqlScan(ctx context.Context, root *IdentityAdoptionDecisionQuery, v any) error {
|
||||
selector := root.sqlQuery(ctx)
|
||||
aggregation := make([]string, 0, len(_s.fns))
|
||||
for _, fn := range _s.fns {
|
||||
aggregation = append(aggregation, fn(selector))
|
||||
}
|
||||
switch n := len(*_s.selector.flds); {
|
||||
case n == 0 && len(aggregation) > 0:
|
||||
selector.Select(aggregation...)
|
||||
case n != 0 && len(aggregation) > 0:
|
||||
selector.AppendSelect(aggregation...)
|
||||
}
|
||||
rows := &sql.Rows{}
|
||||
query, args := selector.Query()
|
||||
if err := _s.driver.Query(ctx, query, args, rows); err != nil {
|
||||
return err
|
||||
}
|
||||
defer rows.Close()
|
||||
return sql.ScanSlice(rows, v)
|
||||
}
|
||||
532
backend/ent/identityadoptiondecision_update.go
Normal file
@@ -0,0 +1,532 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package ent
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"entgo.io/ent/schema/field"
|
||||
"github.com/Wei-Shaw/sub2api/ent/authidentity"
|
||||
"github.com/Wei-Shaw/sub2api/ent/identityadoptiondecision"
|
||||
"github.com/Wei-Shaw/sub2api/ent/pendingauthsession"
|
||||
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||
)
|
||||
|
||||
// IdentityAdoptionDecisionUpdate is the builder for updating IdentityAdoptionDecision entities.
|
||||
type IdentityAdoptionDecisionUpdate struct {
|
||||
config
|
||||
hooks []Hook
|
||||
mutation *IdentityAdoptionDecisionMutation
|
||||
}
|
||||
|
||||
// Where appends a list predicates to the IdentityAdoptionDecisionUpdate builder.
|
||||
func (_u *IdentityAdoptionDecisionUpdate) Where(ps ...predicate.IdentityAdoptionDecision) *IdentityAdoptionDecisionUpdate {
|
||||
_u.mutation.Where(ps...)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetUpdatedAt sets the "updated_at" field.
|
||||
func (_u *IdentityAdoptionDecisionUpdate) SetUpdatedAt(v time.Time) *IdentityAdoptionDecisionUpdate {
|
||||
_u.mutation.SetUpdatedAt(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetPendingAuthSessionID sets the "pending_auth_session_id" field.
|
||||
func (_u *IdentityAdoptionDecisionUpdate) SetPendingAuthSessionID(v int64) *IdentityAdoptionDecisionUpdate {
|
||||
_u.mutation.SetPendingAuthSessionID(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillablePendingAuthSessionID sets the "pending_auth_session_id" field if the given value is not nil.
|
||||
func (_u *IdentityAdoptionDecisionUpdate) SetNillablePendingAuthSessionID(v *int64) *IdentityAdoptionDecisionUpdate {
|
||||
if v != nil {
|
||||
_u.SetPendingAuthSessionID(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetIdentityID sets the "identity_id" field.
|
||||
func (_u *IdentityAdoptionDecisionUpdate) SetIdentityID(v int64) *IdentityAdoptionDecisionUpdate {
|
||||
_u.mutation.SetIdentityID(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableIdentityID sets the "identity_id" field if the given value is not nil.
|
||||
func (_u *IdentityAdoptionDecisionUpdate) SetNillableIdentityID(v *int64) *IdentityAdoptionDecisionUpdate {
|
||||
if v != nil {
|
||||
_u.SetIdentityID(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// ClearIdentityID clears the value of the "identity_id" field.
|
||||
func (_u *IdentityAdoptionDecisionUpdate) ClearIdentityID() *IdentityAdoptionDecisionUpdate {
|
||||
_u.mutation.ClearIdentityID()
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetAdoptDisplayName sets the "adopt_display_name" field.
|
||||
func (_u *IdentityAdoptionDecisionUpdate) SetAdoptDisplayName(v bool) *IdentityAdoptionDecisionUpdate {
|
||||
_u.mutation.SetAdoptDisplayName(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableAdoptDisplayName sets the "adopt_display_name" field if the given value is not nil.
|
||||
func (_u *IdentityAdoptionDecisionUpdate) SetNillableAdoptDisplayName(v *bool) *IdentityAdoptionDecisionUpdate {
|
||||
if v != nil {
|
||||
_u.SetAdoptDisplayName(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetAdoptAvatar sets the "adopt_avatar" field.
|
||||
func (_u *IdentityAdoptionDecisionUpdate) SetAdoptAvatar(v bool) *IdentityAdoptionDecisionUpdate {
|
||||
_u.mutation.SetAdoptAvatar(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableAdoptAvatar sets the "adopt_avatar" field if the given value is not nil.
|
||||
func (_u *IdentityAdoptionDecisionUpdate) SetNillableAdoptAvatar(v *bool) *IdentityAdoptionDecisionUpdate {
|
||||
if v != nil {
|
||||
_u.SetAdoptAvatar(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetPendingAuthSession sets the "pending_auth_session" edge to the PendingAuthSession entity.
|
||||
func (_u *IdentityAdoptionDecisionUpdate) SetPendingAuthSession(v *PendingAuthSession) *IdentityAdoptionDecisionUpdate {
|
||||
return _u.SetPendingAuthSessionID(v.ID)
|
||||
}
|
||||
|
||||
// SetIdentity sets the "identity" edge to the AuthIdentity entity.
|
||||
func (_u *IdentityAdoptionDecisionUpdate) SetIdentity(v *AuthIdentity) *IdentityAdoptionDecisionUpdate {
|
||||
return _u.SetIdentityID(v.ID)
|
||||
}
|
||||
|
||||
// Mutation returns the IdentityAdoptionDecisionMutation object of the builder.
|
||||
func (_u *IdentityAdoptionDecisionUpdate) Mutation() *IdentityAdoptionDecisionMutation {
|
||||
return _u.mutation
|
||||
}
|
||||
|
||||
// ClearPendingAuthSession clears the "pending_auth_session" edge to the PendingAuthSession entity.
|
||||
func (_u *IdentityAdoptionDecisionUpdate) ClearPendingAuthSession() *IdentityAdoptionDecisionUpdate {
|
||||
_u.mutation.ClearPendingAuthSession()
|
||||
return _u
|
||||
}
|
||||
|
||||
// ClearIdentity clears the "identity" edge to the AuthIdentity entity.
|
||||
func (_u *IdentityAdoptionDecisionUpdate) ClearIdentity() *IdentityAdoptionDecisionUpdate {
|
||||
_u.mutation.ClearIdentity()
|
||||
return _u
|
||||
}
|
||||
|
||||
// Save executes the query and returns the number of nodes affected by the update operation.
|
||||
func (_u *IdentityAdoptionDecisionUpdate) Save(ctx context.Context) (int, error) {
|
||||
_u.defaults()
|
||||
return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks)
|
||||
}
|
||||
|
||||
// SaveX is like Save, but panics if an error occurs.
|
||||
func (_u *IdentityAdoptionDecisionUpdate) SaveX(ctx context.Context) int {
|
||||
affected, err := _u.Save(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return affected
|
||||
}
|
||||
|
||||
// Exec executes the query.
|
||||
func (_u *IdentityAdoptionDecisionUpdate) Exec(ctx context.Context) error {
|
||||
_, err := _u.Save(ctx)
|
||||
return err
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (_u *IdentityAdoptionDecisionUpdate) ExecX(ctx context.Context) {
|
||||
if err := _u.Exec(ctx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// defaults sets the default values of the builder before save.
|
||||
func (_u *IdentityAdoptionDecisionUpdate) defaults() {
|
||||
if _, ok := _u.mutation.UpdatedAt(); !ok {
|
||||
v := identityadoptiondecision.UpdateDefaultUpdatedAt()
|
||||
_u.mutation.SetUpdatedAt(v)
|
||||
}
|
||||
}
|
||||
|
||||
// check runs all checks and user-defined validators on the builder.
|
||||
func (_u *IdentityAdoptionDecisionUpdate) check() error {
|
||||
if _u.mutation.PendingAuthSessionCleared() && len(_u.mutation.PendingAuthSessionIDs()) > 0 {
|
||||
return errors.New(`ent: clearing a required unique edge "IdentityAdoptionDecision.pending_auth_session"`)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (_u *IdentityAdoptionDecisionUpdate) sqlSave(ctx context.Context) (_node int, err error) {
|
||||
if err := _u.check(); err != nil {
|
||||
return _node, err
|
||||
}
|
||||
_spec := sqlgraph.NewUpdateSpec(identityadoptiondecision.Table, identityadoptiondecision.Columns, sqlgraph.NewFieldSpec(identityadoptiondecision.FieldID, field.TypeInt64))
|
||||
if ps := _u.mutation.predicates; len(ps) > 0 {
|
||||
_spec.Predicate = func(selector *sql.Selector) {
|
||||
for i := range ps {
|
||||
ps[i](selector)
|
||||
}
|
||||
}
|
||||
}
|
||||
if value, ok := _u.mutation.UpdatedAt(); ok {
|
||||
_spec.SetField(identityadoptiondecision.FieldUpdatedAt, field.TypeTime, value)
|
||||
}
|
||||
if value, ok := _u.mutation.AdoptDisplayName(); ok {
|
||||
_spec.SetField(identityadoptiondecision.FieldAdoptDisplayName, field.TypeBool, value)
|
||||
}
|
||||
if value, ok := _u.mutation.AdoptAvatar(); ok {
|
||||
_spec.SetField(identityadoptiondecision.FieldAdoptAvatar, field.TypeBool, value)
|
||||
}
|
||||
if _u.mutation.PendingAuthSessionCleared() {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
Rel: sqlgraph.O2O,
|
||||
Inverse: true,
|
||||
Table: identityadoptiondecision.PendingAuthSessionTable,
|
||||
Columns: []string{identityadoptiondecision.PendingAuthSessionColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: sqlgraph.NewFieldSpec(pendingauthsession.FieldID, field.TypeInt64),
|
||||
},
|
||||
}
|
||||
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||
}
|
||||
if nodes := _u.mutation.PendingAuthSessionIDs(); len(nodes) > 0 {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
Rel: sqlgraph.O2O,
|
||||
Inverse: true,
|
||||
Table: identityadoptiondecision.PendingAuthSessionTable,
|
||||
Columns: []string{identityadoptiondecision.PendingAuthSessionColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: sqlgraph.NewFieldSpec(pendingauthsession.FieldID, field.TypeInt64),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
edge.Target.Nodes = append(edge.Target.Nodes, k)
|
||||
}
|
||||
_spec.Edges.Add = append(_spec.Edges.Add, edge)
|
||||
}
|
||||
if _u.mutation.IdentityCleared() {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
Rel: sqlgraph.M2O,
|
||||
Inverse: true,
|
||||
Table: identityadoptiondecision.IdentityTable,
|
||||
Columns: []string{identityadoptiondecision.IdentityColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: sqlgraph.NewFieldSpec(authidentity.FieldID, field.TypeInt64),
|
||||
},
|
||||
}
|
||||
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||
}
|
||||
if nodes := _u.mutation.IdentityIDs(); len(nodes) > 0 {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
Rel: sqlgraph.M2O,
|
||||
Inverse: true,
|
||||
Table: identityadoptiondecision.IdentityTable,
|
||||
Columns: []string{identityadoptiondecision.IdentityColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: sqlgraph.NewFieldSpec(authidentity.FieldID, field.TypeInt64),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
edge.Target.Nodes = append(edge.Target.Nodes, k)
|
||||
}
|
||||
_spec.Edges.Add = append(_spec.Edges.Add, edge)
|
||||
}
|
||||
if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil {
|
||||
if _, ok := err.(*sqlgraph.NotFoundError); ok {
|
||||
err = &NotFoundError{identityadoptiondecision.Label}
|
||||
} else if sqlgraph.IsConstraintError(err) {
|
||||
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||
}
|
||||
return 0, err
|
||||
}
|
||||
_u.mutation.done = true
|
||||
return _node, nil
|
||||
}
|
||||
|
||||
// IdentityAdoptionDecisionUpdateOne is the builder for updating a single IdentityAdoptionDecision entity.
|
||||
type IdentityAdoptionDecisionUpdateOne struct {
|
||||
config
|
||||
fields []string
|
||||
hooks []Hook
|
||||
mutation *IdentityAdoptionDecisionMutation
|
||||
}
|
||||
|
||||
// SetUpdatedAt sets the "updated_at" field.
|
||||
func (_u *IdentityAdoptionDecisionUpdateOne) SetUpdatedAt(v time.Time) *IdentityAdoptionDecisionUpdateOne {
|
||||
_u.mutation.SetUpdatedAt(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetPendingAuthSessionID sets the "pending_auth_session_id" field.
|
||||
func (_u *IdentityAdoptionDecisionUpdateOne) SetPendingAuthSessionID(v int64) *IdentityAdoptionDecisionUpdateOne {
|
||||
_u.mutation.SetPendingAuthSessionID(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillablePendingAuthSessionID sets the "pending_auth_session_id" field if the given value is not nil.
|
||||
func (_u *IdentityAdoptionDecisionUpdateOne) SetNillablePendingAuthSessionID(v *int64) *IdentityAdoptionDecisionUpdateOne {
|
||||
if v != nil {
|
||||
_u.SetPendingAuthSessionID(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetIdentityID sets the "identity_id" field.
|
||||
func (_u *IdentityAdoptionDecisionUpdateOne) SetIdentityID(v int64) *IdentityAdoptionDecisionUpdateOne {
|
||||
_u.mutation.SetIdentityID(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableIdentityID sets the "identity_id" field if the given value is not nil.
|
||||
func (_u *IdentityAdoptionDecisionUpdateOne) SetNillableIdentityID(v *int64) *IdentityAdoptionDecisionUpdateOne {
|
||||
if v != nil {
|
||||
_u.SetIdentityID(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// ClearIdentityID clears the value of the "identity_id" field.
|
||||
func (_u *IdentityAdoptionDecisionUpdateOne) ClearIdentityID() *IdentityAdoptionDecisionUpdateOne {
|
||||
_u.mutation.ClearIdentityID()
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetAdoptDisplayName sets the "adopt_display_name" field.
|
||||
func (_u *IdentityAdoptionDecisionUpdateOne) SetAdoptDisplayName(v bool) *IdentityAdoptionDecisionUpdateOne {
|
||||
_u.mutation.SetAdoptDisplayName(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableAdoptDisplayName sets the "adopt_display_name" field if the given value is not nil.
|
||||
func (_u *IdentityAdoptionDecisionUpdateOne) SetNillableAdoptDisplayName(v *bool) *IdentityAdoptionDecisionUpdateOne {
|
||||
if v != nil {
|
||||
_u.SetAdoptDisplayName(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetAdoptAvatar sets the "adopt_avatar" field.
|
||||
func (_u *IdentityAdoptionDecisionUpdateOne) SetAdoptAvatar(v bool) *IdentityAdoptionDecisionUpdateOne {
|
||||
_u.mutation.SetAdoptAvatar(v)
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetNillableAdoptAvatar sets the "adopt_avatar" field if the given value is not nil.
|
||||
func (_u *IdentityAdoptionDecisionUpdateOne) SetNillableAdoptAvatar(v *bool) *IdentityAdoptionDecisionUpdateOne {
|
||||
if v != nil {
|
||||
_u.SetAdoptAvatar(*v)
|
||||
}
|
||||
return _u
|
||||
}
|
||||
|
||||
// SetPendingAuthSession sets the "pending_auth_session" edge to the PendingAuthSession entity.
|
||||
func (_u *IdentityAdoptionDecisionUpdateOne) SetPendingAuthSession(v *PendingAuthSession) *IdentityAdoptionDecisionUpdateOne {
|
||||
return _u.SetPendingAuthSessionID(v.ID)
|
||||
}
|
||||
|
||||
// SetIdentity sets the "identity" edge to the AuthIdentity entity.
|
||||
func (_u *IdentityAdoptionDecisionUpdateOne) SetIdentity(v *AuthIdentity) *IdentityAdoptionDecisionUpdateOne {
|
||||
return _u.SetIdentityID(v.ID)
|
||||
}
|
||||
|
||||
// Mutation returns the IdentityAdoptionDecisionMutation object of the builder.
|
||||
func (_u *IdentityAdoptionDecisionUpdateOne) Mutation() *IdentityAdoptionDecisionMutation {
|
||||
return _u.mutation
|
||||
}
|
||||
|
||||
// ClearPendingAuthSession clears the "pending_auth_session" edge to the PendingAuthSession entity.
|
||||
func (_u *IdentityAdoptionDecisionUpdateOne) ClearPendingAuthSession() *IdentityAdoptionDecisionUpdateOne {
|
||||
_u.mutation.ClearPendingAuthSession()
|
||||
return _u
|
||||
}
|
||||
|
||||
// ClearIdentity clears the "identity" edge to the AuthIdentity entity.
|
||||
func (_u *IdentityAdoptionDecisionUpdateOne) ClearIdentity() *IdentityAdoptionDecisionUpdateOne {
|
||||
_u.mutation.ClearIdentity()
|
||||
return _u
|
||||
}
|
||||
|
||||
// Where appends a list predicates to the IdentityAdoptionDecisionUpdate builder.
|
||||
func (_u *IdentityAdoptionDecisionUpdateOne) Where(ps ...predicate.IdentityAdoptionDecision) *IdentityAdoptionDecisionUpdateOne {
|
||||
_u.mutation.Where(ps...)
|
||||
return _u
|
||||
}
|
||||
|
||||
// Select allows selecting one or more fields (columns) of the returned entity.
|
||||
// The default is selecting all fields defined in the entity schema.
|
||||
func (_u *IdentityAdoptionDecisionUpdateOne) Select(field string, fields ...string) *IdentityAdoptionDecisionUpdateOne {
|
||||
_u.fields = append([]string{field}, fields...)
|
||||
return _u
|
||||
}
|
||||
|
||||
// Save executes the query and returns the updated IdentityAdoptionDecision entity.
|
||||
func (_u *IdentityAdoptionDecisionUpdateOne) Save(ctx context.Context) (*IdentityAdoptionDecision, error) {
|
||||
_u.defaults()
|
||||
return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks)
|
||||
}
|
||||
|
||||
// SaveX is like Save, but panics if an error occurs.
|
||||
func (_u *IdentityAdoptionDecisionUpdateOne) SaveX(ctx context.Context) *IdentityAdoptionDecision {
|
||||
node, err := _u.Save(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return node
|
||||
}
|
||||
|
||||
// Exec executes the query on the entity.
|
||||
func (_u *IdentityAdoptionDecisionUpdateOne) Exec(ctx context.Context) error {
|
||||
_, err := _u.Save(ctx)
|
||||
return err
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (_u *IdentityAdoptionDecisionUpdateOne) ExecX(ctx context.Context) {
|
||||
if err := _u.Exec(ctx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// defaults sets the default values of the builder before save.
|
||||
func (_u *IdentityAdoptionDecisionUpdateOne) defaults() {
|
||||
if _, ok := _u.mutation.UpdatedAt(); !ok {
|
||||
v := identityadoptiondecision.UpdateDefaultUpdatedAt()
|
||||
_u.mutation.SetUpdatedAt(v)
|
||||
}
|
||||
}
|
||||
|
||||
// check runs all checks and user-defined validators on the builder.
|
||||
func (_u *IdentityAdoptionDecisionUpdateOne) check() error {
|
||||
if _u.mutation.PendingAuthSessionCleared() && len(_u.mutation.PendingAuthSessionIDs()) > 0 {
|
||||
return errors.New(`ent: clearing a required unique edge "IdentityAdoptionDecision.pending_auth_session"`)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (_u *IdentityAdoptionDecisionUpdateOne) sqlSave(ctx context.Context) (_node *IdentityAdoptionDecision, err error) {
|
||||
if err := _u.check(); err != nil {
|
||||
return _node, err
|
||||
}
|
||||
_spec := sqlgraph.NewUpdateSpec(identityadoptiondecision.Table, identityadoptiondecision.Columns, sqlgraph.NewFieldSpec(identityadoptiondecision.FieldID, field.TypeInt64))
|
||||
id, ok := _u.mutation.ID()
|
||||
if !ok {
|
||||
return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "IdentityAdoptionDecision.id" for update`)}
|
||||
}
|
||||
_spec.Node.ID.Value = id
|
||||
if fields := _u.fields; len(fields) > 0 {
|
||||
_spec.Node.Columns = make([]string, 0, len(fields))
|
||||
_spec.Node.Columns = append(_spec.Node.Columns, identityadoptiondecision.FieldID)
|
||||
for _, f := range fields {
|
||||
if !identityadoptiondecision.ValidColumn(f) {
|
||||
return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
|
||||
}
|
||||
if f != identityadoptiondecision.FieldID {
|
||||
_spec.Node.Columns = append(_spec.Node.Columns, f)
|
||||
}
|
||||
}
|
||||
}
|
||||
if ps := _u.mutation.predicates; len(ps) > 0 {
|
||||
_spec.Predicate = func(selector *sql.Selector) {
|
||||
for i := range ps {
|
||||
ps[i](selector)
|
||||
}
|
||||
}
|
||||
}
|
||||
if value, ok := _u.mutation.UpdatedAt(); ok {
|
||||
_spec.SetField(identityadoptiondecision.FieldUpdatedAt, field.TypeTime, value)
|
||||
}
|
||||
if value, ok := _u.mutation.AdoptDisplayName(); ok {
|
||||
_spec.SetField(identityadoptiondecision.FieldAdoptDisplayName, field.TypeBool, value)
|
||||
}
|
||||
if value, ok := _u.mutation.AdoptAvatar(); ok {
|
||||
_spec.SetField(identityadoptiondecision.FieldAdoptAvatar, field.TypeBool, value)
|
||||
}
|
||||
if _u.mutation.PendingAuthSessionCleared() {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
Rel: sqlgraph.O2O,
|
||||
Inverse: true,
|
||||
Table: identityadoptiondecision.PendingAuthSessionTable,
|
||||
Columns: []string{identityadoptiondecision.PendingAuthSessionColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: sqlgraph.NewFieldSpec(pendingauthsession.FieldID, field.TypeInt64),
|
||||
},
|
||||
}
|
||||
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||
}
|
||||
if nodes := _u.mutation.PendingAuthSessionIDs(); len(nodes) > 0 {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
Rel: sqlgraph.O2O,
|
||||
Inverse: true,
|
||||
Table: identityadoptiondecision.PendingAuthSessionTable,
|
||||
Columns: []string{identityadoptiondecision.PendingAuthSessionColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: sqlgraph.NewFieldSpec(pendingauthsession.FieldID, field.TypeInt64),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
edge.Target.Nodes = append(edge.Target.Nodes, k)
|
||||
}
|
||||
_spec.Edges.Add = append(_spec.Edges.Add, edge)
|
||||
}
|
||||
if _u.mutation.IdentityCleared() {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
Rel: sqlgraph.M2O,
|
||||
Inverse: true,
|
||||
Table: identityadoptiondecision.IdentityTable,
|
||||
Columns: []string{identityadoptiondecision.IdentityColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: sqlgraph.NewFieldSpec(authidentity.FieldID, field.TypeInt64),
|
||||
},
|
||||
}
|
||||
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||
}
|
||||
if nodes := _u.mutation.IdentityIDs(); len(nodes) > 0 {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
Rel: sqlgraph.M2O,
|
||||
Inverse: true,
|
||||
Table: identityadoptiondecision.IdentityTable,
|
||||
Columns: []string{identityadoptiondecision.IdentityColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: sqlgraph.NewFieldSpec(authidentity.FieldID, field.TypeInt64),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
edge.Target.Nodes = append(edge.Target.Nodes, k)
|
||||
}
|
||||
_spec.Edges.Add = append(_spec.Edges.Add, edge)
|
||||
}
|
||||
_node = &IdentityAdoptionDecision{config: _u.config}
|
||||
_spec.Assign = _node.assignValues
|
||||
_spec.ScanValues = _node.scanValues
|
||||
if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil {
|
||||
if _, ok := err.(*sqlgraph.NotFoundError); ok {
|
||||
err = &NotFoundError{identityadoptiondecision.Label}
|
||||
} else if sqlgraph.IsConstraintError(err) {
|
||||
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
_u.mutation.done = true
|
||||
return _node, nil
|
||||
}
|
||||
@@ -13,9 +13,20 @@ import (
|
||||
"github.com/Wei-Shaw/sub2api/ent/announcement"
|
||||
"github.com/Wei-Shaw/sub2api/ent/announcementread"
|
||||
"github.com/Wei-Shaw/sub2api/ent/apikey"
|
||||
"github.com/Wei-Shaw/sub2api/ent/authidentity"
|
||||
"github.com/Wei-Shaw/sub2api/ent/authidentitychannel"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitor"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitordailyrollup"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitorhistory"
|
||||
"github.com/Wei-Shaw/sub2api/ent/channelmonitorrequesttemplate"
|
||||
"github.com/Wei-Shaw/sub2api/ent/errorpassthroughrule"
|
||||
"github.com/Wei-Shaw/sub2api/ent/group"
|
||||
"github.com/Wei-Shaw/sub2api/ent/idempotencyrecord"
|
||||
"github.com/Wei-Shaw/sub2api/ent/identityadoptiondecision"
|
||||
"github.com/Wei-Shaw/sub2api/ent/paymentauditlog"
|
||||
"github.com/Wei-Shaw/sub2api/ent/paymentorder"
|
||||
"github.com/Wei-Shaw/sub2api/ent/paymentproviderinstance"
|
||||
"github.com/Wei-Shaw/sub2api/ent/pendingauthsession"
|
||||
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||
"github.com/Wei-Shaw/sub2api/ent/promocode"
|
||||
"github.com/Wei-Shaw/sub2api/ent/promocodeusage"
|
||||
@@ -23,6 +34,8 @@ import (
|
||||
"github.com/Wei-Shaw/sub2api/ent/redeemcode"
|
||||
"github.com/Wei-Shaw/sub2api/ent/securitysecret"
|
||||
"github.com/Wei-Shaw/sub2api/ent/setting"
|
||||
"github.com/Wei-Shaw/sub2api/ent/subscriptionplan"
|
||||
"github.com/Wei-Shaw/sub2api/ent/tlsfingerprintprofile"
|
||||
"github.com/Wei-Shaw/sub2api/ent/usagecleanuptask"
|
||||
"github.com/Wei-Shaw/sub2api/ent/usagelog"
|
||||
"github.com/Wei-Shaw/sub2api/ent/user"
|
||||
@@ -223,6 +236,168 @@ func (f TraverseAnnouncementRead) Traverse(ctx context.Context, q ent.Query) err
|
||||
return fmt.Errorf("unexpected query type %T. expect *ent.AnnouncementReadQuery", q)
|
||||
}
|
||||
|
||||
// The AuthIdentityFunc type is an adapter to allow the use of ordinary function as a Querier.
|
||||
type AuthIdentityFunc func(context.Context, *ent.AuthIdentityQuery) (ent.Value, error)
|
||||
|
||||
// Query calls f(ctx, q).
|
||||
func (f AuthIdentityFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) {
|
||||
if q, ok := q.(*ent.AuthIdentityQuery); ok {
|
||||
return f(ctx, q)
|
||||
}
|
||||
return nil, fmt.Errorf("unexpected query type %T. expect *ent.AuthIdentityQuery", q)
|
||||
}
|
||||
|
||||
// The TraverseAuthIdentity type is an adapter to allow the use of ordinary function as Traverser.
|
||||
type TraverseAuthIdentity func(context.Context, *ent.AuthIdentityQuery) error
|
||||
|
||||
// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline.
|
||||
func (f TraverseAuthIdentity) Intercept(next ent.Querier) ent.Querier {
|
||||
return next
|
||||
}
|
||||
|
||||
// Traverse calls f(ctx, q).
|
||||
func (f TraverseAuthIdentity) Traverse(ctx context.Context, q ent.Query) error {
|
||||
if q, ok := q.(*ent.AuthIdentityQuery); ok {
|
||||
return f(ctx, q)
|
||||
}
|
||||
return fmt.Errorf("unexpected query type %T. expect *ent.AuthIdentityQuery", q)
|
||||
}
|
||||
|
||||
// The AuthIdentityChannelFunc type is an adapter to allow the use of ordinary function as a Querier.
|
||||
type AuthIdentityChannelFunc func(context.Context, *ent.AuthIdentityChannelQuery) (ent.Value, error)
|
||||
|
||||
// Query calls f(ctx, q).
|
||||
func (f AuthIdentityChannelFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) {
|
||||
if q, ok := q.(*ent.AuthIdentityChannelQuery); ok {
|
||||
return f(ctx, q)
|
||||
}
|
||||
return nil, fmt.Errorf("unexpected query type %T. expect *ent.AuthIdentityChannelQuery", q)
|
||||
}
|
||||
|
||||
// The TraverseAuthIdentityChannel type is an adapter to allow the use of ordinary function as Traverser.
|
||||
type TraverseAuthIdentityChannel func(context.Context, *ent.AuthIdentityChannelQuery) error
|
||||
|
||||
// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline.
|
||||
func (f TraverseAuthIdentityChannel) Intercept(next ent.Querier) ent.Querier {
|
||||
return next
|
||||
}
|
||||
|
||||
// Traverse calls f(ctx, q).
|
||||
func (f TraverseAuthIdentityChannel) Traverse(ctx context.Context, q ent.Query) error {
|
||||
if q, ok := q.(*ent.AuthIdentityChannelQuery); ok {
|
||||
return f(ctx, q)
|
||||
}
|
||||
return fmt.Errorf("unexpected query type %T. expect *ent.AuthIdentityChannelQuery", q)
|
||||
}
|
||||
|
||||
// The ChannelMonitorFunc type is an adapter to allow the use of ordinary function as a Querier.
|
||||
type ChannelMonitorFunc func(context.Context, *ent.ChannelMonitorQuery) (ent.Value, error)
|
||||
|
||||
// Query calls f(ctx, q).
|
||||
func (f ChannelMonitorFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) {
|
||||
if q, ok := q.(*ent.ChannelMonitorQuery); ok {
|
||||
return f(ctx, q)
|
||||
}
|
||||
return nil, fmt.Errorf("unexpected query type %T. expect *ent.ChannelMonitorQuery", q)
|
||||
}
|
||||
|
||||
// The TraverseChannelMonitor type is an adapter to allow the use of ordinary function as Traverser.
|
||||
type TraverseChannelMonitor func(context.Context, *ent.ChannelMonitorQuery) error
|
||||
|
||||
// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline.
|
||||
func (f TraverseChannelMonitor) Intercept(next ent.Querier) ent.Querier {
|
||||
return next
|
||||
}
|
||||
|
||||
// Traverse calls f(ctx, q).
|
||||
func (f TraverseChannelMonitor) Traverse(ctx context.Context, q ent.Query) error {
|
||||
if q, ok := q.(*ent.ChannelMonitorQuery); ok {
|
||||
return f(ctx, q)
|
||||
}
|
||||
return fmt.Errorf("unexpected query type %T. expect *ent.ChannelMonitorQuery", q)
|
||||
}
|
||||
|
||||
// The ChannelMonitorDailyRollupFunc type is an adapter to allow the use of ordinary function as a Querier.
|
||||
type ChannelMonitorDailyRollupFunc func(context.Context, *ent.ChannelMonitorDailyRollupQuery) (ent.Value, error)
|
||||
|
||||
// Query calls f(ctx, q).
|
||||
func (f ChannelMonitorDailyRollupFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) {
|
||||
if q, ok := q.(*ent.ChannelMonitorDailyRollupQuery); ok {
|
||||
return f(ctx, q)
|
||||
}
|
||||
return nil, fmt.Errorf("unexpected query type %T. expect *ent.ChannelMonitorDailyRollupQuery", q)
|
||||
}
|
||||
|
||||
// The TraverseChannelMonitorDailyRollup type is an adapter to allow the use of ordinary function as Traverser.
|
||||
type TraverseChannelMonitorDailyRollup func(context.Context, *ent.ChannelMonitorDailyRollupQuery) error
|
||||
|
||||
// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline.
|
||||
func (f TraverseChannelMonitorDailyRollup) Intercept(next ent.Querier) ent.Querier {
|
||||
return next
|
||||
}
|
||||
|
||||
// Traverse calls f(ctx, q).
|
||||
func (f TraverseChannelMonitorDailyRollup) Traverse(ctx context.Context, q ent.Query) error {
|
||||
if q, ok := q.(*ent.ChannelMonitorDailyRollupQuery); ok {
|
||||
return f(ctx, q)
|
||||
}
|
||||
return fmt.Errorf("unexpected query type %T. expect *ent.ChannelMonitorDailyRollupQuery", q)
|
||||
}
|
||||
|
||||
// The ChannelMonitorHistoryFunc type is an adapter to allow the use of ordinary function as a Querier.
|
||||
type ChannelMonitorHistoryFunc func(context.Context, *ent.ChannelMonitorHistoryQuery) (ent.Value, error)
|
||||
|
||||
// Query calls f(ctx, q).
|
||||
func (f ChannelMonitorHistoryFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) {
|
||||
if q, ok := q.(*ent.ChannelMonitorHistoryQuery); ok {
|
||||
return f(ctx, q)
|
||||
}
|
||||
return nil, fmt.Errorf("unexpected query type %T. expect *ent.ChannelMonitorHistoryQuery", q)
|
||||
}
|
||||
|
||||
// The TraverseChannelMonitorHistory type is an adapter to allow the use of ordinary function as Traverser.
|
||||
type TraverseChannelMonitorHistory func(context.Context, *ent.ChannelMonitorHistoryQuery) error
|
||||
|
||||
// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline.
|
||||
func (f TraverseChannelMonitorHistory) Intercept(next ent.Querier) ent.Querier {
|
||||
return next
|
||||
}
|
||||
|
||||
// Traverse calls f(ctx, q).
|
||||
func (f TraverseChannelMonitorHistory) Traverse(ctx context.Context, q ent.Query) error {
|
||||
if q, ok := q.(*ent.ChannelMonitorHistoryQuery); ok {
|
||||
return f(ctx, q)
|
||||
}
|
||||
return fmt.Errorf("unexpected query type %T. expect *ent.ChannelMonitorHistoryQuery", q)
|
||||
}
|
||||
|
||||
// The ChannelMonitorRequestTemplateFunc type is an adapter to allow the use of ordinary function as a Querier.
|
||||
type ChannelMonitorRequestTemplateFunc func(context.Context, *ent.ChannelMonitorRequestTemplateQuery) (ent.Value, error)
|
||||
|
||||
// Query calls f(ctx, q).
|
||||
func (f ChannelMonitorRequestTemplateFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) {
|
||||
if q, ok := q.(*ent.ChannelMonitorRequestTemplateQuery); ok {
|
||||
return f(ctx, q)
|
||||
}
|
||||
return nil, fmt.Errorf("unexpected query type %T. expect *ent.ChannelMonitorRequestTemplateQuery", q)
|
||||
}
|
||||
|
||||
// The TraverseChannelMonitorRequestTemplate type is an adapter to allow the use of ordinary function as Traverser.
|
||||
type TraverseChannelMonitorRequestTemplate func(context.Context, *ent.ChannelMonitorRequestTemplateQuery) error
|
||||
|
||||
// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline.
|
||||
func (f TraverseChannelMonitorRequestTemplate) Intercept(next ent.Querier) ent.Querier {
|
||||
return next
|
||||
}
|
||||
|
||||
// Traverse calls f(ctx, q).
|
||||
func (f TraverseChannelMonitorRequestTemplate) Traverse(ctx context.Context, q ent.Query) error {
|
||||
if q, ok := q.(*ent.ChannelMonitorRequestTemplateQuery); ok {
|
||||
return f(ctx, q)
|
||||
}
|
||||
return fmt.Errorf("unexpected query type %T. expect *ent.ChannelMonitorRequestTemplateQuery", q)
|
||||
}
|
||||
|
||||
// The ErrorPassthroughRuleFunc type is an adapter to allow the use of ordinary function as a Querier.
|
||||
type ErrorPassthroughRuleFunc func(context.Context, *ent.ErrorPassthroughRuleQuery) (ent.Value, error)
|
||||
|
||||
@@ -304,6 +479,141 @@ func (f TraverseIdempotencyRecord) Traverse(ctx context.Context, q ent.Query) er
|
||||
return fmt.Errorf("unexpected query type %T. expect *ent.IdempotencyRecordQuery", q)
|
||||
}
|
||||
|
||||
// The IdentityAdoptionDecisionFunc type is an adapter to allow the use of ordinary function as a Querier.
|
||||
type IdentityAdoptionDecisionFunc func(context.Context, *ent.IdentityAdoptionDecisionQuery) (ent.Value, error)
|
||||
|
||||
// Query calls f(ctx, q).
|
||||
func (f IdentityAdoptionDecisionFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) {
|
||||
if q, ok := q.(*ent.IdentityAdoptionDecisionQuery); ok {
|
||||
return f(ctx, q)
|
||||
}
|
||||
return nil, fmt.Errorf("unexpected query type %T. expect *ent.IdentityAdoptionDecisionQuery", q)
|
||||
}
|
||||
|
||||
// The TraverseIdentityAdoptionDecision type is an adapter to allow the use of ordinary function as Traverser.
|
||||
type TraverseIdentityAdoptionDecision func(context.Context, *ent.IdentityAdoptionDecisionQuery) error
|
||||
|
||||
// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline.
|
||||
func (f TraverseIdentityAdoptionDecision) Intercept(next ent.Querier) ent.Querier {
|
||||
return next
|
||||
}
|
||||
|
||||
// Traverse calls f(ctx, q).
|
||||
func (f TraverseIdentityAdoptionDecision) Traverse(ctx context.Context, q ent.Query) error {
|
||||
if q, ok := q.(*ent.IdentityAdoptionDecisionQuery); ok {
|
||||
return f(ctx, q)
|
||||
}
|
||||
return fmt.Errorf("unexpected query type %T. expect *ent.IdentityAdoptionDecisionQuery", q)
|
||||
}
|
||||
|
||||
// The PaymentAuditLogFunc type is an adapter to allow the use of ordinary function as a Querier.
|
||||
type PaymentAuditLogFunc func(context.Context, *ent.PaymentAuditLogQuery) (ent.Value, error)
|
||||
|
||||
// Query calls f(ctx, q).
|
||||
func (f PaymentAuditLogFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) {
|
||||
if q, ok := q.(*ent.PaymentAuditLogQuery); ok {
|
||||
return f(ctx, q)
|
||||
}
|
||||
return nil, fmt.Errorf("unexpected query type %T. expect *ent.PaymentAuditLogQuery", q)
|
||||
}
|
||||
|
||||
// The TraversePaymentAuditLog type is an adapter to allow the use of ordinary function as Traverser.
|
||||
type TraversePaymentAuditLog func(context.Context, *ent.PaymentAuditLogQuery) error
|
||||
|
||||
// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline.
|
||||
func (f TraversePaymentAuditLog) Intercept(next ent.Querier) ent.Querier {
|
||||
return next
|
||||
}
|
||||
|
||||
// Traverse calls f(ctx, q).
|
||||
func (f TraversePaymentAuditLog) Traverse(ctx context.Context, q ent.Query) error {
|
||||
if q, ok := q.(*ent.PaymentAuditLogQuery); ok {
|
||||
return f(ctx, q)
|
||||
}
|
||||
return fmt.Errorf("unexpected query type %T. expect *ent.PaymentAuditLogQuery", q)
|
||||
}
|
||||
|
||||
// The PaymentOrderFunc type is an adapter to allow the use of ordinary function as a Querier.
|
||||
type PaymentOrderFunc func(context.Context, *ent.PaymentOrderQuery) (ent.Value, error)
|
||||
|
||||
// Query calls f(ctx, q).
|
||||
func (f PaymentOrderFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) {
|
||||
if q, ok := q.(*ent.PaymentOrderQuery); ok {
|
||||
return f(ctx, q)
|
||||
}
|
||||
return nil, fmt.Errorf("unexpected query type %T. expect *ent.PaymentOrderQuery", q)
|
||||
}
|
||||
|
||||
// The TraversePaymentOrder type is an adapter to allow the use of ordinary function as Traverser.
|
||||
type TraversePaymentOrder func(context.Context, *ent.PaymentOrderQuery) error
|
||||
|
||||
// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline.
|
||||
func (f TraversePaymentOrder) Intercept(next ent.Querier) ent.Querier {
|
||||
return next
|
||||
}
|
||||
|
||||
// Traverse calls f(ctx, q).
|
||||
func (f TraversePaymentOrder) Traverse(ctx context.Context, q ent.Query) error {
|
||||
if q, ok := q.(*ent.PaymentOrderQuery); ok {
|
||||
return f(ctx, q)
|
||||
}
|
||||
return fmt.Errorf("unexpected query type %T. expect *ent.PaymentOrderQuery", q)
|
||||
}
|
||||
|
||||
// The PaymentProviderInstanceFunc type is an adapter to allow the use of ordinary function as a Querier.
|
||||
type PaymentProviderInstanceFunc func(context.Context, *ent.PaymentProviderInstanceQuery) (ent.Value, error)
|
||||
|
||||
// Query calls f(ctx, q).
|
||||
func (f PaymentProviderInstanceFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) {
|
||||
if q, ok := q.(*ent.PaymentProviderInstanceQuery); ok {
|
||||
return f(ctx, q)
|
||||
}
|
||||
return nil, fmt.Errorf("unexpected query type %T. expect *ent.PaymentProviderInstanceQuery", q)
|
||||
}
|
||||
|
||||
// The TraversePaymentProviderInstance type is an adapter to allow the use of ordinary function as Traverser.
|
||||
type TraversePaymentProviderInstance func(context.Context, *ent.PaymentProviderInstanceQuery) error
|
||||
|
||||
// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline.
|
||||
func (f TraversePaymentProviderInstance) Intercept(next ent.Querier) ent.Querier {
|
||||
return next
|
||||
}
|
||||
|
||||
// Traverse calls f(ctx, q).
|
||||
func (f TraversePaymentProviderInstance) Traverse(ctx context.Context, q ent.Query) error {
|
||||
if q, ok := q.(*ent.PaymentProviderInstanceQuery); ok {
|
||||
return f(ctx, q)
|
||||
}
|
||||
return fmt.Errorf("unexpected query type %T. expect *ent.PaymentProviderInstanceQuery", q)
|
||||
}
|
||||
|
||||
// The PendingAuthSessionFunc type is an adapter to allow the use of ordinary function as a Querier.
|
||||
type PendingAuthSessionFunc func(context.Context, *ent.PendingAuthSessionQuery) (ent.Value, error)
|
||||
|
||||
// Query calls f(ctx, q).
|
||||
func (f PendingAuthSessionFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) {
|
||||
if q, ok := q.(*ent.PendingAuthSessionQuery); ok {
|
||||
return f(ctx, q)
|
||||
}
|
||||
return nil, fmt.Errorf("unexpected query type %T. expect *ent.PendingAuthSessionQuery", q)
|
||||
}
|
||||
|
||||
// The TraversePendingAuthSession type is an adapter to allow the use of ordinary function as Traverser.
|
||||
type TraversePendingAuthSession func(context.Context, *ent.PendingAuthSessionQuery) error
|
||||
|
||||
// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline.
|
||||
func (f TraversePendingAuthSession) Intercept(next ent.Querier) ent.Querier {
|
||||
return next
|
||||
}
|
||||
|
||||
// Traverse calls f(ctx, q).
|
||||
func (f TraversePendingAuthSession) Traverse(ctx context.Context, q ent.Query) error {
|
||||
if q, ok := q.(*ent.PendingAuthSessionQuery); ok {
|
||||
return f(ctx, q)
|
||||
}
|
||||
return fmt.Errorf("unexpected query type %T. expect *ent.PendingAuthSessionQuery", q)
|
||||
}
|
||||
|
||||
// The PromoCodeFunc type is an adapter to allow the use of ordinary function as a Querier.
|
||||
type PromoCodeFunc func(context.Context, *ent.PromoCodeQuery) (ent.Value, error)
|
||||
|
||||
@@ -466,6 +776,60 @@ func (f TraverseSetting) Traverse(ctx context.Context, q ent.Query) error {
|
||||
return fmt.Errorf("unexpected query type %T. expect *ent.SettingQuery", q)
|
||||
}
|
||||
|
||||
// The SubscriptionPlanFunc type is an adapter to allow the use of ordinary function as a Querier.
|
||||
type SubscriptionPlanFunc func(context.Context, *ent.SubscriptionPlanQuery) (ent.Value, error)
|
||||
|
||||
// Query calls f(ctx, q).
|
||||
func (f SubscriptionPlanFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) {
|
||||
if q, ok := q.(*ent.SubscriptionPlanQuery); ok {
|
||||
return f(ctx, q)
|
||||
}
|
||||
return nil, fmt.Errorf("unexpected query type %T. expect *ent.SubscriptionPlanQuery", q)
|
||||
}
|
||||
|
||||
// The TraverseSubscriptionPlan type is an adapter to allow the use of ordinary function as Traverser.
|
||||
type TraverseSubscriptionPlan func(context.Context, *ent.SubscriptionPlanQuery) error
|
||||
|
||||
// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline.
|
||||
func (f TraverseSubscriptionPlan) Intercept(next ent.Querier) ent.Querier {
|
||||
return next
|
||||
}
|
||||
|
||||
// Traverse calls f(ctx, q).
|
||||
func (f TraverseSubscriptionPlan) Traverse(ctx context.Context, q ent.Query) error {
|
||||
if q, ok := q.(*ent.SubscriptionPlanQuery); ok {
|
||||
return f(ctx, q)
|
||||
}
|
||||
return fmt.Errorf("unexpected query type %T. expect *ent.SubscriptionPlanQuery", q)
|
||||
}
|
||||
|
||||
// The TLSFingerprintProfileFunc type is an adapter to allow the use of ordinary function as a Querier.
|
||||
type TLSFingerprintProfileFunc func(context.Context, *ent.TLSFingerprintProfileQuery) (ent.Value, error)
|
||||
|
||||
// Query calls f(ctx, q).
|
||||
func (f TLSFingerprintProfileFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) {
|
||||
if q, ok := q.(*ent.TLSFingerprintProfileQuery); ok {
|
||||
return f(ctx, q)
|
||||
}
|
||||
return nil, fmt.Errorf("unexpected query type %T. expect *ent.TLSFingerprintProfileQuery", q)
|
||||
}
|
||||
|
||||
// The TraverseTLSFingerprintProfile type is an adapter to allow the use of ordinary function as Traverser.
|
||||
type TraverseTLSFingerprintProfile func(context.Context, *ent.TLSFingerprintProfileQuery) error
|
||||
|
||||
// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline.
|
||||
func (f TraverseTLSFingerprintProfile) Intercept(next ent.Querier) ent.Querier {
|
||||
return next
|
||||
}
|
||||
|
||||
// Traverse calls f(ctx, q).
|
||||
func (f TraverseTLSFingerprintProfile) Traverse(ctx context.Context, q ent.Query) error {
|
||||
if q, ok := q.(*ent.TLSFingerprintProfileQuery); ok {
|
||||
return f(ctx, q)
|
||||
}
|
||||
return fmt.Errorf("unexpected query type %T. expect *ent.TLSFingerprintProfileQuery", q)
|
||||
}
|
||||
|
||||
// The UsageCleanupTaskFunc type is an adapter to allow the use of ordinary function as a Querier.
|
||||
type UsageCleanupTaskFunc func(context.Context, *ent.UsageCleanupTaskQuery) (ent.Value, error)
|
||||
|
||||
@@ -668,12 +1032,34 @@ func NewQuery(q ent.Query) (Query, error) {
|
||||
return &query[*ent.AnnouncementQuery, predicate.Announcement, announcement.OrderOption]{typ: ent.TypeAnnouncement, tq: q}, nil
|
||||
case *ent.AnnouncementReadQuery:
|
||||
return &query[*ent.AnnouncementReadQuery, predicate.AnnouncementRead, announcementread.OrderOption]{typ: ent.TypeAnnouncementRead, tq: q}, nil
|
||||
case *ent.AuthIdentityQuery:
|
||||
return &query[*ent.AuthIdentityQuery, predicate.AuthIdentity, authidentity.OrderOption]{typ: ent.TypeAuthIdentity, tq: q}, nil
|
||||
case *ent.AuthIdentityChannelQuery:
|
||||
return &query[*ent.AuthIdentityChannelQuery, predicate.AuthIdentityChannel, authidentitychannel.OrderOption]{typ: ent.TypeAuthIdentityChannel, tq: q}, nil
|
||||
case *ent.ChannelMonitorQuery:
|
||||
return &query[*ent.ChannelMonitorQuery, predicate.ChannelMonitor, channelmonitor.OrderOption]{typ: ent.TypeChannelMonitor, tq: q}, nil
|
||||
case *ent.ChannelMonitorDailyRollupQuery:
|
||||
return &query[*ent.ChannelMonitorDailyRollupQuery, predicate.ChannelMonitorDailyRollup, channelmonitordailyrollup.OrderOption]{typ: ent.TypeChannelMonitorDailyRollup, tq: q}, nil
|
||||
case *ent.ChannelMonitorHistoryQuery:
|
||||
return &query[*ent.ChannelMonitorHistoryQuery, predicate.ChannelMonitorHistory, channelmonitorhistory.OrderOption]{typ: ent.TypeChannelMonitorHistory, tq: q}, nil
|
||||
case *ent.ChannelMonitorRequestTemplateQuery:
|
||||
return &query[*ent.ChannelMonitorRequestTemplateQuery, predicate.ChannelMonitorRequestTemplate, channelmonitorrequesttemplate.OrderOption]{typ: ent.TypeChannelMonitorRequestTemplate, tq: q}, nil
|
||||
case *ent.ErrorPassthroughRuleQuery:
|
||||
return &query[*ent.ErrorPassthroughRuleQuery, predicate.ErrorPassthroughRule, errorpassthroughrule.OrderOption]{typ: ent.TypeErrorPassthroughRule, tq: q}, nil
|
||||
case *ent.GroupQuery:
|
||||
return &query[*ent.GroupQuery, predicate.Group, group.OrderOption]{typ: ent.TypeGroup, tq: q}, nil
|
||||
case *ent.IdempotencyRecordQuery:
|
||||
return &query[*ent.IdempotencyRecordQuery, predicate.IdempotencyRecord, idempotencyrecord.OrderOption]{typ: ent.TypeIdempotencyRecord, tq: q}, nil
|
||||
case *ent.IdentityAdoptionDecisionQuery:
|
||||
return &query[*ent.IdentityAdoptionDecisionQuery, predicate.IdentityAdoptionDecision, identityadoptiondecision.OrderOption]{typ: ent.TypeIdentityAdoptionDecision, tq: q}, nil
|
||||
case *ent.PaymentAuditLogQuery:
|
||||
return &query[*ent.PaymentAuditLogQuery, predicate.PaymentAuditLog, paymentauditlog.OrderOption]{typ: ent.TypePaymentAuditLog, tq: q}, nil
|
||||
case *ent.PaymentOrderQuery:
|
||||
return &query[*ent.PaymentOrderQuery, predicate.PaymentOrder, paymentorder.OrderOption]{typ: ent.TypePaymentOrder, tq: q}, nil
|
||||
case *ent.PaymentProviderInstanceQuery:
|
||||
return &query[*ent.PaymentProviderInstanceQuery, predicate.PaymentProviderInstance, paymentproviderinstance.OrderOption]{typ: ent.TypePaymentProviderInstance, tq: q}, nil
|
||||
case *ent.PendingAuthSessionQuery:
|
||||
return &query[*ent.PendingAuthSessionQuery, predicate.PendingAuthSession, pendingauthsession.OrderOption]{typ: ent.TypePendingAuthSession, tq: q}, nil
|
||||
case *ent.PromoCodeQuery:
|
||||
return &query[*ent.PromoCodeQuery, predicate.PromoCode, promocode.OrderOption]{typ: ent.TypePromoCode, tq: q}, nil
|
||||
case *ent.PromoCodeUsageQuery:
|
||||
@@ -686,6 +1072,10 @@ func NewQuery(q ent.Query) (Query, error) {
|
||||
return &query[*ent.SecuritySecretQuery, predicate.SecuritySecret, securitysecret.OrderOption]{typ: ent.TypeSecuritySecret, tq: q}, nil
|
||||
case *ent.SettingQuery:
|
||||
return &query[*ent.SettingQuery, predicate.Setting, setting.OrderOption]{typ: ent.TypeSetting, tq: q}, nil
|
||||
case *ent.SubscriptionPlanQuery:
|
||||
return &query[*ent.SubscriptionPlanQuery, predicate.SubscriptionPlan, subscriptionplan.OrderOption]{typ: ent.TypeSubscriptionPlan, tq: q}, nil
|
||||
case *ent.TLSFingerprintProfileQuery:
|
||||
return &query[*ent.TLSFingerprintProfileQuery, predicate.TLSFingerprintProfile, tlsfingerprintprofile.OrderOption]{typ: ent.TypeTLSFingerprintProfile, tq: q}, nil
|
||||
case *ent.UsageCleanupTaskQuery:
|
||||
return &query[*ent.UsageCleanupTaskQuery, predicate.UsageCleanupTask, usagecleanuptask.OrderOption]{typ: ent.TypeUsageCleanupTask, tq: q}, nil
|
||||
case *ent.UsageLogQuery:
|
||||
|
||||
73
backend/ent/migrate/auth_identity_fk_ondelete_test.go
Normal file
@@ -0,0 +1,73 @@
|
||||
package migrate
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"entgo.io/ent/dialect/entsql"
|
||||
entschema "entgo.io/ent/dialect/sql/schema"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestAuthIdentityFoundationForeignKeyOnDeleteActions(t *testing.T) {
|
||||
require.Equal(
|
||||
t,
|
||||
entschema.Cascade,
|
||||
findForeignKeyBySymbol(t, AuthIdentitiesTable, "auth_identities_users_auth_identities").OnDelete,
|
||||
)
|
||||
require.Equal(
|
||||
t,
|
||||
entschema.Cascade,
|
||||
findForeignKeyBySymbol(t, AuthIdentityChannelsTable, "auth_identity_channels_auth_identities_channels").OnDelete,
|
||||
)
|
||||
require.Equal(
|
||||
t,
|
||||
entschema.Cascade,
|
||||
findForeignKeyBySymbol(t, IdentityAdoptionDecisionsTable, "identity_adoption_decisions_pending_auth_sessions_adoption_decision").OnDelete,
|
||||
)
|
||||
|
||||
require.Equal(
|
||||
t,
|
||||
entschema.SetNull,
|
||||
findForeignKeyBySymbol(t, PendingAuthSessionsTable, "pending_auth_sessions_users_pending_auth_sessions").OnDelete,
|
||||
)
|
||||
require.Equal(
|
||||
t,
|
||||
entschema.SetNull,
|
||||
findForeignKeyBySymbol(t, IdentityAdoptionDecisionsTable, "identity_adoption_decisions_auth_identities_adoption_decisions").OnDelete,
|
||||
)
|
||||
}
|
||||
|
||||
func TestPaymentOrdersOutTradeNoPartialUniqueIndex(t *testing.T) {
|
||||
idx := findIndexByName(t, PaymentOrdersTable, "paymentorder_out_trade_no")
|
||||
require.True(t, idx.Unique)
|
||||
require.Len(t, idx.Columns, 1)
|
||||
require.Equal(t, "out_trade_no", idx.Columns[0].Name)
|
||||
require.NotNil(t, idx.Annotation)
|
||||
require.Equal(t, (&entsql.IndexAnnotation{Where: "out_trade_no <> ''"}).Where, idx.Annotation.Where)
|
||||
}
|
||||
|
||||
func findForeignKeyBySymbol(t *testing.T, table *entschema.Table, symbol string) *entschema.ForeignKey {
|
||||
t.Helper()
|
||||
|
||||
for _, fk := range table.ForeignKeys {
|
||||
if fk.Symbol == symbol {
|
||||
return fk
|
||||
}
|
||||
}
|
||||
|
||||
require.Failf(t, "missing foreign key", "table %s should include foreign key %s", table.Name, symbol)
|
||||
return nil
|
||||
}
|
||||
|
||||
func findIndexByName(t *testing.T, table *entschema.Table, name string) *entschema.Index {
|
||||
t.Helper()
|
||||
|
||||
for _, idx := range table.Indexes {
|
||||
if idx.Name == name {
|
||||
return idx
|
||||
}
|
||||
}
|
||||
|
||||
require.Failf(t, "missing index", "table %s should include index %s", table.Name, name)
|
||||
return nil
|
||||
}
|
||||
@@ -338,6 +338,252 @@ var (
|
||||
},
|
||||
},
|
||||
}
|
||||
// AuthIdentitiesColumns holds the columns for the "auth_identities" table.
|
||||
AuthIdentitiesColumns = []*schema.Column{
|
||||
{Name: "id", Type: field.TypeInt64, Increment: true},
|
||||
{Name: "created_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||
{Name: "updated_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||
{Name: "provider_type", Type: field.TypeString, Size: 20},
|
||||
{Name: "provider_key", Type: field.TypeString, SchemaType: map[string]string{"postgres": "text"}},
|
||||
{Name: "provider_subject", Type: field.TypeString, SchemaType: map[string]string{"postgres": "text"}},
|
||||
{Name: "verified_at", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||
{Name: "issuer", Type: field.TypeString, Nullable: true, SchemaType: map[string]string{"postgres": "text"}},
|
||||
{Name: "metadata", Type: field.TypeJSON, SchemaType: map[string]string{"postgres": "jsonb"}},
|
||||
{Name: "user_id", Type: field.TypeInt64},
|
||||
}
|
||||
// AuthIdentitiesTable holds the schema information for the "auth_identities" table.
|
||||
AuthIdentitiesTable = &schema.Table{
|
||||
Name: "auth_identities",
|
||||
Columns: AuthIdentitiesColumns,
|
||||
PrimaryKey: []*schema.Column{AuthIdentitiesColumns[0]},
|
||||
ForeignKeys: []*schema.ForeignKey{
|
||||
{
|
||||
Symbol: "auth_identities_users_auth_identities",
|
||||
Columns: []*schema.Column{AuthIdentitiesColumns[9]},
|
||||
RefColumns: []*schema.Column{UsersColumns[0]},
|
||||
OnDelete: schema.Cascade,
|
||||
},
|
||||
},
|
||||
Indexes: []*schema.Index{
|
||||
{
|
||||
Name: "authidentity_provider_type_provider_key_provider_subject",
|
||||
Unique: true,
|
||||
Columns: []*schema.Column{AuthIdentitiesColumns[3], AuthIdentitiesColumns[4], AuthIdentitiesColumns[5]},
|
||||
},
|
||||
{
|
||||
Name: "authidentity_user_id",
|
||||
Unique: false,
|
||||
Columns: []*schema.Column{AuthIdentitiesColumns[9]},
|
||||
},
|
||||
{
|
||||
Name: "authidentity_user_id_provider_type",
|
||||
Unique: false,
|
||||
Columns: []*schema.Column{AuthIdentitiesColumns[9], AuthIdentitiesColumns[3]},
|
||||
},
|
||||
},
|
||||
}
|
||||
// AuthIdentityChannelsColumns holds the columns for the "auth_identity_channels" table.
|
||||
AuthIdentityChannelsColumns = []*schema.Column{
|
||||
{Name: "id", Type: field.TypeInt64, Increment: true},
|
||||
{Name: "created_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||
{Name: "updated_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||
{Name: "provider_type", Type: field.TypeString, Size: 20},
|
||||
{Name: "provider_key", Type: field.TypeString, SchemaType: map[string]string{"postgres": "text"}},
|
||||
{Name: "channel", Type: field.TypeString, Size: 20},
|
||||
{Name: "channel_app_id", Type: field.TypeString, SchemaType: map[string]string{"postgres": "text"}},
|
||||
{Name: "channel_subject", Type: field.TypeString, SchemaType: map[string]string{"postgres": "text"}},
|
||||
{Name: "metadata", Type: field.TypeJSON, SchemaType: map[string]string{"postgres": "jsonb"}},
|
||||
{Name: "identity_id", Type: field.TypeInt64},
|
||||
}
|
||||
// AuthIdentityChannelsTable holds the schema information for the "auth_identity_channels" table.
|
||||
AuthIdentityChannelsTable = &schema.Table{
|
||||
Name: "auth_identity_channels",
|
||||
Columns: AuthIdentityChannelsColumns,
|
||||
PrimaryKey: []*schema.Column{AuthIdentityChannelsColumns[0]},
|
||||
ForeignKeys: []*schema.ForeignKey{
|
||||
{
|
||||
Symbol: "auth_identity_channels_auth_identities_channels",
|
||||
Columns: []*schema.Column{AuthIdentityChannelsColumns[9]},
|
||||
RefColumns: []*schema.Column{AuthIdentitiesColumns[0]},
|
||||
OnDelete: schema.Cascade,
|
||||
},
|
||||
},
|
||||
Indexes: []*schema.Index{
|
||||
{
|
||||
Name: "authidentitychannel_provider_type_provider_key_channel_channel_app_id_channel_subject",
|
||||
Unique: true,
|
||||
Columns: []*schema.Column{AuthIdentityChannelsColumns[3], AuthIdentityChannelsColumns[4], AuthIdentityChannelsColumns[5], AuthIdentityChannelsColumns[6], AuthIdentityChannelsColumns[7]},
|
||||
},
|
||||
{
|
||||
Name: "authidentitychannel_identity_id",
|
||||
Unique: false,
|
||||
Columns: []*schema.Column{AuthIdentityChannelsColumns[9]},
|
||||
},
|
||||
},
|
||||
}
|
||||
// ChannelMonitorsColumns holds the columns for the "channel_monitors" table.
|
||||
ChannelMonitorsColumns = []*schema.Column{
|
||||
{Name: "id", Type: field.TypeInt64, Increment: true},
|
||||
{Name: "created_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||
{Name: "updated_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||
{Name: "name", Type: field.TypeString, Size: 100},
|
||||
{Name: "provider", Type: field.TypeEnum, Enums: []string{"openai", "anthropic", "gemini"}},
|
||||
{Name: "endpoint", Type: field.TypeString, Size: 500},
|
||||
{Name: "api_key_encrypted", Type: field.TypeString},
|
||||
{Name: "primary_model", Type: field.TypeString, Size: 200},
|
||||
{Name: "extra_models", Type: field.TypeJSON},
|
||||
{Name: "group_name", Type: field.TypeString, Nullable: true, Size: 100, Default: ""},
|
||||
{Name: "enabled", Type: field.TypeBool, Default: true},
|
||||
{Name: "interval_seconds", Type: field.TypeInt},
|
||||
{Name: "last_checked_at", Type: field.TypeTime, Nullable: true},
|
||||
{Name: "created_by", Type: field.TypeInt64},
|
||||
{Name: "extra_headers", Type: field.TypeJSON},
|
||||
{Name: "body_override_mode", Type: field.TypeString, Size: 10, Default: "off"},
|
||||
{Name: "body_override", Type: field.TypeJSON, Nullable: true},
|
||||
{Name: "template_id", Type: field.TypeInt64, Nullable: true},
|
||||
}
|
||||
// ChannelMonitorsTable holds the schema information for the "channel_monitors" table.
|
||||
ChannelMonitorsTable = &schema.Table{
|
||||
Name: "channel_monitors",
|
||||
Columns: ChannelMonitorsColumns,
|
||||
PrimaryKey: []*schema.Column{ChannelMonitorsColumns[0]},
|
||||
ForeignKeys: []*schema.ForeignKey{
|
||||
{
|
||||
Symbol: "channel_monitors_channel_monitor_request_templates_request_template",
|
||||
Columns: []*schema.Column{ChannelMonitorsColumns[17]},
|
||||
RefColumns: []*schema.Column{ChannelMonitorRequestTemplatesColumns[0]},
|
||||
OnDelete: schema.SetNull,
|
||||
},
|
||||
},
|
||||
Indexes: []*schema.Index{
|
||||
{
|
||||
Name: "channelmonitor_enabled_last_checked_at",
|
||||
Unique: false,
|
||||
Columns: []*schema.Column{ChannelMonitorsColumns[10], ChannelMonitorsColumns[12]},
|
||||
},
|
||||
{
|
||||
Name: "channelmonitor_provider",
|
||||
Unique: false,
|
||||
Columns: []*schema.Column{ChannelMonitorsColumns[4]},
|
||||
},
|
||||
{
|
||||
Name: "channelmonitor_group_name",
|
||||
Unique: false,
|
||||
Columns: []*schema.Column{ChannelMonitorsColumns[9]},
|
||||
},
|
||||
{
|
||||
Name: "channelmonitor_template_id",
|
||||
Unique: false,
|
||||
Columns: []*schema.Column{ChannelMonitorsColumns[17]},
|
||||
},
|
||||
},
|
||||
}
|
||||
// ChannelMonitorDailyRollupsColumns holds the columns for the "channel_monitor_daily_rollups" table.
|
||||
ChannelMonitorDailyRollupsColumns = []*schema.Column{
|
||||
{Name: "id", Type: field.TypeInt64, Increment: true},
|
||||
{Name: "model", Type: field.TypeString, Size: 200},
|
||||
{Name: "bucket_date", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "date"}},
|
||||
{Name: "total_checks", Type: field.TypeInt, Default: 0},
|
||||
{Name: "ok_count", Type: field.TypeInt, Default: 0},
|
||||
{Name: "operational_count", Type: field.TypeInt, Default: 0},
|
||||
{Name: "degraded_count", Type: field.TypeInt, Default: 0},
|
||||
{Name: "failed_count", Type: field.TypeInt, Default: 0},
|
||||
{Name: "error_count", Type: field.TypeInt, Default: 0},
|
||||
{Name: "sum_latency_ms", Type: field.TypeInt64, Default: 0},
|
||||
{Name: "count_latency", Type: field.TypeInt, Default: 0},
|
||||
{Name: "sum_ping_latency_ms", Type: field.TypeInt64, Default: 0},
|
||||
{Name: "count_ping_latency", Type: field.TypeInt, Default: 0},
|
||||
{Name: "computed_at", Type: field.TypeTime},
|
||||
{Name: "monitor_id", Type: field.TypeInt64},
|
||||
}
|
||||
// ChannelMonitorDailyRollupsTable holds the schema information for the "channel_monitor_daily_rollups" table.
|
||||
ChannelMonitorDailyRollupsTable = &schema.Table{
|
||||
Name: "channel_monitor_daily_rollups",
|
||||
Columns: ChannelMonitorDailyRollupsColumns,
|
||||
PrimaryKey: []*schema.Column{ChannelMonitorDailyRollupsColumns[0]},
|
||||
ForeignKeys: []*schema.ForeignKey{
|
||||
{
|
||||
Symbol: "channel_monitor_daily_rollups_channel_monitors_daily_rollups",
|
||||
Columns: []*schema.Column{ChannelMonitorDailyRollupsColumns[14]},
|
||||
RefColumns: []*schema.Column{ChannelMonitorsColumns[0]},
|
||||
OnDelete: schema.Cascade,
|
||||
},
|
||||
},
|
||||
Indexes: []*schema.Index{
|
||||
{
|
||||
Name: "channelmonitordailyrollup_monitor_id_model_bucket_date",
|
||||
Unique: true,
|
||||
Columns: []*schema.Column{ChannelMonitorDailyRollupsColumns[14], ChannelMonitorDailyRollupsColumns[1], ChannelMonitorDailyRollupsColumns[2]},
|
||||
},
|
||||
{
|
||||
Name: "channelmonitordailyrollup_bucket_date",
|
||||
Unique: false,
|
||||
Columns: []*schema.Column{ChannelMonitorDailyRollupsColumns[2]},
|
||||
},
|
||||
},
|
||||
}
|
||||
// ChannelMonitorHistoriesColumns holds the columns for the "channel_monitor_histories" table.
|
||||
ChannelMonitorHistoriesColumns = []*schema.Column{
|
||||
{Name: "id", Type: field.TypeInt64, Increment: true},
|
||||
{Name: "model", Type: field.TypeString, Size: 200},
|
||||
{Name: "status", Type: field.TypeEnum, Enums: []string{"operational", "degraded", "failed", "error"}},
|
||||
{Name: "latency_ms", Type: field.TypeInt, Nullable: true},
|
||||
{Name: "ping_latency_ms", Type: field.TypeInt, Nullable: true},
|
||||
{Name: "message", Type: field.TypeString, Nullable: true, Size: 500, Default: ""},
|
||||
{Name: "checked_at", Type: field.TypeTime},
|
||||
{Name: "monitor_id", Type: field.TypeInt64},
|
||||
}
|
||||
// ChannelMonitorHistoriesTable holds the schema information for the "channel_monitor_histories" table.
|
||||
ChannelMonitorHistoriesTable = &schema.Table{
|
||||
Name: "channel_monitor_histories",
|
||||
Columns: ChannelMonitorHistoriesColumns,
|
||||
PrimaryKey: []*schema.Column{ChannelMonitorHistoriesColumns[0]},
|
||||
ForeignKeys: []*schema.ForeignKey{
|
||||
{
|
||||
Symbol: "channel_monitor_histories_channel_monitors_history",
|
||||
Columns: []*schema.Column{ChannelMonitorHistoriesColumns[7]},
|
||||
RefColumns: []*schema.Column{ChannelMonitorsColumns[0]},
|
||||
OnDelete: schema.Cascade,
|
||||
},
|
||||
},
|
||||
Indexes: []*schema.Index{
|
||||
{
|
||||
Name: "channelmonitorhistory_monitor_id_model_checked_at",
|
||||
Unique: false,
|
||||
Columns: []*schema.Column{ChannelMonitorHistoriesColumns[7], ChannelMonitorHistoriesColumns[1], ChannelMonitorHistoriesColumns[6]},
|
||||
},
|
||||
{
|
||||
Name: "channelmonitorhistory_checked_at",
|
||||
Unique: false,
|
||||
Columns: []*schema.Column{ChannelMonitorHistoriesColumns[6]},
|
||||
},
|
||||
},
|
||||
}
|
||||
// ChannelMonitorRequestTemplatesColumns holds the columns for the "channel_monitor_request_templates" table.
|
||||
ChannelMonitorRequestTemplatesColumns = []*schema.Column{
|
||||
{Name: "id", Type: field.TypeInt64, Increment: true},
|
||||
{Name: "created_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||
{Name: "updated_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||
{Name: "name", Type: field.TypeString, Size: 100},
|
||||
{Name: "provider", Type: field.TypeEnum, Enums: []string{"openai", "anthropic", "gemini"}},
|
||||
{Name: "description", Type: field.TypeString, Nullable: true, Size: 500, Default: ""},
|
||||
{Name: "extra_headers", Type: field.TypeJSON},
|
||||
{Name: "body_override_mode", Type: field.TypeString, Size: 10, Default: "off"},
|
||||
{Name: "body_override", Type: field.TypeJSON, Nullable: true},
|
||||
}
|
||||
// ChannelMonitorRequestTemplatesTable holds the schema information for the "channel_monitor_request_templates" table.
|
||||
ChannelMonitorRequestTemplatesTable = &schema.Table{
|
||||
Name: "channel_monitor_request_templates",
|
||||
Columns: ChannelMonitorRequestTemplatesColumns,
|
||||
PrimaryKey: []*schema.Column{ChannelMonitorRequestTemplatesColumns[0]},
|
||||
Indexes: []*schema.Index{
|
||||
{
|
||||
Name: "channelmonitorrequesttemplate_provider_name",
|
||||
Unique: true,
|
||||
Columns: []*schema.Column{ChannelMonitorRequestTemplatesColumns[4], ChannelMonitorRequestTemplatesColumns[3]},
|
||||
},
|
||||
},
|
||||
}
|
||||
// ErrorPassthroughRulesColumns holds the columns for the "error_passthrough_rules" table.
|
||||
ErrorPassthroughRulesColumns = []*schema.Column{
|
||||
{Name: "id", Type: field.TypeInt64, Increment: true},
|
||||
@@ -395,11 +641,6 @@ var (
|
||||
{Name: "image_price_1k", Type: field.TypeFloat64, Nullable: true, SchemaType: map[string]string{"postgres": "decimal(20,8)"}},
|
||||
{Name: "image_price_2k", Type: field.TypeFloat64, Nullable: true, SchemaType: map[string]string{"postgres": "decimal(20,8)"}},
|
||||
{Name: "image_price_4k", Type: field.TypeFloat64, Nullable: true, SchemaType: map[string]string{"postgres": "decimal(20,8)"}},
|
||||
{Name: "sora_image_price_360", Type: field.TypeFloat64, Nullable: true, SchemaType: map[string]string{"postgres": "decimal(20,8)"}},
|
||||
{Name: "sora_image_price_540", Type: field.TypeFloat64, Nullable: true, SchemaType: map[string]string{"postgres": "decimal(20,8)"}},
|
||||
{Name: "sora_video_price_per_request", Type: field.TypeFloat64, Nullable: true, SchemaType: map[string]string{"postgres": "decimal(20,8)"}},
|
||||
{Name: "sora_video_price_per_request_hd", Type: field.TypeFloat64, Nullable: true, SchemaType: map[string]string{"postgres": "decimal(20,8)"}},
|
||||
{Name: "sora_storage_quota_bytes", Type: field.TypeInt64, Default: 0},
|
||||
{Name: "claude_code_only", Type: field.TypeBool, Default: false},
|
||||
{Name: "fallback_group_id", Type: field.TypeInt64, Nullable: true},
|
||||
{Name: "fallback_group_id_on_invalid_request", Type: field.TypeInt64, Nullable: true},
|
||||
@@ -409,7 +650,11 @@ var (
|
||||
{Name: "supported_model_scopes", Type: field.TypeJSON, SchemaType: map[string]string{"postgres": "jsonb"}},
|
||||
{Name: "sort_order", Type: field.TypeInt, Default: 0},
|
||||
{Name: "allow_messages_dispatch", Type: field.TypeBool, Default: false},
|
||||
{Name: "require_oauth_only", Type: field.TypeBool, Default: false},
|
||||
{Name: "require_privacy_set", Type: field.TypeBool, Default: false},
|
||||
{Name: "default_mapped_model", Type: field.TypeString, Size: 100, Default: ""},
|
||||
{Name: "messages_dispatch_model_config", Type: field.TypeJSON, SchemaType: map[string]string{"postgres": "jsonb"}},
|
||||
{Name: "rpm_limit", Type: field.TypeInt, Default: 0},
|
||||
}
|
||||
// GroupsTable holds the schema information for the "groups" table.
|
||||
GroupsTable = &schema.Table{
|
||||
@@ -445,7 +690,7 @@ var (
|
||||
{
|
||||
Name: "group_sort_order",
|
||||
Unique: false,
|
||||
Columns: []*schema.Column{GroupsColumns[30]},
|
||||
Columns: []*schema.Column{GroupsColumns[25]},
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -487,6 +732,273 @@ var (
|
||||
},
|
||||
},
|
||||
}
|
||||
// IdentityAdoptionDecisionsColumns holds the columns for the "identity_adoption_decisions" table.
|
||||
IdentityAdoptionDecisionsColumns = []*schema.Column{
|
||||
{Name: "id", Type: field.TypeInt64, Increment: true},
|
||||
{Name: "created_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||
{Name: "updated_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||
{Name: "adopt_display_name", Type: field.TypeBool, Default: false},
|
||||
{Name: "adopt_avatar", Type: field.TypeBool, Default: false},
|
||||
{Name: "decided_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||
{Name: "identity_id", Type: field.TypeInt64, Nullable: true},
|
||||
{Name: "pending_auth_session_id", Type: field.TypeInt64, Unique: true},
|
||||
}
|
||||
// IdentityAdoptionDecisionsTable holds the schema information for the "identity_adoption_decisions" table.
|
||||
IdentityAdoptionDecisionsTable = &schema.Table{
|
||||
Name: "identity_adoption_decisions",
|
||||
Columns: IdentityAdoptionDecisionsColumns,
|
||||
PrimaryKey: []*schema.Column{IdentityAdoptionDecisionsColumns[0]},
|
||||
ForeignKeys: []*schema.ForeignKey{
|
||||
{
|
||||
Symbol: "identity_adoption_decisions_auth_identities_adoption_decisions",
|
||||
Columns: []*schema.Column{IdentityAdoptionDecisionsColumns[6]},
|
||||
RefColumns: []*schema.Column{AuthIdentitiesColumns[0]},
|
||||
OnDelete: schema.SetNull,
|
||||
},
|
||||
{
|
||||
Symbol: "identity_adoption_decisions_pending_auth_sessions_adoption_decision",
|
||||
Columns: []*schema.Column{IdentityAdoptionDecisionsColumns[7]},
|
||||
RefColumns: []*schema.Column{PendingAuthSessionsColumns[0]},
|
||||
OnDelete: schema.Cascade,
|
||||
},
|
||||
},
|
||||
Indexes: []*schema.Index{
|
||||
{
|
||||
Name: "identityadoptiondecision_pending_auth_session_id",
|
||||
Unique: true,
|
||||
Columns: []*schema.Column{IdentityAdoptionDecisionsColumns[7]},
|
||||
},
|
||||
{
|
||||
Name: "identityadoptiondecision_identity_id",
|
||||
Unique: false,
|
||||
Columns: []*schema.Column{IdentityAdoptionDecisionsColumns[6]},
|
||||
},
|
||||
},
|
||||
}
|
||||
// PaymentAuditLogsColumns holds the columns for the "payment_audit_logs" table.
|
||||
PaymentAuditLogsColumns = []*schema.Column{
|
||||
{Name: "id", Type: field.TypeInt64, Increment: true},
|
||||
{Name: "order_id", Type: field.TypeString, Size: 64},
|
||||
{Name: "action", Type: field.TypeString, Size: 50},
|
||||
{Name: "detail", Type: field.TypeString, Default: "", SchemaType: map[string]string{"postgres": "text"}},
|
||||
{Name: "operator", Type: field.TypeString, Size: 100, Default: "system"},
|
||||
{Name: "created_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||
}
|
||||
// PaymentAuditLogsTable holds the schema information for the "payment_audit_logs" table.
|
||||
PaymentAuditLogsTable = &schema.Table{
|
||||
Name: "payment_audit_logs",
|
||||
Columns: PaymentAuditLogsColumns,
|
||||
PrimaryKey: []*schema.Column{PaymentAuditLogsColumns[0]},
|
||||
Indexes: []*schema.Index{
|
||||
{
|
||||
Name: "paymentauditlog_order_id",
|
||||
Unique: false,
|
||||
Columns: []*schema.Column{PaymentAuditLogsColumns[1]},
|
||||
},
|
||||
},
|
||||
}
|
||||
// PaymentOrdersColumns holds the columns for the "payment_orders" table.
|
||||
PaymentOrdersColumns = []*schema.Column{
|
||||
{Name: "id", Type: field.TypeInt64, Increment: true},
|
||||
{Name: "user_email", Type: field.TypeString, Size: 255},
|
||||
{Name: "user_name", Type: field.TypeString, Size: 100},
|
||||
{Name: "user_notes", Type: field.TypeString, Nullable: true, SchemaType: map[string]string{"postgres": "text"}},
|
||||
{Name: "amount", Type: field.TypeFloat64, SchemaType: map[string]string{"postgres": "decimal(20,2)"}},
|
||||
{Name: "pay_amount", Type: field.TypeFloat64, SchemaType: map[string]string{"postgres": "decimal(20,2)"}},
|
||||
{Name: "fee_rate", Type: field.TypeFloat64, Default: 0, SchemaType: map[string]string{"postgres": "decimal(10,4)"}},
|
||||
{Name: "recharge_code", Type: field.TypeString, Size: 64},
|
||||
{Name: "out_trade_no", Type: field.TypeString, Size: 64, Default: ""},
|
||||
{Name: "payment_type", Type: field.TypeString, Size: 30},
|
||||
{Name: "payment_trade_no", Type: field.TypeString, Size: 128},
|
||||
{Name: "pay_url", Type: field.TypeString, Nullable: true, SchemaType: map[string]string{"postgres": "text"}},
|
||||
{Name: "qr_code", Type: field.TypeString, Nullable: true, SchemaType: map[string]string{"postgres": "text"}},
|
||||
{Name: "qr_code_img", Type: field.TypeString, Nullable: true, SchemaType: map[string]string{"postgres": "text"}},
|
||||
{Name: "order_type", Type: field.TypeString, Size: 20, Default: "balance"},
|
||||
{Name: "plan_id", Type: field.TypeInt64, Nullable: true},
|
||||
{Name: "subscription_group_id", Type: field.TypeInt64, Nullable: true},
|
||||
{Name: "subscription_days", Type: field.TypeInt, Nullable: true},
|
||||
{Name: "provider_instance_id", Type: field.TypeString, Nullable: true, Size: 64},
|
||||
{Name: "provider_key", Type: field.TypeString, Nullable: true, Size: 30},
|
||||
{Name: "provider_snapshot", Type: field.TypeJSON, Nullable: true, SchemaType: map[string]string{"postgres": "jsonb"}},
|
||||
{Name: "status", Type: field.TypeString, Size: 30, Default: "PENDING"},
|
||||
{Name: "refund_amount", Type: field.TypeFloat64, Default: 0, SchemaType: map[string]string{"postgres": "decimal(20,2)"}},
|
||||
{Name: "refund_reason", Type: field.TypeString, Nullable: true, SchemaType: map[string]string{"postgres": "text"}},
|
||||
{Name: "refund_at", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||
{Name: "force_refund", Type: field.TypeBool, Default: false},
|
||||
{Name: "refund_requested_at", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||
{Name: "refund_request_reason", Type: field.TypeString, Nullable: true, SchemaType: map[string]string{"postgres": "text"}},
|
||||
{Name: "refund_requested_by", Type: field.TypeString, Nullable: true, Size: 20},
|
||||
{Name: "expires_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||
{Name: "paid_at", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||
{Name: "completed_at", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||
{Name: "failed_at", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||
{Name: "failed_reason", Type: field.TypeString, Nullable: true, SchemaType: map[string]string{"postgres": "text"}},
|
||||
{Name: "client_ip", Type: field.TypeString, Size: 50},
|
||||
{Name: "src_host", Type: field.TypeString, Size: 255},
|
||||
{Name: "src_url", Type: field.TypeString, Nullable: true, SchemaType: map[string]string{"postgres": "text"}},
|
||||
{Name: "created_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||
{Name: "updated_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||
{Name: "user_id", Type: field.TypeInt64},
|
||||
}
|
||||
// PaymentOrdersTable holds the schema information for the "payment_orders" table.
|
||||
PaymentOrdersTable = &schema.Table{
|
||||
Name: "payment_orders",
|
||||
Columns: PaymentOrdersColumns,
|
||||
PrimaryKey: []*schema.Column{PaymentOrdersColumns[0]},
|
||||
ForeignKeys: []*schema.ForeignKey{
|
||||
{
|
||||
Symbol: "payment_orders_users_payment_orders",
|
||||
Columns: []*schema.Column{PaymentOrdersColumns[39]},
|
||||
RefColumns: []*schema.Column{UsersColumns[0]},
|
||||
OnDelete: schema.NoAction,
|
||||
},
|
||||
},
|
||||
Indexes: []*schema.Index{
|
||||
{
|
||||
Name: "paymentorder_out_trade_no",
|
||||
Unique: true,
|
||||
Columns: []*schema.Column{PaymentOrdersColumns[8]},
|
||||
Annotation: &entsql.IndexAnnotation{
|
||||
Where: "out_trade_no <> ''",
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "paymentorder_user_id",
|
||||
Unique: false,
|
||||
Columns: []*schema.Column{PaymentOrdersColumns[39]},
|
||||
},
|
||||
{
|
||||
Name: "paymentorder_status",
|
||||
Unique: false,
|
||||
Columns: []*schema.Column{PaymentOrdersColumns[21]},
|
||||
},
|
||||
{
|
||||
Name: "paymentorder_expires_at",
|
||||
Unique: false,
|
||||
Columns: []*schema.Column{PaymentOrdersColumns[29]},
|
||||
},
|
||||
{
|
||||
Name: "paymentorder_created_at",
|
||||
Unique: false,
|
||||
Columns: []*schema.Column{PaymentOrdersColumns[37]},
|
||||
},
|
||||
{
|
||||
Name: "paymentorder_paid_at",
|
||||
Unique: false,
|
||||
Columns: []*schema.Column{PaymentOrdersColumns[30]},
|
||||
},
|
||||
{
|
||||
Name: "paymentorder_payment_type_paid_at",
|
||||
Unique: false,
|
||||
Columns: []*schema.Column{PaymentOrdersColumns[9], PaymentOrdersColumns[30]},
|
||||
},
|
||||
{
|
||||
Name: "paymentorder_order_type",
|
||||
Unique: false,
|
||||
Columns: []*schema.Column{PaymentOrdersColumns[14]},
|
||||
},
|
||||
},
|
||||
}
|
||||
// PaymentProviderInstancesColumns holds the columns for the "payment_provider_instances" table.
|
||||
PaymentProviderInstancesColumns = []*schema.Column{
|
||||
{Name: "id", Type: field.TypeInt64, Increment: true},
|
||||
{Name: "provider_key", Type: field.TypeString, Size: 30},
|
||||
{Name: "name", Type: field.TypeString, Size: 100, Default: ""},
|
||||
{Name: "config", Type: field.TypeString, SchemaType: map[string]string{"postgres": "text"}},
|
||||
{Name: "supported_types", Type: field.TypeString, Size: 200, Default: ""},
|
||||
{Name: "enabled", Type: field.TypeBool, Default: true},
|
||||
{Name: "payment_mode", Type: field.TypeString, Size: 20, Default: ""},
|
||||
{Name: "sort_order", Type: field.TypeInt, Default: 0},
|
||||
{Name: "limits", Type: field.TypeString, Default: "", SchemaType: map[string]string{"postgres": "text"}},
|
||||
{Name: "refund_enabled", Type: field.TypeBool, Default: false},
|
||||
{Name: "allow_user_refund", Type: field.TypeBool, Default: false},
|
||||
{Name: "created_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||
{Name: "updated_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||
}
|
||||
// PaymentProviderInstancesTable holds the schema information for the "payment_provider_instances" table.
|
||||
PaymentProviderInstancesTable = &schema.Table{
|
||||
Name: "payment_provider_instances",
|
||||
Columns: PaymentProviderInstancesColumns,
|
||||
PrimaryKey: []*schema.Column{PaymentProviderInstancesColumns[0]},
|
||||
Indexes: []*schema.Index{
|
||||
{
|
||||
Name: "paymentproviderinstance_provider_key",
|
||||
Unique: false,
|
||||
Columns: []*schema.Column{PaymentProviderInstancesColumns[1]},
|
||||
},
|
||||
{
|
||||
Name: "paymentproviderinstance_enabled",
|
||||
Unique: false,
|
||||
Columns: []*schema.Column{PaymentProviderInstancesColumns[5]},
|
||||
},
|
||||
},
|
||||
}
|
||||
// PendingAuthSessionsColumns holds the columns for the "pending_auth_sessions" table.
|
||||
PendingAuthSessionsColumns = []*schema.Column{
|
||||
{Name: "id", Type: field.TypeInt64, Increment: true},
|
||||
{Name: "created_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||
{Name: "updated_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||
{Name: "session_token", Type: field.TypeString, Size: 255},
|
||||
{Name: "intent", Type: field.TypeString, Size: 40},
|
||||
{Name: "provider_type", Type: field.TypeString, Size: 20},
|
||||
{Name: "provider_key", Type: field.TypeString, SchemaType: map[string]string{"postgres": "text"}},
|
||||
{Name: "provider_subject", Type: field.TypeString, SchemaType: map[string]string{"postgres": "text"}},
|
||||
{Name: "redirect_to", Type: field.TypeString, Default: "", SchemaType: map[string]string{"postgres": "text"}},
|
||||
{Name: "resolved_email", Type: field.TypeString, Default: "", SchemaType: map[string]string{"postgres": "text"}},
|
||||
{Name: "registration_password_hash", Type: field.TypeString, Default: "", SchemaType: map[string]string{"postgres": "text"}},
|
||||
{Name: "upstream_identity_claims", Type: field.TypeJSON, SchemaType: map[string]string{"postgres": "jsonb"}},
|
||||
{Name: "local_flow_state", Type: field.TypeJSON, SchemaType: map[string]string{"postgres": "jsonb"}},
|
||||
{Name: "browser_session_key", Type: field.TypeString, Default: "", SchemaType: map[string]string{"postgres": "text"}},
|
||||
{Name: "completion_code_hash", Type: field.TypeString, Default: "", SchemaType: map[string]string{"postgres": "text"}},
|
||||
{Name: "completion_code_expires_at", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||
{Name: "email_verified_at", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||
{Name: "password_verified_at", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||
{Name: "totp_verified_at", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||
{Name: "expires_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||
{Name: "consumed_at", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||
{Name: "target_user_id", Type: field.TypeInt64, Nullable: true},
|
||||
}
|
||||
// PendingAuthSessionsTable holds the schema information for the "pending_auth_sessions" table.
|
||||
PendingAuthSessionsTable = &schema.Table{
|
||||
Name: "pending_auth_sessions",
|
||||
Columns: PendingAuthSessionsColumns,
|
||||
PrimaryKey: []*schema.Column{PendingAuthSessionsColumns[0]},
|
||||
ForeignKeys: []*schema.ForeignKey{
|
||||
{
|
||||
Symbol: "pending_auth_sessions_users_pending_auth_sessions",
|
||||
Columns: []*schema.Column{PendingAuthSessionsColumns[21]},
|
||||
RefColumns: []*schema.Column{UsersColumns[0]},
|
||||
OnDelete: schema.SetNull,
|
||||
},
|
||||
},
|
||||
Indexes: []*schema.Index{
|
||||
{
|
||||
Name: "pendingauthsession_session_token",
|
||||
Unique: true,
|
||||
Columns: []*schema.Column{PendingAuthSessionsColumns[3]},
|
||||
},
|
||||
{
|
||||
Name: "pendingauthsession_target_user_id",
|
||||
Unique: false,
|
||||
Columns: []*schema.Column{PendingAuthSessionsColumns[21]},
|
||||
},
|
||||
{
|
||||
Name: "pendingauthsession_expires_at",
|
||||
Unique: false,
|
||||
Columns: []*schema.Column{PendingAuthSessionsColumns[19]},
|
||||
},
|
||||
{
|
||||
Name: "pendingauthsession_provider_type_provider_key_provider_subject",
|
||||
Unique: false,
|
||||
Columns: []*schema.Column{PendingAuthSessionsColumns[5], PendingAuthSessionsColumns[6], PendingAuthSessionsColumns[7]},
|
||||
},
|
||||
{
|
||||
Name: "pendingauthsession_completion_code_hash",
|
||||
Unique: false,
|
||||
Columns: []*schema.Column{PendingAuthSessionsColumns[14]},
|
||||
},
|
||||
},
|
||||
}
|
||||
// PromoCodesColumns holds the columns for the "promo_codes" table.
|
||||
PromoCodesColumns = []*schema.Column{
|
||||
{Name: "id", Type: field.TypeInt64, Increment: true},
|
||||
@@ -673,6 +1185,65 @@ var (
|
||||
Columns: SettingsColumns,
|
||||
PrimaryKey: []*schema.Column{SettingsColumns[0]},
|
||||
}
|
||||
// SubscriptionPlansColumns holds the columns for the "subscription_plans" table.
|
||||
SubscriptionPlansColumns = []*schema.Column{
|
||||
{Name: "id", Type: field.TypeInt64, Increment: true},
|
||||
{Name: "group_id", Type: field.TypeInt64},
|
||||
{Name: "name", Type: field.TypeString, Size: 100},
|
||||
{Name: "description", Type: field.TypeString, Default: "", SchemaType: map[string]string{"postgres": "text"}},
|
||||
{Name: "price", Type: field.TypeFloat64, SchemaType: map[string]string{"postgres": "decimal(20,2)"}},
|
||||
{Name: "original_price", Type: field.TypeFloat64, Nullable: true, SchemaType: map[string]string{"postgres": "decimal(20,2)"}},
|
||||
{Name: "validity_days", Type: field.TypeInt, Default: 30},
|
||||
{Name: "validity_unit", Type: field.TypeString, Size: 10, Default: "day"},
|
||||
{Name: "features", Type: field.TypeString, Default: "", SchemaType: map[string]string{"postgres": "text"}},
|
||||
{Name: "product_name", Type: field.TypeString, Size: 100, Default: ""},
|
||||
{Name: "for_sale", Type: field.TypeBool, Default: true},
|
||||
{Name: "sort_order", Type: field.TypeInt, Default: 0},
|
||||
{Name: "created_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||
{Name: "updated_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||
}
|
||||
// SubscriptionPlansTable holds the schema information for the "subscription_plans" table.
|
||||
SubscriptionPlansTable = &schema.Table{
|
||||
Name: "subscription_plans",
|
||||
Columns: SubscriptionPlansColumns,
|
||||
PrimaryKey: []*schema.Column{SubscriptionPlansColumns[0]},
|
||||
Indexes: []*schema.Index{
|
||||
{
|
||||
Name: "subscriptionplan_group_id",
|
||||
Unique: false,
|
||||
Columns: []*schema.Column{SubscriptionPlansColumns[1]},
|
||||
},
|
||||
{
|
||||
Name: "subscriptionplan_for_sale",
|
||||
Unique: false,
|
||||
Columns: []*schema.Column{SubscriptionPlansColumns[10]},
|
||||
},
|
||||
},
|
||||
}
|
||||
// TLSFingerprintProfilesColumns holds the columns for the "tls_fingerprint_profiles" table.
|
||||
TLSFingerprintProfilesColumns = []*schema.Column{
|
||||
{Name: "id", Type: field.TypeInt64, Increment: true},
|
||||
{Name: "created_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||
{Name: "updated_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||
{Name: "name", Type: field.TypeString, Unique: true, Size: 100},
|
||||
{Name: "description", Type: field.TypeString, Nullable: true, Size: 2147483647},
|
||||
{Name: "enable_grease", Type: field.TypeBool, Default: false},
|
||||
{Name: "cipher_suites", Type: field.TypeJSON, Nullable: true, SchemaType: map[string]string{"postgres": "jsonb"}},
|
||||
{Name: "curves", Type: field.TypeJSON, Nullable: true, SchemaType: map[string]string{"postgres": "jsonb"}},
|
||||
{Name: "point_formats", Type: field.TypeJSON, Nullable: true, SchemaType: map[string]string{"postgres": "jsonb"}},
|
||||
{Name: "signature_algorithms", Type: field.TypeJSON, Nullable: true, SchemaType: map[string]string{"postgres": "jsonb"}},
|
||||
{Name: "alpn_protocols", Type: field.TypeJSON, Nullable: true, SchemaType: map[string]string{"postgres": "jsonb"}},
|
||||
{Name: "supported_versions", Type: field.TypeJSON, Nullable: true, SchemaType: map[string]string{"postgres": "jsonb"}},
|
||||
{Name: "key_share_groups", Type: field.TypeJSON, Nullable: true, SchemaType: map[string]string{"postgres": "jsonb"}},
|
||||
{Name: "psk_modes", Type: field.TypeJSON, Nullable: true, SchemaType: map[string]string{"postgres": "jsonb"}},
|
||||
{Name: "extensions", Type: field.TypeJSON, Nullable: true, SchemaType: map[string]string{"postgres": "jsonb"}},
|
||||
}
|
||||
// TLSFingerprintProfilesTable holds the schema information for the "tls_fingerprint_profiles" table.
|
||||
TLSFingerprintProfilesTable = &schema.Table{
|
||||
Name: "tls_fingerprint_profiles",
|
||||
Columns: TLSFingerprintProfilesColumns,
|
||||
PrimaryKey: []*schema.Column{TLSFingerprintProfilesColumns[0]},
|
||||
}
|
||||
// UsageCleanupTasksColumns holds the columns for the "usage_cleanup_tasks" table.
|
||||
UsageCleanupTasksColumns = []*schema.Column{
|
||||
{Name: "id", Type: field.TypeInt64, Increment: true},
|
||||
@@ -716,7 +1287,12 @@ var (
|
||||
{Name: "id", Type: field.TypeInt64, Increment: true},
|
||||
{Name: "request_id", Type: field.TypeString, Size: 64},
|
||||
{Name: "model", Type: field.TypeString, Size: 100},
|
||||
{Name: "requested_model", Type: field.TypeString, Nullable: true, Size: 100},
|
||||
{Name: "upstream_model", Type: field.TypeString, Nullable: true, Size: 100},
|
||||
{Name: "channel_id", Type: field.TypeInt64, Nullable: true},
|
||||
{Name: "model_mapping_chain", Type: field.TypeString, Nullable: true, Size: 500},
|
||||
{Name: "billing_tier", Type: field.TypeString, Nullable: true, Size: 50},
|
||||
{Name: "billing_mode", Type: field.TypeString, Nullable: true, Size: 20},
|
||||
{Name: "input_tokens", Type: field.TypeInt, Default: 0},
|
||||
{Name: "output_tokens", Type: field.TypeInt, Default: 0},
|
||||
{Name: "cache_creation_tokens", Type: field.TypeInt, Default: 0},
|
||||
@@ -739,7 +1315,6 @@ var (
|
||||
{Name: "ip_address", Type: field.TypeString, Nullable: true, Size: 45},
|
||||
{Name: "image_count", Type: field.TypeInt, Default: 0},
|
||||
{Name: "image_size", Type: field.TypeString, Nullable: true, Size: 10},
|
||||
{Name: "media_type", Type: field.TypeString, Nullable: true, Size: 16},
|
||||
{Name: "cache_ttl_overridden", Type: field.TypeBool, Default: false},
|
||||
{Name: "created_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||
{Name: "api_key_id", Type: field.TypeInt64},
|
||||
@@ -756,31 +1331,31 @@ var (
|
||||
ForeignKeys: []*schema.ForeignKey{
|
||||
{
|
||||
Symbol: "usage_logs_api_keys_usage_logs",
|
||||
Columns: []*schema.Column{UsageLogsColumns[29]},
|
||||
Columns: []*schema.Column{UsageLogsColumns[33]},
|
||||
RefColumns: []*schema.Column{APIKeysColumns[0]},
|
||||
OnDelete: schema.NoAction,
|
||||
},
|
||||
{
|
||||
Symbol: "usage_logs_accounts_usage_logs",
|
||||
Columns: []*schema.Column{UsageLogsColumns[30]},
|
||||
Columns: []*schema.Column{UsageLogsColumns[34]},
|
||||
RefColumns: []*schema.Column{AccountsColumns[0]},
|
||||
OnDelete: schema.NoAction,
|
||||
},
|
||||
{
|
||||
Symbol: "usage_logs_groups_usage_logs",
|
||||
Columns: []*schema.Column{UsageLogsColumns[31]},
|
||||
Columns: []*schema.Column{UsageLogsColumns[35]},
|
||||
RefColumns: []*schema.Column{GroupsColumns[0]},
|
||||
OnDelete: schema.SetNull,
|
||||
},
|
||||
{
|
||||
Symbol: "usage_logs_users_usage_logs",
|
||||
Columns: []*schema.Column{UsageLogsColumns[32]},
|
||||
Columns: []*schema.Column{UsageLogsColumns[36]},
|
||||
RefColumns: []*schema.Column{UsersColumns[0]},
|
||||
OnDelete: schema.NoAction,
|
||||
},
|
||||
{
|
||||
Symbol: "usage_logs_user_subscriptions_usage_logs",
|
||||
Columns: []*schema.Column{UsageLogsColumns[33]},
|
||||
Columns: []*schema.Column{UsageLogsColumns[37]},
|
||||
RefColumns: []*schema.Column{UserSubscriptionsColumns[0]},
|
||||
OnDelete: schema.SetNull,
|
||||
},
|
||||
@@ -789,38 +1364,43 @@ var (
|
||||
{
|
||||
Name: "usagelog_user_id",
|
||||
Unique: false,
|
||||
Columns: []*schema.Column{UsageLogsColumns[32]},
|
||||
Columns: []*schema.Column{UsageLogsColumns[36]},
|
||||
},
|
||||
{
|
||||
Name: "usagelog_api_key_id",
|
||||
Unique: false,
|
||||
Columns: []*schema.Column{UsageLogsColumns[29]},
|
||||
Columns: []*schema.Column{UsageLogsColumns[33]},
|
||||
},
|
||||
{
|
||||
Name: "usagelog_account_id",
|
||||
Unique: false,
|
||||
Columns: []*schema.Column{UsageLogsColumns[30]},
|
||||
Columns: []*schema.Column{UsageLogsColumns[34]},
|
||||
},
|
||||
{
|
||||
Name: "usagelog_group_id",
|
||||
Unique: false,
|
||||
Columns: []*schema.Column{UsageLogsColumns[31]},
|
||||
Columns: []*schema.Column{UsageLogsColumns[35]},
|
||||
},
|
||||
{
|
||||
Name: "usagelog_subscription_id",
|
||||
Unique: false,
|
||||
Columns: []*schema.Column{UsageLogsColumns[33]},
|
||||
Columns: []*schema.Column{UsageLogsColumns[37]},
|
||||
},
|
||||
{
|
||||
Name: "usagelog_created_at",
|
||||
Unique: false,
|
||||
Columns: []*schema.Column{UsageLogsColumns[28]},
|
||||
Columns: []*schema.Column{UsageLogsColumns[32]},
|
||||
},
|
||||
{
|
||||
Name: "usagelog_model",
|
||||
Unique: false,
|
||||
Columns: []*schema.Column{UsageLogsColumns[2]},
|
||||
},
|
||||
{
|
||||
Name: "usagelog_requested_model",
|
||||
Unique: false,
|
||||
Columns: []*schema.Column{UsageLogsColumns[3]},
|
||||
},
|
||||
{
|
||||
Name: "usagelog_request_id",
|
||||
Unique: false,
|
||||
@@ -829,17 +1409,17 @@ var (
|
||||
{
|
||||
Name: "usagelog_user_id_created_at",
|
||||
Unique: false,
|
||||
Columns: []*schema.Column{UsageLogsColumns[32], UsageLogsColumns[28]},
|
||||
Columns: []*schema.Column{UsageLogsColumns[36], UsageLogsColumns[32]},
|
||||
},
|
||||
{
|
||||
Name: "usagelog_api_key_id_created_at",
|
||||
Unique: false,
|
||||
Columns: []*schema.Column{UsageLogsColumns[29], UsageLogsColumns[28]},
|
||||
Columns: []*schema.Column{UsageLogsColumns[33], UsageLogsColumns[32]},
|
||||
},
|
||||
{
|
||||
Name: "usagelog_group_id_created_at",
|
||||
Unique: false,
|
||||
Columns: []*schema.Column{UsageLogsColumns[31], UsageLogsColumns[28]},
|
||||
Columns: []*schema.Column{UsageLogsColumns[35], UsageLogsColumns[32]},
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -860,8 +1440,15 @@ var (
|
||||
{Name: "totp_secret_encrypted", Type: field.TypeString, Nullable: true, SchemaType: map[string]string{"postgres": "text"}},
|
||||
{Name: "totp_enabled", Type: field.TypeBool, Default: false},
|
||||
{Name: "totp_enabled_at", Type: field.TypeTime, Nullable: true},
|
||||
{Name: "sora_storage_quota_bytes", Type: field.TypeInt64, Default: 0},
|
||||
{Name: "sora_storage_used_bytes", Type: field.TypeInt64, Default: 0},
|
||||
{Name: "signup_source", Type: field.TypeString, Default: "email"},
|
||||
{Name: "last_login_at", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||
{Name: "last_active_at", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||
{Name: "balance_notify_enabled", Type: field.TypeBool, Default: true},
|
||||
{Name: "balance_notify_threshold_type", Type: field.TypeString, Default: "fixed"},
|
||||
{Name: "balance_notify_threshold", Type: field.TypeFloat64, Nullable: true, SchemaType: map[string]string{"postgres": "decimal(20,8)"}},
|
||||
{Name: "balance_notify_extra_emails", Type: field.TypeString, Default: "[]", SchemaType: map[string]string{"postgres": "text"}},
|
||||
{Name: "total_recharged", Type: field.TypeFloat64, Default: 0, SchemaType: map[string]string{"postgres": "decimal(20,8)"}},
|
||||
{Name: "rpm_limit", Type: field.TypeInt, Default: 0},
|
||||
}
|
||||
// UsersTable holds the schema information for the "users" table.
|
||||
UsersTable = &schema.Table{
|
||||
@@ -1096,15 +1683,28 @@ var (
|
||||
AccountGroupsTable,
|
||||
AnnouncementsTable,
|
||||
AnnouncementReadsTable,
|
||||
AuthIdentitiesTable,
|
||||
AuthIdentityChannelsTable,
|
||||
ChannelMonitorsTable,
|
||||
ChannelMonitorDailyRollupsTable,
|
||||
ChannelMonitorHistoriesTable,
|
||||
ChannelMonitorRequestTemplatesTable,
|
||||
ErrorPassthroughRulesTable,
|
||||
GroupsTable,
|
||||
IdempotencyRecordsTable,
|
||||
IdentityAdoptionDecisionsTable,
|
||||
PaymentAuditLogsTable,
|
||||
PaymentOrdersTable,
|
||||
PaymentProviderInstancesTable,
|
||||
PendingAuthSessionsTable,
|
||||
PromoCodesTable,
|
||||
PromoCodeUsagesTable,
|
||||
ProxiesTable,
|
||||
RedeemCodesTable,
|
||||
SecuritySecretsTable,
|
||||
SettingsTable,
|
||||
SubscriptionPlansTable,
|
||||
TLSFingerprintProfilesTable,
|
||||
UsageCleanupTasksTable,
|
||||
UsageLogsTable,
|
||||
UsersTable,
|
||||
@@ -1138,6 +1738,29 @@ func init() {
|
||||
AnnouncementReadsTable.Annotation = &entsql.Annotation{
|
||||
Table: "announcement_reads",
|
||||
}
|
||||
AuthIdentitiesTable.ForeignKeys[0].RefTable = UsersTable
|
||||
AuthIdentitiesTable.Annotation = &entsql.Annotation{
|
||||
Table: "auth_identities",
|
||||
}
|
||||
AuthIdentityChannelsTable.ForeignKeys[0].RefTable = AuthIdentitiesTable
|
||||
AuthIdentityChannelsTable.Annotation = &entsql.Annotation{
|
||||
Table: "auth_identity_channels",
|
||||
}
|
||||
ChannelMonitorsTable.ForeignKeys[0].RefTable = ChannelMonitorRequestTemplatesTable
|
||||
ChannelMonitorsTable.Annotation = &entsql.Annotation{
|
||||
Table: "channel_monitors",
|
||||
}
|
||||
ChannelMonitorDailyRollupsTable.ForeignKeys[0].RefTable = ChannelMonitorsTable
|
||||
ChannelMonitorDailyRollupsTable.Annotation = &entsql.Annotation{
|
||||
Table: "channel_monitor_daily_rollups",
|
||||
}
|
||||
ChannelMonitorHistoriesTable.ForeignKeys[0].RefTable = ChannelMonitorsTable
|
||||
ChannelMonitorHistoriesTable.Annotation = &entsql.Annotation{
|
||||
Table: "channel_monitor_histories",
|
||||
}
|
||||
ChannelMonitorRequestTemplatesTable.Annotation = &entsql.Annotation{
|
||||
Table: "channel_monitor_request_templates",
|
||||
}
|
||||
ErrorPassthroughRulesTable.Annotation = &entsql.Annotation{
|
||||
Table: "error_passthrough_rules",
|
||||
}
|
||||
@@ -1147,6 +1770,25 @@ func init() {
|
||||
IdempotencyRecordsTable.Annotation = &entsql.Annotation{
|
||||
Table: "idempotency_records",
|
||||
}
|
||||
IdentityAdoptionDecisionsTable.ForeignKeys[0].RefTable = AuthIdentitiesTable
|
||||
IdentityAdoptionDecisionsTable.ForeignKeys[1].RefTable = PendingAuthSessionsTable
|
||||
IdentityAdoptionDecisionsTable.Annotation = &entsql.Annotation{
|
||||
Table: "identity_adoption_decisions",
|
||||
}
|
||||
PaymentAuditLogsTable.Annotation = &entsql.Annotation{
|
||||
Table: "payment_audit_logs",
|
||||
}
|
||||
PaymentOrdersTable.ForeignKeys[0].RefTable = UsersTable
|
||||
PaymentOrdersTable.Annotation = &entsql.Annotation{
|
||||
Table: "payment_orders",
|
||||
}
|
||||
PaymentProviderInstancesTable.Annotation = &entsql.Annotation{
|
||||
Table: "payment_provider_instances",
|
||||
}
|
||||
PendingAuthSessionsTable.ForeignKeys[0].RefTable = UsersTable
|
||||
PendingAuthSessionsTable.Annotation = &entsql.Annotation{
|
||||
Table: "pending_auth_sessions",
|
||||
}
|
||||
PromoCodesTable.Annotation = &entsql.Annotation{
|
||||
Table: "promo_codes",
|
||||
}
|
||||
@@ -1169,6 +1811,12 @@ func init() {
|
||||
SettingsTable.Annotation = &entsql.Annotation{
|
||||
Table: "settings",
|
||||
}
|
||||
SubscriptionPlansTable.Annotation = &entsql.Annotation{
|
||||
Table: "subscription_plans",
|
||||
}
|
||||
TLSFingerprintProfilesTable.Annotation = &entsql.Annotation{
|
||||
Table: "tls_fingerprint_profiles",
|
||||
}
|
||||
UsageCleanupTasksTable.Annotation = &entsql.Annotation{
|
||||
Table: "usage_cleanup_tasks",
|
||||
}
|
||||
|
||||
18261
backend/ent/mutation.go
150
backend/ent/paymentauditlog.go
Normal file
@@ -0,0 +1,150 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package ent
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"entgo.io/ent"
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"github.com/Wei-Shaw/sub2api/ent/paymentauditlog"
|
||||
)
|
||||
|
||||
// PaymentAuditLog is the model entity for the PaymentAuditLog schema.
|
||||
type PaymentAuditLog struct {
|
||||
config `json:"-"`
|
||||
// ID of the ent.
|
||||
ID int64 `json:"id,omitempty"`
|
||||
// OrderID holds the value of the "order_id" field.
|
||||
OrderID string `json:"order_id,omitempty"`
|
||||
// Action holds the value of the "action" field.
|
||||
Action string `json:"action,omitempty"`
|
||||
// Detail holds the value of the "detail" field.
|
||||
Detail string `json:"detail,omitempty"`
|
||||
// Operator holds the value of the "operator" field.
|
||||
Operator string `json:"operator,omitempty"`
|
||||
// CreatedAt holds the value of the "created_at" field.
|
||||
CreatedAt time.Time `json:"created_at,omitempty"`
|
||||
selectValues sql.SelectValues
|
||||
}
|
||||
|
||||
// scanValues returns the types for scanning values from sql.Rows.
|
||||
func (*PaymentAuditLog) scanValues(columns []string) ([]any, error) {
|
||||
values := make([]any, len(columns))
|
||||
for i := range columns {
|
||||
switch columns[i] {
|
||||
case paymentauditlog.FieldID:
|
||||
values[i] = new(sql.NullInt64)
|
||||
case paymentauditlog.FieldOrderID, paymentauditlog.FieldAction, paymentauditlog.FieldDetail, paymentauditlog.FieldOperator:
|
||||
values[i] = new(sql.NullString)
|
||||
case paymentauditlog.FieldCreatedAt:
|
||||
values[i] = new(sql.NullTime)
|
||||
default:
|
||||
values[i] = new(sql.UnknownType)
|
||||
}
|
||||
}
|
||||
return values, nil
|
||||
}
|
||||
|
||||
// assignValues assigns the values that were returned from sql.Rows (after scanning)
|
||||
// to the PaymentAuditLog fields.
|
||||
func (_m *PaymentAuditLog) assignValues(columns []string, values []any) error {
|
||||
if m, n := len(values), len(columns); m < n {
|
||||
return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
|
||||
}
|
||||
for i := range columns {
|
||||
switch columns[i] {
|
||||
case paymentauditlog.FieldID:
|
||||
value, ok := values[i].(*sql.NullInt64)
|
||||
if !ok {
|
||||
return fmt.Errorf("unexpected type %T for field id", value)
|
||||
}
|
||||
_m.ID = int64(value.Int64)
|
||||
case paymentauditlog.FieldOrderID:
|
||||
if value, ok := values[i].(*sql.NullString); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field order_id", values[i])
|
||||
} else if value.Valid {
|
||||
_m.OrderID = value.String
|
||||
}
|
||||
case paymentauditlog.FieldAction:
|
||||
if value, ok := values[i].(*sql.NullString); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field action", values[i])
|
||||
} else if value.Valid {
|
||||
_m.Action = value.String
|
||||
}
|
||||
case paymentauditlog.FieldDetail:
|
||||
if value, ok := values[i].(*sql.NullString); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field detail", values[i])
|
||||
} else if value.Valid {
|
||||
_m.Detail = value.String
|
||||
}
|
||||
case paymentauditlog.FieldOperator:
|
||||
if value, ok := values[i].(*sql.NullString); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field operator", values[i])
|
||||
} else if value.Valid {
|
||||
_m.Operator = value.String
|
||||
}
|
||||
case paymentauditlog.FieldCreatedAt:
|
||||
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field created_at", values[i])
|
||||
} else if value.Valid {
|
||||
_m.CreatedAt = value.Time
|
||||
}
|
||||
default:
|
||||
_m.selectValues.Set(columns[i], values[i])
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Value returns the ent.Value that was dynamically selected and assigned to the PaymentAuditLog.
|
||||
// This includes values selected through modifiers, order, etc.
|
||||
func (_m *PaymentAuditLog) Value(name string) (ent.Value, error) {
|
||||
return _m.selectValues.Get(name)
|
||||
}
|
||||
|
||||
// Update returns a builder for updating this PaymentAuditLog.
|
||||
// Note that you need to call PaymentAuditLog.Unwrap() before calling this method if this PaymentAuditLog
|
||||
// was returned from a transaction, and the transaction was committed or rolled back.
|
||||
func (_m *PaymentAuditLog) Update() *PaymentAuditLogUpdateOne {
|
||||
return NewPaymentAuditLogClient(_m.config).UpdateOne(_m)
|
||||
}
|
||||
|
||||
// Unwrap unwraps the PaymentAuditLog entity that was returned from a transaction after it was closed,
|
||||
// so that all future queries will be executed through the driver which created the transaction.
|
||||
func (_m *PaymentAuditLog) Unwrap() *PaymentAuditLog {
|
||||
_tx, ok := _m.config.driver.(*txDriver)
|
||||
if !ok {
|
||||
panic("ent: PaymentAuditLog is not a transactional entity")
|
||||
}
|
||||
_m.config.driver = _tx.drv
|
||||
return _m
|
||||
}
|
||||
|
||||
// String implements the fmt.Stringer.
|
||||
func (_m *PaymentAuditLog) String() string {
|
||||
var builder strings.Builder
|
||||
builder.WriteString("PaymentAuditLog(")
|
||||
builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID))
|
||||
builder.WriteString("order_id=")
|
||||
builder.WriteString(_m.OrderID)
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("action=")
|
||||
builder.WriteString(_m.Action)
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("detail=")
|
||||
builder.WriteString(_m.Detail)
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("operator=")
|
||||
builder.WriteString(_m.Operator)
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("created_at=")
|
||||
builder.WriteString(_m.CreatedAt.Format(time.ANSIC))
|
||||
builder.WriteByte(')')
|
||||
return builder.String()
|
||||
}
|
||||
|
||||
// PaymentAuditLogs is a parsable slice of PaymentAuditLog.
|
||||
type PaymentAuditLogs []*PaymentAuditLog
|
||||
96
backend/ent/paymentauditlog/paymentauditlog.go
Normal file
@@ -0,0 +1,96 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package paymentauditlog
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"entgo.io/ent/dialect/sql"
|
||||
)
|
||||
|
||||
const (
|
||||
// Label holds the string label denoting the paymentauditlog type in the database.
|
||||
Label = "payment_audit_log"
|
||||
// FieldID holds the string denoting the id field in the database.
|
||||
FieldID = "id"
|
||||
// FieldOrderID holds the string denoting the order_id field in the database.
|
||||
FieldOrderID = "order_id"
|
||||
// FieldAction holds the string denoting the action field in the database.
|
||||
FieldAction = "action"
|
||||
// FieldDetail holds the string denoting the detail field in the database.
|
||||
FieldDetail = "detail"
|
||||
// FieldOperator holds the string denoting the operator field in the database.
|
||||
FieldOperator = "operator"
|
||||
// FieldCreatedAt holds the string denoting the created_at field in the database.
|
||||
FieldCreatedAt = "created_at"
|
||||
// Table holds the table name of the paymentauditlog in the database.
|
||||
Table = "payment_audit_logs"
|
||||
)
|
||||
|
||||
// Columns holds all SQL columns for paymentauditlog fields.
|
||||
var Columns = []string{
|
||||
FieldID,
|
||||
FieldOrderID,
|
||||
FieldAction,
|
||||
FieldDetail,
|
||||
FieldOperator,
|
||||
FieldCreatedAt,
|
||||
}
|
||||
|
||||
// ValidColumn reports if the column name is valid (part of the table columns).
|
||||
func ValidColumn(column string) bool {
|
||||
for i := range Columns {
|
||||
if column == Columns[i] {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
var (
|
||||
// OrderIDValidator is a validator for the "order_id" field. It is called by the builders before save.
|
||||
OrderIDValidator func(string) error
|
||||
// ActionValidator is a validator for the "action" field. It is called by the builders before save.
|
||||
ActionValidator func(string) error
|
||||
// DefaultDetail holds the default value on creation for the "detail" field.
|
||||
DefaultDetail string
|
||||
// DefaultOperator holds the default value on creation for the "operator" field.
|
||||
DefaultOperator string
|
||||
// OperatorValidator is a validator for the "operator" field. It is called by the builders before save.
|
||||
OperatorValidator func(string) error
|
||||
// DefaultCreatedAt holds the default value on creation for the "created_at" field.
|
||||
DefaultCreatedAt func() time.Time
|
||||
)
|
||||
|
||||
// OrderOption defines the ordering options for the PaymentAuditLog queries.
|
||||
type OrderOption func(*sql.Selector)
|
||||
|
||||
// ByID orders the results by the id field.
|
||||
func ByID(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldID, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByOrderID orders the results by the order_id field.
|
||||
func ByOrderID(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldOrderID, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByAction orders the results by the action field.
|
||||
func ByAction(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldAction, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByDetail orders the results by the detail field.
|
||||
func ByDetail(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldDetail, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByOperator orders the results by the operator field.
|
||||
func ByOperator(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldOperator, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByCreatedAt orders the results by the created_at field.
|
||||
func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
|
||||
}
|
||||
395
backend/ent/paymentauditlog/where.go
Normal file
@@ -0,0 +1,395 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package paymentauditlog
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||
)
|
||||
|
||||
// ID filters vertices based on their ID field.
|
||||
func ID(id int64) predicate.PaymentAuditLog {
|
||||
return predicate.PaymentAuditLog(sql.FieldEQ(FieldID, id))
|
||||
}
|
||||
|
||||
// IDEQ applies the EQ predicate on the ID field.
|
||||
func IDEQ(id int64) predicate.PaymentAuditLog {
|
||||
return predicate.PaymentAuditLog(sql.FieldEQ(FieldID, id))
|
||||
}
|
||||
|
||||
// IDNEQ applies the NEQ predicate on the ID field.
|
||||
func IDNEQ(id int64) predicate.PaymentAuditLog {
|
||||
return predicate.PaymentAuditLog(sql.FieldNEQ(FieldID, id))
|
||||
}
|
||||
|
||||
// IDIn applies the In predicate on the ID field.
|
||||
func IDIn(ids ...int64) predicate.PaymentAuditLog {
|
||||
return predicate.PaymentAuditLog(sql.FieldIn(FieldID, ids...))
|
||||
}
|
||||
|
||||
// IDNotIn applies the NotIn predicate on the ID field.
|
||||
func IDNotIn(ids ...int64) predicate.PaymentAuditLog {
|
||||
return predicate.PaymentAuditLog(sql.FieldNotIn(FieldID, ids...))
|
||||
}
|
||||
|
||||
// IDGT applies the GT predicate on the ID field.
|
||||
func IDGT(id int64) predicate.PaymentAuditLog {
|
||||
return predicate.PaymentAuditLog(sql.FieldGT(FieldID, id))
|
||||
}
|
||||
|
||||
// IDGTE applies the GTE predicate on the ID field.
|
||||
func IDGTE(id int64) predicate.PaymentAuditLog {
|
||||
return predicate.PaymentAuditLog(sql.FieldGTE(FieldID, id))
|
||||
}
|
||||
|
||||
// IDLT applies the LT predicate on the ID field.
|
||||
func IDLT(id int64) predicate.PaymentAuditLog {
|
||||
return predicate.PaymentAuditLog(sql.FieldLT(FieldID, id))
|
||||
}
|
||||
|
||||
// IDLTE applies the LTE predicate on the ID field.
|
||||
func IDLTE(id int64) predicate.PaymentAuditLog {
|
||||
return predicate.PaymentAuditLog(sql.FieldLTE(FieldID, id))
|
||||
}
|
||||
|
||||
// OrderID applies equality check predicate on the "order_id" field. It's identical to OrderIDEQ.
|
||||
func OrderID(v string) predicate.PaymentAuditLog {
|
||||
return predicate.PaymentAuditLog(sql.FieldEQ(FieldOrderID, v))
|
||||
}
|
||||
|
||||
// Action applies equality check predicate on the "action" field. It's identical to ActionEQ.
|
||||
func Action(v string) predicate.PaymentAuditLog {
|
||||
return predicate.PaymentAuditLog(sql.FieldEQ(FieldAction, v))
|
||||
}
|
||||
|
||||
// Detail applies equality check predicate on the "detail" field. It's identical to DetailEQ.
|
||||
func Detail(v string) predicate.PaymentAuditLog {
|
||||
return predicate.PaymentAuditLog(sql.FieldEQ(FieldDetail, v))
|
||||
}
|
||||
|
||||
// Operator applies equality check predicate on the "operator" field. It's identical to OperatorEQ.
|
||||
func Operator(v string) predicate.PaymentAuditLog {
|
||||
return predicate.PaymentAuditLog(sql.FieldEQ(FieldOperator, v))
|
||||
}
|
||||
|
||||
// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ.
|
||||
func CreatedAt(v time.Time) predicate.PaymentAuditLog {
|
||||
return predicate.PaymentAuditLog(sql.FieldEQ(FieldCreatedAt, v))
|
||||
}
|
||||
|
||||
// OrderIDEQ applies the EQ predicate on the "order_id" field.
|
||||
func OrderIDEQ(v string) predicate.PaymentAuditLog {
|
||||
return predicate.PaymentAuditLog(sql.FieldEQ(FieldOrderID, v))
|
||||
}
|
||||
|
||||
// OrderIDNEQ applies the NEQ predicate on the "order_id" field.
|
||||
func OrderIDNEQ(v string) predicate.PaymentAuditLog {
|
||||
return predicate.PaymentAuditLog(sql.FieldNEQ(FieldOrderID, v))
|
||||
}
|
||||
|
||||
// OrderIDIn applies the In predicate on the "order_id" field.
|
||||
func OrderIDIn(vs ...string) predicate.PaymentAuditLog {
|
||||
return predicate.PaymentAuditLog(sql.FieldIn(FieldOrderID, vs...))
|
||||
}
|
||||
|
||||
// OrderIDNotIn applies the NotIn predicate on the "order_id" field.
|
||||
func OrderIDNotIn(vs ...string) predicate.PaymentAuditLog {
|
||||
return predicate.PaymentAuditLog(sql.FieldNotIn(FieldOrderID, vs...))
|
||||
}
|
||||
|
||||
// OrderIDGT applies the GT predicate on the "order_id" field.
|
||||
func OrderIDGT(v string) predicate.PaymentAuditLog {
|
||||
return predicate.PaymentAuditLog(sql.FieldGT(FieldOrderID, v))
|
||||
}
|
||||
|
||||
// OrderIDGTE applies the GTE predicate on the "order_id" field.
|
||||
func OrderIDGTE(v string) predicate.PaymentAuditLog {
|
||||
return predicate.PaymentAuditLog(sql.FieldGTE(FieldOrderID, v))
|
||||
}
|
||||
|
||||
// OrderIDLT applies the LT predicate on the "order_id" field.
|
||||
func OrderIDLT(v string) predicate.PaymentAuditLog {
|
||||
return predicate.PaymentAuditLog(sql.FieldLT(FieldOrderID, v))
|
||||
}
|
||||
|
||||
// OrderIDLTE applies the LTE predicate on the "order_id" field.
|
||||
func OrderIDLTE(v string) predicate.PaymentAuditLog {
|
||||
return predicate.PaymentAuditLog(sql.FieldLTE(FieldOrderID, v))
|
||||
}
|
||||
|
||||
// OrderIDContains applies the Contains predicate on the "order_id" field.
|
||||
func OrderIDContains(v string) predicate.PaymentAuditLog {
|
||||
return predicate.PaymentAuditLog(sql.FieldContains(FieldOrderID, v))
|
||||
}
|
||||
|
||||
// OrderIDHasPrefix applies the HasPrefix predicate on the "order_id" field.
|
||||
func OrderIDHasPrefix(v string) predicate.PaymentAuditLog {
|
||||
return predicate.PaymentAuditLog(sql.FieldHasPrefix(FieldOrderID, v))
|
||||
}
|
||||
|
||||
// OrderIDHasSuffix applies the HasSuffix predicate on the "order_id" field.
|
||||
func OrderIDHasSuffix(v string) predicate.PaymentAuditLog {
|
||||
return predicate.PaymentAuditLog(sql.FieldHasSuffix(FieldOrderID, v))
|
||||
}
|
||||
|
||||
// OrderIDEqualFold applies the EqualFold predicate on the "order_id" field.
|
||||
func OrderIDEqualFold(v string) predicate.PaymentAuditLog {
|
||||
return predicate.PaymentAuditLog(sql.FieldEqualFold(FieldOrderID, v))
|
||||
}
|
||||
|
||||
// OrderIDContainsFold applies the ContainsFold predicate on the "order_id" field.
|
||||
func OrderIDContainsFold(v string) predicate.PaymentAuditLog {
|
||||
return predicate.PaymentAuditLog(sql.FieldContainsFold(FieldOrderID, v))
|
||||
}
|
||||
|
||||
// ActionEQ applies the EQ predicate on the "action" field.
|
||||
func ActionEQ(v string) predicate.PaymentAuditLog {
|
||||
return predicate.PaymentAuditLog(sql.FieldEQ(FieldAction, v))
|
||||
}
|
||||
|
||||
// ActionNEQ applies the NEQ predicate on the "action" field.
|
||||
func ActionNEQ(v string) predicate.PaymentAuditLog {
|
||||
return predicate.PaymentAuditLog(sql.FieldNEQ(FieldAction, v))
|
||||
}
|
||||
|
||||
// ActionIn applies the In predicate on the "action" field.
|
||||
func ActionIn(vs ...string) predicate.PaymentAuditLog {
|
||||
return predicate.PaymentAuditLog(sql.FieldIn(FieldAction, vs...))
|
||||
}
|
||||
|
||||
// ActionNotIn applies the NotIn predicate on the "action" field.
|
||||
func ActionNotIn(vs ...string) predicate.PaymentAuditLog {
|
||||
return predicate.PaymentAuditLog(sql.FieldNotIn(FieldAction, vs...))
|
||||
}
|
||||
|
||||
// ActionGT applies the GT predicate on the "action" field.
|
||||
func ActionGT(v string) predicate.PaymentAuditLog {
|
||||
return predicate.PaymentAuditLog(sql.FieldGT(FieldAction, v))
|
||||
}
|
||||
|
||||
// ActionGTE applies the GTE predicate on the "action" field.
|
||||
func ActionGTE(v string) predicate.PaymentAuditLog {
|
||||
return predicate.PaymentAuditLog(sql.FieldGTE(FieldAction, v))
|
||||
}
|
||||
|
||||
// ActionLT applies the LT predicate on the "action" field.
|
||||
func ActionLT(v string) predicate.PaymentAuditLog {
|
||||
return predicate.PaymentAuditLog(sql.FieldLT(FieldAction, v))
|
||||
}
|
||||
|
||||
// ActionLTE applies the LTE predicate on the "action" field.
|
||||
func ActionLTE(v string) predicate.PaymentAuditLog {
|
||||
return predicate.PaymentAuditLog(sql.FieldLTE(FieldAction, v))
|
||||
}
|
||||
|
||||
// ActionContains applies the Contains predicate on the "action" field.
|
||||
func ActionContains(v string) predicate.PaymentAuditLog {
|
||||
return predicate.PaymentAuditLog(sql.FieldContains(FieldAction, v))
|
||||
}
|
||||
|
||||
// ActionHasPrefix applies the HasPrefix predicate on the "action" field.
|
||||
func ActionHasPrefix(v string) predicate.PaymentAuditLog {
|
||||
return predicate.PaymentAuditLog(sql.FieldHasPrefix(FieldAction, v))
|
||||
}
|
||||
|
||||
// ActionHasSuffix applies the HasSuffix predicate on the "action" field.
|
||||
func ActionHasSuffix(v string) predicate.PaymentAuditLog {
|
||||
return predicate.PaymentAuditLog(sql.FieldHasSuffix(FieldAction, v))
|
||||
}
|
||||
|
||||
// ActionEqualFold applies the EqualFold predicate on the "action" field.
|
||||
func ActionEqualFold(v string) predicate.PaymentAuditLog {
|
||||
return predicate.PaymentAuditLog(sql.FieldEqualFold(FieldAction, v))
|
||||
}
|
||||
|
||||
// ActionContainsFold applies the ContainsFold predicate on the "action" field.
|
||||
func ActionContainsFold(v string) predicate.PaymentAuditLog {
|
||||
return predicate.PaymentAuditLog(sql.FieldContainsFold(FieldAction, v))
|
||||
}
|
||||
|
||||
// DetailEQ applies the EQ predicate on the "detail" field.
|
||||
func DetailEQ(v string) predicate.PaymentAuditLog {
|
||||
return predicate.PaymentAuditLog(sql.FieldEQ(FieldDetail, v))
|
||||
}
|
||||
|
||||
// DetailNEQ applies the NEQ predicate on the "detail" field.
|
||||
func DetailNEQ(v string) predicate.PaymentAuditLog {
|
||||
return predicate.PaymentAuditLog(sql.FieldNEQ(FieldDetail, v))
|
||||
}
|
||||
|
||||
// DetailIn applies the In predicate on the "detail" field.
|
||||
func DetailIn(vs ...string) predicate.PaymentAuditLog {
|
||||
return predicate.PaymentAuditLog(sql.FieldIn(FieldDetail, vs...))
|
||||
}
|
||||
|
||||
// DetailNotIn applies the NotIn predicate on the "detail" field.
|
||||
func DetailNotIn(vs ...string) predicate.PaymentAuditLog {
|
||||
return predicate.PaymentAuditLog(sql.FieldNotIn(FieldDetail, vs...))
|
||||
}
|
||||
|
||||
// DetailGT applies the GT predicate on the "detail" field.
|
||||
func DetailGT(v string) predicate.PaymentAuditLog {
|
||||
return predicate.PaymentAuditLog(sql.FieldGT(FieldDetail, v))
|
||||
}
|
||||
|
||||
// DetailGTE applies the GTE predicate on the "detail" field.
|
||||
func DetailGTE(v string) predicate.PaymentAuditLog {
|
||||
return predicate.PaymentAuditLog(sql.FieldGTE(FieldDetail, v))
|
||||
}
|
||||
|
||||
// DetailLT applies the LT predicate on the "detail" field.
|
||||
func DetailLT(v string) predicate.PaymentAuditLog {
|
||||
return predicate.PaymentAuditLog(sql.FieldLT(FieldDetail, v))
|
||||
}
|
||||
|
||||
// DetailLTE applies the LTE predicate on the "detail" field.
|
||||
func DetailLTE(v string) predicate.PaymentAuditLog {
|
||||
return predicate.PaymentAuditLog(sql.FieldLTE(FieldDetail, v))
|
||||
}
|
||||
|
||||
// DetailContains applies the Contains predicate on the "detail" field.
|
||||
func DetailContains(v string) predicate.PaymentAuditLog {
|
||||
return predicate.PaymentAuditLog(sql.FieldContains(FieldDetail, v))
|
||||
}
|
||||
|
||||
// DetailHasPrefix applies the HasPrefix predicate on the "detail" field.
|
||||
func DetailHasPrefix(v string) predicate.PaymentAuditLog {
|
||||
return predicate.PaymentAuditLog(sql.FieldHasPrefix(FieldDetail, v))
|
||||
}
|
||||
|
||||
// DetailHasSuffix applies the HasSuffix predicate on the "detail" field.
|
||||
func DetailHasSuffix(v string) predicate.PaymentAuditLog {
|
||||
return predicate.PaymentAuditLog(sql.FieldHasSuffix(FieldDetail, v))
|
||||
}
|
||||
|
||||
// DetailEqualFold applies the EqualFold predicate on the "detail" field.
|
||||
func DetailEqualFold(v string) predicate.PaymentAuditLog {
|
||||
return predicate.PaymentAuditLog(sql.FieldEqualFold(FieldDetail, v))
|
||||
}
|
||||
|
||||
// DetailContainsFold applies the ContainsFold predicate on the "detail" field.
|
||||
func DetailContainsFold(v string) predicate.PaymentAuditLog {
|
||||
return predicate.PaymentAuditLog(sql.FieldContainsFold(FieldDetail, v))
|
||||
}
|
||||
|
||||
// OperatorEQ applies the EQ predicate on the "operator" field.
|
||||
func OperatorEQ(v string) predicate.PaymentAuditLog {
|
||||
return predicate.PaymentAuditLog(sql.FieldEQ(FieldOperator, v))
|
||||
}
|
||||
|
||||
// OperatorNEQ applies the NEQ predicate on the "operator" field.
|
||||
func OperatorNEQ(v string) predicate.PaymentAuditLog {
|
||||
return predicate.PaymentAuditLog(sql.FieldNEQ(FieldOperator, v))
|
||||
}
|
||||
|
||||
// OperatorIn applies the In predicate on the "operator" field.
|
||||
func OperatorIn(vs ...string) predicate.PaymentAuditLog {
|
||||
return predicate.PaymentAuditLog(sql.FieldIn(FieldOperator, vs...))
|
||||
}
|
||||
|
||||
// OperatorNotIn applies the NotIn predicate on the "operator" field.
|
||||
func OperatorNotIn(vs ...string) predicate.PaymentAuditLog {
|
||||
return predicate.PaymentAuditLog(sql.FieldNotIn(FieldOperator, vs...))
|
||||
}
|
||||
|
||||
// OperatorGT applies the GT predicate on the "operator" field.
|
||||
func OperatorGT(v string) predicate.PaymentAuditLog {
|
||||
return predicate.PaymentAuditLog(sql.FieldGT(FieldOperator, v))
|
||||
}
|
||||
|
||||
// OperatorGTE applies the GTE predicate on the "operator" field.
|
||||
func OperatorGTE(v string) predicate.PaymentAuditLog {
|
||||
return predicate.PaymentAuditLog(sql.FieldGTE(FieldOperator, v))
|
||||
}
|
||||
|
||||
// OperatorLT applies the LT predicate on the "operator" field.
|
||||
func OperatorLT(v string) predicate.PaymentAuditLog {
|
||||
return predicate.PaymentAuditLog(sql.FieldLT(FieldOperator, v))
|
||||
}
|
||||
|
||||
// OperatorLTE applies the LTE predicate on the "operator" field.
|
||||
func OperatorLTE(v string) predicate.PaymentAuditLog {
|
||||
return predicate.PaymentAuditLog(sql.FieldLTE(FieldOperator, v))
|
||||
}
|
||||
|
||||
// OperatorContains applies the Contains predicate on the "operator" field.
|
||||
func OperatorContains(v string) predicate.PaymentAuditLog {
|
||||
return predicate.PaymentAuditLog(sql.FieldContains(FieldOperator, v))
|
||||
}
|
||||
|
||||
// OperatorHasPrefix applies the HasPrefix predicate on the "operator" field.
|
||||
func OperatorHasPrefix(v string) predicate.PaymentAuditLog {
|
||||
return predicate.PaymentAuditLog(sql.FieldHasPrefix(FieldOperator, v))
|
||||
}
|
||||
|
||||
// OperatorHasSuffix applies the HasSuffix predicate on the "operator" field.
|
||||
func OperatorHasSuffix(v string) predicate.PaymentAuditLog {
|
||||
return predicate.PaymentAuditLog(sql.FieldHasSuffix(FieldOperator, v))
|
||||
}
|
||||
|
||||
// OperatorEqualFold applies the EqualFold predicate on the "operator" field.
|
||||
func OperatorEqualFold(v string) predicate.PaymentAuditLog {
|
||||
return predicate.PaymentAuditLog(sql.FieldEqualFold(FieldOperator, v))
|
||||
}
|
||||
|
||||
// OperatorContainsFold applies the ContainsFold predicate on the "operator" field.
|
||||
func OperatorContainsFold(v string) predicate.PaymentAuditLog {
|
||||
return predicate.PaymentAuditLog(sql.FieldContainsFold(FieldOperator, v))
|
||||
}
|
||||
|
||||
// CreatedAtEQ applies the EQ predicate on the "created_at" field.
|
||||
func CreatedAtEQ(v time.Time) predicate.PaymentAuditLog {
|
||||
return predicate.PaymentAuditLog(sql.FieldEQ(FieldCreatedAt, v))
|
||||
}
|
||||
|
||||
// CreatedAtNEQ applies the NEQ predicate on the "created_at" field.
|
||||
func CreatedAtNEQ(v time.Time) predicate.PaymentAuditLog {
|
||||
return predicate.PaymentAuditLog(sql.FieldNEQ(FieldCreatedAt, v))
|
||||
}
|
||||
|
||||
// CreatedAtIn applies the In predicate on the "created_at" field.
|
||||
func CreatedAtIn(vs ...time.Time) predicate.PaymentAuditLog {
|
||||
return predicate.PaymentAuditLog(sql.FieldIn(FieldCreatedAt, vs...))
|
||||
}
|
||||
|
||||
// CreatedAtNotIn applies the NotIn predicate on the "created_at" field.
|
||||
func CreatedAtNotIn(vs ...time.Time) predicate.PaymentAuditLog {
|
||||
return predicate.PaymentAuditLog(sql.FieldNotIn(FieldCreatedAt, vs...))
|
||||
}
|
||||
|
||||
// CreatedAtGT applies the GT predicate on the "created_at" field.
|
||||
func CreatedAtGT(v time.Time) predicate.PaymentAuditLog {
|
||||
return predicate.PaymentAuditLog(sql.FieldGT(FieldCreatedAt, v))
|
||||
}
|
||||
|
||||
// CreatedAtGTE applies the GTE predicate on the "created_at" field.
|
||||
func CreatedAtGTE(v time.Time) predicate.PaymentAuditLog {
|
||||
return predicate.PaymentAuditLog(sql.FieldGTE(FieldCreatedAt, v))
|
||||
}
|
||||
|
||||
// CreatedAtLT applies the LT predicate on the "created_at" field.
|
||||
func CreatedAtLT(v time.Time) predicate.PaymentAuditLog {
|
||||
return predicate.PaymentAuditLog(sql.FieldLT(FieldCreatedAt, v))
|
||||
}
|
||||
|
||||
// CreatedAtLTE applies the LTE predicate on the "created_at" field.
|
||||
func CreatedAtLTE(v time.Time) predicate.PaymentAuditLog {
|
||||
return predicate.PaymentAuditLog(sql.FieldLTE(FieldCreatedAt, v))
|
||||
}
|
||||
|
||||
// And groups predicates with the AND operator between them.
|
||||
func And(predicates ...predicate.PaymentAuditLog) predicate.PaymentAuditLog {
|
||||
return predicate.PaymentAuditLog(sql.AndPredicates(predicates...))
|
||||
}
|
||||
|
||||
// Or groups predicates with the OR operator between them.
|
||||
func Or(predicates ...predicate.PaymentAuditLog) predicate.PaymentAuditLog {
|
||||
return predicate.PaymentAuditLog(sql.OrPredicates(predicates...))
|
||||
}
|
||||
|
||||
// Not applies the not operator on the given predicate.
|
||||
func Not(p predicate.PaymentAuditLog) predicate.PaymentAuditLog {
|
||||
return predicate.PaymentAuditLog(sql.NotPredicates(p))
|
||||
}
|
||||
696
backend/ent/paymentauditlog_create.go
Normal file
@@ -0,0 +1,696 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package ent
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"entgo.io/ent/schema/field"
|
||||
"github.com/Wei-Shaw/sub2api/ent/paymentauditlog"
|
||||
)
|
||||
|
||||
// PaymentAuditLogCreate is the builder for creating a PaymentAuditLog entity.
|
||||
type PaymentAuditLogCreate struct {
|
||||
config
|
||||
mutation *PaymentAuditLogMutation
|
||||
hooks []Hook
|
||||
conflict []sql.ConflictOption
|
||||
}
|
||||
|
||||
// SetOrderID sets the "order_id" field.
|
||||
func (_c *PaymentAuditLogCreate) SetOrderID(v string) *PaymentAuditLogCreate {
|
||||
_c.mutation.SetOrderID(v)
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetAction sets the "action" field.
|
||||
func (_c *PaymentAuditLogCreate) SetAction(v string) *PaymentAuditLogCreate {
|
||||
_c.mutation.SetAction(v)
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetDetail sets the "detail" field.
|
||||
func (_c *PaymentAuditLogCreate) SetDetail(v string) *PaymentAuditLogCreate {
|
||||
_c.mutation.SetDetail(v)
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetNillableDetail sets the "detail" field if the given value is not nil.
|
||||
func (_c *PaymentAuditLogCreate) SetNillableDetail(v *string) *PaymentAuditLogCreate {
|
||||
if v != nil {
|
||||
_c.SetDetail(*v)
|
||||
}
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetOperator sets the "operator" field.
|
||||
func (_c *PaymentAuditLogCreate) SetOperator(v string) *PaymentAuditLogCreate {
|
||||
_c.mutation.SetOperator(v)
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetNillableOperator sets the "operator" field if the given value is not nil.
|
||||
func (_c *PaymentAuditLogCreate) SetNillableOperator(v *string) *PaymentAuditLogCreate {
|
||||
if v != nil {
|
||||
_c.SetOperator(*v)
|
||||
}
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetCreatedAt sets the "created_at" field.
|
||||
func (_c *PaymentAuditLogCreate) SetCreatedAt(v time.Time) *PaymentAuditLogCreate {
|
||||
_c.mutation.SetCreatedAt(v)
|
||||
return _c
|
||||
}
|
||||
|
||||
// SetNillableCreatedAt sets the "created_at" field if the given value is not nil.
|
||||
func (_c *PaymentAuditLogCreate) SetNillableCreatedAt(v *time.Time) *PaymentAuditLogCreate {
|
||||
if v != nil {
|
||||
_c.SetCreatedAt(*v)
|
||||
}
|
||||
return _c
|
||||
}
|
||||
|
||||
// Mutation returns the PaymentAuditLogMutation object of the builder.
|
||||
func (_c *PaymentAuditLogCreate) Mutation() *PaymentAuditLogMutation {
|
||||
return _c.mutation
|
||||
}
|
||||
|
||||
// Save creates the PaymentAuditLog in the database.
|
||||
func (_c *PaymentAuditLogCreate) Save(ctx context.Context) (*PaymentAuditLog, error) {
|
||||
_c.defaults()
|
||||
return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks)
|
||||
}
|
||||
|
||||
// SaveX calls Save and panics if Save returns an error.
|
||||
func (_c *PaymentAuditLogCreate) SaveX(ctx context.Context) *PaymentAuditLog {
|
||||
v, err := _c.Save(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// Exec executes the query.
|
||||
func (_c *PaymentAuditLogCreate) Exec(ctx context.Context) error {
|
||||
_, err := _c.Save(ctx)
|
||||
return err
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (_c *PaymentAuditLogCreate) ExecX(ctx context.Context) {
|
||||
if err := _c.Exec(ctx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// defaults sets the default values of the builder before save.
|
||||
func (_c *PaymentAuditLogCreate) defaults() {
|
||||
if _, ok := _c.mutation.Detail(); !ok {
|
||||
v := paymentauditlog.DefaultDetail
|
||||
_c.mutation.SetDetail(v)
|
||||
}
|
||||
if _, ok := _c.mutation.Operator(); !ok {
|
||||
v := paymentauditlog.DefaultOperator
|
||||
_c.mutation.SetOperator(v)
|
||||
}
|
||||
if _, ok := _c.mutation.CreatedAt(); !ok {
|
||||
v := paymentauditlog.DefaultCreatedAt()
|
||||
_c.mutation.SetCreatedAt(v)
|
||||
}
|
||||
}
|
||||
|
||||
// check runs all checks and user-defined validators on the builder.
|
||||
func (_c *PaymentAuditLogCreate) check() error {
|
||||
if _, ok := _c.mutation.OrderID(); !ok {
|
||||
return &ValidationError{Name: "order_id", err: errors.New(`ent: missing required field "PaymentAuditLog.order_id"`)}
|
||||
}
|
||||
if v, ok := _c.mutation.OrderID(); ok {
|
||||
if err := paymentauditlog.OrderIDValidator(v); err != nil {
|
||||
return &ValidationError{Name: "order_id", err: fmt.Errorf(`ent: validator failed for field "PaymentAuditLog.order_id": %w`, err)}
|
||||
}
|
||||
}
|
||||
if _, ok := _c.mutation.Action(); !ok {
|
||||
return &ValidationError{Name: "action", err: errors.New(`ent: missing required field "PaymentAuditLog.action"`)}
|
||||
}
|
||||
if v, ok := _c.mutation.Action(); ok {
|
||||
if err := paymentauditlog.ActionValidator(v); err != nil {
|
||||
return &ValidationError{Name: "action", err: fmt.Errorf(`ent: validator failed for field "PaymentAuditLog.action": %w`, err)}
|
||||
}
|
||||
}
|
||||
if _, ok := _c.mutation.Detail(); !ok {
|
||||
return &ValidationError{Name: "detail", err: errors.New(`ent: missing required field "PaymentAuditLog.detail"`)}
|
||||
}
|
||||
if _, ok := _c.mutation.Operator(); !ok {
|
||||
return &ValidationError{Name: "operator", err: errors.New(`ent: missing required field "PaymentAuditLog.operator"`)}
|
||||
}
|
||||
if v, ok := _c.mutation.Operator(); ok {
|
||||
if err := paymentauditlog.OperatorValidator(v); err != nil {
|
||||
return &ValidationError{Name: "operator", err: fmt.Errorf(`ent: validator failed for field "PaymentAuditLog.operator": %w`, err)}
|
||||
}
|
||||
}
|
||||
if _, ok := _c.mutation.CreatedAt(); !ok {
|
||||
return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "PaymentAuditLog.created_at"`)}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (_c *PaymentAuditLogCreate) sqlSave(ctx context.Context) (*PaymentAuditLog, error) {
|
||||
if err := _c.check(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
_node, _spec := _c.createSpec()
|
||||
if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil {
|
||||
if sqlgraph.IsConstraintError(err) {
|
||||
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
id := _spec.ID.Value.(int64)
|
||||
_node.ID = int64(id)
|
||||
_c.mutation.id = &_node.ID
|
||||
_c.mutation.done = true
|
||||
return _node, nil
|
||||
}
|
||||
|
||||
func (_c *PaymentAuditLogCreate) createSpec() (*PaymentAuditLog, *sqlgraph.CreateSpec) {
|
||||
var (
|
||||
_node = &PaymentAuditLog{config: _c.config}
|
||||
_spec = sqlgraph.NewCreateSpec(paymentauditlog.Table, sqlgraph.NewFieldSpec(paymentauditlog.FieldID, field.TypeInt64))
|
||||
)
|
||||
_spec.OnConflict = _c.conflict
|
||||
if value, ok := _c.mutation.OrderID(); ok {
|
||||
_spec.SetField(paymentauditlog.FieldOrderID, field.TypeString, value)
|
||||
_node.OrderID = value
|
||||
}
|
||||
if value, ok := _c.mutation.Action(); ok {
|
||||
_spec.SetField(paymentauditlog.FieldAction, field.TypeString, value)
|
||||
_node.Action = value
|
||||
}
|
||||
if value, ok := _c.mutation.Detail(); ok {
|
||||
_spec.SetField(paymentauditlog.FieldDetail, field.TypeString, value)
|
||||
_node.Detail = value
|
||||
}
|
||||
if value, ok := _c.mutation.Operator(); ok {
|
||||
_spec.SetField(paymentauditlog.FieldOperator, field.TypeString, value)
|
||||
_node.Operator = value
|
||||
}
|
||||
if value, ok := _c.mutation.CreatedAt(); ok {
|
||||
_spec.SetField(paymentauditlog.FieldCreatedAt, field.TypeTime, value)
|
||||
_node.CreatedAt = value
|
||||
}
|
||||
return _node, _spec
|
||||
}
|
||||
|
||||
// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause
|
||||
// of the `INSERT` statement. For example:
|
||||
//
|
||||
// client.PaymentAuditLog.Create().
|
||||
// SetOrderID(v).
|
||||
// OnConflict(
|
||||
// // Update the row with the new values
|
||||
// // the was proposed for insertion.
|
||||
// sql.ResolveWithNewValues(),
|
||||
// ).
|
||||
// // Override some of the fields with custom
|
||||
// // update values.
|
||||
// Update(func(u *ent.PaymentAuditLogUpsert) {
|
||||
// SetOrderID(v+v).
|
||||
// }).
|
||||
// Exec(ctx)
|
||||
func (_c *PaymentAuditLogCreate) OnConflict(opts ...sql.ConflictOption) *PaymentAuditLogUpsertOne {
|
||||
_c.conflict = opts
|
||||
return &PaymentAuditLogUpsertOne{
|
||||
create: _c,
|
||||
}
|
||||
}
|
||||
|
||||
// OnConflictColumns calls `OnConflict` and configures the columns
|
||||
// as conflict target. Using this option is equivalent to using:
|
||||
//
|
||||
// client.PaymentAuditLog.Create().
|
||||
// OnConflict(sql.ConflictColumns(columns...)).
|
||||
// Exec(ctx)
|
||||
func (_c *PaymentAuditLogCreate) OnConflictColumns(columns ...string) *PaymentAuditLogUpsertOne {
|
||||
_c.conflict = append(_c.conflict, sql.ConflictColumns(columns...))
|
||||
return &PaymentAuditLogUpsertOne{
|
||||
create: _c,
|
||||
}
|
||||
}
|
||||
|
||||
type (
|
||||
// PaymentAuditLogUpsertOne is the builder for "upsert"-ing
|
||||
// one PaymentAuditLog node.
|
||||
PaymentAuditLogUpsertOne struct {
|
||||
create *PaymentAuditLogCreate
|
||||
}
|
||||
|
||||
// PaymentAuditLogUpsert is the "OnConflict" setter.
|
||||
PaymentAuditLogUpsert struct {
|
||||
*sql.UpdateSet
|
||||
}
|
||||
)
|
||||
|
||||
// SetOrderID sets the "order_id" field.
|
||||
func (u *PaymentAuditLogUpsert) SetOrderID(v string) *PaymentAuditLogUpsert {
|
||||
u.Set(paymentauditlog.FieldOrderID, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// UpdateOrderID sets the "order_id" field to the value that was provided on create.
|
||||
func (u *PaymentAuditLogUpsert) UpdateOrderID() *PaymentAuditLogUpsert {
|
||||
u.SetExcluded(paymentauditlog.FieldOrderID)
|
||||
return u
|
||||
}
|
||||
|
||||
// SetAction sets the "action" field.
|
||||
func (u *PaymentAuditLogUpsert) SetAction(v string) *PaymentAuditLogUpsert {
|
||||
u.Set(paymentauditlog.FieldAction, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// UpdateAction sets the "action" field to the value that was provided on create.
|
||||
func (u *PaymentAuditLogUpsert) UpdateAction() *PaymentAuditLogUpsert {
|
||||
u.SetExcluded(paymentauditlog.FieldAction)
|
||||
return u
|
||||
}
|
||||
|
||||
// SetDetail sets the "detail" field.
|
||||
func (u *PaymentAuditLogUpsert) SetDetail(v string) *PaymentAuditLogUpsert {
|
||||
u.Set(paymentauditlog.FieldDetail, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// UpdateDetail sets the "detail" field to the value that was provided on create.
|
||||
func (u *PaymentAuditLogUpsert) UpdateDetail() *PaymentAuditLogUpsert {
|
||||
u.SetExcluded(paymentauditlog.FieldDetail)
|
||||
return u
|
||||
}
|
||||
|
||||
// SetOperator sets the "operator" field.
|
||||
func (u *PaymentAuditLogUpsert) SetOperator(v string) *PaymentAuditLogUpsert {
|
||||
u.Set(paymentauditlog.FieldOperator, v)
|
||||
return u
|
||||
}
|
||||
|
||||
// UpdateOperator sets the "operator" field to the value that was provided on create.
|
||||
func (u *PaymentAuditLogUpsert) UpdateOperator() *PaymentAuditLogUpsert {
|
||||
u.SetExcluded(paymentauditlog.FieldOperator)
|
||||
return u
|
||||
}
|
||||
|
||||
// UpdateNewValues updates the mutable fields using the new values that were set on create.
|
||||
// Using this option is equivalent to using:
|
||||
//
|
||||
// client.PaymentAuditLog.Create().
|
||||
// OnConflict(
|
||||
// sql.ResolveWithNewValues(),
|
||||
// ).
|
||||
// Exec(ctx)
|
||||
func (u *PaymentAuditLogUpsertOne) UpdateNewValues() *PaymentAuditLogUpsertOne {
|
||||
u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues())
|
||||
u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) {
|
||||
if _, exists := u.create.mutation.CreatedAt(); exists {
|
||||
s.SetIgnore(paymentauditlog.FieldCreatedAt)
|
||||
}
|
||||
}))
|
||||
return u
|
||||
}
|
||||
|
||||
// Ignore sets each column to itself in case of conflict.
|
||||
// Using this option is equivalent to using:
|
||||
//
|
||||
// client.PaymentAuditLog.Create().
|
||||
// OnConflict(sql.ResolveWithIgnore()).
|
||||
// Exec(ctx)
|
||||
func (u *PaymentAuditLogUpsertOne) Ignore() *PaymentAuditLogUpsertOne {
|
||||
u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore())
|
||||
return u
|
||||
}
|
||||
|
||||
// DoNothing configures the conflict_action to `DO NOTHING`.
|
||||
// Supported only by SQLite and PostgreSQL.
|
||||
func (u *PaymentAuditLogUpsertOne) DoNothing() *PaymentAuditLogUpsertOne {
|
||||
u.create.conflict = append(u.create.conflict, sql.DoNothing())
|
||||
return u
|
||||
}
|
||||
|
||||
// Update allows overriding fields `UPDATE` values. See the PaymentAuditLogCreate.OnConflict
|
||||
// documentation for more info.
|
||||
func (u *PaymentAuditLogUpsertOne) Update(set func(*PaymentAuditLogUpsert)) *PaymentAuditLogUpsertOne {
|
||||
u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) {
|
||||
set(&PaymentAuditLogUpsert{UpdateSet: update})
|
||||
}))
|
||||
return u
|
||||
}
|
||||
|
||||
// SetOrderID sets the "order_id" field.
|
||||
func (u *PaymentAuditLogUpsertOne) SetOrderID(v string) *PaymentAuditLogUpsertOne {
|
||||
return u.Update(func(s *PaymentAuditLogUpsert) {
|
||||
s.SetOrderID(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateOrderID sets the "order_id" field to the value that was provided on create.
|
||||
func (u *PaymentAuditLogUpsertOne) UpdateOrderID() *PaymentAuditLogUpsertOne {
|
||||
return u.Update(func(s *PaymentAuditLogUpsert) {
|
||||
s.UpdateOrderID()
|
||||
})
|
||||
}
|
||||
|
||||
// SetAction sets the "action" field.
|
||||
func (u *PaymentAuditLogUpsertOne) SetAction(v string) *PaymentAuditLogUpsertOne {
|
||||
return u.Update(func(s *PaymentAuditLogUpsert) {
|
||||
s.SetAction(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateAction sets the "action" field to the value that was provided on create.
|
||||
func (u *PaymentAuditLogUpsertOne) UpdateAction() *PaymentAuditLogUpsertOne {
|
||||
return u.Update(func(s *PaymentAuditLogUpsert) {
|
||||
s.UpdateAction()
|
||||
})
|
||||
}
|
||||
|
||||
// SetDetail sets the "detail" field.
|
||||
func (u *PaymentAuditLogUpsertOne) SetDetail(v string) *PaymentAuditLogUpsertOne {
|
||||
return u.Update(func(s *PaymentAuditLogUpsert) {
|
||||
s.SetDetail(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateDetail sets the "detail" field to the value that was provided on create.
|
||||
func (u *PaymentAuditLogUpsertOne) UpdateDetail() *PaymentAuditLogUpsertOne {
|
||||
return u.Update(func(s *PaymentAuditLogUpsert) {
|
||||
s.UpdateDetail()
|
||||
})
|
||||
}
|
||||
|
||||
// SetOperator sets the "operator" field.
|
||||
func (u *PaymentAuditLogUpsertOne) SetOperator(v string) *PaymentAuditLogUpsertOne {
|
||||
return u.Update(func(s *PaymentAuditLogUpsert) {
|
||||
s.SetOperator(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateOperator sets the "operator" field to the value that was provided on create.
|
||||
func (u *PaymentAuditLogUpsertOne) UpdateOperator() *PaymentAuditLogUpsertOne {
|
||||
return u.Update(func(s *PaymentAuditLogUpsert) {
|
||||
s.UpdateOperator()
|
||||
})
|
||||
}
|
||||
|
||||
// Exec executes the query.
|
||||
func (u *PaymentAuditLogUpsertOne) Exec(ctx context.Context) error {
|
||||
if len(u.create.conflict) == 0 {
|
||||
return errors.New("ent: missing options for PaymentAuditLogCreate.OnConflict")
|
||||
}
|
||||
return u.create.Exec(ctx)
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (u *PaymentAuditLogUpsertOne) ExecX(ctx context.Context) {
|
||||
if err := u.create.Exec(ctx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Exec executes the UPSERT query and returns the inserted/updated ID.
|
||||
func (u *PaymentAuditLogUpsertOne) ID(ctx context.Context) (id int64, err error) {
|
||||
node, err := u.create.Save(ctx)
|
||||
if err != nil {
|
||||
return id, err
|
||||
}
|
||||
return node.ID, nil
|
||||
}
|
||||
|
||||
// IDX is like ID, but panics if an error occurs.
|
||||
func (u *PaymentAuditLogUpsertOne) IDX(ctx context.Context) int64 {
|
||||
id, err := u.ID(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return id
|
||||
}
|
||||
|
||||
// PaymentAuditLogCreateBulk is the builder for creating many PaymentAuditLog entities in bulk.
|
||||
type PaymentAuditLogCreateBulk struct {
|
||||
config
|
||||
err error
|
||||
builders []*PaymentAuditLogCreate
|
||||
conflict []sql.ConflictOption
|
||||
}
|
||||
|
||||
// Save creates the PaymentAuditLog entities in the database.
|
||||
func (_c *PaymentAuditLogCreateBulk) Save(ctx context.Context) ([]*PaymentAuditLog, error) {
|
||||
if _c.err != nil {
|
||||
return nil, _c.err
|
||||
}
|
||||
specs := make([]*sqlgraph.CreateSpec, len(_c.builders))
|
||||
nodes := make([]*PaymentAuditLog, len(_c.builders))
|
||||
mutators := make([]Mutator, len(_c.builders))
|
||||
for i := range _c.builders {
|
||||
func(i int, root context.Context) {
|
||||
builder := _c.builders[i]
|
||||
builder.defaults()
|
||||
var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
|
||||
mutation, ok := m.(*PaymentAuditLogMutation)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("unexpected mutation type %T", m)
|
||||
}
|
||||
if err := builder.check(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
builder.mutation = mutation
|
||||
var err error
|
||||
nodes[i], specs[i] = builder.createSpec()
|
||||
if i < len(mutators)-1 {
|
||||
_, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation)
|
||||
} else {
|
||||
spec := &sqlgraph.BatchCreateSpec{Nodes: specs}
|
||||
spec.OnConflict = _c.conflict
|
||||
// Invoke the actual operation on the latest mutation in the chain.
|
||||
if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil {
|
||||
if sqlgraph.IsConstraintError(err) {
|
||||
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||
}
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
mutation.id = &nodes[i].ID
|
||||
if specs[i].ID.Value != nil {
|
||||
id := specs[i].ID.Value.(int64)
|
||||
nodes[i].ID = int64(id)
|
||||
}
|
||||
mutation.done = true
|
||||
return nodes[i], nil
|
||||
})
|
||||
for i := len(builder.hooks) - 1; i >= 0; i-- {
|
||||
mut = builder.hooks[i](mut)
|
||||
}
|
||||
mutators[i] = mut
|
||||
}(i, ctx)
|
||||
}
|
||||
if len(mutators) > 0 {
|
||||
if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return nodes, nil
|
||||
}
|
||||
|
||||
// SaveX is like Save, but panics if an error occurs.
|
||||
func (_c *PaymentAuditLogCreateBulk) SaveX(ctx context.Context) []*PaymentAuditLog {
|
||||
v, err := _c.Save(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// Exec executes the query.
|
||||
func (_c *PaymentAuditLogCreateBulk) Exec(ctx context.Context) error {
|
||||
_, err := _c.Save(ctx)
|
||||
return err
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (_c *PaymentAuditLogCreateBulk) ExecX(ctx context.Context) {
|
||||
if err := _c.Exec(ctx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause
|
||||
// of the `INSERT` statement. For example:
|
||||
//
|
||||
// client.PaymentAuditLog.CreateBulk(builders...).
|
||||
// OnConflict(
|
||||
// // Update the row with the new values
|
||||
// // the was proposed for insertion.
|
||||
// sql.ResolveWithNewValues(),
|
||||
// ).
|
||||
// // Override some of the fields with custom
|
||||
// // update values.
|
||||
// Update(func(u *ent.PaymentAuditLogUpsert) {
|
||||
// SetOrderID(v+v).
|
||||
// }).
|
||||
// Exec(ctx)
|
||||
func (_c *PaymentAuditLogCreateBulk) OnConflict(opts ...sql.ConflictOption) *PaymentAuditLogUpsertBulk {
|
||||
_c.conflict = opts
|
||||
return &PaymentAuditLogUpsertBulk{
|
||||
create: _c,
|
||||
}
|
||||
}
|
||||
|
||||
// OnConflictColumns calls `OnConflict` and configures the columns
|
||||
// as conflict target. Using this option is equivalent to using:
|
||||
//
|
||||
// client.PaymentAuditLog.Create().
|
||||
// OnConflict(sql.ConflictColumns(columns...)).
|
||||
// Exec(ctx)
|
||||
func (_c *PaymentAuditLogCreateBulk) OnConflictColumns(columns ...string) *PaymentAuditLogUpsertBulk {
|
||||
_c.conflict = append(_c.conflict, sql.ConflictColumns(columns...))
|
||||
return &PaymentAuditLogUpsertBulk{
|
||||
create: _c,
|
||||
}
|
||||
}
|
||||
|
||||
// PaymentAuditLogUpsertBulk is the builder for "upsert"-ing
|
||||
// a bulk of PaymentAuditLog nodes.
|
||||
type PaymentAuditLogUpsertBulk struct {
|
||||
create *PaymentAuditLogCreateBulk
|
||||
}
|
||||
|
||||
// UpdateNewValues updates the mutable fields using the new values that
|
||||
// were set on create. Using this option is equivalent to using:
|
||||
//
|
||||
// client.PaymentAuditLog.Create().
|
||||
// OnConflict(
|
||||
// sql.ResolveWithNewValues(),
|
||||
// ).
|
||||
// Exec(ctx)
|
||||
func (u *PaymentAuditLogUpsertBulk) UpdateNewValues() *PaymentAuditLogUpsertBulk {
|
||||
u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues())
|
||||
u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) {
|
||||
for _, b := range u.create.builders {
|
||||
if _, exists := b.mutation.CreatedAt(); exists {
|
||||
s.SetIgnore(paymentauditlog.FieldCreatedAt)
|
||||
}
|
||||
}
|
||||
}))
|
||||
return u
|
||||
}
|
||||
|
||||
// Ignore sets each column to itself in case of conflict.
|
||||
// Using this option is equivalent to using:
|
||||
//
|
||||
// client.PaymentAuditLog.Create().
|
||||
// OnConflict(sql.ResolveWithIgnore()).
|
||||
// Exec(ctx)
|
||||
func (u *PaymentAuditLogUpsertBulk) Ignore() *PaymentAuditLogUpsertBulk {
|
||||
u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore())
|
||||
return u
|
||||
}
|
||||
|
||||
// DoNothing configures the conflict_action to `DO NOTHING`.
|
||||
// Supported only by SQLite and PostgreSQL.
|
||||
func (u *PaymentAuditLogUpsertBulk) DoNothing() *PaymentAuditLogUpsertBulk {
|
||||
u.create.conflict = append(u.create.conflict, sql.DoNothing())
|
||||
return u
|
||||
}
|
||||
|
||||
// Update allows overriding fields `UPDATE` values. See the PaymentAuditLogCreateBulk.OnConflict
|
||||
// documentation for more info.
|
||||
func (u *PaymentAuditLogUpsertBulk) Update(set func(*PaymentAuditLogUpsert)) *PaymentAuditLogUpsertBulk {
|
||||
u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) {
|
||||
set(&PaymentAuditLogUpsert{UpdateSet: update})
|
||||
}))
|
||||
return u
|
||||
}
|
||||
|
||||
// SetOrderID sets the "order_id" field.
|
||||
func (u *PaymentAuditLogUpsertBulk) SetOrderID(v string) *PaymentAuditLogUpsertBulk {
|
||||
return u.Update(func(s *PaymentAuditLogUpsert) {
|
||||
s.SetOrderID(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateOrderID sets the "order_id" field to the value that was provided on create.
|
||||
func (u *PaymentAuditLogUpsertBulk) UpdateOrderID() *PaymentAuditLogUpsertBulk {
|
||||
return u.Update(func(s *PaymentAuditLogUpsert) {
|
||||
s.UpdateOrderID()
|
||||
})
|
||||
}
|
||||
|
||||
// SetAction sets the "action" field.
|
||||
func (u *PaymentAuditLogUpsertBulk) SetAction(v string) *PaymentAuditLogUpsertBulk {
|
||||
return u.Update(func(s *PaymentAuditLogUpsert) {
|
||||
s.SetAction(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateAction sets the "action" field to the value that was provided on create.
|
||||
func (u *PaymentAuditLogUpsertBulk) UpdateAction() *PaymentAuditLogUpsertBulk {
|
||||
return u.Update(func(s *PaymentAuditLogUpsert) {
|
||||
s.UpdateAction()
|
||||
})
|
||||
}
|
||||
|
||||
// SetDetail sets the "detail" field.
|
||||
func (u *PaymentAuditLogUpsertBulk) SetDetail(v string) *PaymentAuditLogUpsertBulk {
|
||||
return u.Update(func(s *PaymentAuditLogUpsert) {
|
||||
s.SetDetail(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateDetail sets the "detail" field to the value that was provided on create.
|
||||
func (u *PaymentAuditLogUpsertBulk) UpdateDetail() *PaymentAuditLogUpsertBulk {
|
||||
return u.Update(func(s *PaymentAuditLogUpsert) {
|
||||
s.UpdateDetail()
|
||||
})
|
||||
}
|
||||
|
||||
// SetOperator sets the "operator" field.
|
||||
func (u *PaymentAuditLogUpsertBulk) SetOperator(v string) *PaymentAuditLogUpsertBulk {
|
||||
return u.Update(func(s *PaymentAuditLogUpsert) {
|
||||
s.SetOperator(v)
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateOperator sets the "operator" field to the value that was provided on create.
|
||||
func (u *PaymentAuditLogUpsertBulk) UpdateOperator() *PaymentAuditLogUpsertBulk {
|
||||
return u.Update(func(s *PaymentAuditLogUpsert) {
|
||||
s.UpdateOperator()
|
||||
})
|
||||
}
|
||||
|
||||
// Exec executes the query.
|
||||
func (u *PaymentAuditLogUpsertBulk) Exec(ctx context.Context) error {
|
||||
if u.create.err != nil {
|
||||
return u.create.err
|
||||
}
|
||||
for i, b := range u.create.builders {
|
||||
if len(b.conflict) != 0 {
|
||||
return fmt.Errorf("ent: OnConflict was set for builder %d. Set it on the PaymentAuditLogCreateBulk instead", i)
|
||||
}
|
||||
}
|
||||
if len(u.create.conflict) == 0 {
|
||||
return errors.New("ent: missing options for PaymentAuditLogCreateBulk.OnConflict")
|
||||
}
|
||||
return u.create.Exec(ctx)
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (u *PaymentAuditLogUpsertBulk) ExecX(ctx context.Context) {
|
||||
if err := u.create.Exec(ctx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
88
backend/ent/paymentauditlog_delete.go
Normal file
@@ -0,0 +1,88 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package ent
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"entgo.io/ent/schema/field"
|
||||
"github.com/Wei-Shaw/sub2api/ent/paymentauditlog"
|
||||
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||
)
|
||||
|
||||
// PaymentAuditLogDelete is the builder for deleting a PaymentAuditLog entity.
|
||||
type PaymentAuditLogDelete struct {
|
||||
config
|
||||
hooks []Hook
|
||||
mutation *PaymentAuditLogMutation
|
||||
}
|
||||
|
||||
// Where appends a list predicates to the PaymentAuditLogDelete builder.
|
||||
func (_d *PaymentAuditLogDelete) Where(ps ...predicate.PaymentAuditLog) *PaymentAuditLogDelete {
|
||||
_d.mutation.Where(ps...)
|
||||
return _d
|
||||
}
|
||||
|
||||
// Exec executes the deletion query and returns how many vertices were deleted.
|
||||
func (_d *PaymentAuditLogDelete) Exec(ctx context.Context) (int, error) {
|
||||
return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks)
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (_d *PaymentAuditLogDelete) ExecX(ctx context.Context) int {
|
||||
n, err := _d.Exec(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (_d *PaymentAuditLogDelete) sqlExec(ctx context.Context) (int, error) {
|
||||
_spec := sqlgraph.NewDeleteSpec(paymentauditlog.Table, sqlgraph.NewFieldSpec(paymentauditlog.FieldID, field.TypeInt64))
|
||||
if ps := _d.mutation.predicates; len(ps) > 0 {
|
||||
_spec.Predicate = func(selector *sql.Selector) {
|
||||
for i := range ps {
|
||||
ps[i](selector)
|
||||
}
|
||||
}
|
||||
}
|
||||
affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec)
|
||||
if err != nil && sqlgraph.IsConstraintError(err) {
|
||||
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||
}
|
||||
_d.mutation.done = true
|
||||
return affected, err
|
||||
}
|
||||
|
||||
// PaymentAuditLogDeleteOne is the builder for deleting a single PaymentAuditLog entity.
|
||||
type PaymentAuditLogDeleteOne struct {
|
||||
_d *PaymentAuditLogDelete
|
||||
}
|
||||
|
||||
// Where appends a list predicates to the PaymentAuditLogDelete builder.
|
||||
func (_d *PaymentAuditLogDeleteOne) Where(ps ...predicate.PaymentAuditLog) *PaymentAuditLogDeleteOne {
|
||||
_d._d.mutation.Where(ps...)
|
||||
return _d
|
||||
}
|
||||
|
||||
// Exec executes the deletion query.
|
||||
func (_d *PaymentAuditLogDeleteOne) Exec(ctx context.Context) error {
|
||||
n, err := _d._d.Exec(ctx)
|
||||
switch {
|
||||
case err != nil:
|
||||
return err
|
||||
case n == 0:
|
||||
return &NotFoundError{paymentauditlog.Label}
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (_d *PaymentAuditLogDeleteOne) ExecX(ctx context.Context) {
|
||||
if err := _d.Exec(ctx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
564
backend/ent/paymentauditlog_query.go
Normal file
@@ -0,0 +1,564 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package ent
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math"
|
||||
|
||||
"entgo.io/ent"
|
||||
"entgo.io/ent/dialect"
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"entgo.io/ent/schema/field"
|
||||
"github.com/Wei-Shaw/sub2api/ent/paymentauditlog"
|
||||
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||
)
|
||||
|
||||
// PaymentAuditLogQuery is the builder for querying PaymentAuditLog entities.
type PaymentAuditLogQuery struct {
	config
	ctx        *QueryContext                 // limit/offset/unique/selected fields
	order      []paymentauditlog.OrderOption // ORDER BY terms, applied in insertion order
	inters     []Interceptor                 // interceptors run by prepareQuery before execution
	predicates []predicate.PaymentAuditLog   // WHERE predicates, applied in sequence
	modifiers  []func(*sql.Selector)         // raw selector modifiers (e.g. ForUpdate/ForShare)
	// intermediate query (i.e. traversal path).
	sql  *sql.Selector
	path func(context.Context) (*sql.Selector, error)
}
|
||||
|
||||
// Where adds a new predicate for the PaymentAuditLogQuery builder.
|
||||
func (_q *PaymentAuditLogQuery) Where(ps ...predicate.PaymentAuditLog) *PaymentAuditLogQuery {
|
||||
_q.predicates = append(_q.predicates, ps...)
|
||||
return _q
|
||||
}
|
||||
|
||||
// Limit the number of records to be returned by this query.
|
||||
func (_q *PaymentAuditLogQuery) Limit(limit int) *PaymentAuditLogQuery {
|
||||
_q.ctx.Limit = &limit
|
||||
return _q
|
||||
}
|
||||
|
||||
// Offset to start from.
|
||||
func (_q *PaymentAuditLogQuery) Offset(offset int) *PaymentAuditLogQuery {
|
||||
_q.ctx.Offset = &offset
|
||||
return _q
|
||||
}
|
||||
|
||||
// Unique configures the query builder to filter duplicate records on query.
|
||||
// By default, unique is set to true, and can be disabled using this method.
|
||||
func (_q *PaymentAuditLogQuery) Unique(unique bool) *PaymentAuditLogQuery {
|
||||
_q.ctx.Unique = &unique
|
||||
return _q
|
||||
}
|
||||
|
||||
// Order specifies how the records should be ordered.
|
||||
func (_q *PaymentAuditLogQuery) Order(o ...paymentauditlog.OrderOption) *PaymentAuditLogQuery {
|
||||
_q.order = append(_q.order, o...)
|
||||
return _q
|
||||
}
|
||||
|
||||
// First returns the first PaymentAuditLog entity from the query.
|
||||
// Returns a *NotFoundError when no PaymentAuditLog was found.
|
||||
func (_q *PaymentAuditLogQuery) First(ctx context.Context) (*PaymentAuditLog, error) {
|
||||
nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(nodes) == 0 {
|
||||
return nil, &NotFoundError{paymentauditlog.Label}
|
||||
}
|
||||
return nodes[0], nil
|
||||
}
|
||||
|
||||
// FirstX is like First, but panics if an error occurs.
|
||||
func (_q *PaymentAuditLogQuery) FirstX(ctx context.Context) *PaymentAuditLog {
|
||||
node, err := _q.First(ctx)
|
||||
if err != nil && !IsNotFound(err) {
|
||||
panic(err)
|
||||
}
|
||||
return node
|
||||
}
|
||||
|
||||
// FirstID returns the first PaymentAuditLog ID from the query.
|
||||
// Returns a *NotFoundError when no PaymentAuditLog ID was found.
|
||||
func (_q *PaymentAuditLogQuery) FirstID(ctx context.Context) (id int64, err error) {
|
||||
var ids []int64
|
||||
if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil {
|
||||
return
|
||||
}
|
||||
if len(ids) == 0 {
|
||||
err = &NotFoundError{paymentauditlog.Label}
|
||||
return
|
||||
}
|
||||
return ids[0], nil
|
||||
}
|
||||
|
||||
// FirstIDX is like FirstID, but panics if an error occurs.
|
||||
func (_q *PaymentAuditLogQuery) FirstIDX(ctx context.Context) int64 {
|
||||
id, err := _q.FirstID(ctx)
|
||||
if err != nil && !IsNotFound(err) {
|
||||
panic(err)
|
||||
}
|
||||
return id
|
||||
}
|
||||
|
||||
// Only returns a single PaymentAuditLog entity found by the query, ensuring it only returns one.
|
||||
// Returns a *NotSingularError when more than one PaymentAuditLog entity is found.
|
||||
// Returns a *NotFoundError when no PaymentAuditLog entities are found.
|
||||
func (_q *PaymentAuditLogQuery) Only(ctx context.Context) (*PaymentAuditLog, error) {
|
||||
nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
switch len(nodes) {
|
||||
case 1:
|
||||
return nodes[0], nil
|
||||
case 0:
|
||||
return nil, &NotFoundError{paymentauditlog.Label}
|
||||
default:
|
||||
return nil, &NotSingularError{paymentauditlog.Label}
|
||||
}
|
||||
}
|
||||
|
||||
// OnlyX is like Only, but panics if an error occurs.
|
||||
func (_q *PaymentAuditLogQuery) OnlyX(ctx context.Context) *PaymentAuditLog {
|
||||
node, err := _q.Only(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return node
|
||||
}
|
||||
|
||||
// OnlyID is like Only, but returns the only PaymentAuditLog ID in the query.
|
||||
// Returns a *NotSingularError when more than one PaymentAuditLog ID is found.
|
||||
// Returns a *NotFoundError when no entities are found.
|
||||
func (_q *PaymentAuditLogQuery) OnlyID(ctx context.Context) (id int64, err error) {
|
||||
var ids []int64
|
||||
if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil {
|
||||
return
|
||||
}
|
||||
switch len(ids) {
|
||||
case 1:
|
||||
id = ids[0]
|
||||
case 0:
|
||||
err = &NotFoundError{paymentauditlog.Label}
|
||||
default:
|
||||
err = &NotSingularError{paymentauditlog.Label}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// OnlyIDX is like OnlyID, but panics if an error occurs.
|
||||
func (_q *PaymentAuditLogQuery) OnlyIDX(ctx context.Context) int64 {
|
||||
id, err := _q.OnlyID(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return id
|
||||
}
|
||||
|
||||
// All executes the query and returns a list of PaymentAuditLogs.
|
||||
func (_q *PaymentAuditLogQuery) All(ctx context.Context) ([]*PaymentAuditLog, error) {
|
||||
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll)
|
||||
if err := _q.prepareQuery(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
qr := querierAll[[]*PaymentAuditLog, *PaymentAuditLogQuery]()
|
||||
return withInterceptors[[]*PaymentAuditLog](ctx, _q, qr, _q.inters)
|
||||
}
|
||||
|
||||
// AllX is like All, but panics if an error occurs.
|
||||
func (_q *PaymentAuditLogQuery) AllX(ctx context.Context) []*PaymentAuditLog {
|
||||
nodes, err := _q.All(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return nodes
|
||||
}
|
||||
|
||||
// IDs executes the query and returns a list of PaymentAuditLog IDs.
|
||||
func (_q *PaymentAuditLogQuery) IDs(ctx context.Context) (ids []int64, err error) {
|
||||
if _q.ctx.Unique == nil && _q.path != nil {
|
||||
_q.Unique(true)
|
||||
}
|
||||
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs)
|
||||
if err = _q.Select(paymentauditlog.FieldID).Scan(ctx, &ids); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ids, nil
|
||||
}
|
||||
|
||||
// IDsX is like IDs, but panics if an error occurs.
|
||||
func (_q *PaymentAuditLogQuery) IDsX(ctx context.Context) []int64 {
|
||||
ids, err := _q.IDs(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return ids
|
||||
}
|
||||
|
||||
// Count returns the count of the given query.
|
||||
func (_q *PaymentAuditLogQuery) Count(ctx context.Context) (int, error) {
|
||||
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount)
|
||||
if err := _q.prepareQuery(ctx); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return withInterceptors[int](ctx, _q, querierCount[*PaymentAuditLogQuery](), _q.inters)
|
||||
}
|
||||
|
||||
// CountX is like Count, but panics if an error occurs.
|
||||
func (_q *PaymentAuditLogQuery) CountX(ctx context.Context) int {
|
||||
count, err := _q.Count(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return count
|
||||
}
|
||||
|
||||
// Exist returns true if the query has elements in the graph.
|
||||
func (_q *PaymentAuditLogQuery) Exist(ctx context.Context) (bool, error) {
|
||||
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist)
|
||||
switch _, err := _q.FirstID(ctx); {
|
||||
case IsNotFound(err):
|
||||
return false, nil
|
||||
case err != nil:
|
||||
return false, fmt.Errorf("ent: check existence: %w", err)
|
||||
default:
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
|
||||
// ExistX is like Exist, but panics if an error occurs.
|
||||
func (_q *PaymentAuditLogQuery) ExistX(ctx context.Context) bool {
|
||||
exist, err := _q.Exist(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return exist
|
||||
}
|
||||
|
||||
// Clone returns a duplicate of the PaymentAuditLogQuery builder, including all associated steps. It can be
// used to prepare common query builders and use them differently after the clone is made.
//
// NOTE(review): the modifiers slice (ForUpdate/ForShare etc.) is not copied
// here, so a clone shares no row-locking modifiers with its source — confirm
// this matches the intended ent template behavior before relying on it.
func (_q *PaymentAuditLogQuery) Clone() *PaymentAuditLogQuery {
	// A nil receiver clones to nil, so chained calls on a nil query are safe.
	if _q == nil {
		return nil
	}
	return &PaymentAuditLogQuery{
		config:     _q.config,
		ctx:        _q.ctx.Clone(),
		order:      append([]paymentauditlog.OrderOption{}, _q.order...),
		inters:     append([]Interceptor{}, _q.inters...),
		predicates: append([]predicate.PaymentAuditLog{}, _q.predicates...),
		// clone intermediate query.
		sql:  _q.sql.Clone(),
		path: _q.path,
	}
}
|
||||
|
||||
// GroupBy is used to group vertices by one or more fields/columns.
|
||||
// It is often used with aggregate functions, like: count, max, mean, min, sum.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// var v []struct {
|
||||
// OrderID string `json:"order_id,omitempty"`
|
||||
// Count int `json:"count,omitempty"`
|
||||
// }
|
||||
//
|
||||
// client.PaymentAuditLog.Query().
|
||||
// GroupBy(paymentauditlog.FieldOrderID).
|
||||
// Aggregate(ent.Count()).
|
||||
// Scan(ctx, &v)
|
||||
func (_q *PaymentAuditLogQuery) GroupBy(field string, fields ...string) *PaymentAuditLogGroupBy {
|
||||
_q.ctx.Fields = append([]string{field}, fields...)
|
||||
grbuild := &PaymentAuditLogGroupBy{build: _q}
|
||||
grbuild.flds = &_q.ctx.Fields
|
||||
grbuild.label = paymentauditlog.Label
|
||||
grbuild.scan = grbuild.Scan
|
||||
return grbuild
|
||||
}
|
||||
|
||||
// Select allows the selection one or more fields/columns for the given query,
|
||||
// instead of selecting all fields in the entity.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// var v []struct {
|
||||
// OrderID string `json:"order_id,omitempty"`
|
||||
// }
|
||||
//
|
||||
// client.PaymentAuditLog.Query().
|
||||
// Select(paymentauditlog.FieldOrderID).
|
||||
// Scan(ctx, &v)
|
||||
func (_q *PaymentAuditLogQuery) Select(fields ...string) *PaymentAuditLogSelect {
|
||||
_q.ctx.Fields = append(_q.ctx.Fields, fields...)
|
||||
sbuild := &PaymentAuditLogSelect{PaymentAuditLogQuery: _q}
|
||||
sbuild.label = paymentauditlog.Label
|
||||
sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan
|
||||
return sbuild
|
||||
}
|
||||
|
||||
// Aggregate returns a PaymentAuditLogSelect configured with the given aggregations.
|
||||
func (_q *PaymentAuditLogQuery) Aggregate(fns ...AggregateFunc) *PaymentAuditLogSelect {
|
||||
return _q.Select().Aggregate(fns...)
|
||||
}
|
||||
|
||||
// prepareQuery runs all registered interceptors, validates the selected
// columns, and resolves the lazy traversal path (if any) into _q.sql.
// It must run before SQL is built or executed.
func (_q *PaymentAuditLogQuery) prepareQuery(ctx context.Context) error {
	for _, inter := range _q.inters {
		if inter == nil {
			return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
		}
		// Traverser interceptors may mutate the query before execution.
		if trv, ok := inter.(Traverser); ok {
			if err := trv.Traverse(ctx, _q); err != nil {
				return err
			}
		}
	}
	// Reject field names that are not columns of the paymentauditlog table.
	for _, f := range _q.ctx.Fields {
		if !paymentauditlog.ValidColumn(f) {
			return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
		}
	}
	// Materialize the traversal path into a concrete selector.
	if _q.path != nil {
		prev, err := _q.path(ctx)
		if err != nil {
			return err
		}
		_q.sql = prev
	}
	return nil
}

// sqlAll scans all rows matched by the query into PaymentAuditLog entities.
// Optional hooks may adjust the query spec before execution.
func (_q *PaymentAuditLogQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*PaymentAuditLog, error) {
	var (
		nodes = []*PaymentAuditLog{}
		_spec = _q.querySpec()
	)
	// ScanValues allocates destination values for one row's columns.
	_spec.ScanValues = func(columns []string) ([]any, error) {
		return (*PaymentAuditLog).scanValues(nil, columns)
	}
	// Assign converts the scanned values into a node and collects it.
	_spec.Assign = func(columns []string, values []any) error {
		node := &PaymentAuditLog{config: _q.config}
		nodes = append(nodes, node)
		return node.assignValues(columns, values)
	}
	if len(_q.modifiers) > 0 {
		_spec.Modifiers = _q.modifiers
	}
	for i := range hooks {
		hooks[i](ctx, _spec)
	}
	if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil {
		return nil, err
	}
	if len(nodes) == 0 {
		return nodes, nil
	}
	return nodes, nil
}

// sqlCount executes a COUNT over the rows matched by the query.
func (_q *PaymentAuditLogQuery) sqlCount(ctx context.Context) (int, error) {
	_spec := _q.querySpec()
	if len(_q.modifiers) > 0 {
		_spec.Modifiers = _q.modifiers
	}
	_spec.Node.Columns = _q.ctx.Fields
	// With an explicit column selection, Unique controls DISTINCT counting.
	if len(_q.ctx.Fields) > 0 {
		_spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique
	}
	return sqlgraph.CountNodes(ctx, _q.driver, _spec)
}

// querySpec assembles the sqlgraph.QuerySpec (columns, predicates, ordering,
// limit/offset) that sqlAll and sqlCount execute.
func (_q *PaymentAuditLogQuery) querySpec() *sqlgraph.QuerySpec {
	_spec := sqlgraph.NewQuerySpec(paymentauditlog.Table, paymentauditlog.Columns, sqlgraph.NewFieldSpec(paymentauditlog.FieldID, field.TypeInt64))
	_spec.From = _q.sql
	if unique := _q.ctx.Unique; unique != nil {
		_spec.Unique = *unique
	} else if _q.path != nil {
		// Traversal-based queries deduplicate by default.
		_spec.Unique = true
	}
	if fields := _q.ctx.Fields; len(fields) > 0 {
		// The ID column is always selected so nodes can be identified.
		_spec.Node.Columns = make([]string, 0, len(fields))
		_spec.Node.Columns = append(_spec.Node.Columns, paymentauditlog.FieldID)
		for i := range fields {
			if fields[i] != paymentauditlog.FieldID {
				_spec.Node.Columns = append(_spec.Node.Columns, fields[i])
			}
		}
	}
	if ps := _q.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	if limit := _q.ctx.Limit; limit != nil {
		_spec.Limit = *limit
	}
	if offset := _q.ctx.Offset; offset != nil {
		_spec.Offset = *offset
	}
	if ps := _q.order; len(ps) > 0 {
		_spec.Order = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	return _spec
}

// sqlQuery builds the raw *sql.Selector for this query; used by the
// group-by/select scan paths rather than the node-based executors.
func (_q *PaymentAuditLogQuery) sqlQuery(ctx context.Context) *sql.Selector {
	builder := sql.Dialect(_q.driver.Dialect())
	t1 := builder.Table(paymentauditlog.Table)
	columns := _q.ctx.Fields
	if len(columns) == 0 {
		columns = paymentauditlog.Columns
	}
	selector := builder.Select(t1.Columns(columns...)...).From(t1)
	// An intermediate selector (traversal) replaces the fresh one.
	if _q.sql != nil {
		selector = _q.sql
		selector.Select(selector.Columns(columns...)...)
	}
	if _q.ctx.Unique != nil && *_q.ctx.Unique {
		selector.Distinct()
	}
	for _, m := range _q.modifiers {
		m(selector)
	}
	for _, p := range _q.predicates {
		p(selector)
	}
	for _, p := range _q.order {
		p(selector)
	}
	if offset := _q.ctx.Offset; offset != nil {
		// limit is mandatory for offset clause. We start
		// with default value, and override it below if needed.
		selector.Offset(*offset).Limit(math.MaxInt32)
	}
	if limit := _q.ctx.Limit; limit != nil {
		selector.Limit(*limit)
	}
	return selector
}
|
||||
|
||||
// ForUpdate locks the selected rows against concurrent updates, and prevent them from being
|
||||
// updated, deleted or "selected ... for update" by other sessions, until the transaction is
|
||||
// either committed or rolled-back.
|
||||
func (_q *PaymentAuditLogQuery) ForUpdate(opts ...sql.LockOption) *PaymentAuditLogQuery {
|
||||
if _q.driver.Dialect() == dialect.Postgres {
|
||||
_q.Unique(false)
|
||||
}
|
||||
_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
|
||||
s.ForUpdate(opts...)
|
||||
})
|
||||
return _q
|
||||
}
|
||||
|
||||
// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock
|
||||
// on any rows that are read. Other sessions can read the rows, but cannot modify them
|
||||
// until your transaction commits.
|
||||
func (_q *PaymentAuditLogQuery) ForShare(opts ...sql.LockOption) *PaymentAuditLogQuery {
|
||||
if _q.driver.Dialect() == dialect.Postgres {
|
||||
_q.Unique(false)
|
||||
}
|
||||
_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
|
||||
s.ForShare(opts...)
|
||||
})
|
||||
return _q
|
||||
}
|
||||
|
||||
// PaymentAuditLogGroupBy is the group-by builder for PaymentAuditLog entities.
type PaymentAuditLogGroupBy struct {
	selector
	build *PaymentAuditLogQuery // the query whose rows are grouped
}

// Aggregate adds the given aggregation functions to the group-by query.
func (_g *PaymentAuditLogGroupBy) Aggregate(fns ...AggregateFunc) *PaymentAuditLogGroupBy {
	_g.fns = append(_g.fns, fns...)
	return _g
}

// Scan applies the selector query and scans the result into the given value.
func (_g *PaymentAuditLogGroupBy) Scan(ctx context.Context, v any) error {
	ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy)
	if err := _g.build.prepareQuery(ctx); err != nil {
		return err
	}
	return scanWithInterceptors[*PaymentAuditLogQuery, *PaymentAuditLogGroupBy](ctx, _g.build, _g, _g.build.inters, v)
}

// sqlScan builds the GROUP BY statement (grouped columns + aggregations),
// executes it, and scans the rows into v.
func (_g *PaymentAuditLogGroupBy) sqlScan(ctx context.Context, root *PaymentAuditLogQuery, v any) error {
	selector := root.sqlQuery(ctx).Select()
	// Render each aggregation against the selector (e.g. COUNT(*), MAX(col)).
	aggregation := make([]string, 0, len(_g.fns))
	for _, fn := range _g.fns {
		aggregation = append(aggregation, fn(selector))
	}
	// If no explicit selection exists, select the grouped fields followed
	// by the aggregations.
	if len(selector.SelectedColumns()) == 0 {
		columns := make([]string, 0, len(*_g.flds)+len(_g.fns))
		for _, f := range *_g.flds {
			columns = append(columns, selector.C(f))
		}
		columns = append(columns, aggregation...)
		selector.Select(columns...)
	}
	selector.GroupBy(selector.Columns(*_g.flds...)...)
	if err := selector.Err(); err != nil {
		return err
	}
	rows := &sql.Rows{}
	query, args := selector.Query()
	if err := _g.build.driver.Query(ctx, query, args, rows); err != nil {
		return err
	}
	defer rows.Close()
	return sql.ScanSlice(rows, v)
}
|
||||
|
||||
// PaymentAuditLogSelect is the builder for selecting fields of PaymentAuditLog entities.
type PaymentAuditLogSelect struct {
	*PaymentAuditLogQuery
	selector
}

// Aggregate adds the given aggregation functions to the selector query.
func (_s *PaymentAuditLogSelect) Aggregate(fns ...AggregateFunc) *PaymentAuditLogSelect {
	_s.fns = append(_s.fns, fns...)
	return _s
}

// Scan applies the selector query and scans the result into the given value.
func (_s *PaymentAuditLogSelect) Scan(ctx context.Context, v any) error {
	ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect)
	if err := _s.prepareQuery(ctx); err != nil {
		return err
	}
	return scanWithInterceptors[*PaymentAuditLogQuery, *PaymentAuditLogSelect](ctx, _s.PaymentAuditLogQuery, _s, _s.inters, v)
}

// sqlScan executes the SELECT (fields plus any aggregations) and scans the
// resulting rows into v.
func (_s *PaymentAuditLogSelect) sqlScan(ctx context.Context, root *PaymentAuditLogQuery, v any) error {
	selector := root.sqlQuery(ctx)
	// Render each aggregation against the selector.
	aggregation := make([]string, 0, len(_s.fns))
	for _, fn := range _s.fns {
		aggregation = append(aggregation, fn(selector))
	}
	// With no selected fields, aggregations replace the selection;
	// otherwise they are appended after the selected fields.
	switch n := len(*_s.selector.flds); {
	case n == 0 && len(aggregation) > 0:
		selector.Select(aggregation...)
	case n != 0 && len(aggregation) > 0:
		selector.AppendSelect(aggregation...)
	}
	rows := &sql.Rows{}
	query, args := selector.Query()
	if err := _s.driver.Query(ctx, query, args, rows); err != nil {
		return err
	}
	defer rows.Close()
	return sql.ScanSlice(rows, v)
}
|
||||
357
backend/ent/paymentauditlog_update.go
Normal file
@@ -0,0 +1,357 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package ent
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"entgo.io/ent/schema/field"
|
||||
"github.com/Wei-Shaw/sub2api/ent/paymentauditlog"
|
||||
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||
)
|
||||
|
||||
// PaymentAuditLogUpdate is the builder for updating PaymentAuditLog entities.
type PaymentAuditLogUpdate struct {
	config
	hooks    []Hook                   // mutation hooks run around Save
	mutation *PaymentAuditLogMutation // accumulated field changes and predicates
}

// Where appends a list of predicates to the PaymentAuditLogUpdate builder.
func (_u *PaymentAuditLogUpdate) Where(ps ...predicate.PaymentAuditLog) *PaymentAuditLogUpdate {
	_u.mutation.Where(ps...)
	return _u
}

// SetOrderID sets the "order_id" field.
func (_u *PaymentAuditLogUpdate) SetOrderID(v string) *PaymentAuditLogUpdate {
	_u.mutation.SetOrderID(v)
	return _u
}

// SetNillableOrderID sets the "order_id" field if the given value is not nil.
func (_u *PaymentAuditLogUpdate) SetNillableOrderID(v *string) *PaymentAuditLogUpdate {
	if v != nil {
		_u.SetOrderID(*v)
	}
	return _u
}

// SetAction sets the "action" field.
func (_u *PaymentAuditLogUpdate) SetAction(v string) *PaymentAuditLogUpdate {
	_u.mutation.SetAction(v)
	return _u
}

// SetNillableAction sets the "action" field if the given value is not nil.
func (_u *PaymentAuditLogUpdate) SetNillableAction(v *string) *PaymentAuditLogUpdate {
	if v != nil {
		_u.SetAction(*v)
	}
	return _u
}

// SetDetail sets the "detail" field.
func (_u *PaymentAuditLogUpdate) SetDetail(v string) *PaymentAuditLogUpdate {
	_u.mutation.SetDetail(v)
	return _u
}

// SetNillableDetail sets the "detail" field if the given value is not nil.
func (_u *PaymentAuditLogUpdate) SetNillableDetail(v *string) *PaymentAuditLogUpdate {
	if v != nil {
		_u.SetDetail(*v)
	}
	return _u
}

// SetOperator sets the "operator" field.
func (_u *PaymentAuditLogUpdate) SetOperator(v string) *PaymentAuditLogUpdate {
	_u.mutation.SetOperator(v)
	return _u
}

// SetNillableOperator sets the "operator" field if the given value is not nil.
func (_u *PaymentAuditLogUpdate) SetNillableOperator(v *string) *PaymentAuditLogUpdate {
	if v != nil {
		_u.SetOperator(*v)
	}
	return _u
}
|
||||
|
||||
// Mutation returns the PaymentAuditLogMutation object of the builder.
|
||||
func (_u *PaymentAuditLogUpdate) Mutation() *PaymentAuditLogMutation {
|
||||
return _u.mutation
|
||||
}
|
||||
|
||||
// Save executes the query and returns the number of nodes affected by the update operation.
|
||||
func (_u *PaymentAuditLogUpdate) Save(ctx context.Context) (int, error) {
|
||||
return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks)
|
||||
}
|
||||
|
||||
// SaveX is like Save, but panics if an error occurs.
|
||||
func (_u *PaymentAuditLogUpdate) SaveX(ctx context.Context) int {
|
||||
affected, err := _u.Save(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return affected
|
||||
}
|
||||
|
||||
// Exec executes the query.
|
||||
func (_u *PaymentAuditLogUpdate) Exec(ctx context.Context) error {
|
||||
_, err := _u.Save(ctx)
|
||||
return err
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (_u *PaymentAuditLogUpdate) ExecX(ctx context.Context) {
|
||||
if err := _u.Exec(ctx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// check runs all checks and user-defined validators on the builder.
// Only fields that were actually set on the mutation are validated.
func (_u *PaymentAuditLogUpdate) check() error {
	if v, ok := _u.mutation.OrderID(); ok {
		if err := paymentauditlog.OrderIDValidator(v); err != nil {
			return &ValidationError{Name: "order_id", err: fmt.Errorf(`ent: validator failed for field "PaymentAuditLog.order_id": %w`, err)}
		}
	}
	if v, ok := _u.mutation.Action(); ok {
		if err := paymentauditlog.ActionValidator(v); err != nil {
			return &ValidationError{Name: "action", err: fmt.Errorf(`ent: validator failed for field "PaymentAuditLog.action": %w`, err)}
		}
	}
	if v, ok := _u.mutation.Operator(); ok {
		if err := paymentauditlog.OperatorValidator(v); err != nil {
			return &ValidationError{Name: "operator", err: fmt.Errorf(`ent: validator failed for field "PaymentAuditLog.operator": %w`, err)}
		}
	}
	// Note: "detail" has no validator attached in the schema shown here.
	return nil
}

// sqlSave validates the builder, builds the UPDATE spec from the mutation's
// set fields and predicates, executes it, and returns the affected row count.
func (_u *PaymentAuditLogUpdate) sqlSave(ctx context.Context) (_node int, err error) {
	if err := _u.check(); err != nil {
		return _node, err
	}
	_spec := sqlgraph.NewUpdateSpec(paymentauditlog.Table, paymentauditlog.Columns, sqlgraph.NewFieldSpec(paymentauditlog.FieldID, field.TypeInt64))
	if ps := _u.mutation.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	// Only fields present on the mutation are written.
	if value, ok := _u.mutation.OrderID(); ok {
		_spec.SetField(paymentauditlog.FieldOrderID, field.TypeString, value)
	}
	if value, ok := _u.mutation.Action(); ok {
		_spec.SetField(paymentauditlog.FieldAction, field.TypeString, value)
	}
	if value, ok := _u.mutation.Detail(); ok {
		_spec.SetField(paymentauditlog.FieldDetail, field.TypeString, value)
	}
	if value, ok := _u.mutation.Operator(); ok {
		_spec.SetField(paymentauditlog.FieldOperator, field.TypeString, value)
	}
	if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil {
		// Translate driver-level errors into ent's typed errors.
		if _, ok := err.(*sqlgraph.NotFoundError); ok {
			err = &NotFoundError{paymentauditlog.Label}
		} else if sqlgraph.IsConstraintError(err) {
			err = &ConstraintError{msg: err.Error(), wrap: err}
		}
		return 0, err
	}
	_u.mutation.done = true
	return _node, nil
}
|
||||
|
||||
// PaymentAuditLogUpdateOne is the builder for updating a single PaymentAuditLog entity.
type PaymentAuditLogUpdateOne struct {
	config
	fields   []string                 // columns selected for the returned entity (via Select)
	hooks    []Hook                   // mutation hooks run around Save
	mutation *PaymentAuditLogMutation // accumulated field changes and predicates
}

// SetOrderID sets the "order_id" field.
func (_u *PaymentAuditLogUpdateOne) SetOrderID(v string) *PaymentAuditLogUpdateOne {
	_u.mutation.SetOrderID(v)
	return _u
}

// SetNillableOrderID sets the "order_id" field if the given value is not nil.
func (_u *PaymentAuditLogUpdateOne) SetNillableOrderID(v *string) *PaymentAuditLogUpdateOne {
	if v != nil {
		_u.SetOrderID(*v)
	}
	return _u
}

// SetAction sets the "action" field.
func (_u *PaymentAuditLogUpdateOne) SetAction(v string) *PaymentAuditLogUpdateOne {
	_u.mutation.SetAction(v)
	return _u
}

// SetNillableAction sets the "action" field if the given value is not nil.
func (_u *PaymentAuditLogUpdateOne) SetNillableAction(v *string) *PaymentAuditLogUpdateOne {
	if v != nil {
		_u.SetAction(*v)
	}
	return _u
}

// SetDetail sets the "detail" field.
func (_u *PaymentAuditLogUpdateOne) SetDetail(v string) *PaymentAuditLogUpdateOne {
	_u.mutation.SetDetail(v)
	return _u
}

// SetNillableDetail sets the "detail" field if the given value is not nil.
func (_u *PaymentAuditLogUpdateOne) SetNillableDetail(v *string) *PaymentAuditLogUpdateOne {
	if v != nil {
		_u.SetDetail(*v)
	}
	return _u
}

// SetOperator sets the "operator" field.
func (_u *PaymentAuditLogUpdateOne) SetOperator(v string) *PaymentAuditLogUpdateOne {
	_u.mutation.SetOperator(v)
	return _u
}

// SetNillableOperator sets the "operator" field if the given value is not nil.
func (_u *PaymentAuditLogUpdateOne) SetNillableOperator(v *string) *PaymentAuditLogUpdateOne {
	if v != nil {
		_u.SetOperator(*v)
	}
	return _u
}

// Mutation returns the PaymentAuditLogMutation object of the builder.
func (_u *PaymentAuditLogUpdateOne) Mutation() *PaymentAuditLogMutation {
	return _u.mutation
}

// Where appends a list of predicates to the PaymentAuditLogUpdateOne builder.
func (_u *PaymentAuditLogUpdateOne) Where(ps ...predicate.PaymentAuditLog) *PaymentAuditLogUpdateOne {
	_u.mutation.Where(ps...)
	return _u
}
|
||||
|
||||
// Select allows selecting one or more fields (columns) of the returned entity.
|
||||
// The default is selecting all fields defined in the entity schema.
|
||||
func (_u *PaymentAuditLogUpdateOne) Select(field string, fields ...string) *PaymentAuditLogUpdateOne {
|
||||
_u.fields = append([]string{field}, fields...)
|
||||
return _u
|
||||
}
|
||||
|
||||
// Save executes the query and returns the updated PaymentAuditLog entity.
|
||||
func (_u *PaymentAuditLogUpdateOne) Save(ctx context.Context) (*PaymentAuditLog, error) {
|
||||
return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks)
|
||||
}
|
||||
|
||||
// SaveX is like Save, but panics if an error occurs.
|
||||
func (_u *PaymentAuditLogUpdateOne) SaveX(ctx context.Context) *PaymentAuditLog {
|
||||
node, err := _u.Save(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return node
|
||||
}
|
||||
|
||||
// Exec executes the query on the entity.
|
||||
func (_u *PaymentAuditLogUpdateOne) Exec(ctx context.Context) error {
|
||||
_, err := _u.Save(ctx)
|
||||
return err
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (_u *PaymentAuditLogUpdateOne) ExecX(ctx context.Context) {
|
||||
if err := _u.Exec(ctx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// check runs all checks and user-defined validators on the builder.
// Each string field is validated only when the mutation actually sets it,
// so untouched fields never fail validation on update.
func (_u *PaymentAuditLogUpdateOne) check() error {
	if v, ok := _u.mutation.OrderID(); ok {
		if err := paymentauditlog.OrderIDValidator(v); err != nil {
			return &ValidationError{Name: "order_id", err: fmt.Errorf(`ent: validator failed for field "PaymentAuditLog.order_id": %w`, err)}
		}
	}
	if v, ok := _u.mutation.Action(); ok {
		if err := paymentauditlog.ActionValidator(v); err != nil {
			return &ValidationError{Name: "action", err: fmt.Errorf(`ent: validator failed for field "PaymentAuditLog.action": %w`, err)}
		}
	}
	if v, ok := _u.mutation.Operator(); ok {
		if err := paymentauditlog.OperatorValidator(v); err != nil {
			return &ValidationError{Name: "operator", err: fmt.Errorf(`ent: validator failed for field "PaymentAuditLog.operator": %w`, err)}
		}
	}
	return nil
}
|
||||
|
||||
// sqlSave builds the sqlgraph update spec from the mutation and executes the
// single-row UPDATE. It returns the updated entity, or a NotFoundError /
// ConstraintError translated from the driver error.
func (_u *PaymentAuditLogUpdateOne) sqlSave(ctx context.Context) (_node *PaymentAuditLog, err error) {
	// Run field validators before touching the database.
	if err := _u.check(); err != nil {
		return _node, err
	}
	_spec := sqlgraph.NewUpdateSpec(paymentauditlog.Table, paymentauditlog.Columns, sqlgraph.NewFieldSpec(paymentauditlog.FieldID, field.TypeInt64))
	// UpdateOne requires a concrete entity ID on the mutation.
	id, ok := _u.mutation.ID()
	if !ok {
		return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "PaymentAuditLog.id" for update`)}
	}
	_spec.Node.ID.Value = id
	// Narrow the returned columns if Select() was used; the ID column is
	// always included so the returned entity can be identified.
	if fields := _u.fields; len(fields) > 0 {
		_spec.Node.Columns = make([]string, 0, len(fields))
		_spec.Node.Columns = append(_spec.Node.Columns, paymentauditlog.FieldID)
		for _, f := range fields {
			if !paymentauditlog.ValidColumn(f) {
				return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
			}
			if f != paymentauditlog.FieldID {
				_spec.Node.Columns = append(_spec.Node.Columns, f)
			}
		}
	}
	// Apply any predicates accumulated via Where().
	if ps := _u.mutation.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	// Copy only the fields that were explicitly set on the mutation.
	if value, ok := _u.mutation.OrderID(); ok {
		_spec.SetField(paymentauditlog.FieldOrderID, field.TypeString, value)
	}
	if value, ok := _u.mutation.Action(); ok {
		_spec.SetField(paymentauditlog.FieldAction, field.TypeString, value)
	}
	if value, ok := _u.mutation.Detail(); ok {
		_spec.SetField(paymentauditlog.FieldDetail, field.TypeString, value)
	}
	if value, ok := _u.mutation.Operator(); ok {
		_spec.SetField(paymentauditlog.FieldOperator, field.TypeString, value)
	}
	// The updated row is scanned back into a fresh entity.
	_node = &PaymentAuditLog{config: _u.config}
	_spec.Assign = _node.assignValues
	_spec.ScanValues = _node.scanValues
	if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil {
		// Translate low-level sqlgraph errors into ent's public error types.
		if _, ok := err.(*sqlgraph.NotFoundError); ok {
			err = &NotFoundError{paymentauditlog.Label}
		} else if sqlgraph.IsConstraintError(err) {
			err = &ConstraintError{msg: err.Error(), wrap: err}
		}
		return nil, err
	}
	// Mark the mutation as consumed; further use would be a programming error.
	_u.mutation.done = true
	return _node, nil
}
|
||||
619
backend/ent/paymentorder.go
Normal file
@@ -0,0 +1,619 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package ent
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"entgo.io/ent"
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"github.com/Wei-Shaw/sub2api/ent/paymentorder"
|
||||
"github.com/Wei-Shaw/sub2api/ent/user"
|
||||
)
|
||||
|
||||
// PaymentOrder is the model entity for the PaymentOrder schema.
type PaymentOrder struct {
	// config carries the driver/hooks; excluded from JSON output.
	config `json:"-"`
	// ID of the ent.
	ID int64 `json:"id,omitempty"`
	// UserID holds the value of the "user_id" field.
	UserID int64 `json:"user_id,omitempty"`
	// UserEmail holds the value of the "user_email" field.
	UserEmail string `json:"user_email,omitempty"`
	// UserName holds the value of the "user_name" field.
	UserName string `json:"user_name,omitempty"`
	// UserNotes holds the value of the "user_notes" field.
	UserNotes *string `json:"user_notes,omitempty"`
	// Amount holds the value of the "amount" field.
	Amount float64 `json:"amount,omitempty"`
	// PayAmount holds the value of the "pay_amount" field.
	PayAmount float64 `json:"pay_amount,omitempty"`
	// FeeRate holds the value of the "fee_rate" field.
	FeeRate float64 `json:"fee_rate,omitempty"`
	// RechargeCode holds the value of the "recharge_code" field.
	RechargeCode string `json:"recharge_code,omitempty"`
	// OutTradeNo holds the value of the "out_trade_no" field.
	OutTradeNo string `json:"out_trade_no,omitempty"`
	// PaymentType holds the value of the "payment_type" field.
	PaymentType string `json:"payment_type,omitempty"`
	// PaymentTradeNo holds the value of the "payment_trade_no" field.
	PaymentTradeNo string `json:"payment_trade_no,omitempty"`
	// PayURL holds the value of the "pay_url" field.
	PayURL *string `json:"pay_url,omitempty"`
	// QrCode holds the value of the "qr_code" field.
	QrCode *string `json:"qr_code,omitempty"`
	// QrCodeImg holds the value of the "qr_code_img" field.
	QrCodeImg *string `json:"qr_code_img,omitempty"`
	// OrderType holds the value of the "order_type" field.
	OrderType string `json:"order_type,omitempty"`
	// PlanID holds the value of the "plan_id" field.
	PlanID *int64 `json:"plan_id,omitempty"`
	// SubscriptionGroupID holds the value of the "subscription_group_id" field.
	SubscriptionGroupID *int64 `json:"subscription_group_id,omitempty"`
	// SubscriptionDays holds the value of the "subscription_days" field.
	SubscriptionDays *int `json:"subscription_days,omitempty"`
	// ProviderInstanceID holds the value of the "provider_instance_id" field.
	ProviderInstanceID *string `json:"provider_instance_id,omitempty"`
	// ProviderKey holds the value of the "provider_key" field.
	ProviderKey *string `json:"provider_key,omitempty"`
	// ProviderSnapshot holds the value of the "provider_snapshot" field.
	// Stored as a JSON column; unmarshaled in assignValues.
	ProviderSnapshot map[string]interface{} `json:"provider_snapshot,omitempty"`
	// Status holds the value of the "status" field.
	Status string `json:"status,omitempty"`
	// RefundAmount holds the value of the "refund_amount" field.
	RefundAmount float64 `json:"refund_amount,omitempty"`
	// RefundReason holds the value of the "refund_reason" field.
	RefundReason *string `json:"refund_reason,omitempty"`
	// RefundAt holds the value of the "refund_at" field.
	RefundAt *time.Time `json:"refund_at,omitempty"`
	// ForceRefund holds the value of the "force_refund" field.
	ForceRefund bool `json:"force_refund,omitempty"`
	// RefundRequestedAt holds the value of the "refund_requested_at" field.
	RefundRequestedAt *time.Time `json:"refund_requested_at,omitempty"`
	// RefundRequestReason holds the value of the "refund_request_reason" field.
	RefundRequestReason *string `json:"refund_request_reason,omitempty"`
	// RefundRequestedBy holds the value of the "refund_requested_by" field.
	RefundRequestedBy *string `json:"refund_requested_by,omitempty"`
	// ExpiresAt holds the value of the "expires_at" field.
	ExpiresAt time.Time `json:"expires_at,omitempty"`
	// PaidAt holds the value of the "paid_at" field.
	PaidAt *time.Time `json:"paid_at,omitempty"`
	// CompletedAt holds the value of the "completed_at" field.
	CompletedAt *time.Time `json:"completed_at,omitempty"`
	// FailedAt holds the value of the "failed_at" field.
	FailedAt *time.Time `json:"failed_at,omitempty"`
	// FailedReason holds the value of the "failed_reason" field.
	FailedReason *string `json:"failed_reason,omitempty"`
	// ClientIP holds the value of the "client_ip" field.
	ClientIP string `json:"client_ip,omitempty"`
	// SrcHost holds the value of the "src_host" field.
	SrcHost string `json:"src_host,omitempty"`
	// SrcURL holds the value of the "src_url" field.
	SrcURL *string `json:"src_url,omitempty"`
	// CreatedAt holds the value of the "created_at" field.
	CreatedAt time.Time `json:"created_at,omitempty"`
	// UpdatedAt holds the value of the "updated_at" field.
	UpdatedAt time.Time `json:"updated_at,omitempty"`
	// Edges holds the relations/edges for other nodes in the graph.
	// The values are being populated by the PaymentOrderQuery when eager-loading is set.
	Edges PaymentOrderEdges `json:"edges"`
	// selectValues stores values selected outside the schema (modifiers, order terms).
	selectValues sql.SelectValues
}
|
||||
|
||||
// PaymentOrderEdges holds the relations/edges for other nodes in the graph.
type PaymentOrderEdges struct {
	// User holds the value of the user edge.
	User *User `json:"user,omitempty"`
	// loadedTypes holds the information for reporting if a
	// type was loaded (or requested) in eager-loading or not.
	// Index 0 corresponds to the "user" edge.
	loadedTypes [1]bool
}
|
||||
|
||||
// UserOrErr returns the User value or an error if the edge
|
||||
// was not loaded in eager-loading, or loaded but was not found.
|
||||
func (e PaymentOrderEdges) UserOrErr() (*User, error) {
|
||||
if e.User != nil {
|
||||
return e.User, nil
|
||||
} else if e.loadedTypes[0] {
|
||||
return nil, &NotFoundError{label: user.Label}
|
||||
}
|
||||
return nil, &NotLoadedError{edge: "user"}
|
||||
}
|
||||
|
||||
// scanValues returns the types for scanning values from sql.Rows.
// Each column is mapped to a pointer of the appropriate nullable SQL type so
// database NULLs can be distinguished from zero values in assignValues.
func (*PaymentOrder) scanValues(columns []string) ([]any, error) {
	values := make([]any, len(columns))
	for i := range columns {
		switch columns[i] {
		case paymentorder.FieldProviderSnapshot:
			// JSON column: scanned as raw bytes and unmarshaled later.
			values[i] = new([]byte)
		case paymentorder.FieldForceRefund:
			values[i] = new(sql.NullBool)
		case paymentorder.FieldAmount, paymentorder.FieldPayAmount, paymentorder.FieldFeeRate, paymentorder.FieldRefundAmount:
			values[i] = new(sql.NullFloat64)
		case paymentorder.FieldID, paymentorder.FieldUserID, paymentorder.FieldPlanID, paymentorder.FieldSubscriptionGroupID, paymentorder.FieldSubscriptionDays:
			values[i] = new(sql.NullInt64)
		case paymentorder.FieldUserEmail, paymentorder.FieldUserName, paymentorder.FieldUserNotes, paymentorder.FieldRechargeCode, paymentorder.FieldOutTradeNo, paymentorder.FieldPaymentType, paymentorder.FieldPaymentTradeNo, paymentorder.FieldPayURL, paymentorder.FieldQrCode, paymentorder.FieldQrCodeImg, paymentorder.FieldOrderType, paymentorder.FieldProviderInstanceID, paymentorder.FieldProviderKey, paymentorder.FieldStatus, paymentorder.FieldRefundReason, paymentorder.FieldRefundRequestReason, paymentorder.FieldRefundRequestedBy, paymentorder.FieldFailedReason, paymentorder.FieldClientIP, paymentorder.FieldSrcHost, paymentorder.FieldSrcURL:
			values[i] = new(sql.NullString)
		case paymentorder.FieldRefundAt, paymentorder.FieldRefundRequestedAt, paymentorder.FieldExpiresAt, paymentorder.FieldPaidAt, paymentorder.FieldCompletedAt, paymentorder.FieldFailedAt, paymentorder.FieldCreatedAt, paymentorder.FieldUpdatedAt:
			values[i] = new(sql.NullTime)
		default:
			// Columns outside the schema (e.g. selected via modifiers) are
			// captured as unknown and routed to selectValues in assignValues.
			values[i] = new(sql.UnknownType)
		}
	}
	return values, nil
}
|
||||
|
||||
// assignValues assigns the values that were returned from sql.Rows (after scanning)
// to the PaymentOrder fields. Nullable columns are assigned only when valid;
// optional (pointer) fields are allocated on demand.
func (_m *PaymentOrder) assignValues(columns []string, values []any) error {
	if m, n := len(values), len(columns); m < n {
		return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
	}
	for i := range columns {
		switch columns[i] {
		case paymentorder.FieldID:
			value, ok := values[i].(*sql.NullInt64)
			if !ok {
				return fmt.Errorf("unexpected type %T for field id", value)
			}
			_m.ID = int64(value.Int64)
		case paymentorder.FieldUserID:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field user_id", values[i])
			} else if value.Valid {
				_m.UserID = value.Int64
			}
		case paymentorder.FieldUserEmail:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field user_email", values[i])
			} else if value.Valid {
				_m.UserEmail = value.String
			}
		case paymentorder.FieldUserName:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field user_name", values[i])
			} else if value.Valid {
				_m.UserName = value.String
			}
		case paymentorder.FieldUserNotes:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field user_notes", values[i])
			} else if value.Valid {
				// Optional field: allocate the pointer only when non-NULL.
				_m.UserNotes = new(string)
				*_m.UserNotes = value.String
			}
		case paymentorder.FieldAmount:
			if value, ok := values[i].(*sql.NullFloat64); !ok {
				return fmt.Errorf("unexpected type %T for field amount", values[i])
			} else if value.Valid {
				_m.Amount = value.Float64
			}
		case paymentorder.FieldPayAmount:
			if value, ok := values[i].(*sql.NullFloat64); !ok {
				return fmt.Errorf("unexpected type %T for field pay_amount", values[i])
			} else if value.Valid {
				_m.PayAmount = value.Float64
			}
		case paymentorder.FieldFeeRate:
			if value, ok := values[i].(*sql.NullFloat64); !ok {
				return fmt.Errorf("unexpected type %T for field fee_rate", values[i])
			} else if value.Valid {
				_m.FeeRate = value.Float64
			}
		case paymentorder.FieldRechargeCode:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field recharge_code", values[i])
			} else if value.Valid {
				_m.RechargeCode = value.String
			}
		case paymentorder.FieldOutTradeNo:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field out_trade_no", values[i])
			} else if value.Valid {
				_m.OutTradeNo = value.String
			}
		case paymentorder.FieldPaymentType:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field payment_type", values[i])
			} else if value.Valid {
				_m.PaymentType = value.String
			}
		case paymentorder.FieldPaymentTradeNo:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field payment_trade_no", values[i])
			} else if value.Valid {
				_m.PaymentTradeNo = value.String
			}
		case paymentorder.FieldPayURL:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field pay_url", values[i])
			} else if value.Valid {
				_m.PayURL = new(string)
				*_m.PayURL = value.String
			}
		case paymentorder.FieldQrCode:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field qr_code", values[i])
			} else if value.Valid {
				_m.QrCode = new(string)
				*_m.QrCode = value.String
			}
		case paymentorder.FieldQrCodeImg:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field qr_code_img", values[i])
			} else if value.Valid {
				_m.QrCodeImg = new(string)
				*_m.QrCodeImg = value.String
			}
		case paymentorder.FieldOrderType:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field order_type", values[i])
			} else if value.Valid {
				_m.OrderType = value.String
			}
		case paymentorder.FieldPlanID:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field plan_id", values[i])
			} else if value.Valid {
				_m.PlanID = new(int64)
				*_m.PlanID = value.Int64
			}
		case paymentorder.FieldSubscriptionGroupID:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field subscription_group_id", values[i])
			} else if value.Valid {
				_m.SubscriptionGroupID = new(int64)
				*_m.SubscriptionGroupID = value.Int64
			}
		case paymentorder.FieldSubscriptionDays:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field subscription_days", values[i])
			} else if value.Valid {
				// Narrow the scanned int64 to the schema's int type.
				_m.SubscriptionDays = new(int)
				*_m.SubscriptionDays = int(value.Int64)
			}
		case paymentorder.FieldProviderInstanceID:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field provider_instance_id", values[i])
			} else if value.Valid {
				_m.ProviderInstanceID = new(string)
				*_m.ProviderInstanceID = value.String
			}
		case paymentorder.FieldProviderKey:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field provider_key", values[i])
			} else if value.Valid {
				_m.ProviderKey = new(string)
				*_m.ProviderKey = value.String
			}
		case paymentorder.FieldProviderSnapshot:
			// JSON column: decode the raw bytes produced by scanValues.
			if value, ok := values[i].(*[]byte); !ok {
				return fmt.Errorf("unexpected type %T for field provider_snapshot", values[i])
			} else if value != nil && len(*value) > 0 {
				if err := json.Unmarshal(*value, &_m.ProviderSnapshot); err != nil {
					return fmt.Errorf("unmarshal field provider_snapshot: %w", err)
				}
			}
		case paymentorder.FieldStatus:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field status", values[i])
			} else if value.Valid {
				_m.Status = value.String
			}
		case paymentorder.FieldRefundAmount:
			if value, ok := values[i].(*sql.NullFloat64); !ok {
				return fmt.Errorf("unexpected type %T for field refund_amount", values[i])
			} else if value.Valid {
				_m.RefundAmount = value.Float64
			}
		case paymentorder.FieldRefundReason:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field refund_reason", values[i])
			} else if value.Valid {
				_m.RefundReason = new(string)
				*_m.RefundReason = value.String
			}
		case paymentorder.FieldRefundAt:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field refund_at", values[i])
			} else if value.Valid {
				_m.RefundAt = new(time.Time)
				*_m.RefundAt = value.Time
			}
		case paymentorder.FieldForceRefund:
			if value, ok := values[i].(*sql.NullBool); !ok {
				return fmt.Errorf("unexpected type %T for field force_refund", values[i])
			} else if value.Valid {
				_m.ForceRefund = value.Bool
			}
		case paymentorder.FieldRefundRequestedAt:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field refund_requested_at", values[i])
			} else if value.Valid {
				_m.RefundRequestedAt = new(time.Time)
				*_m.RefundRequestedAt = value.Time
			}
		case paymentorder.FieldRefundRequestReason:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field refund_request_reason", values[i])
			} else if value.Valid {
				_m.RefundRequestReason = new(string)
				*_m.RefundRequestReason = value.String
			}
		case paymentorder.FieldRefundRequestedBy:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field refund_requested_by", values[i])
			} else if value.Valid {
				_m.RefundRequestedBy = new(string)
				*_m.RefundRequestedBy = value.String
			}
		case paymentorder.FieldExpiresAt:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field expires_at", values[i])
			} else if value.Valid {
				_m.ExpiresAt = value.Time
			}
		case paymentorder.FieldPaidAt:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field paid_at", values[i])
			} else if value.Valid {
				_m.PaidAt = new(time.Time)
				*_m.PaidAt = value.Time
			}
		case paymentorder.FieldCompletedAt:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field completed_at", values[i])
			} else if value.Valid {
				_m.CompletedAt = new(time.Time)
				*_m.CompletedAt = value.Time
			}
		case paymentorder.FieldFailedAt:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field failed_at", values[i])
			} else if value.Valid {
				_m.FailedAt = new(time.Time)
				*_m.FailedAt = value.Time
			}
		case paymentorder.FieldFailedReason:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field failed_reason", values[i])
			} else if value.Valid {
				_m.FailedReason = new(string)
				*_m.FailedReason = value.String
			}
		case paymentorder.FieldClientIP:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field client_ip", values[i])
			} else if value.Valid {
				_m.ClientIP = value.String
			}
		case paymentorder.FieldSrcHost:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field src_host", values[i])
			} else if value.Valid {
				_m.SrcHost = value.String
			}
		case paymentorder.FieldSrcURL:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field src_url", values[i])
			} else if value.Valid {
				_m.SrcURL = new(string)
				*_m.SrcURL = value.String
			}
		case paymentorder.FieldCreatedAt:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field created_at", values[i])
			} else if value.Valid {
				_m.CreatedAt = value.Time
			}
		case paymentorder.FieldUpdatedAt:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field updated_at", values[i])
			} else if value.Valid {
				_m.UpdatedAt = value.Time
			}
		default:
			// Non-schema columns (modifiers, order terms) are retrievable via Value().
			_m.selectValues.Set(columns[i], values[i])
		}
	}
	return nil
}
|
||||
|
||||
// Value returns the ent.Value that was dynamically selected and assigned to the PaymentOrder.
// This includes values selected through modifiers, order, etc.
func (_m *PaymentOrder) Value(name string) (ent.Value, error) {
	// selectValues is populated by assignValues for non-schema columns.
	return _m.selectValues.Get(name)
}
|
||||
|
||||
// QueryUser queries the "user" edge of the PaymentOrder entity.
func (_m *PaymentOrder) QueryUser() *UserQuery {
	return NewPaymentOrderClient(_m.config).QueryUser(_m)
}
|
||||
|
||||
// Update returns a builder for updating this PaymentOrder.
// Note that you need to call PaymentOrder.Unwrap() before calling this method if this PaymentOrder
// was returned from a transaction, and the transaction was committed or rolled back.
func (_m *PaymentOrder) Update() *PaymentOrderUpdateOne {
	return NewPaymentOrderClient(_m.config).UpdateOne(_m)
}
|
||||
|
||||
// Unwrap unwraps the PaymentOrder entity that was returned from a transaction after it was closed,
|
||||
// so that all future queries will be executed through the driver which created the transaction.
|
||||
func (_m *PaymentOrder) Unwrap() *PaymentOrder {
|
||||
_tx, ok := _m.config.driver.(*txDriver)
|
||||
if !ok {
|
||||
panic("ent: PaymentOrder is not a transactional entity")
|
||||
}
|
||||
_m.config.driver = _tx.drv
|
||||
return _m
|
||||
}
|
||||
|
||||
// String implements the fmt.Stringer.
// Optional (pointer) fields are printed only when set; their separator is
// always written, so unset fields appear as consecutive commas.
func (_m *PaymentOrder) String() string {
	var builder strings.Builder
	builder.WriteString("PaymentOrder(")
	builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID))
	builder.WriteString("user_id=")
	builder.WriteString(fmt.Sprintf("%v", _m.UserID))
	builder.WriteString(", ")
	builder.WriteString("user_email=")
	builder.WriteString(_m.UserEmail)
	builder.WriteString(", ")
	builder.WriteString("user_name=")
	builder.WriteString(_m.UserName)
	builder.WriteString(", ")
	if v := _m.UserNotes; v != nil {
		builder.WriteString("user_notes=")
		builder.WriteString(*v)
	}
	builder.WriteString(", ")
	builder.WriteString("amount=")
	builder.WriteString(fmt.Sprintf("%v", _m.Amount))
	builder.WriteString(", ")
	builder.WriteString("pay_amount=")
	builder.WriteString(fmt.Sprintf("%v", _m.PayAmount))
	builder.WriteString(", ")
	builder.WriteString("fee_rate=")
	builder.WriteString(fmt.Sprintf("%v", _m.FeeRate))
	builder.WriteString(", ")
	builder.WriteString("recharge_code=")
	builder.WriteString(_m.RechargeCode)
	builder.WriteString(", ")
	builder.WriteString("out_trade_no=")
	builder.WriteString(_m.OutTradeNo)
	builder.WriteString(", ")
	builder.WriteString("payment_type=")
	builder.WriteString(_m.PaymentType)
	builder.WriteString(", ")
	builder.WriteString("payment_trade_no=")
	builder.WriteString(_m.PaymentTradeNo)
	builder.WriteString(", ")
	if v := _m.PayURL; v != nil {
		builder.WriteString("pay_url=")
		builder.WriteString(*v)
	}
	builder.WriteString(", ")
	if v := _m.QrCode; v != nil {
		builder.WriteString("qr_code=")
		builder.WriteString(*v)
	}
	builder.WriteString(", ")
	if v := _m.QrCodeImg; v != nil {
		builder.WriteString("qr_code_img=")
		builder.WriteString(*v)
	}
	builder.WriteString(", ")
	builder.WriteString("order_type=")
	builder.WriteString(_m.OrderType)
	builder.WriteString(", ")
	if v := _m.PlanID; v != nil {
		builder.WriteString("plan_id=")
		builder.WriteString(fmt.Sprintf("%v", *v))
	}
	builder.WriteString(", ")
	if v := _m.SubscriptionGroupID; v != nil {
		builder.WriteString("subscription_group_id=")
		builder.WriteString(fmt.Sprintf("%v", *v))
	}
	builder.WriteString(", ")
	if v := _m.SubscriptionDays; v != nil {
		builder.WriteString("subscription_days=")
		builder.WriteString(fmt.Sprintf("%v", *v))
	}
	builder.WriteString(", ")
	if v := _m.ProviderInstanceID; v != nil {
		builder.WriteString("provider_instance_id=")
		builder.WriteString(*v)
	}
	builder.WriteString(", ")
	if v := _m.ProviderKey; v != nil {
		builder.WriteString("provider_key=")
		builder.WriteString(*v)
	}
	builder.WriteString(", ")
	builder.WriteString("provider_snapshot=")
	builder.WriteString(fmt.Sprintf("%v", _m.ProviderSnapshot))
	builder.WriteString(", ")
	builder.WriteString("status=")
	builder.WriteString(_m.Status)
	builder.WriteString(", ")
	builder.WriteString("refund_amount=")
	builder.WriteString(fmt.Sprintf("%v", _m.RefundAmount))
	builder.WriteString(", ")
	if v := _m.RefundReason; v != nil {
		builder.WriteString("refund_reason=")
		builder.WriteString(*v)
	}
	builder.WriteString(", ")
	if v := _m.RefundAt; v != nil {
		builder.WriteString("refund_at=")
		builder.WriteString(v.Format(time.ANSIC))
	}
	builder.WriteString(", ")
	builder.WriteString("force_refund=")
	builder.WriteString(fmt.Sprintf("%v", _m.ForceRefund))
	builder.WriteString(", ")
	if v := _m.RefundRequestedAt; v != nil {
		builder.WriteString("refund_requested_at=")
		builder.WriteString(v.Format(time.ANSIC))
	}
	builder.WriteString(", ")
	if v := _m.RefundRequestReason; v != nil {
		builder.WriteString("refund_request_reason=")
		builder.WriteString(*v)
	}
	builder.WriteString(", ")
	if v := _m.RefundRequestedBy; v != nil {
		builder.WriteString("refund_requested_by=")
		builder.WriteString(*v)
	}
	builder.WriteString(", ")
	builder.WriteString("expires_at=")
	builder.WriteString(_m.ExpiresAt.Format(time.ANSIC))
	builder.WriteString(", ")
	if v := _m.PaidAt; v != nil {
		builder.WriteString("paid_at=")
		builder.WriteString(v.Format(time.ANSIC))
	}
	builder.WriteString(", ")
	if v := _m.CompletedAt; v != nil {
		builder.WriteString("completed_at=")
		builder.WriteString(v.Format(time.ANSIC))
	}
	builder.WriteString(", ")
	if v := _m.FailedAt; v != nil {
		builder.WriteString("failed_at=")
		builder.WriteString(v.Format(time.ANSIC))
	}
	builder.WriteString(", ")
	if v := _m.FailedReason; v != nil {
		builder.WriteString("failed_reason=")
		builder.WriteString(*v)
	}
	builder.WriteString(", ")
	builder.WriteString("client_ip=")
	builder.WriteString(_m.ClientIP)
	builder.WriteString(", ")
	builder.WriteString("src_host=")
	builder.WriteString(_m.SrcHost)
	builder.WriteString(", ")
	if v := _m.SrcURL; v != nil {
		builder.WriteString("src_url=")
		builder.WriteString(*v)
	}
	builder.WriteString(", ")
	builder.WriteString("created_at=")
	builder.WriteString(_m.CreatedAt.Format(time.ANSIC))
	builder.WriteString(", ")
	builder.WriteString("updated_at=")
	builder.WriteString(_m.UpdatedAt.Format(time.ANSIC))
	builder.WriteByte(')')
	return builder.String()
}
|
||||
|
||||
// PaymentOrders is a parsable slice of PaymentOrder.
type PaymentOrders []*PaymentOrder
|
||||
419
backend/ent/paymentorder/paymentorder.go
Normal file
@@ -0,0 +1,419 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package paymentorder
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
)
|
||||
|
||||
// Database identifiers for the PaymentOrder entity (generated from the schema).
const (
	// Label holds the string label denoting the paymentorder type in the database.
	Label = "payment_order"
	// FieldID holds the string denoting the id field in the database.
	FieldID = "id"
	// FieldUserID holds the string denoting the user_id field in the database.
	FieldUserID = "user_id"
	// FieldUserEmail holds the string denoting the user_email field in the database.
	FieldUserEmail = "user_email"
	// FieldUserName holds the string denoting the user_name field in the database.
	FieldUserName = "user_name"
	// FieldUserNotes holds the string denoting the user_notes field in the database.
	FieldUserNotes = "user_notes"
	// FieldAmount holds the string denoting the amount field in the database.
	FieldAmount = "amount"
	// FieldPayAmount holds the string denoting the pay_amount field in the database.
	FieldPayAmount = "pay_amount"
	// FieldFeeRate holds the string denoting the fee_rate field in the database.
	FieldFeeRate = "fee_rate"
	// FieldRechargeCode holds the string denoting the recharge_code field in the database.
	FieldRechargeCode = "recharge_code"
	// FieldOutTradeNo holds the string denoting the out_trade_no field in the database.
	FieldOutTradeNo = "out_trade_no"
	// FieldPaymentType holds the string denoting the payment_type field in the database.
	FieldPaymentType = "payment_type"
	// FieldPaymentTradeNo holds the string denoting the payment_trade_no field in the database.
	FieldPaymentTradeNo = "payment_trade_no"
	// FieldPayURL holds the string denoting the pay_url field in the database.
	FieldPayURL = "pay_url"
	// FieldQrCode holds the string denoting the qr_code field in the database.
	FieldQrCode = "qr_code"
	// FieldQrCodeImg holds the string denoting the qr_code_img field in the database.
	FieldQrCodeImg = "qr_code_img"
	// FieldOrderType holds the string denoting the order_type field in the database.
	FieldOrderType = "order_type"
	// FieldPlanID holds the string denoting the plan_id field in the database.
	FieldPlanID = "plan_id"
	// FieldSubscriptionGroupID holds the string denoting the subscription_group_id field in the database.
	FieldSubscriptionGroupID = "subscription_group_id"
	// FieldSubscriptionDays holds the string denoting the subscription_days field in the database.
	FieldSubscriptionDays = "subscription_days"
	// FieldProviderInstanceID holds the string denoting the provider_instance_id field in the database.
	FieldProviderInstanceID = "provider_instance_id"
	// FieldProviderKey holds the string denoting the provider_key field in the database.
	FieldProviderKey = "provider_key"
	// FieldProviderSnapshot holds the string denoting the provider_snapshot field in the database.
	FieldProviderSnapshot = "provider_snapshot"
	// FieldStatus holds the string denoting the status field in the database.
	FieldStatus = "status"
	// FieldRefundAmount holds the string denoting the refund_amount field in the database.
	FieldRefundAmount = "refund_amount"
	// FieldRefundReason holds the string denoting the refund_reason field in the database.
	FieldRefundReason = "refund_reason"
	// FieldRefundAt holds the string denoting the refund_at field in the database.
	FieldRefundAt = "refund_at"
	// FieldForceRefund holds the string denoting the force_refund field in the database.
	FieldForceRefund = "force_refund"
	// FieldRefundRequestedAt holds the string denoting the refund_requested_at field in the database.
	FieldRefundRequestedAt = "refund_requested_at"
	// FieldRefundRequestReason holds the string denoting the refund_request_reason field in the database.
	FieldRefundRequestReason = "refund_request_reason"
	// FieldRefundRequestedBy holds the string denoting the refund_requested_by field in the database.
	FieldRefundRequestedBy = "refund_requested_by"
	// FieldExpiresAt holds the string denoting the expires_at field in the database.
	FieldExpiresAt = "expires_at"
	// FieldPaidAt holds the string denoting the paid_at field in the database.
	FieldPaidAt = "paid_at"
	// FieldCompletedAt holds the string denoting the completed_at field in the database.
	FieldCompletedAt = "completed_at"
	// FieldFailedAt holds the string denoting the failed_at field in the database.
	FieldFailedAt = "failed_at"
	// FieldFailedReason holds the string denoting the failed_reason field in the database.
	FieldFailedReason = "failed_reason"
	// FieldClientIP holds the string denoting the client_ip field in the database.
	FieldClientIP = "client_ip"
	// FieldSrcHost holds the string denoting the src_host field in the database.
	FieldSrcHost = "src_host"
	// FieldSrcURL holds the string denoting the src_url field in the database.
	FieldSrcURL = "src_url"
	// FieldCreatedAt holds the string denoting the created_at field in the database.
	FieldCreatedAt = "created_at"
	// FieldUpdatedAt holds the string denoting the updated_at field in the database.
	FieldUpdatedAt = "updated_at"
	// EdgeUser holds the string denoting the user edge name in mutations.
	EdgeUser = "user"
	// Table holds the table name of the paymentorder in the database.
	Table = "payment_orders"
	// UserTable is the table that holds the user relation/edge.
	UserTable = "payment_orders"
	// UserInverseTable is the table name for the User entity.
	// It exists in this package in order to avoid circular dependency with the "user" package.
	UserInverseTable = "users"
	// UserColumn is the table column denoting the user relation/edge.
	UserColumn = "user_id"
)
|
||||
|
||||
// Columns holds all SQL columns for paymentorder fields.
var Columns = []string{
	FieldID,
	FieldUserID,
	FieldUserEmail,
	FieldUserName,
	FieldUserNotes,
	FieldAmount,
	FieldPayAmount,
	FieldFeeRate,
	FieldRechargeCode,
	FieldOutTradeNo,
	FieldPaymentType,
	FieldPaymentTradeNo,
	FieldPayURL,
	FieldQrCode,
	FieldQrCodeImg,
	FieldOrderType,
	FieldPlanID,
	FieldSubscriptionGroupID,
	FieldSubscriptionDays,
	FieldProviderInstanceID,
	FieldProviderKey,
	FieldProviderSnapshot,
	FieldStatus,
	FieldRefundAmount,
	FieldRefundReason,
	FieldRefundAt,
	FieldForceRefund,
	FieldRefundRequestedAt,
	FieldRefundRequestReason,
	FieldRefundRequestedBy,
	FieldExpiresAt,
	FieldPaidAt,
	FieldCompletedAt,
	FieldFailedAt,
	FieldFailedReason,
	FieldClientIP,
	FieldSrcHost,
	FieldSrcURL,
	FieldCreatedAt,
	FieldUpdatedAt,
}
|
||||
|
||||
// ValidColumn reports if the column name is valid (part of the table columns).
func ValidColumn(column string) bool {
	// Linear scan over the fixed, small Columns list.
	for i := range Columns {
		if column == Columns[i] {
			return true
		}
	}
	return false
}
|
||||
|
||||
// Hooks installed by the runtime package: per-field validators and
// creation/update defaults wired in from the schema definition.
var (
	// UserEmailValidator is a validator for the "user_email" field. It is called by the builders before save.
	UserEmailValidator func(string) error
	// UserNameValidator is a validator for the "user_name" field. It is called by the builders before save.
	UserNameValidator func(string) error
	// DefaultFeeRate holds the default value on creation for the "fee_rate" field.
	DefaultFeeRate float64
	// RechargeCodeValidator is a validator for the "recharge_code" field. It is called by the builders before save.
	RechargeCodeValidator func(string) error
	// DefaultOutTradeNo holds the default value on creation for the "out_trade_no" field.
	DefaultOutTradeNo string
	// OutTradeNoValidator is a validator for the "out_trade_no" field. It is called by the builders before save.
	OutTradeNoValidator func(string) error
	// PaymentTypeValidator is a validator for the "payment_type" field. It is called by the builders before save.
	PaymentTypeValidator func(string) error
	// PaymentTradeNoValidator is a validator for the "payment_trade_no" field. It is called by the builders before save.
	PaymentTradeNoValidator func(string) error
	// DefaultOrderType holds the default value on creation for the "order_type" field.
	DefaultOrderType string
	// OrderTypeValidator is a validator for the "order_type" field. It is called by the builders before save.
	OrderTypeValidator func(string) error
	// ProviderInstanceIDValidator is a validator for the "provider_instance_id" field. It is called by the builders before save.
	ProviderInstanceIDValidator func(string) error
	// ProviderKeyValidator is a validator for the "provider_key" field. It is called by the builders before save.
	ProviderKeyValidator func(string) error
	// DefaultStatus holds the default value on creation for the "status" field.
	DefaultStatus string
	// StatusValidator is a validator for the "status" field. It is called by the builders before save.
	StatusValidator func(string) error
	// DefaultRefundAmount holds the default value on creation for the "refund_amount" field.
	DefaultRefundAmount float64
	// DefaultForceRefund holds the default value on creation for the "force_refund" field.
	DefaultForceRefund bool
	// RefundRequestedByValidator is a validator for the "refund_requested_by" field. It is called by the builders before save.
	RefundRequestedByValidator func(string) error
	// ClientIPValidator is a validator for the "client_ip" field. It is called by the builders before save.
	ClientIPValidator func(string) error
	// SrcHostValidator is a validator for the "src_host" field. It is called by the builders before save.
	SrcHostValidator func(string) error
	// DefaultCreatedAt holds the default value on creation for the "created_at" field.
	DefaultCreatedAt func() time.Time
	// DefaultUpdatedAt holds the default value on creation for the "updated_at" field.
	DefaultUpdatedAt func() time.Time
	// UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field.
	UpdateDefaultUpdatedAt func() time.Time
)
|
||||
|
||||
// OrderOption defines the ordering options for the PaymentOrder queries.
type OrderOption func(*sql.Selector)
|
||||
|
||||
// Per-column OrderOption constructors: each orders query results by a single
// PaymentOrder column via sql.OrderByField.

// ByID orders the results by the id field.
func ByID(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldID, opts...).ToFunc()
}

// ByUserID orders the results by the user_id field.
func ByUserID(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldUserID, opts...).ToFunc()
}

// ByUserEmail orders the results by the user_email field.
func ByUserEmail(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldUserEmail, opts...).ToFunc()
}

// ByUserName orders the results by the user_name field.
func ByUserName(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldUserName, opts...).ToFunc()
}

// ByUserNotes orders the results by the user_notes field.
func ByUserNotes(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldUserNotes, opts...).ToFunc()
}

// ByAmount orders the results by the amount field.
func ByAmount(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldAmount, opts...).ToFunc()
}

// ByPayAmount orders the results by the pay_amount field.
func ByPayAmount(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldPayAmount, opts...).ToFunc()
}

// ByFeeRate orders the results by the fee_rate field.
func ByFeeRate(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldFeeRate, opts...).ToFunc()
}

// ByRechargeCode orders the results by the recharge_code field.
func ByRechargeCode(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldRechargeCode, opts...).ToFunc()
}

// ByOutTradeNo orders the results by the out_trade_no field.
func ByOutTradeNo(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldOutTradeNo, opts...).ToFunc()
}

// ByPaymentType orders the results by the payment_type field.
func ByPaymentType(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldPaymentType, opts...).ToFunc()
}

// ByPaymentTradeNo orders the results by the payment_trade_no field.
func ByPaymentTradeNo(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldPaymentTradeNo, opts...).ToFunc()
}

// ByPayURL orders the results by the pay_url field.
func ByPayURL(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldPayURL, opts...).ToFunc()
}

// ByQrCode orders the results by the qr_code field.
func ByQrCode(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldQrCode, opts...).ToFunc()
}

// ByQrCodeImg orders the results by the qr_code_img field.
func ByQrCodeImg(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldQrCodeImg, opts...).ToFunc()
}

// ByOrderType orders the results by the order_type field.
func ByOrderType(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldOrderType, opts...).ToFunc()
}

// ByPlanID orders the results by the plan_id field.
func ByPlanID(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldPlanID, opts...).ToFunc()
}

// BySubscriptionGroupID orders the results by the subscription_group_id field.
func BySubscriptionGroupID(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldSubscriptionGroupID, opts...).ToFunc()
}

// BySubscriptionDays orders the results by the subscription_days field.
func BySubscriptionDays(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldSubscriptionDays, opts...).ToFunc()
}

// ByProviderInstanceID orders the results by the provider_instance_id field.
func ByProviderInstanceID(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldProviderInstanceID, opts...).ToFunc()
}

// ByProviderKey orders the results by the provider_key field.
func ByProviderKey(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldProviderKey, opts...).ToFunc()
}

// ByStatus orders the results by the status field.
func ByStatus(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldStatus, opts...).ToFunc()
}

// ByRefundAmount orders the results by the refund_amount field.
func ByRefundAmount(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldRefundAmount, opts...).ToFunc()
}

// ByRefundReason orders the results by the refund_reason field.
func ByRefundReason(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldRefundReason, opts...).ToFunc()
}

// ByRefundAt orders the results by the refund_at field.
func ByRefundAt(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldRefundAt, opts...).ToFunc()
}

// ByForceRefund orders the results by the force_refund field.
func ByForceRefund(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldForceRefund, opts...).ToFunc()
}

// ByRefundRequestedAt orders the results by the refund_requested_at field.
func ByRefundRequestedAt(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldRefundRequestedAt, opts...).ToFunc()
}

// ByRefundRequestReason orders the results by the refund_request_reason field.
func ByRefundRequestReason(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldRefundRequestReason, opts...).ToFunc()
}

// ByRefundRequestedBy orders the results by the refund_requested_by field.
func ByRefundRequestedBy(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldRefundRequestedBy, opts...).ToFunc()
}

// ByExpiresAt orders the results by the expires_at field.
func ByExpiresAt(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldExpiresAt, opts...).ToFunc()
}

// ByPaidAt orders the results by the paid_at field.
func ByPaidAt(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldPaidAt, opts...).ToFunc()
}

// ByCompletedAt orders the results by the completed_at field.
func ByCompletedAt(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldCompletedAt, opts...).ToFunc()
}

// ByFailedAt orders the results by the failed_at field.
func ByFailedAt(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldFailedAt, opts...).ToFunc()
}

// ByFailedReason orders the results by the failed_reason field.
func ByFailedReason(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldFailedReason, opts...).ToFunc()
}

// ByClientIP orders the results by the client_ip field.
func ByClientIP(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldClientIP, opts...).ToFunc()
}

// BySrcHost orders the results by the src_host field.
func BySrcHost(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldSrcHost, opts...).ToFunc()
}

// BySrcURL orders the results by the src_url field.
func BySrcURL(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldSrcURL, opts...).ToFunc()
}

// ByCreatedAt orders the results by the created_at field.
func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
}

// ByUpdatedAt orders the results by the updated_at field.
func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc()
}
|
||||
|
||||
// ByUserField orders the results by user field.
func ByUserField(field string, opts ...sql.OrderTermOption) OrderOption {
	return func(s *sql.Selector) {
		// Orders by a field on the related User entity via the user edge.
		sqlgraph.OrderByNeighborTerms(s, newUserStep(), sql.OrderByField(field, opts...))
	}
}
|
||||
// newUserStep builds the sqlgraph traversal step for the "user" edge
// (an inverse M2O edge from payment_orders.user_id to users.id).
func newUserStep() *sqlgraph.Step {
	return sqlgraph.NewStep(
		sqlgraph.From(Table, FieldID),
		sqlgraph.To(UserInverseTable, FieldID),
		sqlgraph.Edge(sqlgraph.M2O, true, UserTable, UserColumn),
	)
}
|
||||