Mirror of https://github.com/drakkan/sftpgo.git, synced 2025-12-08 07:10:56 +03:00.

# Compare commits

1099 commits
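The captured commit list shows only abbreviated SHAs, running from 088e187e6a through b65fc0bdc2. A minimal sketch for reproducing the full list from a local clone, assuming the range runs from the oldest listed SHA to the newest (the direction is an assumption, since this view does not label its endpoints):

```sh
# Hypothetical reproduction of this compare view from a local clone.
# The two SHAs are the last and first entries of the captured list.
git clone https://github.com/drakkan/sftpgo.git
cd sftpgo
git log --oneline 088e187e6a..b65fc0bdc2   # list the commits in this compare range
```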
## Files changed

### .github/FUNDING.yml (vendored, new file, +12 lines)

```yaml
# These are supported funding model platforms

github: [drakkan] # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2]
patreon: # Replace with a single Patreon username
open_collective: # Replace with a single Open Collective username
ko_fi: # Replace with a single Ko-fi username
tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
liberapay: # Replace with a single Liberapay username
issuehunt: # Replace with a single IssueHunt username
otechie: # Replace with a single Otechie username
custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']
```
### .github/dependabot.yml (vendored, new file, +20 lines)

```yaml
version: 2

updates:
  - package-ecosystem: "gomod"
    directory: "/"
    schedule:
      interval: "weekly"
    open-pull-requests-limit: 2

  - package-ecosystem: "docker"
    directory: "/"
    schedule:
      interval: "weekly"
    open-pull-requests-limit: 2

  - package-ecosystem: "github-actions"
    directory: "/"
    schedule:
      interval: "weekly"
    open-pull-requests-limit: 2
```
### .github/workflows/.editorconfig (vendored, new file, +2 lines)

```ini
[*.yml]
indent_size = 2
```
### .github/workflows/development.yml (vendored, new file, +523 lines)

```yaml
name: CI

on:
  push:
    branches: [2.3.x]
  pull_request:

jobs:
  test-deploy:
    name: Test and deploy
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        go: [1.18]
        os: [ubuntu-latest, macos-latest]
        upload-coverage: [true]
        include:
          - go: 1.18
            os: windows-latest
            upload-coverage: false

    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 0

      - name: Set up Go
        uses: actions/setup-go@v3
        with:
          go-version: ${{ matrix.go }}

      - name: Build for Linux/macOS x86_64
        if: startsWith(matrix.os, 'windows-') != true
        run: |
          go build -trimpath -ldflags "-s -w -X github.com/drakkan/sftpgo/v2/version.commit=`git describe --always --abbrev=8 --dirty` -X github.com/drakkan/sftpgo/v2/version.date=`date -u +%FT%TZ`" -o sftpgo
          cd tests/eventsearcher
          go build -trimpath -ldflags "-s -w" -o eventsearcher
          cd -
          cd tests/ipfilter
          go build -trimpath -ldflags "-s -w" -o ipfilter
          cd -

      - name: Build for macOS arm64
        if: startsWith(matrix.os, 'macos-') == true
        run: CGO_ENABLED=1 GOOS=darwin GOARCH=arm64 SDKROOT=$(xcrun --sdk macosx --show-sdk-path) go build -trimpath -ldflags "-s -w -X github.com/drakkan/sftpgo/v2/version.commit=`git describe --always --abbrev=8 --dirty` -X github.com/drakkan/sftpgo/v2/version.date=`date -u +%FT%TZ`" -o sftpgo_arm64

      - name: Build for Windows
        if: startsWith(matrix.os, 'windows-')
        run: |
          $GIT_COMMIT = (git describe --always --abbrev=8 --dirty) | Out-String
          $DATE_TIME = ([datetime]::Now.ToUniversalTime().toString("yyyy-MM-ddTHH:mm:ssZ")) | Out-String
          $LATEST_TAG = ((git describe --tags $(git rev-list --tags --max-count=1)) | Out-String).Trim()
          $REV_LIST=$LATEST_TAG+"..HEAD"
          $COMMITS_FROM_TAG= ((git rev-list $REV_LIST --count) | Out-String).Trim()
          $FILE_VERSION = $LATEST_TAG.substring(1) + "." + $COMMITS_FROM_TAG
          go install github.com/tc-hib/go-winres@latest
          go-winres simply --arch amd64 --product-version $LATEST_TAG-dev-$GIT_COMMIT --file-version $FILE_VERSION --file-description "SFTPGo server" --product-name SFTPGo --copyright "AGPL-3.0" --original-filename sftpgo.exe --icon .\windows-installer\icon.ico
          go build -trimpath -ldflags "-s -w -X github.com/drakkan/sftpgo/v2/version.commit=$GIT_COMMIT -X github.com/drakkan/sftpgo/v2/version.date=$DATE_TIME" -o sftpgo.exe
          cd tests/eventsearcher
          go build -trimpath -ldflags "-s -w" -o eventsearcher.exe
          cd ../..
          cd tests/ipfilter
          go build -trimpath -ldflags "-s -w" -o ipfilter.exe
          cd ../..
          mkdir arm64
          $Env:CGO_ENABLED='0'
          $Env:GOOS='windows'
          $Env:GOARCH='arm64'
          go-winres simply --arch arm64 --product-version $LATEST_TAG-dev-$GIT_COMMIT --file-version $FILE_VERSION --file-description "SFTPGo server" --product-name SFTPGo --copyright "AGPL-3.0" --original-filename sftpgo.exe --icon .\windows-installer\icon.ico
          go build -trimpath -ldflags "-s -w -X github.com/drakkan/sftpgo/v2/version.commit=$GIT_COMMIT -X github.com/drakkan/sftpgo/v2/version.date=$DATE_TIME" -o .\arm64\sftpgo.exe
          mkdir x86
          $Env:GOARCH='386'
          go-winres simply --arch 386 --product-version $LATEST_TAG-dev-$GIT_COMMIT --file-version $FILE_VERSION --file-description "SFTPGo server" --product-name SFTPGo --copyright "AGPL-3.0" --original-filename sftpgo.exe --icon .\windows-installer\icon.ico
          go build -trimpath -ldflags "-s -w -X github.com/drakkan/sftpgo/v2/version.commit=$GIT_COMMIT -X github.com/drakkan/sftpgo/v2/version.date=$DATE_TIME" -o .\x86\sftpgo.exe
          Remove-Item Env:\CGO_ENABLED
          Remove-Item Env:\GOOS
          Remove-Item Env:\GOARCH

      - name: Run test cases using SQLite provider
        run: go test -v -p 1 -timeout 15m ./... -coverprofile=coverage.txt -covermode=atomic

      - name: Upload coverage to Codecov
        if: ${{ matrix.upload-coverage }}
        uses: codecov/codecov-action@v3
        with:
          file: ./coverage.txt
          fail_ci_if_error: false

      - name: Run test cases using bolt provider
        run: |
          go test -v -p 1 -timeout 2m ./config -covermode=atomic
          go test -v -p 1 -timeout 5m ./common -covermode=atomic
          go test -v -p 1 -timeout 5m ./httpd -covermode=atomic
          go test -v -p 1 -timeout 8m ./sftpd -covermode=atomic
          go test -v -p 1 -timeout 5m ./ftpd -covermode=atomic
          go test -v -p 1 -timeout 5m ./webdavd -covermode=atomic
          go test -v -p 1 -timeout 2m ./telemetry -covermode=atomic
          go test -v -p 1 -timeout 2m ./mfa -covermode=atomic
          go test -v -p 1 -timeout 2m ./command -covermode=atomic
        env:
          SFTPGO_DATA_PROVIDER__DRIVER: bolt
          SFTPGO_DATA_PROVIDER__NAME: 'sftpgo_bolt.db'

      - name: Run test cases using memory provider
        run: go test -v -p 1 -timeout 15m ./... -covermode=atomic
        env:
          SFTPGO_DATA_PROVIDER__DRIVER: memory
          SFTPGO_DATA_PROVIDER__NAME: ''

      - name: Prepare build artifact for macOS
        if: startsWith(matrix.os, 'macos-') == true
        run: |
          mkdir -p output/{init,bash_completion,zsh_completion}
          cp sftpgo output/sftpgo_x86_64
          cp sftpgo_arm64 output/
          cp sftpgo.json output/
          cp -r templates output/
          cp -r static output/
          cp -r openapi output/
          cp init/com.github.drakkan.sftpgo.plist output/init/
          ./sftpgo gen completion bash > output/bash_completion/sftpgo
          ./sftpgo gen completion zsh > output/zsh_completion/_sftpgo
          ./sftpgo gen man -d output/man/man1
          gzip output/man/man1/*

      - name: Prepare Windows installer
        if: ${{ startsWith(matrix.os, 'windows-') && github.event_name != 'pull_request' }}
        run: |
          Remove-Item -LiteralPath "output" -Force -Recurse -ErrorAction Ignore
          mkdir output
          copy .\sftpgo.exe .\output
          copy .\sftpgo.json .\output
          copy .\sftpgo.db .\output
          copy .\LICENSE .\output\LICENSE.txt
          mkdir output\templates
          xcopy .\templates .\output\templates\ /E
          mkdir output\static
          xcopy .\static .\output\static\ /E
          mkdir output\openapi
          xcopy .\openapi .\output\openapi\ /E
          $LATEST_TAG = ((git describe --tags $(git rev-list --tags --max-count=1)) | Out-String).Trim()
          $REV_LIST=$LATEST_TAG+"..HEAD"
          $COMMITS_FROM_TAG= ((git rev-list $REV_LIST --count) | Out-String).Trim()
          $Env:SFTPGO_ISS_DEV_VERSION = $LATEST_TAG + "." + $COMMITS_FROM_TAG
          $CERT_PATH=(Get-Location -PSProvider FileSystem).ProviderPath + "\cert.pfx"
          [IO.File]::WriteAllBytes($CERT_PATH,[System.Convert]::FromBase64String($Env:CERT_DATA))
          certutil -f -p "$Env:CERT_PASS" -importpfx MY "$CERT_PATH"
          rm "$CERT_PATH"
          & 'C:/Program Files (x86)/Windows Kits/10/bin/10.0.20348.0/x86/signtool.exe' sign /sm /tr http://timestamp.sectigo.com /td sha256 /fd sha256 /n "Nicola Murino" /d "SFTPGo" .\sftpgo.exe
          & 'C:/Program Files (x86)/Windows Kits/10/bin/10.0.20348.0/x86/signtool.exe' sign /sm /tr http://timestamp.sectigo.com /td sha256 /fd sha256 /n "Nicola Murino" /d "SFTPGo" .\arm64\sftpgo.exe
          & 'C:/Program Files (x86)/Windows Kits/10/bin/10.0.20348.0/x86/signtool.exe' sign /sm /tr http://timestamp.sectigo.com /td sha256 /fd sha256 /n "Nicola Murino" /d "SFTPGo" .\x86\sftpgo.exe
          $INNO_S='/Ssigntool=$qC:/Program Files (x86)/Windows Kits/10/bin/10.0.20348.0/x86/signtool.exe$q sign /sm /tr http://timestamp.sectigo.com /td sha256 /fd sha256 /n $qNicola Murino$q /d $qSFTPGo$q $f'
          iscc "$INNO_S" .\windows-installer\sftpgo.iss

          rm .\output\sftpgo.exe
          rm .\output\sftpgo.db
          copy .\arm64\sftpgo.exe .\output
          (Get-Content .\output\sftpgo.json).replace('"sqlite"', '"bolt"') | Set-Content .\output\sftpgo.json
          $Env:SFTPGO_DATA_PROVIDER__DRIVER='bolt'
          $Env:SFTPGO_DATA_PROVIDER__NAME='.\output\sftpgo.db'
          .\sftpgo.exe initprovider
          Remove-Item Env:\SFTPGO_DATA_PROVIDER__DRIVER
          Remove-Item Env:\SFTPGO_DATA_PROVIDER__NAME
          $Env:SFTPGO_ISS_ARCH='arm64'
          iscc "$INNO_S" .\windows-installer\sftpgo.iss

          rm .\output\sftpgo.exe
          copy .\x86\sftpgo.exe .\output
          $Env:SFTPGO_ISS_ARCH='x86'
          iscc "$INNO_S" .\windows-installer\sftpgo.iss
          certutil -delstore MY "Nicola Murino"
        env:
          CERT_DATA: ${{ secrets.CERT_DATA }}
          CERT_PASS: ${{ secrets.CERT_PASS }}

      - name: Upload Windows installer x86_64 artifact
        if: ${{ startsWith(matrix.os, 'windows-') && github.event_name != 'pull_request' }}
        uses: actions/upload-artifact@v3
        with:
          name: sftpgo_windows_installer_x86_64
          path: ./sftpgo_windows_x86_64.exe

      - name: Upload Windows installer arm64 artifact
        if: ${{ startsWith(matrix.os, 'windows-') && github.event_name != 'pull_request' }}
        uses: actions/upload-artifact@v3
        with:
          name: sftpgo_windows_installer_arm64
          path: ./sftpgo_windows_arm64.exe

      - name: Upload Windows installer x86 artifact
        if: ${{ startsWith(matrix.os, 'windows-') && github.event_name != 'pull_request' }}
        uses: actions/upload-artifact@v3
        with:
          name: sftpgo_windows_installer_x86
          path: ./sftpgo_windows_x86.exe

      - name: Prepare build artifact for Windows
        if: startsWith(matrix.os, 'windows-')
        run: |
          Remove-Item -LiteralPath "output" -Force -Recurse -ErrorAction Ignore
          mkdir output
          copy .\sftpgo.exe .\output
          mkdir output\arm64
          copy .\arm64\sftpgo.exe .\output\arm64
          mkdir output\x86
          copy .\x86\sftpgo.exe .\output\x86
          copy .\sftpgo.json .\output
          (Get-Content .\output\sftpgo.json).replace('"sqlite"', '"bolt"') | Set-Content .\output\sftpgo.json
          mkdir output\templates
          xcopy .\templates .\output\templates\ /E
          mkdir output\static
          xcopy .\static .\output\static\ /E
          mkdir output\openapi
          xcopy .\openapi .\output\openapi\ /E

      - name: Upload build artifact
        if: startsWith(matrix.os, 'ubuntu-') != true
        uses: actions/upload-artifact@v3
        with:
          name: sftpgo-${{ matrix.os }}-go-${{ matrix.go }}
          path: output

  test-goarch-386:
    name: Run test cases on 32-bit arch
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v3

      - name: Set up Go
        uses: actions/setup-go@v3
        with:
          go-version: 1.18

      - name: Build
        run: |
          cd tests/eventsearcher
          go build -trimpath -ldflags "-s -w" -o eventsearcher
          cd -
          cd tests/ipfilter
          go build -trimpath -ldflags "-s -w" -o ipfilter
          cd -
        env:
          GOARCH: 386

      - name: Run test cases
        run: go test -v -p 1 -timeout 15m ./... -covermode=atomic
        env:
          SFTPGO_DATA_PROVIDER__DRIVER: memory
          SFTPGO_DATA_PROVIDER__NAME: ''
          GOARCH: 386

  test-postgresql-mysql-crdb:
    name: Test with PgSQL/MySQL/Cockroach
    runs-on: ubuntu-latest

    services:
      postgres:
        image: postgres:latest
        env:
          POSTGRES_PASSWORD: postgres
          POSTGRES_DB: sftpgo
        options: >-
          --health-cmd pg_isready
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5
        ports:
          - 5432:5432

      mariadb:
        image: mariadb:latest
        env:
          MYSQL_ROOT_PASSWORD: mysql
          MYSQL_DATABASE: sftpgo
          MYSQL_USER: sftpgo
          MYSQL_PASSWORD: sftpgo
        options: >-
          --health-cmd "mysqladmin status -h 127.0.0.1 -P 3306 -u root -p$MYSQL_ROOT_PASSWORD"
          --health-interval 10s
          --health-timeout 5s
          --health-retries 6
        ports:
          - 3307:3306

      mysql:
        image: mysql:latest
        env:
          MYSQL_ROOT_PASSWORD: mysql
          MYSQL_DATABASE: sftpgo
          MYSQL_USER: sftpgo
          MYSQL_PASSWORD: sftpgo
        options: >-
          --health-cmd "mysqladmin status -h 127.0.0.1 -P 3306 -u root -p$MYSQL_ROOT_PASSWORD"
          --health-interval 10s
          --health-timeout 5s
          --health-retries 6
        ports:
          - 3308:3306

    steps:
      - uses: actions/checkout@v3

      - name: Set up Go
        uses: actions/setup-go@v3
        with:
          go-version: 1.18

      - name: Build
        run: |
          cd tests/eventsearcher
          go build -trimpath -ldflags "-s -w" -o eventsearcher
          cd -
          cd tests/ipfilter
          go build -trimpath -ldflags "-s -w" -o ipfilter
          cd -

      - name: Run tests using PostgreSQL provider
        run: |
          go test -v -p 1 -timeout 15m ./... -covermode=atomic
        env:
          SFTPGO_DATA_PROVIDER__DRIVER: postgresql
          SFTPGO_DATA_PROVIDER__NAME: sftpgo
          SFTPGO_DATA_PROVIDER__HOST: localhost
          SFTPGO_DATA_PROVIDER__PORT: 5432
          SFTPGO_DATA_PROVIDER__USERNAME: postgres
          SFTPGO_DATA_PROVIDER__PASSWORD: postgres

      - name: Run tests using MySQL provider
        run: |
          go test -v -p 1 -timeout 15m ./... -covermode=atomic
        env:
          SFTPGO_DATA_PROVIDER__DRIVER: mysql
          SFTPGO_DATA_PROVIDER__NAME: sftpgo
          SFTPGO_DATA_PROVIDER__HOST: localhost
          SFTPGO_DATA_PROVIDER__PORT: 3308
          SFTPGO_DATA_PROVIDER__USERNAME: sftpgo
          SFTPGO_DATA_PROVIDER__PASSWORD: sftpgo

      - name: Run tests using MariaDB provider
        run: |
          go test -v -p 1 -timeout 15m ./... -covermode=atomic
        env:
          SFTPGO_DATA_PROVIDER__DRIVER: mysql
          SFTPGO_DATA_PROVIDER__NAME: sftpgo
          SFTPGO_DATA_PROVIDER__HOST: localhost
          SFTPGO_DATA_PROVIDER__PORT: 3307
          SFTPGO_DATA_PROVIDER__USERNAME: sftpgo
          SFTPGO_DATA_PROVIDER__PASSWORD: sftpgo
          SFTPGO_DATA_PROVIDER__SQL_TABLES_PREFIX: prefix_

      - name: Run tests using CockroachDB provider
        run: |
          docker run --rm --name crdb --health-cmd "curl -I http://127.0.0.1:8080" --health-interval 10s --health-timeout 5s --health-retries 6 -p 26257:26257 -d cockroachdb/cockroach:latest start-single-node --insecure --listen-addr :26257
          sleep 10
          docker exec crdb cockroach sql --insecure -e 'create database "sftpgo"'
          go test -v -p 1 -timeout 15m ./... -covermode=atomic
          docker stop crdb
        env:
          SFTPGO_DATA_PROVIDER__DRIVER: cockroachdb
          SFTPGO_DATA_PROVIDER__NAME: sftpgo
          SFTPGO_DATA_PROVIDER__HOST: localhost
          SFTPGO_DATA_PROVIDER__PORT: 26257
          SFTPGO_DATA_PROVIDER__USERNAME: root
          SFTPGO_DATA_PROVIDER__PASSWORD:
          SFTPGO_DATA_PROVIDER__SQL_TABLES_PREFIX: prefix_

  build-linux-packages:
    name: Build Linux packages
    runs-on: ubuntu-latest
    strategy:
      matrix:
        include:
          - arch: amd64
            distro: ubuntu:18.04
            go: latest
            go-arch: amd64
          - arch: aarch64
            distro: ubuntu18.04
            go: latest
            go-arch: arm64
          - arch: ppc64le
            distro: ubuntu18.04
            go: latest
            go-arch: ppc64le
          - arch: armv7
            distro: ubuntu18.04
            go: latest
            go-arch: arm7
    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 0

      - name: Get commit SHA
        id: get_commit
        run: echo ::set-output name=COMMIT::${GITHUB_SHA::8}
        shell: bash

      - name: Build on amd64
        if: ${{ matrix.arch == 'amd64' }}
        run: |
          echo '#!/bin/bash' > build.sh
          echo '' >> build.sh
          echo 'set -e' >> build.sh
          echo 'apt-get update -q -y' >> build.sh
          echo 'apt-get install -q -y curl gcc' >> build.sh
          if [ ${{ matrix.go }} == 'latest' ]
          then
            echo 'GO_VERSION=$(curl -L https://go.dev/VERSION?m=text)' >> build.sh
          else
            echo 'GO_VERSION=${{ matrix.go }}' >> build.sh
          fi
          echo 'GO_DOWNLOAD_ARCH=${{ matrix.go-arch }}' >> build.sh
          echo 'curl --retry 5 --retry-delay 2 --connect-timeout 10 -o go.tar.gz -L https://go.dev/dl/${GO_VERSION}.linux-${GO_DOWNLOAD_ARCH}.tar.gz' >> build.sh
          echo 'tar -C /usr/local -xzf go.tar.gz' >> build.sh
          echo 'export PATH=$PATH:/usr/local/go/bin' >> build.sh
          echo 'go version' >> build.sh
          echo 'cd /usr/local/src' >> build.sh
          echo 'go build -buildvcs=false -trimpath -ldflags "-s -w -X github.com/drakkan/sftpgo/v2/version.commit=${{ steps.get_commit.outputs.COMMIT }} -X github.com/drakkan/sftpgo/v2/version.date=`date -u +%FT%TZ`" -o sftpgo' >> build.sh

          chmod 755 build.sh
          docker run --rm --name ubuntu-build --mount type=bind,source=`pwd`,target=/usr/local/src ${{ matrix.distro }} /usr/local/src/build.sh
          mkdir -p output/{init,bash_completion,zsh_completion}
          cp sftpgo.json output/
          cp -r templates output/
          cp -r static output/
          cp -r openapi output/
          cp init/sftpgo.service output/init/
          ./sftpgo gen completion bash > output/bash_completion/sftpgo
          ./sftpgo gen completion zsh > output/zsh_completion/_sftpgo
          ./sftpgo gen man -d output/man/man1
          gzip output/man/man1/*
          cp sftpgo output/

      - uses: uraimo/run-on-arch-action@v2
        if: ${{ matrix.arch != 'amd64' }}
        name: Build for ${{ matrix.arch }}
        id: build
        with:
          arch: ${{ matrix.arch }}
          distro: ${{ matrix.distro }}
          setup: |
            mkdir -p "${PWD}/output"
          dockerRunArgs: |
            --volume "${PWD}/output:/output"
          shell: /bin/bash
          install: |
            apt-get update -q -y
            apt-get install -q -y curl gcc
            if [ ${{ matrix.go }} == 'latest' ]
            then
              GO_VERSION=$(curl -L https://go.dev/VERSION?m=text)
            else
              GO_VERSION=${{ matrix.go }}
            fi
            GO_DOWNLOAD_ARCH=${{ matrix.go-arch }}
            if [ ${{ matrix.arch}} == 'armv7' ]
            then
              GO_DOWNLOAD_ARCH=armv6l
            fi
            curl --retry 5 --retry-delay 2 --connect-timeout 10 -o go.tar.gz -L https://go.dev/dl/${GO_VERSION}.linux-${GO_DOWNLOAD_ARCH}.tar.gz
            tar -C /usr/local -xzf go.tar.gz
          run: |
            export PATH=$PATH:/usr/local/go/bin
            go version
            if [ ${{ matrix.arch}} == 'armv7' ]
            then
              export GOARM=7
            fi
            go build -buildvcs=false -trimpath -ldflags "-s -w -X github.com/drakkan/sftpgo/v2/version.commit=${{ steps.get_commit.outputs.COMMIT }} -X github.com/drakkan/sftpgo/v2/version.date=`date -u +%FT%TZ`" -o sftpgo
            mkdir -p output/{init,bash_completion,zsh_completion}
            cp sftpgo.json output/
            cp -r templates output/
            cp -r static output/
            cp -r openapi output/
            cp init/sftpgo.service output/init/
            ./sftpgo gen completion bash > output/bash_completion/sftpgo
            ./sftpgo gen completion zsh > output/zsh_completion/_sftpgo
            ./sftpgo gen man -d output/man/man1
            gzip output/man/man1/*
            cp sftpgo output/

      - name: Upload build artifact
        uses: actions/upload-artifact@v3
        with:
          name: sftpgo-linux-${{ matrix.arch }}-go-${{ matrix.go }}
          path: output

      - name: Build Packages
        id: build_linux_pkgs
        run: |
          export NFPM_ARCH=${{ matrix.go-arch }}
          cd pkgs
          ./build.sh
          PKG_VERSION=$(cat dist/version)
          echo "::set-output name=pkg-version::${PKG_VERSION}"

      - name: Upload Debian Package
        uses: actions/upload-artifact@v3
        with:
          name: sftpgo-${{ steps.build_linux_pkgs.outputs.pkg-version }}-${{ matrix.go-arch }}-deb
          path: pkgs/dist/deb/*

      - name: Upload RPM Package
        uses: actions/upload-artifact@v3
        with:
          name: sftpgo-${{ steps.build_linux_pkgs.outputs.pkg-version }}-${{ matrix.go-arch }}-rpm
          path: pkgs/dist/rpm/*

  golangci-lint:
    name: golangci-lint
    runs-on: ubuntu-latest
    steps:
      - name: Set up Go
        uses: actions/setup-go@v3
        with:
          go-version: 1.18
      - uses: actions/checkout@v3
      - name: Run golangci-lint
        uses: golangci/golangci-lint-action@v3
        with:
          version: latest
```
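The matrix above exercises the SQLite, bolt, and memory data providers by switching `SFTPGO_DATA_PROVIDER__*` environment variables around the same `go test` invocation. A minimal sketch of running the equivalent of the memory-provider step locally, assuming a Go 1.18 toolchain and the repository root as the working directory:

```sh
# Local mirror of the "Run test cases using memory provider" CI step.
# Build the test helper binaries first, as the workflow's build steps do.
(cd tests/eventsearcher && go build -trimpath -ldflags "-s -w" -o eventsearcher)
(cd tests/ipfilter && go build -trimpath -ldflags "-s -w" -o ipfilter)

# Select the in-memory provider, then run the full suite serially.
export SFTPGO_DATA_PROVIDER__DRIVER=memory
export SFTPGO_DATA_PROVIDER__NAME=''
go test -v -p 1 -timeout 15m ./... -covermode=atomic
```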
### .github/workflows/docker.yml (vendored, new file, +181 lines)

```yaml
name: Docker

on:
  #schedule:
  #  - cron: '0 4 * * *' # everyday at 4:00 AM UTC
  push:
    branches:
      - 2.3.x
    tags:
      - v*
  pull_request:

jobs:
  build:
    name: Build
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os:
          - ubuntu-latest
        docker_pkg:
          - debian
          - alpine
        optional_deps:
          - true
          - false
        include:
          - os: ubuntu-latest
            docker_pkg: distroless
            optional_deps: false
          - os: ubuntu-latest
            docker_pkg: debian-plugins
            optional_deps: true
    steps:
      - name: Checkout
        uses: actions/checkout@v3

      - name: Gather image information
        id: info
        run: |
          VERSION=noop
          DOCKERFILE=Dockerfile
          MINOR=""
          MAJOR=""
          if [ "${{ github.event_name }}" = "schedule" ]; then
            VERSION=nightly
          elif [[ $GITHUB_REF == refs/tags/* ]]; then
            VERSION=${GITHUB_REF#refs/tags/}
          elif [[ $GITHUB_REF == refs/heads/* ]]; then
            VERSION=$(echo ${GITHUB_REF#refs/heads/} | sed -r 's#/+#-#g')
            if [ "${{ github.event.repository.default_branch }}" = "$VERSION" ]; then
              VERSION=edge
            fi
          elif [[ $GITHUB_REF == refs/pull/* ]]; then
            VERSION=pr-${{ github.event.number }}
          fi
          if [[ $VERSION =~ ^v[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$ ]]; then
            MINOR=${VERSION%.*}
            MAJOR=${MINOR%.*}
          fi
          VERSION_SLIM="${VERSION}-slim"
          if [[ $DOCKER_PKG == alpine ]]; then
            VERSION="${VERSION}-alpine"
            VERSION_SLIM="${VERSION}-slim"
            DOCKERFILE=Dockerfile.alpine
          elif [[ $DOCKER_PKG == distroless ]]; then
            VERSION="${VERSION}-distroless"
            VERSION_SLIM="${VERSION}-slim"
            DOCKERFILE=Dockerfile.distroless
          elif [[ $DOCKER_PKG == debian-plugins ]]; then
            VERSION="${VERSION}-plugins"
            VERSION_SLIM="${VERSION}-slim"
          fi
          DOCKER_IMAGES=("drakkan/sftpgo" "ghcr.io/drakkan/sftpgo")
          TAGS="${DOCKER_IMAGES[0]}:${VERSION}"
          TAGS_SLIM="${DOCKER_IMAGES[0]}:${VERSION_SLIM}"

          for DOCKER_IMAGE in ${DOCKER_IMAGES[@]}; do
            if [[ ${DOCKER_IMAGE} != ${DOCKER_IMAGES[0]} ]]; then
              TAGS="${TAGS},${DOCKER_IMAGE}:${VERSION}"
              TAGS_SLIM="${TAGS_SLIM},${DOCKER_IMAGE}:${VERSION_SLIM}"
            fi
            if [[ $GITHUB_REF == refs/tags/* ]]; then
              if [[ $DOCKER_PKG == debian ]]; then
                if [[ -n $MAJOR && -n $MINOR ]]; then
                  TAGS="${TAGS},${DOCKER_IMAGE}:${MINOR},${DOCKER_IMAGE}:${MAJOR}"
                  TAGS_SLIM="${TAGS_SLIM},${DOCKER_IMAGE}:${MINOR}-slim,${DOCKER_IMAGE}:${MAJOR}-slim"
                fi
                TAGS="${TAGS},${DOCKER_IMAGE}:latest"
                TAGS_SLIM="${TAGS_SLIM},${DOCKER_IMAGE}:slim"
              elif [[ $DOCKER_PKG == distroless ]]; then
                if [[ -n $MAJOR && -n $MINOR ]]; then
                  TAGS="${TAGS},${DOCKER_IMAGE}:${MINOR}-distroless,${DOCKER_IMAGE}:${MAJOR}-distroless"
                  TAGS_SLIM="${TAGS_SLIM},${DOCKER_IMAGE}:${MINOR}-distroless-slim,${DOCKER_IMAGE}:${MAJOR}-distroless-slim"
                fi
                TAGS="${TAGS},${DOCKER_IMAGE}:distroless"
                TAGS_SLIM="${TAGS_SLIM},${DOCKER_IMAGE}:distroless-slim"
              elif [[ $DOCKER_PKG == debian-plugins ]]; then
                if [[ -n $MAJOR && -n $MINOR ]]; then
                  TAGS="${TAGS},${DOCKER_IMAGE}:${MINOR}-plugins,${DOCKER_IMAGE}:${MAJOR}-plugins"
                  TAGS_SLIM="${TAGS_SLIM},${DOCKER_IMAGE}:${MINOR}-plugins-slim,${DOCKER_IMAGE}:${MAJOR}-plugins-slim"
                fi
                TAGS="${TAGS},${DOCKER_IMAGE}:plugins"
                TAGS_SLIM="${TAGS_SLIM},${DOCKER_IMAGE}:plugins-slim"
              else
                if [[ -n $MAJOR && -n $MINOR ]]; then
                  TAGS="${TAGS},${DOCKER_IMAGE}:${MINOR}-alpine,${DOCKER_IMAGE}:${MAJOR}-alpine"
                  TAGS_SLIM="${TAGS_SLIM},${DOCKER_IMAGE}:${MINOR}-alpine-slim,${DOCKER_IMAGE}:${MAJOR}-alpine-slim"
                fi
                TAGS="${TAGS},${DOCKER_IMAGE}:alpine"
                TAGS_SLIM="${TAGS_SLIM},${DOCKER_IMAGE}:alpine-slim"
              fi
            fi
          done

          if [[ $OPTIONAL_DEPS == true ]]; then
            echo ::set-output name=version::${VERSION}
            echo ::set-output name=tags::${TAGS}
            echo ::set-output name=full::true
          else
            echo ::set-output name=version::${VERSION_SLIM}
            echo ::set-output name=tags::${TAGS_SLIM}
            echo ::set-output name=full::false
          fi
          if [[ $DOCKER_PKG == debian-plugins ]]; then
            echo ::set-output name=plugins::true
          else
            echo ::set-output name=plugins::false
          fi
          echo ::set-output name=dockerfile::${DOCKERFILE}
          echo ::set-output name=created::$(date -u +'%Y-%m-%dT%H:%M:%SZ')
          echo ::set-output name=sha::${GITHUB_SHA::8}
        env:
          DOCKER_PKG: ${{ matrix.docker_pkg }}
          OPTIONAL_DEPS: ${{ matrix.optional_deps }}

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v2

      - name: Set up builder
        uses: docker/setup-buildx-action@v2
        id: builder

      - name: Login to Docker Hub
        uses: docker/login-action@v2
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
        if: ${{ github.event_name != 'pull_request' }}

      - name: Login to GitHub Container Registry
        uses: docker/login-action@v2
        with:
          registry: ghcr.io
          username: ${{ github.repository_owner }}
          password: ${{ secrets.GITHUB_TOKEN }}
        if: ${{ github.event_name != 'pull_request' }}

      - name: Build and push
        uses: docker/build-push-action@v3
        with:
          context: .
          builder: ${{ steps.builder.outputs.name }}
          file: ./${{ steps.info.outputs.dockerfile }}
          platforms: linux/amd64,linux/arm64,linux/ppc64le
          push: ${{ github.event_name != 'pull_request' }}
          tags: ${{ steps.info.outputs.tags }}
          build-args: |
            COMMIT_SHA=${{ steps.info.outputs.sha }}
            INSTALL_OPTIONAL_PACKAGES=${{ steps.info.outputs.full }}
            DOWNLOAD_PLUGINS=${{ steps.info.outputs.plugins }}
          labels: |
            org.opencontainers.image.title=SFTPGo
            org.opencontainers.image.description=Fully featured and highly configurable SFTP server with optional HTTP, FTP/S and WebDAV support
            org.opencontainers.image.url=https://github.com/drakkan/sftpgo
            org.opencontainers.image.documentation=https://github.com/drakkan/sftpgo/blob/${{ github.sha }}/docker/README.md
            org.opencontainers.image.source=https://github.com/drakkan/sftpgo
            org.opencontainers.image.version=${{ steps.info.outputs.version }}
            org.opencontainers.image.created=${{ steps.info.outputs.created }}
            org.opencontainers.image.revision=${{ github.sha }}
            org.opencontainers.image.licenses=AGPL-3.0
```
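The "Gather image information" step derives the full tag list from the Git ref, the `docker_pkg` flavor, and the `optional_deps` flag. As a worked example traced through the script (the tag `v2.3.0` is hypothetical, not taken from this page): for a pushed tag `v2.3.0` with `docker_pkg: debian` and `optional_deps: true`, the regex match sets `MINOR=v2.3` and `MAJOR=v2`, and the step emits:

```sh
# Tags output for VERSION=v2.3.0, DOCKER_PKG=debian, OPTIONAL_DEPS=true,
# in the order the loop appends them:
#   drakkan/sftpgo:v2.3.0
#   drakkan/sftpgo:v2.3
#   drakkan/sftpgo:v2
#   drakkan/sftpgo:latest
#   ghcr.io/drakkan/sftpgo:v2.3.0
#   ghcr.io/drakkan/sftpgo:v2.3
#   ghcr.io/drakkan/sftpgo:v2
#   ghcr.io/drakkan/sftpgo:latest
# With OPTIONAL_DEPS=false, the same names carry the -slim suffix instead.
```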
### .github/workflows/release.yml (vendored, new file, +607 lines; the captured diff ends mid-file)

```yaml
name: Release

on:
  push:
    tags: 'v*'

env:
  GO_VERSION: 1.18.6

jobs:
  prepare-sources-with-deps:
    name: Prepare sources with deps
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - name: Set up Go
        uses: actions/setup-go@v3
        with:
          go-version: ${{ env.GO_VERSION }}

      - name: Get SFTPGo version
        id: get_version
        run: echo ::set-output name=VERSION::${GITHUB_REF/refs\/tags\//}

      - name: Prepare release
        run: |
          go mod vendor
          echo "${SFTPGO_VERSION}" > VERSION.txt
          echo "${GITHUB_SHA::8}" >> VERSION.txt
          tar cJvf sftpgo_${SFTPGO_VERSION}_src_with_deps.tar.xz *
        env:
          SFTPGO_VERSION: ${{ steps.get_version.outputs.VERSION }}

      - name: Upload build artifact
        uses: actions/upload-artifact@v3
        with:
          name: sftpgo_${{ steps.get_version.outputs.VERSION }}_src_with_deps.tar.xz
          path: ./sftpgo_${{ steps.get_version.outputs.VERSION }}_src_with_deps.tar.xz
          retention-days: 1

  prepare-window-mac:
    name: Prepare binaries
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os: [macos-11, windows-2022]

    steps:
      - uses: actions/checkout@v3
      - name: Set up Go
        uses: actions/setup-go@v3
        with:
          go-version: ${{ env.GO_VERSION }}

      - name: Get SFTPGo version
        id: get_version
        run: echo ::set-output name=VERSION::${GITHUB_REF/refs\/tags\//}
        shell: bash

      - name: Get OS name
        id: get_os_name
        run: |
          if [[ $MATRIX_OS =~ ^macos.* ]]
          then
            echo ::set-output name=OS::macOS
          else
            echo ::set-output name=OS::windows
          fi
        shell: bash
        env:
          MATRIX_OS: ${{ matrix.os }}

      - name: Build for macOS x86_64
        if: startsWith(matrix.os, 'windows-') != true
        run: go build -trimpath -ldflags "-s -w -X github.com/drakkan/sftpgo/v2/version.commit=`git describe --always --abbrev=8 --dirty` -X github.com/drakkan/sftpgo/v2/version.date=`date -u +%FT%TZ`" -o sftpgo

      - name: Build for macOS arm64
        if: startsWith(matrix.os, 'macos-') == true
        run: CGO_ENABLED=1 GOOS=darwin GOARCH=arm64 SDKROOT=$(xcrun --sdk macosx --show-sdk-path) go build -trimpath -ldflags "-s -w -X github.com/drakkan/sftpgo/v2/version.commit=`git describe --always --abbrev=8 --dirty` -X github.com/drakkan/sftpgo/v2/version.date=`date -u +%FT%TZ`" -o sftpgo_arm64

      - name: Build for Windows
        if: startsWith(matrix.os, 'windows-')
        run: |
          $GIT_COMMIT = (git describe --always --abbrev=8 --dirty) | Out-String
          $DATE_TIME = ([datetime]::Now.ToUniversalTime().toString("yyyy-MM-ddTHH:mm:ssZ")) | Out-String
          $FILE_VERSION = $Env:SFTPGO_VERSION.substring(1) + ".0"
          go install github.com/tc-hib/go-winres@latest
          go-winres simply --arch amd64 --product-version $Env:SFTPGO_VERSION-$GIT_COMMIT --file-version $FILE_VERSION --file-description "SFTPGo server" --product-name SFTPGo --copyright "AGPL-3.0" --original-filename sftpgo.exe --icon .\windows-installer\icon.ico
          go build -trimpath -ldflags "-s -w -X github.com/drakkan/sftpgo/v2/version.commit=$GIT_COMMIT -X github.com/drakkan/sftpgo/v2/version.date=$DATE_TIME" -o sftpgo.exe
          mkdir arm64
          $Env:CGO_ENABLED='0'
          $Env:GOOS='windows'
          $Env:GOARCH='arm64'
          go-winres simply --arch arm64 --product-version $Env:SFTPGO_VERSION-$GIT_COMMIT --file-version $FILE_VERSION --file-description "SFTPGo server" --product-name SFTPGo --copyright "AGPL-3.0" --original-filename sftpgo.exe --icon .\windows-installer\icon.ico
          go build -trimpath -ldflags "-s -w -X github.com/drakkan/sftpgo/v2/version.commit=$GIT_COMMIT -X github.com/drakkan/sftpgo/v2/version.date=$DATE_TIME" -o .\arm64\sftpgo.exe
          mkdir x86
          $Env:GOARCH='386'
          go-winres simply --arch 386 --product-version $Env:SFTPGO_VERSION-$GIT_COMMIT --file-version $FILE_VERSION --file-description "SFTPGo server" --product-name SFTPGo --copyright "AGPL-3.0" --original-filename sftpgo.exe --icon .\windows-installer\icon.ico
          go build -trimpath -ldflags "-s -w -X github.com/drakkan/sftpgo/v2/version.commit=$GIT_COMMIT -X github.com/drakkan/sftpgo/v2/version.date=$DATE_TIME" -o .\x86\sftpgo.exe
          Remove-Item Env:\CGO_ENABLED
          Remove-Item Env:\GOOS
          Remove-Item Env:\GOARCH
        env:
          SFTPGO_VERSION: ${{ steps.get_version.outputs.VERSION }}

      - name: Initialize data provider
        run: ./sftpgo initprovider
        shell: bash

      - name: Prepare Release for macOS
        if: startsWith(matrix.os, 'macos-')
        run: |
          mkdir -p output/{init,sqlite,bash_completion,zsh_completion}
          echo "For documentation please take a look here:" > output/README.txt
          echo "" >> output/README.txt
          echo "https://github.com/drakkan/sftpgo/blob/${SFTPGO_VERSION}/README.md" >> output/README.txt
          cp LICENSE output/
          cp sftpgo output/
          cp sftpgo.json output/
          cp sftpgo.db output/sqlite/
          cp -r static output/
          cp -r openapi output/
          cp -r templates output/
          cp init/com.github.drakkan.sftpgo.plist output/init/
          ./sftpgo gen completion bash > output/bash_completion/sftpgo
          ./sftpgo gen completion zsh > output/zsh_completion/_sftpgo
          ./sftpgo gen man -d output/man/man1
          gzip output/man/man1/*
          cd output
          tar cJvf ../sftpgo_${SFTPGO_VERSION}_${OS}_x86_64.tar.xz *
          cd ..
          cp sftpgo_arm64 output/sftpgo
          cd output
          tar cJvf ../sftpgo_${SFTPGO_VERSION}_${OS}_arm64.tar.xz *
          cd ..
        env:
          SFTPGO_VERSION: ${{ steps.get_version.outputs.VERSION }}
          OS: ${{ steps.get_os_name.outputs.OS }}

      - name: Prepare Release for Windows
        if: startsWith(matrix.os, 'windows-')
        run: |
          mkdir output
          copy .\sftpgo.exe .\output
          copy .\sftpgo.json .\output
          copy .\sftpgo.db .\output
          copy .\LICENSE .\output\LICENSE.txt
          mkdir output\templates
          xcopy .\templates .\output\templates\ /E
          mkdir output\static
          xcopy .\static .\output\static\ /E
          mkdir output\openapi
          xcopy .\openapi .\output\openapi\ /E
          $CERT_PATH=(Get-Location -PSProvider FileSystem).ProviderPath + "\cert.pfx"
          [IO.File]::WriteAllBytes($CERT_PATH,[System.Convert]::FromBase64String($Env:CERT_DATA))
          certutil -f -p "$Env:CERT_PASS" -importpfx MY "$CERT_PATH"
          rm "$CERT_PATH"
          & 'C:/Program Files (x86)/Windows Kits/10/bin/10.0.20348.0/x86/signtool.exe' sign /sm /tr http://timestamp.sectigo.com /td sha256 /fd sha256 /n "Nicola Murino" /d "SFTPGo" .\sftpgo.exe
          & 'C:/Program Files (x86)/Windows Kits/10/bin/10.0.20348.0/x86/signtool.exe' sign /sm /tr http://timestamp.sectigo.com /td sha256 /fd sha256 /n "Nicola Murino" /d "SFTPGo" .\arm64\sftpgo.exe
          & 'C:/Program Files (x86)/Windows Kits/10/bin/10.0.20348.0/x86/signtool.exe' sign /sm /tr http://timestamp.sectigo.com /td sha256 /fd sha256 /n "Nicola Murino" /d "SFTPGo" .\x86\sftpgo.exe
          $INNO_S='/Ssigntool=$qC:/Program Files (x86)/Windows Kits/10/bin/10.0.20348.0/x86/signtool.exe$q sign /sm /tr http://timestamp.sectigo.com /td sha256 /fd sha256 /n $qNicola Murino$q /d $qSFTPGo$q $f'
          iscc "$INNO_S" .\windows-installer\sftpgo.iss

          rm .\output\sftpgo.exe
          rm .\output\sftpgo.db
          copy .\arm64\sftpgo.exe .\output
          (Get-Content .\output\sftpgo.json).replace('"sqlite"', '"bolt"') | Set-Content .\output\sftpgo.json
          $Env:SFTPGO_DATA_PROVIDER__DRIVER='bolt'
          $Env:SFTPGO_DATA_PROVIDER__NAME='.\output\sftpgo.db'
          .\sftpgo.exe initprovider
          Remove-Item Env:\SFTPGO_DATA_PROVIDER__DRIVER
          Remove-Item Env:\SFTPGO_DATA_PROVIDER__NAME
          $Env:SFTPGO_ISS_ARCH='arm64'
          iscc "$INNO_S" .\windows-installer\sftpgo.iss

          rm .\output\sftpgo.exe
          copy .\x86\sftpgo.exe .\output
          $Env:SFTPGO_ISS_ARCH='x86'
          iscc "$INNO_S" .\windows-installer\sftpgo.iss
          certutil -delstore MY "Nicola Murino"
        env:
          SFTPGO_ISS_VERSION: ${{ steps.get_version.outputs.VERSION }}
          SFTPGO_ISS_DOC_URL: https://github.com/drakkan/sftpgo/blob/${{ steps.get_version.outputs.VERSION }}/README.md
          CERT_DATA: ${{ secrets.CERT_DATA }}
          CERT_PASS: ${{ secrets.CERT_PASS }}

      - name: Prepare Portable Release for Windows
        if: startsWith(matrix.os, 'windows-')
        run: |
          mkdir win-portable
          copy .\sftpgo.exe .\win-portable
          mkdir win-portable\arm64
          copy .\arm64\sftpgo.exe .\win-portable\arm64
```
|
||||
mkdir win-portable\x86
|
||||
copy .\x86\sftpgo.exe .\win-portable\x86
|
||||
copy .\sftpgo.json .\win-portable
|
||||
(Get-Content .\win-portable\sftpgo.json).replace('"sqlite"', '"bolt"') | Set-Content .\win-portable\sftpgo.json
|
||||
copy .\output\sftpgo.db .\win-portable
|
||||
copy .\LICENSE .\win-portable\LICENSE.txt
|
||||
mkdir win-portable\templates
|
||||
xcopy .\templates .\win-portable\templates\ /E
|
||||
mkdir win-portable\static
|
||||
xcopy .\static .\win-portable\static\ /E
|
||||
mkdir win-portable\openapi
|
||||
xcopy .\openapi .\win-portable\openapi\ /E
|
||||
Compress-Archive .\win-portable\* sftpgo_portable.zip
|
||||
|
||||
- name: Upload macOS x86_64 artifact
|
||||
if: startsWith(matrix.os, 'macos-')
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: sftpgo_${{ steps.get_version.outputs.VERSION }}_${{ steps.get_os_name.outputs.OS }}_x86_64.tar.xz
|
||||
path: ./sftpgo_${{ steps.get_version.outputs.VERSION }}_${{ steps.get_os_name.outputs.OS }}_x86_64.tar.xz
|
||||
retention-days: 1
|
||||
|
||||
- name: Upload macOS arm64 artifact
|
||||
if: startsWith(matrix.os, 'macos-')
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: sftpgo_${{ steps.get_version.outputs.VERSION }}_${{ steps.get_os_name.outputs.OS }}_arm64.tar.xz
|
||||
path: ./sftpgo_${{ steps.get_version.outputs.VERSION }}_${{ steps.get_os_name.outputs.OS }}_arm64.tar.xz
|
||||
retention-days: 1
|
||||
|
||||
- name: Upload Windows installer x86_64 artifact
|
||||
if: startsWith(matrix.os, 'windows-')
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: sftpgo_${{ steps.get_version.outputs.VERSION }}_${{ steps.get_os_name.outputs.OS }}_x86_64.exe
|
||||
path: ./sftpgo_windows_x86_64.exe
|
||||
retention-days: 1
|
||||
|
||||
- name: Upload Windows installer arm64 artifact
|
||||
if: startsWith(matrix.os, 'windows-')
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: sftpgo_${{ steps.get_version.outputs.VERSION }}_${{ steps.get_os_name.outputs.OS }}_arm64.exe
|
||||
path: ./sftpgo_windows_arm64.exe
|
||||
retention-days: 1
|
||||
|
||||
- name: Upload Windows installer x86 artifact
|
||||
if: startsWith(matrix.os, 'windows-')
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: sftpgo_${{ steps.get_version.outputs.VERSION }}_${{ steps.get_os_name.outputs.OS }}_x86.exe
|
||||
path: ./sftpgo_windows_x86.exe
|
||||
retention-days: 1
|
||||
|
||||
- name: Upload Windows portable artifact
|
||||
if: startsWith(matrix.os, 'windows-')
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: sftpgo_${{ steps.get_version.outputs.VERSION }}_${{ steps.get_os_name.outputs.OS }}_portable.zip
|
||||
path: ./sftpgo_portable.zip
|
||||
retention-days: 1
|
||||
|
||||
prepare-linux:
|
||||
name: Prepare Linux binaries
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
matrix:
|
||||
include:
|
||||
- arch: amd64
|
||||
distro: ubuntu:18.04
|
||||
go-arch: amd64
|
||||
deb-arch: amd64
|
||||
rpm-arch: x86_64
|
||||
tar-arch: x86_64
|
||||
- arch: aarch64
|
||||
distro: ubuntu18.04
|
||||
go-arch: arm64
|
||||
deb-arch: arm64
|
||||
rpm-arch: aarch64
|
||||
tar-arch: arm64
|
||||
- arch: ppc64le
|
||||
distro: ubuntu18.04
|
||||
go-arch: ppc64le
|
||||
deb-arch: ppc64el
|
||||
rpm-arch: ppc64le
|
||||
tar-arch: ppc64le
|
||||
- arch: armv7
|
||||
distro: ubuntu18.04
|
||||
go-arch: arm7
|
||||
deb-arch: armhf
|
||||
rpm-arch: armv7hl
|
||||
tar-arch: armv7
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- name: Get versions
|
||||
id: get_version
|
||||
run: |
|
||||
echo ::set-output name=SFTPGO_VERSION::${GITHUB_REF/refs\/tags\//}
|
||||
echo ::set-output name=GO_VERSION::${GO_VERSION}
|
||||
echo ::set-output name=COMMIT::${GITHUB_SHA::8}
|
||||
shell: bash
|
||||
env:
|
||||
GO_VERSION: ${{ env.GO_VERSION }}
|
||||
|
||||
- name: Build on amd64
|
||||
if: ${{ matrix.arch == 'amd64' }}
|
||||
run: |
|
||||
echo '#!/bin/bash' > build.sh
|
||||
echo '' >> build.sh
|
||||
echo 'set -e' >> build.sh
|
||||
echo 'apt-get update -q -y' >> build.sh
|
||||
echo 'apt-get install -q -y curl gcc' >> build.sh
|
||||
echo 'curl --retry 5 --retry-delay 2 --connect-timeout 10 -o go.tar.gz -L https://go.dev/dl/go${{ steps.get_version.outputs.GO_VERSION }}.linux-${{ matrix.go-arch }}.tar.gz' >> build.sh
|
||||
echo 'tar -C /usr/local -xzf go.tar.gz' >> build.sh
|
||||
echo 'export PATH=$PATH:/usr/local/go/bin' >> build.sh
|
||||
echo 'go version' >> build.sh
|
||||
echo 'cd /usr/local/src' >> build.sh
|
||||
echo 'go build -buildvcs=false -trimpath -ldflags "-s -w -X github.com/drakkan/sftpgo/v2/version.commit=${{ steps.get_version.outputs.COMMIT }} -X github.com/drakkan/sftpgo/v2/version.date=`date -u +%FT%TZ`" -o sftpgo' >> build.sh
|
||||
|
||||
chmod 755 build.sh
|
||||
docker run --rm --name ubuntu-build --mount type=bind,source=`pwd`,target=/usr/local/src ${{ matrix.distro }} /usr/local/src/build.sh
|
||||
mkdir -p output/{init,sqlite,bash_completion,zsh_completion}
|
||||
echo "For documentation please take a look here:" > output/README.txt
|
||||
echo "" >> output/README.txt
|
||||
echo "https://github.com/drakkan/sftpgo/blob/${SFTPGO_VERSION}/README.md" >> output/README.txt
|
||||
cp LICENSE output/
|
||||
cp sftpgo.json output/
|
||||
cp -r templates output/
|
||||
cp -r static output/
|
||||
cp -r openapi output/
|
||||
cp init/sftpgo.service output/init/
|
||||
./sftpgo initprovider
|
||||
./sftpgo gen completion bash > output/bash_completion/sftpgo
|
||||
./sftpgo gen completion zsh > output/zsh_completion/_sftpgo
|
||||
./sftpgo gen man -d output/man/man1
|
||||
gzip output/man/man1/*
|
||||
cp sftpgo output/
|
||||
cp sftpgo.db output/sqlite/
|
||||
cd output
|
||||
tar cJvf sftpgo_${SFTPGO_VERSION}_linux_${{ matrix.tar-arch }}.tar.xz *
|
||||
cd ..
|
||||
env:
|
||||
SFTPGO_VERSION: ${{ steps.get_version.outputs.SFTPGO_VERSION }}
|
||||
|
||||
- uses: uraimo/run-on-arch-action@v2
|
||||
if: ${{ matrix.arch != 'amd64' }}
|
||||
name: Build for ${{ matrix.arch }}
|
||||
id: build
|
||||
with:
|
||||
arch: ${{ matrix.arch }}
|
||||
distro: ${{ matrix.distro }}
|
||||
setup: |
|
||||
mkdir -p "${PWD}/output"
|
||||
dockerRunArgs: |
|
||||
--volume "${PWD}/output:/output"
|
||||
shell: /bin/bash
|
||||
install: |
|
||||
apt-get update -q -y
|
||||
apt-get install -q -y curl gcc xz-utils
|
||||
GO_DOWNLOAD_ARCH=${{ matrix.go-arch }}
|
||||
if [ ${{ matrix.arch}} == 'armv7' ]
|
||||
then
|
||||
GO_DOWNLOAD_ARCH=armv6l
|
||||
fi
|
||||
curl --retry 5 --retry-delay 2 --connect-timeout 10 -o go.tar.gz -L https://go.dev/dl/go${{ steps.get_version.outputs.GO_VERSION }}.linux-${GO_DOWNLOAD_ARCH}.tar.gz
|
||||
tar -C /usr/local -xzf go.tar.gz
|
||||
run: |
|
||||
export PATH=$PATH:/usr/local/go/bin
|
||||
go version
|
||||
go build -buildvcs=false -trimpath -ldflags "-s -w -X github.com/drakkan/sftpgo/v2/version.commit=${{ steps.get_version.outputs.COMMIT }} -X github.com/drakkan/sftpgo/v2/version.date=`date -u +%FT%TZ`" -o sftpgo
|
||||
mkdir -p output/{init,sqlite,bash_completion,zsh_completion}
|
||||
echo "For documentation please take a look here:" > output/README.txt
|
||||
echo "" >> output/README.txt
|
||||
echo "https://github.com/drakkan/sftpgo/blob/${{ steps.get_version.outputs.SFTPGO_VERSION }}/README.md" >> output/README.txt
|
||||
cp LICENSE output/
|
||||
cp sftpgo.json output/
|
||||
cp -r templates output/
|
||||
cp -r static output/
|
||||
cp -r openapi output/
|
||||
cp init/sftpgo.service output/init/
|
||||
./sftpgo initprovider
|
||||
./sftpgo gen completion bash > output/bash_completion/sftpgo
|
||||
./sftpgo gen completion zsh > output/zsh_completion/_sftpgo
|
||||
./sftpgo gen man -d output/man/man1
|
||||
gzip output/man/man1/*
|
||||
cp sftpgo output/
|
||||
cp sftpgo.db output/sqlite/
|
||||
cd output
|
||||
tar cJvf sftpgo_${{ steps.get_version.outputs.SFTPGO_VERSION }}_linux_${{ matrix.tar-arch }}.tar.xz *
|
||||
cd ..
|
||||
|
||||
- name: Upload build artifact for ${{ matrix.arch }}
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: sftpgo_${{ steps.get_version.outputs.SFTPGO_VERSION }}_linux_${{ matrix.tar-arch }}.tar.xz
|
||||
path: ./output/sftpgo_${{ steps.get_version.outputs.SFTPGO_VERSION }}_linux_${{ matrix.tar-arch }}.tar.xz
|
||||
retention-days: 1
|
||||
|
||||
- name: Build Packages
|
||||
id: build_linux_pkgs
|
||||
run: |
|
||||
export NFPM_ARCH=${{ matrix.go-arch }}
|
||||
cd pkgs
|
||||
./build.sh
|
||||
PKG_VERSION=${SFTPGO_VERSION:1}
|
||||
echo "::set-output name=pkg-version::${PKG_VERSION}"
|
||||
env:
|
||||
SFTPGO_VERSION: ${{ steps.get_version.outputs.SFTPGO_VERSION }}
|
||||
|
||||
- name: Upload Deb Package
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: sftpgo_${{ steps.build_linux_pkgs.outputs.pkg-version }}-1_${{ matrix.deb-arch}}.deb
|
||||
path: ./pkgs/dist/deb/sftpgo_${{ steps.build_linux_pkgs.outputs.pkg-version }}-1_${{ matrix.deb-arch}}.deb
|
||||
retention-days: 1
|
||||
|
||||
- name: Upload RPM Package
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: sftpgo-${{ steps.build_linux_pkgs.outputs.pkg-version }}-1.${{ matrix.rpm-arch}}.rpm
|
||||
path: ./pkgs/dist/rpm/sftpgo-${{ steps.build_linux_pkgs.outputs.pkg-version }}-1.${{ matrix.rpm-arch}}.rpm
|
||||
retention-days: 1
|
||||
|
||||
prepare-linux-bundle:
|
||||
name: Prepare Linux bundle
|
||||
needs: prepare-linux
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- name: Get versions
|
||||
id: get_version
|
||||
run: |
|
||||
echo ::set-output name=SFTPGO_VERSION::${GITHUB_REF/refs\/tags\//}
|
||||
shell: bash
|
||||
|
||||
- name: Download amd64 artifact
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: sftpgo_${{ steps.get_version.outputs.SFTPGO_VERSION }}_linux_x86_64.tar.xz
|
||||
|
||||
- name: Download arm64 artifact
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: sftpgo_${{ steps.get_version.outputs.SFTPGO_VERSION }}_linux_arm64.tar.xz
|
||||
|
||||
- name: Download ppc64le artifact
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: sftpgo_${{ steps.get_version.outputs.SFTPGO_VERSION }}_linux_ppc64le.tar.xz
|
||||
|
||||
- name: Download armv7 artifact
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: sftpgo_${{ steps.get_version.outputs.SFTPGO_VERSION }}_linux_armv7.tar.xz
|
||||
|
||||
- name: Build bundle
|
||||
shell: bash
|
||||
run: |
|
||||
mkdir -p bundle/{arm64,ppc64le,armv7}
|
||||
cd bundle
|
||||
tar xvf ../sftpgo_${SFTPGO_VERSION}_linux_x86_64.tar.xz
|
||||
cd arm64
|
||||
tar xvf ../../sftpgo_${SFTPGO_VERSION}_linux_arm64.tar.xz sftpgo
|
||||
cd ../ppc64le
|
||||
tar xvf ../../sftpgo_${SFTPGO_VERSION}_linux_ppc64le.tar.xz sftpgo
|
||||
cd ../armv7
|
||||
tar xvf ../../sftpgo_${SFTPGO_VERSION}_linux_armv7.tar.xz sftpgo
|
||||
cd ..
|
||||
tar cJvf sftpgo_${SFTPGO_VERSION}_linux_bundle.tar.xz *
|
||||
cd ..
|
||||
env:
|
||||
SFTPGO_VERSION: ${{ steps.get_version.outputs.SFTPGO_VERSION }}
|
||||
|
||||
- name: Upload Linux bundle
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: sftpgo_${{ steps.get_version.outputs.SFTPGO_VERSION }}_linux_bundle.tar.xz
|
||||
path: ./bundle/sftpgo_${{ steps.get_version.outputs.SFTPGO_VERSION }}_linux_bundle.tar.xz
|
||||
retention-days: 1
|
||||
|
||||
create-release:
|
||||
name: Release
|
||||
needs: [prepare-linux-bundle, prepare-sources-with-deps, prepare-window-mac]
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- name: Get versions
|
||||
id: get_version
|
||||
run: |
|
||||
SFTPGO_VERSION=${GITHUB_REF/refs\/tags\//}
|
||||
PKG_VERSION=${SFTPGO_VERSION:1}
|
||||
echo ::set-output name=SFTPGO_VERSION::${SFTPGO_VERSION}
|
||||
echo "::set-output name=PKG_VERSION::${PKG_VERSION}"
|
||||
shell: bash
|
||||
|
||||
- name: Download amd64 artifact
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: sftpgo_${{ steps.get_version.outputs.SFTPGO_VERSION }}_linux_x86_64.tar.xz
|
||||
|
||||
- name: Download arm64 artifact
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: sftpgo_${{ steps.get_version.outputs.SFTPGO_VERSION }}_linux_arm64.tar.xz
|
||||
|
||||
- name: Download ppc64le artifact
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: sftpgo_${{ steps.get_version.outputs.SFTPGO_VERSION }}_linux_ppc64le.tar.xz
|
||||
|
||||
- name: Download armv7 artifact
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: sftpgo_${{ steps.get_version.outputs.SFTPGO_VERSION }}_linux_armv7.tar.xz
|
||||
|
||||
- name: Download Linux bundle artifact
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: sftpgo_${{ steps.get_version.outputs.SFTPGO_VERSION }}_linux_bundle.tar.xz
|
||||
|
||||
- name: Download Deb amd64 artifact
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: sftpgo_${{ steps.get_version.outputs.PKG_VERSION }}-1_amd64.deb
|
||||
|
||||
- name: Download Deb arm64 artifact
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: sftpgo_${{ steps.get_version.outputs.PKG_VERSION }}-1_arm64.deb
|
||||
|
||||
- name: Download Deb ppc64le artifact
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: sftpgo_${{ steps.get_version.outputs.PKG_VERSION }}-1_ppc64el.deb
|
||||
|
||||
- name: Download Deb armv7 artifact
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: sftpgo_${{ steps.get_version.outputs.PKG_VERSION }}-1_armhf.deb
|
||||
|
||||
- name: Download RPM x86_64 artifact
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: sftpgo-${{ steps.get_version.outputs.PKG_VERSION }}-1.x86_64.rpm
|
||||
|
||||
- name: Download RPM aarch64 artifact
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: sftpgo-${{ steps.get_version.outputs.PKG_VERSION }}-1.aarch64.rpm
|
||||
|
||||
- name: Download RPM ppc64le artifact
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: sftpgo-${{ steps.get_version.outputs.PKG_VERSION }}-1.ppc64le.rpm
|
||||
|
||||
- name: Download RPM armv7 artifact
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: sftpgo-${{ steps.get_version.outputs.PKG_VERSION }}-1.armv7hl.rpm
|
||||
|
||||
- name: Download macOS x86_64 artifact
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: sftpgo_${{ steps.get_version.outputs.SFTPGO_VERSION }}_macOS_x86_64.tar.xz
|
||||
|
||||
- name: Download macOS arm64 artifact
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: sftpgo_${{ steps.get_version.outputs.SFTPGO_VERSION }}_macOS_arm64.tar.xz
|
||||
|
||||
- name: Download Windows installer x86_64 artifact
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: sftpgo_${{ steps.get_version.outputs.SFTPGO_VERSION }}_windows_x86_64.exe
|
||||
|
||||
- name: Download Windows installer arm64 artifact
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: sftpgo_${{ steps.get_version.outputs.SFTPGO_VERSION }}_windows_arm64.exe
|
||||
|
||||
- name: Download Windows installer x86 artifact
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: sftpgo_${{ steps.get_version.outputs.SFTPGO_VERSION }}_windows_x86.exe
|
||||
|
||||
- name: Download Windows portable artifact
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: sftpgo_${{ steps.get_version.outputs.SFTPGO_VERSION }}_windows_portable.zip
|
||||
|
||||
- name: Download source with deps artifact
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: sftpgo_${{ steps.get_version.outputs.SFTPGO_VERSION }}_src_with_deps.tar.xz
|
||||
|
||||
- name: Create release
|
||||
run: |
|
||||
mv sftpgo_windows_x86_64.exe sftpgo_${SFTPGO_VERSION}_windows_x86_64.exe
|
||||
mv sftpgo_windows_arm64.exe sftpgo_${SFTPGO_VERSION}_windows_arm64.exe
|
||||
mv sftpgo_windows_x86.exe sftpgo_${SFTPGO_VERSION}_windows_x86.exe
|
||||
mv sftpgo_portable.zip sftpgo_${SFTPGO_VERSION}_windows_portable.zip
|
||||
gh release create "${SFTPGO_VERSION}" -t "${SFTPGO_VERSION}"
|
||||
gh release upload "${SFTPGO_VERSION}" sftpgo_*.xz --clobber
|
||||
gh release upload "${SFTPGO_VERSION}" sftpgo-*.rpm --clobber
|
||||
gh release upload "${SFTPGO_VERSION}" sftpgo_*.deb --clobber
|
||||
gh release upload "${SFTPGO_VERSION}" sftpgo_*.exe --clobber
|
||||
gh release upload "${SFTPGO_VERSION}" sftpgo_*.zip --clobber
|
||||
gh release view "${SFTPGO_VERSION}"
|
||||
env:
|
||||
GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}}
|
||||
SFTPGO_VERSION: ${{ steps.get_version.outputs.SFTPGO_VERSION }}
|
||||
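Two bash expansions recur throughout the jobs above: `${GITHUB_REF/refs\/tags\//}` strips the `refs/tags/` prefix from the pushed tag to obtain the release version, and `${SFTPGO_VERSION:1}` drops the leading `v` to produce the deb/rpm package version. A minimal sketch, using a hypothetical `v2.3.0` tag:

```bash
#!/bin/bash
# Hypothetical ref, as GitHub sets it on a tag push.
GITHUB_REF="refs/tags/v2.3.0"

# Remove the "refs/tags/" prefix: SFTPGO_VERSION becomes "v2.3.0".
SFTPGO_VERSION=${GITHUB_REF/refs\/tags\//}

# Drop the leading "v": PKG_VERSION becomes "2.3.0", matching
# package names of the form sftpgo_2.3.0-1_amd64.deb used above.
PKG_VERSION=${SFTPGO_VERSION:1}

echo "${SFTPGO_VERSION} -> ${PKG_VERSION}"
```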
.gitignore (vendored, new file, +3)
@@ -0,0 +1,3 @@
# compilation output
sftpgo
sftpgo.exe
.golangci.yml (new file, +52)
@@ -0,0 +1,52 @@
run:
  timeout: 5m
  issues-exit-code: 1
  tests: true


linters-settings:
  dupl:
    threshold: 150
  errcheck:
    check-type-assertions: false
    check-blank: false
  goconst:
    min-len: 3
    min-occurrences: 3
  gocyclo:
    min-complexity: 15
  gofmt:
    simplify: true
  goimports:
    local-prefixes: github.com/drakkan/sftpgo
  #govet:
    # report about shadowed variables
    #check-shadowing: true
    #enable:
    #  - fieldalignment

issues:
  include:
    - EXC0002
    - EXC0012
    - EXC0013
    - EXC0014
    - EXC0015

linters:
  enable:
    - goconst
    - errcheck
    - gofmt
    - goimports
    - revive
    - unconvert
    - unparam
    - bodyclose
    - gocyclo
    - misspell
    - whitespace
    - dupl
    - rowserrcheck
    - dogsled
    - govet
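This configuration is picked up automatically when the linter runs from the repository root; a minimal sketch, assuming golangci-lint is installed locally:

```bash
# .golangci.yml is discovered automatically; the 5 minute timeout
# is already set in the file, so no extra flags are needed.
golangci-lint run ./...
```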
.travis.yml (deleted, -24)
@@ -1,24 +0,0 @@
language: go

os:
  - linux
  - osx

go:
  - "1.12.x"
  - "1.13.x"

env:
  - GO111MODULE=on

before_script:
  - sqlite3 sftpgo.db 'CREATE TABLE "users" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "username" varchar(255) NOT NULL UNIQUE, "password" varchar(255) NULL, "public_keys" text NULL, "home_dir" varchar(255) NOT NULL, "uid" integer NOT NULL, "gid" integer NOT NULL, "max_sessions" integer NOT NULL, "quota_size" bigint NOT NULL, "quota_files" integer NOT NULL, "permissions" text NOT NULL, "used_quota_size" bigint NOT NULL, "used_quota_files" integer NOT NULL, "last_quota_update" bigint NOT NULL, "upload_bandwidth" integer NOT NULL, "download_bandwidth" integer NOT NULL);'

install:
  - go get -v -t ./...

script:
  - go test -v ./... -coverprofile=coverage.txt -covermode=atomic

after_success:
  - bash <(curl -s https://codecov.io/bash)
DCO (new file, +34)
@@ -0,0 +1,34 @@
Developer Certificate of Origin
Version 1.1

Copyright (C) 2004, 2006 The Linux Foundation and its contributors.

Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.


Developer's Certificate of Origin 1.1

By making a contribution to this project, I certify that:

(a) The contribution was created in whole or in part by me and I
    have the right to submit it under the open source license
    indicated in the file; or

(b) The contribution is based upon previous work that, to the best
    of my knowledge, is covered under an appropriate open source
    license and I have the right under that license to submit that
    work with modifications, whether created in whole or in part
    by me, under the same open source license (unless I am
    permitted to submit under a different license), as indicated
    in the file; or

(c) The contribution was provided directly to me by some other
    person who certified (a), (b) or (c) and I have not modified
    it.

(d) I understand and agree that this project and the contribution
    are public and that a record of the contribution (including all
    personal information I submit with it, including my sign-off) is
    maintained indefinitely and may be redistributed consistent with
    this project or the open source license(s) involved.
Dockerfile (new file, +65)
@@ -0,0 +1,65 @@
FROM golang:1.18-bullseye as builder

ENV GOFLAGS="-mod=readonly"

RUN mkdir -p /workspace
WORKDIR /workspace

ARG GOPROXY

COPY go.mod go.sum ./
RUN go mod download

ARG COMMIT_SHA

# This ARG allows to disable some optional features and it might be useful if you build the image yourself.
# For example you can disable S3 and GCS support like this:
# --build-arg FEATURES=nos3,nogcs
ARG FEATURES

COPY . .

RUN set -xe && \
    export COMMIT_SHA=${COMMIT_SHA:-$(git describe --always --abbrev=8 --dirty)} && \
    go build $(if [ -n "${FEATURES}" ]; then echo "-tags ${FEATURES}"; fi) -trimpath -ldflags "-s -w -X github.com/drakkan/sftpgo/v2/version.commit=${COMMIT_SHA} -X github.com/drakkan/sftpgo/v2/version.date=`date -u +%FT%TZ`" -v -o sftpgo

# Set to "true" to download the "official" plugins in /usr/local/bin
ARG DOWNLOAD_PLUGINS=false

RUN if [ "${DOWNLOAD_PLUGINS}" = "true" ]; then apt-get update && apt-get install --no-install-recommends -y curl && ./docker/scripts/download-plugins.sh; fi

FROM debian:bullseye-slim

# Set to "true" to install jq and the optional git and rsync dependencies
ARG INSTALL_OPTIONAL_PACKAGES=false

RUN apt-get update && apt-get install --no-install-recommends -y ca-certificates media-types && rm -rf /var/lib/apt/lists/*

RUN if [ "${INSTALL_OPTIONAL_PACKAGES}" = "true" ]; then apt-get update && apt-get install --no-install-recommends -y jq git rsync && rm -rf /var/lib/apt/lists/*; fi

RUN mkdir -p /etc/sftpgo /var/lib/sftpgo /usr/share/sftpgo /srv/sftpgo/data /srv/sftpgo/backups

RUN groupadd --system -g 1000 sftpgo && \
    useradd --system --gid sftpgo --no-create-home \
    --home-dir /var/lib/sftpgo --shell /usr/sbin/nologin \
    --comment "SFTPGo user" --uid 1000 sftpgo

COPY --from=builder /workspace/sftpgo.json /etc/sftpgo/sftpgo.json
COPY --from=builder /workspace/templates /usr/share/sftpgo/templates
COPY --from=builder /workspace/static /usr/share/sftpgo/static
COPY --from=builder /workspace/openapi /usr/share/sftpgo/openapi
COPY --from=builder /workspace/sftpgo /usr/local/bin/sftpgo-plugin-* /usr/local/bin/

# Log to the stdout so the logs will be available using docker logs
ENV SFTPGO_LOG_FILE_PATH=""

# Modify the default configuration file
RUN sed -i 's|"users_base_dir": "",|"users_base_dir": "/srv/sftpgo/data",|' /etc/sftpgo/sftpgo.json && \
    sed -i 's|"backups"|"/srv/sftpgo/backups"|' /etc/sftpgo/sftpgo.json

RUN chown -R sftpgo:sftpgo /etc/sftpgo /srv/sftpgo && chown sftpgo:sftpgo /var/lib/sftpgo && chmod 700 /srv/sftpgo/backups

WORKDIR /var/lib/sftpgo
USER 1000:1000

CMD ["sftpgo", "serve"]
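A minimal usage sketch for the image above; the `sftpgo:local` tag and volume names are placeholders, and the published ports assume SFTPGo's default SFTP (2022) and HTTP (8080) bindings:

```bash
# Build, optionally trimming features via the FEATURES build arg.
docker build --build-arg FEATURES=nos3,nogcs -t sftpgo:local .

# Run with named volumes for user data and the working directory.
docker run --rm -p 2022:2022 -p 8080:8080 \
  -v sftpgo_data:/srv/sftpgo \
  -v sftpgo_home:/var/lib/sftpgo \
  sftpgo:local
```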
Dockerfile.alpine (new file, +65)
@@ -0,0 +1,65 @@
FROM golang:1.18-alpine3.16 AS builder

ENV GOFLAGS="-mod=readonly"

RUN apk add --update --no-cache bash ca-certificates curl git gcc g++

RUN mkdir -p /workspace
WORKDIR /workspace

ARG GOPROXY

COPY go.mod go.sum ./
RUN go mod download

ARG COMMIT_SHA

# This ARG allows to disable some optional features and it might be useful if you build the image yourself.
# For example you can disable S3 and GCS support like this:
# --build-arg FEATURES=nos3,nogcs
ARG FEATURES

COPY . .

RUN set -xe && \
    export COMMIT_SHA=${COMMIT_SHA:-$(git describe --always --abbrev=8 --dirty)} && \
    go build $(if [ -n "${FEATURES}" ]; then echo "-tags ${FEATURES}"; fi) -trimpath -ldflags "-s -w -X github.com/drakkan/sftpgo/v2/version.commit=${COMMIT_SHA} -X github.com/drakkan/sftpgo/v2/version.date=`date -u +%FT%TZ`" -v -o sftpgo


FROM alpine:3.16

# Set to "true" to install jq and the optional git and rsync dependencies
ARG INSTALL_OPTIONAL_PACKAGES=false

RUN apk add --update --no-cache ca-certificates tzdata mailcap

RUN if [ "${INSTALL_OPTIONAL_PACKAGES}" = "true" ]; then apk add --update --no-cache jq git rsync; fi

# set up nsswitch.conf for Go's "netgo" implementation
# https://github.com/gliderlabs/docker-alpine/issues/367#issuecomment-424546457
RUN test ! -e /etc/nsswitch.conf && echo 'hosts: files dns' > /etc/nsswitch.conf

RUN mkdir -p /etc/sftpgo /var/lib/sftpgo /usr/share/sftpgo /srv/sftpgo/data /srv/sftpgo/backups

RUN addgroup -g 1000 -S sftpgo && \
    adduser -u 1000 -h /var/lib/sftpgo -s /sbin/nologin -G sftpgo -S -D -H -g "SFTPGo user" sftpgo

COPY --from=builder /workspace/sftpgo.json /etc/sftpgo/sftpgo.json
COPY --from=builder /workspace/templates /usr/share/sftpgo/templates
COPY --from=builder /workspace/static /usr/share/sftpgo/static
COPY --from=builder /workspace/openapi /usr/share/sftpgo/openapi
COPY --from=builder /workspace/sftpgo /usr/local/bin/

# Log to the stdout so the logs will be available using docker logs
ENV SFTPGO_LOG_FILE_PATH=""

# Modify the default configuration file
RUN sed -i 's|"users_base_dir": "",|"users_base_dir": "/srv/sftpgo/data",|' /etc/sftpgo/sftpgo.json && \
    sed -i 's|"backups"|"/srv/sftpgo/backups"|' /etc/sftpgo/sftpgo.json

RUN chown -R sftpgo:sftpgo /etc/sftpgo /srv/sftpgo && chown sftpgo:sftpgo /var/lib/sftpgo && chmod 700 /srv/sftpgo/backups

WORKDIR /var/lib/sftpgo
USER 1000:1000

CMD ["sftpgo", "serve"]
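In both images the `FEATURES` build arg is expanded into Go build tags by the `go build $(if [ -n "${FEATURES}" ]; ...)` line; outside Docker the equivalent is passing the tags directly, as in this sketch:

```bash
# Mirrors --build-arg FEATURES=nos3,nogcs: the nos3 and nogcs tags
# compile SFTPGo without S3 and GCS support.
go build -tags nos3,nogcs -trimpath -ldflags "-s -w" -o sftpgo
```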
Dockerfile.distroless (new file, +57)
@@ -0,0 +1,57 @@
FROM golang:1.18-bullseye as builder

ENV CGO_ENABLED=0 GOFLAGS="-mod=readonly"

RUN mkdir -p /workspace
WORKDIR /workspace

ARG GOPROXY

COPY go.mod go.sum ./
RUN go mod download

ARG COMMIT_SHA

# This ARG allows to disable some optional features and it might be useful if you build the image yourself.
# For this variant we disable SQLite support since it requires CGO and so a C runtime which is not installed
# in distroless/static-* images
ARG FEATURES=nosqlite

COPY . .

RUN set -xe && \
    export COMMIT_SHA=${COMMIT_SHA:-$(git describe --always --abbrev=8 --dirty)} && \
    go build $(if [ -n "${FEATURES}" ]; then echo "-tags ${FEATURES}"; fi) -trimpath -ldflags "-s -w -X github.com/drakkan/sftpgo/v2/version.commit=${COMMIT_SHA} -X github.com/drakkan/sftpgo/v2/version.date=`date -u +%FT%TZ`" -v -o sftpgo

# Modify the default configuration file
RUN sed -i 's|"users_base_dir": "",|"users_base_dir": "/srv/sftpgo/data",|' sftpgo.json && \
    sed -i 's|"backups"|"/srv/sftpgo/backups"|' sftpgo.json && \
    sed -i 's|"sqlite"|"bolt"|' sftpgo.json

RUN apt-get update && apt-get install --no-install-recommends -y media-types && rm -rf /var/lib/apt/lists/*

RUN mkdir /etc/sftpgo /var/lib/sftpgo /srv/sftpgo

FROM gcr.io/distroless/static-debian11

COPY --from=builder --chown=1000:1000 /etc/sftpgo /etc/sftpgo
COPY --from=builder --chown=1000:1000 /srv/sftpgo /srv/sftpgo
COPY --from=builder --chown=1000:1000 /var/lib/sftpgo /var/lib/sftpgo
COPY --from=builder --chown=1000:1000 /workspace/sftpgo.json /etc/sftpgo/sftpgo.json
COPY --from=builder /workspace/templates /usr/share/sftpgo/templates
COPY --from=builder /workspace/static /usr/share/sftpgo/static
COPY --from=builder /workspace/openapi /usr/share/sftpgo/openapi
COPY --from=builder /workspace/sftpgo /usr/local/bin/
COPY --from=builder /etc/mime.types /etc/mime.types

# Log to the stdout so the logs will be available using docker logs
ENV SFTPGO_LOG_FILE_PATH=""
# These env vars are required to avoid the following error when calling user.Current():
# unable to get the current user: user: Current requires cgo or $USER set in environment
ENV USER=sftpgo
ENV HOME=/var/lib/sftpgo

WORKDIR /var/lib/sftpgo
USER 1000:1000

CMD ["sftpgo", "serve"]
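Because this variant is compiled with `CGO_ENABLED=0` and `FEATURES=nosqlite`, the sed step above switches the default provider to `bolt`; a run sketch with a hypothetical `sftpgo:distroless` tag, persisting the bolt database through the working directory:

```bash
# The bolt database is created under /var/lib/sftpgo (the WORKDIR),
# so mount a volume there to keep it across container restarts.
docker run --rm -p 2022:2022 -p 8080:8080 \
  -v sftpgo_lib:/var/lib/sftpgo \
  -v sftpgo_data:/srv/sftpgo \
  sftpgo:distroless
```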
LICENSE (145 changed lines)
@@ -1,5 +1,5 @@
-GNU GENERAL PUBLIC LICENSE
-Version 3, 29 June 2007
+GNU AFFERO GENERAL PUBLIC LICENSE
+Version 3, 19 November 2007
 
 Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
 Everyone is permitted to copy and distribute verbatim copies
@@ -7,17 +7,15 @@
 
 Preamble
 
-The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
+The GNU Affero General Public License is a free, copyleft license for
+software and other kinds of works, specifically designed to ensure
+cooperation with the community in the case of network server software.
 
 The licenses for most software and other practical works are designed
 to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
+our General Public Licenses are intended to guarantee your freedom to
 share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
+software for all its users.
 
 When we speak of free software, we are referring to freedom, not
 price. Our General Public Licenses are designed to make sure that you
@@ -26,44 +24,34 @@ them if you wish), that you receive source code or can get it if you
 want it, that you can change the software or use pieces of it in new
 free programs, and that you know you can do these things.
 
-To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
+Developers that use our General Public Licenses protect your rights
+with two steps: (1) assert copyright on the software, and (2) offer
+you this License which gives you legal permission to copy, distribute
+and/or modify the software.
 
-For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
+A secondary benefit of defending all users' freedom is that
+improvements made in alternate versions of the program, if they
+receive widespread use, become available for other developers to
+incorporate. Many developers of free software are heartened and
+encouraged by the resulting cooperation. However, in the case of
+software used on network servers, this result may fail to come about.
+The GNU General Public License permits making a modified version and
+letting the public access it on a server without ever releasing its
+source code to the public.
 
-Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
+The GNU Affero General Public License is designed specifically to
+ensure that, in such cases, the modified source code becomes available
+to the community. It requires the operator of a network server to
+provide the source code of the modified version running there to the
+users of that server. Therefore, public use of a modified version, on
+a publicly accessible server, gives the public access to the source
+code of the modified version.
 
-For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
-Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
-Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
+An older license, called the Affero General Public License and
+published by Affero, was designed to accomplish similar goals. This is
+a different license, not a version of the Affero GPL, but Affero has
+released a new version of the Affero GPL which permits relicensing under
+this license.
 
 The precise terms and conditions for copying, distribution and
 modification follow.
@@ -72,7 +60,7 @@ modification follow.
 
 0. Definitions.
 
-"This License" refers to version 3 of the GNU General Public License.
+"This License" refers to version 3 of the GNU Affero General Public License.
 
 "Copyright" also means copyright-like laws that apply to other kinds of
 works, such as semiconductor masks.
@@ -549,35 +537,45 @@ to collect a royalty for further conveying from those to whom you convey
 the Program, the only way you could satisfy both those terms and this
 License would be to refrain entirely from conveying the Program.
 
-13. Use with the GNU Affero General Public License.
+13. Remote Network Interaction; Use with the GNU General Public License.
+
+Notwithstanding any other provision of this License, if you modify the
+Program, your modified version must prominently offer all users
+interacting with it remotely through a computer network (if your version
+supports such interaction) an opportunity to receive the Corresponding
+Source of your version by providing access to the Corresponding Source
+from a network server at no charge, through some standard or customary
+means of facilitating copying of software. This Corresponding Source
+shall include the Corresponding Source for any work covered by version 3
+of the GNU General Public License that is incorporated pursuant to the
+following paragraph.
 
 Notwithstanding any other provision of this License, you have
 permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
+under version 3 of the GNU General Public License into a single
 combined work, and to convey the resulting work. The terms of this
 License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
+but the work with which it is combined will remain governed by version
+3 of the GNU General Public License.
 
 14. Revised Versions of this License.
 
 The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
+the GNU Affero General Public License from time to time. Such new versions
+will be similar in spirit to the present version, but may differ in detail to
 address new problems or concerns.
 
 Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU General
+Program specifies that a certain numbered version of the GNU Affero General
 Public License "or any later version" applies to it, you have the
 option of following the terms and conditions either of that numbered
 version or of any later version published by the Free Software
 Foundation. If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
+GNU Affero General Public License, you may choose any version ever published
 by the Free Software Foundation.
 
 If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
+versions of the GNU Affero General Public License can be used, that proxy's
 public statement of acceptance of a version permanently authorizes you
 to choose that version for the Program.
 
@@ -635,40 +633,29 @@ the "copyright" line and a pointer to where the full notice is found.
 Copyright (C) <year> <name of author>
 
 This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
+it under the terms of the GNU Affero General Public License as published
+by the Free Software Foundation, either version 3 of the License, or
 (at your option) any later version.
 
 This program is distributed in the hope that it will be useful,
 but WITHOUT ANY WARRANTY; without even the implied warranty of
 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU General Public License for more details.
+GNU Affero General Public License for more details.
 
-You should have received a copy of the GNU General Public License
+You should have received a copy of the GNU Affero General Public License
 along with this program. If not, see <https://www.gnu.org/licenses/>.
 
 Also add information on how to contact you by electronic and paper mail.
 
-If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
-<program> Copyright (C) <year> <name of author>
-This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
-This is free software, and you are welcome to redistribute it
-under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
+If your software can interact with users remotely through a computer
+network, you should also make sure that it provides a way for users to
+get its source. For example, if your program is a web application, its
+interface could display a "Source" link that leads users to an archive
+of the code. There are many ways you could offer source, and different
+solutions will be better for different programs; see section 13 for the
+specific requirements.
 
 You should also get your employer (if you work as a programmer) or school,
 if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<https://www.gnu.org/licenses/>.
-
-The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-<https://www.gnu.org/licenses/why-not-lgpl.html>.
+For more information on this, and how to apply and follow the GNU AGPL, see
+<https://www.gnu.org/licenses/>.
616
README.md
616
README.md
@@ -1,410 +1,334 @@
|
||||
# SFTPGo
|
||||
[](https://travis-ci.org/drakkan/sftpgo) [](https://codecov.io/gh/drakkan/sftpgo/branch/master) [](https://goreportcard.com/report/github.com/drakkan/sftpgo) [](https://www.gnu.org/licenses/gpl-3.0) [](https://github.com/avelino/awesome-go)
|
||||
|
||||
Full featured and highly configurable SFTP server
|
||||

|
||||
[](https://codecov.io/gh/drakkan/sftpgo/branch/main)
|
||||
[](https://www.gnu.org/licenses/agpl-3.0)
|
||||
[](https://hub.docker.com/r/drakkan/sftpgo)
|
||||
[](https://github.com/avelino/awesome-go)
|
||||
|
||||
[English](./README.md) | [简体中文](./README.zh_CN.md)
|
||||
|
||||
Fully featured and highly configurable SFTP server with optional HTTP/S, FTP/S and WebDAV support.
|
||||
Several storage backends are supported: local filesystem, encrypted local filesystem, S3 (compatible) Object Storage, Google Cloud Storage, Azure Blob Storage, SFTP.
|
||||
|
||||
## Sponsors
|
||||
|
||||
If you find SFTPGo useful please consider supporting this Open Source project.
|
||||
|
||||
Maintaining and evolving SFTPGo is a lot of work - easily the equivalent of a full time job - for me.
|
||||
|
||||
I'd like to make SFTPGo into a sustainable long term project and would not like to introduce a dual licensing option and limit some features to the proprietary version only.
|
||||
|
||||
If you use SFTPGo, it is in your best interest to ensure that the project you rely on stays healthy and well maintained.
|
||||
This can only happen with your donations and [sponsorships](https://github.com/sponsors/drakkan) :heart:
|
||||
|
||||
If you just take and don't return anything back, the project will die in the long run and you will be forced to pay for a similar proprietary solution.
|
||||
|
||||
More [info](https://github.com/drakkan/sftpgo/issues/452).
|
||||
|
||||
Thank you to our sponsors!
|
||||
|
||||
[<img src="https://www.7digital.com/wp-content/themes/sevendigital/images/top_logo.png" alt="7digital logo">](https://www.7digital.com/)
|
||||
|
||||
## Features
|
||||
|
||||
- Each account is chrooted to his Home Dir.
|
||||
- SFTP accounts are virtual accounts stored in a "data provider".
|
||||
- SQLite, MySQL, PostgreSQL and bbolt (key/value store in pure Go) data providers are supported.
|
||||
- Public key and password authentication. Multiple public keys per user are supported.
|
||||
- Quota support: accounts can have individual quota expressed as max total size and/or max number of files.
|
||||
- Bandwidth throttling is supported, with distinct settings for upload and download.
|
||||
- Per user maximum concurrent sessions.
|
||||
- Per user permissions: list directories content, upload, overwrite, download, delete, rename, create directories, create symlinks can be enabled or disabled.
|
||||
- Per user files/folders ownership: you can map all the users to the system account that runs SFTPGo (all platforms are supported) or you can run SFTPGo as root user and map each user or group of users to a different system account (*NIX only).
|
||||
- Configurable custom commands and/or HTTP notifications on upload, download, delete or rename.
|
||||
- Support for serving local filesystem, encrypted local filesystem, S3 Compatible Object Storage, Google Cloud Storage, Azure Blob Storage or other SFTP accounts over SFTP/SCP/FTP/WebDAV.
|
||||
- Virtual folders are supported: a virtual folder can use any of the supported storage backends. So you can have, for example, an S3 user that exposes a GCS bucket (or part of it) on a specified path and an encrypted local filesystem on another one. Virtual folders can be private or shared among multiple users, for shared virtual folders you can define different quota limits for each user.
|
||||
- Configurable [custom commands and/or HTTP hooks](./docs/custom-actions.md) on upload, pre-upload, download, pre-download, delete, pre-delete, rename, mkdir, rmdir on SSH commands and on user add, update and delete.
|
||||
- Virtual accounts stored within a "data provider".
|
||||
- SQLite, MySQL, PostgreSQL, CockroachDB, Bolt (key/value store in pure Go) and in-memory data providers are supported.
|
||||
- Chroot isolation for local accounts. Cloud-based accounts can be restricted to a certain base path.
|
||||
- Per-user and per-directory virtual permissions, for each exposed path you can allow or deny: directory listing, upload, overwrite, download, delete, rename, create directories, create symlinks, change owner/group/file mode and modification time.
|
||||
- [REST API](./docs/rest-api.md) for users and folders management, data retention, backup, restore and real time reports of the active connections with possibility of forcibly closing a connection.
|
||||
- [Web based administration interface](./docs/web-admin.md) to easily manage users, folders and connections.
|
||||
- [Web client interface](./docs/web-client.md) so that end users can change their credentials, manage and share their files in the browser.
|
||||
- Public key and password authentication. Multiple public keys per-user are supported.
|
||||
- SSH user [certificate authentication](https://cvsweb.openbsd.org/src/usr.bin/ssh/PROTOCOL.certkeys?rev=1.8).
|
||||
- Keyboard interactive authentication. You can easily setup a customizable multi-factor authentication.
|
||||
- Partial authentication. You can configure multi-step authentication requiring, for example, the user password after successful public key authentication.
|
||||
- Per-user authentication methods.
|
||||
- [Two-factor authentication](./docs/howto/two-factor-authentication.md) based on time-based one time passwords (RFC 6238) which works with Authy, Google Authenticator and other compatible apps.
|
||||
- Simplified user administrations using [groups](./docs/groups.md).
|
||||
- Custom authentication via external programs/HTTP API.
|
||||
- Web Client and Web Admin user interfaces support [OpenID Connect](https://openid.net/connect/) authentication and so they can be integrated with identity providers such as [Keycloak](https://www.keycloak.org/). You can find more details [here](./docs/oidc.md).
|
||||
- [Data At Rest Encryption](./docs/dare.md).
|
||||
- Dynamic user modification before login via external programs/HTTP API.
|
||||
- Quota support: accounts can have individual disk quota expressed as max total size and/or max number of files.
|
||||
- Bandwidth throttling, with separate settings for upload and download and overrides based on the client's IP address.
|
||||
- Data transfer bandwidth limits, with total limit or separate settings for uploads and downloads and overrides based on the client's IP address. Limits can be reset using the REST API.
|
||||
- Per-protocol [rate limiting](./docs/rate-limiting.md) is supported and can be optionally connected to the built-in defender to automatically block hosts that repeatedly exceed the configured limit.
|
||||
- Per-user maximum concurrent sessions.
|
||||
- Per-user and global IP filters: login can be restricted to specific ranges of IP addresses or to a specific IP address.
|
||||
- Per-user and per-directory shell like patterns filters: files can be allowed, denied and optionally hidden based on shell like patterns.
|
||||
- Automatically terminating idle connections.
|
||||
- Automatic blocklist management using the built-in [defender](./docs/defender.md).
|
||||
- Geo-IP filtering using a [plugin](https://github.com/sftpgo/sftpgo-plugin-geoipfilter).
|
||||
- Atomic uploads are configurable.
|
||||
- Optional SCP support.
|
||||
- REST API for users and quota management and real time reports for the active connections with possibility of forcibly closing a connection.
|
||||
- Prometheus metrics are exposed.
|
||||
- Configuration is a your choice: JSON, TOML, YAML, HCL, envfile are supported.
|
||||
- Log files are accurate and they are saved in the easily parsable JSON format.
|
||||
- Per-user files/folders ownership mapping: you can map all the users to the system account that runs SFTPGo (all platforms are supported) or you can run SFTPGo as root user and map each user or group of users to a different system account (\*NIX only).
|
||||
- Support for Git repositories over SSH.
|
||||
- SCP and rsync are supported.
|
||||
- FTP/S is supported. You can configure the FTP service to require TLS for both control and data connections.
|
||||
- [WebDAV](./docs/webdav.md) is supported.
|
||||
- ACME protocol is supported. SFTPGo can obtain and automatically renew TLS certificates for HTTPS, WebDAV and FTPS from `Let's Encrypt` or other ACME compliant certificate authorities, using the the `HTTP-01` or `TLS-ALPN-01` [challenge types](https://letsencrypt.org/docs/challenge-types/).
|
||||
- Two-Way TLS authentication, aka TLS with client certificate authentication, is supported for REST API/Web Admin, FTPS and WebDAV over HTTPS.
|
||||
- Per-user protocols restrictions. You can configure the allowed protocols (SSH/HTTP/FTP/WebDAV) for each user.
|
||||
- [Prometheus metrics](./docs/metrics.md) are exposed.
|
||||
- Support for HAProxy PROXY protocol: you can proxy and/or load balance the SFTP/SCP/FTP service without losing the information about the client's address.
|
||||
- Easy [migration](./examples/convertusers) from Linux system user accounts.
|
||||
- [Portable mode](./docs/portable-mode.md): a convenient way to share a single directory on demand.
|
||||
- [SFTP subsystem mode](./docs/sftp-subsystem.md): you can use SFTPGo as OpenSSH's SFTP subsystem.
|
||||
- Performance analysis using built-in [profiler](./docs/profiling.md).
|
||||
- Configuration format is at your choice: JSON, TOML, YAML, HCL, envfile are supported.
|
||||
- Log files are accurate and they are saved in the easily parsable JSON format ([more information](./docs/logs.md)).
|
||||
- SFTPGo supports a [plugin system](./docs/plugins.md) and therefore can be extended using external plugins.
|
||||
|
||||
## Platforms
|
||||
|
||||
SFTPGo is developed and tested on Linux. After each commit the code is automatically built and tested on Linux and macOS using Travis CI.
|
||||
Regularly the test cases are manually executed and pass on Windows. Other UNIX variants such as *BSD should work too.
|
||||
SFTPGo is developed and tested on Linux. After each commit, the code is automatically built and tested on Linux, macOS and Windows using [GitHub Actions](./.github/workflows/development.yml). The test cases are regularly manually executed and passed on FreeBSD. Other *BSD variants should work too.
|
||||
|
||||
## Requirements

- Go as a build-only dependency. We support the Go version(s) used in the [continuous integration workflows](./.github/workflows).
- A suitable SQL server to use as data provider: PostgreSQL 9.4+, MySQL 5.6+, SQLite 3.x, CockroachDB stable.
- The SQL server is optional: you can choose to use an embedded bolt database as key/value store or an in-memory data provider.

## Installation

Binary releases for Linux, macOS, and Windows are available. Please visit the [releases](https://github.com/drakkan/sftpgo/releases "releases") page.

An official Docker image is available. Documentation is [here](./docker/README.md).

<details>
<summary>Some Linux distro packages are available</summary>

- For Arch Linux via AUR:
  - [sftpgo](https://aur.archlinux.org/packages/sftpgo/). This package follows stable releases. It requires `git`, `gcc` and `go` to build.
  - [sftpgo-bin](https://aur.archlinux.org/packages/sftpgo-bin/). This package follows stable releases, downloading the prebuilt Linux binary from GitHub. It does not require `git`, `gcc` and `go` to build.
  - [sftpgo-git](https://aur.archlinux.org/packages/sftpgo-git/). This package builds and installs the latest git `main` branch. It requires `git`, `gcc` and `go` to build.
- Deb and RPM packages are built after each commit and for each release.
- For Ubuntu a PPA is available [here](https://launchpad.net/~sftpgo/+archive/ubuntu/sftpgo).
- Void Linux provides an [official package](https://github.com/void-linux/void-packages/tree/master/srcpkgs/sftpgo).

</details>

APT and YUM repositories are [available](./docs/repo.md).

SFTPGo is also available on [AWS Marketplace](https://aws.amazon.com/marketplace/seller-profile?id=6e849ab8-70a6-47de-9a43-13c3fa849335) and [Azure Marketplace](https://azuremarketplace.microsoft.com/en-us/marketplace/apps/prasselsrl1645470739547.sftpgo_linux); purchasing from there will help keep SFTPGo a long-term sustainable project.

<details><summary>Windows packages</summary>

- The Windows installer to install and run SFTPGo as a Windows service.
- The portable package to start SFTPGo on demand.
- The [winget](https://docs.microsoft.com/en-us/windows/package-manager/winget/install) package to install and run SFTPGo as a Windows service: `winget install SFTPGo`.
- The [Chocolatey package](https://community.chocolatey.org/packages/sftpgo) to install and run SFTPGo as a Windows service.

</details>

On macOS you can install from the Homebrew [Formula](https://formulae.brew.sh/formula/sftpgo).
On FreeBSD you can install from the [SFTPGo port](https://www.freshports.org/ftp/sftpgo).
On DragonFlyBSD you can install SFTPGo from [DPorts](https://github.com/DragonFlyBSD/DPorts/tree/master/ftp/sftpgo).

You can easily test new features by selecting a commit from the [Actions](https://github.com/drakkan/sftpgo/actions) page and downloading the matching build artifacts for Linux, macOS or Windows. GitHub stores artifacts for 90 days.

Alternately, you can [build from source](./docs/build-from-source.md).

[Getting Started Guide for the Impatient](./docs/howto/getting-started.md).

## Configuration

A full explanation of all configuration methods can be found [here](./docs/full-configuration.md).

Please make sure to [initialize the data provider](#data-provider-initialization-and-management) before running the daemon.

The `sftpgo` executable can be used this way:

```
Usage:
  sftpgo [command]

Available Commands:
  help        Help about any command
  serve       Start the SFTP Server

Flags:
  -h, --help      help for sftpgo
  -v, --version

Use "sftpgo [command] --help" for more information about a command
```

The `serve` subcommand supports the following flags:

- `--config-dir` string. Location of the config dir. This directory should contain the `sftpgo` configuration file and is used as the base for files with a relative path (e.g. the private keys for the SFTP server, the SQLite or bbolt database if you use SQLite or bbolt as data provider). The default value is "." or the value of the `SFTPGO_CONFIG_DIR` environment variable.
- `--config-file` string. Name of the configuration file. It must be the name of a file stored in config-dir, not the absolute path to the configuration file. The specified file name must have no extension; we automatically load JSON, YAML, TOML, HCL and Java properties. The default value is "sftpgo" (and therefore `sftpgo.json`, `sftpgo.yaml` and so on are searched) or the value of the `SFTPGO_CONFIG_FILE` environment variable.
- `--log-compress` boolean. Determine if the rotated log files should be compressed using gzip. Default `false` or the value of the `SFTPGO_LOG_COMPRESS` environment variable (1 or `true`, 0 or `false`). It is unused if `log-file-path` is empty.
- `--log-file-path` string. Location for the log file, default "sftpgo.log" or the value of the `SFTPGO_LOG_FILE_PATH` environment variable. Leave empty to write logs to the standard error.
- `--log-max-age` int. Maximum number of days to retain old log files. Default 28 or the value of the `SFTPGO_LOG_MAX_AGE` environment variable. It is unused if `log-file-path` is empty.
- `--log-max-backups` int. Maximum number of old log files to retain. Default 5 or the value of the `SFTPGO_LOG_MAX_BACKUPS` environment variable. It is unused if `log-file-path` is empty.
- `--log-max-size` int. Maximum size in megabytes of the log file before it gets rotated. Default 10 or the value of the `SFTPGO_LOG_MAX_SIZE` environment variable. It is unused if `log-file-path` is empty.
- `--log-verbose` boolean. Enable verbose logs. Default `true` or the value of the `SFTPGO_LOG_VERBOSE` environment variable (1 or `true`, 0 or `false`).

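For example, a typical invocation combining these flags might look like this (the paths are illustrative, not defaults):

```bash
sftpgo serve --config-dir /etc/sftpgo --log-file-path /var/log/sftpgo/sftpgo.log --log-max-size 20
```
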
If you don't configure any private host keys, the daemon will use `id_rsa` in the configuration directory. If that file doesn't exist, the daemon will attempt to autogenerate it (if the user that executes SFTPGo has write access to the config-dir). The server supports any private key format supported by [`crypto/ssh`](https://github.com/golang/crypto/blob/master/ssh/keys.go#L32).

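For example, if you also want an ECDSA host key alongside the default RSA one, you could generate it in the config dir with OpenSSH's `ssh-keygen` (shown for illustration; any tool producing a key format supported by `crypto/ssh` works) and reference it in the `keys` configuration shown later:

```bash
ssh-keygen -t ecdsa -b 256 -N "" -f id_ecdsa
```
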
Before starting `sftpgo`, a data provider must be configured.

Sample SQL scripts to create the required database structure can be found inside the source tree [sql](https://github.com/drakkan/sftpgo/tree/master/sql "sql") directory. The SQL script file names are, by convention, the date in `YYYYMMDD` format with the suffix `.sql`. You need to apply all the SQL scripts for your database ordered by name; for example `20190706.sql` must be applied before `20190728.sql` and so on.

The `sftpgo` configuration file contains the following sections:

- **"sftpd"**, the configuration for the SFTP server
  - `bind_port`, integer. The port used for serving SFTP requests. Default: 2022
  - `bind_address`, string. Leave blank to listen on all available network interfaces. Default: ""
  - `idle_timeout`, integer. Time in minutes after which an idle client will be disconnected. 0 means disabled. Default: 15
  - `max_auth_tries`, integer. Maximum number of authentication attempts permitted per connection. If set to a negative number, the number of attempts is unlimited. If set to zero, the number of attempts is limited to 6.
  - `umask`, string. Umask for new files and directories. This setting has no effect on Windows. Default: "0022"
  - `banner`, string. Identification string used by the server. Leave empty to use the default banner. Default: "SFTPGo_version"
  - `upload_mode`, integer. 0 means standard: files are uploaded directly to the requested path. 1 means atomic: files are uploaded to a temporary path and renamed to the requested path when the client ends the upload. Atomic mode avoids problems such as a web server that serves partial files while they are still being uploaded. In atomic mode, if there is an upload error the temporary file is deleted, so the requested upload path will not contain a partial file.
  - `actions`, struct. It contains the command to execute and/or the HTTP URL to notify and the trigger conditions
    - `execute_on`, list of strings. Valid values are `download`, `upload`, `delete`, `rename`. On folder deletion a `delete` notification will be sent for each deleted file. Actions will not be executed if an error is detected and so a partial file is uploaded or downloaded. Leave empty to disable actions. The `upload` condition includes both uploads to new files and overwrites of existing files
    - `command`, string. Absolute path to the command to execute. Leave empty to disable. The command is invoked with the following arguments (see the sketch after this list):
      - `action`, any valid `execute_on` string
      - `username`, the user who performed the action
      - `path` to the affected file. For the `rename` action this is the old file name
      - `target_path`, non empty for the `rename` action, this is the new file name
    - `http_notification_url`, a valid URL. An HTTP GET request will be executed to this URL. Leave empty to disable. The query string will contain the following parameters, which have the same meaning as the command's arguments:
      - `action`
      - `username`
      - `path`
      - `target_path`, added for the `rename` action only
  - `keys`, struct array. It contains the daemon's private keys. If empty or missing, the daemon will search for or try to generate `id_rsa` in the configuration directory.
    - `private_key`, path to the private key file. It can be a path relative to the config dir or an absolute one.
  - `enable_scp`, boolean. Default disabled. Set to `true` to enable SCP support. SCP is an experimental feature: we have our own SCP implementation since we can't rely on the `scp` system command to properly handle permissions, quota and the user's home dir restrictions. The SCP protocol is quite simple but there are no official docs about it, so we need more testing and feedback before enabling it by default. We may not handle some borderline cases or may have sneaky bugs. Please do accurate tests yourself before enabling SCP and let us know if something does not work as expected for your use cases. SCP between two remote hosts is supported using the `-3` scp option.
  - `kex_algorithms`, list of strings. Available KEX (Key Exchange) algorithms in preference order. Leave empty to use default values. The supported values can be found here: [`crypto/ssh`](https://github.com/golang/crypto/blob/master/ssh/common.go#L46 "Supported kex algos")
  - `ciphers`, list of strings. Allowed ciphers. Leave empty to use default values. The supported values can be found here: [`crypto/ssh`](https://github.com/golang/crypto/blob/master/ssh/common.go#L28 "Supported ciphers")
  - `macs`, list of strings. Available MAC (message authentication code) algorithms in preference order. Leave empty to use default values. The supported values can be found here: [`crypto/ssh`](https://github.com/golang/crypto/blob/master/ssh/common.go#L84 "Supported MACs")
  - `login_banner_file`, path to the login banner file. The contents of the specified file, if any, are sent to the remote user before authentication is allowed. It can be a path relative to the config dir or an absolute one. Leave empty to send no login banner
- **"data_provider"**, the configuration for the data provider
  - `driver`, string. Supported drivers are `sqlite`, `mysql`, `postgresql`, `bolt`
  - `name`, string. Database name. For driver `sqlite` this can be the database name relative to the config dir or the absolute path to the SQLite database.
  - `host`, string. Database host. Leave empty for drivers `sqlite` and `bolt`
  - `port`, integer. Database port. Leave empty for drivers `sqlite` and `bolt`
  - `username`, string. Database user. Leave empty for drivers `sqlite` and `bolt`
  - `password`, string. Database password. Leave empty for drivers `sqlite` and `bolt`
  - `sslmode`, integer. Used for drivers `mysql` and `postgresql`. 0 disables SSL/TLS connections, 1 requires ssl, 2 sets the ssl mode to `verify-ca` for driver `postgresql` and `skip-verify` for driver `mysql`, 3 sets the ssl mode to `verify-full` for driver `postgresql` and `preferred` for driver `mysql`
  - `connection_string`, string. Provide a custom database connection string. If not empty, this connection string will be used instead of building one using the previous parameters. Leave empty for driver `bolt`
  - `users_table`, string. Database table for SFTP users
  - `manage_users`, integer. Set to 0 to disable users management, 1 to enable
  - `track_quota`, integer. Set the preferred way to track users quota between the following choices:
    - 0, disable quota tracking. The REST API to scan a user dir and update quota will do nothing
    - 1, quota is updated each time a user uploads or deletes a file, even if the user has no quota restrictions
    - 2, quota is updated each time a user uploads or deletes a file, but only for users with quota restrictions. With this configuration the "quota scan" REST API can still be used to periodically update space usage for users without quota restrictions
  - `pool_size`, integer. Sets the maximum number of open connections for the `mysql` and `postgresql` drivers. Default 0 (unlimited)
- **"httpd"**, the configuration for the HTTP server used to serve the REST API
  - `bind_port`, integer. The port used for serving HTTP requests. Set to 0 to disable the HTTP server. Default: 8080
  - `bind_address`, string. Leave blank to listen on all available network interfaces. Default: "127.0.0.1"

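Before looking at the full default configuration, here is a minimal sketch of a script for the `actions` `command` option described above. It is illustrative only: it assumes the documented argument order, and the log path is hypothetical.

```bash
#!/bin/sh
# Illustrative "actions" hook: SFTPGo passes the documented arguments in order.
action="$1"
username="$2"
path="$3"
target_path="$4"

# Append a simple audit line (the log file location is a hypothetical choice).
echo "$(date -u +%FT%TZ) action=${action} user=${username} path=${path} target=${target_path}" >> /var/log/sftpgo-hooks.log
```
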
Here is a full example showing the default config in JSON format:

```json
{
  "sftpd": {
    "bind_port": 2022,
    "bind_address": "",
    "idle_timeout": 15,
    "max_auth_tries": 0,
    "umask": "0022",
    "banner": "SFTPGo",
    "actions": {
      "execute_on": [],
      "command": "",
      "http_notification_url": ""
    },
    "keys": [],
    "enable_scp": false
  },
  "data_provider": {
    "driver": "sqlite",
    "name": "sftpgo.db",
    "host": "",
    "port": 5432,
    "username": "",
    "password": "",
    "sslmode": 0,
    "connection_string": "",
    "users_table": "users",
    "manage_users": 1,
    "track_quota": 2,
    "pool_size": 0
  },
  "httpd": {
    "bind_port": 8080,
    "bind_address": "127.0.0.1"
  }
}
```

If you want to use a private key that uses an algorithm different from RSA, or more than one private key, then replace the empty `keys` array with something like this:

```json
"keys": [
  {
    "private_key": "id_rsa"
  },
  {
    "private_key": "id_ecdsa"
  }
]
```

The configuration can be read from JSON, TOML, YAML, HCL, envfile and Java properties config files. If your `config-file` flag is set to `sftpgo` (the default value), you need to create a configuration file called `sftpgo.json`, `sftpgo.yaml` and so on inside `config-dir`.

You can also override all the available configuration options using environment variables: sftpgo will check for environment variables with a name matching the configuration key uppercased and prefixed with `SFTPGO_`. You need to use `__` to traverse a struct.

Let's see some examples:

- To set the sftpd `bind_port` you need to define the env var `SFTPGO_SFTPD__BIND_PORT`
- To set the `execute_on` actions you need to define the env var `SFTPGO_SFTPD__ACTIONS__EXECUTE_ON`, for example `SFTPGO_SFTPD__ACTIONS__EXECUTE_ON=upload,download` (see the sketch below)

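Putting the two examples together, a minimal sketch of overriding options via environment variables before starting the daemon (the values are illustrative):

```bash
export SFTPGO_SFTPD__BIND_PORT=2222
export SFTPGO_SFTPD__ACTIONS__EXECUTE_ON=upload,download
sftpgo serve
```
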
Please note that to override configuration options with environment variables, a configuration file containing the options to override is required. You can, for example, deploy the default configuration file and then override the options you need to customize using environment variables.

To start the SFTP Server with the default values for the command line flags simply use:

```bash
sftpgo serve
```

Check out [this documentation](./docs/service.md) if you want to run SFTPGo as a service.

### Data provider initialization and management

Before starting the SFTPGo server, please ensure that the configured data provider is properly initialized/updated.

For PostgreSQL, MySQL and CockroachDB providers, you need to create the configured database. For SQLite, the configured database will be automatically created at startup. Memory and bolt data providers do not require an initialization, but they could require an update to the existing data after upgrading SFTPGo.

SFTPGo will attempt to automatically detect if the data provider is initialized/updated and, if not, will attempt to initialize/update it on startup as needed.

Alternately, you can create/update the required data provider structures yourself using the `initprovider` command.

For example, you can simply execute the following command from the configuration directory:

```bash
sftpgo initprovider
```

Take a look at the CLI usage to learn how to specify a different configuration file:

```bash
sftpgo initprovider --help
```

You can disable automatic data provider checks/updates at startup by setting the `update_mode` configuration key to `1`.

You can also reset your provider by using the `resetprovider` sub-command. Take a look at the CLI usage for more details:

```bash
sftpgo resetprovider --help
```

:warning: Please note that some data providers (e.g. MySQL and CockroachDB) do not support schema changes within a transaction; this means that you may end up with an inconsistent schema if migrations are forcibly aborted. CockroachDB doesn't support database-level locks, so make sure you don't execute migrations concurrently.

## Create the first admin

To start using SFTPGo you need to create an admin user. You can do it in several ways:

- by using the web admin interface. The default URL is [http://127.0.0.1:8080/web/admin](http://127.0.0.1:8080/web/admin)
- by loading initial data
- by enabling `create_default_admin` in your configuration file and setting the environment variables `SFTPGO_DEFAULT_ADMIN_USERNAME` and `SFTPGO_DEFAULT_ADMIN_PASSWORD`

## Upgrading

SFTPGo supports upgrading from the previous release branch to the current one.
Some examples of supported upgrade paths are:

- from 1.2.x to 2.0.x
- from 2.0.x to 2.1.x and so on.

For supported upgrade paths, the data and schema are migrated automatically; alternately you can use the `initprovider` command.

So if, for example, you want to upgrade from a version before 1.2.x to 2.0.x, you must first install version 1.2.x, update the data provider and finally install version 2.0.x. It is recommended to always install the latest available minor version, i.e. do not install 1.2.0 if 1.2.2 is available.

Loading data from a provider independent JSON dump is supported from the previous release branch to the current one too. After upgrading SFTPGo it is advisable to regenerate the JSON dump from the new version.

## Downgrading

If for some reason you want to downgrade SFTPGo, you may need to downgrade your data provider schema and data as well. You can use the `revertprovider` command for this task.

As with upgrading, SFTPGo supports downgrading from the previous release branch to the current one.

So, if you plan to downgrade from 2.0.x to 1.2.x, you can prepare your data provider before uninstalling the 2.0.x version by executing the following command from the configuration directory:

```shell
sftpgo revertprovider --to-version 4
```

Take a look at the CLI usage to see the supported values for the `--to-version` argument and to learn how to specify a different configuration file:

```shell
sftpgo revertprovider --help
```

The `revertprovider` command is not supported for the memory provider.

Please note that we only support the current release branch and the current main branch; if you find a bug it is better to report it rather than downgrading to an older unsupported version.

## Users and folders management

After starting SFTPGo you can manage users and folders using:

- the [web based administration interface](./docs/web-admin.md)
- the [REST API](./docs/rest-api.md)

To support embedded data providers like `bolt` and `SQLite` we can't have a CLI that directly writes users and folders to the data provider; we always have to use the REST API.

Full details for users, folders, admins and other resources are documented in the [OpenAPI](./openapi/openapi.yaml) schema. If you want to render the schema without importing it manually, you can explore it on [Stoplight](https://sftpgo.stoplight.io/docs/sftpgo/openapi.yaml).

## Tutorials

Some step-by-step tutorials can be found inside the source tree [howto](./docs/howto "How-to") directory.

## Authentication options

<details><summary> External Authentication</summary>

Custom authentication methods can easily be added. SFTPGo supports external authentication modules, and writing a new backend can be as simple as a few lines of shell script. More information can be found [here](./docs/external-auth.md).

</details>

<details><summary> Keyboard Interactive Authentication</summary>

Keyboard interactive authentication is, in general, a series of questions asked by the server with responses provided by the client.
This authentication method is typically used for multi-factor authentication.

More information can be found [here](./docs/keyboard-interactive.md).

</details>

## Dynamic user creation or modification

A user can be created or modified by an external program just before the login. More information about this can be found [here](./docs/dynamic-user-mod.md).

## Custom Actions

SFTPGo allows you to configure custom commands and/or HTTP hooks to receive notifications about file uploads, deletions and several other events.

More information about custom actions can be found [here](./docs/custom-actions.md).

## Virtual folders

Directories outside the user home directory, or based on a different storage provider, can be exposed as virtual folders; more information [here](./docs/virtual-folders.md).

## Other hooks

You can get notified as soon as a new connection is established using the [Post-connect hook](./docs/post-connect-hook.md) and after each login using the [Post-login hook](./docs/post-login-hook.md).
You can use your own hook to [check passwords](./docs/check-password-hook.md).

## Storage backends

### S3/GCP/Azure

Each user can be mapped with a [S3 Compatible Object Storage](./docs/s3.md)/[Google Cloud Storage](./docs/google-cloud-storage.md)/[Azure Blob Storage](./docs/azure-blob-storage.md) bucket or a bucket virtual folder that is exposed over SFTP/SCP/FTP/WebDAV.

### SFTP backend

Each user can be mapped to another SFTP server account or a subfolder of it. More information can be found [here](./docs/sftpfs.md).

### Encrypted backend

Data at-rest encryption is supported via the [cryptfs backend](./docs/dare.md).

### Other Storage backends

Adding new storage backends is quite easy:

- implement the [Fs interface](./vfs/vfs.go#L28 "interface for filesystem backends")
- update the user method `GetFilesystem` to return the new backend
- update the web interface and the REST API CLI
- add the flags for the new storage backend to the `portable` mode

Anyway, some backends require a pay-per-use account (or they offer a free account for a limited time period only). To be able to add support for such backends or to review pull requests, please provide a test account. The test account must be available for enough time to be able to maintain the backend and do basic tests before each new release.

## Brute force protection

SFTPGo supports a built-in [defender](./docs/defender.md).

Alternately you can use the [connection failed logs](./docs/logs.md) for integration in tools such as [Fail2ban](http://www.fail2ban.org/). Examples of [jails](./fail2ban/jails) and [filters](./fail2ban/filters) working with `systemd`/`journald` are available in the fail2ban directory.

## Account's configuration properties

Detailed information about account configuration properties can be found [here](./docs/account.md). For each account the following properties can be configured:

- `username`
- `password`, used for password authentication. For users created using the SFTPGo REST API, if the password has no known hashing algo prefix, it will be stored using argon2id. SFTPGo supports checking passwords stored with bcrypt, pbkdf2 and sha512crypt too. For pbkdf2 the supported format is `$<algo>$<iterations>$<salt>$<hashed pwd base64 encoded>`, where algo is `pbkdf2-sha1`, `pbkdf2-sha256` or `pbkdf2-sha512`. For example, the `pbkdf2-sha256` of the word `password` using 150000 iterations and `E86a9YMX3zC7` as salt must be stored as `$pbkdf2-sha256$150000$E86a9YMX3zC7$R5J62hsSq+pYw00hLLPKBbcGXmq7fj5+/M0IFoYtZbo=`. For bcrypt the format must be the one supported by golang's [crypto/bcrypt](https://godoc.org/golang.org/x/crypto/bcrypt) package; for example the password `secret` with cost `14` must be stored as `$2a$14$ajq8Q7fbtFRQvXpdCq7Jcuy.Rx1h/L4J60Otx.gyNLbAYctGMJ9tK`. For sha512crypt we support the format used in `/etc/shadow` with the `$6$` prefix; this is useful if you are migrating from system user accounts. Using the REST API you can send a password hashed as bcrypt, pbkdf2 or sha512crypt and it will be stored as is.
- `public_keys`, array of public keys. At least one public key or the password is mandatory.
- `home_dir`, the user cannot upload or download files outside this directory. Must be an absolute path
- `uid`, `gid`. If sftpgo runs as the root system user then the created files and directories will be assigned to this system uid/gid. Ignored on Windows and if sftpgo runs as a non root user: in this case files and directories for all SFTP users will be owned by the system user that runs sftpgo.
- `max_sessions`, maximum concurrent sessions. 0 means unlimited
- `quota_size`, maximum allowed size as bytes. 0 means unlimited
- `quota_files`, maximum number of files allowed. 0 means unlimited
- `permissions`, the following permissions are supported:
  - `*`, all permissions are granted
  - `list`, listing items is allowed
  - `download`, downloading files is allowed
  - `upload`, uploading files is allowed
  - `overwrite`, overwriting an existing file, while uploading, is allowed. The `upload` permission is required to allow file overwrite
  - `delete`, deleting files or directories is allowed
  - `rename`, renaming files or directories is allowed
  - `create_dirs`, creating directories is allowed
  - `create_symlinks`, creating symbolic links is allowed
- `upload_bandwidth`, maximum upload bandwidth as KB/s, 0 means unlimited
- `download_bandwidth`, maximum download bandwidth as KB/s, 0 means unlimited

These properties are stored inside the data provider. If you want to use your existing accounts, you can create a database view. Since a view is read only, you have to disable user management and quota tracking so SFTPGo will never try to write to the view.

## Performance

SFTPGo can easily saturate a Gigabit connection on low end hardware with no special configuration; this is generally enough for most use cases.

More in-depth analysis of performance can be found [here](./docs/performance.md).

## REST API

SFTPGo exposes a REST API to manage users and quota and to get real time reports for the active connections, with the ability to forcibly close a connection.

If quota tracking is enabled in the `sftpgo` configuration file, the used size and number of files are updated each time a file is added/removed. If files are added/removed without using SFTP, or if you change `track_quota` from `2` to `1`, you can rescan the user home dir and update the used quota using the REST API.

The REST API is designed to run on localhost or on a trusted network. If you need HTTPS or authentication, you can set up a reverse proxy using an HTTP server such as Apache or NGINX.

For example, you can keep SFTPGo listening on localhost and expose it externally by configuring a reverse proxy using Apache HTTP Server this way:

```
ProxyPass /api/v1 http://127.0.0.1:8080/api/v1
ProxyPassReverse /api/v1 http://127.0.0.1:8080/api/v1
```

and you can add authentication with something like this:

```
<Location /api/v1>
   AuthType Digest
   AuthName "Private"
   AuthDigestDomain "/api/v1"
   AuthDigestProvider file
   AuthUserFile "/etc/httpd/conf/auth_digest"
   Require valid-user
</Location>
```

and, of course, you can configure the web server to use HTTPS.

The OpenAPI 3 schema for the exposed API can be found inside the source tree: [openapi.yaml](https://github.com/drakkan/sftpgo/tree/master/api/schema/openapi.yaml "OpenAPI 3 specs").

A sample CLI client for the REST API can be found inside the source tree [scripts](https://github.com/drakkan/sftpgo/tree/master/scripts "scripts") directory.

You can also generate your own REST client, in your preferred programming language or even bash scripts, using an OpenAPI generator such as [swagger-codegen](https://github.com/swagger-api/swagger-codegen) or [OpenAPI Generator](https://openapi-generator.tech/).

## Metrics

SFTPGo exposes [Prometheus](https://prometheus.io/) metrics at the `/metrics` HTTP endpoint.
Several counters and gauges are available, for example:

- Total uploads and downloads
- Total upload and download sizes
- Total upload and download errors
- Number of active connections
- Data provider availability
- Total successful and failed logins using a password or a public key
- Total HTTP requests served and totals per response code
- Go runtime details such as GC stats, number of goroutines and OS threads
- Process information like CPU, memory, file descriptor usage and start time

Please check the `/metrics` page for more details.

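For example, assuming the HTTP server is listening on the default `127.0.0.1:8080`, a quick way to inspect the exposed metrics is:

```bash
curl http://127.0.0.1:8080/metrics
```
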
## Logs

Inside the log file each line is a JSON struct; each struct has a `sender` field that identifies the log type.

The logs can be divided into the following categories:

- **"app logs"**, internal logs used to debug `sftpgo`:
  - `sender` string. This is generally the package name that emits the log
  - `time` string. Date/time with millisecond precision
  - `level` string
  - `message` string
- **"transfer logs"**, SFTP/SCP transfer logs:
  - `sender` string. `Upload` or `Download`
  - `time` string. Date/time with millisecond precision
  - `level` string
  - `elapsed_ms`, int64. Elapsed time, as milliseconds, for the upload/download
  - `size_bytes`, int64. Size, as bytes, of the download/upload
  - `username`, string
  - `file_path` string
  - `connection_id` string. Unique connection identifier
  - `protocol` string. `SFTP` or `SCP`
- **"command logs"**, SFTP/SCP command logs:
  - `sender` string. `Rename`, `Rmdir`, `Mkdir`, `Symlink`, `Remove`
  - `level` string
  - `username`, string
  - `file_path` string
  - `target_path` string
  - `connection_id` string. Unique connection identifier
  - `protocol` string. `SFTP` or `SCP`
- **"http logs"**, REST API logs:
  - `sender` string. `httpd`
  - `level` string
  - `remote_addr` string. IP and port of the remote client
  - `proto` string, for example `HTTP/1.1`
  - `method` string. HTTP method (`GET`, `POST`, `PUT`, `DELETE` etc.)
  - `user_agent` string
  - `uri` string. Full URI
  - `resp_status` integer. HTTP response status code
  - `resp_size` integer. Size in bytes of the HTTP response
  - `elapsed_ms` int64. Elapsed time, as milliseconds, to complete the request
  - `request_id` string. Unique request identifier
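
Since each line is a self-contained JSON object, standard tools work well for filtering. For example, a sketch using `jq` (assuming the default `sftpgo.log` location) that extracts only the transfer logs:

```bash
jq -c 'select(.sender == "Upload" or .sender == "Download")' sftpgo.log
```
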
## Release Cadence

SFTPGo releases are feature-driven; we don't have a fixed time based schedule. As a rough estimate, you can expect 1 or 2 new releases per year.

## Acknowledgements

SFTPGo makes use of the third party libraries listed inside [go.mod](./go.mod), including:

- [pkg/sftp](https://github.com/pkg/sftp)
- [go-chi](https://github.com/go-chi/chi)
- [zerolog](https://github.com/rs/zerolog)
- [lumberjack](https://gopkg.in/natefinch/lumberjack.v2)
- [argon2id](https://github.com/alexedwards/argon2id)
- [go-sqlite3](https://github.com/mattn/go-sqlite3)
- [go-sql-driver/mysql](https://github.com/go-sql-driver/mysql)
- [bbolt](https://github.com/etcd-io/bbolt)
- [lib/pq](https://github.com/lib/pq)
- [viper](https://github.com/spf13/viper)
- [cobra](https://github.com/spf13/cobra)
- [xid](https://github.com/rs/xid)
- [nathanaelle/password](https://github.com/nathanaelle/password)

Some code was initially taken from [Pterodactyl sftp server](https://github.com/pterodactyl/sftp-server).

We are very grateful to all the people who contributed with ideas and/or pull requests.

Thank you [ysura](https://www.ysura.com/) for granting me stable access to a test AWS S3 account.

## License

GNU AGPLv3

---

**README.zh_CN.md** (new file, 318 lines)

# SFTPGo

![CI Status](https://github.com/drakkan/sftpgo/workflows/CI/badge.svg?branch=main&event=push)
[![Code Coverage](https://codecov.io/gh/drakkan/sftpgo/branch/main/graph/badge.svg)](https://codecov.io/gh/drakkan/sftpgo/branch/main)
[![License: AGPL-3.0-only](https://img.shields.io/badge/License-AGPLv3-blue.svg)](https://www.gnu.org/licenses/agpl-3.0)
[![Docker Pulls](https://img.shields.io/docker/pulls/drakkan/sftpgo)](https://hub.docker.com/r/drakkan/sftpgo)
[![Mentioned in Awesome Go](https://awesome.re/mentioned-badge.svg)](https://github.com/avelino/awesome-go)

[English](./README.md) | [简体中文](./README.zh_CN.md)

Full-featured and highly configurable SFTP server with optional HTTP/S, FTP/S and WebDAV support.
Several storage backends are supported: local filesystem, encrypted local filesystem, S3 (compatible) Object Storage, Google Cloud Storage, Azure Blob Storage, SFTP.

## Features

- Support for serving local filesystem, encrypted local filesystem, S3 Compatible Object Storage, Google Cloud Storage, Azure Blob Storage or other SFTP accounts over SFTP/SCP/FTP/WebDAV.
- Virtual folders are supported: a virtual folder can use any of the supported storage backends. You can, for example, have an S3 user that exposes a GCS bucket (or part of it) on a specified path and an encrypted local filesystem on another. Virtual folders can be private or shared among multiple users; for shared virtual folders you can define different quota limits for each user.
- Configurable [custom commands and/or HTTP hooks](./docs/custom-actions.md) on upload, pre-upload, download, pre-download, delete, pre-delete, rename, mkdir and rmdir for SSH commands, and on user add, update and delete.
- Virtual accounts stored within a "data provider".
- SQLite, MySQL, PostgreSQL, CockroachDB, Bolt (a key/value store in pure Go) and in-memory data providers are supported.
- Chroot isolation for local accounts. Cloud-based accounts can be restricted to a fixed base path.
- Per-user and per-directory virtual permissions: for each exposed path you can allow or deny directory listing, upload, overwrite, download, delete, rename, create directories, create symlinks, and change owner/group/file mode and modification time.
- [REST API](./docs/rest-api.md) for users and folders management, data retention, backup, restore, and real time reports of the active connections, with the ability to forcibly close a connection.
- [Web based administration interface](./docs/web-admin.md) to easily manage users, folders and connections.
- [Web client interface](./docs/web-client.md) so that end users can change their credentials and manage and share their files in the browser.
- Public key and password authentication. Multiple public keys per user are supported.
- SSH user [certificate authentication](https://cvsweb.openbsd.org/src/usr.bin/ssh/PROTOCOL.certkeys?rev=1.8).
- Keyboard interactive authentication. You can easily set up a customizable multi-factor authentication.
- Partial authentication. You can configure multi-step authentication requiring, for example, the user password after a successful public key authentication.
- Per-user authentication methods.
- [Two-factor authentication](./docs/howto/two-factor-authentication.md) based on time-based one-time passwords (RFC 6238), which works with Authy, Google Authenticator and other compatible apps.
- Simplified user administration using [groups](./docs/groups.md).
- Custom authentication via external programs/HTTP API.
- The Web Client and Web Admin user interfaces support [OpenID Connect](https://openid.net/connect/) authentication, so they can be easily integrated with identity providers such as [Keycloak](https://www.keycloak.org/). You can find more details [here](./docs/oidc.md).
- [Data At Rest Encryption](./docs/dare.md).
- Dynamic user modification before login via external programs/HTTP API.
- Quota support: accounts can have individual disk quotas expressed as max total size and/or max number of files.
- Bandwidth throttling, with separate settings for upload and download and overrides based on the client's IP address.
- Data transfer bandwidth limits, with total limits or separate settings for uploads and downloads and overrides based on the client's IP address. Limits can be reset using the REST API.
- Per-protocol [rate limiting](./docs/rate-limiting.md) is supported and can optionally be connected to the built-in defender to automatically block hosts that repeatedly exceed the configured limit.
- Per-user maximum concurrent sessions.
- Per-user and global IP filters: logins can be restricted to specific ranges of IP addresses or to a specific IP address.
- Per-user and per-directory shell-like pattern filters: files can be allowed, denied or hidden based on shell-like patterns.
- Automatic termination of idle connections.
- Automatic blocklist management using the built-in [defender](./docs/defender.md).
- Geo-IP filtering using a [plugin](https://github.com/sftpgo/sftpgo-plugin-geoipfilter).
- Atomic uploads are configurable.
- Per-user files/folders ownership mapping: you can map all the users to the system account that runs SFTPGo (all platforms are supported), or you can run SFTPGo as root and map each user or group of users to a different system account (\*NIX only).
- Support for Git repositories over SSH.
- SCP and rsync are supported.
- FTP/S is supported. You can configure the FTP service to require TLS for both control and data connections.
- [WebDAV](./docs/webdav.md) is supported.
- Two-Way TLS authentication, aka TLS with client certificate authentication, is supported for REST API/Web Admin, FTPS and WebDAV over HTTPS.
- Per-user protocols restrictions. You can configure the allowed protocols (SSH/HTTP/FTP/WebDAV) for each user.
- [Prometheus metrics](./docs/metrics.md) are exposed.
- Support for the HAProxy PROXY protocol: you can proxy and/or load balance the SFTP/SCP/FTP service without losing the information about the client's address.
- Easy [migration](./examples/convertusers) from Linux system user accounts.
- [Portable mode](./docs/portable-mode.md): a convenient way to share a single directory on demand.
- [SFTP subsystem mode](./docs/sftp-subsystem.md): you can use SFTPGo as OpenSSH's SFTP subsystem.
- Performance analysis using the built-in [profiler](./docs/profiling.md).
- Configuration format is at your choice: JSON, TOML, YAML, HCL, envfile are supported.
- Log files are accurate and saved in the easily parsable JSON format ([more information](./docs/logs.md)).
- SFTPGo supports a [plugin system](./docs/plugins.md) and therefore can be extended using external plugins.

## Platforms

SFTPGo is developed and tested on Linux. After each commit, the code is automatically built and tested on Linux, macOS and Windows using [GitHub Actions](./.github/workflows/development.yml). The test cases are regularly executed manually on FreeBSD, and other *BSD variants should work too.

## Requirements

- Go as a build-only dependency. We support the Go version(s) used in the [continuous integration workflows](./.github/workflows).
- A suitable SQL server to use as data provider: PostgreSQL 9.4+, MySQL 5.6+, SQLite 3.x, CockroachDB stable.
- The SQL server is optional: you can choose to use an embedded bolt database as key/value store or an in-memory data provider.

## Installation

Binary releases for Linux, macOS and Windows are available. Please refer to the [releases](https://github.com/drakkan/sftpgo/releases "releases") page.

An official Docker image is available. See the [Docker documentation](./docker/README.md).

<details>

<summary>Some Linux distro packages are available</summary>

- For Arch Linux via AUR:
  - [sftpgo](https://aur.archlinux.org/packages/sftpgo/). This package follows stable releases. It requires `git`, `gcc` and `go` to build.
  - [sftpgo-bin](https://aur.archlinux.org/packages/sftpgo-bin/). This package follows stable releases, downloading the prebuilt Linux binary from GitHub. It does not require `git`, `gcc` and `go` to build.
  - [sftpgo-git](https://aur.archlinux.org/packages/sftpgo-git/). This package builds and installs the latest git `main` branch. It requires `git`, `gcc` and `go` to build.
- Deb and RPM packages are built after each commit and for each release.
- An Ubuntu PPA is available [here](https://launchpad.net/~sftpgo/+archive/ubuntu/sftpgo).
- Void Linux provides an [official package](https://github.com/void-linux/void-packages/tree/master/srcpkgs/sftpgo).

</details>

SFTPGo is also available on [AWS Marketplace](https://aws.amazon.com/marketplace/seller-profile?id=6e849ab8-70a6-47de-9a43-13c3fa849335) and [Azure Marketplace](https://azuremarketplace.microsoft.com/en-us/marketplace/apps/prasselsrl1645470739547.sftpgo_linux); purchasing from there helps keep SFTPGo a sustainable long-term project.

<details><summary>Windows packages</summary>

- The Windows installer to install and run SFTPGo as a Windows service.
- The portable package to start SFTPGo on demand.
- The [winget](https://docs.microsoft.com/en-us/windows/package-manager/winget/install) package to install and run SFTPGo as a Windows service: `winget install SFTPGo`.
- The [Chocolatey package](https://community.chocolatey.org/packages/sftpgo) to install and run SFTPGo as a Windows service.

</details>

On FreeBSD you can install from the [SFTPGo port](https://www.freshports.org/ftp/sftpgo).
On DragonFlyBSD you can install from [DPorts](https://github.com/DragonFlyBSD/DPorts/tree/master/ftp/sftpgo).

You can easily test new features by selecting a commit from the [Actions](https://github.com/drakkan/sftpgo/actions) page and downloading the matching build artifacts for Linux, macOS or Windows. GitHub stores artifacts for 90 days.

Alternately, you can [build from source](./docs/build-from-source.md).

[Getting Started Guide for the Impatient](./docs/howto/getting-started.md).

## Configuration

A full explanation of all configuration methods can be found [here](./docs/full-configuration.md).

Please make sure to [initialize the data provider](#data-provider-initialization-and-management) before running the daemon.

To start SFTPGo with the default settings, run:

```bash
sftpgo serve
```

Check out [this documentation](./docs/service.md) if you want to run SFTPGo as a service.

### Data provider initialization and management

Before starting the SFTPGo server, please ensure that the configured data provider is properly initialized/updated.

For PostgreSQL, MySQL and CockroachDB providers, you need to create the configured database. For SQLite, the configured database will be automatically created at startup. Memory and bolt data providers do not require an initialization, but they could require an update to the existing data after upgrading SFTPGo.

SFTPGo will attempt to automatically detect if the data provider is initialized/updated and, if not, will attempt to initialize/update it on startup as needed.

Alternately, you can create/update the required data provider structures yourself using the `initprovider` command.

For example, you can execute the following command from the configuration directory:

```bash
sftpgo initprovider
```

Take a look at the CLI usage to learn how to specify a different configuration file:

```bash
sftpgo initprovider --help
```

You can disable automatic data provider checks/updates at startup by setting the `update_mode` configuration key to `1`.

You can reset your data provider by using the `resetprovider` sub-command. Take a look at the CLI usage for more details:

```bash
sftpgo resetprovider --help
```

:warning: Please note that some data providers (e.g. MySQL and CockroachDB) do not support schema changes within a transaction; this means that you may end up with an inconsistent schema if migrations are forcibly aborted or run concurrently by multiple instances.

## Create the first admin

To start using SFTPGo you need to create an admin user. You can do it in several ways:

- using the web admin interface. The default URL is [http://127.0.0.1:8080/web/admin](http://127.0.0.1:8080/web/admin)
- by loading initial data
- by enabling `create_default_admin` in your configuration file and setting the environment variables `SFTPGO_DEFAULT_ADMIN_USERNAME` and `SFTPGO_DEFAULT_ADMIN_PASSWORD`

## Upgrading

SFTPGo supports upgrading from the previous release branch to the current one.
Some supported upgrade paths are:

- from 1.2.x to 2.0.x
- from 2.0.x to 2.1.x and so on.

For supported upgrade paths, data and schema are migrated automatically; alternately you can use the `initprovider` command.

So if, for example, you want to upgrade from a version before 1.2.x to 2.0.x, you must first install version 1.2.x, update the data provider and finally install version 2.0.x. It is recommended to always install the latest available minor version; do not install 1.2.0 if 1.2.2 is available.

Loading data from a provider independent JSON dump is supported from the previous release branch to the current one too. After upgrading SFTPGo it is advisable to regenerate the JSON dump from the new version.

## Downgrading

If for some reason you want to downgrade SFTPGo, you may need to downgrade your data provider schema and data as well. You can use the `revertprovider` command for this task.

As with upgrading, SFTPGo supports downgrading from the previous release branch to the current one.

So, if you plan to downgrade from 2.0.x to 1.2.x, you can prepare your data provider before uninstalling the 2.0.x version by executing the following command from the configuration directory:

```shell
sftpgo revertprovider --to-version 4
```

Take a look at the CLI usage to see the supported values for the `--to-version` argument and to learn how to specify a different configuration file:

```shell
sftpgo revertprovider --help
```

The `revertprovider` command is not supported for the memory provider.

Please note that we only support the current release branch and the current main branch; if you find a bug it is better to report it rather than downgrading to an older unsupported version.

## Users and folders management

After starting SFTPGo you can manage users and folders using:

- the [web based administration interface](./docs/web-admin.md)
- the [REST API](./docs/rest-api.md)

To support embedded data providers like `bolt` and `SQLite` we can't have a CLI that directly writes users and folders to the data provider; we always have to use the REST API.

Full details for users, folders, admins and other resources are documented in the [OpenAPI](./openapi/openapi.yaml) schema. If you want to render the schema without importing it manually, you can explore it on [Stoplight](https://sftpgo.stoplight.io/docs/sftpgo/openapi.yaml).

## Tutorials

Some step-by-step tutorials can be found inside the source tree [howto](./docs/howto "How-to") directory.

## Authentication options

<details><summary>External Authentication</summary>

Custom authentication methods can easily be added. SFTPGo supports external authentication modules, and writing a new backend can be as simple as a few lines of shell script. More information can be found [here](./docs/external-auth.md).

</details>

<details><summary>Keyboard Interactive Authentication</summary>

Keyboard interactive authentication is, in general, a series of questions asked by the server with responses provided by the client.

This authentication method is typically used for multi-factor authentication.

More information can be found [here](./docs/keyboard-interactive.md).

</details>

## Dynamic user creation or modification

A user can be created or modified by an external program just before the login. More information about this can be found [here](./docs/dynamic-user-mod.md).

## Custom Actions

SFTPGo allows you to configure custom commands and/or HTTP hooks to receive notifications about file uploads, deletions and several other events.

More information about custom actions can be found [here](./docs/custom-actions.md).

## Virtual folders

Directories outside the user home directory, or based on a different storage provider, can be exposed as virtual folders; for details see [virtual folders](./docs/virtual-folders.md).

## Other hooks

You can get notified as soon as a new connection is established using the [Post-connect hook](./docs/post-connect-hook.md) and after each login using the [Post-login hook](./docs/post-login-hook.md). You can use your own hook to [check passwords](./docs/check-password-hook.md).

## Storage backends

### S3/GCP/Azure

Each user can be mapped with a [S3 Compatible Object Storage](./docs/s3.md)/[Google Cloud Storage](./docs/google-cloud-storage.md)/[Azure Blob Storage](./docs/azure-blob-storage.md) bucket, or a bucket virtual folder, exposed over SFTP/SCP/FTP/WebDAV.

### SFTP backend

Each user can be mapped to another SFTP server account or a subfolder of it. More information can be found in [sftpfs](./docs/sftpfs.md).

### Encrypted backend

Data at-rest encryption is supported via the [cryptfs backend](./docs/dare.md).

### Other Storage backends

Adding new storage backends is quite easy:

- implement the [Fs interface](./vfs/vfs.go#L28 "interface for filesystem backends")
- update the user method `GetFilesystem` to return the new backend
- update the web interface and the REST API CLI
- add the flags for the new storage backend to the `portable` mode

Anyway, some backends require a pay-per-use account (or they offer a free account for a limited time period only). To be able to add support for such backends or to review pull requests, please provide a test account. The test account must be available for enough time to maintain the backend and to do basic tests before each new release.

## Brute force protection

SFTPGo supports a built-in [defender](./docs/defender.md).

Alternately you can use the [connection failed logs](./docs/logs.md) for integration in tools such as [Fail2ban](http://www.fail2ban.org/). Examples of [jails](./fail2ban/jails) and [filters](./fail2ban/filters) working with `systemd`/`journald` are available in the fail2ban directory.

## Account's configuration properties

Detailed information about account configuration properties can be found [here](./docs/account.md).

## Performance

SFTPGo can easily saturate a Gigabit connection on low end hardware with no special configuration; this is generally enough for most use cases.

More in-depth analysis of performance can be found [here](./docs/performance.md).

## Release Cadence

SFTPGo releases are feature-driven; we don't have a fixed time based schedule. As a rough estimate, you can expect 1 or 2 new releases per year.

## Acknowledgements

SFTPGo makes use of the third party libraries listed inside [go.mod](./go.mod).

We are very grateful to all the people who contributed with ideas and/or pull requests.

Thank you [ysura](https://www.ysura.com/) for granting me stable access to a test AWS S3 account.

## Sponsors

I would like to make SFTPGo a sustainable long term project; your [sponsorship](https://github.com/sponsors/drakkan) will really help! :heart:

Thank you to our sponsors!

[<img src="https://www.7digital.com/wp-content/themes/sevendigital/images/top_logo.png" alt="7digital logo">](https://www.7digital.com/)

## License

GNU AGPLv3

---

**SECURITY.md** (new file, 12 lines)

# Security Policy

## Supported Versions

Only the current release of the software is actively supported. If you need help backporting fixes into an older release, feel free to ask.

## Reporting a Vulnerability

Email your vulnerability information to SFTPGo's maintainer:

Nicola Murino <nicola.murino@gmail.com>

---

**acme/account.go** (new file, 46 lines)

```go
// Copyright (C) 2019-2022 Nicola Murino
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, version 3.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

package acme

import (
	"crypto"

	"github.com/go-acme/lego/v4/registration"
)

type account struct {
	Email        string                 `json:"email"`
	Registration *registration.Resource `json:"registration"`
	key          crypto.PrivateKey
}

/** Implementation of the registration.User interface **/

// GetEmail returns the email address for the account.
func (a *account) GetEmail() string {
	return a.Email
}

// GetRegistration returns the server registration.
func (a *account) GetRegistration() *registration.Resource {
	return a.Registration
}

// GetPrivateKey returns the private account key.
func (a *account) GetPrivateKey() crypto.PrivateKey {
	return a.key
}

/** End **/
```

---

**acme/acme.go** (new file, 699 lines; the listing below is truncated)

// Copyright (C) 2019-2022 Nicola Murino
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published
|
||||
// by the Free Software Foundation, version 3.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
// Package acme provides automatic access to certificates from Let's Encrypt and any other ACME-based CA
|
||||
// The code here is largely coiped from https://github.com/go-acme/lego/tree/master/cmd
|
||||
// This package is intended to provide basic functionality for obtaining and renewing certificates
|
||||
// and implements the "HTTP-01" and "TLSALPN-01" challenge types.
|
||||
// For more advanced features use external tools such as "lego"
|
||||
package acme
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"crypto/x509"
|
||||
"encoding/json"
|
||||
"encoding/pem"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/go-acme/lego/v4/certcrypto"
|
||||
"github.com/go-acme/lego/v4/certificate"
|
||||
"github.com/go-acme/lego/v4/challenge"
|
||||
"github.com/go-acme/lego/v4/challenge/http01"
|
||||
"github.com/go-acme/lego/v4/challenge/tlsalpn01"
|
||||
"github.com/go-acme/lego/v4/lego"
|
||||
"github.com/go-acme/lego/v4/log"
|
||||
"github.com/go-acme/lego/v4/providers/http/webroot"
|
||||
"github.com/go-acme/lego/v4/registration"
|
||||
"github.com/robfig/cron/v3"
|
||||
|
||||
"github.com/drakkan/sftpgo/v2/ftpd"
|
||||
"github.com/drakkan/sftpgo/v2/httpd"
|
||||
"github.com/drakkan/sftpgo/v2/logger"
|
||||
"github.com/drakkan/sftpgo/v2/telemetry"
|
||||
"github.com/drakkan/sftpgo/v2/util"
|
||||
"github.com/drakkan/sftpgo/v2/version"
|
||||
"github.com/drakkan/sftpgo/v2/webdavd"
|
||||
)
|
||||
|
||||
const (
|
||||
logSender = "acme"
|
||||
)
|
||||
|
||||
var (
|
||||
config *Configuration
|
||||
scheduler *cron.Cron
|
||||
logMode int
|
||||
)
|
||||
|
||||
// GetCertificates tries to obtain the certificates for the configured domains
func GetCertificates() error {
	if config == nil {
		return errors.New("acme is disabled")
	}
	return config.getCertificates()
}

// HTTP01Challenge defines the configuration for HTTP-01 challenge type
type HTTP01Challenge struct {
	Port        int    `json:"port" mapstructure:"port"`
	WebRoot     string `json:"webroot" mapstructure:"webroot"`
	ProxyHeader string `json:"proxy_header" mapstructure:"proxy_header"`
}

func (c *HTTP01Challenge) isEnabled() bool {
	return c.Port > 0 || c.WebRoot != ""
}

func (c *HTTP01Challenge) validate() error {
	if !c.isEnabled() {
		return nil
	}
	if c.WebRoot != "" {
		if !filepath.IsAbs(c.WebRoot) {
			return fmt.Errorf("invalid HTTP-01 challenge web root, please set an absolute path")
		}
		_, err := os.Stat(c.WebRoot)
		if err != nil {
			return fmt.Errorf("invalid HTTP-01 challenge web root: %w", err)
		}
	} else {
		if c.Port > 65535 {
			return fmt.Errorf("invalid HTTP-01 challenge port: %d", c.Port)
		}
	}
	return nil
}

// TLSALPN01Challenge defines the configuration for TLSALPN-01 challenge type
type TLSALPN01Challenge struct {
	Port int `json:"port" mapstructure:"port"`
}

func (c *TLSALPN01Challenge) isEnabled() bool {
	return c.Port > 0
}

func (c *TLSALPN01Challenge) validate() error {
	if !c.isEnabled() {
		return nil
	}
	if c.Port > 65535 {
		return fmt.Errorf("invalid TLSALPN-01 challenge port: %d", c.Port)
	}
	return nil
}

// Configuration holds the ACME configuration
type Configuration struct {
	Email      string `json:"email" mapstructure:"email"`
	KeyType    string `json:"key_type" mapstructure:"key_type"`
	CertsPath  string `json:"certs_path" mapstructure:"certs_path"`
	CAEndpoint string `json:"ca_endpoint" mapstructure:"ca_endpoint"`
	// if a certificate is to be valid for multiple domains specify the names separated by commas,
	// for example: example.com,www.example.com
	Domains            []string           `json:"domains" mapstructure:"domains"`
	RenewDays          int                `json:"renew_days" mapstructure:"renew_days"`
	HTTP01Challenge    HTTP01Challenge    `json:"http01_challenge" mapstructure:"http01_challenge"`
	TLSALPN01Challenge TLSALPN01Challenge `json:"tls_alpn01_challenge" mapstructure:"tls_alpn01_challenge"`
	accountConfigPath  string
	accountKeyPath     string
	lockPath           string
	tempDir            string
}

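// Example (illustrative sketch, not part of the original source; all field
// values are assumptions): a Configuration targeting the Let's Encrypt
// production endpoint with a webroot HTTP-01 challenge could look like:
//
//	cfg := Configuration{
//		Email:      "admin@example.com",
//		KeyType:    string(certcrypto.EC256),
//		CertsPath:  "certs",
//		CAEndpoint: "https://acme-v02.api.letsencrypt.org/directory",
//		Domains:    []string{"example.com,www.example.com"},
//		RenewDays:  30,
//		HTTP01Challenge: HTTP01Challenge{
//			WebRoot: "/var/www/html",
//		},
//	}
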
// Initialize validates and sets the configuration
func (c *Configuration) Initialize(configDir string, checkRenew bool) error {
	config = nil
	setLogMode(checkRenew)
	c.checkDomains()
	if len(c.Domains) == 0 {
		acmeLog(logger.LevelInfo, "no domains configured, acme disabled")
		return nil
	}
	if c.Email == "" || !util.IsEmailValid(c.Email) {
		return fmt.Errorf("invalid email address %#v", c.Email)
	}
	if c.RenewDays < 1 {
		return fmt.Errorf("invalid number of days remaining before renewal: %d", c.RenewDays)
	}
	supportedKeyTypes := []string{
		string(certcrypto.EC256),
		string(certcrypto.EC384),
		string(certcrypto.RSA2048),
		string(certcrypto.RSA4096),
		string(certcrypto.RSA8192),
	}
	if !util.Contains(supportedKeyTypes, c.KeyType) {
		return fmt.Errorf("invalid key type %#v", c.KeyType)
	}
	caURL, err := url.Parse(c.CAEndpoint)
	if err != nil {
		return fmt.Errorf("invalid CA endpoint: %w", err)
	}
	if !util.IsFileInputValid(c.CertsPath) {
		return fmt.Errorf("invalid certs path %#v", c.CertsPath)
	}
	if !filepath.IsAbs(c.CertsPath) {
		c.CertsPath = filepath.Join(configDir, c.CertsPath)
	}
	err = os.MkdirAll(c.CertsPath, 0700)
	if err != nil {
		return fmt.Errorf("unable to create certs path %#v: %w", c.CertsPath, err)
	}
	c.tempDir = filepath.Join(c.CertsPath, "temp")
	err = os.MkdirAll(c.tempDir, 0700)
	if err != nil {
		return fmt.Errorf("unable to create certs temp path %#v: %w", c.tempDir, err)
	}
	serverPath := strings.NewReplacer(":", "_", "/", string(os.PathSeparator)).Replace(caURL.Host)
	accountPath := filepath.Join(c.CertsPath, serverPath)
	err = os.MkdirAll(accountPath, 0700)
	if err != nil {
		return fmt.Errorf("unable to create account path %#v: %w", accountPath, err)
	}
	c.accountConfigPath = filepath.Join(accountPath, c.Email+".json")
	c.accountKeyPath = filepath.Join(accountPath, c.Email+".key")
	c.lockPath = filepath.Join(c.CertsPath, "lock")

	if err = c.validateChallenges(); err != nil {
		return err
	}

	acmeLog(logger.LevelInfo, "configured domains: %+v", c.Domains)
	config = c
	if checkRenew {
		return startScheduler()
	}
	return nil
}

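// Example (hedged usage sketch; the configDir value is an assumption): after
// building a Configuration as above, a caller validates it and then requests
// the certificates; passing checkRenew=true also starts the renewal scheduler:
//
//	if err := cfg.Initialize("/etc/sftpgo", true); err != nil {
//		// handle the error
//	}
//	if err := GetCertificates(); err != nil {
//		// handle the error
//	}
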
func (c *Configuration) validateChallenges() error {
	if !c.HTTP01Challenge.isEnabled() && !c.TLSALPN01Challenge.isEnabled() {
		return fmt.Errorf("no challenge type defined")
	}
	if err := c.HTTP01Challenge.validate(); err != nil {
		return err
	}
	if err := c.TLSALPN01Challenge.validate(); err != nil {
		return err
	}
	return nil
}

func (c *Configuration) checkDomains() {
	var domains []string
	for _, domain := range c.Domains {
		domain = strings.TrimSpace(domain)
		if domain == "" {
			continue
		}
		if d, ok := isDomainValid(domain); ok {
			domains = append(domains, d)
		}
	}
	c.Domains = util.RemoveDuplicates(domains, true)
}

func (c *Configuration) setLockTime() error {
	lockTime := fmt.Sprintf("%v", util.GetTimeAsMsSinceEpoch(time.Now()))
	err := os.WriteFile(c.lockPath, []byte(lockTime), 0600)
	if err != nil {
		acmeLog(logger.LevelError, "unable to save lock time to %#v: %v", c.lockPath, err)
		return fmt.Errorf("unable to save lock time: %w", err)
	}
	acmeLog(logger.LevelDebug, "lock time saved: %#v", lockTime)
	return nil
}

func (c *Configuration) getLockTime() (time.Time, error) {
	content, err := os.ReadFile(c.lockPath)
	if err != nil {
		if os.IsNotExist(err) {
			acmeLog(logger.LevelDebug, "lock file %#v not found", c.lockPath)
			return time.Time{}, nil
		}
		acmeLog(logger.LevelError, "unable to read lock file %#v: %v", c.lockPath, err)
		return time.Time{}, err
	}
	msec, err := strconv.ParseInt(strings.TrimSpace(string(content)), 10, 64)
	if err != nil {
		acmeLog(logger.LevelError, "unable to parse lock time: %v", err)
		return time.Time{}, fmt.Errorf("unable to parse lock time: %w", err)
	}
	return util.GetTimeFromMsecSinceEpoch(msec), nil
}

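// The lock file contains a single decimal timestamp in milliseconds since the
// Unix epoch, for example "1672531200000" for 2023-01-01T00:00:00Z; a value
// that fails to parse is reported as an error rather than silently ignored.
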
func (c *Configuration) saveAccount(account *account) error {
	jsonBytes, err := json.MarshalIndent(account, "", "\t")
	if err != nil {
		return err
	}
	err = os.WriteFile(c.accountConfigPath, jsonBytes, 0600)
	if err != nil {
		acmeLog(logger.LevelError, "unable to save account to file %#v: %v", c.accountConfigPath, err)
		return fmt.Errorf("unable to save account: %w", err)
	}
	return nil
}

func (c *Configuration) getAccount(privateKey crypto.PrivateKey) (account, error) {
	_, err := os.Stat(c.accountConfigPath)
	if err != nil && os.IsNotExist(err) {
		acmeLog(logger.LevelDebug, "account does not exist")
		return account{Email: c.Email, key: privateKey}, nil
	}
	var account account
	fileBytes, err := os.ReadFile(c.accountConfigPath)
	if err != nil {
		acmeLog(logger.LevelError, "unable to read account from file %#v: %v", c.accountConfigPath, err)
		return account, fmt.Errorf("unable to read account from file: %w", err)
	}
	err = json.Unmarshal(fileBytes, &account)
	if err != nil {
		acmeLog(logger.LevelError, "invalid account file content: %v", err)
		return account, fmt.Errorf("unable to parse account file as JSON: %w", err)
	}
	account.key = privateKey
	if account.Registration == nil || account.Registration.Body.Status == "" {
		acmeLog(logger.LevelInfo, "couldn't load account but got a key. Try to look the account up")
		reg, err := c.tryRecoverRegistration(privateKey)
		if err != nil {
			acmeLog(logger.LevelError, "unable to look the account up: %v", err)
			return account, fmt.Errorf("unable to look the account up: %w", err)
		}
		account.Registration = reg
		err = c.saveAccount(&account)
		if err != nil {
			return account, err
		}
	}

	return account, nil
}

func (c *Configuration) loadPrivateKey() (crypto.PrivateKey, error) {
	keyBytes, err := os.ReadFile(c.accountKeyPath)
	if err != nil {
		acmeLog(logger.LevelError, "unable to read account key from file %#v: %v", c.accountKeyPath, err)
		return nil, fmt.Errorf("unable to read account key: %w", err)
	}

	keyBlock, _ := pem.Decode(keyBytes)

	var privateKey crypto.PrivateKey
	switch keyBlock.Type {
	case "RSA PRIVATE KEY":
		privateKey, err = x509.ParsePKCS1PrivateKey(keyBlock.Bytes)
	case "EC PRIVATE KEY":
		privateKey, err = x509.ParseECPrivateKey(keyBlock.Bytes)
	default:
		err = fmt.Errorf("unknown private key type %#v", keyBlock.Type)
	}
	if err != nil {
		acmeLog(logger.LevelError, "unable to parse private key from file %#v: %v", c.accountKeyPath, err)
		return privateKey, fmt.Errorf("unable to parse private key: %w", err)
	}
	return privateKey, nil
}

func (c *Configuration) generatePrivateKey() (crypto.PrivateKey, error) {
	privateKey, err := certcrypto.GeneratePrivateKey(certcrypto.KeyType(c.KeyType))
	if err != nil {
		acmeLog(logger.LevelError, "unable to generate private key: %v", err)
		return nil, fmt.Errorf("unable to generate private key: %w", err)
	}
	certOut, err := os.Create(c.accountKeyPath)
	if err != nil {
		acmeLog(logger.LevelError, "unable to save private key to file %#v: %v", c.accountKeyPath, err)
		return nil, fmt.Errorf("unable to save private key: %w", err)
	}
	defer certOut.Close()

	pemKey := certcrypto.PEMBlock(privateKey)
	err = pem.Encode(certOut, pemKey)
	if err != nil {
		acmeLog(logger.LevelError, "unable to encode private key: %v", err)
		return nil, fmt.Errorf("unable to encode private key: %w", err)
	}
	acmeLog(logger.LevelDebug, "new account private key generated")

	return privateKey, nil
}

func (c *Configuration) getPrivateKey() (crypto.PrivateKey, error) {
	_, err := os.Stat(c.accountKeyPath)
	if err != nil && os.IsNotExist(err) {
		acmeLog(logger.LevelDebug, "private key file %#v does not exist, generating new private key", c.accountKeyPath)
		return c.generatePrivateKey()
	}
	acmeLog(logger.LevelDebug, "loading private key from file %#v, stat error: %v", c.accountKeyPath, err)
	return c.loadPrivateKey()
}

func (c *Configuration) loadCertificatesForDomain(domain string) ([]*x509.Certificate, error) {
	domain = sanitizedDomain(domain)
	acmeLog(logger.LevelDebug, "loading certificates for domain %#v", domain)
	content, err := os.ReadFile(filepath.Join(c.CertsPath, domain+".crt"))
	if err != nil {
		acmeLog(logger.LevelError, "unable to load certificates for domain %#v: %v", domain, err)
		return nil, fmt.Errorf("unable to load certificates for domain %#v: %w", domain, err)
	}
	certs, err := certcrypto.ParsePEMBundle(content)
	if err != nil {
		acmeLog(logger.LevelError, "unable to parse certificates for domain %#v: %v", domain, err)
		return certs, fmt.Errorf("unable to parse certificates for domain %#v: %w", domain, err)
	}
	return certs, nil
}

func (c *Configuration) needRenewal(x509Cert *x509.Certificate, domain string) bool {
	if x509Cert.IsCA {
		acmeLog(logger.LevelError, "certificate bundle starts with a CA certificate, cannot renew domain %v", domain)
		return false
	}
	notAfter := int(time.Until(x509Cert.NotAfter).Hours() / 24.0)
	if notAfter > c.RenewDays {
		acmeLog(logger.LevelDebug, "the certificate for domain %#v expires in %d days, no renewal", domain, notAfter)
		return false
	}
	return true
}

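// Worked example for needRenewal: with RenewDays set to 30, a certificate
// whose NotAfter is 45 days away yields notAfter = int(45*24/24) = 45, which
// is greater than 30, so no renewal; once fewer than 30 full days remain the
// check returns true and the certificate is renewed on the next run.
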
func (c *Configuration) setup() (*account, *lego.Client, error) {
	privateKey, err := c.getPrivateKey()
	if err != nil {
		return nil, nil, err
	}
	account, err := c.getAccount(privateKey)
	if err != nil {
		return nil, nil, err
	}
	config := lego.NewConfig(&account)
	config.CADirURL = c.CAEndpoint
	config.Certificate.KeyType = certcrypto.KeyType(c.KeyType)
	config.UserAgent = fmt.Sprintf("SFTPGo/%v", version.Get().Version)
	client, err := lego.NewClient(config)
	if err != nil {
		acmeLog(logger.LevelError, "unable to get ACME client: %v", err)
		return nil, nil, fmt.Errorf("unable to get ACME client: %w", err)
	}
	err = c.setupChallenges(client)
	if err != nil {
		return nil, nil, err
	}
	return &account, client, nil
}

func (c *Configuration) setupChallenges(client *lego.Client) error {
	client.Challenge.Remove(challenge.DNS01)
	if c.HTTP01Challenge.isEnabled() {
		if c.HTTP01Challenge.WebRoot != "" {
			acmeLog(logger.LevelDebug, "configuring HTTP-01 web root challenge, path %#v", c.HTTP01Challenge.WebRoot)
			providerServer, err := webroot.NewHTTPProvider(c.HTTP01Challenge.WebRoot)
			if err != nil {
				acmeLog(logger.LevelError, "unable to create HTTP-01 web root challenge provider from path %#v: %v",
					c.HTTP01Challenge.WebRoot, err)
				return fmt.Errorf("unable to create HTTP-01 web root challenge provider: %w", err)
			}
			err = client.Challenge.SetHTTP01Provider(providerServer)
			if err != nil {
				acmeLog(logger.LevelError, "unable to set HTTP-01 challenge provider: %v", err)
				return fmt.Errorf("unable to set HTTP-01 challenge provider: %w", err)
			}
		} else {
			acmeLog(logger.LevelDebug, "configuring HTTP-01 challenge, port %d", c.HTTP01Challenge.Port)
			providerServer := http01.NewProviderServer("", fmt.Sprintf("%d", c.HTTP01Challenge.Port))
			if c.HTTP01Challenge.ProxyHeader != "" {
				acmeLog(logger.LevelDebug, "setting proxy header to \"%s\"", c.HTTP01Challenge.ProxyHeader)
				providerServer.SetProxyHeader(c.HTTP01Challenge.ProxyHeader)
			}
			err := client.Challenge.SetHTTP01Provider(providerServer)
			if err != nil {
				acmeLog(logger.LevelError, "unable to set HTTP-01 challenge provider: %v", err)
				return fmt.Errorf("unable to set HTTP-01 challenge provider: %w", err)
			}
		}
	} else {
		client.Challenge.Remove(challenge.HTTP01)
	}
	if c.TLSALPN01Challenge.isEnabled() {
		acmeLog(logger.LevelDebug, "configuring TLSALPN-01 challenge, port %d", c.TLSALPN01Challenge.Port)
		err := client.Challenge.SetTLSALPN01Provider(tlsalpn01.NewProviderServer("", fmt.Sprintf("%d", c.TLSALPN01Challenge.Port)))
		if err != nil {
			acmeLog(logger.LevelError, "unable to set TLSALPN-01 challenge provider: %v", err)
			return fmt.Errorf("unable to set TLSALPN-01 challenge provider: %w", err)
		}
	} else {
		client.Challenge.Remove(challenge.TLSALPN01)
	}

	return nil
}

func (c *Configuration) register(client *lego.Client) (*registration.Resource, error) {
	return client.Registration.Register(registration.RegisterOptions{TermsOfServiceAgreed: true})
}

func (c *Configuration) tryRecoverRegistration(privateKey crypto.PrivateKey) (*registration.Resource, error) {
	config := lego.NewConfig(&account{key: privateKey})
	config.CADirURL = c.CAEndpoint
	config.UserAgent = fmt.Sprintf("SFTPGo/%v", version.Get().Version)

	client, err := lego.NewClient(config)
	if err != nil {
		acmeLog(logger.LevelError, "unable to get the ACME client: %v", err)
		return nil, err
	}

	return client.Registration.ResolveAccountByKey()
}

func (c *Configuration) obtainAndSaveCertificate(client *lego.Client, domain string) error {
	var domains []string

	for _, d := range strings.Split(domain, ",") {
		d = strings.TrimSpace(d)
		if d != "" {
			domains = append(domains, d)
		}
	}
	acmeLog(logger.LevelInfo, "requesting certificates for domains %+v", domains)
	request := certificate.ObtainRequest{
		Domains:                        domains,
		Bundle:                         true,
		MustStaple:                     false,
		PreferredChain:                 "",
		AlwaysDeactivateAuthorizations: false,
	}
	cert, err := client.Certificate.Obtain(request)
	if err != nil {
		acmeLog(logger.LevelError, "unable to obtain certificates for domains %+v: %v", domains, err)
		return fmt.Errorf("unable to obtain certificates: %w", err)
	}
	domain = sanitizedDomain(domain)
	err = os.WriteFile(filepath.Join(c.CertsPath, domain+".crt"), cert.Certificate, 0600)
	if err != nil {
		acmeLog(logger.LevelError, "unable to save certificate for domain %v: %v", domain, err)
		return fmt.Errorf("unable to save certificate: %w", err)
	}
	err = os.WriteFile(filepath.Join(c.CertsPath, domain+".key"), cert.PrivateKey, 0600)
	if err != nil {
		acmeLog(logger.LevelError, "unable to save private key for domain %v: %v", domain, err)
		return fmt.Errorf("unable to save private key: %w", err)
	}
	jsonBytes, err := json.MarshalIndent(cert, "", "\t")
	if err != nil {
		acmeLog(logger.LevelError, "unable to marshal certificate resources for domain %v: %v", domain, err)
		return err
	}
	err = os.WriteFile(filepath.Join(c.CertsPath, domain+".json"), jsonBytes, 0600)
	if err != nil {
		acmeLog(logger.LevelError, "unable to save certificate resources for domain %v: %v", domain, err)
		return fmt.Errorf("unable to save certificate resources: %w", err)
	}

	acmeLog(logger.LevelInfo, "certificates for domains %+v saved", domains)
	return nil
}

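// For each configured domain entry three files are written below CertsPath,
// named after the sanitized domain string: for the entry
// "example.com,www.example.com" that is "example.com_www.example.com.crt"
// (the PEM certificate bundle), ".key" (the private key) and ".json" (the
// lego certificate resource).
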
func (c *Configuration) getCertificates() error {
	account, client, err := c.setup()
	if err != nil {
		return err
	}
	if account.Registration == nil {
		reg, err := c.register(client)
		if err != nil {
			acmeLog(logger.LevelError, "unable to register account: %v", err)
			return fmt.Errorf("unable to register account: %w", err)
		}
		account.Registration = reg
		err = c.saveAccount(account)
		if err != nil {
			return err
		}
	}
	for _, domain := range c.Domains {
		err = c.obtainAndSaveCertificate(client, domain)
		if err != nil {
			return err
		}
	}
	return nil
}

func (c *Configuration) renewCertificates() error {
	lockTime, err := c.getLockTime()
	if err != nil {
		return err
	}
	acmeLog(logger.LevelDebug, "certificate renew lock time %v", lockTime)
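	// skip this run if the lock was written less than five minutes ago
	// (allowing up to 30 seconds of clock skew), since a renewal is then
	// already in progress or has just completed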
	if lockTime.Add(-30*time.Second).Before(time.Now()) && lockTime.Add(5*time.Minute).After(time.Now()) {
		acmeLog(logger.LevelInfo, "certificate renew skipped, lock time too close: %v", lockTime)
		return nil
	}
	err = c.setLockTime()
	if err != nil {
		return err
	}
	account, client, err := c.setup()
	if err != nil {
		return err
	}
	if account.Registration == nil {
		acmeLog(logger.LevelError, "cannot renew certificates, your account is not registered")
		return fmt.Errorf("cannot renew certificates, your account is not registered")
	}
	var errRenew error
	needReload := false
	for _, domain := range c.Domains {
		certificates, err := c.loadCertificatesForDomain(domain)
		if err != nil {
			return err
		}
		cert := certificates[0]
		if !c.needRenewal(cert, domain) {
			continue
		}
		err = c.obtainAndSaveCertificate(client, domain)
		if err != nil {
			errRenew = err
		} else {
			needReload = true
		}
	}
	if needReload {
		// at least one certificate has been renewed, send a reload to all services that may be using certificates
		err = ftpd.ReloadCertificateMgr()
		acmeLog(logger.LevelInfo, "ftpd certificate manager reloaded, error: %v", err)
		err = httpd.ReloadCertificateMgr()
		acmeLog(logger.LevelInfo, "httpd certificate manager reloaded, error: %v", err)
		err = webdavd.ReloadCertificateMgr()
		acmeLog(logger.LevelInfo, "webdavd certificate manager reloaded, error: %v", err)
		err = telemetry.ReloadCertificateMgr()
		acmeLog(logger.LevelInfo, "telemetry certificate manager reloaded, error: %v", err)
	}

	return errRenew
}

func isDomainValid(domain string) (string, bool) {
	isValid := false
	for _, d := range strings.Split(domain, ",") {
		d = strings.TrimSpace(d)
		if d != "" {
			isValid = true
			break
		}
	}
	return domain, isValid
}

func sanitizedDomain(domain string) string {
	return strings.NewReplacer(":", "_", "*", "_", ",", "_").Replace(domain)
}

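// sanitizedDomain maps characters that are unsafe in file names to
// underscores, so the wildcard entry "*.example.com,example.com" becomes
// "_.example.com_example.com" and can be used as a certificate file name.
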
func stopScheduler() {
	if scheduler != nil {
		scheduler.Stop()
		scheduler = nil
	}
}

func startScheduler() error {
	stopScheduler()

	rand.Seed(time.Now().UnixNano())
	randSecs := rand.Intn(59)

	scheduler = cron.New()
	_, err := scheduler.AddFunc(fmt.Sprintf("@every 12h0m%ds", randSecs), renewCertificates)
	if err != nil {
		return fmt.Errorf("unable to schedule certificates renewal: %w", err)
	}

	acmeLog(logger.LevelInfo, "starting scheduler, initial certificates check in %d seconds", randSecs)
	initialTimer := time.NewTimer(time.Duration(randSecs) * time.Second)
	go func() {
		<-initialTimer.C
		renewCertificates()
	}()

	scheduler.Start()
	return nil
}

func renewCertificates() {
	if config != nil {
		if err := config.renewCertificates(); err != nil {
			acmeLog(logger.LevelError, "unable to renew certificates: %v", err)
		}
	}
}

func setLogMode(checkRenew bool) {
	if checkRenew {
		logMode = 1
	} else {
		logMode = 2
	}
	log.Logger = &logger.LegoAdapter{
		LogToConsole: logMode != 1,
	}
}

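// acmeLog routes messages according to the mode set by setLogMode: when
// Initialize was called with checkRenew set (logMode 1) it writes to the
// standard service logger, otherwise it writes directly to the console at
// the matching level.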
func acmeLog(level logger.LogLevel, format string, v ...any) {
	if logMode == 1 {
		logger.Log(level, logSender, "", format, v...)
	} else {
		switch level {
		case logger.LevelDebug:
			logger.DebugToConsole(format, v...)
		case logger.LevelInfo:
			logger.InfoToConsole(format, v...)
		case logger.LevelWarn:
			logger.WarnToConsole(format, v...)
		default:
			logger.ErrorToConsole(format, v...)
		}
	}
}

api/api.go
@@ -1,78 +0,0 @@
// Package api implements REST API for sftpgo.
// The REST API allows you to manage users and quotas and to get real-time reports
// for the active connections, with the possibility of forcibly closing a connection.
// The OpenAPI 3 schema for the exposed API can be found inside the source tree:
// https://github.com/drakkan/sftpgo/tree/master/api/schema/openapi.yaml
package api

import (
	"net/http"

	"github.com/drakkan/sftpgo/dataprovider"
	"github.com/go-chi/chi"
	"github.com/go-chi/render"
)

const (
	logSender             = "api"
	activeConnectionsPath = "/api/v1/connection"
	quotaScanPath         = "/api/v1/quota_scan"
	userPath              = "/api/v1/user"
	versionPath           = "/api/v1/version"
	metricsPath           = "/metrics"
)

var (
	router       *chi.Mux
	dataProvider dataprovider.Provider
)

// HTTPDConf httpd daemon configuration
type HTTPDConf struct {
	// The port used for serving HTTP requests. 0 disables the HTTP server. Default: 8080
	BindPort int `json:"bind_port" mapstructure:"bind_port"`
	// The address to listen on. A blank value means listen on all available network interfaces. Default: "127.0.0.1"
	BindAddress string `json:"bind_address" mapstructure:"bind_address"`
}

type apiResponse struct {
	Error      string `json:"error"`
	Message    string `json:"message"`
	HTTPStatus int    `json:"status"`
}

func init() {
	initializeRouter()
}

// SetDataProvider sets the data provider to use to fetch the data about users
func SetDataProvider(provider dataprovider.Provider) {
	dataProvider = provider
}

func sendAPIResponse(w http.ResponseWriter, r *http.Request, err error, message string, code int) {
	var errorString string
	if err != nil {
		errorString = err.Error()
	}
	resp := apiResponse{
		Error:      errorString,
		Message:    message,
		HTTPStatus: code,
	}
	if code != http.StatusOK {
		w.Header().Set("Content-Type", "application/json; charset=utf-8")
		w.WriteHeader(code)
	}
	render.JSON(w, r, resp)
}

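// A handler would typically report success or failure like this (hedged
// sketch; the handler and the removeUser helper are assumptions, only
// sendAPIResponse and getRespStatus below are from this file):
//
//	func deleteUserHandler(w http.ResponseWriter, r *http.Request) {
//		if err := removeUser(r); err != nil {
//			sendAPIResponse(w, r, err, "", getRespStatus(err))
//			return
//		}
//		sendAPIResponse(w, r, nil, "User deleted", http.StatusOK)
//	}
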
func getRespStatus(err error) int {
	if _, ok := err.(*dataprovider.ValidationError); ok {
		return http.StatusBadRequest
	}
	if _, ok := err.(*dataprovider.MethodDisabledError); ok {
		return http.StatusForbidden
	}
	return http.StatusInternalServerError
}

api/api_test.go
@@ -1,762 +0,0 @@
package api_test

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net"
	"net/http"
	"net/http/httptest"
	"os"
	"path/filepath"
	"runtime"
	"strconv"
	"testing"
	"time"

	"github.com/go-chi/render"
	_ "github.com/go-sql-driver/mysql"
	_ "github.com/lib/pq"
	_ "github.com/mattn/go-sqlite3"
	"github.com/rs/zerolog"

	"github.com/drakkan/sftpgo/api"
	"github.com/drakkan/sftpgo/config"
	"github.com/drakkan/sftpgo/dataprovider"
	"github.com/drakkan/sftpgo/logger"
	"github.com/drakkan/sftpgo/sftpd"
)

const (
	defaultUsername       = "test_user"
	defaultPassword       = "test_password"
	testPubKey            = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC03jj0D+djk7pxIf/0OhrxrchJTRZklofJ1NoIu4752Sq02mdXmarMVsqJ1cAjV5LBVy3D1F5U6XW4rppkXeVtd04Pxb09ehtH0pRRPaoHHlALiJt8CoMpbKYMA8b3KXPPriGxgGomvtU2T2RMURSwOZbMtpsugfjYSWenyYX+VORYhylWnSXL961LTyC21ehd6d6QnW9G7E5hYMITMY9TuQZz3bROYzXiTsgN0+g6Hn7exFQp50p45StUMfV/SftCMdCxlxuyGny2CrN/vfjO7xxOo2uv7q1qm10Q46KPWJQv+pgZ/OfL+EDjy07n5QVSKHlbx+2nT4Q0EgOSQaCTYwn3YjtABfIxWwgAFdyj6YlPulCL22qU4MYhDcA6PSBwDdf8hvxBfvsiHdM+JcSHvv8/VeJhk6CmnZxGY0fxBupov27z3yEO8nAg8k+6PaUiW1MSUfuGMF/ktB8LOstXsEPXSszuyXiOv4DaryOXUiSn7bmRqKcEFlJusO6aZP0= nicola@p1"
	logSender             = "APITesting"
	userPath              = "/api/v1/user"
	activeConnectionsPath = "/api/v1/connection"
	quotaScanPath         = "/api/v1/quota_scan"
	versionPath           = "/api/v1/version"
	metricsPath           = "/metrics"
)

var (
	defaultPerms = []string{dataprovider.PermAny}
	homeBasePath string
	testServer   *httptest.Server
)

func TestMain(m *testing.M) {
	if runtime.GOOS == "windows" {
		homeBasePath = "C:\\"
	} else {
		homeBasePath = "/tmp"
	}
	configDir := ".."
	logfilePath := filepath.Join(configDir, "sftpgo_api_test.log")
	logger.InitLogger(logfilePath, 5, 1, 28, false, zerolog.DebugLevel)
	config.LoadConfig(configDir, "")
	providerConf := config.GetProviderConf()

	err := dataprovider.Initialize(providerConf, configDir)
	if err != nil {
		logger.Warn(logSender, "", "error initializing data provider: %v", err)
		os.Exit(1)
	}
	dataProvider := dataprovider.GetProvider()
	httpdConf := config.GetHTTPDConfig()
	router := api.GetHTTPRouter()

	httpdConf.BindPort = 8081
	api.SetBaseURL("http://127.0.0.1:8081")

	sftpd.SetDataProvider(dataProvider)
	api.SetDataProvider(dataProvider)

	go func() {
		logger.Debug(logSender, "", "initializing HTTP server with config %+v", httpdConf)
		s := &http.Server{
			Addr:           fmt.Sprintf("%s:%d", httpdConf.BindAddress, httpdConf.BindPort),
			Handler:        router,
			ReadTimeout:    300 * time.Second,
			WriteTimeout:   300 * time.Second,
			MaxHeaderBytes: 1 << 20, // 1MB
		}
		if err := s.ListenAndServe(); err != nil {
			logger.Error(logSender, "", "could not start HTTP server: %v", err)
		}
	}()

	testServer = httptest.NewServer(api.GetHTTPRouter())
	defer testServer.Close()

	waitTCPListening(fmt.Sprintf("%s:%d", httpdConf.BindAddress, httpdConf.BindPort))

	exitCode := m.Run()
	os.Remove(logfilePath)
	os.Exit(exitCode)
}

func TestBasicUserHandling(t *testing.T) {
	user, _, err := api.AddUser(getTestUser(), http.StatusOK)
	if err != nil {
		t.Errorf("unable to add user: %v", err)
	}
	user.MaxSessions = 10
	user.QuotaSize = 4096
	user.QuotaFiles = 2
	user.UploadBandwidth = 128
	user.DownloadBandwidth = 64
	user, _, err = api.UpdateUser(user, http.StatusOK)
	if err != nil {
		t.Errorf("unable to update user: %v", err)
	}
	users, _, err := api.GetUsers(0, 0, defaultUsername, http.StatusOK)
	if err != nil {
		t.Errorf("unable to get users: %v", err)
	}
	if len(users) != 1 {
		t.Errorf("number of users mismatch, expected: 1, actual: %v", len(users))
	}
	_, err = api.RemoveUser(user, http.StatusOK)
	if err != nil {
		t.Errorf("unable to remove: %v", err)
	}
}

func TestAddUserNoCredentials(t *testing.T) {
	u := getTestUser()
	u.Password = ""
	u.PublicKeys = []string{}
	_, _, err := api.AddUser(u, http.StatusBadRequest)
	if err != nil {
		t.Errorf("unexpected error adding user with no credentials: %v", err)
	}
}

func TestAddUserNoUsername(t *testing.T) {
	u := getTestUser()
	u.Username = ""
	_, _, err := api.AddUser(u, http.StatusBadRequest)
	if err != nil {
		t.Errorf("unexpected error adding user with no home dir: %v", err)
	}
}

func TestAddUserNoHomeDir(t *testing.T) {
	u := getTestUser()
	u.HomeDir = ""
	_, _, err := api.AddUser(u, http.StatusBadRequest)
	if err != nil {
		t.Errorf("unexpected error adding user with no home dir: %v", err)
	}
}

func TestAddUserInvalidHomeDir(t *testing.T) {
	u := getTestUser()
	u.HomeDir = "relative_path"
	_, _, err := api.AddUser(u, http.StatusBadRequest)
	if err != nil {
		t.Errorf("unexpected error adding user with invalid home dir: %v", err)
	}
}

func TestAddUserNoPerms(t *testing.T) {
	u := getTestUser()
	u.Permissions = []string{}
	_, _, err := api.AddUser(u, http.StatusBadRequest)
	if err != nil {
		t.Errorf("unexpected error adding user with no perms: %v", err)
	}
}

func TestAddUserInvalidPerms(t *testing.T) {
	u := getTestUser()
	u.Permissions = []string{"invalidPerm"}
	_, _, err := api.AddUser(u, http.StatusBadRequest)
	if err != nil {
		t.Errorf("unexpected error adding user with no perms: %v", err)
	}
}

func TestUserPublicKey(t *testing.T) {
	u := getTestUser()
	invalidPubKey := "invalid"
	validPubKey := "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC03jj0D+djk7pxIf/0OhrxrchJTRZklofJ1NoIu4752Sq02mdXmarMVsqJ1cAjV5LBVy3D1F5U6XW4rppkXeVtd04Pxb09ehtH0pRRPaoHHlALiJt8CoMpbKYMA8b3KXPPriGxgGomvtU2T2RMURSwOZbMtpsugfjYSWenyYX+VORYhylWnSXL961LTyC21ehd6d6QnW9G7E5hYMITMY9TuQZz3bROYzXiTsgN0+g6Hn7exFQp50p45StUMfV/SftCMdCxlxuyGny2CrN/vfjO7xxOo2uv7q1qm10Q46KPWJQv+pgZ/OfL+EDjy07n5QVSKHlbx+2nT4Q0EgOSQaCTYwn3YjtABfIxWwgAFdyj6YlPulCL22qU4MYhDcA6PSBwDdf8hvxBfvsiHdM+JcSHvv8/VeJhk6CmnZxGY0fxBupov27z3yEO8nAg8k+6PaUiW1MSUfuGMF/ktB8LOstXsEPXSszuyXiOv4DaryOXUiSn7bmRqKcEFlJusO6aZP0= nicola@p1"
	u.PublicKeys = []string{invalidPubKey}
	_, _, err := api.AddUser(u, http.StatusBadRequest)
	if err != nil {
		t.Errorf("unexpected error adding user with invalid pub key: %v", err)
	}
	u.PublicKeys = []string{validPubKey}
	user, _, err := api.AddUser(u, http.StatusOK)
	if err != nil {
		t.Errorf("unable to add user: %v", err)
	}
	user.PublicKeys = []string{validPubKey, invalidPubKey}
	_, _, err = api.UpdateUser(user, http.StatusBadRequest)
	if err != nil {
		t.Errorf("update user with invalid public key must fail: %v", err)
	}
	user.PublicKeys = []string{validPubKey, validPubKey, validPubKey}
	_, _, err = api.UpdateUser(user, http.StatusOK)
	if err != nil {
		t.Errorf("unable to update user: %v", err)
	}
	_, err = api.RemoveUser(user, http.StatusOK)
	if err != nil {
		t.Errorf("unable to remove: %v", err)
	}
}

func TestUpdateUser(t *testing.T) {
	user, _, err := api.AddUser(getTestUser(), http.StatusOK)
	if err != nil {
		t.Errorf("unable to add user: %v", err)
	}
	user.HomeDir = filepath.Join(homeBasePath, "testmod")
	user.UID = 33
	user.GID = 101
	user.MaxSessions = 10
	user.QuotaSize = 4096
	user.QuotaFiles = 2
	user.Permissions = []string{dataprovider.PermCreateDirs, dataprovider.PermDelete, dataprovider.PermDownload}
	user.UploadBandwidth = 1024
	user.DownloadBandwidth = 512
	user, _, err = api.UpdateUser(user, http.StatusOK)
	if err != nil {
		t.Errorf("unable to update user: %v", err)
	}
	_, err = api.RemoveUser(user, http.StatusOK)
	if err != nil {
		t.Errorf("unable to remove: %v", err)
	}
}

func TestUpdateUserNoCredentials(t *testing.T) {
	user, _, err := api.AddUser(getTestUser(), http.StatusOK)
	if err != nil {
		t.Errorf("unable to add user: %v", err)
	}
	user.Password = ""
	user.PublicKeys = []string{}
	// password and public key will be omitted from json serialization if empty and so they will remain unchanged
	// and no validation error will be raised
	_, _, err = api.UpdateUser(user, http.StatusOK)
	if err != nil {
		t.Errorf("unexpected error updating user with no credentials: %v", err)
	}
	_, err = api.RemoveUser(user, http.StatusOK)
	if err != nil {
		t.Errorf("unable to remove: %v", err)
	}
}

func TestUpdateUserEmptyHomeDir(t *testing.T) {
	user, _, err := api.AddUser(getTestUser(), http.StatusOK)
	if err != nil {
		t.Errorf("unable to add user: %v", err)
	}
	user.HomeDir = ""
	_, _, err = api.UpdateUser(user, http.StatusBadRequest)
	if err != nil {
		t.Errorf("unexpected error updating user with empty home dir: %v", err)
	}
	_, err = api.RemoveUser(user, http.StatusOK)
	if err != nil {
		t.Errorf("unable to remove: %v", err)
	}
}

func TestUpdateUserInvalidHomeDir(t *testing.T) {
	user, _, err := api.AddUser(getTestUser(), http.StatusOK)
	if err != nil {
		t.Errorf("unable to add user: %v", err)
	}
	user.HomeDir = "relative_path"
	_, _, err = api.UpdateUser(user, http.StatusBadRequest)
	if err != nil {
		t.Errorf("unexpected error updating user with empty home dir: %v", err)
	}
	_, err = api.RemoveUser(user, http.StatusOK)
	if err != nil {
		t.Errorf("unable to remove: %v", err)
	}
}

func TestUpdateNonExistentUser(t *testing.T) {
	_, _, err := api.UpdateUser(getTestUser(), http.StatusNotFound)
	if err != nil {
		t.Errorf("unable to update user: %v", err)
	}
}

func TestGetNonExistentUser(t *testing.T) {
	_, _, err := api.GetUserByID(0, http.StatusNotFound)
	if err != nil {
		t.Errorf("unable to get user: %v", err)
	}
}

func TestDeleteNonExistentUser(t *testing.T) {
	_, err := api.RemoveUser(getTestUser(), http.StatusNotFound)
	if err != nil {
		t.Errorf("unable to remove user: %v", err)
	}
}

func TestAddDuplicateUser(t *testing.T) {
	user, _, err := api.AddUser(getTestUser(), http.StatusOK)
	if err != nil {
		t.Errorf("unable to add user: %v", err)
	}
	_, _, err = api.AddUser(getTestUser(), http.StatusInternalServerError)
	if err != nil {
		t.Errorf("unable to add second user: %v", err)
	}
	_, _, err = api.AddUser(getTestUser(), http.StatusOK)
	if err == nil {
		t.Errorf("adding a duplicate user must fail")
	}
	_, err = api.RemoveUser(user, http.StatusOK)
	if err != nil {
		t.Errorf("unable to remove user: %v", err)
	}
}

func TestGetUsers(t *testing.T) {
	user1, _, err := api.AddUser(getTestUser(), http.StatusOK)
	if err != nil {
		t.Errorf("unable to add user: %v", err)
	}
	u := getTestUser()
	u.Username = defaultUsername + "1"
	user2, _, err := api.AddUser(u, http.StatusOK)
	if err != nil {
		t.Errorf("unable to add second user: %v", err)
	}
	users, _, err := api.GetUsers(0, 0, "", http.StatusOK)
	if err != nil {
		t.Errorf("unable to get users: %v", err)
	}
	if len(users) < 2 {
		t.Errorf("at least 2 users are expected")
	}
	users, _, err = api.GetUsers(1, 0, "", http.StatusOK)
	if err != nil {
		t.Errorf("unable to get users: %v", err)
	}
	if len(users) != 1 {
		t.Errorf("1 user is expected")
	}
	users, _, err = api.GetUsers(1, 1, "", http.StatusOK)
	if err != nil {
		t.Errorf("unable to get users: %v", err)
	}
	if len(users) != 1 {
		t.Errorf("1 user is expected")
	}
	_, _, err = api.GetUsers(1, 1, "", http.StatusInternalServerError)
	if err == nil {
		t.Errorf("get users must succeed, we requested a fail for a good request")
	}
	_, err = api.RemoveUser(user1, http.StatusOK)
	if err != nil {
		t.Errorf("unable to remove user: %v", err)
	}
	_, err = api.RemoveUser(user2, http.StatusOK)
	if err != nil {
		t.Errorf("unable to remove user: %v", err)
	}
}

func TestGetQuotaScans(t *testing.T) {
	_, _, err := api.GetQuotaScans(http.StatusOK)
	if err != nil {
		t.Errorf("unable to get quota scans: %v", err)
	}
	_, _, err = api.GetQuotaScans(http.StatusInternalServerError)
	if err == nil {
		t.Errorf("quota scan request must succeed, we requested to check a wrong status code")
	}
}

func TestStartQuotaScan(t *testing.T) {
	user, _, err := api.AddUser(getTestUser(), http.StatusOK)
	if err != nil {
		t.Errorf("unable to add user: %v", err)
	}
	_, err = api.StartQuotaScan(user, http.StatusCreated)
	if err != nil {
		t.Errorf("unable to start quota scan: %v", err)
	}
	_, err = api.RemoveUser(user, http.StatusOK)
	if err != nil {
		t.Errorf("unable to remove user: %v", err)
	}
}

func TestGetVersion(t *testing.T) {
	_, _, err := api.GetVersion(http.StatusOK)
	if err != nil {
		t.Errorf("unable to get sftp version: %v", err)
	}
	_, _, err = api.GetVersion(http.StatusInternalServerError)
	if err == nil {
		t.Errorf("get version request must succeed, we requested to check a wrong status code")
	}
}

func TestGetConnections(t *testing.T) {
	_, _, err := api.GetConnections(http.StatusOK)
	if err != nil {
		t.Errorf("unable to get sftp connections: %v", err)
	}
	_, _, err = api.GetConnections(http.StatusInternalServerError)
	if err == nil {
		t.Errorf("get sftp connections request must succeed, we requested to check a wrong status code")
	}
}

func TestCloseActiveConnection(t *testing.T) {
	_, err := api.CloseConnection("non_existent_id", http.StatusNotFound)
	if err != nil {
		t.Errorf("unexpected error closing non existent sftp connection: %v", err)
	}
}

// test using mock http server

func TestBasicUserHandlingMock(t *testing.T) {
	user := getTestUser()
	userAsJSON := getUserAsJSON(t, user)
	req, _ := http.NewRequest(http.MethodPost, userPath, bytes.NewBuffer(userAsJSON))
	rr := executeRequest(req)
	checkResponseCode(t, http.StatusOK, rr.Code)
	err := render.DecodeJSON(rr.Body, &user)
	if err != nil {
		t.Errorf("Error get user: %v", err)
	}
	req, _ = http.NewRequest(http.MethodPost, userPath, bytes.NewBuffer(userAsJSON))
	rr = executeRequest(req)
	checkResponseCode(t, http.StatusInternalServerError, rr.Code)
	user.MaxSessions = 10
	user.UploadBandwidth = 128
	userAsJSON = getUserAsJSON(t, user)
	req, _ = http.NewRequest(http.MethodPut, userPath+"/"+strconv.FormatInt(user.ID, 10), bytes.NewBuffer(userAsJSON))
	rr = executeRequest(req)
	checkResponseCode(t, http.StatusOK, rr.Code)

	req, _ = http.NewRequest(http.MethodGet, userPath+"/"+strconv.FormatInt(user.ID, 10), nil)
	rr = executeRequest(req)
	checkResponseCode(t, http.StatusOK, rr.Code)

	var updatedUser dataprovider.User
	err = render.DecodeJSON(rr.Body, &updatedUser)
	if err != nil {
		t.Errorf("Error decoding updated user: %v", err)
	}
	if user.MaxSessions != updatedUser.MaxSessions || user.UploadBandwidth != updatedUser.UploadBandwidth {
		t.Errorf("Error modifying user actual: %v, %v", updatedUser.MaxSessions, updatedUser.UploadBandwidth)
	}
	req, _ = http.NewRequest(http.MethodDelete, userPath+"/"+strconv.FormatInt(user.ID, 10), nil)
	rr = executeRequest(req)
	checkResponseCode(t, http.StatusOK, rr.Code)
}

func TestGetUserByIdInvalidParamsMock(t *testing.T) {
	req, _ := http.NewRequest(http.MethodGet, userPath+"/0", nil)
	rr := executeRequest(req)
	checkResponseCode(t, http.StatusNotFound, rr.Code)
	req, _ = http.NewRequest(http.MethodGet, userPath+"/a", nil)
	rr = executeRequest(req)
	checkResponseCode(t, http.StatusBadRequest, rr.Code)
}

func TestAddUserNoUsernameMock(t *testing.T) {
	user := getTestUser()
	user.Username = ""
	userAsJSON := getUserAsJSON(t, user)
	req, _ := http.NewRequest(http.MethodPost, userPath, bytes.NewBuffer(userAsJSON))
	rr := executeRequest(req)
	checkResponseCode(t, http.StatusBadRequest, rr.Code)
}

func TestAddUserInvalidHomeDirMock(t *testing.T) {
	user := getTestUser()
	user.HomeDir = "relative_path"
	userAsJSON := getUserAsJSON(t, user)
	req, _ := http.NewRequest(http.MethodPost, userPath, bytes.NewBuffer(userAsJSON))
	rr := executeRequest(req)
	checkResponseCode(t, http.StatusBadRequest, rr.Code)
}

func TestAddUserInvalidPermsMock(t *testing.T) {
	user := getTestUser()
	user.Permissions = []string{}
	userAsJSON := getUserAsJSON(t, user)
	req, _ := http.NewRequest(http.MethodPost, userPath, bytes.NewBuffer(userAsJSON))
	rr := executeRequest(req)
	checkResponseCode(t, http.StatusBadRequest, rr.Code)
}

func TestAddUserInvalidJsonMock(t *testing.T) {
	req, _ := http.NewRequest(http.MethodPost, userPath, bytes.NewBuffer([]byte("invalid json")))
	rr := executeRequest(req)
	checkResponseCode(t, http.StatusBadRequest, rr.Code)
}

func TestUpdateUserInvalidJsonMock(t *testing.T) {
	user := getTestUser()
	userAsJSON := getUserAsJSON(t, user)
	req, _ := http.NewRequest(http.MethodPost, userPath, bytes.NewBuffer(userAsJSON))
	rr := executeRequest(req)
	checkResponseCode(t, http.StatusOK, rr.Code)
	err := render.DecodeJSON(rr.Body, &user)
	if err != nil {
		t.Errorf("Error get user: %v", err)
	}
	req, _ = http.NewRequest(http.MethodPut, userPath+"/"+strconv.FormatInt(user.ID, 10), bytes.NewBuffer([]byte("Invalid json")))
	rr = executeRequest(req)
	checkResponseCode(t, http.StatusBadRequest, rr.Code)
	req, _ = http.NewRequest(http.MethodDelete, userPath+"/"+strconv.FormatInt(user.ID, 10), nil)
	rr = executeRequest(req)
	checkResponseCode(t, http.StatusOK, rr.Code)
}

func TestUpdateUserInvalidParamsMock(t *testing.T) {
	user := getTestUser()
	userAsJSON := getUserAsJSON(t, user)
	req, _ := http.NewRequest(http.MethodPost, userPath, bytes.NewBuffer(userAsJSON))
	rr := executeRequest(req)
	checkResponseCode(t, http.StatusOK, rr.Code)
	err := render.DecodeJSON(rr.Body, &user)
	if err != nil {
		t.Errorf("Error get user: %v", err)
	}
	user.HomeDir = ""
	userAsJSON = getUserAsJSON(t, user)
	req, _ = http.NewRequest(http.MethodPut, userPath+"/"+strconv.FormatInt(user.ID, 10), bytes.NewBuffer(userAsJSON))
	rr = executeRequest(req)
	checkResponseCode(t, http.StatusBadRequest, rr.Code)
	userID := user.ID
	user.ID = 0
	userAsJSON = getUserAsJSON(t, user)
	req, _ = http.NewRequest(http.MethodPut, userPath+"/"+strconv.FormatInt(userID, 10), bytes.NewBuffer(userAsJSON))
	rr = executeRequest(req)
	checkResponseCode(t, http.StatusBadRequest, rr.Code)
	user.ID = userID
	req, _ = http.NewRequest(http.MethodPut, userPath+"/0", bytes.NewBuffer(userAsJSON))
	rr = executeRequest(req)
	checkResponseCode(t, http.StatusNotFound, rr.Code)
	req, _ = http.NewRequest(http.MethodPut, userPath+"/a", bytes.NewBuffer(userAsJSON))
	rr = executeRequest(req)
	checkResponseCode(t, http.StatusBadRequest, rr.Code)
	req, _ = http.NewRequest(http.MethodDelete, userPath+"/"+strconv.FormatInt(user.ID, 10), nil)
	rr = executeRequest(req)
	checkResponseCode(t, http.StatusOK, rr.Code)
}

func TestGetUsersMock(t *testing.T) {
	user := getTestUser()
	userAsJSON := getUserAsJSON(t, user)
	req, _ := http.NewRequest(http.MethodPost, userPath, bytes.NewBuffer(userAsJSON))
	rr := executeRequest(req)
	checkResponseCode(t, http.StatusOK, rr.Code)
	err := render.DecodeJSON(rr.Body, &user)
	if err != nil {
		t.Errorf("Error get user: %v", err)
	}
	req, _ = http.NewRequest(http.MethodGet, userPath+"?limit=510&offset=0&order=ASC&username="+defaultUsername, nil)
	rr = executeRequest(req)
	checkResponseCode(t, http.StatusOK, rr.Code)
	var users []dataprovider.User
	err = render.DecodeJSON(rr.Body, &users)
	if err != nil {
		t.Errorf("Error decoding users: %v", err)
	}
	if len(users) != 1 {
		t.Errorf("1 user is expected")
	}
	req, _ = http.NewRequest(http.MethodGet, userPath+"?limit=a&offset=0&order=ASC", nil)
	rr = executeRequest(req)
	checkResponseCode(t, http.StatusBadRequest, rr.Code)
	req, _ = http.NewRequest(http.MethodGet, userPath+"?limit=1&offset=a&order=ASC", nil)
	rr = executeRequest(req)
	checkResponseCode(t, http.StatusBadRequest, rr.Code)
	req, _ = http.NewRequest(http.MethodGet, userPath+"?limit=1&offset=0&order=ASCa", nil)
	rr = executeRequest(req)
	checkResponseCode(t, http.StatusBadRequest, rr.Code)

	req, _ = http.NewRequest(http.MethodDelete, userPath+"/"+strconv.FormatInt(user.ID, 10), nil)
	rr = executeRequest(req)
	checkResponseCode(t, http.StatusOK, rr.Code)
}

func TestDeleteUserInvalidParamsMock(t *testing.T) {
	req, _ := http.NewRequest(http.MethodDelete, userPath+"/0", nil)
	rr := executeRequest(req)
	checkResponseCode(t, http.StatusNotFound, rr.Code)
	req, _ = http.NewRequest(http.MethodDelete, userPath+"/a", nil)
	rr = executeRequest(req)
	checkResponseCode(t, http.StatusBadRequest, rr.Code)
}

func TestGetQuotaScansMock(t *testing.T) {
	req, err := http.NewRequest("GET", quotaScanPath, nil)
	if err != nil {
		t.Errorf("error get quota scan: %v", err)
	}
	rr := executeRequest(req)
	checkResponseCode(t, http.StatusOK, rr.Code)
}

func TestStartQuotaScanMock(t *testing.T) {
	user := getTestUser()
	userAsJSON := getUserAsJSON(t, user)
	req, _ := http.NewRequest(http.MethodPost, userPath, bytes.NewBuffer(userAsJSON))
	rr := executeRequest(req)
	checkResponseCode(t, http.StatusOK, rr.Code)
	err := render.DecodeJSON(rr.Body, &user)
	if err != nil {
		t.Errorf("Error get user: %v", err)
	}
	_, err = os.Stat(user.HomeDir)
	if err == nil {
		os.Remove(user.HomeDir)
	}
	// simulate a duplicate quota scan
	userAsJSON = getUserAsJSON(t, user)
	sftpd.AddQuotaScan(user.Username)
	req, _ = http.NewRequest(http.MethodPost, quotaScanPath, bytes.NewBuffer(userAsJSON))
	rr = executeRequest(req)
	checkResponseCode(t, http.StatusConflict, rr.Code)
	sftpd.RemoveQuotaScan(user.Username)

	userAsJSON = getUserAsJSON(t, user)
	req, _ = http.NewRequest(http.MethodPost, quotaScanPath, bytes.NewBuffer(userAsJSON))
	rr = executeRequest(req)
	checkResponseCode(t, http.StatusCreated, rr.Code)

	req, _ = http.NewRequest(http.MethodGet, quotaScanPath, nil)
	rr = executeRequest(req)
	checkResponseCode(t, http.StatusOK, rr.Code)
	var scans []sftpd.ActiveQuotaScan
	err = render.DecodeJSON(rr.Body, &scans)
	if err != nil {
		t.Errorf("Error get active scans: %v", err)
	}
	for len(scans) > 0 {
		req, _ = http.NewRequest(http.MethodGet, quotaScanPath, nil)
		rr = executeRequest(req)
		checkResponseCode(t, http.StatusOK, rr.Code)
		err = render.DecodeJSON(rr.Body, &scans)
		if err != nil {
			t.Errorf("Error get active scans: %v", err)
			break
		}
	}
	_, err = os.Stat(user.HomeDir)
	if err != nil && os.IsNotExist(err) {
		os.MkdirAll(user.HomeDir, 0777)
	}
	req, _ = http.NewRequest(http.MethodPost, quotaScanPath, bytes.NewBuffer(userAsJSON))
	rr = executeRequest(req)
	checkResponseCode(t, http.StatusCreated, rr.Code)
	req, _ = http.NewRequest(http.MethodDelete, userPath+"/"+strconv.FormatInt(user.ID, 10), nil)
	rr = executeRequest(req)
	checkResponseCode(t, http.StatusOK, rr.Code)
}

func TestStartQuotaScanBadUserMock(t *testing.T) {
	user := getTestUser()
	userAsJSON := getUserAsJSON(t, user)
	req, _ := http.NewRequest(http.MethodPost, quotaScanPath, bytes.NewBuffer(userAsJSON))
	rr := executeRequest(req)
	checkResponseCode(t, http.StatusNotFound, rr.Code)
}

func TestStartQuotaScanNonExistentUserMock(t *testing.T) {
	req, _ := http.NewRequest(http.MethodPost, quotaScanPath, bytes.NewBuffer([]byte("invalid json")))
	rr := executeRequest(req)
	checkResponseCode(t, http.StatusBadRequest, rr.Code)
}

func TestGetVersionMock(t *testing.T) {
	req, _ := http.NewRequest(http.MethodGet, versionPath, nil)
	rr := executeRequest(req)
	checkResponseCode(t, http.StatusOK, rr.Code)
}

func TestGetConnectionsMock(t *testing.T) {
	req, _ := http.NewRequest(http.MethodGet, activeConnectionsPath, nil)
	rr := executeRequest(req)
	checkResponseCode(t, http.StatusOK, rr.Code)
}

func TestDeleteActiveConnectionMock(t *testing.T) {
	req, _ := http.NewRequest(http.MethodDelete, activeConnectionsPath+"/connectionID", nil)
	rr := executeRequest(req)
	checkResponseCode(t, http.StatusNotFound, rr.Code)
}

func TestNotFoundMock(t *testing.T) {
	req, _ := http.NewRequest(http.MethodGet, "/non/existing/path", nil)
	rr := executeRequest(req)
	checkResponseCode(t, http.StatusNotFound, rr.Code)
}

func TestMethodNotAllowedMock(t *testing.T) {
	req, _ := http.NewRequest(http.MethodPost, activeConnectionsPath, nil)
	rr := executeRequest(req)
	checkResponseCode(t, http.StatusMethodNotAllowed, rr.Code)
}

func TestMetricsMock(t *testing.T) {
	req, _ := http.NewRequest(http.MethodGet, metricsPath, nil)
	rr := executeRequest(req)
	checkResponseCode(t, http.StatusOK, rr.Code)
}

func waitTCPListening(address string) {
	for {
		conn, err := net.Dial("tcp", address)
		if err != nil {
			logger.WarnToConsole("tcp server %v not listening: %v\n", address, err)
			time.Sleep(100 * time.Millisecond)
			continue
		}
		logger.InfoToConsole("tcp server %v now listening\n", address)
		defer conn.Close()
		break
	}
}

func getTestUser() dataprovider.User {
	return dataprovider.User{
		Username:    defaultUsername,
		Password:    defaultPassword,
		HomeDir:     filepath.Join(homeBasePath, defaultUsername),
		Permissions: defaultPerms,
	}
}

func getUserAsJSON(t *testing.T, user dataprovider.User) []byte {
	json, err := json.Marshal(user)
	if err != nil {
		t.Errorf("error get user as json: %v", err)
		return []byte("{}")
	}
	return json
}

func executeRequest(req *http.Request) *httptest.ResponseRecorder {
	rr := httptest.NewRecorder()
	testServer.Config.Handler.ServeHTTP(rr, req)
	return rr
}

func checkResponseCode(t *testing.T, expected, actual int) {
	if expected != actual {
		t.Errorf("Expected response code %d. Got %d", expected, actual)
	}
}

api/api_utils.go
@@ -1,330 +0,0 @@
package api

import (
	"bytes"
	"encoding/json"
	"errors"
	"fmt"
	"io/ioutil"
	"net/http"
	"net/url"
	"path"
	"strconv"
	"strings"
	"time"

	"github.com/drakkan/sftpgo/dataprovider"
	"github.com/drakkan/sftpgo/sftpd"
	"github.com/drakkan/sftpgo/utils"
	"github.com/go-chi/render"
)

var (
	httpBaseURL = "http://127.0.0.1:8080"
)

// SetBaseURL sets the base url to use for HTTP requests, default is "http://127.0.0.1:8080"
func SetBaseURL(url string) {
	httpBaseURL = url
}

// gets an HTTP Client with a timeout
func getHTTPClient() *http.Client {
	return &http.Client{
		Timeout: 15 * time.Second,
	}
}

func buildURLRelativeToBase(paths ...string) string {
	// we need to use path.Join and not filepath.Join
	// since filepath.Join will use backslash separator on Windows
	p := path.Join(paths...)
	return fmt.Sprintf("%s/%s", strings.TrimRight(httpBaseURL, "/"), strings.TrimLeft(p, "/"))
}

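// For example, with the default base URL, buildURLRelativeToBase("/api/v1/user", "1")
// returns "http://127.0.0.1:8080/api/v1/user/1"; path.Join keeps forward
// slashes on every platform, which is what a URL needs.
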
// AddUser adds a new user and checks the received HTTP Status code against expectedStatusCode.
func AddUser(user dataprovider.User, expectedStatusCode int) (dataprovider.User, []byte, error) {
	var newUser dataprovider.User
	var body []byte
	userAsJSON, err := json.Marshal(user)
	if err != nil {
		return newUser, body, err
	}
	resp, err := getHTTPClient().Post(buildURLRelativeToBase(userPath), "application/json", bytes.NewBuffer(userAsJSON))
	if err != nil {
		return newUser, body, err
	}
	defer resp.Body.Close()
	err = checkResponse(resp.StatusCode, expectedStatusCode)
	if expectedStatusCode != http.StatusOK {
		body, _ = getResponseBody(resp)
		return newUser, body, err
	}
	if err == nil {
		err = render.DecodeJSON(resp.Body, &newUser)
	} else {
		body, _ = getResponseBody(resp)
	}
	if err == nil {
		err = checkUser(user, newUser)
	}
	return newUser, body, err
}

// UpdateUser updates an existing user and checks the received HTTP Status code against expectedStatusCode.
func UpdateUser(user dataprovider.User, expectedStatusCode int) (dataprovider.User, []byte, error) {
	var newUser dataprovider.User
	var body []byte
	userAsJSON, err := json.Marshal(user)
	if err != nil {
		return user, body, err
	}
	req, err := http.NewRequest(http.MethodPut, buildURLRelativeToBase(userPath, strconv.FormatInt(user.ID, 10)),
		bytes.NewBuffer(userAsJSON))
	if err != nil {
		return user, body, err
	}
	resp, err := getHTTPClient().Do(req)
	if err != nil {
		return user, body, err
	}
	defer resp.Body.Close()
	body, _ = getResponseBody(resp)
	err = checkResponse(resp.StatusCode, expectedStatusCode)
	if expectedStatusCode != http.StatusOK {
		return newUser, body, err
	}
	if err == nil {
		newUser, body, err = GetUserByID(user.ID, expectedStatusCode)
	}
	if err == nil {
		err = checkUser(user, newUser)
	}
	return newUser, body, err
}

// RemoveUser removes an existing user and checks the received HTTP Status code against expectedStatusCode.
func RemoveUser(user dataprovider.User, expectedStatusCode int) ([]byte, error) {
	var body []byte
	req, err := http.NewRequest(http.MethodDelete, buildURLRelativeToBase(userPath, strconv.FormatInt(user.ID, 10)), nil)
	if err != nil {
		return body, err
	}
	resp, err := getHTTPClient().Do(req)
	if err != nil {
		return body, err
	}
	defer resp.Body.Close()
	body, _ = getResponseBody(resp)
	return body, checkResponse(resp.StatusCode, expectedStatusCode)
}

// GetUserByID gets a user by database ID and checks the received HTTP Status code against expectedStatusCode.
func GetUserByID(userID int64, expectedStatusCode int) (dataprovider.User, []byte, error) {
	var user dataprovider.User
	var body []byte
	resp, err := getHTTPClient().Get(buildURLRelativeToBase(userPath, strconv.FormatInt(userID, 10)))
	if err != nil {
		return user, body, err
	}
	defer resp.Body.Close()
	err = checkResponse(resp.StatusCode, expectedStatusCode)
	if err == nil && expectedStatusCode == http.StatusOK {
		err = render.DecodeJSON(resp.Body, &user)
	} else {
		body, _ = getResponseBody(resp)
	}
	return user, body, err
}

// GetUsers allows getting a list of users and checks the received HTTP Status code against expectedStatusCode.
// The number of results can be limited by specifying a limit.
// Some results can be skipped by specifying an offset.
// The results can be filtered by specifying a username; the username filter is an exact match
func GetUsers(limit int64, offset int64, username string, expectedStatusCode int) ([]dataprovider.User, []byte, error) {
	var users []dataprovider.User
	var body []byte
	url, err := url.Parse(buildURLRelativeToBase(userPath))
	if err != nil {
		return users, body, err
	}
	q := url.Query()
	if limit > 0 {
		q.Add("limit", strconv.FormatInt(limit, 10))
	}
	if offset > 0 {
q.Add("offset", strconv.FormatInt(offset, 10))
|
||||
}
|
||||
if len(username) > 0 {
|
||||
q.Add("username", username)
|
||||
}
|
||||
url.RawQuery = q.Encode()
|
||||
resp, err := getHTTPClient().Get(url.String())
|
||||
if err != nil {
|
||||
return users, body, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
err = checkResponse(resp.StatusCode, expectedStatusCode)
|
||||
if err == nil && expectedStatusCode == http.StatusOK {
|
||||
err = render.DecodeJSON(resp.Body, &users)
|
||||
} else {
|
||||
body, _ = getResponseBody(resp)
|
||||
}
|
||||
return users, body, err
|
||||
}
|
||||
|
||||
// GetQuotaScans gets active quota scans and checks the received HTTP Status code against expectedStatusCode.
|
||||
func GetQuotaScans(expectedStatusCode int) ([]sftpd.ActiveQuotaScan, []byte, error) {
|
||||
var quotaScans []sftpd.ActiveQuotaScan
|
||||
var body []byte
|
||||
resp, err := getHTTPClient().Get(buildURLRelativeToBase(quotaScanPath))
|
||||
if err != nil {
|
||||
return quotaScans, body, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
err = checkResponse(resp.StatusCode, expectedStatusCode)
|
||||
if err == nil && expectedStatusCode == http.StatusOK {
|
||||
err = render.DecodeJSON(resp.Body, "aScans)
|
||||
} else {
|
||||
body, _ = getResponseBody(resp)
|
||||
}
|
||||
return quotaScans, body, err
|
||||
}
|
||||
|
||||
// StartQuotaScan start a new quota scan for the given user and checks the received HTTP Status code against expectedStatusCode.
|
||||
func StartQuotaScan(user dataprovider.User, expectedStatusCode int) ([]byte, error) {
|
||||
var body []byte
|
||||
userAsJSON, err := json.Marshal(user)
|
||||
if err != nil {
|
||||
return body, err
|
||||
}
|
||||
resp, err := getHTTPClient().Post(buildURLRelativeToBase(quotaScanPath), "application/json", bytes.NewBuffer(userAsJSON))
|
||||
if err != nil {
|
||||
return body, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
body, _ = getResponseBody(resp)
|
||||
return body, checkResponse(resp.StatusCode, expectedStatusCode)
|
||||
}
|
||||
|
||||
// GetConnections returns status and stats for active SFTP/SCP connections
|
||||
func GetConnections(expectedStatusCode int) ([]sftpd.ConnectionStatus, []byte, error) {
|
||||
var connections []sftpd.ConnectionStatus
|
||||
var body []byte
|
||||
resp, err := getHTTPClient().Get(buildURLRelativeToBase(activeConnectionsPath))
|
||||
if err != nil {
|
||||
return connections, body, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
err = checkResponse(resp.StatusCode, expectedStatusCode)
|
||||
if err == nil && expectedStatusCode == http.StatusOK {
|
||||
err = render.DecodeJSON(resp.Body, &connections)
|
||||
} else {
|
||||
body, _ = getResponseBody(resp)
|
||||
}
|
||||
return connections, body, err
|
||||
}
|
||||
|
||||
// CloseConnection closes an active connection identified by connectionID
|
||||
func CloseConnection(connectionID string, expectedStatusCode int) ([]byte, error) {
|
||||
var body []byte
|
||||
req, err := http.NewRequest(http.MethodDelete, buildURLRelativeToBase(activeConnectionsPath, connectionID), nil)
|
||||
if err != nil {
|
||||
return body, err
|
||||
}
|
||||
resp, err := getHTTPClient().Do(req)
|
||||
if err != nil {
|
||||
return body, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
err = checkResponse(resp.StatusCode, expectedStatusCode)
|
||||
body, _ = getResponseBody(resp)
|
||||
return body, err
|
||||
}
|
||||
|
||||
// GetVersion returns version details
|
||||
func GetVersion(expectedStatusCode int) (utils.VersionInfo, []byte, error) {
|
||||
var version utils.VersionInfo
|
||||
var body []byte
|
||||
resp, err := getHTTPClient().Get(buildURLRelativeToBase(versionPath))
|
||||
if err != nil {
|
||||
return version, body, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
err = checkResponse(resp.StatusCode, expectedStatusCode)
|
||||
if err == nil && expectedStatusCode == http.StatusOK {
|
||||
err = render.DecodeJSON(resp.Body, &version)
|
||||
} else {
|
||||
body, _ = getResponseBody(resp)
|
||||
}
|
||||
return version, body, err
|
||||
}
|
||||
|
||||
func checkResponse(actual int, expected int) error {
|
||||
if expected != actual {
|
||||
return fmt.Errorf("wrong status code: got %v want %v", actual, expected)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func getResponseBody(resp *http.Response) ([]byte, error) {
|
||||
return ioutil.ReadAll(resp.Body)
|
||||
}
|
||||
|
||||
func checkUser(expected dataprovider.User, actual dataprovider.User) error {
|
||||
if len(actual.Password) > 0 {
|
||||
return errors.New("User password must not be visible")
|
||||
}
|
||||
if len(actual.PublicKeys) > 0 {
|
||||
return errors.New("User public keys must not be visible")
|
||||
}
|
||||
if expected.ID <= 0 {
|
||||
if actual.ID <= 0 {
|
||||
return errors.New("actual user ID must be > 0")
|
||||
}
|
||||
} else {
|
||||
if actual.ID != expected.ID {
|
||||
return errors.New("user ID mismatch")
|
||||
}
|
||||
}
|
||||
for _, v := range expected.Permissions {
|
||||
if !utils.IsStringInSlice(v, actual.Permissions) {
|
||||
return errors.New("Permissions contents mismatch")
|
||||
}
|
||||
}
|
||||
return compareEqualsUserFields(expected, actual)
|
||||
}
|
||||
|
||||
func compareEqualsUserFields(expected dataprovider.User, actual dataprovider.User) error {
|
||||
if expected.Username != actual.Username {
|
||||
return errors.New("Username mismatch")
|
||||
}
|
||||
if expected.HomeDir != actual.HomeDir {
|
||||
return errors.New("HomeDir mismatch")
|
||||
}
|
||||
if expected.UID != actual.UID {
|
||||
return errors.New("UID mismatch")
|
||||
}
|
||||
if expected.GID != actual.GID {
|
||||
return errors.New("GID mismatch")
|
||||
}
|
||||
if expected.MaxSessions != actual.MaxSessions {
|
||||
return errors.New("MaxSessions mismatch")
|
||||
}
|
||||
if expected.QuotaSize != actual.QuotaSize {
|
||||
return errors.New("QuotaSize mismatch")
|
||||
}
|
||||
if expected.QuotaFiles != actual.QuotaFiles {
|
||||
return errors.New("QuotaFiles mismatch")
|
||||
}
|
||||
if len(expected.Permissions) != len(actual.Permissions) {
|
||||
return errors.New("Permissions mismatch")
|
||||
}
|
||||
if expected.UploadBandwidth != actual.UploadBandwidth {
|
||||
return errors.New("UploadBandwidth mismatch")
|
||||
}
|
||||
if expected.DownloadBandwidth != actual.DownloadBandwidth {
|
||||
return errors.New("DownloadBandwidth mismatch")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
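Taken together, the helpers above let end-to-end tests drive the REST API with one line per operation. Below is a minimal sketch, not part of the original suite, of a full user lifecycle built on them; it assumes the api test package context, with a server listening on the configured base URL and the getTestUser helper shown earlier.

// Sketch only: exercises AddUser, UpdateUser, GetUsers and RemoveUser
// against a running test server.
func TestUserLifecycleSketch(t *testing.T) {
	user := getTestUser()
	newUser, _, err := AddUser(user, http.StatusOK)
	if err != nil {
		t.Fatalf("unable to add user: %v", err)
	}
	// tweak a field and verify the update round-trips via GetUserByID
	newUser.QuotaFiles = 100
	if _, _, err = UpdateUser(newUser, http.StatusOK); err != nil {
		t.Fatalf("unable to update user: %v", err)
	}
	// the username filter is an exact match, so exactly one result is expected
	users, _, err := GetUsers(1, 0, newUser.Username, http.StatusOK)
	if err != nil || len(users) != 1 {
		t.Fatalf("unable to list the new user: %v", err)
	}
	if _, err = RemoveUser(newUser, http.StatusOK); err != nil {
		t.Fatalf("unable to remove user: %v", err)
	}
}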
@@ -1,228 +0,0 @@
package api

import (
	"context"
	"fmt"
	"net/http"
	"net/http/httptest"
	"testing"

	"github.com/drakkan/sftpgo/dataprovider"
	"github.com/go-chi/chi"
)

const (
	invalidURL  = "http://foo\x7f.com/"
	inactiveURL = "http://127.0.0.1:12345"
)

func TestGetRespStatus(t *testing.T) {
	var err error
	err = &dataprovider.MethodDisabledError{}
	respStatus := getRespStatus(err)
	if respStatus != http.StatusForbidden {
		t.Errorf("wrong resp status expected: %d got: %d", http.StatusForbidden, respStatus)
	}
	err = fmt.Errorf("generic error")
	respStatus = getRespStatus(err)
	if respStatus != http.StatusInternalServerError {
		t.Errorf("wrong resp status expected: %d got: %d", http.StatusInternalServerError, respStatus)
	}
}

func TestCheckResponse(t *testing.T) {
	err := checkResponse(http.StatusOK, http.StatusCreated)
	if err == nil {
		t.Errorf("check must fail")
	}
	err = checkResponse(http.StatusBadRequest, http.StatusBadRequest)
	if err != nil {
		t.Errorf("test must succeed, error: %v", err)
	}
}

func TestCheckUser(t *testing.T) {
	expected := dataprovider.User{}
	actual := dataprovider.User{}
	actual.Password = "password"
	err := checkUser(expected, actual)
	if err == nil {
		t.Errorf("actual password must be empty")
	}
	actual.Password = ""
	actual.PublicKeys = []string{"pub key"}
	err = checkUser(expected, actual)
	if err == nil {
		t.Errorf("actual public keys must be empty")
	}
	actual.PublicKeys = []string{}
	err = checkUser(expected, actual)
	if err == nil {
		t.Errorf("actual ID must be > 0")
	}
	expected.ID = 1
	actual.ID = 2
	err = checkUser(expected, actual)
	if err == nil {
		t.Errorf("actual ID must be equal to expected ID")
	}
	expected.ID = 2
	actual.ID = 2
	expected.Permissions = []string{dataprovider.PermCreateDirs, dataprovider.PermDelete, dataprovider.PermDownload}
	actual.Permissions = []string{dataprovider.PermCreateDirs, dataprovider.PermCreateSymlinks}
	err = checkUser(expected, actual)
	if err == nil {
		t.Errorf("Permissions are not equal")
	}
	expected.Permissions = append(expected.Permissions, dataprovider.PermRename)
	err = checkUser(expected, actual)
	if err == nil {
		t.Errorf("Permissions are not equal")
	}
}

func TestCompareUserFields(t *testing.T) {
	expected := dataprovider.User{}
	actual := dataprovider.User{}
	expected.Username = "test"
	err := compareEqualsUserFields(expected, actual)
	if err == nil {
		t.Errorf("Username does not match")
	}
	expected.Username = ""
	expected.HomeDir = "homedir"
	err = compareEqualsUserFields(expected, actual)
	if err == nil {
		t.Errorf("HomeDir does not match")
	}
	expected.HomeDir = ""
	expected.UID = 1
	err = compareEqualsUserFields(expected, actual)
	if err == nil {
		t.Errorf("UID does not match")
	}
	expected.UID = 0
	expected.GID = 1
	err = compareEqualsUserFields(expected, actual)
	if err == nil {
		t.Errorf("GID does not match")
	}
	expected.GID = 0
	expected.MaxSessions = 2
	err = compareEqualsUserFields(expected, actual)
	if err == nil {
		t.Errorf("MaxSessions do not match")
	}
	expected.MaxSessions = 0
	expected.QuotaSize = 4096
	err = compareEqualsUserFields(expected, actual)
	if err == nil {
		t.Errorf("QuotaSize does not match")
	}
	expected.QuotaSize = 0
	expected.QuotaFiles = 2
	err = compareEqualsUserFields(expected, actual)
	if err == nil {
		t.Errorf("QuotaFiles do not match")
	}
	expected.QuotaFiles = 0
	expected.Permissions = []string{dataprovider.PermCreateDirs}
	err = compareEqualsUserFields(expected, actual)
	if err == nil {
		t.Errorf("Permissions are not equal")
	}
	expected.Permissions = nil
	expected.UploadBandwidth = 64
	err = compareEqualsUserFields(expected, actual)
	if err == nil {
		t.Errorf("UploadBandwidth does not match")
	}
	expected.UploadBandwidth = 0
	expected.DownloadBandwidth = 128
	err = compareEqualsUserFields(expected, actual)
	if err == nil {
		t.Errorf("DownloadBandwidth does not match")
	}
}

func TestApiCallsWithBadURL(t *testing.T) {
	oldBaseURL := httpBaseURL
	SetBaseURL(invalidURL)
	u := dataprovider.User{}
	_, _, err := UpdateUser(u, http.StatusBadRequest)
	if err == nil {
		t.Errorf("request with invalid URL must fail")
	}
	_, err = RemoveUser(u, http.StatusNotFound)
	if err == nil {
		t.Errorf("request with invalid URL must fail")
	}
	_, _, err = GetUsers(1, 0, "", http.StatusBadRequest)
	if err == nil {
		t.Errorf("request with invalid URL must fail")
	}
	_, err = CloseConnection("non_existent_id", http.StatusNotFound)
	if err == nil {
		t.Errorf("request with invalid URL must fail")
	}
	SetBaseURL(oldBaseURL)
}

func TestApiCallToNotListeningServer(t *testing.T) {
	oldBaseURL := httpBaseURL
	SetBaseURL(inactiveURL)
	u := dataprovider.User{}
	_, _, err := AddUser(u, http.StatusBadRequest)
	if err == nil {
		t.Errorf("request to an inactive URL must fail")
	}
	_, _, err = UpdateUser(u, http.StatusNotFound)
	if err == nil {
		t.Errorf("request to an inactive URL must fail")
	}
	_, err = RemoveUser(u, http.StatusNotFound)
	if err == nil {
		t.Errorf("request to an inactive URL must fail")
	}
	_, _, err = GetUserByID(-1, http.StatusNotFound)
	if err == nil {
		t.Errorf("request to an inactive URL must fail")
	}
	_, _, err = GetUsers(100, 0, "", http.StatusOK)
	if err == nil {
		t.Errorf("request to an inactive URL must fail")
	}
	_, _, err = GetQuotaScans(http.StatusOK)
	if err == nil {
		t.Errorf("request to an inactive URL must fail")
	}
	_, err = StartQuotaScan(u, http.StatusNotFound)
	if err == nil {
		t.Errorf("request to an inactive URL must fail")
	}
	_, _, err = GetConnections(http.StatusOK)
	if err == nil {
		t.Errorf("request to an inactive URL must fail")
	}
	_, err = CloseConnection("non_existent_id", http.StatusNotFound)
	if err == nil {
		t.Errorf("request to an inactive URL must fail")
	}
	_, _, err = GetVersion(http.StatusOK)
	if err == nil {
		t.Errorf("request to an inactive URL must fail")
	}
	SetBaseURL(oldBaseURL)
}

func TestCloseConnectionHandler(t *testing.T) {
	req, _ := http.NewRequest(http.MethodDelete, activeConnectionsPath+"/connectionID", nil)
	rctx := chi.NewRouteContext()
	rctx.URLParams.Add("connectionID", "")
	req = req.WithContext(context.WithValue(req.Context(), chi.RouteCtxKey, rctx))
	rr := httptest.NewRecorder()
	handleCloseConnection(rr, req)
	if rr.Code != http.StatusBadRequest {
		t.Errorf("Expected response code 400. Got %d", rr.Code)
	}
}
api/quota.go
@@ -1,44 +0,0 @@
package api

import (
	"net/http"

	"github.com/drakkan/sftpgo/dataprovider"
	"github.com/drakkan/sftpgo/logger"
	"github.com/drakkan/sftpgo/sftpd"
	"github.com/drakkan/sftpgo/utils"
	"github.com/go-chi/render"
)

func getQuotaScans(w http.ResponseWriter, r *http.Request) {
	render.JSON(w, r, sftpd.GetQuotaScans())
}

func startQuotaScan(w http.ResponseWriter, r *http.Request) {
	var u dataprovider.User
	err := render.DecodeJSON(r.Body, &u)
	if err != nil {
		sendAPIResponse(w, r, err, "", http.StatusBadRequest)
		return
	}
	user, err := dataprovider.UserExists(dataProvider, u.Username)
	if err != nil {
		sendAPIResponse(w, r, err, "", http.StatusNotFound)
		return
	}
	if sftpd.AddQuotaScan(user.Username) {
		sendAPIResponse(w, r, err, "Scan started", http.StatusCreated)
		go func() {
			numFiles, size, _, err := utils.ScanDirContents(user.HomeDir)
			if err != nil {
				logger.Warn(logSender, "", "error scanning user home dir %v: %v", user.HomeDir, err)
			} else {
				err := dataprovider.UpdateUserQuota(dataProvider, user, numFiles, size, true)
				logger.Debug(logSender, "", "user dir scanned, user: %v, dir: %v, error: %v", user.Username, user.HomeDir, err)
			}
			sftpd.RemoveQuotaScan(user.Username)
		}()
	} else {
		sendAPIResponse(w, r, err, "Another scan is already in progress", http.StatusConflict)
	}
}
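From a client's point of view the handler above makes quota scans asynchronous: the POST returns 201 Created immediately and the directory walk completes in a background goroutine. A hedged sketch of how a caller can drive this with the REST helpers from api_utils.go, polling the active-scan list until the scan finishes; the polling interval is an arbitrary choice.

// Sketch only: assumes the user already exists server side and that
// "net/http" and "time" are imported in the api test context.
func waitQuotaScanDone(t *testing.T, user dataprovider.User) {
	if _, err := StartQuotaScan(user, http.StatusCreated); err != nil {
		t.Fatalf("unable to start quota scan: %v", err)
	}
	for {
		scans, _, err := GetQuotaScans(http.StatusOK)
		if err != nil {
			t.Fatalf("unable to get quota scans: %v", err)
		}
		if len(scans) == 0 {
			return
		}
		time.Sleep(100 * time.Millisecond)
	}
}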
@@ -1,89 +0,0 @@
package api

import (
	"net/http"

	"github.com/drakkan/sftpgo/logger"
	"github.com/drakkan/sftpgo/sftpd"
	"github.com/drakkan/sftpgo/utils"
	"github.com/go-chi/chi"
	"github.com/go-chi/chi/middleware"
	"github.com/go-chi/render"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

// GetHTTPRouter returns the configured HTTP handler
func GetHTTPRouter() http.Handler {
	return router
}

func initializeRouter() {
	router = chi.NewRouter()
	router.Use(middleware.RequestID)
	router.Use(middleware.RealIP)
	router.Use(logger.NewStructuredLogger(logger.GetLogger()))
	router.Use(middleware.Recoverer)

	router.NotFound(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		sendAPIResponse(w, r, nil, "Not Found", http.StatusNotFound)
	}))

	router.MethodNotAllowed(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		sendAPIResponse(w, r, nil, "Method not allowed", http.StatusMethodNotAllowed)
	}))

	router.Handle(metricsPath, promhttp.Handler())

	router.Get(versionPath, func(w http.ResponseWriter, r *http.Request) {
		render.JSON(w, r, utils.GetAppVersion())
	})

	router.Get(activeConnectionsPath, func(w http.ResponseWriter, r *http.Request) {
		render.JSON(w, r, sftpd.GetConnectionsStats())
	})

	router.Delete(activeConnectionsPath+"/{connectionID}", func(w http.ResponseWriter, r *http.Request) {
		handleCloseConnection(w, r)
	})

	router.Get(quotaScanPath, func(w http.ResponseWriter, r *http.Request) {
		getQuotaScans(w, r)
	})

	router.Post(quotaScanPath, func(w http.ResponseWriter, r *http.Request) {
		startQuotaScan(w, r)
	})

	router.Get(userPath, func(w http.ResponseWriter, r *http.Request) {
		getUsers(w, r)
	})

	router.Post(userPath, func(w http.ResponseWriter, r *http.Request) {
		addUser(w, r)
	})

	router.Get(userPath+"/{userID}", func(w http.ResponseWriter, r *http.Request) {
		getUserByID(w, r)
	})

	router.Put(userPath+"/{userID}", func(w http.ResponseWriter, r *http.Request) {
		updateUser(w, r)
	})

	router.Delete(userPath+"/{userID}", func(w http.ResponseWriter, r *http.Request) {
		deleteUser(w, r)
	})
}

func handleCloseConnection(w http.ResponseWriter, r *http.Request) {
	connectionID := chi.URLParam(r, "connectionID")
	if connectionID == "" {
		sendAPIResponse(w, r, nil, "connectionID is mandatory", http.StatusBadRequest)
		return
	}
	if sftpd.CloseActiveConnection(connectionID) {
		sendAPIResponse(w, r, nil, "Connection closed", http.StatusOK)
	} else {
		sendAPIResponse(w, r, nil, "Not Found", http.StatusNotFound)
	}
}
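GetHTTPRouter is the package's only exported way to reach this handler tree. In SFTPGo the httpd service performs the wiring; a minimal standalone sketch, with an illustrative listen address, could look like this.

// Sketch only: assumes the api package has already been initialized
// elsewhere; in SFTPGo the httpd service does this wiring.
srv := &http.Server{
	Addr:    "127.0.0.1:8080",
	Handler: api.GetHTTPRouter(),
}
log.Fatal(srv.ListenAndServe())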
@@ -1,691 +0,0 @@
openapi: 3.0.1
info:
  title: SFTPGo
  description: 'SFTPGo REST API'
  version: 1.0.0

servers:
- url: /api/v1
paths:
  /version:
    get:
      tags:
      - version
      summary: Get version details
      operationId: get_version
      responses:
        200:
          description: successful operation
          content:
            application/json:
              schema:
                type: array
                items:
                  $ref: '#/components/schemas/VersionInfo'
  /connection:
    get:
      tags:
      - connections
      summary: Get the active users and info about their uploads/downloads
      operationId: get_connections
      responses:
        200:
          description: successful operation
          content:
            application/json:
              schema:
                type: array
                items:
                  $ref: '#/components/schemas/ConnectionStatus'
  /connection/{connectionID}:
    delete:
      tags:
      - connections
      summary: Terminate an active connection
      operationId: close_connection
      parameters:
      - name: connectionID
        in: path
        description: ID of the connection to close
        required: true
        schema:
          type: string
      responses:
        200:
          description: successful operation
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/ApiResponse'
              example:
                status: 200
                message: "Connection closed"
                error: ""
        400:
          description: Bad request
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/ApiResponse'
              example:
                status: 400
                message: ""
                error: "Error description if any"
        404:
          description: Not Found
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/ApiResponse'
              example:
                status: 404
                message: ""
                error: "Error description if any"
        500:
          description: Internal Server Error
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/ApiResponse'
              example:
                status: 500
                message: ""
                error: "Error description if any"
  /quota_scan:
    get:
      tags:
      - quota
      summary: Get the active quota scans
      operationId: get_quota_scans
      responses:
        200:
          description: successful operation
          content:
            application/json:
              schema:
                type: array
                items:
                  $ref: '#/components/schemas/QuotaScan'
    post:
      tags:
      - quota
      summary: Start a new quota scan
      description: A quota scan updates the number of files and their total size for the given user
      operationId: start_quota_scan
      requestBody:
        required: true
        content:
          application/json:
            schema:
              $ref: '#/components/schemas/User'
      responses:
        201:
          description: successful operation
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/ApiResponse'
              example:
                status: 201
                message: "Scan started"
                error: ""
        400:
          description: Bad request
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/ApiResponse'
              example:
                status: 400
                message: ""
                error: "Error description if any"
        403:
          description: Forbidden
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/ApiResponse'
              example:
                status: 403
                message: ""
                error: "Error description if any"
        404:
          description: Not Found
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/ApiResponse'
              example:
                status: 404
                message: ""
                error: "Error description if any"
        409:
          description: Another scan is already in progress for this user
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/ApiResponse'
              example:
                status: 409
                message: "Another scan is already in progress"
                error: "Error description if any"
        500:
          description: Internal Server Error
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/ApiResponse'
              example:
                status: 500
                message: ""
                error: "Error description if any"
  /user:
    get:
      tags:
      - users
      summary: Returns an array with one or more users
      description: For security reasons password and public key are empty in the response
      operationId: get_users
      parameters:
      - in: query
        name: offset
        schema:
          type: integer
          minimum: 0
          default: 0
        required: false
      - in: query
        name: limit
        schema:
          type: integer
          minimum: 1
          maximum: 500
          default: 100
        required: false
        description: The maximum number of items to return. Max value is 500, default is 100
      - in: query
        name: order
        required: false
        description: Ordering users by username
        schema:
          type: string
          enum:
          - ASC
          - DESC
          example: ASC
      - in: query
        name: username
        required: false
        description: Filter by username, exact match, case sensitive
        schema:
          type: string
      responses:
        200:
          description: successful operation
          content:
            application/json:
              schema:
                type: array
                items:
                  $ref: '#/components/schemas/User'
        400:
          description: Bad request
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/ApiResponse'
              example:
                status: 400
                message: ""
                error: "Error description if any"
        403:
          description: Forbidden
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/ApiResponse'
              example:
                status: 403
                message: ""
                error: "Error description if any"
        500:
          description: Internal Server Error
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/ApiResponse'
              example:
                status: 500
                message: ""
                error: "Error description if any"
    post:
      tags:
      - users
      summary: Adds a new SFTP/SCP user
      operationId: add_user
      requestBody:
        required: true
        content:
          application/json:
            schema:
              $ref: '#/components/schemas/User'
      responses:
        200:
          description: successful operation
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/User'
        400:
          description: Bad request
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/ApiResponse'
              example:
                status: 400
                message: ""
                error: "Error description if any"
        403:
          description: Forbidden
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/ApiResponse'
              example:
                status: 403
                message: ""
                error: "Error description if any"
        500:
          description: Internal Server Error
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/ApiResponse'
              example:
                status: 500
                message: ""
                error: "Error description if any"
  /user/{userID}:
    get:
      tags:
      - users
      summary: Find user by ID
      description: For security reasons password and public key are empty in the response
      operationId: get_user_by_id
      parameters:
      - name: userID
        in: path
        description: ID of the user to retrieve
        required: true
        schema:
          type: integer
          format: int32
      responses:
        200:
          description: successful operation
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/User'
        400:
          description: Bad request
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/ApiResponse'
              example:
                status: 400
                message: ""
                error: "Error description if any"
        403:
          description: Forbidden
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/ApiResponse'
              example:
                status: 403
                message: ""
                error: "Error description if any"
        404:
          description: Not Found
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/ApiResponse'
              example:
                status: 404
                message: ""
                error: "Error description if any"
        500:
          description: Internal Server Error
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/ApiResponse'
              example:
                status: 500
                message: ""
                error: "Error description if any"
    put:
      tags:
      - users
      summary: Update an existing user
      operationId: update_user
      parameters:
      - name: userID
        in: path
        description: ID of the user to update
        required: true
        schema:
          type: integer
          format: int32
      requestBody:
        required: true
        content:
          application/json:
            schema:
              $ref: '#/components/schemas/User'
      responses:
        200:
          description: successful operation
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/ApiResponse'
              example:
                status: 200
                message: "User updated"
                error: ""
        400:
          description: Bad request
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/ApiResponse'
              example:
                status: 400
                message: ""
                error: "Error description if any"
        403:
          description: Forbidden
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/ApiResponse'
              example:
                status: 403
                message: ""
                error: "Error description if any"
        404:
          description: Not Found
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/ApiResponse'
              example:
                status: 404
                message: ""
                error: "Error description if any"
        500:
          description: Internal Server Error
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/ApiResponse'
              example:
                status: 500
                message: ""
                error: "Error description if any"
    delete:
      tags:
      - users
      summary: Delete an existing user
      operationId: delete_user
      parameters:
      - name: userID
        in: path
        description: ID of the user to delete
        required: true
        schema:
          type: integer
          format: int32
      responses:
        200:
          description: successful operation
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/ApiResponse'
              example:
                status: 200
                message: "User deleted"
                error: ""
        400:
          description: Bad request
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/ApiResponse'
              example:
                status: 400
                message: ""
                error: "Error description if any"
        403:
          description: Forbidden
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/ApiResponse'
              example:
                status: 403
                message: ""
                error: "Error description if any"
        404:
          description: Not Found
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/ApiResponse'
              example:
                status: 404
                message: ""
                error: "Error description if any"
        500:
          description: Internal Server Error
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/ApiResponse'
              example:
                status: 500
                message: ""
                error: "Error description if any"
components:
  schemas:
    Permission:
      type: string
      enum:
      - '*'
      - list
      - download
      - upload
      - overwrite
      - delete
      - rename
      - create_dirs
      - create_symlinks
      description: >
        Permissions:
         * `*` - all permissions are granted
         * `list` - list items is allowed
         * `download` - download files is allowed
         * `upload` - upload files is allowed
         * `overwrite` - overwrite an existing file, while uploading, is allowed. upload permission is required to allow file overwrite
         * `delete` - delete files or directories is allowed
         * `rename` - rename files or directories is allowed
         * `create_dirs` - create directories is allowed
         * `create_symlinks` - create links is allowed
    User:
      type: object
      properties:
        id:
          type: integer
          format: int32
          minimum: 1
        username:
          type: string
        password:
          type: string
          nullable: true
          description: A password or public key is mandatory. If the password has no known hashing algo prefix it will be stored using argon2id. You can send a password hashed as bcrypt or pbkdf2 and it will be stored as is. For security reasons this field is omitted when you search/get users
        public_keys:
          type: array
          items:
            type: string
          nullable: true
          description: A password or at least one public key is mandatory. For security reasons this field is omitted when you search/get users.
        home_dir:
          type: string
          description: path to the user home directory. The user cannot upload or download files outside this directory. SFTPGo tries to automatically create this folder if missing. Must be an absolute path
        uid:
          type: integer
          format: int32
          minimum: 0
          maximum: 65535
          description: if you run sftpgo as root user the created files and directories will be assigned to this uid. 0 means no change, the owner will be the user that runs sftpgo. Ignored on Windows
        gid:
          type: integer
          format: int32
          minimum: 0
          maximum: 65535
          description: if you run sftpgo as root user the created files and directories will be assigned to this gid. 0 means no change, the group will be the one of the user that runs sftpgo. Ignored on Windows
        max_sessions:
          type: integer
          format: int32
          description: limit the sessions that a user can open. 0 means unlimited
        quota_size:
          type: integer
          format: int64
          description: quota as size. 0 means unlimited. Please note that quota is updated if files are added/removed via SFTP/SCP, otherwise a quota scan is needed
        quota_files:
          type: integer
          format: int32
          description: quota as number of files. 0 means unlimited. Please note that quota is updated if files are added/removed via SFTP/SCP, otherwise a quota scan is needed
        permissions:
          type: array
          items:
            $ref: '#/components/schemas/Permission'
          minItems: 1
        used_quota_size:
          type: integer
          format: int64
        used_quota_file:
          type: integer
          format: int32
        last_quota_update:
          type: integer
          format: int64
          description: last quota update as unix timestamp in milliseconds
        upload_bandwidth:
          type: integer
          format: int32
          description: Maximum upload bandwidth as KB/s, 0 means unlimited
        download_bandwidth:
          type: integer
          format: int32
          description: Maximum download bandwidth as KB/s, 0 means unlimited
    Transfer:
      type: object
      properties:
        operation_type:
          type: string
          enum:
          - upload
          - download
        path:
          type: string
          description: SFTP/SCP file path for the upload/download
        start_time:
          type: integer
          format: int64
          description: start time as unix timestamp in milliseconds
        size:
          type: integer
          format: int64
          description: bytes transferred
        last_activity:
          type: integer
          format: int64
          description: last transfer activity as unix timestamp in milliseconds
    ConnectionStatus:
      type: object
      properties:
        username:
          type: string
          description: connected username
        connection_id:
          type: string
          description: unique connection identifier
        client_version:
          type: string
          description: SFTP/SCP client version
        remote_address:
          type: string
          description: Remote address for the connected SFTP/SCP client
        connection_time:
          type: integer
          format: int64
          description: connection time as unix timestamp in milliseconds
        last_activity:
          type: integer
          format: int64
          description: last client activity as unix timestamp in milliseconds
        protocol:
          type: string
          enum:
          - SFTP
          - SCP
        active_transfers:
          type: array
          items:
            $ref: '#/components/schemas/Transfer'
    QuotaScan:
      type: object
      properties:
        username:
          type: string
          description: username with an active scan
        start_time:
          type: integer
          format: int64
          description: scan start time as unix timestamp in milliseconds
    ApiResponse:
      type: object
      properties:
        status:
          type: integer
          format: int32
          minimum: 200
          maximum: 500
          example: 200
          description: HTTP Status code, for example 200 OK, 400 Bad request and so on
        message:
          type: string
          nullable: true
          description: additional message if any
        error:
          type: string
          nullable: true
          description: error description if any
    VersionInfo:
      type: object
      properties:
        version:
          type: string
        build_date:
          type: string
        commit_hash:
          type: string
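Reading the User schema together with the add_user operation: a request body needs at least a username, an absolute home_dir, a password or one public key, and a non-empty permissions array. An illustrative payload follows; every value is made up.

{
  "username": "test_user",
  "password": "secret_password",
  "home_dir": "/srv/sftpgo/data/test_user",
  "max_sessions": 2,
  "quota_files": 0,
  "permissions": ["list", "download", "upload", "delete"],
  "upload_bandwidth": 0,
  "download_bandwidth": 0
}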
api/user.go
@@ -1,151 +0,0 @@
package api

import (
	"errors"
	"net/http"
	"strconv"

	"github.com/drakkan/sftpgo/dataprovider"
	"github.com/go-chi/chi"
	"github.com/go-chi/render"
)

func getUsers(w http.ResponseWriter, r *http.Request) {
	limit := 100
	offset := 0
	order := "ASC"
	username := ""
	var err error
	if _, ok := r.URL.Query()["limit"]; ok {
		limit, err = strconv.Atoi(r.URL.Query().Get("limit"))
		if err != nil {
			err = errors.New("Invalid limit")
			sendAPIResponse(w, r, err, "", http.StatusBadRequest)
			return
		}
		if limit > 500 {
			limit = 500
		}
	}
	if _, ok := r.URL.Query()["offset"]; ok {
		offset, err = strconv.Atoi(r.URL.Query().Get("offset"))
		if err != nil {
			err = errors.New("Invalid offset")
			sendAPIResponse(w, r, err, "", http.StatusBadRequest)
			return
		}
	}
	if _, ok := r.URL.Query()["order"]; ok {
		order = r.URL.Query().Get("order")
		if order != "ASC" && order != "DESC" {
			err = errors.New("Invalid order")
			sendAPIResponse(w, r, err, "", http.StatusBadRequest)
			return
		}
	}
	if _, ok := r.URL.Query()["username"]; ok {
		username = r.URL.Query().Get("username")
	}
	users, err := dataprovider.GetUsers(dataProvider, limit, offset, order, username)
	if err == nil {
		render.JSON(w, r, users)
	} else {
		sendAPIResponse(w, r, err, "", http.StatusInternalServerError)
	}
}

func getUserByID(w http.ResponseWriter, r *http.Request) {
	userID, err := strconv.ParseInt(chi.URLParam(r, "userID"), 10, 64)
	if err != nil {
		err = errors.New("Invalid userID")
		sendAPIResponse(w, r, err, "", http.StatusBadRequest)
		return
	}
	user, err := dataprovider.GetUserByID(dataProvider, userID)
	if err == nil {
		user.Password = ""
		user.PublicKeys = []string{}
		render.JSON(w, r, user)
	} else if _, ok := err.(*dataprovider.RecordNotFoundError); ok {
		sendAPIResponse(w, r, err, "", http.StatusNotFound)
	} else {
		sendAPIResponse(w, r, err, "", http.StatusInternalServerError)
	}
}

func addUser(w http.ResponseWriter, r *http.Request) {
	var user dataprovider.User
	err := render.DecodeJSON(r.Body, &user)
	if err != nil {
		sendAPIResponse(w, r, err, "", http.StatusBadRequest)
		return
	}
	err = dataprovider.AddUser(dataProvider, user)
	if err == nil {
		user, err = dataprovider.UserExists(dataProvider, user.Username)
		if err == nil {
			user.Password = ""
			user.PublicKeys = []string{}
			render.JSON(w, r, user)
		} else {
			sendAPIResponse(w, r, err, "", http.StatusInternalServerError)
		}
	} else {
		sendAPIResponse(w, r, err, "", getRespStatus(err))
	}
}

func updateUser(w http.ResponseWriter, r *http.Request) {
	userID, err := strconv.ParseInt(chi.URLParam(r, "userID"), 10, 64)
	if err != nil {
		err = errors.New("Invalid userID")
		sendAPIResponse(w, r, err, "", http.StatusBadRequest)
		return
	}
	user, err := dataprovider.GetUserByID(dataProvider, userID)
	if _, ok := err.(*dataprovider.RecordNotFoundError); ok {
		sendAPIResponse(w, r, err, "", http.StatusNotFound)
		return
	} else if err != nil {
		sendAPIResponse(w, r, err, "", http.StatusInternalServerError)
		return
	}
	err = render.DecodeJSON(r.Body, &user)
	if err != nil {
		sendAPIResponse(w, r, err, "", http.StatusBadRequest)
		return
	}
	if user.ID != userID {
		sendAPIResponse(w, r, err, "user ID in request body does not match user ID in path parameter", http.StatusBadRequest)
		return
	}
	err = dataprovider.UpdateUser(dataProvider, user)
	if err != nil {
		sendAPIResponse(w, r, err, "", getRespStatus(err))
	} else {
		sendAPIResponse(w, r, err, "User updated", http.StatusOK)
	}
}

func deleteUser(w http.ResponseWriter, r *http.Request) {
	userID, err := strconv.ParseInt(chi.URLParam(r, "userID"), 10, 64)
	if err != nil {
		err = errors.New("Invalid userID")
		sendAPIResponse(w, r, err, "", http.StatusBadRequest)
		return
	}
	user, err := dataprovider.GetUserByID(dataProvider, userID)
	if _, ok := err.(*dataprovider.RecordNotFoundError); ok {
		sendAPIResponse(w, r, err, "", http.StatusNotFound)
		return
	} else if err != nil {
		sendAPIResponse(w, r, err, "", http.StatusInternalServerError)
		return
	}
	err = dataprovider.DeleteUser(dataProvider, user)
	if err != nil {
		sendAPIResponse(w, r, err, "", http.StatusInternalServerError)
	} else {
		sendAPIResponse(w, r, err, "User deleted", http.StatusOK)
	}
}
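Note that getUsers clamps limit to 500 instead of rejecting larger values, so a client lists large user sets by walking the offset. A short sketch using the GetUsers helper from api_utils.go; the page size is arbitrary.

// Sketch only: pages through all users with the server's default ASC
// ordering; assumes "log" and "net/http" are imported.
var allUsers []dataprovider.User
for offset := int64(0); ; offset += 100 {
	page, _, err := GetUsers(100, offset, "", http.StatusOK)
	if err != nil {
		log.Fatalf("unable to list users: %v", err)
	}
	allUsers = append(allUsers, page...)
	if len(page) < 100 {
		break
	}
}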
cmd/acme.go
@@ -0,0 +1,69 @@
// Copyright (C) 2019-2022 Nicola Murino
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, version 3.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

package cmd

import (
	"os"

	"github.com/rs/zerolog"
	"github.com/spf13/cobra"

	"github.com/drakkan/sftpgo/v2/acme"
	"github.com/drakkan/sftpgo/v2/config"
	"github.com/drakkan/sftpgo/v2/logger"
	"github.com/drakkan/sftpgo/v2/util"
)

var (
	acmeCmd = &cobra.Command{
		Use:   "acme",
		Short: "Obtain TLS certificates from ACME-based CAs like Let's Encrypt",
	}
	acmeRunCmd = &cobra.Command{
		Use:   "run",
		Short: "Register your account and obtain certificates",
		Long: `This command must be run to obtain TLS certificates the first time or every
time you add a new domain to your configuration file.
Certificates are saved in the configured "certs_path".
After this initial step, the certificates are automatically checked and
renewed by the SFTPGo service.
`,
		Run: func(cmd *cobra.Command, args []string) {
			logger.DisableLogger()
			logger.EnableConsoleLogger(zerolog.DebugLevel)
			configDir = util.CleanDirInput(configDir)
			err := config.LoadConfig(configDir, configFile)
			if err != nil {
				logger.ErrorToConsole("Unable to load configuration: %v", err)
				return
			}
			acmeConfig := config.GetACMEConfig()
			err = acmeConfig.Initialize(configDir, false)
			if err != nil {
				logger.ErrorToConsole("Unable to initialize ACME configuration: %v", err)
				// abort: requesting certificates with an uninitialized
				// ACME configuration cannot succeed
				os.Exit(1)
			}
			if err = acme.GetCertificates(); err != nil {
				logger.ErrorToConsole("Cannot get certificates: %v", err)
				os.Exit(1)
			}
		},
	}
)

func init() {
	addConfigFlags(acmeRunCmd)
	acmeCmd.AddCommand(acmeRunCmd)
	rootCmd.AddCommand(acmeCmd)
}
cmd/awscontainer.go
@@ -0,0 +1,34 @@
// Copyright (C) 2019-2022 Nicola Murino
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, version 3.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

//go:build awscontainer
// +build awscontainer

package cmd

import (
	"github.com/spf13/cobra"
	"github.com/spf13/viper"
)

func addAWSContainerFlags(cmd *cobra.Command) {
	viper.SetDefault("disable_aws_installation_code", false)
	viper.BindEnv("disable_aws_installation_code", "SFTPGO_DISABLE_AWS_INSTALLATION_CODE") //nolint:errcheck
	cmd.Flags().BoolVar(&disableAWSInstallationCode, "disable-aws-installation-code", viper.GetBool("disable_aws_installation_code"),
		`Disable installation code for the AWS container.
This flag can be set using
SFTPGO_DISABLE_AWS_INSTALLATION_CODE env var too.
`)
	viper.BindPFlag("disable_aws_installation_code", cmd.Flags().Lookup("disable-aws-installation-code")) //nolint:errcheck
}
cmd/awscontainer_disabled.go
@@ -0,0 +1,24 @@
// Copyright (C) 2019-2022 Nicola Murino
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, version 3.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

//go:build !awscontainer
// +build !awscontainer

package cmd

import (
	"github.com/spf13/cobra"
)

func addAWSContainerFlags(cmd *cobra.Command) {}
cmd/gen.go
@@ -0,0 +1,26 @@
// Copyright (C) 2019-2022 Nicola Murino
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, version 3.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

package cmd

import "github.com/spf13/cobra"

var genCmd = &cobra.Command{
	Use:   "gen",
	Short: "A collection of useful generators",
}

func init() {
	rootCmd.AddCommand(genCmd)
}
cmd/gencompletion.go
@@ -0,0 +1,133 @@
|
||||
// Copyright (C) 2019-2022 Nicola Murino
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published
|
||||
// by the Free Software Foundation, version 3.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
package cmd
import (
	"os"

	"github.com/spf13/cobra"
)

var genCompletionCmd = &cobra.Command{
	Use:   "completion [bash|zsh|fish|powershell]",
	Short: "Generate the autocompletion script for the specified shell",
	Long: `Generate the autocompletion script for sftpgo for the specified shell.

See each sub-command's help for details on how to use the generated script.
`,
}

var genCompletionBashCmd = &cobra.Command{
	Use:   "bash",
	Short: "Generate the autocompletion script for bash",
	Long: `Generate the autocompletion script for the bash shell.

This script depends on the 'bash-completion' package.
If it is not installed already, you can install it via your OS's package
manager.

To load completions in your current shell session:

	$ source <(sftpgo gen completion bash)

To load completions for every new session, execute once:

Linux:
	$ sudo sftpgo gen completion bash > /usr/share/bash-completion/completions/sftpgo

macOS:
	$ sudo sftpgo gen completion bash > /usr/local/etc/bash_completion.d/sftpgo

You will need to start a new shell for this setup to take effect.
`,
	DisableFlagsInUseLine: true,
	RunE: func(cmd *cobra.Command, args []string) error {
		return cmd.Root().GenBashCompletionV2(os.Stdout, true)
	},
}

var genCompletionZshCmd = &cobra.Command{
	Use:   "zsh",
	Short: "Generate the autocompletion script for zsh",
	Long: `Generate the autocompletion script for the zsh shell.

If shell completion is not already enabled in your environment you will need
to enable it. You can execute the following once:

	$ echo "autoload -U compinit; compinit" >> ~/.zshrc

To load completions for every new session, execute once:

Linux:
	$ sftpgo gen completion zsh > "${fpath[1]}/_sftpgo"

macOS:
	$ sudo sftpgo gen completion zsh > /usr/local/share/zsh/site-functions/_sftpgo

You will need to start a new shell for this setup to take effect.
`,
	DisableFlagsInUseLine: true,
	RunE: func(cmd *cobra.Command, args []string) error {
		return cmd.Root().GenZshCompletion(os.Stdout)
	},
}

var genCompletionFishCmd = &cobra.Command{
	Use:   "fish",
	Short: "Generate the autocompletion script for fish",
	Long: `Generate the autocompletion script for the fish shell.

To load completions in your current shell session:

	$ sftpgo gen completion fish | source

To load completions for every new session, execute once:

	$ sftpgo gen completion fish > ~/.config/fish/completions/sftpgo.fish

You will need to start a new shell for this setup to take effect.
`,
	DisableFlagsInUseLine: true,
	RunE: func(cmd *cobra.Command, args []string) error {
		return cmd.Root().GenFishCompletion(os.Stdout, true)
	},
}

var genCompletionPowerShellCmd = &cobra.Command{
	Use:   "powershell",
	Short: "Generate the autocompletion script for powershell",
	Long: `Generate the autocompletion script for powershell.

To load completions in your current shell session:

	PS C:\> sftpgo gen completion powershell | Out-String | Invoke-Expression

To load completions for every new session, add the output of the above command
to your powershell profile.
`,
	DisableFlagsInUseLine: true,
	RunE: func(cmd *cobra.Command, args []string) error {
		return cmd.Root().GenPowerShellCompletionWithDesc(os.Stdout)
	},
}

func init() {
	genCompletionCmd.AddCommand(genCompletionBashCmd)
	genCompletionCmd.AddCommand(genCompletionZshCmd)
	genCompletionCmd.AddCommand(genCompletionFishCmd)
	genCompletionCmd.AddCommand(genCompletionPowerShellCmd)

	genCmd.AddCommand(genCompletionCmd)
}
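Note that the four RunE hooks above all stream to os.Stdout, but the cobra generators accept any io.Writer. A minimal sketch of writing the bash script to a file instead; the helper name and output path are illustrative, not part of this commit:

// writeBashCompletion is a hypothetical helper; only the
// GenBashCompletionV2 call mirrors the bash RunE above.
func writeBashCompletion(root *cobra.Command) error {
	f, err := os.Create("sftpgo.bash") // assumed output path
	if err != nil {
		return err
	}
	defer f.Close()
	// true keeps the completion descriptions, as in the RunE above
	return root.GenBashCompletionV2(f, true)
}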
cmd/genman.go (new file, 69 lines)
@@ -0,0 +1,69 @@
// Copyright (C) 2019-2022 Nicola Murino
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, version 3.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

package cmd

import (
	"errors"
	"fmt"
	"io/fs"
	"os"

	"github.com/rs/zerolog"
	"github.com/spf13/cobra"
	"github.com/spf13/cobra/doc"

	"github.com/drakkan/sftpgo/v2/logger"
	"github.com/drakkan/sftpgo/v2/version"
)

var (
	manDir    string
	genManCmd = &cobra.Command{
		Use:   "man",
		Short: "Generate man pages for sftpgo",
		Long: `This command automatically generates up-to-date man pages of SFTPGo's
command-line interface.
By default, it creates the man page files in the "man" directory under the
current directory.
`,
		Run: func(cmd *cobra.Command, args []string) {
			logger.DisableLogger()
			logger.EnableConsoleLogger(zerolog.DebugLevel)
			if _, err := os.Stat(manDir); errors.Is(err, fs.ErrNotExist) {
				err = os.MkdirAll(manDir, os.ModePerm)
				if err != nil {
					logger.WarnToConsole("Unable to generate man page files: %v", err)
					os.Exit(1)
				}
			}
			header := &doc.GenManHeader{
				Section: "1",
				Manual:  "SFTPGo Manual",
				Source:  fmt.Sprintf("SFTPGo %v", version.Get().Version),
			}
			cmd.Root().DisableAutoGenTag = true
			err := doc.GenManTree(cmd.Root(), header, manDir)
			if err != nil {
				logger.WarnToConsole("Unable to generate man page files: %v", err)
				os.Exit(1)
			}
		},
	}
)

func init() {
	genManCmd.Flags().StringVarP(&manDir, "dir", "d", "man", "The directory to write the man pages")
	genCmd.AddCommand(genManCmd)
}
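For reference, doc.GenManTree emits one roff page per command in the tree and expects the target directory to exist, which is why the Run hook above creates manDir first. A self-contained sketch of the same cobra/doc API on a toy command; the command and directory names are illustrative:

package main

import (
	"log"
	"os"

	"github.com/spf13/cobra"
	"github.com/spf13/cobra/doc"
)

func main() {
	// Toy root command; stands in for sftpgo's rootCmd.
	root := &cobra.Command{Use: "demo", Short: "toy CLI"}
	// GenManTree does not create the directory itself.
	if err := os.MkdirAll("man", 0o755); err != nil {
		log.Fatal(err)
	}
	header := &doc.GenManHeader{Section: "1", Manual: "Demo Manual"}
	// Writes demo.1 into ./man; each subcommand would get its own page.
	if err := doc.GenManTree(root, header, "man"); err != nil {
		log.Fatal(err)
	}
}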
cmd/initprovider.go (new file, 103 lines)
@@ -0,0 +1,103 @@
// Copyright (C) 2019-2022 Nicola Murino
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, version 3.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

package cmd

import (
	"os"

	"github.com/rs/zerolog"
	"github.com/spf13/cobra"
	"github.com/spf13/viper"

	"github.com/drakkan/sftpgo/v2/config"
	"github.com/drakkan/sftpgo/v2/dataprovider"
	"github.com/drakkan/sftpgo/v2/logger"
	"github.com/drakkan/sftpgo/v2/service"
	"github.com/drakkan/sftpgo/v2/util"
)

var (
	initProviderCmd = &cobra.Command{
		Use:   "initprovider",
		Short: "Initialize and/or update the configured data provider",
		Long: `This command reads the data provider connection details from the specified
configuration file and creates the initial structure or updates the existing
one, as needed.

Some data providers, such as bolt and memory, do not require an initialization
but they could require an update to the existing data after upgrading SFTPGo.

For SQLite/bolt providers the database file will be auto-created if missing.

For PostgreSQL and MySQL providers you need to create the configured database;
this command will create/update the required tables as needed.

To initialize/update the data provider from the configuration directory simply use:

$ sftpgo initprovider

Any defined action is ignored.
Please take a look at the usage below to customize the options.`,
		Run: func(cmd *cobra.Command, args []string) {
			logger.DisableLogger()
			logger.EnableConsoleLogger(zerolog.DebugLevel)
			configDir = util.CleanDirInput(configDir)
			err := config.LoadConfig(configDir, configFile)
			if err != nil {
				logger.ErrorToConsole("Unable to initialize data provider, config load error: %v", err)
				return
			}
			kmsConfig := config.GetKMSConfig()
			err = kmsConfig.Initialize()
			if err != nil {
				logger.ErrorToConsole("Unable to initialize KMS: %v", err)
				os.Exit(1)
			}
			providerConf := config.GetProviderConf()
			// ignore actions
			providerConf.Actions.Hook = ""
			providerConf.Actions.ExecuteFor = nil
			providerConf.Actions.ExecuteOn = nil
			logger.InfoToConsole("Initializing provider: %#v config file: %#v", providerConf.Driver, viper.ConfigFileUsed())
			err = dataprovider.InitializeDatabase(providerConf, configDir)
			if err == nil {
				logger.InfoToConsole("Data provider successfully initialized/updated")
			} else if err == dataprovider.ErrNoInitRequired {
				logger.InfoToConsole("%v", err.Error())
			} else {
				logger.ErrorToConsole("Unable to initialize/update the data provider: %v", err)
				os.Exit(1)
			}
			if providerConf.Driver != dataprovider.MemoryDataProviderName && loadDataFrom != "" {
				service := service.Service{
					LoadDataFrom:      loadDataFrom,
					LoadDataMode:      loadDataMode,
					LoadDataQuotaScan: loadDataQuotaScan,
					LoadDataClean:     loadDataClean,
				}
				if err = service.LoadInitialData(); err != nil {
					logger.ErrorToConsole("Cannot load initial data: %v", err)
					os.Exit(1)
				}
			}
		},
	}
)

func init() {
	rootCmd.AddCommand(initProviderCmd)
	addConfigFlags(initProviderCmd)
	addBaseLoadDataFlags(initProviderCmd)
}
@@ -1,24 +1,43 @@
// Copyright (C) 2019-2022 Nicola Murino
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, version 3.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

package cmd

import (
	"fmt"
	"os"
	"strconv"

	"github.com/drakkan/sftpgo/service"
	"github.com/spf13/cobra"

	"github.com/drakkan/sftpgo/v2/service"
	"github.com/drakkan/sftpgo/v2/util"
)

var (
	installCmd = &cobra.Command{
		Use:   "install",
		Short: "Install SFTPGo as Windows Service",
		Long: `To install the SFTPGo Windows Service with the default values for the command line flags simply use:
		Long: `To install the SFTPGo Windows Service with the default values for the command
line flags simply use:

sftpgo service install

Please take a look at the usage below to customize the startup options`,
		Run: func(cmd *cobra.Command, args []string) {
			s := service.Service{
				ConfigDir:     configDir,
				ConfigDir:     util.CleanDirInput(configDir),
				ConfigFile:    configFile,
				LogFilePath:   logFilePath,
				LogMaxSize:    logMaxSize,
@@ -26,6 +45,7 @@ Please take a look at the usage below to customize the startup options`,
				LogMaxAge:   logMaxAge,
				LogCompress: logCompress,
				LogVerbose:  logVerbose,
				LogUTCTime:  logUTCTime,
				Shutdown:    make(chan bool),
			}
			winService := service.WindowsService{
@@ -39,6 +59,7 @@ Please take a look at the usage below to customize the startup options`,
			err := winService.Install(serviceArgs...)
			if err != nil {
				fmt.Printf("Error installing service: %v\r\n", err)
				os.Exit(1)
			} else {
				fmt.Printf("Service installed!\r\n")
			}
@@ -50,3 +71,42 @@ func init() {
	serviceCmd.AddCommand(installCmd)
	addServeFlags(installCmd)
}

func getCustomServeFlags() []string {
	result := []string{}
	if configDir != defaultConfigDir {
		configDir = util.CleanDirInput(configDir)
		result = append(result, "--"+configDirFlag)
		result = append(result, configDir)
	}
	if configFile != defaultConfigFile {
		result = append(result, "--"+configFileFlag)
		result = append(result, configFile)
	}
	if logFilePath != defaultLogFile {
		result = append(result, "--"+logFilePathFlag)
		result = append(result, logFilePath)
	}
	if logMaxSize != defaultLogMaxSize {
		result = append(result, "--"+logMaxSizeFlag)
		result = append(result, strconv.Itoa(logMaxSize))
	}
	if logMaxBackups != defaultLogMaxBackup {
		result = append(result, "--"+logMaxBackupFlag)
		result = append(result, strconv.Itoa(logMaxBackups))
	}
	if logMaxAge != defaultLogMaxAge {
		result = append(result, "--"+logMaxAgeFlag)
		result = append(result, strconv.Itoa(logMaxAge))
	}
	if logVerbose != defaultLogVerbose {
		result = append(result, "--"+logVerboseFlag+"=false")
	}
	if logUTCTime != defaultLogUTCTime {
		result = append(result, "--"+logUTCTimeFlag+"=true")
	}
	if logCompress != defaultLogCompress {
		result = append(result, "--"+logCompressFlag+"=true")
	}
	return result
}
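getCustomServeFlags only forwards flags whose current value differs from the compiled-in default, so the Windows service is registered with the shortest argument list that reproduces the operator's choices. An illustrative trace, with invented values:

// Assuming the installer was run with a custom config dir, a 20 MB log
// size and verbose logging switched off:
//
//	configDir = "C:\\sftpgo-conf", logMaxSize = 20, logVerbose = false
//
// getCustomServeFlags() would return:
//
//	[]string{"--config-dir", "C:\\sftpgo-conf", "--log-max-size", "20", "--log-verbose=false"}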
cmd/portable.go (new file, 473 lines)
@@ -0,0 +1,473 @@
// Copyright (C) 2019-2022 Nicola Murino
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, version 3.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

//go:build !noportable
// +build !noportable

package cmd

import (
	"fmt"
	"os"
	"path"
	"path/filepath"
	"strings"

	"github.com/sftpgo/sdk"
	"github.com/spf13/cobra"

	"github.com/drakkan/sftpgo/v2/common"
	"github.com/drakkan/sftpgo/v2/dataprovider"
	"github.com/drakkan/sftpgo/v2/kms"
	"github.com/drakkan/sftpgo/v2/service"
	"github.com/drakkan/sftpgo/v2/sftpd"
	"github.com/drakkan/sftpgo/v2/version"
	"github.com/drakkan/sftpgo/v2/vfs"
)

var (
	directoryToServe string
	portableSFTPDPort int
	portableAdvertiseService bool
	portableAdvertiseCredentials bool
	portableUsername string
	portablePassword string
	portableStartDir string
	portableLogFile string
	portableLogVerbose bool
	portableLogUTCTime bool
	portablePublicKeys []string
	portablePermissions []string
	portableSSHCommands []string
	portableAllowedPatterns []string
	portableDeniedPatterns []string
	portableFsProvider string
	portableS3Bucket string
	portableS3Region string
	portableS3AccessKey string
	portableS3AccessSecret string
	portableS3RoleARN string
	portableS3Endpoint string
	portableS3StorageClass string
	portableS3ACL string
	portableS3KeyPrefix string
	portableS3ULPartSize int
	portableS3ULConcurrency int
	portableS3ForcePathStyle bool
	portableGCSBucket string
	portableGCSCredentialsFile string
	portableGCSAutoCredentials int
	portableGCSStorageClass string
	portableGCSKeyPrefix string
	portableFTPDPort int
	portableFTPSCert string
	portableFTPSKey string
	portableWebDAVPort int
	portableWebDAVCert string
	portableWebDAVKey string
	portableAzContainer string
	portableAzAccountName string
	portableAzAccountKey string
	portableAzEndpoint string
	portableAzAccessTier string
	portableAzSASURL string
	portableAzKeyPrefix string
	portableAzULPartSize int
	portableAzULConcurrency int
	portableAzDLPartSize int
	portableAzDLConcurrency int
	portableAzUseEmulator bool
	portableCryptPassphrase string
	portableSFTPEndpoint string
	portableSFTPUsername string
	portableSFTPPassword string
	portableSFTPPrivateKeyPath string
	portableSFTPFingerprints []string
	portableSFTPPrefix string
	portableSFTPDisableConcurrentReads bool
	portableSFTPDBufferSize int64
	portableCmd = &cobra.Command{
		Use:   "portable",
		Short: "Serve a single directory/account",
		Long: `To serve the current working directory with auto generated credentials simply
use:

$ sftpgo portable

Please take a look at the usage below to customize the serving parameters`,
		Run: func(cmd *cobra.Command, args []string) {
			portableDir := directoryToServe
			fsProvider := sdk.GetProviderByName(portableFsProvider)
			if !filepath.IsAbs(portableDir) {
				if fsProvider == sdk.LocalFilesystemProvider {
					portableDir, _ = filepath.Abs(portableDir)
				} else {
					portableDir = os.TempDir()
				}
			}
			permissions := make(map[string][]string)
			permissions["/"] = portablePermissions
			portableGCSCredentials := ""
			if fsProvider == sdk.GCSFilesystemProvider && portableGCSCredentialsFile != "" {
				contents, err := getFileContents(portableGCSCredentialsFile)
				if err != nil {
					fmt.Printf("Unable to get GCS credentials: %v\n", err)
					os.Exit(1)
				}
				portableGCSCredentials = contents
				portableGCSAutoCredentials = 0
			}
			portableSFTPPrivateKey := ""
			if fsProvider == sdk.SFTPFilesystemProvider && portableSFTPPrivateKeyPath != "" {
				contents, err := getFileContents(portableSFTPPrivateKeyPath)
				if err != nil {
					fmt.Printf("Unable to get SFTP private key: %v\n", err)
					os.Exit(1)
				}
				portableSFTPPrivateKey = contents
			}
			if portableFTPDPort >= 0 && portableFTPSCert != "" && portableFTPSKey != "" {
				keyPairs := []common.TLSKeyPair{
					{
						Cert: portableFTPSCert,
						Key:  portableFTPSKey,
						ID:   common.DefaultTLSKeyPaidID,
					},
				}
				_, err := common.NewCertManager(keyPairs, filepath.Clean(defaultConfigDir),
					"FTP portable")
				if err != nil {
					fmt.Printf("Unable to load FTPS key pair, cert file %#v key file %#v error: %v\n",
						portableFTPSCert, portableFTPSKey, err)
					os.Exit(1)
				}
			}
			if portableWebDAVPort > 0 && portableWebDAVCert != "" && portableWebDAVKey != "" {
				keyPairs := []common.TLSKeyPair{
					{
						Cert: portableWebDAVCert,
						Key:  portableWebDAVKey,
						ID:   common.DefaultTLSKeyPaidID,
					},
				}
				_, err := common.NewCertManager(keyPairs, filepath.Clean(defaultConfigDir),
					"WebDAV portable")
				if err != nil {
					fmt.Printf("Unable to load WebDAV key pair, cert file %#v key file %#v error: %v\n",
						portableWebDAVCert, portableWebDAVKey, err)
					os.Exit(1)
				}
			}
			service := service.Service{
				ConfigDir:     filepath.Clean(defaultConfigDir),
				ConfigFile:    defaultConfigFile,
				LogFilePath:   portableLogFile,
				LogMaxSize:    defaultLogMaxSize,
				LogMaxBackups: defaultLogMaxBackup,
				LogMaxAge:     defaultLogMaxAge,
				LogCompress:   defaultLogCompress,
				LogVerbose:    portableLogVerbose,
				LogUTCTime:    portableLogUTCTime,
				Shutdown:      make(chan bool),
				PortableMode:  1,
				PortableUser: dataprovider.User{
					BaseUser: sdk.BaseUser{
						Username:    portableUsername,
						Password:    portablePassword,
						PublicKeys:  portablePublicKeys,
						Permissions: permissions,
						HomeDir:     portableDir,
						Status:      1,
					},
					Filters: dataprovider.UserFilters{
						BaseUserFilters: sdk.BaseUserFilters{
							FilePatterns:   parsePatternsFilesFilters(),
							StartDirectory: portableStartDir,
						},
					},
					FsConfig: vfs.Filesystem{
						Provider: sdk.GetProviderByName(portableFsProvider),
						S3Config: vfs.S3FsConfig{
							BaseS3FsConfig: sdk.BaseS3FsConfig{
								Bucket:            portableS3Bucket,
								Region:            portableS3Region,
								AccessKey:         portableS3AccessKey,
								RoleARN:           portableS3RoleARN,
								Endpoint:          portableS3Endpoint,
								StorageClass:      portableS3StorageClass,
								ACL:               portableS3ACL,
								KeyPrefix:         portableS3KeyPrefix,
								UploadPartSize:    int64(portableS3ULPartSize),
								UploadConcurrency: portableS3ULConcurrency,
								ForcePathStyle:    portableS3ForcePathStyle,
							},
							AccessSecret: kms.NewPlainSecret(portableS3AccessSecret),
						},
						GCSConfig: vfs.GCSFsConfig{
							BaseGCSFsConfig: sdk.BaseGCSFsConfig{
								Bucket:               portableGCSBucket,
								AutomaticCredentials: portableGCSAutoCredentials,
								StorageClass:         portableGCSStorageClass,
								KeyPrefix:            portableGCSKeyPrefix,
							},
							Credentials: kms.NewPlainSecret(portableGCSCredentials),
						},
						AzBlobConfig: vfs.AzBlobFsConfig{
							BaseAzBlobFsConfig: sdk.BaseAzBlobFsConfig{
								Container:           portableAzContainer,
								AccountName:         portableAzAccountName,
								Endpoint:            portableAzEndpoint,
								AccessTier:          portableAzAccessTier,
								KeyPrefix:           portableAzKeyPrefix,
								UseEmulator:         portableAzUseEmulator,
								UploadPartSize:      int64(portableAzULPartSize),
								UploadConcurrency:   portableAzULConcurrency,
								DownloadPartSize:    int64(portableAzDLPartSize),
								DownloadConcurrency: portableAzDLConcurrency,
							},
							AccountKey: kms.NewPlainSecret(portableAzAccountKey),
							SASURL:     kms.NewPlainSecret(portableAzSASURL),
						},
						CryptConfig: vfs.CryptFsConfig{
							Passphrase: kms.NewPlainSecret(portableCryptPassphrase),
						},
						SFTPConfig: vfs.SFTPFsConfig{
							BaseSFTPFsConfig: sdk.BaseSFTPFsConfig{
								Endpoint:                portableSFTPEndpoint,
								Username:                portableSFTPUsername,
								Fingerprints:            portableSFTPFingerprints,
								Prefix:                  portableSFTPPrefix,
								DisableCouncurrentReads: portableSFTPDisableConcurrentReads,
								BufferSize:              portableSFTPDBufferSize,
							},
							Password:   kms.NewPlainSecret(portableSFTPPassword),
							PrivateKey: kms.NewPlainSecret(portableSFTPPrivateKey),
						},
					},
				},
			}
			if err := service.StartPortableMode(portableSFTPDPort, portableFTPDPort, portableWebDAVPort, portableSSHCommands, portableAdvertiseService,
				portableAdvertiseCredentials, portableFTPSCert, portableFTPSKey, portableWebDAVCert, portableWebDAVKey); err == nil {
				service.Wait()
				if service.Error == nil {
					os.Exit(0)
				}
			}
			os.Exit(1)
		},
	}
)
func init() {
	version.AddFeature("+portable")

	portableCmd.Flags().StringVarP(&directoryToServe, "directory", "d", ".", `Path to the directory to serve.
This can be an absolute path or a path
relative to the current directory
`)
	portableCmd.Flags().StringVar(&portableStartDir, "start-directory", "/", `Alternate start directory.
This is a virtual path not a filesystem
path`)
	portableCmd.Flags().IntVarP(&portableSFTPDPort, "sftpd-port", "s", 0, `0 means a random unprivileged port,
< 0 disabled`)
	portableCmd.Flags().IntVar(&portableFTPDPort, "ftpd-port", -1, `0 means a random unprivileged port,
< 0 disabled`)
	portableCmd.Flags().IntVar(&portableWebDAVPort, "webdav-port", -1, `0 means a random unprivileged port,
< 0 disabled`)
	portableCmd.Flags().StringSliceVarP(&portableSSHCommands, "ssh-commands", "c", sftpd.GetDefaultSSHCommands(),
		`SSH commands to enable.
"*" means any supported SSH command
including scp
`)
	portableCmd.Flags().StringVarP(&portableUsername, "username", "u", "", `Leave empty to use an auto generated
value`)
	portableCmd.Flags().StringVarP(&portablePassword, "password", "p", "", `Leave empty to use an auto generated
value`)
	portableCmd.Flags().StringVarP(&portableLogFile, logFilePathFlag, "l", "", "Leave empty to disable logging")
	portableCmd.Flags().BoolVarP(&portableLogVerbose, logVerboseFlag, "v", false, "Enable verbose logs")
	portableCmd.Flags().BoolVar(&portableLogUTCTime, logUTCTimeFlag, false, "Use UTC time for logging")
	portableCmd.Flags().StringSliceVarP(&portablePublicKeys, "public-key", "k", []string{}, "")
	portableCmd.Flags().StringSliceVarP(&portablePermissions, "permissions", "g", []string{"list", "download"},
		`User's permissions. "*" means any
permission`)
	portableCmd.Flags().StringArrayVar(&portableAllowedPatterns, "allowed-patterns", []string{},
		`Allowed file patterns case insensitive.
The format is:
/dir::pattern1,pattern2.
For example: "/somedir::*.jpg,a*b?.png"`)
	portableCmd.Flags().StringArrayVar(&portableDeniedPatterns, "denied-patterns", []string{},
		`Denied file patterns case insensitive.
The format is:
/dir::pattern1,pattern2.
For example: "/somedir::*.jpg,a*b?.png"`)
	portableCmd.Flags().BoolVarP(&portableAdvertiseService, "advertise-service", "S", false,
		`Advertise configured services using
multicast DNS`)
	portableCmd.Flags().BoolVarP(&portableAdvertiseCredentials, "advertise-credentials", "C", false,
		`If the SFTP/FTP service is
advertised via multicast DNS, this
flag allows to put username/password
inside the advertised TXT record`)
	portableCmd.Flags().StringVarP(&portableFsProvider, "fs-provider", "f", "osfs", `osfs => local filesystem (legacy value: 0)
s3fs => AWS S3 compatible (legacy: 1)
gcsfs => Google Cloud Storage (legacy: 2)
azblobfs => Azure Blob Storage (legacy: 3)
cryptfs => Encrypted local filesystem (legacy: 4)
sftpfs => SFTP (legacy: 5)`)
	portableCmd.Flags().StringVar(&portableS3Bucket, "s3-bucket", "", "")
	portableCmd.Flags().StringVar(&portableS3Region, "s3-region", "", "")
	portableCmd.Flags().StringVar(&portableS3AccessKey, "s3-access-key", "", "")
	portableCmd.Flags().StringVar(&portableS3AccessSecret, "s3-access-secret", "", "")
	portableCmd.Flags().StringVar(&portableS3RoleARN, "s3-role-arn", "", "")
	portableCmd.Flags().StringVar(&portableS3Endpoint, "s3-endpoint", "", "")
	portableCmd.Flags().StringVar(&portableS3StorageClass, "s3-storage-class", "", "")
	portableCmd.Flags().StringVar(&portableS3ACL, "s3-acl", "", "")
	portableCmd.Flags().StringVar(&portableS3KeyPrefix, "s3-key-prefix", "", `Allows to restrict access to the
virtual folder identified by this
prefix and its contents`)
	portableCmd.Flags().IntVar(&portableS3ULPartSize, "s3-upload-part-size", 5, `The buffer size for multipart uploads
(MB)`)
	portableCmd.Flags().IntVar(&portableS3ULConcurrency, "s3-upload-concurrency", 2, `How many parts are uploaded in
parallel`)
	portableCmd.Flags().BoolVar(&portableS3ForcePathStyle, "s3-force-path-style", false, `Force path style bucket URL`)
	portableCmd.Flags().StringVar(&portableGCSBucket, "gcs-bucket", "", "")
	portableCmd.Flags().StringVar(&portableGCSStorageClass, "gcs-storage-class", "", "")
	portableCmd.Flags().StringVar(&portableGCSKeyPrefix, "gcs-key-prefix", "", `Allows to restrict access to the
virtual folder identified by this
prefix and its contents`)
	portableCmd.Flags().StringVar(&portableGCSCredentialsFile, "gcs-credentials-file", "", `Google Cloud Storage JSON credentials
file`)
	portableCmd.Flags().IntVar(&portableGCSAutoCredentials, "gcs-automatic-credentials", 1, `0 means explicit credentials using
a JSON credentials file, 1 automatic
`)
	portableCmd.Flags().StringVar(&portableFTPSCert, "ftpd-cert", "", "Path to the certificate file for FTPS")
	portableCmd.Flags().StringVar(&portableFTPSKey, "ftpd-key", "", "Path to the key file for FTPS")
	portableCmd.Flags().StringVar(&portableWebDAVCert, "webdav-cert", "", `Path to the certificate file for WebDAV
over HTTPS`)
	portableCmd.Flags().StringVar(&portableWebDAVKey, "webdav-key", "", `Path to the key file for WebDAV over
HTTPS`)
	portableCmd.Flags().StringVar(&portableAzContainer, "az-container", "", "")
	portableCmd.Flags().StringVar(&portableAzAccountName, "az-account-name", "", "")
	portableCmd.Flags().StringVar(&portableAzAccountKey, "az-account-key", "", "")
	portableCmd.Flags().StringVar(&portableAzSASURL, "az-sas-url", "", `Shared access signature URL`)
	portableCmd.Flags().StringVar(&portableAzEndpoint, "az-endpoint", "", `Leave empty to use the default:
"blob.core.windows.net"`)
	portableCmd.Flags().StringVar(&portableAzAccessTier, "az-access-tier", "", `Leave empty to use the default
container setting`)
	portableCmd.Flags().StringVar(&portableAzKeyPrefix, "az-key-prefix", "", `Allows to restrict access to the
virtual folder identified by this
prefix and its contents`)
	portableCmd.Flags().IntVar(&portableAzULPartSize, "az-upload-part-size", 5, `The buffer size for multipart uploads
(MB)`)
	portableCmd.Flags().IntVar(&portableAzULConcurrency, "az-upload-concurrency", 5, `How many parts are uploaded in
parallel`)
	portableCmd.Flags().IntVar(&portableAzDLPartSize, "az-download-part-size", 5, `The buffer size for multipart downloads
(MB)`)
	portableCmd.Flags().IntVar(&portableAzDLConcurrency, "az-download-concurrency", 5, `How many parts are downloaded in
parallel`)
	portableCmd.Flags().BoolVar(&portableAzUseEmulator, "az-use-emulator", false, "")
	portableCmd.Flags().StringVar(&portableCryptPassphrase, "crypto-passphrase", "", `Passphrase for encryption/decryption`)
	portableCmd.Flags().StringVar(&portableSFTPEndpoint, "sftp-endpoint", "", `SFTP endpoint as host:port for SFTP
provider`)
	portableCmd.Flags().StringVar(&portableSFTPUsername, "sftp-username", "", `SFTP user for SFTP provider`)
	portableCmd.Flags().StringVar(&portableSFTPPassword, "sftp-password", "", `SFTP password for SFTP provider`)
	portableCmd.Flags().StringVar(&portableSFTPPrivateKeyPath, "sftp-key-path", "", `SFTP private key path for SFTP provider`)
	portableCmd.Flags().StringSliceVar(&portableSFTPFingerprints, "sftp-fingerprints", []string{}, `SFTP fingerprints to verify remote host
key for SFTP provider`)
	portableCmd.Flags().StringVar(&portableSFTPPrefix, "sftp-prefix", "", `SFTP prefix allows restricting all
operations to a given path within the
remote SFTP server`)
	portableCmd.Flags().BoolVar(&portableSFTPDisableConcurrentReads, "sftp-disable-concurrent-reads", false, `Concurrent reads are safe to use and
disabling them will degrade performance.
Disable for read once servers`)
	portableCmd.Flags().Int64Var(&portableSFTPDBufferSize, "sftp-buffer-size", 0, `The size of the buffer (in MB) to use
for transfers. By enabling buffering,
the reads and writes, from/to the
remote SFTP server, are split in
multiple concurrent requests and this
allows data to be transferred at a
faster rate, over high latency networks,
by overlapping round-trip times`)
	rootCmd.AddCommand(portableCmd)
}

func parsePatternsFilesFilters() []sdk.PatternsFilter {
	var patterns []sdk.PatternsFilter
	for _, val := range portableAllowedPatterns {
		p, exts := getPatternsFilterValues(strings.TrimSpace(val))
		if p != "" {
			patterns = append(patterns, sdk.PatternsFilter{
				Path:            path.Clean(p),
				AllowedPatterns: exts,
				DeniedPatterns:  []string{},
			})
		}
	}
	for _, val := range portableDeniedPatterns {
		p, exts := getPatternsFilterValues(strings.TrimSpace(val))
		if p != "" {
			found := false
			for index, e := range patterns {
				if path.Clean(e.Path) == path.Clean(p) {
					patterns[index].DeniedPatterns = append(patterns[index].DeniedPatterns, exts...)
					found = true
					break
				}
			}
			if !found {
				patterns = append(patterns, sdk.PatternsFilter{
					Path:            path.Clean(p),
					AllowedPatterns: []string{},
					DeniedPatterns:  exts,
				})
			}
		}
	}
	return patterns
}

func getPatternsFilterValues(value string) (string, []string) {
	if strings.Contains(value, "::") {
		dirExts := strings.Split(value, "::")
		if len(dirExts) > 1 {
			dir := strings.TrimSpace(dirExts[0])
			exts := []string{}
			for _, e := range strings.Split(dirExts[1], ",") {
				cleanedExt := strings.TrimSpace(e)
				if cleanedExt != "" {
					exts = append(exts, cleanedExt)
				}
			}
			if dir != "" && len(exts) > 0 {
				return dir, exts
			}
		}
	}
	return "", nil
}
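Reading the two helpers together: each --allowed-patterns/--denied-patterns value is split on "::" into a virtual path and a comma-separated pattern list, and denied patterns are merged into any existing entry for the same cleaned path. A worked example, with an invented input value:

	dir, exts := getPatternsFilterValues("/somedir::*.jpg,a*b?.png")
	// dir  == "/somedir"
	// exts == []string{"*.jpg", "a*b?.png"}
	// A value without "::", or with an empty dir or pattern list,
	// yields "", nil and parsePatternsFilesFilters skips it.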

func getFileContents(name string) (string, error) {
	fi, err := os.Stat(name)
	if err != nil {
		return "", err
	}
	if fi.Size() > 1048576 {
		return "", fmt.Errorf("%#v is too big %v/1048576 bytes", name, fi.Size())
	}
	contents, err := os.ReadFile(name)
	if err != nil {
		return "", err
	}
	return string(contents), nil
}
cmd/portable_disabled.go (new file, 24 lines)
@@ -0,0 +1,24 @@
// Copyright (C) 2019-2022 Nicola Murino
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, version 3.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

//go:build noportable
// +build noportable

package cmd

import "github.com/drakkan/sftpgo/v2/version"

func init() {
	version.AddFeature("-portable")
}
cmd/reload_windows.go (new file, 49 lines)
@@ -0,0 +1,49 @@
// Copyright (C) 2019-2022 Nicola Murino
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, version 3.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

package cmd

import (
	"fmt"
	"os"

	"github.com/spf13/cobra"

	"github.com/drakkan/sftpgo/v2/service"
)

var (
	reloadCmd = &cobra.Command{
		Use:   "reload",
		Short: "Reload the SFTPGo Windows Service sending a \"paramchange\" request",
		Run: func(cmd *cobra.Command, args []string) {
			s := service.WindowsService{
				Service: service.Service{
					Shutdown: make(chan bool),
				},
			}
			err := s.Reload()
			if err != nil {
				fmt.Printf("Error sending reload signal: %v\r\n", err)
				os.Exit(1)
			} else {
				fmt.Printf("Reload signal sent!\r\n")
			}
		},
	}
)

func init() {
	serviceCmd.AddCommand(reloadCmd)
}
cmd/resetprovider.go (new file, 89 lines)
@@ -0,0 +1,89 @@
// Copyright (C) 2019-2022 Nicola Murino
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, version 3.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

package cmd

import (
	"bufio"
	"os"
	"strings"

	"github.com/rs/zerolog"
	"github.com/spf13/cobra"
	"github.com/spf13/viper"

	"github.com/drakkan/sftpgo/v2/config"
	"github.com/drakkan/sftpgo/v2/dataprovider"
	"github.com/drakkan/sftpgo/v2/logger"
	"github.com/drakkan/sftpgo/v2/util"
)

var (
	resetProviderForce bool
	resetProviderCmd   = &cobra.Command{
		Use:   "resetprovider",
		Short: "Reset the configured provider; any data will be lost",
		Long: `This command reads the data provider connection details from the specified
configuration file and resets the provider by deleting all data and schemas.
This command is not supported for the memory provider.

Please take a look at the usage below to customize the options.`,
		Run: func(cmd *cobra.Command, args []string) {
			logger.DisableLogger()
			logger.EnableConsoleLogger(zerolog.DebugLevel)
			configDir = util.CleanDirInput(configDir)
			err := config.LoadConfig(configDir, configFile)
			if err != nil {
				logger.WarnToConsole("Unable to initialize data provider, config load error: %v", err)
				os.Exit(1)
			}
			kmsConfig := config.GetKMSConfig()
			err = kmsConfig.Initialize()
			if err != nil {
				logger.ErrorToConsole("unable to initialize KMS: %v", err)
				os.Exit(1)
			}
			providerConf := config.GetProviderConf()
			if !resetProviderForce {
				logger.WarnToConsole("You are about to delete all the SFTPGo data for provider %#v, config file: %#v",
					providerConf.Driver, viper.ConfigFileUsed())
				logger.WarnToConsole("Are you sure? (Y/n)")
				reader := bufio.NewReader(os.Stdin)
				answer, err := reader.ReadString('\n')
				if err != nil {
					logger.ErrorToConsole("unable to read your answer: %v", err)
					os.Exit(1)
				}
				if strings.ToUpper(strings.TrimSpace(answer)) != "Y" {
					logger.InfoToConsole("command aborted")
					os.Exit(1)
				}
			}
			logger.InfoToConsole("Resetting provider: %#v, config file: %#v", providerConf.Driver, viper.ConfigFileUsed())
			err = dataprovider.ResetDatabase(providerConf, configDir)
			if err != nil {
				logger.WarnToConsole("Error resetting provider: %v", err)
				os.Exit(1)
			}
			logger.InfoToConsole("The data provider was successfully reset")
		},
	}
)

func init() {
	addConfigFlags(resetProviderCmd)
	resetProviderCmd.Flags().BoolVar(&resetProviderForce, "force", false, `reset the provider without asking for confirmation`)

	rootCmd.AddCommand(resetProviderCmd)
}
cmd/revertprovider.go (new file, 77 lines)
@@ -0,0 +1,77 @@
// Copyright (C) 2019-2022 Nicola Murino
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, version 3.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

package cmd

import (
	"os"

	"github.com/rs/zerolog"
	"github.com/spf13/cobra"
	"github.com/spf13/viper"

	"github.com/drakkan/sftpgo/v2/config"
	"github.com/drakkan/sftpgo/v2/dataprovider"
	"github.com/drakkan/sftpgo/v2/logger"
	"github.com/drakkan/sftpgo/v2/util"
)

var (
	revertProviderTargetVersion int
	revertProviderCmd           = &cobra.Command{
		Use:   "revertprovider",
		Short: "Revert the configured data provider to a previous version",
		Long: `This command reads the data provider connection details from the specified
configuration file and restores the provider schema and/or data to a previous version.
This command is not supported for the memory provider.

Please take a look at the usage below to customize the options.`,
		Run: func(cmd *cobra.Command, args []string) {
			logger.DisableLogger()
			logger.EnableConsoleLogger(zerolog.DebugLevel)
			if revertProviderTargetVersion != 15 {
				logger.WarnToConsole("Unsupported target version, 15 is the only supported one")
				os.Exit(1)
			}
			configDir = util.CleanDirInput(configDir)
			err := config.LoadConfig(configDir, configFile)
			if err != nil {
				logger.WarnToConsole("Unable to initialize data provider, config load error: %v", err)
				os.Exit(1)
			}
			kmsConfig := config.GetKMSConfig()
			err = kmsConfig.Initialize()
			if err != nil {
				logger.ErrorToConsole("unable to initialize KMS: %v", err)
				os.Exit(1)
			}
			providerConf := config.GetProviderConf()
			logger.InfoToConsole("Reverting provider: %#v config file: %#v target version %v", providerConf.Driver,
				viper.ConfigFileUsed(), revertProviderTargetVersion)
			err = dataprovider.RevertDatabase(providerConf, configDir, revertProviderTargetVersion)
			if err != nil {
				logger.WarnToConsole("Error reverting provider: %v", err)
				os.Exit(1)
			}
			logger.InfoToConsole("Data provider successfully reverted")
		},
	}
)

func init() {
	addConfigFlags(revertProviderCmd)
	revertProviderCmd.Flags().IntVar(&revertProviderTargetVersion, "to-version", 15, `15 means the version supported in v2.2.x`)

	rootCmd.AddCommand(revertProviderCmd)
}
cmd/root.go (319 lines)
@@ -1,65 +1,100 @@
|
||||
// Copyright (C) 2019-2022 Nicola Murino
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published
|
||||
// by the Free Software Foundation, version 3.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
// Package cmd provides Command Line Interface support
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
|
||||
"github.com/drakkan/sftpgo/config"
|
||||
"github.com/drakkan/sftpgo/utils"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/viper"
|
||||
|
||||
"github.com/drakkan/sftpgo/v2/version"
|
||||
)
|
||||
|
||||
const (
|
||||
logSender = "cmd"
|
||||
configDirFlag = "config-dir"
|
||||
configDirKey = "config_dir"
|
||||
configFileFlag = "config-file"
|
||||
configFileKey = "config_file"
|
||||
logFilePathFlag = "log-file-path"
|
||||
logFilePathKey = "log_file_path"
|
||||
logMaxSizeFlag = "log-max-size"
|
||||
logMaxSizeKey = "log_max_size"
|
||||
logMaxBackupFlag = "log-max-backups"
|
||||
logMaxBackupKey = "log_max_backups"
|
||||
logMaxAgeFlag = "log-max-age"
|
||||
logMaxAgeKey = "log_max_age"
|
||||
logCompressFlag = "log-compress"
|
||||
logCompressKey = "log_compress"
|
||||
logVerboseFlag = "log-verbose"
|
||||
logVerboseKey = "log_verbose"
|
||||
defaultConfigDir = "."
|
||||
defaultConfigName = config.DefaultConfigName
|
||||
defaultLogFile = "sftpgo.log"
|
||||
defaultLogMaxSize = 10
|
||||
defaultLogMaxBackup = 5
|
||||
defaultLogMaxAge = 28
|
||||
defaultLogCompress = false
|
||||
defaultLogVerbose = true
|
||||
configDirFlag = "config-dir"
|
||||
configDirKey = "config_dir"
|
||||
configFileFlag = "config-file"
|
||||
configFileKey = "config_file"
|
||||
logFilePathFlag = "log-file-path"
|
||||
logFilePathKey = "log_file_path"
|
||||
logMaxSizeFlag = "log-max-size"
|
||||
logMaxSizeKey = "log_max_size"
|
||||
logMaxBackupFlag = "log-max-backups"
|
||||
logMaxBackupKey = "log_max_backups"
|
||||
logMaxAgeFlag = "log-max-age"
|
||||
logMaxAgeKey = "log_max_age"
|
||||
logCompressFlag = "log-compress"
|
||||
logCompressKey = "log_compress"
|
||||
logVerboseFlag = "log-verbose"
|
||||
logVerboseKey = "log_verbose"
|
||||
logUTCTimeFlag = "log-utc-time"
|
||||
logUTCTimeKey = "log_utc_time"
|
||||
loadDataFromFlag = "loaddata-from"
|
||||
loadDataFromKey = "loaddata_from"
|
||||
loadDataModeFlag = "loaddata-mode"
|
||||
loadDataModeKey = "loaddata_mode"
|
||||
loadDataQuotaScanFlag = "loaddata-scan"
|
||||
loadDataQuotaScanKey = "loaddata_scan"
|
||||
loadDataCleanFlag = "loaddata-clean"
|
||||
loadDataCleanKey = "loaddata_clean"
|
||||
defaultConfigDir = "."
|
||||
defaultConfigFile = ""
|
||||
defaultLogFile = "sftpgo.log"
|
||||
defaultLogMaxSize = 10
|
||||
defaultLogMaxBackup = 5
|
||||
defaultLogMaxAge = 28
|
||||
defaultLogCompress = false
|
||||
defaultLogVerbose = true
|
||||
defaultLogUTCTime = false
|
||||
defaultLoadDataFrom = ""
|
||||
defaultLoadDataMode = 1
|
||||
defaultLoadDataQuotaScan = 0
|
||||
defaultLoadDataClean = false
|
||||
)
|
||||
|
||||
var (
|
||||
configDir string
|
||||
configFile string
|
||||
logFilePath string
|
||||
logMaxSize int
|
||||
logMaxBackups int
|
||||
logMaxAge int
|
||||
logCompress bool
|
||||
logVerbose bool
|
||||
configDir string
|
||||
configFile string
|
||||
logFilePath string
|
||||
logMaxSize int
|
||||
logMaxBackups int
|
||||
logMaxAge int
|
||||
logCompress bool
|
||||
logVerbose bool
|
||||
logUTCTime bool
|
||||
loadDataFrom string
|
||||
loadDataMode int
|
||||
loadDataQuotaScan int
|
||||
loadDataClean bool
|
||||
// used if awscontainer build tag is enabled
|
||||
disableAWSInstallationCode bool
|
||||
|
||||
rootCmd = &cobra.Command{
|
||||
Use: "sftpgo",
|
||||
Short: "Full featured and highly configurable SFTP server",
|
||||
Short: "Fully featured and highly configurable SFTP server",
|
||||
}
|
||||
)
|
||||
|
||||
func init() {
|
||||
version := utils.GetAppVersion()
|
||||
rootCmd.CompletionOptions.DisableDefaultCmd = true
|
||||
rootCmd.Flags().BoolP("version", "v", false, "")
|
||||
rootCmd.Version = version.GetVersionAsString()
|
||||
rootCmd.SetVersionTemplate(`{{printf "SFTPGo version: "}}{{printf "%s" .Version}}
|
||||
rootCmd.Version = version.GetAsString()
|
||||
rootCmd.SetVersionTemplate(`{{printf "SFTPGo "}}{{printf "%s" .Version}}
|
||||
`)
|
||||
}
|
||||
|
||||
@@ -72,97 +107,155 @@ func Execute() {
|
||||
}
|
||||
}
|
||||
|
||||
func addServeFlags(cmd *cobra.Command) {
|
||||
func addConfigFlags(cmd *cobra.Command) {
|
||||
viper.SetDefault(configDirKey, defaultConfigDir)
|
||||
viper.BindEnv(configDirKey, "SFTPGO_CONFIG_DIR")
|
||||
viper.BindEnv(configDirKey, "SFTPGO_CONFIG_DIR") //nolint:errcheck // err is not nil only if the key to bind is missing
|
||||
cmd.Flags().StringVarP(&configDir, configDirFlag, "c", viper.GetString(configDirKey),
|
||||
"Location for SFTPGo config dir. This directory should contain the \"sftpgo\" configuration file or the configured "+
|
||||
"config-file and it is used as the base for files with a relative path (eg. the private keys for the SFTP server, "+
|
||||
"the SQLite database if you use SQLite as data provider). This flag can be set using SFTPGO_CONFIG_DIR env var too.")
|
||||
viper.BindPFlag(configDirKey, cmd.Flags().Lookup(configDirFlag))
|
||||
`Location for the config dir. This directory
|
||||
is used as the base for files with a relative
|
||||
path, eg. the private keys for the SFTP
|
||||
server or the SQLite database if you use
|
||||
SQLite as data provider.
|
||||
The configuration file, if not explicitly set,
|
||||
is looked for in this dir. We support reading
|
||||
from JSON, TOML, YAML, HCL, envfile and Java
|
||||
properties config files. The default config
|
||||
file name is "sftpgo" and therefore
|
||||
"sftpgo.json", "sftpgo.yaml" and so on are
|
||||
searched.
|
||||
This flag can be set using SFTPGO_CONFIG_DIR
|
||||
env var too.`)
|
||||
viper.BindPFlag(configDirKey, cmd.Flags().Lookup(configDirFlag)) //nolint:errcheck
|
||||
|
||||
viper.SetDefault(configFileKey, defaultConfigName)
|
||||
viper.BindEnv(configFileKey, "SFTPGO_CONFIG_FILE")
|
||||
cmd.Flags().StringVarP(&configFile, configFileFlag, "f", viper.GetString(configFileKey),
|
||||
"Name for SFTPGo configuration file. It must be the name of a file stored in config-dir not the absolute path to the "+
|
||||
"configuration file. The specified file name must have no extension we automatically load JSON, YAML, TOML, HCL and "+
|
||||
"Java properties. Therefore if you set \"sftpgo\" then \"sftpgo.json\", \"sftpgo.yaml\" and so on are searched. "+
|
||||
"This flag can be set using SFTPGO_CONFIG_FILE env var too.")
|
||||
viper.BindPFlag(configFileKey, cmd.Flags().Lookup(configFileFlag))
|
||||
viper.SetDefault(configFileKey, defaultConfigFile)
|
||||
viper.BindEnv(configFileKey, "SFTPGO_CONFIG_FILE") //nolint:errcheck
|
||||
cmd.Flags().StringVar(&configFile, configFileFlag, viper.GetString(configFileKey),
|
||||
`Path to SFTPGo configuration file.
|
||||
This flag explicitly defines the path, name
|
||||
and extension of the config file. If must be
|
||||
an absolute path or a path relative to the
|
||||
configuration directory. The specified file
|
||||
name must have a supported extension (JSON,
|
||||
YAML, TOML, HCL or Java properties).
|
||||
This flag can be set using SFTPGO_CONFIG_FILE
|
||||
env var too.`)
|
||||
viper.BindPFlag(configFileKey, cmd.Flags().Lookup(configFileFlag)) //nolint:errcheck
|
||||
}
|
||||
|
||||
func addBaseLoadDataFlags(cmd *cobra.Command) {
|
||||
viper.SetDefault(loadDataFromKey, defaultLoadDataFrom)
|
||||
viper.BindEnv(loadDataFromKey, "SFTPGO_LOADDATA_FROM") //nolint:errcheck
|
||||
cmd.Flags().StringVar(&loadDataFrom, loadDataFromFlag, viper.GetString(loadDataFromKey),
|
||||
`Load users and folders from this file.
|
||||
The file must be specified as absolute path
|
||||
and it must contain a backup obtained using
|
||||
the "dumpdata" REST API or compatible content.
|
||||
This flag can be set using SFTPGO_LOADDATA_FROM
|
||||
env var too.
|
||||
`)
|
||||
viper.BindPFlag(loadDataFromKey, cmd.Flags().Lookup(loadDataFromFlag)) //nolint:errcheck
|
||||
|
||||
viper.SetDefault(loadDataModeKey, defaultLoadDataMode)
|
||||
viper.BindEnv(loadDataModeKey, "SFTPGO_LOADDATA_MODE") //nolint:errcheck
|
||||
cmd.Flags().IntVar(&loadDataMode, loadDataModeFlag, viper.GetInt(loadDataModeKey),
|
||||
`Restore mode for data to load:
|
||||
0 - new users are added, existing users are
|
||||
updated
|
||||
1 - New users are added, existing users are
|
||||
not modified
|
||||
This flag can be set using SFTPGO_LOADDATA_MODE
|
||||
env var too.
|
||||
`)
|
||||
viper.BindPFlag(loadDataModeKey, cmd.Flags().Lookup(loadDataModeFlag)) //nolint:errcheck
|
||||
|
||||
viper.SetDefault(loadDataCleanKey, defaultLoadDataClean)
|
||||
viper.BindEnv(loadDataCleanKey, "SFTPGO_LOADDATA_CLEAN") //nolint:errcheck
|
||||
cmd.Flags().BoolVar(&loadDataClean, loadDataCleanFlag, viper.GetBool(loadDataCleanKey),
|
||||
`Determine if the loaddata-from file should
|
||||
be removed after a successful load. This flag
|
||||
can be set using SFTPGO_LOADDATA_CLEAN env var
|
||||
too. (default "false")
|
||||
`)
|
||||
viper.BindPFlag(loadDataCleanKey, cmd.Flags().Lookup(loadDataCleanFlag)) //nolint:errcheck
|
||||
}
|
||||
|
||||
func addServeFlags(cmd *cobra.Command) {
|
||||
addConfigFlags(cmd)
|
||||
|
||||
viper.SetDefault(logFilePathKey, defaultLogFile)
|
||||
viper.BindEnv(logFilePathKey, "SFTPGO_LOG_FILE_PATH")
|
||||
viper.BindEnv(logFilePathKey, "SFTPGO_LOG_FILE_PATH") //nolint:errcheck
|
||||
cmd.Flags().StringVarP(&logFilePath, logFilePathFlag, "l", viper.GetString(logFilePathKey),
|
||||
"Location for the log file. Leave empty to write logs to the standard output. This flag can be set using SFTPGO_LOG_FILE_PATH "+
|
||||
"env var too.")
|
||||
viper.BindPFlag(logFilePathKey, cmd.Flags().Lookup(logFilePathFlag))
|
||||
`Location for the log file. Leave empty to write
|
||||
logs to the standard output. This flag can be
|
||||
set using SFTPGO_LOG_FILE_PATH env var too.
|
||||
`)
|
||||
viper.BindPFlag(logFilePathKey, cmd.Flags().Lookup(logFilePathFlag)) //nolint:errcheck
|
||||
|
||||
viper.SetDefault(logMaxSizeKey, defaultLogMaxSize)
|
||||
viper.BindEnv(logMaxSizeKey, "SFTPGO_LOG_MAX_SIZE")
|
||||
viper.BindEnv(logMaxSizeKey, "SFTPGO_LOG_MAX_SIZE") //nolint:errcheck
|
||||
cmd.Flags().IntVarP(&logMaxSize, logMaxSizeFlag, "s", viper.GetInt(logMaxSizeKey),
|
||||
"Maximum size in megabytes of the log file before it gets rotated. This flag can be set using SFTPGO_LOG_MAX_SIZE "+
|
||||
"env var too. It is unused if log-file-path is empty.")
|
||||
viper.BindPFlag(logMaxSizeKey, cmd.Flags().Lookup(logMaxSizeFlag))
|
||||
`Maximum size in megabytes of the log file
|
||||
before it gets rotated. This flag can be set
|
||||
using SFTPGO_LOG_MAX_SIZE env var too. It is
|
||||
unused if log-file-path is empty.
|
||||
`)
|
||||
viper.BindPFlag(logMaxSizeKey, cmd.Flags().Lookup(logMaxSizeFlag)) //nolint:errcheck
|
||||
|
||||
viper.SetDefault(logMaxBackupKey, defaultLogMaxBackup)
|
||||
	viper.BindEnv(logMaxBackupKey, "SFTPGO_LOG_MAX_BACKUPS")
	viper.BindEnv(logMaxBackupKey, "SFTPGO_LOG_MAX_BACKUPS") //nolint:errcheck
	cmd.Flags().IntVarP(&logMaxBackups, "log-max-backups", "b", viper.GetInt(logMaxBackupKey),
		"Maximum number of old log files to retain. This flag can be set using SFTPGO_LOG_MAX_BACKUPS env var too. "+
			"It is unused if log-file-path is empty.")
	viper.BindPFlag(logMaxBackupKey, cmd.Flags().Lookup(logMaxBackupFlag))
		`Maximum number of old log files to retain.
This flag can be set using SFTPGO_LOG_MAX_BACKUPS
env var too. It is unused if log-file-path is
empty.`)
	viper.BindPFlag(logMaxBackupKey, cmd.Flags().Lookup(logMaxBackupFlag)) //nolint:errcheck

	viper.SetDefault(logMaxAgeKey, defaultLogMaxAge)
	viper.BindEnv(logMaxAgeKey, "SFTPGO_LOG_MAX_AGE")
	viper.BindEnv(logMaxAgeKey, "SFTPGO_LOG_MAX_AGE") //nolint:errcheck
	cmd.Flags().IntVarP(&logMaxAge, "log-max-age", "a", viper.GetInt(logMaxAgeKey),
		"Maximum number of days to retain old log files. This flag can be set using SFTPGO_LOG_MAX_AGE env var too. "+
			"It is unused if log-file-path is empty.")
	viper.BindPFlag(logMaxAgeKey, cmd.Flags().Lookup(logMaxAgeFlag))
		`Maximum number of days to retain old log files.
This flag can be set using SFTPGO_LOG_MAX_AGE env
var too. It is unused if log-file-path is empty.
`)
	viper.BindPFlag(logMaxAgeKey, cmd.Flags().Lookup(logMaxAgeFlag)) //nolint:errcheck

	viper.SetDefault(logCompressKey, defaultLogCompress)
	viper.BindEnv(logCompressKey, "SFTPGO_LOG_COMPRESS")
	cmd.Flags().BoolVarP(&logCompress, logCompressFlag, "z", viper.GetBool(logCompressKey), "Determine if the rotated "+
		"log files should be compressed using gzip. This flag can be set using SFTPGO_LOG_COMPRESS env var too. "+
		"It is unused if log-file-path is empty.")
	viper.BindPFlag(logCompressKey, cmd.Flags().Lookup(logCompressFlag))
	viper.BindEnv(logCompressKey, "SFTPGO_LOG_COMPRESS") //nolint:errcheck
	cmd.Flags().BoolVarP(&logCompress, logCompressFlag, "z", viper.GetBool(logCompressKey),
		`Determine if the rotated log files
should be compressed using gzip. This flag can
be set using SFTPGO_LOG_COMPRESS env var too.
It is unused if log-file-path is empty.
`)
	viper.BindPFlag(logCompressKey, cmd.Flags().Lookup(logCompressFlag)) //nolint:errcheck

	viper.SetDefault(logVerboseKey, defaultLogVerbose)
	viper.BindEnv(logVerboseKey, "SFTPGO_LOG_VERBOSE")
	cmd.Flags().BoolVarP(&logVerbose, logVerboseFlag, "v", viper.GetBool(logVerboseKey), "Enable verbose logs. "+
		"This flag can be set using SFTPGO_LOG_VERBOSE env var too.")
	viper.BindPFlag(logVerboseKey, cmd.Flags().Lookup(logVerboseFlag))
}
	viper.BindEnv(logVerboseKey, "SFTPGO_LOG_VERBOSE") //nolint:errcheck
	cmd.Flags().BoolVarP(&logVerbose, logVerboseFlag, "v", viper.GetBool(logVerboseKey),
		`Enable verbose logs. This flag can be set
using SFTPGO_LOG_VERBOSE env var too.
`)
	viper.BindPFlag(logVerboseKey, cmd.Flags().Lookup(logVerboseFlag)) //nolint:errcheck

func getCustomServeFlags() []string {
	result := []string{}
	if configDir != defaultConfigDir {
		result = append(result, "--"+configDirFlag)
		result = append(result, configDir)
	}
	if configFile != defaultConfigName {
		result = append(result, "--"+configFileFlag)
		result = append(result, configFile)
	}
	if logFilePath != defaultLogFile {
		result = append(result, "--"+logFilePathFlag)
		result = append(result, logFilePath)
	}
	if logMaxSize != defaultLogMaxSize {
		result = append(result, "--"+logMaxSizeFlag)
		result = append(result, strconv.Itoa(logMaxSize))
	}
	if logMaxBackups != defaultLogMaxBackup {
		result = append(result, "--"+logMaxBackupFlag)
		result = append(result, strconv.Itoa(logMaxBackups))
	}
	if logMaxAge != defaultLogMaxAge {
		result = append(result, "--"+logMaxAgeFlag)
		result = append(result, strconv.Itoa(logMaxAge))
	}
	if logVerbose != defaultLogVerbose {
		result = append(result, "--"+logVerboseFlag+"=false")
	}
	if logCompress != defaultLogCompress {
		result = append(result, "--"+logCompressFlag+"=true")
	}
	return result
	viper.SetDefault(logUTCTimeKey, defaultLogUTCTime)
	viper.BindEnv(logUTCTimeKey, "SFTPGO_LOG_UTC_TIME") //nolint:errcheck
	cmd.Flags().BoolVar(&logUTCTime, logUTCTimeFlag, viper.GetBool(logUTCTimeKey),
		`Use UTC time for logging. This flag can be set
using SFTPGO_LOG_UTC_TIME env var too.
`)
	viper.BindPFlag(logUTCTimeKey, cmd.Flags().Lookup(logUTCTimeFlag)) //nolint:errcheck

	addBaseLoadDataFlags(cmd)

	viper.SetDefault(loadDataQuotaScanKey, defaultLoadDataQuotaScan)
	viper.BindEnv(loadDataQuotaScanKey, "SFTPGO_LOADDATA_QUOTA_SCAN") //nolint:errcheck
	cmd.Flags().IntVar(&loadDataQuotaScan, loadDataQuotaScanFlag, viper.GetInt(loadDataQuotaScanKey),
		`Quota scan mode after data load:
0 - no quota scan
1 - scan quota
2 - scan quota if the user has quota restrictions
This flag can be set using SFTPGO_LOADDATA_QUOTA_SCAN
env var too.
(default 0)`)
	viper.BindPFlag(loadDataQuotaScanKey, cmd.Flags().Lookup(loadDataQuotaScanFlag)) //nolint:errcheck
}
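
The SetDefault / BindEnv / flag definition / BindPFlag sequence repeated above is the standard viper+cobra wiring that lets a single option be driven by a config default, an environment variable or a command line flag, in increasing order of precedence. A minimal self-contained sketch of the same pattern (the option name, env var and default value here are invented for illustration, not part of SFTPGo):

package main

import (
	"fmt"

	"github.com/spf13/cobra"
	"github.com/spf13/viper"
)

var exampleOpt int

func main() {
	cmd := &cobra.Command{
		Use: "demo",
		Run: func(cmd *cobra.Command, args []string) {
			// viper resolves the value: explicit flag, then env var, then default
			fmt.Println("example-opt:", viper.GetInt("example_opt"))
		},
	}
	viper.SetDefault("example_opt", 5)               // lowest precedence
	viper.BindEnv("example_opt", "DEMO_EXAMPLE_OPT") //nolint:errcheck
	cmd.Flags().IntVar(&exampleOpt, "example-opt", viper.GetInt("example_opt"), "demo option")
	viper.BindPFlag("example_opt", cmd.Flags().Lookup("example-opt")) //nolint:errcheck
	cmd.Execute()                                                     //nolint:errcheck
}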
49
cmd/rotatelogs_windows.go
Normal file
@@ -0,0 +1,49 @@
// Copyright (C) 2019-2022 Nicola Murino
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, version 3.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

package cmd

import (
	"fmt"
	"os"

	"github.com/spf13/cobra"

	"github.com/drakkan/sftpgo/v2/service"
)

var (
	rotateLogCmd = &cobra.Command{
		Use:   "rotatelogs",
		Short: "Signal to the running service to rotate the logs",
		Run: func(cmd *cobra.Command, args []string) {
			s := service.WindowsService{
				Service: service.Service{
					Shutdown: make(chan bool),
				},
			}
			err := s.RotateLogFile()
			if err != nil {
				fmt.Printf("Error sending rotate log file signal to the service: %v\r\n", err)
				os.Exit(1)
			} else {
				fmt.Printf("Rotate log file signal sent!\r\n")
			}
		},
	}
)

func init() {
	serviceCmd.AddCommand(rotateLogCmd)
}
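
Assuming the Windows service is installed and running, the new command can be invoked as:

$ sftpgo service rotatelogs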
57
cmd/serve.go
@@ -1,34 +1,62 @@
// Copyright (C) 2019-2022 Nicola Murino
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, version 3.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

package cmd

import (
	"github.com/drakkan/sftpgo/service"
	"os"

	"github.com/spf13/cobra"

	"github.com/drakkan/sftpgo/v2/service"
	"github.com/drakkan/sftpgo/v2/util"
)

var (
	serveCmd = &cobra.Command{
		Use:   "serve",
		Short: "Start the SFTP Server",
		Long: `To start the SFTPGo with the default values for the command line flags simply use:
		Short: "Start the SFTPGo service",
		Long: `To start the SFTPGo with the default values for the command line flags simply
use:

sftpgo serve
$ sftpgo serve

Please take a look at the usage below to customize the startup options`,
		Run: func(cmd *cobra.Command, args []string) {
			service := service.Service{
				ConfigDir:     configDir,
				ConfigFile:    configFile,
				LogFilePath:   logFilePath,
				LogMaxSize:    logMaxSize,
				LogMaxBackups: logMaxBackups,
				LogMaxAge:     logMaxAge,
				LogCompress:   logCompress,
				LogVerbose:    logVerbose,
				Shutdown:      make(chan bool),
				ConfigDir:         util.CleanDirInput(configDir),
				ConfigFile:        configFile,
				LogFilePath:       logFilePath,
				LogMaxSize:        logMaxSize,
				LogMaxBackups:     logMaxBackups,
				LogMaxAge:         logMaxAge,
				LogCompress:       logCompress,
				LogVerbose:        logVerbose,
				LogUTCTime:        logUTCTime,
				LoadDataFrom:      loadDataFrom,
				LoadDataMode:      loadDataMode,
				LoadDataQuotaScan: loadDataQuotaScan,
				LoadDataClean:     loadDataClean,
				Shutdown:          make(chan bool),
			}
			if err := service.Start(); err == nil {
			if err := service.Start(disableAWSInstallationCode); err == nil {
				service.Wait()
				if service.Error == nil {
					os.Exit(0)
				}
			}
			os.Exit(1)
		},
	}
)
@@ -36,4 +64,5 @@ Please take a look at the usage below to customize the startup options`,
func init() {
	rootCmd.AddCommand(serveCmd)
	addServeFlags(serveCmd)
	addAWSContainerFlags(serveCmd)
}
@@ -1,3 +1,17 @@
// Copyright (C) 2019-2022 Nicola Murino
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, version 3.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

package cmd

import (
@@ -7,7 +21,7 @@ import (
var (
	serviceCmd = &cobra.Command{
		Use:   "service",
		Short: "Install, Uninstall, Start, Stop and retrieve status for SFTPGo Windows Service",
		Short: "Manage the SFTPGo Windows Service",
	}
)
68
cmd/smtptest.go
Normal file
@@ -0,0 +1,68 @@
// Copyright (C) 2019-2022 Nicola Murino
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, version 3.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

package cmd

import (
	"os"

	"github.com/rs/zerolog"
	"github.com/spf13/cobra"

	"github.com/drakkan/sftpgo/v2/config"
	"github.com/drakkan/sftpgo/v2/logger"
	"github.com/drakkan/sftpgo/v2/smtp"
	"github.com/drakkan/sftpgo/v2/util"
)

var (
	smtpTestRecipient string
	smtpTestCmd       = &cobra.Command{
		Use:   "smtptest",
		Short: "Test the SMTP configuration",
		Long: `SFTPGo will try to send a test email to the specified recipient.
If the SMTP configuration is correct you should receive this email.`,
		Run: func(cmd *cobra.Command, args []string) {
			logger.DisableLogger()
			logger.EnableConsoleLogger(zerolog.DebugLevel)
			configDir = util.CleanDirInput(configDir)
			err := config.LoadConfig(configDir, configFile)
			if err != nil {
				logger.WarnToConsole("Unable to initialize data provider, config load error: %v", err)
				os.Exit(1)
			}
			smtpConfig := config.GetSMTPConfig()
			err = smtpConfig.Initialize(configDir)
			if err != nil {
				logger.ErrorToConsole("unable to initialize SMTP configuration: %v", err)
				os.Exit(1)
			}
			err = smtp.SendEmail(smtpTestRecipient, "SFTPGo - Testing Email Settings", "It appears your SFTPGo email is setup correctly!",
				smtp.EmailContentTypeTextPlain)
			if err != nil {
				logger.WarnToConsole("Error sending email: %v", err)
				os.Exit(1)
			}
			logger.InfoToConsole("No errors were reported while sending an email. Please check your inbox to make sure.")
		},
	}
)

func init() {
	addConfigFlags(smtpTestCmd)
	smtpTestCmd.Flags().StringVar(&smtpTestRecipient, "recipient", "", `email address to send the test e-mail to`)
	smtpTestCmd.MarkFlagRequired("recipient") //nolint:errcheck

	rootCmd.AddCommand(smtpTestCmd)
}
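
A typical check, assuming an SMTP section is configured and using a placeholder recipient address:

$ sftpgo smtptest --recipient admin@example.com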
@@ -1,17 +1,39 @@
// Copyright (C) 2019-2022 Nicola Murino
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, version 3.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

package cmd

import (
	"fmt"
	"os"
	"path/filepath"

	"github.com/drakkan/sftpgo/service"
	"github.com/spf13/cobra"

	"github.com/drakkan/sftpgo/v2/service"
	"github.com/drakkan/sftpgo/v2/util"
)

var (
	startCmd = &cobra.Command{
		Use:   "start",
		Short: "Start SFTPGo Windows Service",
		Short: "Start the SFTPGo Windows Service",
		Run: func(cmd *cobra.Command, args []string) {
			configDir = util.CleanDirInput(configDir)
			if !filepath.IsAbs(logFilePath) && util.IsFileInputValid(logFilePath) {
				logFilePath = filepath.Join(configDir, logFilePath)
			}
			s := service.Service{
				ConfigDir:  configDir,
				ConfigFile: configFile,
@@ -21,6 +43,7 @@ var (
				LogMaxAge:   logMaxAge,
				LogCompress: logCompress,
				LogVerbose:  logVerbose,
				LogUTCTime:  logUTCTime,
				Shutdown:    make(chan bool),
			}
			winService := service.WindowsService{
@@ -29,6 +52,7 @@ var (
			err := winService.RunService()
			if err != nil {
				fmt.Printf("Error starting service: %v\r\n", err)
				os.Exit(1)
			} else {
				fmt.Printf("Service started!\r\n")
			}
216
cmd/startsubsys.go
Normal file
@@ -0,0 +1,216 @@
// Copyright (C) 2019-2022 Nicola Murino
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, version 3.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

package cmd

import (
	"io"
	"os"
	"os/user"
	"path/filepath"

	"github.com/rs/xid"
	"github.com/rs/zerolog"
	"github.com/spf13/cobra"
	"github.com/spf13/viper"

	"github.com/drakkan/sftpgo/v2/common"
	"github.com/drakkan/sftpgo/v2/config"
	"github.com/drakkan/sftpgo/v2/dataprovider"
	"github.com/drakkan/sftpgo/v2/logger"
	"github.com/drakkan/sftpgo/v2/plugin"
	"github.com/drakkan/sftpgo/v2/sftpd"
	"github.com/drakkan/sftpgo/v2/version"
)

var (
	logJournalD     = false
	preserveHomeDir = false
	baseHomeDir     = ""
	subsystemCmd    = &cobra.Command{
		Use:   "startsubsys",
		Short: "Use sftpgo as SFTP file transfer subsystem",
		Long: `In this mode SFTPGo speaks the server side of SFTP protocol to stdout and
expects client requests from stdin.
This mode is not intended to be called directly, but from sshd using the
Subsystem option.
For example adding a line like this one in "/etc/ssh/sshd_config":

Subsystem	sftp	sftpgo startsubsys

Command-line flags should be specified in the Subsystem declaration.
`,
		Run: func(cmd *cobra.Command, args []string) {
			logSender := "startsubsys"
			connectionID := xid.New().String()
			logLevel := zerolog.DebugLevel
			if !logVerbose {
				logLevel = zerolog.InfoLevel
			}
			logger.SetLogTime(logUTCTime)
			if logJournalD {
				logger.InitJournalDLogger(logLevel)
			} else {
				logger.InitStdErrLogger(logLevel)
			}
			osUser, err := user.Current()
			if err != nil {
				logger.Error(logSender, connectionID, "unable to get the current user: %v", err)
				os.Exit(1)
			}
			username := osUser.Username
			homedir := osUser.HomeDir
			logger.Info(logSender, connectionID, "starting SFTPGo %v as subsystem, user %#v home dir %#v config dir %#v base home dir %#v",
				version.Get(), username, homedir, configDir, baseHomeDir)
			err = config.LoadConfig(configDir, configFile)
			if err != nil {
				logger.Error(logSender, connectionID, "unable to load configuration: %v", err)
				os.Exit(1)
			}
			dataProviderConf := config.GetProviderConf()
			commonConfig := config.GetCommonConfig()
			// idle connections are managed externally
			commonConfig.IdleTimeout = 0
			config.SetCommonConfig(commonConfig)
			if err := common.Initialize(config.GetCommonConfig(), dataProviderConf.GetShared()); err != nil {
				logger.Error(logSender, connectionID, "%v", err)
				os.Exit(1)
			}
			kmsConfig := config.GetKMSConfig()
			if err := kmsConfig.Initialize(); err != nil {
				logger.Error(logSender, connectionID, "unable to initialize KMS: %v", err)
				os.Exit(1)
			}
			mfaConfig := config.GetMFAConfig()
			err = mfaConfig.Initialize()
			if err != nil {
				logger.Error(logSender, "", "unable to initialize MFA: %v", err)
				os.Exit(1)
			}
			if err := plugin.Initialize(config.GetPluginsConfig(), logVerbose); err != nil {
				logger.Error(logSender, connectionID, "unable to initialize plugin system: %v", err)
				os.Exit(1)
			}
			smtpConfig := config.GetSMTPConfig()
			err = smtpConfig.Initialize(configDir)
			if err != nil {
				logger.Error(logSender, connectionID, "unable to initialize SMTP configuration: %v", err)
				os.Exit(1)
			}
			if dataProviderConf.Driver == dataprovider.SQLiteDataProviderName || dataProviderConf.Driver == dataprovider.BoltDataProviderName {
				logger.Debug(logSender, connectionID, "data provider %#v not supported in subsystem mode, using %#v provider",
					dataProviderConf.Driver, dataprovider.MemoryDataProviderName)
				dataProviderConf.Driver = dataprovider.MemoryDataProviderName
				dataProviderConf.Name = ""
			}
			config.SetProviderConf(dataProviderConf)
			err = dataprovider.Initialize(dataProviderConf, configDir, false)
			if err != nil {
				logger.Error(logSender, connectionID, "unable to initialize the data provider: %v", err)
				os.Exit(1)
			}
			httpConfig := config.GetHTTPConfig()
			if err := httpConfig.Initialize(configDir); err != nil {
				logger.Error(logSender, connectionID, "unable to initialize http client: %v", err)
				os.Exit(1)
			}
			commandConfig := config.GetCommandConfig()
			if err := commandConfig.Initialize(); err != nil {
				logger.Error(logSender, connectionID, "unable to initialize commands configuration: %v", err)
				os.Exit(1)
			}
			user, err := dataprovider.UserExists(username)
			if err == nil {
				if user.HomeDir != filepath.Clean(homedir) && !preserveHomeDir {
					// update the user
					user.HomeDir = filepath.Clean(homedir)
					err = dataprovider.UpdateUser(&user, dataprovider.ActionExecutorSystem, "")
					if err != nil {
						logger.Error(logSender, connectionID, "unable to update user %#v: %v", username, err)
						os.Exit(1)
					}
				}
			} else {
				user.Username = username
				if baseHomeDir != "" && filepath.IsAbs(baseHomeDir) {
					user.HomeDir = filepath.Join(baseHomeDir, username)
				} else {
					user.HomeDir = filepath.Clean(homedir)
				}
				logger.Debug(logSender, connectionID, "home dir for new user %#v", user.HomeDir)
				user.Password = connectionID
				user.Permissions = make(map[string][]string)
				user.Permissions["/"] = []string{dataprovider.PermAny}
				err = dataprovider.AddUser(&user, dataprovider.ActionExecutorSystem, "")
				if err != nil {
					logger.Error(logSender, connectionID, "unable to add user %#v: %v", username, err)
					os.Exit(1)
				}
			}
			err = user.LoadAndApplyGroupSettings()
			if err != nil {
				logger.Error(logSender, connectionID, "unable to apply group settings for user %#v: %v", username, err)
				os.Exit(1)
			}
			err = sftpd.ServeSubSystemConnection(&user, connectionID, os.Stdin, os.Stdout)
			if err != nil && err != io.EOF {
				logger.Warn(logSender, connectionID, "serving subsystem finished with error: %v", err)
				os.Exit(1)
			}
			logger.Info(logSender, connectionID, "serving subsystem finished")
			plugin.Handler.Cleanup()
			os.Exit(0)
		},
	}
)

func init() {
	subsystemCmd.Flags().BoolVarP(&preserveHomeDir, "preserve-home", "p", false, `If the user already exists, the existing home
directory will not be changed`)
	subsystemCmd.Flags().StringVarP(&baseHomeDir, "base-home-dir", "d", "", `If the user does not exist specify an alternate
starting directory. The home directory for a new
user will be:

[base-home-dir]/[username]

base-home-dir must be an absolute path.`)
	subsystemCmd.Flags().BoolVarP(&logJournalD, "log-to-journald", "j", false, `Send logs to journald. Only available on Linux.
Use:

$ journalctl -o verbose -f

To see full logs.
If not set, the logs will be sent to the standard
error`)

	addConfigFlags(subsystemCmd)

	viper.SetDefault(logVerboseKey, defaultLogVerbose)
	viper.BindEnv(logVerboseKey, "SFTPGO_LOG_VERBOSE") //nolint:errcheck
	subsystemCmd.Flags().BoolVarP(&logVerbose, logVerboseFlag, "v", viper.GetBool(logVerboseKey),
		`Enable verbose logs. This flag can be set
using SFTPGO_LOG_VERBOSE env var too.
`)
	viper.BindPFlag(logVerboseKey, subsystemCmd.Flags().Lookup(logVerboseFlag)) //nolint:errcheck

	viper.SetDefault(logUTCTimeKey, defaultLogUTCTime)
	viper.BindEnv(logUTCTimeKey, "SFTPGO_LOG_UTC_TIME") //nolint:errcheck
	subsystemCmd.Flags().BoolVar(&logUTCTime, logUTCTimeFlag, viper.GetBool(logUTCTimeKey),
		`Use UTC time for logging. This flag can be set
using SFTPGO_LOG_UTC_TIME env var too.
`)
	viper.BindPFlag(logUTCTimeKey, subsystemCmd.Flags().Lookup(logUTCTimeFlag)) //nolint:errcheck

	rootCmd.AddCommand(subsystemCmd)
}
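
Since the command help above says flags go in the Subsystem declaration, an illustrative sshd_config line that enables journald logging and verbose logs (using the -j and -v shorthands defined in the init above) would be:

Subsystem	sftp	sftpgo startsubsys -j -v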
@@ -1,10 +1,26 @@
// Copyright (C) 2019-2022 Nicola Murino
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, version 3.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

package cmd

import (
	"fmt"
	"os"

	"github.com/drakkan/sftpgo/service"
	"github.com/spf13/cobra"

	"github.com/drakkan/sftpgo/v2/service"
)

var (
@@ -20,6 +36,7 @@ var (
			status, err := s.Status()
			if err != nil {
				fmt.Printf("Error querying service status: %v\r\n", err)
				os.Exit(1)
			} else {
				fmt.Printf("Service status: %#v\r\n", status.String())
			}
@@ -1,16 +1,32 @@
// Copyright (C) 2019-2022 Nicola Murino
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, version 3.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

package cmd

import (
	"fmt"
	"os"

	"github.com/drakkan/sftpgo/service"
	"github.com/spf13/cobra"

	"github.com/drakkan/sftpgo/v2/service"
)

var (
	stopCmd = &cobra.Command{
		Use:   "stop",
		Short: "Stop SFTPGo Windows Service",
		Short: "Stop the SFTPGo Windows Service",
		Run: func(cmd *cobra.Command, args []string) {
			s := service.WindowsService{
				Service: service.Service{
@@ -20,6 +36,7 @@ var (
			err := s.Stop()
			if err != nil {
				fmt.Printf("Error stopping service: %v\r\n", err)
				os.Exit(1)
			} else {
				fmt.Printf("Service stopped!\r\n")
			}
@@ -1,16 +1,32 @@
// Copyright (C) 2019-2022 Nicola Murino
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, version 3.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

package cmd

import (
	"fmt"
	"os"

	"github.com/drakkan/sftpgo/service"
	"github.com/spf13/cobra"

	"github.com/drakkan/sftpgo/v2/service"
)

var (
	uninstallCmd = &cobra.Command{
		Use:   "uninstall",
		Short: "Uninstall SFTPGo Windows Service",
		Short: "Uninstall the SFTPGo Windows Service",
		Run: func(cmd *cobra.Command, args []string) {
			s := service.WindowsService{
				Service: service.Service{
@@ -20,6 +36,7 @@ var (
			err := s.Uninstall()
			if err != nil {
				fmt.Printf("Error removing service: %v\r\n", err)
				os.Exit(1)
			} else {
				fmt.Printf("Service uninstalled\r\n")
			}
115
command/command.go
Normal file
@@ -0,0 +1,115 @@
// Copyright (C) 2019-2022 Nicola Murino
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, version 3.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

// Package command provides command configuration for SFTPGo hooks
package command

import (
	"fmt"
	"os"
	"strings"
	"time"
)

const (
	minTimeout     = 1
	maxTimeout     = 300
	defaultTimeout = 30
)

var (
	config Config
)

// Command defines the configuration for a specific command
type Command struct {
	// Path is the command path as defined in the hook configuration
	Path string `json:"path" mapstructure:"path"`
	// Timeout specifies a time limit, in seconds, for the command execution.
	// This value overrides the global timeout if set.
	// Do not use variables with the SFTPGO_ prefix to avoid conflicts with env
	// vars that SFTPGo sets
	Timeout int `json:"timeout" mapstructure:"timeout"`
	// Env defines additional environment variables for the commands.
	// Each entry is of the form "key=value".
	// These values are added to the global environment variables if any
	Env []string `json:"env" mapstructure:"env"`
}

// Config defines the configuration for external commands such as
// program based hooks
type Config struct {
	// Timeout specifies a global time limit, in seconds, for the external commands execution
	Timeout int `json:"timeout" mapstructure:"timeout"`
	// Env defines additional environment variables for the commands.
	// Each entry is of the form "key=value".
	// Do not use variables with the SFTPGO_ prefix to avoid conflicts with env
	// vars that SFTPGo sets
	Env []string `json:"env" mapstructure:"env"`
	// Commands defines configuration for specific commands
	Commands []Command `json:"commands" mapstructure:"commands"`
}

func init() {
	config = Config{
		Timeout: defaultTimeout,
	}
}

// Initialize configures commands
func (c Config) Initialize() error {
	if c.Timeout < minTimeout || c.Timeout > maxTimeout {
		return fmt.Errorf("invalid timeout %v", c.Timeout)
	}
	for _, env := range c.Env {
		if len(strings.Split(env, "=")) != 2 {
			return fmt.Errorf("invalid env var %#v", env)
		}
	}
	for idx, cmd := range c.Commands {
		if cmd.Path == "" {
			return fmt.Errorf("invalid path %#v", cmd.Path)
		}
		if cmd.Timeout == 0 {
			c.Commands[idx].Timeout = c.Timeout
		} else {
			if cmd.Timeout < minTimeout || cmd.Timeout > maxTimeout {
				return fmt.Errorf("invalid timeout %v for command %#v", cmd.Timeout, cmd.Path)
			}
		}
		for _, env := range cmd.Env {
			if len(strings.Split(env, "=")) != 2 {
				return fmt.Errorf("invalid env var %#v for command %#v", env, cmd.Path)
			}
		}
	}
	config = c
	return nil
}

// GetConfig returns the configuration for the specified command
func GetConfig(command string) (time.Duration, []string) {
	env := os.Environ()
	timeout := time.Duration(config.Timeout) * time.Second
	env = append(env, config.Env...)
	for _, cmd := range config.Commands {
		if cmd.Path == command {
			timeout = time.Duration(cmd.Timeout) * time.Second
			env = append(env, cmd.Env...)
			break
		}
	}

	return timeout, env
}
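
To show how this package is meant to be consumed by hook executors, a short sketch of resolving and applying the per-command settings (the hook path is a placeholder, not a real SFTPGo default):

package main

import (
	"context"
	"os/exec"

	"github.com/drakkan/sftpgo/v2/command"
)

func runHook(hookPath string) error {
	// resolve the timeout and environment for this specific hook,
	// falling back to the global values when no entry matches
	timeout, env := command.GetConfig(hookPath)

	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()

	cmd := exec.CommandContext(ctx, hookPath)
	cmd.Env = env
	return cmd.Run()
}

This is the same pattern the defaultActionHandler in common/actions.go (shown below) uses to execute notification commands.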
119
command/command_test.go
Normal file
@@ -0,0 +1,119 @@
// Copyright (C) 2019-2022 Nicola Murino
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, version 3.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

package command

import (
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestCommandConfig(t *testing.T) {
	require.Equal(t, defaultTimeout, config.Timeout)
	cfg := Config{
		Timeout: 10,
		Env:     []string{"a=b"},
	}
	err := cfg.Initialize()
	require.NoError(t, err)
	assert.Equal(t, cfg.Timeout, config.Timeout)
	assert.Equal(t, cfg.Env, config.Env)
	assert.Len(t, cfg.Commands, 0)
	timeout, env := GetConfig("cmd")
	assert.Equal(t, time.Duration(config.Timeout)*time.Second, timeout)
	assert.Contains(t, env, "a=b")

	cfg.Commands = []Command{
		{
			Path:    "cmd1",
			Timeout: 30,
			Env:     []string{"c=d"},
		},
		{
			Path:    "cmd2",
			Timeout: 0,
			Env:     []string{"e=f"},
		},
	}
	err = cfg.Initialize()
	require.NoError(t, err)
	assert.Equal(t, cfg.Timeout, config.Timeout)
	assert.Equal(t, cfg.Env, config.Env)
	if assert.Len(t, config.Commands, 2) {
		assert.Equal(t, cfg.Commands[0].Path, config.Commands[0].Path)
		assert.Equal(t, cfg.Commands[0].Timeout, config.Commands[0].Timeout)
		assert.Equal(t, cfg.Commands[0].Env, config.Commands[0].Env)
		assert.Equal(t, cfg.Commands[1].Path, config.Commands[1].Path)
		assert.Equal(t, cfg.Timeout, config.Commands[1].Timeout)
		assert.Equal(t, cfg.Commands[1].Env, config.Commands[1].Env)
	}
	timeout, env = GetConfig("cmd1")
	assert.Equal(t, time.Duration(config.Commands[0].Timeout)*time.Second, timeout)
	assert.Contains(t, env, "a=b")
	assert.Contains(t, env, "c=d")
	assert.NotContains(t, env, "e=f")
	timeout, env = GetConfig("cmd2")
	assert.Equal(t, time.Duration(config.Timeout)*time.Second, timeout)
	assert.Contains(t, env, "a=b")
	assert.NotContains(t, env, "c=d")
	assert.Contains(t, env, "e=f")
}

func TestConfigErrors(t *testing.T) {
	c := Config{}
	err := c.Initialize()
	if assert.Error(t, err) {
		assert.Contains(t, err.Error(), "invalid timeout")
	}
	c.Timeout = 10
	c.Env = []string{"a"}
	err = c.Initialize()
	if assert.Error(t, err) {
		assert.Contains(t, err.Error(), "invalid env var")
	}
	c.Env = nil
	c.Commands = []Command{
		{
			Path: "",
		},
	}
	err = c.Initialize()
	if assert.Error(t, err) {
		assert.Contains(t, err.Error(), "invalid path")
	}
	c.Commands = []Command{
		{
			Path:    "path",
			Timeout: 10000,
		},
	}
	err = c.Initialize()
	if assert.Error(t, err) {
		assert.Contains(t, err.Error(), "invalid timeout")
	}
	c.Commands = []Command{
		{
			Path:    "path",
			Timeout: 30,
			Env:     []string{"b"},
		},
	}
	err = c.Initialize()
	if assert.Error(t, err) {
		assert.Contains(t, err.Error(), "invalid env var")
	}
}
290
common/actions.go
Normal file
@@ -0,0 +1,290 @@
// Copyright (C) 2019-2022 Nicola Murino
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, version 3.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

package common

import (
	"bytes"
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"net/http"
	"net/url"
	"os/exec"
	"path/filepath"
	"strings"
	"time"

	"github.com/sftpgo/sdk"
	"github.com/sftpgo/sdk/plugin/notifier"

	"github.com/drakkan/sftpgo/v2/command"
	"github.com/drakkan/sftpgo/v2/dataprovider"
	"github.com/drakkan/sftpgo/v2/httpclient"
	"github.com/drakkan/sftpgo/v2/logger"
	"github.com/drakkan/sftpgo/v2/plugin"
	"github.com/drakkan/sftpgo/v2/util"
)

var (
	errUnconfiguredAction    = errors.New("no hook is configured for this action")
	errNoHook                = errors.New("unable to execute action, no hook defined")
	errUnexpectedHTTResponse = errors.New("unexpected HTTP response code")
	hooksConcurrencyGuard    = make(chan struct{}, 150)
)

func startNewHook() {
	hooksConcurrencyGuard <- struct{}{}
}

func hookEnded() {
	<-hooksConcurrencyGuard
}

// ProtocolActions defines the action to execute on file operations and SSH commands
type ProtocolActions struct {
	// Valid values are download, upload, pre-delete, delete, rename, ssh_cmd. Empty slice to disable
	ExecuteOn []string `json:"execute_on" mapstructure:"execute_on"`
	// Actions to be performed synchronously.
	// The pre-delete action is always executed synchronously while the other ones are asynchronous.
	// Executing an action synchronously means that SFTPGo will not return a result code to the client
	// (which is waiting for it) until your hook have completed its execution.
	ExecuteSync []string `json:"execute_sync" mapstructure:"execute_sync"`
	// Absolute path to an external program or an HTTP URL
	Hook string `json:"hook" mapstructure:"hook"`
}

var actionHandler ActionHandler = &defaultActionHandler{}

// InitializeActionHandler lets the user choose an action handler implementation.
//
// Do NOT call this function after application initialization.
func InitializeActionHandler(handler ActionHandler) {
	actionHandler = handler
}

func handleUnconfiguredPreAction(operation string) error {
	// for pre-delete we execute the internal handling on error, so we must return errUnconfiguredAction.
	// Other pre action will deny the operation on error so if we have no configuration we must return
	// a nil error
	if operation == operationPreDelete {
		return errUnconfiguredAction
	}
	return nil
}

// ExecutePreAction executes a pre-* action and returns the result
func ExecutePreAction(conn *BaseConnection, operation, filePath, virtualPath string, fileSize int64, openFlags int) error {
	var event *notifier.FsEvent
	hasNotifiersPlugin := plugin.Handler.HasNotifiers()
	hasHook := util.Contains(Config.Actions.ExecuteOn, operation)
	if !hasHook && !hasNotifiersPlugin {
		return handleUnconfiguredPreAction(operation)
	}
	event = newActionNotification(&conn.User, operation, filePath, virtualPath, "", "", "",
		conn.protocol, conn.GetRemoteIP(), conn.ID, fileSize, openFlags, nil)
	if hasNotifiersPlugin {
		plugin.Handler.NotifyFsEvent(event)
	}
	if !hasHook {
		return handleUnconfiguredPreAction(operation)
	}
	return actionHandler.Handle(event)
}

// ExecuteActionNotification executes the defined hook, if any, for the specified action
func ExecuteActionNotification(conn *BaseConnection, operation, filePath, virtualPath, target, virtualTarget, sshCmd string,
	fileSize int64, err error,
) {
	hasNotifiersPlugin := plugin.Handler.HasNotifiers()
	hasHook := util.Contains(Config.Actions.ExecuteOn, operation)
	if !hasHook && !hasNotifiersPlugin {
		return
	}
	notification := newActionNotification(&conn.User, operation, filePath, virtualPath, target, virtualTarget, sshCmd,
		conn.protocol, conn.GetRemoteIP(), conn.ID, fileSize, 0, err)
	if hasNotifiersPlugin {
		plugin.Handler.NotifyFsEvent(notification)
	}

	if hasHook {
		if util.Contains(Config.Actions.ExecuteSync, operation) {
			actionHandler.Handle(notification) //nolint:errcheck
			return
		}

		go func() {
			startNewHook()
			defer hookEnded()

			actionHandler.Handle(notification) //nolint:errcheck
		}()
	}
}

// ActionHandler handles a notification for a Protocol Action.
type ActionHandler interface {
	Handle(notification *notifier.FsEvent) error
}

func newActionNotification(
	user *dataprovider.User,
	operation, filePath, virtualPath, target, virtualTarget, sshCmd, protocol, ip, sessionID string,
	fileSize int64,
	openFlags int,
	err error,
) *notifier.FsEvent {
	var bucket, endpoint string
	status := 1

	fsConfig := user.GetFsConfigForPath(virtualPath)

	switch fsConfig.Provider {
	case sdk.S3FilesystemProvider:
		bucket = fsConfig.S3Config.Bucket
		endpoint = fsConfig.S3Config.Endpoint
	case sdk.GCSFilesystemProvider:
		bucket = fsConfig.GCSConfig.Bucket
	case sdk.AzureBlobFilesystemProvider:
		bucket = fsConfig.AzBlobConfig.Container
		if fsConfig.AzBlobConfig.Endpoint != "" {
			endpoint = fsConfig.AzBlobConfig.Endpoint
		}
	case sdk.SFTPFilesystemProvider:
		endpoint = fsConfig.SFTPConfig.Endpoint
	}

	if err == ErrQuotaExceeded {
		status = 3
	} else if err != nil {
		status = 2
	}

	return &notifier.FsEvent{
		Action:            operation,
		Username:          user.Username,
		Path:              filePath,
		TargetPath:        target,
		VirtualPath:       virtualPath,
		VirtualTargetPath: virtualTarget,
		SSHCmd:            sshCmd,
		FileSize:          fileSize,
		FsProvider:        int(fsConfig.Provider),
		Bucket:            bucket,
		Endpoint:          endpoint,
		Status:            status,
		Protocol:          protocol,
		IP:                ip,
		SessionID:         sessionID,
		OpenFlags:         openFlags,
		Timestamp:         time.Now().UnixNano(),
	}
}

type defaultActionHandler struct{}

func (h *defaultActionHandler) Handle(event *notifier.FsEvent) error {
	if !util.Contains(Config.Actions.ExecuteOn, event.Action) {
		return errUnconfiguredAction
	}

	if Config.Actions.Hook == "" {
		logger.Warn(event.Protocol, "", "Unable to send notification, no hook is defined")

		return errNoHook
	}

	if strings.HasPrefix(Config.Actions.Hook, "http") {
		return h.handleHTTP(event)
	}

	return h.handleCommand(event)
}

func (h *defaultActionHandler) handleHTTP(event *notifier.FsEvent) error {
	u, err := url.Parse(Config.Actions.Hook)
	if err != nil {
		logger.Error(event.Protocol, "", "Invalid hook %#v for operation %#v: %v",
			Config.Actions.Hook, event.Action, err)
		return err
	}

	startTime := time.Now()
	respCode := 0

	var b bytes.Buffer
	_ = json.NewEncoder(&b).Encode(event)

	resp, err := httpclient.RetryablePost(Config.Actions.Hook, "application/json", &b)
	if err == nil {
		respCode = resp.StatusCode
		resp.Body.Close()

		if respCode != http.StatusOK {
			err = errUnexpectedHTTResponse
		}
	}

	logger.Debug(event.Protocol, "", "notified operation %#v to URL: %v status code: %v, elapsed: %v err: %v",
		event.Action, u.Redacted(), respCode, time.Since(startTime), err)

	return err
}

func (h *defaultActionHandler) handleCommand(event *notifier.FsEvent) error {
	if !filepath.IsAbs(Config.Actions.Hook) {
		err := fmt.Errorf("invalid notification command %#v", Config.Actions.Hook)
		logger.Warn(event.Protocol, "", "unable to execute notification command: %v", err)

		return err
	}

	timeout, env := command.GetConfig(Config.Actions.Hook)
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()

	cmd := exec.CommandContext(ctx, Config.Actions.Hook)
	cmd.Env = append(env, notificationAsEnvVars(event)...)

	startTime := time.Now()
	err := cmd.Run()

	logger.Debug(event.Protocol, "", "executed command %#v, elapsed: %v, error: %v",
		Config.Actions.Hook, time.Since(startTime), err)

	return err
}

func notificationAsEnvVars(event *notifier.FsEvent) []string {
	return []string{
		fmt.Sprintf("SFTPGO_ACTION=%v", event.Action),
		fmt.Sprintf("SFTPGO_ACTION_USERNAME=%v", event.Username),
		fmt.Sprintf("SFTPGO_ACTION_PATH=%v", event.Path),
		fmt.Sprintf("SFTPGO_ACTION_TARGET=%v", event.TargetPath),
		fmt.Sprintf("SFTPGO_ACTION_VIRTUAL_PATH=%v", event.VirtualPath),
		fmt.Sprintf("SFTPGO_ACTION_VIRTUAL_TARGET=%v", event.VirtualTargetPath),
		fmt.Sprintf("SFTPGO_ACTION_SSH_CMD=%v", event.SSHCmd),
		fmt.Sprintf("SFTPGO_ACTION_FILE_SIZE=%v", event.FileSize),
		fmt.Sprintf("SFTPGO_ACTION_FS_PROVIDER=%v", event.FsProvider),
		fmt.Sprintf("SFTPGO_ACTION_BUCKET=%v", event.Bucket),
		fmt.Sprintf("SFTPGO_ACTION_ENDPOINT=%v", event.Endpoint),
		fmt.Sprintf("SFTPGO_ACTION_STATUS=%v", event.Status),
		fmt.Sprintf("SFTPGO_ACTION_PROTOCOL=%v", event.Protocol),
		fmt.Sprintf("SFTPGO_ACTION_IP=%v", event.IP),
		fmt.Sprintf("SFTPGO_ACTION_SESSION_ID=%v", event.SessionID),
		fmt.Sprintf("SFTPGO_ACTION_OPEN_FLAGS=%v", event.OpenFlags),
		fmt.Sprintf("SFTPGO_ACTION_TIMESTAMP=%v", event.Timestamp),
	}
}
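
Since the handler is pluggable, an application embedding SFTPGo can replace the default HTTP/command dispatch entirely via InitializeActionHandler. A minimal sketch of a custom handler (the logging body is illustrative only; a real handler would forward the event somewhere useful):

package main

import (
	"fmt"

	"github.com/sftpgo/sdk/plugin/notifier"

	"github.com/drakkan/sftpgo/v2/common"
)

type logOnlyHandler struct{}

// Handle receives every filesystem event in place of the default hook logic
func (h *logOnlyHandler) Handle(event *notifier.FsEvent) error {
	// a real handler could forward the event to a message queue, a database, etc.
	fmt.Printf("fs event %q on %q by %q\n", event.Action, event.VirtualPath, event.Username)
	return nil
}

func main() {
	// must happen during startup: the docs above warn not to call
	// InitializeActionHandler after application initialization
	common.InitializeActionHandler(&logOnlyHandler{})
}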
309
common/actions_test.go
Normal file
@@ -0,0 +1,309 @@
// Copyright (C) 2019-2022 Nicola Murino
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, version 3.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

package common

import (
	"errors"
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
	"runtime"
	"testing"

	"github.com/lithammer/shortuuid/v3"
	"github.com/rs/xid"
	"github.com/sftpgo/sdk"
	"github.com/sftpgo/sdk/plugin/notifier"
	"github.com/stretchr/testify/assert"

	"github.com/drakkan/sftpgo/v2/dataprovider"
	"github.com/drakkan/sftpgo/v2/plugin"
	"github.com/drakkan/sftpgo/v2/vfs"
)

func TestNewActionNotification(t *testing.T) {
	user := &dataprovider.User{
		BaseUser: sdk.BaseUser{
			Username: "username",
		},
	}
	user.FsConfig.Provider = sdk.LocalFilesystemProvider
	user.FsConfig.S3Config = vfs.S3FsConfig{
		BaseS3FsConfig: sdk.BaseS3FsConfig{
			Bucket:   "s3bucket",
			Endpoint: "endpoint",
		},
	}
	user.FsConfig.GCSConfig = vfs.GCSFsConfig{
		BaseGCSFsConfig: sdk.BaseGCSFsConfig{
			Bucket: "gcsbucket",
		},
	}
	user.FsConfig.AzBlobConfig = vfs.AzBlobFsConfig{
		BaseAzBlobFsConfig: sdk.BaseAzBlobFsConfig{
			Container: "azcontainer",
			Endpoint:  "azendpoint",
		},
	}
	user.FsConfig.SFTPConfig = vfs.SFTPFsConfig{
		BaseSFTPFsConfig: sdk.BaseSFTPFsConfig{
			Endpoint: "sftpendpoint",
		},
	}
	sessionID := xid.New().String()
	a := newActionNotification(user, operationDownload, "path", "vpath", "target", "", "", ProtocolSFTP, "", sessionID,
		123, 0, errors.New("fake error"))
	assert.Equal(t, user.Username, a.Username)
	assert.Equal(t, 0, len(a.Bucket))
	assert.Equal(t, 0, len(a.Endpoint))
	assert.Equal(t, 2, a.Status)

	user.FsConfig.Provider = sdk.S3FilesystemProvider
	a = newActionNotification(user, operationDownload, "path", "vpath", "target", "", "", ProtocolSSH, "", sessionID,
		123, 0, nil)
	assert.Equal(t, "s3bucket", a.Bucket)
	assert.Equal(t, "endpoint", a.Endpoint)
	assert.Equal(t, 1, a.Status)

	user.FsConfig.Provider = sdk.GCSFilesystemProvider
	a = newActionNotification(user, operationDownload, "path", "vpath", "target", "", "", ProtocolSCP, "", sessionID,
		123, 0, ErrQuotaExceeded)
	assert.Equal(t, "gcsbucket", a.Bucket)
	assert.Equal(t, 0, len(a.Endpoint))
	assert.Equal(t, 3, a.Status)

	user.FsConfig.Provider = sdk.AzureBlobFilesystemProvider
	a = newActionNotification(user, operationDownload, "path", "vpath", "target", "", "", ProtocolSCP, "", sessionID,
		123, 0, nil)
	assert.Equal(t, "azcontainer", a.Bucket)
	assert.Equal(t, "azendpoint", a.Endpoint)
	assert.Equal(t, 1, a.Status)

	a = newActionNotification(user, operationDownload, "path", "vpath", "target", "", "", ProtocolSCP, "", sessionID,
		123, os.O_APPEND, nil)
	assert.Equal(t, "azcontainer", a.Bucket)
	assert.Equal(t, "azendpoint", a.Endpoint)
	assert.Equal(t, 1, a.Status)
	assert.Equal(t, os.O_APPEND, a.OpenFlags)

	user.FsConfig.Provider = sdk.SFTPFilesystemProvider
	a = newActionNotification(user, operationDownload, "path", "vpath", "target", "", "", ProtocolSFTP, "", sessionID,
		123, 0, nil)
	assert.Equal(t, "sftpendpoint", a.Endpoint)
}

func TestActionHTTP(t *testing.T) {
	actionsCopy := Config.Actions

	Config.Actions = ProtocolActions{
		ExecuteOn: []string{operationDownload},
		Hook:      fmt.Sprintf("http://%v", httpAddr),
	}
	user := &dataprovider.User{
		BaseUser: sdk.BaseUser{
			Username: "username",
		},
	}
	a := newActionNotification(user, operationDownload, "path", "vpath", "target", "", "", ProtocolSFTP, "",
		xid.New().String(), 123, 0, nil)
	err := actionHandler.Handle(a)
	assert.NoError(t, err)

	Config.Actions.Hook = "http://invalid:1234"
	err = actionHandler.Handle(a)
	assert.Error(t, err)

	Config.Actions.Hook = fmt.Sprintf("http://%v/404", httpAddr)
	err = actionHandler.Handle(a)
	if assert.Error(t, err) {
		assert.EqualError(t, err, errUnexpectedHTTResponse.Error())
	}

	Config.Actions = actionsCopy
}

func TestActionCMD(t *testing.T) {
	if runtime.GOOS == osWindows {
		t.Skip("this test is not available on Windows")
	}
	actionsCopy := Config.Actions

	hookCmd, err := exec.LookPath("true")
	assert.NoError(t, err)

	Config.Actions = ProtocolActions{
		ExecuteOn: []string{operationDownload},
		Hook:      hookCmd,
	}
	user := &dataprovider.User{
		BaseUser: sdk.BaseUser{
			Username: "username",
		},
	}
	sessionID := shortuuid.New()
	a := newActionNotification(user, operationDownload, "path", "vpath", "target", "", "", ProtocolSFTP, "", sessionID,
		123, 0, nil)
	err = actionHandler.Handle(a)
	assert.NoError(t, err)

	c := NewBaseConnection("id", ProtocolSFTP, "", "", *user)
	ExecuteActionNotification(c, OperationSSHCmd, "path", "vpath", "target", "vtarget", "sha1sum", 0, nil)

	ExecuteActionNotification(c, operationDownload, "path", "vpath", "", "", "", 0, nil)

	Config.Actions = actionsCopy
}

func TestWrongActions(t *testing.T) {
	actionsCopy := Config.Actions

	badCommand := "/bad/command"
	if runtime.GOOS == osWindows {
		badCommand = "C:\\bad\\command"
	}
	Config.Actions = ProtocolActions{
		ExecuteOn: []string{operationUpload},
		Hook:      badCommand,
	}
	user := &dataprovider.User{
		BaseUser: sdk.BaseUser{
			Username: "username",
		},
	}

	a := newActionNotification(user, operationUpload, "", "", "", "", "", ProtocolSFTP, "", xid.New().String(),
		123, 0, nil)
	err := actionHandler.Handle(a)
	assert.Error(t, err, "action with bad command must fail")

	a.Action = operationDelete
	err = actionHandler.Handle(a)
	assert.EqualError(t, err, errUnconfiguredAction.Error())

	Config.Actions.Hook = "http://foo\x7f.com/"
	a.Action = operationUpload
	err = actionHandler.Handle(a)
	assert.Error(t, err, "action with bad url must fail")

	Config.Actions.Hook = ""
	err = actionHandler.Handle(a)
	if assert.Error(t, err) {
		assert.EqualError(t, err, errNoHook.Error())
	}

	Config.Actions.Hook = "relative path"
	err = actionHandler.Handle(a)
	if assert.Error(t, err) {
		assert.EqualError(t, err, fmt.Sprintf("invalid notification command %#v", Config.Actions.Hook))
	}

	Config.Actions = actionsCopy
}

func TestPreDeleteAction(t *testing.T) {
	if runtime.GOOS == osWindows {
		t.Skip("this test is not available on Windows")
	}
	actionsCopy := Config.Actions

	hookCmd, err := exec.LookPath("true")
	assert.NoError(t, err)
	Config.Actions = ProtocolActions{
		ExecuteOn: []string{operationPreDelete},
		Hook:      hookCmd,
	}
	homeDir := filepath.Join(os.TempDir(), "test_user")
	err = os.MkdirAll(homeDir, os.ModePerm)
	assert.NoError(t, err)
	user := dataprovider.User{
		BaseUser: sdk.BaseUser{
			Username: "username",
			HomeDir:  homeDir,
		},
	}
	user.Permissions = make(map[string][]string)
	user.Permissions["/"] = []string{dataprovider.PermAny}
	fs := vfs.NewOsFs("id", homeDir, "")
	c := NewBaseConnection("id", ProtocolSFTP, "", "", user)

	testfile := filepath.Join(user.HomeDir, "testfile")
	err = os.WriteFile(testfile, []byte("test"), os.ModePerm)
	assert.NoError(t, err)
	info, err := os.Stat(testfile)
	assert.NoError(t, err)
	err = c.RemoveFile(fs, testfile, "testfile", info)
	assert.NoError(t, err)
	assert.FileExists(t, testfile)

	os.RemoveAll(homeDir)

	Config.Actions = actionsCopy
}

func TestUnconfiguredHook(t *testing.T) {
	actionsCopy := Config.Actions

	Config.Actions = ProtocolActions{
		ExecuteOn: []string{operationDownload},
		Hook:      "",
	}
	pluginsConfig := []plugin.Config{
		{
			Type: "notifier",
		},
	}
	err := plugin.Initialize(pluginsConfig, true)
	assert.Error(t, err)
	assert.True(t, plugin.Handler.HasNotifiers())

	c := NewBaseConnection("id", ProtocolSFTP, "", "", dataprovider.User{})
	err = ExecutePreAction(c, OperationPreDownload, "", "", 0, 0)
	assert.NoError(t, err)
	err = ExecutePreAction(c, operationPreDelete, "", "", 0, 0)
	assert.ErrorIs(t, err, errUnconfiguredAction)

	ExecuteActionNotification(c, operationDownload, "", "", "", "", "", 0, nil)

	err = plugin.Initialize(nil, true)
	assert.NoError(t, err)
	assert.False(t, plugin.Handler.HasNotifiers())

	Config.Actions = actionsCopy
}

type actionHandlerStub struct {
	called bool
}

func (h *actionHandlerStub) Handle(event *notifier.FsEvent) error {
	h.called = true

	return nil
}

func TestInitializeActionHandler(t *testing.T) {
	handler := &actionHandlerStub{}

	InitializeActionHandler(handler)
	t.Cleanup(func() {
		InitializeActionHandler(&defaultActionHandler{})
	})

	err := actionHandler.Handle(&notifier.FsEvent{})

	assert.NoError(t, err)
	assert.True(t, handler.called)
}
65
common/clientsmap.go
Normal file
@@ -0,0 +1,65 @@
// Copyright (C) 2019-2022 Nicola Murino
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, version 3.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

package common

import (
	"sync"
	"sync/atomic"

	"github.com/drakkan/sftpgo/v2/logger"
)

// clientsMap is a struct containing the map of the connected clients
type clientsMap struct {
	totalConnections int32
	mu               sync.RWMutex
	clients          map[string]int
}

func (c *clientsMap) add(source string) {
	atomic.AddInt32(&c.totalConnections, 1)

	c.mu.Lock()
	defer c.mu.Unlock()

	c.clients[source]++
}

func (c *clientsMap) remove(source string) {
	c.mu.Lock()
	defer c.mu.Unlock()

	if val, ok := c.clients[source]; ok {
		atomic.AddInt32(&c.totalConnections, -1)
		c.clients[source]--
		if val > 1 {
			return
		}
		delete(c.clients, source)
	} else {
		logger.Warn(logSender, "", "cannot remove client %v it is not mapped", source)
	}
}

func (c *clientsMap) getTotal() int32 {
	return atomic.LoadInt32(&c.totalConnections)
}

func (c *clientsMap) getTotalFrom(source string) int {
	c.mu.RLock()
	defer c.mu.RUnlock()

	return c.clients[source]
}
|
||||
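A minimal sketch of how this map could back a per-source connection limit; maxPerHost is a hypothetical parameter, and a production version would perform the check and the increment under a single lock to avoid the race between getTotalFrom and add:

func (c *clientsMap) tryAdd(source string, maxPerHost int) bool {
	// refuse the connection when the source already has too many clients
	if maxPerHost > 0 && c.getTotalFrom(source) >= maxPerHost {
		return false
	}
	c.add(source)
	return true
}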
common/clientsmap_test.go (new file, 73 lines)
@@ -0,0 +1,73 @@
// Copyright (C) 2019-2022 Nicola Murino
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, version 3.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

package common

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestClientsMap(t *testing.T) {
	m := clientsMap{
		clients: make(map[string]int),
	}
	ip1 := "192.168.1.1"
	ip2 := "192.168.1.2"
	m.add(ip1)
	assert.Equal(t, int32(1), m.getTotal())
	assert.Equal(t, 1, m.getTotalFrom(ip1))
	assert.Equal(t, 0, m.getTotalFrom(ip2))

	m.add(ip1)
	m.add(ip2)
	assert.Equal(t, int32(3), m.getTotal())
	assert.Equal(t, 2, m.getTotalFrom(ip1))
	assert.Equal(t, 1, m.getTotalFrom(ip2))

	m.add(ip1)
	m.add(ip1)
	m.add(ip2)
	assert.Equal(t, int32(6), m.getTotal())
	assert.Equal(t, 4, m.getTotalFrom(ip1))
	assert.Equal(t, 2, m.getTotalFrom(ip2))

	m.remove(ip2)
	assert.Equal(t, int32(5), m.getTotal())
	assert.Equal(t, 4, m.getTotalFrom(ip1))
	assert.Equal(t, 1, m.getTotalFrom(ip2))

	m.remove("unknown")
	assert.Equal(t, int32(5), m.getTotal())
	assert.Equal(t, 4, m.getTotalFrom(ip1))
	assert.Equal(t, 1, m.getTotalFrom(ip2))

	m.remove(ip2)
	assert.Equal(t, int32(4), m.getTotal())
	assert.Equal(t, 4, m.getTotalFrom(ip1))
	assert.Equal(t, 0, m.getTotalFrom(ip2))

	m.remove(ip1)
	m.remove(ip1)
	m.remove(ip1)
	assert.Equal(t, int32(1), m.getTotal())
	assert.Equal(t, 1, m.getTotalFrom(ip1))
	assert.Equal(t, 0, m.getTotalFrom(ip2))

	m.remove(ip1)
	assert.Equal(t, int32(0), m.getTotal())
	assert.Equal(t, 0, m.getTotalFrom(ip1))
	assert.Equal(t, 0, m.getTotalFrom(ip2))
}
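TestClientsMap exercises the map sequentially. A short companion sketch, assumed and not part of the original diff, that hammers add and remove from several goroutines; running it under `go test -race` validates the combination of the atomic total and the mutex-guarded map (it needs "sync" added to the import list):

func TestClientsMapConcurrency(t *testing.T) {
	m := clientsMap{
		clients: make(map[string]int),
	}
	var wg sync.WaitGroup
	for i := 0; i < 50; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// every add is paired with a remove, so the final counts are zero
			m.add("10.0.0.1")
			m.remove("10.0.0.1")
		}()
	}
	wg.Wait()
	assert.Equal(t, int32(0), m.getTotal())
	assert.Equal(t, 0, m.getTotalFrom("10.0.0.1"))
}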
common/common.go (new file, 1285 lines): diff suppressed because it is too large
common/common_test.go (new file, 1099 lines): diff suppressed because it is too large
common/connection.go (new file, 1401 lines): diff suppressed because it is too large
common/connection_test.go (new file, 493 lines)
@@ -0,0 +1,493 @@
// Copyright (C) 2019-2022 Nicola Murino
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, version 3.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

package common

import (
	"os"
	"path"
	"path/filepath"
	"runtime"
	"testing"
	"time"

	"github.com/pkg/sftp"
	"github.com/rs/xid"
	"github.com/sftpgo/sdk"
	"github.com/stretchr/testify/assert"

	"github.com/drakkan/sftpgo/v2/dataprovider"
	"github.com/drakkan/sftpgo/v2/kms"
	"github.com/drakkan/sftpgo/v2/util"
	"github.com/drakkan/sftpgo/v2/vfs"
)

// MockOsFs mockable OsFs
type MockOsFs struct {
	vfs.Fs
	hasVirtualFolders bool
}

// Name returns the name for the Fs implementation
func (fs *MockOsFs) Name() string {
	return "mockOsFs"
}

// HasVirtualFolders returns true if folders are emulated
func (fs *MockOsFs) HasVirtualFolders() bool {
	return fs.hasVirtualFolders
}

func (fs *MockOsFs) IsUploadResumeSupported() bool {
	return !fs.hasVirtualFolders
}

func (fs *MockOsFs) Chtimes(name string, atime, mtime time.Time, isUploading bool) error {
	return vfs.ErrVfsUnsupported
}

func newMockOsFs(hasVirtualFolders bool, connectionID, rootDir string) vfs.Fs {
	return &MockOsFs{
		Fs:                vfs.NewOsFs(connectionID, rootDir, ""),
		hasVirtualFolders: hasVirtualFolders,
	}
}

func TestRemoveErrors(t *testing.T) {
	mappedPath := filepath.Join(os.TempDir(), "map")
	homePath := filepath.Join(os.TempDir(), "home")

	user := dataprovider.User{
		BaseUser: sdk.BaseUser{
			Username: "remove_errors_user",
			HomeDir:  homePath,
		},
		VirtualFolders: []vfs.VirtualFolder{
			{
				BaseVirtualFolder: vfs.BaseVirtualFolder{
					Name:       filepath.Base(mappedPath),
					MappedPath: mappedPath,
				},
				VirtualPath: "/virtualpath",
			},
		},
	}
	user.Permissions = make(map[string][]string)
	user.Permissions["/"] = []string{dataprovider.PermAny}
	fs := vfs.NewOsFs("", os.TempDir(), "")
	conn := NewBaseConnection("", ProtocolFTP, "", "", user)
	err := conn.IsRemoveDirAllowed(fs, mappedPath, "/virtualpath1")
	if assert.Error(t, err) {
		assert.Contains(t, err.Error(), "permission denied")
	}
	err = conn.RemoveFile(fs, filepath.Join(homePath, "missing_file"), "/missing_file",
		vfs.NewFileInfo("info", false, 100, time.Now(), false))
	assert.Error(t, err)
}

func TestSetStatMode(t *testing.T) {
	oldSetStatMode := Config.SetstatMode
	Config.SetstatMode = 1

	fakePath := "fake path"
	user := dataprovider.User{
		BaseUser: sdk.BaseUser{
			HomeDir: os.TempDir(),
		},
	}
	user.Permissions = make(map[string][]string)
	user.Permissions["/"] = []string{dataprovider.PermAny}
	fs := newMockOsFs(true, "", user.GetHomeDir())
	conn := NewBaseConnection("", ProtocolWebDAV, "", "", user)
	err := conn.handleChmod(fs, fakePath, fakePath, nil)
	assert.NoError(t, err)
	err = conn.handleChown(fs, fakePath, fakePath, nil)
	assert.NoError(t, err)
	err = conn.handleChtimes(fs, fakePath, fakePath, nil)
	assert.NoError(t, err)

	Config.SetstatMode = 2
	err = conn.handleChmod(fs, fakePath, fakePath, nil)
	assert.NoError(t, err)
	err = conn.handleChtimes(fs, fakePath, fakePath, &StatAttributes{
		Atime: time.Now(),
		Mtime: time.Now(),
	})
	assert.NoError(t, err)

	Config.SetstatMode = oldSetStatMode
}

func TestRecursiveRenameWalkError(t *testing.T) {
	fs := vfs.NewOsFs("", os.TempDir(), "")
	conn := NewBaseConnection("", ProtocolWebDAV, "", "", dataprovider.User{})
	err := conn.checkRecursiveRenameDirPermissions(fs, fs, "/source", "/target")
	assert.ErrorIs(t, err, os.ErrNotExist)
}

func TestCrossRenameFsErrors(t *testing.T) {
	fs := vfs.NewOsFs("", os.TempDir(), "")
	conn := NewBaseConnection("", ProtocolWebDAV, "", "", dataprovider.User{})
	res := conn.hasSpaceForCrossRename(fs, vfs.QuotaCheckResult{}, 1, "missingsource")
	assert.False(t, res)
	if runtime.GOOS != osWindows {
		dirPath := filepath.Join(os.TempDir(), "d")
		err := os.Mkdir(dirPath, os.ModePerm)
		assert.NoError(t, err)
		err = os.Chmod(dirPath, 0001)
		assert.NoError(t, err)

		res = conn.hasSpaceForCrossRename(fs, vfs.QuotaCheckResult{}, 1, dirPath)
		assert.False(t, res)

		err = os.Chmod(dirPath, os.ModePerm)
		assert.NoError(t, err)
		err = os.Remove(dirPath)
		assert.NoError(t, err)
	}
}

func TestRenameVirtualFolders(t *testing.T) {
	vdir := "/avdir"
	u := dataprovider.User{}
	u.VirtualFolders = append(u.VirtualFolders, vfs.VirtualFolder{
		BaseVirtualFolder: vfs.BaseVirtualFolder{
			Name:       "name",
			MappedPath: "mappedPath",
		},
		VirtualPath: vdir,
	})
	fs := vfs.NewOsFs("", os.TempDir(), "")
	conn := NewBaseConnection("", ProtocolFTP, "", "", u)
	res := conn.isRenamePermitted(fs, fs, "source", "target", vdir, "vdirtarget", nil)
	assert.False(t, res)
}

func TestRenamePerms(t *testing.T) {
	src := "source"
	target := "target"
	sub := "/sub"
	subTarget := sub + "/target"
	u := dataprovider.User{}
	u.Permissions = map[string][]string{}
	u.Permissions["/"] = []string{dataprovider.PermCreateDirs, dataprovider.PermUpload, dataprovider.PermCreateSymlinks,
		dataprovider.PermDeleteFiles}
	conn := NewBaseConnection("", ProtocolSFTP, "", "", u)
	assert.False(t, conn.hasRenamePerms(src, target, nil))
	u.Permissions["/"] = []string{dataprovider.PermRename}
	assert.True(t, conn.hasRenamePerms(src, target, nil))
	u.Permissions["/"] = []string{dataprovider.PermCreateDirs, dataprovider.PermUpload, dataprovider.PermDeleteFiles,
		dataprovider.PermDeleteDirs}
	assert.False(t, conn.hasRenamePerms(src, target, nil))

	info := vfs.NewFileInfo(src, true, 0, time.Now(), false)
	u.Permissions["/"] = []string{dataprovider.PermRenameFiles}
	assert.False(t, conn.hasRenamePerms(src, target, info))
	u.Permissions["/"] = []string{dataprovider.PermRenameDirs}
	assert.True(t, conn.hasRenamePerms(src, target, info))
	u.Permissions["/"] = []string{dataprovider.PermRename}
	assert.True(t, conn.hasRenamePerms(src, target, info))
	u.Permissions["/"] = []string{dataprovider.PermDownload, dataprovider.PermUpload, dataprovider.PermDeleteDirs}
	assert.False(t, conn.hasRenamePerms(src, target, info))
	// test with different permissions between source and target
	u.Permissions["/"] = []string{dataprovider.PermRename}
	u.Permissions[sub] = []string{dataprovider.PermRenameFiles}
	assert.False(t, conn.hasRenamePerms(src, subTarget, info))
	u.Permissions[sub] = []string{dataprovider.PermRenameDirs}
	assert.True(t, conn.hasRenamePerms(src, subTarget, info))
	// test files
	info = vfs.NewFileInfo(src, false, 0, time.Now(), false)
	u.Permissions["/"] = []string{dataprovider.PermRenameDirs}
	assert.False(t, conn.hasRenamePerms(src, target, info))
	u.Permissions["/"] = []string{dataprovider.PermRenameFiles}
	assert.True(t, conn.hasRenamePerms(src, target, info))
	u.Permissions["/"] = []string{dataprovider.PermRename}
	assert.True(t, conn.hasRenamePerms(src, target, info))
	// test with different permissions between source and target
	u.Permissions["/"] = []string{dataprovider.PermRename}
	u.Permissions[sub] = []string{dataprovider.PermRenameDirs}
	assert.False(t, conn.hasRenamePerms(src, subTarget, info))
	u.Permissions[sub] = []string{dataprovider.PermRenameFiles}
	assert.True(t, conn.hasRenamePerms(src, subTarget, info))
}

func TestUpdateQuotaAfterRename(t *testing.T) {
	user := dataprovider.User{
		BaseUser: sdk.BaseUser{
			Username: userTestUsername,
			HomeDir:  filepath.Join(os.TempDir(), "home"),
		},
	}
	mappedPath := filepath.Join(os.TempDir(), "vdir")
	user.Permissions = make(map[string][]string)
	user.Permissions["/"] = []string{dataprovider.PermAny}
	user.VirtualFolders = append(user.VirtualFolders, vfs.VirtualFolder{
		BaseVirtualFolder: vfs.BaseVirtualFolder{
			MappedPath: mappedPath,
		},
		VirtualPath: "/vdir",
		QuotaFiles:  -1,
		QuotaSize:   -1,
	})
	user.VirtualFolders = append(user.VirtualFolders, vfs.VirtualFolder{
		BaseVirtualFolder: vfs.BaseVirtualFolder{
			MappedPath: mappedPath,
		},
		VirtualPath: "/vdir1",
		QuotaFiles:  -1,
		QuotaSize:   -1,
	})
	err := os.MkdirAll(user.GetHomeDir(), os.ModePerm)
	assert.NoError(t, err)
	err = os.MkdirAll(mappedPath, os.ModePerm)
	assert.NoError(t, err)
	fs, err := user.GetFilesystem("id")
	assert.NoError(t, err)
	c := NewBaseConnection("", ProtocolSFTP, "", "", user)
	request := sftp.NewRequest("Rename", "/testfile")
	if runtime.GOOS != osWindows {
		request.Filepath = "/dir"
		request.Target = path.Join("/vdir", "dir")
		testDirPath := filepath.Join(mappedPath, "dir")
		err := os.MkdirAll(testDirPath, os.ModePerm)
		assert.NoError(t, err)
		err = os.Chmod(testDirPath, 0001)
		assert.NoError(t, err)
		err = c.updateQuotaAfterRename(fs, request.Filepath, request.Target, testDirPath, 0)
		assert.Error(t, err)
		err = os.Chmod(testDirPath, os.ModePerm)
		assert.NoError(t, err)
	}
	testFile1 := "/testfile1"
	request.Target = testFile1
	request.Filepath = path.Join("/vdir", "file")
	err = c.updateQuotaAfterRename(fs, request.Filepath, request.Target, filepath.Join(mappedPath, "file"), 0)
	assert.Error(t, err)
	err = os.WriteFile(filepath.Join(mappedPath, "file"), []byte("test content"), os.ModePerm)
	assert.NoError(t, err)
	request.Filepath = testFile1
	request.Target = path.Join("/vdir", "file")
	err = c.updateQuotaAfterRename(fs, request.Filepath, request.Target, filepath.Join(mappedPath, "file"), 12)
	assert.NoError(t, err)
	err = os.WriteFile(filepath.Join(user.GetHomeDir(), "testfile1"), []byte("test content"), os.ModePerm)
	assert.NoError(t, err)
	request.Target = testFile1
	request.Filepath = path.Join("/vdir", "file")
	err = c.updateQuotaAfterRename(fs, request.Filepath, request.Target, filepath.Join(mappedPath, "file"), 12)
	assert.NoError(t, err)
	request.Target = path.Join("/vdir1", "file")
	request.Filepath = path.Join("/vdir", "file")
	err = c.updateQuotaAfterRename(fs, request.Filepath, request.Target, filepath.Join(mappedPath, "file"), 12)
	assert.NoError(t, err)

	err = os.RemoveAll(mappedPath)
	assert.NoError(t, err)
	err = os.RemoveAll(user.GetHomeDir())
	assert.NoError(t, err)
}

func TestErrorsMapping(t *testing.T) {
	fs := vfs.NewOsFs("", os.TempDir(), "")
	conn := NewBaseConnection("", ProtocolSFTP, "", "", dataprovider.User{BaseUser: sdk.BaseUser{HomeDir: os.TempDir()}})
	osErrorsProtocols := []string{ProtocolWebDAV, ProtocolFTP, ProtocolHTTP, ProtocolHTTPShare,
		ProtocolDataRetention, ProtocolOIDC}
	for _, protocol := range supportedProtocols {
		conn.SetProtocol(protocol)
		err := conn.GetFsError(fs, os.ErrNotExist)
		if protocol == ProtocolSFTP {
			assert.ErrorIs(t, err, sftp.ErrSSHFxNoSuchFile)
		} else if util.Contains(osErrorsProtocols, protocol) {
			assert.EqualError(t, err, os.ErrNotExist.Error())
		} else {
			assert.EqualError(t, err, ErrNotExist.Error())
		}
		err = conn.GetFsError(fs, os.ErrPermission)
		if protocol == ProtocolSFTP {
			assert.EqualError(t, err, sftp.ErrSSHFxPermissionDenied.Error())
		} else {
			assert.EqualError(t, err, ErrPermissionDenied.Error())
		}
		err = conn.GetFsError(fs, os.ErrClosed)
		if protocol == ProtocolSFTP {
			assert.ErrorIs(t, err, sftp.ErrSSHFxFailure)
			assert.Contains(t, err.Error(), os.ErrClosed.Error())
		} else {
			assert.EqualError(t, err, ErrGenericFailure.Error())
		}
		err = conn.GetFsError(fs, ErrPermissionDenied)
		if protocol == ProtocolSFTP {
			assert.ErrorIs(t, err, sftp.ErrSSHFxFailure)
			assert.Contains(t, err.Error(), ErrPermissionDenied.Error())
		} else {
			assert.EqualError(t, err, ErrPermissionDenied.Error())
		}
		err = conn.GetFsError(fs, vfs.ErrVfsUnsupported)
		if protocol == ProtocolSFTP {
			assert.EqualError(t, err, sftp.ErrSSHFxOpUnsupported.Error())
		} else {
			assert.EqualError(t, err, ErrOpUnsupported.Error())
		}
		err = conn.GetFsError(fs, vfs.ErrStorageSizeUnavailable)
		if protocol == ProtocolSFTP {
			assert.ErrorIs(t, err, sftp.ErrSSHFxOpUnsupported)
			assert.Contains(t, err.Error(), vfs.ErrStorageSizeUnavailable.Error())
		} else {
			assert.EqualError(t, err, vfs.ErrStorageSizeUnavailable.Error())
		}
		err = conn.GetQuotaExceededError()
		assert.True(t, conn.IsQuotaExceededError(err))
		err = conn.GetReadQuotaExceededError()
		if protocol == ProtocolSFTP {
			assert.ErrorIs(t, err, sftp.ErrSSHFxFailure)
			assert.Contains(t, err.Error(), ErrReadQuotaExceeded.Error())
		} else {
			assert.ErrorIs(t, err, ErrReadQuotaExceeded)
		}
		err = conn.GetNotExistError()
		assert.True(t, conn.IsNotExistError(err))
		err = conn.GetFsError(fs, nil)
		assert.NoError(t, err)
		err = conn.GetOpUnsupportedError()
		if protocol == ProtocolSFTP {
			assert.EqualError(t, err, sftp.ErrSSHFxOpUnsupported.Error())
		} else {
			assert.EqualError(t, err, ErrOpUnsupported.Error())
		}
	}
}

func TestMaxWriteSize(t *testing.T) {
	permissions := make(map[string][]string)
	permissions["/"] = []string{dataprovider.PermAny}
	user := dataprovider.User{
		BaseUser: sdk.BaseUser{
			Username:    userTestUsername,
			Permissions: permissions,
			HomeDir:     filepath.Clean(os.TempDir()),
		},
	}
	fs, err := user.GetFilesystem("123")
	assert.NoError(t, err)
	conn := NewBaseConnection("", ProtocolFTP, "", "", user)
	quotaResult := vfs.QuotaCheckResult{
		HasSpace: true,
	}
	size, err := conn.GetMaxWriteSize(quotaResult, false, 0, fs.IsUploadResumeSupported())
	assert.NoError(t, err)
	assert.Equal(t, int64(0), size)

	conn.User.Filters.MaxUploadFileSize = 100
	size, err = conn.GetMaxWriteSize(quotaResult, false, 0, fs.IsUploadResumeSupported())
	assert.NoError(t, err)
	assert.Equal(t, int64(100), size)

	quotaResult.QuotaSize = 1000
	size, err = conn.GetMaxWriteSize(quotaResult, false, 50, fs.IsUploadResumeSupported())
	assert.NoError(t, err)
	assert.Equal(t, int64(100), size)

	quotaResult.QuotaSize = 1000
	quotaResult.UsedSize = 990
	size, err = conn.GetMaxWriteSize(quotaResult, false, 50, fs.IsUploadResumeSupported())
	assert.NoError(t, err)
	assert.Equal(t, int64(60), size)

	quotaResult.QuotaSize = 0
	quotaResult.UsedSize = 0
	size, err = conn.GetMaxWriteSize(quotaResult, true, 100, fs.IsUploadResumeSupported())
	assert.True(t, conn.IsQuotaExceededError(err))
	assert.Equal(t, int64(0), size)

	size, err = conn.GetMaxWriteSize(quotaResult, true, 10, fs.IsUploadResumeSupported())
	assert.NoError(t, err)
	assert.Equal(t, int64(90), size)

	fs = newMockOsFs(true, fs.ConnectionID(), user.GetHomeDir())
	size, err = conn.GetMaxWriteSize(quotaResult, true, 100, fs.IsUploadResumeSupported())
	assert.EqualError(t, err, ErrOpUnsupported.Error())
	assert.Equal(t, int64(0), size)
}

func TestCheckParentDirsErrors(t *testing.T) {
	permissions := make(map[string][]string)
	permissions["/"] = []string{dataprovider.PermAny}
	user := dataprovider.User{
		BaseUser: sdk.BaseUser{
			Username:    userTestUsername,
			Permissions: permissions,
			HomeDir:     filepath.Clean(os.TempDir()),
		},
		FsConfig: vfs.Filesystem{
			Provider: sdk.CryptedFilesystemProvider,
		},
	}
	c := NewBaseConnection(xid.New().String(), ProtocolSFTP, "", "", user)
	err := c.CheckParentDirs("/a/dir")
	assert.Error(t, err)

	user.FsConfig.Provider = sdk.LocalFilesystemProvider
	user.VirtualFolders = nil
	user.VirtualFolders = append(user.VirtualFolders, vfs.VirtualFolder{
		BaseVirtualFolder: vfs.BaseVirtualFolder{
			FsConfig: vfs.Filesystem{
				Provider: sdk.CryptedFilesystemProvider,
			},
		},
		VirtualPath: "/vdir",
	})
	user.VirtualFolders = append(user.VirtualFolders, vfs.VirtualFolder{
		BaseVirtualFolder: vfs.BaseVirtualFolder{
			MappedPath: filepath.Clean(os.TempDir()),
		},
		VirtualPath: "/vdir/sub",
	})
	c = NewBaseConnection(xid.New().String(), ProtocolSFTP, "", "", user)
	err = c.CheckParentDirs("/vdir/sub/dir")
	assert.Error(t, err)

	user = dataprovider.User{
		BaseUser: sdk.BaseUser{
			Username:    userTestUsername,
			Permissions: permissions,
			HomeDir:     filepath.Clean(os.TempDir()),
		},
		FsConfig: vfs.Filesystem{
			Provider: sdk.S3FilesystemProvider,
			S3Config: vfs.S3FsConfig{
				BaseS3FsConfig: sdk.BaseS3FsConfig{
					Bucket:    "buck",
					Region:    "us-east-1",
					AccessKey: "key",
				},
				AccessSecret: kms.NewPlainSecret("s3secret"),
			},
		},
	}
	c = NewBaseConnection(xid.New().String(), ProtocolSFTP, "", "", user)
	err = c.CheckParentDirs("/a/dir")
	assert.NoError(t, err)

	user.VirtualFolders = append(user.VirtualFolders, vfs.VirtualFolder{
		BaseVirtualFolder: vfs.BaseVirtualFolder{
			MappedPath: filepath.Clean(os.TempDir()),
		},
		VirtualPath: "/local/dir",
	})

	c = NewBaseConnection(xid.New().String(), ProtocolSFTP, "", "", user)
	err = c.CheckParentDirs("/local/dir/sub-dir")
	assert.NoError(t, err)
	err = os.RemoveAll(filepath.Join(os.TempDir(), "sub-dir"))
	assert.NoError(t, err)
}
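TestMaxWriteSize pins down the quota arithmetic implemented by GetMaxWriteSize in common/connection.go, whose diff is suppressed above. Below is a standalone sketch of that arithmetic, reconstructed only from the assertions in this test and assuming the standard "errors" package; the real method additionally returns ErrOpUnsupported when a resume is requested on a filesystem that does not support it, as the MockOsFs case at the end shows:

// maxWriteSize mirrors the behavior asserted above. fileSize is the size
// already on disk (the resume offset); a result of 0 means "no limit".
func maxWriteSize(quotaSize, usedSize, fileSize, maxUploadFileSize int64, isResume bool) (int64, error) {
	var size int64
	if quotaSize > 0 {
		// remaining quota; the bytes already written to the file are
		// given back, e.g. quota 1000, used 990, offset 50 -> 60
		size = quotaSize - usedSize + fileSize
	}
	if maxUploadFileSize > 0 {
		if isResume {
			// resuming: only maxUploadFileSize-fileSize more bytes are allowed
			if fileSize >= maxUploadFileSize {
				return 0, errors.New("denied: max upload file size reached")
			}
			if remaining := maxUploadFileSize - fileSize; size == 0 || remaining < size {
				size = remaining
			}
		} else if size == 0 || maxUploadFileSize < size {
			// overwriting: the per-file limit caps the whole upload
			size = maxUploadFileSize
		}
	}
	return size, nil
}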
common/dataretention.go (new file, 483 lines)
@@ -0,0 +1,483 @@
// Copyright (C) 2019-2022 Nicola Murino
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, version 3.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

package common

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"
	"os"
	"os/exec"
	"path"
	"path/filepath"
	"strings"
	"sync"
	"time"

	"github.com/drakkan/sftpgo/v2/command"
	"github.com/drakkan/sftpgo/v2/dataprovider"
	"github.com/drakkan/sftpgo/v2/httpclient"
	"github.com/drakkan/sftpgo/v2/logger"
	"github.com/drakkan/sftpgo/v2/smtp"
	"github.com/drakkan/sftpgo/v2/util"
)

// RetentionCheckNotification defines the supported notification methods for a retention check result
type RetentionCheckNotification = string

// Supported notification methods
const (
	// notify results using the defined "data_retention_hook"
	RetentionCheckNotificationHook = "Hook"
	// notify results by email
	RetentionCheckNotificationEmail = "Email"
)

var (
	// RetentionChecks is the list of active retention checks
	RetentionChecks ActiveRetentionChecks
)

// ActiveRetentionChecks holds the active retention checks
type ActiveRetentionChecks struct {
	sync.RWMutex
	Checks []RetentionCheck
}

// Get returns the active retention checks
func (c *ActiveRetentionChecks) Get() []RetentionCheck {
	c.RLock()
	defer c.RUnlock()

	checks := make([]RetentionCheck, 0, len(c.Checks))
	for _, check := range c.Checks {
		foldersCopy := make([]FolderRetention, len(check.Folders))
		copy(foldersCopy, check.Folders)
		notificationsCopy := make([]string, len(check.Notifications))
		copy(notificationsCopy, check.Notifications)
		checks = append(checks, RetentionCheck{
			Username:      check.Username,
			StartTime:     check.StartTime,
			Notifications: notificationsCopy,
			Email:         check.Email,
			Folders:       foldersCopy,
		})
	}
	return checks
}

// Add a new retention check, returns nil if a retention check for the given
// username is already active. The returned result can be used to start the check
func (c *ActiveRetentionChecks) Add(check RetentionCheck, user *dataprovider.User) *RetentionCheck {
	c.Lock()
	defer c.Unlock()

	for _, val := range c.Checks {
		if val.Username == user.Username {
			return nil
		}
	}
	// we silently ignore file patterns
	user.Filters.FilePatterns = nil
	conn := NewBaseConnection("", "", "", "", *user)
	conn.SetProtocol(ProtocolDataRetention)
	conn.ID = fmt.Sprintf("data_retention_%v", user.Username)
	check.Username = user.Username
	check.StartTime = util.GetTimeAsMsSinceEpoch(time.Now())
	check.conn = conn
	check.updateUserPermissions()
	c.Checks = append(c.Checks, check)

	return &check
}

// remove a user from the ones with active retention checks
// and returns true if the user is removed
func (c *ActiveRetentionChecks) remove(username string) bool {
	c.Lock()
	defer c.Unlock()

	for idx, check := range c.Checks {
		if check.Username == username {
			lastIdx := len(c.Checks) - 1
			c.Checks[idx] = c.Checks[lastIdx]
			c.Checks = c.Checks[:lastIdx]
			return true
		}
	}

	return false
}

// FolderRetention defines the retention policy for the specified directory path
type FolderRetention struct {
	// Path is the exposed virtual directory path, if no other specific retention is defined,
	// the retention applies for sub directories too. For example if retention is defined
	// for the paths "/" and "/sub" then the retention for "/" is applied for any file outside
	// the "/sub" directory
	Path string `json:"path"`
	// Retention time in hours. 0 means exclude this path
	Retention int `json:"retention"`
	// DeleteEmptyDirs defines if empty directories will be deleted.
	// The user needs the delete permission
	DeleteEmptyDirs bool `json:"delete_empty_dirs,omitempty"`
	// IgnoreUserPermissions defines whether files are deleted even if the user does not have
	// the delete permission. The default is "false", which means that files are skipped if
	// the user does not have the permission to delete them. This applies to sub directories too.
	IgnoreUserPermissions bool `json:"ignore_user_permissions,omitempty"`
}

func (f *FolderRetention) isValid() error {
	f.Path = path.Clean(f.Path)
	if !path.IsAbs(f.Path) {
		return util.NewValidationError(fmt.Sprintf("folder retention: invalid path %#v, please specify an absolute POSIX path",
			f.Path))
	}
	if f.Retention < 0 {
		return util.NewValidationError(fmt.Sprintf("invalid folder retention %v, it must be greater or equal to zero",
			f.Retention))
	}
	return nil
}
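A short illustration of how retention folders compose under the Path rule described above; the lookup in getFolderRetention below walks from the most specific configured path toward the root, so "/archive/2021" falls under the "/archive" policy while anything else inherits the "/" policy (the values are illustrative only):

folders := []FolderRetention{
	// files directly under "/" are deleted after one week,
	// and directories left empty are pruned
	{Path: "/", Retention: 24 * 7, DeleteEmptyDirs: true},
	// everything below "/archive" is kept for 90 days instead
	{Path: "/archive", Retention: 24 * 90},
}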
type folderRetentionCheckResult struct {
	Path         string        `json:"path"`
	Retention    int           `json:"retention"`
	DeletedFiles int           `json:"deleted_files"`
	DeletedSize  int64         `json:"deleted_size"`
	Elapsed      time.Duration `json:"-"`
	Info         string        `json:"info,omitempty"`
	Error        string        `json:"error,omitempty"`
}

// RetentionCheck defines an active retention check
type RetentionCheck struct {
	// Username to which the retention check refers
	Username string `json:"username"`
	// retention check start time as unix timestamp in milliseconds
	StartTime int64 `json:"start_time"`
	// affected folders
	Folders []FolderRetention `json:"folders"`
	// how cleanup results will be notified
	Notifications []RetentionCheckNotification `json:"notifications,omitempty"`
	// email to use if the notification method is set to email
	Email string `json:"email,omitempty"`
	// Cleanup results
	results []*folderRetentionCheckResult `json:"-"`
	conn    *BaseConnection
}

// Validate returns an error if the specified folders are not valid
func (c *RetentionCheck) Validate() error {
	folderPaths := make(map[string]bool)
	nothingToDo := true
	for idx := range c.Folders {
		f := &c.Folders[idx]
		if err := f.isValid(); err != nil {
			return err
		}
		if f.Retention > 0 {
			nothingToDo = false
		}
		if _, ok := folderPaths[f.Path]; ok {
			return util.NewValidationError(fmt.Sprintf("duplicated folder path %#v", f.Path))
		}
		folderPaths[f.Path] = true
	}
	if nothingToDo {
		return util.NewValidationError("nothing to delete!")
	}
	for _, notification := range c.Notifications {
		switch notification {
		case RetentionCheckNotificationEmail:
			if !smtp.IsEnabled() {
				return util.NewValidationError("in order to notify results via email you must configure an SMTP server")
			}
			if c.Email == "" {
				return util.NewValidationError("in order to notify results via email you must add a valid email address to your profile")
			}
		case RetentionCheckNotificationHook:
			if Config.DataRetentionHook == "" {
				return util.NewValidationError("in order to notify results via hook you must define a data_retention_hook")
			}
		default:
			return util.NewValidationError(fmt.Sprintf("invalid notification %#v", notification))
		}
	}
	return nil
}

func (c *RetentionCheck) updateUserPermissions() {
	for _, folder := range c.Folders {
		if folder.IgnoreUserPermissions {
			c.conn.User.Permissions[folder.Path] = []string{dataprovider.PermAny}
		}
	}
}

func (c *RetentionCheck) getFolderRetention(folderPath string) (FolderRetention, error) {
	dirsForPath := util.GetDirsForVirtualPath(folderPath)
	for _, dirPath := range dirsForPath {
		for _, folder := range c.Folders {
			if folder.Path == dirPath {
				return folder, nil
			}
		}
	}

	return FolderRetention{}, fmt.Errorf("unable to find folder retention for %#v", folderPath)
}

func (c *RetentionCheck) removeFile(virtualPath string, info os.FileInfo) error {
	fs, fsPath, err := c.conn.GetFsAndResolvedPath(virtualPath)
	if err != nil {
		return err
	}
	return c.conn.RemoveFile(fs, fsPath, virtualPath, info)
}

func (c *RetentionCheck) cleanupFolder(folderPath string) error {
	deleteFilesPerms := []string{dataprovider.PermDelete, dataprovider.PermDeleteFiles}
	startTime := time.Now()
	result := &folderRetentionCheckResult{
		Path: folderPath,
	}
	c.results = append(c.results, result)
	if !c.conn.User.HasPerm(dataprovider.PermListItems, folderPath) || !c.conn.User.HasAnyPerm(deleteFilesPerms, folderPath) {
		result.Elapsed = time.Since(startTime)
		result.Info = "data retention check skipped: no permissions"
		c.conn.Log(logger.LevelInfo, "user %#v does not have permissions to check retention on %#v, retention check skipped",
			c.conn.User.Username, folderPath)
		return nil
	}

	folderRetention, err := c.getFolderRetention(folderPath)
	if err != nil {
		result.Elapsed = time.Since(startTime)
		result.Error = "unable to get folder retention"
		c.conn.Log(logger.LevelError, "unable to get folder retention for path %#v", folderPath)
		return err
	}
	result.Retention = folderRetention.Retention
	if folderRetention.Retention == 0 {
		result.Elapsed = time.Since(startTime)
		result.Info = "data retention check skipped: retention is set to 0"
		c.conn.Log(logger.LevelDebug, "retention check skipped for folder %#v, retention is set to 0", folderPath)
		return nil
	}
	c.conn.Log(logger.LevelDebug, "start retention check for folder %#v, retention: %v hours, delete empty dirs? %v, ignore user perms? %v",
		folderPath, folderRetention.Retention, folderRetention.DeleteEmptyDirs, folderRetention.IgnoreUserPermissions)
	files, err := c.conn.ListDir(folderPath)
	if err != nil {
		result.Elapsed = time.Since(startTime)
		if err == c.conn.GetNotExistError() {
			result.Info = "data retention check skipped, folder does not exist"
			c.conn.Log(logger.LevelDebug, "folder %#v does not exist, retention check skipped", folderPath)
			return nil
		}
		result.Error = fmt.Sprintf("unable to list directory %#v", folderPath)
		c.conn.Log(logger.LevelError, result.Error)
		return err
	}
	for _, info := range files {
		virtualPath := path.Join(folderPath, info.Name())
		if info.IsDir() {
			if err := c.cleanupFolder(virtualPath); err != nil {
				result.Elapsed = time.Since(startTime)
				result.Error = fmt.Sprintf("unable to check folder: %v", err)
				c.conn.Log(logger.LevelError, "unable to cleanup folder %#v: %v", virtualPath, err)
				return err
			}
		} else {
			retentionTime := info.ModTime().Add(time.Duration(folderRetention.Retention) * time.Hour)
			if retentionTime.Before(time.Now()) {
				if err := c.removeFile(virtualPath, info); err != nil {
					result.Elapsed = time.Since(startTime)
					result.Error = fmt.Sprintf("unable to remove file %#v: %v", virtualPath, err)
					c.conn.Log(logger.LevelError, "unable to remove file %#v, retention %v: %v",
						virtualPath, retentionTime, err)
					return err
				}
				c.conn.Log(logger.LevelDebug, "removed file %#v, modification time: %v, retention: %v hours, retention time: %v",
					virtualPath, info.ModTime(), folderRetention.Retention, retentionTime)
				result.DeletedFiles++
				result.DeletedSize += info.Size()
			}
		}
	}

	if folderRetention.DeleteEmptyDirs {
		c.checkEmptyDirRemoval(folderPath)
	}
	result.Elapsed = time.Since(startTime)
	c.conn.Log(logger.LevelDebug, "retention check completed for folder %#v, deleted files: %v, deleted size: %v bytes",
		folderPath, result.DeletedFiles, result.DeletedSize)

	return nil
}

func (c *RetentionCheck) checkEmptyDirRemoval(folderPath string) {
	if folderPath != "/" && c.conn.User.HasAnyPerm([]string{
		dataprovider.PermDelete,
		dataprovider.PermDeleteDirs,
	}, path.Dir(folderPath),
	) {
		files, err := c.conn.ListDir(folderPath)
		if err == nil && len(files) == 0 {
			err = c.conn.RemoveDir(folderPath)
			c.conn.Log(logger.LevelDebug, "tried to remove empty dir %#v, error: %v", folderPath, err)
		}
	}
}

// Start starts the retention check
func (c *RetentionCheck) Start() {
	c.conn.Log(logger.LevelInfo, "retention check started")
	defer RetentionChecks.remove(c.conn.User.Username)
	defer c.conn.CloseFS() //nolint:errcheck

	startTime := time.Now()
	for _, folder := range c.Folders {
		if folder.Retention > 0 {
			if err := c.cleanupFolder(folder.Path); err != nil {
				c.conn.Log(logger.LevelError, "retention check failed, unable to cleanup folder %#v", folder.Path)
				c.sendNotifications(time.Since(startTime), err)
				return
			}
		}
	}

	c.conn.Log(logger.LevelInfo, "retention check completed")
	c.sendNotifications(time.Since(startTime), nil)
}

func (c *RetentionCheck) sendNotifications(elapsed time.Duration, err error) {
	for _, notification := range c.Notifications {
		switch notification {
		case RetentionCheckNotificationEmail:
			c.sendEmailNotification(elapsed, err) //nolint:errcheck
		case RetentionCheckNotificationHook:
			c.sendHookNotification(elapsed, err) //nolint:errcheck
		}
	}
}

func (c *RetentionCheck) sendEmailNotification(elapsed time.Duration, errCheck error) error {
	body := new(bytes.Buffer)
	data := make(map[string]any)
	data["Results"] = c.results
	totalDeletedFiles := 0
	totalDeletedSize := int64(0)
	for _, result := range c.results {
		totalDeletedFiles += result.DeletedFiles
		totalDeletedSize += result.DeletedSize
	}
	data["HumanizeSize"] = util.ByteCountIEC
	data["TotalFiles"] = totalDeletedFiles
	data["TotalSize"] = totalDeletedSize
	data["Elapsed"] = elapsed
	data["Username"] = c.conn.User.Username
	data["StartTime"] = util.GetTimeFromMsecSinceEpoch(c.StartTime)
	if errCheck == nil {
		data["Status"] = "Succeeded"
	} else {
		data["Status"] = "Failed"
	}
	if err := smtp.RenderRetentionReportTemplate(body, data); err != nil {
		c.conn.Log(logger.LevelError, "unable to render retention check template: %v", err)
		return err
	}
	startTime := time.Now()
	subject := fmt.Sprintf("Retention check completed for user %#v", c.conn.User.Username)
	if err := smtp.SendEmail(c.Email, subject, body.String(), smtp.EmailContentTypeTextHTML); err != nil {
		c.conn.Log(logger.LevelError, "unable to notify retention check result via email: %v, elapsed: %v", err,
			time.Since(startTime))
		return err
	}
	c.conn.Log(logger.LevelInfo, "retention check result successfully notified via email, elapsed: %v", time.Since(startTime))
	return nil
}

func (c *RetentionCheck) sendHookNotification(elapsed time.Duration, errCheck error) error {
	startNewHook()
	defer hookEnded()

	data := make(map[string]any)
	totalDeletedFiles := 0
	totalDeletedSize := int64(0)
	for _, result := range c.results {
		totalDeletedFiles += result.DeletedFiles
		totalDeletedSize += result.DeletedSize
	}
	data["username"] = c.conn.User.Username
	data["start_time"] = c.StartTime
	data["elapsed"] = elapsed.Milliseconds()
	if errCheck == nil {
		data["status"] = 1
	} else {
		data["status"] = 0
	}
	data["total_deleted_files"] = totalDeletedFiles
	data["total_deleted_size"] = totalDeletedSize
	data["details"] = c.results
	jsonData, _ := json.Marshal(data)

	startTime := time.Now()

	if strings.HasPrefix(Config.DataRetentionHook, "http") {
		// the short declaration shadows the "net/url" package name from here on
		url, err := url.Parse(Config.DataRetentionHook)
		if err != nil {
			c.conn.Log(logger.LevelError, "invalid data retention hook %#v: %v", Config.DataRetentionHook, err)
			return err
		}
		respCode := 0

		resp, err := httpclient.RetryablePost(url.String(), "application/json", bytes.NewBuffer(jsonData))
		if err == nil {
			respCode = resp.StatusCode
			resp.Body.Close()

			if respCode != http.StatusOK {
				err = errUnexpectedHTTResponse
			}
		}

		c.conn.Log(logger.LevelDebug, "notified result to URL: %#v, status code: %v, elapsed: %v err: %v",
			url.Redacted(), respCode, time.Since(startTime), err)

		return err
	}
	if !filepath.IsAbs(Config.DataRetentionHook) {
		err := fmt.Errorf("invalid data retention hook %#v", Config.DataRetentionHook)
		c.conn.Log(logger.LevelError, "%v", err)
		return err
	}
	timeout, env := command.GetConfig(Config.DataRetentionHook)
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()

	cmd := exec.CommandContext(ctx, Config.DataRetentionHook)
	cmd.Env = append(env,
		fmt.Sprintf("SFTPGO_DATA_RETENTION_RESULT=%v", string(jsonData)))
	err := cmd.Run()

	c.conn.Log(logger.LevelDebug, "notified result using command: %v, elapsed: %v err: %v",
		Config.DataRetentionHook, time.Since(startTime), err)
	return err
}
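For reference, the JSON document assembled above has this shape; the values are illustrative and "details" carries the per-folder results serialized from folderRetentionCheckResult:

{
  "username": "user1",
  "start_time": 1659781862000,
  "elapsed": 12340,
  "status": 1,
  "total_deleted_files": 10,
  "total_deleted_size": 32657,
  "details": [
    {"path": "/", "retention": 24, "deleted_files": 10, "deleted_size": 32657}
  ]
}

An HTTP hook receives this document as the POST body and must reply with status 200, anything else is treated as a failure; a command hook receives it in the SFTPGO_DATA_RETENTION_RESULT environment variable. A minimal sketch of an HTTP receiver, where the endpoint, port and struct are assumptions and not part of SFTPGo:

package main

import (
	"encoding/json"
	"log"
	"net/http"
)

// retentionResult maps the top-level fields built in sendHookNotification
type retentionResult struct {
	Username          string `json:"username"`
	StartTime         int64  `json:"start_time"`
	Elapsed           int64  `json:"elapsed"`
	Status            int    `json:"status"`
	TotalDeletedFiles int    `json:"total_deleted_files"`
	TotalDeletedSize  int64  `json:"total_deleted_size"`
}

func main() {
	http.HandleFunc("/retention", func(w http.ResponseWriter, r *http.Request) {
		var res retentionResult
		if err := json.NewDecoder(r.Body).Decode(&res); err != nil {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		log.Printf("retention check for %q: status %d, deleted %d files (%d bytes)",
			res.Username, res.Status, res.TotalDeletedFiles, res.TotalDeletedSize)
		w.WriteHeader(http.StatusOK) // any other status marks the notification as failed
	})
	log.Fatal(http.ListenAndServe(":8000", nil))
}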
common/dataretention_test.go (new file, 354 lines)
@@ -0,0 +1,354 @@
// Copyright (C) 2019-2022 Nicola Murino
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, version 3.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

package common

import (
	"errors"
	"fmt"
	"os/exec"
	"runtime"
	"testing"
	"time"

	"github.com/sftpgo/sdk"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/drakkan/sftpgo/v2/dataprovider"
	"github.com/drakkan/sftpgo/v2/smtp"
)

func TestRetentionValidation(t *testing.T) {
	check := RetentionCheck{}
	check.Folders = append(check.Folders, FolderRetention{
		Path:      "relative",
		Retention: 10,
	})
	err := check.Validate()
	require.Error(t, err)
	assert.Contains(t, err.Error(), "please specify an absolute POSIX path")

	check.Folders = []FolderRetention{
		{
			Path:      "/",
			Retention: -1,
		},
	}
	err = check.Validate()
	require.Error(t, err)
	assert.Contains(t, err.Error(), "invalid folder retention")

	check.Folders = []FolderRetention{
		{
			Path:      "/ab/..",
			Retention: 0,
		},
	}
	err = check.Validate()
	require.Error(t, err)
	assert.Contains(t, err.Error(), "nothing to delete")
	assert.Equal(t, "/", check.Folders[0].Path)

	check.Folders = append(check.Folders, FolderRetention{
		Path:      "/../..",
		Retention: 24,
	})
	err = check.Validate()
	require.Error(t, err)
	assert.Contains(t, err.Error(), `duplicated folder path "/"`)

	check.Folders = []FolderRetention{
		{
			Path:      "/dir1",
			Retention: 48,
		},
		{
			Path:      "/dir2",
			Retention: 96,
		},
	}
	err = check.Validate()
	assert.NoError(t, err)
	assert.Len(t, check.Notifications, 0)
	assert.Empty(t, check.Email)

	check.Notifications = []RetentionCheckNotification{RetentionCheckNotificationEmail}
	err = check.Validate()
	require.Error(t, err)
	assert.Contains(t, err.Error(), "you must configure an SMTP server")

	smtpCfg := smtp.Config{
		Host:          "mail.example.com",
		Port:          25,
		TemplatesPath: "templates",
	}
	err = smtpCfg.Initialize("..")
	require.NoError(t, err)

	err = check.Validate()
	require.Error(t, err)
	assert.Contains(t, err.Error(), "you must add a valid email address")

	check.Email = "admin@example.com"
	err = check.Validate()
	assert.NoError(t, err)

	smtpCfg = smtp.Config{}
	err = smtpCfg.Initialize("..")
	require.NoError(t, err)

	check.Notifications = []RetentionCheckNotification{RetentionCheckNotificationHook}
	err = check.Validate()
	require.Error(t, err)
	assert.Contains(t, err.Error(), "data_retention_hook")

	check.Notifications = []string{"not valid"}
	err = check.Validate()
	require.Error(t, err)
	assert.Contains(t, err.Error(), "invalid notification")
}

func TestRetentionEmailNotifications(t *testing.T) {
	smtpCfg := smtp.Config{
		Host:          "127.0.0.1",
		Port:          2525,
		TemplatesPath: "templates",
	}
	err := smtpCfg.Initialize("..")
	require.NoError(t, err)

	user := dataprovider.User{
		BaseUser: sdk.BaseUser{
			Username: "user1",
		},
	}
	user.Permissions = make(map[string][]string)
	user.Permissions["/"] = []string{dataprovider.PermAny}
	check := RetentionCheck{
		Notifications: []RetentionCheckNotification{RetentionCheckNotificationEmail},
		Email:         "notification@example.com",
		results: []*folderRetentionCheckResult{
			{
				Path:         "/",
				Retention:    24,
				DeletedFiles: 10,
				DeletedSize:  32657,
				Elapsed:      10 * time.Second,
			},
		},
	}
	conn := NewBaseConnection("", "", "", "", user)
	conn.SetProtocol(ProtocolDataRetention)
	conn.ID = fmt.Sprintf("data_retention_%v", user.Username)
	check.conn = conn
	check.sendNotifications(1*time.Second, nil)
	err = check.sendEmailNotification(1*time.Second, nil)
	assert.NoError(t, err)
	err = check.sendEmailNotification(1*time.Second, errors.New("test error"))
	assert.NoError(t, err)

	smtpCfg.Port = 2626
	err = smtpCfg.Initialize("..")
	require.NoError(t, err)
	err = check.sendEmailNotification(1*time.Second, nil)
	assert.Error(t, err)

	smtpCfg = smtp.Config{}
	err = smtpCfg.Initialize("..")
	require.NoError(t, err)
	err = check.sendEmailNotification(1*time.Second, nil)
	assert.Error(t, err)
}

func TestRetentionHookNotifications(t *testing.T) {
	dataRetentionHook := Config.DataRetentionHook

	Config.DataRetentionHook = fmt.Sprintf("http://%v", httpAddr)
	user := dataprovider.User{
		BaseUser: sdk.BaseUser{
			Username: "user2",
		},
	}
	user.Permissions = make(map[string][]string)
	user.Permissions["/"] = []string{dataprovider.PermAny}
	check := RetentionCheck{
		Notifications: []RetentionCheckNotification{RetentionCheckNotificationHook},
		results: []*folderRetentionCheckResult{
			{
				Path:         "/",
				Retention:    24,
				DeletedFiles: 10,
				DeletedSize:  32657,
				Elapsed:      10 * time.Second,
			},
		},
	}
	conn := NewBaseConnection("", "", "", "", user)
	conn.SetProtocol(ProtocolDataRetention)
	conn.ID = fmt.Sprintf("data_retention_%v", user.Username)
	check.conn = conn
	check.sendNotifications(1*time.Second, nil)
	err := check.sendHookNotification(1*time.Second, nil)
	assert.NoError(t, err)

	Config.DataRetentionHook = fmt.Sprintf("http://%v/404", httpAddr)
	err = check.sendHookNotification(1*time.Second, nil)
	assert.ErrorIs(t, err, errUnexpectedHTTResponse)

	Config.DataRetentionHook = "http://foo\x7f.com/retention"
	err = check.sendHookNotification(1*time.Second, err)
	assert.Error(t, err)

	Config.DataRetentionHook = "relativepath"
	err = check.sendHookNotification(1*time.Second, err)
	assert.Error(t, err)

	if runtime.GOOS != osWindows {
		hookCmd, err := exec.LookPath("true")
		assert.NoError(t, err)

		Config.DataRetentionHook = hookCmd
		err = check.sendHookNotification(1*time.Second, err)
		assert.NoError(t, err)
	}

	Config.DataRetentionHook = dataRetentionHook
}

func TestRetentionPermissionsAndGetFolder(t *testing.T) {
	user := dataprovider.User{
		BaseUser: sdk.BaseUser{
			Username: "user1",
		},
	}
	user.Permissions = make(map[string][]string)
	user.Permissions["/"] = []string{dataprovider.PermListItems, dataprovider.PermDelete}
	user.Permissions["/dir1"] = []string{dataprovider.PermListItems}
	user.Permissions["/dir2/sub1"] = []string{dataprovider.PermCreateDirs}
	user.Permissions["/dir2/sub2"] = []string{dataprovider.PermDelete}

	check := RetentionCheck{
		Folders: []FolderRetention{
			{
				Path:                  "/dir2",
				Retention:             24 * 7,
				IgnoreUserPermissions: true,
			},
			{
				Path:                  "/dir3",
				Retention:             24 * 7,
				IgnoreUserPermissions: false,
			},
			{
				Path:                  "/dir2/sub1/sub",
				Retention:             24,
				IgnoreUserPermissions: true,
			},
		},
	}

	conn := NewBaseConnection("", "", "", "", user)
	conn.SetProtocol(ProtocolDataRetention)
	conn.ID = fmt.Sprintf("data_retention_%v", user.Username)
	check.conn = conn
	check.updateUserPermissions()
	assert.Equal(t, []string{dataprovider.PermListItems, dataprovider.PermDelete}, conn.User.Permissions["/"])
	assert.Equal(t, []string{dataprovider.PermListItems}, conn.User.Permissions["/dir1"])
	assert.Equal(t, []string{dataprovider.PermAny}, conn.User.Permissions["/dir2"])
	assert.Equal(t, []string{dataprovider.PermAny}, conn.User.Permissions["/dir2/sub1/sub"])
	assert.Equal(t, []string{dataprovider.PermCreateDirs}, conn.User.Permissions["/dir2/sub1"])
	assert.Equal(t, []string{dataprovider.PermDelete}, conn.User.Permissions["/dir2/sub2"])

	_, err := check.getFolderRetention("/")
	assert.Error(t, err)
	folder, err := check.getFolderRetention("/dir3")
	assert.NoError(t, err)
	assert.Equal(t, "/dir3", folder.Path)
	folder, err = check.getFolderRetention("/dir2/sub3")
	assert.NoError(t, err)
	assert.Equal(t, "/dir2", folder.Path)
	folder, err = check.getFolderRetention("/dir2/sub2")
	assert.NoError(t, err)
	assert.Equal(t, "/dir2", folder.Path)
	folder, err = check.getFolderRetention("/dir2/sub1")
	assert.NoError(t, err)
	assert.Equal(t, "/dir2", folder.Path)
	folder, err = check.getFolderRetention("/dir2/sub1/sub/sub")
	assert.NoError(t, err)
	assert.Equal(t, "/dir2/sub1/sub", folder.Path)
}

func TestRetentionCheckAddRemove(t *testing.T) {
	username := "username"
	user := dataprovider.User{
		BaseUser: sdk.BaseUser{
			Username: username,
		},
	}
	user.Permissions = make(map[string][]string)
	user.Permissions["/"] = []string{dataprovider.PermAny}
	check := RetentionCheck{
		Folders: []FolderRetention{
			{
				Path:      "/",
				Retention: 48,
			},
		},
		Notifications: []RetentionCheckNotification{RetentionCheckNotificationHook},
	}
	assert.NotNil(t, RetentionChecks.Add(check, &user))
	checks := RetentionChecks.Get()
	require.Len(t, checks, 1)
	assert.Equal(t, username, checks[0].Username)
	assert.Greater(t, checks[0].StartTime, int64(0))
	require.Len(t, checks[0].Folders, 1)
	assert.Equal(t, check.Folders[0].Path, checks[0].Folders[0].Path)
	assert.Equal(t, check.Folders[0].Retention, checks[0].Folders[0].Retention)
	require.Len(t, checks[0].Notifications, 1)
	assert.Equal(t, RetentionCheckNotificationHook, checks[0].Notifications[0])

	assert.Nil(t, RetentionChecks.Add(check, &user))
	assert.True(t, RetentionChecks.remove(username))
	require.Len(t, RetentionChecks.Get(), 0)
	assert.False(t, RetentionChecks.remove(username))
}

func TestCleanupErrors(t *testing.T) {
	user := dataprovider.User{
		BaseUser: sdk.BaseUser{
			Username: "u",
		},
	}
	user.Permissions = make(map[string][]string)
	user.Permissions["/"] = []string{dataprovider.PermAny}
	check := &RetentionCheck{
		Folders: []FolderRetention{
			{
				Path:      "/path",
				Retention: 48,
			},
		},
	}
	check = RetentionChecks.Add(*check, &user)
	require.NotNil(t, check)

	err := check.removeFile("missing file", nil)
	assert.Error(t, err)

	err = check.cleanupFolder("/")
	assert.Error(t, err)

	assert.True(t, RetentionChecks.remove(user.Username))
}
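The Add doc comment notes that the returned check can be used to start the run; the tests above exercise the pieces separately, so here is a minimal end-to-end sketch, where user is an already loaded dataprovider.User and the standard log package stands in for the project logger:

check := RetentionCheck{
	Folders: []FolderRetention{
		{Path: "/", Retention: 48, DeleteEmptyDirs: true},
	},
}
if err := check.Validate(); err != nil {
	log.Printf("invalid retention check: %v", err)
} else if active := RetentionChecks.Add(check, &user); active != nil {
	// Start blocks until the cleanup completes and removes the check
	// from the active list on exit, so run it in its own goroutine
	go active.Start()
} else {
	log.Printf("a retention check for %q is already active", user.Username)
}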
common/defender.go (new file, 342 lines)
@@ -0,0 +1,342 @@
// Copyright (C) 2019-2022 Nicola Murino
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published
|
||||
// by the Free Software Foundation, version 3.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
package common
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/yl2chen/cidranger"
|
||||
|
||||
"github.com/drakkan/sftpgo/v2/dataprovider"
|
||||
"github.com/drakkan/sftpgo/v2/logger"
|
||||
"github.com/drakkan/sftpgo/v2/util"
|
||||
)
|
||||
|
||||
// HostEvent is the enumerable for the supported host events
|
||||
type HostEvent int
|
||||
|
||||
// Supported host events
|
||||
const (
|
||||
HostEventLoginFailed HostEvent = iota
|
||||
HostEventUserNotFound
|
||||
HostEventNoLoginTried
|
||||
HostEventLimitExceeded
|
||||
)
|
||||
|
||||
// Supported defender drivers
|
||||
const (
|
||||
DefenderDriverMemory = "memory"
|
||||
DefenderDriverProvider = "provider"
|
||||
)
|
||||
|
||||
var (
|
||||
supportedDefenderDrivers = []string{DefenderDriverMemory, DefenderDriverProvider}
|
||||
)
|
||||
|
||||
// Defender defines the interface that a defender must implements
|
||||
type Defender interface {
|
||||
GetHosts() ([]dataprovider.DefenderEntry, error)
|
||||
GetHost(ip string) (dataprovider.DefenderEntry, error)
|
||||
AddEvent(ip string, event HostEvent)
|
||||
IsBanned(ip string) bool
|
||||
GetBanTime(ip string) (*time.Time, error)
|
||||
GetScore(ip string) (int, error)
|
||||
DeleteHost(ip string) bool
|
||||
Reload() error
|
||||
}
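The interface above is the whole contract between the protocol servers and the defender. A minimal sketch of the expected call pattern (not part of this diff; acceptConnection, authFailed, and serveClient are hypothetical placeholders):

	// Illustrative only: how a protocol server might drive a Defender.
	func acceptConnection(d Defender, ip string) bool {
		// IsBanned must be called as soon as the client connects
		if d.IsBanned(ip) {
			return false
		}
		if authFailed(ip) {
			// report the violation: enough events within the observation
			// window push the host score over the configured threshold
			d.AddEvent(ip, HostEventLoginFailed)
			return false
		}
		return serveClient(ip)
	}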

// DefenderConfig defines the "defender" configuration
type DefenderConfig struct {
	// Set to true to enable the defender
	Enabled bool `json:"enabled" mapstructure:"enabled"`
	// Defender implementation to use. We support "memory" and "provider".
	// Using "provider" as driver you can share the defender events among
	// multiple SFTPGo instances. For a single instance the "memory" driver
	// will be much faster
	Driver string `json:"driver" mapstructure:"driver"`
	// BanTime is the number of minutes that a host is banned
	BanTime int `json:"ban_time" mapstructure:"ban_time"`
	// Percentage increase of the ban time if a banned host tries to connect again
	BanTimeIncrement int `json:"ban_time_increment" mapstructure:"ban_time_increment"`
	// Threshold value for banning a client
	Threshold int `json:"threshold" mapstructure:"threshold"`
	// Score for invalid login attempts, e.g. non-existent user accounts or
	// clients disconnected for inactivity without authentication attempts
	ScoreInvalid int `json:"score_invalid" mapstructure:"score_invalid"`
	// Score for valid login attempts, e.g. user accounts that exist
	ScoreValid int `json:"score_valid" mapstructure:"score_valid"`
	// Score for limit exceeded events, generated from the rate limiters or when
	// the maximum allowed connections per host are exceeded
	ScoreLimitExceeded int `json:"score_limit_exceeded" mapstructure:"score_limit_exceeded"`
	// Defines the time window, in minutes, for tracking client errors.
	// A host is banned if it has exceeded the defined threshold during
	// the last ObservationTime minutes
	ObservationTime int `json:"observation_time" mapstructure:"observation_time"`
	// The number of banned IPs and host scores kept in memory will vary between the
	// soft and hard limit for the "memory" driver. For the "provider" driver the
	// soft limit is ignored and the hard limit is used to limit the number of entries
	// to return when you request the entire host list from the defender
	EntriesSoftLimit int `json:"entries_soft_limit" mapstructure:"entries_soft_limit"`
	EntriesHardLimit int `json:"entries_hard_limit" mapstructure:"entries_hard_limit"`
	// Path to a file containing a list of IP addresses and/or networks to never ban
	SafeListFile string `json:"safelist_file" mapstructure:"safelist_file"`
	// Path to a file containing a list of IP addresses and/or networks to always ban
	BlockListFile string `json:"blocklist_file" mapstructure:"blocklist_file"`
	// List of IP addresses and/or networks to never ban.
	// For large lists prefer SafeListFile
	SafeList []string `json:"safelist" mapstructure:"safelist"`
	// List of IP addresses and/or networks to always ban.
	// For large lists prefer BlockListFile
	BlockList []string `json:"blocklist" mapstructure:"blocklist"`
}
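As a concrete reference, here is a config literal that satisfies the constraints enforced by validate() further below (every single-event score below the threshold, hard limit above the soft limit). The values are illustrative, not recommendations:

	cfg := &DefenderConfig{
		Enabled:            true,
		Driver:             DefenderDriverMemory,
		BanTime:            30, // minutes
		BanTimeIncrement:   50, // percent of BanTime
		Threshold:          15, // must be above each single-event score
		ScoreInvalid:       2,
		ScoreValid:         1,
		ScoreLimitExceeded: 3,
		ObservationTime:    30, // minutes
		EntriesSoftLimit:   100,
		EntriesHardLimit:   150, // must be greater than the soft limit
	}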

type baseDefender struct {
	config *DefenderConfig
	sync.RWMutex
	safeList  *HostList
	blockList *HostList
}

// Reload reloads block and safe lists
func (d *baseDefender) Reload() error {
	blockList, err := loadHostListFromFile(d.config.BlockListFile)
	if err != nil {
		return err
	}
	blockList = addEntriesToList(d.config.BlockList, blockList, "blocklist")

	d.Lock()
	d.blockList = blockList
	d.Unlock()

	safeList, err := loadHostListFromFile(d.config.SafeListFile)
	if err != nil {
		return err
	}
	safeList = addEntriesToList(d.config.SafeList, safeList, "safelist")

	d.Lock()
	d.safeList = safeList
	d.Unlock()

	return nil
}

func (d *baseDefender) isBanned(ip string) bool {
	if d.blockList != nil && d.blockList.isListed(ip) {
		// permanent ban
		return true
	}

	return false
}

func (d *baseDefender) getScore(event HostEvent) int {
	var score int

	switch event {
	case HostEventLoginFailed:
		score = d.config.ScoreValid
	case HostEventLimitExceeded:
		score = d.config.ScoreLimitExceeded
	case HostEventUserNotFound, HostEventNoLoginTried:
		score = d.config.ScoreInvalid
	}
	return score
}

// HostListFile defines the structure expected for safe/block list files
type HostListFile struct {
	IPAddresses  []string `json:"addresses"`
	CIDRNetworks []string `json:"networks"`
}
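Given the JSON tags above, a safe/block list file is expected to look like the following (the addresses and networks here are illustrative placeholders):

	{
		"addresses": ["192.0.2.10", "192.0.2.11"],
		"networks": ["198.51.100.0/24"]
	}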

// HostList defines the structure used to keep the HostListFile in memory
type HostList struct {
	IPAddresses map[string]bool
	Ranges      cidranger.Ranger
}

func (h *HostList) isListed(ip string) bool {
	if _, ok := h.IPAddresses[ip]; ok {
		return true
	}

	ok, err := h.Ranges.Contains(net.ParseIP(ip))
	if err != nil {
		return false
	}

	return ok
}

type hostEvent struct {
	dateTime time.Time
	score    int
}

type hostScore struct {
	TotalScore int
	Events     []hostEvent
}

// validate returns an error if the configuration is invalid
func (c *DefenderConfig) validate() error {
	if !c.Enabled {
		return nil
	}
	if c.ScoreInvalid >= c.Threshold {
		return fmt.Errorf("score_invalid %v cannot be greater than threshold %v", c.ScoreInvalid, c.Threshold)
	}
	if c.ScoreValid >= c.Threshold {
		return fmt.Errorf("score_valid %v cannot be greater than threshold %v", c.ScoreValid, c.Threshold)
	}
	if c.ScoreLimitExceeded >= c.Threshold {
		return fmt.Errorf("score_limit_exceeded %v cannot be greater than threshold %v", c.ScoreLimitExceeded, c.Threshold)
	}
	if c.BanTime <= 0 {
		return fmt.Errorf("invalid ban_time %v", c.BanTime)
	}
	if c.BanTimeIncrement <= 0 {
		return fmt.Errorf("invalid ban_time_increment %v", c.BanTimeIncrement)
	}
	if c.ObservationTime <= 0 {
		return fmt.Errorf("invalid observation_time %v", c.ObservationTime)
	}
	if c.EntriesSoftLimit <= 0 {
		return fmt.Errorf("invalid entries_soft_limit %v", c.EntriesSoftLimit)
	}
	if c.EntriesHardLimit <= c.EntriesSoftLimit {
		return fmt.Errorf("invalid entries_hard_limit %v must be > %v", c.EntriesHardLimit, c.EntriesSoftLimit)
	}

	return nil
}

func loadHostListFromFile(name string) (*HostList, error) {
	if name == "" {
		return nil, nil
	}
	if !util.IsFileInputValid(name) {
		return nil, fmt.Errorf("invalid host list file name %#v", name)
	}

	info, err := os.Stat(name)
	if err != nil {
		return nil, err
	}

	// opinionated max size, you should avoid big host lists
	if info.Size() > 1048576*5 { // 5MB
		return nil, fmt.Errorf("host list file %#v is too big: %v bytes", name, info.Size())
	}

	content, err := os.ReadFile(name)
	if err != nil {
		return nil, fmt.Errorf("unable to read input file %#v: %v", name, err)
	}

	var hostList HostListFile

	err = json.Unmarshal(content, &hostList)
	if err != nil {
		return nil, err
	}

	if len(hostList.CIDRNetworks) > 0 || len(hostList.IPAddresses) > 0 {
		result := &HostList{
			IPAddresses: make(map[string]bool),
			Ranges:      cidranger.NewPCTrieRanger(),
		}
		ipCount := 0
		cdrCount := 0
		for _, ip := range hostList.IPAddresses {
			if net.ParseIP(ip) == nil {
				logger.Warn(logSender, "", "unable to parse IP %#v", ip)
				continue
			}
			result.IPAddresses[ip] = true
			ipCount++
		}
		for _, cidrNet := range hostList.CIDRNetworks {
			_, network, err := net.ParseCIDR(cidrNet)
			if err != nil {
				logger.Warn(logSender, "", "unable to parse CIDR network %#v: %v", cidrNet, err)
				continue
			}
			err = result.Ranges.Insert(cidranger.NewBasicRangerEntry(*network))
			if err == nil {
				cdrCount++
			}
		}

		logger.Info(logSender, "", "list %#v loaded, ip addresses loaded: %v/%v networks loaded: %v/%v",
			name, ipCount, len(hostList.IPAddresses), cdrCount, len(hostList.CIDRNetworks))
		return result, nil
	}

	return nil, nil
}

func addEntriesToList(entries []string, hostList *HostList, listName string) *HostList {
	if len(entries) == 0 {
		return hostList
	}

	if hostList == nil {
		hostList = &HostList{
			IPAddresses: make(map[string]bool),
			Ranges:      cidranger.NewPCTrieRanger(),
		}
	}
	ipCount := 0
	ipLoaded := 0
	cdrCount := 0
	cdrLoaded := 0

	for _, entry := range entries {
		entry = strings.TrimSpace(entry)
		if strings.LastIndex(entry, "/") > 0 {
			cdrCount++
			_, network, err := net.ParseCIDR(entry)
			if err != nil {
				logger.Warn(logSender, "", "unable to parse CIDR network %#v: %v", entry, err)
				continue
			}
			err = hostList.Ranges.Insert(cidranger.NewBasicRangerEntry(*network))
			if err == nil {
				cdrLoaded++
			}
		} else {
			ipCount++
			if net.ParseIP(entry) == nil {
				logger.Warn(logSender, "", "unable to parse IP %#v", entry)
				continue
			}
			hostList.IPAddresses[entry] = true
			ipLoaded++
		}
	}
	logger.Info(logSender, "", "%s from config loaded, ip addresses loaded: %v/%v networks loaded: %v/%v",
		listName, ipLoaded, ipCount, cdrLoaded, cdrCount)

	return hostList
}
common/defender_test.go (new file)
@@ -0,0 +1,715 @@
// Copyright (C) 2019-2022 Nicola Murino
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, version 3.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

package common

import (
	"crypto/rand"
	"encoding/hex"
	"encoding/json"
	"fmt"
	"net"
	"os"
	"path/filepath"
	"runtime"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"github.com/yl2chen/cidranger"
)

func TestBasicDefender(t *testing.T) {
	bl := HostListFile{
		IPAddresses:  []string{"172.16.1.1", "172.16.1.2"},
		CIDRNetworks: []string{"10.8.0.0/24"},
	}
	sl := HostListFile{
		IPAddresses:  []string{"172.16.1.3", "172.16.1.4"},
		CIDRNetworks: []string{"192.168.8.0/24"},
	}
	blFile := filepath.Join(os.TempDir(), "bl.json")
	slFile := filepath.Join(os.TempDir(), "sl.json")

	data, err := json.Marshal(bl)
	assert.NoError(t, err)

	err = os.WriteFile(blFile, data, os.ModePerm)
	assert.NoError(t, err)

	data, err = json.Marshal(sl)
	assert.NoError(t, err)

	err = os.WriteFile(slFile, data, os.ModePerm)
	assert.NoError(t, err)

	config := &DefenderConfig{
		Enabled:            true,
		BanTime:            10,
		BanTimeIncrement:   2,
		Threshold:          5,
		ScoreInvalid:       2,
		ScoreValid:         1,
		ScoreLimitExceeded: 3,
		ObservationTime:    15,
		EntriesSoftLimit:   1,
		EntriesHardLimit:   2,
		SafeListFile:       "slFile",
		BlockListFile:      "blFile",
		SafeList:           []string{"192.168.1.3", "192.168.1.4", "192.168.9.0/24"},
		BlockList:          []string{"192.168.1.1", "192.168.1.2", "10.8.9.0/24"},
	}

	_, err = newInMemoryDefender(config)
	assert.Error(t, err)
	config.BlockListFile = blFile
	_, err = newInMemoryDefender(config)
	assert.Error(t, err)
	config.SafeListFile = slFile
	d, err := newInMemoryDefender(config)
	assert.NoError(t, err)

	defender := d.(*memoryDefender)
	assert.True(t, defender.IsBanned("172.16.1.1"))
	assert.True(t, defender.IsBanned("192.168.1.1"))
	assert.False(t, defender.IsBanned("172.16.1.10"))
	assert.False(t, defender.IsBanned("192.168.1.10"))
	assert.False(t, defender.IsBanned("10.8.2.3"))
	assert.False(t, defender.IsBanned("10.9.2.3"))
	assert.True(t, defender.IsBanned("10.8.0.3"))
	assert.True(t, defender.IsBanned("10.8.9.3"))
	assert.False(t, defender.IsBanned("invalid ip"))
	assert.Equal(t, 0, defender.countBanned())
	assert.Equal(t, 0, defender.countHosts())
	hosts, err := defender.GetHosts()
	assert.NoError(t, err)
	assert.Len(t, hosts, 0)
	_, err = defender.GetHost("10.8.0.4")
	assert.Error(t, err)

	defender.AddEvent("172.16.1.4", HostEventLoginFailed)
	defender.AddEvent("192.168.1.4", HostEventLoginFailed)
	defender.AddEvent("192.168.8.4", HostEventUserNotFound)
	defender.AddEvent("172.16.1.3", HostEventLimitExceeded)
	defender.AddEvent("192.168.1.3", HostEventLimitExceeded)
	assert.Equal(t, 0, defender.countHosts())

	testIP := "12.34.56.78"
	defender.AddEvent(testIP, HostEventLoginFailed)
	assert.Equal(t, 1, defender.countHosts())
	assert.Equal(t, 0, defender.countBanned())
	score, err := defender.GetScore(testIP)
	assert.NoError(t, err)
	assert.Equal(t, 1, score)
	hosts, err = defender.GetHosts()
	assert.NoError(t, err)
	if assert.Len(t, hosts, 1) {
		assert.Equal(t, 1, hosts[0].Score)
		assert.True(t, hosts[0].BanTime.IsZero())
		assert.Empty(t, hosts[0].GetBanTime())
	}
	host, err := defender.GetHost(testIP)
	assert.NoError(t, err)
	assert.Equal(t, 1, host.Score)
	assert.Empty(t, host.GetBanTime())
	banTime, err := defender.GetBanTime(testIP)
	assert.NoError(t, err)
	assert.Nil(t, banTime)
	defender.AddEvent(testIP, HostEventLimitExceeded)
	assert.Equal(t, 1, defender.countHosts())
	assert.Equal(t, 0, defender.countBanned())
	score, err = defender.GetScore(testIP)
	assert.NoError(t, err)
	assert.Equal(t, 4, score)
	hosts, err = defender.GetHosts()
	assert.NoError(t, err)
	if assert.Len(t, hosts, 1) {
		assert.Equal(t, 4, hosts[0].Score)
		assert.True(t, hosts[0].BanTime.IsZero())
		assert.Empty(t, hosts[0].GetBanTime())
	}
	defender.AddEvent(testIP, HostEventNoLoginTried)
	defender.AddEvent(testIP, HostEventNoLoginTried)
	assert.Equal(t, 0, defender.countHosts())
	assert.Equal(t, 1, defender.countBanned())
	score, err = defender.GetScore(testIP)
	assert.NoError(t, err)
	assert.Equal(t, 0, score)
	banTime, err = defender.GetBanTime(testIP)
	assert.NoError(t, err)
	assert.NotNil(t, banTime)
	hosts, err = defender.GetHosts()
	assert.NoError(t, err)
	if assert.Len(t, hosts, 1) {
		assert.Equal(t, 0, hosts[0].Score)
		assert.False(t, hosts[0].BanTime.IsZero())
		assert.NotEmpty(t, hosts[0].GetBanTime())
		assert.Equal(t, hex.EncodeToString([]byte(testIP)), hosts[0].GetID())
	}
	host, err = defender.GetHost(testIP)
	assert.NoError(t, err)
	assert.Equal(t, 0, host.Score)
	assert.NotEmpty(t, host.GetBanTime())

	// now test cleanup, testIP is already banned
	testIP1 := "12.34.56.79"
	testIP2 := "12.34.56.80"
	testIP3 := "12.34.56.81"

	defender.AddEvent(testIP1, HostEventNoLoginTried)
	defender.AddEvent(testIP2, HostEventNoLoginTried)
	assert.Equal(t, 2, defender.countHosts())
	time.Sleep(20 * time.Millisecond)
	defender.AddEvent(testIP3, HostEventNoLoginTried)
	assert.Equal(t, defender.config.EntriesSoftLimit, defender.countHosts())
	// testIP1 and testIP2 should be removed
	assert.Equal(t, defender.config.EntriesSoftLimit, defender.countHosts())
	score, err = defender.GetScore(testIP1)
	assert.NoError(t, err)
	assert.Equal(t, 0, score)
	score, err = defender.GetScore(testIP2)
	assert.NoError(t, err)
	assert.Equal(t, 0, score)
	score, err = defender.GetScore(testIP3)
	assert.NoError(t, err)
	assert.Equal(t, 2, score)

	defender.AddEvent(testIP3, HostEventNoLoginTried)
	defender.AddEvent(testIP3, HostEventNoLoginTried)
	// IP3 is now banned
	banTime, err = defender.GetBanTime(testIP3)
	assert.NoError(t, err)
	assert.NotNil(t, banTime)
	assert.Equal(t, 0, defender.countHosts())

	time.Sleep(20 * time.Millisecond)
	for i := 0; i < 3; i++ {
		defender.AddEvent(testIP1, HostEventNoLoginTried)
	}
	assert.Equal(t, 0, defender.countHosts())
	assert.Equal(t, config.EntriesSoftLimit, defender.countBanned())
	banTime, err = defender.GetBanTime(testIP)
	assert.NoError(t, err)
	assert.Nil(t, banTime)
	banTime, err = defender.GetBanTime(testIP3)
	assert.NoError(t, err)
	assert.Nil(t, banTime)
	banTime, err = defender.GetBanTime(testIP1)
	assert.NoError(t, err)
	assert.NotNil(t, banTime)

	for i := 0; i < 3; i++ {
		defender.AddEvent(testIP, HostEventNoLoginTried)
		time.Sleep(10 * time.Millisecond)
		defender.AddEvent(testIP3, HostEventNoLoginTried)
	}
	assert.Equal(t, 0, defender.countHosts())
	assert.Equal(t, defender.config.EntriesSoftLimit, defender.countBanned())

	banTime, err = defender.GetBanTime(testIP3)
	assert.NoError(t, err)
	if assert.NotNil(t, banTime) {
		assert.True(t, defender.IsBanned(testIP3))
		// ban time should increase
		newBanTime, err := defender.GetBanTime(testIP3)
		assert.NoError(t, err)
		assert.True(t, newBanTime.After(*banTime))
	}

	assert.True(t, defender.DeleteHost(testIP3))
	assert.False(t, defender.DeleteHost(testIP3))

	err = os.Remove(slFile)
	assert.NoError(t, err)
	err = os.Remove(blFile)
	assert.NoError(t, err)
}

func TestExpiredHostBans(t *testing.T) {
	config := &DefenderConfig{
		Enabled:            true,
		BanTime:            10,
		BanTimeIncrement:   2,
		Threshold:          5,
		ScoreInvalid:       2,
		ScoreValid:         1,
		ScoreLimitExceeded: 3,
		ObservationTime:    15,
		EntriesSoftLimit:   1,
		EntriesHardLimit:   2,
	}

	d, err := newInMemoryDefender(config)
	assert.NoError(t, err)

	defender := d.(*memoryDefender)

	testIP := "1.2.3.4"
	defender.banned[testIP] = time.Now().Add(-24 * time.Hour)

	// the ban is expired, testIP should not be listed
	res, err := defender.GetHosts()
	assert.NoError(t, err)
	assert.Len(t, res, 0)

	assert.False(t, defender.IsBanned(testIP))
	_, err = defender.GetHost(testIP)
	assert.Error(t, err)
	_, ok := defender.banned[testIP]
	assert.True(t, ok)
	// now add an event for an expired banned IP, it should be removed
	defender.AddEvent(testIP, HostEventLoginFailed)
	assert.False(t, defender.IsBanned(testIP))
	entry, err := defender.GetHost(testIP)
	assert.NoError(t, err)
	assert.Equal(t, testIP, entry.IP)
	assert.Empty(t, entry.GetBanTime())
	assert.Equal(t, 1, entry.Score)

	res, err = defender.GetHosts()
	assert.NoError(t, err)
	if assert.Len(t, res, 1) {
		assert.Equal(t, testIP, res[0].IP)
		assert.Empty(t, res[0].GetBanTime())
		assert.Equal(t, 1, res[0].Score)
	}

	events := []hostEvent{
		{
			dateTime: time.Now().Add(-24 * time.Hour),
			score:    2,
		},
		{
			dateTime: time.Now().Add(-24 * time.Hour),
			score:    3,
		},
	}

	hs := hostScore{
		Events:     events,
		TotalScore: 5,
	}

	defender.hosts[testIP] = hs
	// the recorded scores are too old
	res, err = defender.GetHosts()
	assert.NoError(t, err)
	assert.Len(t, res, 0)
	_, err = defender.GetHost(testIP)
	assert.Error(t, err)
	_, ok = defender.hosts[testIP]
	assert.True(t, ok)
}

func TestLoadHostListFromFile(t *testing.T) {
	_, err := loadHostListFromFile(".")
	assert.Error(t, err)

	hostsFilePath := filepath.Join(os.TempDir(), "hostfile")
	content := make([]byte, 1048576*6)
	_, err = rand.Read(content)
	assert.NoError(t, err)

	err = os.WriteFile(hostsFilePath, content, os.ModePerm)
	assert.NoError(t, err)

	_, err = loadHostListFromFile(hostsFilePath)
	assert.Error(t, err)

	hl := HostListFile{
		IPAddresses:  []string{},
		CIDRNetworks: []string{},
	}

	asJSON, err := json.Marshal(hl)
	assert.NoError(t, err)
	err = os.WriteFile(hostsFilePath, asJSON, os.ModePerm)
	assert.NoError(t, err)

	hostList, err := loadHostListFromFile(hostsFilePath)
	assert.NoError(t, err)
	assert.Nil(t, hostList)

	hl.IPAddresses = append(hl.IPAddresses, "invalidip")
	asJSON, err = json.Marshal(hl)
	assert.NoError(t, err)
	err = os.WriteFile(hostsFilePath, asJSON, os.ModePerm)
	assert.NoError(t, err)

	hostList, err = loadHostListFromFile(hostsFilePath)
	assert.NoError(t, err)
	assert.Len(t, hostList.IPAddresses, 0)

	hl.IPAddresses = nil
	hl.CIDRNetworks = append(hl.CIDRNetworks, "invalid net")

	asJSON, err = json.Marshal(hl)
	assert.NoError(t, err)
	err = os.WriteFile(hostsFilePath, asJSON, os.ModePerm)
	assert.NoError(t, err)

	hostList, err = loadHostListFromFile(hostsFilePath)
	assert.NoError(t, err)
	assert.NotNil(t, hostList)
	assert.Len(t, hostList.IPAddresses, 0)
	assert.Equal(t, 0, hostList.Ranges.Len())

	if runtime.GOOS != "windows" {
		err = os.Chmod(hostsFilePath, 0111)
		assert.NoError(t, err)

		_, err = loadHostListFromFile(hostsFilePath)
		assert.Error(t, err)

		err = os.Chmod(hostsFilePath, 0644)
		assert.NoError(t, err)
	}

	err = os.WriteFile(hostsFilePath, []byte("non json content"), os.ModePerm)
	assert.NoError(t, err)
	_, err = loadHostListFromFile(hostsFilePath)
	assert.Error(t, err)

	err = os.Remove(hostsFilePath)
	assert.NoError(t, err)
}

func TestAddEntriesToHostList(t *testing.T) {
	name := "testList"
	hostlist := addEntriesToList([]string{"192.168.6.1", "10.7.0.0/25"}, nil, name)
	require.NotNil(t, hostlist)
	assert.True(t, hostlist.isListed("192.168.6.1"))
	assert.False(t, hostlist.isListed("192.168.6.2"))
	assert.True(t, hostlist.isListed("10.7.0.28"))
	assert.False(t, hostlist.isListed("10.7.0.129"))
	// load invalid values
	hostlist = addEntriesToList([]string{"invalidip", "invalidnet/24"}, nil, name)
	require.NotNil(t, hostlist)
	assert.Len(t, hostlist.IPAddresses, 0)
	assert.Equal(t, 0, hostlist.Ranges.Len())
}

func TestDefenderCleanup(t *testing.T) {
	d := memoryDefender{
		baseDefender: baseDefender{
			config: &DefenderConfig{
				ObservationTime:  1,
				EntriesSoftLimit: 2,
				EntriesHardLimit: 3,
			},
		},
		banned: make(map[string]time.Time),
		hosts:  make(map[string]hostScore),
	}

	d.banned["1.1.1.1"] = time.Now().Add(-24 * time.Hour)
	d.banned["1.1.1.2"] = time.Now().Add(-24 * time.Hour)
	d.banned["1.1.1.3"] = time.Now().Add(-24 * time.Hour)
	d.banned["1.1.1.4"] = time.Now().Add(-24 * time.Hour)

	d.cleanupBanned()
	assert.Equal(t, 0, d.countBanned())

	d.banned["2.2.2.2"] = time.Now().Add(2 * time.Minute)
	d.banned["2.2.2.3"] = time.Now().Add(1 * time.Minute)
	d.banned["2.2.2.4"] = time.Now().Add(3 * time.Minute)
	d.banned["2.2.2.5"] = time.Now().Add(4 * time.Minute)

	d.cleanupBanned()
	assert.Equal(t, d.config.EntriesSoftLimit, d.countBanned())
	banTime, err := d.GetBanTime("2.2.2.3")
	assert.NoError(t, err)
	assert.Nil(t, banTime)

	d.hosts["3.3.3.3"] = hostScore{
		TotalScore: 0,
		Events: []hostEvent{
			{
				dateTime: time.Now().Add(-5 * time.Minute),
				score:    1,
			},
			{
				dateTime: time.Now().Add(-3 * time.Minute),
				score:    1,
			},
			{
				dateTime: time.Now(),
				score:    1,
			},
		},
	}
	d.hosts["3.3.3.4"] = hostScore{
		TotalScore: 1,
		Events: []hostEvent{
			{
				dateTime: time.Now().Add(-3 * time.Minute),
				score:    1,
			},
		},
	}
	d.hosts["3.3.3.5"] = hostScore{
		TotalScore: 1,
		Events: []hostEvent{
			{
				dateTime: time.Now().Add(-2 * time.Minute),
				score:    1,
			},
		},
	}
	d.hosts["3.3.3.6"] = hostScore{
		TotalScore: 1,
		Events: []hostEvent{
			{
				dateTime: time.Now().Add(-1 * time.Minute),
				score:    1,
			},
		},
	}

	score, err := d.GetScore("3.3.3.3")
	assert.NoError(t, err)
	assert.Equal(t, 1, score)

	d.cleanupHosts()
	assert.Equal(t, d.config.EntriesSoftLimit, d.countHosts())
	score, err = d.GetScore("3.3.3.4")
	assert.NoError(t, err)
	assert.Equal(t, 0, score)
}

func TestDefenderConfig(t *testing.T) {
	c := DefenderConfig{}
	err := c.validate()
	require.NoError(t, err)

	c.Enabled = true
	c.Threshold = 10
	c.ScoreInvalid = 10
	err = c.validate()
	require.Error(t, err)

	c.ScoreInvalid = 2
	c.ScoreLimitExceeded = 10
	err = c.validate()
	require.Error(t, err)

	c.ScoreLimitExceeded = 2
	c.ScoreValid = 10
	err = c.validate()
	require.Error(t, err)

	c.ScoreValid = 1
	c.BanTime = 0
	err = c.validate()
	require.Error(t, err)

	c.BanTime = 30
	c.BanTimeIncrement = 0
	err = c.validate()
	require.Error(t, err)

	c.BanTimeIncrement = 50
	c.ObservationTime = 0
	err = c.validate()
	require.Error(t, err)

	c.ObservationTime = 30
	err = c.validate()
	require.Error(t, err)

	c.EntriesSoftLimit = 10
	err = c.validate()
	require.Error(t, err)

	c.EntriesHardLimit = 10
	err = c.validate()
	require.Error(t, err)

	c.EntriesHardLimit = 20
	err = c.validate()
	require.NoError(t, err)
}

func BenchmarkDefenderBannedSearch(b *testing.B) {
	d := getDefenderForBench()

	ip, ipnet, err := net.ParseCIDR("10.8.0.0/12") // 1048574 ip addresses
	if err != nil {
		panic(err)
	}

	for ip := ip.Mask(ipnet.Mask); ipnet.Contains(ip); inc(ip) {
		d.banned[ip.String()] = time.Now().Add(10 * time.Minute)
	}

	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		d.IsBanned("192.168.1.1")
	}
}

func BenchmarkCleanup(b *testing.B) {
	d := getDefenderForBench()

	ip, ipnet, err := net.ParseCIDR("192.168.4.0/24")
	if err != nil {
		panic(err)
	}

	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		for ip := ip.Mask(ipnet.Mask); ipnet.Contains(ip); inc(ip) {
			d.AddEvent(ip.String(), HostEventLoginFailed)
			if d.countHosts() > d.config.EntriesHardLimit {
				panic("too many hosts")
			}
			if d.countBanned() > d.config.EntriesSoftLimit {
				panic("too many ip banned")
			}
		}
	}
}

func BenchmarkDefenderBannedSearchWithBlockList(b *testing.B) {
	d := getDefenderForBench()

	d.blockList = &HostList{
		IPAddresses: make(map[string]bool),
		Ranges:      cidranger.NewPCTrieRanger(),
	}

	ip, ipnet, err := net.ParseCIDR("129.8.0.0/12") // 1048574 ip addresses
	if err != nil {
		panic(err)
	}

	for ip := ip.Mask(ipnet.Mask); ipnet.Contains(ip); inc(ip) {
		d.banned[ip.String()] = time.Now().Add(10 * time.Minute)
		d.blockList.IPAddresses[ip.String()] = true
	}

	for i := 0; i < 255; i++ {
		cidr := fmt.Sprintf("10.8.%v.1/24", i)
		_, network, _ := net.ParseCIDR(cidr)
		if err := d.blockList.Ranges.Insert(cidranger.NewBasicRangerEntry(*network)); err != nil {
			panic(err)
		}
	}

	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		d.IsBanned("192.168.1.1")
	}
}

func BenchmarkHostListSearch(b *testing.B) {
	hostlist := &HostList{
		IPAddresses: make(map[string]bool),
		Ranges:      cidranger.NewPCTrieRanger(),
	}

	ip, ipnet, _ := net.ParseCIDR("172.16.0.0/16")

	for ip := ip.Mask(ipnet.Mask); ipnet.Contains(ip); inc(ip) {
		hostlist.IPAddresses[ip.String()] = true
	}

	for i := 0; i < 255; i++ {
		cidr := fmt.Sprintf("10.8.%v.1/24", i)
		_, network, _ := net.ParseCIDR(cidr)
		if err := hostlist.Ranges.Insert(cidranger.NewBasicRangerEntry(*network)); err != nil {
			panic(err)
		}
	}

	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		if hostlist.isListed("192.167.1.2") {
			panic("should not be listed")
		}
	}
}

func BenchmarkCIDRanger(b *testing.B) {
	ranger := cidranger.NewPCTrieRanger()
	for i := 0; i < 255; i++ {
		cidr := fmt.Sprintf("192.168.%v.1/24", i)
		_, network, _ := net.ParseCIDR(cidr)
		if err := ranger.Insert(cidranger.NewBasicRangerEntry(*network)); err != nil {
			panic(err)
		}
	}

	ipToMatch := net.ParseIP("192.167.1.2")

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		if _, err := ranger.Contains(ipToMatch); err != nil {
			panic(err)
		}
	}
}

func BenchmarkNetContains(b *testing.B) {
	var nets []*net.IPNet
	for i := 0; i < 255; i++ {
		cidr := fmt.Sprintf("192.168.%v.1/24", i)
		_, network, _ := net.ParseCIDR(cidr)
		nets = append(nets, network)
	}

	ipToMatch := net.ParseIP("192.167.1.1")

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		for _, n := range nets {
			n.Contains(ipToMatch)
		}
	}
}

func getDefenderForBench() *memoryDefender {
	config := &DefenderConfig{
		Enabled:          true,
		BanTime:          30,
		BanTimeIncrement: 50,
		Threshold:        10,
		ScoreInvalid:     2,
		ScoreValid:       2,
		ObservationTime:  30,
		EntriesSoftLimit: 50,
		EntriesHardLimit: 100,
	}
	return &memoryDefender{
		baseDefender: baseDefender{
			config: config,
		},
		hosts:  make(map[string]hostScore),
		banned: make(map[string]time.Time),
	}
}

func inc(ip net.IP) {
	for j := len(ip) - 1; j >= 0; j-- {
		ip[j]++
		if ip[j] > 0 {
			break
		}
	}
}
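inc increments an IP address in place with carry propagation, which is what lets the benchmarks above walk every address of a CIDR range. A quick standalone illustration (hypothetical usage, not taken from the diff):

	ip := net.ParseIP("10.8.0.255")
	inc(ip)
	fmt.Println(ip) // prints 10.8.1.0: the last byte wraps and the carry moves left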
common/defenderdb.go (new file)
@@ -0,0 +1,171 @@
// Copyright (C) 2019-2022 Nicola Murino
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, version 3.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

package common

import (
	"time"

	"github.com/drakkan/sftpgo/v2/dataprovider"
	"github.com/drakkan/sftpgo/v2/logger"
	"github.com/drakkan/sftpgo/v2/util"
)

type dbDefender struct {
	baseDefender
	lastCleanup time.Time
}

func newDBDefender(config *DefenderConfig) (Defender, error) {
	err := config.validate()
	if err != nil {
		return nil, err
	}
	defender := &dbDefender{
		baseDefender: baseDefender{
			config: config,
		},
		lastCleanup: time.Time{},
	}

	if err := defender.Reload(); err != nil {
		return nil, err
	}

	return defender, nil
}

// GetHosts returns hosts that are banned or for which some violations have been detected
func (d *dbDefender) GetHosts() ([]dataprovider.DefenderEntry, error) {
	return dataprovider.GetDefenderHosts(d.getStartObservationTime(), d.config.EntriesHardLimit)
}

// GetHost returns a defender host by ip, if any
func (d *dbDefender) GetHost(ip string) (dataprovider.DefenderEntry, error) {
	return dataprovider.GetDefenderHostByIP(ip, d.getStartObservationTime())
}

// IsBanned returns true if the specified IP is banned
// and increases the ban time if the IP is found.
// This method must be called as soon as the client connects
func (d *dbDefender) IsBanned(ip string) bool {
	d.RLock()
	if d.baseDefender.isBanned(ip) {
		d.RUnlock()
		return true
	}
	d.RUnlock()

	_, err := dataprovider.IsDefenderHostBanned(ip)
	if err != nil {
		// not found or another error, we allow this host
		return false
	}
	increment := d.config.BanTime * d.config.BanTimeIncrement / 100
	if increment == 0 {
		increment++
	}
	dataprovider.UpdateDefenderBanTime(ip, increment) //nolint:errcheck
	return true
}
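The increment computed above is integer percent math, so small configurations round down to zero and the code enforces a one-minute floor; otherwise a repeat offender could keep reconnecting at a fixed cadence with no penalty. A worked sketch of the same formula (banIncrementMinutes is a hypothetical helper name, the values come from the config fields above):

	// banIncrementMinutes shows the arithmetic used in IsBanned.
	func banIncrementMinutes(banTime, banTimeIncrement int) int {
		increment := banTime * banTimeIncrement / 100 // e.g. 30 * 50 / 100 = 15 minutes
		if increment == 0 {
			increment++ // e.g. 10 * 2 / 100 = 0 rounds down, raised to a 1 minute floor
		}
		return increment
	}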

// DeleteHost removes the specified IP from the defender lists
func (d *dbDefender) DeleteHost(ip string) bool {
	if _, err := d.GetHost(ip); err != nil {
		return false
	}
	return dataprovider.DeleteDefenderHost(ip) == nil
}

// AddEvent adds an event for the given IP.
// This method must be called for clients not yet banned
func (d *dbDefender) AddEvent(ip string, event HostEvent) {
	d.RLock()
	if d.safeList != nil && d.safeList.isListed(ip) {
		d.RUnlock()
		return
	}
	d.RUnlock()

	score := d.baseDefender.getScore(event)

	host, err := dataprovider.AddDefenderEvent(ip, score, d.getStartObservationTime())
	if err != nil {
		return
	}
	if host.Score > d.config.Threshold {
		banTime := time.Now().Add(time.Duration(d.config.BanTime) * time.Minute)
		err = dataprovider.SetDefenderBanTime(ip, util.GetTimeAsMsSinceEpoch(banTime))
	}

	if err == nil {
		d.cleanup()
	}
}

// GetBanTime returns the ban time for the given IP or nil if the IP is not banned
func (d *dbDefender) GetBanTime(ip string) (*time.Time, error) {
	host, err := d.GetHost(ip)
	if err != nil {
		return nil, err
	}
	if host.BanTime.IsZero() {
		return nil, nil
	}
	return &host.BanTime, nil
}

// GetScore returns the score for the given IP
func (d *dbDefender) GetScore(ip string) (int, error) {
	host, err := d.GetHost(ip)
	if err != nil {
		return 0, err
	}
	return host.Score, nil
}

func (d *dbDefender) cleanup() {
	lastCleanup := d.getLastCleanup()
	if lastCleanup.IsZero() || lastCleanup.Add(time.Duration(d.config.ObservationTime)*time.Minute*3).Before(time.Now()) {
		// FIXME: this could be racy in rare cases, but it is better than acquiring the lock
		// for the whole cleanup duration or always taking a read/write lock.
		// Concurrent cleanups can happen anyway from multiple SFTPGo instances and should not cause any issues
		d.setLastCleanup(time.Now())
		expireTime := time.Now().Add(-time.Duration(d.config.ObservationTime+1) * time.Minute)
		logger.Debug(logSender, "", "cleanup defender hosts before %v, last cleanup %v", expireTime, lastCleanup)
		if err := dataprovider.CleanupDefender(util.GetTimeAsMsSinceEpoch(expireTime)); err != nil {
			logger.Error(logSender, "", "defender cleanup error, reset last cleanup to %v", lastCleanup)
			d.setLastCleanup(lastCleanup)
		}
	}
}

func (d *dbDefender) getStartObservationTime() int64 {
	t := time.Now().Add(-time.Duration(d.config.ObservationTime) * time.Minute)
	return util.GetTimeAsMsSinceEpoch(t)
}

func (d *dbDefender) getLastCleanup() time.Time {
	d.RLock()
	defer d.RUnlock()

	return d.lastCleanup
}

func (d *dbDefender) setLastCleanup(when time.Time) {
	d.Lock()
	defer d.Unlock()

	d.lastCleanup = when
}
common/defenderdb_test.go (new file)
@@ -0,0 +1,311 @@
// Copyright (C) 2019-2022 Nicola Murino
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, version 3.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

package common

import (
	"encoding/hex"
	"encoding/json"
	"os"
	"path/filepath"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"

	"github.com/drakkan/sftpgo/v2/dataprovider"
	"github.com/drakkan/sftpgo/v2/util"
)

func TestBasicDbDefender(t *testing.T) {
	if !isDbDefenderSupported() {
		t.Skip("this test is not supported with the current database provider")
	}
	config := &DefenderConfig{
		Enabled:            true,
		BanTime:            10,
		BanTimeIncrement:   2,
		Threshold:          5,
		ScoreInvalid:       2,
		ScoreValid:         1,
		ScoreLimitExceeded: 3,
		ObservationTime:    15,
		EntriesSoftLimit:   1,
		EntriesHardLimit:   10,
		SafeListFile:       "slFile",
		BlockListFile:      "blFile",
	}
	_, err := newDBDefender(config)
	assert.Error(t, err)

	bl := HostListFile{
		IPAddresses:  []string{"172.16.1.1", "172.16.1.2"},
		CIDRNetworks: []string{"10.8.0.0/24"},
	}
	sl := HostListFile{
		IPAddresses:  []string{"172.16.1.3", "172.16.1.4"},
		CIDRNetworks: []string{"192.168.8.0/24"},
	}
	blFile := filepath.Join(os.TempDir(), "bl.json")
	slFile := filepath.Join(os.TempDir(), "sl.json")

	data, err := json.Marshal(bl)
	assert.NoError(t, err)
	err = os.WriteFile(blFile, data, os.ModePerm)
	assert.NoError(t, err)

	data, err = json.Marshal(sl)
	assert.NoError(t, err)
	err = os.WriteFile(slFile, data, os.ModePerm)
	assert.NoError(t, err)

	config.BlockListFile = blFile
	_, err = newDBDefender(config)
	assert.Error(t, err)
	config.SafeListFile = slFile
	d, err := newDBDefender(config)
	assert.NoError(t, err)
	defender := d.(*dbDefender)
	assert.True(t, defender.IsBanned("172.16.1.1"))
	assert.False(t, defender.IsBanned("172.16.1.10"))
	assert.False(t, defender.IsBanned("10.8.1.3"))
	assert.True(t, defender.IsBanned("10.8.0.4"))
	assert.False(t, defender.IsBanned("invalid ip"))
	hosts, err := defender.GetHosts()
	assert.NoError(t, err)
	assert.Len(t, hosts, 0)
	_, err = defender.GetHost("10.8.0.3")
	assert.Error(t, err)

	defender.AddEvent("172.16.1.4", HostEventLoginFailed)
	defender.AddEvent("192.168.8.4", HostEventUserNotFound)
	defender.AddEvent("172.16.1.3", HostEventLimitExceeded)
	hosts, err = defender.GetHosts()
	assert.NoError(t, err)
	assert.Len(t, hosts, 0)
	assert.True(t, defender.getLastCleanup().IsZero())

	testIP := "123.45.67.89"
	defender.AddEvent(testIP, HostEventLoginFailed)
	lastCleanup := defender.getLastCleanup()
	assert.False(t, lastCleanup.IsZero())
	score, err := defender.GetScore(testIP)
	assert.NoError(t, err)
	assert.Equal(t, 1, score)
	hosts, err = defender.GetHosts()
	assert.NoError(t, err)
	if assert.Len(t, hosts, 1) {
		assert.Equal(t, 1, hosts[0].Score)
		assert.True(t, hosts[0].BanTime.IsZero())
		assert.Empty(t, hosts[0].GetBanTime())
	}
	host, err := defender.GetHost(testIP)
	assert.NoError(t, err)
	assert.Equal(t, 1, host.Score)
	assert.Empty(t, host.GetBanTime())
	banTime, err := defender.GetBanTime(testIP)
	assert.NoError(t, err)
	assert.Nil(t, banTime)
	defender.AddEvent(testIP, HostEventLimitExceeded)
	score, err = defender.GetScore(testIP)
	assert.NoError(t, err)
	assert.Equal(t, 4, score)
	hosts, err = defender.GetHosts()
	assert.NoError(t, err)
	if assert.Len(t, hosts, 1) {
		assert.Equal(t, 4, hosts[0].Score)
		assert.True(t, hosts[0].BanTime.IsZero())
		assert.Empty(t, hosts[0].GetBanTime())
	}
	defender.AddEvent(testIP, HostEventNoLoginTried)
	defender.AddEvent(testIP, HostEventNoLoginTried)
	score, err = defender.GetScore(testIP)
	assert.NoError(t, err)
	assert.Equal(t, 0, score)
	banTime, err = defender.GetBanTime(testIP)
	assert.NoError(t, err)
	assert.NotNil(t, banTime)
	hosts, err = defender.GetHosts()
	assert.NoError(t, err)
	if assert.Len(t, hosts, 1) {
		assert.Equal(t, 0, hosts[0].Score)
		assert.False(t, hosts[0].BanTime.IsZero())
		assert.NotEmpty(t, hosts[0].GetBanTime())
		assert.Equal(t, hex.EncodeToString([]byte(testIP)), hosts[0].GetID())
	}
	host, err = defender.GetHost(testIP)
	assert.NoError(t, err)
	assert.Equal(t, 0, host.Score)
	assert.NotEmpty(t, host.GetBanTime())
	// ban time should increase
	assert.True(t, defender.IsBanned(testIP))
	newBanTime, err := defender.GetBanTime(testIP)
	assert.NoError(t, err)
	assert.True(t, newBanTime.After(*banTime))

	assert.True(t, defender.DeleteHost(testIP))
	assert.False(t, defender.DeleteHost(testIP))
	// test cleanup
	testIP1 := "123.45.67.90"
	testIP2 := "123.45.67.91"
	testIP3 := "123.45.67.92"
	for i := 0; i < 3; i++ {
		defender.AddEvent(testIP, HostEventNoLoginTried)
		defender.AddEvent(testIP1, HostEventNoLoginTried)
		defender.AddEvent(testIP2, HostEventNoLoginTried)
	}
	hosts, err = defender.GetHosts()
	assert.NoError(t, err)
	assert.Len(t, hosts, 3)
	for _, host := range hosts {
		assert.Equal(t, 0, host.Score)
		assert.False(t, host.BanTime.IsZero())
		assert.NotEmpty(t, host.GetBanTime())
	}
	defender.AddEvent(testIP3, HostEventLoginFailed)
	hosts, err = defender.GetHosts()
	assert.NoError(t, err)
	assert.Len(t, hosts, 4)
	// now set a ban time in the past, so the host will be cleaned up
	for _, ip := range []string{testIP1, testIP2} {
		err = dataprovider.SetDefenderBanTime(ip, util.GetTimeAsMsSinceEpoch(time.Now().Add(-1*time.Minute)))
		assert.NoError(t, err)
	}
	hosts, err = defender.GetHosts()
	assert.NoError(t, err)
	assert.Len(t, hosts, 4)
	for _, host := range hosts {
		switch host.IP {
		case testIP:
			assert.Equal(t, 0, host.Score)
			assert.False(t, host.BanTime.IsZero())
			assert.NotEmpty(t, host.GetBanTime())
		case testIP3:
			assert.Equal(t, 1, host.Score)
			assert.True(t, host.BanTime.IsZero())
			assert.Empty(t, host.GetBanTime())
		default:
			assert.Equal(t, 6, host.Score)
			assert.True(t, host.BanTime.IsZero())
			assert.Empty(t, host.GetBanTime())
		}
	}
	host, err = defender.GetHost(testIP)
	assert.NoError(t, err)
	assert.Equal(t, 0, host.Score)
	assert.False(t, host.BanTime.IsZero())
	assert.NotEmpty(t, host.GetBanTime())
	host, err = defender.GetHost(testIP3)
	assert.NoError(t, err)
	assert.Equal(t, 1, host.Score)
	assert.True(t, host.BanTime.IsZero())
	assert.Empty(t, host.GetBanTime())
	// set a negative observation time so the from field in the queries will be in the future
	// we still should get the banned hosts
	defender.config.ObservationTime = -2
	assert.Greater(t, defender.getStartObservationTime(), time.Now().UnixMilli())
	hosts, err = defender.GetHosts()
	assert.NoError(t, err)
	if assert.Len(t, hosts, 1) {
		assert.Equal(t, testIP, hosts[0].IP)
		assert.Equal(t, 0, hosts[0].Score)
		assert.False(t, hosts[0].BanTime.IsZero())
		assert.NotEmpty(t, hosts[0].GetBanTime())
	}
	_, err = defender.GetHost(testIP)
	assert.NoError(t, err)
	// cleanup db
	err = dataprovider.CleanupDefender(util.GetTimeAsMsSinceEpoch(time.Now().Add(10 * time.Minute)))
	assert.NoError(t, err)
	// the banned host must still be there
	hosts, err = defender.GetHosts()
	assert.NoError(t, err)
	if assert.Len(t, hosts, 1) {
		assert.Equal(t, testIP, hosts[0].IP)
		assert.Equal(t, 0, hosts[0].Score)
		assert.False(t, hosts[0].BanTime.IsZero())
		assert.NotEmpty(t, hosts[0].GetBanTime())
	}
	_, err = defender.GetHost(testIP)
	assert.NoError(t, err)
	err = dataprovider.SetDefenderBanTime(testIP, util.GetTimeAsMsSinceEpoch(time.Now().Add(-1*time.Minute)))
	assert.NoError(t, err)
	err = dataprovider.CleanupDefender(util.GetTimeAsMsSinceEpoch(time.Now().Add(10 * time.Minute)))
	assert.NoError(t, err)
	hosts, err = defender.GetHosts()
	assert.NoError(t, err)
	assert.Len(t, hosts, 0)

	err = os.Remove(slFile)
	assert.NoError(t, err)
	err = os.Remove(blFile)
	assert.NoError(t, err)
}

func TestDbDefenderCleanup(t *testing.T) {
	if !isDbDefenderSupported() {
		t.Skip("this test is not supported with the current database provider")
	}
	config := &DefenderConfig{
		Enabled:            true,
		BanTime:            10,
		BanTimeIncrement:   2,
		Threshold:          5,
		ScoreInvalid:       2,
		ScoreValid:         1,
		ScoreLimitExceeded: 3,
		ObservationTime:    15,
		EntriesSoftLimit:   1,
		EntriesHardLimit:   10,
	}
	d, err := newDBDefender(config)
	assert.NoError(t, err)
	defender := d.(*dbDefender)
	lastCleanup := defender.getLastCleanup()
	assert.True(t, lastCleanup.IsZero())
	defender.cleanup()
	lastCleanup = defender.getLastCleanup()
	assert.False(t, lastCleanup.IsZero())
	defender.cleanup()
	assert.Equal(t, lastCleanup, defender.getLastCleanup())
	defender.setLastCleanup(time.Now().Add(-time.Duration(config.ObservationTime) * time.Minute * 4))
	time.Sleep(20 * time.Millisecond)
	defender.cleanup()
	assert.True(t, lastCleanup.Before(defender.getLastCleanup()))

	providerConf := dataprovider.GetProviderConfig()
	err = dataprovider.Close()
	assert.NoError(t, err)

	lastCleanup = time.Now().Add(-time.Duration(config.ObservationTime) * time.Minute * 4)
	defender.setLastCleanup(lastCleanup)
	defender.cleanup()
	// cleanup will fail and so last cleanup should be reset to the previous value
	assert.Equal(t, lastCleanup, defender.getLastCleanup())

	err = dataprovider.Initialize(providerConf, configDir, true)
	assert.NoError(t, err)
}

func isDbDefenderSupported() bool {
	// SQLite shares the implementation with the other SQL-based providers but it makes no sense
	// to use it outside test cases
	switch dataprovider.GetProviderStatus().Driver {
	case dataprovider.MySQLDataProviderName, dataprovider.PGSQLDataProviderName,
		dataprovider.CockroachDataProviderName, dataprovider.SQLiteDataProviderName:
		return true
	default:
		return false
	}
}
common/defendermem.go (new file)
@@ -0,0 +1,340 @@
// Copyright (C) 2019-2022 Nicola Murino
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, version 3.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

package common

import (
	"sort"
	"time"

	"github.com/drakkan/sftpgo/v2/dataprovider"
	"github.com/drakkan/sftpgo/v2/util"
)

type memoryDefender struct {
	baseDefender
	// IP addresses of the clients trying to connect are stored inside hosts;
	// they are added to banned once the threshold is reached.
	// A violation from a banned host will increase the ban time
	// based on the configured BanTimeIncrement
	hosts  map[string]hostScore // the key is the host IP
	banned map[string]time.Time // the key is the host IP
}

func newInMemoryDefender(config *DefenderConfig) (Defender, error) {
	err := config.validate()
	if err != nil {
		return nil, err
	}
	defender := &memoryDefender{
		baseDefender: baseDefender{
			config: config,
		},
		hosts:  make(map[string]hostScore),
		banned: make(map[string]time.Time),
	}

	if err := defender.Reload(); err != nil {
		return nil, err
	}

	return defender, nil
}

// GetHosts returns hosts that are banned or for which some violations have been detected
func (d *memoryDefender) GetHosts() ([]dataprovider.DefenderEntry, error) {
	d.RLock()
	defer d.RUnlock()

	var result []dataprovider.DefenderEntry
	for k, v := range d.banned {
		if v.After(time.Now()) {
			result = append(result, dataprovider.DefenderEntry{
				IP:      k,
				BanTime: v,
			})
		}
	}
	for k, v := range d.hosts {
		score := 0
		for _, event := range v.Events {
			if event.dateTime.Add(time.Duration(d.config.ObservationTime) * time.Minute).After(time.Now()) {
				score += event.score
			}
		}
		if score > 0 {
			result = append(result, dataprovider.DefenderEntry{
				IP:    k,
				Score: score,
			})
		}
	}

	return result, nil
}

// GetHost returns a defender host by ip, if any
func (d *memoryDefender) GetHost(ip string) (dataprovider.DefenderEntry, error) {
	d.RLock()
	defer d.RUnlock()

	if banTime, ok := d.banned[ip]; ok {
		if banTime.After(time.Now()) {
			return dataprovider.DefenderEntry{
				IP:      ip,
				BanTime: banTime,
			}, nil
		}
	}

	if hs, ok := d.hosts[ip]; ok {
		score := 0
		for _, event := range hs.Events {
			if event.dateTime.Add(time.Duration(d.config.ObservationTime) * time.Minute).After(time.Now()) {
				score += event.score
			}
		}
		if score > 0 {
			return dataprovider.DefenderEntry{
				IP:    ip,
				Score: score,
			}, nil
		}
	}

	return dataprovider.DefenderEntry{}, util.NewRecordNotFoundError("host not found")
}

// IsBanned returns true if the specified IP is banned
// and increases the ban time if the IP is found.
// This method must be called as soon as the client connects
func (d *memoryDefender) IsBanned(ip string) bool {
	d.RLock()

	if banTime, ok := d.banned[ip]; ok {
		if banTime.After(time.Now()) {
			increment := d.config.BanTime * d.config.BanTimeIncrement / 100
			if increment == 0 {
				increment++
			}

			d.RUnlock()

			// we can save an earlier ban time if there are concurrent updates,
			// but this should not make much difference. I prefer to hold a read lock
			// as long as possible for performance reasons: this method is called each
			// time a new client connects and it must be as fast as possible
			d.Lock()
			d.banned[ip] = banTime.Add(time.Duration(increment) * time.Minute)
			d.Unlock()

			return true
		}
	}

	defer d.RUnlock()

	return d.baseDefender.isBanned(ip)
}

// DeleteHost removes the specified IP from the defender lists
func (d *memoryDefender) DeleteHost(ip string) bool {
	d.Lock()
	defer d.Unlock()

	if _, ok := d.banned[ip]; ok {
		delete(d.banned, ip)
		return true
	}

	if _, ok := d.hosts[ip]; ok {
		delete(d.hosts, ip)
		return true
	}

	return false
}

// AddEvent adds an event for the given IP.
// This method must be called for clients not yet banned
func (d *memoryDefender) AddEvent(ip string, event HostEvent) {
	d.Lock()
	defer d.Unlock()

	if d.safeList != nil && d.safeList.isListed(ip) {
		return
	}

	// ignore events for already banned hosts
	if v, ok := d.banned[ip]; ok {
		if v.After(time.Now()) {
			return
		}
		delete(d.banned, ip)
	}

	score := d.baseDefender.getScore(event)

	ev := hostEvent{
		dateTime: time.Now(),
		score:    score,
	}

	if hs, ok := d.hosts[ip]; ok {
		hs.Events = append(hs.Events, ev)
		hs.TotalScore = 0

		idx := 0
		for _, event := range hs.Events {
			if event.dateTime.Add(time.Duration(d.config.ObservationTime) * time.Minute).After(time.Now()) {
				hs.Events[idx] = event
				hs.TotalScore += event.score
				idx++
			}
		}

		hs.Events = hs.Events[:idx]
		if hs.TotalScore >= d.config.Threshold {
			d.banned[ip] = time.Now().Add(time.Duration(d.config.BanTime) * time.Minute)
			delete(d.hosts, ip)
			d.cleanupBanned()
		} else {
			d.hosts[ip] = hs
		}
	} else {
		d.hosts[ip] = hostScore{
			TotalScore: ev.score,
			Events:     []hostEvent{ev},
		}
		d.cleanupHosts()
	}
}
|
||||
|
||||
func (d *memoryDefender) countBanned() int {
|
||||
d.RLock()
|
||||
defer d.RUnlock()
|
||||
|
||||
return len(d.banned)
|
||||
}
|
||||
|
||||
func (d *memoryDefender) countHosts() int {
|
||||
d.RLock()
|
||||
defer d.RUnlock()
|
||||
|
||||
return len(d.hosts)
|
||||
}
|
||||
|
||||
// GetBanTime returns the ban time for the given IP or nil if the IP is not banned
|
||||
func (d *memoryDefender) GetBanTime(ip string) (*time.Time, error) {
|
||||
d.RLock()
|
||||
defer d.RUnlock()
|
||||
|
||||
if banTime, ok := d.banned[ip]; ok {
|
||||
return &banTime, nil
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// GetScore returns the score for the given IP
|
||||
func (d *memoryDefender) GetScore(ip string) (int, error) {
|
||||
d.RLock()
|
||||
defer d.RUnlock()
|
||||
|
||||
score := 0
|
||||
|
||||
if hs, ok := d.hosts[ip]; ok {
|
||||
for _, event := range hs.Events {
|
||||
if event.dateTime.Add(time.Duration(d.config.ObservationTime) * time.Minute).After(time.Now()) {
|
||||
score += event.score
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return score, nil
|
||||
}
|
||||
|
||||
func (d *memoryDefender) cleanupBanned() {
|
||||
if len(d.banned) > d.config.EntriesHardLimit {
|
||||
kvList := make(kvList, 0, len(d.banned))
|
||||
|
||||
for k, v := range d.banned {
|
||||
if v.Before(time.Now()) {
|
||||
delete(d.banned, k)
|
||||
}
|
||||
|
||||
kvList = append(kvList, kv{
|
||||
Key: k,
|
||||
Value: v.UnixNano(),
|
||||
})
|
||||
}
|
||||
|
||||
// we removed expired ip addresses, if any, above, this could be enough
|
||||
numToRemove := len(d.banned) - d.config.EntriesSoftLimit
|
||||
|
||||
if numToRemove <= 0 {
|
||||
return
|
||||
}
|
||||
|
||||
sort.Sort(kvList)
|
||||
|
||||
for idx, kv := range kvList {
|
||||
if idx >= numToRemove {
|
||||
break
|
||||
}
|
||||
|
||||
delete(d.banned, kv.Key)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (d *memoryDefender) cleanupHosts() {
|
||||
if len(d.hosts) > d.config.EntriesHardLimit {
|
||||
kvList := make(kvList, 0, len(d.hosts))
|
||||
|
||||
for k, v := range d.hosts {
|
||||
value := int64(0)
|
||||
if len(v.Events) > 0 {
|
||||
value = v.Events[len(v.Events)-1].dateTime.UnixNano()
|
||||
}
|
||||
kvList = append(kvList, kv{
|
||||
Key: k,
|
||||
Value: value,
|
||||
})
|
||||
}
|
||||
|
||||
sort.Sort(kvList)
|
||||
|
||||
numToRemove := len(d.hosts) - d.config.EntriesSoftLimit
|
||||
|
||||
for idx, kv := range kvList {
|
||||
if idx >= numToRemove {
|
||||
break
|
||||
}
|
||||
|
||||
delete(d.hosts, kv.Key)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type kv struct {
|
||||
Key string
|
||||
Value int64
|
||||
}
|
||||
|
||||
type kvList []kv
|
||||
|
||||
func (p kvList) Len() int { return len(p) }
|
||||
func (p kvList) Less(i, j int) bool { return p[i].Value < p[j].Value }
|
||||
func (p kvList) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
|
||||
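
The cleanup helpers above share a simple eviction pattern: collect map keys with an int64 sort key, sort ascending, and drop the oldest entries until the soft limit is reached. A minimal, standalone sketch of that pattern; the kv/kvList types mirror the diff, while the map contents are hypothetical examples:

package main

import (
	"fmt"
	"sort"
	"time"
)

type kv struct {
	Key   string
	Value int64
}

type kvList []kv

func (p kvList) Len() int           { return len(p) }
func (p kvList) Less(i, j int) bool { return p[i].Value < p[j].Value }
func (p kvList) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }

func main() {
	softLimit := 2
	// hypothetical banned map: IP -> ban expiration
	banned := map[string]time.Time{
		"10.0.0.1": time.Now().Add(1 * time.Minute),
		"10.0.0.2": time.Now().Add(5 * time.Minute),
		"10.0.0.3": time.Now().Add(10 * time.Minute),
	}

	list := make(kvList, 0, len(banned))
	for k, v := range banned {
		list = append(list, kv{Key: k, Value: v.UnixNano()})
	}
	sort.Sort(list)

	// evict the entries expiring soonest until only softLimit remain
	numToRemove := len(banned) - softLimit
	for idx, entry := range list {
		if idx >= numToRemove {
			break
		}
		delete(banned, entry.Key)
	}
	fmt.Println(len(banned)) // 2: the ban expiring soonest was evicted
}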
common/httpauth.go (new file, 148 lines)
@@ -0,0 +1,148 @@
// Copyright (C) 2019-2022 Nicola Murino
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, version 3.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

package common

import (
	"encoding/csv"
	"os"
	"strings"
	"sync"

	"github.com/GehirnInc/crypt/apr1_crypt"
	"github.com/GehirnInc/crypt/md5_crypt"
	"golang.org/x/crypto/bcrypt"

	"github.com/drakkan/sftpgo/v2/logger"
	"github.com/drakkan/sftpgo/v2/util"
)

const (
	// HTTPAuthenticationHeader defines the HTTP authentication header
	HTTPAuthenticationHeader = "WWW-Authenticate"
	md5CryptPwdPrefix        = "$1$"
	apr1CryptPwdPrefix       = "$apr1$"
)

var (
	bcryptPwdPrefixes = []string{"$2a$", "$2$", "$2x$", "$2y$", "$2b$"}
)

// HTTPAuthProvider defines the interface for HTTP auth providers
type HTTPAuthProvider interface {
	ValidateCredentials(username, password string) bool
	IsEnabled() bool
}

type basicAuthProvider struct {
	Path string
	sync.RWMutex
	Info  os.FileInfo
	Users map[string]string
}

// NewBasicAuthProvider returns an HTTPAuthProvider implementing Basic Auth
func NewBasicAuthProvider(authUserFile string) (HTTPAuthProvider, error) {
	basicAuthProvider := basicAuthProvider{
		Path:  authUserFile,
		Info:  nil,
		Users: make(map[string]string),
	}
	return &basicAuthProvider, basicAuthProvider.loadUsers()
}

func (p *basicAuthProvider) IsEnabled() bool {
	return p.Path != ""
}

func (p *basicAuthProvider) isReloadNeeded(info os.FileInfo) bool {
	p.RLock()
	defer p.RUnlock()

	return p.Info == nil || p.Info.ModTime() != info.ModTime() || p.Info.Size() != info.Size()
}

func (p *basicAuthProvider) loadUsers() error {
	if !p.IsEnabled() {
		return nil
	}
	info, err := os.Stat(p.Path)
	if err != nil {
		logger.Debug(logSender, "", "unable to stat basic auth users file: %v", err)
		return err
	}
	if p.isReloadNeeded(info) {
		r, err := os.Open(p.Path)
		if err != nil {
			logger.Debug(logSender, "", "unable to open basic auth users file: %v", err)
			return err
		}
		defer r.Close()
		reader := csv.NewReader(r)
		reader.Comma = ':'
		reader.Comment = '#'
		reader.TrimLeadingSpace = true
		records, err := reader.ReadAll()
		if err != nil {
			logger.Debug(logSender, "", "unable to parse basic auth users file: %v", err)
			return err
		}
		p.Lock()
		defer p.Unlock()

		p.Users = make(map[string]string)
		for _, record := range records {
			if len(record) == 2 {
				p.Users[record[0]] = record[1]
			}
		}
		logger.Debug(logSender, "", "number of users loaded for httpd basic auth: %v", len(p.Users))
		p.Info = info
	}
	return nil
}

func (p *basicAuthProvider) getHashedPassword(username string) (string, bool) {
	err := p.loadUsers()
	if err != nil {
		return "", false
	}
	p.RLock()
	defer p.RUnlock()

	pwd, ok := p.Users[username]
	return pwd, ok
}

// ValidateCredentials returns true if the credentials are valid
func (p *basicAuthProvider) ValidateCredentials(username, password string) bool {
	if hashedPwd, ok := p.getHashedPassword(username); ok {
		if util.IsStringPrefixInSlice(hashedPwd, bcryptPwdPrefixes) {
			err := bcrypt.CompareHashAndPassword([]byte(hashedPwd), []byte(password))
			return err == nil
		}
		if strings.HasPrefix(hashedPwd, md5CryptPwdPrefix) {
			crypter := md5_crypt.New()
			err := crypter.Verify(hashedPwd, []byte(password))
			return err == nil
		}
		if strings.HasPrefix(hashedPwd, apr1CryptPwdPrefix) {
			crypter := apr1_crypt.New()
			err := crypter.Verify(hashedPwd, []byte(password))
			return err == nil
		}
	}

	return false
}
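
The users file parsed above is a colon-separated "username:hash" list, with bcrypt, md5-crypt, and apr1-crypt hashes accepted. A minimal sketch of producing a compatible bcrypt entry, using only golang.org/x/crypto/bcrypt, which httpauth.go already imports; the username and password are made-up examples:

package main

import (
	"fmt"

	"golang.org/x/crypto/bcrypt"
)

func main() {
	// generate a bcrypt hash for the example password
	hash, err := bcrypt.GenerateFromPassword([]byte("password1"), bcrypt.MinCost)
	if err != nil {
		panic(err)
	}
	// append this line to the file passed to NewBasicAuthProvider
	fmt.Printf("test1:%s\n", hash)
}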
common/httpauth_test.go (new file, 85 lines)
@@ -0,0 +1,85 @@
// Copyright (C) 2019-2022 Nicola Murino
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, version 3.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

package common

import (
	"os"
	"path/filepath"
	"runtime"
	"testing"

	"github.com/stretchr/testify/require"
)

func TestBasicAuth(t *testing.T) {
	httpAuth, err := NewBasicAuthProvider("")
	require.NoError(t, err)
	require.False(t, httpAuth.IsEnabled())

	_, err = NewBasicAuthProvider("missing path")
	require.Error(t, err)

	authUserFile := filepath.Join(os.TempDir(), "http_users.txt")
	authUserData := []byte("test1:$2y$05$bcHSED7aO1cfLto6ZdDBOOKzlwftslVhtpIkRhAtSa4GuLmk5mola\n")
	err = os.WriteFile(authUserFile, authUserData, os.ModePerm)
	require.NoError(t, err)

	httpAuth, err = NewBasicAuthProvider(authUserFile)
	require.NoError(t, err)
	require.True(t, httpAuth.IsEnabled())
	require.False(t, httpAuth.ValidateCredentials("test1", "wrong1"))
	require.False(t, httpAuth.ValidateCredentials("test2", "password2"))
	require.True(t, httpAuth.ValidateCredentials("test1", "password1"))

	authUserData = append(authUserData, []byte("test2:$1$OtSSTL8b$bmaCqEksI1e7rnZSjsIDR1\n")...)
	err = os.WriteFile(authUserFile, authUserData, os.ModePerm)
	require.NoError(t, err)
	require.False(t, httpAuth.ValidateCredentials("test2", "wrong2"))
	require.True(t, httpAuth.ValidateCredentials("test2", "password2"))

	authUserData = append(authUserData, []byte("test2:$apr1$gLnIkRIf$Xr/6aJfmIrihP4b2N2tcs/\n")...)
	err = os.WriteFile(authUserFile, authUserData, os.ModePerm)
	require.NoError(t, err)
	require.False(t, httpAuth.ValidateCredentials("test2", "wrong2"))
	require.True(t, httpAuth.ValidateCredentials("test2", "password2"))

	authUserData = append(authUserData, []byte("test3:$apr1$gLnIkRIf$Xr/6aJfmIrihP4b2N2tcs/\n")...)
	err = os.WriteFile(authUserFile, authUserData, os.ModePerm)
	require.NoError(t, err)
	require.False(t, httpAuth.ValidateCredentials("test3", "password3"))

	authUserData = append(authUserData, []byte("test4:$invalid$gLnIkRIf$Xr/6$aJfmIr$ihP4b2N2tcs/\n")...)
	err = os.WriteFile(authUserFile, authUserData, os.ModePerm)
	require.NoError(t, err)
	require.False(t, httpAuth.ValidateCredentials("test4", "password3"))

	if runtime.GOOS != "windows" {
		authUserData = append(authUserData, []byte("test5:$apr1$gLnIkRIf$Xr/6aJfmIrihP4b2N2tcs/\n")...)
		err = os.WriteFile(authUserFile, authUserData, os.ModePerm)
		require.NoError(t, err)
		err = os.Chmod(authUserFile, 0001)
		require.NoError(t, err)
		require.False(t, httpAuth.ValidateCredentials("test5", "password2"))
		err = os.Chmod(authUserFile, os.ModePerm)
		require.NoError(t, err)
	}
	authUserData = append(authUserData, []byte("\"foo\"bar\"\r\n")...)
	err = os.WriteFile(authUserFile, authUserData, os.ModePerm)
	require.NoError(t, err)
	require.False(t, httpAuth.ValidateCredentials("test2", "password2"))

	err = os.Remove(authUserFile)
	require.NoError(t, err)
}
common/protocol_test.go (new file, 3840 lines)
File diff suppressed because it is too large.
common/ratelimiter.go (new file, 257 lines)
@@ -0,0 +1,257 @@
// Copyright (C) 2019-2022 Nicola Murino
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, version 3.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

package common

import (
	"errors"
	"fmt"
	"net"
	"sort"
	"sync"
	"sync/atomic"
	"time"

	"golang.org/x/time/rate"

	"github.com/drakkan/sftpgo/v2/util"
)

var (
	errNoBucket               = errors.New("no bucket found")
	errReserve                = errors.New("unable to reserve token")
	rateLimiterProtocolValues = []string{ProtocolSSH, ProtocolFTP, ProtocolWebDAV, ProtocolHTTP}
)

// RateLimiterType defines the supported rate limiter types
type RateLimiterType int

// Supported rate limiter types
const (
	rateLimiterTypeGlobal RateLimiterType = iota + 1
	rateLimiterTypeSource
)

// RateLimiterConfig defines the configuration for a rate limiter
type RateLimiterConfig struct {
	// Average defines the maximum rate allowed. 0 means disabled
	Average int64 `json:"average" mapstructure:"average"`
	// Period defines the period as milliseconds. Default: 1000 (1 second).
	// The rate is actually defined by dividing average by period.
	// So for a rate below 1 req/s, one needs to define a period larger than a second.
	Period int64 `json:"period" mapstructure:"period"`
	// Burst is the maximum number of requests allowed to go through in the
	// same arbitrarily small period of time. Default: 1.
	Burst int `json:"burst" mapstructure:"burst"`
	// Type defines the rate limiter type:
	// - rateLimiterTypeGlobal is a global rate limiter independent from the source
	// - rateLimiterTypeSource is a per-source rate limiter
	Type int `json:"type" mapstructure:"type"`
	// Protocols defines the protocols for this rate limiter.
	// Available protocols are: "SSH", "FTP", "DAV", "HTTP".
	// A rate limiter with no protocols defined is disabled
	Protocols []string `json:"protocols" mapstructure:"protocols"`
	// AllowList defines a list of IP addresses and IP ranges excluded from rate limiting
	AllowList []string `json:"allow_list" mapstructure:"allow_list"`
	// If the rate limit is exceeded, the defender is enabled, and this is a per-source limiter,
	// a new defender event will be generated
	GenerateDefenderEvents bool `json:"generate_defender_events" mapstructure:"generate_defender_events"`
	// The number of per-ip rate limiters kept in memory will vary between the
	// soft and hard limit
	EntriesSoftLimit int `json:"entries_soft_limit" mapstructure:"entries_soft_limit"`
	EntriesHardLimit int `json:"entries_hard_limit" mapstructure:"entries_hard_limit"`
}

func (r *RateLimiterConfig) isEnabled() bool {
	return r.Average > 0 && len(r.Protocols) > 0
}

func (r *RateLimiterConfig) validate() error {
	if r.Burst < 1 {
		return fmt.Errorf("invalid burst %v. It must be >= 1", r.Burst)
	}
	if r.Period < 100 {
		return fmt.Errorf("invalid period %v. It must be >= 100", r.Period)
	}
	if r.Type != int(rateLimiterTypeGlobal) && r.Type != int(rateLimiterTypeSource) {
		return fmt.Errorf("invalid type %v", r.Type)
	}
	if r.Type != int(rateLimiterTypeGlobal) {
		if r.EntriesSoftLimit <= 0 {
			return fmt.Errorf("invalid entries_soft_limit %v", r.EntriesSoftLimit)
		}
		if r.EntriesHardLimit <= r.EntriesSoftLimit {
			return fmt.Errorf("invalid entries_hard_limit %v must be > %v", r.EntriesHardLimit, r.EntriesSoftLimit)
		}
	}
	r.Protocols = util.RemoveDuplicates(r.Protocols, true)
	for _, protocol := range r.Protocols {
		if !util.Contains(rateLimiterProtocolValues, protocol) {
			return fmt.Errorf("invalid protocol %#v", protocol)
		}
	}
	return nil
}

func (r *RateLimiterConfig) getLimiter() *rateLimiter {
	limiter := &rateLimiter{
		burst:                  r.Burst,
		globalBucket:           nil,
		generateDefenderEvents: r.GenerateDefenderEvents,
	}
	var maxDelay time.Duration
	period := time.Duration(r.Period) * time.Millisecond
	rtl := float64(r.Average*int64(time.Second)) / float64(period)
	limiter.rate = rate.Limit(rtl)
	if rtl < 1 {
		maxDelay = period / 2
	} else {
		maxDelay = time.Second / (time.Duration(rtl) * 2)
	}
	if maxDelay > 10*time.Second {
		maxDelay = 10 * time.Second
	}
	limiter.maxDelay = maxDelay
	limiter.buckets = sourceBuckets{
		buckets:   make(map[string]sourceRateLimiter),
		hardLimit: r.EntriesHardLimit,
		softLimit: r.EntriesSoftLimit,
	}
	if r.Type != int(rateLimiterTypeSource) {
		limiter.globalBucket = rate.NewLimiter(limiter.rate, limiter.burst)
	}
	return limiter
}

// rateLimiter defines a rate limiter
type rateLimiter struct {
	rate                   rate.Limit
	burst                  int
	maxDelay               time.Duration
	globalBucket           *rate.Limiter
	buckets                sourceBuckets
	generateDefenderEvents bool
	allowList              []func(net.IP) bool
}

// Wait blocks until the limit allows one event to happen
// or returns an error if the time to wait exceeds the max
// allowed delay
func (rl *rateLimiter) Wait(source string) (time.Duration, error) {
	if len(rl.allowList) > 0 {
		ip := net.ParseIP(source)
		if ip != nil {
			for idx := range rl.allowList {
				if rl.allowList[idx](ip) {
					return 0, nil
				}
			}
		}
	}
	var res *rate.Reservation
	if rl.globalBucket != nil {
		res = rl.globalBucket.Reserve()
	} else {
		var err error
		res, err = rl.buckets.reserve(source)
		if err != nil {
			rateLimiter := rate.NewLimiter(rl.rate, rl.burst)
			res = rl.buckets.addAndReserve(rateLimiter, source)
		}
	}
	if !res.OK() {
		return 0, errReserve
	}
	delay := res.Delay()
	if delay > rl.maxDelay {
		res.Cancel()
		if rl.generateDefenderEvents && rl.globalBucket == nil {
			AddDefenderEvent(source, HostEventLimitExceeded)
		}
		return delay, fmt.Errorf("rate limit exceeded, wait time to respect rate %v, max wait time allowed %v", delay, rl.maxDelay)
	}
	time.Sleep(delay)
	return 0, nil
}

type sourceRateLimiter struct {
	lastActivity int64
	bucket       *rate.Limiter
}

func (s *sourceRateLimiter) updateLastActivity() {
	atomic.StoreInt64(&s.lastActivity, time.Now().UnixNano())
}

func (s *sourceRateLimiter) getLastActivity() int64 {
	return atomic.LoadInt64(&s.lastActivity)
}

type sourceBuckets struct {
	sync.RWMutex
	buckets   map[string]sourceRateLimiter
	hardLimit int
	softLimit int
}

func (b *sourceBuckets) reserve(source string) (*rate.Reservation, error) {
	b.RLock()
	defer b.RUnlock()

	if src, ok := b.buckets[source]; ok {
		src.updateLastActivity()
		return src.bucket.Reserve(), nil
	}

	return nil, errNoBucket
}

func (b *sourceBuckets) addAndReserve(r *rate.Limiter, source string) *rate.Reservation {
	b.Lock()
	defer b.Unlock()

	b.cleanup()

	src := sourceRateLimiter{
		bucket: r,
	}
	src.updateLastActivity()
	b.buckets[source] = src
	return src.bucket.Reserve()
}

func (b *sourceBuckets) cleanup() {
	if len(b.buckets) >= b.hardLimit {
		numToRemove := len(b.buckets) - b.softLimit

		kvList := make(kvList, 0, len(b.buckets))

		for k, v := range b.buckets {
			kvList = append(kvList, kv{
				Key:   k,
				Value: v.getLastActivity(),
			})
		}

		sort.Sort(kvList)

		for idx, kv := range kvList {
			if idx >= numToRemove {
				break
			}

			delete(b.buckets, kv.Key)
		}
	}
}
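
getLimiter above derives the token rate as average/period and caps the tolerated queueing delay: half the period for sub-1 req/s rates, otherwise half the per-token interval, never more than 10 seconds. A standalone sketch of that math with hypothetical config values; the printed results match the delays asserted in ratelimiter_test.go below:

package main

import (
	"fmt"
	"time"
)

// maxDelay reproduces the delay computation from getLimiter
func maxDelay(average, periodMs int64) time.Duration {
	period := time.Duration(periodMs) * time.Millisecond
	rtl := float64(average*int64(time.Second)) / float64(period)
	var d time.Duration
	if rtl < 1 {
		d = period / 2
	} else {
		d = time.Second / (time.Duration(rtl) * 2)
	}
	if d > 10*time.Second {
		d = 10 * time.Second
	}
	return d
}

func main() {
	fmt.Println(maxDelay(100, 1000)) // 100 req/s  -> 5ms
	fmt.Println(maxDelay(1, 10000))  // 0.1 req/s  -> 5s
	fmt.Println(maxDelay(1, 100000)) // 0.01 req/s -> capped at 10s
}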
common/ratelimiter_test.go (new file, 162 lines)
@@ -0,0 +1,162 @@
// Copyright (C) 2019-2022 Nicola Murino
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, version 3.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

package common

import (
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/drakkan/sftpgo/v2/util"
)

func TestRateLimiterConfig(t *testing.T) {
	config := RateLimiterConfig{}
	err := config.validate()
	require.Error(t, err)
	config.Burst = 1
	config.Period = 10
	err = config.validate()
	require.Error(t, err)
	config.Period = 1000
	config.Type = 100
	err = config.validate()
	require.Error(t, err)
	config.Type = int(rateLimiterTypeSource)
	config.EntriesSoftLimit = 0
	err = config.validate()
	require.Error(t, err)
	config.EntriesSoftLimit = 150
	config.EntriesHardLimit = 0
	err = config.validate()
	require.Error(t, err)
	config.EntriesHardLimit = 200
	config.Protocols = []string{"unsupported protocol"}
	err = config.validate()
	require.Error(t, err)
	config.Protocols = rateLimiterProtocolValues
	err = config.validate()
	require.NoError(t, err)

	limiter := config.getLimiter()
	require.Equal(t, 500*time.Millisecond, limiter.maxDelay)
	require.Nil(t, limiter.globalBucket)
	config.Type = int(rateLimiterTypeGlobal)
	config.Average = 1
	config.Period = 10000
	limiter = config.getLimiter()
	require.Equal(t, 5*time.Second, limiter.maxDelay)
	require.NotNil(t, limiter.globalBucket)
	config.Period = 100000
	limiter = config.getLimiter()
	require.Equal(t, 10*time.Second, limiter.maxDelay)
	config.Period = 500
	config.Average = 1
	limiter = config.getLimiter()
	require.Equal(t, 250*time.Millisecond, limiter.maxDelay)
}

func TestRateLimiter(t *testing.T) {
	config := RateLimiterConfig{
		Average:   1,
		Period:    1000,
		Burst:     1,
		Type:      int(rateLimiterTypeGlobal),
		Protocols: rateLimiterProtocolValues,
	}
	limiter := config.getLimiter()
	_, err := limiter.Wait("")
	require.NoError(t, err)
	_, err = limiter.Wait("")
	require.Error(t, err)

	config.Type = int(rateLimiterTypeSource)
	config.GenerateDefenderEvents = true
	config.EntriesSoftLimit = 5
	config.EntriesHardLimit = 10
	limiter = config.getLimiter()

	source := "192.168.1.2"
	_, err = limiter.Wait(source)
	require.NoError(t, err)
	_, err = limiter.Wait(source)
	require.Error(t, err)
	// a different source should work
	_, err = limiter.Wait(source + "1")
	require.NoError(t, err)

	allowList := []string{"192.168.1.0/24"}
	allowFuncs, err := util.ParseAllowedIPAndRanges(allowList)
	assert.NoError(t, err)
	limiter.allowList = allowFuncs
	for i := 0; i < 5; i++ {
		_, err = limiter.Wait(source)
		require.NoError(t, err)
	}
	_, err = limiter.Wait("not an ip")
	require.NoError(t, err)

	config.Burst = 0
	limiter = config.getLimiter()
	_, err = limiter.Wait(source)
	require.ErrorIs(t, err, errReserve)
}

func TestLimiterCleanup(t *testing.T) {
	config := RateLimiterConfig{
		Average:          100,
		Period:           1000,
		Burst:            1,
		Type:             int(rateLimiterTypeSource),
		Protocols:        rateLimiterProtocolValues,
		EntriesSoftLimit: 1,
		EntriesHardLimit: 3,
	}
	limiter := config.getLimiter()
	source1 := "10.8.0.1"
	source2 := "10.8.0.2"
	source3 := "10.8.0.3"
	source4 := "10.8.0.4"
	_, err := limiter.Wait(source1)
	assert.NoError(t, err)
	time.Sleep(20 * time.Millisecond)
	_, err = limiter.Wait(source2)
	assert.NoError(t, err)
	time.Sleep(20 * time.Millisecond)
	assert.Len(t, limiter.buckets.buckets, 2)
	_, ok := limiter.buckets.buckets[source1]
	assert.True(t, ok)
	_, ok = limiter.buckets.buckets[source2]
	assert.True(t, ok)
	_, err = limiter.Wait(source3)
	assert.NoError(t, err)
	assert.Len(t, limiter.buckets.buckets, 3)
	_, ok = limiter.buckets.buckets[source1]
	assert.True(t, ok)
	_, ok = limiter.buckets.buckets[source2]
	assert.True(t, ok)
	_, ok = limiter.buckets.buckets[source3]
	assert.True(t, ok)
	time.Sleep(20 * time.Millisecond)
	_, err = limiter.Wait(source4)
	assert.NoError(t, err)
	assert.Len(t, limiter.buckets.buckets, 2)
	_, ok = limiter.buckets.buckets[source3]
	assert.True(t, ok)
	_, ok = limiter.buckets.buckets[source4]
	assert.True(t, ok)
}
common/tlsutils.go (new file, 243 lines)
@@ -0,0 +1,243 @@
// Copyright (C) 2019-2022 Nicola Murino
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, version 3.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

package common

import (
	"crypto/tls"
	"crypto/x509"
	"crypto/x509/pkix"
	"errors"
	"fmt"
	"os"
	"path/filepath"
	"sync"
	"time"

	"github.com/drakkan/sftpgo/v2/logger"
	"github.com/drakkan/sftpgo/v2/util"
)

const (
	// DefaultTLSKeyPaidID defines the id to use for non-binding-specific key pairs
	DefaultTLSKeyPaidID = "default"
)

// TLSKeyPair defines the paths and the unique identifier for a TLS key pair
type TLSKeyPair struct {
	Cert string
	Key  string
	ID   string
}

// CertManager defines a TLS certificate manager
type CertManager struct {
	keyPairs  []TLSKeyPair
	configDir string
	logSender string
	sync.RWMutex
	caCertificates    []string
	caRevocationLists []string
	certs             map[string]*tls.Certificate
	rootCAs           *x509.CertPool
	crls              []*pkix.CertificateList
}

// Reload tries to reload certificates and CRLs
func (m *CertManager) Reload() error {
	errCrt := m.loadCertificates()
	errCRLs := m.LoadCRLs()

	if errCrt != nil {
		return errCrt
	}
	return errCRLs
}

// loadCertificates tries to load the configured x509 key pairs
func (m *CertManager) loadCertificates() error {
	if len(m.keyPairs) == 0 {
		return errors.New("no key pairs defined")
	}
	certs := make(map[string]*tls.Certificate)
	for _, keyPair := range m.keyPairs {
		if keyPair.ID == "" {
			return errors.New("TLS certificate without ID")
		}
		newCert, err := tls.LoadX509KeyPair(keyPair.Cert, keyPair.Key)
		if err != nil {
			logger.Warn(m.logSender, "", "unable to load X509 key pair, cert file %#v key file %#v error: %v",
				keyPair.Cert, keyPair.Key, err)
			return err
		}
		if _, ok := certs[keyPair.ID]; ok {
			return fmt.Errorf("TLS certificate with id %#v is duplicated", keyPair.ID)
		}
		logger.Debug(m.logSender, "", "TLS certificate %#v successfully loaded, id %v", keyPair.Cert, keyPair.ID)
		certs[keyPair.ID] = &newCert
	}

	m.Lock()
	defer m.Unlock()

	m.certs = certs
	return nil
}

// GetCertificateFunc returns a function that retrieves the loaded certificate for the given ID
func (m *CertManager) GetCertificateFunc(certID string) func(*tls.ClientHelloInfo) (*tls.Certificate, error) {
	return func(clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) {
		m.RLock()
		defer m.RUnlock()

		val, ok := m.certs[certID]
		if !ok {
			return nil, fmt.Errorf("no certificate for id %v", certID)
		}

		return val, nil
	}
}

// IsRevoked returns true if the specified certificate has been revoked
func (m *CertManager) IsRevoked(crt *x509.Certificate, caCrt *x509.Certificate) bool {
	m.RLock()
	defer m.RUnlock()

	if crt == nil || caCrt == nil {
		logger.Warn(m.logSender, "", "unable to verify crt %v ca crt %v", crt, caCrt)
		return len(m.crls) > 0
	}

	for _, crl := range m.crls {
		if !crl.HasExpired(time.Now()) && caCrt.CheckCRLSignature(crl) == nil {
			for _, rc := range crl.TBSCertList.RevokedCertificates {
				if rc.SerialNumber.Cmp(crt.SerialNumber) == 0 {
					return true
				}
			}
		}
	}

	return false
}

// LoadCRLs tries to load certificate revocation lists from the given paths
func (m *CertManager) LoadCRLs() error {
	if len(m.caRevocationLists) == 0 {
		return nil
	}

	var crls []*pkix.CertificateList

	for _, revocationList := range m.caRevocationLists {
		if !util.IsFileInputValid(revocationList) {
			return fmt.Errorf("invalid root CA revocation list %#v", revocationList)
		}
		if revocationList != "" && !filepath.IsAbs(revocationList) {
			revocationList = filepath.Join(m.configDir, revocationList)
		}
		crlBytes, err := os.ReadFile(revocationList)
		if err != nil {
			logger.Warn(m.logSender, "", "unable to read revocation list %#v", revocationList)
			return err
		}
		crl, err := x509.ParseCRL(crlBytes)
		if err != nil {
			logger.Warn(m.logSender, "", "unable to parse revocation list %#v", revocationList)
			return err
		}

		logger.Debug(m.logSender, "", "CRL %#v successfully loaded", revocationList)
		crls = append(crls, crl)
	}

	m.Lock()
	defer m.Unlock()

	m.crls = crls

	return nil
}

// GetRootCAs returns the set of root certificate authorities that servers
// use if required to verify a client certificate
func (m *CertManager) GetRootCAs() *x509.CertPool {
	m.RLock()
	defer m.RUnlock()

	return m.rootCAs
}

// LoadRootCAs tries to load root CA certificate authorities from the given paths
func (m *CertManager) LoadRootCAs() error {
	if len(m.caCertificates) == 0 {
		return nil
	}

	rootCAs := x509.NewCertPool()

	for _, rootCA := range m.caCertificates {
		if !util.IsFileInputValid(rootCA) {
			return fmt.Errorf("invalid root CA certificate %#v", rootCA)
		}
		if rootCA != "" && !filepath.IsAbs(rootCA) {
			rootCA = filepath.Join(m.configDir, rootCA)
		}
		crt, err := os.ReadFile(rootCA)
		if err != nil {
			return err
		}
		if rootCAs.AppendCertsFromPEM(crt) {
			logger.Debug(m.logSender, "", "TLS certificate authority %#v successfully loaded", rootCA)
		} else {
			err := fmt.Errorf("unable to load TLS certificate authority %#v", rootCA)
			logger.Warn(m.logSender, "", "%v", err)
			return err
		}
	}

	m.Lock()
	defer m.Unlock()

	m.rootCAs = rootCAs
	return nil
}

// SetCACertificates sets the root CA authorities file paths.
// This should not be changed at runtime
func (m *CertManager) SetCACertificates(caCertificates []string) {
	m.caCertificates = util.RemoveDuplicates(caCertificates, true)
}

// SetCARevocationLists sets the CA revocation lists file paths.
// This should not be changed at runtime
func (m *CertManager) SetCARevocationLists(caRevocationLists []string) {
	m.caRevocationLists = util.RemoveDuplicates(caRevocationLists, true)
}

// NewCertManager creates a new certificate manager
func NewCertManager(keyPairs []TLSKeyPair, configDir, logSender string) (*CertManager, error) {
	manager := &CertManager{
		keyPairs:  keyPairs,
		certs:     make(map[string]*tls.Certificate),
		configDir: configDir,
		logSender: logSender,
	}
	err := manager.loadCertificates()
	if err != nil {
		return nil, err
	}
	return manager, nil
}
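
A minimal usage sketch, assuming this package is importable as github.com/drakkan/sftpgo/v2/common and that the certificate and key paths (hypothetical examples) exist. GetCertificateFunc plugs into tls.Config as a per-handshake lookup, so a later Reload can swap certificates without restarting the listener:

package main

import (
	"crypto/tls"

	"github.com/drakkan/sftpgo/v2/common"
)

func main() {
	keyPairs := []common.TLSKeyPair{
		{
			Cert: "/etc/sftpgo/server.crt", // hypothetical path
			Key:  "/etc/sftpgo/server.key", // hypothetical path
			ID:   common.DefaultTLSKeyPaidID,
		},
	}
	certMgr, err := common.NewCertManager(keyPairs, "/etc/sftpgo", "example")
	if err != nil {
		panic(err)
	}
	tlsConfig := &tls.Config{
		// the certificate is looked up on each ClientHello
		GetCertificate: certMgr.GetCertificateFunc(common.DefaultTLSKeyPaidID),
		MinVersion:     tls.VersionTLS12,
	}
	_ = tlsConfig // pass to tls.NewListener / http.Server as needed
}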
common/tlsutils_test.go (new file, 461 lines)
@@ -0,0 +1,461 @@
// Copyright (C) 2019-2022 Nicola Murino
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, version 3.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

package common

import (
	"crypto/tls"
	"crypto/x509"
	"os"
	"path/filepath"
	"testing"

	"github.com/stretchr/testify/assert"
)

const (
	serverCert = `-----BEGIN CERTIFICATE-----
MIIEIDCCAgigAwIBAgIRAPOR9zTkX35vSdeyGpF8Rn8wDQYJKoZIhvcNAQELBQAw
EzERMA8GA1UEAxMIQ2VydEF1dGgwHhcNMjEwMTAyMjEyMjU1WhcNMjIwNzAyMjEz
MDUxWjARMQ8wDQYDVQQDEwZzZXJ2ZXIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw
ggEKAoIBAQCte0PJhCTNqTiqdwk/s4JanKIMKUVWr2u94a+JYy5gJ9xYXrQ49SeN
m+fwhTAOqctP5zNVkFqxlBytJZg3pqCKqRoOOl1qVgL3F3o7JdhZGi67aw8QMLPx
tLPpYWnnrlUQoXRJdTlqkDqO8lOZl9HO5oZeidPZ7r5BVD6ZiujAC6Zg0jIc+EPt
qhaUJ1CStoAeRf1rNWKmDsLv5hEaDWoaHF9sNVzDQg6atZ3ici00qQj+uvEZo8mL
k6egg3rqsTv9ml2qlrRgFumt99J60hTt3tuQaAruHY80O9nGy3SCXC11daa7gszH
ElCRvhUVoOxRtB54YBEtJ0gEpFnTO9J1AgMBAAGjcTBvMA4GA1UdDwEB/wQEAwID
uDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwHQYDVR0OBBYEFAgDXwPV
nhztNz+H20iNWgoIx8adMB8GA1UdIwQYMBaAFO1yCNAGr/zQTJIi8lw3w5OiuBvM
MA0GCSqGSIb3DQEBCwUAA4ICAQCR5kgIb4vAtrtsXD24n6RtU1yIXHPLNmDStVrH
uaMYNnHlLhRlQFCjHhjWvZ89FQC7FeNOITc3FpibJySyw7JfnsyEOGxEbcAS4uLB
2pdAiJPqdQtxIVcyi5vu53m1T5tm0sy8sBrGxU466aDQ8VGqjcjfTwNIyoFMd3p/
ezFRvg2BudwU9hqApgfHfLi4WCuI3hLO2tbmgDinyH0HI0YYNNweGpiBYbTLF4Tx
H6vHgD9USMZeu4+HX0IIsBiHQD7TTIe5ceREkPcNPd5qTpIvT3zKQ/KwwT90/zjP
aWmz6pLxBfjRu7MY/bDfxfRUqsrLYJCVBoaDVRWR9rhiPIFkC5JzoWD/4hdj2iis
N0+OOaJ77L+/ArFprE+7Fu3cSdYlfiNjV8R5kE29cAxKLI92CjAiTKrEuxKcQPKO
+taWNKIYYjEDZwVnzlkTIl007X0RBuzu9gh4w5NwJdt8ZOJAp0JV0Cq+UvG+FC/v
lYk82E6j1HKhf4CXmrjsrD1Fyu41mpVFOpa2ATiFGvms913MkXuyO8g99IllmDw1
D7/PN4Qe9N6Zm7yoKZM0IUw2v+SUMIdOAZ7dptO9ZjtYOfiAIYN3jM8R4JYgPiuD
DGSM9LJBJxCxI/DiO1y1Z3n9TcdDQYut8Gqdi/aYXw2YeqyHXosX5Od3vcK/O5zC
pOJTYQ==
-----END CERTIFICATE-----`
	serverKey = `-----BEGIN RSA PRIVATE KEY-----
MIIEowIBAAKCAQEArXtDyYQkzak4qncJP7OCWpyiDClFVq9rveGviWMuYCfcWF60
OPUnjZvn8IUwDqnLT+czVZBasZQcrSWYN6agiqkaDjpdalYC9xd6OyXYWRouu2sP
EDCz8bSz6WFp565VEKF0SXU5apA6jvJTmZfRzuaGXonT2e6+QVQ+mYrowAumYNIy
HPhD7aoWlCdQkraAHkX9azVipg7C7+YRGg1qGhxfbDVcw0IOmrWd4nItNKkI/rrx
GaPJi5OnoIN66rE7/Zpdqpa0YBbprffSetIU7d7bkGgK7h2PNDvZxst0glwtdXWm
u4LMxxJQkb4VFaDsUbQeeGARLSdIBKRZ0zvSdQIDAQABAoIBAF4sI8goq7HYwqIG
rEagM4rsrCrd3H4KC/qvoJJ7/JjGCp8OCddBfY8pquat5kCPe4aMgxlXm2P6evaj
CdZr5Ypf8Xz3we4PctyfKgMhsCfuRqAGpc6sIYJ8DY4LC2pxAExe2LlnoRtv39np
QeiGuaYPDbIUL6SGLVFZYgIHngFhbDYfL83q3Cb/PnivUGFvUVQCfRBUKO2d8KYq
TrVB5BWD2GrHor24ApQmci1OOqfbkIevkK6bk8HUfSZiZGI9LUQiPHMxi5k2x43J
nIwhZnW2N28dorKnWHg2vh7viGvinVRZ3MEyX150oCw/L6SYM4fqR6t2ZSBgNQHT
ZNoDtwECgYEA4lXMgtYqKuSlZ3TKfxAj03tJ/gbRdKcUCEGXEbdpY70tTu6KESZS
etid4Ut/sWEoPTJsgYiGbgJl571t1O8oR1UZYgh9hBGHLV6UEIt9n2PbExhE2vL3
SB7+LfO+tMvM4qKUBN+uy4GpU0NiyEEecw4x4S7MRSyHFRIDR7B6RV0CgYEAxDgS
mDaNUfSdfB5mXekLUJAwqeKRdL9RjXYaHbnoZ5kIwQ73tFikRwyTsLQwMhjE1l3z
MItTzIAyTf/BlK3dsp6bHTaT7hXIjHBsuKATN5qAuUpzTrg9+QaCawVSlQgNeF3a
iyfD4dVp66Bzn3gO757TWqmroBZ2e1owbAQvF/kCgYAKT/Jze6KMNcK7hfy78VZQ
imuCoXjlob8t6R8i9YJdwv7Pe9rakS5s3nXDEBePU2fr8eIzvK6zUHSoLF9WtlbV
eTEg4FYnsEzCam7AmjptCrWulwp8F1ng9ViLa3Gi9y4snU+1MSPbrdqzKnzTtvPW
Ni1bnzA7bp3w/dMcbxQDGQKBgB50hY5SiUS7LuZg4YqZ7UOn3aXAoMr6FvJZ7lvG
yyepPQ6aACBh0b2lWhcHIKPl7EdJdcGHHo6TJzusAqPNCKf8rh6upe9COkpx+K3/
SnxK4sffol4JgrTwKbXqsZKoGU8hYhZPKbwXn8UOtmN+AvN2N1/PDfBfDCzBJtrd
G2IhAoGBAN19976xAMDjKb2+wd/mQYA2fR7E8lodxdX3LDnblYmndTKY67nVo94M
FHPKZSN590HkFJ+wmChnOrqjtosY+N25CKMS7939EUIDrq+B+bYTWM/gcwdLXNUk
Rygw/078Z3ZDJamXmyez5WpeLFrrbmI8sLnBBmSjQvMb6vCEtQ2Z
-----END RSA PRIVATE KEY-----`
	caCRT = `-----BEGIN CERTIFICATE-----
MIIE5jCCAs6gAwIBAgIBATANBgkqhkiG9w0BAQsFADATMREwDwYDVQQDEwhDZXJ0
QXV0aDAeFw0yMTAxMDIyMTIwNTVaFw0yMjA3MDIyMTMwNTJaMBMxETAPBgNVBAMT
CENlcnRBdXRoMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA4Tiho5xW
AC15JRkMwfp3/TJwI2As7MY5dele5cmdr5bHAE+sRKqC+Ti88OJWCV5saoyax/1S
CjxJlQMZMl169P1QYJskKjdG2sdv6RLWLMgwSNRRjxp/Bw9dHdiEb9MjLgu28Jro
9peQkHcRHeMf5hM9WvlIJGrdzbC4hUehmqggcqgARainBkYjf0SwuWxHeu4nMqkp
Ak5tcSTLCjHfEFHZ9Te0TIPG5YkWocQKyeLgu4lvuU+DD2W2lym+YVUtRMGs1Env
k7p+N0DcGU26qfzZ2sF5ZXkqm7dBsGQB9pIxwc2Q8T1dCIyP9OQCKVILdc5aVFf1
cryQFHYzYNNZXFlIBims5VV5Mgfp8ESHQSue+v6n6ykecLEyKt1F1Y/MWY/nWUSI
8zdq83jdBAZVjo9MSthxVn57/06s/hQca65IpcTZV2gX0a+eRlAVqaRbAhL3LaZe
bYsW3WHKoUOftwemuep3nL51TzlXZVL7Oz/ClGaEOsnGG9KFO6jh+W768qC0zLQI
CdE7v2Zex98sZteHCg9fGJHIaYoF0aJG5P3WI5oZf2fy7UIYN9ADLFZiorCXAZEh
CSU6mDoRViZ4RGR9GZxbDZ9KYn7O8M/KCR72bkQg73TlMsk1zSXEw0MKLUjtsw6c
rZ0Jt8t3sRatHO3JrYHALMt9vZfyNCZp0IsCAwEAAaNFMEMwDgYDVR0PAQH/BAQD
AgEGMBIGA1UdEwEB/wQIMAYBAf8CAQAwHQYDVR0OBBYEFO1yCNAGr/zQTJIi8lw3
w5OiuBvMMA0GCSqGSIb3DQEBCwUAA4ICAQA6gCNuM7r8mnx674dm31GxBjQy5ZwB
7CxDzYEvL/oiZ3Tv3HlPfN2LAAsJUfGnghh9DOytenL2CTZWjl/emP5eijzmlP+9
zva5I6CIMCf/eDDVsRdO244t0o4uG7+At0IgSDM3bpVaVb4RHZNjEziYChsEYY8d
HK6iwuRSvFniV6yhR/Vj1Ymi9yZ5xclqseLXiQnUB0PkfIk23+7s42cXB16653fH
O/FsPyKBLiKJArizLYQc12aP3QOrYoYD9+fAzIIzew7A5C0aanZCGzkuFpO6TRlD
Tb7ry9Gf0DfPpCgxraH8tOcmnqp/ka3hjqo/SRnnTk0IFrmmLdarJvjD46rKwBo4
MjyAIR1mQ5j8GTlSFBmSgETOQ/EYvO3FPLmra1Fh7L+DvaVzTpqI9fG3TuyyY+Ri
Fby4ycTOGSZOe5Fh8lqkX5Y47mCUJ3zHzOA1vUJy2eTlMRGpu47Eb1++Vm6EzPUP
2EF5aD+zwcssh+atZvQbwxpgVqVcyLt91RSkKkmZQslh0rnlTb68yxvUnD3zw7So
o6TAf9UvwVMEvdLT9NnFd6hwi2jcNte/h538GJwXeBb8EkfpqLKpTKyicnOdkamZ
7E9zY8SHNRYMwB9coQ/W8NvufbCgkvOoLyMXk5edbXofXl3PhNGOlraWbghBnzf5
r3rwjFsQOoZotA==
-----END CERTIFICATE-----`
	caKey = `-----BEGIN RSA PRIVATE KEY-----
MIIJKQIBAAKCAgEA4Tiho5xWAC15JRkMwfp3/TJwI2As7MY5dele5cmdr5bHAE+s
RKqC+Ti88OJWCV5saoyax/1SCjxJlQMZMl169P1QYJskKjdG2sdv6RLWLMgwSNRR
jxp/Bw9dHdiEb9MjLgu28Jro9peQkHcRHeMf5hM9WvlIJGrdzbC4hUehmqggcqgA
RainBkYjf0SwuWxHeu4nMqkpAk5tcSTLCjHfEFHZ9Te0TIPG5YkWocQKyeLgu4lv
uU+DD2W2lym+YVUtRMGs1Envk7p+N0DcGU26qfzZ2sF5ZXkqm7dBsGQB9pIxwc2Q
8T1dCIyP9OQCKVILdc5aVFf1cryQFHYzYNNZXFlIBims5VV5Mgfp8ESHQSue+v6n
6ykecLEyKt1F1Y/MWY/nWUSI8zdq83jdBAZVjo9MSthxVn57/06s/hQca65IpcTZ
V2gX0a+eRlAVqaRbAhL3LaZebYsW3WHKoUOftwemuep3nL51TzlXZVL7Oz/ClGaE
OsnGG9KFO6jh+W768qC0zLQICdE7v2Zex98sZteHCg9fGJHIaYoF0aJG5P3WI5oZ
f2fy7UIYN9ADLFZiorCXAZEhCSU6mDoRViZ4RGR9GZxbDZ9KYn7O8M/KCR72bkQg
73TlMsk1zSXEw0MKLUjtsw6crZ0Jt8t3sRatHO3JrYHALMt9vZfyNCZp0IsCAwEA
AQKCAgAV+ElERYbaI5VyufvVnFJCH75ypPoc6sVGLEq2jbFVJJcq/5qlZCC8oP1F
Xj7YUR6wUiDzK1Hqb7EZ2SCHGjlZVrCVi+y+NYAy7UuMZ+r+mVSkdhmypPoJPUVv
GOTqZ6VB46Cn3eSl0WknvoWr7bD555yPmEuiSc5zNy74yWEJTidEKAFGyknowcTK
sG+w1tAuPLcUKQ44DGB+rgEkcHL7C5EAa7upzx0C3RmZFB+dTAVyJdkBMbFuOhTS
sB7DLeTplR7/4mp9da7EQw51ZXC1DlZOEZt++4/desXsqATNAbva1OuzrLG7mMKe
N/PCBh/aERQcsCvgUmaXqGQgqN1Jhw8kbXnjZnVd9iE7TAh7ki3VqNy1OMgTwOex
bBYWaCqHuDYIxCjeW0qLJcn0cKQ13FVYrxgInf4Jp82SQht5b/zLL3IRZEyKcLJF
kL6g1wlmTUTUX0z8eZzlM0ZCrqtExjgElMO/rV971nyNV5WU8Og3NmE8/slqMrmJ
DlrQr9q0WJsDKj1IMe46EUM6ix7bbxC5NIfJ96dgdxZDn6ghjca6iZYqqUACvmUj
cq08s3R4Ouw9/87kn11wwGBx2yDueCwrjKEGc0RKjweGbwu0nBxOrkJ8JXz6bAv7
1OKfYaX3afI9B8x4uaiuRs38oBQlg9uAYFfl4HNBPuQikGLmsQKCAQEA8VjFOsaz
y6NMZzKXi7WZ48uu3ed5x3Kf6RyDr1WvQ1jkBMv9b6b8Gp1CRnPqviRBto9L8QAg
bCXZTqnXzn//brskmW8IZgqjAlf89AWa53piucu9/hgidrHRZobs5gTqev28uJdc
zcuw1g8c3nCpY9WeTjHODzX5NXYRLFpkazLfYa6c8Q9jZR4KKrpdM+66fxL0JlOd
7dN0oQtEqEAugsd3cwkZgvWhY4oM7FGErrZoDLy273ZdJzi/vU+dThyVzfD8Ab8u
VxxuobVMT/S608zbe+uaiUdov5s96OkCl87403UNKJBH+6LNb3rjBBLE9NPN5ET9
JLQMrYd+zj8jQwKCAQEA7uU5I9MOufo9bIgJqjY4Ie1+Ex9DZEMUYFAvGNCJCVcS
mwOdGF8AWzIavTLACmEDJO7t/OrBdoo4L7IEsCNjgA3WiIwIMiWUVqveAGUMEXr6
TRI5EolV6FTqqIP6AS+BAeBq7G1ELgsTrWNHh11rW3+3kBMuOCn77PUQ8WHwcq/r
teZcZn4Ewcr6P7cBODgVvnBPhe/J8xHS0HFVCeS1CvaiNYgees5yA80Apo9IPjDJ
YWawLjmH5wUBI5yDFVp067wjqJnoKPSoKwWkZXqUk+zgFXx5KT0gh/c5yh1frASp
q6oaYnHEVC5qj2SpT1GFLonTcrQUXiSkiUudvNu1GQKCAQEAmko+5GFtRe0ihgLQ
4S76r6diJli6AKil1Fg3U1r6zZpBQ1PJtJxTJQyN9w5Z7q6tF/GqAesrzxevQdvQ
rCImAPtA3ZofC2UXawMnIjWHHx6diNvYnV1+gtUQ4nO1dSOFZ5VZFcUmPiZO6boF
oaryj3FcX+71JcJCjEvrlKhA9Es0hXUkvfMxfs5if4he1zlyHpTWYr4oA4egUugq
P0mwskikc3VIyvEO+NyjgFxo72yLPkFSzemkidN8uKDyFqKtnlfGM7OuA2CY1WZa
3+67lXWshx9KzyJIs92iCYkU8EoPxtdYzyrV6efdX7x27v60zTOut5TnJJS6WiF6
Do5MkwKCAQAxoR9IyP0DN/BwzqYrXU42Bi+t603F04W1KJNQNWpyrUspNwv41yus
xnD1o0hwH41Wq+h3JZIBfV+E0RfWO9Pc84MBJQ5C1LnHc7cQH+3s575+Km3+4tcd
CB8j2R8kBeloKWYtLdn/Mr/ownpGreqyvIq2/LUaZ+Z1aMgXTYB1YwS16mCBzmZQ
mEl62RsAwe4KfSyYJ6OtwqMoOJMxFfliiLBULK4gVykqjvk2oQeiG+KKQJoTUFJi
dRCyhD5bPkqR+qjxyt+HOqSBI4/uoROi05AOBqjpH1DVzk+MJKQOiX1yM0l98CKY
Vng+x+vAla/0Zh+ucajVkgk4mKPxazdpAoIBAQC17vWk4KYJpF2RC3pKPcQ0PdiX
bN35YNlvyhkYlSfDNdyH3aDrGiycUyW2mMXUgEDFsLRxHMTL+zPC6efqO6sTAJDY
cBptsW4drW/qo8NTx3dNOisLkW+mGGJOR/w157hREFr29ymCVMYu/Z7fVWIeSpCq
p3u8YX8WTljrxwSczlGjvpM7uJx3SfYRM4TUoy+8wU8bK74LywLa5f60bQY6Dye0
Gqd9O6OoPfgcQlwjC5MiAofeqwPJvU0hQOPoehZyNLAmOCWXTYWaTP7lxO1r6+NE
M3hGYqW3W8Ixua71OskCypBZg/HVlIP/lzjRzdx+VOB2hbWVth2Iup/Z1egW
-----END RSA PRIVATE KEY-----`
	caCRL = `-----BEGIN X509 CRL-----
MIICpzCBkAIBATANBgkqhkiG9w0BAQsFADATMREwDwYDVQQDEwhDZXJ0QXV0aBcN
MjEwMTAyMjEzNDA1WhcNMjMwMTAyMjEzNDA1WjAkMCICEQC+l04DbHWMyC3fG09k
VXf+Fw0yMTAxMDIyMTM0MDVaoCMwITAfBgNVHSMEGDAWgBTtcgjQBq/80EySIvJc
N8OTorgbzDANBgkqhkiG9w0BAQsFAAOCAgEAEJ7z+uNc8sqtxlOhSdTGDzX/xput
E857kFQkSlMnU2whQ8c+XpYrBLA5vIZJNSSwohTpM4+zVBX/bJpmu3wqqaArRO9/
YcW5mQk9Anvb4WjQW1cHmtNapMTzoC9AiYt/OWPfy+P6JCgCr4Hy6LgQyIRL6bM9
VYTalolOm1qa4Y5cIeT7iHq/91mfaqo8/6MYRjLl8DOTROpmw8OS9bCXkzGKdCat
AbAzwkQUSauyoCQ10rpX+Y64w9ng3g4Dr20aCqPf5osaqplEJ2HTK8ljDTidlslv
9anQj8ax3Su89vI8+hK+YbfVQwrThabgdSjQsn+veyx8GlP8WwHLAQ379KjZjWg+
OlOSwBeU1vTdP0QcB8X5C2gVujAyuQekbaV86xzIBOj7vZdfHZ6ee30TZ2FKiMyg
7/N2OqW0w77ChsjB4MSHJCfuTgIeg62GzuZXLM+Q2Z9LBdtm4Byg+sm/P52adOEg
gVb2Zf4KSvsAmA0PIBlu449/QXUFcMxzLFy7mwTeZj2B4Ln0Hm0szV9f9R8MwMtB
SyLYxVH+mgqaR6Jkk22Q/yYyLPaELfafX5gp/AIXG8n0zxfVaTvK3auSgb1Q6ZLS
5QH9dSIsmZHlPq7GoSXmKpMdjUL8eaky/IMteioyXgsBiATzl5L2dsw6MTX3MDF0
QbDK+MzhmbKfDxs=
-----END X509 CRL-----`
	client1Crt = `-----BEGIN CERTIFICATE-----
MIIEITCCAgmgAwIBAgIRAIppZHoj1hM80D7WzTEKLuAwDQYJKoZIhvcNAQELBQAw
EzERMA8GA1UEAxMIQ2VydEF1dGgwHhcNMjEwMTAyMjEyMzEwWhcNMjIwNzAyMjEz
MDUxWjASMRAwDgYDVQQDEwdjbGllbnQxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
MIIBCgKCAQEAoKbYY9MdF2kF/nhBESIiZTdVYtA8XL9xrIZyDj9EnCiTxHiVbJtH
XVwszqSl5TRrotPmnmAQcX3r8OCk+z+RQZ0QQj257P3kG6q4rNnOcWCS5xEd20jP
yhQ3m+hMGfZsotNTQze1ochuQgLUN6IPyPxZkH22ia3jX4iu1eo/QxeLYHj1UHw4
3Cii9yE+j5kPUC21xmnrGKdUrB55NYLXHx6yTIqYR5znSOVB8oJi18/hwdZmH859
DHhm0Hx1HrS+jbjI3+CMorZJ3WUyNf+CkiVLD3xYutPbxzEpwiqkG/XYzLH0habT
cDcILo18n+o3jvem2KWBrDhyairjIDscwQIDAQABo3EwbzAOBgNVHQ8BAf8EBAMC
A7gwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMB0GA1UdDgQWBBSJ5GIv
zIrE4ZSQt2+CGblKTDswizAfBgNVHSMEGDAWgBTtcgjQBq/80EySIvJcN8OTorgb
zDANBgkqhkiG9w0BAQsFAAOCAgEALh4f5GhvNYNou0Ab04iQBbLEdOu2RlbK1B5n
K9P/umYenBHMY/z6HT3+6tpcHsDuqE8UVdq3f3Gh4S2Gu9m8PRitT+cJ3gdo9Plm
3rD4ufn/s6rGg3ppydXcedm17492tbccUDWOBZw3IO/ASVq13WPgT0/Kev7cPq0k
sSdSNhVeXqx8Myc2/d+8GYyzbul2Kpfa7h9i24sK49E9ftnSmsIvngONo08eT1T0
3wAOyK2981LIsHaAWcneShKFLDB6LeXIT9oitOYhiykhFlBZ4M1GNlSNfhQ8IIQP
xbqMNXCLkW4/BtLhGEEcg0QVso6Kudl9rzgTfQknrdF7pHp6rS46wYUjoSyIY6dl
oLmnoAVJX36J3QPWelePI9e07X2wrTfiZWewwgw3KNRWjd6/zfPLe7GoqXnK1S2z
PT8qMfCaTwKTtUkzXuTFvQ8bAo2My/mS8FOcpkt2oQWeOsADHAUX7fz5BCoa2DL3
k/7Mh4gVT+JYZEoTwCFuYHgMWFWe98naqHi9lB4yR981p1QgXgxO7qBeipagKY1F
LlH1iwXUqZ3MZnkNA+4e1Fglsw3sa/rC+L98HnznJ/YbTfQbCP6aQ1qcOymrjMud
7MrFwqZjtd/SK4Qx1VpK6jGEAtPgWBTUS3p9ayg6lqjMBjsmySWfvRsDQbq6P5Ct
O/e3EH8=
-----END CERTIFICATE-----`
	client1Key = `-----BEGIN RSA PRIVATE KEY-----
MIIEpAIBAAKCAQEAoKbYY9MdF2kF/nhBESIiZTdVYtA8XL9xrIZyDj9EnCiTxHiV
bJtHXVwszqSl5TRrotPmnmAQcX3r8OCk+z+RQZ0QQj257P3kG6q4rNnOcWCS5xEd
20jPyhQ3m+hMGfZsotNTQze1ochuQgLUN6IPyPxZkH22ia3jX4iu1eo/QxeLYHj1
UHw43Cii9yE+j5kPUC21xmnrGKdUrB55NYLXHx6yTIqYR5znSOVB8oJi18/hwdZm
H859DHhm0Hx1HrS+jbjI3+CMorZJ3WUyNf+CkiVLD3xYutPbxzEpwiqkG/XYzLH0
habTcDcILo18n+o3jvem2KWBrDhyairjIDscwQIDAQABAoIBAEBSjVFqtbsp0byR
aXvyrtLX1Ng7h++at2jca85Ihq//jyqbHTje8zPuNAKI6eNbmb0YGr5OuEa4pD9N
ssDmMsKSoG/lRwwcm7h4InkSvBWpFShvMgUaohfHAHzsBYxfnh+TfULsi0y7c2n6
t/2OZcOTRkkUDIITnXYiw93ibHHv2Mv2bBDu35kGrcK+c2dN5IL5ZjTjMRpbJTe2
44RBJbdTxHBVSgoGBnugF+s2aEma6Ehsj70oyfoVpM6Aed5kGge0A5zA1JO7WCn9
Ay/DzlULRXHjJIoRWd2NKvx5n3FNppUc9vJh2plRHalRooZ2+MjSf8HmXlvG2Hpb
ScvmWgECgYEA1G+A/2KnxWsr/7uWIJ7ClcGCiNLdk17Pv3DZ3G4qUsU2ITftfIbb
tU0Q/b19na1IY8Pjy9ptP7t74/hF5kky97cf1FA8F+nMj/k4+wO8QDI8OJfzVzh9
PwielA5vbE+xmvis5Hdp8/od1Yrc/rPSy2TKtPFhvsqXjqoUmOAjDP8CgYEAwZjH
9dt1sc2lx/rMxihlWEzQ3JPswKW9/LJAmbRBoSWF9FGNjbX7uhWtXRKJkzb8ZAwa
88azluNo2oftbDD/+jw8b2cDgaJHlLAkSD4O1D1RthW7/LKD15qZ/oFsRb13NV85
ZNKtwslXGbfVNyGKUVFm7fVA8vBAOUey+LKDFj8CgYEAg8WWstOzVdYguMTXXuyb
ruEV42FJaDyLiSirOvxq7GTAKuLSQUg1yMRBIeQEo2X1XU0JZE3dLodRVhuO4EXP
g7Dn4X7Th9HSvgvNuIacowWGLWSz4Qp9RjhGhXhezUSx2nseY6le46PmFavJYYSR
4PBofMyt4PcyA6Cknh+KHmkCgYEAnTriG7ETE0a7v4DXUpB4TpCEiMCy5Xs2o8Z5
ZNva+W+qLVUWq+MDAIyechqeFSvxK6gRM69LJ96lx+XhU58wJiFJzAhT9rK/g+jS
bsHH9WOfu0xHkuHA5hgvvV2Le9B2wqgFyva4HJy82qxMxCu/VG/SMqyfBS9OWbb7
ibQhdq0CgYAl53LUWZsFSZIth1vux2LVOsI8C3X1oiXDGpnrdlQ+K7z57hq5EsRq
GC+INxwXbvKNqp5h0z2MvmKYPDlGVTgw8f8JjM7TkN17ERLcydhdRrMONUryZpo8
1xTob+8blyJgfxZUIAKbMbMbIiU0WAF0rfD/eJJwS4htOW/Hfv4TGA==
-----END RSA PRIVATE KEY-----`
	// client 2 crt is revoked
	client2Crt = `-----BEGIN CERTIFICATE-----
MIIEITCCAgmgAwIBAgIRAL6XTgNsdYzILd8bT2RVd/4wDQYJKoZIhvcNAQELBQAw
EzERMA8GA1UEAxMIQ2VydEF1dGgwHhcNMjEwMTAyMjEyMzIwWhcNMjIwNzAyMjEz
MDUxWjASMRAwDgYDVQQDEwdjbGllbnQyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
MIIBCgKCAQEA6xjW5KQR3/OFQtV5M75WINqQ4AzXSu6DhSz/yumaaQZP/UxY+6hi
jcrFzGo9MMie/Sza8DhkXOFAl2BelUubrOeB2cl+/Gr8OCyRi2Gv6j3zCsuN/4jQ
tNaoez/IbkDvI3l/ZpzBtnuNY2RiemGgHuORXHRVf3qVlsw+npBIRW5rM2HkO/xG
oZjeBErWVu390Lyn+Gvk2TqQDnkutWnxUC60/zPlHhXZ4BwaFAekbSnjsSDB1YFM
s8HwW4oBryoxdj3/+/qLrBHt75IdLw3T7/V1UDJQM3EvSQOr12w4egpldhtsC871
nnBQZeY6qA5feffIwwg/6lJm70o6S6OX6wIDAQABo3EwbzAOBgNVHQ8BAf8EBAMC
A7gwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMB0GA1UdDgQWBBTB84v5
t9HqhLhMODbn6oYkEQt3KzAfBgNVHSMEGDAWgBTtcgjQBq/80EySIvJcN8OTorgb
zDANBgkqhkiG9w0BAQsFAAOCAgEALGtBCve5k8tToL3oLuXp/oSik6ovIB/zq4I/
4zNMYPU31+ZWz6aahysgx1JL1yqTa3Qm8o2tu52MbnV10dM7CIw7c/cYa+c+OPcG
5LF97kp13X+r2axy+CmwM86b4ILaDGs2Qyai6VB6k7oFUve+av5o7aUrNFpqGCJz
HWdtHZSVA3JMATzy0TfWanwkzreqfdw7qH0yZ9bDURlBKAVWrqnCstva9jRuv+AI
eqxr/4Ro986TFjJdoAP3Vr16CPg7/B6GA/KmsBWJrpeJdPWq4i2gpLKvYZoy89qD
mUZf34RbzcCtV4NvV1DadGnt4us0nvLrvS5rL2+2uWD09kZYq9RbLkvgzF/cY0fz
i7I1bi5XQ+alWe0uAk5ZZL/D+GTRYUX1AWwCqwJxmHrMxcskMyO9pXvLyuSWRDLo
YNBrbX9nLcfJzVCp+X+9sntTHjs4l6Cw+fLepJIgtgqdCHtbhTiv68vSM6cgb4br
6n2xrXRKuioiWFOrTSRr+oalZh8dGJ/xvwY8IbWknZAvml9mf1VvfE7Ma5P777QM
fsbYVTq0Y3R/5hIWsC3HA5z6MIM8L1oRe/YyhP3CTmrCHkVKyDOosGXpGz+JVcyo
cfYkY5A3yFKB2HaCwZSfwFmRhxkrYWGEbHv3Cd9YkZs1J3hNhGFZyVMC9Uh0S85a
6zdDidU=
-----END CERTIFICATE-----`
	client2Key = `-----BEGIN RSA PRIVATE KEY-----
MIIEpAIBAAKCAQEA6xjW5KQR3/OFQtV5M75WINqQ4AzXSu6DhSz/yumaaQZP/UxY
+6hijcrFzGo9MMie/Sza8DhkXOFAl2BelUubrOeB2cl+/Gr8OCyRi2Gv6j3zCsuN
/4jQtNaoez/IbkDvI3l/ZpzBtnuNY2RiemGgHuORXHRVf3qVlsw+npBIRW5rM2Hk
O/xGoZjeBErWVu390Lyn+Gvk2TqQDnkutWnxUC60/zPlHhXZ4BwaFAekbSnjsSDB
1YFMs8HwW4oBryoxdj3/+/qLrBHt75IdLw3T7/V1UDJQM3EvSQOr12w4egpldhts
C871nnBQZeY6qA5feffIwwg/6lJm70o6S6OX6wIDAQABAoIBAFatstVb1KdQXsq0
cFpui8zTKOUiduJOrDkWzTygAmlEhYtrccdfXu7OWz0x0lvBLDVGK3a0I/TGrAzj
4BuFY+FM/egxTVt9in6fmA3et4BS1OAfCryzUdfK6RV//8L+t+zJZ/qKQzWnugpy
QYjDo8ifuMFwtvEoXizaIyBNLAhEp9hnrv+Tyi2O2gahPvCHsD48zkyZRCHYRstD
NH5cIrwz9/RJgPO1KI+QsJE7Nh7stR0sbr+5TPU4fnsL2mNhMUF2TJrwIPrc1yp+
YIUjdnh3SO88j4TQT3CIrWi8i4pOy6N0dcVn3gpCRGaqAKyS2ZYUj+yVtLO4KwxZ
SZ1lNvECgYEA78BrF7f4ETfWSLcBQ3qxfLs7ibB6IYo2x25685FhZjD+zLXM1AKb
FJHEXUm3mUYrFJK6AFEyOQnyGKBOLs3S6oTAswMPbTkkZeD1Y9O6uv0AHASLZnK6
pC6ub0eSRF5LUyTQ55Jj8D7QsjXJueO8v+G5ihWhNSN9tB2UA+8NBmkCgYEA+weq
cvoeMIEMBQHnNNLy35bwfqrceGyPIRBcUIvzQfY1vk7KW6DYOUzC7u+WUzy/hA52
DjXVVhua2eMQ9qqtOav7djcMc2W9RbLowxvno7K5qiCss013MeWk64TCWy+WMp5A
AVAtOliC3hMkIKqvR2poqn+IBTh1449agUJQqTMCgYEAu06IHGq1GraV6g9XpGF5
wqoAlMzUTdnOfDabRilBf/YtSr+J++ThRcuwLvXFw7CnPZZ4TIEjDJ7xjj3HdxeE
fYYjineMmNd40UNUU556F1ZLvJfsVKizmkuCKhwvcMx+asGrmA+tlmds4p3VMS50
KzDtpKzLWlmU/p/RINWlRmkCgYBy0pHTn7aZZx2xWKqCDg+L2EXPGqZX6wgZDpu7
OBifzlfM4ctL2CmvI/5yPmLbVgkgBWFYpKUdiujsyyEiQvWTUKhn7UwjqKDHtcsk
G6p7xS+JswJrzX4885bZJ9Oi1AR2yM3sC9l0O7I4lDbNPmWIXBLeEhGMmcPKv/Kc
91Ff4wKBgQCF3ur+Vt0PSU0ucrPVHjCe7tqazm0LJaWbPXL1Aw0pzdM2EcNcW/MA
w0kqpr7MgJ94qhXCBcVcfPuFN9fBOadM3UBj1B45Cz3pptoK+ScI8XKno6jvVK/p
xr5cb9VBRBtB9aOKVfuRhpatAfS2Pzm2Htae9lFn7slGPUmu2hkjDw==
-----END RSA PRIVATE KEY-----`
)

func TestLoadCertificate(t *testing.T) {
	caCrtPath := filepath.Join(os.TempDir(), "testca.crt")
	caCrlPath := filepath.Join(os.TempDir(), "testcrl.crt")
	certPath := filepath.Join(os.TempDir(), "test.crt")
	keyPath := filepath.Join(os.TempDir(), "test.key")
	err := os.WriteFile(caCrtPath, []byte(caCRT), os.ModePerm)
	assert.NoError(t, err)
	err = os.WriteFile(caCrlPath, []byte(caCRL), os.ModePerm)
	assert.NoError(t, err)
	err = os.WriteFile(certPath, []byte(serverCert), os.ModePerm)
	assert.NoError(t, err)
	err = os.WriteFile(keyPath, []byte(serverKey), os.ModePerm)
	assert.NoError(t, err)
	keyPairs := []TLSKeyPair{
		{
			Cert: certPath,
			Key:  keyPath,
			ID:   DefaultTLSKeyPaidID,
		},
		{
			Cert: certPath,
			Key:  keyPath,
			ID:   DefaultTLSKeyPaidID,
		},
	}
	certManager, err := NewCertManager(keyPairs, configDir, logSenderTest)
	if assert.Error(t, err) {
		assert.Contains(t, err.Error(), "is duplicated")
	}
	assert.Nil(t, certManager)

	keyPairs = []TLSKeyPair{
		{
			Cert: certPath,
			Key:  keyPath,
			ID:   DefaultTLSKeyPaidID,
		},
	}

	certManager, err = NewCertManager(keyPairs, configDir, logSenderTest)
	assert.NoError(t, err)
	certFunc := certManager.GetCertificateFunc(DefaultTLSKeyPaidID)
	if assert.NotNil(t, certFunc) {
		hello := &tls.ClientHelloInfo{
			ServerName:   "localhost",
			CipherSuites: []uint16{tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305},
		}
		cert, err := certFunc(hello)
		assert.NoError(t, err)
		assert.Equal(t, certManager.certs[DefaultTLSKeyPaidID], cert)
	}
	certFunc = certManager.GetCertificateFunc("unknownID")
	if assert.NotNil(t, certFunc) {
		hello := &tls.ClientHelloInfo{
			ServerName:   "localhost",
			CipherSuites: []uint16{tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305},
		}
		_, err = certFunc(hello)
		if assert.Error(t, err) {
			assert.Contains(t, err.Error(), "no certificate for id unknownID")
		}
	}
	certManager.SetCACertificates(nil)
	err = certManager.LoadRootCAs()
	assert.NoError(t, err)

	certManager.SetCACertificates([]string{""})
	err = certManager.LoadRootCAs()
	assert.Error(t, err)

	certManager.SetCACertificates([]string{"invalid"})
	err = certManager.LoadRootCAs()
	assert.Error(t, err)

	// loading the key as root CA must fail
	certManager.SetCACertificates([]string{keyPath})
	err = certManager.LoadRootCAs()
	assert.Error(t, err)

	certManager.SetCACertificates([]string{certPath})
	err = certManager.LoadRootCAs()
	assert.NoError(t, err)

	rootCa := certManager.GetRootCAs()
	assert.NotNil(t, rootCa)

	err = certManager.Reload()
	assert.NoError(t, err)

	certManager.SetCARevocationLists(nil)
	err = certManager.LoadCRLs()
	assert.NoError(t, err)

	certManager.SetCARevocationLists([]string{""})
	err = certManager.LoadCRLs()
	assert.Error(t, err)

	certManager.SetCARevocationLists([]string{"invalid crl"})
	err = certManager.LoadCRLs()
	assert.Error(t, err)

	// this is not a crl and must fail
	certManager.SetCARevocationLists([]string{caCrtPath})
	err = certManager.LoadCRLs()
	assert.Error(t, err)

	certManager.SetCARevocationLists([]string{caCrlPath})
	err = certManager.LoadCRLs()
	assert.NoError(t, err)

	crt, err := tls.X509KeyPair([]byte(caCRT), []byte(caKey))
	assert.NoError(t, err)

	x509CAcrt, err := x509.ParseCertificate(crt.Certificate[0])
	assert.NoError(t, err)

	crt, err = tls.X509KeyPair([]byte(client1Crt), []byte(client1Key))
	assert.NoError(t, err)
	x509crt, err := x509.ParseCertificate(crt.Certificate[0])
	if assert.NoError(t, err) {
		assert.False(t, certManager.IsRevoked(x509crt, x509CAcrt))
	}

	crt, err = tls.X509KeyPair([]byte(client2Crt), []byte(client2Key))
	assert.NoError(t, err)
	x509crt, err = x509.ParseCertificate(crt.Certificate[0])
	if assert.NoError(t, err) {
		assert.True(t, certManager.IsRevoked(x509crt, x509CAcrt))
	}

	assert.True(t, certManager.IsRevoked(nil, nil))

	err = os.Remove(caCrlPath)
	assert.NoError(t, err)
	err = certManager.Reload()
	assert.Error(t, err)

	err = os.Remove(certPath)
	assert.NoError(t, err)
	err = os.Remove(keyPath)
	assert.NoError(t, err)
	err = certManager.Reload()
	assert.Error(t, err)

	err = os.Remove(caCrtPath)
	assert.NoError(t, err)
}

func TestLoadInvalidCert(t *testing.T) {
	certManager, err := NewCertManager(nil, configDir, logSenderTest)
	if assert.Error(t, err) {
		assert.Contains(t, err.Error(), "no key pairs defined")
	}
	assert.Nil(t, certManager)

	keyPairs := []TLSKeyPair{
		{
			Cert: "test.crt",
			Key:  "test.key",
			ID:   DefaultTLSKeyPaidID,
		},
	}
	certManager, err = NewCertManager(keyPairs, configDir, logSenderTest)
	assert.Error(t, err)
	assert.Nil(t, certManager)

	keyPairs = []TLSKeyPair{
		{
			Cert: "test.crt",
			Key:  "test.key",
		},
	}
	certManager, err = NewCertManager(keyPairs, configDir, logSenderTest)
	if assert.Error(t, err) {
		assert.Contains(t, err.Error(), "TLS certificate without ID")
	}
	assert.Nil(t, certManager)
}

common/transfer.go (Normal file, 465 lines added)
@@ -0,0 +1,465 @@
// Copyright (C) 2019-2022 Nicola Murino
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, version 3.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

package common

import (
	"errors"
	"path"
	"sync"
	"sync/atomic"
	"time"

	"github.com/drakkan/sftpgo/v2/dataprovider"
	"github.com/drakkan/sftpgo/v2/logger"
	"github.com/drakkan/sftpgo/v2/metric"
	"github.com/drakkan/sftpgo/v2/vfs"
)

var (
	// ErrTransferClosed defines the error returned for a closed transfer
	ErrTransferClosed = errors.New("transfer already closed")
)

// BaseTransfer contains the transfer details common to all protocols for an upload or a download.
type BaseTransfer struct { //nolint:maligned
	ID              int64
	BytesSent       int64
	BytesReceived   int64
	Fs              vfs.Fs
	File            vfs.File
	Connection      *BaseConnection
	cancelFn        func()
	fsPath          string
	effectiveFsPath string
	requestPath     string
	ftpMode         string
	start           time.Time
	MaxWriteSize    int64
	MinWriteOffset  int64
	InitialSize     int64
	truncatedSize   int64
	isNewFile       bool
	transferType    int
	AbortTransfer   int32
	aTime           time.Time
	mTime           time.Time
	transferQuota   dataprovider.TransferQuota
	sync.Mutex
	errAbort    error
	ErrTransfer error
}

// NewBaseTransfer returns a new BaseTransfer and adds it to the given connection
func NewBaseTransfer(file vfs.File, conn *BaseConnection, cancelFn func(), fsPath, effectiveFsPath, requestPath string,
	transferType int, minWriteOffset, initialSize, maxWriteSize, truncatedSize int64, isNewFile bool, fs vfs.Fs,
	transferQuota dataprovider.TransferQuota,
) *BaseTransfer {
	t := &BaseTransfer{
		ID:              conn.GetTransferID(),
		File:            file,
		Connection:      conn,
		cancelFn:        cancelFn,
		fsPath:          fsPath,
		effectiveFsPath: effectiveFsPath,
		start:           time.Now(),
		transferType:    transferType,
		MinWriteOffset:  minWriteOffset,
		InitialSize:     initialSize,
		isNewFile:       isNewFile,
		requestPath:     requestPath,
		BytesSent:       0,
		BytesReceived:   0,
		MaxWriteSize:    maxWriteSize,
		AbortTransfer:   0,
		truncatedSize:   truncatedSize,
		transferQuota:   transferQuota,
		Fs:              fs,
	}

	conn.AddTransfer(t)
	return t
}

// GetTransferQuota returns data transfer quota limits
func (t *BaseTransfer) GetTransferQuota() dataprovider.TransferQuota {
	return t.transferQuota
}

// SetFtpMode sets the FTP mode for the current transfer
func (t *BaseTransfer) SetFtpMode(mode string) {
	t.ftpMode = mode
}

// GetID returns the transfer ID
func (t *BaseTransfer) GetID() int64 {
	return t.ID
}

// GetType returns the transfer type
func (t *BaseTransfer) GetType() int {
	return t.transferType
}

// GetSize returns the transferred size
func (t *BaseTransfer) GetSize() int64 {
	if t.transferType == TransferDownload {
		return atomic.LoadInt64(&t.BytesSent)
	}
	return atomic.LoadInt64(&t.BytesReceived)
}

// GetDownloadedSize returns the downloaded size
func (t *BaseTransfer) GetDownloadedSize() int64 {
	return atomic.LoadInt64(&t.BytesSent)
}

// GetUploadedSize returns the uploaded size
func (t *BaseTransfer) GetUploadedSize() int64 {
	return atomic.LoadInt64(&t.BytesReceived)
}

// GetStartTime returns the start time
func (t *BaseTransfer) GetStartTime() time.Time {
	return t.start
}

// GetAbortError returns the error to send to the client if the transfer was aborted
func (t *BaseTransfer) GetAbortError() error {
	t.Lock()
	defer t.Unlock()

	if t.errAbort != nil {
		return t.errAbort
	}
	return getQuotaExceededError(t.Connection.protocol)
}

// SignalClose signals that the transfer should be closed after the next read/write.
// The optional error argument allows sending a specific error; otherwise a generic
// transfer aborted error is sent
func (t *BaseTransfer) SignalClose(err error) {
	t.Lock()
	t.errAbort = err
	t.Unlock()
	atomic.StoreInt32(&(t.AbortTransfer), 1)
}

// GetTruncatedSize returns the truncated size if this is an upload overwriting
// an existing file
func (t *BaseTransfer) GetTruncatedSize() int64 {
	return t.truncatedSize
}

// HasSizeLimit returns true if there is an upload or download size limit
func (t *BaseTransfer) HasSizeLimit() bool {
	if t.MaxWriteSize > 0 {
		return true
	}
	if t.transferQuota.HasSizeLimits() {
		return true
	}

	return false
}

// GetVirtualPath returns the transfer virtual path
func (t *BaseTransfer) GetVirtualPath() string {
	return t.requestPath
}

// GetFsPath returns the transfer filesystem path
func (t *BaseTransfer) GetFsPath() string {
	return t.fsPath
}

// SetTimes stores access and modification times if fsPath matches the current file
func (t *BaseTransfer) SetTimes(fsPath string, atime time.Time, mtime time.Time) bool {
	if fsPath == t.GetFsPath() {
		t.aTime = atime
		t.mTime = mtime
		return true
	}
	return false
}

// GetRealFsPath returns the real transfer filesystem path.
// If atomic uploads are enabled this differs from fsPath
func (t *BaseTransfer) GetRealFsPath(fsPath string) string {
	if fsPath == t.GetFsPath() {
		if t.File != nil {
			return t.File.Name()
		}
		return t.fsPath
	}
	return ""
}

// SetCancelFn sets the cancel function for the transfer
func (t *BaseTransfer) SetCancelFn(cancelFn func()) {
	t.cancelFn = cancelFn
}

// CheckRead returns an error if reading is not allowed
func (t *BaseTransfer) CheckRead() error {
	if t.transferQuota.AllowedDLSize == 0 && t.transferQuota.AllowedTotalSize == 0 {
		return nil
	}
	if t.transferQuota.AllowedTotalSize > 0 {
		if atomic.LoadInt64(&t.BytesSent)+atomic.LoadInt64(&t.BytesReceived) > t.transferQuota.AllowedTotalSize {
			return t.Connection.GetReadQuotaExceededError()
		}
	} else if t.transferQuota.AllowedDLSize > 0 {
		if atomic.LoadInt64(&t.BytesSent) > t.transferQuota.AllowedDLSize {
			return t.Connection.GetReadQuotaExceededError()
		}
	}
	return nil
}

// CheckWrite returns an error if writing is not allowed
func (t *BaseTransfer) CheckWrite() error {
	if t.MaxWriteSize > 0 && atomic.LoadInt64(&t.BytesReceived) > t.MaxWriteSize {
		return t.Connection.GetQuotaExceededError()
	}
	if t.transferQuota.AllowedULSize == 0 && t.transferQuota.AllowedTotalSize == 0 {
		return nil
	}
	if t.transferQuota.AllowedTotalSize > 0 {
		if atomic.LoadInt64(&t.BytesSent)+atomic.LoadInt64(&t.BytesReceived) > t.transferQuota.AllowedTotalSize {
			return t.Connection.GetQuotaExceededError()
		}
	} else if t.transferQuota.AllowedULSize > 0 {
		if atomic.LoadInt64(&t.BytesReceived) > t.transferQuota.AllowedULSize {
			return t.Connection.GetQuotaExceededError()
		}
	}
	return nil
}
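
// Illustrative sketch (not part of the diff): how a protocol handler might
// drive CheckRead/CheckWrite above. The helper below is hypothetical, not
// SFTPGo code; it only assumes the exported methods and counters defined in
// this file.
//
//	func readChunk(t *BaseTransfer, src io.Reader, buf []byte) (int, error) {
//		if err := t.CheckRead(); err != nil {
//			t.TransferError(err) // download quota exhausted, abort
//			return 0, err
//		}
//		n, err := src.Read(buf)
//		atomic.AddInt64(&t.BytesSent, int64(n)) // feeds the next CheckRead
//		return n, err
//	}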

// Truncate changes the size of the opened file.
// Supported for local fs only
func (t *BaseTransfer) Truncate(fsPath string, size int64) (int64, error) {
	if fsPath == t.GetFsPath() {
		if t.File != nil {
			initialSize := t.InitialSize
			err := t.File.Truncate(size)
			if err == nil {
				t.Lock()
				t.InitialSize = size
				if t.MaxWriteSize > 0 {
					sizeDiff := initialSize - size
					t.MaxWriteSize += sizeDiff
					metric.TransferCompleted(atomic.LoadInt64(&t.BytesSent), atomic.LoadInt64(&t.BytesReceived), t.transferType, t.ErrTransfer)
					if t.transferQuota.HasSizeLimits() {
						go func(ulSize, dlSize int64, user dataprovider.User) {
							dataprovider.UpdateUserTransferQuota(&user, ulSize, dlSize, false) //nolint:errcheck
						}(atomic.LoadInt64(&t.BytesReceived), atomic.LoadInt64(&t.BytesSent), t.Connection.User)
					}
					atomic.StoreInt64(&t.BytesReceived, 0)
				}
				t.Unlock()
			}
			t.Connection.Log(logger.LevelDebug, "file %#v truncated to size %v max write size %v new initial size %v err: %v",
				fsPath, size, t.MaxWriteSize, t.InitialSize, err)
			return initialSize, err
		}
		if size == 0 && atomic.LoadInt64(&t.BytesSent) == 0 {
			// for cloud providers the file is always truncated to zero, we don't support append/resume for uploads
			// for buffered SFTP we can have buffered bytes so we return an error
			if !vfs.IsBufferedSFTPFs(t.Fs) {
				return 0, nil
			}
		}
		return 0, vfs.ErrVfsUnsupported
	}
	return 0, errTransferMismatch
}
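
// Worked example (illustrative, not part of the diff): an upload overwrites a
// file with InitialSize = 100 and MaxWriteSize = 50; the client then truncates
// it to 30 before writing. sizeDiff = 100 - 30 = 70, so MaxWriteSize becomes
// 50 + 70 = 120: the bytes freed by the truncate are credited back to the
// write budget, and BytesReceived restarts from zero.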

// TransferError is called if there is an unexpected error.
// For example network or client issues
func (t *BaseTransfer) TransferError(err error) {
	t.Lock()
	defer t.Unlock()
	if t.ErrTransfer != nil {
		return
	}
	t.ErrTransfer = err
	if t.cancelFn != nil {
		t.cancelFn()
	}
	elapsed := time.Since(t.start).Nanoseconds() / 1000000
	t.Connection.Log(logger.LevelError, "Unexpected error for transfer, path: %#v, error: \"%v\" bytes sent: %v, "+
		"bytes received: %v transfer running since %v ms", t.fsPath, t.ErrTransfer, atomic.LoadInt64(&t.BytesSent),
		atomic.LoadInt64(&t.BytesReceived), elapsed)
}

func (t *BaseTransfer) getUploadFileSize() (int64, error) {
	var fileSize int64
	info, err := t.Fs.Stat(t.fsPath)
	if err == nil {
		fileSize = info.Size()
	}
	if vfs.IsCryptOsFs(t.Fs) && t.ErrTransfer != nil {
		errDelete := t.Fs.Remove(t.fsPath, false)
		if errDelete != nil {
			t.Connection.Log(logger.LevelWarn, "error removing partial crypto file %#v: %v", t.fsPath, errDelete)
		}
	}
	return fileSize, err
}

// return 1 if the file is outside the user home dir
func (t *BaseTransfer) checkUploadOutsideHomeDir(err error) int {
	if err == nil {
		return 0
	}
	if Config.TempPath == "" {
		return 0
	}
	err = t.Fs.Remove(t.effectiveFsPath, false)
	t.Connection.Log(logger.LevelWarn, "upload in temp path cannot be renamed, delete temporary file: %#v, deletion error: %v",
		t.effectiveFsPath, err)
	// the file is outside the home dir so don't update the quota
	atomic.StoreInt64(&t.BytesReceived, 0)
	t.MinWriteOffset = 0
	return 1
}

// Close is called when the transfer is completed.
// It logs the transfer info, updates the user quota (for uploads)
// and executes any defined action.
// If there is an error no action will be executed and, in atomic mode,
// we try to delete the temporary file
func (t *BaseTransfer) Close() error {
	defer t.Connection.RemoveTransfer(t)

	var err error
	numFiles := 0
	if t.isNewFile {
		numFiles = 1
	}
	metric.TransferCompleted(atomic.LoadInt64(&t.BytesSent), atomic.LoadInt64(&t.BytesReceived),
		t.transferType, t.ErrTransfer)
	if t.transferQuota.HasSizeLimits() {
		dataprovider.UpdateUserTransferQuota(&t.Connection.User, atomic.LoadInt64(&t.BytesReceived), //nolint:errcheck
			atomic.LoadInt64(&t.BytesSent), false)
	}
	if t.File != nil && t.Connection.IsQuotaExceededError(t.ErrTransfer) {
		// if quota is exceeded we try to remove the partial file for uploads to local filesystem
		err = t.Fs.Remove(t.File.Name(), false)
		if err == nil {
			numFiles--
			atomic.StoreInt64(&t.BytesReceived, 0)
			t.MinWriteOffset = 0
		}
		t.Connection.Log(logger.LevelWarn, "upload denied due to space limit, delete temporary file: %#v, deletion error: %v",
			t.File.Name(), err)
	} else if t.transferType == TransferUpload && t.effectiveFsPath != t.fsPath {
		if t.ErrTransfer == nil || Config.UploadMode == UploadModeAtomicWithResume {
			err = t.Fs.Rename(t.effectiveFsPath, t.fsPath)
			t.Connection.Log(logger.LevelDebug, "atomic upload completed, rename: %#v -> %#v, error: %v",
				t.effectiveFsPath, t.fsPath, err)
			// the file must be removed if it is uploaded to a path outside the home dir and cannot be renamed
			numFiles -= t.checkUploadOutsideHomeDir(err)
		} else {
			err = t.Fs.Remove(t.effectiveFsPath, false)
			t.Connection.Log(logger.LevelWarn, "atomic upload completed with error: \"%v\", delete temporary file: %#v, deletion error: %v",
				t.ErrTransfer, t.effectiveFsPath, err)
			if err == nil {
				numFiles--
				atomic.StoreInt64(&t.BytesReceived, 0)
				t.MinWriteOffset = 0
			}
		}
	}
	elapsed := time.Since(t.start).Nanoseconds() / 1000000
	if t.transferType == TransferDownload {
		logger.TransferLog(downloadLogSender, t.fsPath, elapsed, atomic.LoadInt64(&t.BytesSent), t.Connection.User.Username,
			t.Connection.ID, t.Connection.protocol, t.Connection.localAddr, t.Connection.remoteAddr, t.ftpMode)
		ExecuteActionNotification(t.Connection, operationDownload, t.fsPath, t.requestPath, "", "", "",
			atomic.LoadInt64(&t.BytesSent), t.ErrTransfer)
	} else {
		fileSize := atomic.LoadInt64(&t.BytesReceived) + t.MinWriteOffset
		if statSize, errStat := t.getUploadFileSize(); errStat == nil {
			fileSize = statSize
		}
		t.Connection.Log(logger.LevelDebug, "uploaded file size %v", fileSize)
		t.updateQuota(numFiles, fileSize)
		t.updateTimes()
		logger.TransferLog(uploadLogSender, t.fsPath, elapsed, atomic.LoadInt64(&t.BytesReceived), t.Connection.User.Username,
			t.Connection.ID, t.Connection.protocol, t.Connection.localAddr, t.Connection.remoteAddr, t.ftpMode)
		ExecuteActionNotification(t.Connection, operationUpload, t.fsPath, t.requestPath, "", "", "", fileSize, t.ErrTransfer)
	}
	if t.ErrTransfer != nil {
		t.Connection.Log(logger.LevelError, "transfer error: %v, path: %#v", t.ErrTransfer, t.fsPath)
		if err == nil {
			err = t.ErrTransfer
		}
	}
	return err
}

func (t *BaseTransfer) updateTimes() {
	if !t.aTime.IsZero() && !t.mTime.IsZero() {
		err := t.Fs.Chtimes(t.fsPath, t.aTime, t.mTime, true)
		t.Connection.Log(logger.LevelDebug, "set times for file %#v, atime: %v, mtime: %v, err: %v",
			t.fsPath, t.aTime, t.mTime, err)
	}
}

func (t *BaseTransfer) updateQuota(numFiles int, fileSize int64) bool {
	// S3 uploads are atomic, if there is an error nothing is uploaded
	if t.File == nil && t.ErrTransfer != nil && !t.Connection.User.HasBufferedSFTP(t.GetVirtualPath()) {
		return false
	}
	sizeDiff := fileSize - t.InitialSize
	if t.transferType == TransferUpload && (numFiles != 0 || sizeDiff > 0) {
		vfolder, err := t.Connection.User.GetVirtualFolderForPath(path.Dir(t.requestPath))
		if err == nil {
			dataprovider.UpdateVirtualFolderQuota(&vfolder.BaseVirtualFolder, numFiles, //nolint:errcheck
				sizeDiff, false)
			if vfolder.IsIncludedInUserQuota() {
				dataprovider.UpdateUserQuota(&t.Connection.User, numFiles, sizeDiff, false) //nolint:errcheck
			}
		} else {
			dataprovider.UpdateUserQuota(&t.Connection.User, numFiles, sizeDiff, false) //nolint:errcheck
		}
		return true
	}
	return false
}
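
// Worked example (illustrative, not part of the diff): an upload of 1000
// bytes replaces a 400-byte file under /vdir, so fileSize = 1000,
// InitialSize = 400, sizeDiff = 600 and numFiles = 0. The /vdir folder quota
// grows by 600 bytes, and the user quota grows by the same amount only if the
// folder is included in the user quota; without a matching virtual folder the
// update goes to the user quota alone.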

// HandleThrottle manages bandwidth throttling
func (t *BaseTransfer) HandleThrottle() {
	var wantedBandwidth int64
	var transferredBytes int64
	if t.transferType == TransferDownload {
		wantedBandwidth = t.Connection.User.DownloadBandwidth
		transferredBytes = atomic.LoadInt64(&t.BytesSent)
	} else {
		wantedBandwidth = t.Connection.User.UploadBandwidth
		transferredBytes = atomic.LoadInt64(&t.BytesReceived)
	}
	if wantedBandwidth > 0 {
		// real and wanted elapsed as milliseconds, bytes as kilobytes
		realElapsed := time.Since(t.start).Nanoseconds() / 1000000
		// transferredBytes / 1024 gives KB; we multiply by 1000 to get milliseconds
		wantedElapsed := 1000 * (transferredBytes / 1024) / wantedBandwidth
		if wantedElapsed > realElapsed {
			toSleep := time.Duration(wantedElapsed - realElapsed)
			time.Sleep(toSleep * time.Millisecond)
		}
	}
}
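
// Worked example (illustrative, not part of the diff): with UploadBandwidth
// set to 50 KB/s and 131072 bytes received, wantedElapsed =
// 1000 * (131072 / 1024) / 50 = 2560 ms. If only 1000 ms have really elapsed,
// HandleThrottle sleeps for the remaining 1560 ms, keeping the effective rate
// close to the configured limit.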

common/transfer_test.go (Normal file, 469 lines added)
@@ -0,0 +1,469 @@
// Copyright (C) 2019-2022 Nicola Murino
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, version 3.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

package common

import (
	"errors"
	"os"
	"path/filepath"
	"testing"
	"time"

	"github.com/sftpgo/sdk"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/drakkan/sftpgo/v2/dataprovider"
	"github.com/drakkan/sftpgo/v2/kms"
	"github.com/drakkan/sftpgo/v2/vfs"
)

func TestTransferUpdateQuota(t *testing.T) {
	conn := NewBaseConnection("", ProtocolSFTP, "", "", dataprovider.User{})
	transfer := BaseTransfer{
		Connection:    conn,
		transferType:  TransferUpload,
		BytesReceived: 123,
		Fs:            vfs.NewOsFs("", os.TempDir(), ""),
	}
	errFake := errors.New("fake error")
	transfer.TransferError(errFake)
	assert.False(t, transfer.updateQuota(1, 0))
	err := transfer.Close()
	if assert.Error(t, err) {
		assert.EqualError(t, err, errFake.Error())
	}
	mappedPath := filepath.Join(os.TempDir(), "vdir")
	vdirPath := "/vdir"
	conn.User.VirtualFolders = append(conn.User.VirtualFolders, vfs.VirtualFolder{
		BaseVirtualFolder: vfs.BaseVirtualFolder{
			MappedPath: mappedPath,
		},
		VirtualPath: vdirPath,
		QuotaFiles:  -1,
		QuotaSize:   -1,
	})
	transfer.ErrTransfer = nil
	transfer.BytesReceived = 1
	transfer.requestPath = "/vdir/file"
	assert.True(t, transfer.updateQuota(1, 0))
	err = transfer.Close()
	assert.NoError(t, err)
}

func TestTransferThrottling(t *testing.T) {
	u := dataprovider.User{
		BaseUser: sdk.BaseUser{
			Username:          "test",
			UploadBandwidth:   50,
			DownloadBandwidth: 40,
		},
	}
	fs := vfs.NewOsFs("", os.TempDir(), "")
	testFileSize := int64(131072)
	wantedUploadElapsed := 1000 * (testFileSize / 1024) / u.UploadBandwidth
	wantedDownloadElapsed := 1000 * (testFileSize / 1024) / u.DownloadBandwidth
	// some tolerance
	wantedUploadElapsed -= wantedDownloadElapsed / 10
	wantedDownloadElapsed -= wantedDownloadElapsed / 10
	conn := NewBaseConnection("id", ProtocolSCP, "", "", u)
	transfer := NewBaseTransfer(nil, conn, nil, "", "", "", TransferUpload, 0, 0, 0, 0, true, fs, dataprovider.TransferQuota{})
	transfer.BytesReceived = testFileSize
	transfer.Connection.UpdateLastActivity()
	startTime := transfer.Connection.GetLastActivity()
	transfer.HandleThrottle()
	elapsed := time.Since(startTime).Nanoseconds() / 1000000
	assert.GreaterOrEqual(t, elapsed, wantedUploadElapsed, "upload bandwidth throttling not respected")
	err := transfer.Close()
	assert.NoError(t, err)

	transfer = NewBaseTransfer(nil, conn, nil, "", "", "", TransferDownload, 0, 0, 0, 0, true, fs, dataprovider.TransferQuota{})
	transfer.BytesSent = testFileSize
	transfer.Connection.UpdateLastActivity()
	startTime = transfer.Connection.GetLastActivity()

	transfer.HandleThrottle()
	elapsed = time.Since(startTime).Nanoseconds() / 1000000
	assert.GreaterOrEqual(t, elapsed, wantedDownloadElapsed, "download bandwidth throttling not respected")
	err = transfer.Close()
	assert.NoError(t, err)
}

func TestRealPath(t *testing.T) {
	testFile := filepath.Join(os.TempDir(), "afile.txt")
	fs := vfs.NewOsFs("123", os.TempDir(), "")
	u := dataprovider.User{
		BaseUser: sdk.BaseUser{
			Username: "user",
			HomeDir:  os.TempDir(),
		},
	}
	u.Permissions = make(map[string][]string)
	u.Permissions["/"] = []string{dataprovider.PermAny}
	file, err := os.Create(testFile)
	require.NoError(t, err)
	conn := NewBaseConnection(fs.ConnectionID(), ProtocolSFTP, "", "", u)
	transfer := NewBaseTransfer(file, conn, nil, testFile, testFile, "/transfer_test_file",
		TransferUpload, 0, 0, 0, 0, true, fs, dataprovider.TransferQuota{})
	rPath := transfer.GetRealFsPath(testFile)
	assert.Equal(t, testFile, rPath)
	rPath = conn.getRealFsPath(testFile)
	assert.Equal(t, testFile, rPath)
	err = transfer.Close()
	assert.NoError(t, err)
	err = file.Close()
	assert.NoError(t, err)
	transfer.File = nil
	rPath = transfer.GetRealFsPath(testFile)
	assert.Equal(t, testFile, rPath)
	rPath = transfer.GetRealFsPath("")
	assert.Empty(t, rPath)
	err = os.Remove(testFile)
	assert.NoError(t, err)
	assert.Len(t, conn.GetTransfers(), 0)
}

func TestTruncate(t *testing.T) {
	testFile := filepath.Join(os.TempDir(), "transfer_test_file")
	fs := vfs.NewOsFs("123", os.TempDir(), "")
	u := dataprovider.User{
		BaseUser: sdk.BaseUser{
			Username: "user",
			HomeDir:  os.TempDir(),
		},
	}
	u.Permissions = make(map[string][]string)
	u.Permissions["/"] = []string{dataprovider.PermAny}
	file, err := os.Create(testFile)
	if !assert.NoError(t, err) {
		assert.FailNow(t, "unable to open test file")
	}
	_, err = file.Write([]byte("hello"))
	assert.NoError(t, err)
	conn := NewBaseConnection(fs.ConnectionID(), ProtocolSFTP, "", "", u)
	transfer := NewBaseTransfer(file, conn, nil, testFile, testFile, "/transfer_test_file", TransferUpload, 0, 5,
		100, 0, false, fs, dataprovider.TransferQuota{})

	err = conn.SetStat("/transfer_test_file", &StatAttributes{
		Size:  2,
		Flags: StatAttrSize,
	})
	assert.NoError(t, err)
	assert.Equal(t, int64(103), transfer.MaxWriteSize)
	err = transfer.Close()
	assert.NoError(t, err)
	err = file.Close()
	assert.NoError(t, err)
	fi, err := os.Stat(testFile)
	if assert.NoError(t, err) {
		assert.Equal(t, int64(2), fi.Size())
	}

	transfer = NewBaseTransfer(file, conn, nil, testFile, testFile, "/transfer_test_file", TransferUpload, 0, 0,
		100, 0, true, fs, dataprovider.TransferQuota{})
	// file.Stat will fail on a closed file
	err = conn.SetStat("/transfer_test_file", &StatAttributes{
		Size:  2,
		Flags: StatAttrSize,
	})
	assert.Error(t, err)
	err = transfer.Close()
	assert.NoError(t, err)

	transfer = NewBaseTransfer(nil, conn, nil, testFile, testFile, "", TransferUpload, 0, 0, 0, 0, true,
		fs, dataprovider.TransferQuota{})
	_, err = transfer.Truncate("mismatch", 0)
	assert.EqualError(t, err, errTransferMismatch.Error())
	_, err = transfer.Truncate(testFile, 0)
	assert.NoError(t, err)
	_, err = transfer.Truncate(testFile, 1)
	assert.EqualError(t, err, vfs.ErrVfsUnsupported.Error())

	err = transfer.Close()
	assert.NoError(t, err)

	err = os.Remove(testFile)
	assert.NoError(t, err)

	assert.Len(t, conn.GetTransfers(), 0)
}

func TestTransferErrors(t *testing.T) {
	isCancelled := false
	cancelFn := func() {
		isCancelled = true
	}
	testFile := filepath.Join(os.TempDir(), "transfer_test_file")
	fs := vfs.NewOsFs("id", os.TempDir(), "")
	u := dataprovider.User{
		BaseUser: sdk.BaseUser{
			Username: "test",
			HomeDir:  os.TempDir(),
		},
	}
	err := os.WriteFile(testFile, []byte("test data"), os.ModePerm)
	assert.NoError(t, err)
	file, err := os.Open(testFile)
	if !assert.NoError(t, err) {
		assert.FailNow(t, "unable to open test file")
	}
	conn := NewBaseConnection("id", ProtocolSFTP, "", "", u)
	transfer := NewBaseTransfer(file, conn, nil, testFile, testFile, "/transfer_test_file", TransferUpload,
		0, 0, 0, 0, true, fs, dataprovider.TransferQuota{})
	assert.Nil(t, transfer.cancelFn)
	assert.Equal(t, testFile, transfer.GetFsPath())
	transfer.SetCancelFn(cancelFn)
	errFake := errors.New("err fake")
	transfer.BytesReceived = 9
	transfer.TransferError(ErrQuotaExceeded)
	assert.True(t, isCancelled)
	transfer.TransferError(errFake)
	assert.Error(t, transfer.ErrTransfer, ErrQuotaExceeded.Error())
	// the file is closed by the embedding struct before calling Close
	err = file.Close()
	assert.NoError(t, err)
	err = transfer.Close()
	if assert.Error(t, err) {
		assert.Error(t, err, ErrQuotaExceeded.Error())
	}
	assert.NoFileExists(t, testFile)

	err = os.WriteFile(testFile, []byte("test data"), os.ModePerm)
	assert.NoError(t, err)
	file, err = os.Open(testFile)
	if !assert.NoError(t, err) {
		assert.FailNow(t, "unable to open test file")
	}
	fsPath := filepath.Join(os.TempDir(), "test_file")
	transfer = NewBaseTransfer(file, conn, nil, fsPath, file.Name(), "/test_file", TransferUpload, 0, 0, 0, 0, true,
		fs, dataprovider.TransferQuota{})
	transfer.BytesReceived = 9
	transfer.TransferError(errFake)
	assert.Error(t, transfer.ErrTransfer, errFake.Error())
	// the file is closed by the embedding struct before calling Close
	err = file.Close()
	assert.NoError(t, err)
	err = transfer.Close()
	if assert.Error(t, err) {
		assert.Error(t, err, errFake.Error())
	}
	assert.NoFileExists(t, testFile)

	err = os.WriteFile(testFile, []byte("test data"), os.ModePerm)
	assert.NoError(t, err)
	file, err = os.Open(testFile)
	if !assert.NoError(t, err) {
		assert.FailNow(t, "unable to open test file")
	}
	transfer = NewBaseTransfer(file, conn, nil, fsPath, file.Name(), "/test_file", TransferUpload, 0, 0, 0, 0, true,
		fs, dataprovider.TransferQuota{})
	transfer.BytesReceived = 9
	// the file is closed by the embedding struct before calling Close
	err = file.Close()
	assert.NoError(t, err)
	err = transfer.Close()
	assert.NoError(t, err)
	assert.NoFileExists(t, testFile)
	assert.FileExists(t, fsPath)
	err = os.Remove(fsPath)
	assert.NoError(t, err)

	assert.Len(t, conn.GetTransfers(), 0)
}

func TestRemovePartialCryptoFile(t *testing.T) {
	testFile := filepath.Join(os.TempDir(), "transfer_test_file")
	fs, err := vfs.NewCryptFs("id", os.TempDir(), "", vfs.CryptFsConfig{Passphrase: kms.NewPlainSecret("secret")})
	require.NoError(t, err)
	u := dataprovider.User{
		BaseUser: sdk.BaseUser{
			Username: "test",
			HomeDir:  os.TempDir(),
		},
	}
	conn := NewBaseConnection(fs.ConnectionID(), ProtocolSFTP, "", "", u)
	transfer := NewBaseTransfer(nil, conn, nil, testFile, testFile, "/transfer_test_file", TransferUpload,
		0, 0, 0, 0, true, fs, dataprovider.TransferQuota{})
	transfer.ErrTransfer = errors.New("test error")
	_, err = transfer.getUploadFileSize()
	assert.Error(t, err)
	err = os.WriteFile(testFile, []byte("test data"), os.ModePerm)
	assert.NoError(t, err)
	size, err := transfer.getUploadFileSize()
	assert.NoError(t, err)
	assert.Equal(t, int64(9), size)
	assert.NoFileExists(t, testFile)
}

func TestFTPMode(t *testing.T) {
	conn := NewBaseConnection("", ProtocolFTP, "", "", dataprovider.User{})
	transfer := BaseTransfer{
		Connection:    conn,
		transferType:  TransferUpload,
		BytesReceived: 123,
		Fs:            vfs.NewOsFs("", os.TempDir(), ""),
	}
	assert.Empty(t, transfer.ftpMode)
	transfer.SetFtpMode("active")
	assert.Equal(t, "active", transfer.ftpMode)
}

func TestTransferQuota(t *testing.T) {
	user := dataprovider.User{
		BaseUser: sdk.BaseUser{
			TotalDataTransfer:    -1,
			UploadDataTransfer:   -1,
			DownloadDataTransfer: -1,
		},
	}
	user.Filters.DataTransferLimits = []sdk.DataTransferLimit{
		{
			Sources:              []string{"127.0.0.1/32", "192.168.1.0/24"},
			TotalDataTransfer:    100,
			UploadDataTransfer:   0,
			DownloadDataTransfer: 0,
		},
		{
			Sources:              []string{"172.16.0.0/24"},
			TotalDataTransfer:    0,
			UploadDataTransfer:   120,
			DownloadDataTransfer: 150,
		},
	}
	ul, dl, total := user.GetDataTransferLimits("127.0.1.1")
	assert.Equal(t, int64(0), ul)
	assert.Equal(t, int64(0), dl)
	assert.Equal(t, int64(0), total)
	ul, dl, total = user.GetDataTransferLimits("127.0.0.1")
	assert.Equal(t, int64(0), ul)
	assert.Equal(t, int64(0), dl)
	assert.Equal(t, int64(100*1048576), total)
	ul, dl, total = user.GetDataTransferLimits("192.168.1.4")
	assert.Equal(t, int64(0), ul)
	assert.Equal(t, int64(0), dl)
	assert.Equal(t, int64(100*1048576), total)
	ul, dl, total = user.GetDataTransferLimits("172.16.0.2")
	assert.Equal(t, int64(120*1048576), ul)
	assert.Equal(t, int64(150*1048576), dl)
	assert.Equal(t, int64(0), total)
	transferQuota := dataprovider.TransferQuota{}
	assert.True(t, transferQuota.HasDownloadSpace())
	assert.True(t, transferQuota.HasUploadSpace())
	transferQuota.TotalSize = -1
	transferQuota.ULSize = -1
	transferQuota.DLSize = -1
	assert.True(t, transferQuota.HasDownloadSpace())
	assert.True(t, transferQuota.HasUploadSpace())
	transferQuota.TotalSize = 100
	transferQuota.AllowedTotalSize = 10
	assert.True(t, transferQuota.HasDownloadSpace())
	assert.True(t, transferQuota.HasUploadSpace())
	transferQuota.AllowedTotalSize = 0
	assert.False(t, transferQuota.HasDownloadSpace())
	assert.False(t, transferQuota.HasUploadSpace())
	transferQuota.TotalSize = 0
	transferQuota.DLSize = 100
	transferQuota.ULSize = 50
	transferQuota.AllowedTotalSize = 0
	assert.False(t, transferQuota.HasDownloadSpace())
	assert.False(t, transferQuota.HasUploadSpace())
	transferQuota.AllowedDLSize = 1
	transferQuota.AllowedULSize = 1
	assert.True(t, transferQuota.HasDownloadSpace())
	assert.True(t, transferQuota.HasUploadSpace())
	transferQuota.AllowedDLSize = -10
	transferQuota.AllowedULSize = -1
	assert.False(t, transferQuota.HasDownloadSpace())
	assert.False(t, transferQuota.HasUploadSpace())

	conn := NewBaseConnection("", ProtocolSFTP, "", "", user)
	transfer := NewBaseTransfer(nil, conn, nil, "file.txt", "file.txt", "/transfer_test_file", TransferUpload,
		0, 0, 0, 0, true, vfs.NewOsFs("", os.TempDir(), ""), dataprovider.TransferQuota{})
	err := transfer.CheckRead()
	assert.NoError(t, err)
	err = transfer.CheckWrite()
	assert.NoError(t, err)

	transfer.transferQuota = dataprovider.TransferQuota{
		AllowedTotalSize: 10,
	}
	transfer.BytesReceived = 5
	transfer.BytesSent = 4
	err = transfer.CheckRead()
	assert.NoError(t, err)
	err = transfer.CheckWrite()
	assert.NoError(t, err)

	transfer.BytesSent = 6
	err = transfer.CheckRead()
	if assert.Error(t, err) {
		assert.Contains(t, err.Error(), ErrReadQuotaExceeded.Error())
	}
	err = transfer.CheckWrite()
	assert.True(t, conn.IsQuotaExceededError(err))

	transferQuota = dataprovider.TransferQuota{
		AllowedTotalSize: 0,
		AllowedULSize:    10,
		AllowedDLSize:    5,
	}
	transfer.transferQuota = transferQuota
	assert.Equal(t, transferQuota, transfer.GetTransferQuota())
	err = transfer.CheckRead()
	if assert.Error(t, err) {
		assert.Contains(t, err.Error(), ErrReadQuotaExceeded.Error())
	}
	err = transfer.CheckWrite()
	assert.NoError(t, err)

	transfer.BytesReceived = 11
	err = transfer.CheckRead()
	if assert.Error(t, err) {
		assert.Contains(t, err.Error(), ErrReadQuotaExceeded.Error())
	}
	err = transfer.CheckWrite()
	assert.True(t, conn.IsQuotaExceededError(err))
}

func TestUploadOutsideHomeRenameError(t *testing.T) {
	oldTempPath := Config.TempPath

	conn := NewBaseConnection("", ProtocolSFTP, "", "", dataprovider.User{})
	transfer := BaseTransfer{
		Connection:    conn,
		transferType:  TransferUpload,
		BytesReceived: 123,
		Fs:            vfs.NewOsFs("", filepath.Join(os.TempDir(), "home"), ""),
	}

	fileName := filepath.Join(os.TempDir(), "_temp")
	err := os.WriteFile(fileName, []byte(`data`), 0644)
	assert.NoError(t, err)

	transfer.effectiveFsPath = fileName
	res := transfer.checkUploadOutsideHomeDir(os.ErrPermission)
	assert.Equal(t, 0, res)

	Config.TempPath = filepath.Clean(os.TempDir())
	res = transfer.checkUploadOutsideHomeDir(nil)
	assert.Equal(t, 0, res)
	assert.Greater(t, transfer.BytesReceived, int64(0))
	res = transfer.checkUploadOutsideHomeDir(os.ErrPermission)
	assert.Equal(t, 1, res)
	assert.Equal(t, int64(0), transfer.BytesReceived)
	assert.NoFileExists(t, fileName)

	Config.TempPath = oldTempPath
}

common/transferschecker.go (Normal file, 329 lines added)
@@ -0,0 +1,329 @@
// Copyright (C) 2019-2022 Nicola Murino
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, version 3.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

package common

import (
	"errors"
	"sync"
	"time"

	"github.com/drakkan/sftpgo/v2/dataprovider"
	"github.com/drakkan/sftpgo/v2/logger"
	"github.com/drakkan/sftpgo/v2/util"
)

type overquotaTransfer struct {
	ConnID       string
	TransferID   int64
	TransferType int
}

type uploadAggregationKey struct {
	Username   string
	FolderName string
}

// TransfersChecker defines the interface that transfer checkers must implement.
// A transfer checker ensures that multiple concurrent transfers do not exceed
// the remaining user quota
type TransfersChecker interface {
	AddTransfer(transfer dataprovider.ActiveTransfer)
	RemoveTransfer(ID int64, connectionID string)
	UpdateTransferCurrentSizes(ulSize, dlSize, ID int64, connectionID string)
	GetOverquotaTransfers() []overquotaTransfer
}

func getTransfersChecker(isShared int) TransfersChecker {
	if isShared == 1 {
		logger.Info(logSender, "", "using provider transfer checker")
		return &transfersCheckerDB{}
	}
	logger.Info(logSender, "", "using memory transfer checker")
	return &transfersCheckerMem{}
}

type baseTransferChecker struct {
	transfers []dataprovider.ActiveTransfer
}

func (t *baseTransferChecker) isDataTransferExceeded(user dataprovider.User, transfer dataprovider.ActiveTransfer, ulSize,
	dlSize int64,
) bool {
	ulQuota, dlQuota, totalQuota := user.GetDataTransferLimits(transfer.IP)
	if totalQuota > 0 {
		allowedSize := totalQuota - (user.UsedUploadDataTransfer + user.UsedDownloadDataTransfer)
		if ulSize+dlSize > allowedSize {
			return transfer.CurrentDLSize > 0 || transfer.CurrentULSize > 0
		}
	}
	if dlQuota > 0 {
		allowedSize := dlQuota - user.UsedDownloadDataTransfer
		if dlSize > allowedSize {
			return transfer.CurrentDLSize > 0
		}
	}
	if ulQuota > 0 {
		allowedSize := ulQuota - user.UsedUploadDataTransfer
		if ulSize > allowedSize {
			return transfer.CurrentULSize > 0
		}
	}
	return false
}
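
// Worked example (illustrative, not part of the diff): a user with
// totalQuota = 100 MB who has already used 90 MB has allowedSize = 10 MB. If
// the user's aggregated ongoing transfers amount to ulSize+dlSize = 12 MB,
// every transfer that has already moved some data (CurrentULSize or
// CurrentDLSize > 0) is reported as exceeded, while transfers still at zero
// are left to fail on their own first read or write.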

func (t *baseTransferChecker) getRemainingDiskQuota(user dataprovider.User, folderName string) (int64, error) {
	var result int64

	if folderName != "" {
		for _, folder := range user.VirtualFolders {
			if folder.Name == folderName {
				if folder.QuotaSize > 0 {
					return folder.QuotaSize - folder.UsedQuotaSize, nil
				}
			}
		}
	} else {
		if user.QuotaSize > 0 {
			return user.QuotaSize - user.UsedQuotaSize, nil
		}
	}

	return result, errors.New("no quota limit defined")
}

func (t *baseTransferChecker) aggregateTransfersByUser(usersToFetch map[string]bool,
) (map[string]bool, map[string][]dataprovider.ActiveTransfer) {
	aggregations := make(map[string][]dataprovider.ActiveTransfer)
	for _, transfer := range t.transfers {
		aggregations[transfer.Username] = append(aggregations[transfer.Username], transfer)
		if len(aggregations[transfer.Username]) > 1 {
			if _, ok := usersToFetch[transfer.Username]; !ok {
				usersToFetch[transfer.Username] = false
			}
		}
	}

	return usersToFetch, aggregations
}

func (t *baseTransferChecker) aggregateUploadTransfers() (map[string]bool, map[int][]dataprovider.ActiveTransfer) {
	usersToFetch := make(map[string]bool)
	aggregations := make(map[int][]dataprovider.ActiveTransfer)
	var keys []uploadAggregationKey

	for _, transfer := range t.transfers {
		if transfer.Type != TransferUpload {
			continue
		}
		key := -1
		for idx, k := range keys {
			if k.Username == transfer.Username && k.FolderName == transfer.FolderName {
				key = idx
				break
			}
		}
		if key == -1 {
			key = len(keys)
			keys = append(keys, uploadAggregationKey{
				Username:   transfer.Username,
				FolderName: transfer.FolderName,
			})
		}

		aggregations[key] = append(aggregations[key], transfer)
		if len(aggregations[key]) > 1 {
			if transfer.FolderName != "" {
				usersToFetch[transfer.Username] = true
			} else {
				if _, ok := usersToFetch[transfer.Username]; !ok {
					usersToFetch[transfer.Username] = false
				}
			}
		}
	}

	return usersToFetch, aggregations
}
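
// Illustrative sketch (not part of the diff): given three ongoing uploads
// (user "a", folder ""), (user "a", folder "") and (user "b", folder "f1"),
// the keys are (a, "") and (b, "f1"). Only the (a, "") bucket holds more than
// one transfer, so usersToFetch becomes {"a": false} (false meaning no folder
// quota data is needed) and both "a" uploads share one aggregation index for
// the disk quota check in getOverquotaTransfers.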

func (t *baseTransferChecker) getUsersToCheck(usersToFetch map[string]bool) (map[string]dataprovider.User, error) {
	users, err := dataprovider.GetUsersForQuotaCheck(usersToFetch)
	if err != nil {
		return nil, err
	}

	usersMap := make(map[string]dataprovider.User)

	for _, user := range users {
		usersMap[user.Username] = user
	}

	return usersMap, nil
}

func (t *baseTransferChecker) getOverquotaTransfers(usersToFetch map[string]bool,
	uploadAggregations map[int][]dataprovider.ActiveTransfer,
	userAggregations map[string][]dataprovider.ActiveTransfer,
) []overquotaTransfer {
	if len(usersToFetch) == 0 {
		return nil
	}
	usersMap, err := t.getUsersToCheck(usersToFetch)
	if err != nil {
		logger.Warn(logSender, "", "unable to check transfers, error getting users quota: %v", err)
		return nil
	}

	var overquotaTransfers []overquotaTransfer

	for _, transfers := range uploadAggregations {
		username := transfers[0].Username
		folderName := transfers[0].FolderName
		remainingDiskQuota, err := t.getRemainingDiskQuota(usersMap[username], folderName)
		if err != nil {
			continue
		}
		var usedDiskQuota int64
		for _, tr := range transfers {
			// We optimistically assume that a cloud transfer that replaces an existing
			// file will be successful
			usedDiskQuota += tr.CurrentULSize - tr.TruncatedSize
		}
		logger.Debug(logSender, "", "username %#v, folder %#v, concurrent transfers: %v, remaining disk quota (bytes): %v, disk quota used in ongoing transfers (bytes): %v",
			username, folderName, len(transfers), remainingDiskQuota, usedDiskQuota)
		if usedDiskQuota > remainingDiskQuota {
			for _, tr := range transfers {
				if tr.CurrentULSize > tr.TruncatedSize {
					overquotaTransfers = append(overquotaTransfers, overquotaTransfer{
						ConnID:       tr.ConnID,
						TransferID:   tr.ID,
						TransferType: tr.Type,
					})
				}
			}
		}
	}

	for username, transfers := range userAggregations {
		var ulSize, dlSize int64
		for _, tr := range transfers {
			ulSize += tr.CurrentULSize
			dlSize += tr.CurrentDLSize
		}
		logger.Debug(logSender, "", "username %#v, concurrent transfers: %v, quota (bytes) used in ongoing transfers, ul: %v, dl: %v",
			username, len(transfers), ulSize, dlSize)
		for _, tr := range transfers {
			if t.isDataTransferExceeded(usersMap[username], tr, ulSize, dlSize) {
				overquotaTransfers = append(overquotaTransfers, overquotaTransfer{
					ConnID:       tr.ConnID,
					TransferID:   tr.ID,
					TransferType: tr.Type,
				})
			}
		}
	}

	return overquotaTransfers
}

type transfersCheckerMem struct {
	sync.RWMutex
	baseTransferChecker
}

func (t *transfersCheckerMem) AddTransfer(transfer dataprovider.ActiveTransfer) {
	t.Lock()
	defer t.Unlock()

	t.transfers = append(t.transfers, transfer)
}

func (t *transfersCheckerMem) RemoveTransfer(ID int64, connectionID string) {
	t.Lock()
	defer t.Unlock()

	for idx, transfer := range t.transfers {
		if transfer.ID == ID && transfer.ConnID == connectionID {
			lastIdx := len(t.transfers) - 1
			t.transfers[idx] = t.transfers[lastIdx]
			t.transfers = t.transfers[:lastIdx]
			return
		}
	}
}

func (t *transfersCheckerMem) UpdateTransferCurrentSizes(ulSize, dlSize, ID int64, connectionID string) {
	t.Lock()
	defer t.Unlock()

	for idx := range t.transfers {
		if t.transfers[idx].ID == ID && t.transfers[idx].ConnID == connectionID {
			t.transfers[idx].CurrentDLSize = dlSize
			t.transfers[idx].CurrentULSize = ulSize
			t.transfers[idx].UpdatedAt = util.GetTimeAsMsSinceEpoch(time.Now())
			return
		}
	}
}

func (t *transfersCheckerMem) GetOverquotaTransfers() []overquotaTransfer {
	t.RLock()

	usersToFetch, uploadAggregations := t.aggregateUploadTransfers()
	usersToFetch, userAggregations := t.aggregateTransfersByUser(usersToFetch)

	t.RUnlock()

	return t.getOverquotaTransfers(usersToFetch, uploadAggregations, userAggregations)
}

type transfersCheckerDB struct {
	baseTransferChecker
	lastCleanup time.Time
}

func (t *transfersCheckerDB) AddTransfer(transfer dataprovider.ActiveTransfer) {
	dataprovider.AddActiveTransfer(transfer)
}

func (t *transfersCheckerDB) RemoveTransfer(ID int64, connectionID string) {
	dataprovider.RemoveActiveTransfer(ID, connectionID)
}

func (t *transfersCheckerDB) UpdateTransferCurrentSizes(ulSize, dlSize, ID int64, connectionID string) {
	dataprovider.UpdateActiveTransferSizes(ulSize, dlSize, ID, connectionID)
}

func (t *transfersCheckerDB) GetOverquotaTransfers() []overquotaTransfer {
	if t.lastCleanup.IsZero() || t.lastCleanup.Add(periodicTimeoutCheckInterval*15).Before(time.Now()) {
		before := time.Now().Add(-periodicTimeoutCheckInterval * 5)
		err := dataprovider.CleanupActiveTransfers(before)
		logger.Debug(logSender, "", "cleanup active transfers completed, err: %v", err)
		if err == nil {
			t.lastCleanup = time.Now()
		}
	}
	var err error
	from := time.Now().Add(-periodicTimeoutCheckInterval * 2)
	t.transfers, err = dataprovider.GetActiveTransfers(from)
	if err != nil {
		logger.Error(logSender, "", "unable to check overquota transfers, error getting active transfers: %v", err)
		return nil
	}

	usersToFetch, uploadAggregations := t.aggregateUploadTransfers()
	usersToFetch, userAggregations := t.aggregateTransfersByUser(usersToFetch)

	return t.getOverquotaTransfers(usersToFetch, uploadAggregations, userAggregations)
}
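
// Illustrative note (not part of the diff), assuming a
// periodicTimeoutCheckInterval of 3 seconds: stale rows older than 15 seconds
// are purged at most once every 45 seconds, and only transfers updated within
// the last 6 seconds are fetched, so leftover rows from a crashed node age
// out of the shared checks quickly.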

common/transferschecker_test.go (Normal file, 768 lines added)
@@ -0,0 +1,768 @@
// Copyright (C) 2019-2022 Nicola Murino
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, version 3.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

package common

import (
	"fmt"
	"os"
	"path"
	"path/filepath"
	"strconv"
	"strings"
	"sync/atomic"
	"testing"
	"time"

	"github.com/rs/xid"
	"github.com/sftpgo/sdk"
	"github.com/stretchr/testify/assert"

	"github.com/drakkan/sftpgo/v2/dataprovider"
	"github.com/drakkan/sftpgo/v2/util"
	"github.com/drakkan/sftpgo/v2/vfs"
)

func TestTransfersCheckerDiskQuota(t *testing.T) {
	username := "transfers_check_username"
	folderName := "test_transfers_folder"
	groupName := "test_transfers_group"
	vdirPath := "/vdir"
	group := dataprovider.Group{
		BaseGroup: sdk.BaseGroup{
			Name: groupName,
		},
		UserSettings: dataprovider.GroupUserSettings{
			BaseGroupUserSettings: sdk.BaseGroupUserSettings{
				QuotaSize: 120,
			},
		},
	}
	user := dataprovider.User{
		BaseUser: sdk.BaseUser{
			Username: username,
			Password: "testpwd",
			HomeDir:  filepath.Join(os.TempDir(), username),
			Status:   1,
			QuotaSize: 0, // the quota size defined for the group is used
			Permissions: map[string][]string{
				"/": {dataprovider.PermAny},
			},
		},
		VirtualFolders: []vfs.VirtualFolder{
			{
				BaseVirtualFolder: vfs.BaseVirtualFolder{
					Name:       folderName,
					MappedPath: filepath.Join(os.TempDir(), folderName),
				},
				VirtualPath: vdirPath,
				QuotaSize:   100,
			},
		},
		Groups: []sdk.GroupMapping{
			{
				Name: groupName,
				Type: sdk.GroupTypePrimary,
			},
		},
	}
	err := dataprovider.AddGroup(&group, "", "")
	assert.NoError(t, err)
	group, err = dataprovider.GroupExists(groupName)
	assert.NoError(t, err)
	assert.Equal(t, int64(120), group.UserSettings.QuotaSize)
	err = dataprovider.AddUser(&user, "", "")
	assert.NoError(t, err)
	user, err = dataprovider.GetUserWithGroupSettings(username)
	assert.NoError(t, err)

	connID1 := xid.New().String()
	fsUser, err := user.GetFilesystemForPath("/file1", connID1)
	assert.NoError(t, err)
	conn1 := NewBaseConnection(connID1, ProtocolSFTP, "", "", user)
	fakeConn1 := &fakeConnection{
		BaseConnection: conn1,
	}
	transfer1 := NewBaseTransfer(nil, conn1, nil, filepath.Join(user.HomeDir, "file1"), filepath.Join(user.HomeDir, "file1"),
		"/file1", TransferUpload, 0, 0, 120, 0, true, fsUser, dataprovider.TransferQuota{})
	transfer1.BytesReceived = 150
	err = Connections.Add(fakeConn1)
	assert.NoError(t, err)
	// the transferschecker will do nothing if there is only one ongoing transfer
	Connections.checkTransfers()
	assert.Nil(t, transfer1.errAbort)

	connID2 := xid.New().String()
	conn2 := NewBaseConnection(connID2, ProtocolSFTP, "", "", user)
	fakeConn2 := &fakeConnection{
		BaseConnection: conn2,
	}
	transfer2 := NewBaseTransfer(nil, conn2, nil, filepath.Join(user.HomeDir, "file2"), filepath.Join(user.HomeDir, "file2"),
		"/file2", TransferUpload, 0, 0, 120, 40, true, fsUser, dataprovider.TransferQuota{})
	transfer1.BytesReceived = 50
	transfer2.BytesReceived = 60
	err = Connections.Add(fakeConn2)
	assert.NoError(t, err)

	connID3 := xid.New().String()
	conn3 := NewBaseConnection(connID3, ProtocolSFTP, "", "", user)
	fakeConn3 := &fakeConnection{
		BaseConnection: conn3,
	}
	transfer3 := NewBaseTransfer(nil, conn3, nil, filepath.Join(user.HomeDir, "file3"), filepath.Join(user.HomeDir, "file3"),
		"/file3", TransferDownload, 0, 0, 120, 0, true, fsUser, dataprovider.TransferQuota{})
	transfer3.BytesReceived = 60 // this value will be ignored, this is a download
	err = Connections.Add(fakeConn3)
	assert.NoError(t, err)

	// the transfers are not overquota
	Connections.checkTransfers()
	assert.Nil(t, transfer1.errAbort)
	assert.Nil(t, transfer2.errAbort)
	assert.Nil(t, transfer3.errAbort)

	transfer1.BytesReceived = 80 // truncated size will be subtracted, we are not overquota
	Connections.checkTransfers()
	assert.Nil(t, transfer1.errAbort)
	assert.Nil(t, transfer2.errAbort)
	assert.Nil(t, transfer3.errAbort)
	transfer1.BytesReceived = 120
	// we are now overquota
	// if another check is in progress nothing is done
	atomic.StoreInt32(&Connections.transfersCheckStatus, 1)
	Connections.checkTransfers()
	assert.Nil(t, transfer1.errAbort)
	assert.Nil(t, transfer2.errAbort)
	assert.Nil(t, transfer3.errAbort)
	atomic.StoreInt32(&Connections.transfersCheckStatus, 0)

	Connections.checkTransfers()
	assert.True(t, conn1.IsQuotaExceededError(transfer1.errAbort), transfer1.errAbort)
	assert.True(t, conn2.IsQuotaExceededError(transfer2.errAbort), transfer2.errAbort)
	assert.True(t, conn1.IsQuotaExceededError(transfer1.GetAbortError()))
	assert.Nil(t, transfer3.errAbort)
	assert.True(t, conn3.IsQuotaExceededError(transfer3.GetAbortError()))
	// update the user quota size
	group.UserSettings.QuotaSize = 1000
	err = dataprovider.UpdateGroup(&group, []string{username}, "", "")
	assert.NoError(t, err)
	transfer1.errAbort = nil
	transfer2.errAbort = nil
	Connections.checkTransfers()
	assert.Nil(t, transfer1.errAbort)
	assert.Nil(t, transfer2.errAbort)
	assert.Nil(t, transfer3.errAbort)

	group.UserSettings.QuotaSize = 0
	err = dataprovider.UpdateGroup(&group, []string{username}, "", "")
	assert.NoError(t, err)
	Connections.checkTransfers()
	assert.Nil(t, transfer1.errAbort)
	assert.Nil(t, transfer2.errAbort)
	assert.Nil(t, transfer3.errAbort)
	// now check a public folder
	transfer1.BytesReceived = 0
	transfer2.BytesReceived = 0
	connID4 := xid.New().String()
	fsFolder, err := user.GetFilesystemForPath(path.Join(vdirPath, "/file1"), connID4)
	assert.NoError(t, err)
	conn4 := NewBaseConnection(connID4, ProtocolSFTP, "", "", user)
	fakeConn4 := &fakeConnection{
		BaseConnection: conn4,
	}
	transfer4 := NewBaseTransfer(nil, conn4, nil, filepath.Join(os.TempDir(), folderName, "file1"),
		filepath.Join(os.TempDir(), folderName, "file1"), path.Join(vdirPath, "/file1"), TransferUpload, 0, 0,
		100, 0, true, fsFolder, dataprovider.TransferQuota{})
	err = Connections.Add(fakeConn4)
	assert.NoError(t, err)
	connID5 := xid.New().String()
	conn5 := NewBaseConnection(connID5, ProtocolSFTP, "", "", user)
	fakeConn5 := &fakeConnection{
		BaseConnection: conn5,
	}
	transfer5 := NewBaseTransfer(nil, conn5, nil, filepath.Join(os.TempDir(), folderName, "file2"),
		filepath.Join(os.TempDir(), folderName, "file2"), path.Join(vdirPath, "/file2"), TransferUpload, 0, 0,
		100, 0, true, fsFolder, dataprovider.TransferQuota{})

	err = Connections.Add(fakeConn5)
	assert.NoError(t, err)
	transfer4.BytesReceived = 50
	transfer5.BytesReceived = 40
	Connections.checkTransfers()
	assert.Nil(t, transfer4.errAbort)
	assert.Nil(t, transfer5.errAbort)
	transfer5.BytesReceived = 60
	Connections.checkTransfers()
	assert.Nil(t, transfer1.errAbort)
	assert.Nil(t, transfer2.errAbort)
	assert.Nil(t, transfer3.errAbort)
	assert.True(t, conn1.IsQuotaExceededError(transfer4.errAbort))
	assert.True(t, conn2.IsQuotaExceededError(transfer5.errAbort))

	if dataprovider.GetProviderStatus().Driver != dataprovider.MemoryDataProviderName {
		providerConf := dataprovider.GetProviderConfig()
		err = dataprovider.Close()
		assert.NoError(t, err)

		transfer4.errAbort = nil
		transfer5.errAbort = nil
		Connections.checkTransfers()
		assert.Nil(t, transfer1.errAbort)
		assert.Nil(t, transfer2.errAbort)
		assert.Nil(t, transfer3.errAbort)
		assert.Nil(t, transfer4.errAbort)
		assert.Nil(t, transfer5.errAbort)

		err = dataprovider.Initialize(providerConf, configDir, true)
		assert.NoError(t, err)
	}
|
||||
err = transfer1.Close()
|
||||
assert.NoError(t, err)
|
||||
err = transfer2.Close()
|
||||
assert.NoError(t, err)
|
||||
err = transfer3.Close()
|
||||
assert.NoError(t, err)
|
||||
err = transfer4.Close()
|
||||
assert.NoError(t, err)
|
||||
err = transfer5.Close()
|
||||
assert.NoError(t, err)
|
||||
|
||||
Connections.Remove(fakeConn1.GetID())
|
||||
Connections.Remove(fakeConn2.GetID())
|
||||
Connections.Remove(fakeConn3.GetID())
|
||||
Connections.Remove(fakeConn4.GetID())
|
||||
Connections.Remove(fakeConn5.GetID())
|
||||
stats := Connections.GetStats()
|
||||
assert.Len(t, stats, 0)
|
||||
|
||||
err = dataprovider.DeleteUser(user.Username, "", "")
|
||||
assert.NoError(t, err)
|
||||
err = os.RemoveAll(user.GetHomeDir())
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = dataprovider.DeleteFolder(folderName, "", "")
|
||||
assert.NoError(t, err)
|
||||
err = os.RemoveAll(filepath.Join(os.TempDir(), folderName))
|
||||
assert.NoError(t, err)
|
||||
err = dataprovider.DeleteGroup(groupName, "", "")
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
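// TestTransferCheckerTransferQuota verifies that the transfers checker aborts
// uploads once the per-user AllowedTotalSize transfer budget is exhausted and
// aborts downloads with ErrReadQuotaExceeded once AllowedDLSize is exceeded.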
func TestTransferCheckerTransferQuota(t *testing.T) {
	username := "transfers_check_username"
	user := dataprovider.User{
		BaseUser: sdk.BaseUser{
			Username:          username,
			Password:          "test_pwd",
			HomeDir:           filepath.Join(os.TempDir(), username),
			Status:            1,
			TotalDataTransfer: 1,
			Permissions: map[string][]string{
				"/": {dataprovider.PermAny},
			},
		},
	}
	err := dataprovider.AddUser(&user, "", "")
	assert.NoError(t, err)

	connID1 := xid.New().String()
	fsUser, err := user.GetFilesystemForPath("/file1", connID1)
	assert.NoError(t, err)
	conn1 := NewBaseConnection(connID1, ProtocolSFTP, "", "192.168.1.1", user)
	fakeConn1 := &fakeConnection{
		BaseConnection: conn1,
	}
	transfer1 := NewBaseTransfer(nil, conn1, nil, filepath.Join(user.HomeDir, "file1"), filepath.Join(user.HomeDir, "file1"),
		"/file1", TransferUpload, 0, 0, 0, 0, true, fsUser, dataprovider.TransferQuota{AllowedTotalSize: 100})
	transfer1.BytesReceived = 150
	err = Connections.Add(fakeConn1)
	assert.NoError(t, err)
	// the transfers checker will do nothing if there is only one ongoing transfer
	Connections.checkTransfers()
	assert.Nil(t, transfer1.errAbort)

	connID2 := xid.New().String()
	conn2 := NewBaseConnection(connID2, ProtocolSFTP, "", "127.0.0.1", user)
	fakeConn2 := &fakeConnection{
		BaseConnection: conn2,
	}
	transfer2 := NewBaseTransfer(nil, conn2, nil, filepath.Join(user.HomeDir, "file2"), filepath.Join(user.HomeDir, "file2"),
		"/file2", TransferUpload, 0, 0, 0, 0, true, fsUser, dataprovider.TransferQuota{AllowedTotalSize: 100})
	transfer2.BytesReceived = 150
	err = Connections.Add(fakeConn2)
	assert.NoError(t, err)
	Connections.checkTransfers()
	assert.Nil(t, transfer1.errAbort)
	assert.Nil(t, transfer2.errAbort)
	// now test overquota
	transfer1.BytesReceived = 1024*1024 + 1
	transfer2.BytesReceived = 0
	Connections.checkTransfers()
	assert.True(t, conn1.IsQuotaExceededError(transfer1.errAbort))
	assert.Nil(t, transfer2.errAbort)
	transfer1.errAbort = nil
	transfer1.BytesReceived = 1024*1024 + 1
	transfer2.BytesReceived = 1024
	Connections.checkTransfers()
	assert.True(t, conn1.IsQuotaExceededError(transfer1.errAbort))
	assert.True(t, conn2.IsQuotaExceededError(transfer2.errAbort))
	transfer1.BytesReceived = 0
	transfer2.BytesReceived = 0
	transfer1.errAbort = nil
	transfer2.errAbort = nil

	err = transfer1.Close()
	assert.NoError(t, err)
	err = transfer2.Close()
	assert.NoError(t, err)
	Connections.Remove(fakeConn1.GetID())
	Connections.Remove(fakeConn2.GetID())

	connID3 := xid.New().String()
	conn3 := NewBaseConnection(connID3, ProtocolSFTP, "", "", user)
	fakeConn3 := &fakeConnection{
		BaseConnection: conn3,
	}
	transfer3 := NewBaseTransfer(nil, conn3, nil, filepath.Join(user.HomeDir, "file1"), filepath.Join(user.HomeDir, "file1"),
		"/file1", TransferDownload, 0, 0, 0, 0, true, fsUser, dataprovider.TransferQuota{AllowedDLSize: 100})
	transfer3.BytesSent = 150
	err = Connections.Add(fakeConn3)
	assert.NoError(t, err)

	connID4 := xid.New().String()
	conn4 := NewBaseConnection(connID4, ProtocolSFTP, "", "", user)
	fakeConn4 := &fakeConnection{
		BaseConnection: conn4,
	}
	transfer4 := NewBaseTransfer(nil, conn4, nil, filepath.Join(user.HomeDir, "file2"), filepath.Join(user.HomeDir, "file2"),
		"/file2", TransferDownload, 0, 0, 0, 0, true, fsUser, dataprovider.TransferQuota{AllowedDLSize: 100})
	transfer4.BytesSent = 150
	err = Connections.Add(fakeConn4)
	assert.NoError(t, err)
	Connections.checkTransfers()
	assert.Nil(t, transfer3.errAbort)
	assert.Nil(t, transfer4.errAbort)

	transfer3.BytesSent = 512 * 1024
	transfer4.BytesSent = 512*1024 + 1
	Connections.checkTransfers()
	if assert.Error(t, transfer3.errAbort) {
		assert.Contains(t, transfer3.errAbort.Error(), ErrReadQuotaExceeded.Error())
	}
	if assert.Error(t, transfer4.errAbort) {
		assert.Contains(t, transfer4.errAbort.Error(), ErrReadQuotaExceeded.Error())
	}

	Connections.Remove(fakeConn3.GetID())
	Connections.Remove(fakeConn4.GetID())
	stats := Connections.GetStats()
	assert.Len(t, stats, 0)

	err = dataprovider.DeleteUser(user.Username, "", "")
	assert.NoError(t, err)
	err = os.RemoveAll(user.GetHomeDir())
	assert.NoError(t, err)
}

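// TestAggregateTransfers checks how the in-memory transfers checker groups
// active transfers: only uploads are aggregated, keyed by username and folder,
// and a user is queued for a quota fetch once one of its aggregates holds at
// least two transfers; the boolean value records whether folder quota data
// must be fetched as well.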
func TestAggregateTransfers(t *testing.T) {
	checker := transfersCheckerMem{}
	checker.AddTransfer(dataprovider.ActiveTransfer{
		ID:            1,
		Type:          TransferUpload,
		ConnID:        "1",
		Username:      "user",
		FolderName:    "",
		TruncatedSize: 0,
		CurrentULSize: 100,
		CurrentDLSize: 0,
		CreatedAt:     util.GetTimeAsMsSinceEpoch(time.Now()),
		UpdatedAt:     util.GetTimeAsMsSinceEpoch(time.Now()),
	})
	usersToFetch, aggregations := checker.aggregateUploadTransfers()
	assert.Len(t, usersToFetch, 0)
	assert.Len(t, aggregations, 1)

	checker.AddTransfer(dataprovider.ActiveTransfer{
		ID:            1,
		Type:          TransferDownload,
		ConnID:        "2",
		Username:      "user",
		FolderName:    "",
		TruncatedSize: 0,
		CurrentULSize: 0,
		CurrentDLSize: 100,
		CreatedAt:     util.GetTimeAsMsSinceEpoch(time.Now()),
		UpdatedAt:     util.GetTimeAsMsSinceEpoch(time.Now()),
	})

	usersToFetch, aggregations = checker.aggregateUploadTransfers()
	assert.Len(t, usersToFetch, 0)
	assert.Len(t, aggregations, 1)

	checker.AddTransfer(dataprovider.ActiveTransfer{
		ID:            1,
		Type:          TransferUpload,
		ConnID:        "3",
		Username:      "user",
		FolderName:    "folder",
		TruncatedSize: 0,
		CurrentULSize: 10,
		CurrentDLSize: 0,
		CreatedAt:     util.GetTimeAsMsSinceEpoch(time.Now()),
		UpdatedAt:     util.GetTimeAsMsSinceEpoch(time.Now()),
	})

	usersToFetch, aggregations = checker.aggregateUploadTransfers()
	assert.Len(t, usersToFetch, 0)
	assert.Len(t, aggregations, 2)

	checker.AddTransfer(dataprovider.ActiveTransfer{
		ID:            1,
		Type:          TransferUpload,
		ConnID:        "4",
		Username:      "user1",
		FolderName:    "",
		TruncatedSize: 0,
		CurrentULSize: 100,
		CurrentDLSize: 0,
		CreatedAt:     util.GetTimeAsMsSinceEpoch(time.Now()),
		UpdatedAt:     util.GetTimeAsMsSinceEpoch(time.Now()),
	})

	usersToFetch, aggregations = checker.aggregateUploadTransfers()
	assert.Len(t, usersToFetch, 0)
	assert.Len(t, aggregations, 3)

	checker.AddTransfer(dataprovider.ActiveTransfer{
		ID:            1,
		Type:          TransferUpload,
		ConnID:        "5",
		Username:      "user",
		FolderName:    "",
		TruncatedSize: 0,
		CurrentULSize: 100,
		CurrentDLSize: 0,
		CreatedAt:     util.GetTimeAsMsSinceEpoch(time.Now()),
		UpdatedAt:     util.GetTimeAsMsSinceEpoch(time.Now()),
	})

	usersToFetch, aggregations = checker.aggregateUploadTransfers()
	assert.Len(t, usersToFetch, 1)
	val, ok := usersToFetch["user"]
	assert.True(t, ok)
	assert.False(t, val)
	assert.Len(t, aggregations, 3)
	aggregate, ok := aggregations[0]
	assert.True(t, ok)
	assert.Len(t, aggregate, 2)

	checker.AddTransfer(dataprovider.ActiveTransfer{
		ID:            1,
		Type:          TransferUpload,
		ConnID:        "6",
		Username:      "user",
		FolderName:    "",
		TruncatedSize: 0,
		CurrentULSize: 100,
		CurrentDLSize: 0,
		CreatedAt:     util.GetTimeAsMsSinceEpoch(time.Now()),
		UpdatedAt:     util.GetTimeAsMsSinceEpoch(time.Now()),
	})

	usersToFetch, aggregations = checker.aggregateUploadTransfers()
	assert.Len(t, usersToFetch, 1)
	val, ok = usersToFetch["user"]
	assert.True(t, ok)
	assert.False(t, val)
	assert.Len(t, aggregations, 3)
	aggregate, ok = aggregations[0]
	assert.True(t, ok)
	assert.Len(t, aggregate, 3)

	checker.AddTransfer(dataprovider.ActiveTransfer{
		ID:            1,
		Type:          TransferUpload,
		ConnID:        "7",
		Username:      "user",
		FolderName:    "folder",
		TruncatedSize: 0,
		CurrentULSize: 10,
		CurrentDLSize: 0,
		CreatedAt:     util.GetTimeAsMsSinceEpoch(time.Now()),
		UpdatedAt:     util.GetTimeAsMsSinceEpoch(time.Now()),
	})

	usersToFetch, aggregations = checker.aggregateUploadTransfers()
	assert.Len(t, usersToFetch, 1)
	val, ok = usersToFetch["user"]
	assert.True(t, ok)
	assert.True(t, val)
	assert.Len(t, aggregations, 3)
	aggregate, ok = aggregations[0]
	assert.True(t, ok)
	assert.Len(t, aggregate, 3)
	aggregate, ok = aggregations[1]
	assert.True(t, ok)
	assert.Len(t, aggregate, 2)

	checker.AddTransfer(dataprovider.ActiveTransfer{
		ID:            1,
		Type:          TransferUpload,
		ConnID:        "8",
		Username:      "user",
		FolderName:    "",
		TruncatedSize: 0,
		CurrentULSize: 100,
		CurrentDLSize: 0,
		CreatedAt:     util.GetTimeAsMsSinceEpoch(time.Now()),
		UpdatedAt:     util.GetTimeAsMsSinceEpoch(time.Now()),
	})

	usersToFetch, aggregations = checker.aggregateUploadTransfers()
	assert.Len(t, usersToFetch, 1)
	val, ok = usersToFetch["user"]
	assert.True(t, ok)
	assert.True(t, val)
	assert.Len(t, aggregations, 3)
	aggregate, ok = aggregations[0]
	assert.True(t, ok)
	assert.Len(t, aggregate, 4)
	aggregate, ok = aggregations[1]
	assert.True(t, ok)
	assert.Len(t, aggregate, 2)
}

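// TestDataTransferExceeded exercises isDataTransferExceeded directly: the
// *DataTransfer limits are expressed in megabytes while the used counters and
// the deltas passed in are bytes, and a transfer is flagged only if it
// actually moved data (CurrentULSize/CurrentDLSize greater than zero) once
// the limit is crossed.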
func TestDataTransferExceeded(t *testing.T) {
	user := dataprovider.User{
		BaseUser: sdk.BaseUser{
			TotalDataTransfer: 1,
		},
	}
	transfer := dataprovider.ActiveTransfer{
		CurrentULSize: 0,
		CurrentDLSize: 0,
	}
	user.UsedDownloadDataTransfer = 1024 * 1024
	user.UsedUploadDataTransfer = 512 * 1024
	checker := transfersCheckerMem{}
	res := checker.isDataTransferExceeded(user, transfer, 100, 100)
	assert.False(t, res)
	transfer.CurrentULSize = 1
	res = checker.isDataTransferExceeded(user, transfer, 100, 100)
	assert.True(t, res)
	user.UsedDownloadDataTransfer = 512*1024 - 100
	user.UsedUploadDataTransfer = 512*1024 - 100
	res = checker.isDataTransferExceeded(user, transfer, 100, 100)
	assert.False(t, res)
	res = checker.isDataTransferExceeded(user, transfer, 101, 100)
	assert.True(t, res)

	user.TotalDataTransfer = 0
	user.DownloadDataTransfer = 1
	user.UsedDownloadDataTransfer = 512 * 1024
	transfer.CurrentULSize = 0
	transfer.CurrentDLSize = 100
	res = checker.isDataTransferExceeded(user, transfer, 0, 512*1024)
	assert.False(t, res)
	res = checker.isDataTransferExceeded(user, transfer, 0, 512*1024+1)
	assert.True(t, res)

	user.DownloadDataTransfer = 0
	user.UploadDataTransfer = 1
	user.UsedUploadDataTransfer = 512 * 1024
	transfer.CurrentULSize = 0
	transfer.CurrentDLSize = 0
	res = checker.isDataTransferExceeded(user, transfer, 512*1024+1, 0)
	assert.False(t, res)
	transfer.CurrentULSize = 1
	res = checker.isDataTransferExceeded(user, transfer, 512*1024+1, 0)
	assert.True(t, res)
}

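// TestGetUsersForQuotaCheck verifies that GetUsersForQuotaCheck loads virtual
// folder quota data only for the users whose map value is true, and that
// per-source data transfer limits are resolved from the client IP (stored in
// megabytes, returned in bytes).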
func TestGetUsersForQuotaCheck(t *testing.T) {
	usersToFetch := make(map[string]bool)
	for i := 0; i < 50; i++ {
		usersToFetch[fmt.Sprintf("user%v", i)] = i%2 == 0
	}

	users, err := dataprovider.GetUsersForQuotaCheck(usersToFetch)
	assert.NoError(t, err)
	assert.Len(t, users, 0)

	for i := 0; i < 40; i++ {
		user := dataprovider.User{
			BaseUser: sdk.BaseUser{
				Username:  fmt.Sprintf("user%v", i),
				Password:  "pwd",
				HomeDir:   filepath.Join(os.TempDir(), fmt.Sprintf("user%v", i)),
				Status:    1,
				QuotaSize: 120,
				Permissions: map[string][]string{
					"/": {dataprovider.PermAny},
				},
			},
			VirtualFolders: []vfs.VirtualFolder{
				{
					BaseVirtualFolder: vfs.BaseVirtualFolder{
						Name:       fmt.Sprintf("f%v", i),
						MappedPath: filepath.Join(os.TempDir(), fmt.Sprintf("f%v", i)),
					},
					VirtualPath: "/vfolder",
					QuotaSize:   100,
				},
			},
			Filters: dataprovider.UserFilters{
				BaseUserFilters: sdk.BaseUserFilters{
					DataTransferLimits: []sdk.DataTransferLimit{
						{
							Sources:              []string{"172.16.0.0/16"},
							UploadDataTransfer:   50,
							DownloadDataTransfer: 80,
						},
					},
				},
			},
		}
		err = dataprovider.AddUser(&user, "", "")
		assert.NoError(t, err)
		err = dataprovider.UpdateVirtualFolderQuota(&vfs.BaseVirtualFolder{Name: fmt.Sprintf("f%v", i)}, 1, 50, false)
		assert.NoError(t, err)
	}

	users, err = dataprovider.GetUsersForQuotaCheck(usersToFetch)
	assert.NoError(t, err)
	assert.Len(t, users, 40)

	for _, user := range users {
		userIdxStr := strings.Replace(user.Username, "user", "", 1)
		userIdx, err := strconv.Atoi(userIdxStr)
		assert.NoError(t, err)
		if userIdx%2 == 0 {
			if assert.Len(t, user.VirtualFolders, 1, user.Username) {
				assert.Equal(t, int64(100), user.VirtualFolders[0].QuotaSize)
				assert.Equal(t, int64(50), user.VirtualFolders[0].UsedQuotaSize)
			}
		} else {
			switch dataprovider.GetProviderStatus().Driver {
			case dataprovider.MySQLDataProviderName, dataprovider.PGSQLDataProviderName,
				dataprovider.CockroachDataProviderName, dataprovider.SQLiteDataProviderName:
				assert.Len(t, user.VirtualFolders, 0, user.Username)
			}
		}
		ul, dl, total := user.GetDataTransferLimits("127.1.1.1")
		assert.Equal(t, int64(0), ul)
		assert.Equal(t, int64(0), dl)
		assert.Equal(t, int64(0), total)
		ul, dl, total = user.GetDataTransferLimits("172.16.2.3")
		assert.Equal(t, int64(50*1024*1024), ul)
		assert.Equal(t, int64(80*1024*1024), dl)
		assert.Equal(t, int64(0), total)
	}

	for i := 0; i < 40; i++ {
		err = dataprovider.DeleteUser(fmt.Sprintf("user%v", i), "", "")
		assert.NoError(t, err)
		err = dataprovider.DeleteFolder(fmt.Sprintf("f%v", i), "", "")
		assert.NoError(t, err)
	}

	users, err = dataprovider.GetUsersForQuotaCheck(usersToFetch)
	assert.NoError(t, err)
	assert.Len(t, users, 0)
}

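// TestDBTransferChecker covers the database-backed checker used when the data
// provider is shared across multiple instances: transfers are persisted with
// AddTransfer, their counters updated with UpdateTransferCurrentSizes and
// removed again with RemoveTransfer.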
func TestDBTransferChecker(t *testing.T) {
	if !isDbTransferCheckerSupported() {
		t.Skip("this test is not supported with the current database provider")
	}
	providerConf := dataprovider.GetProviderConfig()
	err := dataprovider.Close()
	assert.NoError(t, err)
	providerConf.IsShared = 1
	err = dataprovider.Initialize(providerConf, configDir, true)
	assert.NoError(t, err)
	c := getTransfersChecker(1)
	checker, ok := c.(*transfersCheckerDB)
	assert.True(t, ok)
	assert.True(t, checker.lastCleanup.IsZero())
	transfer1 := dataprovider.ActiveTransfer{
		ID:         1,
		Type:       TransferDownload,
		ConnID:     xid.New().String(),
		Username:   "user1",
		FolderName: "folder1",
		IP:         "127.0.0.1",
	}
	checker.AddTransfer(transfer1)
	transfers, err := dataprovider.GetActiveTransfers(time.Now().Add(24 * time.Hour))
	assert.NoError(t, err)
	assert.Len(t, transfers, 0)
	transfers, err = dataprovider.GetActiveTransfers(time.Now().Add(-periodicTimeoutCheckInterval * 2))
	assert.NoError(t, err)
	var createdAt, updatedAt int64
	if assert.Len(t, transfers, 1) {
		transfer := transfers[0]
		assert.Equal(t, transfer1.ID, transfer.ID)
		assert.Equal(t, transfer1.Type, transfer.Type)
		assert.Equal(t, transfer1.ConnID, transfer.ConnID)
		assert.Equal(t, transfer1.Username, transfer.Username)
		assert.Equal(t, transfer1.IP, transfer.IP)
		assert.Equal(t, transfer1.FolderName, transfer.FolderName)
		assert.Greater(t, transfer.CreatedAt, int64(0))
		assert.Greater(t, transfer.UpdatedAt, int64(0))
		assert.Equal(t, int64(0), transfer.CurrentDLSize)
		assert.Equal(t, int64(0), transfer.CurrentULSize)
		createdAt = transfer.CreatedAt
		updatedAt = transfer.UpdatedAt
	}
	time.Sleep(100 * time.Millisecond)
	checker.UpdateTransferCurrentSizes(100, 150, transfer1.ID, transfer1.ConnID)
	transfers, err = dataprovider.GetActiveTransfers(time.Now().Add(-periodicTimeoutCheckInterval * 2))
	assert.NoError(t, err)
	if assert.Len(t, transfers, 1) {
		transfer := transfers[0]
		assert.Equal(t, int64(150), transfer.CurrentDLSize)
		assert.Equal(t, int64(100), transfer.CurrentULSize)
		assert.Equal(t, createdAt, transfer.CreatedAt)
		assert.Greater(t, transfer.UpdatedAt, updatedAt)
	}
	res := checker.GetOverquotaTransfers()
	assert.Len(t, res, 0)

	checker.RemoveTransfer(transfer1.ID, transfer1.ConnID)
	transfers, err = dataprovider.GetActiveTransfers(time.Now().Add(-periodicTimeoutCheckInterval * 2))
	assert.NoError(t, err)
	assert.Len(t, transfers, 0)

	err = dataprovider.Close()
	assert.NoError(t, err)
	res = checker.GetOverquotaTransfers()
	assert.Len(t, res, 0)
	providerConf.IsShared = 0
	err = dataprovider.Initialize(providerConf, configDir, true)
	assert.NoError(t, err)
}

func isDbTransferCheckerSupported() bool {
	// SQLite shares the implementation with the other SQL-based providers but
	// it makes no sense to use it outside test cases
	switch dataprovider.GetProviderStatus().Driver {
	case dataprovider.MySQLDataProviderName, dataprovider.PGSQLDataProviderName,
		dataprovider.CockroachDataProviderName, dataprovider.SQLiteDataProviderName:
		return true
	default:
		return false
	}
}

config/config.go | 2042 (diff suppressed because it is too large)

config/config_darwin.go | 25 (new file)
@@ -0,0 +1,25 @@
// Copyright (C) 2019-2022 Nicola Murino
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, version 3.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

//go:build darwin
// +build darwin

package config

import "github.com/spf13/viper"

// macOS specific config search path
func setViperAdditionalConfigPaths() {
	viper.AddConfigPath("/usr/local/etc/sftpgo")
}

config/config_fallback.go | 20 (new file)
@@ -0,0 +1,20 @@
// Copyright (C) 2019-2022 Nicola Murino
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, version 3.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

//go:build !linux && !darwin
// +build !linux,!darwin

package config

func setViperAdditionalConfigPaths() {}

@@ -1,3 +1,18 @@
// Copyright (C) 2019-2022 Nicola Murino
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, version 3.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

//go:build linux
// +build linux

package config
@@ -8,4 +23,5 @@ import "github.com/spf13/viper"
func setViperAdditionalConfigPaths() {
	viper.AddConfigPath("$HOME/.config/sftpgo")
	viper.AddConfigPath("/etc/sftpgo")
	viper.AddConfigPath("/usr/local/etc/sftpgo")
}

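// Note: viper searches the registered config paths in the order they are
// added, so a configuration file found in $HOME/.config/sftpgo takes
// precedence over /etc/sftpgo, which in turn takes precedence over
// /usr/local/etc/sftpgo.
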
@@ -1,7 +0,0 @@
// +build !linux

package config

func setViperAdditionalConfigPaths() {

}

File diff suppressed because it is too large

dataprovider/actions.go | 144 (new file)
@@ -0,0 +1,144 @@
// Copyright (C) 2019-2022 Nicola Murino
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, version 3.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

package dataprovider

import (
	"bytes"
	"context"
	"fmt"
	"net/url"
	"os/exec"
	"path/filepath"
	"strings"
	"time"

	"github.com/sftpgo/sdk/plugin/notifier"

	"github.com/drakkan/sftpgo/v2/command"
	"github.com/drakkan/sftpgo/v2/httpclient"
	"github.com/drakkan/sftpgo/v2/logger"
	"github.com/drakkan/sftpgo/v2/plugin"
	"github.com/drakkan/sftpgo/v2/util"
)

const (
	// ActionExecutorSelf is used as username for self action, for example a user/admin that updates itself
	ActionExecutorSelf = "__self__"
	// ActionExecutorSystem is used as username for actions with no explicit executor associated, for example
	// adding/updating a user/admin by loading initial data
	ActionExecutorSystem = "__system__"
)

const (
	actionObjectUser   = "user"
	actionObjectFolder = "folder"
	actionObjectGroup  = "group"
	actionObjectAdmin  = "admin"
	actionObjectAPIKey = "api_key"
	actionObjectShare  = "share"
)

var (
	actionsConcurrencyGuard = make(chan struct{}, 100)
)

func executeAction(operation, executor, ip, objectType, objectName string, object plugin.Renderer) {
	if plugin.Handler.HasNotifiers() {
		plugin.Handler.NotifyProviderEvent(&notifier.ProviderEvent{
			Action:     operation,
			Username:   executor,
			ObjectType: objectType,
			ObjectName: objectName,
			IP:         ip,
			Timestamp:  time.Now().UnixNano(),
		}, object)
	}
	if config.Actions.Hook == "" {
		return
	}
	if !util.Contains(config.Actions.ExecuteOn, operation) ||
		!util.Contains(config.Actions.ExecuteFor, objectType) {
		return
	}

	go func() {
		actionsConcurrencyGuard <- struct{}{}
		defer func() {
			<-actionsConcurrencyGuard
		}()

		dataAsJSON, err := object.RenderAsJSON(operation != operationDelete)
		if err != nil {
			providerLog(logger.LevelError, "unable to serialize user as JSON for operation %#v: %v", operation, err)
			return
		}
		if strings.HasPrefix(config.Actions.Hook, "http") {
			var url *url.URL
			url, err := url.Parse(config.Actions.Hook)
			if err != nil {
				providerLog(logger.LevelError, "Invalid http_notification_url %#v for operation %#v: %v",
					config.Actions.Hook, operation, err)
				return
			}
			q := url.Query()
			q.Add("action", operation)
			q.Add("username", executor)
			q.Add("ip", ip)
			q.Add("object_type", objectType)
			q.Add("object_name", objectName)
			q.Add("timestamp", fmt.Sprintf("%v", time.Now().UnixNano()))
			url.RawQuery = q.Encode()
			startTime := time.Now()
			resp, err := httpclient.RetryablePost(url.String(), "application/json", bytes.NewBuffer(dataAsJSON))
			respCode := 0
			if err == nil {
				respCode = resp.StatusCode
				resp.Body.Close()
			}
			providerLog(logger.LevelDebug, "notified operation %#v to URL: %v status code: %v, elapsed: %v err: %v",
				operation, url.Redacted(), respCode, time.Since(startTime), err)
		} else {
			executeNotificationCommand(operation, executor, ip, objectType, objectName, dataAsJSON) //nolint:errcheck // the error is used in test cases only
		}
	}()
}

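// Illustrative request only (URL and values are made up): with
// Actions.Hook = "https://example.com/notify", an "add" of user "joe"
// executed by admin "admin1" from 192.0.2.1 results in a POST to
// https://example.com/notify?action=add&username=admin1&ip=192.0.2.1&object_type=user&object_name=joe&timestamp=...
// with the JSON-serialized object as the request body.
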
func executeNotificationCommand(operation, executor, ip, objectType, objectName string, objectAsJSON []byte) error {
	if !filepath.IsAbs(config.Actions.Hook) {
		err := fmt.Errorf("invalid notification command %#v", config.Actions.Hook)
		logger.Warn(logSender, "", "unable to execute notification command: %v", err)
		return err
	}

	timeout, env := command.GetConfig(config.Actions.Hook)
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()

	cmd := exec.CommandContext(ctx, config.Actions.Hook)
	cmd.Env = append(env,
		fmt.Sprintf("SFTPGO_PROVIDER_ACTION=%v", operation),
		fmt.Sprintf("SFTPGO_PROVIDER_OBJECT_TYPE=%v", objectType),
		fmt.Sprintf("SFTPGO_PROVIDER_OBJECT_NAME=%v", objectName),
		fmt.Sprintf("SFTPGO_PROVIDER_USERNAME=%v", executor),
		fmt.Sprintf("SFTPGO_PROVIDER_IP=%v", ip),
		fmt.Sprintf("SFTPGO_PROVIDER_TIMESTAMP=%v", util.GetTimeAsMsSinceEpoch(time.Now())),
		fmt.Sprintf("SFTPGO_PROVIDER_OBJECT=%v", string(objectAsJSON)))

	startTime := time.Now()
	err := cmd.Run()
	providerLog(logger.LevelDebug, "executed command %#v, elapsed: %v, error: %v", config.Actions.Hook,
		time.Since(startTime), err)
	return err
}

dataprovider/admin.go | 452 (new file)
@@ -0,0 +1,452 @@
// Copyright (C) 2019-2022 Nicola Murino
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, version 3.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

package dataprovider

import (
	"crypto/sha256"
	"encoding/base64"
	"encoding/json"
	"errors"
	"fmt"
	"net"
	"os"
	"strings"

	"github.com/alexedwards/argon2id"
	passwordvalidator "github.com/wagslane/go-password-validator"
	"golang.org/x/crypto/bcrypt"

	"github.com/drakkan/sftpgo/v2/kms"
	"github.com/drakkan/sftpgo/v2/logger"
	"github.com/drakkan/sftpgo/v2/mfa"
	"github.com/drakkan/sftpgo/v2/util"
)

// Available permissions for SFTPGo admins
const (
	PermAdminAny              = "*"
	PermAdminAddUsers         = "add_users"
	PermAdminChangeUsers      = "edit_users"
	PermAdminDeleteUsers      = "del_users"
	PermAdminViewUsers        = "view_users"
	PermAdminViewConnections  = "view_conns"
	PermAdminCloseConnections = "close_conns"
	PermAdminViewServerStatus = "view_status"
	PermAdminManageAdmins     = "manage_admins"
	PermAdminManageGroups     = "manage_groups"
	PermAdminManageAPIKeys    = "manage_apikeys"
	PermAdminQuotaScans       = "quota_scans"
	PermAdminManageSystem     = "manage_system"
	PermAdminManageDefender   = "manage_defender"
	PermAdminViewDefender     = "view_defender"
	PermAdminRetentionChecks  = "retention_checks"
	PermAdminMetadataChecks   = "metadata_checks"
	PermAdminViewEvents       = "view_events"
)

var (
	validAdminPerms = []string{PermAdminAny, PermAdminAddUsers, PermAdminChangeUsers, PermAdminDeleteUsers,
		PermAdminViewUsers, PermAdminManageGroups, PermAdminViewConnections, PermAdminCloseConnections,
		PermAdminViewServerStatus, PermAdminManageAdmins, PermAdminManageAPIKeys, PermAdminQuotaScans,
		PermAdminManageSystem, PermAdminManageDefender, PermAdminViewDefender, PermAdminRetentionChecks,
		PermAdminMetadataChecks, PermAdminViewEvents}
)

// AdminTOTPConfig defines the time-based one time password configuration
type AdminTOTPConfig struct {
	Enabled    bool        `json:"enabled,omitempty"`
	ConfigName string      `json:"config_name,omitempty"`
	Secret     *kms.Secret `json:"secret,omitempty"`
}

func (c *AdminTOTPConfig) validate(username string) error {
	if !c.Enabled {
		c.ConfigName = ""
		c.Secret = kms.NewEmptySecret()
		return nil
	}
	if c.ConfigName == "" {
		return util.NewValidationError("totp: config name is mandatory")
	}
	if !util.Contains(mfa.GetAvailableTOTPConfigNames(), c.ConfigName) {
		return util.NewValidationError(fmt.Sprintf("totp: config name %#v not found", c.ConfigName))
	}
	if c.Secret.IsEmpty() {
		return util.NewValidationError("totp: secret is mandatory")
	}
	if c.Secret.IsPlain() {
		c.Secret.SetAdditionalData(username)
		if err := c.Secret.Encrypt(); err != nil {
			return util.NewValidationError(fmt.Sprintf("totp: unable to encrypt secret: %v", err))
		}
	}
	return nil
}

// AdminFilters defines additional restrictions for SFTPGo admins
// TODO: rename to AdminOptions in v3
type AdminFilters struct {
	// only clients connecting from these IP/Mask are allowed.
	// IP/Mask must be in CIDR notation as defined in RFC 4632 and RFC 4291
	// for example "192.0.2.0/24" or "2001:db8::/32"
	AllowList []string `json:"allow_list,omitempty"`
	// API key auth allows impersonating this administrator with an API key
	AllowAPIKeyAuth bool `json:"allow_api_key_auth,omitempty"`
	// Time-based one time passwords configuration
	TOTPConfig AdminTOTPConfig `json:"totp_config,omitempty"`
	// Recovery codes to use if the user loses access to their second factor auth device.
	// Each code can only be used once, you should use these codes to login and disable or
	// reset 2FA for your account
	RecoveryCodes []RecoveryCode `json:"recovery_codes,omitempty"`
}

// Admin defines a SFTPGo admin
type Admin struct {
	// Database unique identifier
	ID int64 `json:"id"`
	// 1 enabled, 0 disabled (login is not allowed)
	Status int `json:"status"`
	// Username
	Username       string       `json:"username"`
	Password       string       `json:"password,omitempty"`
	Email          string       `json:"email,omitempty"`
	Permissions    []string     `json:"permissions"`
	Filters        AdminFilters `json:"filters,omitempty"`
	Description    string       `json:"description,omitempty"`
	AdditionalInfo string       `json:"additional_info,omitempty"`
	// Creation time as unix timestamp in milliseconds. It will be 0 for admins created before v2.2.0
	CreatedAt int64 `json:"created_at"`
	// last update time as unix timestamp in milliseconds
	UpdatedAt int64 `json:"updated_at"`
	// Last login as unix timestamp in milliseconds
	LastLogin int64 `json:"last_login"`
}

// CountUnusedRecoveryCodes returns the number of unused recovery codes
func (a *Admin) CountUnusedRecoveryCodes() int {
	unused := 0
	for _, code := range a.Filters.RecoveryCodes {
		if !code.Used {
			unused++
		}
	}
	return unused
}

func (a *Admin) hashPassword() error {
	if a.Password != "" && !util.IsStringPrefixInSlice(a.Password, internalHashPwdPrefixes) {
		if config.PasswordValidation.Admins.MinEntropy > 0 {
			if err := passwordvalidator.Validate(a.Password, config.PasswordValidation.Admins.MinEntropy); err != nil {
				return util.NewValidationError(err.Error())
			}
		}
		if config.PasswordHashing.Algo == HashingAlgoBcrypt {
			pwd, err := bcrypt.GenerateFromPassword([]byte(a.Password), config.PasswordHashing.BcryptOptions.Cost)
			if err != nil {
				return err
			}
			a.Password = string(pwd)
		} else {
			pwd, err := argon2id.CreateHash(a.Password, argon2Params)
			if err != nil {
				return err
			}
			a.Password = pwd
		}
	}
	return nil
}

func (a *Admin) hasRedactedSecret() bool {
	return a.Filters.TOTPConfig.Secret.IsRedacted()
}

func (a *Admin) validateRecoveryCodes() error {
	for i := 0; i < len(a.Filters.RecoveryCodes); i++ {
		code := &a.Filters.RecoveryCodes[i]
		if code.Secret.IsEmpty() {
			return util.NewValidationError("mfa: recovery code cannot be empty")
		}
		if code.Secret.IsPlain() {
			code.Secret.SetAdditionalData(a.Username)
			if err := code.Secret.Encrypt(); err != nil {
				return util.NewValidationError(fmt.Sprintf("mfa: unable to encrypt recovery code: %v", err))
			}
		}
	}
	return nil
}

func (a *Admin) validatePermissions() error {
	a.Permissions = util.RemoveDuplicates(a.Permissions, false)
	if len(a.Permissions) == 0 {
		return util.NewValidationError("please grant some permissions to this admin")
	}
	if util.Contains(a.Permissions, PermAdminAny) {
		a.Permissions = []string{PermAdminAny}
	}
	for _, perm := range a.Permissions {
		if !util.Contains(validAdminPerms, perm) {
			return util.NewValidationError(fmt.Sprintf("invalid permission: %#v", perm))
		}
	}
	return nil
}

func (a *Admin) validate() error {
	a.SetEmptySecretsIfNil()
	if a.Username == "" {
		return util.NewValidationError("username is mandatory")
	}
	if a.Password == "" {
		return util.NewValidationError("please set a password")
	}
	if a.hasRedactedSecret() {
		return util.NewValidationError("cannot save an admin with a redacted secret")
	}
	if err := a.Filters.TOTPConfig.validate(a.Username); err != nil {
		return err
	}
	if err := a.validateRecoveryCodes(); err != nil {
		return err
	}
	if config.NamingRules&1 == 0 && !usernameRegex.MatchString(a.Username) {
		return util.NewValidationError(fmt.Sprintf("username %#v is not valid, the following characters are allowed: a-zA-Z0-9-_.~", a.Username))
	}
	if err := a.hashPassword(); err != nil {
		return err
	}
	if err := a.validatePermissions(); err != nil {
		return err
	}
	if a.Email != "" && !util.IsEmailValid(a.Email) {
		return util.NewValidationError(fmt.Sprintf("email %#v is not valid", a.Email))
	}
	a.Filters.AllowList = util.RemoveDuplicates(a.Filters.AllowList, false)
	for _, IPMask := range a.Filters.AllowList {
		_, _, err := net.ParseCIDR(IPMask)
		if err != nil {
			return util.NewValidationError(fmt.Sprintf("could not parse allow list entry %#v : %v", IPMask, err))
		}
	}

	return nil
}

// CheckPassword verifies the admin password
func (a *Admin) CheckPassword(password string) (bool, error) {
	if strings.HasPrefix(a.Password, bcryptPwdPrefix) {
		if err := bcrypt.CompareHashAndPassword([]byte(a.Password), []byte(password)); err != nil {
			return false, ErrInvalidCredentials
		}
		return true, nil
	}
	match, err := argon2id.ComparePasswordAndHash(password, a.Password)
	if !match || err != nil {
		return false, ErrInvalidCredentials
	}
	return match, err
}

// CanLoginFromIP returns true if login from the given IP is allowed
func (a *Admin) CanLoginFromIP(ip string) bool {
	if len(a.Filters.AllowList) == 0 {
		return true
	}
	parsedIP := net.ParseIP(ip)
	if parsedIP == nil {
		return len(a.Filters.AllowList) == 0
	}

	for _, ipMask := range a.Filters.AllowList {
		_, network, err := net.ParseCIDR(ipMask)
		if err != nil {
			continue
		}
		if network.Contains(parsedIP) {
			return true
		}
	}
	return false
}

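// Example of the allow list semantics above (addresses are illustrative):
// with AllowList = []string{"192.0.2.0/24"}, CanLoginFromIP("192.0.2.10") is
// true and CanLoginFromIP("198.51.100.1") is false; with an empty AllowList
// every IP is accepted.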

// CanLogin returns an error if the login is not allowed
func (a *Admin) CanLogin(ip string) error {
	if a.Status != 1 {
		return fmt.Errorf("admin %#v is disabled", a.Username)
	}
	if !a.CanLoginFromIP(ip) {
		return fmt.Errorf("login from IP %v not allowed", ip)
	}
	return nil
}

func (a *Admin) checkUserAndPass(password, ip string) error {
	if err := a.CanLogin(ip); err != nil {
		return err
	}
	if a.Password == "" || password == "" {
		return errors.New("credentials cannot be null or empty")
	}
	match, err := a.CheckPassword(password)
	if err != nil {
		return err
	}
	if !match {
		return ErrInvalidCredentials
	}
	return nil
}

// RenderAsJSON implements the renderer interface used within plugins
func (a *Admin) RenderAsJSON(reload bool) ([]byte, error) {
	if reload {
		admin, err := provider.adminExists(a.Username)
		if err != nil {
			providerLog(logger.LevelError, "unable to reload admin before rendering as json: %v", err)
			return nil, err
		}
		admin.HideConfidentialData()
		return json.Marshal(admin)
	}
	a.HideConfidentialData()
	return json.Marshal(a)
}

// HideConfidentialData hides admin confidential data
func (a *Admin) HideConfidentialData() {
	a.Password = ""
	if a.Filters.TOTPConfig.Secret != nil {
		a.Filters.TOTPConfig.Secret.Hide()
	}
	for _, code := range a.Filters.RecoveryCodes {
		if code.Secret != nil {
			code.Secret.Hide()
		}
	}
	a.SetNilSecretsIfEmpty()
}

// SetEmptySecretsIfNil sets the secrets to empty if nil
func (a *Admin) SetEmptySecretsIfNil() {
	if a.Filters.TOTPConfig.Secret == nil {
		a.Filters.TOTPConfig.Secret = kms.NewEmptySecret()
	}
}

// SetNilSecretsIfEmpty sets the secrets to nil if empty.
// This is useful before rendering as JSON so the empty fields
// will not be serialized.
func (a *Admin) SetNilSecretsIfEmpty() {
	if a.Filters.TOTPConfig.Secret != nil && a.Filters.TOTPConfig.Secret.IsEmpty() {
		a.Filters.TOTPConfig.Secret = nil
	}
}

// HasPermission returns true if the admin has the specified permission
func (a *Admin) HasPermission(perm string) bool {
	if util.Contains(a.Permissions, PermAdminAny) {
		return true
	}
	return util.Contains(a.Permissions, perm)
}

// GetPermissionsAsString returns permissions as a string
func (a *Admin) GetPermissionsAsString() string {
	return strings.Join(a.Permissions, ", ")
}

// GetLastLoginAsString returns the last login as string
func (a *Admin) GetLastLoginAsString() string {
	if a.LastLogin > 0 {
		return util.GetTimeFromMsecSinceEpoch(a.LastLogin).UTC().Format(iso8601UTCFormat)
	}
	return ""
}

// GetAllowedIPAsString returns the allowed IP as comma separated string
func (a *Admin) GetAllowedIPAsString() string {
	return strings.Join(a.Filters.AllowList, ",")
}

// GetValidPerms returns the allowed admin permissions
func (a *Admin) GetValidPerms() []string {
	return validAdminPerms
}

// CanManageMFA returns true if the admin can add a multi-factor authentication configuration
func (a *Admin) CanManageMFA() bool {
	return len(mfa.GetAvailableTOTPConfigs()) > 0
}

// GetSignature returns a signature for this admin.
// It could change after an update
func (a *Admin) GetSignature() string {
	data := []byte(a.Username)
	data = append(data, []byte(a.Password)...)
	signature := sha256.Sum256(data)
	return base64.StdEncoding.EncodeToString(signature[:])
}

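// Note: the signature above is base64(SHA-256(username || stored password
// hash)), so it changes whenever the username or the password hash changes.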

func (a *Admin) getACopy() Admin {
	a.SetEmptySecretsIfNil()
	permissions := make([]string, len(a.Permissions))
	copy(permissions, a.Permissions)
	filters := AdminFilters{}
	filters.AllowList = make([]string, len(a.Filters.AllowList))
	filters.AllowAPIKeyAuth = a.Filters.AllowAPIKeyAuth
	filters.TOTPConfig.Enabled = a.Filters.TOTPConfig.Enabled
	filters.TOTPConfig.ConfigName = a.Filters.TOTPConfig.ConfigName
	filters.TOTPConfig.Secret = a.Filters.TOTPConfig.Secret.Clone()
	copy(filters.AllowList, a.Filters.AllowList)
	filters.RecoveryCodes = make([]RecoveryCode, 0)
	for _, code := range a.Filters.RecoveryCodes {
		if code.Secret == nil {
			code.Secret = kms.NewEmptySecret()
		}
		filters.RecoveryCodes = append(filters.RecoveryCodes, RecoveryCode{
			Secret: code.Secret.Clone(),
			Used:   code.Used,
		})
	}

	return Admin{
		ID:             a.ID,
		Status:         a.Status,
		Username:       a.Username,
		Password:       a.Password,
		Email:          a.Email,
		Permissions:    permissions,
		Filters:        filters,
		AdditionalInfo: a.AdditionalInfo,
		Description:    a.Description,
		LastLogin:      a.LastLogin,
		CreatedAt:      a.CreatedAt,
		UpdatedAt:      a.UpdatedAt,
	}
}

func (a *Admin) setFromEnv() error {
	envUsername := strings.TrimSpace(os.Getenv("SFTPGO_DEFAULT_ADMIN_USERNAME"))
	envPassword := strings.TrimSpace(os.Getenv("SFTPGO_DEFAULT_ADMIN_PASSWORD"))
	if envUsername == "" || envPassword == "" {
		return errors.New(`to create the default admin you need to set the env vars "SFTPGO_DEFAULT_ADMIN_USERNAME" and "SFTPGO_DEFAULT_ADMIN_PASSWORD"`)
	}
	a.Username = envUsername
	a.Password = envPassword
	a.Status = 1
	a.Permissions = []string{PermAdminAny}
	return nil
}

dataprovider/apikey.go | 200 (new file)
@@ -0,0 +1,200 @@
// Copyright (C) 2019-2022 Nicola Murino
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, version 3.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

package dataprovider

import (
	"encoding/json"
	"fmt"
	"strings"
	"time"

	"github.com/alexedwards/argon2id"
	"golang.org/x/crypto/bcrypt"

	"github.com/drakkan/sftpgo/v2/logger"
	"github.com/drakkan/sftpgo/v2/util"
)

// APIKeyScope defines the supported API key scopes
type APIKeyScope int

// Supported API key scopes
const (
	// the API key will be used for an admin
	APIKeyScopeAdmin APIKeyScope = iota + 1
	// the API key will be used for a user
	APIKeyScopeUser
)

// APIKey defines a SFTPGo API key.
// API keys can be used as an authentication alternative to short-lived tokens
// for the REST API
type APIKey struct {
	// Database unique identifier
	ID int64 `json:"-"`
	// Unique key identifier, used for key lookups.
	// The generated key is in the format `KeyID.hash(Key)` so we can split
	// and lookup by KeyID and then verify if the key matches the recorded hash
	KeyID string `json:"id"`
	// User friendly key name
	Name string `json:"name"`
	// we store the hash of the key, this is just like a password
	Key       string      `json:"key,omitempty"`
	Scope     APIKeyScope `json:"scope"`
	CreatedAt int64       `json:"created_at"`
	UpdatedAt int64       `json:"updated_at"`
	// 0 means never used
	LastUseAt int64 `json:"last_use_at,omitempty"`
	// 0 means never expire
	ExpiresAt   int64  `json:"expires_at,omitempty"`
	Description string `json:"description,omitempty"`
	// Username associated with this API key.
	// If empty and the scope is APIKeyScopeUser the key is valid for any user
	User string `json:"user,omitempty"`
	// Admin username associated with this API key.
	// If empty and the scope is APIKeyScopeAdmin the key is valid for any admin
	Admin string `json:"admin,omitempty"`
	// these fields are for internal use
	userID   int64
	adminID  int64
	plainKey string
}

func (k *APIKey) getACopy() APIKey {
	return APIKey{
		ID:          k.ID,
		KeyID:       k.KeyID,
		Name:        k.Name,
		Key:         k.Key,
		Scope:       k.Scope,
		CreatedAt:   k.CreatedAt,
		UpdatedAt:   k.UpdatedAt,
		LastUseAt:   k.LastUseAt,
		ExpiresAt:   k.ExpiresAt,
		Description: k.Description,
		User:        k.User,
		Admin:       k.Admin,
		userID:      k.userID,
		adminID:     k.adminID,
	}
}

// RenderAsJSON implements the renderer interface used within plugins
func (k *APIKey) RenderAsJSON(reload bool) ([]byte, error) {
	if reload {
		apiKey, err := provider.apiKeyExists(k.KeyID)
		if err != nil {
			providerLog(logger.LevelError, "unable to reload api key before rendering as json: %v", err)
			return nil, err
		}
		apiKey.HideConfidentialData()
		return json.Marshal(apiKey)
	}
	k.HideConfidentialData()
	return json.Marshal(k)
}

// HideConfidentialData hides API key confidential data
func (k *APIKey) HideConfidentialData() {
	k.Key = ""
}

func (k *APIKey) hashKey() error {
	if k.Key != "" && !util.IsStringPrefixInSlice(k.Key, internalHashPwdPrefixes) {
		if config.PasswordHashing.Algo == HashingAlgoBcrypt {
			hashed, err := bcrypt.GenerateFromPassword([]byte(k.Key), config.PasswordHashing.BcryptOptions.Cost)
			if err != nil {
				return err
			}
			k.Key = string(hashed)
		} else {
			hashed, err := argon2id.CreateHash(k.Key, argon2Params)
			if err != nil {
				return err
			}
			k.Key = hashed
		}
	}
	return nil
}

func (k *APIKey) generateKey() {
	if k.KeyID != "" || k.Key != "" {
		return
	}
	k.KeyID = util.GenerateUniqueID()
	k.Key = util.GenerateUniqueID()
	k.plainKey = k.Key
}

// DisplayKey returns the key to show to the user
func (k *APIKey) DisplayKey() string {
	return fmt.Sprintf("%v.%v", k.KeyID, k.plainKey)
}

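// Illustrative example (IDs are made up): DisplayKey returns something like
// "3sYzM1uQj9.5mKq2c7PxW" once, right after key creation; only the
// bcrypt/argon2id hash of the part after the dot is persisted, so the plain
// key cannot be recovered later.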
func (k *APIKey) validate() error {
	if k.Name == "" {
		return util.NewValidationError("name is mandatory")
	}
	if k.Scope != APIKeyScopeAdmin && k.Scope != APIKeyScopeUser {
		return util.NewValidationError(fmt.Sprintf("invalid scope: %v", k.Scope))
	}
	k.generateKey()
	if err := k.hashKey(); err != nil {
		return err
	}
	if k.User != "" && k.Admin != "" {
		return util.NewValidationError("an API key can be related to a user or an admin, not both")
	}
	if k.Scope == APIKeyScopeAdmin {
		k.User = ""
	}
	if k.Scope == APIKeyScopeUser {
		k.Admin = ""
	}
	if k.User != "" {
		_, err := provider.userExists(k.User)
		if err != nil {
			return util.NewValidationError(fmt.Sprintf("unable to check API key user %v: %v", k.User, err))
		}
	}
	if k.Admin != "" {
		_, err := provider.adminExists(k.Admin)
		if err != nil {
			return util.NewValidationError(fmt.Sprintf("unable to check API key admin %v: %v", k.Admin, err))
		}
	}
	return nil
}

// Authenticate tries to authenticate the provided plain key
func (k *APIKey) Authenticate(plainKey string) error {
	if k.ExpiresAt > 0 && k.ExpiresAt < util.GetTimeAsMsSinceEpoch(time.Now()) {
		return fmt.Errorf("API key %#v is expired, expiration timestamp: %v current timestamp: %v", k.KeyID,
			k.ExpiresAt, util.GetTimeAsMsSinceEpoch(time.Now()))
	}
	if strings.HasPrefix(k.Key, bcryptPwdPrefix) {
		if err := bcrypt.CompareHashAndPassword([]byte(k.Key), []byte(plainKey)); err != nil {
			return ErrInvalidCredentials
		}
	} else if strings.HasPrefix(k.Key, argonPwdPrefix) {
		match, err := argon2id.ComparePasswordAndHash(plainKey, k.Key)
		if err != nil || !match {
			return ErrInvalidCredentials
		}
	}

	return nil
}

dataprovider/bolt.go | 2390 (diff suppressed because it is too large)

dataprovider/bolt_disabled.go | 32 (new file)
@@ -0,0 +1,32 @@
// Copyright (C) 2019-2022 Nicola Murino
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, version 3.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

//go:build nobolt
// +build nobolt

package dataprovider

import (
    "errors"

    "github.com/drakkan/sftpgo/v2/version"
)

func init() {
    version.AddFeature("-bolt")
}

func initializeBoltProvider(basePath string) error {
    return errors.New("bolt disabled at build time")
}
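The nobolt build tag swaps the real provider for a stub that registers the feature as compiled out and fails at initialization time, so the rest of the codebase needs no conditionals. A sketch of the same pattern with hypothetical package and file names:

// file: feature/feature.go
package feature

var flags []string

// Add records whether an optional backend was compiled in ("+name") or out ("-name").
func Add(name string) { flags = append(flags, name) }

// List returns the compile-time feature flags, e.g. "+bolt" or "-bolt".
func List() []string { return flags }

// file: feature/bolt_disabled.go
//go:build nobolt

package feature

import "errors"

func init() { Add("-bolt") }

// InitBolt is the stub selected when building with -tags nobolt; callers get a
// clear error instead of a missing symbol.
func InitBolt(basePath string) error {
    return errors.New("bolt disabled at build time")
}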
dataprovider/cachedpassword.go (new file, 76 lines)
@@ -0,0 +1,76 @@
// Copyright (C) 2019-2022 Nicola Murino
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, version 3.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

package dataprovider

import (
    "sync"
)

var cachedPasswords passwordsCache

func init() {
    cachedPasswords = passwordsCache{
        cache: make(map[string]string),
    }
}

type passwordsCache struct {
    sync.RWMutex
    cache map[string]string
}

func (c *passwordsCache) Add(username, password string) {
    if !config.PasswordCaching || username == "" || password == "" {
        return
    }

    c.Lock()
    defer c.Unlock()

    c.cache[username] = password
}

func (c *passwordsCache) Remove(username string) {
    if !config.PasswordCaching {
        return
    }

    c.Lock()
    defer c.Unlock()

    delete(c.cache, username)
}

// Check reports whether the user is found and whether the password matches
func (c *passwordsCache) Check(username, password string) (bool, bool) {
    if username == "" || password == "" {
        return false, false
    }

    c.RLock()
    defer c.RUnlock()

    pwd, ok := c.cache[username]
    if !ok {
        return false, false
    }

    return true, pwd == password
}

// CheckCachedPassword is a utility method used only in test cases
func CheckCachedPassword(username, password string) (bool, bool) {
    return cachedPasswords.Check(username, password)
}
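Check returns two booleans so callers can tell a cache miss apart from a cached-but-changed password. A standalone usage sketch of that contract (types and names are illustrative, not the SFTPGo package):

package main

import (
    "fmt"
    "sync"
)

type pwdCache struct {
    mu    sync.RWMutex
    cache map[string]string
}

func (c *pwdCache) add(user, pwd string) {
    c.mu.Lock()
    defer c.mu.Unlock()
    c.cache[user] = pwd
}

// check reports (found, match) so a miss and a wrong password are distinguishable.
func (c *pwdCache) check(user, pwd string) (bool, bool) {
    c.mu.RLock()
    defer c.mu.RUnlock()
    p, ok := c.cache[user]
    return ok, ok && p == pwd
}

func main() {
    c := &pwdCache{cache: make(map[string]string)}
    c.add("alice", "secret")
    found, match := c.check("alice", "wrong")
    fmt.Println(found, match) // true false: known user, password changed
    found, match = c.check("bob", "secret")
    fmt.Println(found, match) // false false: not cached, fall back to the provider
}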
dataprovider/cacheduser.go (new file, 173 lines)
@@ -0,0 +1,173 @@
// Copyright (C) 2019-2022 Nicola Murino
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, version 3.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

package dataprovider

import (
    "sync"
    "time"

    "golang.org/x/net/webdav"

    "github.com/drakkan/sftpgo/v2/logger"
    "github.com/drakkan/sftpgo/v2/util"
)

var (
    webDAVUsersCache *usersCache
)

func init() {
    webDAVUsersCache = &usersCache{
        users: map[string]CachedUser{},
    }
}

// InitializeWebDAVUserCache initializes the cache for webdav users
func InitializeWebDAVUserCache(maxSize int) {
    webDAVUsersCache = &usersCache{
        users:   map[string]CachedUser{},
        maxSize: maxSize,
    }
}

// CachedUser adds fields useful for caching to an SFTPGo user
type CachedUser struct {
    User       User
    Expiration time.Time
    Password   string
    LockSystem webdav.LockSystem
}

// IsExpired returns true if the cached user is expired
func (c *CachedUser) IsExpired() bool {
    if c.Expiration.IsZero() {
        return false
    }
    return c.Expiration.Before(time.Now())
}

type usersCache struct {
    sync.RWMutex
    users   map[string]CachedUser
    maxSize int
}

func (cache *usersCache) updateLastLogin(username string) {
    cache.Lock()
    defer cache.Unlock()

    if cachedUser, ok := cache.users[username]; ok {
        cachedUser.User.LastLogin = util.GetTimeAsMsSinceEpoch(time.Now())
        cache.users[username] = cachedUser
    }
}

// swap updates an existing cached user with the specified one,
// preserving the lock fs if possible.
// FIXME: this could be racy in rare cases
func (cache *usersCache) swap(userRef *User) {
    user := userRef.getACopy()
    err := user.LoadAndApplyGroupSettings()

    cache.Lock()
    defer cache.Unlock()

    if cachedUser, ok := cache.users[user.Username]; ok {
        if cachedUser.User.Password != user.Password {
            providerLog(logger.LevelDebug, "current password different from the cached one for user %#v, removing from cache",
                user.Username)
            // the password changed, the cached user is no longer valid
            delete(cache.users, user.Username)
            return
        }
        if err != nil {
            providerLog(logger.LevelDebug, "unable to load group settings for user %#v, removing from cache, err: %v",
                user.Username, err)
            delete(cache.users, user.Username)
            return
        }
        if cachedUser.User.isFsEqual(&user) {
            // the updated user has the same fs as the cached one, we can preserve the lock filesystem
            providerLog(logger.LevelDebug, "current password and fs unchanged for user %#v, swapping the cached one",
                user.Username)
            cachedUser.User = user
            cache.users[user.Username] = cachedUser
        } else {
            // filesystem changed, the cached user is no longer valid
            providerLog(logger.LevelDebug, "current fs different from the cached one for user %#v, removing from cache",
                user.Username)
            delete(cache.users, user.Username)
        }
    }
}

func (cache *usersCache) add(cachedUser *CachedUser) {
    cache.Lock()
    defer cache.Unlock()

    if cache.maxSize > 0 && len(cache.users) >= cache.maxSize {
        var userToRemove string
        var expirationTime time.Time

        for k, v := range cache.users {
            if userToRemove == "" {
                userToRemove = k
                expirationTime = v.Expiration
                continue
            }
            expireTime := v.Expiration
            if !expireTime.IsZero() && expireTime.Before(expirationTime) {
                userToRemove = k
                expirationTime = expireTime
            }
        }

        delete(cache.users, userToRemove)
    }

    if cachedUser.User.Username != "" {
        cache.users[cachedUser.User.Username] = *cachedUser
    }
}

func (cache *usersCache) remove(username string) {
    cache.Lock()
    defer cache.Unlock()

    delete(cache.users, username)
}

func (cache *usersCache) get(username string) (*CachedUser, bool) {
    cache.RLock()
    defer cache.RUnlock()

    cachedUser, ok := cache.users[username]
    return &cachedUser, ok
}

// CacheWebDAVUser adds a user to the WebDAV cache
func CacheWebDAVUser(cachedUser *CachedUser) {
    webDAVUsersCache.add(cachedUser)
}

// GetCachedWebDAVUser returns a previously cached WebDAV user
func GetCachedWebDAVUser(username string) (*CachedUser, bool) {
    return webDAVUsersCache.get(username)
}

// RemoveCachedWebDAVUser removes a cached WebDAV user
func RemoveCachedWebDAVUser(username string) {
    webDAVUsersCache.remove(username)
}
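IsExpired treats the zero time.Time as "never expires", which is what the cache relies on when no expiration is configured. A self-contained sketch of that convention:

package main

import (
    "fmt"
    "time"
)

type entry struct {
    name       string
    expiration time.Time // the zero value means "never expires"
}

// isExpired mirrors the zero-time convention: no deadline, never expired.
func (e entry) isExpired() bool {
    return !e.expiration.IsZero() && e.expiration.Before(time.Now())
}

func main() {
    forever := entry{name: "no-ttl"}
    shortLived := entry{name: "ttl", expiration: time.Now().Add(-time.Minute)}
    fmt.Println(forever.isExpired())    // false: zero time is treated as no expiration
    fmt.Println(shortLived.isExpired()) // true: the deadline has already passed
}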
File diff suppressed because it is too large
dataprovider/group.go (new file, 234 lines)
@@ -0,0 +1,234 @@
// Copyright (C) 2019-2022 Nicola Murino
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, version 3.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

package dataprovider

import (
    "encoding/json"
    "fmt"
    "path/filepath"
    "strings"

    "github.com/sftpgo/sdk"

    "github.com/drakkan/sftpgo/v2/logger"
    "github.com/drakkan/sftpgo/v2/plugin"
    "github.com/drakkan/sftpgo/v2/util"
    "github.com/drakkan/sftpgo/v2/vfs"
)

// GroupUserSettings defines the settings to apply to users
type GroupUserSettings struct {
    sdk.BaseGroupUserSettings
    // Filesystem configuration details
    FsConfig vfs.Filesystem `json:"filesystem"`
}

// Group defines an SFTPGo group.
// Groups are used to easily configure similar users
type Group struct {
    sdk.BaseGroup
    // settings to apply to users for whom this is a primary group
    UserSettings GroupUserSettings `json:"user_settings,omitempty"`
    // Mapping between virtual paths and virtual folders
    VirtualFolders []vfs.VirtualFolder `json:"virtual_folders,omitempty"`
}

// GetPermissions returns the permissions as list
func (g *Group) GetPermissions() []sdk.DirectoryPermissions {
    result := make([]sdk.DirectoryPermissions, 0, len(g.UserSettings.Permissions))
    for k, v := range g.UserSettings.Permissions {
        result = append(result, sdk.DirectoryPermissions{
            Path:        k,
            Permissions: v,
        })
    }
    return result
}

// GetAllowedIPAsString returns the allowed IP as comma separated string
func (g *Group) GetAllowedIPAsString() string {
    return strings.Join(g.UserSettings.Filters.AllowedIP, ",")
}

// GetDeniedIPAsString returns the denied IP as comma separated string
func (g *Group) GetDeniedIPAsString() string {
    return strings.Join(g.UserSettings.Filters.DeniedIP, ",")
}

// HasExternalAuth returns true if external authentication is globally enabled
// and it is not disabled for this group
func (g *Group) HasExternalAuth() bool {
    if g.UserSettings.Filters.Hooks.ExternalAuthDisabled {
        return false
    }
    if config.ExternalAuthHook != "" {
        return true
    }
    return plugin.Handler.HasAuthenticators()
}

// SetEmptySecretsIfNil sets the secrets to empty if nil
func (g *Group) SetEmptySecretsIfNil() {
    g.UserSettings.FsConfig.SetEmptySecretsIfNil()
    for idx := range g.VirtualFolders {
        vfolder := &g.VirtualFolders[idx]
        vfolder.FsConfig.SetEmptySecretsIfNil()
    }
}

// PrepareForRendering prepares a group for rendering.
// It hides confidential data and sets empty secrets to nil
// so they are not serialized
func (g *Group) PrepareForRendering() {
    g.UserSettings.FsConfig.HideConfidentialData()
    g.UserSettings.FsConfig.SetNilSecretsIfEmpty()
    for idx := range g.VirtualFolders {
        folder := &g.VirtualFolders[idx]
        folder.PrepareForRendering()
    }
}

// RenderAsJSON implements the renderer interface used within plugins
func (g *Group) RenderAsJSON(reload bool) ([]byte, error) {
    if reload {
        group, err := provider.groupExists(g.Name)
        if err != nil {
            providerLog(logger.LevelError, "unable to reload group before rendering as json: %v", err)
            return nil, err
        }
        group.PrepareForRendering()
        return json.Marshal(group)
    }
    g.PrepareForRendering()
    return json.Marshal(g)
}

// GetEncryptionAdditionalData returns the additional data to use for AEAD
func (g *Group) GetEncryptionAdditionalData() string {
    return fmt.Sprintf("group_%v", g.Name)
}

// hasRedactedSecret returns true if the group has a redacted secret
func (g *Group) hasRedactedSecret() bool {
    for idx := range g.VirtualFolders {
        folder := &g.VirtualFolders[idx]
        if folder.HasRedactedSecret() {
            return true
        }
    }

    return g.UserSettings.FsConfig.HasRedactedSecret()
}

func (g *Group) validate() error {
    g.SetEmptySecretsIfNil()
    if g.Name == "" {
        return util.NewValidationError("name is mandatory")
    }
    if config.NamingRules&1 == 0 && !usernameRegex.MatchString(g.Name) {
        return util.NewValidationError(fmt.Sprintf("name %#v is not valid, the following characters are allowed: a-zA-Z0-9-_.~", g.Name))
    }
    if g.hasRedactedSecret() {
        return util.NewValidationError("cannot save a group with a redacted secret")
    }
    vfolders, err := validateAssociatedVirtualFolders(g.VirtualFolders)
    if err != nil {
        return err
    }
    g.VirtualFolders = vfolders
    return g.validateUserSettings()
}

func (g *Group) validateUserSettings() error {
    if g.UserSettings.HomeDir != "" {
        g.UserSettings.HomeDir = filepath.Clean(g.UserSettings.HomeDir)
        if !filepath.IsAbs(g.UserSettings.HomeDir) {
            return util.NewValidationError(fmt.Sprintf("home_dir must be an absolute path, actual value: %v",
                g.UserSettings.HomeDir))
        }
    }
    if err := g.UserSettings.FsConfig.Validate(g.GetEncryptionAdditionalData()); err != nil {
        return err
    }
    if g.UserSettings.TotalDataTransfer > 0 {
        // if a total data transfer is defined we reset the separate upload and download limits
        g.UserSettings.UploadDataTransfer = 0
        g.UserSettings.DownloadDataTransfer = 0
    }
    if len(g.UserSettings.Permissions) > 0 {
        permissions, err := validateUserPermissions(g.UserSettings.Permissions)
        if err != nil {
            return err
        }
        g.UserSettings.Permissions = permissions
    }
    if err := validateBaseFilters(&g.UserSettings.Filters); err != nil {
        return err
    }
    if !g.HasExternalAuth() {
        g.UserSettings.Filters.ExternalAuthCacheTime = 0
    }
    g.UserSettings.Filters.UserType = ""
    return nil
}

func (g *Group) getACopy() Group {
    users := make([]string, len(g.Users))
    copy(users, g.Users)
    virtualFolders := make([]vfs.VirtualFolder, 0, len(g.VirtualFolders))
    for idx := range g.VirtualFolders {
        vfolder := g.VirtualFolders[idx].GetACopy()
        virtualFolders = append(virtualFolders, vfolder)
    }
    permissions := make(map[string][]string)
    for k, v := range g.UserSettings.Permissions {
        perms := make([]string, len(v))
        copy(perms, v)
        permissions[k] = perms
    }

    return Group{
        BaseGroup: sdk.BaseGroup{
            ID:          g.ID,
            Name:        g.Name,
            Description: g.Description,
            CreatedAt:   g.CreatedAt,
            UpdatedAt:   g.UpdatedAt,
            Users:       users,
        },
        UserSettings: GroupUserSettings{
            BaseGroupUserSettings: sdk.BaseGroupUserSettings{
                HomeDir:              g.UserSettings.HomeDir,
                MaxSessions:          g.UserSettings.MaxSessions,
                QuotaSize:            g.UserSettings.QuotaSize,
                QuotaFiles:           g.UserSettings.QuotaFiles,
                Permissions:          permissions,
                UploadBandwidth:      g.UserSettings.UploadBandwidth,
                DownloadBandwidth:    g.UserSettings.DownloadBandwidth,
                UploadDataTransfer:   g.UserSettings.UploadDataTransfer,
                DownloadDataTransfer: g.UserSettings.DownloadDataTransfer,
                TotalDataTransfer:    g.UserSettings.TotalDataTransfer,
                Filters:              copyBaseUserFilters(g.UserSettings.Filters),
            },
            FsConfig: g.UserSettings.FsConfig.GetACopy(),
        },
        VirtualFolders: virtualFolders,
    }
}

// GetUsersAsString returns the list of users as comma separated string
func (g *Group) GetUsersAsString() string {
    return strings.Join(g.Users, ",")
}
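getACopy copies every nested slice and map by hand so the returned Group shares no memory with the original. The core of that defensive-copy idiom as a standalone sketch:

package main

import "fmt"

// deepCopyPermissions clones a map of string slices; a plain assignment would
// share the underlying arrays, so later edits would leak into the original.
func deepCopyPermissions(src map[string][]string) map[string][]string {
    dst := make(map[string][]string, len(src))
    for path, perms := range src {
        cp := make([]string, len(perms))
        copy(cp, perms)
        dst[path] = cp
    }
    return dst
}

func main() {
    orig := map[string][]string{"/": {"list", "download"}}
    clone := deepCopyPermissions(orig)
    clone["/"][0] = "upload"
    fmt.Println(orig["/"][0])  // list: the original is untouched
    fmt.Println(clone["/"][0]) // upload
}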
dataprovider/memory.go (new file, 2058 lines): file diff suppressed because it is too large

dataprovider/mysql.go
@@ -1,89 +1,778 @@
// Copyright (C) 2019-2022 Nicola Murino
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, version 3.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

//go:build !nomysql
// +build !nomysql

package dataprovider

import (
    "context"
    "crypto/tls"
    "crypto/x509"
    "database/sql"
    "errors"
    "fmt"
    "os"
    "strings"
    "time"

-   "github.com/drakkan/sftpgo/logger"
    "github.com/go-sql-driver/mysql"

+   "github.com/drakkan/sftpgo/v2/logger"
+   "github.com/drakkan/sftpgo/v2/version"
+   "github.com/drakkan/sftpgo/v2/vfs"
)
-// MySQLProvider auth provider for MySQL/MariaDB database
const (
    mysqlResetSQL = "DROP TABLE IF EXISTS `{{api_keys}}` CASCADE;" +
        "DROP TABLE IF EXISTS `{{folders_mapping}}` CASCADE;" +
        "DROP TABLE IF EXISTS `{{users_folders_mapping}}` CASCADE;" +
        "DROP TABLE IF EXISTS `{{users_groups_mapping}}` CASCADE;" +
        "DROP TABLE IF EXISTS `{{groups_folders_mapping}}` CASCADE;" +
        "DROP TABLE IF EXISTS `{{admins}}` CASCADE;" +
        "DROP TABLE IF EXISTS `{{folders}}` CASCADE;" +
        "DROP TABLE IF EXISTS `{{shares}}` CASCADE;" +
        "DROP TABLE IF EXISTS `{{users}}` CASCADE;" +
        "DROP TABLE IF EXISTS `{{groups}}` CASCADE;" +
        "DROP TABLE IF EXISTS `{{defender_events}}` CASCADE;" +
        "DROP TABLE IF EXISTS `{{defender_hosts}}` CASCADE;" +
        "DROP TABLE IF EXISTS `{{active_transfers}}` CASCADE;" +
        "DROP TABLE IF EXISTS `{{shared_sessions}}` CASCADE;" +
        "DROP TABLE IF EXISTS `{{schema_version}}` CASCADE;"
    mysqlInitialSQL = "CREATE TABLE `{{schema_version}}` (`id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY, `version` integer NOT NULL);" +
        "CREATE TABLE `{{admins}}` (`id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY, `username` varchar(255) NOT NULL UNIQUE, " +
        "`description` varchar(512) NULL, `password` varchar(255) NOT NULL, `email` varchar(255) NULL, `status` integer NOT NULL, " +
        "`permissions` longtext NOT NULL, `filters` longtext NULL, `additional_info` longtext NULL, `last_login` bigint NOT NULL, " +
        "`created_at` bigint NOT NULL, `updated_at` bigint NOT NULL);" +
        "CREATE TABLE `{{defender_hosts}}` (`id` bigint AUTO_INCREMENT NOT NULL PRIMARY KEY, " +
        "`ip` varchar(50) NOT NULL UNIQUE, `ban_time` bigint NOT NULL, `updated_at` bigint NOT NULL);" +
        "CREATE TABLE `{{defender_events}}` (`id` bigint AUTO_INCREMENT NOT NULL PRIMARY KEY, " +
        "`date_time` bigint NOT NULL, `score` integer NOT NULL, `host_id` bigint NOT NULL);" +
        "ALTER TABLE `{{defender_events}}` ADD CONSTRAINT `{{prefix}}defender_events_host_id_fk_defender_hosts_id` " +
        "FOREIGN KEY (`host_id`) REFERENCES `{{defender_hosts}}` (`id`) ON DELETE CASCADE;" +
        "CREATE TABLE `{{folders}}` (`id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY, `name` varchar(255) NOT NULL UNIQUE, " +
        "`description` varchar(512) NULL, `path` longtext NULL, `used_quota_size` bigint NOT NULL, " +
        "`used_quota_files` integer NOT NULL, `last_quota_update` bigint NOT NULL, `filesystem` longtext NULL);" +
        "CREATE TABLE `{{users}}` (`id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY, `username` varchar(255) NOT NULL UNIQUE, " +
        "`status` integer NOT NULL, `expiration_date` bigint NOT NULL, `description` varchar(512) NULL, `password` longtext NULL, " +
        "`public_keys` longtext NULL, `home_dir` longtext NOT NULL, `uid` bigint NOT NULL, `gid` bigint NOT NULL, " +
        "`max_sessions` integer NOT NULL, `quota_size` bigint NOT NULL, `quota_files` integer NOT NULL, " +
        "`permissions` longtext NOT NULL, `used_quota_size` bigint NOT NULL, `used_quota_files` integer NOT NULL, " +
        "`last_quota_update` bigint NOT NULL, `upload_bandwidth` integer NOT NULL, `download_bandwidth` integer NOT NULL, " +
        "`last_login` bigint NOT NULL, `filters` longtext NULL, `filesystem` longtext NULL, `additional_info` longtext NULL, " +
        "`created_at` bigint NOT NULL, `updated_at` bigint NOT NULL, `email` varchar(255) NULL);" +
        "CREATE TABLE `{{folders_mapping}}` (`id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY, `virtual_path` longtext NOT NULL, " +
        "`quota_size` bigint NOT NULL, `quota_files` integer NOT NULL, `folder_id` integer NOT NULL, `user_id` integer NOT NULL);" +
        "ALTER TABLE `{{folders_mapping}}` ADD CONSTRAINT `{{prefix}}unique_mapping` UNIQUE (`user_id`, `folder_id`);" +
        "ALTER TABLE `{{folders_mapping}}` ADD CONSTRAINT `{{prefix}}folders_mapping_folder_id_fk_folders_id` FOREIGN KEY (`folder_id`) REFERENCES `{{folders}}` (`id`) ON DELETE CASCADE;" +
        "ALTER TABLE `{{folders_mapping}}` ADD CONSTRAINT `{{prefix}}folders_mapping_user_id_fk_users_id` FOREIGN KEY (`user_id`) REFERENCES `{{users}}` (`id`) ON DELETE CASCADE;" +
        "CREATE TABLE `{{shares}}` (`id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY, " +
        "`share_id` varchar(60) NOT NULL UNIQUE, `name` varchar(255) NOT NULL, `description` varchar(512) NULL, " +
        "`scope` integer NOT NULL, `paths` longtext NOT NULL, `created_at` bigint NOT NULL, " +
        "`updated_at` bigint NOT NULL, `last_use_at` bigint NOT NULL, `expires_at` bigint NOT NULL, " +
        "`password` longtext NULL, `max_tokens` integer NOT NULL, `used_tokens` integer NOT NULL, " +
        "`allow_from` longtext NULL, `user_id` integer NOT NULL);" +
        "ALTER TABLE `{{shares}}` ADD CONSTRAINT `{{prefix}}shares_user_id_fk_users_id` " +
        "FOREIGN KEY (`user_id`) REFERENCES `{{users}}` (`id`) ON DELETE CASCADE;" +
        "CREATE TABLE `{{api_keys}}` (`id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY, `name` varchar(255) NOT NULL, `key_id` varchar(50) NOT NULL UNIQUE," +
        "`api_key` varchar(255) NOT NULL UNIQUE, `scope` integer NOT NULL, `created_at` bigint NOT NULL, `updated_at` bigint NOT NULL, `last_use_at` bigint NOT NULL, " +
        "`expires_at` bigint NOT NULL, `description` longtext NULL, `admin_id` integer NULL, `user_id` integer NULL);" +
        "ALTER TABLE `{{api_keys}}` ADD CONSTRAINT `{{prefix}}api_keys_admin_id_fk_admins_id` FOREIGN KEY (`admin_id`) REFERENCES `{{admins}}` (`id`) ON DELETE CASCADE;" +
        "ALTER TABLE `{{api_keys}}` ADD CONSTRAINT `{{prefix}}api_keys_user_id_fk_users_id` FOREIGN KEY (`user_id`) REFERENCES `{{users}}` (`id`) ON DELETE CASCADE;" +
        "CREATE INDEX `{{prefix}}users_updated_at_idx` ON `{{users}}` (`updated_at`);" +
        "CREATE INDEX `{{prefix}}defender_hosts_updated_at_idx` ON `{{defender_hosts}}` (`updated_at`);" +
        "CREATE INDEX `{{prefix}}defender_hosts_ban_time_idx` ON `{{defender_hosts}}` (`ban_time`);" +
        "CREATE INDEX `{{prefix}}defender_events_date_time_idx` ON `{{defender_events}}` (`date_time`);" +
        "INSERT INTO {{schema_version}} (version) VALUES (15);"
    mysqlV16SQL = "ALTER TABLE `{{users}}` ADD COLUMN `download_data_transfer` integer DEFAULT 0 NOT NULL;" +
        "ALTER TABLE `{{users}}` ALTER COLUMN `download_data_transfer` DROP DEFAULT;" +
        "ALTER TABLE `{{users}}` ADD COLUMN `total_data_transfer` integer DEFAULT 0 NOT NULL;" +
        "ALTER TABLE `{{users}}` ALTER COLUMN `total_data_transfer` DROP DEFAULT;" +
        "ALTER TABLE `{{users}}` ADD COLUMN `upload_data_transfer` integer DEFAULT 0 NOT NULL;" +
        "ALTER TABLE `{{users}}` ALTER COLUMN `upload_data_transfer` DROP DEFAULT;" +
        "ALTER TABLE `{{users}}` ADD COLUMN `used_download_data_transfer` integer DEFAULT 0 NOT NULL;" +
        "ALTER TABLE `{{users}}` ALTER COLUMN `used_download_data_transfer` DROP DEFAULT;" +
        "ALTER TABLE `{{users}}` ADD COLUMN `used_upload_data_transfer` integer DEFAULT 0 NOT NULL;" +
        "ALTER TABLE `{{users}}` ALTER COLUMN `used_upload_data_transfer` DROP DEFAULT;" +
        "CREATE TABLE `{{active_transfers}}` (`id` bigint AUTO_INCREMENT NOT NULL PRIMARY KEY, " +
        "`connection_id` varchar(100) NOT NULL, `transfer_id` bigint NOT NULL, `transfer_type` integer NOT NULL, " +
        "`username` varchar(255) NOT NULL, `folder_name` varchar(255) NULL, `ip` varchar(50) NOT NULL, " +
        "`truncated_size` bigint NOT NULL, `current_ul_size` bigint NOT NULL, `current_dl_size` bigint NOT NULL, " +
        "`created_at` bigint NOT NULL, `updated_at` bigint NOT NULL);" +
        "CREATE INDEX `{{prefix}}active_transfers_connection_id_idx` ON `{{active_transfers}}` (`connection_id`);" +
        "CREATE INDEX `{{prefix}}active_transfers_transfer_id_idx` ON `{{active_transfers}}` (`transfer_id`);" +
        "CREATE INDEX `{{prefix}}active_transfers_updated_at_idx` ON `{{active_transfers}}` (`updated_at`);"
    mysqlV16DownSQL = "ALTER TABLE `{{users}}` DROP COLUMN `used_upload_data_transfer`;" +
        "ALTER TABLE `{{users}}` DROP COLUMN `used_download_data_transfer`;" +
        "ALTER TABLE `{{users}}` DROP COLUMN `upload_data_transfer`;" +
        "ALTER TABLE `{{users}}` DROP COLUMN `total_data_transfer`;" +
        "ALTER TABLE `{{users}}` DROP COLUMN `download_data_transfer`;" +
        "DROP TABLE `{{active_transfers}}` CASCADE;"
    mysqlV17SQL = "CREATE TABLE `{{groups}}` (`id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY, " +
        "`name` varchar(255) NOT NULL UNIQUE, `description` varchar(512) NULL, `created_at` bigint NOT NULL, " +
        "`updated_at` bigint NOT NULL, `user_settings` longtext NULL);" +
        "CREATE TABLE `{{groups_folders_mapping}}` (`id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY, " +
        "`group_id` integer NOT NULL, `folder_id` integer NOT NULL, " +
        "`virtual_path` longtext NOT NULL, `quota_size` bigint NOT NULL, `quota_files` integer NOT NULL);" +
        "CREATE TABLE `{{users_groups_mapping}}` (`id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY, " +
        "`user_id` integer NOT NULL, `group_id` integer NOT NULL, `group_type` integer NOT NULL);" +
        "ALTER TABLE `{{folders_mapping}}` DROP FOREIGN KEY `{{prefix}}folders_mapping_folder_id_fk_folders_id`;" +
        "ALTER TABLE `{{folders_mapping}}` DROP FOREIGN KEY `{{prefix}}folders_mapping_user_id_fk_users_id`;" +
        "ALTER TABLE `{{folders_mapping}}` DROP INDEX `{{prefix}}unique_mapping`;" +
        "RENAME TABLE `{{folders_mapping}}` TO `{{users_folders_mapping}}`;" +
        "ALTER TABLE `{{users_folders_mapping}}` ADD CONSTRAINT `{{prefix}}unique_user_folder_mapping` " +
        "UNIQUE (`user_id`, `folder_id`);" +
        "ALTER TABLE `{{users_folders_mapping}}` ADD CONSTRAINT `{{prefix}}users_folders_mapping_user_id_fk_users_id` " +
        "FOREIGN KEY (`user_id`) REFERENCES `{{users}}` (`id`) ON DELETE CASCADE;" +
        "ALTER TABLE `{{users_folders_mapping}}` ADD CONSTRAINT `{{prefix}}users_folders_mapping_folder_id_fk_folders_id` " +
        "FOREIGN KEY (`folder_id`) REFERENCES `{{folders}}` (`id`) ON DELETE CASCADE;" +
        "ALTER TABLE `{{users_groups_mapping}}` ADD CONSTRAINT `{{prefix}}unique_user_group_mapping` UNIQUE (`user_id`, `group_id`);" +
        "ALTER TABLE `{{groups_folders_mapping}}` ADD CONSTRAINT `{{prefix}}unique_group_folder_mapping` UNIQUE (`group_id`, `folder_id`);" +
        "ALTER TABLE `{{users_groups_mapping}}` ADD CONSTRAINT `{{prefix}}users_groups_mapping_group_id_fk_groups_id` " +
        "FOREIGN KEY (`group_id`) REFERENCES `{{groups}}` (`id`) ON DELETE NO ACTION;" +
        "ALTER TABLE `{{users_groups_mapping}}` ADD CONSTRAINT `{{prefix}}users_groups_mapping_user_id_fk_users_id` " +
        "FOREIGN KEY (`user_id`) REFERENCES `{{users}}` (`id`) ON DELETE CASCADE;" +
        "ALTER TABLE `{{groups_folders_mapping}}` ADD CONSTRAINT `{{prefix}}groups_folders_mapping_folder_id_fk_folders_id` " +
        "FOREIGN KEY (`folder_id`) REFERENCES `{{folders}}` (`id`) ON DELETE CASCADE;" +
        "ALTER TABLE `{{groups_folders_mapping}}` ADD CONSTRAINT `{{prefix}}groups_folders_mapping_group_id_fk_groups_id` " +
        "FOREIGN KEY (`group_id`) REFERENCES `{{groups}}` (`id`) ON DELETE CASCADE;" +
        "CREATE INDEX `{{prefix}}groups_updated_at_idx` ON `{{groups}}` (`updated_at`);"
    mysqlV17DownSQL = "ALTER TABLE `{{groups_folders_mapping}}` DROP FOREIGN KEY `{{prefix}}groups_folders_mapping_group_id_fk_groups_id`;" +
        "ALTER TABLE `{{groups_folders_mapping}}` DROP FOREIGN KEY `{{prefix}}groups_folders_mapping_folder_id_fk_folders_id`;" +
        "ALTER TABLE `{{users_groups_mapping}}` DROP FOREIGN KEY `{{prefix}}users_groups_mapping_user_id_fk_users_id`;" +
        "ALTER TABLE `{{users_groups_mapping}}` DROP FOREIGN KEY `{{prefix}}users_groups_mapping_group_id_fk_groups_id`;" +
        "ALTER TABLE `{{groups_folders_mapping}}` DROP INDEX `{{prefix}}unique_group_folder_mapping`;" +
        "ALTER TABLE `{{users_groups_mapping}}` DROP INDEX `{{prefix}}unique_user_group_mapping`;" +
        "DROP TABLE `{{users_groups_mapping}}` CASCADE;" +
        "DROP TABLE `{{groups_folders_mapping}}` CASCADE;" +
        "DROP TABLE `{{groups}}` CASCADE;" +
        "ALTER TABLE `{{users_folders_mapping}}` DROP FOREIGN KEY `{{prefix}}users_folders_mapping_folder_id_fk_folders_id`;" +
        "ALTER TABLE `{{users_folders_mapping}}` DROP FOREIGN KEY `{{prefix}}users_folders_mapping_user_id_fk_users_id`;" +
        "ALTER TABLE `{{users_folders_mapping}}` DROP INDEX `{{prefix}}unique_user_folder_mapping`;" +
        "RENAME TABLE `{{users_folders_mapping}}` TO `{{folders_mapping}}`;" +
        "ALTER TABLE `{{folders_mapping}}` ADD CONSTRAINT `{{prefix}}unique_mapping` UNIQUE (`user_id`, `folder_id`);" +
        "ALTER TABLE `{{folders_mapping}}` ADD CONSTRAINT `{{prefix}}folders_mapping_user_id_fk_users_id` " +
        "FOREIGN KEY (`user_id`) REFERENCES `{{users}}` (`id`) ON DELETE CASCADE;" +
        "ALTER TABLE `{{folders_mapping}}` ADD CONSTRAINT `{{prefix}}folders_mapping_folder_id_fk_folders_id` " +
        "FOREIGN KEY (`folder_id`) REFERENCES `{{folders}}` (`id`) ON DELETE CASCADE;"
    mysqlV19SQL = "CREATE TABLE `{{shared_sessions}}` (`key` varchar(128) NOT NULL PRIMARY KEY, " +
        "`data` longtext NOT NULL, `type` integer NOT NULL, `timestamp` bigint NOT NULL);" +
        "CREATE INDEX `{{prefix}}shared_sessions_type_idx` ON `{{shared_sessions}}` (`type`);" +
        "CREATE INDEX `{{prefix}}shared_sessions_timestamp_idx` ON `{{shared_sessions}}` (`timestamp`);"
    mysqlV19DownSQL = "DROP TABLE `{{shared_sessions}}` CASCADE;"
)
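The DDL above keeps {{table}} and {{prefix}} placeholders so a configurable table prefix can be substituted before execution, as initializeDatabase later does with strings.ReplaceAll. A standalone sketch of that substitution step (names are illustrative):

package main

import (
    "fmt"
    "strings"
)

// expandSQL replaces {{name}} placeholders with prefixed table names,
// mirroring how templated DDL can be bound to a configurable prefix.
func expandSQL(tpl, prefix string, tables map[string]string) string {
    out := tpl
    for placeholder, table := range tables {
        out = strings.ReplaceAll(out, "{{"+placeholder+"}}", prefix+table)
    }
    return strings.ReplaceAll(out, "{{prefix}}", prefix)
}

func main() {
    tpl := "CREATE INDEX `{{prefix}}users_updated_at_idx` ON `{{users}}` (`updated_at`);"
    sql := expandSQL(tpl, "sftpgo_", map[string]string{"users": "users"})
    fmt.Println(sql) // CREATE INDEX `sftpgo_users_updated_at_idx` ON `sftpgo_users` (`updated_at`);
}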
// MySQLProvider defines the auth provider for MySQL/MariaDB database
type MySQLProvider struct {
    dbHandle *sql.DB
}

func init() {
    version.AddFeature("+mysql")
}

func initializeMySQLProvider() error {
    var err error
    logSender = MySQLDataProviderName
-   dbHandle, err := sql.Open("mysql", getMySQLConnectionString(false))
+   connString, err := getMySQLConnectionString(false)
+   if err != nil {
+       return err
+   }
+   redactedConnString, err := getMySQLConnectionString(true)
+   if err != nil {
+       return err
+   }
+   dbHandle, err := sql.Open("mysql", connString)
    if err == nil {
        providerLog(logger.LevelDebug, "mysql database handle created, connection string: %#v, pool size: %v",
-           getMySQLConnectionString(true), config.PoolSize)
+           redactedConnString, config.PoolSize)
        dbHandle.SetMaxOpenConns(config.PoolSize)
-       dbHandle.SetConnMaxLifetime(1800 * time.Second)
-       provider = MySQLProvider{dbHandle: dbHandle}
+       if config.PoolSize > 0 {
+           dbHandle.SetMaxIdleConns(config.PoolSize)
+       } else {
+           dbHandle.SetMaxIdleConns(2)
+       }
+       dbHandle.SetConnMaxLifetime(240 * time.Second)
+       provider = &MySQLProvider{dbHandle: dbHandle}
    } else {
-       providerLog(logger.LevelWarn, "error creating mysql database handler, connection string: %#v, error: %v",
-           getMySQLConnectionString(true), err)
+       providerLog(logger.LevelError, "error creating mysql database handler, connection string: %#v, error: %v",
+           redactedConnString, err)
    }
    return err
}

-func getMySQLConnectionString(redactedPwd bool) string {
+func getMySQLConnectionString(redactedPwd bool) (string, error) {
    var connectionString string
-   if len(config.ConnectionString) == 0 {
+   if config.ConnectionString == "" {
        password := config.Password
-       if redactedPwd {
+       if redactedPwd && password != "" {
            password = "[redacted]"
        }
-       connectionString = fmt.Sprintf("%v:%v@tcp([%v]:%v)/%v?charset=utf8&interpolateParams=true&timeout=10s&tls=%v&writeTimeout=10s&readTimeout=10s",
-           config.Username, password, config.Host, config.Port, config.Name, getSSLMode())
+       sslMode := getSSLMode()
+       if sslMode == "custom" && !redactedPwd {
+           tlsConfig := &tls.Config{}
+           if config.RootCert != "" {
+               rootCAs, err := x509.SystemCertPool()
+               if err != nil {
+                   rootCAs = x509.NewCertPool()
+               }
+               rootCrt, err := os.ReadFile(config.RootCert)
+               if err != nil {
+                   return "", fmt.Errorf("unable to load root certificate %#v: %v", config.RootCert, err)
+               }
+               if !rootCAs.AppendCertsFromPEM(rootCrt) {
+                   return "", fmt.Errorf("unable to parse root certificate %#v", config.RootCert)
+               }
+               tlsConfig.RootCAs = rootCAs
+           }
+           if config.ClientCert != "" && config.ClientKey != "" {
+               clientCert := make([]tls.Certificate, 0, 1)
+               tlsCert, err := tls.LoadX509KeyPair(config.ClientCert, config.ClientKey)
+               if err != nil {
+                   return "", fmt.Errorf("unable to load key pair %#v, %#v: %v", config.ClientCert, config.ClientKey, err)
+               }
+               clientCert = append(clientCert, tlsCert)
+               tlsConfig.Certificates = clientCert
+           }
+           if config.SSLMode == 2 {
+               tlsConfig.InsecureSkipVerify = true
+           }
+           providerLog(logger.LevelInfo, "registering custom TLS config, root cert %#v, client cert %#v, client key %#v",
+               config.RootCert, config.ClientCert, config.ClientKey)
+           if err := mysql.RegisterTLSConfig("custom", tlsConfig); err != nil {
+               return "", fmt.Errorf("unable to register tls config: %v", err)
+           }
+       }
+       connectionString = fmt.Sprintf("%v:%v@tcp([%v]:%v)/%v?charset=utf8mb4&interpolateParams=true&timeout=10s&parseTime=true&tls=%v&writeTimeout=60s&readTimeout=60s",
+           config.Username, password, config.Host, config.Port, config.Name, sslMode)
    } else {
        connectionString = config.ConnectionString
    }
-   return connectionString
+   return connectionString, nil
}
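The connection string built above follows the go-sql-driver/mysql DSN format "user:password@tcp([host]:port)/dbname?params", with the password masked when the string is meant for logging. A minimal standalone sketch with made-up config values:

package main

import "fmt"

// buildDSN assembles a go-sql-driver/mysql style DSN; when redact is true the
// password is masked so the string is safe to log.
func buildDSN(user, password, host string, port int, dbName string, redact bool) string {
    if redact && password != "" {
        password = "[redacted]"
    }
    return fmt.Sprintf("%v:%v@tcp([%v]:%v)/%v?charset=utf8mb4&parseTime=true",
        user, password, host, port, dbName)
}

func main() {
    fmt.Println(buildDSN("sftpgo", "s3cret", "127.0.0.1", 3306, "sftpgo", true))
    // sftpgo:[redacted]@tcp([127.0.0.1]:3306)/sftpgo?charset=utf8mb4&parseTime=true
}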
-func (p MySQLProvider) checkAvailability() error {
+func (p *MySQLProvider) checkAvailability() error {
    return sqlCommonCheckAvailability(p.dbHandle)
}

-func (p MySQLProvider) validateUserAndPass(username string, password string) (User, error) {
-   return sqlCommonValidateUserAndPass(username, password, p.dbHandle)
+func (p *MySQLProvider) validateUserAndPass(username, password, ip, protocol string) (User, error) {
+   return sqlCommonValidateUserAndPass(username, password, ip, protocol, p.dbHandle)
}

-func (p MySQLProvider) validateUserAndPubKey(username string, publicKey string) (User, string, error) {
-   return sqlCommonValidateUserAndPubKey(username, publicKey, p.dbHandle)
+func (p *MySQLProvider) validateUserAndTLSCert(username, protocol string, tlsCert *x509.Certificate) (User, error) {
+   return sqlCommonValidateUserAndTLSCertificate(username, protocol, tlsCert, p.dbHandle)
}

-func (p MySQLProvider) getUserByID(ID int64) (User, error) {
-   return sqlCommonGetUserByID(ID, p.dbHandle)
+func (p *MySQLProvider) validateUserAndPubKey(username string, publicKey []byte, isSSHCert bool) (User, string, error) {
+   return sqlCommonValidateUserAndPubKey(username, publicKey, isSSHCert, p.dbHandle)
}

-func (p MySQLProvider) updateQuota(username string, filesAdd int, sizeAdd int64, reset bool) error {
+func (p *MySQLProvider) updateTransferQuota(username string, uploadSize, downloadSize int64, reset bool) error {
+   return sqlCommonUpdateTransferQuota(username, uploadSize, downloadSize, reset, p.dbHandle)
+}
+
+func (p *MySQLProvider) updateQuota(username string, filesAdd int, sizeAdd int64, reset bool) error {
    return sqlCommonUpdateQuota(username, filesAdd, sizeAdd, reset, p.dbHandle)
}

-func (p MySQLProvider) getUsedQuota(username string) (int, int64, error) {
+func (p *MySQLProvider) getUsedQuota(username string) (int, int64, int64, int64, error) {
    return sqlCommonGetUsedQuota(username, p.dbHandle)
}

-func (p MySQLProvider) userExists(username string) (User, error) {
-   return sqlCommonCheckUserExists(username, p.dbHandle)
+func (p *MySQLProvider) setUpdatedAt(username string) {
+   sqlCommonSetUpdatedAt(username, p.dbHandle)
}

-func (p MySQLProvider) addUser(user User) error {
+func (p *MySQLProvider) updateLastLogin(username string) error {
+   return sqlCommonUpdateLastLogin(username, p.dbHandle)
+}
+
+func (p *MySQLProvider) updateAdminLastLogin(username string) error {
+   return sqlCommonUpdateAdminLastLogin(username, p.dbHandle)
+}
+
+func (p *MySQLProvider) userExists(username string) (User, error) {
+   return sqlCommonGetUserByUsername(username, p.dbHandle)
+}
+
+func (p *MySQLProvider) addUser(user *User) error {
    return sqlCommonAddUser(user, p.dbHandle)
}

-func (p MySQLProvider) updateUser(user User) error {
+func (p *MySQLProvider) updateUser(user *User) error {
    return sqlCommonUpdateUser(user, p.dbHandle)
}

-func (p MySQLProvider) deleteUser(user User) error {
+func (p *MySQLProvider) deleteUser(user User) error {
    return sqlCommonDeleteUser(user, p.dbHandle)
}

-func (p MySQLProvider) getUsers(limit int, offset int, order string, username string) ([]User, error) {
-   return sqlCommonGetUsers(limit, offset, order, username, p.dbHandle)
+func (p *MySQLProvider) updateUserPassword(username, password string) error {
+   return sqlCommonUpdateUserPassword(username, password, p.dbHandle)
}

func (p *MySQLProvider) dumpUsers() ([]User, error) {
    return sqlCommonDumpUsers(p.dbHandle)
}

func (p *MySQLProvider) getRecentlyUpdatedUsers(after int64) ([]User, error) {
    return sqlCommonGetRecentlyUpdatedUsers(after, p.dbHandle)
}

func (p *MySQLProvider) getUsers(limit int, offset int, order string) ([]User, error) {
    return sqlCommonGetUsers(limit, offset, order, p.dbHandle)
}

func (p *MySQLProvider) getUsersForQuotaCheck(toFetch map[string]bool) ([]User, error) {
    return sqlCommonGetUsersForQuotaCheck(toFetch, p.dbHandle)
}

func (p *MySQLProvider) dumpFolders() ([]vfs.BaseVirtualFolder, error) {
    return sqlCommonDumpFolders(p.dbHandle)
}

func (p *MySQLProvider) getFolders(limit, offset int, order string, minimal bool) ([]vfs.BaseVirtualFolder, error) {
    return sqlCommonGetFolders(limit, offset, order, minimal, p.dbHandle)
}

func (p *MySQLProvider) getFolderByName(name string) (vfs.BaseVirtualFolder, error) {
    ctx, cancel := context.WithTimeout(context.Background(), defaultSQLQueryTimeout)
    defer cancel()
    return sqlCommonGetFolderByName(ctx, name, p.dbHandle)
}

func (p *MySQLProvider) addFolder(folder *vfs.BaseVirtualFolder) error {
    return sqlCommonAddFolder(folder, p.dbHandle)
}

func (p *MySQLProvider) updateFolder(folder *vfs.BaseVirtualFolder) error {
    return sqlCommonUpdateFolder(folder, p.dbHandle)
}

func (p *MySQLProvider) deleteFolder(folder vfs.BaseVirtualFolder) error {
    return sqlCommonDeleteFolder(folder, p.dbHandle)
}

func (p *MySQLProvider) updateFolderQuota(name string, filesAdd int, sizeAdd int64, reset bool) error {
    return sqlCommonUpdateFolderQuota(name, filesAdd, sizeAdd, reset, p.dbHandle)
}

func (p *MySQLProvider) getUsedFolderQuota(name string) (int, int64, error) {
    return sqlCommonGetFolderUsedQuota(name, p.dbHandle)
}

func (p *MySQLProvider) getGroups(limit, offset int, order string, minimal bool) ([]Group, error) {
    return sqlCommonGetGroups(limit, offset, order, minimal, p.dbHandle)
}

func (p *MySQLProvider) getGroupsWithNames(names []string) ([]Group, error) {
    return sqlCommonGetGroupsWithNames(names, p.dbHandle)
}

func (p *MySQLProvider) getUsersInGroups(names []string) ([]string, error) {
    return sqlCommonGetUsersInGroups(names, p.dbHandle)
}

func (p *MySQLProvider) groupExists(name string) (Group, error) {
    return sqlCommonGetGroupByName(name, p.dbHandle)
}

func (p *MySQLProvider) addGroup(group *Group) error {
    return sqlCommonAddGroup(group, p.dbHandle)
}

func (p *MySQLProvider) updateGroup(group *Group) error {
    return sqlCommonUpdateGroup(group, p.dbHandle)
}

func (p *MySQLProvider) deleteGroup(group Group) error {
    return sqlCommonDeleteGroup(group, p.dbHandle)
}

func (p *MySQLProvider) dumpGroups() ([]Group, error) {
    return sqlCommonDumpGroups(p.dbHandle)
}

func (p *MySQLProvider) adminExists(username string) (Admin, error) {
    return sqlCommonGetAdminByUsername(username, p.dbHandle)
}

func (p *MySQLProvider) addAdmin(admin *Admin) error {
    return sqlCommonAddAdmin(admin, p.dbHandle)
}

func (p *MySQLProvider) updateAdmin(admin *Admin) error {
    return sqlCommonUpdateAdmin(admin, p.dbHandle)
}

func (p *MySQLProvider) deleteAdmin(admin Admin) error {
    return sqlCommonDeleteAdmin(admin, p.dbHandle)
}

func (p *MySQLProvider) getAdmins(limit int, offset int, order string) ([]Admin, error) {
    return sqlCommonGetAdmins(limit, offset, order, p.dbHandle)
}

func (p *MySQLProvider) dumpAdmins() ([]Admin, error) {
    return sqlCommonDumpAdmins(p.dbHandle)
}

func (p *MySQLProvider) validateAdminAndPass(username, password, ip string) (Admin, error) {
    return sqlCommonValidateAdminAndPass(username, password, ip, p.dbHandle)
}

func (p *MySQLProvider) apiKeyExists(keyID string) (APIKey, error) {
    return sqlCommonGetAPIKeyByID(keyID, p.dbHandle)
}

func (p *MySQLProvider) addAPIKey(apiKey *APIKey) error {
    return sqlCommonAddAPIKey(apiKey, p.dbHandle)
}

func (p *MySQLProvider) updateAPIKey(apiKey *APIKey) error {
    return sqlCommonUpdateAPIKey(apiKey, p.dbHandle)
}

func (p *MySQLProvider) deleteAPIKey(apiKey APIKey) error {
    return sqlCommonDeleteAPIKey(apiKey, p.dbHandle)
}

func (p *MySQLProvider) getAPIKeys(limit int, offset int, order string) ([]APIKey, error) {
    return sqlCommonGetAPIKeys(limit, offset, order, p.dbHandle)
}

func (p *MySQLProvider) dumpAPIKeys() ([]APIKey, error) {
    return sqlCommonDumpAPIKeys(p.dbHandle)
}

func (p *MySQLProvider) updateAPIKeyLastUse(keyID string) error {
    return sqlCommonUpdateAPIKeyLastUse(keyID, p.dbHandle)
}

func (p *MySQLProvider) shareExists(shareID, username string) (Share, error) {
    return sqlCommonGetShareByID(shareID, username, p.dbHandle)
}

func (p *MySQLProvider) addShare(share *Share) error {
    return sqlCommonAddShare(share, p.dbHandle)
}

func (p *MySQLProvider) updateShare(share *Share) error {
    return sqlCommonUpdateShare(share, p.dbHandle)
}

func (p *MySQLProvider) deleteShare(share Share) error {
    return sqlCommonDeleteShare(share, p.dbHandle)
}

func (p *MySQLProvider) getShares(limit int, offset int, order, username string) ([]Share, error) {
    return sqlCommonGetShares(limit, offset, order, username, p.dbHandle)
}

func (p *MySQLProvider) dumpShares() ([]Share, error) {
    return sqlCommonDumpShares(p.dbHandle)
}

func (p *MySQLProvider) updateShareLastUse(shareID string, numTokens int) error {
    return sqlCommonUpdateShareLastUse(shareID, numTokens, p.dbHandle)
}

func (p *MySQLProvider) getDefenderHosts(from int64, limit int) ([]DefenderEntry, error) {
    return sqlCommonGetDefenderHosts(from, limit, p.dbHandle)
}

func (p *MySQLProvider) getDefenderHostByIP(ip string, from int64) (DefenderEntry, error) {
    return sqlCommonGetDefenderHostByIP(ip, from, p.dbHandle)
}

func (p *MySQLProvider) isDefenderHostBanned(ip string) (DefenderEntry, error) {
    return sqlCommonIsDefenderHostBanned(ip, p.dbHandle)
}

func (p *MySQLProvider) updateDefenderBanTime(ip string, minutes int) error {
    return sqlCommonDefenderIncrementBanTime(ip, minutes, p.dbHandle)
}

func (p *MySQLProvider) deleteDefenderHost(ip string) error {
    return sqlCommonDeleteDefenderHost(ip, p.dbHandle)
}

func (p *MySQLProvider) addDefenderEvent(ip string, score int) error {
    return sqlCommonAddDefenderHostAndEvent(ip, score, p.dbHandle)
}

func (p *MySQLProvider) setDefenderBanTime(ip string, banTime int64) error {
    return sqlCommonSetDefenderBanTime(ip, banTime, p.dbHandle)
}

func (p *MySQLProvider) cleanupDefender(from int64) error {
    return sqlCommonDefenderCleanup(from, p.dbHandle)
}

func (p *MySQLProvider) addActiveTransfer(transfer ActiveTransfer) error {
    return sqlCommonAddActiveTransfer(transfer, p.dbHandle)
}

func (p *MySQLProvider) updateActiveTransferSizes(ulSize, dlSize, transferID int64, connectionID string) error {
    return sqlCommonUpdateActiveTransferSizes(ulSize, dlSize, transferID, connectionID, p.dbHandle)
}

func (p *MySQLProvider) removeActiveTransfer(transferID int64, connectionID string) error {
    return sqlCommonRemoveActiveTransfer(transferID, connectionID, p.dbHandle)
}

func (p *MySQLProvider) cleanupActiveTransfers(before time.Time) error {
    return sqlCommonCleanupActiveTransfers(before, p.dbHandle)
}

func (p *MySQLProvider) getActiveTransfers(from time.Time) ([]ActiveTransfer, error) {
    return sqlCommonGetActiveTransfers(from, p.dbHandle)
}

func (p *MySQLProvider) addSharedSession(session Session) error {
    return sqlCommonAddSession(session, p.dbHandle)
}

func (p *MySQLProvider) deleteSharedSession(key string) error {
    return sqlCommonDeleteSession(key, p.dbHandle)
}

func (p *MySQLProvider) getSharedSession(key string) (Session, error) {
    return sqlCommonGetSession(key, p.dbHandle)
}

func (p *MySQLProvider) cleanupSharedSessions(sessionType SessionType, before int64) error {
    return sqlCommonCleanupSessions(sessionType, before, p.dbHandle)
}

func (p *MySQLProvider) close() error {
    return p.dbHandle.Close()
}

func (p *MySQLProvider) reloadConfig() error {
    return nil
}
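Every method above is a one-line wrapper that forwards to a shared sqlCommon* helper together with the provider's own *sql.DB, so backends differ mainly in connection setup and dialect-specific SQL. A standalone sketch of that delegation pattern (hypothetical names):

package main

import "fmt"

// commonGetUser is the logic shared by every backend.
func commonGetUser(db, username string) string {
    return fmt.Sprintf("user %q from %s", username, db)
}

type provider interface{ getUser(username string) string }

// each backend stores only its handle and forwards to the shared helper
type mysqlProvider struct{ db string }

func (p *mysqlProvider) getUser(username string) string { return commonGetUser(p.db, username) }

type pgsqlProvider struct{ db string }

func (p *pgsqlProvider) getUser(username string) string { return commonGetUser(p.db, username) }

func main() {
    for _, p := range []provider{&mysqlProvider{db: "mysql"}, &pgsqlProvider{db: "pgsql"}} {
        fmt.Println(p.getUser("alice"))
    }
}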
// initializeDatabase creates the initial database structure
func (p *MySQLProvider) initializeDatabase() error {
    dbVersion, err := sqlCommonGetDatabaseVersion(p.dbHandle, false)
    if err == nil && dbVersion.Version > 0 {
        return ErrNoInitRequired
    }
    if errors.Is(err, sql.ErrNoRows) {
        return errSchemaVersionEmpty
    }
    logger.InfoToConsole("creating initial database schema, version 15")
    providerLog(logger.LevelInfo, "creating initial database schema, version 15")
    initialSQL := strings.ReplaceAll(mysqlInitialSQL, "{{schema_version}}", sqlTableSchemaVersion)
    initialSQL = strings.ReplaceAll(initialSQL, "{{admins}}", sqlTableAdmins)
    initialSQL = strings.ReplaceAll(initialSQL, "{{folders}}", sqlTableFolders)
    initialSQL = strings.ReplaceAll(initialSQL, "{{users}}", sqlTableUsers)
    initialSQL = strings.ReplaceAll(initialSQL, "{{folders_mapping}}", sqlTableFoldersMapping)
    initialSQL = strings.ReplaceAll(initialSQL, "{{api_keys}}", sqlTableAPIKeys)
    initialSQL = strings.ReplaceAll(initialSQL, "{{shares}}", sqlTableShares)
    initialSQL = strings.ReplaceAll(initialSQL, "{{defender_events}}", sqlTableDefenderEvents)
    initialSQL = strings.ReplaceAll(initialSQL, "{{defender_hosts}}", sqlTableDefenderHosts)
    initialSQL = strings.ReplaceAll(initialSQL, "{{prefix}}", config.SQLTablesPrefix)

    return sqlCommonExecSQLAndUpdateDBVersion(p.dbHandle, strings.Split(initialSQL, ";"), 15, true)
}

func (p *MySQLProvider) migrateDatabase() error { //nolint:dupl
    dbVersion, err := sqlCommonGetDatabaseVersion(p.dbHandle, true)
    if err != nil {
        return err
    }

    switch version := dbVersion.Version; {
    case version == sqlDatabaseVersion:
        providerLog(logger.LevelDebug, "sql database is up to date, current version: %v", version)
        return ErrNoInitRequired
    case version < 15:
        err = fmt.Errorf("database version %v is too old, please see the upgrading docs", version)
        providerLog(logger.LevelError, "%v", err)
        logger.ErrorToConsole("%v", err)
        return err
    case version == 15:
        return updateMySQLDatabaseFromV15(p.dbHandle)
    case version == 16:
        return updateMySQLDatabaseFromV16(p.dbHandle)
    case version == 17:
        return updateMySQLDatabaseFromV17(p.dbHandle)
    case version == 18:
        return updateMySQLDatabaseFromV18(p.dbHandle)
    default:
        if version > sqlDatabaseVersion {
            providerLog(logger.LevelError, "database version %v is newer than the supported one: %v", version,
                sqlDatabaseVersion)
            logger.WarnToConsole("database version %v is newer than the supported one: %v", version,
                sqlDatabaseVersion)
            return nil
        }
        return fmt.Errorf("database version not handled: %v", version)
    }
}

func (p *MySQLProvider) revertDatabase(targetVersion int) error {
    dbVersion, err := sqlCommonGetDatabaseVersion(p.dbHandle, true)
    if err != nil {
        return err
    }
    if dbVersion.Version == targetVersion {
        return errors.New("current version matches target version, nothing to do")
    }

    switch dbVersion.Version {
    case 16:
        return downgradeMySQLDatabaseFromV16(p.dbHandle)
    case 17:
        return downgradeMySQLDatabaseFromV17(p.dbHandle)
    case 18:
        return downgradeMySQLDatabaseFromV18(p.dbHandle)
    case 19:
        return downgradeMySQLDatabaseFromV19(p.dbHandle)
    default:
        return fmt.Errorf("database version not handled: %v", dbVersion.Version)
    }
}

func (p *MySQLProvider) resetDatabase() error {
    sql := sqlReplaceAll(mysqlResetSQL)
    return sqlCommonExecSQLAndUpdateDBVersion(p.dbHandle, strings.Split(sql, ";"), 0, false)
}

func updateMySQLDatabaseFromV15(dbHandle *sql.DB) error {
    if err := updateMySQLDatabaseFrom15To16(dbHandle); err != nil {
        return err
    }
    return updateMySQLDatabaseFromV16(dbHandle)
}

func updateMySQLDatabaseFromV16(dbHandle *sql.DB) error {
    if err := updateMySQLDatabaseFrom16To17(dbHandle); err != nil {
        return err
    }
    return updateMySQLDatabaseFromV17(dbHandle)
}

func updateMySQLDatabaseFromV17(dbHandle *sql.DB) error {
    if err := updateMySQLDatabaseFrom17To18(dbHandle); err != nil {
        return err
    }
    return updateMySQLDatabaseFromV18(dbHandle)
}

func updateMySQLDatabaseFromV18(dbHandle *sql.DB) error {
    return updateMySQLDatabaseFrom18To19(dbHandle)
}

func downgradeMySQLDatabaseFromV16(dbHandle *sql.DB) error {
    return downgradeMySQLDatabaseFrom16To15(dbHandle)
}

func downgradeMySQLDatabaseFromV17(dbHandle *sql.DB) error {
    if err := downgradeMySQLDatabaseFrom17To16(dbHandle); err != nil {
        return err
    }
    return downgradeMySQLDatabaseFromV16(dbHandle)
}

func downgradeMySQLDatabaseFromV18(dbHandle *sql.DB) error {
    if err := downgradeMySQLDatabaseFrom18To17(dbHandle); err != nil {
        return err
    }
    return downgradeMySQLDatabaseFromV17(dbHandle)
}

func downgradeMySQLDatabaseFromV19(dbHandle *sql.DB) error {
    if err := downgradeMySQLDatabaseFrom19To18(dbHandle); err != nil {
        return err
    }
    return downgradeMySQLDatabaseFromV18(dbHandle)
}

func updateMySQLDatabaseFrom15To16(dbHandle *sql.DB) error {
    logger.InfoToConsole("updating database version: 15 -> 16")
    providerLog(logger.LevelInfo, "updating database version: 15 -> 16")
    sql := strings.ReplaceAll(mysqlV16SQL, "{{users}}", sqlTableUsers)
    sql = strings.ReplaceAll(sql, "{{active_transfers}}", sqlTableActiveTransfers)
    sql = strings.ReplaceAll(sql, "{{prefix}}", config.SQLTablesPrefix)
    return sqlCommonExecSQLAndUpdateDBVersion(dbHandle, strings.Split(sql, ";"), 16, true)
}

func updateMySQLDatabaseFrom16To17(dbHandle *sql.DB) error {
    logger.InfoToConsole("updating database version: 16 -> 17")
    providerLog(logger.LevelInfo, "updating database version: 16 -> 17")
    sql := strings.ReplaceAll(mysqlV17SQL, "{{users}}", sqlTableUsers)
    sql = strings.ReplaceAll(sql, "{{groups}}", sqlTableGroups)
    sql = strings.ReplaceAll(sql, "{{folders}}", sqlTableFolders)
    sql = strings.ReplaceAll(sql, "{{folders_mapping}}", sqlTableFoldersMapping)
    sql = strings.ReplaceAll(sql, "{{users_folders_mapping}}", sqlTableUsersFoldersMapping)
    sql = strings.ReplaceAll(sql, "{{users_groups_mapping}}", sqlTableUsersGroupsMapping)
    sql = strings.ReplaceAll(sql, "{{groups_folders_mapping}}", sqlTableGroupsFoldersMapping)
    sql = strings.ReplaceAll(sql, "{{prefix}}", config.SQLTablesPrefix)
    return sqlCommonExecSQLAndUpdateDBVersion(dbHandle, strings.Split(sql, ";"), 17, true)
}

func updateMySQLDatabaseFrom17To18(dbHandle *sql.DB) error {
    logger.InfoToConsole("updating database version: 17 -> 18")
    providerLog(logger.LevelInfo, "updating database version: 17 -> 18")
    if err := importGCSCredentials(); err != nil {
        return err
    }
    return sqlCommonExecSQLAndUpdateDBVersion(dbHandle, nil, 18, true)
}

func updateMySQLDatabaseFrom18To19(dbHandle *sql.DB) error {
    logger.InfoToConsole("updating database version: 18 -> 19")
    providerLog(logger.LevelInfo, "updating database version: 18 -> 19")
    sql := strings.ReplaceAll(mysqlV19SQL, "{{shared_sessions}}", sqlTableSharedSessions)
    sql = strings.ReplaceAll(sql, "{{prefix}}", config.SQLTablesPrefix)
    return sqlCommonExecSQLAndUpdateDBVersion(dbHandle, strings.Split(sql, ";"), 19, true)
}

func downgradeMySQLDatabaseFrom16To15(dbHandle *sql.DB) error {
    logger.InfoToConsole("downgrading database version: 16 -> 15")
    providerLog(logger.LevelInfo, "downgrading database version: 16 -> 15")
    sql := strings.ReplaceAll(mysqlV16DownSQL, "{{users}}", sqlTableUsers)
    sql = strings.ReplaceAll(sql, "{{active_transfers}}", sqlTableActiveTransfers)
    return sqlCommonExecSQLAndUpdateDBVersion(dbHandle, strings.Split(sql, ";"), 15, false)
}

func downgradeMySQLDatabaseFrom17To16(dbHandle *sql.DB) error {
    logger.InfoToConsole("downgrading database version: 17 -> 16")
    providerLog(logger.LevelInfo, "downgrading database version: 17 -> 16")
    sql := strings.ReplaceAll(mysqlV17DownSQL, "{{users}}", sqlTableUsers)
    sql = strings.ReplaceAll(sql, "{{groups}}", sqlTableGroups)
    sql = strings.ReplaceAll(sql, "{{folders}}", sqlTableFolders)
    sql = strings.ReplaceAll(sql, "{{folders_mapping}}", sqlTableFoldersMapping)
    sql = strings.ReplaceAll(sql, "{{users_folders_mapping}}", sqlTableUsersFoldersMapping)
    sql = strings.ReplaceAll(sql, "{{users_groups_mapping}}", sqlTableUsersGroupsMapping)
    sql = strings.ReplaceAll(sql, "{{groups_folders_mapping}}", sqlTableGroupsFoldersMapping)
    sql = strings.ReplaceAll(sql, "{{prefix}}", config.SQLTablesPrefix)
    return sqlCommonExecSQLAndUpdateDBVersion(dbHandle, strings.Split(sql, ";"), 16, false)
}

func downgradeMySQLDatabaseFrom18To17(dbHandle *sql.DB) error {
    logger.InfoToConsole("downgrading database version: 18 -> 17")
    providerLog(logger.LevelInfo, "downgrading database version: 18 -> 17")
    return sqlCommonExecSQLAndUpdateDBVersion(dbHandle, nil, 17, false)
|
||||
}
|
||||
|
||||
func downgradeMySQLDatabaseFrom19To18(dbHandle *sql.DB) error {
|
||||
logger.InfoToConsole("downgrading database version: 19 -> 18")
|
||||
providerLog(logger.LevelInfo, "downgrading database version: 19 -> 18")
|
||||
sql := strings.ReplaceAll(mysqlV19DownSQL, "{{shared_sessions}}", sqlTableSharedSessions)
|
||||
return sqlCommonExecSQLAndUpdateDBVersion(dbHandle, []string{sql}, 18, false)
|
||||
}
|
||||
|
||||
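The update and downgrade helpers above form a simple chain: each FromVN function applies a single one-version step and then delegates to the helper for the next version, so a database at any supported schema version is migrated with one call. A minimal standalone sketch of the pattern (illustrative only, not part of the diff; all names are hypothetical):

package main

import "fmt"

// step pretends to apply a single schema migration.
func step(from, to int) error {
    fmt.Printf("applying migration %d -> %d\n", from, to)
    return nil
}

// updateFromV15 applies 15 -> 16, then hands off to the next link in the chain.
func updateFromV15() error {
    if err := step(15, 16); err != nil {
        return err
    }
    return updateFromV16()
}

// updateFromV16 is the last link; supporting a later schema version only
// means appending one more function here.
func updateFromV16() error {
    return step(16, 17)
}

func main() {
    _ = updateFromV15() // prints both steps, in order
}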
dataprovider/mysql_disabled.go (Normal file, 32 lines)
@@ -0,0 +1,32 @@
// Copyright (C) 2019-2022 Nicola Murino
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, version 3.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

//go:build nomysql
// +build nomysql

package dataprovider

import (
    "errors"

    "github.com/drakkan/sftpgo/v2/version"
)

func init() {
    version.AddFeature("-mysql")
}

func initializeMySQLProvider() error {
    return errors.New("MySQL disabled at build time")
}
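The stub above is only compiled when the nomysql build tag is set (for example with go build -tags nomysql); otherwise the real driver file, tagged !nomysql, is compiled instead, and both variants register a feature marker at init time. A standalone sketch of that registration idea, using hypothetical names in place of the version package:

package main

import (
    "fmt"
    "strings"
)

// features collects "+name"/"-name" markers contributed by init() functions
// in build-tagged files, mirroring version.AddFeature in the diff above.
var features []string

func addFeature(f string) {
    features = append(features, f)
}

func init() {
    // In sftpgo this call lives in mysql.go ("+mysql") or in the stub
    // mysql_disabled.go ("-mysql"), selected by the nomysql build tag.
    addFeature("-mysql")
}

func main() {
    // The resulting binary can report which providers were compiled in.
    fmt.Println("features:", strings.Join(features, " "))
}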
dataprovider/pgsql.go
@@ -1,28 +1,210 @@
// Copyright (C) 2019-2022 Nicola Murino
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, version 3.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

//go:build !nopgsql
// +build !nopgsql

package dataprovider

import (
    "context"
    "crypto/x509"
    "database/sql"
    "errors"
    "fmt"
    "strings"
    "time"

-   "github.com/drakkan/sftpgo/logger"
    // we import lib/pq here to be able to disable PostgreSQL support using a build tag
    _ "github.com/lib/pq"

+   "github.com/drakkan/sftpgo/v2/logger"
+   "github.com/drakkan/sftpgo/v2/version"
+   "github.com/drakkan/sftpgo/v2/vfs"
)

-// PGSQLProvider auth provider for PostgreSQL database
const (
    pgsqlResetSQL = `DROP TABLE IF EXISTS "{{api_keys}}" CASCADE;
DROP TABLE IF EXISTS "{{folders_mapping}}" CASCADE;
DROP TABLE IF EXISTS "{{users_folders_mapping}}" CASCADE;
DROP TABLE IF EXISTS "{{users_groups_mapping}}" CASCADE;
DROP TABLE IF EXISTS "{{groups_folders_mapping}}" CASCADE;
DROP TABLE IF EXISTS "{{admins}}" CASCADE;
DROP TABLE IF EXISTS "{{folders}}" CASCADE;
DROP TABLE IF EXISTS "{{shares}}" CASCADE;
DROP TABLE IF EXISTS "{{users}}" CASCADE;
DROP TABLE IF EXISTS "{{groups}}" CASCADE;
DROP TABLE IF EXISTS "{{defender_events}}" CASCADE;
DROP TABLE IF EXISTS "{{defender_hosts}}" CASCADE;
DROP TABLE IF EXISTS "{{active_transfers}}" CASCADE;
DROP TABLE IF EXISTS "{{shared_sessions}}" CASCADE;
DROP TABLE IF EXISTS "{{schema_version}}" CASCADE;
`
    pgsqlInitial = `CREATE TABLE "{{schema_version}}" ("id" serial NOT NULL PRIMARY KEY, "version" integer NOT NULL);
CREATE TABLE "{{admins}}" ("id" serial NOT NULL PRIMARY KEY, "username" varchar(255) NOT NULL UNIQUE,
"description" varchar(512) NULL, "password" varchar(255) NOT NULL, "email" varchar(255) NULL, "status" integer NOT NULL,
"permissions" text NOT NULL, "filters" text NULL, "additional_info" text NULL, "last_login" bigint NOT NULL,
"created_at" bigint NOT NULL, "updated_at" bigint NOT NULL);
CREATE TABLE "{{defender_hosts}}" ("id" bigserial NOT NULL PRIMARY KEY, "ip" varchar(50) NOT NULL UNIQUE,
"ban_time" bigint NOT NULL, "updated_at" bigint NOT NULL);
CREATE TABLE "{{defender_events}}" ("id" bigserial NOT NULL PRIMARY KEY, "date_time" bigint NOT NULL, "score" integer NOT NULL,
"host_id" bigint NOT NULL);
ALTER TABLE "{{defender_events}}" ADD CONSTRAINT "{{prefix}}defender_events_host_id_fk_defender_hosts_id" FOREIGN KEY
("host_id") REFERENCES "{{defender_hosts}}" ("id") MATCH SIMPLE ON UPDATE NO ACTION ON DELETE CASCADE;
CREATE TABLE "{{folders}}" ("id" serial NOT NULL PRIMARY KEY, "name" varchar(255) NOT NULL UNIQUE, "description" varchar(512) NULL,
"path" text NULL, "used_quota_size" bigint NOT NULL, "used_quota_files" integer NOT NULL, "last_quota_update" bigint NOT NULL,
"filesystem" text NULL);
CREATE TABLE "{{users}}" ("id" serial NOT NULL PRIMARY KEY, "username" varchar(255) NOT NULL UNIQUE, "status" integer NOT NULL,
"expiration_date" bigint NOT NULL, "description" varchar(512) NULL, "password" text NULL, "public_keys" text NULL,
"home_dir" text NOT NULL, "uid" bigint NOT NULL, "gid" bigint NOT NULL, "max_sessions" integer NOT NULL,
"quota_size" bigint NOT NULL, "quota_files" integer NOT NULL, "permissions" text NOT NULL, "used_quota_size" bigint NOT NULL,
"used_quota_files" integer NOT NULL, "last_quota_update" bigint NOT NULL, "upload_bandwidth" integer NOT NULL,
"download_bandwidth" integer NOT NULL, "last_login" bigint NOT NULL, "filters" text NULL, "filesystem" text NULL,
"additional_info" text NULL, "created_at" bigint NOT NULL, "updated_at" bigint NOT NULL, "email" varchar(255) NULL);
CREATE TABLE "{{folders_mapping}}" ("id" serial NOT NULL PRIMARY KEY, "virtual_path" text NOT NULL,
"quota_size" bigint NOT NULL, "quota_files" integer NOT NULL, "folder_id" integer NOT NULL, "user_id" integer NOT NULL);
ALTER TABLE "{{folders_mapping}}" ADD CONSTRAINT "{{prefix}}unique_mapping" UNIQUE ("user_id", "folder_id");
ALTER TABLE "{{folders_mapping}}" ADD CONSTRAINT "{{prefix}}folders_mapping_folder_id_fk_folders_id"
FOREIGN KEY ("folder_id") REFERENCES "{{folders}}" ("id") MATCH SIMPLE ON UPDATE NO ACTION ON DELETE CASCADE;
ALTER TABLE "{{folders_mapping}}" ADD CONSTRAINT "{{prefix}}folders_mapping_user_id_fk_users_id"
FOREIGN KEY ("user_id") REFERENCES "{{users}}" ("id") MATCH SIMPLE ON UPDATE NO ACTION ON DELETE CASCADE;
CREATE TABLE "{{shares}}" ("id" serial NOT NULL PRIMARY KEY,
"share_id" varchar(60) NOT NULL UNIQUE, "name" varchar(255) NOT NULL, "description" varchar(512) NULL,
"scope" integer NOT NULL, "paths" text NOT NULL, "created_at" bigint NOT NULL, "updated_at" bigint NOT NULL,
"last_use_at" bigint NOT NULL, "expires_at" bigint NOT NULL, "password" text NULL,
"max_tokens" integer NOT NULL, "used_tokens" integer NOT NULL, "allow_from" text NULL,
"user_id" integer NOT NULL);
ALTER TABLE "{{shares}}" ADD CONSTRAINT "{{prefix}}shares_user_id_fk_users_id" FOREIGN KEY ("user_id")
REFERENCES "{{users}}" ("id") MATCH SIMPLE ON UPDATE NO ACTION ON DELETE CASCADE;
CREATE TABLE "{{api_keys}}" ("id" serial NOT NULL PRIMARY KEY, "name" varchar(255) NOT NULL,
"key_id" varchar(50) NOT NULL UNIQUE, "api_key" varchar(255) NOT NULL UNIQUE, "scope" integer NOT NULL,
"created_at" bigint NOT NULL, "updated_at" bigint NOT NULL, "last_use_at" bigint NOT NULL,"expires_at" bigint NOT NULL,
"description" text NULL, "admin_id" integer NULL, "user_id" integer NULL);
ALTER TABLE "{{api_keys}}" ADD CONSTRAINT "{{prefix}}api_keys_admin_id_fk_admins_id" FOREIGN KEY ("admin_id")
REFERENCES "{{admins}}" ("id") MATCH SIMPLE ON UPDATE NO ACTION ON DELETE CASCADE;
ALTER TABLE "{{api_keys}}" ADD CONSTRAINT "{{prefix}}api_keys_user_id_fk_users_id" FOREIGN KEY ("user_id")
REFERENCES "{{users}}" ("id") MATCH SIMPLE ON UPDATE NO ACTION ON DELETE CASCADE;
CREATE INDEX "{{prefix}}folders_mapping_folder_id_idx" ON "{{folders_mapping}}" ("folder_id");
CREATE INDEX "{{prefix}}folders_mapping_user_id_idx" ON "{{folders_mapping}}" ("user_id");
CREATE INDEX "{{prefix}}api_keys_admin_id_idx" ON "{{api_keys}}" ("admin_id");
CREATE INDEX "{{prefix}}api_keys_user_id_idx" ON "{{api_keys}}" ("user_id");
CREATE INDEX "{{prefix}}users_updated_at_idx" ON "{{users}}" ("updated_at");
CREATE INDEX "{{prefix}}shares_user_id_idx" ON "{{shares}}" ("user_id");
CREATE INDEX "{{prefix}}defender_hosts_updated_at_idx" ON "{{defender_hosts}}" ("updated_at");
CREATE INDEX "{{prefix}}defender_hosts_ban_time_idx" ON "{{defender_hosts}}" ("ban_time");
CREATE INDEX "{{prefix}}defender_events_date_time_idx" ON "{{defender_events}}" ("date_time");
CREATE INDEX "{{prefix}}defender_events_host_id_idx" ON "{{defender_events}}" ("host_id");
INSERT INTO {{schema_version}} (version) VALUES (15);
`
    pgsqlV16SQL = `ALTER TABLE "{{users}}" ADD COLUMN "download_data_transfer" integer DEFAULT 0 NOT NULL;
ALTER TABLE "{{users}}" ALTER COLUMN "download_data_transfer" DROP DEFAULT;
ALTER TABLE "{{users}}" ADD COLUMN "total_data_transfer" integer DEFAULT 0 NOT NULL;
ALTER TABLE "{{users}}" ALTER COLUMN "total_data_transfer" DROP DEFAULT;
ALTER TABLE "{{users}}" ADD COLUMN "upload_data_transfer" integer DEFAULT 0 NOT NULL;
ALTER TABLE "{{users}}" ALTER COLUMN "upload_data_transfer" DROP DEFAULT;
ALTER TABLE "{{users}}" ADD COLUMN "used_download_data_transfer" integer DEFAULT 0 NOT NULL;
ALTER TABLE "{{users}}" ALTER COLUMN "used_download_data_transfer" DROP DEFAULT;
ALTER TABLE "{{users}}" ADD COLUMN "used_upload_data_transfer" integer DEFAULT 0 NOT NULL;
ALTER TABLE "{{users}}" ALTER COLUMN "used_upload_data_transfer" DROP DEFAULT;
CREATE TABLE "{{active_transfers}}" ("id" bigserial NOT NULL PRIMARY KEY, "connection_id" varchar(100) NOT NULL,
"transfer_id" bigint NOT NULL, "transfer_type" integer NOT NULL, "username" varchar(255) NOT NULL,
"folder_name" varchar(255) NULL, "ip" varchar(50) NOT NULL, "truncated_size" bigint NOT NULL,
"current_ul_size" bigint NOT NULL, "current_dl_size" bigint NOT NULL, "created_at" bigint NOT NULL,
"updated_at" bigint NOT NULL);
CREATE INDEX "{{prefix}}active_transfers_connection_id_idx" ON "{{active_transfers}}" ("connection_id");
CREATE INDEX "{{prefix}}active_transfers_transfer_id_idx" ON "{{active_transfers}}" ("transfer_id");
CREATE INDEX "{{prefix}}active_transfers_updated_at_idx" ON "{{active_transfers}}" ("updated_at");
`
    pgsqlV16DownSQL = `ALTER TABLE "{{users}}" DROP COLUMN "used_upload_data_transfer" CASCADE;
ALTER TABLE "{{users}}" DROP COLUMN "used_download_data_transfer" CASCADE;
ALTER TABLE "{{users}}" DROP COLUMN "upload_data_transfer" CASCADE;
ALTER TABLE "{{users}}" DROP COLUMN "total_data_transfer" CASCADE;
ALTER TABLE "{{users}}" DROP COLUMN "download_data_transfer" CASCADE;
DROP TABLE "{{active_transfers}}" CASCADE;
`
    pgsqlV17SQL = `CREATE TABLE "{{groups}}" ("id" serial NOT NULL PRIMARY KEY, "name" varchar(255) NOT NULL UNIQUE,
"description" varchar(512) NULL, "created_at" bigint NOT NULL, "updated_at" bigint NOT NULL, "user_settings" text NULL);
CREATE TABLE "{{groups_folders_mapping}}" ("id" serial NOT NULL PRIMARY KEY, "group_id" integer NOT NULL,
"folder_id" integer NOT NULL, "virtual_path" text NOT NULL, "quota_size" bigint NOT NULL, "quota_files" integer NOT NULL);
CREATE TABLE "{{users_groups_mapping}}" ("id" serial NOT NULL PRIMARY KEY, "user_id" integer NOT NULL,
"group_id" integer NOT NULL, "group_type" integer NOT NULL);
DROP INDEX "{{prefix}}folders_mapping_folder_id_idx";
DROP INDEX "{{prefix}}folders_mapping_user_id_idx";
ALTER TABLE "{{folders_mapping}}" DROP CONSTRAINT "{{prefix}}unique_mapping";
ALTER TABLE "{{folders_mapping}}" RENAME TO "{{users_folders_mapping}}";
ALTER TABLE "{{users_folders_mapping}}" ADD CONSTRAINT "{{prefix}}unique_user_folder_mapping" UNIQUE ("user_id", "folder_id");
CREATE INDEX "{{prefix}}users_folders_mapping_folder_id_idx" ON "{{users_folders_mapping}}" ("folder_id");
CREATE INDEX "{{prefix}}users_folders_mapping_user_id_idx" ON "{{users_folders_mapping}}" ("user_id");
ALTER TABLE "{{users_groups_mapping}}" ADD CONSTRAINT "{{prefix}}unique_user_group_mapping" UNIQUE ("user_id", "group_id");
ALTER TABLE "{{groups_folders_mapping}}" ADD CONSTRAINT "{{prefix}}unique_group_folder_mapping" UNIQUE ("group_id", "folder_id");
CREATE INDEX "{{prefix}}users_groups_mapping_group_id_idx" ON "{{users_groups_mapping}}" ("group_id");
ALTER TABLE "{{users_groups_mapping}}" ADD CONSTRAINT "{{prefix}}users_groups_mapping_group_id_fk_groups_id"
FOREIGN KEY ("group_id") REFERENCES "{{groups}}" ("id") MATCH SIMPLE ON UPDATE NO ACTION ON DELETE NO ACTION;
CREATE INDEX "{{prefix}}users_groups_mapping_user_id_idx" ON "{{users_groups_mapping}}" ("user_id");
ALTER TABLE "{{users_groups_mapping}}" ADD CONSTRAINT "{{prefix}}users_groups_mapping_user_id_fk_users_id"
FOREIGN KEY ("user_id") REFERENCES "{{users}}" ("id") MATCH SIMPLE ON UPDATE NO ACTION ON DELETE CASCADE;
CREATE INDEX "{{prefix}}groups_folders_mapping_folder_id_idx" ON "{{groups_folders_mapping}}" ("folder_id");
ALTER TABLE "{{groups_folders_mapping}}" ADD CONSTRAINT "{{prefix}}groups_folders_mapping_folder_id_fk_folders_id"
FOREIGN KEY ("folder_id") REFERENCES "{{folders}}" ("id") MATCH SIMPLE ON UPDATE NO ACTION ON DELETE CASCADE;
CREATE INDEX "{{prefix}}groups_folders_mapping_group_id_idx" ON "{{groups_folders_mapping}}" ("group_id");
ALTER TABLE "{{groups_folders_mapping}}" ADD CONSTRAINT "{{prefix}}groups_folders_mapping_group_id_fk_groups_id"
FOREIGN KEY ("group_id") REFERENCES "{{groups}}" ("id") MATCH SIMPLE ON UPDATE NO ACTION ON DELETE CASCADE;
CREATE INDEX "{{prefix}}groups_updated_at_idx" ON "{{groups}}" ("updated_at");
`
    pgsqlV17DownSQL = `DROP TABLE "{{users_groups_mapping}}" CASCADE;
DROP TABLE "{{groups_folders_mapping}}" CASCADE;
DROP TABLE "{{groups}}" CASCADE;
DROP INDEX "{{prefix}}users_folders_mapping_folder_id_idx";
DROP INDEX "{{prefix}}users_folders_mapping_user_id_idx";
ALTER TABLE "{{users_folders_mapping}}" DROP CONSTRAINT "{{prefix}}unique_user_folder_mapping";
ALTER TABLE "{{users_folders_mapping}}" RENAME TO "{{folders_mapping}}";
ALTER TABLE "{{folders_mapping}}" ADD CONSTRAINT "{{prefix}}unique_mapping" UNIQUE ("user_id", "folder_id");
CREATE INDEX "{{prefix}}folders_mapping_folder_id_idx" ON "{{folders_mapping}}" ("folder_id");
CREATE INDEX "{{prefix}}folders_mapping_user_id_idx" ON "{{folders_mapping}}" ("user_id");
`
    pgsqlV19SQL = `CREATE TABLE "{{shared_sessions}}" ("key" varchar(128) NOT NULL PRIMARY KEY,
"data" text NOT NULL, "type" integer NOT NULL, "timestamp" bigint NOT NULL);
CREATE INDEX "{{prefix}}shared_sessions_type_idx" ON "{{shared_sessions}}" ("type");
CREATE INDEX "{{prefix}}shared_sessions_timestamp_idx" ON "{{shared_sessions}}" ("timestamp");`
    pgsqlV19DownSQL = `DROP TABLE "{{shared_sessions}}" CASCADE;`
)

// PGSQLProvider defines the auth provider for PostgreSQL database
type PGSQLProvider struct {
    dbHandle *sql.DB
}

func init() {
    version.AddFeature("+pgsql")
}

func initializePGSQLProvider() error {
    var err error
    logSender = PGSQLDataProviderName
    dbHandle, err := sql.Open("postgres", getPGSQLConnectionString(false))
    if err == nil {
        providerLog(logger.LevelDebug, "postgres database handle created, connection string: %#v, pool size: %v",
            getPGSQLConnectionString(true), config.PoolSize)
        dbHandle.SetMaxOpenConns(config.PoolSize)
-       provider = PGSQLProvider{dbHandle: dbHandle}
        if config.PoolSize > 0 {
            dbHandle.SetMaxIdleConns(config.PoolSize)
        } else {
            dbHandle.SetMaxIdleConns(2)
        }
        dbHandle.SetConnMaxLifetime(240 * time.Second)
        provider = &PGSQLProvider{dbHandle: dbHandle}
    } else {
-       providerLog(logger.LevelWarn, "error creating postgres database handler, connection string: %#v, error: %v",
        providerLog(logger.LevelError, "error creating postgres database handler, connection string: %#v, error: %v",
            getPGSQLConnectionString(true), err)
    }
    return err
@@ -30,59 +212,568 @@ func initializePGSQLProvider() error {

func getPGSQLConnectionString(redactedPwd bool) string {
    var connectionString string
-   if len(config.ConnectionString) == 0 {
    if config.ConnectionString == "" {
        password := config.Password
-       if redactedPwd {
        if redactedPwd && password != "" {
            password = "[redacted]"
        }
        connectionString = fmt.Sprintf("host='%v' port=%v dbname='%v' user='%v' password='%v' sslmode=%v connect_timeout=10",
            config.Host, config.Port, config.Name, config.Username, password, getSSLMode())
        if config.RootCert != "" {
            connectionString += fmt.Sprintf(" sslrootcert='%v'", config.RootCert)
        }
        if config.ClientCert != "" && config.ClientKey != "" {
            connectionString += fmt.Sprintf(" sslcert='%v' sslkey='%v'", config.ClientCert, config.ClientKey)
        }
    } else {
        connectionString = config.ConnectionString
    }
    return connectionString
}

-func (p PGSQLProvider) checkAvailability() error {
func (p *PGSQLProvider) checkAvailability() error {
    return sqlCommonCheckAvailability(p.dbHandle)
}

-func (p PGSQLProvider) validateUserAndPass(username string, password string) (User, error) {
-   return sqlCommonValidateUserAndPass(username, password, p.dbHandle)
func (p *PGSQLProvider) validateUserAndPass(username, password, ip, protocol string) (User, error) {
    return sqlCommonValidateUserAndPass(username, password, ip, protocol, p.dbHandle)
}

-func (p PGSQLProvider) validateUserAndPubKey(username string, publicKey string) (User, string, error) {
-   return sqlCommonValidateUserAndPubKey(username, publicKey, p.dbHandle)
func (p *PGSQLProvider) validateUserAndTLSCert(username, protocol string, tlsCert *x509.Certificate) (User, error) {
    return sqlCommonValidateUserAndTLSCertificate(username, protocol, tlsCert, p.dbHandle)
}

-func (p PGSQLProvider) getUserByID(ID int64) (User, error) {
-   return sqlCommonGetUserByID(ID, p.dbHandle)
func (p *PGSQLProvider) validateUserAndPubKey(username string, publicKey []byte, isSSHCert bool) (User, string, error) {
    return sqlCommonValidateUserAndPubKey(username, publicKey, isSSHCert, p.dbHandle)
}

-func (p PGSQLProvider) updateQuota(username string, filesAdd int, sizeAdd int64, reset bool) error {
func (p *PGSQLProvider) updateTransferQuota(username string, uploadSize, downloadSize int64, reset bool) error {
    return sqlCommonUpdateTransferQuota(username, uploadSize, downloadSize, reset, p.dbHandle)
}

func (p *PGSQLProvider) updateQuota(username string, filesAdd int, sizeAdd int64, reset bool) error {
    return sqlCommonUpdateQuota(username, filesAdd, sizeAdd, reset, p.dbHandle)
}

-func (p PGSQLProvider) getUsedQuota(username string) (int, int64, error) {
func (p *PGSQLProvider) getUsedQuota(username string) (int, int64, int64, int64, error) {
    return sqlCommonGetUsedQuota(username, p.dbHandle)
}

-func (p PGSQLProvider) userExists(username string) (User, error) {
-   return sqlCommonCheckUserExists(username, p.dbHandle)
func (p *PGSQLProvider) setUpdatedAt(username string) {
    sqlCommonSetUpdatedAt(username, p.dbHandle)
}

-func (p PGSQLProvider) addUser(user User) error {
func (p *PGSQLProvider) updateLastLogin(username string) error {
    return sqlCommonUpdateLastLogin(username, p.dbHandle)
}

func (p *PGSQLProvider) updateAdminLastLogin(username string) error {
    return sqlCommonUpdateAdminLastLogin(username, p.dbHandle)
}

func (p *PGSQLProvider) userExists(username string) (User, error) {
    return sqlCommonGetUserByUsername(username, p.dbHandle)
}

func (p *PGSQLProvider) addUser(user *User) error {
    return sqlCommonAddUser(user, p.dbHandle)
}

-func (p PGSQLProvider) updateUser(user User) error {
func (p *PGSQLProvider) updateUser(user *User) error {
    return sqlCommonUpdateUser(user, p.dbHandle)
}

-func (p PGSQLProvider) deleteUser(user User) error {
func (p *PGSQLProvider) deleteUser(user User) error {
    return sqlCommonDeleteUser(user, p.dbHandle)
}

-func (p PGSQLProvider) getUsers(limit int, offset int, order string, username string) ([]User, error) {
-   return sqlCommonGetUsers(limit, offset, order, username, p.dbHandle)
func (p *PGSQLProvider) updateUserPassword(username, password string) error {
    return sqlCommonUpdateUserPassword(username, password, p.dbHandle)
}

func (p *PGSQLProvider) dumpUsers() ([]User, error) {
    return sqlCommonDumpUsers(p.dbHandle)
}

func (p *PGSQLProvider) getRecentlyUpdatedUsers(after int64) ([]User, error) {
    return sqlCommonGetRecentlyUpdatedUsers(after, p.dbHandle)
}

func (p *PGSQLProvider) getUsers(limit int, offset int, order string) ([]User, error) {
    return sqlCommonGetUsers(limit, offset, order, p.dbHandle)
}

func (p *PGSQLProvider) getUsersForQuotaCheck(toFetch map[string]bool) ([]User, error) {
    return sqlCommonGetUsersForQuotaCheck(toFetch, p.dbHandle)
}

func (p *PGSQLProvider) dumpFolders() ([]vfs.BaseVirtualFolder, error) {
    return sqlCommonDumpFolders(p.dbHandle)
}

func (p *PGSQLProvider) getFolders(limit, offset int, order string, minimal bool) ([]vfs.BaseVirtualFolder, error) {
    return sqlCommonGetFolders(limit, offset, order, minimal, p.dbHandle)
}

func (p *PGSQLProvider) getFolderByName(name string) (vfs.BaseVirtualFolder, error) {
    ctx, cancel := context.WithTimeout(context.Background(), defaultSQLQueryTimeout)
    defer cancel()
    return sqlCommonGetFolderByName(ctx, name, p.dbHandle)
}

func (p *PGSQLProvider) addFolder(folder *vfs.BaseVirtualFolder) error {
    return sqlCommonAddFolder(folder, p.dbHandle)
}

func (p *PGSQLProvider) updateFolder(folder *vfs.BaseVirtualFolder) error {
    return sqlCommonUpdateFolder(folder, p.dbHandle)
}

func (p *PGSQLProvider) deleteFolder(folder vfs.BaseVirtualFolder) error {
    return sqlCommonDeleteFolder(folder, p.dbHandle)
}

func (p *PGSQLProvider) updateFolderQuota(name string, filesAdd int, sizeAdd int64, reset bool) error {
    return sqlCommonUpdateFolderQuota(name, filesAdd, sizeAdd, reset, p.dbHandle)
}

func (p *PGSQLProvider) getUsedFolderQuota(name string) (int, int64, error) {
    return sqlCommonGetFolderUsedQuota(name, p.dbHandle)
}

func (p *PGSQLProvider) getGroups(limit, offset int, order string, minimal bool) ([]Group, error) {
    return sqlCommonGetGroups(limit, offset, order, minimal, p.dbHandle)
}

func (p *PGSQLProvider) getGroupsWithNames(names []string) ([]Group, error) {
    return sqlCommonGetGroupsWithNames(names, p.dbHandle)
}

func (p *PGSQLProvider) getUsersInGroups(names []string) ([]string, error) {
    return sqlCommonGetUsersInGroups(names, p.dbHandle)
}

func (p *PGSQLProvider) groupExists(name string) (Group, error) {
    return sqlCommonGetGroupByName(name, p.dbHandle)
}

func (p *PGSQLProvider) addGroup(group *Group) error {
    return sqlCommonAddGroup(group, p.dbHandle)
}

func (p *PGSQLProvider) updateGroup(group *Group) error {
    return sqlCommonUpdateGroup(group, p.dbHandle)
}

func (p *PGSQLProvider) deleteGroup(group Group) error {
    return sqlCommonDeleteGroup(group, p.dbHandle)
}

func (p *PGSQLProvider) dumpGroups() ([]Group, error) {
    return sqlCommonDumpGroups(p.dbHandle)
}

func (p *PGSQLProvider) adminExists(username string) (Admin, error) {
    return sqlCommonGetAdminByUsername(username, p.dbHandle)
}

func (p *PGSQLProvider) addAdmin(admin *Admin) error {
    return sqlCommonAddAdmin(admin, p.dbHandle)
}

func (p *PGSQLProvider) updateAdmin(admin *Admin) error {
    return sqlCommonUpdateAdmin(admin, p.dbHandle)
}

func (p *PGSQLProvider) deleteAdmin(admin Admin) error {
    return sqlCommonDeleteAdmin(admin, p.dbHandle)
}

func (p *PGSQLProvider) getAdmins(limit int, offset int, order string) ([]Admin, error) {
    return sqlCommonGetAdmins(limit, offset, order, p.dbHandle)
}

func (p *PGSQLProvider) dumpAdmins() ([]Admin, error) {
    return sqlCommonDumpAdmins(p.dbHandle)
}

func (p *PGSQLProvider) validateAdminAndPass(username, password, ip string) (Admin, error) {
    return sqlCommonValidateAdminAndPass(username, password, ip, p.dbHandle)
}

func (p *PGSQLProvider) apiKeyExists(keyID string) (APIKey, error) {
    return sqlCommonGetAPIKeyByID(keyID, p.dbHandle)
}

func (p *PGSQLProvider) addAPIKey(apiKey *APIKey) error {
    return sqlCommonAddAPIKey(apiKey, p.dbHandle)
}

func (p *PGSQLProvider) updateAPIKey(apiKey *APIKey) error {
    return sqlCommonUpdateAPIKey(apiKey, p.dbHandle)
}

func (p *PGSQLProvider) deleteAPIKey(apiKey APIKey) error {
    return sqlCommonDeleteAPIKey(apiKey, p.dbHandle)
}

func (p *PGSQLProvider) getAPIKeys(limit int, offset int, order string) ([]APIKey, error) {
    return sqlCommonGetAPIKeys(limit, offset, order, p.dbHandle)
}

func (p *PGSQLProvider) dumpAPIKeys() ([]APIKey, error) {
    return sqlCommonDumpAPIKeys(p.dbHandle)
}

func (p *PGSQLProvider) updateAPIKeyLastUse(keyID string) error {
    return sqlCommonUpdateAPIKeyLastUse(keyID, p.dbHandle)
}

func (p *PGSQLProvider) shareExists(shareID, username string) (Share, error) {
    return sqlCommonGetShareByID(shareID, username, p.dbHandle)
}

func (p *PGSQLProvider) addShare(share *Share) error {
    return sqlCommonAddShare(share, p.dbHandle)
}

func (p *PGSQLProvider) updateShare(share *Share) error {
    return sqlCommonUpdateShare(share, p.dbHandle)
}

func (p *PGSQLProvider) deleteShare(share Share) error {
    return sqlCommonDeleteShare(share, p.dbHandle)
}

func (p *PGSQLProvider) getShares(limit int, offset int, order, username string) ([]Share, error) {
    return sqlCommonGetShares(limit, offset, order, username, p.dbHandle)
}

func (p *PGSQLProvider) dumpShares() ([]Share, error) {
    return sqlCommonDumpShares(p.dbHandle)
}

func (p *PGSQLProvider) updateShareLastUse(shareID string, numTokens int) error {
    return sqlCommonUpdateShareLastUse(shareID, numTokens, p.dbHandle)
}

func (p *PGSQLProvider) getDefenderHosts(from int64, limit int) ([]DefenderEntry, error) {
    return sqlCommonGetDefenderHosts(from, limit, p.dbHandle)
}

func (p *PGSQLProvider) getDefenderHostByIP(ip string, from int64) (DefenderEntry, error) {
    return sqlCommonGetDefenderHostByIP(ip, from, p.dbHandle)
}

func (p *PGSQLProvider) isDefenderHostBanned(ip string) (DefenderEntry, error) {
    return sqlCommonIsDefenderHostBanned(ip, p.dbHandle)
}

func (p *PGSQLProvider) updateDefenderBanTime(ip string, minutes int) error {
    return sqlCommonDefenderIncrementBanTime(ip, minutes, p.dbHandle)
}

func (p *PGSQLProvider) deleteDefenderHost(ip string) error {
    return sqlCommonDeleteDefenderHost(ip, p.dbHandle)
}

func (p *PGSQLProvider) addDefenderEvent(ip string, score int) error {
    return sqlCommonAddDefenderHostAndEvent(ip, score, p.dbHandle)
}

func (p *PGSQLProvider) setDefenderBanTime(ip string, banTime int64) error {
    return sqlCommonSetDefenderBanTime(ip, banTime, p.dbHandle)
}

func (p *PGSQLProvider) cleanupDefender(from int64) error {
    return sqlCommonDefenderCleanup(from, p.dbHandle)
}

func (p *PGSQLProvider) addActiveTransfer(transfer ActiveTransfer) error {
    return sqlCommonAddActiveTransfer(transfer, p.dbHandle)
}

func (p *PGSQLProvider) updateActiveTransferSizes(ulSize, dlSize, transferID int64, connectionID string) error {
    return sqlCommonUpdateActiveTransferSizes(ulSize, dlSize, transferID, connectionID, p.dbHandle)
}

func (p *PGSQLProvider) removeActiveTransfer(transferID int64, connectionID string) error {
    return sqlCommonRemoveActiveTransfer(transferID, connectionID, p.dbHandle)
}

func (p *PGSQLProvider) cleanupActiveTransfers(before time.Time) error {
    return sqlCommonCleanupActiveTransfers(before, p.dbHandle)
}

func (p *PGSQLProvider) getActiveTransfers(from time.Time) ([]ActiveTransfer, error) {
    return sqlCommonGetActiveTransfers(from, p.dbHandle)
}

func (p *PGSQLProvider) addSharedSession(session Session) error {
    return sqlCommonAddSession(session, p.dbHandle)
}

func (p *PGSQLProvider) deleteSharedSession(key string) error {
    return sqlCommonDeleteSession(key, p.dbHandle)
}

func (p *PGSQLProvider) getSharedSession(key string) (Session, error) {
    return sqlCommonGetSession(key, p.dbHandle)
}

func (p *PGSQLProvider) cleanupSharedSessions(sessionType SessionType, before int64) error {
    return sqlCommonCleanupSessions(sessionType, before, p.dbHandle)
}

func (p *PGSQLProvider) close() error {
    return p.dbHandle.Close()
}

func (p *PGSQLProvider) reloadConfig() error {
    return nil
}

// initializeDatabase creates the initial database structure
func (p *PGSQLProvider) initializeDatabase() error {
    dbVersion, err := sqlCommonGetDatabaseVersion(p.dbHandle, false)
    if err == nil && dbVersion.Version > 0 {
        return ErrNoInitRequired
    }
    if errors.Is(err, sql.ErrNoRows) {
        return errSchemaVersionEmpty
    }
    logger.InfoToConsole("creating initial database schema, version 15")
    providerLog(logger.LevelInfo, "creating initial database schema, version 15")
    initialSQL := strings.ReplaceAll(pgsqlInitial, "{{schema_version}}", sqlTableSchemaVersion)
    initialSQL = strings.ReplaceAll(initialSQL, "{{admins}}", sqlTableAdmins)
    initialSQL = strings.ReplaceAll(initialSQL, "{{folders}}", sqlTableFolders)
    initialSQL = strings.ReplaceAll(initialSQL, "{{users}}", sqlTableUsers)
    initialSQL = strings.ReplaceAll(initialSQL, "{{folders_mapping}}", sqlTableFoldersMapping)
    initialSQL = strings.ReplaceAll(initialSQL, "{{api_keys}}", sqlTableAPIKeys)
    initialSQL = strings.ReplaceAll(initialSQL, "{{shares}}", sqlTableShares)
    initialSQL = strings.ReplaceAll(initialSQL, "{{defender_events}}", sqlTableDefenderEvents)
    initialSQL = strings.ReplaceAll(initialSQL, "{{defender_hosts}}", sqlTableDefenderHosts)
    initialSQL = strings.ReplaceAll(initialSQL, "{{prefix}}", config.SQLTablesPrefix)
    if config.Driver == CockroachDataProviderName {
        // Cockroach does not support deferrable constraint validation, we don't need them,
        // we keep these definitions for the PostgreSQL driver to avoid changes for users
        // upgrading from old SFTPGo versions
        initialSQL = strings.ReplaceAll(initialSQL, "DEFERRABLE INITIALLY DEFERRED", "")
    }

    return sqlCommonExecSQLAndUpdateDBVersion(p.dbHandle, []string{initialSQL}, 15, true)
}

func (p *PGSQLProvider) migrateDatabase() error { //nolint:dupl
    dbVersion, err := sqlCommonGetDatabaseVersion(p.dbHandle, true)
    if err != nil {
        return err
    }

    switch version := dbVersion.Version; {
    case version == sqlDatabaseVersion:
        providerLog(logger.LevelDebug, "sql database is up to date, current version: %v", version)
        return ErrNoInitRequired
    case version < 15:
        err = fmt.Errorf("database version %v is too old, please see the upgrading docs", version)
        providerLog(logger.LevelError, "%v", err)
        logger.ErrorToConsole("%v", err)
        return err
    case version == 15:
        return updatePGSQLDatabaseFromV15(p.dbHandle)
    case version == 16:
        return updatePGSQLDatabaseFromV16(p.dbHandle)
    case version == 17:
        return updatePGSQLDatabaseFromV17(p.dbHandle)
    case version == 18:
        return updatePGSQLDatabaseFromV18(p.dbHandle)
    default:
        if version > sqlDatabaseVersion {
            providerLog(logger.LevelError, "database version %v is newer than the supported one: %v", version,
                sqlDatabaseVersion)
            logger.WarnToConsole("database version %v is newer than the supported one: %v", version,
                sqlDatabaseVersion)
            return nil
        }
        return fmt.Errorf("database version not handled: %v", version)
    }
}

func (p *PGSQLProvider) revertDatabase(targetVersion int) error {
    dbVersion, err := sqlCommonGetDatabaseVersion(p.dbHandle, true)
    if err != nil {
        return err
    }
    if dbVersion.Version == targetVersion {
        return errors.New("current version match target version, nothing to do")
    }

    switch dbVersion.Version {
    case 16:
        return downgradePGSQLDatabaseFromV16(p.dbHandle)
    case 17:
        return downgradePGSQLDatabaseFromV17(p.dbHandle)
    case 18:
        return downgradePGSQLDatabaseFromV18(p.dbHandle)
    case 19:
        return downgradePGSQLDatabaseFromV19(p.dbHandle)
    default:
        return fmt.Errorf("database version not handled: %v", dbVersion.Version)
    }
}

func (p *PGSQLProvider) resetDatabase() error {
    sql := sqlReplaceAll(pgsqlResetSQL)
    return sqlCommonExecSQLAndUpdateDBVersion(p.dbHandle, []string{sql}, 0, false)
}

func updatePGSQLDatabaseFromV15(dbHandle *sql.DB) error {
    if err := updatePGSQLDatabaseFrom15To16(dbHandle); err != nil {
        return err
    }
    return updatePGSQLDatabaseFromV16(dbHandle)
}

func updatePGSQLDatabaseFromV16(dbHandle *sql.DB) error {
    if err := updatePGSQLDatabaseFrom16To17(dbHandle); err != nil {
        return err
    }
    return updatePGSQLDatabaseFromV17(dbHandle)
}

func updatePGSQLDatabaseFromV17(dbHandle *sql.DB) error {
    if err := updatePGSQLDatabaseFrom17To18(dbHandle); err != nil {
        return err
    }
    return updatePGSQLDatabaseFromV18(dbHandle)
}

func updatePGSQLDatabaseFromV18(dbHandle *sql.DB) error {
    return updatePGSQLDatabaseFrom18To19(dbHandle)
}

func downgradePGSQLDatabaseFromV16(dbHandle *sql.DB) error {
    return downgradePGSQLDatabaseFrom16To15(dbHandle)
}

func downgradePGSQLDatabaseFromV17(dbHandle *sql.DB) error {
    if err := downgradePGSQLDatabaseFrom17To16(dbHandle); err != nil {
        return err
    }
    return downgradePGSQLDatabaseFromV16(dbHandle)
}

func downgradePGSQLDatabaseFromV18(dbHandle *sql.DB) error {
    if err := downgradePGSQLDatabaseFrom18To17(dbHandle); err != nil {
        return err
    }
    return downgradePGSQLDatabaseFromV17(dbHandle)
}

func downgradePGSQLDatabaseFromV19(dbHandle *sql.DB) error {
    if err := downgradePGSQLDatabaseFrom19To18(dbHandle); err != nil {
        return err
    }
    return downgradePGSQLDatabaseFromV18(dbHandle)
}

func updatePGSQLDatabaseFrom15To16(dbHandle *sql.DB) error {
    logger.InfoToConsole("updating database version: 15 -> 16")
    providerLog(logger.LevelInfo, "updating database version: 15 -> 16")
    sql := strings.ReplaceAll(pgsqlV16SQL, "{{users}}", sqlTableUsers)
    sql = strings.ReplaceAll(sql, "{{active_transfers}}", sqlTableActiveTransfers)
    sql = strings.ReplaceAll(sql, "{{prefix}}", config.SQLTablesPrefix)
    if config.Driver == CockroachDataProviderName {
        // Cockroach does not allow to run this schema migration within a transaction
        ctx, cancel := context.WithTimeout(context.Background(), longSQLQueryTimeout)
        defer cancel()

        for _, q := range strings.Split(sql, ";") {
            if strings.TrimSpace(q) == "" {
                continue
            }
            _, err := dbHandle.ExecContext(ctx, q)
            if err != nil {
                return err
            }
        }
        return sqlCommonUpdateDatabaseVersion(ctx, dbHandle, 16)
    }
    return sqlCommonExecSQLAndUpdateDBVersion(dbHandle, []string{sql}, 16, true)
}

func updatePGSQLDatabaseFrom16To17(dbHandle *sql.DB) error {
    logger.InfoToConsole("updating database version: 16 -> 17")
    providerLog(logger.LevelInfo, "updating database version: 16 -> 17")
    sql := pgsqlV17SQL
    if config.Driver == CockroachDataProviderName {
        sql = strings.ReplaceAll(sql, `ALTER TABLE "{{folders_mapping}}" DROP CONSTRAINT "{{prefix}}unique_mapping";`,
            `DROP INDEX "{{prefix}}unique_mapping" CASCADE;`)
    }
    sql = strings.ReplaceAll(sql, "{{groups}}", sqlTableGroups)
    sql = strings.ReplaceAll(sql, "{{users}}", sqlTableUsers)
    sql = strings.ReplaceAll(sql, "{{folders}}", sqlTableFolders)
    sql = strings.ReplaceAll(sql, "{{folders_mapping}}", sqlTableFoldersMapping)
    sql = strings.ReplaceAll(sql, "{{users_folders_mapping}}", sqlTableUsersFoldersMapping)
    sql = strings.ReplaceAll(sql, "{{users_groups_mapping}}", sqlTableUsersGroupsMapping)
    sql = strings.ReplaceAll(sql, "{{groups_folders_mapping}}", sqlTableGroupsFoldersMapping)
    sql = strings.ReplaceAll(sql, "{{prefix}}", config.SQLTablesPrefix)
    return sqlCommonExecSQLAndUpdateDBVersion(dbHandle, []string{sql}, 17, true)
}

func updatePGSQLDatabaseFrom17To18(dbHandle *sql.DB) error {
    logger.InfoToConsole("updating database version: 17 -> 18")
    providerLog(logger.LevelInfo, "updating database version: 17 -> 18")
    if err := importGCSCredentials(); err != nil {
        return err
    }
    return sqlCommonExecSQLAndUpdateDBVersion(dbHandle, nil, 18, true)
}

func updatePGSQLDatabaseFrom18To19(dbHandle *sql.DB) error {
    logger.InfoToConsole("updating database version: 18 -> 19")
    providerLog(logger.LevelInfo, "updating database version: 18 -> 19")
    sql := strings.ReplaceAll(pgsqlV19SQL, "{{shared_sessions}}", sqlTableSharedSessions)
    sql = strings.ReplaceAll(sql, "{{prefix}}", config.SQLTablesPrefix)
    return sqlCommonExecSQLAndUpdateDBVersion(dbHandle, []string{sql}, 19, true)
}

func downgradePGSQLDatabaseFrom16To15(dbHandle *sql.DB) error {
    logger.InfoToConsole("downgrading database version: 16 -> 15")
    providerLog(logger.LevelInfo, "downgrading database version: 16 -> 15")
    sql := strings.ReplaceAll(pgsqlV16DownSQL, "{{users}}", sqlTableUsers)
    sql = strings.ReplaceAll(sql, "{{active_transfers}}", sqlTableActiveTransfers)
    return sqlCommonExecSQLAndUpdateDBVersion(dbHandle, []string{sql}, 15, false)
}

func downgradePGSQLDatabaseFrom17To16(dbHandle *sql.DB) error {
    logger.InfoToConsole("downgrading database version: 17 -> 16")
    providerLog(logger.LevelInfo, "downgrading database version: 17 -> 16")
    sql := pgsqlV17DownSQL
    if config.Driver == CockroachDataProviderName {
        sql = strings.ReplaceAll(sql, `ALTER TABLE "{{users_folders_mapping}}" DROP CONSTRAINT "{{prefix}}unique_user_folder_mapping";`,
            `DROP INDEX "{{prefix}}unique_user_folder_mapping" CASCADE;`)
    }
    sql = strings.ReplaceAll(sql, "{{groups}}", sqlTableGroups)
    sql = strings.ReplaceAll(sql, "{{users}}", sqlTableUsers)
    sql = strings.ReplaceAll(sql, "{{folders}}", sqlTableFolders)
    sql = strings.ReplaceAll(sql, "{{folders_mapping}}", sqlTableFoldersMapping)
    sql = strings.ReplaceAll(sql, "{{users_folders_mapping}}", sqlTableUsersFoldersMapping)
    sql = strings.ReplaceAll(sql, "{{users_groups_mapping}}", sqlTableUsersGroupsMapping)
    sql = strings.ReplaceAll(sql, "{{groups_folders_mapping}}", sqlTableGroupsFoldersMapping)
    sql = strings.ReplaceAll(sql, "{{prefix}}", config.SQLTablesPrefix)
    return sqlCommonExecSQLAndUpdateDBVersion(dbHandle, []string{sql}, 16, false)
}

func downgradePGSQLDatabaseFrom18To17(dbHandle *sql.DB) error {
    logger.InfoToConsole("downgrading database version: 18 -> 17")
    providerLog(logger.LevelInfo, "downgrading database version: 18 -> 17")
    return sqlCommonExecSQLAndUpdateDBVersion(dbHandle, nil, 17, false)
}

func downgradePGSQLDatabaseFrom19To18(dbHandle *sql.DB) error {
    logger.InfoToConsole("downgrading database version: 19 -> 18")
    providerLog(logger.LevelInfo, "downgrading database version: 19 -> 18")
    sql := strings.ReplaceAll(pgsqlV19DownSQL, "{{shared_sessions}}", sqlTableSharedSessions)
    return sqlCommonExecSQLAndUpdateDBVersion(dbHandle, []string{sql}, 18, false)
}
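For reference, getPGSQLConnectionString builds a lib/pq keyword/value DSN when no explicit connection string is configured. The sketch below reproduces only the fmt.Sprintf from the diff; the host, database, and user values are made up for illustration:

package main

import "fmt"

func main() {
    // Mirrors the format string used above. With redactedPwd=true and a
    // non-empty password, "[redacted]" is logged in place of the secret.
    dsn := fmt.Sprintf("host='%v' port=%v dbname='%v' user='%v' password='%v' sslmode=%v connect_timeout=10",
        "127.0.0.1", 5432, "sftpgo", "sftpgo", "[redacted]", "disable")
    fmt.Println(dsn)
    // host='127.0.0.1' port=5432 dbname='sftpgo' user='sftpgo' password='[redacted]' sslmode=disable connect_timeout=10
}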
dataprovider/pgsql_disabled.go (Normal file, 32 lines)
@@ -0,0 +1,32 @@
// Copyright (C) 2019-2022 Nicola Murino
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, version 3.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

//go:build nopgsql
// +build nopgsql

package dataprovider

import (
    "errors"

    "github.com/drakkan/sftpgo/v2/version"
)

func init() {
    version.AddFeature("-pgsql")
}

func initializePGSQLProvider() error {
    return errors.New("PostgreSQL disabled at build time")
}
dataprovider/quotaupdater.go (Normal file, 261 lines)
@@ -0,0 +1,261 @@
// Copyright (C) 2019-2022 Nicola Murino
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, version 3.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

package dataprovider

import (
    "sync"
    "time"

    "github.com/drakkan/sftpgo/v2/logger"
)

var delayedQuotaUpdater quotaUpdater

func init() {
    delayedQuotaUpdater = newQuotaUpdater()
}

type quotaObject struct {
    size  int64
    files int
}

type transferQuotaObject struct {
    ulSize int64
    dlSize int64
}

type quotaUpdater struct {
    paramsMutex sync.RWMutex
    waitTime    time.Duration
    sync.RWMutex
    pendingUserQuotaUpdates     map[string]quotaObject
    pendingFolderQuotaUpdates   map[string]quotaObject
    pendingTransferQuotaUpdates map[string]transferQuotaObject
}

func newQuotaUpdater() quotaUpdater {
    return quotaUpdater{
        pendingUserQuotaUpdates:     make(map[string]quotaObject),
        pendingFolderQuotaUpdates:   make(map[string]quotaObject),
        pendingTransferQuotaUpdates: make(map[string]transferQuotaObject),
    }
}

func (q *quotaUpdater) start() {
    q.setWaitTime(config.DelayedQuotaUpdate)

    go q.loop()
}

func (q *quotaUpdater) loop() {
    waitTime := q.getWaitTime()
    providerLog(logger.LevelDebug, "delayed quota update loop started, wait time: %v", waitTime)
    for waitTime > 0 {
        // We do this with a time.Sleep instead of a time.Ticker because we don't know
        // how long each quota processing cycle will take, and we want to make
        // sure we wait the configured seconds between each iteration
        time.Sleep(waitTime)
        providerLog(logger.LevelDebug, "delayed quota update check start")
        q.storeUsersQuota()
        q.storeFoldersQuota()
        q.storeUsersTransferQuota()
        providerLog(logger.LevelDebug, "delayed quota update check end")
        waitTime = q.getWaitTime()
    }
    providerLog(logger.LevelDebug, "delayed quota update loop ended, wait time: %v", waitTime)
}

func (q *quotaUpdater) setWaitTime(secs int) {
    q.paramsMutex.Lock()
    defer q.paramsMutex.Unlock()

    q.waitTime = time.Duration(secs) * time.Second
}

func (q *quotaUpdater) getWaitTime() time.Duration {
    q.paramsMutex.RLock()
    defer q.paramsMutex.RUnlock()

    return q.waitTime
}

func (q *quotaUpdater) resetUserQuota(username string) {
    q.Lock()
    defer q.Unlock()

    delete(q.pendingUserQuotaUpdates, username)
}

func (q *quotaUpdater) updateUserQuota(username string, files int, size int64) {
    q.Lock()
    defer q.Unlock()

    obj := q.pendingUserQuotaUpdates[username]
    obj.size += size
    obj.files += files
    if obj.files == 0 && obj.size == 0 {
        delete(q.pendingUserQuotaUpdates, username)
        return
    }
    q.pendingUserQuotaUpdates[username] = obj
}

func (q *quotaUpdater) getUserPendingQuota(username string) (int, int64) {
    q.RLock()
    defer q.RUnlock()

    obj := q.pendingUserQuotaUpdates[username]

    return obj.files, obj.size
}

func (q *quotaUpdater) resetFolderQuota(name string) {
    q.Lock()
    defer q.Unlock()

    delete(q.pendingFolderQuotaUpdates, name)
}

func (q *quotaUpdater) updateFolderQuota(name string, files int, size int64) {
    q.Lock()
    defer q.Unlock()

    obj := q.pendingFolderQuotaUpdates[name]
    obj.size += size
    obj.files += files
    if obj.files == 0 && obj.size == 0 {
        delete(q.pendingFolderQuotaUpdates, name)
        return
    }
    q.pendingFolderQuotaUpdates[name] = obj
}

func (q *quotaUpdater) getFolderPendingQuota(name string) (int, int64) {
    q.RLock()
    defer q.RUnlock()

    obj := q.pendingFolderQuotaUpdates[name]

    return obj.files, obj.size
}

func (q *quotaUpdater) resetUserTransferQuota(username string) {
    q.Lock()
    defer q.Unlock()

    delete(q.pendingTransferQuotaUpdates, username)
}

func (q *quotaUpdater) updateUserTransferQuota(username string, ulSize, dlSize int64) {
    q.Lock()
    defer q.Unlock()

    obj := q.pendingTransferQuotaUpdates[username]
    obj.ulSize += ulSize
    obj.dlSize += dlSize
    if obj.ulSize == 0 && obj.dlSize == 0 {
        delete(q.pendingTransferQuotaUpdates, username)
        return
    }
    q.pendingTransferQuotaUpdates[username] = obj
}

func (q *quotaUpdater) getUserPendingTransferQuota(username string) (int64, int64) {
    q.RLock()
    defer q.RUnlock()

    obj := q.pendingTransferQuotaUpdates[username]

    return obj.ulSize, obj.dlSize
}

func (q *quotaUpdater) getUsernames() []string {
    q.RLock()
    defer q.RUnlock()

    result := make([]string, 0, len(q.pendingUserQuotaUpdates))
    for username := range q.pendingUserQuotaUpdates {
        result = append(result, username)
    }

    return result
}

func (q *quotaUpdater) getFoldernames() []string {
    q.RLock()
    defer q.RUnlock()

    result := make([]string, 0, len(q.pendingFolderQuotaUpdates))
    for name := range q.pendingFolderQuotaUpdates {
        result = append(result, name)
    }

    return result
}

func (q *quotaUpdater) getTransferQuotaUsernames() []string {
    q.RLock()
    defer q.RUnlock()

    result := make([]string, 0, len(q.pendingTransferQuotaUpdates))
    for username := range q.pendingTransferQuotaUpdates {
        result = append(result, username)
    }

    return result
}

func (q *quotaUpdater) storeUsersQuota() {
    for _, username := range q.getUsernames() {
        files, size := q.getUserPendingQuota(username)
        if size != 0 || files != 0 {
            err := provider.updateQuota(username, files, size, false)
            if err != nil {
                providerLog(logger.LevelWarn, "unable to update quota delayed for user %#v: %v", username, err)
                continue
            }
            q.updateUserQuota(username, -files, -size)
        }
    }
}

func (q *quotaUpdater) storeFoldersQuota() {
    for _, name := range q.getFoldernames() {
        files, size := q.getFolderPendingQuota(name)
        if size != 0 || files != 0 {
            err := provider.updateFolderQuota(name, files, size, false)
            if err != nil {
                providerLog(logger.LevelWarn, "unable to update quota delayed for folder %#v: %v", name, err)
                continue
            }
            q.updateFolderQuota(name, -files, -size)
        }
    }
}

func (q *quotaUpdater) storeUsersTransferQuota() {
    for _, username := range q.getTransferQuotaUsernames() {
        ulSize, dlSize := q.getUserPendingTransferQuota(username)
        if ulSize != 0 || dlSize != 0 {
            err := provider.updateTransferQuota(username, ulSize, dlSize, false)
            if err != nil {
                providerLog(logger.LevelWarn, "unable to update transfer quota delayed for user %#v: %v", username, err)
                continue
            }
            q.updateUserTransferQuota(username, -ulSize, -dlSize)
        }
    }
}
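The pending-update maps above follow one merge rule: deltas accumulate per key, a successful flush applies the negated delta, and an entry that nets out to zero is dropped so it is not flushed again. A minimal standalone sketch of that rule (illustrative only, without the locking and provider calls):

package main

import "fmt"

type quotaObject struct {
    size  int64
    files int
}

func main() {
    pending := make(map[string]quotaObject)
    update := func(user string, files int, size int64) {
        obj := pending[user]
        obj.files += files
        obj.size += size
        if obj.files == 0 && obj.size == 0 {
            delete(pending, user) // fully flushed or cancelled out
            return
        }
        pending[user] = obj
    }
    update("alice", 1, 1024)   // an upload finished
    update("alice", -1, -1024) // applied to the provider, so negate it
    fmt.Println(len(pending))  // 0: nothing left to flush
}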
dataprovider/scheduler.go (Normal file, 110 lines)
@@ -0,0 +1,110 @@
// Copyright (C) 2019-2022 Nicola Murino
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, version 3.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

package dataprovider

import (
    "fmt"
    "sync/atomic"
    "time"

    "github.com/robfig/cron/v3"

    "github.com/drakkan/sftpgo/v2/logger"
    "github.com/drakkan/sftpgo/v2/metric"
    "github.com/drakkan/sftpgo/v2/util"
)

var (
    scheduler        *cron.Cron
    lastCachesUpdate int64
    // used for bolt and memory providers, so we avoid iterating all users
    // to find recently modified ones
    lastUserUpdate int64
)

func stopScheduler() {
    if scheduler != nil {
        scheduler.Stop()
        scheduler = nil
    }
}

func startScheduler() error {
    stopScheduler()

    scheduler = cron.New()
    _, err := scheduler.AddFunc("@every 30s", checkDataprovider)
    if err != nil {
        return fmt.Errorf("unable to schedule dataprovider availability check: %w", err)
    }

    if config.AutoBackup.Enabled {
        spec := fmt.Sprintf("0 %v * * %v", config.AutoBackup.Hour, config.AutoBackup.DayOfWeek)
        _, err = scheduler.AddFunc(spec, config.doBackup)
        if err != nil {
            return fmt.Errorf("unable to schedule auto backup: %w", err)
        }
    }

    err = addScheduledCacheUpdates()
    if err != nil {
        return err
    }
    scheduler.Start()
    return nil
}

func addScheduledCacheUpdates() error {
    lastCachesUpdate = util.GetTimeAsMsSinceEpoch(time.Now())
    _, err := scheduler.AddFunc("@every 10m", checkCacheUpdates)
    if err != nil {
        return fmt.Errorf("unable to schedule cache updates: %w", err)
    }
    return nil
}

func checkDataprovider() {
    err := provider.checkAvailability()
    if err != nil {
        providerLog(logger.LevelError, "check availability error: %v", err)
    }
    metric.UpdateDataProviderAvailability(err)
}

func checkCacheUpdates() {
    providerLog(logger.LevelDebug, "start caches check, update time %v", util.GetTimeFromMsecSinceEpoch(lastCachesUpdate))
    checkTime := util.GetTimeAsMsSinceEpoch(time.Now())
    users, err := provider.getRecentlyUpdatedUsers(lastCachesUpdate)
    if err != nil {
        providerLog(logger.LevelError, "unable to get recently updated users: %v", err)
        return
    }
    for _, user := range users {
        providerLog(logger.LevelDebug, "invalidate caches for user %#v", user.Username)
        webDAVUsersCache.swap(&user)
        cachedPasswords.Remove(user.Username)
    }

    lastCachesUpdate = checkTime
    providerLog(logger.LevelDebug, "end caches check, new update time %v", util.GetTimeFromMsecSinceEpoch(lastCachesUpdate))
}

func setLastUserUpdate() {
    atomic.StoreInt64(&lastUserUpdate, util.GetTimeAsMsSinceEpoch(time.Now()))
}

func getLastUserUpdate() int64 {
    return atomic.LoadInt64(&lastUserUpdate)
}
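startScheduler above builds the auto-backup schedule as a standard five-field cron spec from the configured hour and day of week. A small sketch showing the spec that results from hypothetical settings (hour 2, every day; these are example values, not sftpgo defaults):

package main

import "fmt"

func main() {
    // Same fmt.Sprintf as in startScheduler above.
    hour, dayOfWeek := 2, "*"
    spec := fmt.Sprintf("0 %v * * %v", hour, dayOfWeek)
    fmt.Println(spec) // "0 2 * * *" -> run the backup every day at 02:00
}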
Some files were not shown because too many files have changed in this diff.