Compare commits
1227 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
7d19d3f10b | ||
|
|
a5d6441fb6 | ||
|
|
5605496c5f | ||
|
|
64c28bf6aa | ||
|
|
b9e75ff661 | ||
|
|
362396811e | ||
|
|
916e53cf9a | ||
|
|
5f35b8b704 | ||
|
|
08e29d4ee0 | ||
|
|
ff4c1b239e | ||
|
|
49c8affcef | ||
|
|
14139af165 | ||
|
|
af4f54bf11 | ||
|
|
9ba468698b | ||
|
|
0f74c077ac | ||
|
|
dd98c06397 | ||
|
|
b989cdabe5 | ||
|
|
9e7e89d69e | ||
|
|
f64056b820 | ||
|
|
7e6d944cb5 | ||
|
|
a66d207291 | ||
|
|
0a8edcd811 | ||
|
|
0fa08ddbaa | ||
|
|
3d4c35522a | ||
|
|
f400e67daa | ||
|
|
4e10275fd1 | ||
|
|
7bd71474ef | ||
|
|
0ac2120532 | ||
|
|
9e5287cfb4 | ||
|
|
450ab6b252 | ||
|
|
51d900558a | ||
|
|
a71690ff2a | ||
|
|
f390eab1de | ||
|
|
571e088fdd | ||
|
|
6714085d58 | ||
|
|
0389605d65 | ||
|
|
b8ef94ece7 | ||
|
|
6f422c3d8b | ||
|
|
222f0c735b | ||
|
|
63bf8eb1a1 | ||
|
|
db0e58ae7e | ||
|
|
87045284cc | ||
|
|
f3ee20980a | ||
|
|
54f1946aba | ||
|
|
47842ae614 | ||
|
|
7e0b62b703 | ||
|
|
15b4194e8f | ||
|
|
5a199acbb2 | ||
|
|
07b3f2f4d6 | ||
|
|
13ee236884 | ||
|
|
3822b7d3f7 | ||
|
|
2b2b69fb23 | ||
|
|
4b4edef0ad | ||
|
|
aa1e73326f | ||
|
|
07012aa812 | ||
|
|
0e54fa5655 | ||
|
|
3e44a1dd2d | ||
|
|
a417df60b3 | ||
|
|
2067c5c527 | ||
|
|
8a43486730 | ||
|
|
2636fedce8 | ||
|
|
a42e9ffa6b | ||
|
|
0e8c41bbd1 | ||
|
|
1e21aa9453 | ||
|
|
f9eadd7f04 | ||
|
|
04dc97072b | ||
|
|
ddda0b5ece | ||
|
|
76e89d07d4 | ||
|
|
a538255034 | ||
|
|
4ad2a9c1fa | ||
|
|
7ae9303c99 | ||
|
|
6c7b3ac5bb | ||
|
|
bd294bb3cf | ||
|
|
7349598b19 | ||
|
|
7f19f9f39c | ||
|
|
f19691250d | ||
|
|
554a1cb1f4 | ||
|
|
e54237ff70 | ||
|
|
e58709c822 | ||
|
|
5eca73a399 | ||
|
|
f8a19f747d | ||
|
|
ea3c1d7a3b | ||
|
|
bd585d8e52 | ||
|
|
a40fa93d7b | ||
|
|
4498bbf2e4 | ||
|
|
63e3891808 | ||
|
|
3ebdfa9b2d | ||
|
|
8debde842c | ||
|
|
3e5cf56460 | ||
|
|
f264b005ff | ||
|
|
bf76b0b158 | ||
|
|
c2a65a9a74 | ||
|
|
3267a50ae3 | ||
|
|
f0839519a8 | ||
|
|
95e9106902 | ||
|
|
da03f6c4e3 | ||
|
|
9e77cd1a26 | ||
|
|
56bf51277c | ||
|
|
37d98ca290 | ||
|
|
9473dc3937 | ||
|
|
6777008aec | ||
|
|
3e8254e398 | ||
|
|
9ddd2d3588 | ||
|
|
57935f585c | ||
|
|
2b463d61e3 | ||
|
|
ced4206c5f | ||
|
|
c86db09cd8 | ||
|
|
194c3c13ac | ||
|
|
d65c00728a | ||
|
|
526f6e0f6b | ||
|
|
a61211d32c | ||
|
|
78f75cdcb9 | ||
|
|
4cd340e07f | ||
|
|
890dde0e00 | ||
|
|
b1efe8d0b5 | ||
|
|
71fff28d29 | ||
|
|
6bfdf941bc | ||
|
|
fdc10aa6c7 | ||
|
|
455bb550ee | ||
|
|
2a827544ef | ||
|
|
9d2b5dc07d | ||
|
|
3ca62d76d7 | ||
|
|
00b9280834 | ||
|
|
ef0a3bc571 | ||
|
|
e3c5cf981f | ||
|
|
ec5da8b4a5 | ||
|
|
81de7d271e | ||
|
|
c8158e14e0 | ||
|
|
e96ae5ca51 | ||
|
|
e059197398 | ||
|
|
a2e73228d2 | ||
|
|
1470018054 | ||
|
|
e6bfbcd489 | ||
|
|
a0bbcf6ebb | ||
|
|
7f5a13d185 | ||
|
|
d5946da1e2 | ||
|
|
21682d1c1d | ||
|
|
fd52475ae2 | ||
|
|
55b47cf741 | ||
|
|
e0ce2e2e8a | ||
|
|
8fc4971df1 | ||
|
|
20e8cb898a | ||
|
|
b5894b257f | ||
|
|
cb517a3595 | ||
|
|
1b8f94c08f | ||
|
|
e46051299f | ||
|
|
bf2dcfe307 | ||
|
|
719f6077ab | ||
|
|
101783ee86 | ||
|
|
6843402d2e | ||
|
|
88feda6bf9 | ||
|
|
5c446ff645 | ||
|
|
9a6b1a1315 | ||
|
|
90009a649d | ||
|
|
8762628481 | ||
|
|
a5e41c9336 | ||
|
|
729f30aebf | ||
|
|
1da213a6e3 | ||
|
|
2b0b19da9e | ||
|
|
686166f2ce | ||
|
|
93ce593ed0 | ||
|
|
6f4475ff72 | ||
|
|
0b9a96ec6b | ||
|
|
f0f5ee392b | ||
|
|
dadaca141a | ||
|
|
7ab30099dd | ||
|
|
3170991aa8 | ||
|
|
118744a860 | ||
|
|
fe6a3f2ce8 | ||
|
|
75efaa9741 | ||
|
|
560e7f316a | ||
|
|
eee5d74e87 | ||
|
|
9ae473fcdc | ||
|
|
b774289c6d | ||
|
|
ecf715880f | ||
|
|
b2e28fe3a2 | ||
|
|
cc2f23bd89 | ||
|
|
7329cd804b | ||
|
|
84e3132ed1 | ||
|
|
f6b11c2d01 | ||
|
|
32da923dfe | ||
|
|
91dfa501f8 | ||
|
|
7c724e18fe | ||
|
|
302f83c7a4 | ||
|
|
984ca1fb7e | ||
|
|
87f6a18476 | ||
|
|
90c21458b8 | ||
|
|
f536c64043 | ||
|
|
1a33b5bb53 | ||
|
|
0ecaa862bd | ||
|
|
751946f47a | ||
|
|
796ea1dde9 | ||
|
|
a87aa9b98e | ||
|
|
9abd186166 | ||
|
|
18d0bf9dc3 | ||
|
|
c9bd08cf9c | ||
|
|
d2f4edcdb6 | ||
|
|
67abf03fe3 | ||
|
|
5d7f6960f3 | ||
|
|
4bea9ed760 | ||
|
|
4995cf1b02 | ||
|
|
a5d0cbbe44 | ||
|
|
7b1a0d3cd3 | ||
|
|
1e0b3a2a8c | ||
|
|
e72bb1e124 | ||
|
|
164621289c | ||
|
|
737109b2b8 | ||
|
|
8b8e27b702 | ||
|
|
4b099640de | ||
|
|
80da2dc722 | ||
|
|
61947e67ae | ||
|
|
9a37e3d159 | ||
|
|
14fb6c4038 | ||
|
|
dd9c5b2149 | ||
|
|
ecd488a840 | ||
|
|
4a44a7dfe1 | ||
|
|
16a44a144b | ||
|
|
97f8142b1e | ||
|
|
504cd3efda | ||
|
|
857b6cc10a | ||
|
|
002a06629e | ||
|
|
5bc0f4f8af | ||
|
|
cacfffc5bf | ||
|
|
87d7854453 | ||
|
|
aa34388de0 | ||
|
|
a3f50029ba | ||
|
|
f9d8b83c2a | ||
|
|
7c8bb5b18a | ||
|
|
254b2ae87f | ||
|
|
5a40f998ae | ||
|
|
77f3400161 | ||
|
|
3521bacc4a | ||
|
|
55f8171dd1 | ||
|
|
a7b159aebb | ||
|
|
5c114b28e3 | ||
|
|
e079444e8a | ||
|
|
3cb23ac956 | ||
|
|
8fb256ac91 | ||
|
|
ca32cd5e0e | ||
|
|
e0defafa26 | ||
|
|
5cccb872bb | ||
|
|
aaf940edab | ||
|
|
853086b942 | ||
|
|
81bdba6782 | ||
|
|
d955ddcef9 | ||
|
|
4bbb195711 | ||
|
|
a193089646 | ||
|
|
9bfdc10172 | ||
|
|
b062b38ef4 | ||
|
|
a31a9dc32c | ||
|
|
fa43791ea9 | ||
|
|
93b9c1617e | ||
|
|
4c710d731f | ||
|
|
d9f30e7ac5 | ||
|
|
03da7f696c | ||
|
|
883a3dceaf | ||
|
|
7b86e2ac59 | ||
|
|
8502d7b051 | ||
|
|
6f8b71b89f | ||
|
|
7e7f662a23 | ||
|
|
0bec1c6012 | ||
|
|
5582f5c811 | ||
|
|
48ed3dab1f | ||
|
|
d8de0faef5 | ||
|
|
df828b6021 | ||
|
|
056daaddfc | ||
|
|
5c2fd8d52a | ||
|
|
4519bffa39 | ||
|
|
1ea7429921 | ||
|
|
816c174036 | ||
|
|
79857a8733 | ||
|
|
dcc3292dbc | ||
|
|
7f674a7fb3 | ||
|
|
b64d3c2fbf | ||
|
|
7fc5cb80d6 | ||
|
|
92460f811f | ||
|
|
e18ad55067 | ||
|
|
4e9dae6fa4 | ||
|
|
f5a0559be6 | ||
|
|
670018f05e | ||
|
|
8bbf54d2b6 | ||
|
|
d31cccf85f | ||
|
|
c19b03a3f7 | ||
|
|
c6b8644828 | ||
|
|
f1a255aa6c | ||
|
|
876bf8aa4f | ||
|
|
900e519ff1 | ||
|
|
f1832d4478 | ||
|
|
ebbbf81e65 | ||
|
|
1fccd05e9e | ||
|
|
66945c0a02 | ||
|
|
fa0ca8fe89 | ||
|
|
c478c7dae9 | ||
|
|
9382db751c | ||
|
|
7e2a8e70c9 | ||
|
|
cd35636939 | ||
|
|
d51adb041e | ||
|
|
02db00d008 | ||
|
|
fb2d59ec92 | ||
|
|
1df1225eed | ||
|
|
aca71bff7a | ||
|
|
9709aed5e6 | ||
|
|
d2a4178846 | ||
|
|
d73be7aee5 | ||
|
|
ffe7f7ff16 | ||
|
|
a6ed6fc721 | ||
|
|
c3831de94e | ||
|
|
9b6b9cca3d | ||
|
|
64d1ea2d89 | ||
|
|
1c51239da8 | ||
|
|
51c15de892 | ||
|
|
b8efb1b8ec | ||
|
|
ec1d20f46f | ||
|
|
1f619d5ea6 | ||
|
|
6d3d94a01f | ||
|
|
0a3d94f73d | ||
|
|
7c68b03d07 | ||
|
|
2912b2e92e | ||
|
|
a6fe802370 | ||
|
|
ad483b7581 | ||
|
|
df86955f28 | ||
|
|
00ec426a80 | ||
|
|
222db53410 | ||
|
|
4d85dc108f | ||
|
|
6d582a821b | ||
|
|
794afbf85e | ||
|
|
e3f3997c5e | ||
|
|
f78090e47f | ||
|
|
4d7a4aa99a | ||
|
|
c36217c654 | ||
|
|
59bb578b89 | ||
|
|
7d8823307f | ||
|
|
8174349032 | ||
|
|
00a02dc14d | ||
|
|
ced73ed04e | ||
|
|
cc73bb811b | ||
|
|
a587228cf0 | ||
|
|
1472a0f415 | ||
|
|
0bb141960f | ||
|
|
c153330ab8 | ||
|
|
5b4ef0ee3b | ||
|
|
9632b6ee94 | ||
|
|
78eb1c1166 | ||
|
|
a7c0b07a2a | ||
|
|
dc1cc88a46 | ||
|
|
3f5451eab6 | ||
|
|
30d98326ca | ||
|
|
bedc8e288b | ||
|
|
6092b6628e | ||
|
|
6ee51c5cc1 | ||
|
|
4df0ae82ac | ||
|
|
5db31f0fb3 | ||
|
|
0f8170c10f | ||
|
|
3c24cb773f | ||
|
|
bec54ac8ae | ||
|
|
c330ac8418 | ||
|
|
3e478f42ea | ||
|
|
18ab757216 | ||
|
|
b6bcf0cd94 | ||
|
|
015aa36c56 | ||
|
|
f2480ce5c9 | ||
|
|
f828c58dca | ||
|
|
dc19921b0c | ||
|
|
3f3591bae0 | ||
|
|
fc048728d9 | ||
|
|
aeb4675196 | ||
|
|
4652f9ede8 | ||
|
|
531cb5b5a1 | ||
|
|
9fb43b2c46 | ||
|
|
8a8298ad46 | ||
|
|
3d6b09e949 | ||
|
|
fb8f013ea7 | ||
|
|
c41319bb7a | ||
|
|
46157ebbb6 | ||
|
|
200b1d08c7 | ||
|
|
24b0352eb6 | ||
|
|
52f3a98cc8 | ||
|
|
e29a3efd39 | ||
|
|
ca730e77a5 | ||
|
|
0833b4698e | ||
|
|
ee5c5e033d | ||
|
|
78233ff9a3 | ||
|
|
b331dc5686 | ||
|
|
dfcfcee208 | ||
|
|
094ee1522e | ||
|
|
3bc58f5988 | ||
|
|
f6938e76dc | ||
|
|
570964deb3 | ||
|
|
31984ffec1 | ||
|
|
74fc3aaf37 | ||
|
|
97d0a48557 | ||
|
|
3bbe67571f | ||
|
|
f131ef130b | ||
|
|
4a6a4ce28d | ||
|
|
a80ac80fcd | ||
|
|
4aa9686e3b | ||
|
|
64e87d64bd | ||
|
|
9ca0b46f30 | ||
|
|
6eb154bb74 | ||
|
|
ea01c3a125 | ||
|
|
1b4a1fbbe5 | ||
|
|
ec81a7ac29 | ||
|
|
22d28a37b6 | ||
|
|
cc134cad9a | ||
|
|
1459150024 | ||
|
|
87751e562e | ||
|
|
e6f969cb04 | ||
|
|
ba1febba73 | ||
|
|
af8fa7ff81 | ||
|
|
4ab2e4088a | ||
|
|
da0ccc6426 | ||
|
|
0661876e99 | ||
|
|
cd72ac4fc9 | ||
|
|
da5a061b65 | ||
|
|
65948a47f1 | ||
|
|
bf4b3e6840 | ||
|
|
6ea38188e8 | ||
|
|
b5639a51fd | ||
|
|
5c34d814d6 | ||
|
|
0eca4f1866 | ||
|
|
b52f829f05 | ||
|
|
90f64c9f63 | ||
|
|
c106498dd8 | ||
|
|
7bad65a43e | ||
|
|
101c2962ab | ||
|
|
59140a6d51 | ||
|
|
b1d54f69d9 | ||
|
|
374de07c7b | ||
|
|
8a4c21b64a | ||
|
|
16ba7ddb34 | ||
|
|
bd9506da42 | ||
|
|
b903a6e46f | ||
|
|
bcf088f586 | ||
|
|
be3857d572 | ||
|
|
b99d4ce82e | ||
|
|
0a558203da | ||
|
|
5a549a88fe | ||
|
|
fe953d6b38 | ||
|
|
05c62b9f40 | ||
|
|
555dc3b0c0 | ||
|
|
0de0d3308c | ||
|
|
a20373b613 | ||
|
|
ced2e16f41 | ||
|
|
3ac832c8dd | ||
|
|
a3c087456b | ||
|
|
419774158a | ||
|
|
0503215e7a | ||
|
|
9541843ff7 | ||
|
|
98f22ba110 | ||
|
|
1e9a19e326 | ||
|
|
0046c9960a | ||
|
|
7640612a95 | ||
|
|
a26962f367 | ||
|
|
f778e47d22 | ||
|
|
4781921336 | ||
|
|
3ae8abda9e | ||
|
|
90b324d707 | ||
|
|
3a22aae34f | ||
|
|
45a0473fec | ||
|
|
a7313e4492 | ||
|
|
c41ae116eb | ||
|
|
83c7453957 | ||
|
|
85a47810ff | ||
|
|
c997ef876c | ||
|
|
ae8ccadad2 | ||
|
|
5967aa1aa5 | ||
|
|
c900cde8e4 | ||
|
|
13183a9f76 | ||
|
|
5a568b4077 | ||
|
|
030507a2ce | ||
|
|
338301955f | ||
|
|
6d313f6d8f | ||
|
|
776dffcf12 | ||
|
|
e1a2451c22 | ||
|
|
7344366ce8 | ||
|
|
bd5191dfc5 | ||
|
|
bfa4085932 | ||
|
|
302ec2558c | ||
|
|
ff19879ffd | ||
|
|
04001f7ad3 | ||
|
|
076b2f0ee0 | ||
|
|
93dfb03eaf | ||
|
|
e09bdd43d4 | ||
|
|
ac8d8a3da1 | ||
|
|
a4157e83e9 | ||
|
|
13f23838a1 | ||
|
|
fd4c388b23 | ||
|
|
88b10da596 | ||
|
|
c07dc74d48 | ||
|
|
b48e01155c | ||
|
|
0ff010cc94 | ||
|
|
81aac15a6c | ||
|
|
c1b862394d | ||
|
|
f19937b715 | ||
|
|
f2f612b450 | ||
|
|
0c2640bbab | ||
|
|
3bb0ca1d2b | ||
|
|
d5b42f72e2 | ||
|
|
62744e081b | ||
|
|
9dcaf1555f | ||
|
|
a09cf5c8b9 | ||
|
|
47ebe42375 | ||
|
|
4d97ab9eb9 | ||
|
|
8ed13dc4a9 | ||
|
|
3b66dd0873 | ||
|
|
d992f0ffcc | ||
|
|
6c5a7e8f13 | ||
|
|
9d3d7db29c | ||
|
|
8607788975 | ||
|
|
4be6307d87 | ||
|
|
feec2118bb | ||
|
|
43182fc25e | ||
|
|
976f588863 | ||
|
|
575bcf1f03 | ||
|
|
969c992bfd | ||
|
|
c1239fbf59 | ||
|
|
c63b923ec3 | ||
|
|
574c4029fc | ||
|
|
423d8306be | ||
|
|
fc7066a25c | ||
|
|
e1bf46c6a5 | ||
|
|
3b46e6a6fb | ||
|
|
7a85c66ee7 | ||
|
|
25a44030f9 | ||
|
|
600268ebb8 | ||
|
|
1223957f91 | ||
|
|
15cde2dd1a | ||
|
|
50e441849a | ||
|
|
02bb09ec01 | ||
|
|
402947a43c | ||
|
|
b9bc8d722d | ||
|
|
0cb5c49cf3 | ||
|
|
9fc4be6d40 | ||
|
|
ecfed4dc04 | ||
|
|
b415e4d98f | ||
|
|
7d059efe06 | ||
|
|
60cfbd2989 | ||
|
|
8ecf64f481 | ||
|
|
019b0f2fd5 | ||
|
|
15d6cd144a | ||
|
|
f59f62317e | ||
|
|
f2b93c0402 | ||
|
|
0540b8780e | ||
|
|
fa45c9c138 | ||
|
|
b67cd0d3df | ||
|
|
c8f7fc9bc9 | ||
|
|
f1b998ce16 | ||
|
|
aaa758e978 | ||
|
|
716946a148 | ||
|
|
15934d72cc | ||
|
|
8f6cdacd00 | ||
|
|
8f736da4b8 | ||
|
|
4ea4202b99 | ||
|
|
d4bfc3f6b5 | ||
|
|
23d9ebfc91 | ||
|
|
5c99f4fb60 | ||
|
|
2263c7e20f | ||
|
|
515b2d917e | ||
|
|
af4723356d | ||
|
|
068dd34a38 | ||
|
|
b16a5c2caf | ||
|
|
a383957cfa | ||
|
|
00f97aabb4 | ||
|
|
32db0787bb | ||
|
|
1275328fdf | ||
|
|
7778716fa7 | ||
|
|
77476d0f56 | ||
|
|
c7a1fc2996 | ||
|
|
e7d8e73be8 | ||
|
|
3ee27f4370 | ||
|
|
92424cd1c2 | ||
|
|
0190dad984 | ||
|
|
198258f4e7 | ||
|
|
5be4b6bd44 | ||
|
|
3941255733 | ||
|
|
46998252e5 | ||
|
|
74b51f0ad3 | ||
|
|
b11865f971 | ||
|
|
f4369cdbef | ||
|
|
92638ce93d | ||
|
|
6ef85d6026 | ||
|
|
bc88503f25 | ||
|
|
47317bed9b | ||
|
|
f45c89fc46 | ||
|
|
112e3b2fc2 | ||
|
|
124c471a2b | ||
|
|
683ba6cd5b | ||
|
|
21fbcf4556 | ||
|
|
2ffefbeb33 | ||
|
|
c844fc7477 | ||
|
|
4b98f37df1 | ||
|
|
0bc4db9950 | ||
|
|
5acf29dae6 | ||
|
|
e9a42cd508 | ||
|
|
ed26d68948 | ||
|
|
b389f93d97 | ||
|
|
150aebf8d2 | ||
|
|
74e0223eb9 | ||
|
|
0823928f98 | ||
|
|
f895059660 | ||
|
|
acb4310c11 | ||
|
|
fdf3f23df5 | ||
|
|
d92861a8e8 | ||
|
|
1ee843757d | ||
|
|
ea26d7786c | ||
|
|
6eb43baf3d | ||
|
|
2f56375121 | ||
|
|
3bfd7e4d17 | ||
|
|
e1c66d96a1 | ||
|
|
a43854ae9b | ||
|
|
183bedd6ed | ||
|
|
2a89a8f664 | ||
|
|
5cd27ce529 | ||
|
|
cee2e18caf | ||
|
|
9ad750da54 | ||
|
|
5f49af1780 | ||
|
|
d5f092284a | ||
|
|
0e50310a66 | ||
|
|
5939ac4801 | ||
|
|
db274f1093 | ||
|
|
6bc5c64a3a | ||
|
|
70e035315e | ||
|
|
8a1249878a | ||
|
|
5e375f56dd | ||
|
|
28f1d66ae5 | ||
|
|
79060d37a7 | ||
|
|
800e64404b | ||
|
|
54c0c1b80d | ||
|
|
f7c7e2951d | ||
|
|
f249286cb1 | ||
|
|
d6dc3a507e | ||
|
|
0286da2356 | ||
|
|
76c08baaa0 | ||
|
|
67ea75cf03 | ||
|
|
4c658bb6f0 | ||
|
|
1ab02d5891 | ||
|
|
055506e518 | ||
|
|
88122ba2f8 | ||
|
|
bfe0c18976 | ||
|
|
df41f0c556 | ||
|
|
561c5021dd | ||
|
|
ad07fc78eb | ||
|
|
3243181c5f | ||
|
|
895117718e | ||
|
|
534b253c20 | ||
|
|
901cafc6da | ||
|
|
a6e36e7cad | ||
|
|
b566457e12 | ||
|
|
ca3e15578e | ||
|
|
4b2edff6dd | ||
|
|
2146b83343 | ||
|
|
3e1b07324d | ||
|
|
8cc2dfe5c2 | ||
|
|
78a837e8f1 | ||
|
|
49830516be | ||
|
|
41e1d9e68a | ||
|
|
5da4f931c5 | ||
|
|
552a96533e | ||
|
|
cebd069c77 | ||
|
|
be9230e85b | ||
|
|
b1ce6eb85b | ||
|
|
46176a54b4 | ||
|
|
a21ccad174 | ||
|
|
1129a868a5 | ||
|
|
1ac66d27b6 | ||
|
|
6a6e8fffbc | ||
|
|
51f110bc7b | ||
|
|
4ddfe41f23 | ||
|
|
ddd06fc2ac | ||
|
|
1bccb93fcb | ||
|
|
db80781716 | ||
|
|
a2a99f9b57 | ||
|
|
cd4a68cc96 | ||
|
|
b37eb68993 | ||
|
|
b13958a8d6 | ||
|
|
17e2b234a0 | ||
|
|
4ef1775e9a | ||
|
|
363977b474 | ||
|
|
05ae0ea5f2 | ||
|
|
8de7a81674 | ||
|
|
d32b195a57 | ||
|
|
267d9f1831 | ||
|
|
17a42a0c11 | ||
|
|
a219d25cac | ||
|
|
ce731020a7 | ||
|
|
fc9082c422 | ||
|
|
4872ba2ea0 | ||
|
|
70bb3c34ce | ||
|
|
1cde50f050 | ||
|
|
e9dd4ecdf0 | ||
|
|
f863530653 | ||
|
|
4f609cfa30 | ||
|
|
78bf808322 | ||
|
|
afe1da92c5 | ||
|
|
9985224966 | ||
|
|
02679d6df3 | ||
|
|
c2bbd468c4 | ||
|
|
46ab8f8d78 | ||
|
|
54321c5240 | ||
|
|
5fcbf2528f | ||
|
|
ea096db8e4 | ||
|
|
0caeb68680 | ||
|
|
2b9ba1d520 | ||
|
|
80f5ccd357 | ||
|
|
820169c5c6 | ||
|
|
aff75953e3 | ||
|
|
c0e09374a8 | ||
|
|
57976b4085 | ||
|
|
899f1a1844 | ||
|
|
41a1af863e | ||
|
|
778ec9b88f | ||
|
|
d42fcc3786 | ||
|
|
5d4f758c47 | ||
|
|
a8a17a223a | ||
|
|
aa40b04576 | ||
|
|
daac90c4e1 | ||
|
|
72b2c83392 | ||
|
|
c3410a3d91 | ||
|
|
173c1820e1 | ||
|
|
684f4ba1a6 | ||
|
|
6d84c5b9e3 | ||
|
|
4b522a2455 | ||
|
|
1e1c46ae1b | ||
|
|
d6b3acdb62 | ||
|
|
037d89a320 | ||
|
|
30eb3c4a99 | ||
|
|
0966d44c0f | ||
|
|
40e759c983 | ||
|
|
141ca6777c | ||
|
|
3c16a19269 | ||
|
|
b3c6d79f51 | ||
|
|
0c56b6d504 | ||
|
|
3d2da88da9 | ||
|
|
80c06d6b59 | ||
|
|
e536a638c9 | ||
|
|
bc397002d4 | ||
|
|
2a95d031ea | ||
|
|
1dce1eff48 | ||
|
|
5b1d8666b3 | ||
|
|
187a5b1908 | ||
|
|
7ab7941ddd | ||
|
|
c69d63c1f8 | ||
|
|
743b350fdd | ||
|
|
1ac610da1a | ||
|
|
bcf0fa073e | ||
|
|
140380716d | ||
|
|
143df87fee | ||
|
|
6d895843dc | ||
|
|
65e6d5475f | ||
|
|
15609cdbc7 | ||
|
|
f876c728ad | ||
|
|
f34462e3c3 | ||
|
|
ea0bf5e4c8 | ||
|
|
14d1b82f6b | ||
|
|
ed43ddd79d | ||
|
|
23192a3be7 | ||
|
|
72e3d464b8 | ||
|
|
a6985075b9 | ||
|
|
4d5494912d | ||
|
|
50982229e1 | ||
|
|
6977a4a18b | ||
|
|
ab1bf2ad44 | ||
|
|
c451f742aa | ||
|
|
034d89876d | ||
|
|
4a88ea5c03 | ||
|
|
95c6d41c35 | ||
|
|
2a9ed0abca | ||
|
|
3ff6b1bf64 | ||
|
|
a67276ccc2 | ||
|
|
87b51a6fd5 | ||
|
|
940836b25b | ||
|
|
634b723b5d | ||
|
|
af0c9b76c4 | ||
|
|
2142ef20c5 | ||
|
|
224ce5fe81 | ||
|
|
4bb9d07dde | ||
|
|
2054dfd83d | ||
|
|
6699f5c2cc | ||
|
|
70bde8b2bc | ||
|
|
ff73e5f53c | ||
|
|
0609188d3f | ||
|
|
99cd1ccfe5 | ||
|
|
dccc583b5d | ||
|
|
ac435b7890 | ||
|
|
37fc589896 | ||
|
|
5d789a01b7 | ||
|
|
ca0ff0d630 | ||
|
|
969b38586e | ||
|
|
e3eca424f1 | ||
|
|
a6355e298e | ||
|
|
c0f47a58f2 | ||
|
|
dc845fa2f4 | ||
|
|
7e855c83b3 | ||
|
|
3b8a9e0963 | ||
|
|
4445834fd3 | ||
|
|
19a619ff65 | ||
|
|
66a538dc9c | ||
|
|
1a6863f4b1 | ||
|
|
fbd9919afa | ||
|
|
eec8bc73f4 | ||
|
|
5720d40fee | ||
|
|
38e0cba675 | ||
|
|
4c5a0d663e | ||
|
|
093df15fac | ||
|
|
957430e675 | ||
|
|
14035f407e | ||
|
|
bf2b2525a9 | ||
|
|
4edb9cd6b9 | ||
|
|
c38d242bea | ||
|
|
c6ab6f94e7 | ||
|
|
36151d1ba9 | ||
|
|
1d5d184720 | ||
|
|
0119fd03a6 | ||
|
|
0a14297b48 | ||
|
|
442efa0607 | ||
|
|
6ad4cc317c | ||
|
|
57bec976ae | ||
|
|
641493e31a | ||
|
|
5b4e9ad982 | ||
|
|
950a5ad9ea | ||
|
|
fcfdd633f6 | ||
|
|
ebb18fa57d | ||
|
|
58b0ca585c | ||
|
|
5bc1c2de2d | ||
|
|
ec00613202 | ||
|
|
02ec3a5f48 | ||
|
|
ac3bae00fc | ||
|
|
e54828a7b8 | ||
|
|
f2acde789d | ||
|
|
9b49f63a97 | ||
|
|
14bcc6f2fc | ||
|
|
975a2f3632 | ||
|
|
5ff8f75917 | ||
|
|
db7e81e9d0 | ||
|
|
6a8039e76a | ||
|
|
56bf8364cd | ||
|
|
75750e3a79 | ||
|
|
bb5207ad77 | ||
|
|
b51d795e04 | ||
|
|
d12819932a | ||
|
|
d812c86812 | ||
|
|
1625cd5a9f | ||
|
|
756c3d0503 | ||
|
|
f884447b26 | ||
|
|
555394b95e | ||
|
|
00510a6af8 | ||
|
|
6c0839e197 | ||
|
|
5b79379c90 | ||
|
|
47fed45700 | ||
|
|
80d695f3a2 | ||
|
|
8d4f40ccd2 | ||
|
|
765bad5edd | ||
|
|
0c0382c9b5 | ||
|
|
bbab6149e8 | ||
|
|
ce9387f1ab | ||
|
|
d126c5736a | ||
|
|
5048d54d32 | ||
|
|
f22fe6af76 | ||
|
|
8034f289d1 | ||
|
|
eed61ac510 | ||
|
|
412d6096c0 | ||
|
|
c289ae07d2 | ||
|
|
87f78b07b3 | ||
|
|
5e2db77ef9 | ||
|
|
c992072286 | ||
|
|
0ef826c090 | ||
|
|
5da75c3915 | ||
|
|
8222baa7ed | ||
|
|
7b76b51314 | ||
|
|
c96dbbd3b5 | ||
|
|
da6ccedf24 | ||
|
|
13b37a835f | ||
|
|
863fa33309 | ||
|
|
9f4c54a212 | ||
|
|
2a7bff4c0e | ||
|
|
17406d1aab | ||
|
|
6537c53d43 | ||
|
|
b4bd10521a | ||
|
|
65cbef1962 | ||
|
|
a8d355900a | ||
|
|
ffd9c381ce | ||
|
|
2a0bce0beb | ||
|
|
f1f7b81088 | ||
|
|
f9827f958b | ||
|
|
3e2afc35ba | ||
|
|
c65dd86d5e | ||
|
|
2d6c0388af | ||
|
|
4d19d87720 | ||
|
|
5eabaf98e0 | ||
|
|
d1f0e9ae9f | ||
|
|
cd56039ab7 | ||
|
|
55515fee95 | ||
|
|
13d43a2d31 | ||
|
|
001261433b | ||
|
|
03bf595525 | ||
|
|
4ebedace1e | ||
|
|
b23276c002 | ||
|
|
bf708cb8bc | ||
|
|
a550d082a3 | ||
|
|
6c1a7449fe | ||
|
|
f0c9b55036 | ||
|
|
209badf10c | ||
|
|
242dde4480 | ||
|
|
2df0dd1f70 | ||
|
|
98a6d138d4 | ||
|
|
38f06ab373 | ||
|
|
3c1300721c | ||
|
|
61003c8079 | ||
|
|
01850c7399 | ||
|
|
b9c381e26f | ||
|
|
542554fb2c | ||
|
|
bdf18fa862 | ||
|
|
afc411c51b | ||
|
|
a59163e56c | ||
|
|
8391b19abb | ||
|
|
3925c7ff95 | ||
|
|
dbed110d02 | ||
|
|
f978355520 | ||
|
|
4748e6f54d | ||
|
|
91a4c64390 | ||
|
|
600a107699 | ||
|
|
2746c0b0f1 | ||
|
|
701a6115f8 | ||
|
|
56b00addc4 | ||
|
|
02e35ee002 | ||
|
|
5208e4a4ca | ||
|
|
7381a867ba | ||
|
|
f41ce6619f | ||
|
|
933427310d | ||
|
|
8b0a1817b3 | ||
|
|
04c9a5c008 | ||
|
|
bbc8c091e6 | ||
|
|
f3228713bc | ||
|
|
fa5333784b | ||
|
|
0dbf0cc81f | ||
|
|
196a56726e | ||
|
|
fe857dcb1b | ||
|
|
aa0ed5dbd0 | ||
|
|
a9e21c282a | ||
|
|
9a15a54885 | ||
|
|
91dcc349de | ||
|
|
fa41bfd06a | ||
|
|
8839c34d53 | ||
|
|
11ceaa8850 | ||
|
|
2a9f7db1e2 | ||
|
|
22338ed478 | ||
|
|
59a21158a6 | ||
|
|
93ce96d011 | ||
|
|
cc2f04b0e4 | ||
|
|
aa5191fa1b | ||
|
|
4e41a5583d | ||
|
|
ded8fad5e4 | ||
|
|
3702bc8413 | ||
|
|
7896d2eef7 | ||
|
|
da0f470f1c | ||
|
|
8fddb742df | ||
|
|
95fe26f3e3 | ||
|
|
1e10381143 | ||
|
|
96cbce52f9 | ||
|
|
0ea2ca3141 | ||
|
|
42877dd915 | ||
|
|
790c11c453 | ||
|
|
1ac4baa00a | ||
|
|
fc32286045 | ||
|
|
ee1131f254 | ||
|
|
c5dc3ee3b6 | ||
|
|
dd593b1035 | ||
|
|
4814786556 | ||
|
|
4f0a936ca0 | ||
|
|
aec372ca31 | ||
|
|
d2a739f8f6 | ||
|
|
165110872b | ||
|
|
6ab4e9f533 | ||
|
|
cf541d62ea | ||
|
|
19fc58dd1f | ||
|
|
ac9c475849 | ||
|
|
ddf99ab706 | ||
|
|
0056984d4b | ||
|
|
44fb276464 | ||
|
|
558a1b4050 | ||
|
|
8f934f2648 | ||
|
|
403b9a8310 | ||
|
|
33436488e2 | ||
|
|
3c28366fed | ||
|
|
b80abe6c05 | ||
|
|
8cb47817f6 | ||
|
|
23a80b01b6 | ||
|
|
b30614e9d8 | ||
|
|
e86089a9f3 | ||
|
|
3ceba7a147 | ||
|
|
c491133aff | ||
|
|
37418a7630 | ||
|
|
73a9c002e0 | ||
|
|
3d48fa7382 | ||
|
|
8e22dd1b13 | ||
|
|
7807fa7cc2 | ||
|
|
cd380973df | ||
|
|
01d681faa3 | ||
|
|
c231b663a3 | ||
|
|
8306b6bde6 | ||
|
|
dc011af90d | ||
|
|
c27e3ef436 | ||
|
|
760cc9ba5a | ||
|
|
5665e9c0e7 | ||
|
|
ad53429cf1 | ||
|
|
15298b0409 | ||
|
|
cfa710037c | ||
|
|
a08dd85efd | ||
|
|
469d36d979 | ||
|
|
7ae8b2cdeb | ||
|
|
cf148db75d | ||
|
|
738c7ab43e | ||
|
|
82fb7f8cf0 | ||
|
|
e0f2ab9c01 | ||
|
|
e0183217b6 | ||
|
|
f066b7fb9c | ||
|
|
0c6e2b566b | ||
|
|
f02e24437a | ||
|
|
e9534be1e6 | ||
|
|
7056997e49 | ||
|
|
155af19aaa | ||
|
|
f369fdf6f2 | ||
|
|
510a95bd6d | ||
|
|
da90dbe645 | ||
|
|
b006c5f914 | ||
|
|
3f75d46a16 | ||
|
|
14c2a244b7 | ||
|
|
94ff9d7346 | ||
|
|
14196167b0 | ||
|
|
d70959c34c | ||
|
|
67c6f27064 | ||
|
|
6bfbb27856 | ||
|
|
baac3749b3 | ||
|
|
d377181b25 | ||
|
|
ebd6a11f3a | ||
|
|
0a47412e8c | ||
|
|
4f668bf558 | ||
|
|
9248c5a987 | ||
|
|
b0ed190591 | ||
|
|
37357b2d63 | ||
|
|
9b06e0a3b7 | ||
|
|
5a5912ea66 | ||
|
|
b1c7317cf6 | ||
|
|
a0fe4cf5e4 | ||
|
|
7fe3c965e3 | ||
|
|
fd9b3c2767 | ||
|
|
fb9e188e36 | ||
|
|
c93d8cecfc | ||
|
|
94b46e57f1 | ||
|
|
9046acbe68 | ||
|
|
075bbe2aef | ||
|
|
b52d078986 | ||
|
|
0a9c4914aa | ||
|
|
f284008fb5 | ||
|
|
4759254e10 | ||
|
|
e22d377203 | ||
|
|
0787e3e595 | ||
|
|
c1194d558c | ||
|
|
952b10a9f6 | ||
|
|
f55851bdc8 | ||
|
|
76bb361393 | ||
|
|
81c8e8d898 | ||
|
|
f4e872c782 | ||
|
|
ddcb500c51 | ||
|
|
e8664c0ce4 | ||
|
|
3b002ddc86 | ||
|
|
1770da545d | ||
|
|
de3e69f846 | ||
|
|
cdf1233065 | ||
|
|
6b70f0b25f | ||
|
|
4fe51f7cce | ||
|
|
7221bf9b25 | ||
|
|
61f20f5449 | ||
|
|
5dafbb54de | ||
|
|
ec8ab28a22 | ||
|
|
aaa6d0c71f | ||
|
|
ea74aca165 | ||
|
|
9b119765fc | ||
|
|
df02496145 | ||
|
|
31d285813e | ||
|
|
f9fc5792fd | ||
|
|
6ad9c5ae64 | ||
|
|
016abda6d7 | ||
|
|
2eea6c95b9 | ||
|
|
7f1946de34 | ||
|
|
d0a81cabab | ||
|
|
df67f4ef34 | ||
|
|
ed11e1128a | ||
|
|
ed1c7cac17 | ||
|
|
7c115aa9c8 | ||
|
|
3ffddcba92 | ||
|
|
833b702b90 | ||
|
|
b885d453a2 | ||
|
|
7163fde724 | ||
|
|
830e3d1f64 | ||
|
|
637463a068 | ||
|
|
e69536f540 | ||
|
|
c516780289 | ||
|
|
eb1b869b73 | ||
|
|
703ccc8d91 | ||
|
|
45b9366dd0 | ||
|
|
382c6fda89 | ||
|
|
0f80de86b2 | ||
|
|
bc11cdd8d5 | ||
|
|
62b20cd884 | ||
|
|
ae8ed75ae5 | ||
|
|
c8cc81cf4a | ||
|
|
79c8b6cbc2 | ||
|
|
58253968fc | ||
|
|
dbd75209df | ||
|
|
da01848855 | ||
|
|
0b7be1175d | ||
|
|
3479a7e438 | ||
|
|
4f5c67e7df | ||
|
|
b99495ebbb | ||
|
|
0061978db8 | ||
|
|
e011f793ec | ||
|
|
5b47292366 | ||
|
|
8eff2df39c | ||
|
|
7bfe0ddf80 | ||
|
|
d6fa853a37 | ||
|
|
553cceab42 | ||
|
|
5bfaae9202 | ||
|
|
9359669cd4 | ||
|
|
8b039e0447 | ||
|
|
c64c080159 | ||
|
|
bcaf283c35 | ||
|
|
31a433cda2 | ||
|
|
e647f3626e | ||
|
|
3491717c26 | ||
|
|
45a13f5f4e | ||
|
|
6884ce3f3e | ||
|
|
5f4efc9148 | ||
|
|
d481294519 | ||
|
|
7ebbbe5c29 | ||
|
|
9ff303b8c0 | ||
|
|
4463421028 | ||
|
|
d75f56b914 | ||
|
|
a4834f4a83 | ||
|
|
0b42dbc3c3 | ||
|
|
2013ba497c | ||
|
|
2be37217cf | ||
|
|
55a03c2e2b | ||
|
|
27dbcf0066 | ||
|
|
ec194d73d2 | ||
|
|
1d9bb54073 | ||
|
|
5cf4a47b48 | ||
|
|
eec60d6309 | ||
|
|
37c602a477 | ||
|
|
8e604f888a | ||
|
|
531091906d | ||
|
|
e046b35b97 | ||
|
|
eb2ddc4798 | ||
|
|
aee9312cea | ||
|
|
6a99a5cb9f | ||
|
|
8e0ca88421 | ||
|
|
c7e55db4e0 | ||
|
|
1b1c740b29 | ||
|
|
ad5436e3f6 | ||
|
|
20606a0043 | ||
|
|
80e9902324 | ||
|
|
741e65a3a1 | ||
|
|
6aff8c2f5e | ||
|
|
e5770af2fa | ||
|
|
ae094d3479 | ||
|
|
f49c280a7f | ||
|
|
ae812e55af | ||
|
|
489101668c | ||
|
|
f8fd5c067c | ||
|
|
39fc9b73e9 | ||
|
|
80a5138115 | ||
|
|
bc844105b2 | ||
|
|
7de0fe467a | ||
|
|
0a025aabfd | ||
|
|
7a8b1645ef | ||
|
|
b3729e4666 | ||
|
|
9c4dbbc3f8 | ||
|
|
ca6cb34d98 | ||
|
|
fc442d7862 | ||
|
|
3ac5af47f2 | ||
|
|
bb37a1c1ce | ||
|
|
206799ff1c | ||
|
|
f3de83707f | ||
|
|
5be1d1be69 | ||
|
|
08e85f6be9 | ||
|
|
acdf351047 | ||
|
|
c2ff50c917 | ||
|
|
363b9ccc7f | ||
|
|
74367a65cc | ||
|
|
2221d3307a | ||
|
|
4ff34b3e53 | ||
|
|
191da1ecaf | ||
|
|
77db2bd3d1 | ||
|
|
758f2ee834 | ||
|
|
c5a6ca5650 | ||
|
|
b409523d5c | ||
|
|
8cd0aec417 | ||
|
|
a4cddf4f7f | ||
|
|
d970e757eb | ||
|
|
083d9f76c6 | ||
|
|
2003d08c59 | ||
|
|
9cf4653425 | ||
|
|
4f6bb00996 | ||
|
|
25f97bbe62 | ||
|
|
44d403cf9c | ||
|
|
8682ae4a54 | ||
|
|
f98a29a1e0 | ||
|
|
2932dba5cc | ||
|
|
24914e90d1 | ||
|
|
587c8a0347 | ||
|
|
62224debd2 | ||
|
|
871e2ccbbf | ||
|
|
4b5ce3913e | ||
|
|
1d917561fe | ||
|
|
4f36c1de06 | ||
|
|
5ffa34dacb | ||
|
|
60d4a3e1b5 | ||
|
|
3e0558c0e9 | ||
|
|
c74d90407b | ||
|
|
557831fa0d | ||
|
|
afd312f26a | ||
|
|
bb0338870a | ||
|
|
fb8ccfe824 | ||
|
|
0b4ff97a1a | ||
|
|
00dd5db226 | ||
|
|
71093bbe1b | ||
|
|
088e187e6a |
12
.github/FUNDING.yml
vendored
Normal file
@@ -0,0 +1,12 @@
|
||||
# These are supported funding model platforms
|
||||
|
||||
github: [drakkan] # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2]
|
||||
patreon: # Replace with a single Patreon username
|
||||
open_collective: # Replace with a single Open Collective username
|
||||
ko_fi: # Replace with a single Ko-fi username
|
||||
tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
|
||||
community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
|
||||
liberapay: # Replace with a single Liberapay username
|
||||
issuehunt: # Replace with a single IssueHunt username
|
||||
otechie: # Replace with a single Otechie username
|
||||
custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']
|
||||
20
.github/dependabot.yml
vendored
Normal file
@@ -0,0 +1,20 @@
|
||||
version: 2
|
||||
|
||||
updates:
|
||||
- package-ecosystem: "gomod"
|
||||
directory: "/"
|
||||
schedule:
|
||||
interval: "weekly"
|
||||
open-pull-requests-limit: 2
|
||||
|
||||
- package-ecosystem: "docker"
|
||||
directory: "/"
|
||||
schedule:
|
||||
interval: "weekly"
|
||||
open-pull-requests-limit: 2
|
||||
|
||||
- package-ecosystem: "github-actions"
|
||||
directory: "/"
|
||||
schedule:
|
||||
interval: "weekly"
|
||||
open-pull-requests-limit: 2
|
||||
2
.github/workflows/.editorconfig
vendored
Normal file
@@ -0,0 +1,2 @@
|
||||
[*.yml]
|
||||
indent_size = 2
|
||||
554
.github/workflows/development.yml
vendored
Normal file
@@ -0,0 +1,554 @@
|
||||
name: CI
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [2.4.x]
|
||||
pull_request:
|
||||
|
||||
jobs:
|
||||
test-deploy:
|
||||
name: Test and deploy
|
||||
runs-on: ${{ matrix.os }}
|
||||
strategy:
|
||||
matrix:
|
||||
go: [1.19]
|
||||
os: [ubuntu-latest, macos-latest]
|
||||
upload-coverage: [true]
|
||||
include:
|
||||
- go: 1.19
|
||||
os: windows-latest
|
||||
upload-coverage: false
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: ${{ matrix.go }}
|
||||
|
||||
- name: Build for Linux/macOS x86_64
|
||||
if: startsWith(matrix.os, 'windows-') != true
|
||||
run: |
|
||||
go build -trimpath -tags nopgxregisterdefaulttypes -ldflags "-s -w -X github.com/drakkan/sftpgo/v2/internal/version.commit=`git describe --always --abbrev=8 --dirty` -X github.com/drakkan/sftpgo/v2/internal/version.date=`date -u +%FT%TZ`" -o sftpgo
|
||||
cd tests/eventsearcher
|
||||
go build -trimpath -ldflags "-s -w" -o eventsearcher
|
||||
cd -
|
||||
cd tests/ipfilter
|
||||
go build -trimpath -ldflags "-s -w" -o ipfilter
|
||||
cd -
|
||||
./sftpgo initprovider
|
||||
./sftpgo resetprovider --force
|
||||
|
||||
- name: Build for macOS arm64
|
||||
if: startsWith(matrix.os, 'macos-') == true
|
||||
run: CGO_ENABLED=1 GOOS=darwin GOARCH=arm64 SDKROOT=$(xcrun --sdk macosx --show-sdk-path) go build -trimpath -tags nopgxregisterdefaulttypes -ldflags "-s -w -X github.com/drakkan/sftpgo/v2/internal/version.commit=`git describe --always --abbrev=8 --dirty` -X github.com/drakkan/sftpgo/v2/internal/version.date=`date -u +%FT%TZ`" -o sftpgo_arm64
|
||||
|
||||
- name: Build for Windows
|
||||
if: startsWith(matrix.os, 'windows-')
|
||||
run: |
|
||||
$GIT_COMMIT = (git describe --always --abbrev=8 --dirty) | Out-String
|
||||
$DATE_TIME = ([datetime]::Now.ToUniversalTime().toString("yyyy-MM-ddTHH:mm:ssZ")) | Out-String
|
||||
$LATEST_TAG = ((git describe --tags $(git rev-list --tags --max-count=1)) | Out-String).Trim()
|
||||
$REV_LIST=$LATEST_TAG+"..HEAD"
|
||||
$COMMITS_FROM_TAG= ((git rev-list $REV_LIST --count) | Out-String).Trim()
|
||||
$FILE_VERSION = $LATEST_TAG.substring(1) + "." + $COMMITS_FROM_TAG
|
||||
go install github.com/tc-hib/go-winres@latest
|
||||
go-winres simply --arch amd64 --product-version $LATEST_TAG-dev-$GIT_COMMIT --file-version $FILE_VERSION --file-description "SFTPGo server" --product-name SFTPGo --copyright "AGPL-3.0" --original-filename sftpgo.exe --icon .\windows-installer\icon.ico
|
||||
go build -trimpath -tags nopgxregisterdefaulttypes -ldflags "-s -w -X github.com/drakkan/sftpgo/v2/internal/version.commit=$GIT_COMMIT -X github.com/drakkan/sftpgo/v2/internal/version.date=$DATE_TIME" -o sftpgo.exe
|
||||
cd tests/eventsearcher
|
||||
go build -trimpath -ldflags "-s -w" -o eventsearcher.exe
|
||||
cd ../..
|
||||
cd tests/ipfilter
|
||||
go build -trimpath -ldflags "-s -w" -o ipfilter.exe
|
||||
cd ../..
|
||||
mkdir arm64
|
||||
$Env:CGO_ENABLED='0'
|
||||
$Env:GOOS='windows'
|
||||
$Env:GOARCH='arm64'
|
||||
go-winres simply --arch arm64 --product-version $LATEST_TAG-dev-$GIT_COMMIT --file-version $FILE_VERSION --file-description "SFTPGo server" --product-name SFTPGo --copyright "AGPL-3.0" --original-filename sftpgo.exe --icon .\windows-installer\icon.ico
|
||||
go build -trimpath -tags nopgxregisterdefaulttypes,nosqlite -ldflags "-s -w -X github.com/drakkan/sftpgo/v2/internal/version.commit=$GIT_COMMIT -X github.com/drakkan/sftpgo/v2/internal/version.date=$DATE_TIME" -o .\arm64\sftpgo.exe
|
||||
mkdir x86
|
||||
$Env:GOARCH='386'
|
||||
go-winres simply --arch 386 --product-version $LATEST_TAG-dev-$GIT_COMMIT --file-version $FILE_VERSION --file-description "SFTPGo server" --product-name SFTPGo --copyright "AGPL-3.0" --original-filename sftpgo.exe --icon .\windows-installer\icon.ico
|
||||
go build -trimpath -tags nopgxregisterdefaulttypes,nosqlite -ldflags "-s -w -X github.com/drakkan/sftpgo/v2/internal/version.commit=$GIT_COMMIT -X github.com/drakkan/sftpgo/v2/internal/version.date=$DATE_TIME" -o .\x86\sftpgo.exe
|
||||
Remove-Item Env:\CGO_ENABLED
|
||||
Remove-Item Env:\GOOS
|
||||
Remove-Item Env:\GOARCH
|
||||
|
||||
- name: Run test cases using SQLite provider
|
||||
run: go test -v -tags nopgxregisterdefaulttypes -p 1 -timeout 15m ./... -coverprofile=coverage.txt -covermode=atomic
|
||||
|
||||
- name: Upload coverage to Codecov
|
||||
if: ${{ matrix.upload-coverage }}
|
||||
uses: codecov/codecov-action@v3
|
||||
with:
|
||||
file: ./coverage.txt
|
||||
fail_ci_if_error: false
|
||||
|
||||
- name: Run test cases using bolt provider
|
||||
run: |
|
||||
go test -v -tags nopgxregisterdefaulttypes -p 1 -timeout 2m ./internal/config -covermode=atomic
|
||||
go test -v -tags nopgxregisterdefaulttypes -p 1 -timeout 5m ./internal/common -covermode=atomic
|
||||
go test -v -tags nopgxregisterdefaulttypes -p 1 -timeout 5m ./internal/httpd -covermode=atomic
|
||||
go test -v -tags nopgxregisterdefaulttypes -p 1 -timeout 8m ./internal/sftpd -covermode=atomic
|
||||
go test -v -tags nopgxregisterdefaulttypes -p 1 -timeout 5m ./internal/ftpd -covermode=atomic
|
||||
go test -v -tags nopgxregisterdefaulttypes -p 1 -timeout 5m ./internal/webdavd -covermode=atomic
|
||||
go test -v -tags nopgxregisterdefaulttypes -p 1 -timeout 2m ./internal/telemetry -covermode=atomic
|
||||
go test -v -tags nopgxregisterdefaulttypes -p 1 -timeout 2m ./internal/mfa -covermode=atomic
|
||||
go test -v -tags nopgxregisterdefaulttypes -p 1 -timeout 2m ./internal/command -covermode=atomic
|
||||
env:
|
||||
SFTPGO_DATA_PROVIDER__DRIVER: bolt
|
||||
SFTPGO_DATA_PROVIDER__NAME: 'sftpgo_bolt.db'
|
||||
|
||||
- name: Run test cases using memory provider
|
||||
run: go test -v -tags nopgxregisterdefaulttypes -p 1 -timeout 15m ./... -covermode=atomic
|
||||
env:
|
||||
SFTPGO_DATA_PROVIDER__DRIVER: memory
|
||||
SFTPGO_DATA_PROVIDER__NAME: ''
|
||||
|
||||
- name: Prepare build artifact for macOS
|
||||
if: startsWith(matrix.os, 'macos-') == true
|
||||
run: |
|
||||
mkdir -p output/{init,bash_completion,zsh_completion}
|
||||
cp sftpgo output/sftpgo_x86_64
|
||||
cp sftpgo_arm64 output/
|
||||
cp sftpgo.json output/
|
||||
cp -r templates output/
|
||||
cp -r static output/
|
||||
cp -r openapi output/
|
||||
cp init/com.github.drakkan.sftpgo.plist output/init/
|
||||
./sftpgo gen completion bash > output/bash_completion/sftpgo
|
||||
./sftpgo gen completion zsh > output/zsh_completion/_sftpgo
|
||||
./sftpgo gen man -d output/man/man1
|
||||
gzip output/man/man1/*
|
||||
|
||||
- name: Prepare Windows installer
|
||||
if: ${{ startsWith(matrix.os, 'windows-') && github.event_name != 'pull_request' }}
|
||||
run: |
|
||||
Remove-Item -LiteralPath "output" -Force -Recurse -ErrorAction Ignore
|
||||
mkdir output
|
||||
copy .\sftpgo.exe .\output
|
||||
copy .\sftpgo.json .\output
|
||||
copy .\sftpgo.db .\output
|
||||
copy .\LICENSE .\output\LICENSE.txt
|
||||
mkdir output\templates
|
||||
xcopy .\templates .\output\templates\ /E
|
||||
mkdir output\static
|
||||
xcopy .\static .\output\static\ /E
|
||||
mkdir output\openapi
|
||||
xcopy .\openapi .\output\openapi\ /E
|
||||
$LATEST_TAG = ((git describe --tags $(git rev-list --tags --max-count=1)) | Out-String).Trim()
|
||||
$REV_LIST=$LATEST_TAG+"..HEAD"
|
||||
$COMMITS_FROM_TAG= ((git rev-list $REV_LIST --count) | Out-String).Trim()
|
||||
$Env:SFTPGO_ISS_DEV_VERSION = $LATEST_TAG + "." + $COMMITS_FROM_TAG
|
||||
$CERT_PATH=(Get-Location -PSProvider FileSystem).ProviderPath + "\cert.pfx"
|
||||
[IO.File]::WriteAllBytes($CERT_PATH,[System.Convert]::FromBase64String($Env:CERT_DATA))
|
||||
certutil -f -p "$Env:CERT_PASS" -importpfx MY "$CERT_PATH"
|
||||
rm "$CERT_PATH"
|
||||
& 'C:/Program Files (x86)/Windows Kits/10/bin/10.0.20348.0/x86/signtool.exe' sign /sm /tr http://timestamp.sectigo.com /td sha256 /fd sha256 /n "Nicola Murino" /d "SFTPGo" .\sftpgo.exe
|
||||
& 'C:/Program Files (x86)/Windows Kits/10/bin/10.0.20348.0/x86/signtool.exe' sign /sm /tr http://timestamp.sectigo.com /td sha256 /fd sha256 /n "Nicola Murino" /d "SFTPGo" .\arm64\sftpgo.exe
|
||||
& 'C:/Program Files (x86)/Windows Kits/10/bin/10.0.20348.0/x86/signtool.exe' sign /sm /tr http://timestamp.sectigo.com /td sha256 /fd sha256 /n "Nicola Murino" /d "SFTPGo" .\x86\sftpgo.exe
|
||||
$INNO_S='/Ssigntool=$qC:/Program Files (x86)/Windows Kits/10/bin/10.0.20348.0/x86/signtool.exe$q sign /sm /tr http://timestamp.sectigo.com /td sha256 /fd sha256 /n $qNicola Murino$q /d $qSFTPGo$q $f'
|
||||
iscc "$INNO_S" .\windows-installer\sftpgo.iss
|
||||
|
||||
rm .\output\sftpgo.exe
|
||||
rm .\output\sftpgo.db
|
||||
copy .\arm64\sftpgo.exe .\output
|
||||
(Get-Content .\output\sftpgo.json).replace('"sqlite"', '"bolt"') | Set-Content .\output\sftpgo.json
|
||||
$Env:SFTPGO_DATA_PROVIDER__DRIVER='bolt'
|
||||
$Env:SFTPGO_DATA_PROVIDER__NAME='.\output\sftpgo.db'
|
||||
.\sftpgo.exe initprovider
|
||||
Remove-Item Env:\SFTPGO_DATA_PROVIDER__DRIVER
|
||||
Remove-Item Env:\SFTPGO_DATA_PROVIDER__NAME
|
||||
$Env:SFTPGO_ISS_ARCH='arm64'
|
||||
iscc "$INNO_S" .\windows-installer\sftpgo.iss
|
||||
|
||||
rm .\output\sftpgo.exe
|
||||
copy .\x86\sftpgo.exe .\output
|
||||
$Env:SFTPGO_ISS_ARCH='x86'
|
||||
iscc "$INNO_S" .\windows-installer\sftpgo.iss
|
||||
certutil -delstore MY "Nicola Murino"
|
||||
env:
|
||||
CERT_DATA: ${{ secrets.CERT_DATA }}
|
||||
CERT_PASS: ${{ secrets.CERT_PASS }}
|
||||
|
||||
- name: Upload Windows installer x86_64 artifact
|
||||
if: ${{ startsWith(matrix.os, 'windows-') && github.event_name != 'pull_request' }}
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: sftpgo_windows_installer_x86_64
|
||||
path: ./sftpgo_windows_x86_64.exe
|
||||
|
||||
- name: Upload Windows installer arm64 artifact
|
||||
if: ${{ startsWith(matrix.os, 'windows-') && github.event_name != 'pull_request' }}
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: sftpgo_windows_installer_arm64
|
||||
path: ./sftpgo_windows_arm64.exe
|
||||
|
||||
- name: Upload Windows installer x86 artifact
|
||||
if: ${{ startsWith(matrix.os, 'windows-') && github.event_name != 'pull_request' }}
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: sftpgo_windows_installer_x86
|
||||
path: ./sftpgo_windows_x86.exe
|
||||
|
||||
- name: Prepare build artifact for Windows
|
||||
if: startsWith(matrix.os, 'windows-')
|
||||
run: |
|
||||
Remove-Item -LiteralPath "output" -Force -Recurse -ErrorAction Ignore
|
||||
mkdir output
|
||||
copy .\sftpgo.exe .\output
|
||||
mkdir output\arm64
|
||||
copy .\arm64\sftpgo.exe .\output\arm64
|
||||
mkdir output\x86
|
||||
copy .\x86\sftpgo.exe .\output\x86
|
||||
copy .\sftpgo.json .\output
|
||||
(Get-Content .\output\sftpgo.json).replace('"sqlite"', '"bolt"') | Set-Content .\output\sftpgo.json
|
||||
mkdir output\templates
|
||||
xcopy .\templates .\output\templates\ /E
|
||||
mkdir output\static
|
||||
xcopy .\static .\output\static\ /E
|
||||
mkdir output\openapi
|
||||
xcopy .\openapi .\output\openapi\ /E
|
||||
|
||||
- name: Upload build artifact
|
||||
if: startsWith(matrix.os, 'ubuntu-') != true
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: sftpgo-${{ matrix.os }}-go-${{ matrix.go }}
|
||||
path: output
|
||||
|
||||
test-build-flags:
|
||||
name: Test build flags
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: 1.19
|
||||
|
||||
- name: Build
|
||||
run: |
|
||||
go build -trimpath -tags nopgxregisterdefaulttypes,nogcs,nos3,noportable,nobolt,nomysql,nopgsql,nosqlite,nometrics,noazblob -ldflags "-s -w -X github.com/drakkan/sftpgo/v2/internal/internal/version.commit=`git describe --always --abbrev=8 --dirty` -X github.com/drakkan/sftpgo/v2/internal/version.date=`date -u +%FT%TZ`" -o sftpgo
|
||||
./sftpgo -v
|
||||
cp -r openapi static templates internal/bundle/
|
||||
go build -trimpath -tags nopgxregisterdefaulttypes,bundle -ldflags "-s -w -X github.com/drakkan/sftpgo/v2/internal/version.commit=`git describe --always --abbrev=8 --dirty` -X github.com/drakkan/sftpgo/v2/internal/version.date=`date -u +%FT%TZ`" -o sftpgo
|
||||
./sftpgo -v
|
||||
|
||||
test-goarch-386:
|
||||
name: Run test cases on 32-bit arch
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: 1.19
|
||||
|
||||
- name: Build
|
||||
run: |
|
||||
cd tests/eventsearcher
|
||||
go build -trimpath -ldflags "-s -w" -o eventsearcher
|
||||
cd -
|
||||
cd tests/ipfilter
|
||||
go build -trimpath -ldflags "-s -w" -o ipfilter
|
||||
cd -
|
||||
env:
|
||||
GOARCH: 386
|
||||
|
||||
- name: Run test cases
|
||||
run: go test -v -tags nopgxregisterdefaulttypes -p 1 -timeout 15m ./... -covermode=atomic
|
||||
env:
|
||||
SFTPGO_DATA_PROVIDER__DRIVER: memory
|
||||
SFTPGO_DATA_PROVIDER__NAME: ''
|
||||
GOARCH: 386
|
||||
|
||||
test-postgresql-mysql-crdb:
|
||||
name: Test with PgSQL/MySQL/Cockroach
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
services:
|
||||
postgres:
|
||||
image: postgres:latest
|
||||
env:
|
||||
POSTGRES_PASSWORD: postgres
|
||||
POSTGRES_DB: sftpgo
|
||||
options: >-
|
||||
--health-cmd pg_isready
|
||||
--health-interval 10s
|
||||
--health-timeout 5s
|
||||
--health-retries 5
|
||||
ports:
|
||||
- 5432:5432
|
||||
|
||||
mariadb:
|
||||
image: mariadb:latest
|
||||
env:
|
||||
MYSQL_ROOT_PASSWORD: mysql
|
||||
MYSQL_DATABASE: sftpgo
|
||||
MYSQL_USER: sftpgo
|
||||
MYSQL_PASSWORD: sftpgo
|
||||
options: >-
|
||||
--health-cmd "mysqladmin status -h 127.0.0.1 -P 3306 -u root -p$MYSQL_ROOT_PASSWORD"
|
||||
--health-interval 10s
|
||||
--health-timeout 5s
|
||||
--health-retries 6
|
||||
ports:
|
||||
- 3307:3306
|
||||
|
||||
mysql:
|
||||
image: mysql:latest
|
||||
env:
|
||||
MYSQL_ROOT_PASSWORD: mysql
|
||||
MYSQL_DATABASE: sftpgo
|
||||
MYSQL_USER: sftpgo
|
||||
MYSQL_PASSWORD: sftpgo
|
||||
options: >-
|
||||
--health-cmd "mysqladmin status -h 127.0.0.1 -P 3306 -u root -p$MYSQL_ROOT_PASSWORD"
|
||||
--health-interval 10s
|
||||
--health-timeout 5s
|
||||
--health-retries 6
|
||||
ports:
|
||||
- 3308:3306
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: 1.19
|
||||
|
||||
- name: Build
|
||||
run: |
|
||||
go build -trimpath -tags nopgxregisterdefaulttypes -ldflags "-s -w -X github.com/drakkan/sftpgo/v2/internal/version.commit=`git describe --always --abbrev=8 --dirty` -X github.com/drakkan/sftpgo/v2/internal/version.date=`date -u +%FT%TZ`" -o sftpgo
|
||||
cd tests/eventsearcher
|
||||
go build -trimpath -ldflags "-s -w" -o eventsearcher
|
||||
cd -
|
||||
cd tests/ipfilter
|
||||
go build -trimpath -ldflags "-s -w" -o ipfilter
|
||||
cd -
|
||||
|
||||
- name: Run tests using PostgreSQL provider
|
||||
run: |
|
||||
./sftpgo initprovider
|
||||
./sftpgo resetprovider --force
|
||||
go test -v -tags nopgxregisterdefaulttypes -p 1 -timeout 15m ./... -covermode=atomic
|
||||
env:
|
||||
SFTPGO_DATA_PROVIDER__DRIVER: postgresql
|
||||
SFTPGO_DATA_PROVIDER__NAME: sftpgo
|
||||
SFTPGO_DATA_PROVIDER__HOST: localhost
|
||||
SFTPGO_DATA_PROVIDER__PORT: 5432
|
||||
SFTPGO_DATA_PROVIDER__USERNAME: postgres
|
||||
SFTPGO_DATA_PROVIDER__PASSWORD: postgres
|
||||
|
||||
- name: Run tests using MySQL provider
|
||||
run: |
|
||||
./sftpgo initprovider
|
||||
./sftpgo resetprovider --force
|
||||
go test -v -tags nopgxregisterdefaulttypes -p 1 -timeout 15m ./... -covermode=atomic
|
||||
env:
|
||||
SFTPGO_DATA_PROVIDER__DRIVER: mysql
|
||||
SFTPGO_DATA_PROVIDER__NAME: sftpgo
|
||||
SFTPGO_DATA_PROVIDER__HOST: localhost
|
||||
SFTPGO_DATA_PROVIDER__PORT: 3308
|
||||
SFTPGO_DATA_PROVIDER__USERNAME: sftpgo
|
||||
SFTPGO_DATA_PROVIDER__PASSWORD: sftpgo
|
||||
|
||||
- name: Run tests using MariaDB provider
|
||||
run: |
|
||||
./sftpgo initprovider
|
||||
./sftpgo resetprovider --force
|
||||
go test -v -tags nopgxregisterdefaulttypes -p 1 -timeout 15m ./... -covermode=atomic
|
||||
env:
|
||||
SFTPGO_DATA_PROVIDER__DRIVER: mysql
|
||||
SFTPGO_DATA_PROVIDER__NAME: sftpgo
|
||||
SFTPGO_DATA_PROVIDER__HOST: localhost
|
||||
SFTPGO_DATA_PROVIDER__PORT: 3307
|
||||
SFTPGO_DATA_PROVIDER__USERNAME: sftpgo
|
||||
SFTPGO_DATA_PROVIDER__PASSWORD: sftpgo
|
||||
SFTPGO_DATA_PROVIDER__SQL_TABLES_PREFIX: prefix_
|
||||
|
||||
- name: Run tests using CockroachDB provider
|
||||
run: |
|
||||
docker run --rm --name crdb --health-cmd "curl -I http://127.0.0.1:8080" --health-interval 10s --health-timeout 5s --health-retries 6 -p 26257:26257 -d cockroachdb/cockroach:latest start-single-node --insecure --listen-addr :26257
|
||||
sleep 10
|
||||
docker exec crdb cockroach sql --insecure -e 'create database "sftpgo"'
|
||||
./sftpgo initprovider
|
||||
./sftpgo resetprovider --force
|
||||
go test -v -tags nopgxregisterdefaulttypes -p 1 -timeout 15m ./... -covermode=atomic
|
||||
docker stop crdb
|
||||
env:
|
||||
SFTPGO_DATA_PROVIDER__DRIVER: cockroachdb
|
||||
SFTPGO_DATA_PROVIDER__NAME: sftpgo
|
||||
SFTPGO_DATA_PROVIDER__HOST: localhost
|
||||
SFTPGO_DATA_PROVIDER__PORT: 26257
|
||||
SFTPGO_DATA_PROVIDER__USERNAME: root
|
||||
SFTPGO_DATA_PROVIDER__PASSWORD:
|
||||
SFTPGO_DATA_PROVIDER__SQL_TABLES_PREFIX: prefix_
|
||||
|
||||
build-linux-packages:
|
||||
name: Build Linux packages
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
matrix:
|
||||
include:
|
||||
- arch: amd64
|
||||
distro: ubuntu:18.04
|
||||
go: latest
|
||||
go-arch: amd64
|
||||
- arch: aarch64
|
||||
distro: ubuntu18.04
|
||||
go: latest
|
||||
go-arch: arm64
|
||||
- arch: ppc64le
|
||||
distro: ubuntu18.04
|
||||
go: latest
|
||||
go-arch: ppc64le
|
||||
- arch: armv7
|
||||
distro: ubuntu18.04
|
||||
go: latest
|
||||
go-arch: arm7
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Get commit SHA
|
||||
id: get_commit
|
||||
run: echo "COMMIT=${GITHUB_SHA::8}" >> $GITHUB_OUTPUT
|
||||
shell: bash
|
||||
|
||||
- name: Build on amd64
|
||||
if: ${{ matrix.arch == 'amd64' }}
|
||||
run: |
|
||||
echo '#!/bin/bash' > build.sh
|
||||
echo '' >> build.sh
|
||||
echo 'set -e' >> build.sh
|
||||
echo 'apt-get update -q -y' >> build.sh
|
||||
echo 'apt-get install -q -y curl gcc' >> build.sh
|
||||
if [ ${{ matrix.go }} == 'latest' ]
|
||||
then
|
||||
echo 'GO_VERSION=$(curl -L https://go.dev/VERSION?m=text)' >> build.sh
|
||||
else
|
||||
echo 'GO_VERSION=${{ matrix.go }}' >> build.sh
|
||||
fi
|
||||
echo 'GO_DOWNLOAD_ARCH=${{ matrix.go-arch }}' >> build.sh
|
||||
echo 'curl --retry 5 --retry-delay 2 --connect-timeout 10 -o go.tar.gz -L https://go.dev/dl/${GO_VERSION}.linux-${GO_DOWNLOAD_ARCH}.tar.gz' >> build.sh
|
||||
echo 'tar -C /usr/local -xzf go.tar.gz' >> build.sh
|
||||
echo 'export PATH=$PATH:/usr/local/go/bin' >> build.sh
|
||||
echo 'go version' >> build.sh
|
||||
echo 'cd /usr/local/src' >> build.sh
|
||||
echo 'go build -buildvcs=false -trimpath -tags nopgxregisterdefaulttypes -ldflags "-s -w -X github.com/drakkan/sftpgo/v2/internal/version.commit=${{ steps.get_commit.outputs.COMMIT }} -X github.com/drakkan/sftpgo/v2/internal/version.date=`date -u +%FT%TZ`" -o sftpgo' >> build.sh
|
||||
|
||||
chmod 755 build.sh
|
||||
docker run --rm --name ubuntu-build --mount type=bind,source=`pwd`,target=/usr/local/src ${{ matrix.distro }} /usr/local/src/build.sh
|
||||
mkdir -p output/{init,bash_completion,zsh_completion}
|
||||
cp sftpgo.json output/
|
||||
cp -r templates output/
|
||||
cp -r static output/
|
||||
cp -r openapi output/
|
||||
cp init/sftpgo.service output/init/
|
||||
./sftpgo gen completion bash > output/bash_completion/sftpgo
|
||||
./sftpgo gen completion zsh > output/zsh_completion/_sftpgo
|
||||
./sftpgo gen man -d output/man/man1
|
||||
gzip output/man/man1/*
|
||||
cp sftpgo output/
|
||||
|
||||
- uses: uraimo/run-on-arch-action@v2
|
||||
if: ${{ matrix.arch != 'amd64' }}
|
||||
name: Build for ${{ matrix.arch }}
|
||||
id: build
|
||||
with:
|
||||
arch: ${{ matrix.arch }}
|
||||
distro: ${{ matrix.distro }}
|
||||
setup: |
|
||||
mkdir -p "${PWD}/output"
|
||||
dockerRunArgs: |
|
||||
--volume "${PWD}/output:/output"
|
||||
shell: /bin/bash
|
||||
install: |
|
||||
apt-get update -q -y
|
||||
apt-get install -q -y curl gcc
|
||||
if [ ${{ matrix.go }} == 'latest' ]
|
||||
then
|
||||
GO_VERSION=$(curl -L https://go.dev/VERSION?m=text)
|
||||
else
|
||||
GO_VERSION=${{ matrix.go }}
|
||||
fi
|
||||
GO_DOWNLOAD_ARCH=${{ matrix.go-arch }}
|
||||
if [ ${{ matrix.arch}} == 'armv7' ]
|
||||
then
|
||||
GO_DOWNLOAD_ARCH=armv6l
|
||||
fi
|
||||
curl --retry 5 --retry-delay 2 --connect-timeout 10 -o go.tar.gz -L https://go.dev/dl/${GO_VERSION}.linux-${GO_DOWNLOAD_ARCH}.tar.gz
|
||||
tar -C /usr/local -xzf go.tar.gz
|
||||
run: |
|
||||
export PATH=$PATH:/usr/local/go/bin
|
||||
go version
|
||||
if [ ${{ matrix.arch}} == 'armv7' ]
|
||||
then
|
||||
export GOARM=7
|
||||
fi
|
||||
go build -buildvcs=false -trimpath -tags nopgxregisterdefaulttypes -ldflags "-s -w -X github.com/drakkan/sftpgo/v2/internal/version.commit=${{ steps.get_commit.outputs.COMMIT }} -X github.com/drakkan/sftpgo/v2/internal/version.date=`date -u +%FT%TZ`" -o sftpgo
|
||||
mkdir -p output/{init,bash_completion,zsh_completion}
|
||||
cp sftpgo.json output/
|
||||
cp -r templates output/
|
||||
cp -r static output/
|
||||
cp -r openapi output/
|
||||
cp init/sftpgo.service output/init/
|
||||
./sftpgo gen completion bash > output/bash_completion/sftpgo
|
||||
./sftpgo gen completion zsh > output/zsh_completion/_sftpgo
|
||||
./sftpgo gen man -d output/man/man1
|
||||
gzip output/man/man1/*
|
||||
cp sftpgo output/
|
||||
|
||||
- name: Upload build artifact
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: sftpgo-linux-${{ matrix.arch }}-go-${{ matrix.go }}
|
||||
path: output
|
||||
|
||||
- name: Build Packages
|
||||
id: build_linux_pkgs
|
||||
run: |
|
||||
export NFPM_ARCH=${{ matrix.go-arch }}
|
||||
cd pkgs
|
||||
./build.sh
|
||||
PKG_VERSION=$(cat dist/version)
|
||||
echo "pkg-version=${PKG_VERSION}" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Upload Debian Package
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: sftpgo-${{ steps.build_linux_pkgs.outputs.pkg-version }}-${{ matrix.go-arch }}-deb
|
||||
path: pkgs/dist/deb/*
|
||||
|
||||
- name: Upload RPM Package
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: sftpgo-${{ steps.build_linux_pkgs.outputs.pkg-version }}-${{ matrix.go-arch }}-rpm
|
||||
path: pkgs/dist/rpm/*
|
||||
|
||||
golangci-lint:
|
||||
name: golangci-lint
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: 1.19
|
||||
- uses: actions/checkout@v3
|
||||
- name: Run golangci-lint
|
||||
uses: golangci/golangci-lint-action@v3
|
||||
with:
|
||||
version: latest
|
||||
185
.github/workflows/docker.yml
vendored
Normal file
@@ -0,0 +1,185 @@
|
||||
name: Docker
|
||||
|
||||
on:
|
||||
#schedule:
|
||||
# - cron: '0 4 * * *' # everyday at 4:00 AM UTC
|
||||
push:
|
||||
branches:
|
||||
- 2.4.x
|
||||
tags:
|
||||
- v*
|
||||
pull_request:
|
||||
|
||||
jobs:
|
||||
build:
|
||||
name: Build
|
||||
runs-on: ${{ matrix.os }}
|
||||
strategy:
|
||||
matrix:
|
||||
os:
|
||||
- ubuntu-latest
|
||||
docker_pkg:
|
||||
- debian
|
||||
- alpine
|
||||
optional_deps:
|
||||
- true
|
||||
- false
|
||||
include:
|
||||
- os: ubuntu-latest
|
||||
docker_pkg: distroless
|
||||
optional_deps: false
|
||||
- os: ubuntu-latest
|
||||
docker_pkg: debian-plugins
|
||||
optional_deps: true
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Gather image information
|
||||
id: info
|
||||
run: |
|
||||
VERSION=noop
|
||||
DOCKERFILE=Dockerfile
|
||||
MINOR=""
|
||||
MAJOR=""
|
||||
FEATURES="nopgxregisterdefaulttypes"
|
||||
if [ "${{ github.event_name }}" = "schedule" ]; then
|
||||
VERSION=nightly
|
||||
elif [[ $GITHUB_REF == refs/tags/* ]]; then
|
||||
VERSION=${GITHUB_REF#refs/tags/}
|
||||
elif [[ $GITHUB_REF == refs/heads/* ]]; then
|
||||
VERSION=$(echo ${GITHUB_REF#refs/heads/} | sed -r 's#/+#-#g')
|
||||
if [ "${{ github.event.repository.default_branch }}" = "$VERSION" ]; then
|
||||
VERSION=edge
|
||||
fi
|
||||
elif [[ $GITHUB_REF == refs/pull/* ]]; then
|
||||
VERSION=pr-${{ github.event.number }}
|
||||
fi
|
||||
if [[ $VERSION =~ ^v[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$ ]]; then
|
||||
MINOR=${VERSION%.*}
|
||||
MAJOR=${MINOR%.*}
|
||||
fi
|
||||
VERSION_SLIM="${VERSION}-slim"
|
||||
if [[ $DOCKER_PKG == alpine ]]; then
|
||||
VERSION="${VERSION}-alpine"
|
||||
VERSION_SLIM="${VERSION}-slim"
|
||||
DOCKERFILE=Dockerfile.alpine
|
||||
elif [[ $DOCKER_PKG == distroless ]]; then
|
||||
VERSION="${VERSION}-distroless"
|
||||
VERSION_SLIM="${VERSION}-slim"
|
||||
DOCKERFILE=Dockerfile.distroless
|
||||
FEATURES="${FEATURES},nosqlite"
|
||||
elif [[ $DOCKER_PKG == debian-plugins ]]; then
|
||||
VERSION="${VERSION}-plugins"
|
||||
VERSION_SLIM="${VERSION}-slim"
|
||||
fi
|
||||
DOCKER_IMAGES=("drakkan/sftpgo" "ghcr.io/drakkan/sftpgo")
|
||||
TAGS="${DOCKER_IMAGES[0]}:${VERSION}"
|
||||
TAGS_SLIM="${DOCKER_IMAGES[0]}:${VERSION_SLIM}"
|
||||
|
||||
for DOCKER_IMAGE in ${DOCKER_IMAGES[@]}; do
|
||||
if [[ ${DOCKER_IMAGE} != ${DOCKER_IMAGES[0]} ]]; then
|
||||
TAGS="${TAGS},${DOCKER_IMAGE}:${VERSION}"
|
||||
TAGS_SLIM="${TAGS_SLIM},${DOCKER_IMAGE}:${VERSION_SLIM}"
|
||||
fi
|
||||
if [[ $GITHUB_REF == refs/tags/* ]]; then
|
||||
if [[ $DOCKER_PKG == debian ]]; then
|
||||
if [[ -n $MAJOR && -n $MINOR ]]; then
|
||||
TAGS="${TAGS},${DOCKER_IMAGE}:${MINOR},${DOCKER_IMAGE}:${MAJOR}"
|
||||
TAGS_SLIM="${TAGS_SLIM},${DOCKER_IMAGE}:${MINOR}-slim,${DOCKER_IMAGE}:${MAJOR}-slim"
|
||||
fi
|
||||
TAGS="${TAGS},${DOCKER_IMAGE}:latest"
|
||||
TAGS_SLIM="${TAGS_SLIM},${DOCKER_IMAGE}:slim"
|
||||
elif [[ $DOCKER_PKG == distroless ]]; then
|
||||
if [[ -n $MAJOR && -n $MINOR ]]; then
|
||||
TAGS="${TAGS},${DOCKER_IMAGE}:${MINOR}-distroless,${DOCKER_IMAGE}:${MAJOR}-distroless"
|
||||
TAGS_SLIM="${TAGS_SLIM},${DOCKER_IMAGE}:${MINOR}-distroless-slim,${DOCKER_IMAGE}:${MAJOR}-distroless-slim"
|
||||
fi
|
||||
TAGS="${TAGS},${DOCKER_IMAGE}:distroless"
|
||||
TAGS_SLIM="${TAGS_SLIM},${DOCKER_IMAGE}:distroless-slim"
|
||||
elif [[ $DOCKER_PKG == debian-plugins ]]; then
|
||||
if [[ -n $MAJOR && -n $MINOR ]]; then
|
||||
TAGS="${TAGS},${DOCKER_IMAGE}:${MINOR}-plugins,${DOCKER_IMAGE}:${MAJOR}-plugins"
|
||||
TAGS_SLIM="${TAGS_SLIM},${DOCKER_IMAGE}:${MINOR}-plugins-slim,${DOCKER_IMAGE}:${MAJOR}-plugins-slim"
|
||||
fi
|
||||
TAGS="${TAGS},${DOCKER_IMAGE}:plugins"
|
||||
TAGS_SLIM="${TAGS_SLIM},${DOCKER_IMAGE}:plugins-slim"
|
||||
else
|
||||
if [[ -n $MAJOR && -n $MINOR ]]; then
|
||||
TAGS="${TAGS},${DOCKER_IMAGE}:${MINOR}-alpine,${DOCKER_IMAGE}:${MAJOR}-alpine"
|
||||
TAGS_SLIM="${TAGS_SLIM},${DOCKER_IMAGE}:${MINOR}-alpine-slim,${DOCKER_IMAGE}:${MAJOR}-alpine-slim"
|
||||
fi
|
||||
TAGS="${TAGS},${DOCKER_IMAGE}:alpine"
|
||||
TAGS_SLIM="${TAGS_SLIM},${DOCKER_IMAGE}:alpine-slim"
|
||||
fi
|
||||
fi
|
||||
done
|
||||
|
||||
if [[ $OPTIONAL_DEPS == true ]]; then
|
||||
echo "version=${VERSION}" >> $GITHUB_OUTPUT
|
||||
echo "tags=${TAGS}" >> $GITHUB_OUTPUT
|
||||
echo "full=true" >> $GITHUB_OUTPUT
|
||||
else
|
||||
echo "version=${VERSION_SLIM}" >> $GITHUB_OUTPUT
|
||||
echo "tags=${TAGS_SLIM}" >> $GITHUB_OUTPUT
|
||||
echo "full=false" >> $GITHUB_OUTPUT
|
||||
fi
|
||||
if [[ $DOCKER_PKG == debian-plugins ]]; then
|
||||
echo "plugins=true" >> $GITHUB_OUTPUT
|
||||
else
|
||||
echo "plugins=false" >> $GITHUB_OUTPUT
|
||||
fi
|
||||
echo "dockerfile=${DOCKERFILE}" >> $GITHUB_OUTPUT
|
||||
echo "features=${FEATURES}" >> $GITHUB_OUTPUT
|
||||
echo "created=$(date -u +'%Y-%m-%dT%H:%M:%SZ')" >> $GITHUB_OUTPUT
|
||||
echo "sha=${GITHUB_SHA::8}" >> $GITHUB_OUTPUT
|
||||
env:
|
||||
DOCKER_PKG: ${{ matrix.docker_pkg }}
|
||||
OPTIONAL_DEPS: ${{ matrix.optional_deps }}
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v2
|
||||
|
||||
- name: Set up builder
|
||||
uses: docker/setup-buildx-action@v2
|
||||
id: builder
|
||||
|
||||
- name: Login to Docker Hub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
if: ${{ github.event_name != 'pull_request' }}
|
||||
|
||||
- name: Login to GitHub Container Registry
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.repository_owner }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
if: ${{ github.event_name != 'pull_request' }}
|
||||
|
||||
- name: Build and push
|
||||
uses: docker/build-push-action@v3
|
||||
with:
|
||||
context: .
|
||||
builder: ${{ steps.builder.outputs.name }}
|
||||
file: ./${{ steps.info.outputs.dockerfile }}
|
||||
platforms: linux/amd64,linux/arm64,linux/ppc64le
|
||||
push: ${{ github.event_name != 'pull_request' }}
|
||||
tags: ${{ steps.info.outputs.tags }}
|
||||
build-args: |
|
||||
COMMIT_SHA=${{ steps.info.outputs.sha }}
|
||||
INSTALL_OPTIONAL_PACKAGES=${{ steps.info.outputs.full }}
|
||||
DOWNLOAD_PLUGINS=${{ steps.info.outputs.plugins }}
|
||||
FEATURES=${{ steps.info.outputs.features }}
|
||||
labels: |
|
||||
org.opencontainers.image.title=SFTPGo
|
||||
org.opencontainers.image.description=Fully featured and highly configurable SFTP server with optional HTTP, FTP/S and WebDAV support
|
||||
org.opencontainers.image.url=https://github.com/drakkan/sftpgo
|
||||
org.opencontainers.image.documentation=https://github.com/drakkan/sftpgo/blob/${{ github.sha }}/docker/README.md
|
||||
org.opencontainers.image.source=https://github.com/drakkan/sftpgo
|
||||
org.opencontainers.image.version=${{ steps.info.outputs.version }}
|
||||
org.opencontainers.image.created=${{ steps.info.outputs.created }}
|
||||
org.opencontainers.image.revision=${{ github.sha }}
|
||||
org.opencontainers.image.licenses=AGPL-3.0-only
|
||||
607
.github/workflows/release.yml
vendored
Normal file
@@ -0,0 +1,607 @@
|
||||
name: Release
|
||||
|
||||
on:
|
||||
push:
|
||||
tags: 'v*'
|
||||
|
||||
env:
|
||||
GO_VERSION: 1.19.4
|
||||
|
||||
jobs:
|
||||
prepare-sources-with-deps:
|
||||
name: Prepare sources with deps
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
|
||||
- name: Get SFTPGo version
|
||||
id: get_version
|
||||
run: echo "VERSION=${GITHUB_REF/refs\/tags\//}" >> $GITHUB_OUTPUT
|
||||
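# Example: for a pushed tag "v2.4.0" (illustrative), GITHUB_REF is "refs/tags/v2.4.0",
# so the substitution above strips the "refs/tags/" prefix and VERSION becomes "v2.4.0".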
|
||||
- name: Prepare release
|
||||
run: |
|
||||
go mod vendor
|
||||
echo "${SFTPGO_VERSION}" > VERSION.txt
|
||||
echo "${GITHUB_SHA::8}" >> VERSION.txt
|
||||
tar cJvf sftpgo_${SFTPGO_VERSION}_src_with_deps.tar.xz *
|
||||
env:
|
||||
SFTPGO_VERSION: ${{ steps.get_version.outputs.VERSION }}
|
||||
|
||||
- name: Upload build artifact
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: sftpgo_${{ steps.get_version.outputs.VERSION }}_src_with_deps.tar.xz
|
||||
path: ./sftpgo_${{ steps.get_version.outputs.VERSION }}_src_with_deps.tar.xz
|
||||
retention-days: 1
|
||||
|
||||
prepare-window-mac:
|
||||
name: Prepare binaries
|
||||
runs-on: ${{ matrix.os }}
|
||||
strategy:
|
||||
matrix:
|
||||
os: [macos-11, windows-2022]
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: ${{ env.GO_VERSION }}
|
||||
|
||||
- name: Get SFTPGo version
|
||||
id: get_version
|
||||
run: echo "VERSION=${GITHUB_REF/refs\/tags\//}" >> $GITHUB_OUTPUT
|
||||
shell: bash
|
||||
|
||||
- name: Get OS name
|
||||
id: get_os_name
|
||||
run: |
|
||||
if [[ $MATRIX_OS =~ ^macos.* ]]
|
||||
then
|
||||
echo "OS=macOS" >> $GITHUB_OUTPUT
|
||||
else
|
||||
echo "OS=windows" >> $GITHUB_OUTPUT
|
||||
fi
|
||||
shell: bash
|
||||
env:
|
||||
MATRIX_OS: ${{ matrix.os }}
|
||||
|
||||
- name: Build for macOS x86_64
|
||||
if: startsWith(matrix.os, 'windows-') != true
|
||||
run: go build -trimpath -tags nopgxregisterdefaulttypes -ldflags "-s -w -X github.com/drakkan/sftpgo/v2/internal/version.commit=`git describe --always --abbrev=8 --dirty` -X github.com/drakkan/sftpgo/v2/internal/version.date=`date -u +%FT%TZ`" -o sftpgo
|
||||
|
||||
- name: Build for macOS arm64
|
||||
if: startsWith(matrix.os, 'macos-') == true
|
||||
run: CGO_ENABLED=1 GOOS=darwin GOARCH=arm64 SDKROOT=$(xcrun --sdk macosx --show-sdk-path) go build -trimpath -tags nopgxregisterdefaulttypes -ldflags "-s -w -X github.com/drakkan/sftpgo/v2/internal/version.commit=`git describe --always --abbrev=8 --dirty` -X github.com/drakkan/sftpgo/v2/internal/version.date=`date -u +%FT%TZ`" -o sftpgo_arm64
|
||||
|
||||
- name: Build for Windows
|
||||
if: startsWith(matrix.os, 'windows-')
|
||||
run: |
|
||||
$GIT_COMMIT = (git describe --always --abbrev=8 --dirty) | Out-String
|
||||
$DATE_TIME = ([datetime]::Now.ToUniversalTime().toString("yyyy-MM-ddTHH:mm:ssZ")) | Out-String
|
||||
$FILE_VERSION = $Env:SFTPGO_VERSION.substring(1) + ".0"
|
||||
go install github.com/tc-hib/go-winres@latest
|
||||
go-winres simply --arch amd64 --product-version $Env:SFTPGO_VERSION-$GIT_COMMIT --file-version $FILE_VERSION --file-description "SFTPGo server" --product-name SFTPGo --copyright "AGPL-3.0" --original-filename sftpgo.exe --icon .\windows-installer\icon.ico
|
||||
go build -trimpath -tags nopgxregisterdefaulttypes -ldflags "-s -w -X github.com/drakkan/sftpgo/v2/internal/version.commit=$GIT_COMMIT -X github.com/drakkan/sftpgo/v2/internal/version.date=$DATE_TIME" -o sftpgo.exe
|
||||
mkdir arm64
|
||||
$Env:CGO_ENABLED='0'
|
||||
$Env:GOOS='windows'
|
||||
$Env:GOARCH='arm64'
|
||||
go-winres simply --arch arm64 --product-version $Env:SFTPGO_VERSION-$GIT_COMMIT --file-version $FILE_VERSION --file-description "SFTPGo server" --product-name SFTPGo --copyright "AGPL-3.0" --original-filename sftpgo.exe --icon .\windows-installer\icon.ico
|
||||
go build -trimpath -tags nopgxregisterdefaulttypes,nosqlite -ldflags "-s -w -X github.com/drakkan/sftpgo/v2/internal/version.commit=$GIT_COMMIT -X github.com/drakkan/sftpgo/v2/internal/version.date=$DATE_TIME" -o .\arm64\sftpgo.exe
|
||||
mkdir x86
|
||||
$Env:GOARCH='386'
|
||||
go-winres simply --arch 386 --product-version $Env:SFTPGO_VERSION-$GIT_COMMIT --file-version $FILE_VERSION --file-description "SFTPGo server" --product-name SFTPGo --copyright "AGPL-3.0" --original-filename sftpgo.exe --icon .\windows-installer\icon.ico
|
||||
go build -trimpath -tags nopgxregisterdefaulttypes,nosqlite -ldflags "-s -w -X github.com/drakkan/sftpgo/v2/internal/version.commit=$GIT_COMMIT -X github.com/drakkan/sftpgo/v2/internal/version.date=$DATE_TIME" -o .\x86\sftpgo.exe
|
||||
Remove-Item Env:\CGO_ENABLED
|
||||
Remove-Item Env:\GOOS
|
||||
Remove-Item Env:\GOARCH
|
||||
env:
|
||||
SFTPGO_VERSION: ${{ steps.get_version.outputs.VERSION }}
|
||||
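# Note: the Windows arm64 and x86 binaries above are cross-compiled with CGO disabled
# and the "nosqlite" tag, so the packages built from them rely on the bolt provider
# instead of SQLite (see the sftpgo.json rewrite in the installer steps below).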
|
||||
- name: Initialize data provider
|
||||
run: ./sftpgo initprovider
|
||||
shell: bash
|
||||
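# With the default configuration this should create the SQLite database (sftpgo.db)
# that is copied into the release artifacts below.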
|
||||
- name: Prepare Release for macOS
|
||||
if: startsWith(matrix.os, 'macos-')
|
||||
run: |
|
||||
mkdir -p output/{init,sqlite,bash_completion,zsh_completion}
|
||||
echo "For documentation please take a look here:" > output/README.txt
|
||||
echo "" >> output/README.txt
|
||||
echo "https://github.com/drakkan/sftpgo/blob/${SFTPGO_VERSION}/README.md" >> output/README.txt
|
||||
cp LICENSE output/
|
||||
cp sftpgo output/
|
||||
cp sftpgo.json output/
|
||||
cp sftpgo.db output/sqlite/
|
||||
cp -r static output/
|
||||
cp -r openapi output/
|
||||
cp -r templates output/
|
||||
cp init/com.github.drakkan.sftpgo.plist output/init/
|
||||
./sftpgo gen completion bash > output/bash_completion/sftpgo
|
||||
./sftpgo gen completion zsh > output/zsh_completion/_sftpgo
|
||||
./sftpgo gen man -d output/man/man1
|
||||
gzip output/man/man1/*
|
||||
cd output
|
||||
tar cJvf ../sftpgo_${SFTPGO_VERSION}_${OS}_x86_64.tar.xz *
|
||||
cd ..
|
||||
cp sftpgo_arm64 output/sftpgo
|
||||
cd output
|
||||
tar cJvf ../sftpgo_${SFTPGO_VERSION}_${OS}_arm64.tar.xz *
|
||||
cd ..
|
||||
env:
|
||||
SFTPGO_VERSION: ${{ steps.get_version.outputs.VERSION }}
|
||||
OS: ${{ steps.get_os_name.outputs.OS }}
|
||||
|
||||
- name: Prepare Release for Windows
|
||||
if: startsWith(matrix.os, 'windows-')
|
||||
run: |
|
||||
mkdir output
|
||||
copy .\sftpgo.exe .\output
|
||||
copy .\sftpgo.json .\output
|
||||
copy .\sftpgo.db .\output
|
||||
copy .\LICENSE .\output\LICENSE.txt
|
||||
mkdir output\templates
|
||||
xcopy .\templates .\output\templates\ /E
|
||||
mkdir output\static
|
||||
xcopy .\static .\output\static\ /E
|
||||
mkdir output\openapi
|
||||
xcopy .\openapi .\output\openapi\ /E
|
||||
$CERT_PATH=(Get-Location -PSProvider FileSystem).ProviderPath + "\cert.pfx"
|
||||
[IO.File]::WriteAllBytes($CERT_PATH,[System.Convert]::FromBase64String($Env:CERT_DATA))
|
||||
certutil -f -p "$Env:CERT_PASS" -importpfx MY "$CERT_PATH"
|
||||
rm "$CERT_PATH"
|
||||
& 'C:/Program Files (x86)/Windows Kits/10/bin/10.0.20348.0/x86/signtool.exe' sign /sm /tr http://timestamp.sectigo.com /td sha256 /fd sha256 /n "Nicola Murino" /d "SFTPGo" .\sftpgo.exe
|
||||
& 'C:/Program Files (x86)/Windows Kits/10/bin/10.0.20348.0/x86/signtool.exe' sign /sm /tr http://timestamp.sectigo.com /td sha256 /fd sha256 /n "Nicola Murino" /d "SFTPGo" .\arm64\sftpgo.exe
|
||||
& 'C:/Program Files (x86)/Windows Kits/10/bin/10.0.20348.0/x86/signtool.exe' sign /sm /tr http://timestamp.sectigo.com /td sha256 /fd sha256 /n "Nicola Murino" /d "SFTPGo" .\x86\sftpgo.exe
|
||||
$INNO_S='/Ssigntool=$qC:/Program Files (x86)/Windows Kits/10/bin/10.0.20348.0/x86/signtool.exe$q sign /sm /tr http://timestamp.sectigo.com /td sha256 /fd sha256 /n $qNicola Murino$q /d $qSFTPGo$q $f'
|
||||
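# In the Inno Setup SignTool definition above, $q stands for a double quote and $f is
# replaced with the file to sign when iscc runs the external signtool command.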
iscc "$INNO_S" .\windows-installer\sftpgo.iss
|
||||
|
||||
rm .\output\sftpgo.exe
|
||||
rm .\output\sftpgo.db
|
||||
copy .\arm64\sftpgo.exe .\output
|
||||
(Get-Content .\output\sftpgo.json).replace('"sqlite"', '"bolt"') | Set-Content .\output\sftpgo.json
|
||||
$Env:SFTPGO_DATA_PROVIDER__DRIVER='bolt'
|
||||
$Env:SFTPGO_DATA_PROVIDER__NAME='.\output\sftpgo.db'
|
||||
.\sftpgo.exe initprovider
|
||||
Remove-Item Env:\SFTPGO_DATA_PROVIDER__DRIVER
|
||||
Remove-Item Env:\SFTPGO_DATA_PROVIDER__NAME
|
||||
$Env:SFTPGO_ISS_ARCH='arm64'
|
||||
iscc "$INNO_S" .\windows-installer\sftpgo.iss
|
||||
|
||||
rm .\output\sftpgo.exe
|
||||
copy .\x86\sftpgo.exe .\output
|
||||
$Env:SFTPGO_ISS_ARCH='x86'
|
||||
iscc "$INNO_S" .\windows-installer\sftpgo.iss
|
||||
certutil -delstore MY "Nicola Murino"
|
||||
env:
|
||||
SFTPGO_ISS_VERSION: ${{ steps.get_version.outputs.VERSION }}
|
||||
SFTPGO_ISS_DOC_URL: https://github.com/drakkan/sftpgo/blob/${{ steps.get_version.outputs.VERSION }}/README.md
|
||||
CERT_DATA: ${{ secrets.CERT_DATA }}
|
||||
CERT_PASS: ${{ secrets.CERT_PASS }}
|
||||
|
||||
- name: Prepare Portable Release for Windows
|
||||
if: startsWith(matrix.os, 'windows-')
|
||||
run: |
|
||||
mkdir win-portable
|
||||
copy .\sftpgo.exe .\win-portable
|
||||
mkdir win-portable\arm64
|
||||
copy .\arm64\sftpgo.exe .\win-portable\arm64
|
||||
mkdir win-portable\x86
|
||||
copy .\x86\sftpgo.exe .\win-portable\x86
|
||||
copy .\sftpgo.json .\win-portable
|
||||
(Get-Content .\win-portable\sftpgo.json).replace('"sqlite"', '"bolt"') | Set-Content .\win-portable\sftpgo.json
|
||||
copy .\output\sftpgo.db .\win-portable
|
||||
copy .\LICENSE .\win-portable\LICENSE.txt
|
||||
mkdir win-portable\templates
|
||||
xcopy .\templates .\win-portable\templates\ /E
|
||||
mkdir win-portable\static
|
||||
xcopy .\static .\win-portable\static\ /E
|
||||
mkdir win-portable\openapi
|
||||
xcopy .\openapi .\win-portable\openapi\ /E
|
||||
Compress-Archive .\win-portable\* sftpgo_portable.zip
|
||||
|
||||
- name: Upload macOS x86_64 artifact
|
||||
if: startsWith(matrix.os, 'macos-')
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: sftpgo_${{ steps.get_version.outputs.VERSION }}_${{ steps.get_os_name.outputs.OS }}_x86_64.tar.xz
|
||||
path: ./sftpgo_${{ steps.get_version.outputs.VERSION }}_${{ steps.get_os_name.outputs.OS }}_x86_64.tar.xz
|
||||
retention-days: 1
|
||||
|
||||
- name: Upload macOS arm64 artifact
|
||||
if: startsWith(matrix.os, 'macos-')
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: sftpgo_${{ steps.get_version.outputs.VERSION }}_${{ steps.get_os_name.outputs.OS }}_arm64.tar.xz
|
||||
path: ./sftpgo_${{ steps.get_version.outputs.VERSION }}_${{ steps.get_os_name.outputs.OS }}_arm64.tar.xz
|
||||
retention-days: 1
|
||||
|
||||
- name: Upload Windows installer x86_64 artifact
|
||||
if: startsWith(matrix.os, 'windows-')
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: sftpgo_${{ steps.get_version.outputs.VERSION }}_${{ steps.get_os_name.outputs.OS }}_x86_64.exe
|
||||
path: ./sftpgo_windows_x86_64.exe
|
||||
retention-days: 1
|
||||
|
||||
- name: Upload Windows installer arm64 artifact
|
||||
if: startsWith(matrix.os, 'windows-')
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: sftpgo_${{ steps.get_version.outputs.VERSION }}_${{ steps.get_os_name.outputs.OS }}_arm64.exe
|
||||
path: ./sftpgo_windows_arm64.exe
|
||||
retention-days: 1
|
||||
|
||||
- name: Upload Windows installer x86 artifact
|
||||
if: startsWith(matrix.os, 'windows-')
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: sftpgo_${{ steps.get_version.outputs.VERSION }}_${{ steps.get_os_name.outputs.OS }}_x86.exe
|
||||
path: ./sftpgo_windows_x86.exe
|
||||
retention-days: 1
|
||||
|
||||
- name: Upload Windows portable artifact
|
||||
if: startsWith(matrix.os, 'windows-')
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: sftpgo_${{ steps.get_version.outputs.VERSION }}_${{ steps.get_os_name.outputs.OS }}_portable.zip
|
||||
path: ./sftpgo_portable.zip
|
||||
retention-days: 1
|
||||
|
||||
prepare-linux:
|
||||
name: Prepare Linux binaries
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
matrix:
|
||||
include:
|
||||
- arch: amd64
|
||||
distro: ubuntu:18.04
|
||||
go-arch: amd64
|
||||
deb-arch: amd64
|
||||
rpm-arch: x86_64
|
||||
tar-arch: x86_64
|
||||
- arch: aarch64
|
||||
distro: ubuntu18.04
|
||||
go-arch: arm64
|
||||
deb-arch: arm64
|
||||
rpm-arch: aarch64
|
||||
tar-arch: arm64
|
||||
- arch: ppc64le
|
||||
distro: ubuntu18.04
|
||||
go-arch: ppc64le
|
||||
deb-arch: ppc64el
|
||||
rpm-arch: ppc64le
|
||||
tar-arch: ppc64le
|
||||
- arch: armv7
|
||||
distro: ubuntu18.04
|
||||
go-arch: arm7
|
||||
deb-arch: armhf
|
||||
rpm-arch: armv7hl
|
||||
tar-arch: armv7
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- name: Get versions
|
||||
id: get_version
|
||||
run: |
|
||||
echo "SFTPGO_VERSION=${GITHUB_REF/refs\/tags\//}" >> $GITHUB_OUTPUT
|
||||
echo "GO_VERSION=${GO_VERSION}" >> $GITHUB_OUTPUT
|
||||
echo "COMMIT=${GITHUB_SHA::8}" >> $GITHUB_OUTPUT
|
||||
shell: bash
|
||||
env:
|
||||
GO_VERSION: ${{ env.GO_VERSION }}
|
||||
|
||||
- name: Build on amd64
|
||||
if: ${{ matrix.arch == 'amd64' }}
|
||||
run: |
|
||||
echo '#!/bin/bash' > build.sh
|
||||
echo '' >> build.sh
|
||||
echo 'set -e' >> build.sh
|
||||
echo 'apt-get update -q -y' >> build.sh
|
||||
echo 'apt-get install -q -y curl gcc' >> build.sh
|
||||
echo 'curl --retry 5 --retry-delay 2 --connect-timeout 10 -o go.tar.gz -L https://go.dev/dl/go${{ steps.get_version.outputs.GO_VERSION }}.linux-${{ matrix.go-arch }}.tar.gz' >> build.sh
|
||||
echo 'tar -C /usr/local -xzf go.tar.gz' >> build.sh
|
||||
echo 'export PATH=$PATH:/usr/local/go/bin' >> build.sh
|
||||
echo 'go version' >> build.sh
|
||||
echo 'cd /usr/local/src' >> build.sh
|
||||
echo 'go build -buildvcs=false -trimpath -tags nopgxregisterdefaulttypes -ldflags "-s -w -X github.com/drakkan/sftpgo/v2/internal/version.commit=${{ steps.get_version.outputs.COMMIT }} -X github.com/drakkan/sftpgo/v2/internal/version.date=`date -u +%FT%TZ`" -o sftpgo' >> build.sh
|
||||
|
||||
chmod 755 build.sh
|
||||
docker run --rm --name ubuntu-build --mount type=bind,source=`pwd`,target=/usr/local/src ${{ matrix.distro }} /usr/local/src/build.sh
|
||||
mkdir -p output/{init,sqlite,bash_completion,zsh_completion}
|
||||
echo "For documentation please take a look here:" > output/README.txt
|
||||
echo "" >> output/README.txt
|
||||
echo "https://github.com/drakkan/sftpgo/blob/${SFTPGO_VERSION}/README.md" >> output/README.txt
|
||||
cp LICENSE output/
|
||||
cp sftpgo.json output/
|
||||
cp -r templates output/
|
||||
cp -r static output/
|
||||
cp -r openapi output/
|
||||
cp init/sftpgo.service output/init/
|
||||
./sftpgo initprovider
|
||||
./sftpgo gen completion bash > output/bash_completion/sftpgo
|
||||
./sftpgo gen completion zsh > output/zsh_completion/_sftpgo
|
||||
./sftpgo gen man -d output/man/man1
|
||||
gzip output/man/man1/*
|
||||
cp sftpgo output/
|
||||
cp sftpgo.db output/sqlite/
|
||||
cd output
|
||||
tar cJvf sftpgo_${SFTPGO_VERSION}_linux_${{ matrix.tar-arch }}.tar.xz *
|
||||
cd ..
|
||||
env:
|
||||
SFTPGO_VERSION: ${{ steps.get_version.outputs.SFTPGO_VERSION }}
|
||||
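# (Building inside an ubuntu:18.04 container keeps the glibc baseline low, so the
# resulting binaries should also run on older Linux distributions.)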
|
||||
- uses: uraimo/run-on-arch-action@v2
|
||||
if: ${{ matrix.arch != 'amd64' }}
|
||||
name: Build for ${{ matrix.arch }}
|
||||
id: build
|
||||
with:
|
||||
arch: ${{ matrix.arch }}
|
||||
distro: ${{ matrix.distro }}
|
||||
setup: |
|
||||
mkdir -p "${PWD}/output"
|
||||
dockerRunArgs: |
|
||||
--volume "${PWD}/output:/output"
|
||||
shell: /bin/bash
|
||||
install: |
|
||||
apt-get update -q -y
|
||||
apt-get install -q -y curl gcc xz-utils
|
||||
GO_DOWNLOAD_ARCH=${{ matrix.go-arch }}
|
||||
if [ ${{ matrix.arch}} == 'armv7' ]
|
||||
then
|
||||
GO_DOWNLOAD_ARCH=armv6l
|
||||
fi
|
||||
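# Go's official downloads ship 32-bit ARM Linux tarballs only as "armv6l", which also
# run on armv7 hosts, hence the override above.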
curl --retry 5 --retry-delay 2 --connect-timeout 10 -o go.tar.gz -L https://go.dev/dl/go${{ steps.get_version.outputs.GO_VERSION }}.linux-${GO_DOWNLOAD_ARCH}.tar.gz
|
||||
tar -C /usr/local -xzf go.tar.gz
|
||||
run: |
|
||||
export PATH=$PATH:/usr/local/go/bin
|
||||
go version
|
||||
go build -buildvcs=false -trimpath -tags nopgxregisterdefaulttypes -ldflags "-s -w -X github.com/drakkan/sftpgo/v2/internal/version.commit=${{ steps.get_version.outputs.COMMIT }} -X github.com/drakkan/sftpgo/v2/internal/version.date=`date -u +%FT%TZ`" -o sftpgo
|
||||
mkdir -p output/{init,sqlite,bash_completion,zsh_completion}
|
||||
echo "For documentation please take a look here:" > output/README.txt
|
||||
echo "" >> output/README.txt
|
||||
echo "https://github.com/drakkan/sftpgo/blob/${{ steps.get_version.outputs.SFTPGO_VERSION }}/README.md" >> output/README.txt
|
||||
cp LICENSE output/
|
||||
cp sftpgo.json output/
|
||||
cp -r templates output/
|
||||
cp -r static output/
|
||||
cp -r openapi output/
|
||||
cp init/sftpgo.service output/init/
|
||||
./sftpgo initprovider
|
||||
./sftpgo gen completion bash > output/bash_completion/sftpgo
|
||||
./sftpgo gen completion zsh > output/zsh_completion/_sftpgo
|
||||
./sftpgo gen man -d output/man/man1
|
||||
gzip output/man/man1/*
|
||||
cp sftpgo output/
|
||||
cp sftpgo.db output/sqlite/
|
||||
cd output
|
||||
tar cJvf sftpgo_${{ steps.get_version.outputs.SFTPGO_VERSION }}_linux_${{ matrix.tar-arch }}.tar.xz *
|
||||
cd ..
|
||||
|
||||
- name: Upload build artifact for ${{ matrix.arch }}
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: sftpgo_${{ steps.get_version.outputs.SFTPGO_VERSION }}_linux_${{ matrix.tar-arch }}.tar.xz
|
||||
path: ./output/sftpgo_${{ steps.get_version.outputs.SFTPGO_VERSION }}_linux_${{ matrix.tar-arch }}.tar.xz
|
||||
retention-days: 1
|
||||
|
||||
- name: Build Packages
|
||||
id: build_linux_pkgs
|
||||
run: |
|
||||
export NFPM_ARCH=${{ matrix.go-arch }}
|
||||
cd pkgs
|
||||
./build.sh
|
||||
PKG_VERSION=${SFTPGO_VERSION:1}
|
||||
echo "pkg-version=${PKG_VERSION}" >> $GITHUB_OUTPUT
|
||||
env:
|
||||
SFTPGO_VERSION: ${{ steps.get_version.outputs.SFTPGO_VERSION }}
|
||||
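# Example: with SFTPGO_VERSION "v2.4.0" (illustrative), ${SFTPGO_VERSION:1} drops the
# leading "v", so the Deb/RPM artifacts below carry the package version "2.4.0-1".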
|
||||
- name: Upload Deb Package
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: sftpgo_${{ steps.build_linux_pkgs.outputs.pkg-version }}-1_${{ matrix.deb-arch}}.deb
|
||||
path: ./pkgs/dist/deb/sftpgo_${{ steps.build_linux_pkgs.outputs.pkg-version }}-1_${{ matrix.deb-arch}}.deb
|
||||
retention-days: 1
|
||||
|
||||
- name: Upload RPM Package
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: sftpgo-${{ steps.build_linux_pkgs.outputs.pkg-version }}-1.${{ matrix.rpm-arch}}.rpm
|
||||
path: ./pkgs/dist/rpm/sftpgo-${{ steps.build_linux_pkgs.outputs.pkg-version }}-1.${{ matrix.rpm-arch}}.rpm
|
||||
retention-days: 1
|
||||
|
||||
prepare-linux-bundle:
|
||||
name: Prepare Linux bundle
|
||||
needs: prepare-linux
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- name: Get versions
|
||||
id: get_version
|
||||
run: |
|
||||
echo "SFTPGO_VERSION=${GITHUB_REF/refs\/tags\//}" >> $GITHUB_OUTPUT
|
||||
shell: bash
|
||||
|
||||
- name: Download amd64 artifact
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: sftpgo_${{ steps.get_version.outputs.SFTPGO_VERSION }}_linux_x86_64.tar.xz
|
||||
|
||||
- name: Download arm64 artifact
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: sftpgo_${{ steps.get_version.outputs.SFTPGO_VERSION }}_linux_arm64.tar.xz
|
||||
|
||||
- name: Download ppc64le artifact
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: sftpgo_${{ steps.get_version.outputs.SFTPGO_VERSION }}_linux_ppc64le.tar.xz
|
||||
|
||||
- name: Download armv7 artifact
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: sftpgo_${{ steps.get_version.outputs.SFTPGO_VERSION }}_linux_armv7.tar.xz
|
||||
|
||||
- name: Build bundle
|
||||
shell: bash
|
||||
run: |
|
||||
mkdir -p bundle/{arm64,ppc64le,armv7}
|
||||
cd bundle
|
||||
tar xvf ../sftpgo_${SFTPGO_VERSION}_linux_x86_64.tar.xz
|
||||
cd arm64
|
||||
tar xvf ../../sftpgo_${SFTPGO_VERSION}_linux_arm64.tar.xz sftpgo
|
||||
cd ../ppc64le
|
||||
tar xvf ../../sftpgo_${SFTPGO_VERSION}_linux_ppc64le.tar.xz sftpgo
|
||||
cd ../armv7
|
||||
tar xvf ../../sftpgo_${SFTPGO_VERSION}_linux_armv7.tar.xz sftpgo
|
||||
cd ..
|
||||
tar cJvf sftpgo_${SFTPGO_VERSION}_linux_bundle.tar.xz *
|
||||
cd ..
|
||||
env:
|
||||
SFTPGO_VERSION: ${{ steps.get_version.outputs.SFTPGO_VERSION }}
|
||||
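# The resulting bundle contains the full x86_64 release at the archive root plus the
# arm64/sftpgo, ppc64le/sftpgo and armv7/sftpgo binaries extracted alongside it.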
|
||||
- name: Upload Linux bundle
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: sftpgo_${{ steps.get_version.outputs.SFTPGO_VERSION }}_linux_bundle.tar.xz
|
||||
path: ./bundle/sftpgo_${{ steps.get_version.outputs.SFTPGO_VERSION }}_linux_bundle.tar.xz
|
||||
retention-days: 1
|
||||
|
||||
create-release:
|
||||
name: Release
|
||||
needs: [prepare-linux-bundle, prepare-sources-with-deps, prepare-window-mac]
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- name: Get versions
|
||||
id: get_version
|
||||
run: |
|
||||
SFTPGO_VERSION=${GITHUB_REF/refs\/tags\//}
|
||||
PKG_VERSION=${SFTPGO_VERSION:1}
|
||||
echo "SFTPGO_VERSION=${SFTPGO_VERSION}" >> $GITHUB_OUTPUT
|
||||
echo "PKG_VERSION=${PKG_VERSION}" >> $GITHUB_OUTPUT
|
||||
shell: bash
|
||||
|
||||
- name: Download amd64 artifact
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: sftpgo_${{ steps.get_version.outputs.SFTPGO_VERSION }}_linux_x86_64.tar.xz
|
||||
|
||||
- name: Download arm64 artifact
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: sftpgo_${{ steps.get_version.outputs.SFTPGO_VERSION }}_linux_arm64.tar.xz
|
||||
|
||||
- name: Download ppc64le artifact
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: sftpgo_${{ steps.get_version.outputs.SFTPGO_VERSION }}_linux_ppc64le.tar.xz
|
||||
|
||||
- name: Download armv7 artifact
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: sftpgo_${{ steps.get_version.outputs.SFTPGO_VERSION }}_linux_armv7.tar.xz
|
||||
|
||||
- name: Download Linux bundle artifact
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: sftpgo_${{ steps.get_version.outputs.SFTPGO_VERSION }}_linux_bundle.tar.xz
|
||||
|
||||
- name: Download Deb amd64 artifact
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: sftpgo_${{ steps.get_version.outputs.PKG_VERSION }}-1_amd64.deb
|
||||
|
||||
- name: Download Deb arm64 artifact
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: sftpgo_${{ steps.get_version.outputs.PKG_VERSION }}-1_arm64.deb
|
||||
|
||||
- name: Download Deb ppc64le artifact
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: sftpgo_${{ steps.get_version.outputs.PKG_VERSION }}-1_ppc64el.deb
|
||||
|
||||
- name: Download Deb armv7 artifact
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: sftpgo_${{ steps.get_version.outputs.PKG_VERSION }}-1_armhf.deb
|
||||
|
||||
- name: Download RPM x86_64 artifact
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: sftpgo-${{ steps.get_version.outputs.PKG_VERSION }}-1.x86_64.rpm
|
||||
|
||||
- name: Download RPM aarch64 artifact
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: sftpgo-${{ steps.get_version.outputs.PKG_VERSION }}-1.aarch64.rpm
|
||||
|
||||
- name: Download RPM ppc64le artifact
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: sftpgo-${{ steps.get_version.outputs.PKG_VERSION }}-1.ppc64le.rpm
|
||||
|
||||
- name: Download RPM armv7 artifact
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: sftpgo-${{ steps.get_version.outputs.PKG_VERSION }}-1.armv7hl.rpm
|
||||
|
||||
- name: Download macOS x86_64 artifact
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: sftpgo_${{ steps.get_version.outputs.SFTPGO_VERSION }}_macOS_x86_64.tar.xz
|
||||
|
||||
- name: Download macOS arm64 artifact
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: sftpgo_${{ steps.get_version.outputs.SFTPGO_VERSION }}_macOS_arm64.tar.xz
|
||||
|
||||
- name: Download Windows installer x86_64 artifact
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: sftpgo_${{ steps.get_version.outputs.SFTPGO_VERSION }}_windows_x86_64.exe
|
||||
|
||||
- name: Download Windows installer arm64 artifact
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: sftpgo_${{ steps.get_version.outputs.SFTPGO_VERSION }}_windows_arm64.exe
|
||||
|
||||
- name: Download Windows installer x86 artifact
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: sftpgo_${{ steps.get_version.outputs.SFTPGO_VERSION }}_windows_x86.exe
|
||||
|
||||
- name: Download Windows portable artifact
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: sftpgo_${{ steps.get_version.outputs.SFTPGO_VERSION }}_windows_portable.zip
|
||||
|
||||
- name: Download source with deps artifact
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: sftpgo_${{ steps.get_version.outputs.SFTPGO_VERSION }}_src_with_deps.tar.xz
|
||||
|
||||
- name: Create release
|
||||
run: |
|
||||
mv sftpgo_windows_x86_64.exe sftpgo_${SFTPGO_VERSION}_windows_x86_64.exe
|
||||
mv sftpgo_windows_arm64.exe sftpgo_${SFTPGO_VERSION}_windows_arm64.exe
|
||||
mv sftpgo_windows_x86.exe sftpgo_${SFTPGO_VERSION}_windows_x86.exe
|
||||
mv sftpgo_portable.zip sftpgo_${SFTPGO_VERSION}_windows_portable.zip
|
||||
gh release create "${SFTPGO_VERSION}" -t "${SFTPGO_VERSION}"
|
||||
gh release upload "${SFTPGO_VERSION}" sftpgo_*.xz --clobber
|
||||
gh release upload "${SFTPGO_VERSION}" sftpgo-*.rpm --clobber
|
||||
gh release upload "${SFTPGO_VERSION}" sftpgo_*.deb --clobber
|
||||
gh release upload "${SFTPGO_VERSION}" sftpgo_*.exe --clobber
|
||||
gh release upload "${SFTPGO_VERSION}" sftpgo_*.zip --clobber
|
||||
gh release view "${SFTPGO_VERSION}"
|
||||
env:
|
||||
GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}}
|
||||
SFTPGO_VERSION: ${{ steps.get_version.outputs.SFTPGO_VERSION }}
|
||||
.gitignore (new file, 3 lines)
@@ -0,0 +1,3 @@
|
||||
# compilation output
|
||||
sftpgo
|
||||
sftpgo.exe
|
||||
.golangci.yml (new file, 52 lines)
@@ -0,0 +1,52 @@
|
||||
run:
|
||||
timeout: 10m
|
||||
issues-exit-code: 1
|
||||
tests: true
|
||||
|
||||
|
||||
linters-settings:
|
||||
dupl:
|
||||
threshold: 150
|
||||
errcheck:
|
||||
check-type-assertions: false
|
||||
check-blank: false
|
||||
goconst:
|
||||
min-len: 3
|
||||
min-occurrences: 3
|
||||
gocyclo:
|
||||
min-complexity: 15
|
||||
gofmt:
|
||||
simplify: true
|
||||
goimports:
|
||||
local-prefixes: github.com/drakkan/sftpgo
|
||||
#govet:
|
||||
# report about shadowed variables
|
||||
#check-shadowing: true
|
||||
#enable:
|
||||
# - fieldalignment
|
||||
|
||||
issues:
|
||||
include:
|
||||
- EXC0002
|
||||
- EXC0012
|
||||
- EXC0013
|
||||
- EXC0014
|
||||
- EXC0015
|
||||
|
||||
linters:
|
||||
enable:
|
||||
- goconst
|
||||
- errcheck
|
||||
- gofmt
|
||||
- goimports
|
||||
- revive
|
||||
- unconvert
|
||||
- unparam
|
||||
- bodyclose
|
||||
- gocyclo
|
||||
- misspell
|
||||
- whitespace
|
||||
- dupl
|
||||
- rowserrcheck
|
||||
- dogsled
|
||||
- govet
|
||||
.travis.yml (deleted, 24 lines)
@@ -1,24 +0,0 @@
|
||||
language: go
|
||||
|
||||
os:
|
||||
- linux
|
||||
- osx
|
||||
|
||||
go:
|
||||
- "1.12.x"
|
||||
- "1.13.x"
|
||||
|
||||
env:
|
||||
- GO111MODULE=on
|
||||
|
||||
before_script:
|
||||
- sqlite3 sftpgo.db 'CREATE TABLE "users" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "username" varchar(255) NOT NULL UNIQUE, "password" varchar(255) NULL, "public_keys" text NULL, "home_dir" varchar(255) NOT NULL, "uid" integer NOT NULL, "gid" integer NOT NULL, "max_sessions" integer NOT NULL, "quota_size" bigint NOT NULL, "quota_files" integer NOT NULL, "permissions" text NOT NULL, "used_quota_size" bigint NOT NULL, "used_quota_files" integer NOT NULL, "last_quota_update" bigint NOT NULL, "upload_bandwidth" integer NOT NULL, "download_bandwidth" integer NOT NULL);'
|
||||
|
||||
install:
|
||||
- go get -v -t ./...
|
||||
|
||||
script:
|
||||
- go test -v ./... -coverprofile=coverage.txt -covermode=atomic
|
||||
|
||||
after_success:
|
||||
- bash <(curl -s https://codecov.io/bash)
|
||||
CODEOWNERS (new file, 1 line)
@@ -0,0 +1 @@
|
||||
* @drakkan
|
||||
DCO (new file, 34 lines)
@@ -0,0 +1,34 @@
|
||||
Developer Certificate of Origin
|
||||
Version 1.1
|
||||
|
||||
Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
|
||||
|
||||
Everyone is permitted to copy and distribute verbatim copies of this
|
||||
license document, but changing it is not allowed.
|
||||
|
||||
|
||||
Developer's Certificate of Origin 1.1
|
||||
|
||||
By making a contribution to this project, I certify that:
|
||||
|
||||
(a) The contribution was created in whole or in part by me and I
|
||||
have the right to submit it under the open source license
|
||||
indicated in the file; or
|
||||
|
||||
(b) The contribution is based upon previous work that, to the best
|
||||
of my knowledge, is covered under an appropriate open source
|
||||
license and I have the right under that license to submit that
|
||||
work with modifications, whether created in whole or in part
|
||||
by me, under the same open source license (unless I am
|
||||
permitted to submit under a different license), as indicated
|
||||
in the file; or
|
||||
|
||||
(c) The contribution was provided directly to me by some other
|
||||
person who certified (a), (b) or (c) and I have not modified
|
||||
it.
|
||||
|
||||
(d) I understand and agree that this project and the contribution
|
||||
are public and that a record of the contribution (including all
|
||||
personal information I submit with it, including my sign-off) is
|
||||
maintained indefinitely and may be redistributed consistent with
|
||||
this project or the open source license(s) involved.
|
||||
Dockerfile (new file, 68 lines)
@@ -0,0 +1,68 @@
|
||||
FROM golang:1.19-bullseye as builder
|
||||
|
||||
ENV GOFLAGS="-mod=readonly"
|
||||
|
||||
RUN mkdir -p /workspace
|
||||
WORKDIR /workspace
|
||||
|
||||
ARG GOPROXY
|
||||
|
||||
COPY go.mod go.sum ./
|
||||
RUN go mod download
|
||||
|
||||
ARG COMMIT_SHA
|
||||
|
||||
# This ARG lets you disable some optional features; it might be useful if you build the image yourself.
|
||||
# For example you can disable S3 and GCS support like this:
|
||||
# --build-arg FEATURES=nos3,nogcs
|
||||
ARG FEATURES
|
||||
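# A local build overriding the optional features might look like this (the image tag
# is illustrative only):
#   docker build -t sftpgo:custom --build-arg FEATURES=nos3,nogcs .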
|
||||
COPY . .
|
||||
|
||||
RUN set -xe && \
|
||||
export COMMIT_SHA=${COMMIT_SHA:-$(git describe --always --abbrev=8 --dirty)} && \
|
||||
go build $(if [ -n "${FEATURES}" ]; then echo "-tags ${FEATURES}"; fi) -trimpath -ldflags "-s -w -X github.com/drakkan/sftpgo/v2/internal/version.commit=${COMMIT_SHA} -X github.com/drakkan/sftpgo/v2/internal/version.date=`date -u +%FT%TZ`" -v -o sftpgo
|
||||
|
||||
# Set to "true" to download the "official" plugins in /usr/local/bin
|
||||
ARG DOWNLOAD_PLUGINS=false
|
||||
|
||||
RUN if [ "${DOWNLOAD_PLUGINS}" = "true" ]; then apt-get update && apt-get install --no-install-recommends -y curl && ./docker/scripts/download-plugins.sh; fi
|
||||
|
||||
RUN apt-get update && apt-get install --no-install-recommends -y openssh-server && rm -rf /var/lib/apt/lists/*
|
||||
|
||||
FROM debian:bullseye-slim
|
||||
|
||||
# Set to "true" to install jq and the optional git and rsync dependencies
|
||||
ARG INSTALL_OPTIONAL_PACKAGES=false
|
||||
|
||||
RUN apt-get update && apt-get install --no-install-recommends -y ca-certificates media-types && rm -rf /var/lib/apt/lists/*
|
||||
|
||||
RUN if [ "${INSTALL_OPTIONAL_PACKAGES}" = "true" ]; then apt-get update && apt-get install --no-install-recommends -y jq git rsync && rm -rf /var/lib/apt/lists/*; fi
|
||||
|
||||
RUN mkdir -p /etc/sftpgo /var/lib/sftpgo /usr/share/sftpgo /srv/sftpgo/data /srv/sftpgo/backups
|
||||
|
||||
RUN groupadd --system -g 1000 sftpgo && \
|
||||
useradd --system --gid sftpgo --no-create-home \
|
||||
--home-dir /var/lib/sftpgo --shell /usr/sbin/nologin \
|
||||
--comment "SFTPGo user" --uid 1000 sftpgo
|
||||
|
||||
COPY --from=builder /workspace/sftpgo.json /etc/sftpgo/sftpgo.json
|
||||
COPY --from=builder /etc/ssh/moduli /etc/sftpgo/moduli
|
||||
COPY --from=builder /workspace/templates /usr/share/sftpgo/templates
|
||||
COPY --from=builder /workspace/static /usr/share/sftpgo/static
|
||||
COPY --from=builder /workspace/openapi /usr/share/sftpgo/openapi
|
||||
COPY --from=builder /workspace/sftpgo /usr/local/bin/sftpgo-plugin-* /usr/local/bin/
|
||||
|
||||
# Log to stdout so the logs will be available using docker logs
|
||||
ENV SFTPGO_LOG_FILE_PATH=""
|
||||
|
||||
# Modify the default configuration file
|
||||
RUN sed -i 's|"users_base_dir": "",|"users_base_dir": "/srv/sftpgo/data",|' /etc/sftpgo/sftpgo.json && \
|
||||
sed -i 's|"backups"|"/srv/sftpgo/backups"|' /etc/sftpgo/sftpgo.json
|
||||
|
||||
RUN chown -R sftpgo:sftpgo /etc/sftpgo /srv/sftpgo && chown sftpgo:sftpgo /var/lib/sftpgo && chmod 700 /srv/sftpgo/backups
|
||||
|
||||
WORKDIR /var/lib/sftpgo
|
||||
USER 1000:1000
|
||||
|
||||
CMD ["sftpgo", "serve"]
|
||||
Dockerfile.alpine (new file, 63 lines)
@@ -0,0 +1,63 @@
|
||||
FROM golang:1.19-alpine3.16 AS builder
|
||||
|
||||
ENV GOFLAGS="-mod=readonly"
|
||||
|
||||
RUN apk add --update --no-cache bash ca-certificates curl git gcc g++
|
||||
|
||||
RUN mkdir -p /workspace
|
||||
WORKDIR /workspace
|
||||
|
||||
ARG GOPROXY
|
||||
|
||||
COPY go.mod go.sum ./
|
||||
RUN go mod download
|
||||
|
||||
ARG COMMIT_SHA
|
||||
|
||||
# This ARG lets you disable some optional features; it might be useful if you build the image yourself.
|
||||
# For example you can disable S3 and GCS support like this:
|
||||
# --build-arg FEATURES=nos3,nogcs
|
||||
ARG FEATURES
|
||||
|
||||
COPY . .
|
||||
|
||||
RUN set -xe && \
|
||||
export COMMIT_SHA=${COMMIT_SHA:-$(git describe --always --abbrev=8 --dirty)} && \
|
||||
go build $(if [ -n "${FEATURES}" ]; then echo "-tags ${FEATURES}"; fi) -trimpath -ldflags "-s -w -X github.com/drakkan/sftpgo/v2/internal/version.commit=${COMMIT_SHA} -X github.com/drakkan/sftpgo/v2/internal/version.date=`date -u +%FT%TZ`" -v -o sftpgo
|
||||
|
||||
RUN apk add --update --no-cache openssh-client-common
|
||||
|
||||
FROM alpine:3.16
|
||||
|
||||
# Set to "true" to install jq and the optional git and rsync dependencies
|
||||
ARG INSTALL_OPTIONAL_PACKAGES=false
|
||||
|
||||
RUN apk add --update --no-cache ca-certificates tzdata mailcap
|
||||
|
||||
RUN if [ "${INSTALL_OPTIONAL_PACKAGES}" = "true" ]; then apk add --update --no-cache jq git rsync; fi
|
||||
|
||||
RUN mkdir -p /etc/sftpgo /var/lib/sftpgo /usr/share/sftpgo /srv/sftpgo/data /srv/sftpgo/backups
|
||||
|
||||
RUN addgroup -g 1000 -S sftpgo && \
|
||||
adduser -u 1000 -h /var/lib/sftpgo -s /sbin/nologin -G sftpgo -S -D -H -g "SFTPGo user" sftpgo
|
||||
|
||||
COPY --from=builder /workspace/sftpgo.json /etc/sftpgo/sftpgo.json
|
||||
COPY --from=builder /etc/ssh/moduli /etc/sftpgo/moduli
|
||||
COPY --from=builder /workspace/templates /usr/share/sftpgo/templates
|
||||
COPY --from=builder /workspace/static /usr/share/sftpgo/static
|
||||
COPY --from=builder /workspace/openapi /usr/share/sftpgo/openapi
|
||||
COPY --from=builder /workspace/sftpgo /usr/local/bin/
|
||||
|
||||
# Log to stdout so the logs will be available using docker logs
|
||||
ENV SFTPGO_LOG_FILE_PATH=""
|
||||
|
||||
# Modify the default configuration file
|
||||
RUN sed -i 's|"users_base_dir": "",|"users_base_dir": "/srv/sftpgo/data",|' /etc/sftpgo/sftpgo.json && \
|
||||
sed -i 's|"backups"|"/srv/sftpgo/backups"|' /etc/sftpgo/sftpgo.json
|
||||
|
||||
RUN chown -R sftpgo:sftpgo /etc/sftpgo /srv/sftpgo && chown sftpgo:sftpgo /var/lib/sftpgo && chmod 700 /srv/sftpgo/backups
|
||||
|
||||
WORKDIR /var/lib/sftpgo
|
||||
USER 1000:1000
|
||||
|
||||
CMD ["sftpgo", "serve"]
|
||||
Dockerfile.distroless (new file, 58 lines)
@@ -0,0 +1,58 @@
|
||||
FROM golang:1.19-bullseye as builder
|
||||
|
||||
ENV CGO_ENABLED=0 GOFLAGS="-mod=readonly"
|
||||
|
||||
RUN mkdir -p /workspace
|
||||
WORKDIR /workspace
|
||||
|
||||
ARG GOPROXY
|
||||
|
||||
COPY go.mod go.sum ./
|
||||
RUN go mod download
|
||||
|
||||
ARG COMMIT_SHA
|
||||
|
||||
# This ARG lets you disable some optional features; it might be useful if you build the image yourself.
|
||||
# For this variant we disable SQLite support since it requires CGO, and therefore a C runtime, which is not installed
|
||||
# in distroless/static-* images
|
||||
ARG FEATURES
|
||||
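# A local build of this variant might look like this (the image tag is illustrative only):
#   docker build -f Dockerfile.distroless -t sftpgo:distroless --build-arg FEATURES=nosqlite .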
|
||||
COPY . .
|
||||
|
||||
RUN set -xe && \
|
||||
export COMMIT_SHA=${COMMIT_SHA:-$(git describe --always --abbrev=8 --dirty)} && \
|
||||
go build $(if [ -n "${FEATURES}" ]; then echo "-tags ${FEATURES}"; fi) -trimpath -ldflags "-s -w -X github.com/drakkan/sftpgo/v2/internal/version.commit=${COMMIT_SHA} -X github.com/drakkan/sftpgo/v2/internal/version.date=`date -u +%FT%TZ`" -v -o sftpgo
|
||||
|
||||
# Modify the default configuration file
|
||||
RUN sed -i 's|"users_base_dir": "",|"users_base_dir": "/srv/sftpgo/data",|' sftpgo.json && \
|
||||
sed -i 's|"backups"|"/srv/sftpgo/backups"|' sftpgo.json && \
|
||||
sed -i 's|"sqlite"|"bolt"|' sftpgo.json
|
||||
|
||||
RUN apt-get update && apt-get install --no-install-recommends -y media-types openssh-server && rm -rf /var/lib/apt/lists/*
|
||||
|
||||
RUN mkdir /etc/sftpgo /var/lib/sftpgo /srv/sftpgo
|
||||
|
||||
FROM gcr.io/distroless/static-debian11
|
||||
|
||||
COPY --from=builder --chown=1000:1000 /etc/sftpgo /etc/sftpgo
|
||||
COPY --from=builder --chown=1000:1000 /srv/sftpgo /srv/sftpgo
|
||||
COPY --from=builder --chown=1000:1000 /var/lib/sftpgo /var/lib/sftpgo
|
||||
COPY --from=builder --chown=1000:1000 /workspace/sftpgo.json /etc/sftpgo/sftpgo.json
|
||||
COPY --from=builder --chown=1000:1000 /etc/ssh/moduli /etc/sftpgo/moduli
|
||||
COPY --from=builder /workspace/templates /usr/share/sftpgo/templates
|
||||
COPY --from=builder /workspace/static /usr/share/sftpgo/static
|
||||
COPY --from=builder /workspace/openapi /usr/share/sftpgo/openapi
|
||||
COPY --from=builder /workspace/sftpgo /usr/local/bin/
|
||||
COPY --from=builder /etc/mime.types /etc/mime.types
|
||||
|
||||
# Log to stdout so the logs will be available using docker logs
|
||||
ENV SFTPGO_LOG_FILE_PATH=""
|
||||
# These env vars are required to avoid the following error when calling user.Current():
|
||||
# unable to get the current user: user: Current requires cgo or $USER set in environment
|
||||
ENV USER=sftpgo
|
||||
ENV HOME=/var/lib/sftpgo
|
||||
|
||||
WORKDIR /var/lib/sftpgo
|
||||
USER 1000:1000
|
||||
|
||||
CMD ["sftpgo", "serve"]
|
||||
LICENSE (modified, 145 changed lines)
@@ -1,5 +1,5 @@
|
||||
GNU GENERAL PUBLIC LICENSE
|
||||
Version 3, 29 June 2007
|
||||
GNU AFFERO GENERAL PUBLIC LICENSE
|
||||
Version 3, 19 November 2007
|
||||
|
||||
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
|
||||
Everyone is permitted to copy and distribute verbatim copies
|
||||
@@ -7,17 +7,15 @@
|
||||
|
||||
Preamble
|
||||
|
||||
The GNU General Public License is a free, copyleft license for
|
||||
software and other kinds of works.
|
||||
The GNU Affero General Public License is a free, copyleft license for
|
||||
software and other kinds of works, specifically designed to ensure
|
||||
cooperation with the community in the case of network server software.
|
||||
|
||||
The licenses for most software and other practical works are designed
|
||||
to take away your freedom to share and change the works. By contrast,
|
||||
the GNU General Public License is intended to guarantee your freedom to
|
||||
our General Public Licenses are intended to guarantee your freedom to
|
||||
share and change all versions of a program--to make sure it remains free
|
||||
software for all its users. We, the Free Software Foundation, use the
|
||||
GNU General Public License for most of our software; it applies also to
|
||||
any other work released this way by its authors. You can apply it to
|
||||
your programs, too.
|
||||
software for all its users.
|
||||
|
||||
When we speak of free software, we are referring to freedom, not
|
||||
price. Our General Public Licenses are designed to make sure that you
|
||||
@@ -26,44 +24,34 @@ them if you wish), that you receive source code or can get it if you
|
||||
want it, that you can change the software or use pieces of it in new
|
||||
free programs, and that you know you can do these things.
|
||||
|
||||
To protect your rights, we need to prevent others from denying you
|
||||
these rights or asking you to surrender the rights. Therefore, you have
|
||||
certain responsibilities if you distribute copies of the software, or if
|
||||
you modify it: responsibilities to respect the freedom of others.
|
||||
Developers that use our General Public Licenses protect your rights
|
||||
with two steps: (1) assert copyright on the software, and (2) offer
|
||||
you this License which gives you legal permission to copy, distribute
|
||||
and/or modify the software.
|
||||
|
||||
For example, if you distribute copies of such a program, whether
|
||||
gratis or for a fee, you must pass on to the recipients the same
|
||||
freedoms that you received. You must make sure that they, too, receive
|
||||
or can get the source code. And you must show them these terms so they
|
||||
know their rights.
|
||||
A secondary benefit of defending all users' freedom is that
|
||||
improvements made in alternate versions of the program, if they
|
||||
receive widespread use, become available for other developers to
|
||||
incorporate. Many developers of free software are heartened and
|
||||
encouraged by the resulting cooperation. However, in the case of
|
||||
software used on network servers, this result may fail to come about.
|
||||
The GNU General Public License permits making a modified version and
|
||||
letting the public access it on a server without ever releasing its
|
||||
source code to the public.
|
||||
|
||||
Developers that use the GNU GPL protect your rights with two steps:
|
||||
(1) assert copyright on the software, and (2) offer you this License
|
||||
giving you legal permission to copy, distribute and/or modify it.
|
||||
The GNU Affero General Public License is designed specifically to
|
||||
ensure that, in such cases, the modified source code becomes available
|
||||
to the community. It requires the operator of a network server to
|
||||
provide the source code of the modified version running there to the
|
||||
users of that server. Therefore, public use of a modified version, on
|
||||
a publicly accessible server, gives the public access to the source
|
||||
code of the modified version.
|
||||
|
||||
For the developers' and authors' protection, the GPL clearly explains
|
||||
that there is no warranty for this free software. For both users' and
|
||||
authors' sake, the GPL requires that modified versions be marked as
|
||||
changed, so that their problems will not be attributed erroneously to
|
||||
authors of previous versions.
|
||||
|
||||
Some devices are designed to deny users access to install or run
|
||||
modified versions of the software inside them, although the manufacturer
|
||||
can do so. This is fundamentally incompatible with the aim of
|
||||
protecting users' freedom to change the software. The systematic
|
||||
pattern of such abuse occurs in the area of products for individuals to
|
||||
use, which is precisely where it is most unacceptable. Therefore, we
|
||||
have designed this version of the GPL to prohibit the practice for those
|
||||
products. If such problems arise substantially in other domains, we
|
||||
stand ready to extend this provision to those domains in future versions
|
||||
of the GPL, as needed to protect the freedom of users.
|
||||
|
||||
Finally, every program is threatened constantly by software patents.
|
||||
States should not allow patents to restrict development and use of
|
||||
software on general-purpose computers, but in those that do, we wish to
|
||||
avoid the special danger that patents applied to a free program could
|
||||
make it effectively proprietary. To prevent this, the GPL assures that
|
||||
patents cannot be used to render the program non-free.
|
||||
An older license, called the Affero General Public License and
|
||||
published by Affero, was designed to accomplish similar goals. This is
|
||||
a different license, not a version of the Affero GPL, but Affero has
|
||||
released a new version of the Affero GPL which permits relicensing under
|
||||
this license.
|
||||
|
||||
The precise terms and conditions for copying, distribution and
|
||||
modification follow.
|
||||
@@ -72,7 +60,7 @@ modification follow.
|
||||
|
||||
0. Definitions.
|
||||
|
||||
"This License" refers to version 3 of the GNU General Public License.
|
||||
"This License" refers to version 3 of the GNU Affero General Public License.
|
||||
|
||||
"Copyright" also means copyright-like laws that apply to other kinds of
|
||||
works, such as semiconductor masks.
|
||||
@@ -549,35 +537,45 @@ to collect a royalty for further conveying from those to whom you convey
|
||||
the Program, the only way you could satisfy both those terms and this
|
||||
License would be to refrain entirely from conveying the Program.
|
||||
|
||||
13. Use with the GNU Affero General Public License.
|
||||
13. Remote Network Interaction; Use with the GNU General Public License.
|
||||
|
||||
Notwithstanding any other provision of this License, if you modify the
|
||||
Program, your modified version must prominently offer all users
|
||||
interacting with it remotely through a computer network (if your version
|
||||
supports such interaction) an opportunity to receive the Corresponding
|
||||
Source of your version by providing access to the Corresponding Source
|
||||
from a network server at no charge, through some standard or customary
|
||||
means of facilitating copying of software. This Corresponding Source
|
||||
shall include the Corresponding Source for any work covered by version 3
|
||||
of the GNU General Public License that is incorporated pursuant to the
|
||||
following paragraph.
|
||||
|
||||
Notwithstanding any other provision of this License, you have
|
||||
permission to link or combine any covered work with a work licensed
|
||||
under version 3 of the GNU Affero General Public License into a single
|
||||
under version 3 of the GNU General Public License into a single
|
||||
combined work, and to convey the resulting work. The terms of this
|
||||
License will continue to apply to the part which is the covered work,
|
||||
but the special requirements of the GNU Affero General Public License,
|
||||
section 13, concerning interaction through a network will apply to the
|
||||
combination as such.
|
||||
but the work with which it is combined will remain governed by version
|
||||
3 of the GNU General Public License.
|
||||
|
||||
14. Revised Versions of this License.
|
||||
|
||||
The Free Software Foundation may publish revised and/or new versions of
|
||||
the GNU General Public License from time to time. Such new versions will
|
||||
be similar in spirit to the present version, but may differ in detail to
|
||||
the GNU Affero General Public License from time to time. Such new versions
|
||||
will be similar in spirit to the present version, but may differ in detail to
|
||||
address new problems or concerns.
|
||||
|
||||
Each version is given a distinguishing version number. If the
|
||||
Program specifies that a certain numbered version of the GNU General
|
||||
Program specifies that a certain numbered version of the GNU Affero General
|
||||
Public License "or any later version" applies to it, you have the
|
||||
option of following the terms and conditions either of that numbered
|
||||
version or of any later version published by the Free Software
|
||||
Foundation. If the Program does not specify a version number of the
|
||||
GNU General Public License, you may choose any version ever published
|
||||
GNU Affero General Public License, you may choose any version ever published
|
||||
by the Free Software Foundation.
|
||||
|
||||
If the Program specifies that a proxy can decide which future
|
||||
versions of the GNU General Public License can be used, that proxy's
|
||||
versions of the GNU Affero General Public License can be used, that proxy's
|
||||
public statement of acceptance of a version permanently authorizes you
|
||||
to choose that version for the Program.
|
||||
|
||||
@@ -635,40 +633,29 @@ the "copyright" line and a pointer to where the full notice is found.
|
||||
Copyright (C) <year> <name of author>
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
it under the terms of the GNU Affero General Public License as published
|
||||
by the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
GNU Affero General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
You should have received a copy of the GNU Affero General Public License
|
||||
along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
Also add information on how to contact you by electronic and paper mail.
|
||||
|
||||
If the program does terminal interaction, make it output a short
|
||||
notice like this when it starts in an interactive mode:
|
||||
|
||||
<program> Copyright (C) <year> <name of author>
|
||||
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
|
||||
This is free software, and you are welcome to redistribute it
|
||||
under certain conditions; type `show c' for details.
|
||||
|
||||
The hypothetical commands `show w' and `show c' should show the appropriate
|
||||
parts of the General Public License. Of course, your program's commands
|
||||
might be different; for a GUI interface, you would use an "about box".
|
||||
If your software can interact with users remotely through a computer
|
||||
network, you should also make sure that it provides a way for users to
|
||||
get its source. For example, if your program is a web application, its
|
||||
interface could display a "Source" link that leads users to an archive
|
||||
of the code. There are many ways you could offer source, and different
|
||||
solutions will be better for different programs; see section 13 for the
|
||||
specific requirements.
|
||||
|
||||
You should also get your employer (if you work as a programmer) or school,
|
||||
if any, to sign a "copyright disclaimer" for the program, if necessary.
|
||||
For more information on this, and how to apply and follow the GNU GPL, see
|
||||
<https://www.gnu.org/licenses/>.
|
||||
|
||||
The GNU General Public License does not permit incorporating your program
|
||||
into proprietary programs. If your program is a subroutine library, you
|
||||
may consider it more useful to permit linking proprietary applications with
|
||||
the library. If this is what you want to do, use the GNU Lesser General
|
||||
Public License instead of this License. But first, please read
|
||||
<https://www.gnu.org/licenses/why-not-lgpl.html>.
|
||||
For more information on this, and how to apply and follow the GNU AGPL, see
|
||||
<https://www.gnu.org/licenses/>.
|
||||
README.md (modified, 641 changed lines)
@@ -1,410 +1,363 @@
|
||||
# SFTPGo
|
||||
[](https://travis-ci.org/drakkan/sftpgo) [](https://codecov.io/gh/drakkan/sftpgo/branch/master) [](https://goreportcard.com/report/github.com/drakkan/sftpgo) [](https://www.gnu.org/licenses/gpl-3.0) [](https://github.com/avelino/awesome-go)
|
||||
|
||||
Full featured and highly configurable SFTP server
|
||||
[](https://github.com/drakkan/sftpgo/workflows/CI/badge.svg?branch=main&event=push)
|
||||
[](https://codecov.io/gh/drakkan/sftpgo/branch/main)
|
||||
[](https://www.gnu.org/licenses/agpl-3.0)
|
||||
[](https://hub.docker.com/r/drakkan/sftpgo)
|
||||
[](https://github.com/avelino/awesome-go)
|
||||
|
||||
[English](./README.md) | [简体中文](./README.zh_CN.md)
|
||||
|
||||
Fully featured and highly configurable SFTP server with optional HTTP/S, FTP/S and WebDAV support.
|
||||
Several storage backends are supported: local filesystem, encrypted local filesystem, S3 (compatible) Object Storage, Google Cloud Storage, Azure Blob Storage, SFTP.
|
||||
|
||||
## Sponsors
|
||||
|
||||
If you find SFTPGo useful please consider supporting this Open Source project.
|
||||
|
||||
Maintaining and evolving SFTPGo is a lot of work - easily the equivalent of a full time job - for me.
|
||||
|
||||
I'd like to make SFTPGo into a sustainable long term project and would not like to introduce a dual licensing option and limit some features to the proprietary version only.
|
||||
|
||||
If you use SFTPGo, it is in your best interest to ensure that the project you rely on stays healthy and well maintained.
|
||||
This can only happen with your donations and [sponsorships](https://github.com/sponsors/drakkan) :heart:
|
||||
|
||||
If you just take and don't give anything back, the project will die in the long run and you will be forced to pay for a similar proprietary solution.
|
||||
|
||||
More [info](https://github.com/drakkan/sftpgo/issues/452).
|
||||
|
||||
### Thank you to our sponsors
|
||||
|
||||
#### Platinum sponsors
|
||||
|
||||
[<img src="./img/Aledade_logo.png" alt="Aledade logo" width="202" height="70">](https://www.aledade.com/)
|
||||
|
||||
#### Bronze sponsors
|
||||
|
||||
[<img src="https://www.7digital.com/wp-content/themes/sevendigital/images/top_logo.png" alt="7digital logo">](https://www.7digital.com/)
|
||||
|
||||
## Support policy
|
||||
|
||||
SFTPGo is an Open Source project and you can of course use it for free but please don't ask for free support as well.
|
||||
|
||||
We will check the reported issues to see if you are experiencing a bug and, if so, we'll fix it, but we will only provide support to project [sponsors/donors](#sponsors).
|
||||
|
||||
If you report an invalid issue or ask for step-by-step support, your issue will remain open with no answer or will be closed as invalid without further explanation. Thanks for understanding.
|
||||
|
||||
## Features
|
||||
|
||||
- Each account is chrooted to his Home Dir.
|
||||
- SFTP accounts are virtual accounts stored in a "data provider".
|
||||
- SQLite, MySQL, PostgreSQL and bbolt (key/value store in pure Go) data providers are supported.
|
||||
- Public key and password authentication. Multiple public keys per user are supported.
|
||||
- Quota support: accounts can have individual quota expressed as max total size and/or max number of files.
|
||||
- Bandwidth throttling is supported, with distinct settings for upload and download.
|
||||
- Per user maximum concurrent sessions.
|
||||
- Per user permissions: list directories content, upload, overwrite, download, delete, rename, create directories, create symlinks can be enabled or disabled.
|
||||
- Per user files/folders ownership: you can map all the users to the system account that runs SFTPGo (all platforms are supported) or you can run SFTPGo as root user and map each user or group of users to a different system account (*NIX only).
|
||||
- Configurable custom commands and/or HTTP notifications on upload, download, delete or rename.
|
||||
- Support for serving local filesystem, encrypted local filesystem, S3 Compatible Object Storage, Google Cloud Storage, Azure Blob Storage or other SFTP accounts over SFTP/SCP/FTP/WebDAV.
|
||||
- Virtual folders are supported: a virtual folder can use any of the supported storage backends. So you can have, for example, an S3 user that exposes a GCS bucket (or part of it) on a specified path and an encrypted local filesystem on another one. Virtual folders can be private or shared among multiple users, for shared virtual folders you can define different quota limits for each user.
|
||||
- Configurable [custom commands and/or HTTP hooks](./docs/custom-actions.md) on upload, pre-upload, download, pre-download, delete, pre-delete, rename, mkdir, rmdir on SSH commands and on user add, update and delete.
|
||||
- Virtual accounts stored within a "data provider".
|
||||
- SQLite, MySQL, PostgreSQL, CockroachDB, Bolt (key/value store in pure Go) and in-memory data providers are supported.
|
||||
- Chroot isolation for local accounts. Cloud-based accounts can be restricted to a certain base path.
|
||||
- Per-user and per-directory virtual permissions, for each exposed path you can allow or deny: directory listing, upload, overwrite, download, delete, rename, create directories, create symlinks, change owner/group/file mode and modification time.
|
||||
- [REST API](./docs/rest-api.md) for users and folders management, data retention, backup, restore and real time reports of the active connections with possibility of forcibly closing a connection.
|
||||
- The [Event Manager](./docs/eventmanager.md) lets you define custom workflows based on server events or schedules.
|
||||
- [Web based administration interface](./docs/web-admin.md) to easily manage users, folders and connections.
|
||||
- [Web client interface](./docs/web-client.md) so that end users can change their credentials, manage and share their files in the browser.
|
||||
- Public key and password authentication. Multiple public keys per-user are supported.
|
||||
- SSH user [certificate authentication](https://cvsweb.openbsd.org/src/usr.bin/ssh/PROTOCOL.certkeys?rev=1.8).
|
||||
- Keyboard interactive authentication. You can easily set up customizable multi-factor authentication.
|
||||
- Partial authentication. You can configure multi-step authentication requiring, for example, the user password after successful public key authentication.
|
||||
- Per-user authentication methods.
|
||||
- [Two-factor authentication](./docs/howto/two-factor-authentication.md) based on time-based one time passwords (RFC 6238) which works with Authy, Google Authenticator and other compatible apps.
|
||||
- Simplified user administrations using [groups](./docs/groups.md).
|
||||
- Custom authentication via [external programs/HTTP API](./docs/external-auth.md).
|
||||
- Web Client and Web Admin user interfaces support [OpenID Connect](https://openid.net/connect/) authentication and so they can be integrated with identity providers such as [Keycloak](https://www.keycloak.org/). You can find more details [here](./docs/oidc.md).
|
||||
- [Data At Rest Encryption](./docs/dare.md).
|
||||
- Dynamic user modification before login via [external programs/HTTP API](./docs/dynamic-user-mod.md).
|
||||
- Quota support: accounts can have individual disk quota expressed as max total size and/or max number of files.
|
||||
- Bandwidth throttling, with separate settings for upload and download and overrides based on the client's IP address.
|
||||
- Data transfer bandwidth limits, with total limit or separate settings for uploads and downloads and overrides based on the client's IP address. Limits can be reset using the REST API.
|
||||
- Per-protocol [rate limiting](./docs/rate-limiting.md) is supported and can be optionally connected to the built-in defender to automatically block hosts that repeatedly exceed the configured limit.
|
||||
- Per-user maximum concurrent sessions.
|
||||
- Per-user and global IP filters: login can be restricted to specific ranges of IP addresses or to a specific IP address.
|
||||
- Per-user and per-directory shell-like pattern filters: files can be allowed, denied and optionally hidden based on shell-like patterns.
|
||||
- Automatically terminating idle connections.
|
||||
- Automatic blocklist management using the built-in [defender](./docs/defender.md).
|
||||
- Geo-IP filtering using a [plugin](https://github.com/sftpgo/sftpgo-plugin-geoipfilter).
|
||||
- Atomic uploads are configurable.
|
||||
- Optional SCP support.
|
||||
- REST API for users and quota management and real time reports for the active connections with possibility of forcibly closing a connection.
|
||||
- Prometheus metrics are exposed.
|
||||
- Configuration format is at your choice: JSON, TOML, YAML, HCL, envfile are supported.
|
||||
- Log files are accurate and they are saved in the easily parsable JSON format.
|
||||
- Per-user files/folders ownership mapping: you can map all the users to the system account that runs SFTPGo (all platforms are supported) or you can run SFTPGo as root user and map each user or group of users to a different system account (\*NIX only).
|
||||
- Support for Git repositories over SSH.
|
||||
- SCP and rsync are supported.
|
||||
- FTP/S is supported. You can configure the FTP service to require TLS for both control and data connections.
|
||||
- [WebDAV](./docs/webdav.md) is supported.
|
||||
- ACME protocol is supported. SFTPGo can obtain and automatically renew TLS certificates for HTTPS, WebDAV and FTPS from `Let's Encrypt` or other ACME compliant certificate authorities, using the `HTTP-01` or `TLS-ALPN-01` [challenge types](https://letsencrypt.org/docs/challenge-types/).
|
||||
- Two-Way TLS authentication, aka TLS with client certificate authentication, is supported for REST API/Web Admin, FTPS and WebDAV over HTTPS.
|
||||
- Per-user protocols restrictions. You can configure the allowed protocols (SSH/HTTP/FTP/WebDAV) for each user.
|
||||
- [Prometheus metrics](./docs/metrics.md) are exposed.
|
||||
- Support for HAProxy PROXY protocol: you can proxy and/or load balance the SFTP/SCP/FTP service without losing the information about the client's address.
|
||||
- Easy [migration](./examples/convertusers) from Linux system user accounts.
|
||||
- [Portable mode](./docs/portable-mode.md): a convenient way to share a single directory on demand.
|
||||
- [SFTP subsystem mode](./docs/sftp-subsystem.md): you can use SFTPGo as OpenSSH's SFTP subsystem.
|
||||
- Performance analysis using built-in [profiler](./docs/profiling.md).
|
||||
- Configuration format is at your choice: JSON, TOML, YAML, HCL, envfile are supported.
|
||||
- Log files are accurate and they are saved in the easily parsable JSON format ([more information](./docs/logs.md)).
|
||||
- SFTPGo supports a [plugin system](./docs/plugins.md) and therefore can be extended using external plugins.
|
||||
|
||||
## Platforms
|
||||
|
||||
SFTPGo is developed and tested on Linux. After each commit the code is automatically built and tested on Linux and macOS using Travis CI.
|
||||
The test cases are regularly executed manually and pass on Windows. Other UNIX variants such as *BSD should work too.
|
||||
SFTPGo is developed and tested on Linux. After each commit, the code is automatically built and tested on Linux, macOS and Windows using [GitHub Actions](./.github/workflows/development.yml). The test cases are regularly executed manually and pass on FreeBSD. Other *BSD variants should work too.
|
||||
|
||||
## Requirements
|
||||
|
||||
- Go 1.12 or higher.
|
||||
- A suitable SQL server or key/value store to use as data provider: PostgreSQL 9.4+ or MySQL 5.6+ or SQLite 3.x or bbolt 1.3.x
|
||||
- Go, as a build-only dependency. We support the Go version(s) used in [continuous integration workflows](./.github/workflows).
|
||||
- A suitable SQL server to use as data provider:
|
||||
- upstream supported versions of PostgreSQL, MySQL and MariaDB.
|
||||
- CockroachDB stable.
|
||||
- The SQL server is optional: you can choose to use an embedded SQLite, bolt or in memory data provider.
|
||||
|
||||
## Installation
|
||||
|
||||
Simply install the package to your [$GOPATH](https://github.com/golang/go/wiki/GOPATH "GOPATH") with the [go tool](https://golang.org/cmd/go/ "go command") from shell:
|
||||
Binary releases for Linux, macOS, and Windows are available. Please visit the [releases](https://github.com/drakkan/sftpgo/releases "releases") page.
|
||||
|
||||
```
|
||||
$ go get -u github.com/drakkan/sftpgo
|
||||
```
|
||||
An official Docker image is available. Documentation is [here](./docker/README.md).
|
||||
|
||||
Make sure [Git is installed](https://git-scm.com/downloads) on your machine and in your system's `PATH`.
|
||||
<details>
|
||||
|
||||
SFTPGo depends on [go-sqlite3](https://github.com/mattn/go-sqlite3) that is a CGO package and so it requires a `C` compiler at build time.
|
||||
On Linux and macOS a compiler is easy to install or already installed; on Windows you need to download [MinGW-w64](https://sourceforge.net/projects/mingw-w64/files/) and build SFTPGo from its command prompt.
|
||||
<summary>Some Linux distro packages are available</summary>
|
||||
|
||||
The compiler is a build-time-only dependency; it is not required at runtime.
|
||||
- For Arch Linux via AUR:
|
||||
- [sftpgo](https://aur.archlinux.org/packages/sftpgo/). This package follows stable releases. It requires `git`, `gcc` and `go` to build.
|
||||
- [sftpgo-bin](https://aur.archlinux.org/packages/sftpgo-bin/). This package follows stable releases downloading the prebuilt linux binary from GitHub. It does not require `git`, `gcc` and `go` to build.
|
||||
- [sftpgo-git](https://aur.archlinux.org/packages/sftpgo-git/). This package builds and installs the latest git `main` branch. It requires `git`, `gcc` and `go` to build.
|
||||
- Deb and RPM packages are built after each commit and for each release.
|
||||
- For Ubuntu a PPA is available [here](https://launchpad.net/~sftpgo/+archive/ubuntu/sftpgo).
|
||||
- Void Linux provides an [official package](https://github.com/void-linux/void-packages/tree/master/srcpkgs/sftpgo).
|
||||
|
||||
If you don't need SQLite, you can also get/build SFTPGo with the environment variable `CGO_ENABLED` set to 0. This way SQLite support will be disabled, but PostgreSQL, MySQL and bbolt will still work and you don't need a `C` compiler for building.
|
||||
</details>
|
||||
|
||||
Version info, such as git commit and build date, can be embedded by setting the following string variables at build time:
|
||||
APT and YUM repositories are [available](./docs/repo.md).
|
||||
|
||||
- `github.com/drakkan/sftpgo/utils.commit`
|
||||
- `github.com/drakkan/sftpgo/utils.date`
|
||||
SFTPGo is also available on some marketplaces:
|
||||
|
||||
For example you can build using the following command:
|
||||
- [AWS Marketplace](https://aws.amazon.com/marketplace/seller-profile?id=6e849ab8-70a6-47de-9a43-13c3fa849335)
|
||||
- [Azure Marketplace](https://azuremarketplace.microsoft.com/en-us/marketplace/apps/eliamarzia1667381463185.sftpgo_linux)
|
||||
- [Elest.io](https://elest.io/open-source/sftpgo)
|
||||
|
||||
```
|
||||
go build -i -ldflags "-s -w -X github.com/drakkan/sftpgo/utils.commit=`git describe --always --dirty` -X github.com/drakkan/sftpgo/utils.date=`date -u +%FT%TZ`" -o sftpgo
|
||||
```
|
||||
Purchasing from there will help keep SFTPGo a long-term sustainable project.
|
||||
|
||||
and you will get a version that includes git commit and build date like this one:
|
||||
<details><summary>Windows packages</summary>
|
||||
|
||||
```
|
||||
sftpgo -v
|
||||
SFTPGo version: 0.9.0-dev-90607d4-dirty-2019-08-08T19:28:36Z
|
||||
```
|
||||
- The Windows installer to install and run SFTPGo as a Windows service.
|
||||
- The portable package to start SFTPGo on demand.
|
||||
- The [winget](https://docs.microsoft.com/en-us/windows/package-manager/winget/install) package to install and run SFTPGo as a Windows service: `winget install SFTPGo`.
|
||||
- The [Chocolatey package](https://community.chocolatey.org/packages/sftpgo) to install and run SFTPGo as a Windows service.
|
||||
|
||||
For Linux, a `systemd` sample [service](https://github.com/drakkan/sftpgo/tree/master/init/sftpgo.service "systemd service") can be found inside the source tree.
|
||||
</details>
|
||||
|
||||
Alternately you can use distro packages:
|
||||
On macOS you can install from the Homebrew [Formula](https://formulae.brew.sh/formula/sftpgo).
|
||||
On FreeBSD you can install from the [SFTPGo port](https://www.freshports.org/ftp/sftpgo).
|
||||
On DragonFlyBSD you can install SFTPGo from [DPorts](https://github.com/DragonFlyBSD/DPorts/tree/master/ftp/sftpgo).
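For example, assuming the package names match the formula and port linked above, the install commands would be:

```bash
brew install sftpgo   # macOS, via the Homebrew formula
pkg install sftpgo    # FreeBSD, via the binary package for the port
```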
|
||||
|
||||
- Arch Linux PKGBUILD is available on [AUR](https://aur.archlinux.org/packages/sftpgo/ "SFTPGo")
|
||||
You can easily test new features selecting a commit from the [Actions](https://github.com/drakkan/sftpgo/actions) page and downloading the matching build artifacts for Linux, macOS or Windows. GitHub stores artifacts for 90 days.
|
||||
|
||||
For macOS a `launchd` sample [service](https://github.com/drakkan/sftpgo/tree/master/init/com.github.drakkan.sftpgo.plist "launchd plist") can be found inside the source tree. The `launchd` plist assumes that `sftpgo` has `/usr/local/opt/sftpgo` as base directory.
|
||||
Alternately, you can [build from source](./docs/build-from-source.md).
|
||||
|
||||
On Windows you can run `SFTPGo` as Windows Service, please read the "Configuration" section below for more details.
|
||||
[Getting Started Guide for the Impatient](./docs/howto/getting-started.md).
|
||||
|
||||
## Configuration
|
||||
|
||||
The `sftpgo` executable can be used this way:
|
||||
A full explanation of all configuration methods can be found [here](./docs/full-configuration.md).
|
||||
|
||||
```
|
||||
Usage:
|
||||
sftpgo [command]
|
||||
Please make sure to [initialize the data provider](#data-provider-initialization-and-management) before running the daemon.
|
||||
|
||||
Available Commands:
|
||||
help Help about any command
|
||||
serve Start the SFTP Server
|
||||
To start SFTPGo with the default settings, simply run:
|
||||
|
||||
Flags:
|
||||
-h, --help help for sftpgo
|
||||
-v, --version
|
||||
|
||||
Use "sftpgo [command] --help" for more information about a command
|
||||
```
|
||||
|
||||
The `serve` subcommand supports the following flags:
|
||||
|
||||
- `--config-dir` string. Location of the config dir. This directory should contain the `sftpgo` configuration file and is used as the base for files with a relative path (e.g. the private keys for the SFTP server, the SQLite or bbolt database if you use SQLite or bbolt as data provider). The default value is "." or the value of `SFTPGO_CONFIG_DIR` environment variable.
|
||||
- `--config-file` string. Name of the configuration file. It must be the name of a file stored in config-dir, not the absolute path to the configuration file. The specified file name must have no extension; we automatically load JSON, YAML, TOML, HCL and Java properties. The default value is "sftpgo" (and therefore `sftpgo.json`, `sftpgo.yaml` and so on are searched) or the value of `SFTPGO_CONFIG_FILE` environment variable.
|
||||
- `--log-compress` boolean. Determine if the rotated log files should be compressed using gzip. Default `false` or the value of `SFTPGO_LOG_COMPRESS` environment variable (1 or `true`, 0 or `false`). It is unused if `log-file-path` is empty.
|
||||
- `--log-file-path` string. Location for the log file, default "sftpgo.log" or the value of `SFTPGO_LOG_FILE_PATH` environment variable. Leave empty to write logs to the standard error.
|
||||
- `--log-max-age` int. Maximum number of days to retain old log files. Default 28 or the value of `SFTPGO_LOG_MAX_AGE` environment variable. It is unused if `log-file-path` is empty.
|
||||
- `--log-max-backups` int. Maximum number of old log files to retain. Default 5 or the value of `SFTPGO_LOG_MAX_BACKUPS` environment variable. It is unused if `log-file-path` is empty.
|
||||
- `--log-max-size` int. Maximum size in megabytes of the log file before it gets rotated. Default 10 or the value of `SFTPGO_LOG_MAX_SIZE` environment variable. It is unused if `log-file-path` is empty.
|
||||
- `--log-verbose` boolean. Enable verbose logs. Default `true` or the value of `SFTPGO_LOG_VERBOSE` environment variable (1 or `true`, 0 or `false`).
|
||||
|
||||
If you don't configure any private host keys, the daemon will use `id_rsa` in the configuration directory. If that file doesn't exist, the daemon will attempt to autogenerate it (if the user that executes SFTPGo has write access to the config-dir). The server supports any private key format supported by [`crypto/ssh`](https://github.com/golang/crypto/blob/master/ssh/keys.go#L32).
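If you prefer to provide the host key yourself instead of relying on autogeneration, you can, for example, pre-generate it in the config-dir with `ssh-keygen` (just a sketch; any private key format supported by `crypto/ssh` will work):

```bash
# generate an RSA host key named id_rsa in the current (config) directory, without a passphrase
ssh-keygen -t rsa -b 4096 -f id_rsa -N ""
```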
|
||||
|
||||
Before starting `sftpgo`, a data provider must be configured.
|
||||
|
||||
Sample SQL scripts to create the required database structure can be found inside the source tree [sql](https://github.com/drakkan/sftpgo/tree/master/sql "sql") directory. The SQL script file names are, by convention, the date in `YYYYMMDD` format with the `.sql` suffix. You need to apply all the SQL scripts for your database ordered by name, for example `20190706.sql` must be applied before `20190728.sql` and so on.
|
||||
|
||||
The `sftpgo` configuration file contains the following sections:
|
||||
|
||||
- **"sftpd"**, the configuration for the SFTP server
|
||||
- `bind_port`, integer. The port used for serving SFTP requests. Default: 2022
|
||||
- `bind_address`, string. Leave blank to listen on all available network interfaces. Default: ""
|
||||
- `idle_timeout`, integer. Time in minutes after which an idle client will be disconnected. 0 means disabled. Default: 15
|
||||
- `max_auth_tries` integer. Maximum number of authentication attempts permitted per connection. If set to a negative number, the number of attempts is unlimited. If set to zero, the number of attempts is limited to 6.
|
||||
- `umask`, string. Umask for the new files and directories. This setting has no effect on Windows. Default: "0022"
|
||||
- `banner`, string. Identification string used by the server. Leave empty to use the default banner. Default "SFTPGo_version"
|
||||
- `upload_mode` integer. 0 means standard, the files are uploaded directly to the requested path. 1 means atomic: files are uploaded to a temporary path and renamed to the requested path when the client ends the upload. Atomic mode avoids problems such as a web server that serves partial files when the files are being uploaded. In atomic mode if there is an upload error the temporary file is deleted and so the requested upload path will not contain a partial file.
|
||||
- `actions`, struct. It contains the command to execute and/or the HTTP URL to notify and the trigger conditions (a sample notification command is sketched after this configuration list)
|
||||
- `execute_on`, list of strings. Valid values are `download`, `upload`, `delete`, `rename`. On folder deletion a `delete` notification will be sent for each deleted file. Actions will not be executed if an error is detected and so a partial file is uploaded or downloaded. Leave empty to disable actions. The `upload` condition includes both uploads of new files and overwrites of existing files
|
||||
- `command`, string. Absolute path to the command to execute. Leave empty to disable. The command is invoked with the following arguments:
|
||||
- `action`, any valid `execute_on` string
|
||||
- `username`, user who did the action
|
||||
- `path` to the affected file. For `rename` action this is the old file name
|
||||
- `target_path`, non empty for `rename` action, this is the new file name
|
||||
- `http_notification_url`, a valid URL. An HTTP GET request will be executed to this URL. Leave empty to disable. The query string will contain the following parameters that have the same meaning as the command's arguments:
|
||||
- `action`
|
||||
- `username`
|
||||
- `path`
|
||||
- `target_path`, added for `rename` action only
|
||||
- `keys`, struct array. It contains the daemon's private keys. If empty or missing the daemon will search or try to generate `id_rsa` in the configuration directory.
|
||||
- `private_key`, path to the private key file. It can be a path relative to the config dir or an absolute one.
|
||||
- `enable_scp`, boolean. Default disabled. Set to `true` to enable SCP support. SCP is an experimental feature, we have our own SCP implementation since we can't rely on the `scp` system command to properly handle permissions, quota and user's home dir restrictions. The SCP protocol is quite simple but there are no official docs about it, so we need more testing and feedback before enabling it by default. We may not handle some borderline cases or have sneaky bugs. Please do accurate tests yourself before enabling SCP and let us know if something does not work as expected for your use cases. SCP between two remote hosts is supported using the `-3` scp option.
|
||||
- `kex_algorithms`, list of strings. Available KEX (Key Exchange) algorithms in preference order. Leave empty to use default values. The supported values can be found here: [`crypto/ssh`](https://github.com/golang/crypto/blob/master/ssh/common.go#L46 "Supported kex algos")
|
||||
- `ciphers`, list of strings. Allowed ciphers. Leave empty to use default values. The supported values can be found here: [`crypto/ssh`](https://github.com/golang/crypto/blob/master/ssh/common.go#L28 "Supported ciphers")
|
||||
- `macs`, list of strings. Available MAC (message authentication code) algorithms in preference order. Leave empty to use default values. The supported values can be found here: [`crypto/ssh`](https://github.com/golang/crypto/blob/master/ssh/common.go#L84 "Supported MACs")
|
||||
- `login_banner_file`, path to the login banner file. The contents of the specified file, if any, are sent to the remote user before authentication is allowed. It can be a path relative to the config dir or an absolute one. Leave empty to send no login banner
|
||||
- **"data_provider"**, the configuration for the data provider
|
||||
- `driver`, string. Supported drivers are `sqlite`, `mysql`, `postgresql`, `bolt`
|
||||
- `name`, string. Database name. For driver `sqlite` this can be the database name relative to the config dir or the absolute path to the SQLite database.
|
||||
- `host`, string. Database host. Leave empty for driver `sqlite` and `bolt`
|
||||
- `port`, integer. Database port. Leave empty for driver `sqlite` and `bolt`
|
||||
- `username`, string. Database user. Leave empty for driver `sqlite` and `bolt`
|
||||
- `password`, string. Database password. Leave empty for driver `sqlite` and `bolt`
|
||||
- `sslmode`, integer. Used for drivers `mysql` and `postgresql`. 0 disables SSL/TLS connections, 1 requires SSL, 2 sets the SSL mode to `verify-ca` for driver `postgresql` and `skip-verify` for driver `mysql`, 3 sets the SSL mode to `verify-full` for driver `postgresql` and `preferred` for driver `mysql`
|
||||
- `connectionstring`, string. Provide a custom database connection string. If not empty, this connection string will be used instead of building one from the previous parameters. Leave empty for driver `bolt`
|
||||
- `users_table`, string. Database table for SFTP users
|
||||
- `manage_users`, integer. Set to 0 to disable users management, 1 to enable
|
||||
- `track_quota`, integer. Set the preferred way to track users quota between the following choices:
|
||||
- 0, disable quota tracking. REST API to scan user dir and update quota will do nothing
|
||||
- 1, quota is updated each time a user uploads or deletes a file, even if the user has no quota restrictions
|
||||
- 2, quota is updated each time a user uploads or deletes a file, but only for users with quota restrictions. With this configuration the "quota scan" REST API can still be used to periodically update space usage for users without quota restrictions
|
||||
- `pool_size`, integer. Sets the maximum number of open connections for `mysql` and `postgresql` driver. Default 0 (unlimited)
|
||||
- **"httpd"**, the configuration for the HTTP server used to serve REST API
|
||||
- `bind_port`, integer. The port used for serving HTTP requests. Set to 0 to disable HTTP server. Default: 8080
|
||||
- `bind_address`, string. Leave blank to listen on all available network interfaces. Default: "127.0.0.1"
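As a minimal sketch of the `actions` notification command described above (a hypothetical script, not shipped with SFTPGo), the executable could be a small shell script that just logs what it receives:

```bash
#!/bin/sh
# hypothetical upload/download/delete/rename notification script;
# SFTPGo invokes it with: action, username, path and, for rename, target_path
ACTION="$1"
USERNAME="$2"
FILE_PATH="$3"
TARGET_PATH="$4"
echo "$(date -u +%FT%TZ) action=${ACTION} user=${USERNAME} path=${FILE_PATH} target=${TARGET_PATH}" >> /tmp/sftpgo-actions.log
```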
|
||||
|
||||
Here is a full example showing the default config in JSON format:
|
||||
|
||||
```json
|
||||
{
|
||||
"sftpd": {
|
||||
"bind_port": 2022,
|
||||
"bind_address": "",
|
||||
"idle_timeout": 15,
|
||||
"max_auth_tries": 0,
|
||||
"umask": "0022",
|
||||
"banner": "SFTPGo",
|
||||
"actions": {
|
||||
"execute_on": [],
|
||||
"command": "",
|
||||
"http_notification_url": ""
|
||||
},
|
||||
"keys": [],
|
||||
"enable_scp": false
|
||||
},
|
||||
"data_provider": {
|
||||
"driver": "sqlite",
|
||||
"name": "sftpgo.db",
|
||||
"host": "",
|
||||
"port": 5432,
|
||||
"username": "",
|
||||
"password": "",
|
||||
"sslmode": 0,
|
||||
"connection_string": "",
|
||||
"users_table": "users",
|
||||
"manage_users": 1,
|
||||
"track_quota": 2,
|
||||
"pool_size": 0
|
||||
},
|
||||
"httpd": {
|
||||
"bind_port": 8080,
|
||||
"bind_address": "127.0.0.1"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
If you want to use a private key that uses an algorithm other than RSA, or more than one private key, then replace the empty `keys` array with something like this:
|
||||
|
||||
```json
|
||||
"keys": [
|
||||
{
|
||||
"private_key": "id_rsa"
|
||||
},
|
||||
{
|
||||
"private_key": "id_ecdsa"
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
The configuration can be read from JSON, TOML, YAML, HCL, envfile and Java properties config files. If your `config-file` flag is set to `sftpgo` (default value), you need to create a configuration file called `sftpgo.json` or `sftpgo.yaml` and so on inside `config-dir`.
|
||||
|
||||
You can also override all the available configuration options using environment variables. SFTPGo will check for environment variables with a name matching the key uppercased and prefixed with `SFTPGO_`. You need to use `__` to traverse a struct.
|
||||
|
||||
Let's see some examples:
|
||||
|
||||
- To set sftpd `bind_port` you need to define the env var `SFTPGO_SFTPD__BIND_PORT`
|
||||
- To set the `execute_on` actions you need to define the env var `SFTPGO_SFTPD__ACTIONS__EXECUTE_ON` for example `SFTPGO_SFTPD__ACTIONS__EXECUTE_ON=upload,download`
|
||||
|
||||
Please note that to override configuration options with environment variables a configuration file containing the options to override is required. You can, for example, deploy the default configuration file and then override the options you need to customize using environment variables.
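For example, assuming the default configuration file is deployed in the current directory, you could override the SFTP port and the notification actions from the environment like this:

```bash
SFTPGO_SFTPD__BIND_PORT=2222 \
SFTPGO_SFTPD__ACTIONS__EXECUTE_ON=upload,download \
sftpgo serve
```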
|
||||
|
||||
To start the SFTP Server with the default values for the command line flags simply use:
|
||||
|
||||
```
|
||||
```bash
|
||||
sftpgo serve
|
||||
```
|
||||
|
||||
On Windows you can register `SFTPGo` as Windows Service, take a look at the CLI usage to learn how:
|
||||
Check out [this documentation](./docs/service.md) if you want to run SFTPGo as a service.
|
||||
|
||||
```
|
||||
sftpgo.exe service --help
|
||||
Install, Uninstall, Start, Stop and retrieve status for SFTPGo Windows Service
|
||||
### Data provider initialization and management
|
||||
|
||||
Usage:
|
||||
sftpgo service [command]
|
||||
Before starting the SFTPGo server please ensure that the configured data provider is properly initialized/updated.
|
||||
|
||||
Available Commands:
|
||||
install Install SFTPGo as Windows Service
|
||||
start Start SFTPGo Windows Service
|
||||
status Retrieve the status for the SFTPGo Windows Service
|
||||
stop Stop SFTPGo Windows Service
|
||||
uninstall Uninstall SFTPGo Windows Service
|
||||
For PostgreSQL, MySQL and CockroachDB providers, you need to create the configured database. For SQLite, the configured database will be automatically created at startup. Memory and bolt data providers do not require an initialization but they could require an update to the existing data after upgrading SFTPGo.
|
||||
|
||||
Flags:
|
||||
-h, --help help for service
|
||||
SFTPGo will attempt to automatically detect if the data provider is initialized/updated and, if not, will attempt to initialize/update it on startup as needed.
|
||||
|
||||
Use "sftpgo service [command] --help" for more information about a command.
|
||||
Alternately, you can create/update the required data provider structures yourself using the `initprovider` command.
|
||||
|
||||
For example, you can simply execute the following command from the configuration directory:
|
||||
|
||||
```bash
|
||||
sftpgo initprovider
|
||||
```
|
||||
|
||||
The `install` subcommand accepts the same flags valid for `serve`.
|
||||
Take a look at the CLI usage to learn how to specify a different configuration file:
|
||||
|
||||
After installing as Windows Service please remember to allow network access to the SFTPGo executable using something like this:
|
||||
|
||||
```
|
||||
netsh advfirewall firewall add rule name="SFTPGo Service" dir=in action=allow program="C:\Program Files\SFTPGo\sftpgo.exe"
|
||||
```bash
|
||||
sftpgo initprovider --help
|
||||
```
|
||||
|
||||
or through the Windows Firewall GUI.
|
||||
You can disable automatic data provider checks/updates at startup by setting the `update_mode` configuration key to `1`.
|
||||
|
||||
You can also reset your provider by using the `resetprovider` sub-command. Take a look at the CLI usage for more details:
|
||||
|
||||
```bash
|
||||
sftpgo resetprovider --help
|
||||
```
|
||||
|
||||
:warning: Please note that some data providers (e.g. MySQL and CockroachDB) do not support schema changes within a transaction; this means that you may end up with an inconsistent schema if migrations are forcibly aborted. CockroachDB doesn't support database-level locks, so make sure you don't execute migrations concurrently.
|
||||
|
||||
## Create the first admin
|
||||
|
||||
To start using SFTPGo you need to create an admin user. You can do this in several ways:
|
||||
|
||||
- by using the web admin interface. The default URL is [http://127.0.0.1:8080/web/admin](http://127.0.0.1:8080/web/admin)
|
||||
- by loading initial data
|
||||
- by enabling `create_default_admin` in your configuration file and setting the environment variables `SFTPGO_DEFAULT_ADMIN_USERNAME` and `SFTPGO_DEFAULT_ADMIN_PASSWORD`
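For example, a minimal sketch of the last option, using placeholder credentials:

```bash
# requires create_default_admin enabled in the configuration file
export SFTPGO_DEFAULT_ADMIN_USERNAME=admin
export SFTPGO_DEFAULT_ADMIN_PASSWORD=changeme
sftpgo serve
```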
|
||||
|
||||
## Upgrading
|
||||
|
||||
SFTPGo supports upgrading from the previous release branch to the current one.
|
||||
Some examples for supported upgrade paths are:
|
||||
|
||||
- from 1.2.x to 2.0.x
|
||||
- from 2.0.x to 2.1.x and so on.
|
||||
|
||||
For supported upgrade paths, the data and schema are migrated automatically; alternately you can use the `initprovider` command.
|
||||
|
||||
So if, for example, you want to upgrade from a version before 1.2.x to 2.0.x, you must first install version 1.2.x, update the data provider and finally install version 2.0.x. It is recommended to always install the latest available minor version, i.e. do not install 1.2.0 if 1.2.2 is available.
|
||||
|
||||
Loading data from a provider independent JSON dump is supported from the previous release branch to the current one too. After upgrading SFTPGo it is advisable to regenerate the JSON dump from the new version.
|
||||
|
||||
## Downgrading
|
||||
|
||||
If for some reason you want to downgrade SFTPGo, you may need to downgrade your data provider schema and data as well. You can use the `revertprovider` command for this task.
|
||||
|
||||
As for upgrading, SFTPGo supports downgrading from the previous release branch to the current one.
|
||||
|
||||
So, if you plan to downgrade from 2.0.x to 1.2.x, before uninstalling 2.0.x version, you can prepare your data provider executing the following command from the configuration directory:
|
||||
|
||||
```shell
|
||||
sftpgo revertprovider --to-version 4
|
||||
```
|
||||
|
||||
Take a look at the CLI usage to see the supported parameter for the `--to-version` argument and to learn how to specify a different configuration file:
|
||||
|
||||
```shell
|
||||
sftpgo revertprovider --help
|
||||
```
|
||||
|
||||
The `revertprovider` command is not supported for the memory provider.
|
||||
|
||||
Please note that we only support the current release branch and the current main branch, if you find a bug it is better to report it rather than downgrading to an older unsupported version.
|
||||
|
||||
## Users, groups and folders management
|
||||
|
||||
After starting SFTPGo you can manage users, groups, folders and other resources using:
|
||||
|
||||
- the [web based administration interface](./docs/web-admin.md)
|
||||
- the [REST API](./docs/rest-api.md)
|
||||
|
||||
To support embedded data providers like `bolt` and `SQLite`, which do not support concurrent connections, we can't have a CLI that directly writes users and other resources to the data provider; we always have to use the REST API.
|
||||
|
||||
Full details for users, groups, folders, admins and other resources are documented in the [OpenAPI](./openapi/openapi.yaml) schema. If you want to render the schema without importing it manually, you can explore it on [Stoplight](https://sftpgo.stoplight.io/docs/sftpgo/openapi.yaml).
|
||||
|
||||
:warning: SFTPGo users, groups and folders are virtual and therefore unrelated to the system ones. There is no need to create system-wide users and groups.
|
||||
|
||||
## Tutorials
|
||||
|
||||
Some step-by-step tutorials can be found inside the source tree [howto](./docs/howto "How-to") directory.
|
||||
|
||||
## Authentication options
|
||||
|
||||
<details><summary> External Authentication</summary>
|
||||
|
||||
Custom authentication methods can easily be added. SFTPGo supports external authentication modules, and writing a new backend can be as simple as a few lines of shell script. More information can be found [here](./docs/external-auth.md).
|
||||
|
||||
</details>
|
||||
|
||||
<details><summary> Keyboard Interactive Authentication</summary>
|
||||
|
||||
Keyboard interactive authentication is, in general, a series of questions asked by the server with responses provided by the client.
|
||||
This authentication method is typically used for multi-factor authentication.
|
||||
|
||||
More information can be found [here](./docs/keyboard-interactive.md).
|
||||
|
||||
</details>
|
||||
|
||||
## Dynamic user creation or modification
|
||||
|
||||
A user can be created or modified by an external program just before the login. More information about this can be found [here](./docs/dynamic-user-mod.md).
|
||||
|
||||
## Custom Actions
|
||||
|
||||
SFTPGo allows you to configure custom commands and/or HTTP hooks to receive notifications about file uploads, deletions and several other events.
|
||||
|
||||
More information about custom actions can be found [here](./docs/custom-actions.md).
|
||||
|
||||
## Virtual folders
|
||||
|
||||
Directories outside the user home directory or based on a different storage provider can be exposed as virtual folders, more information [here](./docs/virtual-folders.md).
|
||||
|
||||
## Other hooks
|
||||
|
||||
You can get notified as soon as a new connection is established using the [Post-connect hook](./docs/post-connect-hook.md) and after each login using the [Post-login hook](./docs/post-login-hook.md).
|
||||
You can use your own hook to [check passwords](./docs/check-password-hook.md).
|
||||
|
||||
## Storage backends
|
||||
|
||||
### S3/GCP/Azure
|
||||
|
||||
Each user can be mapped with a [S3 Compatible Object Storage](./docs/s3.md) /[Google Cloud Storage](./docs/google-cloud-storage.md)/[Azure Blob Storage](./docs/azure-blob-storage.md) bucket or a bucket virtual folder that is exposed over SFTP/SCP/FTP/WebDAV.
|
||||
|
||||
### SFTP backend
|
||||
|
||||
Each user can be mapped to another SFTP server account or a subfolder of it. More information can be found [here](./docs/sftpfs.md).
|
||||
|
||||
### Encrypted backend
|
||||
|
||||
Data at-rest encryption is supported via the [cryptfs backend](./docs/dare.md).
|
||||
|
||||
### HTTP/S backend
|
||||
|
||||
HTTP/S backend allows you to write your own custom storage backend by implementing a REST API. More information can be found [here](./docs/httpfs.md).
|
||||
|
||||
### Other Storage backends
|
||||
|
||||
Adding new storage backends is quite easy:
|
||||
|
||||
- implement the [Fs interface](./vfs/vfs.go#L28 "interface for filesystem backends").
|
||||
- update the user method `GetFilesystem` to return the new backend
|
||||
- update the web interface and the REST API CLI
|
||||
- add the flags for the new storage backend to the `portable` mode
|
||||
|
||||
Anyway, some backends require a pay-per-use account (or they offer a free account for a limited time period only). To be able to add support for such backends or to review pull requests, please provide a test account. The test account must be available for enough time to be able to maintain the backend and do basic tests before each new release.
|
||||
|
||||
## Brute force protection
|
||||
|
||||
SFTPGo supports a built-in [defender](./docs/defender.md).
|
||||
|
||||
Alternately you can use the [connection failed logs](./docs/logs.md) for integration in tools such as [Fail2ban](http://www.fail2ban.org/). Examples of [jails](./fail2ban/jails) and [filters](./fail2ban/filters) working with `systemd`/`journald` are available in the fail2ban directory.
|
||||
|
||||
## Account's configuration properties
|
||||
|
||||
For each account the following properties can be configured:
|
||||
Detailed information about account configuration properties can be found [here](./docs/account.md).
|
||||
|
||||
- `username`
|
||||
- `password` used for password authentication. For users created using SFTPGo REST API if the password has no known hashing algo prefix it will be stored using argon2id. SFTPGo supports checking passwords stored with bcrypt, pbkdf2 and sha512crypt too. For pbkdf2 the supported format is `$<algo>$<iterations>$<salt>$<hashed pwd base64 encoded>`, where algo is `pbkdf2-sha1` or `pbkdf2-sha256` or `pbkdf2-sha512`. For example the `pbkdf2-sha256` of the word `password` using 150000 iterations and `E86a9YMX3zC7` as salt must be stored as `$pbkdf2-sha256$150000$E86a9YMX3zC7$R5J62hsSq+pYw00hLLPKBbcGXmq7fj5+/M0IFoYtZbo=`. For bcrypt the format must be the one supported by golang's [crypto/bcrypt](https://godoc.org/golang.org/x/crypto/bcrypt) package, for example the password `secret` with cost `14` must be stored as `$2a$14$ajq8Q7fbtFRQvXpdCq7Jcuy.Rx1h/L4J60Otx.gyNLbAYctGMJ9tK`. For sha512crypt we support the format used in `/etc/shadow` with the `$6$` prefix, this is useful if you are migrating from system user's accounts. Using the REST API you can send a password hashed as bcrypt, pbkdf2 or sha512crypt and it will be stored as is.
|
||||
- `public_keys` array of public keys. At least one public key or the password is mandatory.
|
||||
- `home_dir` The user cannot upload or download files outside this directory. Must be an absolute path
|
||||
- `uid`, `gid`. If sftpgo runs as the root system user then the created files and directories will be assigned to this system uid/gid. Ignored on Windows and if sftpgo runs as a non-root user: in this case files and directories for all SFTP users will be owned by the system user that runs sftpgo.
|
||||
- `max_sessions` maximum concurrent sessions. 0 means unlimited
|
||||
- `quota_size` maximum size allowed as bytes. 0 means unlimited
|
||||
- `quota_files` maximum number of files allowed. 0 means unlimited
|
||||
- `permissions` the following permissions are supported:
|
||||
- `*` all permissions are granted
|
||||
- `list` list items is allowed
|
||||
- `download` download files is allowed
|
||||
- `upload` upload files is allowed
|
||||
- `overwrite` overwrite an existing file, while uploading, is allowed. `upload` permission is required to allow file overwrite
|
||||
- `delete` delete files or directories is allowed
|
||||
- `rename` rename files or directories is allowed
|
||||
- `create_dirs` create directories is allowed
|
||||
- `create_symlinks` create symbolic links is allowed
|
||||
- `upload_bandwidth` maximum upload bandwidth as KB/s, 0 means unlimited
|
||||
- `download_bandwidth` maximum download bandwidth as KB/s, 0 means unlimited
|
||||
## Performance
|
||||
|
||||
These properties are stored inside the data provider. If you want to use your existing accounts, you can create a database view. Since a view is read only, you have to disable user management and quota tracking so SFTPGo will never try to write to the view.
|
||||
SFTPGo can easily saturate a Gigabit connection on low-end hardware with no special configuration; this is generally enough for most use cases.
|
||||
|
||||
## REST API
|
||||
More in-depth analysis of performance can be found [here](./docs/performance.md).
|
||||
|
||||
SFTPGo exposes REST API to manage users and quota and to get real time reports for the active connections with possibility of forcibly closing a connection.
|
||||
## Release Cadence
|
||||
|
||||
If quota tracking is enabled in the `sftpgo` configuration file, then the used size and number of files are updated each time a file is added/removed. If files are added/removed outside of SFTP, or if you change `track_quota` from `2` to `1`, you can rescan the user's home dir and update the used quota using the REST API.
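For example, a hypothetical quota scan request against the default HTTP binding could look like this (the exact request body may differ, check the OpenAPI schema for your version):

```bash
# start a quota scan for the user "user1"
curl -X POST -H "Content-Type: application/json" \
  -d '{"username": "user1"}' \
  http://127.0.0.1:8080/api/v1/quota_scan
```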
|
||||
|
||||
The REST API is designed to run on localhost or on a trusted network. If you need HTTPS or authentication, you can set up a reverse proxy using an HTTP server such as Apache or NGINX.
|
||||
|
||||
For example, you can keep SFTPGo listening on localhost and expose it externally by configuring a reverse proxy using Apache HTTP Server this way:
|
||||
|
||||
```
|
||||
ProxyPass /api/v1 http://127.0.0.1:8080/api/v1
|
||||
ProxyPassReverse /api/v1 http://127.0.0.1:8080/api/v1
|
||||
```
|
||||
|
||||
and you can add authentication with something like this:
|
||||
|
||||
```
|
||||
<Location /api/v1>
|
||||
AuthType Digest
|
||||
AuthName "Private"
|
||||
AuthDigestDomain "/api/v1"
|
||||
AuthDigestProvider file
|
||||
AuthUserFile "/etc/httpd/conf/auth_digest"
|
||||
Require valid-user
|
||||
</Location>
|
||||
```
|
||||
|
||||
and, of course, you can configure the web server to use HTTPS.
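The digest file referenced above can be created, for example, with Apache's `htdigest` tool (the file path and the `admin` user name here are just placeholders):

```
htdigest -c /etc/httpd/conf/auth_digest "Private" admin
```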
|
||||
|
||||
The OpenAPI 3 schema for the exposed API can be found inside the source tree: [openapi.yaml](https://github.com/drakkan/sftpgo/tree/master/api/schema/openapi.yaml "OpenAPI 3 specs").
|
||||
|
||||
A sample CLI client for the REST API can be found inside the source tree [scripts](https://github.com/drakkan/sftpgo/tree/master/scripts "scripts") directory.
|
||||
|
||||
You can also generate your own REST client, in your preferred programming language or even bash scripts, using an OpenAPI generator such as [swagger-codegen](https://github.com/swagger-api/swagger-codegen) or [OpenAPI Generator](https://openapi-generator.tech/).
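For example, a hypothetical OpenAPI Generator invocation producing a Python client from the schema (the schema path depends on the SFTPGo version) could be:

```bash
# generate a Python client from the OpenAPI schema; the output directory is arbitrary
openapi-generator-cli generate -i openapi.yaml -g python -o ./sftpgo-client
```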
|
||||
|
||||
## Metrics
|
||||
|
||||
SFTPGo exposes [Prometheus](https://prometheus.io/) metrics at the `/metrics` HTTP endpoint.
|
||||
Several counters and gauges are available, for example:
|
||||
|
||||
- Total uploads and downloads
|
||||
- Total uploads and downloads size
|
||||
- Total uploads and downloads errors
|
||||
- Number of active connections
|
||||
- Data provider availability
|
||||
- Total successful and failed logins using a password or a public key
|
||||
- Total HTTP requests served and totals by response code
|
||||
- Go runtime details such as GC, number of goroutines and OS threads
|
||||
- Process information like CPU, memory, file descriptor usage and start time
|
||||
|
||||
Please check the `/metrics` page for more details.
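For example, a minimal Prometheus scrape configuration, assuming the SFTPGo HTTP server listens on the default `127.0.0.1:8080`, could look like this:

```
# prometheus.yml fragment (sketch)
scrape_configs:
  - job_name: "sftpgo"
    metrics_path: /metrics
    static_configs:
      - targets: ["127.0.0.1:8080"]
```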
|
||||
|
||||
## Logs
|
||||
|
||||
Inside the log file each line is a JSON struct; each struct has a `sender` field that identifies the log type (an illustrative line is shown after the field lists below).
|
||||
|
||||
The logs can be divided into the following categories:
|
||||
|
||||
- **"app logs"**, internal logs used to debug `sftpgo`:
|
||||
- `sender` string. This is generally the package name that emits the log
|
||||
- `time` string. Date/time with millisecond precision
|
||||
- `level` string
|
||||
- `message` string
|
||||
- **"transfer logs"**, SFTP/SCP transfer logs:
|
||||
- `sender` string. `Upload` or `Download`
|
||||
- `time` string. Date/time with millisecond precision
|
||||
- `level` string
|
||||
- `elapsed_ms`, int64. Elapsed time, as milliseconds, for the upload/download
|
||||
- `size_bytes`, int64. Size, as bytes, of the download/upload
|
||||
- `username`, string
|
||||
- `file_path` string
|
||||
- `connection_id` string. Unique connection identifier
|
||||
- `protocol` string. `SFTP` or `SCP`
|
||||
- **"command logs"**, SFTP/SCP command logs:
|
||||
- `sender` string. `Rename`, `Rmdir`, `Mkdir`, `Symlink`, `Remove`
|
||||
- `level` string
|
||||
- `username`, string
|
||||
- `file_path` string
|
||||
- `target_path` string
|
||||
- `connection_id` string. Unique connection identifier
|
||||
- `protocol` string. `SFTP` or `SCP`
|
||||
- **"http logs"**, REST API logs:
|
||||
- `sender` string. `httpd`
|
||||
- `level` string
|
||||
- `remote_addr` string. IP and port of the remote client
|
||||
- `proto` string, for example `HTTP/1.1`
|
||||
- `method` string. HTTP method (`GET`, `POST`, `PUT`, `DELETE` etc.)
|
||||
- `user_agent` string
|
||||
- `uri` string. Full uri
|
||||
- `resp_status` integer. HTTP response status code
|
||||
- `resp_size` integer. Size in bytes of the HTTP response
|
||||
- `elapsed_ms` int64. Elapsed time, as milliseconds, to complete the request
|
||||
- `request_id` string. Unique request identifier
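As an illustration of the fields listed above, a transfer log line could look like this (all values are made up):

```json
{"sender":"Upload","time":"2019-08-08T19:28:36.123","level":"info","elapsed_ms":145,"size_bytes":1048576,"username":"user1","file_path":"/srv/sftpgo/user1/file.txt","connection_id":"f2c1a9d0","protocol":"SFTP"}
```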
|
||||
SFTPGo releases are feature-driven, we don't have a fixed time based schedule. As a rough estimate, you can expect 1 or 2 new releases per year.
|
||||
|
||||
## Acknowledgements
|
||||
|
||||
- [pkg/sftp](https://github.com/pkg/sftp)
|
||||
- [go-chi](https://github.com/go-chi/chi)
|
||||
- [zerolog](https://github.com/rs/zerolog)
|
||||
- [lumberjack](https://gopkg.in/natefinch/lumberjack.v2)
|
||||
- [argon2id](https://github.com/alexedwards/argon2id)
|
||||
- [go-sqlite3](https://github.com/mattn/go-sqlite3)
|
||||
- [go-sql-driver/mysql](https://github.com/go-sql-driver/mysql)
|
||||
- [bbolt](https://github.com/etcd-io/bbolt)
|
||||
- [lib/pq](https://github.com/lib/pq)
|
||||
- [viper](https://github.com/spf13/viper)
|
||||
- [cobra](https://github.com/spf13/cobra)
|
||||
- [xid](https://github.com/rs/xid)
|
||||
- [nathanaelle/password](https://github.com/nathanaelle/password)
|
||||
SFTPGo makes use of the third party libraries listed inside [go.mod](./go.mod).
|
||||
|
||||
Some code was initially taken from [Pterodactyl sftp server](https://github.com/pterodactyl/sftp-server)
|
||||
We are very grateful to all the people who contributed with ideas and/or pull requests.
|
||||
|
||||
Thank you [ysura](https://www.ysura.com/) for granting me stable access to a test AWS S3 account.
|
||||
|
||||
## License
|
||||
|
||||
GNU GPLv3
|
||||
GNU AGPL-3.0-only
|
||||
|
||||
351
README.zh_CN.md
Normal file
@@ -0,0 +1,351 @@
|
||||
# SFTPGo
|
||||
|
||||
[](https://github.com/drakkan/sftpgo/workflows/CI/badge.svg?branch=main&event=push)
|
||||
[](https://codecov.io/gh/drakkan/sftpgo/branch/main)
|
||||
[](https://www.gnu.org/licenses/agpl-3.0)
|
||||
[](https://hub.docker.com/r/drakkan/sftpgo)
|
||||
[](https://github.com/avelino/awesome-go)
|
||||
|
||||
[English](./README.md) | [简体中文](./README.zh_CN.md)
|
||||
|
||||
功能齐全、高度可配置化、支持自定义 HTTP/S,FTP/S 和 WebDAV 的 SFTP 服务。
|
||||
一些存储后端支持:本地文件系统、加密本地文件系统、S3(兼容)对象存储,Google Cloud 存储,Azure Blob 存储,SFTP。
|
||||
|
||||
## 赞助商
|
||||
|
||||
如果你觉得 SFTPGo 有用,请考虑支持这个开源项目。
|
||||
|
||||
维护和发展 SFTPGo 对我来说是很多工作——很容易相当于一份全职工作。
|
||||
|
||||
我想让 SFTPGo 成为一个可持续的长期项目,并且不想引入双重许可选项并将某些功能仅限于专有版本。
|
||||
|
||||
如果您使用 SFTPGo,确保您所依赖的项目保持健康和维护良好符合您的最大利益。
|
||||
这只能通过您的捐款和[赞助](https://github.com/sponsors/drakkan) 发生:heart:
|
||||
|
||||
如果您只是拿走任何东西而不返回任何东西,从长远来看,该项目将失败,您将被迫为类似的专有解决方案付费。
|
||||
|
||||
[更多信息](https://github.com/drakkan/sftpgo/issues/452)。
|
||||
|
||||
### 感谢我们的赞助商
|
||||
|
||||
#### 白金赞助商
|
||||
|
||||
[<img src="./img/Aledade_logo.png" alt="Aledade logo" width="202" height="70">](https://www.aledade.com/)
|
||||
|
||||
#### 铜牌赞助商
|
||||
|
||||
[<img src="https://www.7digital.com/wp-content/themes/sevendigital/images/top_logo.png" alt="7digital logo">](https://www.7digital.com/)
|
||||
|
||||
## 支持政策
|
||||
|
||||
SFTPGo 是一个开源项目,您当然可以免费使用它,但也请不要要求免费支持。
|
||||
|
||||
我们将检查报告的问题以查看您是否遇到错误,如果是,我们将修复它,但只会为项目赞助商/捐助者提供支持。
|
||||
|
||||
如果您报告无效问题或要求逐步支持,您的问题将保持打开状态而没有答案,或者将被关闭为无效而无需进一步解释。 感谢您的理解。
|
||||
|
||||
## 特性
|
||||
|
||||
- 支持服务本地文件系统、加密本地文件系统、S3 兼容对象存储、Google Cloud 存储、Azure Blob 存储或其它基于 SFTP/SCP/FTP/WebDAV 协议的 SFTP 账户。
|
||||
- 虚拟目录支持:一个虚拟目录可以用于支持的存储后端。你可以,比如,一个 S3 用户暴露了一个 GCS bucket(或者其中一部分)在特定的路径下、一个加密本地文件系统在另一个。虚拟目录可以对于大量用户作为私密或者共享,分享虚拟目录你可以为每个用户定义不同的配额。
|
||||
- 可配置的 [自定义命令 和/或 HTTP 钩子](./docs/custom-actions.md) 在 SSH 命令的 upload, pre-upload, download, pre-download, delete, pre-delete, rename, mkdir, rmdir 阶段,和用户添加、更新、删除阶段。
|
||||
- 存储在 “数据提供程序” 中的虚拟账户。
|
||||
- 支持 SQLite, MySQL, PostgreSQL, CockroachDB, Bolt (Go 原生键/值存储) 和内存数据提供程序。
|
||||
- 为本地账户提供 Chroot 隔离。云端账户可以限制为特定的基本路径。
|
||||
- 每个用户和每个目录虚拟权限,对于每个暴露的路径你可以允许或禁止:目录展示、上传、覆盖、下载、删除、重命名、创建文件夹、创建软连接、修改 owner/group/file 模式和更改时间。
|
||||
- 为用户和目录管理提供、数据保留、备份、恢复和即时活动连接的实时报告,可能会强制关闭连接,提供 [REST API](./docs/rest-api.md)。
|
||||
- [基于 Web 的管理员界面](./docs/web-admin.md) 可以容易地管理用户、目录和连接。
|
||||
- [Web 客户端界面](./docs/web-client.md) 以便终端用户可以在浏览器中更改他们的凭据、管理和共享他们的文件。
|
||||
- 公钥和密码认证。支持每个用户多个公钥。
|
||||
- SSH 用户 [证书认证](https://cvsweb.openbsd.org/src/usr.bin/ssh/PROTOCOL.certkeys?rev=1.8).
|
||||
- 键盘交互认证。您可以轻松设置可定制的多因素身份认证。
|
||||
- 部分验证。你可以配置多步验证请求,例如,用户密码在公钥验证之后。
|
||||
- 每个用户的身份验证方法。
|
||||
- [双重验证](./docs/howto/two-factor-authentication.md) 基于实现一次性密码 (RFC 6238) 可以与 Authy、Google Authenticator 和其他兼容的应用程序配合使用。
|
||||
- 通过 [群组](./docs/groups.md) 精简用户管理。
|
||||
- 通过外部 程序/HTTP API 自定义验证。
|
||||
- Web 客户端和 Web 管理员他用户界面支持 [OpenID Connect](https://openid.net/connect/) 验证,所以它们很容易被集成在诸如 [Keycloak](https://www.keycloak.org/) 之类的身份认证程序。你可以在 [此](./docs/oidc.md) 获取更多信息。
|
||||
- [静态数据加密](./docs/dare.md)。
|
||||
- 在登录之前通过 程序/HTTP API 进行动态用户修改。
|
||||
- 配额支持:账户拥有独立的磁盘配额表示为总计最大体积 和/或 最大文件数量。
|
||||
- 带宽节流,基于客户端 IP 地址独立设置上传、下载和覆盖。
|
||||
- 数据传输带宽限制,限制总量或基于客户端 IP 地址设置上传、下载和覆盖。限制可以通过 REST API 重置。
|
||||
- 支持每个协议[限速](./docs/rate-limiting.md),可以可选与内置的防护连接实现自动封禁重复超过设置限制的主机。
|
||||
- 每个用户的最大并发会话。
|
||||
- 每个用户和全局 IP 过滤:登录可以被限制在特定的 IP 段和指定的 IP 地址。
|
||||
- 每个用户和每个文件夹类似于 shell 的模式过滤:文件可以被允许、禁止和隐藏基于类 shell 模式。
|
||||
- 自动使 idle 连接终止。
|
||||
- 通过内置的 [防护](./docs/defender.md) 自动管理禁止名单。
|
||||
- 通过 [插件](https://github.com/sftpgo/sftpgo-plugin-geoipfilter) 实现 地理-IP 过滤。
|
||||
- 原子上传是可配置的。
|
||||
- 每个用户 文件/目录 所有权映射:你可以将所有用户映射到运行 SFTPGo 的系统账户(所有的平台都是支持的),或者你可以使用 root 用户运行 SFTPGo 并且映射每个用户或用户组到一个不同系统账户(仅支持 \*NIX)。
|
||||
- 通过 SSH 支持 Git 仓库。
|
||||
- 支持 SCP 和 rsync。
|
||||
- 支持 FTP/S。你可以配置 FTP 服务为控制和数据连接都需要 TLS。
|
||||
- [WebDAV](./docs/webdav.md) 是支持的。
|
||||
- 双向 TLS 验证,即带有客户端证书身份验证的 TLS,支持 REST API/Web Admin、FTPS 和基于 HTTPS 的 WebDAV。
|
||||
- 每个用户协议限制。你可以为每个用户配置允许的协议(SSH/HTTP/FTP/WebDAV)。
|
||||
- 暴露 [输出指标](./docs/metrics.md)。
|
||||
- 支持 HAProxy PROXY 协议:你可以不需要丢失客户端地址信息代理 和/或 负载平衡 SFTP/SCP/FTP 服务。
|
||||
- 简单从 Linux 系统用户账户进行 [迁移](./examples/convertusers)。
|
||||
- [可携带模式](./docs/portable-mode.md):按需共享单个目录的便捷方式。
|
||||
- [SFTP 子系统模式](./docs/sftp-subsystem.md):你可以使用 SFTPGo 作为 OpenSSH 的 SFTP 子系统。
|
||||
- 性能分析基于内置的 [分析器](./docs/profiling.md)。
|
||||
- 配置项格式基于你的选择:JSON, TOML, YAML, HCL, envfile 都是支持的。
|
||||
- 日志文件是精确的,它们被存储为易被解析的 JSON 格式。([更多信息](./docs/logs.md))
|
||||
- SFTPGo 支持 [插件系统](./docs/plugins.md),因此可以使用外部插件拓展。
|
||||
|
||||
## 平台
|
||||
|
||||
SFTPGo 基于 Linux 开发和创建。在每一次提交之后,代码会自动通过 [GitHub Actions](./.github/workflows/development.yml) 在 Linux、macOS 和 Windows 构建和测试。测试用例定期手动在 FreeBSD 执行,其他的 *BSD 变体同样适用。
|
||||
|
||||
## 要求
|
||||
|
||||
- Go 作为构建仅有的依赖。我们支持 [持续集成工作流](./.github/workflows) 中使用的 Go 版本。
|
||||
- 使用适配的 SQL 服务作为数据提供程序:PostgreSQL 9.4+, MySQL 5.6+, SQLite 3.x, CockroachDB stable.
|
||||
- SQL 服务是可选的:你可以使用一个内置的 bolt 数据库以 键/值 存储,或者一个内存中的数据提供程序。
|
||||
|
||||
## 安装
|
||||
|
||||
为 Linux、macOS 和 Windows 提供的二进制发行版是可用的。请参考 [发行版](https://github.com/drakkan/sftpgo/releases "releases") 页面。
|
||||
|
||||
一个官方的 Docker 镜像是可用的。文档参考 [Docker](./docker/README.md)。
|
||||
|
||||
<details>
|
||||
|
||||
<summary>一些 Linux 分支包是可用的</summary>
|
||||
|
||||
- Arch Linux 通过 AUR:
|
||||
- [sftpgo](https://aur.archlinux.org/packages/sftpgo/)。这个包跟随稳定的发行版。需要 `git`、`gcc` 和 `go` 进行构建。
|
||||
- [sftpgo-bin](https://aur.archlinux.org/packages/sftpgo-bin/)。这个包跟随稳定的发行版从 GitHub 下载预构建 Linux 二进制文件。不需要 `git`、`gcc` 和 `go` 进行构建。
|
||||
- [sftpgo-git](https://aur.archlinux.org/packages/sftpgo-git/)。这个包构建和下载基于最新的 `git` 主分支。需要 `git`、`gcc` 和 `go` 进行构建。
|
||||
- Deb and RPM 包在每次提交和发行之后构建。
|
||||
- Ubuntu PPA 在 [此](https://launchpad.net/~sftpgo/+archive/ubuntu/sftpgo) 可用。
|
||||
- Void Linux 提供一个 [官方包](https://github.com/void-linux/void-packages/tree/master/srcpkgs/sftpgo)。
|
||||
|
||||
</details>
|
||||
|
||||
SFTPGo 在 [AWS Marketplace](https://aws.amazon.com/marketplace/seller-profile?id=6e849ab8-70a6-47de-9a43-13c3fa849335) 和 [Azure Marketplace](https://azuremarketplace.microsoft.com/en-us/marketplace/apps/eliamarzia1667381463185.sftpgo_linux) 同样可用,在此付费可以帮助 SFTPGo 成为一个可持续发展的长期项目。
|
||||
|
||||
<details><summary>Windows 包</summary>
|
||||
|
||||
- Windows installer 安装和运行 SFTPGo 作为一个 Windows 服务。
|
||||
- 开箱即用的包启动按需使用的 SFTPGo。
|
||||
- [winget](https://docs.microsoft.com/en-us/windows/package-manager/winget/install) 包下载和运行 SFTPGo 作为一个 Windows 服务:`winget install SFTPGo`。
|
||||
- [Chocolatey 包](https://community.chocolatey.org/packages/sftpgo) 下载和运行 SFTPGo 作为一个 Windows 服务。
|
||||
|
||||
</details>
|
||||
|
||||
在 FreeBSD,你可以从 [SFTPGo port](https://www.freshports.org/ftp/sftpgo) 下载。
|
||||
在 DragonFlyBSD,你可以从 [DPorts](https://github.com/DragonFlyBSD/DPorts/tree/master/ftp/sftpgo) 下载。
|
||||
您可以从 [Actions](https://github.com/drakkan/sftpgo/actions) 页面选择一个 commit 并下载 Linux、macOS 或 Windows 的匹配构建产物,从而轻松测试新特性。GitHub 会将构建产物存储 90 天。
|
||||
|
||||
另外,你可以 [从源码构建](./docs/build-from-source.md)。
|
||||
|
||||
[不耐烦的快速上手指南](./docs/howto/getting-started.md).
|
||||
|
||||
## 配置项
|
||||
|
||||
可以完整的配置项方法说明可以参考 [配置项](./docs/full-configuration.md)。
|
||||
|
||||
请确保按需运行之前,[初始化数据提供程序](#数据提供程序初始化和管理)。
|
||||
|
||||
使用默认配置启动 SFTPGo,运行:
|
||||
|
||||
```bash
|
||||
sftpgo serve
|
||||
```
|
||||
|
||||
如果你想将 SFTPGo 作为服务运行,请参阅 [这篇文档](./docs/service.md)。
|
||||
|
||||
### 数据提供程序初始化和管理
|
||||
|
||||
在启动 SFTPGo 服务之前,请确保配置的数据提供程序已经被适当的 初始化/更新。
|
||||
|
||||
对于 PostgreSQL, MySQL 和 CockroachDB 提供,你需要创建一个配置数据库。对于 SQLite,配置数据库将会在启动时被自动创建。内存和 bolt 数据提供程序不需要初始化,但是它们需要在升级 SFTPGo 之后更新现有的数据。
|
||||
|
||||
SFTPGo 会尝试自动探测数据提供程序是否被 初始化/更新;如果没有,将会在启动时尝试 初始化/更新。
|
||||
|
||||
或者,你可以通过 `initprovider` 命令自行 创建/更新 需要的数据提供程序结构。
|
||||
|
||||
比如,你可以执行在配置文件目录下面的命令:
|
||||
|
||||
```bash
|
||||
sftpgo initprovider
|
||||
```
|
||||
|
||||
看一看 CLI 用法学习如何指定一个不同的配置文件:
|
||||
|
||||
```bash
|
||||
sftpgo initprovider --help
|
||||
```
|
||||
|
||||
你可以在启动阶段通过设置 `update_mode` 配置项为 `1`,禁止自动数据提供程序 检查/更新。
|
||||
|
||||
你可以通过使用 `resetprovider` 子命令重置你的数据提供程序。看一看 CLI 用法获取更多细节信息:
|
||||
|
||||
```bash
|
||||
sftpgo resetprovider --help
|
||||
```
|
||||
|
||||
:warning: 请注意一些数据提供程序(比如 MySQL 和 CockroachDB)不支持事务内的方案更改,这意味着如果迁移被强制中止或由多个实例同时运行,您可能会得到不一致的方案。
|
||||
|
||||
## 创建第一个管理员
|
||||
|
||||
开始使用 SFTPGo,你需要创建一个管理员用户,你可以通过不同的方式进行实现:
|
||||
|
||||
- 通过 web 管理员界面。默认 URL 是 [http://127.0.0.1:8080/web/admin](http://127.0.0.1:8080/web/admin)
|
||||
- 通过加载初始数据
|
||||
- 通过在你的配置文件启用 `create_default_admin` 并设置环境变量 `SFTPGO_DEFAULT_ADMIN_USERNAME` 和 `SFTPGO_DEFAULT_ADMIN_PASSWORD`
|
||||
|
||||
## 升级
|
||||
|
||||
SFTPGo 支持从之前的发行版分支升级到当前分支。
|
||||
一些支持的升级路径如下:
|
||||
|
||||
- 从 1.2.x 到 2.0.x
|
||||
- 从 2.0.x 到 2.1.x 等。
|
||||
|
||||
对支持的升级路径,数据和方案将会自动迁移,你可以使用 `initprovider` 命令作为替代。
|
||||
|
||||
所以,比如,你想从 1.2.x 之前的版本升级到 2.0.x,你必须首先安装 1.2.x 版本,升级数据提供程序并最终安装版本 2.0.x。建议安装最新的可用小版本,如果 1.2.2 可用就不要安装 1.2.0 版本。
|
||||
|
||||
从以前发行版分支到当前版本,都支持从独立于数据提供程序的 JSON 转储中加载数据。升级 SFTPGo 后,建议从新版本重新生成 JSON 转储。
|
||||
|
||||
## 降级
|
||||
|
||||
如果因为一些原因你想降级 SFTPGo,你可能需要降级你的用户数据提供程序方案和数据。你可以使用 `revertprovider` 命令执行这项任务。
|
||||
|
||||
对于升级,SFTPGo 支持从先前的发行版分支降级到当前分支。
|
||||
|
||||
所以,如果你有计划从 2.0.x 降级到 1.2.x,之前先卸载 2.0.x 版本,你可以通过从配置目录执行以下命令来准备你的数据提供程序:
|
||||
|
||||
```shell
|
||||
sftpgo revertprovider --to-version 4
|
||||
```
|
||||
|
||||
看一看 CLI 的用法、了解 `--to-version` 参数支持的参数,了解如何去指定一个不同的配置文件:
|
||||
|
||||
```shell
|
||||
sftpgo revertprovider --help
|
||||
```
|
||||
|
||||
`revertprovider` 命令不支持内存数据提供程序。
|
||||
|
||||
请注意我们只支持当前发行版分支和当前主分支,如果你发现了个 bug,最好是报告这个问题而不是降级到一个老的、不被支持的版本。
|
||||
|
||||
## 用户和目录管理
|
||||
|
||||
在启动 SFTPGo 之后,你可以管理用户和目录使用:
|
||||
|
||||
- [基于 Web 的管理员界面](./docs/web-admin.md)
|
||||
- [REST API](./docs/rest-api.md)
|
||||
|
||||
为了支持诸如 `bolt` 和 `SQLite` 这类不支持并发连接的内置数据提供程序,我们无法提供直接将用户和文件夹写入数据提供程序的 CLI,而是始终使用 REST API。
|
||||
|
||||
对于用户、目录、管理员和其它资源的细节,都记录在 [OpenAPI](./openapi/openapi.yaml) 方案。如果你想在不手动引入的情况下渲染方案,你可以在 [Stoplight](https://sftpgo.stoplight.io/docs/sftpgo/openapi.yaml) 上暴露它。
|
||||
|
||||
## 教程
|
||||
|
||||
一些手把手教程可以在源码文件树中的 [howto](./docs/howto "How-to") 目录找到。
|
||||
|
||||
## 认证选项
|
||||
|
||||
<details><summary>外部认证</summary>
|
||||
|
||||
自定义认证方法可以很容易被添加。SFTPGo 支持外部认证模块,编写一个后端可以如编写几行 shell 脚本那样简单。更多的信息可以参考 [外部认证](./docs/external-auth.md)。
|
||||
|
||||
</details>
|
||||
|
||||
<details><summary>键盘交互认证</summary>
|
||||
|
||||
一般来说,键盘交互身份验证是服务器提出的一系列问题,由客户端提供响应。
|
||||
|
||||
这种身份认证方法通常用于多因素身份认证。
|
||||
|
||||
更多信息参考 [键盘交互](./docs/keyboard-interactive.md)。
|
||||
|
||||
</details>
|
||||
|
||||
## 动态用户创建或修改
|
||||
|
||||
一个用户可以通过外部程序在登录之前被创建和修改。更多关于此可以参考 [动态用户修改](./docs/dynamic-user-mod.md)。
|
||||
|
||||
## 自定义动作
|
||||
|
||||
SFTPGo 允许你配置自定义的命令 和/或 HTTP 钩子去获取关于文件上传、删除和一些其它操作的通知。
|
||||
|
||||
更多关于自定义动作的信息你可以参考 [自定义动作](./docs/custom-actions.md)。
|
||||
|
||||
## 虚拟目录
|
||||
|
||||
用户 home 文件夹外或者基于不同存储提供的目录,可以作为虚拟目录进行暴露,详细信息参考 [虚拟目录](./docs/virtual-folders.md)。
|
||||
|
||||
## 其它钩子
|
||||
|
||||
你可以使用 [Post-connect 钩子](./docs/post-connect-hook.md) 及时获取新的连接建立,使用 [Post-login hook](./docs/post-login-hook.md) 获取每次登录之后的通知。你可以使用你自己的钩子去 [验证密码](./docs/check-password-hook.md)。
|
||||
|
||||
## Storage backends

### S3/GCP/Azure

Each user can be mapped to an [S3 compatible object storage](./docs/s3.md) / [Google Cloud Storage](./docs/google-cloud-storage.md) / [Azure Blob Storage](./docs/azure-blob-storage.md) bucket, or to a virtual folder inside a bucket, exposed over SFTP/SCP/FTP/WebDAV.

### SFTP backend

Each user can be mapped to another SFTP server account, or to a subfolder of it. More information can be found in [sftpfs](./docs/sftpfs.md).

### Encrypted backend

Data at-rest encryption is supported via the [cryptfs backend](./docs/dare.md).

### Other storage backends

Adding new storage backends is quite easy:

- implement the [Fs interface](./vfs/vfs.go#L28 "interface for filesystem backends")
- update the user method `GetFilesystem` to return the new backend
- update the web interface and the REST API CLI
- add the flags for the new storage backend to the `portable` mode

Anyway, some backends require a paid account (or they provide a free account only for a limited time). To be able to add support for such backends, or to review PRs, please provide a test account. The test account must remain available long enough to maintain the backend and to run basic tests before each new release.
## Brute force protection

SFTPGo supports a built-in [defender](./docs/defender.md).

You can also use the [connection failed logs](./docs/logs.md) for integration with tools such as [Fail2ban](http://www.fail2ban.org/). Example [jails](./fail2ban/jails) and [filters](./fail2ban/filters) working with `systemd`/`journald` are available in the fail2ban directory.

## Account's configuration properties

Details about the configurable account properties can be found in [account](./docs/account.md).

## Performance

SFTPGo can easily saturate a Gigabit connection on low end hardware with no special configuration; this is generally enough for most use cases.

More in-depth performance analysis can be found in [performance](./docs/performance.md).

## Release cadence

SFTPGo releases are feature-driven, we don't have a fixed schedule. As a rough estimate, you can expect one or two new releases per year.

## Acknowledgements

SFTPGo makes use of the third party libraries listed in [go.mod](./go.mod).

We are very grateful to everyone who contributes ideas and/or PRs.

Thanks to [ysura](https://www.ysura.com/) for granting me stable access to a test AWS S3 account.

## Sponsors

I would like to make SFTPGo a sustainable long term project and your [sponsorship](https://github.com/sponsors/drakkan) really helps! :heart:

Thank you to our sponsors!

[<img src="https://www.7digital.com/wp-content/themes/sevendigital/images/top_logo.png" alt="7digital logo">](https://www.7digital.com/)

## License

GNU AGPL-3.0-only
12
SECURITY.md
Normal file
@@ -0,0 +1,12 @@
# Security Policy

## Supported Versions

Only the current release of the software is actively supported. If you need
help backporting fixes into an older release, feel free to ask.

## Reporting a Vulnerability

Email your vulnerability information to SFTPGo's maintainer:

Nicola Murino <nicola.murino@gmail.com>
78
api/api.go
@@ -1,78 +0,0 @@
// Package api implements REST API for sftpgo.
// REST API allows to manage users and quota and to get real time reports for the active connections
// with possibility of forcibly closing a connection.
// The OpenAPI 3 schema for the exposed API can be found inside the source tree:
// https://github.com/drakkan/sftpgo/tree/master/api/schema/openapi.yaml
package api

import (
	"net/http"

	"github.com/drakkan/sftpgo/dataprovider"
	"github.com/go-chi/chi"
	"github.com/go-chi/render"
)

const (
	logSender             = "api"
	activeConnectionsPath = "/api/v1/connection"
	quotaScanPath         = "/api/v1/quota_scan"
	userPath              = "/api/v1/user"
	versionPath           = "/api/v1/version"
	metricsPath           = "/metrics"
)

var (
	router       *chi.Mux
	dataProvider dataprovider.Provider
)

// HTTPDConf httpd daemon configuration
type HTTPDConf struct {
	// The port used for serving HTTP requests. 0 disable the HTTP server. Default: 8080
	BindPort int `json:"bind_port" mapstructure:"bind_port"`
	// The address to listen on. A blank value means listen on all available network interfaces. Default: "127.0.0.1"
	BindAddress string `json:"bind_address" mapstructure:"bind_address"`
}

type apiResponse struct {
	Error      string `json:"error"`
	Message    string `json:"message"`
	HTTPStatus int    `json:"status"`
}

func init() {
	initializeRouter()
}

// SetDataProvider sets the data provider to use to fetch the data about users
func SetDataProvider(provider dataprovider.Provider) {
	dataProvider = provider
}

func sendAPIResponse(w http.ResponseWriter, r *http.Request, err error, message string, code int) {
	var errorString string
	if err != nil {
		errorString = err.Error()
	}
	resp := apiResponse{
		Error:      errorString,
		Message:    message,
		HTTPStatus: code,
	}
	if code != http.StatusOK {
		w.Header().Set("Content-Type", "application/json; charset=utf-8")
		w.WriteHeader(code)
	}
	render.JSON(w, r, resp)
}

func getRespStatus(err error) int {
	if _, ok := err.(*dataprovider.ValidationError); ok {
		return http.StatusBadRequest
	}
	if _, ok := err.(*dataprovider.MethodDisabledError); ok {
		return http.StatusForbidden
	}
	return http.StatusInternalServerError
}
762
api/api_test.go
@@ -1,762 +0,0 @@
|
||||
package api_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/go-chi/render"
|
||||
_ "github.com/go-sql-driver/mysql"
|
||||
_ "github.com/lib/pq"
|
||||
_ "github.com/mattn/go-sqlite3"
|
||||
"github.com/rs/zerolog"
|
||||
|
||||
"github.com/drakkan/sftpgo/api"
|
||||
"github.com/drakkan/sftpgo/config"
|
||||
"github.com/drakkan/sftpgo/dataprovider"
|
||||
"github.com/drakkan/sftpgo/logger"
|
||||
"github.com/drakkan/sftpgo/sftpd"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultUsername = "test_user"
|
||||
defaultPassword = "test_password"
|
||||
testPubKey = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC03jj0D+djk7pxIf/0OhrxrchJTRZklofJ1NoIu4752Sq02mdXmarMVsqJ1cAjV5LBVy3D1F5U6XW4rppkXeVtd04Pxb09ehtH0pRRPaoHHlALiJt8CoMpbKYMA8b3KXPPriGxgGomvtU2T2RMURSwOZbMtpsugfjYSWenyYX+VORYhylWnSXL961LTyC21ehd6d6QnW9G7E5hYMITMY9TuQZz3bROYzXiTsgN0+g6Hn7exFQp50p45StUMfV/SftCMdCxlxuyGny2CrN/vfjO7xxOo2uv7q1qm10Q46KPWJQv+pgZ/OfL+EDjy07n5QVSKHlbx+2nT4Q0EgOSQaCTYwn3YjtABfIxWwgAFdyj6YlPulCL22qU4MYhDcA6PSBwDdf8hvxBfvsiHdM+JcSHvv8/VeJhk6CmnZxGY0fxBupov27z3yEO8nAg8k+6PaUiW1MSUfuGMF/ktB8LOstXsEPXSszuyXiOv4DaryOXUiSn7bmRqKcEFlJusO6aZP0= nicola@p1"
|
||||
logSender = "APITesting"
|
||||
userPath = "/api/v1/user"
|
||||
activeConnectionsPath = "/api/v1/connection"
|
||||
quotaScanPath = "/api/v1/quota_scan"
|
||||
versionPath = "/api/v1/version"
|
||||
metricsPath = "/metrics"
|
||||
)
|
||||
|
||||
var (
|
||||
defaultPerms = []string{dataprovider.PermAny}
|
||||
homeBasePath string
|
||||
testServer *httptest.Server
|
||||
)
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
if runtime.GOOS == "windows" {
|
||||
homeBasePath = "C:\\"
|
||||
} else {
|
||||
homeBasePath = "/tmp"
|
||||
}
|
||||
configDir := ".."
|
||||
logfilePath := filepath.Join(configDir, "sftpgo_api_test.log")
|
||||
logger.InitLogger(logfilePath, 5, 1, 28, false, zerolog.DebugLevel)
|
||||
config.LoadConfig(configDir, "")
|
||||
providerConf := config.GetProviderConf()
|
||||
|
||||
err := dataprovider.Initialize(providerConf, configDir)
|
||||
if err != nil {
|
||||
logger.Warn(logSender, "", "error initializing data provider: %v", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
dataProvider := dataprovider.GetProvider()
|
||||
httpdConf := config.GetHTTPDConfig()
|
||||
router := api.GetHTTPRouter()
|
||||
|
||||
httpdConf.BindPort = 8081
|
||||
api.SetBaseURL("http://127.0.0.1:8081")
|
||||
|
||||
sftpd.SetDataProvider(dataProvider)
|
||||
api.SetDataProvider(dataProvider)
|
||||
|
||||
go func() {
|
||||
logger.Debug(logSender, "", "initializing HTTP server with config %+v", httpdConf)
|
||||
s := &http.Server{
|
||||
Addr: fmt.Sprintf("%s:%d", httpdConf.BindAddress, httpdConf.BindPort),
|
||||
Handler: router,
|
||||
ReadTimeout: 300 * time.Second,
|
||||
WriteTimeout: 300 * time.Second,
|
||||
MaxHeaderBytes: 1 << 20, // 1MB
|
||||
}
|
||||
if err := s.ListenAndServe(); err != nil {
|
||||
logger.Error(logSender, "", "could not start HTTP server: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
testServer = httptest.NewServer(api.GetHTTPRouter())
|
||||
defer testServer.Close()
|
||||
|
||||
waitTCPListening(fmt.Sprintf("%s:%d", httpdConf.BindAddress, httpdConf.BindPort))
|
||||
|
||||
exitCode := m.Run()
|
||||
os.Remove(logfilePath)
|
||||
os.Exit(exitCode)
|
||||
}
|
||||
|
||||
func TestBasicUserHandling(t *testing.T) {
|
||||
user, _, err := api.AddUser(getTestUser(), http.StatusOK)
|
||||
if err != nil {
|
||||
t.Errorf("unable to add user: %v", err)
|
||||
}
|
||||
user.MaxSessions = 10
|
||||
user.QuotaSize = 4096
|
||||
user.QuotaFiles = 2
|
||||
user.UploadBandwidth = 128
|
||||
user.DownloadBandwidth = 64
|
||||
user, _, err = api.UpdateUser(user, http.StatusOK)
|
||||
if err != nil {
|
||||
t.Errorf("unable to update user: %v", err)
|
||||
}
|
||||
users, _, err := api.GetUsers(0, 0, defaultUsername, http.StatusOK)
|
||||
if err != nil {
|
||||
t.Errorf("unable to get users: %v", err)
|
||||
}
|
||||
if len(users) != 1 {
|
||||
t.Errorf("number of users mismatch, expected: 1, actual: %v", len(users))
|
||||
}
|
||||
_, err = api.RemoveUser(user, http.StatusOK)
|
||||
if err != nil {
|
||||
t.Errorf("unable to remove: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestAddUserNoCredentials(t *testing.T) {
|
||||
u := getTestUser()
|
||||
u.Password = ""
|
||||
u.PublicKeys = []string{}
|
||||
_, _, err := api.AddUser(u, http.StatusBadRequest)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error adding user with no credentials: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestAddUserNoUsername(t *testing.T) {
|
||||
u := getTestUser()
|
||||
u.Username = ""
|
||||
_, _, err := api.AddUser(u, http.StatusBadRequest)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error adding user with no home dir: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestAddUserNoHomeDir(t *testing.T) {
|
||||
u := getTestUser()
|
||||
u.HomeDir = ""
|
||||
_, _, err := api.AddUser(u, http.StatusBadRequest)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error adding user with no home dir: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestAddUserInvalidHomeDir(t *testing.T) {
|
||||
u := getTestUser()
|
||||
u.HomeDir = "relative_path"
|
||||
_, _, err := api.AddUser(u, http.StatusBadRequest)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error adding user with invalid home dir: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestAddUserNoPerms(t *testing.T) {
|
||||
u := getTestUser()
|
||||
u.Permissions = []string{}
|
||||
_, _, err := api.AddUser(u, http.StatusBadRequest)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error adding user with no perms: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestAddUserInvalidPerms(t *testing.T) {
|
||||
u := getTestUser()
|
||||
u.Permissions = []string{"invalidPerm"}
|
||||
_, _, err := api.AddUser(u, http.StatusBadRequest)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error adding user with no perms: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestUserPublicKey(t *testing.T) {
|
||||
u := getTestUser()
|
||||
invalidPubKey := "invalid"
|
||||
validPubKey := "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC03jj0D+djk7pxIf/0OhrxrchJTRZklofJ1NoIu4752Sq02mdXmarMVsqJ1cAjV5LBVy3D1F5U6XW4rppkXeVtd04Pxb09ehtH0pRRPaoHHlALiJt8CoMpbKYMA8b3KXPPriGxgGomvtU2T2RMURSwOZbMtpsugfjYSWenyYX+VORYhylWnSXL961LTyC21ehd6d6QnW9G7E5hYMITMY9TuQZz3bROYzXiTsgN0+g6Hn7exFQp50p45StUMfV/SftCMdCxlxuyGny2CrN/vfjO7xxOo2uv7q1qm10Q46KPWJQv+pgZ/OfL+EDjy07n5QVSKHlbx+2nT4Q0EgOSQaCTYwn3YjtABfIxWwgAFdyj6YlPulCL22qU4MYhDcA6PSBwDdf8hvxBfvsiHdM+JcSHvv8/VeJhk6CmnZxGY0fxBupov27z3yEO8nAg8k+6PaUiW1MSUfuGMF/ktB8LOstXsEPXSszuyXiOv4DaryOXUiSn7bmRqKcEFlJusO6aZP0= nicola@p1"
|
||||
u.PublicKeys = []string{invalidPubKey}
|
||||
_, _, err := api.AddUser(u, http.StatusBadRequest)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error adding user with invalid pub key: %v", err)
|
||||
}
|
||||
u.PublicKeys = []string{validPubKey}
|
||||
user, _, err := api.AddUser(u, http.StatusOK)
|
||||
if err != nil {
|
||||
t.Errorf("unable to add user: %v", err)
|
||||
}
|
||||
user.PublicKeys = []string{validPubKey, invalidPubKey}
|
||||
_, _, err = api.UpdateUser(user, http.StatusBadRequest)
|
||||
if err != nil {
|
||||
t.Errorf("update user with invalid public key must fail: %v", err)
|
||||
}
|
||||
user.PublicKeys = []string{validPubKey, validPubKey, validPubKey}
|
||||
_, _, err = api.UpdateUser(user, http.StatusOK)
|
||||
if err != nil {
|
||||
t.Errorf("unable to update user: %v", err)
|
||||
}
|
||||
_, err = api.RemoveUser(user, http.StatusOK)
|
||||
if err != nil {
|
||||
t.Errorf("unable to remove: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestUpdateUser(t *testing.T) {
|
||||
user, _, err := api.AddUser(getTestUser(), http.StatusOK)
|
||||
if err != nil {
|
||||
t.Errorf("unable to add user: %v", err)
|
||||
}
|
||||
user.HomeDir = filepath.Join(homeBasePath, "testmod")
|
||||
user.UID = 33
|
||||
user.GID = 101
|
||||
user.MaxSessions = 10
|
||||
user.QuotaSize = 4096
|
||||
user.QuotaFiles = 2
|
||||
user.Permissions = []string{dataprovider.PermCreateDirs, dataprovider.PermDelete, dataprovider.PermDownload}
|
||||
user.UploadBandwidth = 1024
|
||||
user.DownloadBandwidth = 512
|
||||
user, _, err = api.UpdateUser(user, http.StatusOK)
|
||||
if err != nil {
|
||||
t.Errorf("unable to update user: %v", err)
|
||||
}
|
||||
_, err = api.RemoveUser(user, http.StatusOK)
|
||||
if err != nil {
|
||||
t.Errorf("unable to remove: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestUpdateUserNoCredentials(t *testing.T) {
|
||||
user, _, err := api.AddUser(getTestUser(), http.StatusOK)
|
||||
if err != nil {
|
||||
t.Errorf("unable to add user: %v", err)
|
||||
}
|
||||
user.Password = ""
|
||||
user.PublicKeys = []string{}
|
||||
// password and public key will be omitted from json serialization if empty and so they will remain unchanged
|
||||
// and no validation error will be raised
|
||||
_, _, err = api.UpdateUser(user, http.StatusOK)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error updating user with no credentials: %v", err)
|
||||
}
|
||||
_, err = api.RemoveUser(user, http.StatusOK)
|
||||
if err != nil {
|
||||
t.Errorf("unable to remove: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestUpdateUserEmptyHomeDir(t *testing.T) {
|
||||
user, _, err := api.AddUser(getTestUser(), http.StatusOK)
|
||||
if err != nil {
|
||||
t.Errorf("unable to add user: %v", err)
|
||||
}
|
||||
user.HomeDir = ""
|
||||
_, _, err = api.UpdateUser(user, http.StatusBadRequest)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error updating user with empty home dir: %v", err)
|
||||
}
|
||||
_, err = api.RemoveUser(user, http.StatusOK)
|
||||
if err != nil {
|
||||
t.Errorf("unable to remove: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestUpdateUserInvalidHomeDir(t *testing.T) {
|
||||
user, _, err := api.AddUser(getTestUser(), http.StatusOK)
|
||||
if err != nil {
|
||||
t.Errorf("unable to add user: %v", err)
|
||||
}
|
||||
user.HomeDir = "relative_path"
|
||||
_, _, err = api.UpdateUser(user, http.StatusBadRequest)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error updating user with empty home dir: %v", err)
|
||||
}
|
||||
_, err = api.RemoveUser(user, http.StatusOK)
|
||||
if err != nil {
|
||||
t.Errorf("unable to remove: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestUpdateNonExistentUser(t *testing.T) {
|
||||
_, _, err := api.UpdateUser(getTestUser(), http.StatusNotFound)
|
||||
if err != nil {
|
||||
t.Errorf("unable to update user: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetNonExistentUser(t *testing.T) {
|
||||
_, _, err := api.GetUserByID(0, http.StatusNotFound)
|
||||
if err != nil {
|
||||
t.Errorf("unable to get user: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDeleteNonExistentUser(t *testing.T) {
|
||||
_, err := api.RemoveUser(getTestUser(), http.StatusNotFound)
|
||||
if err != nil {
|
||||
t.Errorf("unable to remove user: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestAddDuplicateUser(t *testing.T) {
|
||||
user, _, err := api.AddUser(getTestUser(), http.StatusOK)
|
||||
if err != nil {
|
||||
t.Errorf("unable to add user: %v", err)
|
||||
}
|
||||
_, _, err = api.AddUser(getTestUser(), http.StatusInternalServerError)
|
||||
if err != nil {
|
||||
t.Errorf("unable to add second user: %v", err)
|
||||
}
|
||||
_, _, err = api.AddUser(getTestUser(), http.StatusOK)
|
||||
if err == nil {
|
||||
t.Errorf("adding a duplicate user must fail")
|
||||
}
|
||||
_, err = api.RemoveUser(user, http.StatusOK)
|
||||
if err != nil {
|
||||
t.Errorf("unable to remove user: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetUsers(t *testing.T) {
|
||||
user1, _, err := api.AddUser(getTestUser(), http.StatusOK)
|
||||
if err != nil {
|
||||
t.Errorf("unable to add user: %v", err)
|
||||
}
|
||||
u := getTestUser()
|
||||
u.Username = defaultUsername + "1"
|
||||
user2, _, err := api.AddUser(u, http.StatusOK)
|
||||
if err != nil {
|
||||
t.Errorf("unable to add second user: %v", err)
|
||||
}
|
||||
users, _, err := api.GetUsers(0, 0, "", http.StatusOK)
|
||||
if err != nil {
|
||||
t.Errorf("unable to get users: %v", err)
|
||||
}
|
||||
if len(users) < 2 {
|
||||
t.Errorf("at least 2 users are expected")
|
||||
}
|
||||
users, _, err = api.GetUsers(1, 0, "", http.StatusOK)
|
||||
if err != nil {
|
||||
t.Errorf("unable to get users: %v", err)
|
||||
}
|
||||
if len(users) != 1 {
|
||||
t.Errorf("1 user is expected")
|
||||
}
|
||||
users, _, err = api.GetUsers(1, 1, "", http.StatusOK)
|
||||
if err != nil {
|
||||
t.Errorf("unable to get users: %v", err)
|
||||
}
|
||||
if len(users) != 1 {
|
||||
t.Errorf("1 user is expected")
|
||||
}
|
||||
_, _, err = api.GetUsers(1, 1, "", http.StatusInternalServerError)
|
||||
if err == nil {
|
||||
t.Errorf("get users must succeed, we requested a fail for a good request")
|
||||
}
|
||||
_, err = api.RemoveUser(user1, http.StatusOK)
|
||||
if err != nil {
|
||||
t.Errorf("unable to remove user: %v", err)
|
||||
}
|
||||
_, err = api.RemoveUser(user2, http.StatusOK)
|
||||
if err != nil {
|
||||
t.Errorf("unable to remove user: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetQuotaScans(t *testing.T) {
|
||||
_, _, err := api.GetQuotaScans(http.StatusOK)
|
||||
if err != nil {
|
||||
t.Errorf("unable to get quota scans: %v", err)
|
||||
}
|
||||
_, _, err = api.GetQuotaScans(http.StatusInternalServerError)
|
||||
if err == nil {
|
||||
t.Errorf("quota scan request must succeed, we requested to check a wrong status code")
|
||||
}
|
||||
}
|
||||
|
||||
func TestStartQuotaScan(t *testing.T) {
|
||||
user, _, err := api.AddUser(getTestUser(), http.StatusOK)
|
||||
if err != nil {
|
||||
t.Errorf("unable to add user: %v", err)
|
||||
}
|
||||
_, err = api.StartQuotaScan(user, http.StatusCreated)
|
||||
if err != nil {
|
||||
t.Errorf("unable to start quota scan: %v", err)
|
||||
}
|
||||
_, err = api.RemoveUser(user, http.StatusOK)
|
||||
if err != nil {
|
||||
t.Errorf("unable to remove user: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetVersion(t *testing.T) {
|
||||
_, _, err := api.GetVersion(http.StatusOK)
|
||||
if err != nil {
|
||||
t.Errorf("unable to get sftp version: %v", err)
|
||||
}
|
||||
_, _, err = api.GetVersion(http.StatusInternalServerError)
|
||||
if err == nil {
|
||||
t.Errorf("get version request must succeed, we requested to check a wrong status code")
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetConnections(t *testing.T) {
|
||||
_, _, err := api.GetConnections(http.StatusOK)
|
||||
if err != nil {
|
||||
t.Errorf("unable to get sftp connections: %v", err)
|
||||
}
|
||||
_, _, err = api.GetConnections(http.StatusInternalServerError)
|
||||
if err == nil {
|
||||
t.Errorf("get sftp connections request must succeed, we requested to check a wrong status code")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCloseActiveConnection(t *testing.T) {
|
||||
_, err := api.CloseConnection("non_existent_id", http.StatusNotFound)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error closing non existent sftp connection: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// test using mock http server
|
||||
|
||||
func TestBasicUserHandlingMock(t *testing.T) {
|
||||
user := getTestUser()
|
||||
userAsJSON := getUserAsJSON(t, user)
|
||||
req, _ := http.NewRequest(http.MethodPost, userPath, bytes.NewBuffer(userAsJSON))
|
||||
rr := executeRequest(req)
|
||||
checkResponseCode(t, http.StatusOK, rr.Code)
|
||||
err := render.DecodeJSON(rr.Body, &user)
|
||||
if err != nil {
|
||||
t.Errorf("Error get user: %v", err)
|
||||
}
|
||||
req, _ = http.NewRequest(http.MethodPost, userPath, bytes.NewBuffer(userAsJSON))
|
||||
rr = executeRequest(req)
|
||||
checkResponseCode(t, http.StatusInternalServerError, rr.Code)
|
||||
user.MaxSessions = 10
|
||||
user.UploadBandwidth = 128
|
||||
userAsJSON = getUserAsJSON(t, user)
|
||||
req, _ = http.NewRequest(http.MethodPut, userPath+"/"+strconv.FormatInt(user.ID, 10), bytes.NewBuffer(userAsJSON))
|
||||
rr = executeRequest(req)
|
||||
checkResponseCode(t, http.StatusOK, rr.Code)
|
||||
|
||||
req, _ = http.NewRequest(http.MethodGet, userPath+"/"+strconv.FormatInt(user.ID, 10), nil)
|
||||
rr = executeRequest(req)
|
||||
checkResponseCode(t, http.StatusOK, rr.Code)
|
||||
|
||||
var updatedUser dataprovider.User
|
||||
err = render.DecodeJSON(rr.Body, &updatedUser)
|
||||
if err != nil {
|
||||
t.Errorf("Error decoding updated user: %v", err)
|
||||
}
|
||||
if user.MaxSessions != updatedUser.MaxSessions || user.UploadBandwidth != updatedUser.UploadBandwidth {
|
||||
t.Errorf("Error modifying user actual: %v, %v", updatedUser.MaxSessions, updatedUser.UploadBandwidth)
|
||||
}
|
||||
req, _ = http.NewRequest(http.MethodDelete, userPath+"/"+strconv.FormatInt(user.ID, 10), nil)
|
||||
rr = executeRequest(req)
|
||||
checkResponseCode(t, http.StatusOK, rr.Code)
|
||||
}
|
||||
|
||||
func TestGetUserByIdInvalidParamsMock(t *testing.T) {
|
||||
req, _ := http.NewRequest(http.MethodGet, userPath+"/0", nil)
|
||||
rr := executeRequest(req)
|
||||
checkResponseCode(t, http.StatusNotFound, rr.Code)
|
||||
req, _ = http.NewRequest(http.MethodGet, userPath+"/a", nil)
|
||||
rr = executeRequest(req)
|
||||
checkResponseCode(t, http.StatusBadRequest, rr.Code)
|
||||
}
|
||||
|
||||
func TestAddUserNoUsernameMock(t *testing.T) {
|
||||
user := getTestUser()
|
||||
user.Username = ""
|
||||
userAsJSON := getUserAsJSON(t, user)
|
||||
req, _ := http.NewRequest(http.MethodPost, userPath, bytes.NewBuffer(userAsJSON))
|
||||
rr := executeRequest(req)
|
||||
checkResponseCode(t, http.StatusBadRequest, rr.Code)
|
||||
}
|
||||
|
||||
func TestAddUserInvalidHomeDirMock(t *testing.T) {
|
||||
user := getTestUser()
|
||||
user.HomeDir = "relative_path"
|
||||
userAsJSON := getUserAsJSON(t, user)
|
||||
req, _ := http.NewRequest(http.MethodPost, userPath, bytes.NewBuffer(userAsJSON))
|
||||
rr := executeRequest(req)
|
||||
checkResponseCode(t, http.StatusBadRequest, rr.Code)
|
||||
}
|
||||
|
||||
func TestAddUserInvalidPermsMock(t *testing.T) {
|
||||
user := getTestUser()
|
||||
user.Permissions = []string{}
|
||||
userAsJSON := getUserAsJSON(t, user)
|
||||
req, _ := http.NewRequest(http.MethodPost, userPath, bytes.NewBuffer(userAsJSON))
|
||||
rr := executeRequest(req)
|
||||
checkResponseCode(t, http.StatusBadRequest, rr.Code)
|
||||
}
|
||||
|
||||
func TestAddUserInvalidJsonMock(t *testing.T) {
|
||||
req, _ := http.NewRequest(http.MethodPost, userPath, bytes.NewBuffer([]byte("invalid json")))
|
||||
rr := executeRequest(req)
|
||||
checkResponseCode(t, http.StatusBadRequest, rr.Code)
|
||||
}
|
||||
|
||||
func TestUpdateUserInvalidJsonMock(t *testing.T) {
|
||||
user := getTestUser()
|
||||
userAsJSON := getUserAsJSON(t, user)
|
||||
req, _ := http.NewRequest(http.MethodPost, userPath, bytes.NewBuffer(userAsJSON))
|
||||
rr := executeRequest(req)
|
||||
checkResponseCode(t, http.StatusOK, rr.Code)
|
||||
err := render.DecodeJSON(rr.Body, &user)
|
||||
if err != nil {
|
||||
t.Errorf("Error get user: %v", err)
|
||||
}
|
||||
req, _ = http.NewRequest(http.MethodPut, userPath+"/"+strconv.FormatInt(user.ID, 10), bytes.NewBuffer([]byte("Invalid json")))
|
||||
rr = executeRequest(req)
|
||||
checkResponseCode(t, http.StatusBadRequest, rr.Code)
|
||||
req, _ = http.NewRequest(http.MethodDelete, userPath+"/"+strconv.FormatInt(user.ID, 10), nil)
|
||||
rr = executeRequest(req)
|
||||
checkResponseCode(t, http.StatusOK, rr.Code)
|
||||
}
|
||||
|
||||
func TestUpdateUserInvalidParamsMock(t *testing.T) {
|
||||
user := getTestUser()
|
||||
userAsJSON := getUserAsJSON(t, user)
|
||||
req, _ := http.NewRequest(http.MethodPost, userPath, bytes.NewBuffer(userAsJSON))
|
||||
rr := executeRequest(req)
|
||||
checkResponseCode(t, http.StatusOK, rr.Code)
|
||||
err := render.DecodeJSON(rr.Body, &user)
|
||||
if err != nil {
|
||||
t.Errorf("Error get user: %v", err)
|
||||
}
|
||||
user.HomeDir = ""
|
||||
userAsJSON = getUserAsJSON(t, user)
|
||||
req, _ = http.NewRequest(http.MethodPut, userPath+"/"+strconv.FormatInt(user.ID, 10), bytes.NewBuffer(userAsJSON))
|
||||
rr = executeRequest(req)
|
||||
checkResponseCode(t, http.StatusBadRequest, rr.Code)
|
||||
userID := user.ID
|
||||
user.ID = 0
|
||||
userAsJSON = getUserAsJSON(t, user)
|
||||
req, _ = http.NewRequest(http.MethodPut, userPath+"/"+strconv.FormatInt(userID, 10), bytes.NewBuffer(userAsJSON))
|
||||
rr = executeRequest(req)
|
||||
checkResponseCode(t, http.StatusBadRequest, rr.Code)
|
||||
user.ID = userID
|
||||
req, _ = http.NewRequest(http.MethodPut, userPath+"/0", bytes.NewBuffer(userAsJSON))
|
||||
rr = executeRequest(req)
|
||||
checkResponseCode(t, http.StatusNotFound, rr.Code)
|
||||
req, _ = http.NewRequest(http.MethodPut, userPath+"/a", bytes.NewBuffer(userAsJSON))
|
||||
rr = executeRequest(req)
|
||||
checkResponseCode(t, http.StatusBadRequest, rr.Code)
|
||||
req, _ = http.NewRequest(http.MethodDelete, userPath+"/"+strconv.FormatInt(user.ID, 10), nil)
|
||||
rr = executeRequest(req)
|
||||
checkResponseCode(t, http.StatusOK, rr.Code)
|
||||
}
|
||||
|
||||
func TestGetUsersMock(t *testing.T) {
|
||||
user := getTestUser()
|
||||
userAsJSON := getUserAsJSON(t, user)
|
||||
req, _ := http.NewRequest(http.MethodPost, userPath, bytes.NewBuffer(userAsJSON))
|
||||
rr := executeRequest(req)
|
||||
checkResponseCode(t, http.StatusOK, rr.Code)
|
||||
err := render.DecodeJSON(rr.Body, &user)
|
||||
if err != nil {
|
||||
t.Errorf("Error get user: %v", err)
|
||||
}
|
||||
req, _ = http.NewRequest(http.MethodGet, userPath+"?limit=510&offset=0&order=ASC&username="+defaultUsername, nil)
|
||||
rr = executeRequest(req)
|
||||
checkResponseCode(t, http.StatusOK, rr.Code)
|
||||
var users []dataprovider.User
|
||||
err = render.DecodeJSON(rr.Body, &users)
|
||||
if err != nil {
|
||||
t.Errorf("Error decoding users: %v", err)
|
||||
}
|
||||
if len(users) != 1 {
|
||||
t.Errorf("1 user is expected")
|
||||
}
|
||||
req, _ = http.NewRequest(http.MethodGet, userPath+"?limit=a&offset=0&order=ASC", nil)
|
||||
rr = executeRequest(req)
|
||||
checkResponseCode(t, http.StatusBadRequest, rr.Code)
|
||||
req, _ = http.NewRequest(http.MethodGet, userPath+"?limit=1&offset=a&order=ASC", nil)
|
||||
rr = executeRequest(req)
|
||||
checkResponseCode(t, http.StatusBadRequest, rr.Code)
|
||||
req, _ = http.NewRequest(http.MethodGet, userPath+"?limit=1&offset=0&order=ASCa", nil)
|
||||
rr = executeRequest(req)
|
||||
checkResponseCode(t, http.StatusBadRequest, rr.Code)
|
||||
|
||||
req, _ = http.NewRequest(http.MethodDelete, userPath+"/"+strconv.FormatInt(user.ID, 10), nil)
|
||||
rr = executeRequest(req)
|
||||
checkResponseCode(t, http.StatusOK, rr.Code)
|
||||
}
|
||||
|
||||
func TestDeleteUserInvalidParamsMock(t *testing.T) {
|
||||
req, _ := http.NewRequest(http.MethodDelete, userPath+"/0", nil)
|
||||
rr := executeRequest(req)
|
||||
checkResponseCode(t, http.StatusNotFound, rr.Code)
|
||||
req, _ = http.NewRequest(http.MethodDelete, userPath+"/a", nil)
|
||||
rr = executeRequest(req)
|
||||
checkResponseCode(t, http.StatusBadRequest, rr.Code)
|
||||
}
|
||||
|
||||
func TestGetQuotaScansMock(t *testing.T) {
|
||||
req, err := http.NewRequest("GET", quotaScanPath, nil)
|
||||
if err != nil {
|
||||
t.Errorf("error get quota scan: %v", err)
|
||||
}
|
||||
rr := executeRequest(req)
|
||||
checkResponseCode(t, http.StatusOK, rr.Code)
|
||||
}
|
||||
|
||||
func TestStartQuotaScanMock(t *testing.T) {
|
||||
user := getTestUser()
|
||||
userAsJSON := getUserAsJSON(t, user)
|
||||
req, _ := http.NewRequest(http.MethodPost, userPath, bytes.NewBuffer(userAsJSON))
|
||||
rr := executeRequest(req)
|
||||
checkResponseCode(t, http.StatusOK, rr.Code)
|
||||
err := render.DecodeJSON(rr.Body, &user)
|
||||
if err != nil {
|
||||
t.Errorf("Error get user: %v", err)
|
||||
}
|
||||
_, err = os.Stat(user.HomeDir)
|
||||
if err == nil {
|
||||
os.Remove(user.HomeDir)
|
||||
}
|
||||
// simulate a duplicate quota scan
|
||||
userAsJSON = getUserAsJSON(t, user)
|
||||
sftpd.AddQuotaScan(user.Username)
|
||||
req, _ = http.NewRequest(http.MethodPost, quotaScanPath, bytes.NewBuffer(userAsJSON))
|
||||
rr = executeRequest(req)
|
||||
checkResponseCode(t, http.StatusConflict, rr.Code)
|
||||
sftpd.RemoveQuotaScan(user.Username)
|
||||
|
||||
userAsJSON = getUserAsJSON(t, user)
|
||||
req, _ = http.NewRequest(http.MethodPost, quotaScanPath, bytes.NewBuffer(userAsJSON))
|
||||
rr = executeRequest(req)
|
||||
checkResponseCode(t, http.StatusCreated, rr.Code)
|
||||
|
||||
req, _ = http.NewRequest(http.MethodGet, quotaScanPath, nil)
|
||||
rr = executeRequest(req)
|
||||
checkResponseCode(t, http.StatusOK, rr.Code)
|
||||
var scans []sftpd.ActiveQuotaScan
|
||||
err = render.DecodeJSON(rr.Body, &scans)
|
||||
if err != nil {
|
||||
t.Errorf("Error get active scans: %v", err)
|
||||
}
|
||||
for len(scans) > 0 {
|
||||
req, _ = http.NewRequest(http.MethodGet, quotaScanPath, nil)
|
||||
rr = executeRequest(req)
|
||||
checkResponseCode(t, http.StatusOK, rr.Code)
|
||||
err = render.DecodeJSON(rr.Body, &scans)
|
||||
if err != nil {
|
||||
t.Errorf("Error get active scans: %v", err)
|
||||
break
|
||||
}
|
||||
}
|
||||
_, err = os.Stat(user.HomeDir)
|
||||
if err != nil && os.IsNotExist(err) {
|
||||
os.MkdirAll(user.HomeDir, 0777)
|
||||
}
|
||||
req, _ = http.NewRequest(http.MethodPost, quotaScanPath, bytes.NewBuffer(userAsJSON))
|
||||
rr = executeRequest(req)
|
||||
checkResponseCode(t, http.StatusCreated, rr.Code)
|
||||
req, _ = http.NewRequest(http.MethodDelete, userPath+"/"+strconv.FormatInt(user.ID, 10), nil)
|
||||
rr = executeRequest(req)
|
||||
checkResponseCode(t, http.StatusOK, rr.Code)
|
||||
}
|
||||
|
||||
func TestStartQuotaScanBadUserMock(t *testing.T) {
|
||||
user := getTestUser()
|
||||
userAsJSON := getUserAsJSON(t, user)
|
||||
req, _ := http.NewRequest(http.MethodPost, quotaScanPath, bytes.NewBuffer(userAsJSON))
|
||||
rr := executeRequest(req)
|
||||
checkResponseCode(t, http.StatusNotFound, rr.Code)
|
||||
}
|
||||
|
||||
func TestStartQuotaScanNonExistentUserMock(t *testing.T) {
|
||||
req, _ := http.NewRequest(http.MethodPost, quotaScanPath, bytes.NewBuffer([]byte("invalid json")))
|
||||
rr := executeRequest(req)
|
||||
checkResponseCode(t, http.StatusBadRequest, rr.Code)
|
||||
}
|
||||
|
||||
func TestGetVersionMock(t *testing.T) {
|
||||
req, _ := http.NewRequest(http.MethodGet, versionPath, nil)
|
||||
rr := executeRequest(req)
|
||||
checkResponseCode(t, http.StatusOK, rr.Code)
|
||||
}
|
||||
|
||||
func TestGetConnectionsMock(t *testing.T) {
|
||||
req, _ := http.NewRequest(http.MethodGet, activeConnectionsPath, nil)
|
||||
rr := executeRequest(req)
|
||||
checkResponseCode(t, http.StatusOK, rr.Code)
|
||||
}
|
||||
|
||||
func TestDeleteActiveConnectionMock(t *testing.T) {
|
||||
req, _ := http.NewRequest(http.MethodDelete, activeConnectionsPath+"/connectionID", nil)
|
||||
rr := executeRequest(req)
|
||||
checkResponseCode(t, http.StatusNotFound, rr.Code)
|
||||
}
|
||||
|
||||
func TestNotFoundMock(t *testing.T) {
|
||||
req, _ := http.NewRequest(http.MethodGet, "/non/existing/path", nil)
|
||||
rr := executeRequest(req)
|
||||
checkResponseCode(t, http.StatusNotFound, rr.Code)
|
||||
}
|
||||
|
||||
func TestMethodNotAllowedMock(t *testing.T) {
|
||||
req, _ := http.NewRequest(http.MethodPost, activeConnectionsPath, nil)
|
||||
rr := executeRequest(req)
|
||||
checkResponseCode(t, http.StatusMethodNotAllowed, rr.Code)
|
||||
}
|
||||
|
||||
func TestMetricsMock(t *testing.T) {
|
||||
req, _ := http.NewRequest(http.MethodGet, metricsPath, nil)
|
||||
rr := executeRequest(req)
|
||||
checkResponseCode(t, http.StatusOK, rr.Code)
|
||||
}
|
||||
|
||||
func waitTCPListening(address string) {
|
||||
for {
|
||||
conn, err := net.Dial("tcp", address)
|
||||
if err != nil {
|
||||
logger.WarnToConsole("tcp server %v not listening: %v\n", address, err)
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
continue
|
||||
}
|
||||
logger.InfoToConsole("tcp server %v now listening\n", address)
|
||||
defer conn.Close()
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
func getTestUser() dataprovider.User {
|
||||
return dataprovider.User{
|
||||
Username: defaultUsername,
|
||||
Password: defaultPassword,
|
||||
HomeDir: filepath.Join(homeBasePath, defaultUsername),
|
||||
Permissions: defaultPerms,
|
||||
}
|
||||
}
|
||||
|
||||
func getUserAsJSON(t *testing.T, user dataprovider.User) []byte {
|
||||
json, err := json.Marshal(user)
|
||||
if err != nil {
|
||||
t.Errorf("error get user as json: %v", err)
|
||||
return []byte("{}")
|
||||
}
|
||||
return json
|
||||
}
|
||||
|
||||
func executeRequest(req *http.Request) *httptest.ResponseRecorder {
|
||||
rr := httptest.NewRecorder()
|
||||
testServer.Config.Handler.ServeHTTP(rr, req)
|
||||
return rr
|
||||
}
|
||||
|
||||
func checkResponseCode(t *testing.T, expected, actual int) {
|
||||
if expected != actual {
|
||||
t.Errorf("Expected response code %d. Got %d", expected, actual)
|
||||
}
|
||||
}
|
||||
330
api/api_utils.go
@@ -1,330 +0,0 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/drakkan/sftpgo/dataprovider"
|
||||
"github.com/drakkan/sftpgo/sftpd"
|
||||
"github.com/drakkan/sftpgo/utils"
|
||||
"github.com/go-chi/render"
|
||||
)
|
||||
|
||||
var (
|
||||
httpBaseURL = "http://127.0.0.1:8080"
|
||||
)
|
||||
|
||||
// SetBaseURL sets the base url to use for HTTP requests, default is "http://127.0.0.1:8080"
|
||||
func SetBaseURL(url string) {
|
||||
httpBaseURL = url
|
||||
}
|
||||
|
||||
// gets an HTTP Client with a timeout
|
||||
func getHTTPClient() *http.Client {
|
||||
return &http.Client{
|
||||
Timeout: 15 * time.Second,
|
||||
}
|
||||
}
|
||||
|
||||
func buildURLRelativeToBase(paths ...string) string {
|
||||
// we need to use path.Join and not filepath.Join
|
||||
// since filepath.Join will use backslash separator on Windows
|
||||
p := path.Join(paths...)
|
||||
return fmt.Sprintf("%s/%s", strings.TrimRight(httpBaseURL, "/"), strings.TrimLeft(p, "/"))
|
||||
}
|
||||
|
||||
// AddUser adds a new user and checks the received HTTP Status code against expectedStatusCode.
|
||||
func AddUser(user dataprovider.User, expectedStatusCode int) (dataprovider.User, []byte, error) {
|
||||
var newUser dataprovider.User
|
||||
var body []byte
|
||||
userAsJSON, err := json.Marshal(user)
|
||||
if err != nil {
|
||||
return newUser, body, err
|
||||
}
|
||||
resp, err := getHTTPClient().Post(buildURLRelativeToBase(userPath), "application/json", bytes.NewBuffer(userAsJSON))
|
||||
if err != nil {
|
||||
return newUser, body, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
err = checkResponse(resp.StatusCode, expectedStatusCode)
|
||||
if expectedStatusCode != http.StatusOK {
|
||||
body, _ = getResponseBody(resp)
|
||||
return newUser, body, err
|
||||
}
|
||||
if err == nil {
|
||||
err = render.DecodeJSON(resp.Body, &newUser)
|
||||
} else {
|
||||
body, _ = getResponseBody(resp)
|
||||
}
|
||||
if err == nil {
|
||||
err = checkUser(user, newUser)
|
||||
}
|
||||
return newUser, body, err
|
||||
}
|
||||
|
||||
// UpdateUser updates an existing user and checks the received HTTP Status code against expectedStatusCode.
|
||||
func UpdateUser(user dataprovider.User, expectedStatusCode int) (dataprovider.User, []byte, error) {
|
||||
var newUser dataprovider.User
|
||||
var body []byte
|
||||
userAsJSON, err := json.Marshal(user)
|
||||
if err != nil {
|
||||
return user, body, err
|
||||
}
|
||||
req, err := http.NewRequest(http.MethodPut, buildURLRelativeToBase(userPath, strconv.FormatInt(user.ID, 10)),
|
||||
bytes.NewBuffer(userAsJSON))
|
||||
if err != nil {
|
||||
return user, body, err
|
||||
}
|
||||
resp, err := getHTTPClient().Do(req)
|
||||
if err != nil {
|
||||
return user, body, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
body, _ = getResponseBody(resp)
|
||||
err = checkResponse(resp.StatusCode, expectedStatusCode)
|
||||
if expectedStatusCode != http.StatusOK {
|
||||
return newUser, body, err
|
||||
}
|
||||
if err == nil {
|
||||
newUser, body, err = GetUserByID(user.ID, expectedStatusCode)
|
||||
}
|
||||
if err == nil {
|
||||
err = checkUser(user, newUser)
|
||||
}
|
||||
return newUser, body, err
|
||||
}
|
||||
|
||||
// RemoveUser removes an existing user and checks the received HTTP Status code against expectedStatusCode.
|
||||
func RemoveUser(user dataprovider.User, expectedStatusCode int) ([]byte, error) {
|
||||
var body []byte
|
||||
req, err := http.NewRequest(http.MethodDelete, buildURLRelativeToBase(userPath, strconv.FormatInt(user.ID, 10)), nil)
|
||||
if err != nil {
|
||||
return body, err
|
||||
}
|
||||
resp, err := getHTTPClient().Do(req)
|
||||
if err != nil {
|
||||
return body, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
body, _ = getResponseBody(resp)
|
||||
return body, checkResponse(resp.StatusCode, expectedStatusCode)
|
||||
}
|
||||
|
||||
// GetUserByID gets an user by database id and checks the received HTTP Status code against expectedStatusCode.
|
||||
func GetUserByID(userID int64, expectedStatusCode int) (dataprovider.User, []byte, error) {
|
||||
var user dataprovider.User
|
||||
var body []byte
|
||||
resp, err := getHTTPClient().Get(buildURLRelativeToBase(userPath, strconv.FormatInt(userID, 10)))
|
||||
if err != nil {
|
||||
return user, body, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
err = checkResponse(resp.StatusCode, expectedStatusCode)
|
||||
if err == nil && expectedStatusCode == http.StatusOK {
|
||||
err = render.DecodeJSON(resp.Body, &user)
|
||||
} else {
|
||||
body, _ = getResponseBody(resp)
|
||||
}
|
||||
return user, body, err
|
||||
}
|
||||
|
||||
// GetUsers allows to get a list of users and checks the received HTTP Status code against expectedStatusCode.
|
||||
// The number of results can be limited specifying a limit.
|
||||
// Some results can be skipped specifying an offset.
|
||||
// The results can be filtered specifying an username, the username filter is an exact match
|
||||
func GetUsers(limit int64, offset int64, username string, expectedStatusCode int) ([]dataprovider.User, []byte, error) {
|
||||
var users []dataprovider.User
|
||||
var body []byte
|
||||
url, err := url.Parse(buildURLRelativeToBase(userPath))
|
||||
if err != nil {
|
||||
return users, body, err
|
||||
}
|
||||
q := url.Query()
|
||||
if limit > 0 {
|
||||
q.Add("limit", strconv.FormatInt(limit, 10))
|
||||
}
|
||||
if offset > 0 {
|
||||
q.Add("offset", strconv.FormatInt(offset, 10))
|
||||
}
|
||||
if len(username) > 0 {
|
||||
q.Add("username", username)
|
||||
}
|
||||
url.RawQuery = q.Encode()
|
||||
resp, err := getHTTPClient().Get(url.String())
|
||||
if err != nil {
|
||||
return users, body, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
err = checkResponse(resp.StatusCode, expectedStatusCode)
|
||||
if err == nil && expectedStatusCode == http.StatusOK {
|
||||
err = render.DecodeJSON(resp.Body, &users)
|
||||
} else {
|
||||
body, _ = getResponseBody(resp)
|
||||
}
|
||||
return users, body, err
|
||||
}
|
||||
|
||||
// GetQuotaScans gets active quota scans and checks the received HTTP Status code against expectedStatusCode.
|
||||
func GetQuotaScans(expectedStatusCode int) ([]sftpd.ActiveQuotaScan, []byte, error) {
|
||||
var quotaScans []sftpd.ActiveQuotaScan
|
||||
var body []byte
|
||||
resp, err := getHTTPClient().Get(buildURLRelativeToBase(quotaScanPath))
|
||||
if err != nil {
|
||||
return quotaScans, body, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
err = checkResponse(resp.StatusCode, expectedStatusCode)
|
||||
if err == nil && expectedStatusCode == http.StatusOK {
|
||||
err = render.DecodeJSON(resp.Body, "aScans)
|
||||
} else {
|
||||
body, _ = getResponseBody(resp)
|
||||
}
|
||||
return quotaScans, body, err
|
||||
}
|
||||
|
||||
// StartQuotaScan start a new quota scan for the given user and checks the received HTTP Status code against expectedStatusCode.
|
||||
func StartQuotaScan(user dataprovider.User, expectedStatusCode int) ([]byte, error) {
|
||||
var body []byte
|
||||
userAsJSON, err := json.Marshal(user)
|
||||
if err != nil {
|
||||
return body, err
|
||||
}
|
||||
resp, err := getHTTPClient().Post(buildURLRelativeToBase(quotaScanPath), "application/json", bytes.NewBuffer(userAsJSON))
|
||||
if err != nil {
|
||||
return body, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
body, _ = getResponseBody(resp)
|
||||
return body, checkResponse(resp.StatusCode, expectedStatusCode)
|
||||
}
|
||||
|
||||
// GetConnections returns status and stats for active SFTP/SCP connections
|
||||
func GetConnections(expectedStatusCode int) ([]sftpd.ConnectionStatus, []byte, error) {
|
||||
var connections []sftpd.ConnectionStatus
|
||||
var body []byte
|
||||
resp, err := getHTTPClient().Get(buildURLRelativeToBase(activeConnectionsPath))
|
||||
if err != nil {
|
||||
return connections, body, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
err = checkResponse(resp.StatusCode, expectedStatusCode)
|
||||
if err == nil && expectedStatusCode == http.StatusOK {
|
||||
err = render.DecodeJSON(resp.Body, &connections)
|
||||
} else {
|
||||
body, _ = getResponseBody(resp)
|
||||
}
|
||||
return connections, body, err
|
||||
}
|
||||
|
||||
// CloseConnection closes an active connection identified by connectionID
|
||||
func CloseConnection(connectionID string, expectedStatusCode int) ([]byte, error) {
|
||||
var body []byte
|
||||
req, err := http.NewRequest(http.MethodDelete, buildURLRelativeToBase(activeConnectionsPath, connectionID), nil)
|
||||
if err != nil {
|
||||
return body, err
|
||||
}
|
||||
resp, err := getHTTPClient().Do(req)
|
||||
if err != nil {
|
||||
return body, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
err = checkResponse(resp.StatusCode, expectedStatusCode)
|
||||
body, _ = getResponseBody(resp)
|
||||
return body, err
|
||||
}
|
||||
|
||||
// GetVersion returns version details
|
||||
func GetVersion(expectedStatusCode int) (utils.VersionInfo, []byte, error) {
|
||||
var version utils.VersionInfo
|
||||
var body []byte
|
||||
resp, err := getHTTPClient().Get(buildURLRelativeToBase(versionPath))
|
||||
if err != nil {
|
||||
return version, body, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
err = checkResponse(resp.StatusCode, expectedStatusCode)
|
||||
if err == nil && expectedStatusCode == http.StatusOK {
|
||||
err = render.DecodeJSON(resp.Body, &version)
|
||||
} else {
|
||||
body, _ = getResponseBody(resp)
|
||||
}
|
||||
return version, body, err
|
||||
}
|
||||
|
||||
func checkResponse(actual int, expected int) error {
|
||||
if expected != actual {
|
||||
return fmt.Errorf("wrong status code: got %v want %v", actual, expected)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func getResponseBody(resp *http.Response) ([]byte, error) {
|
||||
return ioutil.ReadAll(resp.Body)
|
||||
}
|
||||
|
||||
func checkUser(expected dataprovider.User, actual dataprovider.User) error {
|
||||
if len(actual.Password) > 0 {
|
||||
return errors.New("User password must not be visible")
|
||||
}
|
||||
if len(actual.PublicKeys) > 0 {
|
||||
return errors.New("User public keys must not be visible")
|
||||
}
|
||||
if expected.ID <= 0 {
|
||||
if actual.ID <= 0 {
|
||||
return errors.New("actual user ID must be > 0")
|
||||
}
|
||||
} else {
|
||||
if actual.ID != expected.ID {
|
||||
return errors.New("user ID mismatch")
|
||||
}
|
||||
}
|
||||
for _, v := range expected.Permissions {
|
||||
if !utils.IsStringInSlice(v, actual.Permissions) {
|
||||
return errors.New("Permissions contents mismatch")
|
||||
}
|
||||
}
|
||||
return compareEqualsUserFields(expected, actual)
|
||||
}
|
||||
|
||||
func compareEqualsUserFields(expected dataprovider.User, actual dataprovider.User) error {
|
||||
if expected.Username != actual.Username {
|
||||
return errors.New("Username mismatch")
|
||||
}
|
||||
if expected.HomeDir != actual.HomeDir {
|
||||
return errors.New("HomeDir mismatch")
|
||||
}
|
||||
if expected.UID != actual.UID {
|
||||
return errors.New("UID mismatch")
|
||||
}
|
||||
if expected.GID != actual.GID {
|
||||
return errors.New("GID mismatch")
|
||||
}
|
||||
if expected.MaxSessions != actual.MaxSessions {
|
||||
return errors.New("MaxSessions mismatch")
|
||||
}
|
||||
if expected.QuotaSize != actual.QuotaSize {
|
||||
return errors.New("QuotaSize mismatch")
|
||||
}
|
||||
if expected.QuotaFiles != actual.QuotaFiles {
|
||||
return errors.New("QuotaFiles mismatch")
|
||||
}
|
||||
if len(expected.Permissions) != len(actual.Permissions) {
|
||||
return errors.New("Permissions mismatch")
|
||||
}
|
||||
if expected.UploadBandwidth != actual.UploadBandwidth {
|
||||
return errors.New("UploadBandwidth mismatch")
|
||||
}
|
||||
if expected.DownloadBandwidth != actual.DownloadBandwidth {
|
||||
return errors.New("DownloadBandwidth mismatch")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -1,228 +0,0 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
|
||||
"github.com/drakkan/sftpgo/dataprovider"
|
||||
"github.com/go-chi/chi"
|
||||
)
|
||||
|
||||
const (
|
||||
invalidURL = "http://foo\x7f.com/"
|
||||
inactiveURL = "http://127.0.0.1:12345"
|
||||
)
|
||||
|
||||
func TestGetRespStatus(t *testing.T) {
|
||||
var err error
|
||||
err = &dataprovider.MethodDisabledError{}
|
||||
respStatus := getRespStatus(err)
|
||||
if respStatus != http.StatusForbidden {
|
||||
t.Errorf("wrong resp status extected: %d got: %d", http.StatusForbidden, respStatus)
|
||||
}
|
||||
err = fmt.Errorf("generic error")
|
||||
respStatus = getRespStatus(err)
|
||||
if respStatus != http.StatusInternalServerError {
|
||||
t.Errorf("wrong resp status extected: %d got: %d", http.StatusInternalServerError, respStatus)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCheckResponse(t *testing.T) {
|
||||
err := checkResponse(http.StatusOK, http.StatusCreated)
|
||||
if err == nil {
|
||||
t.Errorf("check must fail")
|
||||
}
|
||||
err = checkResponse(http.StatusBadRequest, http.StatusBadRequest)
|
||||
if err != nil {
|
||||
t.Errorf("test must succeed, error: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCheckUser(t *testing.T) {
|
||||
expected := dataprovider.User{}
|
||||
actual := dataprovider.User{}
|
||||
actual.Password = "password"
|
||||
err := checkUser(expected, actual)
|
||||
if err == nil {
|
||||
t.Errorf("actual password must be nil")
|
||||
}
|
||||
actual.Password = ""
|
||||
actual.PublicKeys = []string{"pub key"}
|
||||
err = checkUser(expected, actual)
|
||||
if err == nil {
|
||||
t.Errorf("actual public key must be nil")
|
||||
}
|
||||
actual.PublicKeys = []string{}
|
||||
err = checkUser(expected, actual)
|
||||
if err == nil {
|
||||
t.Errorf("actual ID must be > 0")
|
||||
}
|
||||
expected.ID = 1
|
||||
actual.ID = 2
|
||||
err = checkUser(expected, actual)
|
||||
if err == nil {
|
||||
t.Errorf("actual ID must be equal to expected ID")
|
||||
}
|
||||
expected.ID = 2
|
||||
actual.ID = 2
|
||||
expected.Permissions = []string{dataprovider.PermCreateDirs, dataprovider.PermDelete, dataprovider.PermDownload}
|
||||
actual.Permissions = []string{dataprovider.PermCreateDirs, dataprovider.PermCreateSymlinks}
|
||||
err = checkUser(expected, actual)
|
||||
if err == nil {
|
||||
t.Errorf("Permissions are not equal")
|
||||
}
|
||||
expected.Permissions = append(expected.Permissions, dataprovider.PermRename)
|
||||
err = checkUser(expected, actual)
|
||||
if err == nil {
|
||||
t.Errorf("Permissions are not equal")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCompareUserFields(t *testing.T) {
|
||||
expected := dataprovider.User{}
|
||||
actual := dataprovider.User{}
|
||||
expected.Username = "test"
|
||||
err := compareEqualsUserFields(expected, actual)
|
||||
if err == nil {
|
||||
t.Errorf("Username does not match")
|
||||
}
|
||||
expected.Username = ""
|
||||
expected.HomeDir = "homedir"
|
||||
err = compareEqualsUserFields(expected, actual)
|
||||
if err == nil {
|
||||
t.Errorf("HomeDir does not match")
|
||||
}
|
||||
expected.HomeDir = ""
|
||||
expected.UID = 1
|
||||
err = compareEqualsUserFields(expected, actual)
|
||||
if err == nil {
|
||||
t.Errorf("UID does not match")
|
||||
}
|
||||
expected.UID = 0
|
||||
expected.GID = 1
|
||||
err = compareEqualsUserFields(expected, actual)
|
||||
if err == nil {
|
||||
t.Errorf("GID does not match")
|
||||
}
|
||||
expected.GID = 0
|
||||
expected.MaxSessions = 2
|
||||
err = compareEqualsUserFields(expected, actual)
|
||||
if err == nil {
|
||||
t.Errorf("MaxSessions do not match")
|
||||
}
|
||||
expected.MaxSessions = 0
|
||||
expected.QuotaSize = 4096
|
||||
err = compareEqualsUserFields(expected, actual)
|
||||
if err == nil {
|
||||
t.Errorf("QuotaSize does not match")
|
||||
}
|
||||
expected.QuotaSize = 0
|
||||
expected.QuotaFiles = 2
|
||||
err = compareEqualsUserFields(expected, actual)
|
||||
if err == nil {
|
||||
t.Errorf("QuotaFiles do not match")
|
||||
}
|
||||
expected.QuotaFiles = 0
|
||||
expected.Permissions = []string{dataprovider.PermCreateDirs}
|
||||
err = compareEqualsUserFields(expected, actual)
|
||||
if err == nil {
|
||||
t.Errorf("Permissions are not equal")
|
||||
}
|
||||
expected.Permissions = nil
|
||||
expected.UploadBandwidth = 64
|
||||
err = compareEqualsUserFields(expected, actual)
|
||||
if err == nil {
|
||||
t.Errorf("UploadBandwidth does not match")
|
||||
}
|
||||
expected.UploadBandwidth = 0
|
||||
expected.DownloadBandwidth = 128
|
||||
err = compareEqualsUserFields(expected, actual)
|
||||
if err == nil {
|
||||
t.Errorf("DownloadBandwidth does not match")
|
||||
}
|
||||
}
|
||||
|
||||
func TestApiCallsWithBadURL(t *testing.T) {
|
||||
oldBaseURL := httpBaseURL
|
||||
SetBaseURL(invalidURL)
|
||||
u := dataprovider.User{}
|
||||
_, _, err := UpdateUser(u, http.StatusBadRequest)
|
||||
if err == nil {
|
||||
t.Errorf("request with invalid URL must fail")
|
||||
}
|
||||
_, err = RemoveUser(u, http.StatusNotFound)
|
||||
if err == nil {
|
||||
t.Errorf("request with invalid URL must fail")
|
||||
}
|
||||
_, _, err = GetUsers(1, 0, "", http.StatusBadRequest)
|
||||
if err == nil {
|
||||
t.Errorf("request with invalid URL must fail")
|
||||
}
|
||||
_, err = CloseConnection("non_existent_id", http.StatusNotFound)
|
||||
if err == nil {
|
||||
t.Errorf("request with invalid URL must fail")
|
||||
}
|
||||
SetBaseURL(oldBaseURL)
|
||||
}
|
||||
|
||||
func TestApiCallToNotListeningServer(t *testing.T) {
|
||||
oldBaseURL := httpBaseURL
|
||||
SetBaseURL(inactiveURL)
|
||||
u := dataprovider.User{}
|
||||
_, _, err := AddUser(u, http.StatusBadRequest)
|
||||
if err == nil {
|
||||
t.Errorf("request to an inactive URL must fail")
|
||||
}
|
||||
_, _, err = UpdateUser(u, http.StatusNotFound)
|
||||
if err == nil {
|
||||
t.Errorf("request to an inactive URL must fail")
|
||||
}
|
||||
_, err = RemoveUser(u, http.StatusNotFound)
|
||||
if err == nil {
|
||||
t.Errorf("request to an inactive URL must fail")
|
||||
}
|
||||
_, _, err = GetUserByID(-1, http.StatusNotFound)
|
||||
if err == nil {
|
||||
t.Errorf("request to an inactive URL must fail")
|
||||
}
|
||||
_, _, err = GetUsers(100, 0, "", http.StatusOK)
|
||||
if err == nil {
|
||||
t.Errorf("request to an inactive URL must fail")
|
||||
}
|
||||
_, _, err = GetQuotaScans(http.StatusOK)
|
||||
if err == nil {
|
||||
t.Errorf("request to an inactive URL must fail")
|
||||
}
|
||||
_, err = StartQuotaScan(u, http.StatusNotFound)
|
||||
if err == nil {
|
||||
t.Errorf("request to an inactive URL must fail")
|
||||
}
|
||||
_, _, err = GetConnections(http.StatusOK)
|
||||
if err == nil {
|
||||
t.Errorf("request to an inactive URL must fail")
|
||||
}
|
||||
_, err = CloseConnection("non_existent_id", http.StatusNotFound)
|
||||
if err == nil {
|
||||
t.Errorf("request to an inactive URL must fail")
|
||||
}
|
||||
_, _, err = GetVersion(http.StatusOK)
|
||||
if err == nil {
|
||||
t.Errorf("request to an inactive URL must fail")
|
||||
}
|
||||
SetBaseURL(oldBaseURL)
|
||||
}
|
||||
|
||||
func TestCloseConnectionHandler(t *testing.T) {
|
||||
req, _ := http.NewRequest(http.MethodDelete, activeConnectionsPath+"/connectionID", nil)
|
||||
rctx := chi.NewRouteContext()
|
||||
rctx.URLParams.Add("connectionID", "")
|
||||
req = req.WithContext(context.WithValue(req.Context(), chi.RouteCtxKey, rctx))
|
||||
rr := httptest.NewRecorder()
|
||||
handleCloseConnection(rr, req)
|
||||
if rr.Code != http.StatusBadRequest {
|
||||
t.Errorf("Expected response code 400. Got %d", rr.Code)
|
||||
}
|
||||
}
|
||||
44
api/quota.go
@@ -1,44 +0,0 @@
package api

import (
	"net/http"

	"github.com/drakkan/sftpgo/dataprovider"
	"github.com/drakkan/sftpgo/logger"
	"github.com/drakkan/sftpgo/sftpd"
	"github.com/drakkan/sftpgo/utils"
	"github.com/go-chi/render"
)

func getQuotaScans(w http.ResponseWriter, r *http.Request) {
	render.JSON(w, r, sftpd.GetQuotaScans())
}

func startQuotaScan(w http.ResponseWriter, r *http.Request) {
	var u dataprovider.User
	err := render.DecodeJSON(r.Body, &u)
	if err != nil {
		sendAPIResponse(w, r, err, "", http.StatusBadRequest)
		return
	}
	user, err := dataprovider.UserExists(dataProvider, u.Username)
	if err != nil {
		sendAPIResponse(w, r, err, "", http.StatusNotFound)
		return
	}
	if sftpd.AddQuotaScan(user.Username) {
		sendAPIResponse(w, r, err, "Scan started", http.StatusCreated)
		go func() {
			numFiles, size, _, err := utils.ScanDirContents(user.HomeDir)
			if err != nil {
				logger.Warn(logSender, "", "error scanning user home dir %v: %v", user.HomeDir, err)
			} else {
				err := dataprovider.UpdateUserQuota(dataProvider, user, numFiles, size, true)
				logger.Debug(logSender, "", "user dir scanned, user: %v, dir: %v, error: %v", user.Username, user.HomeDir, err)
			}
			sftpd.RemoveQuotaScan(user.Username)
		}()
	} else {
		sendAPIResponse(w, r, err, "Another scan is already in progress", http.StatusConflict)
	}
}
@@ -1,89 +0,0 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
"github.com/drakkan/sftpgo/logger"
|
||||
"github.com/drakkan/sftpgo/sftpd"
|
||||
"github.com/drakkan/sftpgo/utils"
|
||||
"github.com/go-chi/chi"
|
||||
"github.com/go-chi/chi/middleware"
|
||||
"github.com/go-chi/render"
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
)
|
||||
|
||||
// GetHTTPRouter returns the configured HTTP handler
|
||||
func GetHTTPRouter() http.Handler {
|
||||
return router
|
||||
}
|
||||
|
||||
func initializeRouter() {
|
||||
router = chi.NewRouter()
|
||||
router.Use(middleware.RequestID)
|
||||
router.Use(middleware.RealIP)
|
||||
router.Use(logger.NewStructuredLogger(logger.GetLogger()))
|
||||
router.Use(middleware.Recoverer)
|
||||
|
||||
router.NotFound(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
sendAPIResponse(w, r, nil, "Not Found", http.StatusNotFound)
|
||||
}))
|
||||
|
||||
router.MethodNotAllowed(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
sendAPIResponse(w, r, nil, "Method not allowed", http.StatusMethodNotAllowed)
|
||||
}))
|
||||
|
||||
router.Handle(metricsPath, promhttp.Handler())
|
||||
|
||||
router.Get(versionPath, func(w http.ResponseWriter, r *http.Request) {
|
||||
render.JSON(w, r, utils.GetAppVersion())
|
||||
})
|
||||
|
||||
router.Get(activeConnectionsPath, func(w http.ResponseWriter, r *http.Request) {
|
||||
render.JSON(w, r, sftpd.GetConnectionsStats())
|
||||
})
|
||||
|
||||
router.Delete(activeConnectionsPath+"/{connectionID}", func(w http.ResponseWriter, r *http.Request) {
|
||||
handleCloseConnection(w, r)
|
||||
})
|
||||
|
||||
router.Get(quotaScanPath, func(w http.ResponseWriter, r *http.Request) {
|
||||
getQuotaScans(w, r)
|
||||
})
|
||||
|
||||
router.Post(quotaScanPath, func(w http.ResponseWriter, r *http.Request) {
|
||||
startQuotaScan(w, r)
|
||||
})
|
||||
|
||||
router.Get(userPath, func(w http.ResponseWriter, r *http.Request) {
|
||||
getUsers(w, r)
|
||||
})
|
||||
|
||||
router.Post(userPath, func(w http.ResponseWriter, r *http.Request) {
|
||||
addUser(w, r)
|
||||
})
|
||||
|
||||
router.Get(userPath+"/{userID}", func(w http.ResponseWriter, r *http.Request) {
|
||||
getUserByID(w, r)
|
||||
})
|
||||
|
||||
router.Put(userPath+"/{userID}", func(w http.ResponseWriter, r *http.Request) {
|
||||
updateUser(w, r)
|
||||
})
|
||||
|
||||
router.Delete(userPath+"/{userID}", func(w http.ResponseWriter, r *http.Request) {
|
||||
deleteUser(w, r)
|
||||
})
|
||||
}
|
||||
|
||||
func handleCloseConnection(w http.ResponseWriter, r *http.Request) {
|
||||
connectionID := chi.URLParam(r, "connectionID")
|
||||
if connectionID == "" {
|
||||
sendAPIResponse(w, r, nil, "connectionID is mandatory", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
if sftpd.CloseActiveConnection(connectionID) {
|
||||
sendAPIResponse(w, r, nil, "Connection closed", http.StatusOK)
|
||||
} else {
|
||||
sendAPIResponse(w, r, nil, "Not Found", http.StatusNotFound)
|
||||
}
|
||||
}
|
||||
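Since GetHTTPRouter returns a plain http.Handler, the router can be served with the standard library. A minimal sketch, not part of the changeset, assuming the api package has already initialized its router and data provider elsewhere; the bind address mirrors the default HTTPD configuration.

package main

import (
    "log"
    "net/http"

    "github.com/drakkan/sftpgo/api"
)

func main() {
    // GetHTTPRouter returns the chi router configured by initializeRouter above.
    log.Fatal(http.ListenAndServe("127.0.0.1:8080", api.GetHTTPRouter()))
}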
@@ -1,691 +0,0 @@
|
||||
openapi: 3.0.1
|
||||
info:
|
||||
title: SFTPGo
|
||||
description: 'SFTPGo REST API'
|
||||
version: 1.0.0
|
||||
|
||||
servers:
|
||||
- url: /api/v1
|
||||
paths:
|
||||
/version:
|
||||
get:
|
||||
tags:
|
||||
- version
|
||||
summary: Get version details
|
||||
operationId: get_version
|
||||
responses:
|
||||
200:
|
||||
description: successful operation
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
type: array
|
||||
items:
|
||||
$ref : '#/components/schemas/VersionInfo'
|
||||
/connection:
|
||||
get:
|
||||
tags:
|
||||
- connections
|
||||
summary: Get the active users and info about their uploads/downloads
|
||||
operationId: get_connections
|
||||
responses:
|
||||
200:
|
||||
description: successful operation
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
type: array
|
||||
items:
|
||||
$ref : '#/components/schemas/ConnectionStatus'
|
||||
/connection/{connectionID}:
|
||||
delete:
|
||||
tags:
|
||||
- connections
|
||||
summary: Terminate an active connection
|
||||
operationId: close_connection
|
||||
parameters:
|
||||
- name: connectionID
|
||||
in: path
|
||||
description: ID of the connection to close
|
||||
required: true
|
||||
schema:
|
||||
type: string
|
||||
responses:
|
||||
200:
|
||||
description: successful operation
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/ApiResponse'
|
||||
example:
|
||||
status: 200
|
||||
message: "Connection closed"
|
||||
error: ""
|
||||
400:
|
||||
description: Bad request
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/ApiResponse'
|
||||
example:
|
||||
status: 400
|
||||
message: ""
|
||||
error: "Error description if any"
|
||||
404:
|
||||
description: Not Found
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/ApiResponse'
|
||||
example:
|
||||
status: 404
|
||||
message: ""
|
||||
error: "Error description if any"
|
||||
500:
|
||||
description: Internal Server Error
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/ApiResponse'
|
||||
example:
|
||||
status: 500
|
||||
message: ""
|
||||
error: "Error description if any"
|
||||
/quota_scan:
|
||||
get:
|
||||
tags:
|
||||
- quota
|
||||
summary: Get the active quota scans
|
||||
operationId: get_quota_scans
|
||||
responses:
|
||||
200:
|
||||
description: successful operation
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
type: array
|
||||
items:
|
||||
$ref : '#/components/schemas/QuotaScan'
|
||||
post:
|
||||
tags:
|
||||
- quota
|
||||
summary: start a new quota scan
|
||||
description: A quota scan updates the number of files and their total size for the given user
|
||||
operationId: start_quota_scan
|
||||
requestBody:
|
||||
required: true
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref : '#/components/schemas/User'
|
||||
responses:
|
||||
201:
|
||||
description: successful operation
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/ApiResponse'
|
||||
example:
|
||||
status: 201
|
||||
message: "Scan started"
|
||||
error: ""
|
||||
400:
|
||||
description: Bad request
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/ApiResponse'
|
||||
example:
|
||||
status: 400
|
||||
message: ""
|
||||
error: "Error description if any"
|
||||
403:
|
||||
description: Forbidden
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/ApiResponse'
|
||||
example:
|
||||
status: 403
|
||||
message: ""
|
||||
error: "Error description if any"
|
||||
404:
|
||||
description: Not Found
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/ApiResponse'
|
||||
example:
|
||||
status: 404
|
||||
message: ""
|
||||
error: "Error description if any"
|
||||
409:
|
||||
description: Another scan is already in progress for this user
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/ApiResponse'
|
||||
example:
|
||||
status: 409
|
||||
message: "Another scan is already in progress"
|
||||
error: "Error description if any"
|
||||
500:
|
||||
description: Internal Server Error
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/ApiResponse'
|
||||
example:
|
||||
status: 500
|
||||
message: ""
|
||||
error: "Error description if any"
|
||||
/user:
|
||||
get:
|
||||
tags:
|
||||
- users
|
||||
summary: Returns an array with one or more users
|
||||
description: For security reasons password and public key are empty in the response
|
||||
operationId: get_users
|
||||
parameters:
|
||||
- in: query
|
||||
name: offset
|
||||
schema:
|
||||
type: integer
|
||||
minimum: 0
|
||||
default: 0
|
||||
required: false
|
||||
- in: query
|
||||
name: limit
|
||||
schema:
|
||||
type: integer
|
||||
minimum: 1
|
||||
maximum: 500
|
||||
default: 100
|
||||
required: false
|
||||
description: The maximum number of items to return. Max value is 500, default is 100
|
||||
- in: query
|
||||
name: order
|
||||
required: false
|
||||
description: Ordering users by username
|
||||
schema:
|
||||
type: string
|
||||
enum:
|
||||
- ASC
|
||||
- DESC
|
||||
example: ASC
|
||||
- in: query
|
||||
name: username
|
||||
required: false
|
||||
description: Filter by username, exact match, case sensitive
|
||||
schema:
|
||||
type: string
|
||||
responses:
|
||||
200:
|
||||
description: successful operation
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
type: array
|
||||
items:
|
||||
$ref : '#/components/schemas/User'
|
||||
400:
|
||||
description: Bad request
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/ApiResponse'
|
||||
example:
|
||||
status: 400
|
||||
message: ""
|
||||
error: "Error description if any"
|
||||
403:
|
||||
description: Forbidden
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/ApiResponse'
|
||||
example:
|
||||
status: 403
|
||||
message: ""
|
||||
error: "Error description if any"
|
||||
500:
|
||||
description: Internal Server Error
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/ApiResponse'
|
||||
example:
|
||||
status: 500
|
||||
message: ""
|
||||
error: "Error description if any"
|
||||
post:
|
||||
tags:
|
||||
- users
|
||||
summary: Adds a new SFTP/SCP user
|
||||
operationId: add_user
|
||||
requestBody:
|
||||
required: true
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref : '#/components/schemas/User'
|
||||
responses:
|
||||
200:
|
||||
description: successful operation
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref : '#/components/schemas/User'
|
||||
400:
|
||||
description: Bad request
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/ApiResponse'
|
||||
example:
|
||||
status: 400
|
||||
message: ""
|
||||
error: "Error description if any"
|
||||
403:
|
||||
description: Forbidden
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/ApiResponse'
|
||||
example:
|
||||
status: 403
|
||||
message: ""
|
||||
error: "Error description if any"
|
||||
500:
|
||||
description: Internal Server Error
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/ApiResponse'
|
||||
example:
|
||||
status: 500
|
||||
message: ""
|
||||
error: "Error description if any"
|
||||
/user/{userID}:
|
||||
get:
|
||||
tags:
|
||||
- users
|
||||
summary: Find user by ID
|
||||
description: For security reasons password and public key are empty in the response
|
||||
operationId: get_user_by_id
|
||||
parameters:
|
||||
- name: userID
|
||||
in: path
|
||||
description: ID of the user to retrieve
|
||||
required: true
|
||||
schema:
|
||||
type: integer
|
||||
format: int32
|
||||
responses:
|
||||
200:
|
||||
description: successful operation
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref : '#/components/schemas/User'
|
||||
400:
|
||||
description: Bad request
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/ApiResponse'
|
||||
example:
|
||||
status: 400
|
||||
message: ""
|
||||
error: "Error description if any"
|
||||
403:
|
||||
description: Forbidden
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/ApiResponse'
|
||||
example:
|
||||
status: 403
|
||||
message: ""
|
||||
error: "Error description if any"
|
||||
404:
|
||||
description: Not Found
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/ApiResponse'
|
||||
example:
|
||||
status: 404
|
||||
message: ""
|
||||
error: "Error description if any"
|
||||
500:
|
||||
description: Internal Server Error
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/ApiResponse'
|
||||
example:
|
||||
status: 500
|
||||
message: ""
|
||||
error: "Error description if any"
|
||||
put:
|
||||
tags:
|
||||
- users
|
||||
summary: Update an existing user
|
||||
operationId: update_user
|
||||
parameters:
|
||||
- name: userID
|
||||
in: path
|
||||
description: ID of the user to update
|
||||
required: true
|
||||
schema:
|
||||
type: integer
|
||||
format: int32
|
||||
requestBody:
|
||||
required: true
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref : '#/components/schemas/User'
|
||||
responses:
|
||||
200:
|
||||
description: successful operation
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref : '#/components/schemas/ApiResponse'
|
||||
example:
|
||||
status: 200
|
||||
message: "User updated"
|
||||
error: ""
|
||||
400:
|
||||
description: Bad request
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/ApiResponse'
|
||||
example:
|
||||
status: 400
|
||||
message: ""
|
||||
error: "Error description if any"
|
||||
403:
|
||||
description: Forbidden
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/ApiResponse'
|
||||
example:
|
||||
status: 403
|
||||
message: ""
|
||||
error: "Error description if any"
|
||||
404:
|
||||
description: Not Found
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/ApiResponse'
|
||||
example:
|
||||
status: 404
|
||||
message: ""
|
||||
error: "Error description if any"
|
||||
500:
|
||||
description: Internal Server Error
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/ApiResponse'
|
||||
example:
|
||||
status: 500
|
||||
message: ""
|
||||
error: "Error description if any"
|
||||
delete:
|
||||
tags:
|
||||
- users
|
||||
summary: Delete an existing user
|
||||
operationId: delete_user
|
||||
parameters:
|
||||
- name: userID
|
||||
in: path
|
||||
description: ID of the user to delete
|
||||
required: true
|
||||
schema:
|
||||
type: integer
|
||||
format: int32
|
||||
responses:
|
||||
200:
|
||||
description: successful operation
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref : '#/components/schemas/ApiResponse'
|
||||
example:
|
||||
status: 200
|
||||
message: "User deleted"
|
||||
error: ""
|
||||
400:
|
||||
description: Bad request
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/ApiResponse'
|
||||
example:
|
||||
status: 400
|
||||
message: ""
|
||||
error: "Error description if any"
|
||||
403:
|
||||
description: Forbidden
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/ApiResponse'
|
||||
example:
|
||||
status: 403
|
||||
message: ""
|
||||
error: "Error description if any"
|
||||
404:
|
||||
description: Not Found
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/ApiResponse'
|
||||
example:
|
||||
status: 404
|
||||
message: ""
|
||||
error: "Error description if any"
|
||||
500:
|
||||
description: Internal Server Error
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/ApiResponse'
|
||||
example:
|
||||
status: 500
|
||||
message: ""
|
||||
error: "Error description if any"
|
||||
components:
|
||||
schemas:
|
||||
Permission:
|
||||
type: string
|
||||
enum:
|
||||
- '*'
|
||||
- list
|
||||
- download
|
||||
- upload
|
||||
- overwrite
|
||||
- delete
|
||||
- rename
|
||||
- create_dirs
|
||||
- create_symlinks
|
||||
description: >
|
||||
Permissions:
|
||||
* `*` - all permissions are granted
* `list` - listing items is allowed
* `download` - downloading files is allowed
* `upload` - uploading files is allowed
* `overwrite` - overwriting existing files while uploading is allowed; the upload permission is required to allow file overwrite
* `delete` - deleting files or directories is allowed
* `rename` - renaming files or directories is allowed
* `create_dirs` - creating directories is allowed
* `create_symlinks` - creating links is allowed
|
||||
User:
|
||||
type: object
|
||||
properties:
|
||||
id:
|
||||
type: integer
|
||||
format: int32
|
||||
minimum: 1
|
||||
username:
|
||||
type: string
|
||||
password:
|
||||
type: string
|
||||
nullable: true
|
||||
description: a password or at least one public key is mandatory. If the password has no known hashing algorithm prefix it will be stored using argon2id. You can send a password hashed as bcrypt or pbkdf2 and it will be stored as is. For security reasons this field is omitted when you search/get users
|
||||
public_keys:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
nullable: true
|
||||
description: a password or at least one public key is mandatory. For security reasons this field is omitted when you search/get users.
|
||||
home_dir:
|
||||
type: string
|
||||
description: path to the user home directory. The user cannot upload or download files outside this directory. SFTPGo tries to automatically create this folder if missing. Must be an absolute path
|
||||
uid:
|
||||
type: integer
|
||||
format: int32
|
||||
minimum: 0
|
||||
maximum: 65535
|
||||
description: if you run sftpgo as root user the created files and directories will be assigned to this uid. 0 means no change, the owner will be the user that runs sftpgo. Ignored on windows
|
||||
gid:
|
||||
type: integer
|
||||
format: int32
|
||||
minimum: 0
|
||||
maximum: 65535
|
||||
description: if you run sftpgo as root user the created files and directories will be assigned to this gid. 0 means no change, the group will be the one of the user that runs sftpgo. Ignored on windows
|
||||
max_sessions:
|
||||
type: integer
|
||||
format: int32
|
||||
description: limit the sessions that a user can open. 0 means unlimited
|
||||
quota_size:
|
||||
type: integer
|
||||
format: int64
|
||||
description: quota as size. 0 means unlimited. Please note that the quota is updated if files are added/removed via SFTP/SCP, otherwise a quota scan is needed
|
||||
quota_files:
|
||||
type: integer
|
||||
format: int32
|
||||
description: quota as number of files. 0 means unlimited. Please note that the quota is updated if files are added/removed via SFTP/SCP, otherwise a quota scan is needed
|
||||
permissions:
|
||||
type: array
|
||||
items:
|
||||
$ref: '#/components/schemas/Permission'
|
||||
minItems: 1
|
||||
used_quota_size:
|
||||
type: integer
|
||||
format: int64
|
||||
used_quota_file:
|
||||
type: integer
|
||||
format: int32
|
||||
last_quota_update:
|
||||
type: integer
|
||||
format: int64
|
||||
description: last quota update as unix timestamp in milliseconds
|
||||
upload_bandwidth:
|
||||
type: integer
|
||||
format: int32
|
||||
description: Maximum upload bandwidth as KB/s, 0 means unlimited
|
||||
download_bandwidth:
|
||||
type: integer
|
||||
format: int32
|
||||
description: Maximum download bandwidth as KB/s, 0 means unlimited
|
||||
Transfer:
|
||||
type: object
|
||||
properties:
|
||||
operation_type:
|
||||
type: string
|
||||
enum:
|
||||
- upload
|
||||
- download
|
||||
path:
|
||||
type: string
|
||||
description: SFTP/SCP file path for the upload/download
|
||||
start_time:
|
||||
type: integer
|
||||
format: int64
|
||||
description: start time as unix timestamp in milliseconds
|
||||
size:
|
||||
type: integer
|
||||
format: int64
|
||||
description: bytes transferred
|
||||
last_activity:
|
||||
type: integer
|
||||
format: int64
|
||||
description: last transfer activity as unix timestamp in milliseconds
|
||||
ConnectionStatus:
|
||||
type: object
|
||||
properties:
|
||||
username:
|
||||
type: string
|
||||
description: connected username
|
||||
connection_id:
|
||||
type: string
|
||||
description: unique connection identifier
|
||||
client_version:
|
||||
type: string
|
||||
description: SFTP/SCP client version
|
||||
remote_address:
|
||||
type: string
|
||||
description: Remote address for the connected SFTP/SCP client
|
||||
connection_time:
|
||||
type: integer
|
||||
format: int64
|
||||
description: connection time as unix timestamp in milliseconds
|
||||
last_activity:
|
||||
type: integer
|
||||
format: int64
|
||||
description: last client activity as unix timestamp in milliseconds
|
||||
protocol:
|
||||
type: string
|
||||
enum:
|
||||
- SFTP
|
||||
- SCP
|
||||
active_transfers:
|
||||
type: array
|
||||
items:
|
||||
$ref : '#/components/schemas/Transfer'
|
||||
QuotaScan:
|
||||
type: object
|
||||
properties:
|
||||
username:
|
||||
type: string
|
||||
description: username with an active scan
|
||||
start_time:
|
||||
type: integer
|
||||
format: int64
|
||||
description: scan start time as unix timestamp in milliseconds
|
||||
ApiResponse:
|
||||
type: object
|
||||
properties:
|
||||
status:
|
||||
type: integer
|
||||
format: int32
|
||||
minimum: 200
|
||||
maximum: 500
|
||||
example: 200
|
||||
description: HTTP Status code, for example 200 OK, 400 Bad request and so on
|
||||
message:
|
||||
type: string
|
||||
nullable: true
|
||||
description: additional message if any
|
||||
error:
|
||||
type: string
|
||||
nullable: true
|
||||
description: error description if any
|
||||
VersionInfo:
|
||||
type: object
|
||||
properties:
|
||||
version:
|
||||
type: string
|
||||
build_date:
|
||||
type: string
|
||||
commit_hash:
|
||||
type: string
|
||||
|
||||
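The User schema above maps directly to the add_user operation. The following is a hedged sketch, not part of the changeset: the field values, host and port are examples only, and the request relies on the default /api/v1 base path.

package main

import (
    "bytes"
    "encoding/json"
    "fmt"
    "net/http"
)

func main() {
    // Example body matching the User schema: username, a credential and an
    // absolute home_dir, plus at least one permission.
    user := map[string]interface{}{
        "username":    "test_user",
        "password":    "secret",
        "home_dir":    "/srv/sftpgo/test_user",
        "permissions": []string{"list", "download", "upload"},
    }
    body, _ := json.Marshal(user)
    resp, err := http.Post("http://127.0.0.1:8080/api/v1/user", "application/json", bytes.NewReader(body))
    if err != nil {
        fmt.Println("request error:", err)
        return
    }
    defer resp.Body.Close()
    fmt.Println("status:", resp.StatusCode)
}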
151
api/user.go
@@ -1,151 +0,0 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"net/http"
|
||||
"strconv"
|
||||
|
||||
"github.com/drakkan/sftpgo/dataprovider"
|
||||
"github.com/go-chi/chi"
|
||||
"github.com/go-chi/render"
|
||||
)
|
||||
|
||||
func getUsers(w http.ResponseWriter, r *http.Request) {
|
||||
limit := 100
|
||||
offset := 0
|
||||
order := "ASC"
|
||||
username := ""
|
||||
var err error
|
||||
if _, ok := r.URL.Query()["limit"]; ok {
|
||||
limit, err = strconv.Atoi(r.URL.Query().Get("limit"))
|
||||
if err != nil {
|
||||
err = errors.New("Invalid limit")
|
||||
sendAPIResponse(w, r, err, "", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
if limit > 500 {
|
||||
limit = 500
|
||||
}
|
||||
}
|
||||
if _, ok := r.URL.Query()["offset"]; ok {
|
||||
offset, err = strconv.Atoi(r.URL.Query().Get("offset"))
|
||||
if err != nil {
|
||||
err = errors.New("Invalid offset")
|
||||
sendAPIResponse(w, r, err, "", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
}
|
||||
if _, ok := r.URL.Query()["order"]; ok {
|
||||
order = r.URL.Query().Get("order")
|
||||
if order != "ASC" && order != "DESC" {
|
||||
err = errors.New("Invalid order")
|
||||
sendAPIResponse(w, r, err, "", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
}
|
||||
if _, ok := r.URL.Query()["username"]; ok {
|
||||
username = r.URL.Query().Get("username")
|
||||
}
|
||||
users, err := dataprovider.GetUsers(dataProvider, limit, offset, order, username)
|
||||
if err == nil {
|
||||
render.JSON(w, r, users)
|
||||
} else {
|
||||
sendAPIResponse(w, r, err, "", http.StatusInternalServerError)
|
||||
}
|
||||
}
|
||||
|
||||
func getUserByID(w http.ResponseWriter, r *http.Request) {
|
||||
userID, err := strconv.ParseInt(chi.URLParam(r, "userID"), 10, 64)
|
||||
if err != nil {
|
||||
err = errors.New("Invalid userID")
|
||||
sendAPIResponse(w, r, err, "", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
user, err := dataprovider.GetUserByID(dataProvider, userID)
|
||||
if err == nil {
|
||||
user.Password = ""
|
||||
user.PublicKeys = []string{}
|
||||
render.JSON(w, r, user)
|
||||
} else if _, ok := err.(*dataprovider.RecordNotFoundError); ok {
|
||||
sendAPIResponse(w, r, err, "", http.StatusNotFound)
|
||||
} else {
|
||||
sendAPIResponse(w, r, err, "", http.StatusInternalServerError)
|
||||
}
|
||||
}
|
||||
|
||||
func addUser(w http.ResponseWriter, r *http.Request) {
|
||||
var user dataprovider.User
|
||||
err := render.DecodeJSON(r.Body, &user)
|
||||
if err != nil {
|
||||
sendAPIResponse(w, r, err, "", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
err = dataprovider.AddUser(dataProvider, user)
|
||||
if err == nil {
|
||||
user, err = dataprovider.UserExists(dataProvider, user.Username)
|
||||
if err == nil {
|
||||
user.Password = ""
|
||||
user.PublicKeys = []string{}
|
||||
render.JSON(w, r, user)
|
||||
} else {
|
||||
sendAPIResponse(w, r, err, "", http.StatusInternalServerError)
|
||||
}
|
||||
} else {
|
||||
sendAPIResponse(w, r, err, "", getRespStatus(err))
|
||||
}
|
||||
}
|
||||
|
||||
func updateUser(w http.ResponseWriter, r *http.Request) {
|
||||
userID, err := strconv.ParseInt(chi.URLParam(r, "userID"), 10, 64)
|
||||
if err != nil {
|
||||
err = errors.New("Invalid userID")
|
||||
sendAPIResponse(w, r, err, "", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
user, err := dataprovider.GetUserByID(dataProvider, userID)
|
||||
if _, ok := err.(*dataprovider.RecordNotFoundError); ok {
|
||||
sendAPIResponse(w, r, err, "", http.StatusNotFound)
|
||||
return
|
||||
} else if err != nil {
|
||||
sendAPIResponse(w, r, err, "", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
err = render.DecodeJSON(r.Body, &user)
|
||||
if err != nil {
|
||||
sendAPIResponse(w, r, err, "", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
if user.ID != userID {
|
||||
sendAPIResponse(w, r, err, "user ID in request body does not match user ID in path parameter", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
err = dataprovider.UpdateUser(dataProvider, user)
|
||||
if err != nil {
|
||||
sendAPIResponse(w, r, err, "", getRespStatus(err))
|
||||
} else {
|
||||
sendAPIResponse(w, r, err, "User updated", http.StatusOK)
|
||||
}
|
||||
}
|
||||
|
||||
func deleteUser(w http.ResponseWriter, r *http.Request) {
|
||||
userID, err := strconv.ParseInt(chi.URLParam(r, "userID"), 10, 64)
|
||||
if err != nil {
|
||||
err = errors.New("Invalid userID")
|
||||
sendAPIResponse(w, r, err, "", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
user, err := dataprovider.GetUserByID(dataProvider, userID)
|
||||
if _, ok := err.(*dataprovider.RecordNotFoundError); ok {
|
||||
sendAPIResponse(w, r, err, "", http.StatusNotFound)
|
||||
return
|
||||
} else if err != nil {
|
||||
sendAPIResponse(w, r, err, "", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
err = dataprovider.DeleteUser(dataProvider, user)
|
||||
if err != nil {
|
||||
sendAPIResponse(w, r, err, "", http.StatusInternalServerError)
|
||||
} else {
|
||||
sendAPIResponse(w, r, err, "User deleted", http.StatusOK)
|
||||
}
|
||||
}
|
||||
@@ -1,52 +0,0 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/drakkan/sftpgo/service"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var (
|
||||
installCmd = &cobra.Command{
|
||||
Use: "install",
|
||||
Short: "Install SFTPGo as Windows Service",
|
||||
Long: `To install the SFTPGo Windows Service with the default values for the command line flags simply use:
|
||||
|
||||
sftpgo service install
|
||||
|
||||
Please take a look at the usage below to customize the startup options`,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
s := service.Service{
|
||||
ConfigDir: configDir,
|
||||
ConfigFile: configFile,
|
||||
LogFilePath: logFilePath,
|
||||
LogMaxSize: logMaxSize,
|
||||
LogMaxBackups: logMaxBackups,
|
||||
LogMaxAge: logMaxAge,
|
||||
LogCompress: logCompress,
|
||||
LogVerbose: logVerbose,
|
||||
Shutdown: make(chan bool),
|
||||
}
|
||||
winService := service.WindowsService{
|
||||
Service: s,
|
||||
}
|
||||
serviceArgs := []string{"service", "start"}
|
||||
customFlags := getCustomServeFlags()
|
||||
if len(customFlags) > 0 {
|
||||
serviceArgs = append(serviceArgs, customFlags...)
|
||||
}
|
||||
err := winService.Install(serviceArgs...)
|
||||
if err != nil {
|
||||
fmt.Printf("Error installing service: %v\r\n", err)
|
||||
} else {
|
||||
fmt.Printf("Service installed!\r\n")
|
||||
}
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
func init() {
|
||||
serviceCmd.AddCommand(installCmd)
|
||||
addServeFlags(installCmd)
|
||||
}
|
||||
168
cmd/root.go
@@ -1,168 +0,0 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
|
||||
"github.com/drakkan/sftpgo/config"
|
||||
"github.com/drakkan/sftpgo/utils"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
const (
|
||||
logSender = "cmd"
|
||||
configDirFlag = "config-dir"
|
||||
configDirKey = "config_dir"
|
||||
configFileFlag = "config-file"
|
||||
configFileKey = "config_file"
|
||||
logFilePathFlag = "log-file-path"
|
||||
logFilePathKey = "log_file_path"
|
||||
logMaxSizeFlag = "log-max-size"
|
||||
logMaxSizeKey = "log_max_size"
|
||||
logMaxBackupFlag = "log-max-backups"
|
||||
logMaxBackupKey = "log_max_backups"
|
||||
logMaxAgeFlag = "log-max-age"
|
||||
logMaxAgeKey = "log_max_age"
|
||||
logCompressFlag = "log-compress"
|
||||
logCompressKey = "log_compress"
|
||||
logVerboseFlag = "log-verbose"
|
||||
logVerboseKey = "log_verbose"
|
||||
defaultConfigDir = "."
|
||||
defaultConfigName = config.DefaultConfigName
|
||||
defaultLogFile = "sftpgo.log"
|
||||
defaultLogMaxSize = 10
|
||||
defaultLogMaxBackup = 5
|
||||
defaultLogMaxAge = 28
|
||||
defaultLogCompress = false
|
||||
defaultLogVerbose = true
|
||||
)
|
||||
|
||||
var (
|
||||
configDir string
|
||||
configFile string
|
||||
logFilePath string
|
||||
logMaxSize int
|
||||
logMaxBackups int
|
||||
logMaxAge int
|
||||
logCompress bool
|
||||
logVerbose bool
|
||||
|
||||
rootCmd = &cobra.Command{
|
||||
Use: "sftpgo",
|
||||
Short: "Full featured and highly configurable SFTP server",
|
||||
}
|
||||
)
|
||||
|
||||
func init() {
|
||||
version := utils.GetAppVersion()
|
||||
rootCmd.Flags().BoolP("version", "v", false, "")
|
||||
rootCmd.Version = version.GetVersionAsString()
|
||||
rootCmd.SetVersionTemplate(`{{printf "SFTPGo version: "}}{{printf "%s" .Version}}
|
||||
`)
|
||||
}
|
||||
|
||||
// Execute adds all child commands to the root command and sets flags appropriately.
|
||||
// This is called by main.main(). It only needs to happen once to the rootCmd.
|
||||
func Execute() {
|
||||
if err := rootCmd.Execute(); err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
func addServeFlags(cmd *cobra.Command) {
|
||||
viper.SetDefault(configDirKey, defaultConfigDir)
|
||||
viper.BindEnv(configDirKey, "SFTPGO_CONFIG_DIR")
|
||||
cmd.Flags().StringVarP(&configDir, configDirFlag, "c", viper.GetString(configDirKey),
|
||||
"Location for SFTPGo config dir. This directory should contain the \"sftpgo\" configuration file or the configured "+
|
||||
"config-file and it is used as the base for files with a relative path (eg. the private keys for the SFTP server, "+
|
||||
"the SQLite database if you use SQLite as data provider). This flag can be set using SFTPGO_CONFIG_DIR env var too.")
|
||||
viper.BindPFlag(configDirKey, cmd.Flags().Lookup(configDirFlag))
|
||||
|
||||
viper.SetDefault(configFileKey, defaultConfigName)
|
||||
viper.BindEnv(configFileKey, "SFTPGO_CONFIG_FILE")
|
||||
cmd.Flags().StringVarP(&configFile, configFileFlag, "f", viper.GetString(configFileKey),
|
||||
"Name for SFTPGo configuration file. It must be the name of a file stored in config-dir not the absolute path to the "+
|
||||
"configuration file. The specified file name must have no extension we automatically load JSON, YAML, TOML, HCL and "+
|
||||
"Java properties. Therefore if you set \"sftpgo\" then \"sftpgo.json\", \"sftpgo.yaml\" and so on are searched. "+
|
||||
"This flag can be set using SFTPGO_CONFIG_FILE env var too.")
|
||||
viper.BindPFlag(configFileKey, cmd.Flags().Lookup(configFileFlag))
|
||||
|
||||
viper.SetDefault(logFilePathKey, defaultLogFile)
|
||||
viper.BindEnv(logFilePathKey, "SFTPGO_LOG_FILE_PATH")
|
||||
cmd.Flags().StringVarP(&logFilePath, logFilePathFlag, "l", viper.GetString(logFilePathKey),
|
||||
"Location for the log file. Leave empty to write logs to the standard output. This flag can be set using SFTPGO_LOG_FILE_PATH "+
|
||||
"env var too.")
|
||||
viper.BindPFlag(logFilePathKey, cmd.Flags().Lookup(logFilePathFlag))
|
||||
|
||||
viper.SetDefault(logMaxSizeKey, defaultLogMaxSize)
|
||||
viper.BindEnv(logMaxSizeKey, "SFTPGO_LOG_MAX_SIZE")
|
||||
cmd.Flags().IntVarP(&logMaxSize, logMaxSizeFlag, "s", viper.GetInt(logMaxSizeKey),
|
||||
"Maximum size in megabytes of the log file before it gets rotated. This flag can be set using SFTPGO_LOG_MAX_SIZE "+
|
||||
"env var too. It is unused if log-file-path is empty.")
|
||||
viper.BindPFlag(logMaxSizeKey, cmd.Flags().Lookup(logMaxSizeFlag))
|
||||
|
||||
viper.SetDefault(logMaxBackupKey, defaultLogMaxBackup)
|
||||
viper.BindEnv(logMaxBackupKey, "SFTPGO_LOG_MAX_BACKUPS")
|
||||
cmd.Flags().IntVarP(&logMaxBackups, logMaxBackupFlag, "b", viper.GetInt(logMaxBackupKey),
|
||||
"Maximum number of old log files to retain. This flag can be set using SFTPGO_LOG_MAX_BACKUPS env var too. "+
|
||||
"It is unused if log-file-path is empty.")
|
||||
viper.BindPFlag(logMaxBackupKey, cmd.Flags().Lookup(logMaxBackupFlag))
|
||||
|
||||
viper.SetDefault(logMaxAgeKey, defaultLogMaxAge)
|
||||
viper.BindEnv(logMaxAgeKey, "SFTPGO_LOG_MAX_AGE")
|
||||
cmd.Flags().IntVarP(&logMaxAge, logMaxAgeFlag, "a", viper.GetInt(logMaxAgeKey),
|
||||
"Maximum number of days to retain old log files. This flag can be set using SFTPGO_LOG_MAX_AGE env var too. "+
|
||||
"It is unused if log-file-path is empty.")
|
||||
viper.BindPFlag(logMaxAgeKey, cmd.Flags().Lookup(logMaxAgeFlag))
|
||||
|
||||
viper.SetDefault(logCompressKey, defaultLogCompress)
|
||||
viper.BindEnv(logCompressKey, "SFTPGO_LOG_COMPRESS")
|
||||
cmd.Flags().BoolVarP(&logCompress, logCompressFlag, "z", viper.GetBool(logCompressKey), "Determine if the rotated "+
|
||||
"log files should be compressed using gzip. This flag can be set using SFTPGO_LOG_COMPRESS env var too. "+
|
||||
"It is unused if log-file-path is empty.")
|
||||
viper.BindPFlag(logCompressKey, cmd.Flags().Lookup(logCompressFlag))
|
||||
|
||||
viper.SetDefault(logVerboseKey, defaultLogVerbose)
|
||||
viper.BindEnv(logVerboseKey, "SFTPGO_LOG_VERBOSE")
|
||||
cmd.Flags().BoolVarP(&logVerbose, logVerboseFlag, "v", viper.GetBool(logVerboseKey), "Enable verbose logs. "+
|
||||
"This flag can be set using SFTPGO_LOG_VERBOSE env var too.")
|
||||
viper.BindPFlag(logVerboseKey, cmd.Flags().Lookup(logVerboseFlag))
|
||||
}
|
||||
|
||||
func getCustomServeFlags() []string {
|
||||
result := []string{}
|
||||
if configDir != defaultConfigDir {
|
||||
result = append(result, "--"+configDirFlag)
|
||||
result = append(result, configDir)
|
||||
}
|
||||
if configFile != defaultConfigName {
|
||||
result = append(result, "--"+configFileFlag)
|
||||
result = append(result, configFile)
|
||||
}
|
||||
if logFilePath != defaultLogFile {
|
||||
result = append(result, "--"+logFilePathFlag)
|
||||
result = append(result, logFilePath)
|
||||
}
|
||||
if logMaxSize != defaultLogMaxSize {
|
||||
result = append(result, "--"+logMaxSizeFlag)
|
||||
result = append(result, strconv.Itoa(logMaxSize))
|
||||
}
|
||||
if logMaxBackups != defaultLogMaxBackup {
|
||||
result = append(result, "--"+logMaxBackupFlag)
|
||||
result = append(result, strconv.Itoa(logMaxBackups))
|
||||
}
|
||||
if logMaxAge != defaultLogMaxAge {
|
||||
result = append(result, "--"+logMaxAgeFlag)
|
||||
result = append(result, strconv.Itoa(logMaxAge))
|
||||
}
|
||||
if logVerbose != defaultLogVerbose {
|
||||
result = append(result, "--"+logVerboseFlag+"=false")
|
||||
}
|
||||
if logCompress != defaultLogCompress {
|
||||
result = append(result, "--"+logCompressFlag+"=true")
|
||||
}
|
||||
return result
|
||||
}
|
||||
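getCustomServeFlags only forwards flags whose value differs from the defaults, so the arguments stored with the Windows service stay short. A hypothetical example, not taken from the changeset, of what installCmd would pass to winService.Install when only config-dir and log-file-path were customized:

package main

import "fmt"

func main() {
    // Hypothetical result: "service", "start" plus only the non-default flags.
    serviceArgs := []string{"service", "start", "--config-dir", "/etc/sftpgo", "--log-file-path", "/var/log/sftpgo/sftpgo.log"}
    fmt.Println(serviceArgs)
}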
39
cmd/serve.go
@@ -1,39 +0,0 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"github.com/drakkan/sftpgo/service"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var (
|
||||
serveCmd = &cobra.Command{
|
||||
Use: "serve",
|
||||
Short: "Start the SFTP Server",
|
||||
Long: `To start SFTPGo with the default values for the command line flags simply use:
|
||||
|
||||
sftpgo serve
|
||||
|
||||
Please take a look at the usage below to customize the startup options`,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
service := service.Service{
|
||||
ConfigDir: configDir,
|
||||
ConfigFile: configFile,
|
||||
LogFilePath: logFilePath,
|
||||
LogMaxSize: logMaxSize,
|
||||
LogMaxBackups: logMaxBackups,
|
||||
LogMaxAge: logMaxAge,
|
||||
LogCompress: logCompress,
|
||||
LogVerbose: logVerbose,
|
||||
Shutdown: make(chan bool),
|
||||
}
|
||||
if err := service.Start(); err == nil {
|
||||
service.Wait()
|
||||
}
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
func init() {
|
||||
rootCmd.AddCommand(serveCmd)
|
||||
addServeFlags(serveCmd)
|
||||
}
|
||||
@@ -1,16 +0,0 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var (
|
||||
serviceCmd = &cobra.Command{
|
||||
Use: "service",
|
||||
Short: "Install, Uninstall, Start, Stop and retrieve status for SFTPGo Windows Service",
|
||||
}
|
||||
)
|
||||
|
||||
func init() {
|
||||
rootCmd.AddCommand(serviceCmd)
|
||||
}
|
||||
@@ -1,42 +0,0 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/drakkan/sftpgo/service"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var (
|
||||
startCmd = &cobra.Command{
|
||||
Use: "start",
|
||||
Short: "Start SFTPGo Windows Service",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
s := service.Service{
|
||||
ConfigDir: configDir,
|
||||
ConfigFile: configFile,
|
||||
LogFilePath: logFilePath,
|
||||
LogMaxSize: logMaxSize,
|
||||
LogMaxBackups: logMaxBackups,
|
||||
LogMaxAge: logMaxAge,
|
||||
LogCompress: logCompress,
|
||||
LogVerbose: logVerbose,
|
||||
Shutdown: make(chan bool),
|
||||
}
|
||||
winService := service.WindowsService{
|
||||
Service: s,
|
||||
}
|
||||
err := winService.RunService()
|
||||
if err != nil {
|
||||
fmt.Printf("Error starting service: %v\r\n", err)
|
||||
} else {
|
||||
fmt.Printf("Service started!\r\n")
|
||||
}
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
func init() {
|
||||
serviceCmd.AddCommand(startCmd)
|
||||
addServeFlags(startCmd)
|
||||
}
|
||||
@@ -1,32 +0,0 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/drakkan/sftpgo/service"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var (
|
||||
statusCmd = &cobra.Command{
|
||||
Use: "status",
|
||||
Short: "Retrieve the status for the SFTPGo Windows Service",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
s := service.WindowsService{
|
||||
Service: service.Service{
|
||||
Shutdown: make(chan bool),
|
||||
},
|
||||
}
|
||||
status, err := s.Status()
|
||||
if err != nil {
|
||||
fmt.Printf("Error querying service status: %v\r\n", err)
|
||||
} else {
|
||||
fmt.Printf("Service status: %#v\r\n", status.String())
|
||||
}
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
func init() {
|
||||
serviceCmd.AddCommand(statusCmd)
|
||||
}
|
||||
@@ -1,32 +0,0 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/drakkan/sftpgo/service"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var (
|
||||
stopCmd = &cobra.Command{
|
||||
Use: "stop",
|
||||
Short: "Stop SFTPGo Windows Service",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
s := service.WindowsService{
|
||||
Service: service.Service{
|
||||
Shutdown: make(chan bool),
|
||||
},
|
||||
}
|
||||
err := s.Stop()
|
||||
if err != nil {
|
||||
fmt.Printf("Error stopping service: %v\r\n", err)
|
||||
} else {
|
||||
fmt.Printf("Service stopped!\r\n")
|
||||
}
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
func init() {
|
||||
serviceCmd.AddCommand(stopCmd)
|
||||
}
|
||||
@@ -1,32 +0,0 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/drakkan/sftpgo/service"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var (
|
||||
uninstallCmd = &cobra.Command{
|
||||
Use: "uninstall",
|
||||
Short: "Uninstall SFTPGo Windows Service",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
s := service.WindowsService{
|
||||
Service: service.Service{
|
||||
Shutdown: make(chan bool),
|
||||
},
|
||||
}
|
||||
err := s.Uninstall()
|
||||
if err != nil {
|
||||
fmt.Printf("Error removing service: %v\r\n", err)
|
||||
} else {
|
||||
fmt.Printf("Service uninstalled\r\n")
|
||||
}
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
func init() {
|
||||
serviceCmd.AddCommand(uninstallCmd)
|
||||
}
|
||||
149
config/config.go
@@ -1,149 +0,0 @@
|
||||
// Package config manages the configuration.
// Configuration is loaded from the sftpgo.conf file.
// If sftpgo.conf is not found or cannot be read or decoded as json the default configuration is used.
// The default configuration can be found inside the source tree:
// https://github.com/drakkan/sftpgo/blob/master/sftpgo.conf
|
||||
package config
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/drakkan/sftpgo/api"
|
||||
"github.com/drakkan/sftpgo/dataprovider"
|
||||
"github.com/drakkan/sftpgo/logger"
|
||||
"github.com/drakkan/sftpgo/sftpd"
|
||||
"github.com/drakkan/sftpgo/utils"
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
const (
|
||||
logSender = "config"
|
||||
// DefaultConfigName defines the name for the default config file.
|
||||
// This is the file name without extension, we use viper and so we
|
||||
// support all the config files format supported by viper
|
||||
DefaultConfigName = "sftpgo"
|
||||
// configEnvPrefix defines the prefix that environment variables will use
|
||||
configEnvPrefix = "sftpgo"
|
||||
)
|
||||
|
||||
var (
|
||||
globalConf globalConfig
|
||||
defaultBanner = fmt.Sprintf("SFTPGo_%v", utils.GetAppVersion().Version)
|
||||
)
|
||||
|
||||
type globalConfig struct {
|
||||
SFTPD sftpd.Configuration `json:"sftpd" mapstructure:"sftpd"`
|
||||
ProviderConf dataprovider.Config `json:"data_provider" mapstructure:"data_provider"`
|
||||
HTTPDConfig api.HTTPDConf `json:"httpd" mapstructure:"httpd"`
|
||||
}
|
||||
|
||||
func init() {
|
||||
// create a default configuration to use if no config file is provided
|
||||
globalConf = globalConfig{
|
||||
SFTPD: sftpd.Configuration{
|
||||
Banner: defaultBanner,
|
||||
BindPort: 2022,
|
||||
BindAddress: "",
|
||||
IdleTimeout: 15,
|
||||
MaxAuthTries: 0,
|
||||
Umask: "0022",
|
||||
UploadMode: 0,
|
||||
Actions: sftpd.Actions{
|
||||
ExecuteOn: []string{},
|
||||
Command: "",
|
||||
HTTPNotificationURL: "",
|
||||
},
|
||||
Keys: []sftpd.Key{},
|
||||
IsSCPEnabled: false,
|
||||
KexAlgorithms: []string{},
|
||||
Ciphers: []string{},
|
||||
MACs: []string{},
|
||||
LoginBannerFile: "",
|
||||
},
|
||||
ProviderConf: dataprovider.Config{
|
||||
Driver: "sqlite",
|
||||
Name: "sftpgo.db",
|
||||
Host: "",
|
||||
Port: 5432,
|
||||
Username: "",
|
||||
Password: "",
|
||||
ConnectionString: "",
|
||||
UsersTable: "users",
|
||||
ManageUsers: 1,
|
||||
SSLMode: 0,
|
||||
TrackQuota: 1,
|
||||
PoolSize: 0,
|
||||
},
|
||||
HTTPDConfig: api.HTTPDConf{
|
||||
BindPort: 8080,
|
||||
BindAddress: "127.0.0.1",
|
||||
},
|
||||
}
|
||||
|
||||
viper.SetEnvPrefix(configEnvPrefix)
|
||||
replacer := strings.NewReplacer(".", "__")
|
||||
viper.SetEnvKeyReplacer(replacer)
|
||||
viper.SetConfigName(DefaultConfigName)
|
||||
viper.AutomaticEnv()
|
||||
viper.AllowEmptyEnv(true)
|
||||
}
|
||||
|
||||
// GetSFTPDConfig returns the configuration for the SFTP server
|
||||
func GetSFTPDConfig() sftpd.Configuration {
|
||||
return globalConf.SFTPD
|
||||
}
|
||||
|
||||
// GetHTTPDConfig returns the configuration for the HTTP server
|
||||
func GetHTTPDConfig() api.HTTPDConf {
|
||||
return globalConf.HTTPDConfig
|
||||
}
|
||||
|
||||
//GetProviderConf returns the configuration for the data provider
|
||||
func GetProviderConf() dataprovider.Config {
|
||||
return globalConf.ProviderConf
|
||||
}
|
||||
|
||||
func getRedactedGlobalConf() globalConfig {
|
||||
conf := globalConf
|
||||
conf.ProviderConf.Password = "[redacted]"
|
||||
return conf
|
||||
}
|
||||
|
||||
// LoadConfig loads the configuration
|
||||
// configDir will be added to the configuration search paths.
|
||||
// The search path contains by default the current directory and on linux it contains
|
||||
// $HOME/.config/sftpgo and /etc/sftpgo too.
|
||||
// configName is the name of the configuration to search without extension
|
||||
func LoadConfig(configDir, configName string) error {
|
||||
var err error
|
||||
viper.AddConfigPath(configDir)
|
||||
setViperAdditionalConfigPaths()
|
||||
viper.AddConfigPath(".")
|
||||
viper.SetConfigName(configName)
|
||||
if err = viper.ReadInConfig(); err != nil {
|
||||
logger.Warn(logSender, "", "error loading configuration file: %v. Default configuration will be used: %+v",
|
||||
err, getRedactedGlobalConf())
|
||||
logger.WarnToConsole("error loading configuration file: %v. Default configuration will be used.", err)
|
||||
return err
|
||||
}
|
||||
err = viper.Unmarshal(&globalConf)
|
||||
if err != nil {
|
||||
logger.Warn(logSender, "", "error parsing configuration file: %v. Default configuration will be used: %+v",
|
||||
err, getRedactedGlobalConf())
|
||||
logger.WarnToConsole("error parsing configuration file: %v. Default configuration will be used.", err)
|
||||
return err
|
||||
}
|
||||
if strings.TrimSpace(globalConf.SFTPD.Banner) == "" {
|
||||
globalConf.SFTPD.Banner = defaultBanner
|
||||
}
|
||||
if globalConf.SFTPD.UploadMode < 0 || globalConf.SFTPD.UploadMode > 1 {
|
||||
err = fmt.Errorf("Invalid upload_mode 0 and 1 are supported, configured: %v reset upload_mode to 0",
|
||||
globalConf.SFTPD.UploadMode)
|
||||
globalConf.SFTPD.UploadMode = 0
|
||||
logger.Warn(logSender, "", "Configuration error: %v", err)
|
||||
logger.WarnToConsole("Configuration error: %v", err)
|
||||
}
|
||||
logger.Debug(logSender, "", "config file used: '%v', config loaded: %+v", viper.ConfigFileUsed(), getRedactedGlobalConf())
|
||||
return err
|
||||
}
|
||||
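LoadConfig is typically called once at startup and the typed sections are then read back through the getters above. A minimal sketch, not part of the changeset, mirroring the calls made by the config tests below; the config directory is an example.

package main

import (
    "fmt"

    "github.com/drakkan/sftpgo/config"
)

func main() {
    // An empty config name keeps the default "sftpgo" name set in init above.
    if err := config.LoadConfig(".", ""); err != nil {
        fmt.Println("falling back to the default configuration:", err)
    }
    sftpdConf := config.GetSFTPDConfig()
    fmt.Println("SFTPD bind port:", sftpdConf.BindPort)
}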
@@ -1,11 +0,0 @@
|
||||
// +build linux
|
||||
|
||||
package config
|
||||
|
||||
import "github.com/spf13/viper"
|
||||
|
||||
// linux specific config search path
|
||||
func setViperAdditionalConfigPaths() {
|
||||
viper.AddConfigPath("$HOME/.config/sftpgo")
|
||||
viper.AddConfigPath("/etc/sftpgo")
|
||||
}
|
||||
@@ -1,7 +0,0 @@
|
||||
// +build !linux
|
||||
|
||||
package config
|
||||
|
||||
func setViperAdditionalConfigPaths() {
|
||||
|
||||
}
|
||||
@@ -1,99 +0,0 @@
|
||||
package config_test
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/drakkan/sftpgo/api"
|
||||
"github.com/drakkan/sftpgo/config"
|
||||
"github.com/drakkan/sftpgo/dataprovider"
|
||||
"github.com/drakkan/sftpgo/sftpd"
|
||||
)
|
||||
|
||||
const (
|
||||
tempConfigName = "temp"
|
||||
)
|
||||
|
||||
func TestLoadConfigTest(t *testing.T) {
|
||||
configDir := ".."
|
||||
err := config.LoadConfig(configDir, "")
|
||||
if err != nil {
|
||||
t.Errorf("error loading config")
|
||||
}
|
||||
emptyHTTPDConf := api.HTTPDConf{}
|
||||
if config.GetHTTPDConfig() == emptyHTTPDConf {
|
||||
t.Errorf("error loading httpd conf")
|
||||
}
|
||||
emptyProviderConf := dataprovider.Config{}
|
||||
if config.GetProviderConf() == emptyProviderConf {
|
||||
t.Errorf("error loading provider conf")
|
||||
}
|
||||
emptySFTPDConf := sftpd.Configuration{}
|
||||
if config.GetSFTPDConfig().BindPort == emptySFTPDConf.BindPort {
|
||||
t.Errorf("error loading SFTPD conf")
|
||||
}
|
||||
confName := tempConfigName + ".json"
|
||||
configFilePath := filepath.Join(configDir, confName)
|
||||
err = config.LoadConfig(configDir, tempConfigName)
|
||||
if err == nil {
|
||||
t.Errorf("loading a non existent config file must fail")
|
||||
}
|
||||
ioutil.WriteFile(configFilePath, []byte("{invalid json}"), 0666)
|
||||
err = config.LoadConfig(configDir, tempConfigName)
|
||||
if err == nil {
|
||||
t.Errorf("loading an invalid config file must fail")
|
||||
}
|
||||
ioutil.WriteFile(configFilePath, []byte("{\"sftpd\": {\"bind_port\": \"a\"}}"), 0666)
|
||||
err = config.LoadConfig(configDir, tempConfigName)
|
||||
if err == nil {
|
||||
t.Errorf("loading a config with an invalid bond_port must fail")
|
||||
}
|
||||
os.Remove(configFilePath)
|
||||
}
|
||||
|
||||
func TestEmptyBanner(t *testing.T) {
|
||||
configDir := ".."
|
||||
confName := tempConfigName + ".json"
|
||||
configFilePath := filepath.Join(configDir, confName)
|
||||
config.LoadConfig(configDir, "")
|
||||
sftpdConf := config.GetSFTPDConfig()
|
||||
sftpdConf.Banner = " "
|
||||
c := make(map[string]sftpd.Configuration)
|
||||
c["sftpd"] = sftpdConf
|
||||
jsonConf, _ := json.Marshal(c)
|
||||
err := ioutil.WriteFile(configFilePath, jsonConf, 0666)
|
||||
if err != nil {
|
||||
t.Errorf("error saving temporary configuration")
|
||||
}
|
||||
config.LoadConfig(configDir, tempConfigName)
|
||||
sftpdConf = config.GetSFTPDConfig()
|
||||
if strings.TrimSpace(sftpdConf.Banner) == "" {
|
||||
t.Errorf("SFTPD banner cannot be empty")
|
||||
}
|
||||
os.Remove(configFilePath)
|
||||
}
|
||||
|
||||
func TestInvalidUploadMode(t *testing.T) {
|
||||
configDir := ".."
|
||||
confName := tempConfigName + ".json"
|
||||
configFilePath := filepath.Join(configDir, confName)
|
||||
config.LoadConfig(configDir, "")
|
||||
sftpdConf := config.GetSFTPDConfig()
|
||||
sftpdConf.UploadMode = 10
|
||||
c := make(map[string]sftpd.Configuration)
|
||||
c["sftpd"] = sftpdConf
|
||||
jsonConf, _ := json.Marshal(c)
|
||||
err := ioutil.WriteFile(configFilePath, jsonConf, 0666)
|
||||
if err != nil {
|
||||
t.Errorf("error saving temporary configuration")
|
||||
}
|
||||
err = config.LoadConfig(configDir, tempConfigName)
|
||||
if err == nil {
|
||||
t.Errorf("Loading configuration with invalid upload_mode must fail")
|
||||
}
|
||||
os.Remove(configFilePath)
|
||||
}
|
||||
@@ -1,321 +0,0 @@
|
||||
package dataprovider
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"github.com/drakkan/sftpgo/logger"
|
||||
"github.com/drakkan/sftpgo/utils"
|
||||
bolt "go.etcd.io/bbolt"
|
||||
)
|
||||
|
||||
var (
|
||||
usersBucket = []byte("users")
|
||||
usersIDIdxBucket = []byte("users_id_idx")
|
||||
)
|
||||
|
||||
// BoltProvider auth provider for bolt key/value store
|
||||
type BoltProvider struct {
|
||||
dbHandle *bolt.DB
|
||||
}
|
||||
|
||||
func initializeBoltProvider(basePath string) error {
|
||||
var err error
|
||||
logSender = BoltDataProviderName
|
||||
dbPath := config.Name
|
||||
if !filepath.IsAbs(dbPath) {
|
||||
dbPath = filepath.Join(basePath, dbPath)
|
||||
}
|
||||
dbHandle, err := bolt.Open(dbPath, 0600, &bolt.Options{
|
||||
NoGrowSync: false,
|
||||
FreelistType: bolt.FreelistArrayType,
|
||||
Timeout: 5 * time.Second})
|
||||
if err == nil {
|
||||
providerLog(logger.LevelDebug, "bolt key store handle created")
|
||||
err = dbHandle.Update(func(tx *bolt.Tx) error {
|
||||
_, e := tx.CreateBucketIfNotExists(usersBucket)
|
||||
return e
|
||||
})
|
||||
if err != nil {
|
||||
providerLog(logger.LevelWarn, "error creating users bucket: %v", err)
|
||||
return err
|
||||
}
|
||||
err = dbHandle.Update(func(tx *bolt.Tx) error {
|
||||
_, e := tx.CreateBucketIfNotExists(usersIDIdxBucket)
|
||||
return e
|
||||
})
|
||||
if err != nil {
|
||||
providerLog(logger.LevelWarn, "error creating username idx bucket: %v", err)
|
||||
return err
|
||||
}
|
||||
provider = BoltProvider{dbHandle: dbHandle}
|
||||
} else {
|
||||
providerLog(logger.LevelWarn, "error creating bolt key/value store handler: %v", err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (p BoltProvider) checkAvailability() error {
|
||||
_, err := p.getUsers(1, 0, "ASC", "")
|
||||
return err
|
||||
}
|
||||
|
||||
func (p BoltProvider) validateUserAndPass(username string, password string) (User, error) {
|
||||
var user User
|
||||
if len(password) == 0 {
|
||||
return user, errors.New("Credentials cannot be null or empty")
|
||||
}
|
||||
user, err := p.userExists(username)
|
||||
if err != nil {
|
||||
providerLog(logger.LevelWarn, "error authenticating user: %v, error: %v", username, err)
|
||||
return user, err
|
||||
}
|
||||
return checkUserAndPass(user, password)
|
||||
}
|
||||
|
||||
func (p BoltProvider) validateUserAndPubKey(username string, pubKey string) (User, string, error) {
|
||||
var user User
|
||||
if len(pubKey) == 0 {
|
||||
return user, "", errors.New("Credentials cannot be null or empty")
|
||||
}
|
||||
user, err := p.userExists(username)
|
||||
if err != nil {
|
||||
providerLog(logger.LevelWarn, "error authenticating user: %v, error: %v", username, err)
|
||||
return user, "", err
|
||||
}
|
||||
return checkUserAndPubKey(user, pubKey)
|
||||
}
|
||||
|
||||
func (p BoltProvider) getUserByID(ID int64) (User, error) {
|
||||
var user User
|
||||
err := p.dbHandle.View(func(tx *bolt.Tx) error {
|
||||
bucket, idxBucket, err := getBuckets(tx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
userIDAsBytes := itob(ID)
|
||||
username := idxBucket.Get(userIDAsBytes)
|
||||
if username == nil {
|
||||
return &RecordNotFoundError{err: fmt.Sprintf("user with ID %v does not exist", ID)}
|
||||
}
|
||||
u := bucket.Get(username)
|
||||
if u == nil {
|
||||
return &RecordNotFoundError{err: fmt.Sprintf("username %v and ID: %v does not exist", string(username), ID)}
|
||||
}
|
||||
return json.Unmarshal(u, &user)
|
||||
})
|
||||
|
||||
return user, err
|
||||
}
|
||||
|
||||
func (p BoltProvider) updateQuota(username string, filesAdd int, sizeAdd int64, reset bool) error {
|
||||
return p.dbHandle.Update(func(tx *bolt.Tx) error {
|
||||
bucket, _, err := getBuckets(tx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var u []byte
|
||||
if u = bucket.Get([]byte(username)); u == nil {
|
||||
return &RecordNotFoundError{err: fmt.Sprintf("username %v does not exist, unable to update quota", username)}
|
||||
}
|
||||
var user User
|
||||
err = json.Unmarshal(u, &user)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if reset {
|
||||
user.UsedQuotaSize = sizeAdd
|
||||
user.UsedQuotaFiles = filesAdd
|
||||
} else {
|
||||
user.UsedQuotaSize += sizeAdd
|
||||
user.UsedQuotaFiles += filesAdd
|
||||
}
|
||||
user.LastQuotaUpdate = utils.GetTimeAsMsSinceEpoch(time.Now())
|
||||
buf, err := json.Marshal(user)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return bucket.Put([]byte(username), buf)
|
||||
})
|
||||
}
|
||||
|
||||
func (p BoltProvider) getUsedQuota(username string) (int, int64, error) {
|
||||
user, err := p.userExists(username)
|
||||
if err != nil {
|
||||
providerLog(logger.LevelWarn, "unable to get quota for user %v error: %v", username, err)
|
||||
return 0, 0, err
|
||||
}
|
||||
return user.UsedQuotaFiles, user.UsedQuotaSize, err
|
||||
}
|
||||
|
||||
func (p BoltProvider) userExists(username string) (User, error) {
|
||||
var user User
|
||||
err := p.dbHandle.View(func(tx *bolt.Tx) error {
|
||||
bucket, _, err := getBuckets(tx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
u := bucket.Get([]byte(username))
|
||||
if u == nil {
|
||||
return &RecordNotFoundError{err: fmt.Sprintf("username %v does not exist", user.Username)}
|
||||
}
|
||||
return json.Unmarshal(u, &user)
|
||||
})
|
||||
return user, err
|
||||
}
|
||||
|
||||
func (p BoltProvider) addUser(user User) error {
|
||||
err := validateUser(&user)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return p.dbHandle.Update(func(tx *bolt.Tx) error {
|
||||
bucket, idxBucket, err := getBuckets(tx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if u := bucket.Get([]byte(user.Username)); u != nil {
|
||||
return fmt.Errorf("username %v already exists", user.Username)
|
||||
}
|
||||
id, err := bucket.NextSequence()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
user.ID = int64(id)
|
||||
buf, err := json.Marshal(user)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
userIDAsBytes := itob(user.ID)
|
||||
err = bucket.Put([]byte(user.Username), buf)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return idxBucket.Put(userIDAsBytes, []byte(user.Username))
|
||||
})
|
||||
}
|
||||
|
||||
func (p BoltProvider) updateUser(user User) error {
|
||||
err := validateUser(&user)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return p.dbHandle.Update(func(tx *bolt.Tx) error {
|
||||
bucket, _, err := getBuckets(tx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if u := bucket.Get([]byte(user.Username)); u == nil {
|
||||
return &RecordNotFoundError{err: fmt.Sprintf("username %v does not exist", user.Username)}
|
||||
}
|
||||
buf, err := json.Marshal(user)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return bucket.Put([]byte(user.Username), buf)
|
||||
})
|
||||
}
|
||||
|
||||
func (p BoltProvider) deleteUser(user User) error {
|
||||
return p.dbHandle.Update(func(tx *bolt.Tx) error {
|
||||
bucket, idxBucket, err := getBuckets(tx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
userIDAsBytes := itob(user.ID)
|
||||
userName := idxBucket.Get(userIDAsBytes)
|
||||
if userName == nil {
|
||||
return &RecordNotFoundError{err: fmt.Sprintf("user with id %v does not exist", user.ID)}
|
||||
}
|
||||
err = bucket.Delete(userName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return idxBucket.Delete(userIDAsBytes)
|
||||
})
|
||||
}
|
||||
|
||||
func (p BoltProvider) getUsers(limit int, offset int, order string, username string) ([]User, error) {
|
||||
users := []User{}
|
||||
var err error
|
||||
if len(username) > 0 {
|
||||
if offset == 0 {
|
||||
user, err := p.userExists(username)
|
||||
if err == nil {
|
||||
users = append(users, getUserNoCredentials(&user))
|
||||
}
|
||||
}
|
||||
return users, err
|
||||
}
|
||||
err = p.dbHandle.View(func(tx *bolt.Tx) error {
|
||||
if limit <= 0 {
|
||||
return nil
|
||||
}
|
||||
bucket, _, err := getBuckets(tx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
cursor := bucket.Cursor()
|
||||
itNum := 0
|
||||
if order == "ASC" {
|
||||
for k, v := cursor.First(); k != nil; k, v = cursor.Next() {
|
||||
itNum++
|
||||
if itNum <= offset {
|
||||
continue
|
||||
}
|
||||
var user User
|
||||
err = json.Unmarshal(v, &user)
|
||||
if err == nil {
|
||||
users = append(users, getUserNoCredentials(&user))
|
||||
}
|
||||
if len(users) >= limit {
|
||||
break
|
||||
}
|
||||
}
|
||||
} else {
|
||||
for k, v := cursor.Last(); k != nil; k, v = cursor.Prev() {
|
||||
itNum++
|
||||
if itNum <= offset {
|
||||
continue
|
||||
}
|
||||
var user User
|
||||
err = json.Unmarshal(v, &user)
|
||||
if err == nil {
|
||||
users = append(users, getUserNoCredentials(&user))
|
||||
}
|
||||
if len(users) >= limit {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
return err
|
||||
})
|
||||
return users, err
|
||||
}
|
||||
|
||||
func getUserNoCredentials(user *User) User {
|
||||
user.Password = ""
|
||||
user.PublicKeys = []string{}
|
||||
return *user
|
||||
}
|
||||
|
||||
// itob returns an 8-byte big endian representation of v.
|
||||
func itob(v int64) []byte {
|
||||
b := make([]byte, 8)
|
||||
binary.BigEndian.PutUint64(b, uint64(v))
|
||||
return b
|
||||
}
|
||||
|
||||
func getBuckets(tx *bolt.Tx) (*bolt.Bucket, *bolt.Bucket, error) {
|
||||
var err error
|
||||
bucket := tx.Bucket(usersBucket)
|
||||
idxBucket := tx.Bucket(usersIDIdxBucket)
|
||||
if bucket == nil || idxBucket == nil {
|
||||
err = fmt.Errorf("Unable to find required buckets, bolt database structure not correcly defined")
|
||||
}
|
||||
return bucket, idxBucket, err
|
||||
}
|
||||
@@ -1,426 +0,0 @@
|
||||
// Package dataprovider provides data access.
|
||||
// It abstracts different data providers and exposes a common API.
|
||||
// Currently the supported data providers are: PostgreSQL (9+), MySQL (4.1+), SQLite 3.x and bbolt
|
||||
package dataprovider
|
||||
|
||||
import (
|
||||
"crypto/sha1"
|
||||
"crypto/sha256"
|
||||
"crypto/sha512"
|
||||
"crypto/subtle"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
"hash"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/alexedwards/argon2id"
|
||||
"golang.org/x/crypto/bcrypt"
|
||||
"golang.org/x/crypto/pbkdf2"
|
||||
"golang.org/x/crypto/ssh"
|
||||
|
||||
"github.com/drakkan/sftpgo/logger"
|
||||
"github.com/drakkan/sftpgo/metrics"
|
||||
"github.com/drakkan/sftpgo/utils"
|
||||
sha512crypt "github.com/nathanaelle/password"
|
||||
)
|
||||
|
||||
const (
|
||||
// SQLiteDataProviderName name for SQLite database provider
|
||||
SQLiteDataProviderName = "sqlite"
|
||||
// PGSQLDataProviderName name for PostgreSQL database provider
|
||||
PGSQLDataProviderName = "postgresql"
|
||||
// MySQLDataProviderName name for MySQL database provider
|
||||
MySQLDataProviderName = "mysql"
|
||||
// BoltDataProviderName name for bbolt key/value store provider
|
||||
BoltDataProviderName = "bolt"
|
||||
|
||||
argonPwdPrefix = "$argon2id$"
|
||||
bcryptPwdPrefix = "$2a$"
|
||||
pbkdf2SHA1Prefix = "$pbkdf2-sha1$"
|
||||
pbkdf2SHA256Prefix = "$pbkdf2-sha256$"
|
||||
pbkdf2SHA512Prefix = "$pbkdf2-sha512$"
|
||||
sha512cryptPwdPrefix = "$6$"
|
||||
manageUsersDisabledError = "please set manage_users to 1 in your configuration to enable this method"
|
||||
trackQuotaDisabledError = "please enable track_quota in your configuration to use this method"
|
||||
)
|
||||
|
||||
var (
|
||||
// SupportedProviders the data provider configured in the sftpgo.conf file must match one of these strings
|
||||
SupportedProviders = []string{SQLiteDataProviderName, PGSQLDataProviderName, MySQLDataProviderName, BoltDataProviderName}
|
||||
config Config
|
||||
provider Provider
|
||||
sqlPlaceholders []string
|
||||
validPerms = []string{PermAny, PermListItems, PermDownload, PermUpload, PermDelete, PermRename,
|
||||
PermCreateDirs, PermCreateSymlinks, PermOverwrite}
|
||||
hashPwdPrefixes = []string{argonPwdPrefix, bcryptPwdPrefix, pbkdf2SHA1Prefix, pbkdf2SHA256Prefix,
|
||||
pbkdf2SHA512Prefix, sha512cryptPwdPrefix}
|
||||
pbkdfPwdPrefixes = []string{pbkdf2SHA1Prefix, pbkdf2SHA256Prefix, pbkdf2SHA512Prefix}
|
||||
logSender = "dataProvider"
|
||||
availabilityTicker *time.Ticker
|
||||
)
|
||||
|
||||
// Config provider configuration
|
||||
type Config struct {
|
||||
// Driver name, must be one of the SupportedProviders
|
||||
Driver string `json:"driver" mapstructure:"driver"`
|
||||
// Database name
|
||||
Name string `json:"name" mapstructure:"name"`
|
||||
// Database host
|
||||
Host string `json:"host" mapstructure:"host"`
|
||||
// Database port
|
||||
Port int `json:"port" mapstructure:"port"`
|
||||
// Database username
|
||||
Username string `json:"username" mapstructure:"username"`
|
||||
// Database password
|
||||
Password string `json:"password" mapstructure:"password"`
|
||||
// Used for drivers mysql and postgresql.
|
||||
// 0 disable SSL/TLS connections.
|
||||
// 1 require ssl.
|
||||
// 2 set ssl mode to verify-ca for driver postgresql and skip-verify for driver mysql.
|
||||
// 3 set ssl mode to verify-full for driver postgresql and preferred for driver mysql.
|
||||
SSLMode int `json:"sslmode" mapstructure:"sslmode"`
|
||||
// Custom database connection string.
|
||||
// If not empty this connection string will be used instead of building one from the previous parameters
|
||||
ConnectionString string `json:"connection_string" mapstructure:"connection_string"`
|
||||
// Database table for SFTP users
|
||||
UsersTable string `json:"users_table" mapstructure:"users_table"`
|
||||
// Set to 0 to disable users management, 1 to enable
|
||||
ManageUsers int `json:"manage_users" mapstructure:"manage_users"`
|
||||
// Set the preferred way to track users quota between the following choices:
// 0, disable quota tracking. The REST API to scan a user dir and update the quota will do nothing
// 1, quota is updated each time a user uploads or deletes a file, even if the user has no quota restrictions
// 2, quota is updated each time a user uploads or deletes a file, but only for users with quota restrictions.
// With this configuration the "quota scan" REST API can still be used to periodically update space usage
// for users without quota restrictions
TrackQuota int `json:"track_quota" mapstructure:"track_quota"`
|
||||
// Sets the maximum number of open connections for the mysql and postgresql drivers.
|
||||
// Default 0 (unlimited)
|
||||
PoolSize int `json:"pool_size" mapstructure:"pool_size"`
|
||||
}
|
||||
|
||||
// ValidationError raised if input data is not valid
|
||||
type ValidationError struct {
|
||||
err string
|
||||
}
|
||||
|
||||
// Validation error details
|
||||
func (e *ValidationError) Error() string {
|
||||
return fmt.Sprintf("Validation error: %s", e.err)
|
||||
}
|
||||
|
||||
// MethodDisabledError raised if a method is disabled in config file.
|
||||
// For example, if user management is disabled, this error is raised
|
||||
// every time a user operation is done using the REST API
|
||||
type MethodDisabledError struct {
|
||||
err string
|
||||
}
|
||||
|
||||
// Method disabled error details
|
||||
func (e *MethodDisabledError) Error() string {
|
||||
return fmt.Sprintf("Method disabled error: %s", e.err)
|
||||
}
|
||||
|
||||
// RecordNotFoundError raised if a requested user is not found
|
||||
type RecordNotFoundError struct {
|
||||
err string
|
||||
}
|
||||
|
||||
func (e *RecordNotFoundError) Error() string {
|
||||
return fmt.Sprintf("Not found: %s", e.err)
|
||||
}
|
||||
|
||||
// GetProvider returns the configured provider
|
||||
func GetProvider() Provider {
|
||||
return provider
|
||||
}
|
||||
|
||||
// Provider interface that data providers must implement.
|
||||
type Provider interface {
|
||||
validateUserAndPass(username string, password string) (User, error)
|
||||
validateUserAndPubKey(username string, pubKey string) (User, string, error)
|
||||
updateQuota(username string, filesAdd int, sizeAdd int64, reset bool) error
|
||||
getUsedQuota(username string) (int, int64, error)
|
||||
userExists(username string) (User, error)
|
||||
addUser(user User) error
|
||||
updateUser(user User) error
|
||||
deleteUser(user User) error
|
||||
getUsers(limit int, offset int, order string, username string) ([]User, error)
|
||||
getUserByID(ID int64) (User, error)
|
||||
checkAvailability() error
|
||||
}
|
||||
|
||||
func init() {
|
||||
availabilityTicker = time.NewTicker(30 * time.Second)
|
||||
}
|
||||
|
||||
// Initialize the data provider.
|
||||
// An error is returned if the configured driver is invalid or if the data provider cannot be initialized
|
||||
func Initialize(cnf Config, basePath string) error {
|
||||
var err error
|
||||
config = cnf
|
||||
sqlPlaceholders = getSQLPlaceholders()
|
||||
if config.Driver == SQLiteDataProviderName {
|
||||
err = initializeSQLiteProvider(basePath)
|
||||
} else if config.Driver == PGSQLDataProviderName {
|
||||
err = initializePGSQLProvider()
|
||||
} else if config.Driver == MySQLDataProviderName {
|
||||
err = initializeMySQLProvider()
|
||||
} else if config.Driver == BoltDataProviderName {
|
||||
err = initializeBoltProvider(basePath)
|
||||
} else {
|
||||
err = fmt.Errorf("Unsupported data provider: %v", config.Driver)
|
||||
}
|
||||
if err == nil {
|
||||
startAvailabilityTimer()
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// CheckUserAndPass retrieves the SFTP user with the given username and password if a match is found or an error
|
||||
func CheckUserAndPass(p Provider, username string, password string) (User, error) {
|
||||
return p.validateUserAndPass(username, password)
|
||||
}
|
||||
|
||||
// CheckUserAndPubKey retrieves the SFTP user with the given username and public key if a match is found or an error
|
||||
func CheckUserAndPubKey(p Provider, username string, pubKey string) (User, string, error) {
|
||||
return p.validateUserAndPubKey(username, pubKey)
|
||||
}
|
||||
|
||||
// UpdateUserQuota updates the quota for the given SFTP user adding filesAdd and sizeAdd.
|
||||
// If reset is true filesAdd and sizeAdd indicate the total files and the total size instead of the difference.
|
||||
func UpdateUserQuota(p Provider, user User, filesAdd int, sizeAdd int64, reset bool) error {
|
||||
if config.TrackQuota == 0 {
|
||||
return &MethodDisabledError{err: trackQuotaDisabledError}
|
||||
} else if config.TrackQuota == 2 && !reset && !user.HasQuotaRestrictions() {
|
||||
return nil
|
||||
}
|
||||
return p.updateQuota(user.Username, filesAdd, sizeAdd, reset)
|
||||
}
|
||||
|
||||
// GetUsedQuota returns the used quota for the given SFTP user.
|
||||
// TrackQuota must be >=1 to enable this method
|
||||
func GetUsedQuota(p Provider, username string) (int, int64, error) {
|
||||
if config.TrackQuota == 0 {
|
||||
return 0, 0, &MethodDisabledError{err: trackQuotaDisabledError}
|
||||
}
|
||||
return p.getUsedQuota(username)
|
||||
}
|
||||
|
||||
// UserExists checks if the given SFTP username exists, returns an error if no match is found
|
||||
func UserExists(p Provider, username string) (User, error) {
|
||||
return p.userExists(username)
|
||||
}
|
||||
|
||||
// AddUser adds a new SFTP user.
|
||||
// ManageUsers configuration must be set to 1 to enable this method
|
||||
func AddUser(p Provider, user User) error {
|
||||
if config.ManageUsers == 0 {
|
||||
return &MethodDisabledError{err: manageUsersDisabledError}
|
||||
}
|
||||
return p.addUser(user)
|
||||
}
|
||||
|
||||
// UpdateUser updates an existing SFTP user.
|
||||
// ManageUsers configuration must be set to 1 to enable this method
|
||||
func UpdateUser(p Provider, user User) error {
|
||||
if config.ManageUsers == 0 {
|
||||
return &MethodDisabledError{err: manageUsersDisabledError}
|
||||
}
|
||||
return p.updateUser(user)
|
||||
}
|
||||
|
||||
// DeleteUser deletes an existing SFTP user.
|
||||
// ManageUsers configuration must be set to 1 to enable this method
|
||||
func DeleteUser(p Provider, user User) error {
|
||||
if config.ManageUsers == 0 {
|
||||
return &MethodDisabledError{err: manageUsersDisabledError}
|
||||
}
|
||||
return p.deleteUser(user)
|
||||
}
|
||||
|
||||
// GetUsers returns an array of users respecting limit and offset, filtered by exact username match if username is not empty
|
||||
func GetUsers(p Provider, limit int, offset int, order string, username string) ([]User, error) {
|
||||
return p.getUsers(limit, offset, order, username)
|
||||
}
|
||||
|
||||
// GetUserByID returns the user with the given database ID if a match is found or an error
|
||||
func GetUserByID(p Provider, ID int64) (User, error) {
|
||||
return p.getUserByID(ID)
|
||||
}
|
||||
|
||||
func validateUser(user *User) error {
|
||||
if len(user.Username) == 0 || len(user.HomeDir) == 0 {
|
||||
return &ValidationError{err: "Mandatory parameters missing"}
|
||||
}
|
||||
if len(user.Password) == 0 && len(user.PublicKeys) == 0 {
|
||||
return &ValidationError{err: "Please set password or at least a public_key"}
|
||||
}
|
||||
if len(user.Permissions) == 0 {
|
||||
return &ValidationError{err: "Please grant some permissions to this user"}
|
||||
}
|
||||
if !filepath.IsAbs(user.HomeDir) {
|
||||
return &ValidationError{err: fmt.Sprintf("home_dir must be an absolute path, actual value: %v", user.HomeDir)}
|
||||
}
|
||||
for _, p := range user.Permissions {
|
||||
if !utils.IsStringInSlice(p, validPerms) {
|
||||
return &ValidationError{err: fmt.Sprintf("Invalid permission: %v", p)}
|
||||
}
|
||||
}
|
||||
if len(user.Password) > 0 && !utils.IsStringPrefixInSlice(user.Password, hashPwdPrefixes) {
|
||||
pwd, err := argon2id.CreateHash(user.Password, argon2id.DefaultParams)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
user.Password = pwd
|
||||
}
|
||||
for i, k := range user.PublicKeys {
|
||||
_, _, _, _, err := ssh.ParseAuthorizedKey([]byte(k))
|
||||
if err != nil {
|
||||
return &ValidationError{err: fmt.Sprintf("Could not parse key nr. %d: %s", i, err)}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func checkUserAndPass(user User, password string) (User, error) {
|
||||
var err error
|
||||
if len(user.Password) == 0 {
|
||||
return user, errors.New("Credentials cannot be null or empty")
|
||||
}
|
||||
var match bool
|
||||
if strings.HasPrefix(user.Password, argonPwdPrefix) {
|
||||
match, err = argon2id.ComparePasswordAndHash(password, user.Password)
|
||||
if err != nil {
|
||||
providerLog(logger.LevelWarn, "error comparing password with argon hash: %v", err)
|
||||
return user, err
|
||||
}
|
||||
} else if strings.HasPrefix(user.Password, bcryptPwdPrefix) {
|
||||
if err = bcrypt.CompareHashAndPassword([]byte(user.Password), []byte(password)); err != nil {
|
||||
providerLog(logger.LevelWarn, "error comparing password with bcrypt hash: %v", err)
|
||||
return user, err
|
||||
}
|
||||
match = true
|
||||
} else if utils.IsStringPrefixInSlice(user.Password, pbkdfPwdPrefixes) {
|
||||
match, err = comparePbkdf2PasswordAndHash(password, user.Password)
|
||||
if err != nil {
|
||||
providerLog(logger.LevelWarn, "error comparing password with pbkdf2 sha256 hash: %v", err)
|
||||
return user, err
|
||||
}
|
||||
} else if strings.HasPrefix(user.Password, sha512cryptPwdPrefix) {
|
||||
crypter, ok := sha512crypt.SHA512.CrypterFound(user.Password)
|
||||
if !ok {
|
||||
err = errors.New("cannot found matching SHA512 crypter")
|
||||
providerLog(logger.LevelWarn, "error comparing password with SHA512 hash: %v", err)
|
||||
return user, err
|
||||
}
|
||||
if !crypter.Verify([]byte(password)) {
|
||||
err = errors.New("password does not match")
|
||||
providerLog(logger.LevelWarn, "error comparing password with SHA512 hash: %v", err)
|
||||
return user, err
|
||||
}
|
||||
match = true
|
||||
}
|
||||
if !match {
|
||||
err = errors.New("Invalid credentials")
|
||||
}
|
||||
return user, err
|
||||
}
|
||||
|
||||
func checkUserAndPubKey(user User, pubKey string) (User, string, error) {
|
||||
if len(user.PublicKeys) == 0 {
|
||||
return user, "", errors.New("Invalid credentials")
|
||||
}
|
||||
for i, k := range user.PublicKeys {
|
||||
storedPubKey, comment, _, _, err := ssh.ParseAuthorizedKey([]byte(k))
|
||||
if err != nil {
|
||||
providerLog(logger.LevelWarn, "error parsing stored public key %d for user %v: %v", i, user.Username, err)
|
||||
return user, "", err
|
||||
}
|
||||
if string(storedPubKey.Marshal()) == pubKey {
|
||||
fp := ssh.FingerprintSHA256(storedPubKey)
|
||||
return user, fp + ":" + comment, nil
|
||||
}
|
||||
}
|
||||
return user, "", errors.New("Invalid credentials")
|
||||
}
|
||||
|
||||
func comparePbkdf2PasswordAndHash(password, hashedPassword string) (bool, error) {
|
||||
vals := strings.Split(hashedPassword, "$")
|
||||
if len(vals) != 5 {
|
||||
return false, fmt.Errorf("pbkdf2: hash is not in the correct format")
|
||||
}
|
||||
var hashFunc func() hash.Hash
|
||||
var hashSize int
|
||||
if strings.HasPrefix(hashedPassword, pbkdf2SHA256Prefix) {
|
||||
hashSize = sha256.Size
|
||||
hashFunc = sha256.New
|
||||
} else if strings.HasPrefix(hashedPassword, pbkdf2SHA512Prefix) {
|
||||
hashSize = sha512.Size
|
||||
hashFunc = sha512.New
|
||||
} else if strings.HasPrefix(hashedPassword, pbkdf2SHA1Prefix) {
|
||||
hashSize = sha1.Size
|
||||
hashFunc = sha1.New
|
||||
} else {
|
||||
return false, fmt.Errorf("pbkdf2: invalid or unsupported hash format %v", vals[1])
|
||||
}
|
||||
iterations, err := strconv.Atoi(vals[2])
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
salt := vals[3]
|
||||
expected := vals[4]
|
||||
df := pbkdf2.Key([]byte(password), []byte(salt), iterations, hashSize, hashFunc)
|
||||
buf := make([]byte, base64.StdEncoding.EncodedLen(len(df)))
|
||||
base64.StdEncoding.Encode(buf, df)
|
||||
return subtle.ConstantTimeCompare(buf, []byte(expected)) == 1, nil
|
||||
}
|
||||
|
||||
func getSSLMode() string {
|
||||
if config.Driver == PGSQLDataProviderName {
|
||||
if config.SSLMode == 0 {
|
||||
return "disable"
|
||||
} else if config.SSLMode == 1 {
|
||||
return "require"
|
||||
} else if config.SSLMode == 2 {
|
||||
return "verify-ca"
|
||||
} else if config.SSLMode == 3 {
|
||||
return "verify-full"
|
||||
}
|
||||
} else if config.Driver == MySQLDataProviderName {
|
||||
if config.SSLMode == 0 {
|
||||
return "false"
|
||||
} else if config.SSLMode == 1 {
|
||||
return "true"
|
||||
} else if config.SSLMode == 2 {
|
||||
return "skip-verify"
|
||||
} else if config.SSLMode == 3 {
|
||||
return "preferred"
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func startAvailabilityTimer() {
|
||||
checkDataprovider()
|
||||
go func() {
|
||||
for range availabilityTicker.C {
|
||||
checkDataprovider()
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
func checkDataprovider() {
|
||||
err := provider.checkAvailability()
|
||||
if err != nil {
|
||||
providerLog(logger.LevelWarn, "check availability error: %v", err)
|
||||
}
|
||||
metrics.UpdateDataProviderAvailability(err)
|
||||
}
|
||||
|
||||
func providerLog(level logger.LogLevel, format string, v ...interface{}) {
|
||||
logger.Log(level, logSender, "", format, v...)
|
||||
}
|
||||
@@ -1,89 +0,0 @@
|
||||
package dataprovider
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/drakkan/sftpgo/logger"
|
||||
)
|
||||
|
||||
// MySQLProvider auth provider for MySQL/MariaDB database
|
||||
type MySQLProvider struct {
|
||||
dbHandle *sql.DB
|
||||
}
|
||||
|
||||
func initializeMySQLProvider() error {
|
||||
var err error
|
||||
logSender = MySQLDataProviderName
|
||||
dbHandle, err := sql.Open("mysql", getMySQLConnectionString(false))
|
||||
if err == nil {
|
||||
providerLog(logger.LevelDebug, "mysql database handle created, connection string: %#v, pool size: %v",
|
||||
getMySQLConnectionString(true), config.PoolSize)
|
||||
dbHandle.SetMaxOpenConns(config.PoolSize)
|
||||
dbHandle.SetConnMaxLifetime(1800 * time.Second)
|
||||
provider = MySQLProvider{dbHandle: dbHandle}
|
||||
} else {
|
||||
providerLog(logger.LevelWarn, "error creating mysql database handler, connection string: %#v, error: %v",
|
||||
getMySQLConnectionString(true), err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
func getMySQLConnectionString(redactedPwd bool) string {
|
||||
var connectionString string
|
||||
if len(config.ConnectionString) == 0 {
|
||||
password := config.Password
|
||||
if redactedPwd {
|
||||
password = "[redacted]"
|
||||
}
|
||||
connectionString = fmt.Sprintf("%v:%v@tcp([%v]:%v)/%v?charset=utf8&interpolateParams=true&timeout=10s&tls=%v&writeTimeout=10s&readTimeout=10s",
|
||||
config.Username, password, config.Host, config.Port, config.Name, getSSLMode())
|
||||
} else {
|
||||
connectionString = config.ConnectionString
|
||||
}
|
||||
return connectionString
|
||||
}
|
||||
|
||||
func (p MySQLProvider) checkAvailability() error {
|
||||
return sqlCommonCheckAvailability(p.dbHandle)
|
||||
}
|
||||
|
||||
func (p MySQLProvider) validateUserAndPass(username string, password string) (User, error) {
|
||||
return sqlCommonValidateUserAndPass(username, password, p.dbHandle)
|
||||
}
|
||||
|
||||
func (p MySQLProvider) validateUserAndPubKey(username string, publicKey string) (User, string, error) {
|
||||
return sqlCommonValidateUserAndPubKey(username, publicKey, p.dbHandle)
|
||||
}
|
||||
|
||||
func (p MySQLProvider) getUserByID(ID int64) (User, error) {
|
||||
return sqlCommonGetUserByID(ID, p.dbHandle)
|
||||
}
|
||||
|
||||
func (p MySQLProvider) updateQuota(username string, filesAdd int, sizeAdd int64, reset bool) error {
|
||||
return sqlCommonUpdateQuota(username, filesAdd, sizeAdd, reset, p.dbHandle)
|
||||
}
|
||||
|
||||
func (p MySQLProvider) getUsedQuota(username string) (int, int64, error) {
|
||||
return sqlCommonGetUsedQuota(username, p.dbHandle)
|
||||
}
|
||||
|
||||
func (p MySQLProvider) userExists(username string) (User, error) {
|
||||
return sqlCommonCheckUserExists(username, p.dbHandle)
|
||||
}
|
||||
|
||||
func (p MySQLProvider) addUser(user User) error {
|
||||
return sqlCommonAddUser(user, p.dbHandle)
|
||||
}
|
||||
|
||||
func (p MySQLProvider) updateUser(user User) error {
|
||||
return sqlCommonUpdateUser(user, p.dbHandle)
|
||||
}
|
||||
|
||||
func (p MySQLProvider) deleteUser(user User) error {
|
||||
return sqlCommonDeleteUser(user, p.dbHandle)
|
||||
}
|
||||
|
||||
func (p MySQLProvider) getUsers(limit int, offset int, order string, username string) ([]User, error) {
|
||||
return sqlCommonGetUsers(limit, offset, order, username, p.dbHandle)
|
||||
}
|
||||
@@ -1,88 +0,0 @@
|
||||
package dataprovider
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
|
||||
"github.com/drakkan/sftpgo/logger"
|
||||
)
|
||||
|
||||
// PGSQLProvider auth provider for PostgreSQL database
|
||||
type PGSQLProvider struct {
|
||||
dbHandle *sql.DB
|
||||
}
|
||||
|
||||
func initializePGSQLProvider() error {
|
||||
var err error
|
||||
logSender = PGSQLDataProviderName
|
||||
dbHandle, err := sql.Open("postgres", getPGSQLConnectionString(false))
|
||||
if err == nil {
|
||||
providerLog(logger.LevelDebug, "postgres database handle created, connection string: %#v, pool size: %v",
|
||||
getPGSQLConnectionString(true), config.PoolSize)
|
||||
dbHandle.SetMaxOpenConns(config.PoolSize)
|
||||
provider = PGSQLProvider{dbHandle: dbHandle}
|
||||
} else {
|
||||
providerLog(logger.LevelWarn, "error creating postgres database handler, connection string: %#v, error: %v",
|
||||
getPGSQLConnectionString(true), err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func getPGSQLConnectionString(redactedPwd bool) string {
|
||||
var connectionString string
|
||||
if len(config.ConnectionString) == 0 {
|
||||
password := config.Password
|
||||
if redactedPwd {
|
||||
password = "[redacted]"
|
||||
}
|
||||
connectionString = fmt.Sprintf("host='%v' port=%v dbname='%v' user='%v' password='%v' sslmode=%v connect_timeout=10",
|
||||
config.Host, config.Port, config.Name, config.Username, password, getSSLMode())
|
||||
} else {
|
||||
connectionString = config.ConnectionString
|
||||
}
|
||||
return connectionString
|
||||
}
|
||||
|
||||
func (p PGSQLProvider) checkAvailability() error {
|
||||
return sqlCommonCheckAvailability(p.dbHandle)
|
||||
}
|
||||
|
||||
func (p PGSQLProvider) validateUserAndPass(username string, password string) (User, error) {
|
||||
return sqlCommonValidateUserAndPass(username, password, p.dbHandle)
|
||||
}
|
||||
|
||||
func (p PGSQLProvider) validateUserAndPubKey(username string, publicKey string) (User, string, error) {
|
||||
return sqlCommonValidateUserAndPubKey(username, publicKey, p.dbHandle)
|
||||
}
|
||||
|
||||
func (p PGSQLProvider) getUserByID(ID int64) (User, error) {
|
||||
return sqlCommonGetUserByID(ID, p.dbHandle)
|
||||
}
|
||||
|
||||
func (p PGSQLProvider) updateQuota(username string, filesAdd int, sizeAdd int64, reset bool) error {
|
||||
return sqlCommonUpdateQuota(username, filesAdd, sizeAdd, reset, p.dbHandle)
|
||||
}
|
||||
|
||||
func (p PGSQLProvider) getUsedQuota(username string) (int, int64, error) {
|
||||
return sqlCommonGetUsedQuota(username, p.dbHandle)
|
||||
}
|
||||
|
||||
func (p PGSQLProvider) userExists(username string) (User, error) {
|
||||
return sqlCommonCheckUserExists(username, p.dbHandle)
|
||||
}
|
||||
|
||||
func (p PGSQLProvider) addUser(user User) error {
|
||||
return sqlCommonAddUser(user, p.dbHandle)
|
||||
}
|
||||
|
||||
func (p PGSQLProvider) updateUser(user User) error {
|
||||
return sqlCommonUpdateUser(user, p.dbHandle)
|
||||
}
|
||||
|
||||
func (p PGSQLProvider) deleteUser(user User) error {
|
||||
return sqlCommonDeleteUser(user, p.dbHandle)
|
||||
}
|
||||
|
||||
func (p PGSQLProvider) getUsers(limit int, offset int, order string, username string) ([]User, error) {
|
||||
return sqlCommonGetUsers(limit, offset, order, username, p.dbHandle)
|
||||
}
|
||||
@@ -1,259 +0,0 @@
|
||||
package dataprovider
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"time"
|
||||
|
||||
"github.com/drakkan/sftpgo/logger"
|
||||
"github.com/drakkan/sftpgo/utils"
|
||||
)
|
||||
|
||||
func getUserByUsername(username string, dbHandle *sql.DB) (User, error) {
|
||||
var user User
|
||||
q := getUserByUsernameQuery()
|
||||
stmt, err := dbHandle.Prepare(q)
|
||||
if err != nil {
|
||||
providerLog(logger.LevelWarn, "error preparing database query %#v: %v", q, err)
|
||||
return user, err
|
||||
}
|
||||
defer stmt.Close()
|
||||
|
||||
row := stmt.QueryRow(username)
|
||||
return getUserFromDbRow(row, nil)
|
||||
}
|
||||
|
||||
func sqlCommonValidateUserAndPass(username string, password string, dbHandle *sql.DB) (User, error) {
|
||||
var user User
|
||||
if len(password) == 0 {
|
||||
return user, errors.New("Credentials cannot be null or empty")
|
||||
}
|
||||
user, err := getUserByUsername(username, dbHandle)
|
||||
if err != nil {
|
||||
providerLog(logger.LevelWarn, "error authenticating user: %v, error: %v", username, err)
|
||||
return user, err
|
||||
}
|
||||
return checkUserAndPass(user, password)
|
||||
}
|
||||
|
||||
func sqlCommonValidateUserAndPubKey(username string, pubKey string, dbHandle *sql.DB) (User, string, error) {
|
||||
var user User
|
||||
if len(pubKey) == 0 {
|
||||
return user, "", errors.New("Credentials cannot be null or empty")
|
||||
}
|
||||
user, err := getUserByUsername(username, dbHandle)
|
||||
if err != nil {
|
||||
providerLog(logger.LevelWarn, "error authenticating user: %v, error: %v", username, err)
|
||||
return user, "", err
|
||||
}
|
||||
return checkUserAndPubKey(user, pubKey)
|
||||
}
|
||||
|
||||
func sqlCommonCheckAvailability(dbHandle *sql.DB) error {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
return dbHandle.PingContext(ctx)
|
||||
}
|
||||
|
||||
func sqlCommonGetUserByID(ID int64, dbHandle *sql.DB) (User, error) {
|
||||
var user User
|
||||
q := getUserByIDQuery()
|
||||
stmt, err := dbHandle.Prepare(q)
|
||||
if err != nil {
|
||||
providerLog(logger.LevelWarn, "error preparing database query %#v: %v", q, err)
|
||||
return user, err
|
||||
}
|
||||
defer stmt.Close()
|
||||
|
||||
row := stmt.QueryRow(ID)
|
||||
return getUserFromDbRow(row, nil)
|
||||
}
|
||||
|
||||
func sqlCommonUpdateQuota(username string, filesAdd int, sizeAdd int64, reset bool, dbHandle *sql.DB) error {
|
||||
q := getUpdateQuotaQuery(reset)
|
||||
stmt, err := dbHandle.Prepare(q)
|
||||
if err != nil {
|
||||
providerLog(logger.LevelWarn, "error preparing database query %#v: %v", q, err)
|
||||
return err
|
||||
}
|
||||
defer stmt.Close()
|
||||
_, err = stmt.Exec(sizeAdd, filesAdd, utils.GetTimeAsMsSinceEpoch(time.Now()), username)
|
||||
if err == nil {
|
||||
providerLog(logger.LevelDebug, "quota updated for user %v, files increment: %v size increment: %v is reset? %v",
|
||||
username, filesAdd, sizeAdd, reset)
|
||||
} else {
|
||||
providerLog(logger.LevelWarn, "error updating quota for username %v: %v", username, err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func sqlCommonGetUsedQuota(username string, dbHandle *sql.DB) (int, int64, error) {
|
||||
q := getQuotaQuery()
|
||||
stmt, err := dbHandle.Prepare(q)
|
||||
if err != nil {
|
||||
providerLog(logger.LevelWarn, "error preparing database query %#v: %v", q, err)
|
||||
return 0, 0, err
|
||||
}
|
||||
defer stmt.Close()
|
||||
|
||||
var usedFiles int
|
||||
var usedSize int64
|
||||
err = stmt.QueryRow(username).Scan(&usedSize, &usedFiles)
|
||||
if err != nil {
|
||||
providerLog(logger.LevelWarn, "error getting quota for user: %v, error: %v", username, err)
|
||||
return 0, 0, err
|
||||
}
|
||||
return usedFiles, usedSize, err
|
||||
}
|
||||
|
||||
func sqlCommonCheckUserExists(username string, dbHandle *sql.DB) (User, error) {
|
||||
var user User
|
||||
q := getUserByUsernameQuery()
|
||||
stmt, err := dbHandle.Prepare(q)
|
||||
if err != nil {
|
||||
providerLog(logger.LevelWarn, "error preparing database query %#v: %v", q, err)
|
||||
return user, err
|
||||
}
|
||||
defer stmt.Close()
|
||||
row := stmt.QueryRow(username)
|
||||
return getUserFromDbRow(row, nil)
|
||||
}
|
||||
|
||||
func sqlCommonAddUser(user User, dbHandle *sql.DB) error {
|
||||
err := validateUser(&user)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
q := getAddUserQuery()
|
||||
stmt, err := dbHandle.Prepare(q)
|
||||
if err != nil {
|
||||
providerLog(logger.LevelWarn, "error preparing database query %#v: %v", q, err)
|
||||
return err
|
||||
}
|
||||
defer stmt.Close()
|
||||
permissions, err := user.GetPermissionsAsJSON()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
publicKeys, err := user.GetPublicKeysAsJSON()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = stmt.Exec(user.Username, user.Password, string(publicKeys), user.HomeDir, user.UID, user.GID, user.MaxSessions, user.QuotaSize,
|
||||
user.QuotaFiles, string(permissions), user.UploadBandwidth, user.DownloadBandwidth)
|
||||
return err
|
||||
}
|
||||
|
||||
func sqlCommonUpdateUser(user User, dbHandle *sql.DB) error {
|
||||
err := validateUser(&user)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
q := getUpdateUserQuery()
|
||||
stmt, err := dbHandle.Prepare(q)
|
||||
if err != nil {
|
||||
providerLog(logger.LevelWarn, "error preparing database query %#v: %v", q, err)
|
||||
return err
|
||||
}
|
||||
defer stmt.Close()
|
||||
permissions, err := user.GetPermissionsAsJSON()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
publicKeys, err := user.GetPublicKeysAsJSON()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = stmt.Exec(user.Password, string(publicKeys), user.HomeDir, user.UID, user.GID, user.MaxSessions, user.QuotaSize,
|
||||
user.QuotaFiles, string(permissions), user.UploadBandwidth, user.DownloadBandwidth, user.ID)
|
||||
return err
|
||||
}
|
||||
|
||||
func sqlCommonDeleteUser(user User, dbHandle *sql.DB) error {
|
||||
q := getDeleteUserQuery()
|
||||
stmt, err := dbHandle.Prepare(q)
|
||||
if err != nil {
|
||||
providerLog(logger.LevelWarn, "error preparing database query %#v: %v", q, err)
|
||||
return err
|
||||
}
|
||||
defer stmt.Close()
|
||||
_, err = stmt.Exec(user.ID)
|
||||
return err
|
||||
}
|
||||
|
||||
func sqlCommonGetUsers(limit int, offset int, order string, username string, dbHandle *sql.DB) ([]User, error) {
|
||||
users := []User{}
|
||||
q := getUsersQuery(order, username)
|
||||
stmt, err := dbHandle.Prepare(q)
|
||||
if err != nil {
|
||||
providerLog(logger.LevelWarn, "error preparing database query %#v: %v", q, err)
|
||||
return nil, err
|
||||
}
|
||||
defer stmt.Close()
|
||||
var rows *sql.Rows
|
||||
if len(username) > 0 {
|
||||
rows, err = stmt.Query(username, limit, offset)
|
||||
} else {
|
||||
rows, err = stmt.Query(limit, offset)
|
||||
}
|
||||
if err == nil {
|
||||
defer rows.Close()
|
||||
for rows.Next() {
|
||||
u, err := getUserFromDbRow(nil, rows)
|
||||
// hide password and public key
|
||||
if err == nil {
|
||||
u.Password = ""
|
||||
u.PublicKeys = []string{}
|
||||
users = append(users, u)
|
||||
} else {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return users, err
|
||||
}
|
||||
|
||||
func getUserFromDbRow(row *sql.Row, rows *sql.Rows) (User, error) {
|
||||
var user User
|
||||
var permissions sql.NullString
|
||||
var password sql.NullString
|
||||
var publicKey sql.NullString
|
||||
var err error
|
||||
if row != nil {
|
||||
err = row.Scan(&user.ID, &user.Username, &password, &publicKey, &user.HomeDir, &user.UID, &user.GID, &user.MaxSessions,
|
||||
&user.QuotaSize, &user.QuotaFiles, &permissions, &user.UsedQuotaSize, &user.UsedQuotaFiles, &user.LastQuotaUpdate,
|
||||
&user.UploadBandwidth, &user.DownloadBandwidth)
|
||||
|
||||
} else {
|
||||
err = rows.Scan(&user.ID, &user.Username, &password, &publicKey, &user.HomeDir, &user.UID, &user.GID, &user.MaxSessions,
|
||||
&user.QuotaSize, &user.QuotaFiles, &permissions, &user.UsedQuotaSize, &user.UsedQuotaFiles, &user.LastQuotaUpdate,
|
||||
&user.UploadBandwidth, &user.DownloadBandwidth)
|
||||
}
|
||||
if err != nil {
|
||||
if err == sql.ErrNoRows {
|
||||
return user, &RecordNotFoundError{err: err.Error()}
|
||||
}
|
||||
return user, err
|
||||
}
|
||||
if password.Valid {
|
||||
user.Password = password.String
|
||||
}
|
||||
if publicKey.Valid {
|
||||
var list []string
|
||||
err = json.Unmarshal([]byte(publicKey.String), &list)
|
||||
if err == nil {
|
||||
user.PublicKeys = list
|
||||
}
|
||||
}
|
||||
if permissions.Valid {
|
||||
var list []string
|
||||
err = json.Unmarshal([]byte(permissions.String), &list)
|
||||
if err == nil {
|
||||
user.Permissions = list
|
||||
}
|
||||
}
|
||||
return user, err
|
||||
}
|
||||
@@ -1,95 +0,0 @@
|
||||
package dataprovider
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/drakkan/sftpgo/logger"
|
||||
)
|
||||
|
||||
// SQLiteProvider auth provider for SQLite database
|
||||
type SQLiteProvider struct {
|
||||
dbHandle *sql.DB
|
||||
}
|
||||
|
||||
func initializeSQLiteProvider(basePath string) error {
|
||||
var err error
|
||||
var connectionString string
|
||||
logSender = SQLiteDataProviderName
|
||||
if len(config.ConnectionString) == 0 {
|
||||
dbPath := config.Name
|
||||
if !filepath.IsAbs(dbPath) {
|
||||
dbPath = filepath.Join(basePath, dbPath)
|
||||
}
|
||||
fi, err := os.Stat(dbPath)
|
||||
if err != nil {
|
||||
providerLog(logger.LevelWarn, "sqlite database file does not exists, please be sure to create and initialize"+
|
||||
" a database before starting sftpgo")
|
||||
return err
|
||||
}
|
||||
if fi.Size() == 0 {
|
||||
return errors.New("sqlite database file is invalid, please be sure to create and initialize" +
|
||||
" a database before starting sftpgo")
|
||||
}
|
||||
connectionString = fmt.Sprintf("file:%v?cache=shared", dbPath)
|
||||
} else {
|
||||
connectionString = config.ConnectionString
|
||||
}
|
||||
dbHandle, err := sql.Open("sqlite3", connectionString)
|
||||
if err == nil {
|
||||
providerLog(logger.LevelDebug, "sqlite database handle created, connection string: %#v", connectionString)
|
||||
dbHandle.SetMaxOpenConns(1)
|
||||
provider = SQLiteProvider{dbHandle: dbHandle}
|
||||
} else {
|
||||
providerLog(logger.LevelWarn, "error creating sqlite database handler, connection string: %#v, error: %v",
|
||||
connectionString, err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (p SQLiteProvider) checkAvailability() error {
|
||||
return sqlCommonCheckAvailability(p.dbHandle)
|
||||
}
|
||||
|
||||
func (p SQLiteProvider) validateUserAndPass(username string, password string) (User, error) {
|
||||
return sqlCommonValidateUserAndPass(username, password, p.dbHandle)
|
||||
}
|
||||
|
||||
func (p SQLiteProvider) validateUserAndPubKey(username string, publicKey string) (User, string, error) {
|
||||
return sqlCommonValidateUserAndPubKey(username, publicKey, p.dbHandle)
|
||||
}
|
||||
|
||||
func (p SQLiteProvider) getUserByID(ID int64) (User, error) {
|
||||
return sqlCommonGetUserByID(ID, p.dbHandle)
|
||||
}
|
||||
|
||||
func (p SQLiteProvider) updateQuota(username string, filesAdd int, sizeAdd int64, reset bool) error {
|
||||
return sqlCommonUpdateQuota(username, filesAdd, sizeAdd, reset, p.dbHandle)
|
||||
}
|
||||
|
||||
func (p SQLiteProvider) getUsedQuota(username string) (int, int64, error) {
|
||||
return sqlCommonGetUsedQuota(username, p.dbHandle)
|
||||
}
|
||||
|
||||
func (p SQLiteProvider) userExists(username string) (User, error) {
|
||||
return sqlCommonCheckUserExists(username, p.dbHandle)
|
||||
}
|
||||
|
||||
func (p SQLiteProvider) addUser(user User) error {
|
||||
return sqlCommonAddUser(user, p.dbHandle)
|
||||
}
|
||||
|
||||
func (p SQLiteProvider) updateUser(user User) error {
|
||||
return sqlCommonUpdateUser(user, p.dbHandle)
|
||||
}
|
||||
|
||||
func (p SQLiteProvider) deleteUser(user User) error {
|
||||
return sqlCommonDeleteUser(user, p.dbHandle)
|
||||
}
|
||||
|
||||
func (p SQLiteProvider) getUsers(limit int, offset int, order string, username string) ([]User, error) {
|
||||
return sqlCommonGetUsers(limit, offset, order, username, p.dbHandle)
|
||||
}
|
||||
@@ -1,70 +0,0 @@
|
||||
package dataprovider
|
||||
|
||||
import "fmt"
|
||||
|
||||
const (
|
||||
selectUserFields = "id,username,password,public_keys,home_dir,uid,gid,max_sessions,quota_size,quota_files,permissions," +
|
||||
"used_quota_size,used_quota_files,last_quota_update,upload_bandwidth,download_bandwidth"
|
||||
)
|
||||
|
||||
func getSQLPlaceholders() []string {
|
||||
var placeholders []string
|
||||
for i := 1; i <= 20; i++ {
|
||||
if config.Driver == PGSQLDataProviderName {
|
||||
placeholders = append(placeholders, fmt.Sprintf("$%v", i))
|
||||
} else {
|
||||
placeholders = append(placeholders, "?")
|
||||
}
|
||||
}
|
||||
return placeholders
|
||||
}
|
||||
|
||||
func getUserByUsernameQuery() string {
|
||||
return fmt.Sprintf(`SELECT %v FROM %v WHERE username = %v`, selectUserFields, config.UsersTable, sqlPlaceholders[0])
|
||||
}
|
||||
|
||||
func getUserByIDQuery() string {
|
||||
return fmt.Sprintf(`SELECT %v FROM %v WHERE id = %v`, selectUserFields, config.UsersTable, sqlPlaceholders[0])
|
||||
}
|
||||
|
||||
func getUsersQuery(order string, username string) string {
|
||||
if len(username) > 0 {
|
||||
return fmt.Sprintf(`SELECT %v FROM %v WHERE username = %v ORDER BY username %v LIMIT %v OFFSET %v`,
|
||||
selectUserFields, config.UsersTable, sqlPlaceholders[0], order, sqlPlaceholders[1], sqlPlaceholders[2])
|
||||
}
|
||||
return fmt.Sprintf(`SELECT %v FROM %v ORDER BY username %v LIMIT %v OFFSET %v`, selectUserFields, config.UsersTable,
|
||||
order, sqlPlaceholders[0], sqlPlaceholders[1])
|
||||
}
|
||||
|
||||
func getUpdateQuotaQuery(reset bool) string {
|
||||
if reset {
|
||||
return fmt.Sprintf(`UPDATE %v SET used_quota_size = %v,used_quota_files = %v,last_quota_update = %v
|
||||
WHERE username = %v`, config.UsersTable, sqlPlaceholders[0], sqlPlaceholders[1], sqlPlaceholders[2], sqlPlaceholders[3])
|
||||
}
|
||||
return fmt.Sprintf(`UPDATE %v SET used_quota_size = used_quota_size + %v,used_quota_files = used_quota_files + %v,last_quota_update = %v
|
||||
WHERE username = %v`, config.UsersTable, sqlPlaceholders[0], sqlPlaceholders[1], sqlPlaceholders[2], sqlPlaceholders[3])
|
||||
}
|
||||
|
||||
func getQuotaQuery() string {
|
||||
return fmt.Sprintf(`SELECT used_quota_size,used_quota_files FROM %v WHERE username = %v`, config.UsersTable,
|
||||
sqlPlaceholders[0])
|
||||
}
|
||||
|
||||
func getAddUserQuery() string {
|
||||
return fmt.Sprintf(`INSERT INTO %v (username,password,public_keys,home_dir,uid,gid,max_sessions,quota_size,quota_files,permissions,
|
||||
used_quota_size,used_quota_files,last_quota_update,upload_bandwidth,download_bandwidth)
|
||||
VALUES (%v,%v,%v,%v,%v,%v,%v,%v,%v,%v,0,0,0,%v,%v)`, config.UsersTable, sqlPlaceholders[0], sqlPlaceholders[1],
|
||||
sqlPlaceholders[2], sqlPlaceholders[3], sqlPlaceholders[4], sqlPlaceholders[5], sqlPlaceholders[6], sqlPlaceholders[7],
|
||||
sqlPlaceholders[8], sqlPlaceholders[9], sqlPlaceholders[10], sqlPlaceholders[11])
|
||||
}
|
||||
|
||||
func getUpdateUserQuery() string {
|
||||
return fmt.Sprintf(`UPDATE %v SET password=%v,public_keys=%v,home_dir=%v,uid=%v,gid=%v,max_sessions=%v,quota_size=%v,
|
||||
quota_files=%v,permissions=%v,upload_bandwidth=%v,download_bandwidth=%v WHERE id = %v`, config.UsersTable,
|
||||
sqlPlaceholders[0], sqlPlaceholders[1], sqlPlaceholders[2], sqlPlaceholders[3], sqlPlaceholders[4], sqlPlaceholders[5],
|
||||
sqlPlaceholders[6], sqlPlaceholders[7], sqlPlaceholders[8], sqlPlaceholders[9], sqlPlaceholders[10], sqlPlaceholders[11])
|
||||
}
|
||||
|
||||
func getDeleteUserQuery() string {
|
||||
return fmt.Sprintf(`DELETE FROM %v WHERE id = %v`, config.UsersTable, sqlPlaceholders[0])
|
||||
}
|
||||
@@ -1,125 +0,0 @@
|
||||
package dataprovider
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/drakkan/sftpgo/utils"
|
||||
)
|
||||
|
||||
// Available permissions for SFTP users
|
||||
const (
|
||||
// All permissions are granted
|
||||
PermAny = "*"
|
||||
// List items such as files and directories is allowed
|
||||
PermListItems = "list"
|
||||
// download files is allowed
|
||||
PermDownload = "download"
|
||||
// upload files is allowed
|
||||
PermUpload = "upload"
|
||||
// overwrite an existing file, while uploading, is allowed
|
||||
// upload permission is required to allow file overwrite
|
||||
PermOverwrite = "overwrite"
|
||||
// delete files or directories is allowed
|
||||
PermDelete = "delete"
|
||||
// rename files or directories is allowed
|
||||
PermRename = "rename"
|
||||
// create directories is allowed
|
||||
PermCreateDirs = "create_dirs"
|
||||
// create symbolic links is allowed
|
||||
PermCreateSymlinks = "create_symlinks"
|
||||
)
|
||||
|
||||
// User defines an SFTP user
|
||||
type User struct {
|
||||
// Database unique identifier
|
||||
ID int64 `json:"id"`
|
||||
// Username
|
||||
Username string `json:"username"`
|
||||
// Password used for password authentication.
|
||||
// For users created using the SFTPGo REST API the password is stored using the argon2id hashing algorithm.
|
||||
// Checking passwords stored with bcrypt is supported too.
|
||||
// Currently, as a fallback, clear text passwords are checked too, but you should not store passwords
// as clear text: this support could be removed at any time, so please don't depend on it.
|
||||
Password string `json:"password,omitempty"`
|
||||
// PublicKeys used for public key authentication. At least one of password or public key is mandatory
|
||||
PublicKeys []string `json:"public_keys,omitempty"`
|
||||
// The user cannot upload or download files outside this directory. Must be an absolute path
|
||||
HomeDir string `json:"home_dir"`
|
||||
// If sftpgo runs as root system user then the created files and directories will be assigned to this system UID
|
||||
UID int `json:"uid"`
|
||||
// If sftpgo runs as root system user then the created files and directories will be assigned to this system GID
|
||||
GID int `json:"gid"`
|
||||
// Maximum concurrent sessions. 0 means unlimited
|
||||
MaxSessions int `json:"max_sessions"`
|
||||
// Maximum size allowed as bytes. 0 means unlimited
|
||||
QuotaSize int64 `json:"quota_size"`
|
||||
// Maximum number of files allowed. 0 means unlimited
|
||||
QuotaFiles int `json:"quota_files"`
|
||||
// List of the granted permissions
|
||||
Permissions []string `json:"permissions"`
|
||||
// Used quota as bytes
|
||||
UsedQuotaSize int64 `json:"used_quota_size"`
|
||||
// Used quota as number of files
|
||||
UsedQuotaFiles int `json:"used_quota_files"`
|
||||
// Last quota update as unix timestamp in milliseconds
|
||||
LastQuotaUpdate int64 `json:"last_quota_update"`
|
||||
// Maximum upload bandwidth as KB/s, 0 means unlimited
|
||||
UploadBandwidth int64 `json:"upload_bandwidth"`
|
||||
// Maximum download bandwidth as KB/s, 0 means unlimited
|
||||
DownloadBandwidth int64 `json:"download_bandwidth"`
|
||||
}
|
||||
|
||||
// HasPerm returns true if the user has the given permission or any permission
|
||||
func (u *User) HasPerm(permission string) bool {
|
||||
if utils.IsStringInSlice(PermAny, u.Permissions) {
|
||||
return true
|
||||
}
|
||||
return utils.IsStringInSlice(permission, u.Permissions)
|
||||
}
|
||||
|
||||
// GetPermissionsAsJSON returns the permissions as json byte array
|
||||
func (u *User) GetPermissionsAsJSON() ([]byte, error) {
|
||||
return json.Marshal(u.Permissions)
|
||||
}
|
||||
|
||||
// GetPublicKeysAsJSON returns the public keys as json byte array
|
||||
func (u *User) GetPublicKeysAsJSON() ([]byte, error) {
|
||||
return json.Marshal(u.PublicKeys)
|
||||
}
|
||||
|
||||
// GetUID returns a validated uid, suitable for use with os.Chown
|
||||
func (u *User) GetUID() int {
|
||||
if u.UID <= 0 || u.UID > 65535 {
|
||||
return -1
|
||||
}
|
||||
return u.UID
|
||||
}
|
||||
|
||||
// GetGID returns a validated gid, suitable for use with os.Chown
|
||||
func (u *User) GetGID() int {
|
||||
if u.GID <= 0 || u.GID > 65535 {
|
||||
return -1
|
||||
}
|
||||
return u.GID
|
||||
}
|
||||
|
||||
// GetHomeDir returns the shortest path name equivalent to the user's home directory
|
||||
func (u *User) GetHomeDir() string {
|
||||
return filepath.Clean(u.HomeDir)
|
||||
}
|
||||
|
||||
// HasQuotaRestrictions returns true if there is a quota restriction on number of files or size or both
|
||||
func (u *User) HasQuotaRestrictions() bool {
|
||||
return u.QuotaFiles > 0 || u.QuotaSize > 0
|
||||
}
|
||||
|
||||
// GetRelativePath returns the path for a file relative to the user's home dir.
|
||||
// This is the path as seen by SFTP users
|
||||
func (u *User) GetRelativePath(path string) string {
|
||||
rel, err := filepath.Rel(u.GetHomeDir(), path)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
return "/" + filepath.ToSlash(rel)
|
||||
}
|
||||
216
docker/README.md
@@ -1,5 +1,215 @@
|
||||
## Dockerfile examples
|
||||
# Official Docker image
|
||||
|
||||
Sample Dockerfiles for `sftpgo` daemon and the REST API CLI.
|
||||
SFTPGo provides an official Docker image, available on both [Docker Hub](https://hub.docker.com/r/drakkan/sftpgo) and [GitHub Container Registry](https://github.com/users/drakkan/packages/container/package/sftpgo).
|
||||
|
||||
We don't want to add a `Dockerfile` for every single `sftpgo` configuration option or data provider. You can use the docker configurations here as a starting point and customize them to run `sftpgo` with [Docker](http://www.docker.io "Docker").
|
||||
## Supported tags and respective Dockerfile links
|
||||
|
||||
- [v2.4.3, v2.4, v2, latest](https://github.com/drakkan/sftpgo/blob/v2.4.3/Dockerfile)
|
||||
- [v2.4.3-plugins, v2.4-plugins, v2-plugins, plugins](https://github.com/drakkan/sftpgo/blob/v2.4.3/Dockerfile)
|
||||
- [v2.4.3-alpine, v2.4-alpine, v2-alpine, alpine](https://github.com/drakkan/sftpgo/blob/v2.4.3/Dockerfile.alpine)
|
||||
- [v2.4.3-slim, v2.4-slim, v2-slim, slim](https://github.com/drakkan/sftpgo/blob/v2.4.3/Dockerfile)
|
||||
- [v2.4.3-alpine-slim, v2.4-alpine-slim, v2-alpine-slim, alpine-slim](https://github.com/drakkan/sftpgo/blob/v2.4.3/Dockerfile.alpine)
|
||||
- [v2.4.3-distroless-slim, v2.4-distroless-slim, v2-distroless-slim, distroless-slim](https://github.com/drakkan/sftpgo/blob/v2.4.3/Dockerfile.distroless)
|
||||
- [edge](../Dockerfile)
|
||||
- [edge-plugins](../Dockerfile)
|
||||
- [edge-alpine](../Dockerfile.alpine)
|
||||
- [edge-slim](../Dockerfile)
|
||||
- [edge-alpine-slim](../Dockerfile.alpine)
|
||||
- [edge-distroless-slim](../Dockerfile.distroless)
|
||||
|
||||
## How to use the SFTPGo image
|
||||
|
||||
### Start a `sftpgo` server instance
|
||||
|
||||
Starting a SFTPGo instance is simple:
|
||||
|
||||
```shell
|
||||
docker run --name some-sftpgo -p 8080:8080 -p 2022:2022 -d "drakkan/sftpgo:tag"
|
||||
```
|
||||
|
||||
... where `some-sftpgo` is the name you want to assign to your container, and `tag` is the tag specifying the SFTPGo version you want. See the list above for relevant tags.
|
||||
|
||||
Now visit [http://localhost:8080/web/admin](http://localhost:8080/web/admin), replacing `localhost` with the appropriate IP address if SFTPGo is not reachable on localhost, create the first admin and a new SFTPGo user. The SFTP service is available on port 2022.
|
||||
|
||||
If you don't want to persist any files, for example for testing purposes, you can run an SFTPGo instance like this:
|
||||
|
||||
```shell
|
||||
docker run --rm --name some-sftpgo -p 8080:8080 -p 2022:2022 -d "drakkan/sftpgo:tag"
|
||||
```
|
||||
|
||||
If you prefer GitHub Container Registry to Docker Hub, replace `drakkan/sftpgo:tag` with `ghcr.io/drakkan/sftpgo:tag`.
|
||||
|
||||
### Enable FTP service
|
||||
|
||||
FTP is disabled by default; you can enable the FTP service by starting the SFTPGo instance like this:
|
||||
|
||||
```shell
|
||||
docker run --name some-sftpgo \
|
||||
-p 8080:8080 \
|
||||
-p 2022:2022 \
|
||||
-p 2121:2121 \
|
||||
-p 50000-50100:50000-50100 \
|
||||
-e SFTPGO_FTPD__BINDINGS__0__PORT=2121 \
|
||||
-e SFTPGO_FTPD__BINDINGS__0__FORCE_PASSIVE_IP=<your external ip here> \
|
||||
-d "drakkan/sftpgo:tag"
|
||||
```
|
||||
|
||||
The FTP service is now available on port 2121 and SFTP on port 2022.
|
||||
|
||||
You can change the passive ports range (`50000-50100` by default) by setting the environment variables `SFTPGO_FTPD__PASSIVE_PORT_RANGE__START` and `SFTPGO_FTPD__PASSIVE_PORT_RANGE__END`.
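
For example, a sketch extending the FTP run command above to make the passive range explicit (the values shown are just the defaults; keep the published `-p` range in sync with the configured passive range):

```shell
docker run --name some-sftpgo \
-p 8080:8080 \
-p 2022:2022 \
-p 2121:2121 \
-p 50000-50100:50000-50100 \
-e SFTPGO_FTPD__BINDINGS__0__PORT=2121 \
-e SFTPGO_FTPD__BINDINGS__0__FORCE_PASSIVE_IP=<your external ip here> \
-e SFTPGO_FTPD__PASSIVE_PORT_RANGE__START=50000 \
-e SFTPGO_FTPD__PASSIVE_PORT_RANGE__END=50100 \
-d "drakkan/sftpgo:tag"
```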
|
||||
|
||||
It is recommended that you provide a certificate and key file to expose FTP over TLS. You should prefer SFTP to FTP even if you configure TLS; please don't blindly enable the old FTP protocol.
|
||||
|
||||
### Enable WebDAV service
|
||||
|
||||
WebDAV is disabled by default; you can enable the WebDAV service by starting the SFTPGo instance like this:
|
||||
|
||||
```shell
|
||||
docker run --name some-sftpgo \
|
||||
-p 8080:8080 \
|
||||
-p 2022:2022 \
|
||||
-p 10080:10080 \
|
||||
-e SFTPGO_WEBDAVD__BINDINGS__0__PORT=10080 \
|
||||
-d "drakkan/sftpgo:tag"
|
||||
```
|
||||
|
||||
The WebDAV service is now available on port 10080 and SFTP on port 2022.
|
||||
|
||||
It is recommended that you provide a certificate and key file to expose WebDAV over https.
|
||||
|
||||
### Container shell access and viewing SFTPGo logs
|
||||
|
||||
The docker exec command allows you to run commands inside a Docker container. The following command line will give you a shell inside your `sftpgo` container:
|
||||
|
||||
```shell
|
||||
docker exec -it some-sftpgo sh
|
||||
```
|
||||
|
||||
The logs are available through Docker's container log:
|
||||
|
||||
```shell
|
||||
docker logs some-sftpgo
|
||||
```
|
||||
|
||||
**Note:** the [distroless](../Dockerfile.distroless) image contains only a statically linked sftpgo binary and its minimal runtime dependencies. A shell is not available in this image.
|
||||
|
||||
### Where to Store Data
|
||||
|
||||
Important note: There are several ways to store data used by applications that run in Docker containers. We encourage users of the SFTPGo images to familiarize themselves with the options available, including:
|
||||
|
||||
- Let Docker manage the storage for SFTPGo data by [writing them to disk on the host system using its own internal volume management](https://docs.docker.com/engine/tutorials/dockervolumes/#adding-a-data-volume). This is the default and is easy and fairly transparent to the user. The downside is that the files may be hard to locate for tools and applications that run directly on the host system, i.e. outside containers.
|
||||
- Create a data directory on the host system (outside the container) and [mount this to a directory visible from inside the container](https://docs.docker.com/engine/tutorials/dockervolumes/#mount-a-host-directory-as-a-data-volume). This places the SFTPGo files in a known location on the host system, and makes it easy for tools and applications on the host system to access the files. The downside is that the user needs to make sure that the directory exists, and that e.g. directory permissions and other security mechanisms on the host system are set up correctly. The SFTPGo image runs using `1000` as UID/GID by default.
|
||||
|
||||
The Docker documentation is a good starting point for understanding the different storage options and variations, and there are multiple blogs and forum postings that discuss and give advice in this area. We will simply show the basic procedure here for the latter option above:
|
||||
|
||||
1. Create a data directory on a suitable volume on your host system, e.g. `/my/own/sftpgodata`. The user with ID `1000` must be able to write to this directory. Please note that you don't need an actual user with ID `1000` on your host system: `chown -R 1000:1000 /my/own/sftpgodata` is enough even if there is no user/group with UID/GID `1000`.
|
||||
2. Create a home directory for the sftpgo container user on your host system e.g. `/my/own/sftpgohome`. As with the data directory above, make sure that the user with ID `1000` can write to this directory: `chown -R 1000:1000 /my/own/sftpgohome`
|
||||
3. Start your SFTPGo container like this:
|
||||
|
||||
```shell
|
||||
docker run --name some-sftpgo \
|
||||
-p 8080:8090 \
|
||||
-p 2022:2022 \
|
||||
--mount type=bind,source=/my/own/sftpgodata,target=/srv/sftpgo \
|
||||
--mount type=bind,source=/my/own/sftpgohome,target=/var/lib/sftpgo \
|
||||
-e SFTPGO_HTTPD__BINDINGS__0__PORT=8090 \
|
||||
-d "drakkan/sftpgo:tag"
|
||||
```
|
||||
|
||||
As you can see, SFTPGo uses two main volumes:
|
||||
|
||||
- `/srv/sftpgo` to handle persistent data. The default home directory for SFTP/FTP/WebDAV users is `/srv/sftpgo/data/<username>`. Backups are stored in `/srv/sftpgo/backups`
|
||||
- `/var/lib/sftpgo` is the home directory for the sftpgo system user defined inside the container. This is also the container's working directory; host keys will be created here when using the default configuration.
|
||||
|
||||
If you want finer-grained control, you can also mount `/srv/sftpgo/data` and `/srv/sftpgo/backups` as separate volumes instead of mounting `/srv/sftpgo`, as shown in the sketch below.
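
For instance, a sketch of the run command from step 3 with data and backups mounted separately (assuming `/my/own/sftpgodata` and `/my/own/sftpgobackups` exist on the host and are writable by UID `1000`; the backups path name is illustrative):

```shell
docker run --name some-sftpgo \
-p 8080:8080 \
-p 2022:2022 \
--mount type=bind,source=/my/own/sftpgodata,target=/srv/sftpgo/data \
--mount type=bind,source=/my/own/sftpgobackups,target=/srv/sftpgo/backups \
--mount type=bind,source=/my/own/sftpgohome,target=/var/lib/sftpgo \
-d "drakkan/sftpgo:tag"
```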
|
||||
|
||||
### Configuration
|
||||
|
||||
The runtime configuration can be customized via environment variables that you can set by passing the `-e` option to the `docker run` command or inside the `environment` section if you are using [docker stack deploy](https://docs.docker.com/engine/reference/commandline/stack_deploy/) or [docker-compose](https://github.com/docker/compose).
|
||||
|
||||
Please take a look [here](../docs/full-configuration.md) to learn how to configure SFTPGo via environment variables.
|
||||
|
||||
Alternatively, you can mount your custom configuration file to `/var/lib/sftpgo` or `/var/lib/sftpgo/.config/sftpgo`.
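
For example, a minimal sketch that mounts a single custom configuration file (assuming it is named `sftpgo.json`; the host path is illustrative):

```shell
docker run --name some-sftpgo \
-p 8080:8080 \
-p 2022:2022 \
--mount type=bind,source=/my/own/sftpgo.json,target=/var/lib/sftpgo/sftpgo.json \
-d "drakkan/sftpgo:tag"
```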
|
||||
|
||||
### Loading initial data
|
||||
|
||||
Initial data can be loaded in the following ways:
|
||||
|
||||
- via the `--loaddata-from` flag or the `SFTPGO_LOADDATA_FROM` environment variable. This flag is supported for both the `serve` command (load initial data and start the service) and the `initprovider` command (initialize the provider, load initial data and exit)
|
||||
- by providing a dump file to the memory provider
|
||||
|
||||
Please take a look [here](../docs/full-configuration.md) for more details.
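
As a minimal sketch, assuming a dump file named `backup.json` on the host (the file name and host path are illustrative), initial data could be loaded at startup like this:

```shell
docker run --name some-sftpgo \
-p 8080:8080 \
-p 2022:2022 \
--mount type=bind,source=/my/own/backup.json,target=/var/lib/sftpgo/backup.json \
-e SFTPGO_LOADDATA_FROM=/var/lib/sftpgo/backup.json \
-d "drakkan/sftpgo:tag"
```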
|
||||
|
||||
### Running as an arbitrary user
|
||||
|
||||
The SFTPGo image runs using `1000` as UID/GID by default. If you know the permissions of your data and/or configuration directory are already set appropriately, or you need to run SFTPGo with a specific UID/GID, it is possible to invoke this image with `--user` set to any value (other than `root/0`) in order to achieve the desired access/configuration:
|
||||
|
||||
```shell
|
||||
$ ls -lnd data
|
||||
drwxr-xr-x 2 1100 1100 6 7 nov 09.09 data
|
||||
$ ls -lnd config
|
||||
drwxr-xr-x 2 1100 1100 6 7 nov 09.19 config
|
||||
```
|
||||
|
||||
With the above directory permissions, you can start a SFTPGo instance like this:
|
||||
|
||||
```shell
|
||||
docker run --name some-sftpgo \
|
||||
--user 1100:1100 \
|
||||
-p 8080:8080 \
|
||||
-p 2022:2022 \
|
||||
--mount type=bind,source="${PWD}/data",target=/srv/sftpgo \
|
||||
--mount type=bind,source="${PWD}/config",target=/var/lib/sftpgo \
|
||||
-d "drakkan/sftpgo:tag"
|
||||
```
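
To double check which UID/GID the server is actually running with, you can exec into the container (this assumes the standard Debian based variant; the distroless variant ships no shell or extra binaries):

```shell
docker exec some-sftpgo id
# expected output similar to: uid=1100 gid=1100 groups=1100
```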

Alternatively you can build your own image using the official one as a base, here is a sample Dockerfile:

```shell
FROM drakkan/sftpgo:tag

USER root
RUN chown -R 1100:1100 /etc/sftpgo && chown 1100:1100 /var/lib/sftpgo /srv/sftpgo

USER 1100:1100
```

**Note:** the above Dockerfile will not work if you use the [distroless](../Dockerfile.distroless) image as base since the `chown` command is not available there.

## Image Variants

The `sftpgo` image comes in many flavors, each designed for a specific use case. The `edge`, `edge-slim`, `edge-alpine`, `edge-alpine-slim` and `edge-distroless-slim` tags are updated after each new commit.

### `sftpgo:<version>`

This is the de facto image, it is based on [Debian](https://www.debian.org/), available in [the `debian` official image](https://hub.docker.com/_/debian). If you are unsure about what your needs are, you probably want to use this one.

### `sftpgo:<version>-alpine`

This image is based on the popular [Alpine Linux project](https://alpinelinux.org/), available in [the `alpine` official image](https://hub.docker.com/_/alpine). Alpine Linux is much smaller than most distribution base images (~5MB), and thus leads to much slimmer images in general.

This variant is highly recommended when a final image size as small as possible is desired. The main caveat to note is that it does use [musl libc](https://musl.libc.org/) instead of [glibc and friends](https://www.etalabs.net/compare_libcs.html), so certain software might run into issues depending on the depth of their libc requirements. However, most software doesn't have an issue with this, so this variant is usually a very safe choice. See [this Hacker News comment thread](https://news.ycombinator.com/item?id=10782897) for more discussion of the issues that might arise and some pro/con comparisons of using Alpine-based images.

### `sftpgo:<version>-distroless`

This image is based on the popular [Distroless project](https://github.com/GoogleContainerTools/distroless). We use the latest Debian based distroless image as base.

The distroless variant contains only a statically linked sftpgo binary and its minimal runtime dependencies, so it doesn't allow shell access (no shell is installed).
SQLite support is disabled since it requires CGO and therefore a C runtime, which is not installed.
The default data provider is `bolt`; all the supported data providers except `sqlite` work.
We only provide the slim variant and so the optional `git` dependency is not available.

### `sftpgo:<suite>-slim`

These tags provide a slimmer image that does not include `jq` and the optional `git` and `rsync` dependencies.

### `sftpgo:<suite>-plugins`

These tags provide the standard image with the addition of all "official" plugins installed in `/usr/local/bin`.

## Helm Chart

Some helm charts are available:

- [sagikazarmark/sftpgo](https://artifacthub.io/packages/helm/sagikazarmark/sftpgo)
- [truecharts/sftpgo](https://artifacthub.io/packages/helm/truecharts/sftpgo)

These charts are not maintained by the SFTPGo project and any issues with the charts should be raised to the upstream repo.
|
||||
|
||||
@@ -1,8 +0,0 @@
|
||||
FROM debian:latest
|
||||
LABEL maintainer="nicola.murino@gmail.com"
|
||||
RUN apt-get update && apt-get install -y curl python3-requests python3-pygments
|
||||
|
||||
RUN curl https://raw.githubusercontent.com/drakkan/sftpgo/master/scripts/sftpgo_api_cli.py --output /usr/bin/sftpgo_api_cli.py
|
||||
|
||||
ENTRYPOINT ["python3", "/usr/bin/sftpgo_api_cli.py" ]
|
||||
CMD []
|
||||
docker/scripts/download-plugins.sh (new executable file, 32 lines)
@@ -0,0 +1,32 @@
|
||||
#!/usr/bin/env bash
|
||||
set -e
|
||||
|
||||
ARCH=`uname -m`
|
||||
|
||||
case ${ARCH} in
|
||||
"x86_64")
|
||||
SUFFIX=amd64
|
||||
;;
|
||||
"aarch64")
|
||||
SUFFIX=arm64
|
||||
;;
|
||||
*)
|
||||
SUFFIX=ppc64le
|
||||
;;
|
||||
esac
|
||||
|
||||
echo "download plugins for arch ${SUFFIX}"
|
||||
|
||||
for PLUGIN in geoipfilter kms metadata
|
||||
do
|
||||
echo "download plugin from https://github.com/sftpgo/sftpgo-plugin-${PLUGIN}/releases/latest/download/sftpgo-plugin-${PLUGIN}-linux-${SUFFIX}"
|
||||
curl -L "https://github.com/sftpgo/sftpgo-plugin-${PLUGIN}/releases/latest/download/sftpgo-plugin-${PLUGIN}-linux-${SUFFIX}" --output "/usr/local/bin/sftpgo-plugin-${PLUGIN}"
|
||||
chmod 755 "/usr/local/bin/sftpgo-plugin-${PLUGIN}"
|
||||
done
|
||||
|
||||
for PLUGIN in pubsub eventstore eventsearch
|
||||
do
|
||||
echo "download plugin from https://github.com/sftpgo/sftpgo-plugin-${PLUGIN}/releases/download/v1.0.4/sftpgo-plugin-${PLUGIN}-linux-${SUFFIX}"
|
||||
curl -L "https://github.com/sftpgo/sftpgo-plugin-${PLUGIN}/releases/download/v1.0.4/sftpgo-plugin-${PLUGIN}-linux-${SUFFIX}" --output "/usr/local/bin/sftpgo-plugin-${PLUGIN}"
|
||||
chmod 755 "/usr/local/bin/sftpgo-plugin-${PLUGIN}"
|
||||
done
|
||||
@@ -1,24 +0,0 @@
|
||||
FROM golang:alpine as builder
|
||||
|
||||
RUN apk add --no-cache git gcc g++ ca-certificates \
|
||||
&& go get -d github.com/drakkan/sftpgo
|
||||
WORKDIR /go/src/github.com/drakkan/sftpgo
|
||||
# uncomment the next line to get the latest stable version instead of the latest git
|
||||
#RUN git checkout `git rev-list --tags --max-count=1`
|
||||
RUN go build -i -ldflags "-s -w -X github.com/drakkan/sftpgo/utils.commit=`git describe --always --dirty` -X github.com/drakkan/sftpgo/utils.date=`date -u +%FT%TZ`" -o /go/bin/sftpgo
|
||||
|
||||
FROM alpine:latest
|
||||
|
||||
RUN apk add --no-cache ca-certificates su-exec \
|
||||
&& mkdir -p /data /etc/sftpgo /srv/sftpgo/config
|
||||
|
||||
COPY --from=builder /go/bin/sftpgo /bin/
|
||||
COPY --from=builder /go/src/github.com/drakkan/sftpgo/sftpgo.json /etc/sftpgo/sftpgo.json
|
||||
COPY docker-entrypoint.sh /bin/entrypoint.sh
|
||||
RUN chmod +x /bin/entrypoint.sh
|
||||
|
||||
VOLUME [ "/data", "/srv/sftpgo/config" ]
|
||||
EXPOSE 2022 8080
|
||||
|
||||
ENTRYPOINT ["/bin/entrypoint.sh"]
|
||||
CMD []
|
||||
@@ -1,39 +0,0 @@
|
||||
# SFTPGo with Docker and Alpine
|
||||
|
||||
This Dockerfile is meant to build an image that hosts multiple instances of SFTPGo, each started with a different user.
|
||||
|
||||
### Example
|
||||
> 1003 is a custom uid:gid for this instance of SFTPGo
|
||||
```bash
|
||||
# Prereq on docker host
|
||||
sudo groupadd -g 1003 sftpgrp && \
|
||||
sudo useradd -u 1003 -g 1003 sftpuser -d /home/sftpuser/ && \
|
||||
sudo -u sftpuser mkdir /home/sftpuser/{conf,data} && \
|
||||
curl https://raw.githubusercontent.com/drakkan/sftpgo/master/sql/sqlite/20190828.sql | sqlite3 /home/sftpuser/conf/sftpgo.db && \
|
||||
curl https://raw.githubusercontent.com/drakkan/sftpgo/master/sftpgo.json -o /home/sftpuser/conf/sftpgo.json
|
||||
|
||||
# Get and build SFTPGo image
|
||||
git clone https://github.com/drakkan/sftpgo.git && \
|
||||
cd sftpgo && \
|
||||
sudo docker build -t sftpgo docker/sftpgo/alpine/
|
||||
|
||||
# Starting image
|
||||
sudo docker run --name sftpgo \
|
||||
-e SFTPGO_LOG_FILE_PATH= \
|
||||
-e SFTPGO_CONFIG_DIR=/srv/sftpgo/config \
|
||||
-p 8080:8080 \
|
||||
-p 2022:2022 \
|
||||
-e PUID=1003 \
|
||||
-e GUID=1003 \
|
||||
-v /home/sftpuser/conf/:/srv/sftpgo/config \
|
||||
-v /home/sftpuser/data:/data \
|
||||
sftpgo
|
||||
```
|
||||
The `entrypoint.sh` script fixes the permissions of the directories and starts the process with the right user.
|
||||
|
||||
Several instances can be run with different parameters.
|
||||
|
||||
### Custom systemd script
|
||||
An example systemd unit is available [here](sftpgo.service); it uses the `Environment` parameter to set `PUID` and `GUID`.
|
||||
|
||||
The `WorkingDirectory` set in the unit must exist and contain an environment file, named like `sftpgo-${PUID}.env`, holding the variables for the SFTPGo instance.
|
||||
@@ -1,7 +0,0 @@
|
||||
#!/bin/sh
|
||||
|
||||
set -eu
|
||||
|
||||
chown -R "${PUID}:${GUID}" /data /etc/sftpgo /srv/sftpgo/config \
|
||||
&& exec su-exec "${PUID}:${GUID}" \
|
||||
/bin/sftpgo serve "$@"
|
||||
@@ -1,29 +0,0 @@
|
||||
[Unit]
|
||||
Description=SFTPGo sftp server
|
||||
After=docker.service
|
||||
|
||||
[Service]
|
||||
User=root
|
||||
Group=root
|
||||
WorkingDirectory=/etc/sftpgo
|
||||
Environment=PUID=1003
|
||||
Environment=GUID=1003
|
||||
EnvironmentFile=-/etc/sysconfig/sftpgo.conf
|
||||
ExecStartPre=-docker kill sftpgo
|
||||
ExecStartPre=-docker rm sftpgo
|
||||
ExecStart=docker run --name sftpgo \
|
||||
--env-file sftpgo-${PUID}.env \
|
||||
-e PUID=${PUID} \
|
||||
-e GUID=${GUID} \
|
||||
-p 8080:8080 \
|
||||
-p 2022:2022 \
|
||||
-v /home/sftpuser/conf/:/srv/sftpgo/config \
|
||||
-v /home/sftpuser/data:/data \
|
||||
sftpgo
|
||||
ExecStop=docker stop sftpgo
|
||||
SyslogIdentifier=sftpgo
|
||||
Restart=always
|
||||
RestartSec=10s
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
@@ -1,56 +0,0 @@
|
||||
# we use a multi stage build to have a separate build and run env
|
||||
FROM golang:latest as buildenv
|
||||
LABEL maintainer="nicola.murino@gmail.com"
|
||||
RUN go get -d github.com/drakkan/sftpgo
|
||||
WORKDIR /go/src/github.com/drakkan/sftpgo
|
||||
# uncomment the next line to get the latest stable version instead of the latest git
|
||||
#RUN git checkout `git rev-list --tags --max-count=1`
|
||||
RUN go build -i -ldflags "-s -w -X github.com/drakkan/sftpgo/utils.commit=`git describe --always --dirty` -X github.com/drakkan/sftpgo/utils.date=`date -u +%FT%TZ`" -o sftpgo
|
||||
|
||||
# now define the run environment
|
||||
FROM debian:latest
|
||||
|
||||
ARG BASE_DIR=/app
|
||||
ARG DATA_REL_DIR=data
|
||||
ARG CONFIG_REL_DIR=config
|
||||
ARG USERNAME=sftpgo
|
||||
ARG GROUPNAME=sftpgo
|
||||
ARG UID=515
|
||||
ARG GID=515
|
||||
|
||||
# HOME_DIR for sftpgo itself
|
||||
ENV HOME_DIR=${BASE_DIR}/${USERNAME}
|
||||
# DATA_DIR, this is a volume that you can use hold user's home dirs
|
||||
ENV DATA_DIR=${BASE_DIR}/${DATA_REL_DIR}
|
||||
# CONFIG_DIR, this is a volume to persist the daemon private keys, configuration file ecc..
|
||||
ENV CONFIG_DIR=${BASE_DIR}/${CONFIG_REL_DIR}
|
||||
|
||||
RUN mkdir -p ${DATA_DIR} ${CONFIG_DIR}
|
||||
RUN groupadd --system -g ${GID} ${GROUPNAME}
|
||||
RUN useradd --system --create-home --no-log-init --home-dir ${HOME_DIR} --comment "SFTPGo user" --shell /bin/false --gid ${GID} --uid ${UID} ${USERNAME}
|
||||
|
||||
WORKDIR ${HOME_DIR}
|
||||
RUN mkdir -p bin .config/sftpgo
|
||||
ENV PATH ${HOME_DIR}/bin:$PATH
|
||||
COPY --from=buildenv /go/src/github.com/drakkan/sftpgo/sftpgo bin/sftpgo
|
||||
# default config file to use if no config file is found inside the CONFIG_DIR volume.
|
||||
# You can override each configuration options via env vars too
|
||||
COPY --from=buildenv /go/src/github.com/drakkan/sftpgo/sftpgo.json .config/sftpgo/
|
||||
RUN chown -R ${UID}:${GID} ${DATA_DIR}
|
||||
|
||||
# run as non root user
|
||||
USER ${USERNAME}
|
||||
|
||||
EXPOSE 2022 8080
|
||||
|
||||
# the defined volumes must have write access for the UID and GID defined above
|
||||
VOLUME [ "$DATA_DIR", "$CONFIG_DIR" ]
|
||||
|
||||
# override some default configuration options using env vars
|
||||
ENV SFTPGO_CONFIG_DIR=${CONFIG_DIR}
|
||||
# setting SFTPGO_LOG_FILE_PATH to an empty string will log to stdout
|
||||
ENV SFTPGO_LOG_FILE_PATH=${CONFIG_DIR}/sftpgo.log
|
||||
ENV SFTPGO_HTTPD__BIND_ADDRESS=""
|
||||
|
||||
ENTRYPOINT ["sftpgo"]
|
||||
CMD ["serve"]
|
||||
@@ -1,22 +0,0 @@
|
||||
## Dockerfile based on Debian stable
|
||||
|
||||
Please read the comments inside the `Dockerfile` to learn how to customize things for your setup.
|
||||
|
||||
You can build the container image using `docker build`, for example:
|
||||
|
||||
```bash
|
||||
docker build -t="drakkan/sftpgo" .
|
||||
```
|
||||
|
||||
and you can run the Dockerfile using something like this:
|
||||
|
||||
```bash
|
||||
docker run --name sftpgo -p 8080:8080 -p 2022:2022 --mount type=bind,source=/srv/sftpgo/data,target=/app/data --mount type=bind,source=/srv/sftpgo/config,target=/app/config drakkan/sftpgo
|
||||
```
|
||||
|
||||
where `/srv/sftpgo/data` and `/srv/sftpgo/config` are two folders on the host system with write access for UID/GID defined inside the `Dockerfile`. You can choose to create a new user, on the host system, with a matching UID/GID pair or simply do something like:
|
||||
|
||||
|
||||
```bash
|
||||
chown -R <UID>:<GID> /srv/sftpgo/data /srv/sftpgo/config
|
||||
```
|
||||
docs/account.md (new file, 22 lines)
@@ -0,0 +1,22 @@
|
||||
# Account's configuration properties
|
||||
|
||||
Please take a look at the [OpenAPI schema](../openapi/openapi.yaml) for the exact definitions of user, folder and admin fields.
|
||||
If you need an example you can export a dump using the Web Admin or by invoking the `dumpdata` endpoint directly. You need to obtain an access token first, for example:
|
||||
|
||||
```shell
|
||||
$ curl "http://admin:password@127.0.0.1:8080/api/v2/token"
|
||||
{"access_token":"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhdWQiOlsiQVBJIl0sImV4cCI6MTYxMzMzNTI2MSwianRpIjoiYzBrb2gxZmNkcnBjaHNzMGZwZmciLCJuYmYiOjE2MTMzMzQ2MzEsInBlcm1pc3Npb25zIjpbIioiXSwic3ViIjoiYUJ0SHUwMHNBUmxzZ29yeEtLQ1pZZWVqSTRKVTlXbThHSGNiVWtWVmc1TT0iLCJ1c2VybmFtZSI6ImFkbWluIn0.WiyqvUF-92zCr--y4Q_sxn-tPnISFzGZd_exsG-K7ME","expires_at":"2021-02-14T20:41:01Z"}
|
||||
|
||||
curl -H "Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhdWQiOlsiQVBJIl0sImV4cCI6MTYxMzMzNTI2MSwianRpIjoiYzBrb2gxZmNkcnBjaHNzMGZwZmciLCJuYmYiOjE2MTMzMzQ2MzEsInBlcm1pc3Npb25zIjpbIioiXSwic3ViIjoiYUJ0SHUwMHNBUmxzZ29yeEtLQ1pZZWVqSTRKVTlXbThHSGNiVWtWVmc1TT0iLCJ1c2VybmFtZSI6ImFkbWluIn0.WiyqvUF-92zCr--y4Q_sxn-tPnISFzGZd_exsG-K7ME" "http://127.0.0.1:8080/api/v2/dumpdata?output-data=1"
|
||||
```
|
||||
|
||||
The dump is a JSON document with all SFTPGo data, including users, folders and admins.
|
||||
|
||||
These properties are stored inside the configured data provider.
|
||||
|
||||
SFTPGo supports checking passwords stored with argon2id, bcrypt, pbkdf2, md5crypt, sha256crypt and sha512crypt too. For pbkdf2 the supported format is `$<algo>$<iterations>$<salt>$<hashed pwd base64 encoded>`, where algo is `pbkdf2-sha1` or `pbkdf2-sha256` or `pbkdf2-sha512` or `$pbkdf2-b64salt-sha256$`. For example the pbkdf2-sha256 of the word password using 150000 iterations and E86a9YMX3zC7 as salt must be stored as `$pbkdf2-sha256$150000$E86a9YMX3zC7$R5J62hsSq+pYw00hLLPKBbcGXmq7fj5+/M0IFoYtZbo=`. In the pbkdf2 variant with b64salt the salt is base64 encoded. For bcrypt the format must be the one supported by golang's crypto/bcrypt package, for example the password secret with cost 14 must be stored as `$2a$14$ajq8Q7fbtFRQvXpdCq7Jcuy.Rx1h/L4J60Otx.gyNLbAYctGMJ9tK`. For md5crypt, sha256crypt and sha512crypt we support the format used in `/etc/shadow` with the `$1$`, `$5$` and `$6$` prefix, this is useful if you are migrating from Unix system user accounts. We support Apache md5crypt (`$apr1$` prefix) too. Using the REST API you can send a password hashed as argon2id, bcrypt, pbkdf2, md5crypt, sha256crypt or sha512crypt and it will be stored as is.
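
If you need to generate such hashes yourself, for example while migrating accounts, a quick sketch using the `mkpasswd` utility (an assumption: it is provided by the `whois` package on Debian-like systems; any tool producing one of the formats above works):

```shell
# produces an /etc/shadow style sha512crypt hash ($6$ prefix) that SFTPGo accepts as is
mkpasswd --method=sha-512 'secret'
```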
|
||||
|
||||
If you want to use your existing accounts, you have these options:
|
||||
|
||||
- you can import your users inside SFTPGo. Take a look at the [convert users](../examples/convertusers) script, it can convert and import users from Linux system users and Pure-FTPd/ProFTPD virtual users
|
||||
- you can use an external authentication program
|
||||
docs/azure-blob-storage.md (new file, 20 lines)
@@ -0,0 +1,20 @@
|
||||
# Azure Blob Storage backend
|
||||
|
||||
To connect SFTPGo to Azure Blob Storage, you need to specify the access credentials. Azure Blob Storage has different options for credentials, we support:
|
||||
|
||||
1. Providing an account name and account key.
|
||||
2. Providing a shared access signature (SAS).
|
||||
|
||||
If you authenticate using account and key you also need to specify a container. The endpoint can generally be left blank, the default is `blob.core.windows.net`.
|
||||
|
||||
If you provide a SAS URL the container is optional and if given it must match the one inside the shared access signature.
|
||||
|
||||
If you want to connect to an emulator such as [Azurite](https://github.com/Azure/Azurite) you need to provide the account name/key pair and an endpoint prefixed with the protocol, for example `http://127.0.0.1:10000`.
|
||||
|
||||
Specifying a different `key_prefix`, you can assign different "folders" of the same container to different users. This is similar to a chroot directory for local filesystem. Each SFTPGo user can only access the assigned folder and its contents. The folder identified by `key_prefix` does not need to be pre-created.
|
||||
|
||||
For multipart uploads you can customize the parts size and the upload concurrency. Please note that if the upload bandwidth between the client and SFTPGo is greater than the upload bandwidth between SFTPGo and the Azure Blob service then the client should wait for the last parts to be uploaded to Azure after finishing uploading the file to SFTPGo, and it may time out. Keep this in mind if you customize these parameters.
|
||||
|
||||
The configured container must exist.
|
||||
|
||||
This backend is very similar to the [S3](./s3.md) backend, and it has the same limitations. As with S3 `chtime` will fail with the default configuration, you can install the [metadata plugin](https://github.com/sftpgo/sftpgo-plugin-metadata) to make it work and thus be able to preserve/change file modification times.
|
||||
docs/build-from-source.md (new file, 41 lines)
@@ -0,0 +1,41 @@
|
||||
# Build SFTPGo from source
|
||||
|
||||
Download the sources and use `go build`.
|
||||
|
||||
The following build tags are available:
|
||||
|
||||
- `nogcs`, disable Google Cloud Storage backend, default enabled
|
||||
- `nos3`, disable S3 Compatible Object Storage backends, default enabled
|
||||
- `noazblob`, disable Azure Blob Storage backend, default enabled
|
||||
- `nobolt`, disable Bolt data provider, default enabled
|
||||
- `nomysql`, disable MySQL data provider, default enabled
|
||||
- `nopgsql`, disable PostgreSQL data provider, default enabled
|
||||
- `nosqlite`, disable SQLite data provider, default enabled
|
||||
- `noportable`, disable portable mode, default enabled
|
||||
- `nometrics`, disable Prometheus metrics, default enabled
|
||||
- `bundle`, embed static files and templates. Before building with this tag enabled you have to copy `openapi`, `static` and `templates` dirs to `internal/bundle` directory. Default disabled
|
||||
|
||||
If no build tag is specified the build will include the default features.
|
||||
|
||||
The optional [SQLite driver](https://github.com/mattn/go-sqlite3 "go-sqlite3") is a `CGO` package and so it requires a `C` compiler at build time.
|
||||
On Linux and macOS, a compiler is easy to install or already installed. On Windows, you need to download [MinGW-w64](https://sourceforge.net/projects/mingw-w64/files/) and build SFTPGo from its command prompt.
|
||||
|
||||
The compiler is a build time only dependency. It is not required at runtime.
|
||||
|
||||
Version info, such as git commit and build date, can be embedded setting the following string variables at build time:
|
||||
|
||||
- `github.com/drakkan/sftpgo/v2/internal/version.commit`
|
||||
- `github.com/drakkan/sftpgo/v2/internal/version.date`
|
||||
|
||||
For example, you can build using the following command:
|
||||
|
||||
```bash
|
||||
go build -tags nogcs,nos3,nosqlite -ldflags "-s -w -X github.com/drakkan/sftpgo/v2/internal/version.commit=`git describe --always --abbrev=8 --dirty` -X github.com/drakkan/sftpgo/v2/internal/version.date=`date -u +%FT%TZ`" -o sftpgo
|
||||
```
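
Similarly, a minimal sketch for a build using the `bundle` tag described above (the copy step reflects the requirement to place the assets under `internal/bundle` before building):

```bash
cp -r openapi static templates internal/bundle/
go build -tags bundle -o sftpgo
```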
|
||||
|
||||
You should get a version that includes git commit, build date and available features like this one:
|
||||
|
||||
```bash
|
||||
$ ./sftpgo -v
|
||||
SFTPGo 2.3.1-dev-c8158e1-2022-07-24T17:25:45Z +metrics +azblob +gcs +s3 +bolt +mysql +pgsql +sqlite +portable
|
||||
```
|
||||
docs/check-password-hook.md (new file, 47 lines)
@@ -0,0 +1,47 @@
|
||||
# Check password hook
|
||||
|
||||
This hook allows you to externally check the provided password. Its main use case is to easily support things like password+OTP for protocols without keyboard interactive support such as FTP and WebDAV. You can ask your users to login using a string consisting of a fixed password and a One Time Token, verify the token inside the hook and ask SFTPGo to verify the fixed part.
|
||||
|
||||
The same thing can be achieved using [External authentication](./external-auth.md) but using this hook is simpler in some use cases.
|
||||
|
||||
The `check password hook` can be defined as the absolute path of your program or an HTTP URL.
|
||||
|
||||
The expected response is a JSON serialized struct containing the following keys:
|
||||
|
||||
- `status` integer. 0 means KO, 1 means OK, 2 means partial success
|
||||
- `to_verify` string. For `status` = 2 SFTPGo will check this password against the one stored inside SFTPGo data provider
|
||||
|
||||
If the hook defines an external program it can read the following environment variables:
|
||||
|
||||
- `SFTPGO_AUTHD_USERNAME`
|
||||
- `SFTPGO_AUTHD_PASSWORD`
|
||||
- `SFTPGO_AUTHD_IP`
|
||||
- `SFTPGO_AUTHD_PROTOCOL`, possible values are `SSH`, `FTP`, `DAV`, `HTTP`
|
||||
|
||||
Global environment variables are cleared, for security reasons, when the script is called. You can set additional environment variables in the "command" configuration section.
|
||||
|
||||
The program must write, on its standard output, the expected JSON serialized response described above.
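
As a minimal sketch, a hook that treats the last 6 characters of the provided password as a one time token and delegates the fixed part to SFTPGo could look like the following. The `verify_otp` helper is a placeholder you would implement yourself; this is not the official example (see the end of this document for that):

```shell
#!/bin/bash

PASSWORD="$SFTPGO_AUTHD_PASSWORD"
OTP="${PASSWORD: -6}"                  # last 6 characters, assumed to be the one time token
FIXED="${PASSWORD:0:${#PASSWORD}-6}"   # the remaining fixed part

if verify_otp "$SFTPGO_AUTHD_USERNAME" "$OTP"; then
  # partial success: ask SFTPGo to verify the fixed part against the stored password
  printf '{"status":2,"to_verify":"%s"}' "$FIXED"
else
  echo '{"status":0}'
fi
```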
|
||||
|
||||
If the hook is an HTTP URL then it will be invoked as HTTP POST. The request body will contain a JSON serialized struct with the following fields:
|
||||
|
||||
- `username`
|
||||
- `password`
|
||||
- `ip`
|
||||
- `protocol`, possible values are `SSH`, `FTP`, `DAV`
|
||||
|
||||
If authentication succeeds the HTTP response code must be 200 and the response body must contain the expected JSON serialized response described above.
|
||||
|
||||
The program hook must finish within 30 seconds, the HTTP hook timeout will use the global configuration for HTTP clients.
|
||||
|
||||
You can also restrict the hook scope using the `check_password_scope` configuration key:
|
||||
|
||||
- `0` means all supported protocols.
|
||||
- `1` means SSH only
|
||||
- `2` means FTP only
|
||||
- `4` means WebDAV only
|
||||
|
||||
You can combine the scopes. For example, 6 means FTP and WebDAV.
|
||||
|
||||
You can disable the hook on a per-user basis.
|
||||
|
||||
An example check password program allowing 2FA using password + one time token can be found inside the source tree [checkpwd](../examples/OTP/authy/checkpwd) directory.
|
||||
docs/custom-actions.md (new file, 125 lines)
@@ -0,0 +1,125 @@
|
||||
# Custom Actions
|
||||
|
||||
SFTPGo can notify filesystem and provider events using custom actions. A custom action can be an external program or an HTTP URL.
|
||||
|
||||
## Filesystem events
|
||||
|
||||
The `actions` struct inside the `common` configuration section allows to configure the actions for file operations and SSH commands.
|
||||
The `hook` can be defined as the absolute path of your program or an HTTP URL.
|
||||
|
||||
The following `actions` are supported:
|
||||
|
||||
- `download`
|
||||
- `first-download`
|
||||
- `pre-download`
|
||||
- `upload`
|
||||
- `first-upload`
|
||||
- `pre-upload`
|
||||
- `delete`
|
||||
- `pre-delete`
|
||||
- `rename`
|
||||
- `mkdir`
|
||||
- `rmdir`
|
||||
- `ssh_cmd`
|
||||
|
||||
The `upload` condition includes both uploads to new files and overwrites of existing ones. If an upload is aborted due to quota limits SFTPGo tries to remove the partial file, so if the notification reports a zero size file and a quota exceeded error the file has been deleted. The `ssh_cmd` condition will be triggered after a command is successfully executed via SSH. `scp` will trigger the `download` and `upload` conditions and not `ssh_cmd`. The `first-download` and `first-upload` actions are executed only if no error occurs and they don't exclude the `download` and `upload` notifications, so you will get both the `first-upload` and `upload` notification after the first successful upload and the same for the first successful download.
|
||||
For cloud backends directories are virtual, they are created implicitly when you upload a file and are implicitly removed when the last file within a directory is removed. The `mkdir` and `rmdir` notifications are sent only when a directory is explicitly created or removed.
|
||||
|
||||
The notification will indicate if an error is detected and so, for example, a partial file is uploaded.
|
||||
|
||||
The `pre-delete` action, if defined, will be called just before files deletion. If the external command completes with a zero exit status or the HTTP notification response code is `200` then SFTPGo will assume that the file was already deleted/moved and so it will not try to remove the file and it will not execute the hook defined for the `delete` action.
|
||||
|
||||
The `pre-download` and `pre-upload` actions, will be called before downloads and uploads. If the external command completes with a zero exit status or the HTTP notification response code is `200` then SFTPGo allows the operation, otherwise the client will get a permission denied error.
|
||||
|
||||
If the `hook` defines a path to an external program, then this program can read the following environment variables:
|
||||
|
||||
- `SFTPGO_ACTION`, supported action
|
||||
- `SFTPGO_ACTION_USERNAME`
|
||||
- `SFTPGO_ACTION_PATH`, is the full filesystem path, can be empty for some ssh commands
|
||||
- `SFTPGO_ACTION_TARGET`, full filesystem path, non-empty for `rename` `SFTPGO_ACTION` and for some SSH commands
|
||||
- `SFTPGO_ACTION_VIRTUAL_PATH`, virtual path, seen by SFTPGo users
|
||||
- `SFTPGO_ACTION_VIRTUAL_TARGET`, virtual target path, seen by SFTPGo users
|
||||
- `SFTPGO_ACTION_SSH_CMD`, non-empty for `ssh_cmd` `SFTPGO_ACTION`
|
||||
- `SFTPGO_ACTION_FILE_SIZE`, non-zero for `pre-upload`,`upload`, `download` and `delete` actions if the file size is greater than `0`
|
||||
- `SFTPGO_ACTION_FS_PROVIDER`, `0` for local filesystem, `1` for S3 backend, `2` for Google Cloud Storage (GCS) backend, `3` for Azure Blob Storage backend, `4` for local encrypted backend, `5` for SFTP backend
|
||||
- `SFTPGO_ACTION_BUCKET`, non-empty for S3, GCS and Azure backends
|
||||
- `SFTPGO_ACTION_ENDPOINT`, non-empty for S3, SFTP and Azure backend if configured
|
||||
- `SFTPGO_ACTION_STATUS`, integer. Status for `upload`, `download` and `ssh_cmd` actions. 1 means no error, 2 means a generic error occurred, 3 means quota exceeded error
|
||||
- `SFTPGO_ACTION_PROTOCOL`, string. Possible values are `SSH`, `SFTP`, `SCP`, `FTP`, `DAV`, `HTTP`, `HTTPShare`, `OIDC`, `DataRetention`, `EventAction`
|
||||
- `SFTPGO_ACTION_IP`, the action was executed from this IP address
|
||||
- `SFTPGO_ACTION_SESSION_ID`, string. Unique protocol session identifier. For stateless protocols such as HTTP the session id will change for each request
|
||||
- `SFTPGO_ACTION_OPEN_FLAGS`, integer. File open flags, can be non-zero for `pre-upload` action. If `SFTPGO_ACTION_FILE_SIZE` is greater than zero and `SFTPGO_ACTION_OPEN_FLAGS&512 == 0` the target file will not be truncated
|
||||
- `SFTPGO_ACTION_TIMESTAMP`, int64. Event timestamp as nanoseconds since epoch
|
||||
|
||||
Global environment variables are cleared, for security reasons, when the script is called. You can set additional environment variables in the "command" configuration section.
|
||||
The program must finish within 30 seconds.
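
For instance, a minimal sketch of such a program that just appends one line per event to a log file (the log path is an arbitrary choice):

```shell
#!/bin/bash

# the SFTPGO_ACTION_* variables are provided by SFTPGo as described above
echo "$(date -u +%FT%TZ) ${SFTPGO_ACTION} user=${SFTPGO_ACTION_USERNAME} path=${SFTPGO_ACTION_PATH} status=${SFTPGO_ACTION_STATUS}" >> /var/log/sftpgo-actions.log
```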
|
||||
|
||||
If the `hook` defines an HTTP URL then this URL will be invoked as HTTP POST. The request body will contain a JSON serialized struct with the following fields:
|
||||
|
||||
- `action`, string
|
||||
- `username`, string
|
||||
- `path`, string
|
||||
- `target_path`, string, included for `rename` action and `sftpgo-copy` SSH command
|
||||
- `virtual_path`, string, virtual path, seen by SFTPGo users
|
||||
- `virtual_target_path`, string, virtual target path, seen by SFTPGo users
|
||||
- `ssh_cmd`, string, included for `ssh_cmd` action
|
||||
- `file_size`, int64, included for `pre-upload`, `upload`, `download`, `delete` actions if the file size is greater than `0`
|
||||
- `fs_provider`, integer, `0` for local filesystem, `1` for S3 backend, `2` for Google Cloud Storage (GCS) backend, `3` for Azure Blob Storage backend, `4` for local encrypted backend, `5` for SFTP backend, `6` for HTTPFs backend
|
||||
- `bucket`, string, included for S3, GCS and Azure backends
|
||||
- `endpoint`, string, included for S3, SFTP and Azure backend if configured
|
||||
- `status`, integer. Status for `upload`, `download` and `ssh_cmd` actions. 1 means no error, 2 means a generic error occurred, 3 means quota exceeded error
|
||||
- `protocol`, string. Possible values are `SSH`, `SFTP`, `SCP`, `FTP`, `DAV`, `HTTP`, `HTTPShare`, `OIDC`, `DataRetention`, `EventAction`
|
||||
- `ip`, string. The action was executed from this IP address
|
||||
- `session_id`, string. Unique protocol session identifier. For stateless protocols such as HTTP the session id will change for each request
|
||||
- `open_flags`, integer. File open flags, can be non-zero for `pre-upload` action. If `file_size` is greater than zero and `file_size&512 == 0` the target file will not be truncated
|
||||
- `timestamp`, int64. Event timestamp as nanoseconds since epoch
|
||||
|
||||
The HTTP hook will use the global configuration for HTTP clients and will respect the retry configurations.
|
||||
|
||||
The `pre-*` actions are always executed synchronously while the other ones are asynchronous. You can specify the actions to run synchronously via the `execute_sync` configuration key. Executing an action synchronously means that SFTPGo will not return a result code to the client (which is waiting for it) until your hook has completed its execution. If your hook takes a long time to complete this could cause a timeout on the client side, which wouldn't receive the server response in a timely manner and would eventually drop the connection.
|
||||
If you add the `upload` action to the `execute_sync` configuration key, SFTPGo will try to delete the uploaded file and return an error to the client if the hook fails. A hook is considered failed if the external command completes with a non-zero exit status or the HTTP notification response code is other than `200` (or the HTTP endpoint cannot be reached or times out).
|
||||
After a hook failure, the uploaded size is removed from the quota if SFTPGo is able to remove the file.
|
||||
|
||||
## Provider events
|
||||
|
||||
The `actions` struct inside the `data_provider` configuration section allows you to configure actions on data provider objects add, update, delete.
|
||||
|
||||
The supported object types are:
|
||||
|
||||
- `user`
|
||||
- `folder`
|
||||
- `group`
|
||||
- `admin`
|
||||
- `api_key`
|
||||
- `share`
|
||||
- `event_action`
|
||||
- `event_rule`
|
||||
|
||||
Actions will not be fired for internal updates, such as the last login or the user quota fields, or after external authentication.
|
||||
|
||||
If the `hook` defines a path to an external program, then this program can read the following environment variables:
|
||||
|
||||
- `SFTPGO_PROVIDER_ACTION`, supported values are `add`, `update`, `delete`
|
||||
- `SFTPGO_PROVIDER_OBJECT_TYPE`, affected object type
|
||||
- `SFTPGO_PROVIDER_OBJECT_NAME`, unique identifier for the affected object, for example username or key id
|
||||
- `SFTPGO_PROVIDER_USERNAME`, the username that executed the action. There are two special usernames: `__self__` identifies a user/admin that updates itself and `__system__` identifies an action that does not have an explicit executor associated with it, for example users/admins can be added/updated by loading them from initial data
|
||||
- `SFTPGO_PROVIDER_IP`, the action was executed from this IP address
|
||||
- `SFTPGO_PROVIDER_TIMESTAMP`, event timestamp as nanoseconds since epoch
|
||||
- `SFTPGO_PROVIDER_OBJECT`, object serialized as JSON with sensitive fields removed
|
||||
|
||||
Global environment variables are cleared, for security reasons, when the script is called. You can set additional environment variables in the "command" configuration section.
|
||||
The program must finish within 15 seconds.
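
For instance, a minimal sketch of such a program that forwards provider changes to syslog (using `logger`, an arbitrary choice):

```shell
#!/bin/bash

# the SFTPGO_PROVIDER_* variables are provided by SFTPGo as described above
logger -t sftpgo-provider "${SFTPGO_PROVIDER_ACTION} ${SFTPGO_PROVIDER_OBJECT_TYPE} ${SFTPGO_PROVIDER_OBJECT_NAME} by ${SFTPGO_PROVIDER_USERNAME} from ${SFTPGO_PROVIDER_IP}"
```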
|
||||
|
||||
If the `hook` defines an HTTP URL then this URL will be invoked as HTTP POST. The action, username, ip, object_type and object_name and timestamp are added to the query string, for example `<hook>?action=update&username=admin&ip=127.0.0.1&object_type=user&object_name=user1×tamp=1633860803249`, and the full object is sent serialized as JSON inside the POST body with sensitive fields removed.
|
||||
|
||||
The HTTP hook will use the global configuration for HTTP clients and will respect the retry configurations.
|
||||
|
||||
The structure for SFTPGo objects can be found within the [OpenAPI schema](../openapi/openapi.yaml).
|
||||
|
||||
## Pub/Sub services
|
||||
|
||||
You can forward SFTPGo events to several publish/subscribe systems using the [sftpgo-plugin-pubsub](https://github.com/sftpgo/sftpgo-plugin-pubsub). The notifiers SFTPGo plugins are not suitable for interactive actions such as `pre-*` events. Their scope is to simply forward events to external services. A custom hook is a better choice if you need to react to `pre-*` events.
|
||||
|
||||
## Database services
|
||||
|
||||
You can store SFTPGo events in database systems using the [sftpgo-plugin-eventstore](https://github.com/sftpgo/sftpgo-plugin-eventstore) and you can search the stored events using the [sftpgo-plugin-eventsearch](https://github.com/sftpgo/sftpgo-plugin-eventsearch).
|
||||
docs/dare.md (new file, 20 lines)
@@ -0,0 +1,20 @@
|
||||
# Data At Rest Encryption (DARE)
|
||||
|
||||
SFTPGo supports data at-rest encryption via its `cryptfs` virtual file system, in this mode SFTPGo transparently encrypts and decrypts data (to/from the local disk) on-the-fly during uploads and/or downloads, making sure that the files at-rest on the server-side are always encrypted.
|
||||
|
||||
Data At Rest Encryption is supported for local filesystem, for cloud storage backends you can use their server side encryption feature.
|
||||
|
||||
So, because of the way it works, as described here above, when you set up an encrypted filesystem for a user you need to make sure it points to an empty path/directory (that has no files in it). Otherwise, it would try to decrypt existing files that are not encrypted in the first place and fail.
|
||||
|
||||
The SFTPGo's `cryptfs` is a tiny wrapper around [sio](https://github.com/minio/sio) therefore data is encrypted and authenticated using `AES-256-GCM` or `ChaCha20-Poly1305`. AES-GCM will be used if the CPU provides hardware support for it.
|
||||
|
||||
The only required configuration parameter is a `passphrase`, each file will be encrypted using an unique, randomly generated secret key derived from the given passphrase using the HMAC-based Extract-and-Expand Key Derivation Function (HKDF) as defined in [RFC 5869](http://tools.ietf.org/html/rfc5869). It is important to note that the per-object encryption key is never stored anywhere: it is derived from your `passphrase` and a randomly generated initialization vector just before encryption/decryption. The initialization vector is stored with the file.
|
||||
|
||||
The passphrase is stored encrypted itself according to your [KMS configuration](./kms.md) and is required to decrypt any file encrypted using an encryption key derived from it.
|
||||
|
||||
The encrypted filesystem has some limitations compared to the local, unencrypted, one:
|
||||
|
||||
- Resuming uploads is not supported.
|
||||
- Opening a file for both reading and writing at the same time is not supported and so clients that require advanced filesystem-like features such as `sshfs` are not supported too.
|
||||
- Truncate is not supported.
|
||||
- System commands such as `git` or `rsync` are not supported: they will store data unencrypted.
|
||||
docs/data-retention-hook.md (new file, 32 lines)
@@ -0,0 +1,32 @@
|
||||
# Data retention hook
|
||||
|
||||
This hook runs after a data retention check completes, if you specify `Hook` among the notification methods when you start the check.
|
||||
|
||||
The `data_retention_hook` can be defined as the absolute path of your program or an HTTP URL.
|
||||
|
||||
If the hook defines an external program it can read the following environment variable:
|
||||
|
||||
- `SFTPGO_DATA_RETENTION_RESULT`, it contains the data retention check result JSON serialized.
|
||||
|
||||
Global environment variables are cleared, for security reasons, when the script is called. You can set additional environment variables in the "command" configuration section.
|
||||
The program must finish within 20 seconds.
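
For example, a minimal sketch of an external program that extracts a couple of fields from the result (assuming `jq` is available; the field names are those from the schema shown below):

```shell
#!/bin/bash

USERNAME=$(echo "$SFTPGO_DATA_RETENTION_RESULT" | jq -r '.username')
DELETED=$(echo "$SFTPGO_DATA_RETENTION_RESULT" | jq -r '.total_deleted_files')
echo "retention check for ${USERNAME}: ${DELETED} files deleted" >> /var/log/sftpgo-retention.log
```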
|
||||
|
||||
If the hook defines an HTTP URL then this URL will be invoked as HTTP POST and the POST body contains the data retention check result JSON serialized.
|
||||
|
||||
The HTTP hook will use the global configuration for HTTP clients and will respect the retry configurations.
|
||||
|
||||
Here is the schema for the data retention check result:
|
||||
|
||||
- `username`, string
|
||||
- `status`, int. 1 means success, 0 error
|
||||
- `start_time`, int64. Start time as UNIX timestamp in milliseconds
|
||||
- `total_deleted_files`, int. Total number of files deleted
|
||||
- `total_deleted_size`, int64. Total size deleted in bytes
|
||||
- `elapsed`, int64. Elapsed time in milliseconds
|
||||
- `details`, list of struct with details for each checked path, each struct contains the following fields:
|
||||
- `path`, string
|
||||
- `retention`, int. Retention time in hours
|
||||
- `deleted_files`, int. Number of files deleted
|
||||
- `deleted_size`, int64. Size deleted in bytes
|
||||
- `info`, string. Informative, non fatal, message if any. For example it can indicate that the check was skipped because the user doesn't have the required permissions on this path
|
||||
- `error`, string. Error message if any
|
||||
docs/defender.md (new file, 69 lines)
@@ -0,0 +1,69 @@
|
||||
# Defender
|
||||
|
||||
The built-in `defender` allows you to configure an auto-blocking policy for SFTPGo and thus helps to prevent DoS (Denial of Service) and brute force password guessing.
|
||||
|
||||
If enabled it will protect SFTP, HTTP, FTP and WebDAV services and it will automatically block hosts (IP addresses) that continually fail to log in or attempt to connect.
|
||||
|
||||
You can configure a score for the following events:
|
||||
|
||||
- `score_valid`, defines the score for valid login attempts, eg. user accounts that exist. Default `1`.
|
||||
- `score_invalid`, defines the score for invalid login attempts, eg. non-existent user accounts or client disconnected for inactivity without authentication attempts. Default `2`.
|
||||
- `score_limit_exceeded`, defines the score for hosts that exceeded the configured rate limits or the configured max connections per host. Default `3`.
|
||||
|
||||
And then you can configure:
|
||||
|
||||
- `observation_time`, defines the time window, in minutes, for tracking client errors.
|
||||
- `threshold`, defines the threshold value before banning a host.
|
||||
- `ban_time`, defines the time to ban a client, as minutes
|
||||
|
||||
So a host is banned, for `ban_time` minutes, if the sum of the scores has exceeded the defined threshold during the last `observation_time` minutes.
|
||||
|
||||
By defining the scores, each type of event can be weighted. Let's see an example: if `score_invalid` is 3 and `threshold` is 8, a host will be banned after 3 login attempts with a non-existent user within the configured `observation_time`.
|
||||
|
||||
A banned IP has no score, it makes no sense to accumulate host events in memory for an already banned IP address.
|
||||
|
||||
If an already banned client tries to log in again, its ban time will be incremented according to the `ban_time_increment` configuration.
|
||||
|
||||
The `ban_time_increment` is calculated as a percentage of `ban_time`, so if `ban_time` is 30 minutes and `ban_time_increment` is 50 the host will be banned for an additional 15 minutes. You can also specify values greater than 100 for `ban_time_increment` if you want to increase the penalty for already banned hosts.
|
||||
|
||||
SFTPGo can store host scores and banned hosts in memory or within the configured data provider according to the `driver` set in the `defender` configuration section. The available drivers are `memory` and `provider`.
|
||||
The `provider` driver is useful if you want to share the defender data across multiple SFTPGo instances and it requires a shared or distributed data provider: `MySQL`, `PostgreSQL` and `CockroachDB` are supported.
|
||||
If you set the `provider` driver, the defender implementation may do many database queries (at least one query every time a new client connects to check if it is banned), if you have a single SFTPGo instance the `memory` driver is recommended.
|
||||
|
||||
For the `memory` driver, you can limit the memory usage using the `entries_soft_limit` and `entries_hard_limit` configuration keys.
|
||||
|
||||
The `provider` driver will periodically clean up expired hosts and events.
|
||||
|
||||
Using the REST API you can:
|
||||
|
||||
- list hosts within the defender's lists
|
||||
- remove hosts from the defender's lists
|
||||
|
||||
The `defender` can also load a permanent block list and/or a safe list of ip addresses/networks from a file:
|
||||
|
||||
- `safelist_file`, defines the path to a file containing a list of ip addresses and/or networks to never ban.
|
||||
- `blocklist_file`, defines the path to a file containing a list of ip addresses and/or networks to always ban.
|
||||
|
||||
These lists must be stored as JSON conforming to the following schema:
|
||||
|
||||
- `addresses`, list of strings. Each string must be a valid IPv4/IPv6 address.
|
||||
- `networks`, list of strings. Each string must be a valid IPv4/IPv6 CIDR address.
|
||||
|
||||
Here is a small example:
|
||||
|
||||
```json
|
||||
{
|
||||
"addresses":[
|
||||
"192.0.2.1",
|
||||
"2001:db8::68"
|
||||
],
|
||||
"networks":[
|
||||
"192.0.3.0/24",
|
||||
"2001:db8:1234::/48"
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
Small lists can also be set using the `safelist`/`blocklist` configuration parameters and/or using environment variables. These lists will be merged with the ones specified via files, if any, so that you can set both.
|
||||
|
||||
These lists are always loaded in memory (even if you use the `provider` driver) for faster lookups. The REST API queries "live" data and not these lists.
|
||||
docs/dynamic-user-mod.md (new file, 63 lines)
@@ -0,0 +1,63 @@
|
||||
# Dynamic user creation or modification
|
||||
|
||||
Dynamic user creation or modification is supported via an external program or an HTTP URL that can be invoked just before the user login.
|
||||
To enable dynamic user modification, you must set the absolute path of your program or an HTTP URL using the `pre_login_hook` key in your configuration file.
|
||||
|
||||
The external program can read the following environment variables to get info about the user trying to login:
|
||||
|
||||
- `SFTPGO_LOGIND_USER`, it contains the user trying to login serialized as JSON. A JSON serialized user id equal to zero means the user does not exist inside SFTPGo
|
||||
- `SFTPGO_LOGIND_METHOD`, possible values are: `password`, `publickey`, `keyboard-interactive`, `TLSCertificate`, `IDP` (external identity provider) or empty if the hook is executed after receiving the FTP `USER` command
|
||||
- `SFTPGO_LOGIND_IP`, ip address of the user trying to login
|
||||
- `SFTPGO_LOGIND_PROTOCOL`, possible values are `SSH`, `FTP`, `DAV`, `HTTP`, `OIDC` (OpenID Connect)
|
||||
|
||||
The program must write, on its standard output:
|
||||
|
||||
- an empty string (or no response at all) if the user should not be created/updated
|
||||
- or the SFTPGo user, JSON serialized, if you want to create or update the given user
|
||||
|
||||
If the hook is an HTTP URL then it will be invoked as HTTP POST. The login method, the used protocol and the ip address of the user trying to login are added to the query string, for example `<http_url>?login_method=password&ip=1.2.3.4&protocol=SSH`.
|
||||
The request body will contain the user trying to login serialized as JSON. If no modification is needed the HTTP response code must be 204, otherwise the response code must be 200 and the response body a valid SFTPGo user serialized as JSON.
|
||||
|
||||
Actions defined for user's updates will not be executed in this case and an already logged in user with the same username will not be disconnected, you have to handle these things yourself.
|
||||
|
||||
The JSON response can include only the fields to update instead of the full user. For example, if you want to disable the user, you can return a response like this:
|
||||
|
||||
```json
|
||||
{"status": 0}
|
||||
```
|
||||
|
||||
Please note that if you want to create a new user, the pre-login hook response must include all the mandatory user fields.
|
||||
|
||||
The program hook must finish within 30 seconds, the HTTP hook will use the global configuration for HTTP clients.
|
||||
|
||||
If an error happens while executing the hook then login will be denied.
|
||||
|
||||
"Dynamic user creation or modification" and "External Authentication" are mutually exclusive, they are quite similar, the difference is that "External Authentication" returns an already authenticated user while using "Dynamic users modification" you simply create or update a user. The authentication will be checked inside SFTPGo.
|
||||
In other words while using "External Authentication" the external program receives the credentials of the user trying to login (for example the cleartext password) and it needs to validate them. While using "Dynamic users modification" the pre-login program receives the user stored inside the dataprovider (it includes the hashed password if any) and it can modify it, after the modification SFTPGo will check the credentials of the user trying to login.
|
||||
|
||||
For SFTPGo users (not admins) authenticating using an external identity provider such as OpenID Connect, the pre-login hook will be executed after a successful authentication against the external IDP so that you can create/update the SFTPGo user matching the one authenticated against the identity provider. In this case the pre-login hook is executed even if an external authentication hook is defined.
|
||||
|
||||
If you enable FTP and allow both encrypted and plain text sessions, the pre-login hook is executed after receiving the FTP `USER` command. If you return an SFTPGo user with `ftp_security` set to `1` and the FTP session is not encrypted, it will be terminated. In this case the pre-login hook is executed even if an external authentication hook is defined.
|
||||
|
||||
You can disable the hook on a per-user basis.
|
||||
|
||||
Let's see a very basic example. Our sample program will grant access to the existing user `test_user` only in the time range 10:00-18:00. Other users will not be modified since the program will terminate with no output.
|
||||
|
||||
```shell
|
||||
#!/bin/bash
|
||||
|
||||
CURRENT_TIME=`date +%H:%M`
|
||||
if [[ "$SFTPGO_LOGIND_USER" =~ "\"test_user\"" ]]
|
||||
then
|
||||
if [[ $CURRENT_TIME > "18:00" || $CURRENT_TIME < "10:00" ]]
|
||||
then
|
||||
echo '{"status":0}'
|
||||
else
|
||||
echo '{"status":1}'
|
||||
fi
|
||||
fi
|
||||
```
|
||||
|
||||
Please note that this is a demo program and it might not work in all cases. For example, the username should be obtained by parsing the JSON serialized user and not by searching the username inside the JSON as shown here.
|
||||
|
||||
The structure for SFTPGo users can be found within the [OpenAPI schema](../openapi/openapi.yaml).
|
||||
docs/eventmanager.md (new file, 74 lines)
@@ -0,0 +1,74 @@
|
||||
# Event Manager
|
||||
|
||||
The Event Manager allows an administrator to configure HTTP notifications, command executions, email notifications and to carry out certain server operations based on server events or schedules.
|
||||
|
||||
The following actions are supported:
|
||||
|
||||
- `HTTP notification`. You can notify an HTTP/S endpoint via GET, POST, PUT methods. You can define custom headers, query parameters and a body for POST and PUT requests. Placeholders are supported for username, body, header and query parameter values.
|
||||
- `Command execution`. You can launch custom commands passing parameters via environment variables. Placeholders are supported for environment variable values.
|
||||
- `Email notification`. Placeholders are supported in subject and body. The email will be sent as plain text. For this action to work you have to configure an SMTP server in the SFTPGo configuration file.
|
||||
- `Backup`. A backup will be saved in the configured backup directory. The backup will contain the week day and the hour in the file name.
|
||||
- `User quota reset`. The quota used by users will be updated based on current usage.
|
||||
- `Folder quota reset`. The quota used by virtual folders will be updated based on current usage.
|
||||
- `Transfer quota reset`. The transfer quota values will be reset to `0`.
|
||||
- `Data retention check`. You can define per-folder retention policies.
|
||||
- `Metadata check`. A metadata check requires a metadata plugin such as [this one](https://github.com/sftpgo/sftpgo-plugin-metadata) and removes the metadata associated with missing items (for example objects deleted outside SFTPGo). A metadata check does nothing if no metadata plugin is installed or external metadata are not supported for a filesystem.
|
||||
- `Filesystem`. For these actions, the required permissions are automatically granted. This is the same as executing the actions from an SFTP client and the same restrictions applies. Supported actions:
|
||||
- `Rename`. You can rename one or more files or directories.
|
||||
- `Delete`. You can delete one or more files and directories.
|
||||
- `Create directories`. You can create one or more directories including sub-directories.
|
||||
- `Path exists`. Check if the specified path exists.
|
||||
- `Compress paths`. You can compress (currently as zip) one or more files and directories.
|
||||
|
||||
The following placeholders are supported:
|
||||
|
||||
- `{{Name}}`. Username, folder name or admin username for provider events.
|
||||
- `{{Event}}`. Event name, for example `upload`, `download` for filesystem events or `add`, `update` for provider events.
|
||||
- `{{Status}}`. Status for `upload`, `download` and `ssh_cmd` events. 1 means no error, 2 means a generic error occurred, 3 means quota exceeded error.
|
||||
- `{{StatusString}}`. Status as string. Possible values "OK", "KO".
|
||||
- `{{ErrorString}}`. Error details. Replaced with an empty string if no errors occur.
|
||||
- `{{VirtualPath}}`. Path seen by SFTPGo users, for example `/adir/afile.txt`.
|
||||
- `{{VirtualDirPath}}`. Parent directory for VirtualPath, for example if VirtualPath is "/adir/afile.txt", VirtualDirPath is "/adir".
|
||||
- `{{FsPath}}`. Full filesystem path, for example `/user/homedir/adir/afile.txt` or `C:/data/user/homedir/adir/afile.txt` on Windows.
|
||||
- `{{ObjectName}}`. File/directory name, for example `afile.txt` or provider object name.
|
||||
- `{{ObjectType}}`. Object type for provider events: `user`, `group`, `admin`, etc.
|
||||
- `{{VirtualTargetPath}}`. Virtual target path for renames.
|
||||
- `{{VirtualTargetDirPath}}`. Parent directory for VirtualTargetPath.
|
||||
- `{{TargetName}}`. Target object name for renames.
|
||||
- `{{FsTargetPath}}`. Full filesystem target path for renames.
|
||||
- `{{FileSize}}`. File size.
|
||||
- `{{Protocol}}`. Used protocol, for example `SFTP`, `FTP`.
|
||||
- `{{IP}}`. Client IP address.
|
||||
- `{{Timestamp}}`. Event timestamp as nanoseconds since epoch.
|
||||
- `{{ObjectData}}`. Provider object data serialized as JSON with sensitive fields removed.
|
||||
- `{{RetentionReports}}`. Data retention reports as zip compressed CSV files. Supported as email attachment, file path for multipart HTTP request and as single parameter for HTTP requests body. Data retention reports contain details on the number of files deleted and the total size deleted for each folder.
|
||||
|
||||
Event rules are based on the premise that an event occurs. To each rule you can associate one or more actions.
|
||||
The following trigger events are supported:
|
||||
|
||||
- `Filesystem events`, for example `upload`, `download` etc.
|
||||
- `Provider events`, for example `add`, `update`, `delete` user or other resources.
|
||||
- `Schedules`. The scheduler uses UTC time.
|
||||
- `IP Blocked`, this event can be generated if you enable the [defender](./defender.md).
|
||||
- `Certificate`, this event is generated when a certificate is renewed using the built-in ACME protocol. Both successful and failed renewals are notified.
|
||||
|
||||
You can further restrict a rule by specifying additional conditions that must be met before the rule’s actions are taken. For example you can react to uploads only if they are performed by a particular user or using a specified protocol.
|
||||
|
||||
Actions such as user quota reset, transfer quota reset, data retention check, folder quota reset and filesystem events are executed for all matching users if the trigger is a schedule or for the affected user if the trigger is a provider event or a filesystem action.
|
||||
|
||||
Actions are executed in a sequential order except for sync actions that are executed before the others. For each action associated to a rule you can define the following settings:
|
||||
|
||||
- `Stop on failure`, the next action will not be executed if the current one fails.
|
||||
- `Failure action`, this action will be executed only if at least another one fails. :warning: Please note that a failure action isn't executed if the event fails, for example if a download fails the main action is executed. The failure action is executed only if one of the non-failure actions associated to a rule fails.
|
||||
- `Execute sync`, for upload events, you can execute the action synchronously. Executing an action synchronously means that SFTPGo will not return a result code to the client (which is waiting for it) until your action has completed its execution. If your action takes a long time to complete this could cause a timeout on the client side, which wouldn't receive the server response in a timely manner and would eventually drop the connection.
|
||||
|
||||
If you are running multiple SFTPGo instances connected to the same data provider, you can choose whether to allow simultaneous execution for scheduled actions.
|
||||
|
||||
Some actions are not supported for some triggers, rules containing incompatible actions are skipped at runtime:
|
||||
|
||||
- `Filesystem events`, folder quota reset cannot be executed, we don't have a direct way to get the affected folder.
|
||||
- `Provider events`, user quota reset, transfer quota reset, data retention check and filesystem actions can be executed only if a user is updated. They will be executed for the affected user. Folder quota reset can be executed only for folders. Filesystem actions are not executed for `delete` user events because the action is executed after the user has been deleted.
|
||||
- `IP Blocked`, user quota reset, folder quota reset, transfer quota reset, data retention check and filesystem actions cannot be executed, we only have an IP.
|
||||
- `Certificate`, user quota reset, folder quota reset, transfer quota reset, data retention check and filesystem actions cannot be executed.
|
||||
- `Email with attachments` are supported for filesystem events and provider events if a user is added/updated. We need a user to get the files to attach.
|
||||
- `HTTP multipart requests with files as attachments` are supported for filesystem events and provider events if a user is added/updated. We need a user to get the files to attach.
|
||||
81
docs/external-auth.md
Normal file
@@ -0,0 +1,81 @@
|
||||
# External Authentication
|
||||
|
||||
To enable external authentication, you must set the absolute path of your authentication program or an HTTP URL using the `external_auth_hook` key in your configuration file.
|
||||
|
||||
The external program can read the following environment variables to get info about the user trying to authenticate:
|
||||
|
||||
- `SFTPGO_AUTHD_USERNAME`
|
||||
- `SFTPGO_AUTHD_USER`, SFTPGo user serialized as JSON, empty if the user does not exist within the data provider
|
||||
- `SFTPGO_AUTHD_IP`
|
||||
- `SFTPGO_AUTHD_PROTOCOL`, possible values are `SSH`, `FTP`, `DAV`, `HTTP`
|
||||
- `SFTPGO_AUTHD_PASSWORD`, not empty for password authentication
|
||||
- `SFTPGO_AUTHD_PUBLIC_KEY`, not empty for public key authentication
|
||||
- `SFTPGO_AUTHD_KEYBOARD_INTERACTIVE`, not empty for keyboard interactive authentication
|
||||
- `SFTPGO_AUTHD_TLS_CERT`, TLS client certificate PEM encoded. Not empty for TLS certificate authentication
|
||||
|
||||
Global environment variables are cleared, for security reasons, when the script is called. You can set additional environment variables in the "command" configuration section.
|
||||
The program can inspect the SFTPGo user, if it exists, using the `SFTPGO_AUTHD_USER` environment variable.
|
||||
The program must write, on its standard output:
|
||||
|
||||
- a valid SFTPGo user serialized as JSON if the authentication succeeds. The user will be added/updated within the defined data provider
|
||||
- an empty string, or no response at all, if authentication succeeds and the existing SFTPGo user does not need to be updated. This means that the credentials already stored in SFTPGo must match those used for the current authentication.
|
||||
- a user with an empty username if the authentication fails
|
||||
|
||||
If the hook is an HTTP URL then it will be invoked as HTTP POST. The request body will contain a JSON serialized struct with the following fields:
|
||||
|
||||
- `username`
|
||||
- `ip`
|
||||
- `user`, SFTPGo user, omitted if the user does not exist within the data provider
|
||||
- `protocol`, possible values are `SSH`, `FTP`, `DAV`, `HTTP`
|
||||
- `password`, not empty for password authentication
|
||||
- `public_key`, not empty for public key authentication
|
||||
- `keyboard_interactive`, not empty for keyboard interactive authentication
|
||||
- `tls_cert`, TLS client certificate PEM encoded. Not empty for TLS certificate authentication
|
||||
|
||||
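For illustration only, a request body for a password authentication attempt might look like the following sketch built from the fields above. All values are placeholders, the `user` field is omitted because in this example the account does not yet exist within the data provider, and fields that are empty for the current authentication step are left out:

```json
{
  "username": "test_user",
  "ip": "203.0.113.10",
  "protocol": "SSH",
  "password": "secret"
}
```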
If authentication succeeds the HTTP response code must be 200 and the response body can be:
|
||||
|
||||
- a valid SFTPGo user serialized as JSON. The user will be added/updated within the defined data provider
|
||||
- empty, the existing SFTPGo user does not need to be updated. Please note that in versions 2.0.x and earlier an empty response was interpreted as an authentication error
|
||||
|
||||
If the authentication fails the HTTP response code must be != 200 or the returned SFTPGo user must have an empty username.
|
||||
|
||||
If the hook returns a user who is only allowed to authenticate using public key + password (multi step authentication), your hook will be invoked for each authentication step, so it must validate the public key and password separately. SFTPGo will ensure that the client follows the allowed sequence.
|
||||
|
||||
Actions defined for users added/updated will not be executed in this case and an already logged in user with the same username will not be disconnected.
|
||||
|
||||
The program hook must finish within 30 seconds; the HTTP hook timeout uses the global configuration for HTTP clients.
|
||||
|
||||
This method is slower than built-in authentication, but it's very flexible as anyone can easily write their own authentication hooks.
|
||||
You can also restrict the authentication scope for the hook using the `external_auth_scope` configuration key:
|
||||
|
||||
- `0` means all supported authentication scopes. The external hook will be used for password, public key, keyboard interactive and TLS certificate authentication
|
||||
- `1` means passwords only
|
||||
- `2` means public keys only
|
||||
- `4` means keyboard interactive only
|
||||
- `8` means TLS certificate only
|
||||
|
||||
You can combine the scopes. For example, 3 means password and public key, 5 means password and keyboard interactive, and so on.
|
||||
|
||||
Let's see a very basic example. Our sample authentication program will only accept user `test_user` with any password or public key.
|
||||
|
||||
```shell
|
||||
#!/bin/sh
|
||||
|
||||
if test "$SFTPGO_AUTHD_USERNAME" = "test_user"; then
|
||||
echo '{"status":1,"username":"test_user","expiration_date":0,"home_dir":"/tmp/test_user","uid":0,"gid":0,"max_sessions":0,"quota_size":0,"quota_files":100000,"permissions":{"/":["*"],"/somedir":["list","download"]},"upload_bandwidth":0,"download_bandwidth":0,"filters":{"allowed_ip":[],"denied_ip":[]},"public_keys":[]}'
|
||||
else
|
||||
echo '{"username":""}'
|
||||
fi
|
||||
```
|
||||
|
||||
The structure for SFTPGo users can be found within the [OpenAPI schema](../openapi/openapi.yaml).
|
||||
|
||||
You can instruct SFTPGo to cache the external user by setting an `external_auth_cache_time` in the user object returned by your hook. The `external_auth_cache_time` defines the cache time in seconds.
|
||||
|
||||
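For example, a hook that wants its result cached for ten minutes could return its usual user JSON with the caching field added. The fragment below is a minimal, hypothetical sketch showing only a few relevant fields:

```json
{
  "status": 1,
  "username": "test_user",
  "home_dir": "/tmp/test_user",
  "permissions": {
    "/": ["*"]
  },
  "external_auth_cache_time": 600
}
```

While the cached entry is valid, SFTPGo does not need to invoke the hook again for this user.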
You can disable the hook on a per-user basis so that you can mix external and internal users.
|
||||
|
||||
An example authentication program that authenticates against an LDAP server can be found in the [ldapauth](../examples/ldapauth) directory inside the source tree.
|
||||
|
||||
An example server, usable as an HTTP authentication hook, that authenticates against an LDAP server can be found in the [ldapauthserver](../examples/ldapauthserver) directory inside the source tree.
|
||||
|
||||
If you have an external authentication hook that could be useful to others too, please let us know and/or send a pull request.
|
||||
532
docs/full-configuration.md
Normal file
@@ -0,0 +1,532 @@
|
||||
# Configuring SFTPGo
|
||||
|
||||
<details><summary><font size=5> Command line option</font></summary>
|
||||
|
||||
The SFTPGo executable can be used this way:
|
||||
|
||||
```console
|
||||
Usage:
|
||||
sftpgo [command]
|
||||
|
||||
Available Commands:
|
||||
acme Obtain TLS certificates from ACME-based CAs like Let's Encrypt
|
||||
gen A collection of useful generators
|
||||
help Help about any command
|
||||
initprovider Initialize and/or updates the configured data provider
|
||||
portable Serve a single directory/account
|
||||
resetprovider Reset the configured provider, any data will be lost
|
||||
revertprovider Revert the configured data provider to a previous version
|
||||
serve Start the SFTPGo service
|
||||
smtptest Test the SMTP configuration
|
||||
startsubsys Use sftpgo as SFTP file transfer subsystem
|
||||
|
||||
Flags:
|
||||
-h, --help help for sftpgo
|
||||
-v, --version
|
||||
|
||||
Use "sftpgo [command] --help" for more information about a command
|
||||
```
|
||||
|
||||
The `serve` command supports the following flags:
|
||||
|
||||
- `--config-dir` string. Location of the config dir. This directory is used as the base for files with a relative path, eg. the private keys for the SFTP server or the SQLite database if you use SQLite as data provider. The configuration file, if not explicitly set, is looked for in this dir. We support reading from JSON, TOML, YAML, HCL, envfile and Java properties config files. The default config file name is `sftpgo` and therefore `sftpgo.json`, `sftpgo.yaml` and so on are searched. The default value is the working directory (".") or the value of `SFTPGO_CONFIG_DIR` environment variable.
|
||||
- `--config-file` string. This flag explicitly defines the path, name and extension of the config file. It must be an absolute path or a path relative to the configuration directory. The specified file name must have a supported extension (JSON, YAML, TOML, HCL or Java properties). The default value is empty or the value of `SFTPGO_CONFIG_FILE` environment variable.
|
||||
- `--grace-time`, integer. Graceful shutdown is an option to initiate a shutdown without abrupt cancellation of the currently ongoing client-initiated transfer sessions. This grace time defines the number of seconds allowed for existing transfers to get completed before shutting down. 0 means disabled. The default value is `0` or the value of `SFTPGO_GRACE_TIME` environment variable. A graceful shutdown is triggered by an interrupt signal or by a service `stop` request on Windows, if a grace time is configured.
|
||||
- `--loaddata-from` string. Load users and folders from this file. The file must be specified as absolute path and it must contain a backup obtained using the `dumpdata` REST API or compatible content. The default value is empty or the value of `SFTPGO_LOADDATA_FROM` environment variable.
|
||||
- `--loaddata-clean` boolean. Determine if the loaddata-from file should be removed after a successful load. Default `false` or the value of `SFTPGO_LOADDATA_CLEAN` environment variable (1 or `true`, 0 or `false`).
|
||||
- `--loaddata-mode`, integer. Restore mode for data to load. 0 means new users are added, existing users are updated. 1 means new users are added, existing users are not modified. Default 1 or the value of `SFTPGO_LOADDATA_MODE` environment variable.
|
||||
- `--loaddata-scan`, integer. Quota scan mode after data load. 0 means no quota scan. 1 means quota scan. 2 means scan quota if the user has quota restrictions. Default 0 or the value of `SFTPGO_LOADDATA_QUOTA_SCAN` environment variable.
|
||||
- `--log-compress` boolean. Determine if the rotated log files should be compressed using gzip. Default `false` or the value of `SFTPGO_LOG_COMPRESS` environment variable (1 or `true`, 0 or `false`). It is unused if `log-file-path` is empty.
|
||||
- `--log-file-path` string. Location for the log file, default "sftpgo.log" or the value of `SFTPGO_LOG_FILE_PATH` environment variable. Leave empty to write logs to the standard error.
|
||||
- `--log-max-age` int. Maximum number of days to retain old log files. Default 28 or the value of `SFTPGO_LOG_MAX_AGE` environment variable. It is unused if `log-file-path` is empty.
|
||||
- `--log-max-backups` int. Maximum number of old log files to retain. Default 5 or the value of `SFTPGO_LOG_MAX_BACKUPS` environment variable. It is unused if `log-file-path` is empty.
|
||||
- `--log-max-size` int. Maximum size in megabytes of the log file before it gets rotated. Default 10 or the value of `SFTPGO_LOG_MAX_SIZE` environment variable. It is unused if `log-file-path` is empty.
|
||||
- `--log-level` string. Set the log level. Supported values: `debug`, `info`, `warn`, `error`. Default `debug` or the value of `SFTPGO_LOG_LEVEL` environment variable.
|
||||
- `--log-utc-time` boolean. Enable UTC time for logging. Default `false` or the value of `SFTPGO_LOG_UTC_TIME` environment variable (1 or `true`, 0 or `false`)
|
||||
|
||||
Log file can be rotated on demand sending a `SIGUSR1` signal on Unix based systems and using the command `sftpgo service rotatelogs` on Windows.
|
||||
|
||||
If you don't configure any private host key, the daemon will use `id_rsa`, `id_ecdsa` and `id_ed25519` in the configuration directory. If these files don't exist, the daemon will attempt to autogenerate them. The server supports any private key format supported by [`crypto/ssh`](https://github.com/golang/crypto/blob/master/ssh/keys.go#L33).
|
||||
|
||||
The `gen` command allows you to generate completion scripts for your shell and man pages.
|
||||
|
||||
</details>
|
||||
|
||||
<details><summary><font size=5> Configuration file</font></summary>
|
||||
|
||||
The configuration file contains the following sections:
|
||||
|
||||
- **"common"**, configuration parameters shared among all the supported protocols
|
||||
- `idle_timeout`, integer. Time in minutes after which an idle client will be disconnected. 0 means disabled. Default: 15
|
||||
- `upload_mode` integer. 0 means standard: the files are uploaded directly to the requested path. 1 means atomic: files are uploaded to a temporary path and renamed to the requested path when the client ends the upload. Atomic mode avoids problems such as a web server that serves partial files when the files are being uploaded. In atomic mode, if there is an upload error, the temporary file is deleted and so the requested upload path will not contain a partial file. 2 means atomic with resume support: same as atomic but if there is an upload error, the temporary file is renamed to the requested path and not deleted. This way, a client can reconnect and resume the upload. Ignored for cloud-based storage backends (uploads are always atomic and resume is not supported for these backends) and for SFTP backend if buffering is enabled. Default: 0
|
||||
- `actions`, struct. It contains the command to execute and/or the HTTP URL to notify and the trigger conditions. See [Custom Actions](./custom-actions.md) for more details
|
||||
- `execute_on`, list of strings. Valid values are `pre-download`, `download`, `pre-upload`, `upload`, `pre-delete`, `delete`, `rename`, `mkdir`, `rmdir`, `ssh_cmd`. Leave empty to disable actions.
|
||||
- `execute_sync`, list of strings. Actions, defined in the `execute_on` list above, to be performed synchronously. The `pre-*` actions are always executed synchronously while the other ones are asynchronous. Executing an action synchronously means that SFTPGo will not return a result code to the client (which is waiting for it) until your hook has completed its execution. Leave empty to execute only the defined `pre-*` hook synchronously
|
||||
- `hook`, string. Absolute path to the command to execute or HTTP URL to notify.
|
||||
- `setstat_mode`, integer. 0 means "normal mode": requests for changing permissions, owner/group and access/modification times are executed. 1 means "ignore mode": requests for changing permissions, owner/group and access/modification times are silently ignored. 2 means "ignore mode if not supported": requests for changing permissions and owner/group are silently ignored for cloud filesystems and executed for local/SFTP filesystem. Requests for changing modification times are always executed for local/SFTP filesystems and are executed for cloud based filesystems if the target is a file and there is a metadata plugin available. A metadata plugin can be found [here](https://github.com/sftpgo/sftpgo-plugin-metadata).
|
||||
- `temp_path`, string. Defines the path for temporary files such as those used for atomic uploads or file pipes. If you set this option you must make sure that the defined path exists, is accessible for writing by the user running SFTPGo, and is on the same filesystem as the users home directories otherwise the renaming for atomic uploads will become a copy and therefore may take a long time. The temporary files are not namespaced. The default is generally fine. Leave empty for the default.
|
||||
- `proxy_protocol`, integer. Support for [HAProxy PROXY protocol](https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt). If you are running SFTPGo behind a proxy server such as HAProxy, AWS ELB or NGINX, you can enable the proxy protocol. It provides a convenient way to safely transport connection information such as a client's address across multiple layers of NAT or TCP proxies to get the real client IP address instead of the proxy IP. Both protocol versions 1 and 2 are supported. If the proxy protocol is enabled in SFTPGo then you have to enable the protocol in your proxy configuration too. For example, for HAProxy, add `send-proxy` or `send-proxy-v2` to each server configuration line. The PROXY protocol is supported for SSH/SFTP and FTP/S. The following modes are supported:
|
||||
- 0, disabled
|
||||
- 1, enabled. If the upstream IP is not allowed to send a proxy header the header will be ignored. Using this mode does not mean that we can accept connections with and without the proxy header. We always try to read the proxy header and we ignore it if the upstream IP is not allowed to send a proxy header
|
||||
- 2, required. If the upstream IP is not allowed to send a proxy header the connection will be rejected
|
||||
- `proxy_allowed`, list of IP addresses and IP ranges allowed to send the proxy header:
|
||||
- If `proxy_protocol` is set to 1 and we receive a proxy header from an IP that is not in the list then the connection will be accepted and the header will be ignored
|
||||
- If `proxy_protocol` is set to 2 and we receive a proxy header from an IP that is not in the list then the connection will be rejected
|
||||
- `startup_hook`, string. Absolute path to an external program or an HTTP URL to invoke as soon as SFTPGo starts. If you define an HTTP URL it will be invoked using a `GET` request. Please note that SFTPGo services may not yet be available when this hook is run. Leave empty to disable
|
||||
- `post_connect_hook`, string. Absolute path to the command to execute or HTTP URL to notify. See [Post-connect hook](./post-connect-hook.md) for more details. Leave empty to disable
|
||||
- `post_disconnect_hook`, string. Absolute path to the command to execute or HTTP URL to notify. See [Post-disconnect hook](./post-disconnect-hook.md) for more details. Leave empty to disable
|
||||
- `data_retention_hook`, string. Absolute path to the command to execute or HTTP URL to notify. See [Data retention hook](./data-retention-hook.md) for more details. Leave empty to disable
|
||||
- `max_total_connections`, integer. Maximum number of concurrent client connections. 0 means unlimited. Default: 0.
|
||||
- `max_per_host_connections`, integer. Maximum number of concurrent client connections from the same host (IP). If the defender is enabled, exceeding this limit will generate `score_limit_exceeded` events and thus hosts that repeatedly exceed the max allowed connections can be automatically blocked. 0 means unlimited. Default: 20.
|
||||
- `whitelist_file`, string. Path to a file containing a list of IP addresses and/or networks to allow. Only the listed IPs/networks can access the configured services, all other client connections will be dropped before they even try to authenticate. The whitelist must be a JSON file with the same structure documented for the [defenders's list](./defender.md). The whitelist can be reloaded on demand sending a `SIGHUP` signal on Unix based systems and a `paramchange` request to the running service on Windows. Default: "".
|
||||
- `allow_self_connections`, integer. Allow users on this instance to use other users/virtual folders on this instance as storage backend. Enable this setting if you know what you are doing. Set to `1` to enable. Default: `0`.
|
||||
- `defender`, struct containing the defender configuration. See [Defender](./defender.md) for more details.
|
||||
- `enabled`, boolean. Default `false`.
|
||||
- `driver`, string. Supported drivers are `memory` and `provider`. The `provider` driver will use the configured data provider to store defender events and it is supported for `MySQL`, `PostgreSQL` and `CockroachDB` data providers. Using the `provider` driver you can share the defender events among multiple SFTPGo instances. For a single instance the `memory` driver will be much faster. Default: `memory`.
|
||||
- `ban_time`, integer. Ban time in minutes.
|
||||
- `ban_time_increment`, integer. Ban time increment, as a percentage, if a banned host tries to connect again.
|
||||
- `threshold`, integer. Threshold value for banning a client.
|
||||
- `score_invalid`, integer. Score for invalid login attempts, eg. non-existent user accounts or client disconnected for inactivity without authentication attempts.
|
||||
- `score_valid`, integer. Score for valid login attempts, eg. user accounts that exist.
|
||||
- `score_limit_exceeded`, integer. Score for hosts that exceeded the configured rate limits or the maximum, per-host, allowed connections.
|
||||
- `observation_time`, integer. Defines the time window, in minutes, for tracking client errors. A host is banned if it has exceeded the defined threshold during the last observation time minutes.
|
||||
- `entries_soft_limit`, integer. Ignored for `provider` driver. Default: 100.
|
||||
- `entries_hard_limit`, integer. The number of banned IPs and host scores kept in memory will vary between the soft and hard limit for `memory` driver. If you use the `provider` driver, this setting will limit the number of entries to return when you ask for the entire host list from the defender. Default: 150.
|
||||
- `safelist_file`, string. Path to a file containing a list of ip addresses and/or networks to never ban.
|
||||
- `blocklist_file`, string. Path to a file containing a list of ip addresses and/or networks to always ban. The lists can be reloaded on demand sending a `SIGHUP` signal on Unix based systems and a `paramchange` request to the running service on Windows. A host that is already banned will not be automatically unbanned if you put it inside the safe list; you have to unban it using the REST API.
|
||||
- `safelist`, list of IP addresses and/or IP ranges and/or networks to never ban. Invalid entries will be silently ignored. For large lists prefer `safelist_file`. `safelist` and `safelist_file` will be merged so that you can set both.
|
||||
- `blocklist`, list of IP addresses and/or IP ranges and/or networks to always ban. Invalid entries will be silently ignored. For large lists prefer `blocklist_file`. `blocklist` and `blocklist_file` will be merged so that you can set both.
|
||||
- `rate_limiters`, list of structs containing the rate limiters configuration. Take a look [here](./rate-limiting.md) for more details. Each struct has the following fields:
|
||||
- `average`, integer. Average defines the maximum rate allowed. 0 means disabled. Default: 0
|
||||
- `period`, integer. Period defines the period as milliseconds. The rate is actually defined by dividing average by period. Default: 1000 (1 second).
|
||||
- `burst`, integer. Burst defines the maximum number of requests allowed to go through in the same arbitrarily small period of time. Default: 1
|
||||
- `type`, integer. 1 means a global rate limiter, independent from the source host. 2 means a per-ip rate limiter. Default: 2
|
||||
- `protocols`, list of strings. Available protocols are `SSH`, `FTP`, `DAV`, `HTTP`. By default all supported protocols are enabled
|
||||
- `allow_list`, list of IP addresses and IP ranges excluded from rate limiting. Default: empty
|
||||
- `generate_defender_events`, boolean. If `true`, when the defender is enabled and this is not a global rate limiter, a new defender event will be generated each time the configured limit is exceeded. Default `false`
|
||||
- `entries_soft_limit`, integer.
|
||||
- `entries_hard_limit`, integer. The number of per-ip rate limiters kept in memory will vary between the soft and hard limit
|
||||
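As a rough sketch of how the `common` options above fit together in `sftpgo.json`, the fragment below enables the PROXY protocol, the defender and a per-IP rate limiter. Only a few of the documented keys are shown and every value is just an example, not a recommendation:

```json
{
  "common": {
    "idle_timeout": 15,
    "upload_mode": 0,
    "proxy_protocol": 1,
    "proxy_allowed": ["10.0.0.0/8"],
    "max_total_connections": 0,
    "defender": {
      "enabled": true,
      "driver": "memory",
      "ban_time": 30,
      "threshold": 15,
      "observation_time": 30
    },
    "rate_limiters": [
      {
        "average": 100,
        "period": 1000,
        "burst": 1,
        "type": 2,
        "protocols": ["SSH", "FTP", "DAV", "HTTP"]
      }
    ]
  }
}
```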
- **"acme"**, Automatic Certificate Management Environment (ACME) protocol configuration. To obtain the certificates the first time you have to configure the ACME protocol and execute the `sftpgo acme run` command. The SFTPGo service will take care of the automatic renewal of certificates for the configured domains.
|
||||
- `domains`, list of domains for which to obtain certificates. If a single certificate is to be valid for multiple domains specify the names separated by commas, for example: `example.com,www.example.com`. An empty list means that ACME protocol is disabled. Default: empty.
|
||||
- `email`, string. Email used for registration and recovery contact. Default: empty.
|
||||
- `key_type`, string. Key type to use for private keys. Supported values: `2048` (RSA 2048), `4096` (RSA 4096), `8192` (RSA 8192), `P256` (EC 256), `P384` (EC 384). Default: `4096`
|
||||
- `certs_path`, string. Directory, absolute or relative to the configuration directory, to use for storing certificates and related data.
|
||||
- `ca_endpoint`, string. Default: `https://acme-v02.api.letsencrypt.org/directory`.
|
||||
- `renew_days`, integer. The number of days left on a certificate to renew it. Default: `30`.
|
||||
- `http01_challenge`, configuration for `HTTP-01` challenge type, the following fields are supported:
|
||||
- `port`, integer. This challenge is expected to run on port `80`. If you set a port other than `80` you have to proxy the path `/.well-known/acme-challenge` from the port `80` to the configured port. Default: `80`.
|
||||
- `proxy_header`, string. Validate against this HTTP header when solving HTTP based challenges behind a reverse proxy. Empty means `Host`. Default: empty.
|
||||
- `webroot`, string. Set the absolute path to the webroot folder to use for HTTP based challenges to write directly in a file in `.well-known/acme-challenge`. Setting a `webroot` disables the built-in server (the `port` setting is ignored) and expects the given directory to be publicly served, on port `80`, with access to `.well-known/acme-challenge`. If `webroot` is empty and `port` is `0` the `HTTP-01` challenge is disabled. Default: empty.
|
||||
- `tls_alpn01_challenge`, configuration for `TLS-ALPN-01` challenge type, the following fields are supported:
|
||||
- `port`, integer. This challenge is expected to run on port `443`. `0` means `TLS-ALPN-01` is disabled. Default: `0`.
|
||||
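To make the ACME settings more concrete, here is a hypothetical `acme` fragment using the built-in `HTTP-01` challenge on port 80; the domain and email are placeholders:

```json
{
  "acme": {
    "domains": ["example.com,www.example.com"],
    "email": "admin@example.com",
    "key_type": "4096",
    "ca_endpoint": "https://acme-v02.api.letsencrypt.org/directory",
    "renew_days": 30,
    "http01_challenge": {
      "port": 80,
      "proxy_header": "",
      "webroot": ""
    },
    "tls_alpn01_challenge": {
      "port": 0
    }
  }
}
```

Remember that the certificates must be obtained the first time by running `sftpgo acme run`; afterwards the service renews them automatically.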
- **"sftpd"**, the configuration for the SFTP server
|
||||
- `bindings`, list of structs. Each struct has the following fields:
|
||||
- `port`, integer. The port used for serving SFTP requests. 0 means disabled. Default: 2022
|
||||
- `address`, string. Leave blank to listen on all available network interfaces. Default: ""
|
||||
- `apply_proxy_config`, boolean. If enabled the common proxy configuration, if any, will be applied. Default `true`
|
||||
- `max_auth_tries` integer. Maximum number of authentication attempts permitted per connection. If set to a negative number, the number of attempts is unlimited. If set to zero, the number of attempts is limited to 6.
|
||||
- `banner`, string. Identification string used by the server. Leave empty to use the default banner. Default `SFTPGo_<version>`, for example `SSH-2.0-SFTPGo_0.9.5`
|
||||
- `host_keys`, list of strings. It contains the daemon's private host keys. Each host key can be defined as a path relative to the configuration directory or an absolute one. If empty, the daemon will search or try to generate `id_rsa`, `id_ecdsa` and `id_ed25519` keys inside the configuration directory. If you configure absolute paths to files named `id_rsa`, `id_ecdsa` and/or `id_ed25519` then SFTPGo will try to generate these keys using the default settings.
|
||||
- `host_certificates`, list of strings. Public host certificates. Each certificate can be defined as a path relative to the configuration directory or an absolute one. Certificate's public key must match a private host key otherwise it will be silently ignored. Default: empty.
|
||||
- `host_key_algorithms`, list of strings. Public key algorithms that the server will accept for host key authentication. The supported values are: `rsa-sha2-512-cert-v01@openssh.com`, `rsa-sha2-256-cert-v01@openssh.com`, `ssh-rsa-cert-v01@openssh.com`, `ssh-dss-cert-v01@openssh.com`, `ecdsa-sha2-nistp256-cert-v01@openssh.com`, `ecdsa-sha2-nistp384-cert-v01@openssh.com`, `ecdsa-sha2-nistp521-cert-v01@openssh.com`, `ssh-ed25519-cert-v01@openssh.com`, `ecdsa-sha2-nistp256`, `ecdsa-sha2-nistp384`, `ecdsa-sha2-nistp521`, `rsa-sha2-512`, `rsa-sha2-256`, `ssh-rsa`, `ssh-dss`, `ssh-ed25519`. Default values: `rsa-sha2-512-cert-v01@openssh.com`, `rsa-sha2-256-cert-v01@openssh.com`, `ecdsa-sha2-nistp256-cert-v01@openssh.com`, `ecdsa-sha2-nistp384-cert-v01@openssh.com`, `ecdsa-sha2-nistp521-cert-v01@openssh.com`, `ssh-ed25519-cert-v01@openssh.com`, `ecdsa-sha2-nistp256`, `ecdsa-sha2-nistp384`, `ecdsa-sha2-nistp521`, `rsa-sha2-512`, `rsa-sha2-256`, `ssh-ed25519`.
|
||||
- `moduli`, list of strings. Diffie-Hellman moduli files. Each moduli file can be defined as a path relative to the configuration directory or an absolute one. If set, `diffie-hellman-group-exchange-sha256` and `diffie-hellman-group-exchange-sha1` KEX algorithms will be available, `diffie-hellman-group-exchange-sha256` will be enabled by default if you don't explicitly set KEXs. Default: empty.
|
||||
- `kex_algorithms`, list of strings. Available KEX (Key Exchange) algorithms in preference order. Leave empty to use default values. The supported values are: `curve25519-sha256`, `curve25519-sha256@libssh.org`, `ecdh-sha2-nistp256`, `ecdh-sha2-nistp384`, `ecdh-sha2-nistp521`, `diffie-hellman-group14-sha256`, `diffie-hellman-group16-sha512`, `diffie-hellman-group18-sha512`, `diffie-hellman-group14-sha1`, `diffie-hellman-group1-sha1`. Default values: `curve25519-sha256`, `curve25519-sha256@libssh.org`, `ecdh-sha2-nistp256`, `ecdh-sha2-nistp384`, `ecdh-sha2-nistp521`, `diffie-hellman-group14-sha256`. SHA512 based KEXs are disabled by default because they are slow. If you set one or more moduli files, `diffie-hellman-group-exchange-sha256` and `diffie-hellman-group-exchange-sha1` will be available.
|
||||
- `ciphers`, list of strings. Allowed ciphers in preference order. Leave empty to use default values. The supported values are: `aes128-gcm@openssh.com`, `aes256-gcm@openssh.com`, `chacha20-poly1305@openssh.com`, `aes128-ctr`, `aes192-ctr`, `aes256-ctr`, `aes128-cbc`, `aes192-cbc`, `aes256-cbc`, `3des-cbc`, `arcfour256`, `arcfour128`, `arcfour`. Default values: `aes128-gcm@openssh.com`, `aes256-gcm@openssh.com`, `chacha20-poly1305@openssh.com`, `aes128-ctr`, `aes192-ctr`, `aes256-ctr`. Please note that the ciphers disabled by default are insecure, you should expect that an active attacker can recover plaintext if you enable them.
|
||||
- `macs`, list of strings. Available MAC (message authentication code) algorithms in preference order. Leave empty to use default values. The supported values are: `hmac-sha2-256-etm@openssh.com`, `hmac-sha2-256`, `hmac-sha2-512-etm@openssh.com`, `hmac-sha2-512`, `hmac-sha1`, `hmac-sha1-96`. Default values: `hmac-sha2-256-etm@openssh.com`, `hmac-sha2-256`.
|
||||
- `trusted_user_ca_keys`, list of public keys paths of certificate authorities that are trusted to sign user certificates for authentication. The paths can be absolute or relative to the configuration directory.
|
||||
- `revoked_user_certs_file`, path to a file containing the revoked user certificates. The path can be absolute or relative to the configuration directory. It must contain a JSON list with the public key fingerprints of the revoked certificates. Example content: `["SHA256:bsBRHC/xgiqBJdSuvSTNpJNLTISP/G356jNMCRYC5Es","SHA256:119+8cL/HH+NLMawRsJx6CzPF1I3xC+jpM60bQHXGE8"]`. The revocation list can be reloaded on demand sending a `SIGHUP` signal on Unix based systems and a `paramchange` request to the running service on Windows. Default: "".
|
||||
- `login_banner_file`, path to the login banner file. The contents of the specified file, if any, are sent to the remote user before authentication is allowed. It can be a path relative to the config dir or an absolute one. Leave empty to disable login banner.
|
||||
- `enabled_ssh_commands`, list of enabled SSH commands. `*` enables all supported commands. More information can be found [here](./ssh-commands.md).
|
||||
- `keyboard_interactive_authentication`, boolean. This setting specifies whether keyboard interactive authentication is allowed. If no keyboard interactive hook or auth plugin is defined the default is to prompt for the user password and then the one time authentication code, if defined. Default: `false`.
|
||||
- `keyboard_interactive_auth_hook`, string. Absolute path to an external program or an HTTP URL to invoke for keyboard interactive authentication. See [Keyboard Interactive Authentication](./keyboard-interactive.md) for more details.
|
||||
- `password_authentication`, boolean. Set to false to disable password authentication. This setting will disable multi-step authentication method using public key + password too. It is useful for public key only configurations if you need to manage old clients that will not attempt to authenticate with public keys if the password login method is advertised. Default: `true`.
|
||||
- `folder_prefix`, string. Virtual root folder prefix to include in all file operations (ex: `/files`). The virtual paths used for per-directory permissions, file patterns etc. must not include the folder prefix. The prefix is only applied to SFTP requests (in SFTP server mode), SCP and other SSH commands will be automatically disabled if you configure a prefix. The prefix is ignored while running as OpenSSH's SFTP subsystem. This setting can help some specific migrations from SFTP servers based on OpenSSH and it is not recommended for general usage. Default: blank.
|
||||
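The following is a minimal, hypothetical `sftpd` fragment built from the keys above: one binding on the default port, auto-managed host keys and password authentication left enabled. Values are examples only:

```json
{
  "sftpd": {
    "bindings": [
      {
        "port": 2022,
        "address": "",
        "apply_proxy_config": true
      }
    ],
    "max_auth_tries": 0,
    "host_keys": [],
    "keyboard_interactive_authentication": false,
    "password_authentication": true
  }
}
```

Leaving `host_keys` empty lets the daemon search for, or generate, `id_rsa`, `id_ecdsa` and `id_ed25519` in the configuration directory as described above.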
- **"ftpd"**, the configuration for the FTP server
|
||||
- `bindings`, list of structs. Each struct has the following fields:
|
||||
- `port`, integer. The port used for serving FTP requests. 0 means disabled. Default: 0.
|
||||
- `address`, string. Leave blank to listen on all available network interfaces. Default: "".
|
||||
- `apply_proxy_config`, boolean. If enabled the common proxy configuration, if any, will be applied. Please note that we expect the proxy header on control and data connections. Default `true`.
|
||||
- `tls_mode`, integer. 0 means accept both cleartext and encrypted sessions. 1 means TLS is required for both control and data connection. 2 means implicit TLS. Do not enable this blindly, please check that a proper TLS config is in place if you set `tls_mode` to a value different from 0.
|
||||
- `certificate_file`, string. Binding specific TLS certificate. This can be an absolute path or a path relative to the config dir.
|
||||
- `certificate_key_file`, string. Binding specific private key matching the above certificate. This can be an absolute path or a path relative to the config dir. If not set the global ones will be used, if any.
|
||||
- `min_tls_version`, integer. Defines the minimum version of TLS to be enabled. `12` means TLS 1.2 (and therefore TLS 1.2 and TLS 1.3 will be enabled),`13` means TLS 1.3. Default: `12`.
|
||||
- `force_passive_ip`, ip address. External IP address to expose for passive connections. Leave empty to autodetect. If not empty, it must be a valid IPv4 address. Default: "".
|
||||
- `passive_ip_overrides`, list of structs that allow returning a different passive IP based on the client IP address. Each struct has the following fields:
|
||||
- `networks`, list of strings. Each string must define a network in CIDR notation, for example 192.168.1.0/24.
|
||||
- `ip`, string. Passive IP to return if the client IP address belongs to the defined networks. Empty means autodetect.
|
||||
- `client_auth_type`, integer. Set to `1` to require a client certificate and verify it. Set to `2` to request a client certificate during the TLS handshake and verify it if given, in this mode the client is allowed not to send a certificate. At least one certification authority must be defined in order to verify client certificates. If no certification authority is defined, this setting is ignored. Default: 0.
|
||||
- `tls_cipher_suites`, list of strings. List of supported cipher suites for TLS version 1.2. If empty, a default list of secure cipher suites is used, with a preference order based on hardware performance. Note that TLS 1.3 ciphersuites are not configurable. The supported ciphersuites names are defined [here](https://github.com/golang/go/blob/master/src/crypto/tls/cipher_suites.go#L52). Any invalid name will be silently ignored. The order matters, the ciphers listed first will be the preferred ones. Default: empty.
|
||||
- `passive_connections_security`, integer. Defines the security checks for passive data connections. Set to `0` to require matching peer IP addresses of control and data connection. Set to `1` to disable any checks. Please note that if you run the FTP service behind a proxy you must enable the proxy protocol for control and data connections. Default: `0`.
|
||||
- `active_connections_security`, integer. Defines the security checks for active data connections. The supported values are the same as described for `passive_connections_security`. Please note that by disabling the security checks you will make the FTP service vulnerable to bounce attacks on active data connections, so change the default value only if you are on a trusted/internal network. Default: `0`.
|
||||
- `debug`, boolean. If enabled any FTP command will be logged. This will generate a lot of logs. Enable only if you are investigating a client compatibility issue or something similar. You shouldn't leave this setting enabled for production servers. Default `false`.
|
||||
- `banner`, string. Greeting banner displayed when a connection first comes in. Leave empty to use the default banner. Default `SFTPGo <version> ready`, for example `SFTPGo 1.0.0-dev ready`.
|
||||
- `banner_file`, path to the banner file. The contents of the specified file, if any, are displayed when someone connects to the server. It can be a path relative to the config dir or an absolute one. If set, it overrides the banner string provided by the `banner` option. Leave empty to disable.
|
||||
- `active_transfers_port_non_20`, boolean. Do not impose the port 20 for active data transfers. Enabling this option allows running SFTPGo with fewer privileges. Default: `true`.
|
||||
- `passive_port_range`, struct containing the keys `start` and `end`. Port range for data connections. Random if not specified. Default range is 50000-50100.
|
||||
- `disable_active_mode`, boolean. Set to `true` to disable active FTP, default `false`.
|
||||
- `enable_site`, boolean. Set to true to enable the FTP SITE command. We support `chmod` and `symlink` if SITE support is enabled. Default `false`
|
||||
- `hash_support`, integer. Set to `1` to enable FTP commands that allow calculating the hash value of files. These FTP commands will be enabled: `HASH`, `XCRC`, `MD5/XMD5`, `XSHA/XSHA1`, `XSHA256`, `XSHA512`. Please keep in mind that to calculate the hash we need to read the whole file, for remote backends this means downloading the file, for the encrypted backend this means decrypting the file. Default `0`.
|
||||
- `combine_support`, integer. Set to 1 to enable support for the non standard `COMB` FTP command. Combine is only supported for local filesystem, for cloud backends it has no advantage as it will download the partial files and will upload the combined one. Cloud backends natively support multipart uploads. Default `0`.
|
||||
- `certificate_file`, string. Certificate for FTPS. This can be an absolute path or a path relative to the config dir.
|
||||
- `certificate_key_file`, string. Private key matching the above certificate. This can be an absolute path or a path relative to the config dir. A certificate and the private key are required to enable explicit and implicit TLS. Certificate and key files can be reloaded on demand sending a `SIGHUP` signal on Unix based systems and a `paramchange` request to the running service on Windows.
|
||||
- `ca_certificates`, list of strings. Set of root certificate authorities to be used to verify client certificates.
|
||||
- `ca_revocation_lists`, list of strings. Set of revocation lists, one for each root CA, to be used to check if a client certificate has been revoked. The revocation lists can be reloaded on demand sending a `SIGHUP` signal on Unix based systems and a `paramchange` request to the running service on Windows.
|
||||
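As an illustrative sketch, a hypothetical `ftpd` fragment with one TLS-enabled binding and a fixed passive port range could look like this; certificate paths and ports are placeholders:

```json
{
  "ftpd": {
    "bindings": [
      {
        "port": 2121,
        "address": "",
        "tls_mode": 1,
        "certificate_file": "ftpd.crt",
        "certificate_key_file": "ftpd.key",
        "force_passive_ip": ""
      }
    ],
    "passive_port_range": {
      "start": 50000,
      "end": 50100
    },
    "disable_active_mode": false,
    "hash_support": 0
  }
}
```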
- **"webdavd"**, the configuration for the WebDAV server, more info [here](./webdav.md)
|
||||
- `bindings`, list of structs. Each struct has the following fields:
|
||||
- `port`, integer. The port used for serving WebDAV requests. 0 means disabled. Default: 0.
|
||||
- `address`, string. Leave blank to listen on all available network interfaces. Default: "".
|
||||
- `enable_https`, boolean. Set to `true` and provide both a certificate and a key file to enable HTTPS connection for this binding. Default `false`.
|
||||
- `certificate_file`, string. Binding specific TLS certificate. This can be an absolute path or a path relative to the config dir.
|
||||
- `certificate_key_file`, string. Binding specific private key matching the above certificate. This can be an absolute path or a path relative to the config dir. If not set the global ones will be used, if any.
|
||||
- `min_tls_version`, integer. Defines the minimum version of TLS to be enabled. `12` means TLS 1.2 (and therefore TLS 1.2 and TLS 1.3 will be enabled),`13` means TLS 1.3. Default: `12`.
|
||||
- `client_auth_type`, integer. Set to `1` to require a client certificate and verify it. Set to `2` to request a client certificate during the TLS handshake and verify it if given, in this mode the client is allowed not to send a certificate. At least one certification authority must be defined in order to verify client certificates. If no certification authority is defined, this setting is ignored. Default: 0.
|
||||
- `tls_cipher_suites`, list of strings. List of supported cipher suites for TLS version 1.2. If empty, a default list of secure cipher suites is used, with a preference order based on hardware performance. Note that TLS 1.3 ciphersuites are not configurable. The supported ciphersuites names are defined [here](https://github.com/golang/go/blob/master/src/crypto/tls/cipher_suites.go#L52). Any invalid name will be silently ignored. The order matters, the ciphers listed first will be the preferred ones. Default: empty.
|
||||
- `prefix`, string. Prefix for WebDAV resources, if empty WebDAV resources will be available at the `/` URI. If defined it must be an absolute URI, for example `/dav`. Default: "".
|
||||
- `proxy_allowed`, list of IP addresses and IP ranges allowed to set client IP proxy header such as `X-Forwarded-For`. Any client IP proxy headers, if set on requests from a connection address not in this list, will be silently ignored. Default: empty.
|
||||
- `client_ip_proxy_header`, string. Defines the allowed client IP proxy header such as `X-Forwarded-For`, `X-Real-IP` etc. Default: empty
|
||||
- `client_ip_header_depth`, integer. Some client IP headers such as `X-Forwarded-For` can contain multiple IP addresses; this setting defines the position to trust starting from the right. For example if we have: `10.0.0.1,11.0.0.1,12.0.0.1,13.0.0.1` and the depth is `0`, SFTPGo will use `13.0.0.1` as client IP, if depth is `1`, `12.0.0.1` will be used and so on. Default: `0`.
|
||||
- `disable_www_auth_header`, boolean. Set to `true` to not add the WWW-Authenticate header after an authentication failure, only the `401` status code will be sent. Default: `false`.
|
||||
- `certificate_file`, string. Certificate for WebDAV over HTTPS. This can be an absolute path or a path relative to the config dir.
|
||||
- `certificate_key_file`, string. Private key matching the above certificate. This can be an absolute path or a path relative to the config dir. A certificate and a private key are required to enable HTTPS connections. Certificate and key files can be reloaded on demand sending a `SIGHUP` signal on Unix based systems and a `paramchange` request to the running service on Windows.
|
||||
- `ca_certificates`, list of strings. Set of root certificate authorities to be used to verify client certificates.
|
||||
- `ca_revocation_lists`, list of strings. Set of revocation lists, one for each root CA, to be used to check if a client certificate has been revoked. The revocation lists can be reloaded on demand sending a `SIGHUP` signal on Unix based systems and a `paramchange` request to the running service on Windows.
|
||||
- `cors` struct containing CORS configuration. SFTPGo uses [Go CORS handler](https://github.com/rs/cors), please refer to upstream documentation for fields meaning and their default values.
|
||||
- `enabled`, boolean, set to true to enable CORS.
|
||||
- `allowed_origins`, list of strings.
|
||||
- `allowed_methods`, list of strings.
|
||||
- `allowed_headers`, list of strings.
|
||||
- `exposed_headers`, list of strings.
|
||||
- `allow_credentials` boolean.
|
||||
- `max_age`, integer.
|
||||
- `options_passthrough`, boolean.
|
||||
- `options_success_status`, integer.
|
||||
- `allow_private_network`, boolean.
|
||||
- `cache` struct containing cache configuration for the authenticated users.
|
||||
- `enabled`, boolean, set to true to enable user caching. Default: true.
|
||||
- `expiration_time`, integer. Expiration time, in minutes, for the cached users. 0 means unlimited. Default: 0.
|
||||
- `max_size`, integer. Maximum number of users to cache. 0 means unlimited. Default: 50.
|
||||
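A minimal, hypothetical `webdavd` fragment with a single plain-HTTP binding serving resources under the `/dav` prefix might look like the following; the port and prefix are examples only:

```json
{
  "webdavd": {
    "bindings": [
      {
        "port": 8090,
        "address": "",
        "enable_https": false,
        "prefix": "/dav"
      }
    ],
    "cors": {
      "enabled": false
    }
  }
}
```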
- **"data_provider"**, the configuration for the data provider
|
||||
- `driver`, string. Supported drivers are `sqlite`, `mysql`, `postgresql`, `cockroachdb`, `bolt`, `memory`
|
||||
- `name`, string. Database name. For driver `sqlite` this can be the database name relative to the config dir or the absolute path to the SQLite database. For driver `memory` this is the (optional) path relative to the config dir or the absolute path to the provider dump, obtained using the `dumpdata` REST API, to load. This dump will be loaded at startup and can be reloaded on demand sending a `SIGHUP` signal on Unix based systems and a `paramchange` request to the running service on Windows. The `memory` provider will not modify the provided file so quota usage and last login will not be persisted. If you plan to use a SQLite database over a `cifs` network share (this is not recommended in general) you must use the `nobrl` mount option otherwise you will get the `database is locked` error. Some users reported that the `bolt` provider works fine over `cifs` shares.
|
||||
- `host`, string. Database host. For `postgresql` and `cockroachdb` drivers you can specify multiple hosts separated by commas. Leave empty for drivers `sqlite`, `bolt` and `memory`
|
||||
- `port`, integer. Database port. Leave empty for drivers `sqlite`, `bolt` and `memory`
|
||||
- `username`, string. Database user. Leave empty for drivers `sqlite`, `bolt` and `memory`
|
||||
- `password`, string. Database password. Leave empty for drivers `sqlite`, `bolt` and `memory`
|
||||
- `sslmode`, integer. Used for drivers `mysql` and `postgresql`. 0 disable TLS connections, 1 require TLS, 2 set TLS mode to `verify-ca` for driver `postgresql` and `skip-verify` for driver `mysql`, 3 set TLS mode to `verify-full` for driver `postgresql` and `preferred` for driver `mysql`
|
||||
- `root_cert`, string. Path to the root certificate authority used to verify that the server certificate was signed by a trusted CA
|
||||
- `disable_sni`, boolean. Allows opting out of Server Name Indication (SNI) for TLS connections. Default: `false`
|
||||
- `target_session_attrs`, string. This is a `postgresql` and `cockroachdb` specific option. It determines whether the session must have certain properties to be acceptable. It's typically used in combination with multiple host names to select the first acceptable alternative among several hosts. Supported values: `any`, `read-write`, `read-only`, `primary`, `standby`, `prefer-standby`. If empty, `any` is assumed.
|
||||
- `client_cert`, string. Path to the client certificate for two-way TLS authentication
|
||||
- `client_key`, string. Path to the client key for two-way TLS authentication
|
||||
- `connection_string`, string. Provide a custom database connection string. If not empty, this connection string will be used instead of building one using the previous parameters. Leave empty for drivers `bolt` and `memory`
|
||||
- `sql_tables_prefix`, string. Prefix for SQL tables
|
||||
- `track_quota`, integer. Set the preferred mode to track users quota between the following choices:
|
||||
- 0, disable quota tracking. REST API to scan users home directories/virtual folders and update quota will do nothing
|
||||
- 1, quota is updated each time a user uploads or deletes a file, even if the user has no quota restrictions
|
||||
- 2, quota is updated each time a user uploads or deletes a file, but only for users with quota restrictions and for virtual folders. With this configuration, the `quota scan` and `folder_quota_scan` REST API can still be used to periodically update space usage for users without quota restrictions and for folders
|
||||
- `delayed_quota_update`, integer. This configuration parameter defines the number of seconds to accumulate quota updates. If there are a lot of closely spaced uploads, accumulating quota updates can save you many queries to the data provider. If you want to track quotas, a scheduled quota update is recommended in any case: the stored quota may be incorrect for several reasons, such as an unexpected shutdown while uploading files, temporary provider failures, files copied outside of SFTPGo, and so on. You could use the [quotascan example](../examples/quotascan) as a starting point. 0 means immediate quota update.
|
||||
- `pool_size`, integer. Sets the maximum number of open connections for `mysql` and `postgresql` driver. Default 0 (unlimited)
|
||||
- `users_base_dir`, string. Users default base directory. If no home dir is defined while adding a new user, and this value is a valid absolute path, then the user home dir will be automatically defined as the path obtained joining the base dir and the username
|
||||
- `actions`, struct. It contains the command to execute and/or the HTTP URL to notify and the trigger conditions. See [Custom Actions](./custom-actions.md) for more details
|
||||
- `execute_on`, list of strings. Valid values are `add`, `update`, `delete`. `update` action will not be fired for internal updates such as the last login or the user quota fields.
|
||||
- `execute_for`, list of strings. Defines the provider objects that trigger the action. Valid values are `user`, `folder`, `group`, `admin`, `api_key`, `share`, `event_action`, `event_rule`.
|
||||
- `hook`, string. Absolute path to the command to execute or HTTP URL to notify.
|
||||
- `external_auth_hook`, string. Absolute path to an external program or an HTTP URL to invoke for users authentication. See [External Authentication](./external-auth.md) for more details. Leave empty to disable.
|
||||
- `external_auth_scope`, integer. 0 means all supported authentication scopes (passwords, public keys and keyboard interactive). 1 means passwords only. 2 means public keys only. 4 means keyboard interactive only. 8 means TLS certificate only. The flags can be combined, for example 6 means public keys and keyboard interactive
|
||||
- `credentials_path`, string. It defines the directory for storing user provided credential files such as Google Cloud Storage credentials. This can be an absolute path or a path relative to the config dir
|
||||
- `pre_login_hook`, string. Absolute path to an external program or an HTTP URL to invoke to modify user details just before the login. See [Dynamic user modification](./dynamic-user-mod.md) for more details. Leave empty to disable.
|
||||
- `post_login_hook`, string. Absolute path to an external program or an HTTP URL to invoke to notify a successful or failed login. See [Post-login hook](./post-login-hook.md) for more details. Leave empty to disable.
|
||||
- `post_login_scope`, defines the scope for the post-login hook. 0 means notify both failed and successful logins. 1 means notify failed logins. 2 means notify successful logins.
|
||||
- `check_password_hook`, string. Absolute path to an external program or an HTTP URL to invoke to check the user provided password. See [Check password hook](./check-password-hook.md) for more details. Leave empty to disable.
|
||||
- `check_password_scope`, defines the scope for the check password hook. 0 means all protocols, 1 means SSH, 2 means FTP, 4 means WebDAV. You can combine the scopes, for example 6 means FTP and WebDAV.
|
||||
- `password_hashing`, struct. It contains the configuration parameters to be used to generate the password hash. SFTPGo can verify passwords in several formats and uses, by default, the `bcrypt` algorithm to hash passwords in plain-text before storing them inside the data provider. These options allow you to customize how the hash is generated.
|
||||
- `argon2_options`, struct containing the options for argon2id hashing algorithm. The `memory` and `iterations` parameters control the computational cost of hashing the password. The higher these figures are, the greater the cost of generating the hash and the longer the runtime. It also follows that the greater the cost will be for any attacker trying to guess the password. If the code is running on a machine with multiple cores, then you can decrease the runtime without reducing the cost by increasing the `parallelism` parameter. This controls the number of threads that the work is spread across.
|
||||
- `memory`, unsigned integer. The amount of memory used by the algorithm (in kibibytes). Default: 65536.
|
||||
- `iterations`, unsigned integer. The number of iterations over the memory. Default: 1.
|
||||
- `parallelism`, unsigned 8 bit integer. The number of threads (or lanes) used by the algorithm. Default: 2.
|
||||
- `bcrypt_options`, struct containing the options for bcrypt hashing algorithm
|
||||
- `cost`, integer between 4 and 31. Default: 10
|
||||
- `algo`, string. Algorithm to use for hashing passwords. Available algorithms: `argon2id`, `bcrypt`. For bcrypt hashing we use the `$2a$` prefix. Default: `bcrypt`
|
||||
- `password_validation` struct. It defines the password validation rules for admins and protocol users.
|
||||
- `admins`, struct. It defines the password validation rules for SFTPGo admins.
|
||||
- `min_entropy`, float. Defines the minimum password entropy. Take a look [here](https://github.com/wagslane/go-password-validator#what-entropy-value-should-i-use) for more details. `0` means disabled, any password will be accepted. Default: `0`.
|
||||
- `users`, struct. It defines the password validation rules for SFTPGo protocol users.
|
||||
- `min_entropy`, float. Default: `0`.
|
||||
- `password_caching`, boolean. Verifying argon2id passwords has a high memory and computational cost and verifying bcrypt passwords has a high computational cost; by enabling in-memory password caching you reduce these costs. Default: `true`
|
||||
- `update_mode`, integer. Defines how the database will be initialized/updated. 0 means automatically. 1 means manually using the initprovider sub-command.
|
||||
- `create_default_admin`, boolean. Before you can use SFTPGo you need to create an admin account. If you open the admin web UI, a setup screen will guide you in creating the first admin account. You can automatically create the first admin account by enabling this setting and setting the environment variables `SFTPGO_DEFAULT_ADMIN_USERNAME` and `SFTPGO_DEFAULT_ADMIN_PASSWORD`. You can also create the first admin by loading initial data. This setting has no effect if an admin account is already found within the data provider. Default `false`.
|
||||
- `naming_rules`, integer. Naming rules for usernames, folder and group names. `0` means no rules. `1` means you can use any UTF-8 character. The names are used in URIs for REST API and Web admin. If not set only unreserved URI characters are allowed: ALPHA / DIGIT / "-" / "." / "_" / "~". `2` means names are converted to lowercase before saving/matching and so case insensitive matching is possible. `3` means trimming trailing and leading white spaces before saving/matching. Rules can be combined, for example `3` means both converting to lowercase and allowing any UTF-8 character. Enabling these options for existing installations could be backward incompatible, some users could be unable to login, for example existing users with mixed cases in their usernames. You have to ensure that all existing users respect the defined rules. Default: `1`.
|
||||
- `is_shared`, integer. If the data provider is shared across multiple SFTPGo instances, set this parameter to `1`. `MySQL`, `PostgreSQL` and `CockroachDB` can be shared, this setting is ignored for other data providers. For shared data providers, active transfers are persisted in the database and thus quota checks between ongoing transfers will work across multiple instances. Password reset requests and OIDC tokens/states are also persisted in the database if the provider is shared. For shared data providers, scheduled event actions are only executed on a single SFTPGo instance by default, you can override this behavior on a per-action basis. The database table `shared_sessions` is used only to store temporary sessions. In performance critical installations, you might consider using a database-specific optimization, for example you might use an `UNLOGGED` table for PostgreSQL. This optimization is only required in very limited use cases. Default: `0`.
|
||||
- `node`, struct. Node-specific configurations to allow inter-node communications. If your provider is shared across multiple nodes, the nodes can exchange information to present a uniform view for node-specific data. The current implementation allows retrieving active connections from all nodes. Nodes connect to each other using the REST API.
|
||||
- `host`, string. IP address or hostname that other nodes can use to connect to this node via REST API. Empty means inter-node communication is disabled. Default: empty.
|
||||
- `port`, integer. The port that other nodes can use to connect to this node via REST API. Default: `0`
|
||||
- `proto`, string. Supported values `http` or `https`. For `https` the configuration for HTTP clients is used, so you can, for example, enable mutual TLS authentication. Default: `http`
|
||||
- `backups_path`, string. Path to the backup directory. This can be an absolute path or a path relative to the config dir. We don't allow backups in arbitrary paths for security reasons.
|
||||
- **"httpd"**, the configuration for the HTTP server used to serve REST API and to expose the built-in web interface
|
||||
- `bindings`, list of structs. Each struct has the following fields:
|
||||
- `port`, integer. The port used for serving HTTP requests. Default: 8080.
|
||||
- `address`, string. Leave blank to listen on all available network interfaces. On *NIX you can specify an absolute path to listen on a Unix-domain socket. Default: blank.
|
||||
- `enable_web_admin`, boolean. Set to `false` to disable the built-in web admin for this binding. You also need to define `templates_path` and `static_files_path` to use the built-in web admin interface. Default `true`.
|
||||
- `enable_web_client`, boolean. Set to `false` to disable the built-in web client for this binding. You also need to define `templates_path` and `static_files_path` to use the built-in web client interface. Default `true`.
|
||||
- `enable_rest_api`, boolean. Set to `false` to disable REST API. Default `true`.
|
||||
- `enabled_login_methods`, integer. Defines the login methods available for the WebAdmin and WebClient UIs. `0` means any configured method: username/password login form and OIDC, if enabled. `1` means OIDC for the WebAdmin UI. `2` means OIDC for the WebClient UI. `4` means login form for the WebAdmin UI. `8` means login form for the WebClient UI. You can combine the values. For example `3` means that you can only login using OIDC on both WebClient and WebAdmin UI. Default: `0`.
|
||||
- `enable_https`, boolean. Set to `true` and provide both a certificate and a key file to enable HTTPS connection for this binding. Default `false`.
|
||||
- `certificate_file`, string. Binding specific TLS certificate. This can be an absolute path or a path relative to the config dir.
|
||||
- `certificate_key_file`, string. Binding specific private key matching the above certificate. This can be an absolute path or a path relative to the config dir. If not set the global ones will be used, if any.
|
||||
- `min_tls_version`, integer. Defines the minimum version of TLS to be enabled. `12` means TLS 1.2 (and therefore TLS 1.2 and TLS 1.3 will be enabled), `13` means TLS 1.3. Default: `12`.
|
||||
- `client_auth_type`, integer. Set to `1` to require client certificate authentication in addition to JWT/Web authentication. You need to define at least one certificate authority for this to work. Default: 0.
|
||||
- `tls_cipher_suites`, list of strings. List of supported cipher suites for TLS version 1.2. If empty, a default list of secure cipher suites is used, with a preference order based on hardware performance. Note that TLS 1.3 ciphersuites are not configurable. The supported ciphersuites names are defined [here](https://github.com/golang/go/blob/master/src/crypto/tls/cipher_suites.go#L52). Any invalid name will be silently ignored. The order matters, the ciphers listed first will be the preferred ones. Default: empty.
|
||||
- `proxy_allowed`, list of IP addresses and IP ranges allowed to set client IP proxy headers such as `X-Forwarded-For`, `X-Real-IP` and any other headers defined in the `security` section. Any of the indicated headers, if set on requests from a connection address not in this list, will be silently ignored. Default: empty.
|
||||
- `client_ip_proxy_header`, string. Defines the allowed client IP proxy header such as `X-Forwarded-For`, `X-Real-IP` etc. Default: empty
|
||||
- `client_ip_header_depth`, integer. Some client IP headers such as `X-Forwarded-For` can contain multiple IP addresses; this setting defines the position to trust, starting from the right. For example if we have: `10.0.0.1,11.0.0.1,12.0.0.1,13.0.0.1` and the depth is `0`, SFTPGo will use `13.0.0.1` as client IP, if depth is `1`, `12.0.0.1` will be used and so on. Default: `0`.
|
||||
- `hide_login_url`, integer. If both web admin and web client are enabled each login page will show a link to the other one. This setting allows you to hide this link. 0 means that the login links are displayed on both admin and client login page. This is the default. 1 means that the login link to the web client login page is hidden on the admin login page. 2 means that the login link to the web admin login page is hidden on the client login page. The flags can be combined, for example 3 will disable both login links.
|
||||
- `render_openapi`, boolean. Set to `false` to disable serving of the OpenAPI schema and renderer. Default `true`.
|
||||
- `web_client_integrations`, list of structs. The SFTPGo web client allows sending files with the specified extensions to the configured URL using the [postMessage API](https://developer.mozilla.org/en-US/docs/Web/API/Window/postMessage). This way you can integrate your own file viewer or editor. Take a look at the commented example [here](../examples/webclient-integrations/test.html) to understand how to use this feature. Each struct has the following fields:
|
||||
- `file_extensions`, list of strings. File extensions must be specified with the leading dot, for example `.pdf`.
|
||||
- `url`, string. URL to open for the configured file extensions. The URL will open in a new tab.
|
||||
- `oidc`, struct. Defines the OpenID Connect configuration. OpenID integration allows you to map your identity provider users to SFTPGo users so you can login to the SFTPGo Web Client and Web Admin user interfaces using your identity provider. The following fields are supported:
|
||||
- `config_url`, string. Identifier for the service. If defined, SFTPGo will add `/.well-known/openid-configuration` to this url and attempt to retrieve the provider configuration on startup. SFTPGo will refuse to start if it fails to connect to the specified URL. Default: blank.
|
||||
- `client_id`, string. Defines the application's ID. Default: blank.
|
||||
- `client_secret`, string. Defines the application's secret. Default: blank.
|
||||
- `redirect_base_url`, string. Defines the base URL to redirect to after OpenID authentication. The suffix `/web/oidc/redirect` will be added to this base URL, adding also the `web_root` if configured. Default: blank.
|
||||
- `username_field`, string. Defines the ID token claims field to map to the SFTPGo username. Default: blank.
|
||||
- `scopes`, list of strings. Request the OAuth provider to provide the scope information for an authenticated user. The `openid` scope is mandatory. Default: `"openid", "profile", "email"`.
|
||||
- `role_field`, string. Defines the optional ID token claims field to map to a SFTPGo role. If the defined ID token claims field is set to `admin` the authenticated user is mapped to an SFTPGo admin. You don't need to specify this field if you want to use OpenID only for the Web Client UI. If the field is inside a nested structure, you can use the dot notation to traverse the structures. Default: blank.
|
||||
- `implicit_roles`, boolean. If set, the `role_field` is ignored and the SFTPGo role is assumed based on the login link used. Default: `false`.
|
||||
- `custom_fields`, list of strings. Custom token claims fields to pass to the pre-login hook. Default: empty.
|
||||
- `insecure_skip_signature_check`, boolean. This setting causes SFTPGo to skip JWT signature validation. It's intended for special cases where providers, such as Azure, use the `none` algorithm. Skipping the signature validation can cause security issues. Default: `false`.
|
||||
- `debug`, boolean. If set, the received id tokens will be logged at debug level. Default: `false`.
|
||||
- `security`, struct. Defines security headers to add to HTTP responses and allows restricting allowed hosts. The following parameters are supported:
|
||||
- `enabled`, boolean. Set to `true` to enable security configurations. Default: `false`.
|
||||
- `allowed_hosts`, list of strings. Fully qualified domain names that are allowed. An empty list allows any and all host names. Default: empty.
|
||||
- `allowed_hosts_are_regex`, boolean. Determines if the provided allowed hosts contain valid regular expressions. Default: `false`.
|
||||
- `hosts_proxy_headers`, list of strings. Defines a set of header keys that may hold a proxied hostname value for the request, for example `X-Forwarded-Host`. Default: empty.
|
||||
- `https_redirect`, boolean. Set to `true` to redirect HTTP requests to HTTPS. Default: `false`.
|
||||
- `https_host`, string. Defines the host name that is used to redirect HTTP requests to HTTPS. Default is blank, which indicates to use the same host. For example, if `https_redirect` is enabled and `https_host` is blank, a request for `http://127.0.0.1/web/client/login` will be redirected to `https://127.0.0.1/web/client/login`, if `https_host` is set to `www.example.com` the same request will be redirected to `https://www.example.com/web/client/login`.
|
||||
- `https_proxy_headers`, list of struct, each struct contains the fields `key` and `value`. Defines a list of header keys with associated values that would indicate a valid https request. For example `key` could be `X-Forwarded-Proto` and `value` `https`. Default: empty.
|
||||
- `sts_seconds`, integer. Defines the max-age of the `Strict-Transport-Security` header. This header will be included for `https` responses or for HTTP requests if the request includes a defined HTTPS proxy header. Default: `0`, which would NOT include the header.
|
||||
- `sts_include_subdomains`, boolean. If set to `true`, the `includeSubdomains` directive will be appended to the `Strict-Transport-Security` header. Default: `false`.
|
||||
- `sts_preload`, boolean. If set to `true`, the `preload` flag will be appended to the `Strict-Transport-Security` header. Default: `false`.
|
||||
- `content_type_nosniff`, boolean. Set to `true` to add the `X-Content-Type-Options` header with the value `nosniff`. Default: `false`.
|
||||
- `content_security_policy`, string. Allows setting the `Content-Security-Policy` header value. Default: blank.
|
||||
- `permissions_policy`, string. Allows setting the `Permissions-Policy` header value. Default: blank.
|
||||
- `cross_origin_opener_policy`, string. Allows setting the `Cross-Origin-Opener-Policy` header value. Default: blank.
|
||||
- `expect_ct_header`, string. Allows setting the `Expect-CT` header value. Default: blank.
|
||||
- `branding`, struct. Defines the supported customizations to suit your brand. It contains the `web_admin` and `web_client` structs that define customizations for the WebAdmin and the WebClient UIs. Each customization struct contains the following fields:
|
||||
- `name`, string. Defines the UI name
|
||||
- `short_name`, string. Defines the short name to show next to the logo image and on the login page
|
||||
- `favicon_path`, string. Path to the favicon relative to `static_files_path`. For example, if you create a directory named `branding` inside the static dir and put the `favicon.ico` file in it, you must set `/branding/favicon.ico` as path.
|
||||
- `logo_path`, string. Path to your logo relative to `static_files_path`. The preferred image size is 256x256 pixels
|
||||
- `login_image_path`, string. Path to a custom image to show on the login screen relative to `static_files_path`. The preferred image size is 900x900 pixels
|
||||
- `disclaimer_name`, string. Name for your optional disclaimer
|
||||
- `disclaimer_path`, string. Path to the HTML page with the disclaimer relative to `static_files_path`
|
||||
- `default_css`, string. Optional path to a custom CSS file, relative to `static_files_path`, which replaces the SB Admin2 default CSS
|
||||
- `extra_css`, list of strings. Defines the paths, relative to `static_files_path`, to additional CSS files
|
||||
- `templates_path`, string. Path to the HTML web templates. This can be an absolute path or a path relative to the config dir
|
||||
- `static_files_path`, string. Path to the static files for the web interface. This can be an absolute path or a path relative to the config dir. If both `templates_path` and `static_files_path` are empty the built-in web interface will be disabled
|
||||
- `openapi_path`, string. Path to the directory that contains the OpenAPI schema and the default renderer. This can be an absolute path or a path relative to the config dir. If empty the OpenAPI schema and the renderer will not be served regardless of the `render_openapi` directive
|
||||
- `web_root`, string. Defines a base URL for the web admin and client interfaces. If empty web admin and client resources will be available at the root ("/") URI. If defined it must be an absolute URI or it will be ignored
|
||||
- `certificate_file`, string. Certificate for HTTPS. This can be an absolute path or a path relative to the config dir.
|
||||
- `certificate_key_file`, string. Private key matching the above certificate. This can be an absolute path or a path relative to the config dir. If both the certificate and the private key are provided, you can enable HTTPS for the configured bindings. Certificate and key files can be reloaded on demand sending a `SIGHUP` signal on Unix based systems and a `paramchange` request to the running service on Windows.
|
||||
- `ca_certificates`, list of strings. Set of root certificate authorities to be used to verify client certificates.
|
||||
- `ca_revocation_lists`, list of strings. Set of revocation lists, one for each root CA, to be used to check if a client certificate has been revoked. The revocation lists can be reloaded on demand sending a `SIGHUP` signal on Unix based systems and a `paramchange` request to the running service on Windows.
|
||||
- `signing_passphrase`, string. Passphrase to use to derive the signing key for JWT and CSRF tokens. If empty a random signing key will be generated each time SFTPGo starts. If you set a signing passphrase you should consider rotating it periodically for added security.
|
||||
- `token_validation`, integer. Defines how to validate JWT tokens, cookies and CSRF tokens. By default all the available security checks are enabled. Set to 1 to disable the requirement that a token must be used by the same IP for which it was issued. Default: `0`.
|
||||
- `max_upload_file_size`, integer. Defines the maximum request body size, in bytes, for Web Client/API HTTP upload requests. 0 means no limit. Default: 1048576000.
|
||||
- `cors` struct containing CORS configuration. SFTPGo uses [Go CORS handler](https://github.com/rs/cors), please refer to upstream documentation for fields meaning and their default values.
|
||||
- `enabled`, boolean, set to `true` to enable CORS.
|
||||
- `allowed_origins`, list of strings.
|
||||
- `allowed_methods`, list of strings.
|
||||
- `allowed_headers`, list of strings.
|
||||
- `exposed_headers`, list of strings.
|
||||
- `allow_credentials` boolean.
|
||||
- `max_age`, integer.
|
||||
- `options_passthrough`, boolean.
|
||||
- `options_success_status`, integer.
|
||||
- `allow_private_network`, boolean.
|
||||
- `setup` struct containing configurations for the initial setup screen
|
||||
- `installation_code`, string. If set, this installation code will be required when creating the first admin account. Please note that even if set using an environment variable this field is read at SFTPGo startup and not at runtime. This is not a license key or similar, the purpose here is to prevent anyone who can access the initial setup screen from creating an admin user. Default: blank.
|
||||
- `installation_code_hint`, string. Description for the installation code input field. Default: `Installation code`.
|
||||
- `hide_support_link`, boolean. If set, the link to the [sponsors section](../README.md#sponsors) will not appear on the setup screen page. Default: `false`.
|
||||
- **"telemetry"**, the configuration for the telemetry server, more details [below](#telemetry-server)
|
||||
- `bind_port`, integer. The port used for serving HTTP requests. Set to 0 to disable HTTP server. Default: 0
|
||||
- `bind_address`, string. Leave blank to listen on all available network interfaces. On \*NIX you can specify an absolute path to listen on a Unix-domain socket. Default: `127.0.0.1`
|
||||
- `enable_profiler`, boolean. Enable the built-in profiler. Default `false`
|
||||
- `auth_user_file`, string. Path to a file used to store usernames and passwords for basic authentication. This can be an absolute path or a path relative to the config dir. We support HTTP basic authentication, and the file format must conform to the one generated using the Apache `htpasswd` tool. The supported password formats are bcrypt (`$2y$` prefix) and md5 crypt (`$apr1$` prefix). If empty, HTTP authentication is disabled. Authentication will always be disabled for the `/healthz` endpoint.
|
||||
- `certificate_file`, string. Certificate for HTTPS. This can be an absolute path or a path relative to the config dir.
|
||||
- `certificate_key_file`, string. Private key matching the above certificate. This can be an absolute path or a path relative to the config dir. If both the certificate and the private key are provided, the server will expect HTTPS connections. Certificate and key files can be reloaded on demand sending a `SIGHUP` signal on Unix based systems and a `paramchange` request to the running service on Windows.
|
||||
- `min_tls_version`, integer. Defines the minimum version of TLS to be enabled. `12` means TLS 1.2 (and therefore TLS 1.2 and TLS 1.3 will be enabled), `13` means TLS 1.3. Default: `12`.
|
||||
- `tls_cipher_suites`, list of strings. List of supported cipher suites for TLS version 1.2. If empty, a default list of secure cipher suites is used, with a preference order based on hardware performance. Note that TLS 1.3 ciphersuites are not configurable. The supported ciphersuites names are defined [here](https://github.com/golang/go/blob/master/src/crypto/tls/cipher_suites.go#L52). Any invalid name will be silently ignored. The order matters, the ciphers listed first will be the preferred ones. Default: empty.
|
||||
- **"http"**, the configuration for HTTP clients. HTTP clients are used for executing hooks. Some hooks use a retryable HTTP client, for these hooks you can configure the time between retries and the number of retries. Please check the hook specific documentation to understand which hooks use a retryable HTTP client.
|
||||
- `timeout`, float. Timeout specifies a time limit, in seconds, for requests. For requests with retries this is the timeout for a single request
|
||||
- `retry_wait_min`, integer. Defines the minimum waiting time between attempts in seconds.
|
||||
- `retry_wait_max`, integer. Defines the maximum waiting time between attempts in seconds. The backoff algorithm will perform exponential backoff based on the attempt number and limited by the provided minimum and maximum durations.
|
||||
- `retry_max`, integer. Defines the maximum number of retries if the first request fails.
|
||||
- `ca_certificates`, list of strings. List of paths to extra CA certificates to trust. The paths can be absolute or relative to the config dir. Adding trusted CA certificates is a convenient way to use self-signed certificates without defeating the purpose of using TLS.
|
||||
- `certificates`, list of certificates for mutual TLS. Each certificate is a struct with the following fields:
|
||||
- `cert`, string. Path to the certificate file. The path can be absolute or relative to the config dir.
|
||||
- `key`, string. Path to the key file. The path can be absolute or relative to the config dir.
|
||||
- `skip_tls_verify`, boolean. If enabled, the HTTP client accepts any TLS certificate presented by the server and any host name in that certificate. In this mode, TLS is susceptible to man-in-the-middle attacks. This should be used only for testing.
|
||||
- `headers`, list of structs. You can define a list of HTTP headers to add to each hook. Each struct has the following fields:
|
||||
- `key`, string
|
||||
- `value`, string. The header is silently ignored if `key` or `value` are empty
|
||||
- `url`, string, optional. If not empty, the header will be added only if the request URL starts with the one specified here
|
||||
- **command**, configuration for external commands such as program based hooks
|
||||
- `timeout`, integer. Timeout specifies a time limit, in seconds, to execute external commands. Valid range: `1-300`. Default: `30`
|
||||
- `env`, list of strings. Environment variables to pass to all the external commands. Global environment variables are cleared for security reasons; you have to explicitly set any environment variables, such as `PATH`, that you need. Each entry is of the form `key=value`. Do not use environment variables prefixed with `SFTPGO_` to avoid conflicts with environment variables that SFTPGo hooks can set. Default: empty
|
||||
- `commands`, list of structs. Allows customizing the configuration on a per-command basis. Each struct has the following fields:
|
||||
- `path`, string. Defines the command path as defined in the hook configuration
|
||||
- `timeout`, integer. This value overrides the global timeout if set
|
||||
- `env`, list of strings. These values are added to the environment variables defined for all commands, if any. Default: empty
|
||||
- `args`, list of strings. Arguments to pass to the command identified by `path`. Default: empty
|
||||
- `hook`, string. If not empty this configuration only applies to the specified hook name. Supported hook names: `fs_actions`, `provider_actions`, `startup`, `post_connect`, `post_disconnect`, `data_retention`, `check_password`, `pre_login`, `post_login`, `external_auth`, `keyboard_interactive`. Default: empty
|
||||
- **kms**, configuration for the Key Management Service, more details can be found [here](./kms.md)
|
||||
- `secrets`
|
||||
- `url`, string. Defines the URI to the KMS service. Default: blank.
|
||||
- `master_key`, string. Defines the master encryption key as string. If not empty, it takes precedence over `master_key_path`. Default: blank.
|
||||
- `master_key_path`, string. Defines the absolute path to a file containing the master encryption key. Default: blank.
|
||||
- **mfa**, multi-factor authentication settings
|
||||
- `totp`, list of structs that define settings for time-based one time passwords (RFC 6238). Each struct has the following fields:
|
||||
- `name`, string. Unique configuration name. This name should not be changed if there are users or admins using the configuration. The name is not exposed to the authentication apps. Default: `Default`.
|
||||
- `issuer`, string. Name of the issuing Organization/Company. Default: `SFTPGo`.
|
||||
- `algo`, string. Algorithm to use for HMAC. The supported algorithms are: `sha1`, `sha256`, `sha512`. Currently Google Authenticator app on iPhone seems to only support `sha1`, please check the compatibility with your target apps/device before setting a different algorithm. You can also define multiple configurations, for example one that uses `sha256` or `sha512` and another one that uses `sha1` and instruct your users to use the appropriate configuration for their devices/apps. The algorithm should not be changed if there are users or admins using the configuration. Default: `sha1`.
|
||||
- **smtp**, SMTP configuration enables SFTPGo email sending capabilities
|
||||
- `host`, string. Location of SMTP email server. Leave empty to disable email sending capabilities. Default: blank.
|
||||
- `port`, integer. Port of SMTP email server.
|
||||
- `from`, string. From address, for example `SFTPGo <sftpgo@example.com>`. Many SMTP servers reject emails without a `From` header so, if not set, SFTPGo will try to use the username as fallback, this may or may not be appropriate. Default: blank
|
||||
- `user`, string. SMTP username. Default: blank
|
||||
- `password`, string. SMTP password. If both username and password are empty, SMTP authentication will be disabled. Default: blank
|
||||
- `auth_type`, integer. 0 means `Plain`, 1 means `Login`, 2 means `CRAM-MD5`. Default: `0`.
|
||||
- `encryption`, integer. 0 means no encryption, 1 means `TLS`, 2 means `STARTTLS`. Default: `0`.
|
||||
- `domain`, string. Domain to use for `HELO` command, if empty `localhost` will be used. Default: blank.
|
||||
- `templates_path`, string. Path to the email templates. This can be an absolute path or a path relative to the config dir. Templates are searched within a subdirectory named "email" in the specified path. You can customize the email templates by simply specifying an alternate path and putting your custom templates there.
|
||||
- **plugins**, list of external plugins. Each plugin is configured using a struct with the following fields:
|
||||
- `type`, string. Defines the plugin type. Supported types: `notifier`, `kms`, `auth`, `metadata`.
|
||||
- `notifier_options`, struct. Defines the options for notifier plugins.
|
||||
- `fs_events`, list of strings. Defines the filesystem events that will be notified to this plugin.
|
||||
- `provider_events`, list of strings. Defines the provider events that will be notified to this plugin.
|
||||
- `provider_objects`, list of strings. Defines the provider objects that will be notified to this plugin.
|
||||
- `retry_max_time`, integer. Defines the maximum number of seconds an event can be late. SFTPGo adds a timestamp to each event and adds to an internal queue any events that the plugin fails to handle (the plugin returns an error or it is not running). If a plugin fails to handle an event that is too late, based on this configuration, it will be discarded. SFTPGo will try to resend queued events every 30 seconds. 0 means no retry.
|
||||
- `retry_queue_max_size`, integer. Defines the maximum number of events that the internal queue can hold. Once the queue is full, the events that cannot be sent to the plugin will be discarded. 0 means no limit.
|
||||
- `kms_options`, struct. Defines the options for kms plugins.
|
||||
- `scheme`, string. KMS scheme. Supported schemes are: `awskms`, `gcpkms`, `hashivault`, `azurekeyvault`.
|
||||
- `encrypted_status`, string. Encrypted status for a KMS secret. Supported statuses are: `AWS`, `GCP`, `VaultTransit`, `AzureKeyVault`.
|
||||
- `auth_options`, struct. Defines the options for auth plugins.
|
||||
- `scope`, integer. 1 means passwords only. 2 means public keys only. 4 means keyboard interactive only. 8 means TLS certificate. The flags can be combined, for example 6 means public keys and keyboard interactive. The scope must be explicit, `0` is not a valid option.
|
||||
- `cmd`, string. Path to the plugin executable.
|
||||
- `args`, list of strings. Optional arguments to pass to the plugin executable.
|
||||
- `sha256sum`, string. SHA256 checksum for the plugin executable. If not empty it will be used to verify the integrity of the executable.
|
||||
- `auto_mtls`, boolean. If enabled the client and the server automatically negotiate mutual TLS for transport authentication. This ensures that only the original client will be allowed to connect to the server, and all other connections will be rejected. The client will also refuse to connect to any server that isn't the original instance started by the client.
|
||||
|
||||
:warning: Please note that the plugin system is experimental, the exposed configuration parameters and interfaces may change in a backward incompatible way in the future.
|
||||
|
||||
A full example showing the default config (in JSON format) can be found [here](../sftpgo.json).
|
||||
|
||||
If you want to use a private host key that uses an algorithm/setting different from the auto generated RSA/ECDSA keys, or more than two private keys, you can generate your own keys and replace the empty `keys` array with something like this:
|
||||
|
||||
```json
|
||||
"host_keys": [
|
||||
"id_rsa",
|
||||
"id_ecdsa",
|
||||
"id_ed25519"
|
||||
]
|
||||
```
|
||||
|
||||
where `id_rsa`, `id_ecdsa` and `id_ed25519`, in this example, are files containing your generated keys. You can use absolute paths or paths relative to the configuration directory specified via the `--config-dir` serve flag. By default the configuration directory is the working directory.
|
||||
|
||||
If you want the default host keys to be generated in a directory different from the config dir, please specify absolute paths to files named `id_rsa`, `id_ecdsa` or `id_ed25519` like this:
|
||||
|
||||
```json
|
||||
"host_keys": [
|
||||
"/etc/sftpgo/keys/id_rsa",
|
||||
"/etc/sftpgo/keys/id_ecdsa",
|
||||
"/etc/sftpgo/keys/id_ed25519"
|
||||
]
|
||||
```
|
||||
|
||||
then SFTPGo will try to create `id_rsa`, `id_ecdsa` and `id_ed25519`, if they are missing, inside the directory `/etc/sftpgo/keys`.
|
||||
|
||||
The configuration can be read from JSON, TOML, YAML, HCL, envfile and Java properties config files. If your `config-file` flag is set to `sftpgo` (default value), you need to create a configuration file called `sftpgo.json` or `sftpgo.yaml` and so on inside `config-dir`.
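
For example, a minimal sketch of starting the service with an explicit configuration directory and file name (the paths below are just examples, adjust them to your installation):

```shell
# load the configuration named "sftpgo" (sftpgo.json, sftpgo.yaml, ...) from /etc/sftpgo
sftpgo serve --config-dir /etc/sftpgo --config-file sftpgo
```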
|
||||
|
||||
</details>
|
||||
|
||||
<details><summary><font size=5> Environment variables</font></summary>
|
||||
|
||||
You can also override all the available configuration options using environment variables. SFTPGo will check for environment variables with a name matching the key uppercased and prefixed with `SFTPGO_`. You need to use `__` to traverse a struct.
|
||||
|
||||
Let's see some examples:
|
||||
|
||||
- To set the `port` for the first sftpd binding, you need to define the env var `SFTPGO_SFTPD__BINDINGS__0__PORT`
|
||||
- To set the `execute_on` actions, you need to define the env var `SFTPGO_COMMON__ACTIONS__EXECUTE_ON`. For example `SFTPGO_COMMON__ACTIONS__EXECUTE_ON=upload,download`
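
As a further sketch, the same kind of overrides can be exported from a shell or from an environment file before starting the service; the values below are placeholders, not recommendations:

```shell
# port for the first sftpd binding (list elements are addressed by index)
export SFTPGO_SFTPD__BINDINGS__0__PORT=2022
# filesystem actions to notify, comma separated
export SFTPGO_COMMON__ACTIONS__EXECUTE_ON=upload,download
# switch the data provider driver (example value)
export SFTPGO_DATA_PROVIDER__DRIVER=postgresql
```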
|
||||
|
||||
On some hardware you can get faster SFTP performance by replacing the Go `crypto/sha256` implementation with [sha256-simd](https://github.com/minio/sha256-simd).
|
||||
|
||||
The performance of SHA256 is relevant for clients using AES CTR ciphers and `hmac-sha2-256` as Message Authentication Code (MAC).
|
||||
|
||||
Up to the 2.0.x versions SFTPGo automatically used `sha256-simd`, but over time the standard Go implementation has improved a lot and is now faster than `sha256-simd` on some CPUs.
|
||||
You can select `sha256-simd` by setting the environment variable `SFTPGO_MINIO_SHA256_SIMD` to `1`.
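
For example, if you start SFTPGo from a shell, a minimal sketch using the variable described above:

```shell
# opt in to the sha256-simd implementation for this SFTPGo process
export SFTPGO_MINIO_SHA256_SIMD=1
```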
|
||||
|
||||
`sha256-simd` is particularly useful if you have an Intel CPU with SHA extensions or an ARM CPU with Cryptography Extensions.
|
||||
|
||||
The configuration file can change between different versions and merging your custom settings with the default configuration file, after updating SFTPGo, may be time-consuming. For this reason we suggest setting your custom settings using environment variables. This eliminates the need to merge your changes with the default configuration file after each update; you just have to check that your custom configuration keys still exist.
|
||||
|
||||
Setting configuration options from environment variables is natural in Docker/Kubernetes.
|
||||
If you install SFTPGo on Linux using the official deb/rpm packages you can set your custom environment variables in the file `/etc/sftpgo/sftpgo.env` (create this file if it does not exist, it is defined as `EnvironmentFile` in the SFTPGo systemd unit).
|
||||
SFTPGo also reads files inside the `env.d` directory relative to the config dir and then exports the valid variables as environment variables if they are not already set. With this method you can override any configuration option and set environment variables for SFTPGo plugins, but you cannot set command flags, because these files are read after SFTPGo starts and the config dir must already be set.
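
As a sketch, assuming the config dir is `/etc/sftpgo` (the path used by the official Linux packages), you could drop a file like the following into the `env.d` directory; the file name and values are examples only:

```shell
# /etc/sftpgo/env.d/99-local-overrides (example file)
SFTPGO_HTTPD__BINDINGS__0__PORT=9090
SFTPGO_SMTP__HOST=smtp.example.com
```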
|
||||
Of course you can also set environment variables with the method provided by the operating system of your choice.
|
||||
|
||||
</details>
|
||||
|
||||
<details><summary><font size=5>Binding to privileged ports</font></summary>
|
||||
|
||||
On Linux, if you want to use Internet domain privileged ports (port numbers less than 1024) instead of running the SFTPGo service as root user you can set the `cap_net_bind_service` capability on the `sftpgo` binary. To set the capability you can use the following command:
|
||||
|
||||
```shell
|
||||
$ sudo setcap cap_net_bind_service=+ep /usr/bin/sftpgo
|
||||
# Check that the capability is added
|
||||
$ getcap /usr/bin/sftpgo
|
||||
/usr/bin/sftpgo cap_net_bind_service=ep
|
||||
```
|
||||
|
||||
Now you can use privileged ports such as 21, 22, 443 etc. without running the SFTPGo service as root user. You have to set the `cap_net_bind_service` capability each time you update the `sftpgo` binary.
|
||||
|
||||
The "official" deb/rpm packages attempt to set the `cap_net_bind_service` capability in their `postinstall` scripts.
|
||||
|
||||
An alternative method is to use `iptables`, for example you run the SFTP service on port `2022` and redirect traffic from port `22` to port `2022`:
|
||||
|
||||
```shell
|
||||
sudo iptables -t nat -A PREROUTING -d <ip> -p tcp --dport 22 -m addrtype --dst-type LOCAL -j DNAT --to-destination <ip>:2022
|
||||
sudo iptables -t nat -A OUTPUT -d <ip> -p tcp --dport 22 -m addrtype --dst-type LOCAL -j DNAT --to-destination <ip>:2022
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
<details><summary><font size=5>Supported Password Hashing Algorithms</font></summary>
|
||||
|
||||
SFTPGo can verify passwords in several formats and uses, by default, the `bcrypt` algorithm to hash plain-text passwords before storing them inside the data provider. Each hashing algorithm is identified by a prefix.
|
||||
Supported hash algorithms:
|
||||
|
||||
- bcrypt, prefix `$2a$`
|
||||
- argon2id, prefix `$argon2id$`
|
||||
- PBKDF2 sha1, prefix `$pbkdf2-sha1$`
|
||||
- PBKDF2 sha256, prefix `$pbkdf2-sha256$`
|
||||
- PBKDF2 sha512, prefix `$pbkdf2-sha512$`
|
||||
- PBKDF2 sha256 with base64 salt, prefix `$pbkdf2-b64salt-sha256$`
|
||||
- MD5 crypt, prefix `$1$`
|
||||
- MD5 crypt APR1, prefix `$apr1$`
|
||||
- SHA256 crypt, prefix `$5$`
|
||||
- SHA512 crypt, prefix `$6$`
|
||||
- LDAP MD5, prefix `{MD5}`
|
||||
|
||||
If you set a password with one of these prefixes it will not be hashed.
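
For example, you could generate a compatible hash with the Apache `htpasswd` tool, which is not part of SFTPGo; its default output uses the MD5 crypt APR1 format listed above:

```shell
# print "myuser:$apr1$..." to stdout without touching any file
htpasswd -nb myuser mysecretpassword
```

Since MD5 crypt is a weak algorithm, passwords set this way will be converted to the preferred algorithm after the first successful login, as noted below.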
|
||||
When users log in, if their passwords are stored with anything other than the preferred algorithm, SFTPGo will automatically upgrade the algorithm to the preferred one.
|
||||
|
||||
</details>
|
||||
|
||||
## Telemetry Server
|
||||
|
||||
The telemetry server exposes the following endpoints:
|
||||
|
||||
- `/healthz`, health information (for health checks)
|
||||
- `/metrics`, Prometheus metrics
|
||||
- `/debug/pprof`, if enabled via the `enable_profiler` configuration key, for profiling, more details [here](./profiling.md)
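
A quick way to check the endpoints, assuming you enabled the telemetry server on `127.0.0.1:10000` (address and port are examples):

```shell
curl http://127.0.0.1:10000/healthz   # health check
curl http://127.0.0.1:10000/metrics   # Prometheus formatted metrics
```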
|
||||
docs/google-cloud-storage.md (new file)
|
||||
# Google Cloud Storage backend
|
||||
|
||||
To connect SFTPGo to Google Cloud Storage you can use the Application Default Credentials (ADC) strategy to try to find your application's credentials automatically or you can explicitly provide a JSON credentials file that you can obtain from the Google Cloud Console. Take a look [here](https://cloud.google.com/docs/authentication/production#providing_credentials_to_your_application) for details.
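
If you go the Application Default Credentials route, one common approach is to point the standard `GOOGLE_APPLICATION_CREDENTIALS` environment variable at a service account key file before starting SFTPGo; the path below is only an example:

```shell
# make ADC available to the SFTPGo process
export GOOGLE_APPLICATION_CREDENTIALS=/etc/sftpgo/gcs-service-account.json
```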
|
||||
|
||||
Specifying a different `key_prefix`, you can assign different "folders" of the same bucket to different users. This is similar to a chroot directory for a local filesystem. Each SFTP/SCP user can only access the assigned folder and its contents. The folder identified by `key_prefix` does not need to be pre-created.
|
||||
|
||||
You can optionally specify a [storage class](https://cloud.google.com/storage/docs/storage-classes) too. Leave it blank to use the default storage class.
|
||||
|
||||
The configured bucket must exist.
|
||||
|
||||
This backend is very similar to the [S3](./s3.md) backend, and it has the same limitations. As with S3, `chtime` will fail with the default configuration; you can install the [metadata plugin](https://github.com/sftpgo/sftpgo-plugin-metadata) to make it work and thus be able to preserve/change file modification times.
|
||||
docs/groups.md (new file)
|
||||
# Groups
|
||||
|
||||
Using groups simplifies the administration of multiple accounts by letting you assign settings once to a group, instead of multiple times to each individual user.
|
||||
|
||||
SFTPGo supports the following types of groups:
|
||||
|
||||
- primary groups
|
||||
- secondary groups
- membership groups
|
||||
|
||||
A user can be a member of a primary group and many secondary and membership groups. Depending on the group type, the settings are inherited differently.
|
||||
|
||||
:warning: SFTPGo groups are completely unrelated to system groups. Therefore, it is not necessary to add Linux/Windows groups to use SFTPGo groups.
|
||||
|
||||
The following settings are inherited from the primary group:
|
||||
|
||||
- home dir, if set for the group, will replace the one defined for the user. The `%username%` placeholder is replaced with the username
|
||||
- filesystem config, if the provider set for the group is different from the "local provider", it will replace the one defined for the user. The `%username%` placeholder is replaced with the username within the defined "prefix", for any vfs, and the "username" for the SFTP filesystem config
|
||||
- max sessions, quota size/files, upload/download bandwidth, upload/download/total data transfer, max upload size, external auth cache time, ftp_security, default share expiration: if they are set to `0` for the user they are replaced with the value set for the group, if different from `0`
|
||||
- TLS username, check password hook disabled, pre-login hook disabled, external auth hook disabled, filesystem checks disabled, allow API key authentication, anonymous user: if they are not set for the user they are replaced with the value set for the group
|
||||
- starting directory, if the user does not have a starting directory set, the value set for the group is used, if any. The `%username%` placeholder is replaced with the username
|
||||
|
||||
The following settings are inherited from the primary and secondary groups:
|
||||
|
||||
- virtual folders, file patterns, permissions: they are added to the user configuration if the user does not already have a setting for the configured path. The `/` path is ignored for secondary groups. The `%username%` placeholder is replaced with the username within the virtual path, the defined "prefix", for any vfs, and the "username" for the SFTP and HTTP filesystem config
|
||||
- per-source bandwidth limits
|
||||
- per-source data transfer limits
|
||||
- allowed/denied IPs
|
||||
- denied login methods and protocols
|
||||
- two factor auth protocols
|
||||
- web client/REST API permissions
|
||||
|
||||
The settings from the primary group are always merged first. No setting is inherited from "membership" groups.
|
||||
|
||||
The final settings are a combination of the user settings and the group ones.
|
||||
For example you can define the following groups:
|
||||
|
||||
- "group1", it has a virtual directory to mount on `/vdir1`
|
||||
- "group2", it has a virtual directory to mount on `/vdir2`
|
||||
- "group3", it has a virtual directory to mount on `/vdir3`
|
||||
|
||||
If you define users with a virtual directory to mount on `/vdir` and make them member of all the above groups, they will have virtual directories mounted on `/vdir`, `/vdir1`, `/vdir2`, `/vdir3`. If users already have a virtual directory to mount on `/vdir1`, the group's one will be ignored.
|
||||
|
||||
Please note that if the same virtual path is set in more than one secondary group the behavior is undefined. For example if a user is a member of two secondary groups and each secondary group defines a virtual folder to mount on the `/vdir2` path, the virtual folder mounted on `/vdir2` may change with every login.
|
||||
docs/howto/README.md (new file)
|
||||
# Tutorials
|
||||
|
||||
Here we collect step-by-step tutorials. SFTPGo users are encouraged to contribute!
|
||||
|
||||
- [Getting Started](./getting-started.md)
|
||||
- [Securing SFTPGo with a free Let's Encrypt TLS Certificate](./lets-encrypt-certificate.md)
|
||||
- [Two-factor Authentication](./two-factor-authentication.md)
|
||||
- [Event Manager](./eventmanager.md)
|
||||
- [SFTPGo as OpenSSH's SFTP subsystem](./openssh-sftp-subsystem.md)
|
||||
- [SFTPGo with PostgreSQL data provider and S3 backend](./postgresql-s3.md)
|
||||
- [SFTPGo on Windows with Active Directory Integration + Caddy Static File Server](https://www.youtube.com/watch?v=M5UcJI8t4AI)
|
||||
- [File Traefik: Serve files securely via SFTP, HTTPS, and WebDAV with SFTPGo proxied behind Traefik](https://thad.getterman.org/articles/file-traefik/)
|
||||
docs/howto/eventmanager.md (new file)
|
||||
# Event Manager
|
||||
|
||||
The Event Manager allows an administrator to configure HTTP notifications, command executions, email notifications and certain server operations based on server events or schedules. More details [here](../eventmanager.md).
|
||||
|
||||
Let's see some common use cases.
|
||||
|
||||
- [Preliminary Note](#preliminary-note)
|
||||
- [Daily backups](#daily-backups)
|
||||
- [Automatically create a folder structure](#automatically-create-a-folder-structure)
|
||||
- [Upload notifications](#upload-notifications)
|
||||
|
||||
## Preliminary Note
|
||||
|
||||
We will use email actions in the following paragraphs, so let's assume you have a working SMTP configuration.
|
||||
You can adapt the following snippet to configure an SMTP server using environment variables.
|
||||
|
||||
```shell
|
||||
SFTPGO_SMTP__HOST="your smtp server host"
|
||||
SFTPGO_SMTP__FROM="SFTPGo <sftpgo@example.com>"
|
||||
SFTPGO_SMTP__USER=sftpgo@example.com
|
||||
SFTPGO_SMTP__PASSWORD="your password"
|
||||
SFTPGO_SMTP__AUTH_TYPE=1 # change based on what your server supports
|
||||
SFTPGO_SMTP__ENCRYPTION=2 # change based on what your server supports
|
||||
```
|
||||
|
||||
SFTPGo supports several placeholders for event actions. You can see all supported placeholders by clicking on the "info" icon at the top right of the add/update action page.
|
||||
|
||||
## Daily backups
|
||||
|
||||
You can schedule SFTPGo data backups (users, folders, groups, admins etc.) on a regular basis, such as daily.
|
||||
|
||||
From the WebAdmin expand the `Event Manager` section, select `Event actions` and add a new action.
|
||||
Create an action named `backup` and set the type to `Backup`.
|
||||
|
||||

|
||||
|
||||
Create another action named `backup notification`, set the type to `Email` and fill in the recipient(s).
|
||||
As email subject set `Backup {{StatusString}}`. The `{{StatusString}}` placeholder will be expanded to `OK` or `KO`.
|
||||
As email body set `Backup done {{ErrorString}}`. The error string will be empty if no errors occur.
|
||||
|
||||

|
||||
|
||||
Now select `Event rules` and create a rule named `Daily backup`, select `Schedule` as trigger and schedule a backup at midnight UTC time.
|
||||
|
||||

|
||||
|
||||
As actions select `backup` and `backup notification`.
|
||||
|
||||

|
||||
|
||||
Done! SFTPGo will make a new backup every day and you will receive an email with the status of the backup. The backup will be saved on the server side in the configured backup directory. The backup files will have names like this `backup_<week day>_<hour>.json`.
|
||||
|
||||
## Automatically create a folder structure
|
||||
|
||||
Suppose you want to automatically create the folders `in` and `out` when you create new users.
|
||||
|
||||
From the WebAdmin expand the `Event Manager` section, select `Event actions` and add a new action.
|
||||
Create an action named `create dirs`, with the settings you can see in the following screen.
|
||||
|
||||

|
||||
|
||||
Create another action named `create dirs failure notification`, set the type to `Email` and fill in the recipient(s).
|
||||
As email subject set `Unable to create dirs for user {{ObjectName}}`.
|
||||
As email body set `Error: {{ErrorString}}`.
|
||||
|
||||

|
||||
|
||||
Now select `Event rules` and create a rule named `Create dirs for users`, select `Provider event` as trigger, `add` as provider event and `user` as object filters.
|
||||
|
||||

|
||||
|
||||
As actions select `create dirs` and `create dirs failure notification`, check `Is failure action` for the notification action.
|
||||
This way you will only be notified by email if an error occurs.
|
||||
|
||||

|
||||
|
||||
Done! Create a new user and check that the defined directories are automatically created.
|
||||
|
||||
## Upload notifications
|
||||
|
||||
Let's see how you can receive an email notification after each upload and, optionally, the uploaded file as well.
|
||||
|
||||
From the WebAdmin expand the `Event Manager` section, select `Event actions` and add a new action.
|
||||
Create an action named `upload notification`, with the settings you can see in the following screen.
|
||||
|
||||

|
||||
|
||||
You can optionally add the uploaded file as an attachment, but note that SFTPGo allows attachments up to a maximum of 10MB, so the action will fail for files bigger than 10MB.
|
||||
|
||||
Now select `Event rules` and create a rule named `Upload rule`, select `Filesystem events` as trigger and `upload` as filesystem event.
|
||||
You can also filter events based on protocol, user and group name, filepath shell-like patterns, file size. We omit these additional filters for simplicity.
|
||||
|
||||

|
||||
|
||||
As actions, select `upload notification`.
|
||||
Done! Try uploading a new file and you will receive the configured email notification.
|
||||
docs/howto/getting-started.md (new file)
|
||||
# Getting Started
|
||||
|
||||
SFTPGo allows you to securely share your files over SFTP and optionally FTP/S and WebDAV too.
|
||||
Several storage backends are supported and they are configurable per user, so you can serve a local directory for a user and an S3 bucket (or part of it) for another one.
|
||||
SFTPGo also supports virtual folders, a virtual folder can use any of the supported storage backends. So you can have, for example, an S3 user that exposes a GCS bucket (or part of it) on a specified path and an encrypted local filesystem on another one.
|
||||
Virtual folders can be private or shared among multiple users, for shared virtual folders you can define different quota limits for each user.
|
||||
|
||||
In this tutorial we explore the main features and concepts using the built-in web admin interface. Advanced users can also use the SFTPGo [REST API](https://sftpgo.stoplight.io/docs/sftpgo/openapi.yaml)
|
||||
|
||||
- [Installation](#installation)
|
||||
- [Initial configuration](#initial-configuration)
|
||||
- [Creating users](#creating-users)
|
||||
- [Creating users with a Cloud Storage backend](#creating-users-with-a-cloud-storage-backend)
|
||||
- [Creating users with a local encrypted backend (Data At Rest Encryption)](#creating-users-with-a-local-encrypted-backend-data-at-rest-encryption)
|
||||
- [Virtual permissions](#virtual-permissions)
|
||||
- [Virtual folders](#virtual-folders)
|
||||
- [Groups](#groups)
|
||||
- [Usage example](#usage-example)
|
||||
- [Simplify user page](#simplify-user-page)
|
||||
- [Configuration parameters](#configuration-parameters)
|
||||
- [Use PostgreSQL data provider](#use-postgresql-data-provider)
|
||||
- [Use MySQL/MariaDB data provider](#use-mysqlmariadb-data-provider)
|
||||
- [Use CockroachDB data provider](#use-cockroachdb-data-provider)
|
||||
- [Enable FTP service](#enable-ftp-service)
|
||||
- [Enable WebDAV service](#enable-webdav-service)
|
||||
|
||||
## Installation
|
||||
|
||||
You can easily install SFTPGo by downloading the appropriate package for your operating system and architecture. Please visit the [releases](https://github.com/drakkan/sftpgo/releases "releases") page.
|
||||
|
||||
An official Docker image is available. Documentation is [here](./../../docker/README.md).
|
||||
|
||||
In this guide, we assume that SFTPGo is already installed and running using the default configuration.
|
||||
|
||||
## Initial configuration
|
||||
|
||||
Before you can use SFTPGo you need to create an admin account, so open [http://127.0.0.1:8080/web/admin](http://127.0.0.1:8080/web/admin) in your web browser, replacing `127.0.0.1` with the appropriate IP address if SFTPGo is not running on localhost.
|
||||
|
||||

|
||||
|
||||
After creating the admin account you will be automatically logged in.
|
||||
|
||||

|
||||
|
||||
The web admin is now available at the following URL:
|
||||
|
||||
[http://127.0.0.1:8080/web/admin](http://127.0.0.1:8080/web/admin)
|
||||
|
||||
From the `Status` page you see the active services.
|
||||
|
||||

|
||||
|
||||
The default configuration enables the SFTP service on port `2022` and uses an embedded data provider (`SQLite` or `bolt` based on the target OS and architecture).
|
||||
|
||||
## Creating users
|
||||
|
||||
Let's create our first local user:
|
||||
|
||||
- from the `Users` page click the `+` icon to open the `Add user page`
|
||||
- the only required fields are the `Username` and a `Password` or a `Public key`
|
||||
- if you are on Windows or you installed SFTPGo manually and no `users_base_dir` is defined in your configuration file you also have to set a `Home Dir`. It must be an absolute path, for example `/srv/sftpgo/data/username` on Linux or `C:\sftpgo\data\username` on Windows. SFTPGo will try to automatically create the home directory, if missing, when the user logs in. Each user can only access files and folders inside its home directory.
|
||||
- click `Submit`
|
||||
|
||||

|
||||
|
||||
:warning: Please note that, on Linux, SFTPGo runs using a dedicated system user and group called `sftpgo`, for added security. If you want to be able to use directories outside the `/srv/sftpgo` path you need to set the appropriate system level permissions. For example if you define `/home/username/test` as home dir you have to create this directory yourself, if it doesn't exist, and set the appropriate system-level permissions:
|
||||
|
||||
```shell
|
||||
sudo mkdir /home/username/test
|
||||
sudo chown sftpgo:sftpgo /home/username/test
|
||||
```
|
||||
|
||||
You also need to make sure that the `sftpgo` system user has at least the read permission for any parent directory, so in the example above `/home/username` and `/home` must not have `0700` permissions.
|
||||
|
||||
Now test the new user, we use the `sftp` CLI here, you can use any SFTP client.
|
||||
|
||||
```shell
|
||||
$ sftp -P 2022 nicola@127.0.0.1
|
||||
nicola@127.0.0.1's password:
|
||||
Connected to 127.0.0.1.
|
||||
sftp> ls
|
||||
sftp> put file.txt
|
||||
Uploading file.txt to /file.txt
|
||||
file.txt 100% 4034 3.9MB/s 00:00
|
||||
sftp> ls
|
||||
file.txt
|
||||
sftp> mkdir adir
|
||||
sftp> cd adir/
|
||||
sftp> put file.txt
|
||||
Uploading file.txt to /adir/file.txt
|
||||
file.txt 100% 4034 4.0MB/s 00:00
|
||||
sftp> ls
|
||||
file.txt
|
||||
sftp> get file.txt
|
||||
Fetching /adir/file.txt to file.txt
|
||||
/adir/file.txt 100% 4034 1.9MB/s 00:00
|
||||
```
|
||||
|
||||
It worked! We can upload/download files and create directories.
|
||||
|
||||
Each user can browse and download their files, share files with external users, change their credentials and configure two-factor authentication using the WebClient interface available at the following URL:
|
||||
|
||||
[http://127.0.0.1:8080/web/client](http://127.0.0.1:8080/web/client)
|
||||
|
||||

|
||||
|
||||

|
||||
|
||||
### Creating users with a Cloud Storage backend
|
||||
|
||||
The procedure is similar to the one described for local users; you only have to specify the Cloud Storage backend and its credentials.
|
||||
|
||||
The screenshot below shows an example configuration for an S3 backend.
|
||||
|
||||

|
||||

|
||||
|
||||
The screenshot below shows an example configuration for an Azure Blob Storage backend.
|
||||
|
||||

|
||||

|
||||
|
||||
The screenshot below shows an example configuration for a Google Cloud Storage backend.
|
||||
|
||||

|
||||
|
||||
The screenshot below shows an example configuration for an SFTP server as storage backend.
|
||||
|
||||

|
||||
|
||||
By setting a `Key Prefix` you restrict the user to a specific "sub-folder" in the bucket, so that the same bucket can be shared among different users.
|
||||
|
||||
### Creating users with a local encrypted backend (Data At Rest Encryption)
|
||||
|
||||
The procedure is similar to the one described for local users; you only have to specify the encryption passphrase.
|
||||
The screenshot below shows an example configuration.
|
||||
|
||||

|
||||
|
||||
You can find more details about Data At Rest Encryption [here](../dare.md).
|
||||
|
||||
## Virtual permissions
|
||||
|
||||
SFTPGo supports per directory virtual permissions. For each user you have to specify global permissions and then override them on a per-directory basis.
|
||||
|
||||
Take a look at the following screens.
|
||||
|
||||

|
||||
|
||||
This user has full access as default (`*`), can only list and download from `/read-only` path and has no permissions at all for the `/subdir` path.
|
||||
|
||||
Let's test it. We use the `sftp` CLI here, you can use any SFTP client.
|
||||
|
||||
```shell
|
||||
$ sftp -P 2022 nicola@127.0.0.1
|
||||
Connected to 127.0.0.1.
|
||||
sftp> ls
|
||||
adir file.txt read-only subdir
|
||||
sftp> put file.txt
|
||||
Uploading file.txt to /file.txt
|
||||
file.txt 100% 4034 19.4MB/s 00:00
|
||||
sftp> rm file.txt
|
||||
Removing /file.txt
|
||||
sftp> ls
|
||||
adir read-only subdir
|
||||
sftp> cd read-only/
|
||||
sftp> ls
|
||||
file.txt
|
||||
sftp> put file1.txt
|
||||
Uploading file1.txt to /read-only/file1.txt
|
||||
remote open("/read-only/file1.txt"): Permission denied
|
||||
sftp> get file.txt
|
||||
Fetching /read-only/file.txt to file.txt
|
||||
/read-only/file.txt 100% 4034 2.2MB/s 00:00
|
||||
sftp> cd ..
|
||||
sftp> ls
|
||||
adir read-only subdir
|
||||
sftp> cd /subdir
|
||||
sftp> ls
|
||||
remote readdir("/subdir"): Permission denied
|
||||
```
|
||||
|
||||
As you can see, it worked as expected.
|
||||
|
||||
## Virtual folders
|
||||
|
||||
A virtual folder is a mapping between an SFTPGo virtual path and a filesystem path outside the user home directory or on a different storage provider.
|
||||
Therefore, there is no need to create virtual folders for the user's home directory or for directories within the user's home directory.
|
||||
|
||||
From the web admin interface click `Folders` and then the `+` icon.
|
||||
|
||||

|
||||
|
||||
To create a local folder you need to specify a `Name` and an `Absolute path`. For other backends you have to specify the backend type and its credentials, this is the same procedure already detailed for creating users with cloud backends.
|
||||
|
||||
Suppose we created two virtual folders named `localfolder` and `minio` as you can see in the following screen.
|
||||
|
||||

|
||||
|
||||
- `localfolder` uses the local filesystem as storage backend
|
||||
- `minio` uses MinIO (S3 compatible) as storage backend
|
||||
|
||||
Now click `Users` on the left menu, select a user and click the `Edit` icon to update the user and associate the virtual folders.
|
||||
|
||||
Virtual folders must be referenced using their unique name and you can expose them on a configurable virtual path. Take a look at the following screenshot.
|
||||
|
||||

|
||||
|
||||
We exposed the folder named `localfolder` on the path `/vdirlocal` (this must be an absolute UNIX path on Windows too) and the folder named `minio` on the path `/vdirminio`. For `localfolder` the quota usage is included within the user quota, while for the `minio` folder we defined separate quota limits: at most 2 files and at most 100MB, whichever is reached first.
|
||||
|
||||
The folder `minio` can be shared with other users and we can define different quota limits on a per-user basis. The folder `localfolder` is considered private since we have included its quota limits within those of the user; if we shared it with other users we would break the quota calculation.
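For reference, the resulting folder references in the user definition might look roughly like the following sketch. The field names and quota semantics reflect our reading of the user JSON format and may need checking against the API reference; we assume `-1` means that the folder quota is included in the user quota, and 100MB is expressed in bytes.

```json
"virtual_folders": [
  {
    "name": "localfolder",
    "virtual_path": "/vdirlocal",
    "quota_size": -1,
    "quota_files": -1
  },
  {
    "name": "minio",
    "virtual_path": "/vdirminio",
    "quota_size": 104857600,
    "quota_files": 2
  }
]
```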
|
||||
|
||||
Let's test these virtual folders. We use the `sftp` CLI here; you can use any SFTP client.
|
||||
|
||||
```shell
|
||||
$ sftp -P 2022 nicola@127.0.0.1
|
||||
nicola@127.0.0.1's password:
|
||||
Connected to 127.0.0.1.
|
||||
sftp> ls
|
||||
adir read-only subdir vdirlocal vdirminio
|
||||
sftp> cd vdirlocal
|
||||
sftp> put file.txt
|
||||
Uploading file.txt to /vdirlocal/file.txt
|
||||
file.txt 100% 4034 17.3MB/s 00:00
|
||||
sftp> ls
|
||||
file.txt
|
||||
sftp> cd ..
|
||||
sftp> cd vdirminio/
|
||||
sftp> put file.txt
|
||||
Uploading file.txt to /vdirminio/file.txt
|
||||
file.txt 100% 4034 4.8MB/s 00:00
|
||||
sftp> ls
|
||||
file.txt
|
||||
sftp> put file.txt file1.txt
|
||||
Uploading file.txt to /vdirminio/file1.txt
|
||||
file.txt 100% 4034 2.8MB/s 00:00
|
||||
sftp> put file.txt file2.txt
|
||||
Uploading file.txt to /vdirminio/file2.txt
|
||||
remote open("/vdirminio/file2.txt"): Failure
|
||||
sftp> quit
|
||||
```
|
||||
|
||||
The last upload failed since we exceeded the file count quota limit.
|
||||
|
||||
## Groups
|
||||
|
||||
Using groups simplifies the administration of multiple SFTPGo users: you can assign settings once to a group, instead of multiple times to each individual user.
|
||||
|
||||
SFTPGo supports the following types of groups:
|
||||
|
||||
- primary groups
|
||||
- secondary groups
|
||||
- membership groups
|
||||
|
||||
A user can be a member of a primary group and of many secondary and membership groups. Depending on the group type, the settings are inherited differently; you can find more details [here](../groups.md).
|
||||
|
||||
:warning: SFTPGo groups are completely unrelated to system groups. Therefore, it is not necessary to add Linux/Windows groups to use SFTPGo groups.
|
||||
|
||||
### Usage example
|
||||
|
||||
Suppose you have the following requirements:
|
||||
|
||||
- each user must be restricted to a local home directory containing the username as the last element of the path, for example `/srv/sftpgo/data/<username>`
|
||||
- for each user, the maximum upload size for a single file must be limited to 1GB
|
||||
- each user must have an S3 virtual folder available in the path `/s3<username>`, and each user can only access a specified "prefix" of the S3 bucket, so that users cannot access each other's files
|
||||
- each user must have an S3 virtual folder available in the path `/shared`. This is a folder shared with other users
|
||||
- a group of users can only download and list contents in the `/shared` path, while another group of users has full access
|
||||
|
||||
We can easily meet these requirements by defining two groups.
|
||||
|
||||
From the SFTPGo WebAdmin UI, click on `Folders` and then on the `+` icon.
|
||||
|
||||
Create a folder named `S3private`.
|
||||
Set the storage to `AWS S3 (Compatible)` and fill the required parameters:
|
||||
|
||||
- bucket name
|
||||
- region
|
||||
- credentials: access key and access secret
|
||||
|
||||

|
||||
|
||||
The important part is the `Key Prefix`: set it to `users/%username%/`.
|
||||
|
||||

|
||||
|
||||
The placeholder `%username%` will be replaced with the associated username.
|
||||
|
||||
Create another folder named `S3shared` with the same settings as `S3private` but this time set the `Key Prefix` to `shared/`.
|
||||
The `Key Prefix` has no placeholder, so the folder will operate on a static path that won't change based on the associated user.
|
||||
|
||||
Now click on `Groups` and then on the `+` icon and add a group named `Primary`.
|
||||
|
||||
Set the `Home Dir` to `/srv/sftpgo/data/%username%`.
|
||||
|
||||

|
||||
|
||||
As before, the placeholder `%username%` will be replaced with the associated username.
|
||||
|
||||
Add the two virtual folders to this group and set the `Max file upload size` to 1GB.
|
||||
|
||||

|
||||
|
||||
Add a new group and name it `SharedReadOnly`. In the ACLs section, set the permissions on the `/shared` path so that read-only access is granted.
|
||||
|
||||

|
||||
|
||||
The group setup is complete. We can now create our users and set their primary group to `Primary`.
|
||||
For the users who need read-only access to the `/shared` path we also have to set `SharedReadOnly` as a secondary group.
|
||||
|
||||
You can now log in with any SFTP client such as FileZilla, WinSCP, etc. and verify that the requirements are met.
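For example, a quick check with the `sftp` CLI could look like the following hypothetical session, assuming a user `user1` that belongs to the `SharedReadOnly` secondary group and that the virtual folders are exposed at `/s3user1` and `/shared` as per the requirements above:

```shell
$ sftp -P 2022 user1@127.0.0.1
sftp> ls
s3user1    shared
sftp> cd shared
sftp> put file.txt
remote open("/shared/file.txt"): Permission denied
```

A user without the `SharedReadOnly` group should instead be able to upload to `/shared`.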
|
||||
|
||||
### Simplify user page
|
||||
|
||||
The add/update user page has many configuration options and can be intimidating for some administrators. We can hide most of the settings and automatically add groups to newly created users. This way the hidden settings are inherited from the automatically assigned groups, so administrators can add new users simply by setting the username and credentials.
|
||||
|
||||
Click on `Admins` and then on the `+` icon and add an admin named `simply`.
|
||||
In the `Groups for users` section, set `Primary` as the primary group and `SharedReadOnly` as a secondary group.
|
||||
In the `User page preferences` section hide all the sections.
|
||||
|
||||

|
||||
|
||||
Log in using the newly created administrator and try to add a new user. The user page is simplified as you can see in the following screen.
|
||||
|
||||

|
||||
|
||||
## Configuration parameters
|
||||
|
||||
Until now we have used the default configuration. To change the global service parameters you have to edit the configuration file, or set the appropriate environment variables, and restart SFTPGo to apply the changes.
|
||||
|
||||
A full explanation of all configuration methods can be found [here](./../full-configuration.md); here we explore some common use cases. Please keep in mind that SFTPGo can also be configured via environment variables, which is very convenient if you are using Docker.
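For instance, assuming you run the official `drakkan/sftpgo` Docker image, the environment variables described in the next sections can be passed with `-e` flags. This is only a sketch: adjust the image tag, ports, host and credentials to your setup, and note that the database host below is a placeholder.

```shell
# sketch: run SFTPGo in Docker with data provider settings passed as environment variables
docker run --name sftpgo -d \
  -p 8080:8080 -p 2022:2022 \
  -e SFTPGO_DATA_PROVIDER__DRIVER=postgresql \
  -e SFTPGO_DATA_PROVIDER__HOST=192.168.1.10 \
  -e SFTPGO_DATA_PROVIDER__NAME=sftpgo \
  -e SFTPGO_DATA_PROVIDER__USERNAME=sftpgo \
  -e SFTPGO_DATA_PROVIDER__PASSWORD='your password here' \
  drakkan/sftpgo
```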
|
||||
|
||||
The default configuration file is `sftpgo.json` and it can be found within the `/etc/sftpgo` directory if you installed from Linux distro packages. On Windows the configuration file can be found within the `{commonappdata}\SFTPGo` directory where `{commonappdata}` is typically `C:\ProgramData`. SFTPGo also supports reading from TOML and YAML configuration files.
|
||||
|
||||
The configuration file can change between versions, and merging your custom settings into the default configuration file after updating SFTPGo may be time-consuming. For this reason we suggest setting your custom settings using environment variables.
|
||||
If you install SFTPGo on Linux using the official deb/rpm packages you can set your custom environment variables in the file `/etc/sftpgo/sftpgo.env`.
|
||||
SFTPGo also reads files inside the `env.d` directory relative to the config dir (`/etc/sftpgo/env.d` on Linux and `{commonappdata}\SFTPGo\env.d` on Windows) and exports the valid variables into the environment if they are not already set.
|
||||
Of course you can also set environment variables with the method provided by the operating system of your choice.
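As a minimal sketch, assuming you want to override `common.idle_timeout` from `sftpgo.json`, you could create a file under `env.d` (the file name here is just an example) and restart the service:

```shell
# hypothetical override file: the variable name follows the SFTPGO_<SECTION>__<KEY> convention
echo 'SFTPGO_COMMON__IDLE_TIMEOUT=30' | sudo tee /etc/sftpgo/env.d/idle-timeout.env
sudo systemctl restart sftpgo
```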
|
||||
|
||||
The following snippets assume you are running SFTPGo on Linux, but they can be easily adapted to other operating systems.
|
||||
|
||||
### Use PostgreSQL data provider
|
||||
|
||||
Create a PostgreSQL database named `sftpgo` and a PostgreSQL user with the correct permissions, for example using the `psql` CLI.
|
||||
|
||||
```shell
|
||||
sudo -i -u postgres psql
|
||||
CREATE DATABASE "sftpgo" WITH ENCODING='UTF8' CONNECTION LIMIT=-1;
|
||||
create user "sftpgo" with encrypted password 'your password here';
|
||||
grant all privileges on database "sftpgo" to "sftpgo";
|
||||
\q
|
||||
```
|
||||
|
||||
You can open the SFTPGo configuration file, search for the `data_provider` section and change it as follows.
|
||||
|
||||
```json
|
||||
"data_provider": {
|
||||
"driver": "postgresql",
|
||||
"name": "sftpgo",
|
||||
"host": "127.0.0.1",
|
||||
"port": 5432,
|
||||
"username": "sftpgo",
|
||||
"password": "your password here",
|
||||
...
|
||||
}
|
||||
```
|
||||
|
||||
Alternatively (recommended), you can use environment variables by creating the file `/etc/sftpgo/env.d/postgresql.env` with the following content.
|
||||
|
||||
```shell
|
||||
SFTPGO_DATA_PROVIDER__DRIVER=postgresql
|
||||
SFTPGO_DATA_PROVIDER__NAME=sftpgo
|
||||
SFTPGO_DATA_PROVIDER__HOST=127.0.0.1
|
||||
SFTPGO_DATA_PROVIDER__PORT=5432
|
||||
SFTPGO_DATA_PROVIDER__USERNAME=sftpgo
|
||||
SFTPGO_DATA_PROVIDER__PASSWORD=your password here
|
||||
```
|
||||
|
||||
Confirm that the database connection works by initializing the data provider.
|
||||
|
||||
```shell
|
||||
$ sudo su - sftpgo -s /bin/bash -c 'sftpgo initprovider -c /etc/sftpgo'
|
||||
2021-05-19T22:21:54.000 INF Initializing provider: "postgresql" config file: "/etc/sftpgo/sftpgo.json"
|
||||
2021-05-19T22:21:54.000 INF updating database schema version: 8 -> 9
|
||||
2021-05-19T22:21:54.000 INF Data provider successfully initialized/updated
|
||||
```
|
||||
|
||||
Ensure that SFTPGo starts after the database service.
|
||||
|
||||
```shell
|
||||
sudo systemctl edit sftpgo.service
|
||||
```
|
||||
|
||||
And override the unit definition with the following snippet.
|
||||
|
||||
```shell
|
||||
[Unit]
|
||||
After=postgresql.service
|
||||
```
|
||||
|
||||
Restart SFTPGo to apply the changes.
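With the Linux packages this typically means restarting the systemd unit:

```shell
sudo systemctl restart sftpgo
```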
|
||||
|
||||
### Use MySQL/MariaDB data provider
|
||||
|
||||
Create a MySQL database named `sftpgo` and a MySQL user with the correct permissions, for example using the `mysql` CLI.
|
||||
|
||||
```shell
|
||||
$ mysql -u root
|
||||
MariaDB [(none)]> CREATE DATABASE sftpgo CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;
|
||||
Query OK, 1 row affected (0.000 sec)
|
||||
|
||||
MariaDB [(none)]> grant all privileges on sftpgo.* to sftpgo@localhost identified by 'your password here';
|
||||
Query OK, 0 rows affected (0.027 sec)
|
||||
|
||||
MariaDB [(none)]> quit
|
||||
Bye
|
||||
```
|
||||
|
||||
You can open the SFTPGo configuration file, search for the `data_provider` section and change it as follows.
|
||||
|
||||
```json
|
||||
"data_provider": {
|
||||
"driver": "mysql",
|
||||
"name": "sftpgo",
|
||||
"host": "127.0.0.1",
|
||||
"port": 3306,
|
||||
"username": "sftpgo",
|
||||
"password": "your password here",
|
||||
...
|
||||
}
|
||||
```
|
||||
|
||||
Alternatively (recommended), you can use environment variables by creating the file `/etc/sftpgo/env.d/mysql.env` with the following content.
|
||||
|
||||
```shell
|
||||
SFTPGO_DATA_PROVIDER__DRIVER=mysql
|
||||
SFTPGO_DATA_PROVIDER__NAME=sftpgo
|
||||
SFTPGO_DATA_PROVIDER__HOST=127.0.0.1
|
||||
SFTPGO_DATA_PROVIDER__PORT=3306
|
||||
SFTPGO_DATA_PROVIDER__USERNAME=sftpgo
|
||||
SFTPGO_DATA_PROVIDER__PASSWORD=your password here
|
||||
```
|
||||
|
||||
Confirm that the database connection works by initializing the data provider.
|
||||
|
||||
```shell
|
||||
$ sudo su - sftpgo -s /bin/bash -c 'sftpgo initprovider -c /etc/sftpgo'
|
||||
2021-05-19T22:29:30.000 INF Initializing provider: "mysql" config file: "/etc/sftpgo/sftpgo.json"
|
||||
2021-05-19T22:29:30.000 INF updating database schema version: 8 -> 9
|
||||
2021-05-19T22:29:30.000 INF Data provider successfully initialized/updated
|
||||
```
|
||||
|
||||
Ensure that SFTPGo starts after the database service.
|
||||
|
||||
```shell
|
||||
sudo systemctl edit sftpgo.service
|
||||
```
|
||||
|
||||
And override the unit definition with the following snippet.
|
||||
|
||||
```shell
|
||||
[Unit]
|
||||
After=mariadb.service
|
||||
```
|
||||
|
||||
Restart SFTPGo to apply the changes.
|
||||
|
||||
### Use CockroachDB data provider
|
||||
|
||||
We assume you have installed CockroachDB as follows:
|
||||
|
||||
```shell
|
||||
sudo su
|
||||
export CRDB_VERSION=22.1.8 # set the latest available version here
|
||||
wget -qO- https://binaries.cockroachdb.com/cockroach-v${CRDB_VERSION}.linux-amd64.tgz | tar xvz
|
||||
cp -i cockroach-v${CRDB_VERSION}.linux-amd64/cockroach /usr/local/bin/
|
||||
mkdir -p /usr/local/lib/cockroach
|
||||
cp -i cockroach-v${CRDB_VERSION}.linux-amd64/lib/libgeos.so /usr/local/lib/cockroach/
|
||||
cp -i cockroach-v${CRDB_VERSION}.linux-amd64/lib/libgeos_c.so /usr/local/lib/cockroach/
|
||||
mkdir /var/lib/cockroach
|
||||
chown sftpgo:sftpgo /var/lib/cockroach
|
||||
mkdir -p /etc/cockroach/{certs,ca}
|
||||
chmod 700 /etc/cockroach/ca
|
||||
/usr/local/bin/cockroach cert create-ca --certs-dir=/etc/cockroach/certs --ca-key=/etc/cockroach/ca/ca.key
|
||||
/usr/local/bin/cockroach cert create-node localhost $(hostname) --certs-dir=/etc/cockroach/certs --ca-key=/etc/cockroach/ca/ca.key
|
||||
/usr/local/bin/cockroach cert create-client root --certs-dir=/etc/cockroach/certs --ca-key=/etc/cockroach/ca/ca.key
|
||||
chown -R sftpgo:sftpgo /etc/cockroach/certs
|
||||
exit
|
||||
```
|
||||
|
||||
and you are running it using a systemd unit like this one:
|
||||
|
||||
```shell
|
||||
[Unit]
|
||||
Description=Cockroach Database single node
|
||||
Requires=network.target
|
||||
[Service]
|
||||
Type=notify
|
||||
WorkingDirectory=/var/lib/cockroach
|
||||
ExecStart=/usr/local/bin/cockroach start-single-node --certs-dir=/etc/cockroach/certs --http-addr 127.0.0.1:8888 --listen-addr 127.0.0.1:26257 --cache=.25 --max-sql-memory=.25 --store=path=/var/lib/cockroach
|
||||
TimeoutStopSec=60
|
||||
Restart=always
|
||||
RestartSec=10
|
||||
StandardOutput=journal
|
||||
StandardError=journal
|
||||
User=sftpgo
|
||||
[Install]
|
||||
WantedBy=default.target
|
||||
```
|
||||
|
||||
Create a CockroachDB database named `sftpgo`.
|
||||
|
||||
```shell
|
||||
$ sudo /usr/local/bin/cockroach sql --certs-dir=/etc/cockroach/certs -e 'create database "sftpgo"'
|
||||
CREATE DATABASE
|
||||
|
||||
Time: 13ms
|
||||
```
|
||||
|
||||
You can open the SFTPGo configuration file, search for the `data_provider` section and change it as follows.
|
||||
|
||||
```json
|
||||
"data_provider": {
|
||||
"driver": "cockroachdb",
|
||||
"name": "sftpgo",
|
||||
"host": "localhost",
|
||||
"port": 26257,
|
||||
"username": "root",
|
||||
"password": "",
|
||||
"sslmode": 3,
|
||||
"root_cert": "/etc/cockroach/certs/ca.crt",
|
||||
"client_cert": "/etc/cockroach/certs/client.root.crt",
|
||||
"client_key": "/etc/cockroach/certs/client.root.key",
|
||||
...
|
||||
}
|
||||
```
|
||||
|
||||
Alternatively (recommended), you can use environment variables by creating the file `/etc/sftpgo/env.d/cockroachdb.env` with the following content.
|
||||
|
||||
```shell
|
||||
SFTPGO_DATA_PROVIDER__DRIVER=cockroachdb
|
||||
SFTPGO_DATA_PROVIDER__NAME=sftpgo
|
||||
SFTPGO_DATA_PROVIDER__HOST=localhost
|
||||
SFTPGO_DATA_PROVIDER__PORT=26257
|
||||
SFTPGO_DATA_PROVIDER__USERNAME=root
|
||||
SFTPGO_DATA_PROVIDER__SSLMODE=3
|
||||
SFTPGO_DATA_PROVIDER__ROOT_CERT="/etc/cockroach/certs/ca.crt"
|
||||
SFTPGO_DATA_PROVIDER__CLIENT_CERT="/etc/cockroach/certs/client.root.crt"
|
||||
SFTPGO_DATA_PROVIDER__CLIENT_KEY="/etc/cockroach/certs/client.root.key"
|
||||
```
|
||||
|
||||
Confirm that the database connection works by initializing the data provider.
|
||||
|
||||
```shell
|
||||
$ sudo su - sftpgo -s /bin/bash -c 'sftpgo initprovider -c /etc/sftpgo'
|
||||
2022-06-02T14:54:04.510 INF Initializing provider: "cockroachdb" config file: "/etc/sftpgo/sftpgo.json"
|
||||
2022-06-02T14:54:04.554 INF creating initial database schema, version 15
|
||||
2022-06-02T14:54:04.698 INF updating database schema version: 15 -> 16
|
||||
2022-06-02T14:54:07.093 INF updating database schema version: 16 -> 17
|
||||
2022-06-02T14:54:07.672 INF updating database schema version: 17 -> 18
|
||||
2022-06-02T14:54:07.699 INF updating database schema version: 18 -> 19
|
||||
2022-06-02T14:54:07.721 INF Data provider successfully initialized/updated
|
||||
```
|
||||
|
||||
Ensure that SFTPGo starts after the database service.
|
||||
|
||||
```shell
|
||||
sudo systemctl edit sftpgo.service
|
||||
```
|
||||
|
||||
And override the unit definition with the following snippet.
|
||||
|
||||
```shell
|
||||
[Unit]
|
||||
After=cockroachdb.service
|
||||
```
|
||||
|
||||
Restart SFTPGo to apply the changes.
|
||||
|
||||
### Enable FTP service
|
||||
|
||||
You can set the configuration options to enable the FTP service by opening the SFTPGo configuration file, looking for the `ftpd` section and editing it as follows.
|
||||
|
||||
```json
|
||||
"ftpd": {
|
||||
"bindings": [
|
||||
{
|
||||
"port": 2121,
|
||||
"address": "",
|
||||
"apply_proxy_config": true,
|
||||
"tls_mode": 0,
|
||||
"certificate_file": "",
|
||||
"certificate_key_file": "",
|
||||
"min_tls_version": 12,
|
||||
"force_passive_ip": "",
|
||||
"passive_ip_overrides": [],
|
||||
"client_auth_type": 0,
|
||||
"tls_cipher_suites": [],
|
||||
"passive_connections_security": 0,
|
||||
"active_connections_security": 0,
|
||||
"debug": false
|
||||
}
|
||||
],
|
||||
"banner": "",
|
||||
"banner_file": "",
|
||||
"active_transfers_port_non_20": true,
|
||||
"passive_port_range": {
|
||||
"start": 50000,
|
||||
"end": 50100
|
||||
},
|
||||
...
|
||||
}
|
||||
```
|
||||
|
||||
Alternatively (recommended), you can use environment variables by creating the file `/etc/sftpgo/env.d/ftpd.env` with the following content.
|
||||
|
||||
```shell
|
||||
SFTPGO_FTPD__BINDINGS__0__PORT=2121
|
||||
```
|
||||
|
||||
Restart SFTPGo to apply the changes. The FTP service is now available on port `2121`.
|
||||
|
||||
You can also configure the passive port range (`50000-50100` by default); these ports must be reachable for passive FTP to work. If your FTP server is on the private network side of a NAT configuration, you have to set `force_passive_ip` to your external IP address. You may also need to open the passive port range on your firewall.
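For example, using environment variables, this could look like the following sketch; replace the address with your real external IP:

```shell
# hypothetical NAT setup: external address and passive port range
SFTPGO_FTPD__BINDINGS__0__FORCE_PASSIVE_IP=203.0.113.10
SFTPGO_FTPD__PASSIVE_PORT_RANGE__START=50000
SFTPGO_FTPD__PASSIVE_PORT_RANGE__END=50100
```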
|
||||
|
||||
It is recommended that you provide a certificate and key file to expose FTP over TLS. You should prefer SFTP to FTP even if you configure TLS; please don't blindly enable the legacy FTP protocol.
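For example, using environment variables. This is a sketch: the certificate paths are assumptions, and we assume `tls_mode` set to `1` requires explicit FTPS; check the configuration reference for the exact meaning of each value.

```shell
# hypothetical paths; use your own certificate and key
SFTPGO_FTPD__BINDINGS__0__CERTIFICATE_FILE=/etc/sftpgo/certs/ftpd.crt
SFTPGO_FTPD__BINDINGS__0__CERTIFICATE_KEY_FILE=/etc/sftpgo/certs/ftpd.key
SFTPGO_FTPD__BINDINGS__0__TLS_MODE=1
```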
|
||||
|
||||
### Enable WebDAV service
|
||||
|
||||
You can set the configuration options to enable the WebDAV service by opening the SFTPGo configuration file, looking for the `webdavd` section and editing it as follows.
|
||||
|
||||
```json
|
||||
"webdavd": {
|
||||
"bindings": [
|
||||
{
|
||||
"port": 10080,
|
||||
"address": "",
|
||||
"enable_https": false,
|
||||
"certificate_file": "",
|
||||
"certificate_key_file": "",
|
||||
"min_tls_version": 12,
|
||||
"client_auth_type": 0,
|
||||
"tls_cipher_suites": [],
|
||||
"prefix": "",
|
||||
"proxy_allowed": [],
|
||||
"client_ip_proxy_header": "",
|
||||
"client_ip_header_depth": 0,
|
||||
"disable_www_auth_header": false
|
||||
}
|
||||
],
|
||||
...
|
||||
}
|
||||
```
|
||||
|
||||
Alternatively (recommended), you can use environment variables by creating the file `/etc/sftpgo/env.d/webdavd.env` with the following content.
|
||||
|
||||
```shell
|
||||
SFTPGO_WEBDAVD__BINDINGS__0__PORT=10080
|
||||
```
|
||||
|
||||
Restart SFTPGo to apply the changes. The WebDAV service is now available on port `10080`. It is recommended that you provide a certificate and key file to expose WebDAV over HTTPS.
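For example, using environment variables; the certificate paths below are assumptions, use your own files:

```shell
# hypothetical paths; use your own certificate and key
SFTPGO_WEBDAVD__BINDINGS__0__ENABLE_HTTPS=1
SFTPGO_WEBDAVD__BINDINGS__0__CERTIFICATE_FILE=/etc/sftpgo/certs/webdavd.crt
SFTPGO_WEBDAVD__BINDINGS__0__CERTIFICATE_KEY_FILE=/etc/sftpgo/certs/webdavd.key
```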
|
||||
BIN
docs/howto/img/add-folder.png
Normal file
|
After Width: | Height: | Size: 73 KiB |
BIN
docs/howto/img/add-group.png
Normal file
|
After Width: | Height: | Size: 49 KiB |
BIN
docs/howto/img/add-user-simplified.png
Normal file
|
After Width: | Height: | Size: 30 KiB |
BIN
docs/howto/img/add-user.png
Normal file
|
After Width: | Height: | Size: 28 KiB |
BIN
docs/howto/img/admin-2FA-login.png
Normal file
|
After Width: | Height: | Size: 37 KiB |
BIN
docs/howto/img/admin-2FA.png
Normal file
|
After Width: | Height: | Size: 14 KiB |
BIN
docs/howto/img/admin-save-2FA.png
Normal file
|
After Width: | Height: | Size: 53 KiB |
BIN
docs/howto/img/az-user-1.png
Normal file
|
After Width: | Height: | Size: 52 KiB |
BIN
docs/howto/img/az-user-2.png
Normal file
|
After Width: | Height: | Size: 56 KiB |
BIN
docs/howto/img/backup-action.png
Normal file
|
After Width: | Height: | Size: 22 KiB |
BIN
docs/howto/img/backup-notification-action.png
Normal file
|
After Width: | Height: | Size: 53 KiB |
BIN
docs/howto/img/create-dirs-action.png
Normal file
|
After Width: | Height: | Size: 41 KiB |
BIN
docs/howto/img/create-dirs-failure-notification.png
Normal file
|
After Width: | Height: | Size: 54 KiB |
BIN
docs/howto/img/create-dirs-rule-actions.png
Normal file
|
After Width: | Height: | Size: 34 KiB |
BIN
docs/howto/img/create-dirs-rule.png
Normal file
|
After Width: | Height: | Size: 30 KiB |
BIN
docs/howto/img/daily-backup-actions.png
Normal file
|
After Width: | Height: | Size: 33 KiB |
BIN
docs/howto/img/daily-backup-schedule.png
Normal file
|
After Width: | Height: | Size: 57 KiB |
BIN
docs/howto/img/folders.png
Normal file
|
After Width: | Height: | Size: 74 KiB |
BIN
docs/howto/img/gcs-user.png
Normal file
|
After Width: | Height: | Size: 78 KiB |
BIN
docs/howto/img/initial-screen.png
Normal file
|
After Width: | Height: | Size: 70 KiB |
BIN
docs/howto/img/local-encrypted.png
Normal file
|
After Width: | Height: | Size: 29 KiB |
BIN
docs/howto/img/primary-group-settings.png
Normal file
|
After Width: | Height: | Size: 82 KiB |
BIN
docs/howto/img/read-only-share.png
Normal file
|
After Width: | Height: | Size: 22 KiB |
BIN
docs/howto/img/s3-key-prefix.png
Normal file
|
After Width: | Height: | Size: 13 KiB |
BIN
docs/howto/img/s3-private-folder.png
Normal file
|
After Width: | Height: | Size: 59 KiB |
BIN
docs/howto/img/s3-user-1.png
Normal file
|
After Width: | Height: | Size: 97 KiB |
BIN
docs/howto/img/s3-user-2.png
Normal file
|
After Width: | Height: | Size: 60 KiB |
BIN
docs/howto/img/setup.png
Normal file
|
After Width: | Height: | Size: 33 KiB |