Compare commits

...

420 Commits

Author SHA1 Message Date
Nicola Murino
8d4f40ccd2 release workflow add initprovider again 2020-10-10 22:29:04 +02:00
Nicola Murino
765bad5edd set version to 1.1.0 2020-10-10 22:09:48 +02:00
Nicola Murino
0c0382c9b5 docker: disable scheduled build
We already have an edge version built after each commit
2020-10-10 20:15:34 +02:00
Nicola Murino
bbab6149e8 fix windows service: was broken in the latest commit 2020-10-09 22:42:13 +02:00
Nicola Murino
ce9387f1ab update dependencies and some docs 2020-10-09 20:25:42 +02:00
Nicola Murino
d126c5736a Docker: add Debian based image 2020-10-08 21:43:13 +02:00
Nicola Murino
5048d54d32 PPA: add source files used to build the packages 2020-10-08 18:20:15 +02:00
Nicola Murino
f22fe6af76 remove py extension from REST API CLI 2020-10-08 16:02:04 +02:00
Mark Sagi-Kazar
8034f289d1 Fix empty env context in nightly builds
Signed-off-by: Mark Sagi-Kazar <mark.sagikazar@gmail.com>
2020-10-08 15:48:40 +02:00
Nicola Murino
eed61ac510 Dockerfile: add a FEATURES build arg
This ARG allows disabling some optional features; it might be
useful if you build the image yourself
2020-10-07 20:04:02 +02:00
Nicola Murino
412d6096c0 Linux pkgs: fix postinstall scripts 2020-10-06 18:18:43 +02:00
Nicola Murino
c289ae07d2 Docker workflow: explicitly set image labels
while waiting for https://github.com/docker/build-push-action/issues/165
to be fixed.

Some minor changes to the default configuration for Linux packages
2020-10-06 18:03:55 +02:00
Nicola Murino
87f78b07b3 docker: add some docs and build for arm64 too 2020-10-06 13:59:31 +02:00
Mark Sagi-Kazar
5e2db77ef9 refactor: add an enum for filesystem providers
Signed-off-by: Mark Sagi-Kazar <mark.sagikazar@gmail.com>
2020-10-05 21:40:21 +02:00
Nicola Murino
c992072286 data provider: add a setting to prevent auto-update 2020-10-05 19:42:33 +02:00
Nicola Murino
0ef826c090 docker package: fix description 2020-10-05 17:24:09 +02:00
Mark Sagi-Kazar
5da75c3915 ci: enable docker build
Signed-off-by: Mark Sagi-Kazar <mark.sagikazar@gmail.com>
2020-10-05 16:32:59 +02:00
Nicola Murino
8222baa7ed Dockerfile: minor changes 2020-10-05 16:31:22 +02:00
Mark Sagi-Kazar
7b76b51314 feat: configure database path using configuration
Signed-off-by: Mark Sagi-Kazar <mark.sagikazar@gmail.com>
2020-10-05 16:15:06 +02:00
Mark Sagi-Kazar
c96dbbd3b5 feat: save credentials to /var/lib/sftpgo
Signed-off-by: Mark Sagi-Kazar <mark.sagikazar@gmail.com>
2020-10-05 16:15:06 +02:00
Mark Sagi-Kazar
da6ccedf24 feat: save database to /var/lib/sftpgo
Signed-off-by: Mark Sagi-Kazar <mark.sagikazar@gmail.com>
2020-10-05 16:15:06 +02:00
Mark Sagi-Kazar
13b37a835f revert: boltdb, sqlite is not automatically initialized
Signed-off-by: Mark Sagi-Kazar <mark.sagikazar@gmail.com>
2020-10-05 16:15:06 +02:00
Mark Sagi-Kazar
863fa33309 feat: install additional packages
Signed-off-by: Mark Sagi-Kazar <mark.sagikazar@gmail.com>
2020-10-05 16:15:06 +02:00
Mark Sagi-Kazar
9f4c54a212 refactor: make /var/lib/sftpgo the user home
Signed-off-by: Mark Sagi-Kazar <mark.sagikazar@gmail.com>
2020-10-05 16:15:06 +02:00
Mark Sagi-Kazar
2a7bff4c0e feat: switch to boltdb by default to make the container work out of the box
Signed-off-by: Mark Sagi-Kazar <mark.sagikazar@gmail.com>
2020-10-05 16:15:06 +02:00
Mark Sagi-Kazar
17406d1aab fix: permission issue caused by root owning the volume
Signed-off-by: Mark Sagi-Kazar <mark.sagikazar@gmail.com>
2020-10-05 16:15:06 +02:00
Mark Sagi-Kazar
6537c53d43 feat: add host_keys under /var/lib/sftpgo
Signed-off-by: Mark Sagi-Kazar <mark.sagikazar@gmail.com>
2020-10-05 16:15:06 +02:00
Mark Sagi-Kazar
b4bd10521a feat: move data under /var/lib/sftpgo
Signed-off-by: Mark Sagi-Kazar <mark.sagikazar@gmail.com>
2020-10-05 16:15:06 +02:00
Mark Sagi-Kazar
65cbef1962 feat: move backups under /var/lib/sftpgo
Signed-off-by: Mark Sagi-Kazar <mark.sagikazar@gmail.com>
2020-10-05 16:15:06 +02:00
Mark Sagi-Kazar
a8d355900a fix: missing sha from docker image on GHA
Signed-off-by: Mark Sagi-Kazar <mark.sagikazar@gmail.com>
2020-10-05 16:15:06 +02:00
Mark Sagi-Kazar
ffd9c381ce feat: add workflow for building docker image
Signed-off-by: Mark Sagi-Kazar <mark.sagikazar@gmail.com>
2020-10-05 16:15:06 +02:00
Mark Sagi-Kazar
2a0bce0beb feat: add dockerfile
Signed-off-by: Mark Sagi-Kazar <mark.sagikazar@gmail.com>
2020-10-05 16:15:06 +02:00
Nicola Murino
f1f7b81088 logger: don't print connection_id if empty
Fixes #183
2020-10-05 15:51:17 +02:00
Nicola Murino
f9827f958b sftpd auto host keys: try to auto-create parent dir if missing 2020-10-05 14:16:57 +02:00
Nicola Murino
3e2afc35ba data provider: try to automatically initialize it if required 2020-10-05 12:55:49 +02:00
Ilias Trichopoulos
c65dd86d5e Fix typos (#181) 2020-10-05 11:29:18 +02:00
Nicola Murino
2d6c0388af update deps 2020-10-04 18:29:42 +02:00
Nicola Murino
4d19d87720 pkgs: use glob notation to include static folder 2020-10-02 18:16:49 +02:00
Nicola Murino
5eabaf98e0 gcs: remove a superfluous debug log 2020-09-29 09:17:08 +02:00
Nicola Murino
d1f0e9ae9f CGS: implement MimeTyper interface 2020-09-28 22:12:46 +02:00
Thomas Blommaert
cd56039ab7 GCS mime-type detection (#179)
Fixes #178
2020-09-28 21:52:18 +02:00
Nicola Murino
55515fee95 update deps, GCS can now finally use attribute selection
See https://github.com/googleapis/google-cloud-go/pull/2661
2020-09-28 12:51:19 +02:00
Nicola Murino
13d43a2d31 improve some docs 2020-09-27 09:24:10 +02:00
Nicola Murino
001261433b howto postgres-s3: update to use the debian package 2020-09-26 19:28:56 +02:00
Nicola Murino
03bf595525 automatically build deb and rpm Linux packages
The packages are built after each tag/commit

Fixes #176
2020-09-26 14:07:24 +02:00
Nicola Murino
4ebedace1e systemd unit: run as "sftpgo" system user
Update the docs too

Fixes #177
2020-09-25 18:23:04 +02:00
Stephan Müller
b23276c002 Set verbosity for go commands in docker build (#174) 2020-09-21 19:33:44 +02:00
Nicola Murino
bf708cb8bc osfs: improve isSubDir check 2020-09-21 19:32:33 +02:00
Nicola Murino
a550d082a3 portable mode: advertise WebDAV service if requested 2020-09-21 16:08:32 +02:00
Nicola Murino
6c1a7449fe ssh commands: return better error messages
This improves the fix for #171 and returns better error messages for
SSH commands other than SCP too
2020-09-19 10:14:30 +02:00
Nicola Murino
f0c9b55036 dataprovider: improve user validation errors
Fixes #170
2020-09-18 19:21:24 +02:00
Nicola Murino
209badf10c scp: return better error messages
Fixes #171
2020-09-18 19:13:09 +02:00
Nicola Murino
242dde4480 sftpd: ensure to always close idle connections
after the last commit this wasn't the case anymore

Completely fixes #169
2020-09-18 18:15:28 +02:00
Nicola Murino
2df0dd1f70 sshd: map each channel with a new connection
Fixes #169
2020-09-18 10:52:53 +02:00
Nicola Murino
98a6d138d4 sftpd: add a test case to ensure we return sftp.ErrSSHFxNoSuchFile ...
if stat/lstat fails on a missing file
2020-09-17 12:30:48 +02:00
Nicola Murino
38f06ab373 ftpd: fix TLS for active connections
See https://github.com/fclairamb/ftpserverlib/issues/177

Some minor doc improvements
2020-09-17 09:45:40 +02:00
Nicola Murino
3c1300721c add some basic how-to style documents 2020-09-13 19:43:56 +02:00
Nicola Murino
61003c8079 sftpd: add lstat support 2020-09-11 09:30:25 +02:00
Nicola Murino
01850c7399 REST API: remove status from ApiResponse
it duplicates the HTTP status header
2020-09-08 09:45:21 +02:00
Nicola Murino
b9c381e26f sftpd: update pkg/sftp
The patch to open a file in read/write mode is now merged
2020-09-06 11:40:31 +02:00
Nicola Murino
542554fb2c replace the library to verify UNIX's crypt(3) passwords 2020-09-04 21:08:09 +02:00
Nicola Murino
bdf18fa862 password hashing: expose argon2 options
So the hashing complexity can be changed depending on available
memory/CPU resources and business requirements
2020-09-04 17:09:31 +02:00
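
A minimal sketch of how such options can feed an Argon2 key derivation, assuming golang.org/x/crypto/argon2 (the commit does not name the library); the HashingOptions struct is hypothetical:

    package hashing

    import (
        "crypto/rand"
        "encoding/base64"
        "fmt"

        "golang.org/x/crypto/argon2"
    )

    // HashingOptions is hypothetical; it mirrors the kind of options exposed.
    type HashingOptions struct {
        Memory      uint32 // memory cost in KiB
        Iterations  uint32 // time cost (passes over memory)
        Parallelism uint8  // number of threads
    }

    func HashPassword(password string, o HashingOptions) (string, error) {
        salt := make([]byte, 16)
        if _, err := rand.Read(salt); err != nil {
            return "", err
        }
        // Raising Memory/Iterations makes each hash costlier to brute-force.
        key := argon2.IDKey([]byte(password), salt, o.Iterations, o.Memory, o.Parallelism, 32)
        return fmt.Sprintf("$argon2id$m=%d,t=%d,p=%d$%s$%s", o.Memory, o.Iterations,
            o.Parallelism, base64.RawStdEncoding.EncodeToString(salt),
            base64.RawStdEncoding.EncodeToString(key)), nil
    }
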
Nicola Murino
afc411c51b adjust runtime.GOMAXPROCS to match the container CPU quota, if any 2020-09-03 18:09:45 +02:00
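
The commit does not show the mechanism; a rough sketch of the idea, assuming cgroup v1 CFS quota files, is to cap GOMAXPROCS at quota/period:

    package cpuquota

    import (
        "os"
        "runtime"
        "strconv"
        "strings"
    )

    // SetFromCgroup caps GOMAXPROCS to the container CPU quota, if one is set.
    func SetFromCgroup() {
        quota := readInt("/sys/fs/cgroup/cpu/cpu.cfs_quota_us")
        period := readInt("/sys/fs/cgroup/cpu/cpu.cfs_period_us")
        if quota > 0 && period > 0 {
            if procs := int(quota / period); procs > 0 && procs < runtime.NumCPU() {
                runtime.GOMAXPROCS(procs)
            }
        }
    }

    func readInt(path string) int64 {
        data, err := os.ReadFile(path)
        if err != nil {
            return -1
        }
        n, _ := strconv.ParseInt(strings.TrimSpace(string(data)), 10, 64)
        return n
    }
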
Nicola Murino
a59163e56c multi-step auth: don't advertise password method if it is disabled
also rename the setting to password_authentication so it is more like
OpenSSH; add some test cases and improve documentation
2020-09-01 19:34:40 +02:00
Giorgio Pellero
8391b19abb Add password_disabled bool to sftpd config, disables password auth callback (#165) 2020-09-01 19:26:33 +02:00
Nicola Murino
3925c7ff95 REST API/Web admin: add a parameter to disconnect a user after an update
This way you can force the user to log in again and thus use the updated
configuration.

A deleted user will be automatically disconnected.

Fixes #163

Improved some docs too.
2020-09-01 16:10:26 +02:00
Nicola Murino
dbed110d02 WebDAV: add caching for authenticated users
In this way we get a big performance boost
2020-08-31 19:25:17 +02:00
Giorgio Pellero
f978355520 Fix "compatible" typo in README.md (#162) 2020-08-31 13:43:24 +02:00
Nicola Murino
4748e6f54d sftpd: handle read and write from the same handle (#158)
Fixes #155
2020-08-31 06:45:22 +02:00
Nicola Murino
91a4c64390 fix initprovider exit code for MySQL and PostgreSQL 2020-08-30 14:00:45 +02:00
Nicola Murino
600a107699 initprovider: check if the provider is already initialized
exit with code 0 if no initialization is required
2020-08-30 13:50:43 +02:00
Nicola Murino
2746c0b0f1 move stat to base connection and differentiate between Stat and Lstat
we will use Lstat once it is exposed in pkg/sftp
2020-08-25 18:23:00 +02:00
Nicola Murino
701a6115f8 ftpd: use ftpserverlib master, the tls patch is now merged 2020-08-24 23:06:10 +02:00
Nicola Murino
56b00addc4 docker: try to improve the docs
See #159
2020-08-24 15:46:31 +02:00
Nicola Murino
02e35ee002 sftpd: add Readlink support 2020-08-22 14:52:17 +02:00
Nicola Murino
5208e4a4ca sftpd: improve truncate
quota usage and max allowed write size are now properly updated after a
truncate
2020-08-22 10:12:00 +02:00
Nicola Murino
7381a867ba fix truncate test cases on Windows 2020-08-20 14:44:38 +02:00
Nicola Murino
f41ce6619f sftpd: add SSH_FXP_FSETSTAT support
This change will fix file editing from sshfs, we need this patch

https://github.com/pkg/sftp/pull/373

for pkg/sftp to support this feature
2020-08-20 13:54:36 +02:00
Nicola Murino
933427310d fix check pwd hook when using memory provider 2020-08-19 19:47:52 +02:00
Nicola Murino
8b0a1817b3 add check password hook
its main use case is to make it easy to support things like password+OTP for
protocols without keyboard-interactive support, such as FTP and WebDAV
2020-08-19 19:36:12 +02:00
Nicola Murino
04c9a5c008 add some example hooks for one time password logins
The examples use Twilio Authy since I use it for my GitHub account.

You can easily use other multi factor authentication software in a
similar way.
2020-08-18 21:21:01 +02:00
Nicola Murino
bbc8c091e6 portable mode: add WebDAV support 2020-08-17 14:08:08 +02:00
Nicola Murino
f3228713bc Allow individual protocols to be enabled per user
Fixes #154
2020-08-17 12:49:20 +02:00
Nicola Murino
fa5333784b add a maximum allowed size for a single upload 2020-08-16 20:17:02 +02:00
Nicola Murino
0dbf0cc81f WebDAV: add CORS support 2020-08-15 15:55:20 +02:00
Nicola Murino
196a56726e FTP improvements
- add a setting to require TLS
- add symlink support

require TLS 1.2 for all TLS connections
2020-08-15 13:02:25 +02:00
Nicola Murino
fe857dcb1b CI: use go 1.15 by default now that it is released 2020-08-12 16:42:38 +02:00
Nicola Murino
aa0ed5dbd0 add post-login hook
a login scope is supported too so you can get notifications for failed logins,
successful logins or both
2020-08-12 16:15:12 +02:00
Nicola Murino
a9e21c282a add WebDAV support
Fixes #147
2020-08-11 23:56:10 +02:00
Antoine Deschênes
9a15a54885 sftpd: set failed connection loglevel to debug (#152) 2020-08-06 21:20:31 +02:00
Nicola Murino
91dcc349de Add client IP address to external auth, pre-login and keyboard interactive hooks 2020-08-04 18:03:28 +02:00
Nicola Murino
fa41bfd06a Cloud backends: add support for FTP REST command
So partial downloads are now supported, as for the local fs
2020-08-03 18:03:09 +02:00
Nicola Murino
8839c34d53 FTP: implements ClientDriverExtensionRemoveDir
Fixes #149 for FTP too
2020-08-03 17:36:43 +02:00
Nicola Murino
11ceaa8850 docker: document how to enable FTP/S 2020-08-01 08:56:15 +02:00
Nicola Murino
2a9f7db1e2 Cloud FS: don't propagate the error if removing a folder returns not found
for Cloud FS, folders are virtual and generally disappear when the
last file is removed.

This fix doesn't work for FTP protocol for now.

Fixes #149
2020-07-31 19:24:57 +02:00
Nicola Murino
22338ed478 add post connect hook
Fixes #144
2020-07-30 22:33:49 +02:00
Nicola Murino
59a21158a6 fix FTP quota limits test case
It failed sometimes due to a bug in the FTP client library used in the test
cases. The failure was more frequent on FreeBSD but could happen on any
supported OS. It was not systematic since we use small files in the test
cases.

See https://github.com/jlaffaye/ftp/pull/192
2020-07-30 19:52:29 +02:00
Nicola Murino
93ce96d011 add support for the venerable FTP protocol
Fixes #46
2020-07-29 21:56:56 +02:00
Nicola Murino
cc2f04b0e4 fix concurrency test case on go 1.13
a sleep seems required, needs investigation
2020-07-25 08:55:17 +02:00
Nicola Murino
aa5191fa1b CI: add a timeout for test cases execution 2020-07-25 00:14:44 +02:00
Nicola Murino
4e41a5583d refactoring: add common package
The common package defines the interfaces that a protocol must implement
and contains code that can be shared among the supported protocols.

This way it should be easier to support new protocols
2020-07-24 23:39:38 +02:00
Nicola Murino
ded8fad5e4 add sponsor button 2020-07-13 22:23:11 +02:00
Nicola Murino
3702bc8413 several doc fixes 2020-07-11 13:03:15 +02:00
Nicola Murino
7896d2eef7 improve CI/CD workflows 2020-07-10 23:31:53 +02:00
Nicola Murino
da0f470f1c document FreeBSD support
improve some test cleanup
2020-07-10 19:20:37 +02:00
Nicola Murino
8fddb742df try to improve error message if the user forgot to initialize the provider
See #138
2020-07-09 20:01:37 +02:00
Nicola Murino
95fe26f3e3 keep track of services errors
So we can exit with the correct code if an error happens inside the
service goroutines

Fixes #143
2020-07-09 19:16:52 +02:00
Nicola Murino
1e10381143 improve help strings formatting
Fixes #139
2020-07-09 18:58:22 +02:00
Nicola Murino
96cbce52f9 cmd: add shell completion and man pages generators 2020-07-08 23:21:33 +02:00
Nicola Murino
0ea2ca3141 simplify data provider usage
remove the obsolete SQL scripts too. They are not required since v0.9.6
2020-07-08 19:59:31 +02:00
Nicola Murino
42877dd915 sql providers: add a query timeout 2020-07-08 18:54:44 +02:00
Nicola Murino
790c11c453 back to development 2020-07-07 19:40:22 +02:00
Nicola Murino
1ac4baa00a set version to 1.0.0 2020-07-06 22:41:50 +02:00
Nicola Murino
fc32286045 update deps 2020-07-05 22:54:00 +02:00
Nicola Murino
ee1131f254 enable SCP test cases on Windows 2020-06-30 23:25:25 +02:00
Nicola Murino
c5dc3ee3b6 simplify CI workflow 2020-06-29 20:07:51 +02:00
Nicola Murino
dd593b1035 ssh commands: send a generic error for unexpected failures
and log the real error, it could leak a filesystem path
2020-06-29 18:53:33 +02:00
Nicola Murino
4814786556 windows installer: fix exe name for service control
It worked before since Windows is case insensitive
2020-06-29 14:55:58 +02:00
Nicola Murino
4f0a936ca0 web admin: fix Microsoft edge compatibility
Edge does not support trimEnd
2020-06-29 11:46:02 +02:00
Nicola Murino
aec372ca31 Windows setup: require Windows 10
Windows 7 has been EOL for several months now
2020-06-29 11:15:24 +02:00
Nicola Murino
d2a739f8f6 add workflow status badge 2020-06-28 21:01:03 +02:00
Nicola Murino
165110872b add release workflow
for each tag a new release, including binaries, is automatically created
2020-06-28 15:57:33 +02:00
Nicola Murino
6ab4e9f533 add test case for concurrent logins 2020-06-27 12:36:42 +02:00
Nicola Murino
cf541d62ea recursive permissions check before renaming/copying directories 2020-06-26 23:38:29 +02:00
Nicola Murino
19fc58dd1f portable: avoid logging the user-provided password
disable multicast DNS by default

Fixes #135 and #136
2020-06-24 13:37:38 +02:00
Nicola Murino
ac9c475849 test bolt and memory provider on macOS and Windows too 2020-06-22 23:47:07 +02:00
Nicola Murino
ddf99ab706 workflow: execute test cases on MySQL too 2020-06-22 20:02:51 +02:00
Nicola Murino
0056984d4b Allow rotating logs on demand
The log file can be rotated by sending a SIGUSR1 signal on Unix-based systems
and using "sftpgo service rotatelogs" on Windows

Fixes #133
2020-06-22 19:11:53 +02:00
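
A minimal sketch of the Unix side of this, with a hypothetical rotateLogFile helper:

    package main

    import (
        "os"
        "os/signal"
        "syscall"
    )

    func main() {
        sigs := make(chan os.Signal, 1)
        signal.Notify(sigs, syscall.SIGUSR1)
        go func() {
            for range sigs {
                rotateLogFile() // hypothetical: close, rename, reopen the log
            }
        }()
        select {} // block forever while the server runs
    }

    func rotateLogFile() {
        // close the current file, rename it with a timestamp, open a new one
    }
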
Nicola Murino
44fb276464 workflow: execute tests using postgresql provider too 2020-06-21 21:28:59 +02:00
Nicola Murino
558a1b4050 workflow: execute tests using memory provider too 2020-06-21 20:20:30 +02:00
Nicola Murino
8f934f2648 run test cases against bolt provider too 2020-06-20 23:49:27 +02:00
Nicola Murino
403b9a8310 replace Travis with GitHub actions 2020-06-20 21:57:51 +02:00
Nicola Murino
33436488e2 update deps 2020-06-20 16:09:55 +02:00
Nicola Murino
3c28366fed add action to build code after each commit
You can download the build artifact from "Actions->Code Build"

Fixes #129
2020-06-20 15:34:19 +02:00
Nicola Murino
b80abe6c05 return exit code 1 on error
Fixes #132
2020-06-20 14:30:46 +02:00
Nicola Murino
8cb47817f6 Add API endpoint to set current quota
Fixes #130
2020-06-20 12:38:04 +02:00
Nicola Murino
23a80b01b6 add build tag to disable metrics 2020-06-19 17:08:51 +02:00
Nicola Murino
b30614e9d8 httpd: make the built-in web interface optional
The built-in web admin will be disabled if both "templates_path" and
"static_files_path" are empty

Fixes #131
2020-06-18 23:53:38 +02:00
Nicola Murino
e86089a9f3 quota: improve size check
get the remaining allowed size when an upload starts and check it against the
uploaded bytes

Fixes #128
2020-06-18 22:38:03 +02:00
Nicola Murino
3ceba7a147 sftpgo-copy: add quota limits check 2020-06-16 22:49:18 +02:00
Nicola Murino
c491133aff docs: fix markdown lint warnings 2020-06-15 23:46:11 +02:00
Nicola Murino
37418a7630 SSH system commands: allow git and rsync inside virtual folders 2020-06-15 23:32:12 +02:00
Nicola Murino
73a9c002e0 permissions: improve rename
Allow enabling the rename permission in a more controlled way by granting "delete"
permission on the source directory and "upload" permission on the target directory
2020-06-13 23:49:28 +02:00
Nicola Murino
3d48fa7382 ssh commands: add sftpgo-copy and sftpgo-remove
Fixes #122
2020-06-13 22:48:51 +02:00
Nicola Murino
8e22dd1b13 virtual folders: allow overlapped mapped paths if quota is disabled
See #95
2020-06-10 09:11:32 +02:00
Nicola Murino
7807fa7cc2 use os.ModePerm for file and directory creation 2020-06-08 19:40:17 +02:00
Nicola Murino
cd380973df allow host keys auto-generation inside a user-configured directory
Fixes #124
2020-06-08 18:45:04 +02:00
Nicola Murino
01d681faa3 external auth: allow mapping multiple login usernames to a single account
some external auth users want to map multiple login usernames to a single
SFTPGo account.
For example an SFTP user logs in using "user1" or "user2" and the external auth
returns "user" in both cases, so we use the username returned from external auth
and not the one used to login

Fixes #125
2020-06-08 13:06:02 +02:00
Nicola Murino
c231b663a3 add docs for virtual folders
fix test cases on macOS
2020-06-08 00:15:14 +02:00
Nicola Murino
8306b6bde6 refactor virtual folders
The same virtual folder can now be shared among users and different
folder quota limits for each user are supported.

Fixes #120
2020-06-07 23:30:18 +02:00
Nicola Murino
dc011af90d sftpd actions: add support for pre-delete action
Fixes #121
2020-05-24 23:31:14 +02:00
Nicola Murino
c27e3ef436 actions: add a generic hook to define external commands and HTTP URL
We can only define a single hook now and it can be an HTTP notification
or an external command, not both
2020-05-24 15:29:39 +02:00
Nicola Murino
760cc9ba5a partial auth: fix public key query response
more details here:

https://github.com/golang/crypto/pull/130#issuecomment-633191423
2020-05-24 12:13:14 +02:00
Nicola Murino
5665e9c0e7 improve some docs 2020-05-23 12:47:44 +02:00
Nicola Murino
ad53429cf1 add support for build tags to allow disabling some features
The following build tags are available:

- "nogcs", disable Google Cloud Storage backend
- "nos3", disable S3 Compabible Object Storage backends
- "nobolt", disable Bolt data provider
- "nomysql", disable MySQL data provider
- "nopgsql", disable PostgreSQL data provider
- "nosqlite", disable SQLite data provider
- "noportable", disable portable mode
2020-05-23 11:58:05 +02:00
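
A sketch of how one of these tags can exclude a feature at compile time; the file layout and function are illustrative, not the actual source:

    // +build !nos3

    package vfs

    // This file is excluded when building with -tags nos3; a matching
    // "// +build nos3" stub file would provide the disabled variant.
    func s3Enabled() bool { return true }

Building with, for example, go build -tags "nos3 nogcs" would then strip those backends from the binary.
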
Nicola Murino
15298b0409 sftpd: remove unused expectedSize field from Transfer struct
Signed-off-by: Nicola Murino <nicola.murino@gmail.com>
2020-05-20 20:17:59 +02:00
Nicola Murino
cfa710037c cloud backends: fix SFTP error message for some write failures
Fixes #119

Signed-off-by: Nicola Murino <nicola.murino@gmail.com>
2020-05-19 19:17:43 +02:00
Nicola Murino
a08dd85efd sftpd: deprecate keys and add a new host_keys config param
host_keys defines the private host keys as a plain list of strings.

Remove the other deprecated config params from the default config too.

Signed-off-by: Nicola Murino <nicola.murino@gmail.com>
2020-05-16 23:26:44 +02:00
Nicola Murino
469d36d979 certificate auth: fix source address checking inside crypto/ssh
So we can avoid checking the source address ourselves

81aafe6d26

Signed-off-by: Nicola Murino <nicola.murino@gmail.com>
2020-05-16 15:15:32 +02:00
Nicola Murino
7ae8b2cdeb move REST API CLI in examples directory
Signed-off-by: Nicola Murino <nicola.murino@gmail.com>
2020-05-16 14:02:46 +02:00
Nicola Murino
cf148db75d add test case for expired SSH certificate
Signed-off-by: Nicola Murino <nicola.murino@gmail.com>
2020-05-15 23:23:49 +02:00
Nicola Murino
738c7ab43e sftpd: add support for SSH user certificate authentication
This adds support for the PROTOCOL.certkeys vendor extension:

https://cvsweb.openbsd.org/src/usr.bin/ssh/PROTOCOL.certkeys?rev=1.8

Fixes #117

Signed-off-by: Nicola Murino <nicola.murino@gmail.com>
2020-05-15 20:08:53 +02:00
Nicola Murino
82fb7f8cf0 update proxyproto to v0.1.3
Signed-off-by: Nicola Murino <nicola.murino@gmail.com>
2020-05-14 20:10:33 +02:00
Nicola Murino
e0f2ab9c01 test cases: minor improvements
Signed-off-by: Nicola Murino <nicola.murino@gmail.com>
2020-05-10 12:37:29 +02:00
Nicola Murino
e0183217b6 test cases: simplify TestLoginInvalidFs
we can simulate an invalid filesystem config using a GCS user without a
credentials file
2020-05-07 19:47:46 +02:00
Nicola Murino
f066b7fb9c use upstream pipeat
my patches are now merged
2020-05-07 00:05:40 +02:00
Nicola Murino
0c6e2b566b fix test cases on Windows 2020-05-06 23:16:08 +02:00
Nicola Murino
f02e24437a add more linters
the test case migration to testify is now complete.
Linters are enabled for test cases too
2020-05-06 19:36:34 +02:00
Nicola Murino
e9534be1e6 travis: exclude go 1.13 for arch arm64 2020-05-03 22:46:39 +02:00
Nicola Murino
7056997e49 travis: add arm64 2020-05-03 15:46:42 +02:00
Nicola Murino
155af19aaa tests: update httpd test to use testify 2020-05-03 15:24:26 +02:00
Nicola Murino
f369fdf6f2 httpclient: add a configuration parameter to skip TLS certificate validation
In this mode, TLS is susceptible to man-in-the-middle attacks.
This should be used only for testing.
2020-05-03 11:37:50 +02:00
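
A minimal sketch of what such a parameter maps to with Go's net/http and crypto/tls:

    package httpclient

    import (
        "crypto/tls"
        "net/http"
    )

    // NewClient returns an HTTP client; with skipVerify set, any server
    // certificate is accepted, so this is suitable for testing only.
    func NewClient(skipVerify bool) *http.Client {
        return &http.Client{
            Transport: &http.Transport{
                TLSClientConfig: &tls.Config{InsecureSkipVerify: skipVerify},
            },
        }
    }
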
Nicola Murino
510a95bd6d code quality check: set go version to 1.14 2020-05-02 15:55:27 +02:00
Nicola Murino
da90dbe645 tests: update config to use testify
we should port the other test cases to testify too
2020-05-02 15:47:23 +02:00
Nicola Murino
b006c5f914 NewOsFs: return an interface and not a pointer 2020-05-02 15:01:56 +02:00
Nicola Murino
3f75d46a16 sftpd: add support for excluding virtual folders from user quota limit
Fixes #110
2020-05-01 15:27:53 +02:00
Nicola Murino
14c2a244b7 code quality check: use setup-go@v2 and go 1.14 2020-04-30 17:57:06 +02:00
Nicola Murino
94ff9d7346 initprovider: fail if a configuration file cannot be found 2020-04-30 16:48:42 +02:00
Enes Çakır
14196167b0 add github action workflow for code quality 2020-04-30 15:06:15 +02:00
Nicola Murino
d70959c34c fix some lint issues 2020-04-30 14:23:55 +02:00
Sam Millar
67c6f27064 Tiny documentation typo fix 2020-04-29 16:13:33 +02:00
Enes Çakır
6bfbb27856 fix log level changing problem 2020-04-28 23:03:18 +02:00
Enes Çakır
baac3749b3 add verbose flag for portable mode 2020-04-28 17:03:14 +02:00
Nicola Murino
d377181b25 add a new configuration section for HTTP clients
HTTP clients are used for executing hooks such as the ones used for custom
actions, external authentication and pre-login user modifications.

This allows, for example, using self-signed certificates without defeating the
purpose of using TLS
2020-04-26 23:29:09 +02:00
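
Not spelled out in the commit, but one way an HTTP client can trust a self-signed hook certificate while keeping verification on is a custom root CA pool; a sketch:

    package httpclient

    import (
        "crypto/tls"
        "crypto/x509"
        "net/http"
        "os"
    )

    // NewClientWithCA returns a client that trusts the given CA bundle
    // (e.g. the self-signed certificate used by a hook endpoint) while
    // keeping TLS verification enabled.
    func NewClientWithCA(caBundlePath string) (*http.Client, error) {
        pem, err := os.ReadFile(caBundlePath)
        if err != nil {
            return nil, err
        }
        pool := x509.NewCertPool()
        pool.AppendCertsFromPEM(pem)
        return &http.Client{
            Transport: &http.Transport{
                TLSClientConfig: &tls.Config{RootCAs: pool},
            },
        }, nil
    }
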
Nicola Murino
ebd6a11f3a external auth: add example HTTP server to use as authentication hook
The server authenticates against an LDAP server.
2020-04-26 14:48:32 +02:00
Nicola Murino
0a47412e8c scp, ssh commands: hide the real fs path on errors
The underlying filesystem errors for permissions and non-existing files
can contain the real storage path.
Map these errors to more generic ones to avoid leaking this info

Fixes #109
2020-04-22 12:26:18 +02:00
Nicola Murino
4f668bf558 simplify some httpd related code
and update chi, cobra and viper
2020-04-21 19:24:38 +02:00
Mengsk
9248c5a987 Update performance.md 2020-04-13 21:20:53 +02:00
Nicola Murino
b0ed190591 add an example auth program that allows authenticating against LDAP
External authentication is the way to go to authenticate against LDAP,
at least for now.

Closes #99
2020-04-11 22:30:41 +02:00
Nicola Murino
37357b2d63 add support for checking pbkdf2 passwords with base64 encoded salt
This way we can import the default password format used in 389ds.

See TestPasswordsHashPbkdf2Sha256_389DS test case to learn how to convert
389ds passwords
2020-04-11 12:25:21 +02:00
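
A sketch of such a check, assuming golang.org/x/crypto/pbkdf2 with SHA-256; note the derived key length must match the stored key's length:

    package hashing

    import (
        "crypto/sha256"
        "crypto/subtle"
        "encoding/base64"

        "golang.org/x/crypto/pbkdf2"
    )

    // CheckPbkdf2B64Salt verifies a password against a stored pbkdf2-sha256
    // hash whose salt and key are base64 encoded, as in 389ds exports.
    func CheckPbkdf2B64Salt(password, b64Salt, b64Key string, iterations int) (bool, error) {
        salt, err := base64.StdEncoding.DecodeString(b64Salt)
        if err != nil {
            return false, err
        }
        expected, err := base64.StdEncoding.DecodeString(b64Key)
        if err != nil {
            return false, err
        }
        // Derive exactly len(expected) bytes so the comparison is valid.
        df := pbkdf2.Key([]byte(password), salt, iterations, len(expected), sha256.New)
        return subtle.ConstantTimeCompare(df, expected) == 1, nil
    }
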
Nicola Murino
9b06e0a3b7 sql providers: change password field from varchar 255 to text
some passwords can be longer than 255 characters
2020-04-11 11:17:40 +02:00
Nicola Murino
5a5912ea66 switch to my pkg/sftp branch and enable the request-server allocator
This way we have performance comparable to OpenSSH if the cipher
isn't the bottleneck
2020-04-10 23:35:57 +02:00
Nicola Murino
b1c7317cf6 add support for partial authentication
Multi-step authentication is activated by disabling all single-step
auth methods for a given user
2020-04-09 23:32:42 +02:00
Nicola Murino
a0fe4cf5e4 docker: TAG build arg can be used to build a specific commit too 2020-04-09 11:30:51 +02:00
Henrik Lundahl
7fe3c965e3 Add a version build arg to the Alpine Dockerfile. 2020-04-09 11:26:09 +02:00
Henrik Lundahl
fd9b3c2767 Add a version build arg to the debian Dockerfile. 2020-04-09 11:15:21 +02:00
Nicola Murino
fb9e188e36 systemd service: add ExecReload 2020-04-05 11:36:29 +02:00
Nicola Murino
c93d8cecfc update deps
chi 4.1.0 requires some minor code changes
2020-04-03 22:30:30 +02:00
Nicola Murino
94b46e57f1 sftpd actions: execute defined command on error too
add a new field inside the notification to indicate if an error is
detected
2020-04-03 19:25:38 +02:00
Nicola Murino
9046acbe68 add HTTP hooks
external auth, pre-login user modification and keyboard interactive
authentication are now supported via HTTP requests too
2020-04-01 23:25:23 +02:00
Nicola Murino
075bbe2aef added a test case that checks quota for files inside virtual folders 2020-03-29 11:10:03 +02:00
Nicola Murino
b52d078986 pbkdf2: fix password comparison
the key length of the derived key must be equal to the length of the
expected key
2020-03-28 16:09:06 +01:00
Nicola Murino
0a9c4914aa pre-login program: allow creating a new user too
clarify the difference between dynamic user creation/update and external
authentication
2020-03-27 23:26:22 +01:00
Nicola Murino
f284008fb5 enable scp in default configuration
remove the deprecated enable_scp setting
2020-03-26 23:38:24 +01:00
Nicola Murino
4759254e10 file actions: add bucket and endpoint to notifications
The HTTP notifications are now invoked as POST requests and the notification
is a JSON object inside the POST body.

This is a backward-incompatible change, but this way the actions can be
extended more easily; sorry for the trouble

Fixes #101
2020-03-25 18:36:33 +01:00
Nicola Murino
e22d377203 docs: clarify "ca-certificates" requirement
Fixes #98
2020-03-22 20:17:36 +01:00
Nicola Murino
0787e3e595 bolt provider: fix error handling for get users with username filter 2020-03-22 15:37:08 +01:00
Nicola Murino
c1194d558c docs: minor improvements 2020-03-22 14:03:06 +01:00
Nicola Murino
952b10a9f6 update boltdb to v1.3.4
update other deps too
2020-03-21 10:12:30 +01:00
Nicola Murino
f55851bdc8 update nathanaelle password to v2
Fixes #97
2020-03-20 17:25:38 +01:00
Nicola Murino
76bb361393 docs: add built-in profiler 2020-03-15 23:33:12 +01:00
Nicola Murino
81c8e8d898 add profiler support
profiling is now available via the HTTP base URL /debug/pprof/

examples: use this URL to start and download a 30-second CPU profile:

/debug/pprof/profile?seconds=30

use this URL to profile used memory:

/debug/pprof/heap?gc=1

use this URL to profile allocated memory:

/debug/pprof/allocs?gc=1

Full docs here:

https://golang.org/pkg/net/http/pprof/
2020-03-15 15:16:35 +01:00
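
This is the standard net/http/pprof pattern; a minimal sketch:

    package main

    import (
        "net/http"
        _ "net/http/pprof" // registers the /debug/pprof/ handlers on the default mux
    )

    func main() {
        // e.g. http://localhost:8080/debug/pprof/profile?seconds=30
        http.ListenAndServe("localhost:8080", nil)
    }
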
Nicola Murino
f4e872c782 portable mode: add flags for s3 upload part size and concurrency 2020-03-15 11:40:06 +01:00
Nicola Murino
ddcb500c51 update pipeat
it contains my latest performance patch that removes an extraneous
allocation.

This improves performance for S3 and GCS
2020-03-15 01:36:19 +01:00
Nicola Murino
e8664c0ce4 docker: update docs
update dependencies too
2020-03-14 15:27:03 +01:00
Nicola Murino
3b002ddc86 improve performance
- use the latest pkg/sftp that contains my latest performance patch
- replace default crypto with my branch that uses minio sha256-simd
instead of the Go SHA256 implementation; this improves performance on
some hardware
2020-03-13 19:37:51 +01:00
Nicola Murino
1770da545d s3: upload concurrency is now configurable
Please note that if the upload bandwidth between the SFTP client and
SFTPGo is greater than the upload bandwidth between SFTPGo and S3 then
the SFTP client has to wait for the upload of the last parts to S3
after it finishes the file upload to SFTPGo, and it may time out.
Keep this in mind if you customize part size and upload concurrency
2020-03-13 19:13:58 +01:00
Nicola Murino
de3e69f846 s3: add documentation and test cases for upload part size 2020-03-13 17:28:55 +01:00
Michael Bonfils
cdf1233065 s3: export PartSize parameter
By default the AWS SDK uses a part_size of 5 MB. For big files
this is not ideal. For Hadoop, it is not uncommon to
use 512 MB.
2020-03-13 17:28:04 +01:00
Nicola Murino
6b70f0b25f back to development 2020-03-07 18:06:46 +01:00
Nicola Murino
4fe51f7cce set version to 0.9.6 2020-03-07 13:36:46 +01:00
HiFiPhile
7221bf9b25 Add performance summary
Pull request #92
2020-03-06 22:48:55 +01:00
Nicola Murino
61f20f5449 Linux: add basic instructions to run SFTPGo as service 2020-03-06 09:24:55 +01:00
Nicola Murino
5dafbb54de macOS: add basic instructions to run SFTPGo as service 2020-03-05 23:26:47 +01:00
Nicola Murino
ec8ab28a22 portable mode: add support for file extensions filters 2020-03-05 15:37:10 +01:00
Nicola Murino
aaa6d0c71f docs: fix some typos 2020-03-05 09:32:29 +01:00
Nicola Murino
ea74aca165 doc: some other minor improvements 2020-03-05 00:01:40 +01:00
Nicola Murino
9b119765fc docs: minor improvements 2020-03-04 23:51:16 +01:00
Jo Vandeginste
df02496145 Refactor docs 2020-03-04 23:10:58 +01:00
Nicola Murino
31d285813e windows: try to escape trailing double quote in user input
we try to remove the trailing double quote for user input such as this one

sftpgo.exe serve -c "C:\ProgramData\SFTPGO\"

the value for the -c flag is parsed as:

C:\ProgramData\SFTPGO"

this is what the user specified, but the user wants this value:

C:\ProgramData\SFTPGO

so we try to remove the trailing double quote.

Please note that we cannot do anything for something like this:

-c "C:\ProgramData\SFTPGO\" -l "sftpgo.log"

in this case the -l flag will be ignored and the value for the c flag is:

C:\ProgramData\SFTPGO" -l sftpgo.log

and so it is probably invalid. This is definitely bad user input
2020-03-04 09:27:14 +01:00
Germs2004
f9fc5792fd fixed minor typos and edits 2020-03-04 08:01:02 +01:00
Germs2004
6ad9c5ae64 minor typo 2020-03-04 08:00:34 +01:00
Nicola Murino
016abda6d7 improve docs 2020-03-03 23:25:23 +01:00
Nicola Murino
2eea6c95b9 windows setup: use ProgramData folder to store application data
this is a backward-incompatible change, but it is the way to do
things on Windows.
2020-03-03 22:31:13 +01:00
Nicola Murino
7f1946de34 improve validations for user provided file and directory paths 2020-03-03 09:09:58 +01:00
Nicola Murino
d0a81cabab log file: if the path is not absolute make it relative to the config dir
Also refuse to join invalid file names such as "."

Fixes #85
2020-03-03 00:34:06 +01:00
Nicola Murino
df67f4ef34 clean config dir
Fixes #80
2020-03-02 22:56:14 +01:00
Nicola Murino
ed11e1128a docs: clarify the initprovider command is required for SQLite too 2020-03-02 10:34:31 +01:00
Nicola Murino
ed1c7cac17 update deps
we now use git master for pkg/sftp: it includes the performance patches
from my copy branch.
2020-03-02 10:13:49 +01:00
Nicola Murino
7c115aa9c8 windows service: only restart twice
reset the service failure count to zero after 3600 seconds.

Fixes #83
2020-03-02 09:58:36 +01:00
Nicola Murino
3ffddcba92 web: log an error if loading a required template fails
We used template.Must, which panics if an error happens, but the error is
visible only if sftpgo is started interactively

Fixes #82
2020-03-02 09:34:13 +01:00
Nicola Murino
833b702b90 proxy protocol: add list of allowed IP addresses and IP ranges
"proxy_allowed" setting allows to specify the allowed IP address and IP
ranges that can send the proxy header. This setting combined with
"proxy_protocol" allows to ignore the header or to reject connections
that send the proxy header from a non listed IP
2020-03-01 23:12:28 +01:00
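
A sketch of the kind of check this implies (names are illustrative, not the actual source):

    package proxy

    import "net"

    // IsAllowed reports whether remoteIP matches any configured address
    // or CIDR range in proxyAllowed.
    func IsAllowed(remoteIP string, proxyAllowed []string) bool {
        ip := net.ParseIP(remoteIP)
        if ip == nil {
            return false
        }
        for _, entry := range proxyAllowed {
            if _, network, err := net.ParseCIDR(entry); err == nil {
                if network.Contains(ip) {
                    return true
                }
            } else if allowed := net.ParseIP(entry); allowed != nil && allowed.Equal(ip) {
                return true
            }
        }
        return false
    }
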
Nicola Murino
b885d453a2 filters: we can now set allowed and denied file extensions 2020-03-01 22:10:29 +01:00
Nicola Murino
7163fde724 proxy protocol: added an option to make the proxy header required
now we can configure SFTPGo to accept or reject requests without the proxy
header when the proxy protocol is enabled
2020-02-29 00:02:06 +01:00
Nicola Murino
830e3d1f64 Support for HAProxy PROXY protocol
you can proxy and/or load balance the SFTP/SCP service without losing
the information about the client's address.
2020-02-27 09:21:30 +01:00
Mengsk
637463a068 Rename before_login_program to pre_login_program
and some documentation update
2020-02-25 16:34:54 +01:00
Nicola Murino
e69536f540 fixed some typos and improved a log 2020-02-25 12:46:52 +01:00
Mengsk
c516780289 Documentation update 1 2020-02-25 12:41:28 +01:00
Nicola Murino
eb1b869b73 virtual folders fixes
scp now properly handles virtual folders.

rsync is disabled for users with virtual folders: we execute a system
command and it is not aware of virtual folders.

git is not allowed if the repo path is inside a virtual folder
2020-02-24 18:54:35 +01:00
Nicola Murino
703ccc8d91 add support for dynamic users modifications
A custom program can be executed before users log in to modify the
configuration for the user trying to log in.
You can, for example, allow login based on time range.

Fixes #77
2020-02-23 18:50:59 +01:00
Nicola Murino
45b9366dd0 add support for virtual folders
directories outside the user home directory can be exposed as virtual folders
2020-02-23 11:30:26 +01:00
Nicola Murino
382c6fda89 updated dependencies
fix CVE-2020-9283
2020-02-21 00:23:00 +01:00
Nicola Murino
0f80de86b2 simplify some code
now gocyclo is happy again
2020-02-20 15:53:26 +01:00
Nicola Murino
bc11cdd8d5 add support for per user authentication methods
You can, for example, deny one or more authentication methods to one or
more users.
2020-02-19 22:39:30 +01:00
Nicola Murino
62b20cd884 scp: check for write errors
exits as soon as there is a write error instead of getting the same error when
the transfer is closed
2020-02-19 11:26:40 +01:00
Nicola Murino
ae8ed75ae5 gcs: add support for automatic credentials
We can now also support implicit credentials using the Application
Default Credentials strategy
2020-02-19 09:41:15 +01:00
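
A sketch with cloud.google.com/go/storage, which falls back to Application Default Credentials when no credentials file is passed:

    package gcsfs

    import (
        "context"

        "cloud.google.com/go/storage"
        "google.golang.org/api/option"
    )

    // NewGCSClient is a sketch: with an empty credentials file the client
    // uses Application Default Credentials (env var, metadata server, ...).
    func NewGCSClient(ctx context.Context, credFile string) (*storage.Client, error) {
        if credFile != "" {
            return storage.NewClient(ctx, option.WithCredentialsFile(credFile))
        }
        return storage.NewClient(ctx) // automatic/implicit credentials
    }
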
Nicola Murino
c8cc81cf4a sftpd: autogenerate ecdsa key
With default configuration we now generate RSA and ECDSA server keys.
2020-02-16 18:17:39 +01:00
Nicola Murino
79c8b6cbc2 keyboard interactive auth: allows to automatically check the user password
This simplifies the common pattern where the user password and a one-time
token are requested: now the external program can delegate the password check
to SFTPGo and verify the token itself
2020-02-16 11:43:52 +01:00
Nicola Murino
58253968fc s3: improve credentials validation
the access secret can now be empty, so check that it is not empty before
encrypting it
2020-02-16 10:14:44 +01:00
Enes Çakır
dbd75209df s3: add auth options to README 2020-02-16 09:06:25 +01:00
Enes Çakır
da01848855 s3: enable shared config state 2020-02-16 09:06:25 +01:00
Nicola Murino
0b7be1175d parse ssh commands with shlex
instead of using our buggy home-made method.

Fixes #72
2020-02-14 16:17:32 +01:00
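
The commit does not name the package; github.com/google/shlex is one Go shlex implementation. A usage sketch:

    package main

    import (
        "fmt"

        "github.com/google/shlex"
    )

    func main() {
        // shlex honors quoting and escaping, unlike a naive strings.Split
        args, err := shlex.Split(`scp -t "dir with spaces/file.txt"`)
        if err != nil {
            panic(err)
        }
        fmt.Println(args) // [scp -t dir with spaces/file.txt]
    }
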
Enes Çakır
3479a7e438 docker: remove serve command from entrypoint in alpine 2020-02-14 15:13:39 +01:00
Enes Çakır
4f5c67e7df portable: set data-provider name to empty 2020-02-14 15:13:10 +01:00
Nicola Murino
b99495ebbb sftpd download: remove check for download size
some clients, for example rclone, can request only part of a file; we have
no way to detect this, so we must not return an error if the downloaded size
does not match the file size
2020-02-14 11:09:16 +01:00
Nicola Murino
0061978db8 docker: better clarify when ca-certificates package is needed 2020-02-14 08:29:31 +01:00
Mazinger
e011f793ec CA certificate for Debian Docker image (#71)
* ca-certificate package included to ensure ssl connections

* comment instruction
2020-02-14 06:34:16 +01:00
Nicola Murino
5b47292366 sftpd: improve error logs
Now logs for cloud filesystems are more readable.

Also use standard output as the default for the Debian Dockerfile
2020-02-13 08:26:45 +01:00
Nicola Murino
8eff2df39c subdir perms: allow empty perms
empty perms will allow nothing on the specified subdir.

Non-empty permissions for the "/" dir are still required.

Fixes #70
2020-02-10 19:28:35 +01:00
Nicola Murino
7bfe0ddf80 ssh commands: fix parsing commands with space
For now we support the "\" escaping style
2020-02-08 23:33:06 +01:00
Nicola Murino
d6fa853a37 add support for integrated database schema migrations
added the "initprovider" command to initialize the database structure.
If we change the database schema the required changes will be checked
at startup and automatically applied.
2020-02-08 14:44:25 +01:00
Nicola Murino
553cceab42 dataprovider actions: add more user fields as env vars 2020-02-05 22:17:03 +01:00
Nicola Murino
5bfaae9202 httpd: allow reloading the HTTPS certificate without restarting the service
The HTTPS certificate can be reloaded on demand by sending a SIGHUP signal on
Unix-based systems and a "paramchange" request to the running service on
Windows
2020-02-04 23:21:33 +01:00
Nicola Murino
9359669cd4 s3: set upload concurrency to 2
we upload a file while receiving it via SFTP, not a file stored on a local
disk. We use concurrent uploads only to be able to send files of arbitrary
size, so concurrency is not really useful here. Setting the concurrency to
2 we have a max difference of 10 MB between the writer (sftp client) and
the reader (aws sdk); with the default concurrency value this difference
is 25 MB.
2020-02-04 23:14:55 +01:00
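
The numbers follow from the SDK's 5 MB default part size: roughly concurrency x part size bytes can be in flight, so 2 x 5 MB is about 10 MB versus 5 x 5 MB = 25 MB with the SDK's default concurrency of 5. A sketch with the aws-sdk-go v1 uploader:

    package s3fs

    import (
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3/s3manager"
    )

    // NewUploader is a sketch: at most Concurrency parts are buffered at
    // once, so concurrency 2 keeps the writer/reader gap around 10 MB.
    func NewUploader(sess *session.Session) *s3manager.Uploader {
        return s3manager.NewUploader(sess, func(u *s3manager.Uploader) {
            u.Concurrency = 2
            u.PartSize = 5 * 1024 * 1024 // the SDK default minimum part size
        })
    }
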
Nicola Murino
8b039e0447 httpd: add support for basic auth and HTTPS 2020-02-04 00:08:00 +01:00
Nicola Murino
c64c080159 fix test cases on Windows
We have to rework TestRelativePaths and TestResolvePaths if we want to run
them for Cloud Storage on Windows too: we use filesystem paths while Cloud
Storage providers expect Unix paths.
On Windows it is important to check the local filesystem, so skip the Cloud
Storage provider test cases for now
2020-02-02 22:40:10 +01:00
Nicola Murino
bcaf283c35 memory provider: load users from a dump file
The `memory` provider can load users from a dump obtained using the
`dumpdata` REST API. This dump file can be configured using the
dataprovider `name` configuration key. It will be loaded at startup
and can be reloaded on demand using a `SIGHUP` on Unix based systems
and a `paramchange` request to the running service on Windows.

Fixes #66
2020-02-02 22:20:39 +01:00
Nicola Murino
31a433cda2 update deps and simplify some code 2020-01-31 23:26:56 +01:00
Nicola Murino
e647f3626e loaddata: add an option to leave existing users unmodified 2020-01-31 19:10:45 +01:00
Nicola Murino
3491717c26 add support for serving Google Cloud Storage over SFTP/SCP
Each user can be mapped to a Google Cloud Storage bucket or a bucket
virtual folder
2020-01-31 19:04:00 +01:00
thomsh
45a13f5f4e Update README add section for dataprovider init 2020-01-31 08:56:15 +01:00
Nicola Murino
6884ce3f3e sftp actions: add a parameter to distinguish local and remote files 2020-01-23 23:23:28 +01:00
Nicola Murino
5f4efc9148 S3: add metrics 2020-01-23 23:17:00 +01:00
Nicola Murino
d481294519 S3: fix quota update after an upload error
S3 uploads are atomic: if the upload fails we have no partial file, so we
have to update the user quota only if the upload succeeds
2020-01-23 10:19:56 +01:00
Nicola Murino
7ebbbe5c29 S3: update pipeat to the latest commit in my fork
Here are the main improvements:

- unlinked files work on Windows too
- the uploads are now synced on the slower of the SFTP client write speed
and the upload speed to S3

This commit increases the external auth timeout to 60 seconds too
2020-01-22 19:42:23 +01:00
Nicola Murino
9ff303b8c0 add support for keyboard interactive authentication
Fixes #64
2020-01-21 10:54:05 +01:00
Nicola Murino
4463421028 S3: add support for serving virtual folders
inside the same bucket each user can be assigned a virtual folder.
This is similar to a chroot directory for a local filesystem
2020-01-19 23:23:09 +01:00
Nicola Murino
d75f56b914 vfs: store root dir
so we don't need to pass it over and over
2020-01-19 13:58:55 +01:00
Nicola Murino
a4834f4a83 add basic S3-Compatible Object Storage support
we now have an interface for filesystem backends; this makes it easy
to add new filesystem backends
2020-01-19 07:41:05 +01:00
Nicola Murino
0b42dbc3c3 back to development 2020-01-12 19:16:18 +01:00
Nicola Murino
2013ba497c macOS: add backups dir to the sample launchd service 2020-01-12 19:02:09 +01:00
Nicola Murino
2be37217cf set version to 0.9.5 2020-01-12 14:56:07 +01:00
Nicola Murino
55a03c2e2b REST API CLI: improve color/no-color arguments 2020-01-12 14:54:41 +01:00
Nicola Murino
27dbcf0066 fix test cases on Windows 2020-01-12 08:25:08 +01:00
Nicola Murino
ec194d73d2 update pkg/sftp to a stable version ...
... now that it contains all the needed patches.
Remove a hack for setstat with empty attrs; it is now handled in pkg/sftp.
Update other dependencies too.
2020-01-11 14:29:30 +01:00
Nicola Murino
1d9bb54073 transfers: improve errors detection
We can now properly report write errors if, for example, there is no space
left on the device.

For downloads we check the downloaded size against the expected one
2020-01-10 19:20:22 +01:00
Nicola Murino
5cf4a47b48 web interface: fix setting expiration date
editor autoformatting changed the meaning of the code
2020-01-09 12:14:33 +01:00
Nicola Murino
eec60d6309 custom actions: add env vars
action parameters can now be read from env vars too.
Added a timeout for the command execution
2020-01-09 12:00:37 +01:00
Paweł K
37c602a477 Expand environment variables for external auth program
Use os.Environ() as a base instead of an empty environment. Currently the environment of the executed external auth program only contains SFTPGO_AUTHD* variables and therefore the program lacks additional context when started.
2020-01-09 08:33:40 +01:00
Nicola Murino
8e604f888a improve docs and test cases 2020-01-07 09:39:20 +01:00
Nicola Murino
531091906d add support for authentication using external programs
Fixes #62
2020-01-06 21:42:41 +01:00
Nicola Murino
e046b35b97 check permissions against sftp path
instead of building filesystem paths and then checking permissions against
the path relative to the home dir, which is the initial sftp path
2020-01-05 11:41:25 +01:00
Nicola Murino
eb2ddc4798 small README improvements 2020-01-01 23:44:33 +01:00
Nicola Murino
aee9312cea better document how to reuse existing users 2019-12-31 14:22:42 +01:00
Nicola Murino
6a99a5cb9f Improve README 2019-12-31 11:11:07 +01:00
RIVIERE Fabien
8e0ca88421 Add systemD/journalD sftpgo Fail2ban configuration 2019-12-31 10:08:51 +01:00
Nicola Murino
c7e55db4e0 update dependencies 2019-12-30 19:27:50 +01:00
Nicola Murino
1b1c740b29 Add support for allowed/denied IP/Mask
Login can be restricted to specific ranges of IP addresses or to a specific IP
address.

Please apply the appropriate SQL upgrade script to add the filter field to your
database.

The filter database field will allow adding other filters without requiring a
new database migration
2019-12-30 18:37:50 +01:00
Nicola Murino
ad5436e3f6 ssh commands: improve command ended detection
Sometimes we can get this error:

read |0: file already closed

when reading from the command's standard error; this means that the command
has already finished, so we don't need to do anything.
This happens randomly while running the test cases on Travis.
2019-12-30 00:10:03 +01:00
Nicola Murino
20606a0043 sftpd test: add a debug log
The git push test sometimes fails when running on Travis.
The issue cannot be replicated locally, so print the logs to try to
understand what is happening
2019-12-29 23:27:32 +01:00
Nicola Murino
80e9902324 scripts: add support for converting users from other users stores
currently we support:

- Linux/Unix users stored in shadow/passwd files
- Pure-FTPd virtual users generated using `pure-pw` CLI
- ProFTPD users generated using `ftpasswd` CLI
2019-12-29 17:21:25 +01:00
Nicola Murino
741e65a3a1 REST cli: allow disabling password or public_key auth
using something like this:

update-user <user-id> <username> --public-keys ''

the public keys auth will be disabled

using something like this:

update-user <user-id> <username> --password ''

the password auth will be disabled
2019-12-29 14:07:44 +01:00
Nicola Murino
6aff8c2f5e add support for checking passwords in md5crypt ($1$) format
this is an old and unsafe scheme, but it is still useful for importing users
from legacy systems
2019-12-29 07:43:59 +01:00
Nicola Murino
e5770af2fa small Dockerfiles improvements
remove a debug log from httpd_test
2019-12-27 23:54:45 +01:00
Nicola Murino
ae094d3479 add backup/restore REST API 2019-12-27 23:12:44 +01:00
Nicola Murino
f49c280a7f fix test cases on Windows
SysProcAttr.Credential is not available on Windows, so we need to move the
WrapCmd test to a separate file to be able to build test cases on Windows;
skipping the test is not enough
2019-12-26 08:29:38 +01:00
Nicola Murino
ae812e55af sftpd: explicitly disallow some commands on root directory
It was possible to remove an empty root dir or create a symlink to it.
We now return a Permission Denied error if we detect an attempt to remove,
rename or symlink the root directory
2019-12-25 23:37:37 +01:00
Nicola Murino
489101668c add per directory permissions
we can now have permissions such as these:

{"/":["*"],"/somedir":["list","download"]}

The old permissions are automatically converted to the new structure,
no database migration is needed
2019-12-25 18:20:19 +01:00
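
A sketch of a lookup over such a map (the real implementation is not shown here): walk up from the requested path to the deepest configured parent, with "*" granting everything.

    package perms

    import "path"

    // HasPerm reports whether perm is granted on sftpPath by the deepest
    // configured parent directory in perms.
    func HasPerm(perms map[string][]string, sftpPath, perm string) bool {
        p := path.Clean("/" + sftpPath) // ensure an absolute, clean path
        for {
            if granted, ok := perms[p]; ok {
                for _, g := range granted {
                    if g == "*" || g == perm {
                        return true
                    }
                }
                return false
            }
            if p == "/" {
                return false
            }
            p = path.Dir(p)
        }
    }

With the example above, HasPerm(perms, "/somedir/file", "upload") would be false while HasPerm(perms, "/other", "upload") would be true via the "/" wildcard.
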
Nicola Murino
f8fd5c067c docker alpine: fix example
execute 20191112.sql too
2019-12-06 21:57:29 +01:00
Nicola Murino
39fc9b73e9 sftp setstat: guard against empty attrs
It seems that there are some clients that send Setstat requests with
no attrs:

https://github.com/pkg/sftp/issues/325

I have never seen this myself; anyway, we now return ErrSSHFxBadMessage
and log the client version in such cases
2019-12-04 08:31:47 +01:00
Nicola Murino
80a5138115 add rsync support ...
and better document quota management issues for system commands.

rsync and git are not enabled in the default config, so don't install
them in the sample Dockerfiles; simply add a comment to facilitate their
installation if needed

Fixes #44
2019-11-29 15:24:56 +01:00
Nicola Murino
bc844105b2 ssh system commands: wait on write from command out to network
we only need to wait for the write from the local command to
the ssh channel. There is no need to wait for the write from the ssh
channel to the local command's stdin
2019-11-27 22:33:17 +01:00
Nicola Murino
7de0fe467a docker: install git
so support for Git repo is available using the sample Dockerfiles
2019-11-27 09:47:25 +01:00
Nicola Murino
0a025aabfd add support for Git over SSH
We use the system commands "git-receive-pack", "git-upload-pack" and
"git-upload-archive". they need to be installed and in your system's
PATH. Since we execute system commands we have no direct control on
file creation/deletion and so quota check is suboptimal: if quota is
enabled, the number of files is checked at the command begin and not
while new files are created.
The allowed size is calculated as the difference between the max quota
and the used one. The command is aborted if it uploads more bytes than
the remaining allowed size calculated at the command start. Quotas are
recalculated at the command end with a full home directory scan, this
could be heavy for big directories.
2019-11-26 22:26:42 +01:00
Nicola Murino
7a8b1645ef set version to 0.9.4 2019-11-22 21:27:56 +01:00
Nicola Murino
b3729e4666 log ssh commands in "command logs" category 2019-11-19 11:38:39 +01:00
Nicola Murino
9c4dbbc3f8 sftpd: add support for some SSH commands
md5sum, sha1sum are used by rclone.
cd, pwd improve the support for the RemoteFiles mobile app.

These commands are all implemented inside SFTPGo so they work even
if the matching system commands are not available, for example on Windows
2019-11-18 23:30:37 +01:00
Nicola Murino
ca6cb34d98 sftpd: add support for chtimes
This improves rclone compatibility
2019-11-16 10:23:41 +01:00
Nicola Murino
fc442d7862 sftpd: document chmod/chown on Windows
chmod is partially supported and chown is not supported on Windows.

Skip unsupported test cases on Windows
2019-11-15 17:09:00 +01:00
Nicola Murino
3ac5af47f2 minor fixes 2019-11-15 12:26:52 +01:00
Nicola Murino
bb37a1c1ce sftpd: add support for chmod/chown
added matching permissions too and a new setting, "setstat_mode".
Setting setstat_mode to 1 keeps the previous behaviour that
silently ignores setstat requests
2019-11-15 12:15:07 +01:00
Nicola Murino
206799ff1c httpd: add an API to get data provider status 2019-11-14 18:48:01 +01:00
Nicola Murino
f3de83707f improve README 2019-11-14 17:43:14 +01:00
Nicola Murino
5be1d1be69 sftpd: send exit-status message on close
this fixes restic compatibility
2019-11-14 16:49:42 +01:00
Nicola Murino
08e85f6be9 sftpd: return sftp.ErrSSHFxNoSuchFile if the client asks for a missing path
some clients expect this error and not the generic one if the path is missing
2019-11-14 14:18:43 +01:00
Nicola Murino
acdf351047 dataprovider: add custom command and/or HTTP notifications on users add, update and delete
This way custom logic can be implemented for example to create a UNIX user
as asked in #58
2019-11-14 11:06:03 +01:00
Nicola Murino
c2ff50c917 dataprovider: add support for user status and expiration
a user can now be disabled or expired.

If you are using an SQL database as dataprovider please remember to
execute the sql update script inside "sql" folder.

Fixes #57
2019-11-13 11:36:21 +01:00
Nicola Murino
363b9ccc7f sftpd: explicitly configure supported SFTP extensions
update pkg/sftp to a git revision that includes the needed patch

https://github.com/pkg/sftp/pull/315
2019-11-12 07:37:47 +01:00
Nicola Murino
74367a65cc failed connection logs: rename host to client_ip 2019-11-11 19:53:27 +01:00
Nicola Murino
2221d3307a Improve README 2019-11-11 18:28:18 +01:00
Nicola Murino
4ff34b3e53 logger: add specific logs for failed attempts to initialize a connection
This should allow for better integration in tools like fail2ban.

Hopefully fixes #59
2019-11-11 15:20:00 +01:00
Nicola Murino
191da1ecaf fix a typo 2019-11-08 08:46:31 +01:00
Nicola Murino
77db2bd3d1 fix posix rename
update pkg/sftp to a git revision that includes:

https://github.com/pkg/sftp/pull/316

add a test case here too and update other deps
2019-11-08 08:43:27 +01:00
Nicola Murino
758f2ee834 improve README 2019-10-26 20:42:46 +02:00
Nicola Murino
c5a6ca5650 portable mode: advertise service via multicast dns
Fixes #51
2019-10-26 18:25:53 +02:00
Nicola Murino
b409523d5c document memory data provider 2019-10-25 19:01:01 +02:00
Nicola Murino
8cd0aec417 add memory data provider and use it for portable mode 2019-10-25 18:37:12 +02:00
Nicola Murino
a4cddf4f7f add portable mode
Portable mode is a convenient way to share a single directory on demand
2019-10-24 18:50:35 +02:00
Roman Isko
d970e757eb Use relative to the current branch links in readme 2019-10-22 10:00:09 +02:00
Nicola Murino
083d9f76c6 add commit hash and build date to the version logged when starting the daemon 2019-10-21 21:18:45 +02:00
Jo Vandeginste
2003d08c59 Log version when starting the daemon
Signed-off-by: Jo Vandeginste <Jo.Vandeginste@kuleuven.be>
2019-10-21 21:15:31 +02:00
Nicola Murino
9cf4653425 improve docs for available Arch Linux packages 2019-10-20 09:23:04 +02:00
Nicola Murino
4f6bb00996 web: display version string 2019-10-19 07:52:58 +02:00
Nicola Murino
25f97bbe62 set version to 0.9.3 2019-10-18 11:28:20 +02:00
Nicola Murino
44d403cf9c sftpd_test: use path.Join for SFTP/SCP path
filepath.Join could use an OS-dependent separator
2019-10-16 12:57:06 +02:00
Nicola Murino
8682ae4a54 sftpd: make file/dir removal and creation more standard
- remove a non-empty directory. Before: the directory contents were
removed recursively. Now: removing a non-empty directory fails.

- make a directory in a non-existent path. Before: any necessary parents
were created. Now: it fails.

- remove a file. Before: files, directories and symlinks were removed.
Now: only files and symlinks are removed; removing a directory using "Remove"
instead of "Rmdir" fails.

- upload a file in a non-existent directory. Before: any necessary parents
were created. Now: it fails.

Now SFTPGo behaves as OpenSSH does.
2019-10-16 07:48:22 +02:00
Nicola Murino
f98a29a1e0 sftpd: port to non-deprecated pkg/sftp error types
see https://github.com/pkg/sftp/pull/304
2019-10-14 22:44:57 +02:00
Nicola Murino
2932dba5cc update modules deps 2019-10-14 08:36:39 +02:00
Nicola Murino
24914e90d1 web: fix quota scan error message 2019-10-13 13:08:19 +02:00
Nicola Murino
587c8a0347 web: add quota scan support 2019-10-13 12:07:22 +02:00
Nicola Murino
62224debd2 simplify closing a connection
get the connection from the map instead of cycling through all the open connections
2019-10-12 15:12:43 +02:00
Nicola Murino
871e2ccbbf sftpd: refactor connection closing
we have no known bugs with the previous implementation; anyway this one
is cleaner: the underlying network connection is directly related to the
SFTP/SCP connections.
This should better protect us against buggy clients and edge cases
2019-10-10 09:04:17 +02:00
Nicola Murino
4b5ce3913e ssh handshake: add a deadline for handshake to complete
we use a 2-minute timeout, as OpenSSH does
2019-10-09 19:07:35 +02:00
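
A sketch with golang.org/x/crypto/ssh: put a deadline on the raw connection for the handshake, then clear it once the handshake completes (OpenSSH's LoginGraceTime defaults to 120 seconds).

    package sshd

    import (
        "net"
        "time"

        "golang.org/x/crypto/ssh"
    )

    // AcceptConn bounds the SSH handshake to 2 minutes, then removes the
    // deadline so the session itself is not limited.
    func AcceptConn(conn net.Conn, cfg *ssh.ServerConfig) (*ssh.ServerConn, error) {
        conn.SetDeadline(time.Now().Add(2 * time.Minute))
        sconn, chans, reqs, err := ssh.NewServerConn(conn, cfg)
        if err != nil {
            return nil, err
        }
        conn.SetDeadline(time.Time{}) // handshake done, clear the deadline
        go ssh.DiscardRequests(reqs)
        _ = chans // channels would be served here
        return sconn, nil
    }
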
Nicola Murino
1d917561fe sftpd: add support for upload resume
we support resume only if the client sets the correct offset while resuming
the upload.
Based on the specs the offset is optional for resume, but all the tested
clients set the right offset.
If an invalid offset is given we interrupt the transfer with the error
"Invalid write offset ..."

See https://github.com/pkg/sftp/issues/295

This commit adds a new upload mode, "atomic with resume support": this acts
as atomic but if there is an upload error the temporary file is renamed
to the requested path and not deleted, this way a client can reconnect
and resume the upload
2019-10-09 17:33:30 +02:00
Nicola Murino
4f36c1de06 web: use html/template
so output is safe against code injection
2019-10-09 11:48:54 +02:00
Nicola Murino
5ffa34dacb improve godoc 2019-10-08 10:29:16 +02:00
Nicola Murino
60d4a3e1b5 improve docs 2019-10-08 08:20:26 +02:00
Nicola Murino
3e0558c0e9 add web interface support to windows setup ...
... and other small improvements
2019-10-07 22:37:28 +02:00
Nicola Murino
c74d90407b launchd service: add env vars for html templates and static files 2019-10-07 19:40:17 +02:00
Nicola Murino
557831fa0d Dockerfiles: add support for web interface 2019-10-07 19:16:52 +02:00
Nicola Murino
afd312f26a add a basic web interface
The built-in web interface allows you to manage users and connections
2019-10-07 18:19:01 +02:00
Nicola Murino
bb0338870a unhide public keys
hiding public keys gives no security improvement
2019-10-03 15:29:54 +02:00
Nicola Murino
fb8ccfe824 improve doc for dataprovider Close method
the method is used in test cases; it assumes that the data provider
is initialized, and it is not safe to call Initialize/Close from
different goroutines
2019-09-29 08:38:09 +02:00
Nicola Murino
0b4ff97a1a dataprovider: simplify validateUser 2019-09-28 23:44:36 +02:00
Nicola Murino
00dd5db226 add support for users' default base dir 2019-09-28 22:48:52 +02:00
Jo Vandeginste
71093bbe1b Log to journald
By default on systems with systemd, send logs to stdout and thus to journald.
2019-09-20 12:38:31 +02:00
Nicola Murino
088e187e6a add Inno Setup script used to generate the Windows setup
So we can receive pull requests to improve it :)
2019-09-19 08:19:25 +02:00
Nicola Murino
0d8506c93d set version to 0.9.2 2019-09-18 22:19:34 +02:00
Nicola Murino
df96ea7e9f add a new permission for overwriting existing files
The upload permission is required to allow file overwrite
2019-09-17 08:53:45 +02:00
Nicola Murino
580fae7a8f minor improvements 2019-09-16 18:11:35 +02:00
Nicola Murino
4a1baaee69 windows service: improve doc 2019-09-16 09:22:27 +02:00
Nicola Murino
f3f38f5f09 add Windows Service support 2019-09-16 08:52:58 +02:00
Nicola Murino
bba78763e1 add a test case and document sha512crypt passwords support 2019-09-15 12:23:27 +02:00
Seunghoon Yeon
b5211fd31b Added sha512crypt support 2019-09-15 12:08:28 +02:00
Nicola Murino
360f32a91b fixed some typos 2019-09-14 17:06:21 +02:00
Nicola Murino
018b10808d improve SQL provider availability check adding a timeout 2019-09-14 16:18:31 +02:00
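
One common way to bound an availability check with a timeout (a hedged sketch, not necessarily how SFTPGo does it): wrap the ping in a context so a hung database cannot block the caller indefinitely.

```go
package dataprovider

import (
	"context"
	"database/sql"
	"time"
)

// checkAvailability pings the database, giving up after five seconds.
func checkAvailability(db *sql.DB) error {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	return db.PingContext(ctx)
}
```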
Jo Vandeginste
29aadbf3e3 log redacted passwords
Closes #48
2019-09-13 21:57:52 +02:00
Nicola Murino
7eb5b01169 add Prometheus support
some basic counters and gauges are now exposed
2019-09-13 18:45:36 +02:00
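
A minimal example of exposing counters with the official client library; the metric name here is made up for illustration, it is not one of SFTPGo's metrics.

```go
package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

// totalUploads is an illustrative counter, not an SFTPGo metric name.
var totalUploads = prometheus.NewCounter(prometheus.CounterOpts{
	Name: "example_uploads_total",
	Help: "The total number of completed uploads.",
})

func main() {
	prometheus.MustRegister(totalUploads)
	totalUploads.Inc() // call this wherever an upload completes

	// Expose all registered metrics in the Prometheus text format.
	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(":2112", nil))
}
```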
Nicola Murino
fd59f35108 update pkg/sftp to the latest master
Our pull request to handle transfer errors is now merged, so updating
pkg/sftp should fix #36
2019-09-13 08:30:22 +02:00
Nicola Murino
e7eb3476b7 dataprovider: remove transaction for quota update
The update is atomic, so no transaction is needed.
Additionally, a transaction asks for a new connection from the pool,
and this can deadlock if the pool's max connection limit is too low.

Also make the pool size configurable instead of hard-coding it to the CPU count.

Fixes #47
2019-09-13 08:14:07 +02:00
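
A sketch of the point above: a single relative-increment UPDATE is atomic on its own, so it needs no explicit transaction and no second pooled connection. Column names follow the project's users table; the helper itself is illustrative.

```go
package dataprovider

import "database/sql"

// updateQuota applies a relative quota change in one atomic statement.
func updateQuota(db *sql.DB, username string, sizeDiff int64, filesDiff int) error {
	_, err := db.Exec(
		`UPDATE users SET used_quota_size = used_quota_size + ?,
		        used_quota_files = used_quota_files + ?
		 WHERE username = ?`,
		sizeDiff, filesDiff, username)
	return err
}
```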
Nicola Murino
bf00ca334d mysql data provider: add read and write timeout
This should hopefully fix #47
2019-09-12 16:26:47 +02:00
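
go-sql-driver/mysql accepts dial, read and write timeouts as DSN parameters; a sketch of what such a connection string looks like (host and credentials are placeholders).

```go
package dataprovider

import (
	"database/sql"

	// register the MySQL driver
	_ "github.com/go-sql-driver/mysql"
)

// openMySQL opens a connection with explicit dial/read/write timeouts.
func openMySQL() (*sql.DB, error) {
	dsn := "sftpgo:sftpgo@tcp(127.0.0.1:3306)/sftpgo?timeout=10s&readTimeout=30s&writeTimeout=30s"
	return sql.Open("mysql", dsn)
}
```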
Nicola Murino
6b33d3d9f1 add version to the default server banner 2019-09-12 09:21:58 +02:00
Nicola Murino
3d13fe15c3 connections: close the ssh channel before the network connection
This way, if pkg/sftp is stuck in the Serve() method, it should be unblocked.
2019-09-11 16:29:56 +02:00
Nicola Murino
9794ca7ee0 connection: add a close method
and use it instead of netConn directly
2019-09-11 12:46:21 +02:00
Nicola Murino
784f75f45b use net.Conn instead of ssh.Conn to close connections 2019-09-11 09:41:46 +02:00
Nicola Murino
c1effdf701 atomic upload mode: remove temporary file on error
If a transfer error is detected, then the partial temporary file will
be removed and not renamed to the requested path
2019-09-10 18:47:21 +02:00
Nicola Murino
7010f513e3 macOS: add launchd sample plist 2019-09-10 08:03:53 +02:00
Nicola Murino
e2e122169d docker alpine: improve README
The volume configuration is not required anymore: we have a default
config in /etc/sftpgo, and the volume config is mounted on
/srv/sftpgo/config, so we always have a default config in
/etc/sftpgo/sftpgo.json.

Anyway, to persist the server's keys across image rebuilds, a
configuration volume is highly recommended
2019-09-09 13:24:48 +02:00
Nicola Murino
e79f7010b4 docker alpine: use the latest stable docker tag for both run and build env
so we don't need to update our Dockerfile each time a new alpine or golang
version is released
2019-09-09 12:58:07 +02:00
Nicola Murino
f4507aeec2 improve transfer error log and TestSCPErrors 2019-09-09 08:57:11 +02:00
Nicola Murino
bd0eb2f0a8 docker alpine README: fix project name 2019-09-07 23:34:59 +02:00
Nicola Murino
e09ea00d8b improve transfer error log 2019-09-07 23:27:37 +02:00
Nicola Murino
bc5779e26f actions: don't execute actions on errors
detect upload/download errors and don't execute actions if a transfer
error happens.

To detect SFTP errors this patch is needed:

https://github.com/pkg/sftp/pull/307
2019-09-07 23:10:20 +02:00
Nicola Murino
2a7e56ed29 docker: minor fixes 2019-09-07 18:21:03 +02:00
Binou
29f69876fe Docker alpine multiuser (#39)
* SFTPGo with Docker alpine image and systemd init

* Permissions entrypoint script + ENTRYPOINT directive

* Drakkan's reviewed fix

Move Dockerfile into docker/sftpgo/

* Add Default Configuration File in image

Add -R to chown default config file in the directory

Move systemd init file for the alpine image
2019-09-07 17:20:44 +02:00
Nicola Murino
4f1c2c094f improve logging
this partially reverts #45
2019-09-06 15:19:01 +02:00
Jo Vandeginste
abbb7f272b Refactor the logging system
* created a "Log" function for type "Connection"
* created a "log" function for type "Provider"
* replaced logger calls with Log/log where possible

I also renamed PGSSQL to PGSQL, as this seemed to be a typo

Signed-off-by: Jo Vandeginste <Jo.Vandeginste@kuleuven.be>
2019-09-06 13:43:21 +02:00
Nicola Murino
a7363a16be add sample Dockerfiles 2019-09-06 13:20:21 +02:00
Nicola Murino
a26343a694 improve more logs 2019-09-05 23:42:00 +02:00
Jo Vandeginste
25260297aa Show info about public key during login
This will show the key fingerprint and the associated comment, or
"password" when password was used, during login.

E.g.:

```
message":"User id: 1, logged in with: \"public_key:SHA256:FV3+wlAKGzYy7+J02786fh8N8c06+jga/mdiSOSPT7g:jo@desktop\",
```

or

```
message":"User id: 1, logged in with: \"password\",
...
```

Signed-off-by: Jo Vandeginste <Jo.Vandeginste@kuleuven.be>
2019-09-05 22:58:14 +02:00
Jo Vandeginste
bb589c6fc8 Show user info, remove some extraneous connection ids
Signed-off-by: Jo Vandeginste <Jo.Vandeginste@kuleuven.be>
2019-09-05 22:58:14 +02:00
Jo Vandeginste
0737c672f5 Add connectionID to as many entries as possible
Signed-off-by: Jo Vandeginste <Jo.Vandeginste@kuleuven.be>
2019-09-05 22:58:14 +02:00
Nicola Murino
53d70b68d8 config: add configDir before additional search paths
This way the configuration file will be searched for in the following order:

- config dir
- $HOME/.config/sftpgo
- /etc/sftpgo
- current directory
2019-09-05 22:07:59 +02:00
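
A sketch of that search order using viper, which the project uses for configuration; the function name is illustrative, and viper checks the paths in the order they are added.

```go
package config

import "github.com/spf13/viper"

// setViperSearchPaths registers the lookup order described above.
func setViperSearchPaths(configDir string) {
	viper.SetConfigName("sftpgo")
	viper.AddConfigPath(configDir)              // 1. explicit config dir
	viper.AddConfigPath("$HOME/.config/sftpgo") // 2. per-user location
	viper.AddConfigPath("/etc/sftpgo")          // 3. system-wide location
	viper.AddConfigPath(".")                    // 4. current directory
}
```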
Jo Vandeginste
795ffc4c5f Add quotes to certain strings
Signed-off-by: Jo Vandeginste <Jo.Vandeginste@kuleuven.be>
2019-09-05 16:14:31 +02:00
Nicola Murino
e8db7d8539 improve configuration docs
viper will not use env vars if no configuration file is found

https://github.com/spf13/viper/issues/584

As a workaround, we could manually bind/set a default for each configuration
option using viper.SetDefault("key") and then generate a default config
using viper.Get("key").
This manual solution is error-prone and will become increasingly
difficult to maintain as the configuration options grow, so
we avoid it for now.

Let's see if viper solves this issue

Fixes #35
2019-09-04 09:09:17 +02:00
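
For clarity, here is the rejected workaround sketched in code: registering a default for every key makes viper honor matching env vars even when no configuration file is found, but it must be repeated per option, which is exactly the maintenance burden the commit message describes.

```go
package config

import (
	"strings"

	"github.com/spf13/viper"
)

// bindDefaults sketches the per-key workaround rejected above.
func bindDefaults() {
	viper.SetEnvPrefix("sftpgo")
	viper.SetEnvKeyReplacer(strings.NewReplacer(".", "__"))
	viper.AutomaticEnv()

	// One call like this per configuration key...
	viper.SetDefault("sftpd.bind_port", 2022)
	// ...so SFTPGO_SFTPD__BIND_PORT is picked up without sftpgo.json.
	_ = viper.GetInt("sftpd.bind_port")
}
```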
Nicola Murino
c6343ee095 travis: add go 1.13 2019-09-04 08:29:23 +02:00
Nicola Murino
3441b75a58 allow empty log file, use the standard output in this case
Fixes #34
2019-09-03 23:13:33 +02:00
Nicola Murino
cc2ccb3dd7 go.sum cleanup 2019-09-03 12:18:19 +02:00
Nicola Murino
49a40f7a0b sftpd: add configuration options for allowed ciphers, MACs and KEX algorithms
add support for a login banner too

Fixes #32
2019-09-03 12:08:09 +02:00
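
Under the hood such options map onto golang.org/x/crypto/ssh, whose server configuration accepts explicit algorithm lists; a hedged sketch follows, with example algorithm names rather than SFTPGo's defaults.

```go
package sshserver

import "golang.org/x/crypto/ssh"

// newServerConfig restricts the offered KEX algorithms, ciphers and MACs,
// and sets a pre-authentication login banner. Algorithm names are examples.
func newServerConfig() *ssh.ServerConfig {
	cfg := &ssh.ServerConfig{}
	cfg.KeyExchanges = []string{"curve25519-sha256@libssh.org", "ecdh-sha2-nistp256"}
	cfg.Ciphers = []string{"aes128-gcm@openssh.com", "aes128-ctr"}
	cfg.MACs = []string{"hmac-sha2-256-etm@openssh.com", "hmac-sha2-256"}
	// The banner is sent to clients before authentication.
	cfg.BannerCallback = func(conn ssh.ConnMetadata) string {
		return "authorized access only\n"
	}
	return cfg
}
```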
Nicola Murino
dc5eeb54fd scp: fix quota update after file overwrite
added a test case too
2019-09-02 23:12:41 +02:00
269 changed files with 54918 additions and 6912 deletions

12
.github/FUNDING.yml vendored Normal file

@@ -0,0 +1,12 @@
# These are supported funding model platforms
github: [drakkan] # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2]
patreon: # Replace with a single Patreon username
open_collective: # Replace with a single Open Collective username
ko_fi: # Replace with a single Ko-fi username
tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
liberapay: # Replace with a single Liberapay username
issuehunt: # Replace with a single IssueHunt username
otechie: # Replace with a single Otechie username
custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']

2
.github/workflows/.editorconfig vendored Normal file

@@ -0,0 +1,2 @@
[*.yml]
indent_size = 2

204
.github/workflows/development.yml vendored Normal file

@@ -0,0 +1,204 @@
name: CI
on:
  push:
    branches: [master]
  pull_request:
jobs:
  test-deploy:
    name: Test and deploy
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        go: [1.15]
        os: [ubuntu-latest, macos-latest]
        upload-coverage: [true]
        include:
          - go: 1.14
            os: ubuntu-latest
            upload-coverage: false
          - go: 1.15
            os: windows-latest
            upload-coverage: false
    steps:
      - uses: actions/checkout@v2
        with:
          fetch-depth: 0
      - name: Set up Go
        uses: actions/setup-go@v2
        with:
          go-version: ${{ matrix.go }}
      - name: Build for Linux/macOS
        if: startsWith(matrix.os, 'windows-') != true
        run: go build -ldflags "-s -w -X github.com/drakkan/sftpgo/version.commit=`git describe --always --dirty` -X github.com/drakkan/sftpgo/version.date=`date -u +%FT%TZ`" -o sftpgo
      - name: Build for Windows
        if: startsWith(matrix.os, 'windows-')
        run: |
          $GIT_COMMIT = (git describe --always --dirty) | Out-String
          $DATE_TIME = ([datetime]::Now.ToUniversalTime().toString("yyyy-MM-ddTHH:mm:ssZ")) | Out-String
          go build -ldflags "-s -w -X github.com/drakkan/sftpgo/version.commit=$GIT_COMMIT -X github.com/drakkan/sftpgo/version.date=$DATE_TIME" -o sftpgo.exe
      - name: Run test cases using SQLite provider
        run: go test -v -p 1 -timeout 10m ./... -coverprofile=coverage.txt -covermode=atomic
      - name: Upload coverage to Codecov
        if: ${{ matrix.upload-coverage }}
        uses: codecov/codecov-action@v1
        with:
          file: ./coverage.txt
          fail_ci_if_error: false
      - name: Run test cases using bolt provider
        run: |
          go test -v -p 1 -timeout 2m ./config -covermode=atomic
          go test -v -p 1 -timeout 2m ./common -covermode=atomic
          go test -v -p 1 -timeout 3m ./httpd -covermode=atomic
          go test -v -p 1 -timeout 8m ./sftpd -covermode=atomic
          go test -v -p 1 -timeout 2m ./ftpd -covermode=atomic
          go test -v -p 1 -timeout 2m ./webdavd -covermode=atomic
        env:
          SFTPGO_DATA_PROVIDER__DRIVER: bolt
          SFTPGO_DATA_PROVIDER__NAME: 'sftpgo_bolt.db'
      - name: Run test cases using memory provider
        run: go test -v -p 1 -timeout 10m ./... -covermode=atomic
        env:
          SFTPGO_DATA_PROVIDER__DRIVER: memory
          SFTPGO_DATA_PROVIDER__NAME: ''
      - name: Prepare build artifact for Linux/macOS
        if: startsWith(matrix.os, 'windows-') != true
        run: |
          mkdir -p output/{bash_completion,zsh_completion}
          mkdir -p output/examples/rest-api-cli
          cp sftpgo output/
          cp sftpgo.json output/
          cp -r templates output/
          cp -r static output/
          cp -r init output/
          cp examples/rest-api-cli/sftpgo_api_cli output/examples/rest-api-cli/
          ./sftpgo gen completion bash > output/bash_completion/sftpgo
          ./sftpgo gen completion zsh > output/zsh_completion/_sftpgo
          ./sftpgo gen man -d output/man/man1
          gzip output/man/man1/*
      - name: Prepare build artifact for Windows
        if: startsWith(matrix.os, 'windows-')
        run: |
          mkdir output
          copy .\sftpgo.exe .\output
          copy .\sftpgo.json .\output
          mkdir output\templates
          xcopy .\templates .\output\templates\ /E
          mkdir output\static
          xcopy .\static .\output\static\ /E
      - name: Upload build artifact
        uses: actions/upload-artifact@v2
        with:
          name: sftpgo-${{ matrix.os }}-go${{ matrix.go }}
          path: output
      - name: Build Linux Packages
        id: build_linux_pkgs
        if: ${{ matrix.upload-coverage && startsWith(matrix.os, 'ubuntu-') }}
        run: |
          cd pkgs
          ./build.sh
          PKG_VERSION=$(cat dist/version)
          echo "::set-output name=pkg-version::${PKG_VERSION}"
      - name: Upload Debian Package
        if: ${{ matrix.upload-coverage && startsWith(matrix.os, 'ubuntu-') }}
        uses: actions/upload-artifact@v2
        with:
          name: sftpgo-${{ steps.build_linux_pkgs.outputs.pkg-version }}-x86_64-deb
          path: pkgs/dist/deb/*
      - name: Upload RPM Package
        if: ${{ matrix.upload-coverage && startsWith(matrix.os, 'ubuntu-') }}
        uses: actions/upload-artifact@v2
        with:
          name: sftpgo-${{ steps.build_linux_pkgs.outputs.pkg-version }}-x86_64-rpm
          path: pkgs/dist/rpm/*
  test-postgresql-mysql:
    name: Test with PostgreSQL/MySQL
    runs-on: ubuntu-latest
    services:
      postgres:
        image: postgres:latest
        env:
          POSTGRES_PASSWORD: postgres
          POSTGRES_DB: sftpgo
        options: >-
          --health-cmd pg_isready
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5
        ports:
          - 5432:5432
      mariadb:
        image: mariadb:latest
        env:
          MYSQL_ROOT_PASSWORD: mysql
          MYSQL_DATABASE: sftpgo
          MYSQL_USER: sftpgo
          MYSQL_PASSWORD: sftpgo
        options: >-
          --health-cmd "mysqladmin status -h 127.0.0.1 -P 3306 -u root -p$MYSQL_ROOT_PASSWORD"
          --health-interval 10s
          --health-timeout 5s
          --health-retries 6
        ports:
          - 3307:3306
    steps:
      - uses: actions/checkout@v2
      - name: Set up Go
        uses: actions/setup-go@v2
        with:
          go-version: 1.15
      - name: Build
        run: go build -ldflags "-s -w -X github.com/drakkan/sftpgo/version.commit=`git describe --always --dirty` -X github.com/drakkan/sftpgo/version.date=`date -u +%FT%TZ`" -o sftpgo
      - name: Run tests using PostgreSQL provider
        run: |
          go test -v -p 1 -timeout 10m ./... -covermode=atomic
        env:
          SFTPGO_DATA_PROVIDER__DRIVER: postgresql
          SFTPGO_DATA_PROVIDER__NAME: sftpgo
          SFTPGO_DATA_PROVIDER__HOST: localhost
          SFTPGO_DATA_PROVIDER__PORT: 5432
          SFTPGO_DATA_PROVIDER__USERNAME: postgres
          SFTPGO_DATA_PROVIDER__PASSWORD: postgres
      - name: Run tests using MySQL provider
        run: |
          go test -v -p 1 -timeout 10m ./... -covermode=atomic
        env:
          SFTPGO_DATA_PROVIDER__DRIVER: mysql
          SFTPGO_DATA_PROVIDER__NAME: sftpgo
          SFTPGO_DATA_PROVIDER__HOST: localhost
          SFTPGO_DATA_PROVIDER__PORT: 3307
          SFTPGO_DATA_PROVIDER__USERNAME: sftpgo
          SFTPGO_DATA_PROVIDER__PASSWORD: sftpgo
  golangci-lint:
    name: golangci-lint
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - name: Run golangci-lint
        uses: golangci/golangci-lint-action@v1
        with:
          version: v1.31

104
.github/workflows/docker.yml vendored Normal file

@@ -0,0 +1,104 @@
name: Docker
on:
  #schedule:
  #  - cron: '0 4 * * *' # everyday at 4:00 AM UTC
  push:
    branches:
      - master
    tags:
      - v*
  pull_request:
jobs:
  build:
    name: Build
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os:
          - ubuntu-latest
        docker_pkg: [debian, alpine]
    steps:
      - name: Checkout
        uses: actions/checkout@v2
      - name: Repo metadata
        id: repo
        uses: actions/github-script@v3
        with:
          script: |
            const repo = await github.repos.get(context.repo)
            return repo.data
      - name: Gather image information
        id: info
        run: |
          DOCKER_IMAGE=ghcr.io/drakkan/sftpgo
          VERSION=noop
          DOCKERFILE=Dockerfile
          if [ "${{ github.event_name }}" = "schedule" ]; then
            VERSION=nightly
          elif [[ $GITHUB_REF == refs/tags/* ]]; then
            VERSION=${GITHUB_REF#refs/tags/}
          elif [[ $GITHUB_REF == refs/heads/* ]]; then
            VERSION=$(echo ${GITHUB_REF#refs/heads/} | sed -r 's#/+#-#g')
            if [ "${{ github.event.repository.default_branch }}" = "$VERSION" ]; then
              VERSION=edge
            fi
          elif [[ $GITHUB_REF == refs/pull/* ]]; then
            VERSION=pr-${{ github.event.number }}
          fi
          if [[ $DOCKER_PKG == alpine ]]; then
            VERSION="$VERSION-alpine"
            DOCKERFILE=Dockerfile.alpine
          fi
          TAGS="${DOCKER_IMAGE}:${VERSION}"
          if [[ $GITHUB_REF == refs/tags/* ]]; then
            if [[ $DOCKER_PKG == debian ]]; then
              TAGS="$TAGS,${DOCKER_IMAGE}:latest"
            else
              TAGS="$TAGS,${DOCKER_IMAGE}:alpine"
            fi
          fi
          echo ::set-output name=dockerfile::${DOCKERFILE}
          echo ::set-output name=version::${VERSION}
          echo ::set-output name=tags::${TAGS}
          echo ::set-output name=created::$(date -u +'%Y-%m-%dT%H:%M:%SZ')
          echo ::set-output name=sha::${GITHUB_SHA::8}
        env:
          DOCKER_PKG: ${{ matrix.docker_pkg }}
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v1
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
      - name: Login to GitHub Container Registry
        uses: docker/login-action@v1
        with:
          registry: ghcr.io
          username: ${{ github.repository_owner }}
          password: ${{ secrets.CR_PAT }}
        if: github.event_name != 'pull_request'
      - name: Build and push
        uses: docker/build-push-action@v2
        with:
          file: ./${{ steps.info.outputs.dockerfile }}
          platforms: linux/amd64,linux/arm64
          push: ${{ github.event_name != 'pull_request' }}
          tags: ${{ steps.info.outputs.tags }}
          build-args: |
            COMMIT_SHA=${{ steps.info.outputs.sha }}
          labels: |
            org.opencontainers.image.title=SFTPGo
            org.opencontainers.image.description=Fully featured and highly configurable SFTP server with optional FTP/S and WebDAV support
            org.opencontainers.image.url=${{ fromJson(steps.repo.outputs.result).html_url }}
            org.opencontainers.image.documentation=${{ fromJson(steps.repo.outputs.result).html_url }}/blob/${{ github.sha }}/docker/README.md
            org.opencontainers.image.source=${{ fromJson(steps.repo.outputs.result).clone_url }}
            org.opencontainers.image.version=${{ steps.info.outputs.version }}
            org.opencontainers.image.created=${{ steps.info.outputs.created }}
            org.opencontainers.image.revision=${{ github.sha }}
            org.opencontainers.image.licenses=${{ fromJson(steps.repo.outputs.result).license.spdx_id }}

291
.github/workflows/release.yml vendored Normal file

@@ -0,0 +1,291 @@
name: Release
on:
  push:
    tags: 'v*'
env:
  GO_VERSION: 1.15
jobs:
  create-release:
    name: Create
    runs-on: ubuntu-latest
    steps:
      - name: Create Release
        id: create_release
        uses: actions/create-release@v1
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          tag_name: ${{ github.ref }}
          release_name: ${{ github.ref }}
          draft: false
          prerelease: false
      - name: Save release upload URL
        run: echo "${{ steps.create_release.outputs.upload_url }}" > ./upload_url.txt
        shell: bash
      - name: Store release upload URL
        uses: actions/upload-artifact@v2
        with:
          name: upload_url
          path: ./upload_url.txt
  release-sources-with-deps:
    name: Publish sources
    needs: create-release
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - name: Set up Go
        uses: actions/setup-go@v2
        with:
          go-version: ${{ env.GO_VERSION }}
      - name: Get SFTPGo version
        id: get_version
        run: echo ::set-output name=VERSION::${GITHUB_REF/refs\/tags\//}
      - name: Prepare release
        run: |
          go mod vendor
          echo "${SFTPGO_VERSION}" > VERSION.txt
          tar cJvf sftpgo_${SFTPGO_VERSION}_src_with_deps.tar.xz *
        env:
          SFTPGO_VERSION: ${{ steps.get_version.outputs.VERSION }}
      - name: Download release upload URL
        uses: actions/download-artifact@v2
        with:
          name: upload_url
      - name: Get release upload URL
        id: upload_url
        run: |
          URL=$(cat upload_url.txt)
          echo "::set-output name=url::${URL}"
        shell: bash
      - name: Upload Release
        id: upload-release-asset
        uses: actions/upload-release-asset@v1
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          upload_url: ${{ steps.upload_url.outputs.url }}
          asset_path: ./sftpgo_${{ steps.get_version.outputs.VERSION }}_src_with_deps.tar.xz
          asset_name: sftpgo_${{ steps.get_version.outputs.VERSION }}_src_with_deps.tar.xz
          asset_content_type: application/x-xz
  publish:
    name: Publish binary
    needs: create-release
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os: [ubuntu-latest, macos-latest, windows-latest]
    steps:
      - uses: actions/checkout@v2
      - name: Set up Go
        uses: actions/setup-go@v2
        with:
          go-version: ${{ env.GO_VERSION }}
      - name: Set up Python
        if: startsWith(matrix.os, 'windows-')
        uses: actions/setup-python@v2
        with:
          python-version: 3.x
      - name: Build for Linux/macOS
        if: startsWith(matrix.os, 'windows-') != true
        run: go build -ldflags "-s -w -X github.com/drakkan/sftpgo/version.commit=`git describe --always --dirty` -X github.com/drakkan/sftpgo/version.date=`date -u +%FT%TZ`" -o sftpgo
      - name: Build for Windows
        if: startsWith(matrix.os, 'windows-')
        run: |
          $GIT_COMMIT = (git describe --always --dirty) | Out-String
          $DATE_TIME = ([datetime]::Now.ToUniversalTime().toString("yyyy-MM-ddTHH:mm:ssZ")) | Out-String
          go build -ldflags "-s -w -X github.com/drakkan/sftpgo/version.commit=$GIT_COMMIT -X github.com/drakkan/sftpgo/version.date=$DATE_TIME" -o sftpgo.exe
      - name: Initialize data provider
        run: ./sftpgo initprovider
        shell: bash
      - name: Get SFTPGo version
        id: get_version
        run: echo ::set-output name=VERSION::${GITHUB_REF/refs\/tags\//}
        shell: bash
      - name: Get OS name
        id: get_os_name
        run: |
          if [ $MATRIX_OS == 'ubuntu-latest' ]
          then
            echo ::set-output name=OS::linux
          elif [ $MATRIX_OS == 'macos-latest' ]
          then
            echo ::set-output name=OS::macOS
          else
            echo ::set-output name=OS::windows
          fi
        shell: bash
        env:
          MATRIX_OS: ${{ matrix.os }}
      - name: Build REST API CLI for Windows
        if: startsWith(matrix.os, 'windows-')
        run: |
          python -m pip install --upgrade pip setuptools wheel
          pip install requests
          pip install pygments
          pip install pyinstaller
          pyinstaller --hidden-import="pkg_resources.py2_warn" --noupx --onefile examples\rest-api-cli\sftpgo_api_cli
      - name: Prepare Release for Linux/macOS
        if: startsWith(matrix.os, 'windows-') != true
        run: |
          mkdir -p output/{init,examples/rest-api-cli,sqlite,bash_completion,zsh_completion}
          echo "For documentation please take a look here:" > output/README.txt
          echo "" >> output/README.txt
          echo "https://github.com/drakkan/sftpgo/blob/${SFTPGO_VERSION}/README.md" >> output/README.txt
          cp LICENSE output/
          cp sftpgo output/
          cp sftpgo.json output/
          cp sftpgo.db output/sqlite/
          cp -r static output/
          cp -r templates output/
          if [ $OS == 'linux' ]
          then
            cp init/sftpgo.service output/init/
          else
            cp init/com.github.drakkan.sftpgo.plist output/init/
          fi
          ./sftpgo gen completion bash > output/bash_completion/sftpgo
          ./sftpgo gen completion zsh > output/zsh_completion/_sftpgo
          ./sftpgo gen man -d output/man/man1
          gzip output/man/man1/*
          cp examples/rest-api-cli/sftpgo_api_cli output/examples/rest-api-cli/
          cd output
          tar cJvf sftpgo_${SFTPGO_VERSION}_${OS}_x86_64.tar.xz *
          cd ..
        env:
          SFTPGO_VERSION: ${{ steps.get_version.outputs.VERSION }}
          OS: ${{ steps.get_os_name.outputs.OS }}
      - name: Prepare Linux Packages
        id: build_linux_pkgs
        if: ${{ matrix.os == 'ubuntu-latest' }}
        run: |
          cd pkgs
          ./build.sh
          PKG_VERSION=${SFTPGO_VERSION:1}
          echo "::set-output name=pkg-version::${PKG_VERSION}"
        env:
          SFTPGO_VERSION: ${{ steps.get_version.outputs.VERSION }}
      - name: Prepare Release for Windows
        if: startsWith(matrix.os, 'windows-')
        run: |
          mkdir output
          copy .\sftpgo.exe .\output
          copy .\sftpgo.json .\output
          copy .\sftpgo.db .\output
          copy .\dist\sftpgo_api_cli.exe .\output
          copy .\LICENSE .\output\LICENSE.txt
          mkdir output\templates
          xcopy .\templates .\output\templates\ /E
          mkdir output\static
          xcopy .\static .\output\static\ /E
          iscc windows-installer\sftpgo.iss
        env:
          SFTPGO_ISS_VERSION: ${{ steps.get_version.outputs.VERSION }}
          SFTPGO_ISS_DOC_URL: https://github.com/drakkan/sftpgo/blob/${{ steps.get_version.outputs.VERSION }}/README.md
      - name: Prepare Portable Release for Windows
        if: startsWith(matrix.os, 'windows-')
        run: |
          mkdir win-portable\examples\rest-api-cli
          copy .\sftpgo.exe .\win-portable
          copy .\sftpgo.json .\win-portable
          copy .\sftpgo.db .\win-portable
          copy .\dist\sftpgo_api_cli.exe .\win-portable\examples\rest-api-cli
          copy .\LICENSE .\win-portable\LICENSE.txt
          mkdir win-portable\templates
          xcopy .\templates .\win-portable\templates\ /E
          mkdir win-portable\static
          xcopy .\static .\win-portable\static\ /E
          Compress-Archive .\win-portable\* sftpgo_portable_x86_64.zip
        env:
          SFTPGO_VERSION: ${{ steps.get_version.outputs.VERSION }}
          OS: ${{ steps.get_os_name.outputs.OS }}
      - name: Download release upload URL
        uses: actions/download-artifact@v2
        with:
          name: upload_url
      - name: Get release upload URL
        id: upload_url
        run: |
          URL=$(cat upload_url.txt)
          echo "::set-output name=url::${URL}"
        shell: bash
      - name: Upload Linux/macOS Release
        if: startsWith(matrix.os, 'windows-') != true
        uses: actions/upload-release-asset@v1
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          upload_url: ${{ steps.upload_url.outputs.url }}
          asset_path: ./output/sftpgo_${{ steps.get_version.outputs.VERSION }}_${{ steps.get_os_name.outputs.OS }}_x86_64.tar.xz
          asset_name: sftpgo_${{ steps.get_version.outputs.VERSION }}_${{ steps.get_os_name.outputs.OS }}_x86_64.tar.xz
          asset_content_type: application/x-xz
      - name: Upload Windows Release
        if: startsWith(matrix.os, 'windows-')
        uses: actions/upload-release-asset@v1
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          upload_url: ${{ steps.upload_url.outputs.url }}
          asset_path: ./sftpgo_windows_x86_64.exe
          asset_name: sftpgo_${{ steps.get_version.outputs.VERSION }}_${{ steps.get_os_name.outputs.OS }}_x86_64.exe
          asset_content_type: application/x-dosexec
      - name: Upload Portable Windows Release
        if: startsWith(matrix.os, 'windows-')
        uses: actions/upload-release-asset@v1
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          upload_url: ${{ steps.upload_url.outputs.url }}
          asset_path: ./sftpgo_portable_x86_64.zip
          asset_name: sftpgo_${{ steps.get_version.outputs.VERSION }}_${{ steps.get_os_name.outputs.OS }}_portable_x86_64.zip
          asset_content_type: application/zip
      - name: Upload Debian Package
        if: ${{ matrix.os == 'ubuntu-latest' }}
        uses: actions/upload-release-asset@v1
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          upload_url: ${{ steps.upload_url.outputs.url }}
          asset_path: ./pkgs/dist/deb/sftpgo_${{ steps.build_linux_pkgs.outputs.pkg-version }}-1_amd64.deb
          asset_name: sftpgo_${{ steps.build_linux_pkgs.outputs.pkg-version }}-1_amd64.deb
          asset_content_type: application/vnd.debian.binary-package
      - name: Upload RPM Package
        if: ${{ matrix.os == 'ubuntu-latest' }}
        uses: actions/upload-release-asset@v1
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          upload_url: ${{ steps.upload_url.outputs.url }}
          asset_path: ./pkgs/dist/rpm/sftpgo-${{ steps.build_linux_pkgs.outputs.pkg-version }}-1.x86_64.rpm
          asset_name: sftpgo-${{ steps.build_linux_pkgs.outputs.pkg-version }}-1.x86_64.rpm
          asset_content_type: application/x-rpm

3
.gitignore vendored Normal file

@@ -0,0 +1,3 @@
# compilation output
sftpgo
sftpgo.exe

42
.golangci.yml Normal file

@@ -0,0 +1,42 @@
run:
  timeout: 5m
  issues-exit-code: 1
  tests: true
linters-settings:
  dupl:
    threshold: 150
  errcheck:
    check-type-assertions: false
    check-blank: false
  goconst:
    min-len: 3
    min-occurrences: 3
  gocyclo:
    min-complexity: 15
  gofmt:
    simplify: true
  goimports:
    local-prefixes: github.com/drakkan/sftpgo
  maligned:
    suggest-new: true
linters:
  enable:
    - goconst
    - errcheck
    - gofmt
    - goimports
    - golint
    - unconvert
    - unparam
    - bodyclose
    - gocyclo
    - misspell
    - maligned
    - whitespace
    - dupl
    - scopelint
    - rowserrcheck
    - dogsled


@@ -1,23 +0,0 @@
language: go
os:
  - linux
  - osx
go:
  - "1.12.x"
env:
  - GO111MODULE=on
before_script:
  - sqlite3 sftpgo.db 'CREATE TABLE "users" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "username" varchar(255) NOT NULL UNIQUE, "password" varchar(255) NULL, "public_keys" text NULL, "home_dir" varchar(255) NOT NULL, "uid" integer NOT NULL, "gid" integer NOT NULL, "max_sessions" integer NOT NULL, "quota_size" bigint NOT NULL, "quota_files" integer NOT NULL, "permissions" text NOT NULL, "used_quota_size" bigint NOT NULL, "used_quota_files" integer NOT NULL, "last_quota_update" bigint NOT NULL, "upload_bandwidth" integer NOT NULL, "download_bandwidth" integer NOT NULL);'
install:
  - go get -v -t ./...
script:
  - go test -v ./... -coverprofile=coverage.txt -covermode=atomic
after_success:
  - bash <(curl -s https://codecov.io/bash)

70
Dockerfile Normal file

@@ -0,0 +1,70 @@
FROM golang:1.15 as builder
ENV GOFLAGS="-mod=readonly"
RUN mkdir -p /workspace
WORKDIR /workspace
ARG GOPROXY
COPY go.mod go.sum ./
RUN go mod download
ARG COMMIT_SHA
# This ARG allows disabling some optional features and it might be useful if you build the image yourself.
# For example you can disable S3 and GCS support like this:
# --build-arg FEATURES=nos3,nogcs
ARG FEATURES
COPY . .
RUN set -xe && \
export COMMIT_SHA=${COMMIT_SHA:-$(git describe --always --dirty)} && \
go build $(if [ -n "${FEATURES}" ]; then echo "-tags ${FEATURES}"; fi) -ldflags "-s -w -X github.com/drakkan/sftpgo/version.commit=${COMMIT_SHA} -X github.com/drakkan/sftpgo/version.date=`date -u +%FT%TZ`" -v -o sftpgo
FROM debian:buster-slim
RUN apt-get update && apt-get install --no-install-recommends -y ca-certificates mime-support && apt-get clean
SHELL ["/bin/bash", "-c"]
RUN mkdir -p /etc/sftpgo /var/lib/sftpgo /usr/share/sftpgo
RUN groupadd --system -g 1000 sftpgo && \
useradd --system --gid sftpgo --no-create-home \
--home-dir /var/lib/sftpgo --shell /usr/sbin/nologin \
--comment "SFTPGo user" --uid 1000 sftpgo
# Install some optional packages used by SFTPGo features
RUN apt-get update && apt-get install --no-install-recommends -y git rsync && apt-get clean
# Override some configuration details
ENV SFTPGO_CONFIG_DIR=/etc/sftpgo
ENV SFTPGO_LOG_FILE_PATH=""
ENV SFTPGO_HTTPD__TEMPLATES_PATH=/usr/share/sftpgo/templates
ENV SFTPGO_HTTPD__STATIC_FILES_PATH=/usr/share/sftpgo/static
# Sane defaults, but users should still be able to override this from env vars
ENV SFTPGO_DATA_PROVIDER__USERS_BASE_DIR=/var/lib/sftpgo/users
ENV SFTPGO_DATA_PROVIDER__CREDENTIALS_PATH=/var/lib/sftpgo/credentials
ENV SFTPGO_HTTPD__BACKUPS_PATH=/var/lib/sftpgo/backups
ENV SFTPGO_SFTPD__HOST_KEYS=/var/lib/sftpgo/host_keys/id_rsa,/var/lib/sftpgo/host_keys/id_ecdsa
ENV SFTPGO_HTTPD__BIND_ADDRESS=""
COPY --from=builder /workspace/sftpgo.json /etc/sftpgo/sftpgo.json
COPY --from=builder /workspace/templates /usr/share/sftpgo/templates
COPY --from=builder /workspace/static /usr/share/sftpgo/static
COPY --from=builder /workspace/sftpgo /usr/local/bin/
RUN sed -i "s|sftpgo.db|/var/lib/sftpgo/sftpgo.db|" /etc/sftpgo/sftpgo.json
RUN chown -R sftpgo:sftpgo /etc/sftpgo && chown sftpgo:sftpgo /var/lib/sftpgo && \
chmod 640 /etc/sftpgo/sftpgo.json && \
chmod 750 /etc/sftpgo /var/lib/sftpgo
USER sftpgo
VOLUME /var/lib/sftpgo
CMD sftpgo serve

75
Dockerfile.alpine Normal file

@@ -0,0 +1,75 @@
FROM golang:1.15-alpine AS builder
ENV GOFLAGS="-mod=readonly"
RUN apk add --update --no-cache bash ca-certificates curl git gcc g++
RUN mkdir -p /workspace
WORKDIR /workspace
ARG GOPROXY
COPY go.mod go.sum ./
RUN go mod download
ARG COMMIT_SHA
# This ARG allows disabling some optional features and it might be useful if you build the image yourself.
# For example you can disable S3 and GCS support like this:
# --build-arg FEATURES=nos3,nogcs
ARG FEATURES
COPY . .
RUN set -xe && \
export COMMIT_SHA=${COMMIT_SHA:-$(git describe --always --dirty)} && \
go build $(if [ -n "${FEATURES}" ]; then echo "-tags ${FEATURES}"; fi) -ldflags "-s -w -X github.com/drakkan/sftpgo/version.commit=${COMMIT_SHA} -X github.com/drakkan/sftpgo/version.date=`date -u +%FT%TZ`" -v -o sftpgo
FROM alpine:3.12
RUN apk add --update --no-cache ca-certificates tzdata bash mailcap
SHELL ["/bin/bash", "-c"]
# set up nsswitch.conf for Go's "netgo" implementation
# https://github.com/gliderlabs/docker-alpine/issues/367#issuecomment-424546457
RUN test ! -e /etc/nsswitch.conf && echo 'hosts: files dns' > /etc/nsswitch.conf
RUN mkdir -p /etc/sftpgo /var/lib/sftpgo /usr/share/sftpgo
RUN addgroup -g 1000 -S sftpgo && \
adduser -u 1000 -h /var/lib/sftpgo -s /sbin/nologin -G sftpgo -S -D -H sftpgo
# Install some optional packages used by SFTPGo features
RUN apk add --update --no-cache rsync git
# Override some configuration details
ENV SFTPGO_CONFIG_DIR=/etc/sftpgo
ENV SFTPGO_LOG_FILE_PATH=""
ENV SFTPGO_HTTPD__TEMPLATES_PATH=/usr/share/sftpgo/templates
ENV SFTPGO_HTTPD__STATIC_FILES_PATH=/usr/share/sftpgo/static
# Sane defaults, but users should still be able to override this from env vars
ENV SFTPGO_DATA_PROVIDER__USERS_BASE_DIR=/var/lib/sftpgo/users
ENV SFTPGO_DATA_PROVIDER__CREDENTIALS_PATH=/var/lib/sftpgo/credentials
ENV SFTPGO_HTTPD__BACKUPS_PATH=/var/lib/sftpgo/backups
ENV SFTPGO_SFTPD__HOST_KEYS=/var/lib/sftpgo/host_keys/id_rsa,/var/lib/sftpgo/host_keys/id_ecdsa
ENV SFTPGO_HTTPD__BIND_ADDRESS=""
COPY --from=builder /workspace/sftpgo.json /etc/sftpgo/sftpgo.json
COPY --from=builder /workspace/templates /usr/share/sftpgo/templates
COPY --from=builder /workspace/static /usr/share/sftpgo/static
COPY --from=builder /workspace/sftpgo /usr/local/bin/
RUN sed -i "s|sftpgo.db|/var/lib/sftpgo/sftpgo.db|" /etc/sftpgo/sftpgo.json
RUN chown -R sftpgo:sftpgo /etc/sftpgo && chown sftpgo:sftpgo /var/lib/sftpgo && \
chmod 640 /etc/sftpgo/sftpgo.json && \
chmod 750 /etc/sftpgo /var/lib/sftpgo
USER sftpgo
VOLUME /var/lib/sftpgo
CMD sftpgo serve

449
README.md

@@ -1,343 +1,198 @@
# SFTPGo
[![Build Status](https://travis-ci.org/drakkan/sftpgo.svg?branch=master)](https://travis-ci.org/drakkan/sftpgo) [![Code Coverage](https://codecov.io/gh/drakkan/sftpgo/branch/master/graph/badge.svg)](https://codecov.io/gh/drakkan/sftpgo/branch/master) [![Go Report Card](https://goreportcard.com/badge/github.com/drakkan/sftpgo)](https://goreportcard.com/report/github.com/drakkan/sftpgo) [![License: GPL v3](https://img.shields.io/badge/License-GPLv3-blue.svg)](https://www.gnu.org/licenses/gpl-3.0) [![Mentioned in Awesome Go](https://awesome.re/mentioned-badge.svg)](https://github.com/avelino/awesome-go)
Full featured and highly configurable SFTP server
![CI Status](https://github.com/drakkan/sftpgo/workflows/CI/badge.svg?branch=master&event=push)
[![Code Coverage](https://codecov.io/gh/drakkan/sftpgo/branch/master/graph/badge.svg)](https://codecov.io/gh/drakkan/sftpgo/branch/master)
[![Go Report Card](https://goreportcard.com/badge/github.com/drakkan/sftpgo)](https://goreportcard.com/report/github.com/drakkan/sftpgo)
[![License: GPL v3](https://img.shields.io/badge/License-GPLv3-blue.svg)](https://www.gnu.org/licenses/gpl-3.0)
[![Mentioned in Awesome Go](https://awesome.re/mentioned-badge.svg)](https://github.com/avelino/awesome-go)
Fully featured and highly configurable SFTP server with optional FTP/S and WebDAV support, written in Go.
It can serve local filesystem, S3 or Google Cloud Storage.
## Features
- Each account is chrooted to its Home Dir.
- SFTP accounts are virtual accounts stored in a "data provider".
- SQLite, MySQL, PostgreSQL and bbolt (key/value store in pure Go) data providers are supported.
- SFTPGo uses virtual accounts stored inside a "data provider".
- SQLite, MySQL, PostgreSQL, bbolt (key/value store in pure Go) and in-memory data providers are supported.
- Each account is chrooted to its home directory.
- Public key and password authentication. Multiple public keys per user are supported.
- SSH user [certificate authentication](https://cvsweb.openbsd.org/src/usr.bin/ssh/PROTOCOL.certkeys?rev=1.8).
- Keyboard interactive authentication. You can easily setup a customizable multi-factor authentication.
- Partial authentication. You can configure multi-step authentication requiring, for example, the user password after successful public key authentication.
- Per user authentication methods. You can configure the allowed authentication methods for each user.
- Custom authentication via external programs/HTTP API is supported.
- Dynamic user modification before login via external programs/HTTP API is supported.
- Quota support: accounts can have individual quota expressed as max total size and/or max number of files.
- Bandwidth throttling is supported, with distinct settings for upload and download.
- Per user maximum concurrent sessions.
- Per user permissions: list directories content, upload, download, delete, rename, create directories, create symlinks can be enabled or disabled.
- Per user files/folders ownership: you can map all the users to the system account that runs SFTPGo (all platforms are supported) or you can run SFTPGo as root user and map each user or group of users to a different system account (*NIX only).
- Configurable custom commands and/or HTTP notifications on upload, download, delete or rename.
- Per user and per directory permission management: list directory contents, upload, overwrite, download, delete, rename, create directories, create symlinks, change owner/group and mode, change access and modification times.
- Per user files/folders ownership mapping: you can map all the users to the system account that runs SFTPGo (all platforms are supported) or you can run SFTPGo as root user and map each user or group of users to a different system account (\*NIX only).
- Per user IP filters are supported: login can be restricted to specific ranges of IP addresses or to a specific IP address.
- Per user and per directory file extensions filters are supported: files can be allowed or denied based on their extensions.
- Virtual folders are supported: directories outside the user home directory can be exposed as virtual folders.
- Configurable custom commands and/or HTTP notifications on file upload, download, pre-delete, delete, rename, on SSH commands and on user add, update and delete.
- Automatically terminating idle connections.
- Atomic uploads are configurable.
- Optional SCP support.
- REST API for users and quota management and real time reports for the active connections with possibility of forcibly closing a connection.
- Configuration is at your choice: JSON, TOML, YAML, HCL, envfile are supported.
- Log files are accurate and they are saved in the easily parsable JSON format.
- Support for Git repositories over SSH.
- SCP and rsync are supported.
- FTP/S is supported. You can configure the FTP service to require TLS for both control and data connections.
- [WebDAV](./docs/webdav.md) is supported.
- Support for serving local filesystem, S3 Compatible Object Storage and Google Cloud Storage over SFTP/SCP/FTP/WebDAV.
- Per user protocols restrictions. You can configure the allowed protocols (SSH/FTP/WebDAV) for each user.
- [Prometheus metrics](./docs/metrics.md) are exposed.
- Support for HAProxy PROXY protocol: you can proxy and/or load balance the SFTP/SCP/FTP/WebDAV service without losing the information about the client's address.
- [REST API](./docs/rest-api.md) for users and folders management, backup, restore and real time reports of the active connections with possibility of forcibly closing a connection.
- [Web based administration interface](./docs/web-admin.md) to easily manage users, folders and connections.
- Easy [migration](./examples/rest-api-cli#convert-users-from-other-stores) from Linux system user accounts.
- [Portable mode](./docs/portable-mode.md): a convenient way to share a single directory on demand.
- Performance analysis using built-in [profiler](./docs/profiling.md).
- Configuration format is at your choice: JSON, TOML, YAML, HCL, envfile are supported.
- Log files are accurate and they are saved in the easily parsable JSON format ([more information](./docs/logs.md)).
## Platforms
SFTPGo is developed and tested on Linux. After each commit the code is automatically built and tested on Linux and macOS using Travis CI.
Regularly the test cases are manually executed and pass on Windows. Other UNIX variants such as *BSD should work too.
SFTPGo is developed and tested on Linux. After each commit, the code is automatically built and tested on Linux, macOS and Windows using a [GitHub Action](./.github/workflows/development.yml). The test cases are regularly manually executed and passed on FreeBSD. Other *BSD variants should work too.
## Requirements
- Go 1.12 or higher.
- A suitable SQL server or key/value store to use as data provider: PostgreSQL 9.4+ or MySQL 5.6+ or SQLite 3.x or bbolt 1.3.x
- Go 1.13 or higher as build only dependency.
- A suitable SQL server to use as data provider: PostgreSQL 9.4+ or MySQL 5.6+ or SQLite 3.x.
- The SQL server is optional: you can choose to use an embedded bolt database as key/value store or an in-memory data provider.
## Installation
Simply install the package to your [$GOPATH](https://github.com/golang/go/wiki/GOPATH "GOPATH") with the [go tool](https://golang.org/cmd/go/ "go command") from shell:
Binary releases for Linux, macOS, and Windows are available. Please visit the [releases](https://github.com/drakkan/sftpgo/releases "releases") page.
```bash
$ go get -u github.com/drakkan/sftpgo
```
Official Docker images are available. Documentation is [here](./docker/README.md).
Make sure [Git is installed](https://git-scm.com/downloads) on your machine and in your system's `PATH`.
Some Linux distro packages are available:
SFTPGo depends on [go-sqlite3](https://github.com/mattn/go-sqlite3) that is a CGO package and so it requires a `C` compiler at build time.
On Linux and macOS a compiler is easy to install or already installed, on Windows you need to download [MinGW-w64](https://sourceforge.net/projects/mingw-w64/files/) and build SFTPGo from its command prompt.
- For Arch Linux via AUR:
- [sftpgo](https://aur.archlinux.org/packages/sftpgo/). This package follows stable releases. It requires `git`, `gcc` and `go` to build.
- [sftpgo-bin](https://aur.archlinux.org/packages/sftpgo-bin/). This package follows stable releases downloading the prebuilt linux binary from GitHub. It does not require `git`, `gcc` and `go` to build.
- [sftpgo-git](https://aur.archlinux.org/packages/sftpgo-git/). This package builds and installs the latest git master. It requires `git`, `gcc` and `go` to build.
- Deb and RPM packages are built after each commit and for each release.
- For Ubuntu a PPA is available [here](https://launchpad.net/~sftpgo/+archive/ubuntu/sftpgo).
The compiler is a build-time-only dependency; it is not required at runtime.
You can easily test new features selecting a commit from the [Actions](https://github.com/drakkan/sftpgo/actions) page and downloading the matching build artifacts for Linux, macOS or Windows. GitHub stores artifacts for 90 days.
If you don't need SQLite, you can also get/build SFTPGo setting the environment variable `CGO_ENABLED` to 0; this way SQLite support will be disabled, but PostgreSQL, MySQL and bbolt will work and you don't need a `C` compiler for building.
Version info, such as git commit and build date, can be embedded setting the following string variables at build time:
- `github.com/drakkan/sftpgo/utils.commit`
- `github.com/drakkan/sftpgo/utils.date`
For example you can build using the following command:
```bash
go build -i -ldflags "-s -w -X github.com/drakkan/sftpgo/utils.commit=`git describe --tags --always --dirty` -X github.com/drakkan/sftpgo/utils.date=`date -u +%FT%TZ`" -o sftpgo
```
and you will get a version that includes git commit and build date like this one:
```bash
sftpgo -v
SFTPGo version: 0.9.0-dev-90607d4-dirty-2019-08-08T19:28:36Z
```
For Linux, a systemd sample [service](https://github.com/drakkan/sftpgo/tree/master/init/sftpgo.service "systemd service") can be found inside the source tree.
Alternately you can use distro packages:
- Arch Linux PKGBUILD is available on [AUR](https://aur.archlinux.org/packages/sftpgo-git/ "SFTPGo")
Alternately, you can [build from source](./docs/build-from-source.md).
## Configuration
The `sftpgo` executable can be used this way:
A full explanation of all configuration methods can be found [here](./docs/full-configuration.md).
```bash
Usage:
sftpgo [command]
Please make sure to [initialize the data provider](#data-provider-initialization) before running the daemon!
Available Commands:
help Help about any command
serve Start the SFTP Server
Flags:
-h, --help help for sftpgo
-v, --version
```
The `serve` subcommand supports the following flags:
- `--config-dir` string. Location of the config dir. This directory should contain the `sftpgo` configuration file and is used as the base for files with a relative path (e.g. the private keys for the SFTP server, the SQLite or bbolt database if you use SQLite or bbolt as data provider). The default value is "." or the value of `SFTPGO_CONFIG_DIR` environment variable.
- `--config-file` string. Name of the configuration file. It must be the name of a file stored in config-dir, not the absolute path to the configuration file. The specified file name must have no extension; we automatically load JSON, YAML, TOML, HCL and Java properties. The default value is "sftpgo" (and therefore `sftpgo.json`, `sftpgo.yaml` and so on are searched) or the value of `SFTPGO_CONFIG_FILE` environment variable.
- `--log-compress` boolean. Determine if the rotated log files should be compressed using gzip. Default `false` or the value of `SFTPGO_LOG_COMPRESS` environment variable (1 or `true`, 0 or `false`).
- `--log-file-path` string. Location for the log file, default "sftpgo.log" or the value of `SFTPGO_LOG_FILE_PATH` environment variable.
- `--log-max-age` int. Maximum number of days to retain old log files. Default 28 or the value of `SFTPGO_LOG_MAX_AGE` environment variable.
- `--log-max-backups` int. Maximum number of old log files to retain. Default 5 or the value of `SFTPGO_LOG_MAX_BACKUPS` environment variable.
- `--log-max-size` int. Maximum size in megabytes of the log file before it gets rotated. Default 10 or the value of `SFTPGO_LOG_MAX_SIZE` environment variable.
- `--log-verbose` boolean. Enable verbose logs. Default `true` or the value of `SFTPGO_LOG_VERBOSE` environment variable (1 or `true`, 0 or `false`).
If you don't configure any private host keys, the daemon will use `id_rsa` in the configuration directory. If that file doesn't exist, the daemon will attempt to autogenerate it (if the user that executes SFTPGo has write access to the config-dir). The server supports any private key format supported by [`crypto/ssh`](https://github.com/golang/crypto/blob/master/ssh/keys.go#L32).
Before starting `sftpgo` a dataprovider must be configured.
Sample SQL scripts to create the required database structure can be found inside the source tree [sql](https://github.com/drakkan/sftpgo/tree/master/sql "sql") directory. The SQL script file names are, by convention, the date as `YYYYMMDD` plus the suffix `.sql`. You need to apply all the SQL scripts for your database ordered by name, for example `20190706.sql` must be applied before `20190728.sql` and so on.
The `sftpgo` configuration file contains the following sections:
- **"sftpd"**, the configuration for the SFTP server
- `bind_port`, integer. The port used for serving SFTP requests. Default: 2022
- `bind_address`, string. Leave blank to listen on all available network interfaces. Default: ""
- `idle_timeout`, integer. Time in minutes after which an idle client will be disconnected. Default: 15
- `max_auth_tries` integer. Maximum number of authentication attempts permitted per connection. If set to a negative number, the number of attempts is unlimited. If set to zero, the number of attempts is limited to 6.
- `umask`, string. Umask for the new files and directories. This setting has no effect on Windows. Default: "0022"
- `banner`, string. Identification string used by the server. Default "SFTPGo"
- `upload_mode` integer. 0 means standard, the files are uploaded directly to the requested path. 1 means atomic: files are uploaded to a temporary path and renamed to the requested path when the client ends the upload. Atomic mode avoids problems such as a web server that serves partial files when the files are being uploaded
- `actions`, struct. It contains the command to execute and/or the HTTP URL to notify and the trigger conditions
- `execute_on`, list of strings. Valid values are `download`, `upload`, `delete`, `rename`. On folder deletion a `delete` notification will be sent for each deleted file. Leave empty to disable actions.
- `command`, string. Absolute path to the command to execute. Leave empty to disable. The command is invoked with the following arguments:
- `action`, any valid `execute_on` string
- `username`, user who did the action
- `path` to the affected file. For `rename` action this is the old file name
- `target_path`, non empty for `rename` action, this is the new file name
- `http_notification_url`, a valid URL. An HTTP GET request will be executed to this URL. Leave empty to disable. The query string will contain the following parameters that have the same meaning of the command's arguments:
- `action`
- `username`
- `path`
- `target_path`, added for `rename` action only
- `keys`, struct array. It contains the daemon's private keys. If empty or missing the daemon will search or try to generate `id_rsa` in the configuration directory.
- `private_key`, path to the private key file. It can be a path relative to the config dir or an absolute one.
- `enable_scp`, boolean. Default disabled. Set to `true` to enable SCP support. SCP is an experimental feature; we have our own SCP implementation since we can't rely on the `scp` system command to properly handle permissions, quota and user's home dir restrictions. The SCP protocol is quite simple but there are no official docs about it, so we need more testing and feedback before enabling it by default. We may not handle some borderline cases or have sneaky bugs. Please do accurate tests yourself before enabling SCP and let us know if something does not work as expected for your use cases. SCP between two remote hosts is supported using the `-3` scp option.
- **"data_provider"**, the configuration for the data provider
- `driver`, string. Supported drivers are `sqlite`, `mysql`, `postgresql`, `bolt`
- `name`, string. Database name. For driver `sqlite` this can be the database name relative to the config dir or the absolute path to the SQLite database.
- `host`, string. Database host. Leave empty for driver `sqlite` and `bolt`
- `port`, integer. Database port. Leave empty for driver `sqlite` and `bolt`
- `username`, string. Database user. Leave empty for driver `sqlite` and `bolt`
- `password`, string. Database password. Leave empty for driver `sqlite` and `bolt`
- `sslmode`, integer. Used for drivers `mysql` and `postgresql`. 0 disable SSL/TLS connections, 1 require ssl, 2 set ssl mode to `verify-ca` for driver `postgresql` and `skip-verify` for driver `mysql`, 3 set ssl mode to `verify-full` for driver `postgresql` and `preferred` for driver `mysql`
- `connectionstring`, string. Provide a custom database connection string. If not empty this connection string will be used instead of build one using the previous parameters. Leave empty for driver `bolt`
- `users_table`, string. Database table for SFTP users
- `manage_users`, integer. Set to 0 to disable users management, 1 to enable
- `track_quota`, integer. Set the preferred way to track users quota between the following choices:
- 0, disable quota tracking. REST API to scan user dir and update quota will do nothing
- 1, quota is updated each time a user upload or delete a file even if the user has no quota restrictions
- 2, quota is updated each time a user upload or delete a file but only for users with quota restrictions. With this configuration the "quota scan" REST API can still be used to periodically update space usage for users without quota restrictions
- **"httpd"**, the configuration for the HTTP server used to serve REST API
- `bind_port`, integer. The port used for serving HTTP requests. Set to 0 to disable HTTP server. Default: 8080
- `bind_address`, string. Leave blank to listen on all available network interfaces. Default: "127.0.0.1"
Here is a full example showing the default config in JSON format:
```json
{
"sftpd": {
"bind_port": 2022,
"bind_address": "",
"idle_timeout": 15,
"max_auth_tries": 0,
"umask": "0022",
"banner": "SFTPGo",
"actions": {
"execute_on": [],
"command": "",
"http_notification_url": ""
},
"keys": [],
"enable_scp": false
},
"data_provider": {
"driver": "sqlite",
"name": "sftpgo.db",
"host": "",
"port": 5432,
"username": "",
"password": "",
"sslmode": 0,
"connection_string": "",
"users_table": "users",
"manage_users": 1,
"track_quota": 2
},
"httpd": {
"bind_port": 8080,
"bind_address": "127.0.0.1"
}
}
```
If you want to use a private key that use an algorithm different from RSA or more than one private key then replace the empty `keys` array with something like this:
```json
"keys": [
{
"private_key": "id_rsa"
},
{
"private_key": "id_ecdsa"
}
]
```
The configuration can be read from JSON, TOML, YAML, HCL, envfile and Java properties config files, if your `config-file` flag is set to `sftpgo` (default value) you need to create a configuration file called `sftpgo.json` or `sftpgo.yaml` and so on inside `config-dir`.
You can also configure all the available options using environment variables; sftpgo will check for environment variables with a name matching the key uppercased and prefixed with `SFTPGO_`. You need to use `__` to traverse a struct.
Let's see some examples:
- To set sftpd `bind_port` you need to define the env var `SFTPGO_SFTPD__BIND_PORT`
- To set the `execute_on` actions you need to define the env var `SFTPGO_SFTPD__ACTIONS__EXECUTE_ON` for example `SFTPGO_SFTPD__ACTIONS__EXECUTE_ON=upload,download`
To start the SFTP Server with the default values for the command line flags simply use:
To start SFTPGo with the default settings, simply run:
```bash
sftpgo serve
```
Check out [this documentation](./docs/service.md) if you want to run SFTPGo as a service.
### Data provider initialization and update
Before starting the SFTPGo server please ensure that the configured data provider is properly initialized/updated.
SQL based data providers (SQLite, MySQL, PostgreSQL) require the creation of a database containing the required tables. Memory and bolt data providers do not require an initialization but they could require an update to the existing data after upgrading SFTPGo.
For PostgreSQL and MySQL providers, you need to create the configured database.
SFTPGo will attempt to automatically detect if the data provider is initialized/updated and, if not, will attempt to initialize/update it on startup as needed.
Alternately, you can create/update the required data provider structures yourself using the `initprovider` command.
For example, you can simply execute the following command from the configuration directory:
```bash
sftpgo initprovider
```
Take a look at the CLI usage to learn how to specify a different configuration file:
```bash
sftpgo initprovider --help
```
You can disable automatic data provider checks/updates at startup by setting the `update_mode` configuration key to `1`.
## Tutorials
Some step-to-step tutorials can be found inside the source tree [howto](./docs/howto "How-to") directory.
## Authentication options
### External Authentication
Custom authentication methods can easily be added. SFTPGo supports external authentication modules, and writing a new backend can be as simple as a few lines of shell script. More information can be found [here](./docs/external-auth.md).
### Keyboard Interactive Authentication
Keyboard interactive authentication is, in general, a series of questions asked by the server with responses provided by the client.
This authentication method is typically used for multi-factor authentication.
More information can be found [here](./docs/keyboard-interactive.md).
## Dynamic user creation or modification
A user can be created or modified by an external program just before the login. More information about this can be found [here](./docs/dynamic-user-mod.md).
## Custom Actions
SFTPGo allows you to configure custom commands and/or HTTP notifications on file upload, download, delete, rename, on SSH commands and on user add, update and delete.
More information about custom actions can be found [here](./docs/custom-actions.md).
## Virtual folders
Directories outside the user home directory can be exposed as virtual folders, more information [here](./docs/virtual-folders.md).
## Other hooks
You can get notified as soon as a new connection is established using the [Post-connect hook](./docs/post-connect-hook.md) and after each login using the [Post-login hook](./docs/post-login-hook.md).
You can use your own hook to [check passwords](./docs/check-password-hook.md).
## Storage backends
### S3 Compatible Object Storage backends
Each user can be mapped to the whole bucket or to a bucket virtual folder. This way, the mapped bucket/virtual folder is exposed over SFTP/SCP/FTP/WebDAV. More information about S3 integration can be found [here](./docs/s3.md).
### Google Cloud Storage backend
Each user can be mapped with a Google Cloud Storage bucket or a bucket virtual folder. This way, the mapped bucket/virtual folder is exposed over SFTP/SCP/FTP/WebDAV. More information about Google Cloud Storage integration can be found [here](./docs/google-cloud-storage.md).
### Other Storage backends
Adding new storage backends is quite easy:
- implement the [Fs interface](./vfs/vfs.go#L18 "interface for filesystem backends")
- update the user method `GetFilesystem` to return the new backend
- update the web interface and the REST API CLI
- add the flags for the new storage backend to the `portable` mode

Some backends require a pay-per-use account (or they offer a free account for a limited time only). To add support for such backends, or to review pull requests for them, please provide a test account. The test account must remain available long enough to maintain the backend and to run basic tests before each new release.
## Brute force protection
The [connection failed logs](./docs/logs.md) can be used for integration with tools such as [Fail2ban](http://www.fail2ban.org/). Examples of [jails](./fail2ban/jails) and [filters](./fail2ban/filters) working with `systemd`/`journald` are available in the fail2ban directory.
## Account's configuration properties
For each account, the following properties can be configured (detailed information can be found [here](./docs/account.md)):
- `username`
- `password` used for password authentication. For users created using the SFTPGo REST API, if the password has no known hashing algorithm prefix, it will be stored using argon2id. SFTPGo supports checking passwords stored with bcrypt and pbkdf2 too. For pbkdf2 the supported format is `$<algo>$<iterations>$<salt>$<hashed pwd base64 encoded>`, where algo is `pbkdf2-sha1`, `pbkdf2-sha256` or `pbkdf2-sha512`. For example, the `pbkdf2-sha256` hash of the word `password` using 150000 iterations and `E86a9YMX3zC7` as salt must be stored as `$pbkdf2-sha256$150000$E86a9YMX3zC7$R5J62hsSq+pYw00hLLPKBbcGXmq7fj5+/M0IFoYtZbo=` (see the sketch after this list). For bcrypt the format must be the one supported by golang's [crypto/bcrypt](https://godoc.org/golang.org/x/crypto/bcrypt) package; for example, the password `secret` with cost `14` must be stored as `$2a$14$ajq8Q7fbtFRQvXpdCq7Jcuy.Rx1h/L4J60Otx.gyNLbAYctGMJ9tK`. Using the REST API you can send a password hashed as bcrypt or pbkdf2 and it will be stored as is.
- `public_keys` array of public keys. At least one public key or the password is mandatory.
- `home_dir` The user cannot upload or download files outside this directory. Must be an absolute path.
- `uid`, `gid`. If sftpgo runs as the root system user, then the created files and directories will be assigned to this system uid/gid. Ignored on Windows and when sftpgo runs as a non-root user: in this case files and directories for all SFTP users will be owned by the system user that runs sftpgo.
- `max_sessions` maximum concurrent sessions. 0 means unlimited
- `quota_size` maximum size allowed as bytes. 0 means unlimited
- `quota_files` maximum number of files allowed. 0 means unlimited
- `permissions` the following permissions are supported:
  - `*` all permissions are granted
  - `list` listing items is allowed
  - `download` downloading files is allowed
  - `upload` uploading files is allowed
  - `delete` deleting files or directories is allowed
  - `rename` renaming files or directories is allowed
  - `create_dirs` creating directories is allowed
  - `create_symlinks` creating symbolic links is allowed
- `upload_bandwidth` maximum upload bandwidth in KB/s; 0 means unlimited
- `download_bandwidth` maximum download bandwidth in KB/s; 0 means unlimited
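To make the pbkdf2 password format above concrete, here is a minimal Go sketch that reproduces the example hash, assuming the salt is used verbatim as bytes and a 32-byte derived key:
```go
package main

import (
	"crypto/sha256"
	"encoding/base64"
	"fmt"

	"golang.org/x/crypto/pbkdf2"
)

func main() {
	password, salt, iterations := "password", "E86a9YMX3zC7", 150000
	// Derive a 32-byte key with PBKDF2-SHA256, then assemble the
	// $pbkdf2-sha256$<iterations>$<salt>$<key base64> string.
	key := pbkdf2.Key([]byte(password), []byte(salt), iterations, 32, sha256.New)
	fmt.Printf("$pbkdf2-sha256$%d$%s$%s\n", iterations, salt, base64.StdEncoding.EncodeToString(key))
}
```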
The account properties above are stored inside the data provider. If you want to use your existing accounts, you can create a database view. Since a view is read-only, you have to disable user management and quota tracking so SFTPGo will never try to write to the view.
## Performance
SFTPGo can easily saturate a Gigabit connection on low-end hardware with no special configuration; this is generally enough for most use cases.
More in-depth analysis of performance can be found [here](./docs/performance.md).
## REST API
SFTPGo exposes a REST API to manage users and quota and to get real-time reports for the active connections, with the possibility of forcibly closing a connection.
If quota tracking is enabled in the `sftpgo` configuration file, the used size and number of files are updated each time a file is added/removed. If files are added/removed outside of SFTP, or if you change `track_quota` from `2` to `1`, you can rescan the user home dir and update the used quota using the REST API, as shown below.
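For example, assuming the default HTTP listener on `127.0.0.1:8080`, a rescan for a given (illustrative) user can be started with something like:
```bash
curl -X POST -H "Content-Type: application/json" \
  -d '{"username":"test_user"}' \
  http://127.0.0.1:8080/api/v1/quota_scan
```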
The REST API is designed to run on localhost or on a trusted network. If you need HTTPS or authentication, you can set up a reverse proxy using an HTTP server such as Apache or NGINX.
For example, you can keep SFTPGo listening on localhost and expose it externally by configuring a reverse proxy using Apache HTTP Server this way:
```
ProxyPass /api/v1 http://127.0.0.1:8080/api/v1
ProxyPassReverse /api/v1 http://127.0.0.1:8080/api/v1
```
and you can add authentication with something like this:
```
<Location /api/v1>
AuthType Digest
AuthName "Private"
AuthDigestDomain "/api/v1"
AuthDigestProvider file
AuthUserFile "/etc/httpd/conf/auth_digest"
Require valid-user
</Location>
```
and, of course, you can configure the web server to use HTTPS.
The OpenAPI 3 schema for the exposed API can be found inside the source tree: [openapi.yaml](https://github.com/drakkan/sftpgo/tree/master/api/schema/openapi.yaml "OpenAPI 3 specs").
A sample CLI client for the REST API can be found inside the source tree [scripts](https://github.com/drakkan/sftpgo/tree/master/scripts "scripts") directory.
You can also generate your own REST client, in your preferred programming language or even bash scripts, using an OpenAPI generator such as [swagger-codegen](https://github.com/swagger-api/swagger-codegen) or [OpenAPI Generator](https://openapi-generator.tech/).
## Logs
Inside the log file each line is a JSON struct; each struct has a `sender` field that identifies the log type.
The logs can be divided into the following categories:
- **"app logs"**, internal logs used to debug `sftpgo`:
- `sender` string. This is generally the package name that emits the log
- `time` string. Date/time with millisecond precision
- `level` string
- `message` string
- **"transfer logs"**, SFTP/SCP transfer logs:
- `sender` string. `Upload` or `Download`
- `time` string. Date/time with millisecond precision
- `level` string
- `elapsed_ms`, int64. Elapsed time, as milliseconds, for the upload/download
- `size_bytes`, int64. Size, as bytes, of the download/upload
- `username`, string
- `file_path` string
- `connection_id` string. Unique connection identifier
- `protocol` string. `SFTP` or `SCP`
- **"command logs"**, SFTP/SCP command logs:
- `sender` string. `Rename`, `Rmdir`, `Mkdir`, `Symlink`, `Remove`
- `level` string
- `username`, string
- `file_path` string
- `target_path` string
- `connection_id` string. Unique connection identifier
- `protocol` string. `SFTP` or `SCP`
- **"http logs"**, REST API logs:
- `sender` string. `httpd`
- `level` string
- `remote_addr` string. IP and port of the remote client
- `proto` string, for example `HTTP/1.1`
- `method` string. HTTP method (`GET`, `POST`, `PUT`, `DELETE` etc.)
- `user_agent` string
- `uri` string. Full uri
- `resp_status` integer. HTTP response status code
- `resp_size` integer. Size in bytes of the HTTP response
- `elapsed_ms` int64. Elapsed time, as milliseconds, to complete the request
- `request_id` string. Unique request identifier
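For instance, a transfer log line might look like this (all values are illustrative):
```json
{"sender":"Upload","time":"2020-10-10T22:09:48.123","level":"info","elapsed_ms":512,"size_bytes":1048576,"username":"test_user","file_path":"/tmp/test_user/file.txt","connection_id":"abc123","protocol":"SFTP"}
```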
## Acknowledgements
- [pkg/sftp](https://github.com/pkg/sftp)
- [go-chi](https://github.com/go-chi/chi)
- [zerolog](https://github.com/rs/zerolog)
- [lumberjack](https://gopkg.in/natefinch/lumberjack.v2)
- [argon2id](https://github.com/alexedwards/argon2id)
- [go-sqlite3](https://github.com/mattn/go-sqlite3)
- [go-sql-driver/mysql](https://github.com/go-sql-driver/mysql)
- [bbolt](https://github.com/etcd-io/bbolt)
- [lib/pq](https://github.com/lib/pq)
- [viper](https://github.com/spf13/viper)
- [cobra](https://github.com/spf13/cobra)
- [xid](https://github.com/rs/xid)
SFTPGo makes use of the third party libraries listed inside [go.mod](./go.mod).
Some code was initially taken from [Pterodactyl SFTP Server](https://github.com/pterodactyl/sftp-server).
We are very grateful to all the people who contributed with ideas and/or pull requests.
## License


@@ -1,77 +0,0 @@
// Package api implements REST API for sftpgo.
// REST API allows to manage users and quota and to get real time reports for the active connections
// with possibility of forcibly closing a connection.
// The OpenAPI 3 schema for the exposed API can be found inside the source tree:
// https://github.com/drakkan/sftpgo/tree/master/api/schema/openapi.yaml
package api
import (
"net/http"
"github.com/drakkan/sftpgo/dataprovider"
"github.com/go-chi/chi"
"github.com/go-chi/render"
)
const (
logSender = "api"
activeConnectionsPath = "/api/v1/connection"
quotaScanPath = "/api/v1/quota_scan"
userPath = "/api/v1/user"
versionPath = "/api/v1/version"
)
var (
router *chi.Mux
dataProvider dataprovider.Provider
)
// HTTPDConf httpd daemon configuration
type HTTPDConf struct {
// The port used for serving HTTP requests. 0 disable the HTTP server. Default: 8080
BindPort int `json:"bind_port" mapstructure:"bind_port"`
// The address to listen on. A blank value means listen on all available network interfaces. Default: "127.0.0.1"
BindAddress string `json:"bind_address" mapstructure:"bind_address"`
}
type apiResponse struct {
Error string `json:"error"`
Message string `json:"message"`
HTTPStatus int `json:"status"`
}
func init() {
initializeRouter()
}
// SetDataProvider sets the data provider to use to fetch the data about users
func SetDataProvider(provider dataprovider.Provider) {
dataProvider = provider
}
func sendAPIResponse(w http.ResponseWriter, r *http.Request, err error, message string, code int) {
var errorString string
if err != nil {
errorString = err.Error()
}
resp := apiResponse{
Error: errorString,
Message: message,
HTTPStatus: code,
}
if code != http.StatusOK {
w.Header().Set("Content-Type", "application/json; charset=utf-8")
w.WriteHeader(code)
}
render.JSON(w, r, resp)
}
func getRespStatus(err error) int {
if _, ok := err.(*dataprovider.ValidationError); ok {
return http.StatusBadRequest
}
if _, ok := err.(*dataprovider.MethodDisabledError); ok {
return http.StatusForbidden
}
return http.StatusInternalServerError
}


@@ -1,755 +0,0 @@
package api_test
import (
"bytes"
"encoding/json"
"fmt"
"net"
"net/http"
"net/http/httptest"
"os"
"path/filepath"
"runtime"
"strconv"
"testing"
"time"
"github.com/go-chi/render"
_ "github.com/go-sql-driver/mysql"
_ "github.com/lib/pq"
_ "github.com/mattn/go-sqlite3"
"github.com/rs/zerolog"
"github.com/drakkan/sftpgo/api"
"github.com/drakkan/sftpgo/config"
"github.com/drakkan/sftpgo/dataprovider"
"github.com/drakkan/sftpgo/logger"
"github.com/drakkan/sftpgo/sftpd"
)
const (
defaultUsername = "test_user"
defaultPassword = "test_password"
testPubKey = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC03jj0D+djk7pxIf/0OhrxrchJTRZklofJ1NoIu4752Sq02mdXmarMVsqJ1cAjV5LBVy3D1F5U6XW4rppkXeVtd04Pxb09ehtH0pRRPaoHHlALiJt8CoMpbKYMA8b3KXPPriGxgGomvtU2T2RMURSwOZbMtpsugfjYSWenyYX+VORYhylWnSXL961LTyC21ehd6d6QnW9G7E5hYMITMY9TuQZz3bROYzXiTsgN0+g6Hn7exFQp50p45StUMfV/SftCMdCxlxuyGny2CrN/vfjO7xxOo2uv7q1qm10Q46KPWJQv+pgZ/OfL+EDjy07n5QVSKHlbx+2nT4Q0EgOSQaCTYwn3YjtABfIxWwgAFdyj6YlPulCL22qU4MYhDcA6PSBwDdf8hvxBfvsiHdM+JcSHvv8/VeJhk6CmnZxGY0fxBupov27z3yEO8nAg8k+6PaUiW1MSUfuGMF/ktB8LOstXsEPXSszuyXiOv4DaryOXUiSn7bmRqKcEFlJusO6aZP0= nicola@p1"
logSender = "APITesting"
userPath = "/api/v1/user"
activeConnectionsPath = "/api/v1/connection"
quotaScanPath = "/api/v1/quota_scan"
versionPath = "/api/v1/version"
)
var (
defaultPerms = []string{dataprovider.PermAny}
homeBasePath string
testServer *httptest.Server
)
func TestMain(m *testing.M) {
if runtime.GOOS == "windows" {
homeBasePath = "C:\\"
} else {
homeBasePath = "/tmp"
}
configDir := ".."
logfilePath := filepath.Join(configDir, "sftpgo_api_test.log")
logger.InitLogger(logfilePath, 5, 1, 28, false, zerolog.DebugLevel)
config.LoadConfig(configDir, "")
providerConf := config.GetProviderConf()
err := dataprovider.Initialize(providerConf, configDir)
if err != nil {
logger.Warn(logSender, "error initializing data provider: %v", err)
os.Exit(1)
}
dataProvider := dataprovider.GetProvider()
httpdConf := config.GetHTTPDConfig()
router := api.GetHTTPRouter()
httpdConf.BindPort = 8081
api.SetBaseURL("http://127.0.0.1:8081")
sftpd.SetDataProvider(dataProvider)
api.SetDataProvider(dataProvider)
go func() {
logger.Debug(logSender, "initializing HTTP server with config %+v", httpdConf)
s := &http.Server{
Addr: fmt.Sprintf("%s:%d", httpdConf.BindAddress, httpdConf.BindPort),
Handler: router,
ReadTimeout: 300 * time.Second,
WriteTimeout: 300 * time.Second,
MaxHeaderBytes: 1 << 20, // 1MB
}
if err := s.ListenAndServe(); err != nil {
logger.Error(logSender, "could not start HTTP server: %v", err)
}
}()
testServer = httptest.NewServer(api.GetHTTPRouter())
defer testServer.Close()
waitTCPListening(fmt.Sprintf("%s:%d", httpdConf.BindAddress, httpdConf.BindPort))
exitCode := m.Run()
os.Remove(logfilePath)
os.Exit(exitCode)
}
func TestBasicUserHandling(t *testing.T) {
user, _, err := api.AddUser(getTestUser(), http.StatusOK)
if err != nil {
t.Errorf("unable to add user: %v", err)
}
user.MaxSessions = 10
user.QuotaSize = 4096
user.QuotaFiles = 2
user.UploadBandwidth = 128
user.DownloadBandwidth = 64
user, _, err = api.UpdateUser(user, http.StatusOK)
if err != nil {
t.Errorf("unable to update user: %v", err)
}
users, _, err := api.GetUsers(0, 0, defaultUsername, http.StatusOK)
if err != nil {
t.Errorf("unable to get users: %v", err)
}
if len(users) != 1 {
t.Errorf("number of users mismatch, expected: 1, actual: %v", len(users))
}
_, err = api.RemoveUser(user, http.StatusOK)
if err != nil {
t.Errorf("unable to remove: %v", err)
}
}
func TestAddUserNoCredentials(t *testing.T) {
u := getTestUser()
u.Password = ""
u.PublicKeys = []string{}
_, _, err := api.AddUser(u, http.StatusBadRequest)
if err != nil {
t.Errorf("unexpected error adding user with no credentials: %v", err)
}
}
func TestAddUserNoUsername(t *testing.T) {
u := getTestUser()
u.Username = ""
_, _, err := api.AddUser(u, http.StatusBadRequest)
if err != nil {
t.Errorf("unexpected error adding user with no home dir: %v", err)
}
}
func TestAddUserNoHomeDir(t *testing.T) {
u := getTestUser()
u.HomeDir = ""
_, _, err := api.AddUser(u, http.StatusBadRequest)
if err != nil {
t.Errorf("unexpected error adding user with no home dir: %v", err)
}
}
func TestAddUserInvalidHomeDir(t *testing.T) {
u := getTestUser()
u.HomeDir = "relative_path"
_, _, err := api.AddUser(u, http.StatusBadRequest)
if err != nil {
t.Errorf("unexpected error adding user with invalid home dir: %v", err)
}
}
func TestAddUserNoPerms(t *testing.T) {
u := getTestUser()
u.Permissions = []string{}
_, _, err := api.AddUser(u, http.StatusBadRequest)
if err != nil {
t.Errorf("unexpected error adding user with no perms: %v", err)
}
}
func TestAddUserInvalidPerms(t *testing.T) {
u := getTestUser()
u.Permissions = []string{"invalidPerm"}
_, _, err := api.AddUser(u, http.StatusBadRequest)
if err != nil {
t.Errorf("unexpected error adding user with no perms: %v", err)
}
}
func TestUserPublicKey(t *testing.T) {
u := getTestUser()
invalidPubKey := "invalid"
validPubKey := "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC03jj0D+djk7pxIf/0OhrxrchJTRZklofJ1NoIu4752Sq02mdXmarMVsqJ1cAjV5LBVy3D1F5U6XW4rppkXeVtd04Pxb09ehtH0pRRPaoHHlALiJt8CoMpbKYMA8b3KXPPriGxgGomvtU2T2RMURSwOZbMtpsugfjYSWenyYX+VORYhylWnSXL961LTyC21ehd6d6QnW9G7E5hYMITMY9TuQZz3bROYzXiTsgN0+g6Hn7exFQp50p45StUMfV/SftCMdCxlxuyGny2CrN/vfjO7xxOo2uv7q1qm10Q46KPWJQv+pgZ/OfL+EDjy07n5QVSKHlbx+2nT4Q0EgOSQaCTYwn3YjtABfIxWwgAFdyj6YlPulCL22qU4MYhDcA6PSBwDdf8hvxBfvsiHdM+JcSHvv8/VeJhk6CmnZxGY0fxBupov27z3yEO8nAg8k+6PaUiW1MSUfuGMF/ktB8LOstXsEPXSszuyXiOv4DaryOXUiSn7bmRqKcEFlJusO6aZP0= nicola@p1"
u.PublicKeys = []string{invalidPubKey}
_, _, err := api.AddUser(u, http.StatusBadRequest)
if err != nil {
t.Errorf("unexpected error adding user with invalid pub key: %v", err)
}
u.PublicKeys = []string{validPubKey}
user, _, err := api.AddUser(u, http.StatusOK)
if err != nil {
t.Errorf("unable to add user: %v", err)
}
user.PublicKeys = []string{validPubKey, invalidPubKey}
_, _, err = api.UpdateUser(user, http.StatusBadRequest)
if err != nil {
t.Errorf("update user with invalid public key must fail: %v", err)
}
user.PublicKeys = []string{validPubKey, validPubKey, validPubKey}
_, _, err = api.UpdateUser(user, http.StatusOK)
if err != nil {
t.Errorf("unable to update user: %v", err)
}
_, err = api.RemoveUser(user, http.StatusOK)
if err != nil {
t.Errorf("unable to remove: %v", err)
}
}
func TestUpdateUser(t *testing.T) {
user, _, err := api.AddUser(getTestUser(), http.StatusOK)
if err != nil {
t.Errorf("unable to add user: %v", err)
}
user.HomeDir = filepath.Join(homeBasePath, "testmod")
user.UID = 33
user.GID = 101
user.MaxSessions = 10
user.QuotaSize = 4096
user.QuotaFiles = 2
user.Permissions = []string{dataprovider.PermCreateDirs, dataprovider.PermDelete, dataprovider.PermDownload}
user.UploadBandwidth = 1024
user.DownloadBandwidth = 512
user, _, err = api.UpdateUser(user, http.StatusOK)
if err != nil {
t.Errorf("unable to update user: %v", err)
}
_, err = api.RemoveUser(user, http.StatusOK)
if err != nil {
t.Errorf("unable to remove: %v", err)
}
}
func TestUpdateUserNoCredentials(t *testing.T) {
user, _, err := api.AddUser(getTestUser(), http.StatusOK)
if err != nil {
t.Errorf("unable to add user: %v", err)
}
user.Password = ""
user.PublicKeys = []string{}
// password and public key will be omitted from json serialization if empty and so they will remain unchanged
// and no validation error will be raised
_, _, err = api.UpdateUser(user, http.StatusOK)
if err != nil {
t.Errorf("unexpected error updating user with no credentials: %v", err)
}
_, err = api.RemoveUser(user, http.StatusOK)
if err != nil {
t.Errorf("unable to remove: %v", err)
}
}
func TestUpdateUserEmptyHomeDir(t *testing.T) {
user, _, err := api.AddUser(getTestUser(), http.StatusOK)
if err != nil {
t.Errorf("unable to add user: %v", err)
}
user.HomeDir = ""
_, _, err = api.UpdateUser(user, http.StatusBadRequest)
if err != nil {
t.Errorf("unexpected error updating user with empty home dir: %v", err)
}
_, err = api.RemoveUser(user, http.StatusOK)
if err != nil {
t.Errorf("unable to remove: %v", err)
}
}
func TestUpdateUserInvalidHomeDir(t *testing.T) {
user, _, err := api.AddUser(getTestUser(), http.StatusOK)
if err != nil {
t.Errorf("unable to add user: %v", err)
}
user.HomeDir = "relative_path"
_, _, err = api.UpdateUser(user, http.StatusBadRequest)
if err != nil {
t.Errorf("unexpected error updating user with empty home dir: %v", err)
}
_, err = api.RemoveUser(user, http.StatusOK)
if err != nil {
t.Errorf("unable to remove: %v", err)
}
}
func TestUpdateNonExistentUser(t *testing.T) {
_, _, err := api.UpdateUser(getTestUser(), http.StatusNotFound)
if err != nil {
t.Errorf("unable to update user: %v", err)
}
}
func TestGetNonExistentUser(t *testing.T) {
_, _, err := api.GetUserByID(0, http.StatusNotFound)
if err != nil {
t.Errorf("unable to get user: %v", err)
}
}
func TestDeleteNonExistentUser(t *testing.T) {
_, err := api.RemoveUser(getTestUser(), http.StatusNotFound)
if err != nil {
t.Errorf("unable to remove user: %v", err)
}
}
func TestAddDuplicateUser(t *testing.T) {
user, _, err := api.AddUser(getTestUser(), http.StatusOK)
if err != nil {
t.Errorf("unable to add user: %v", err)
}
_, _, err = api.AddUser(getTestUser(), http.StatusInternalServerError)
if err != nil {
t.Errorf("unable to add second user: %v", err)
}
_, _, err = api.AddUser(getTestUser(), http.StatusOK)
if err == nil {
t.Errorf("adding a duplicate user must fail")
}
_, err = api.RemoveUser(user, http.StatusOK)
if err != nil {
t.Errorf("unable to remove user: %v", err)
}
}
func TestGetUsers(t *testing.T) {
user1, _, err := api.AddUser(getTestUser(), http.StatusOK)
if err != nil {
t.Errorf("unable to add user: %v", err)
}
u := getTestUser()
u.Username = defaultUsername + "1"
user2, _, err := api.AddUser(u, http.StatusOK)
if err != nil {
t.Errorf("unable to add second user: %v", err)
}
users, _, err := api.GetUsers(0, 0, "", http.StatusOK)
if err != nil {
t.Errorf("unable to get users: %v", err)
}
if len(users) < 2 {
t.Errorf("at least 2 users are expected")
}
users, _, err = api.GetUsers(1, 0, "", http.StatusOK)
if err != nil {
t.Errorf("unable to get users: %v", err)
}
if len(users) != 1 {
t.Errorf("1 user is expected")
}
users, _, err = api.GetUsers(1, 1, "", http.StatusOK)
if err != nil {
t.Errorf("unable to get users: %v", err)
}
if len(users) != 1 {
t.Errorf("1 user is expected")
}
_, _, err = api.GetUsers(1, 1, "", http.StatusInternalServerError)
if err == nil {
t.Errorf("get users must succeed, we requested a fail for a good request")
}
_, err = api.RemoveUser(user1, http.StatusOK)
if err != nil {
t.Errorf("unable to remove user: %v", err)
}
_, err = api.RemoveUser(user2, http.StatusOK)
if err != nil {
t.Errorf("unable to remove user: %v", err)
}
}
func TestGetQuotaScans(t *testing.T) {
_, _, err := api.GetQuotaScans(http.StatusOK)
if err != nil {
t.Errorf("unable to get quota scans: %v", err)
}
_, _, err = api.GetQuotaScans(http.StatusInternalServerError)
if err == nil {
t.Errorf("quota scan request must succeed, we requested to check a wrong status code")
}
}
func TestStartQuotaScan(t *testing.T) {
user, _, err := api.AddUser(getTestUser(), http.StatusOK)
if err != nil {
t.Errorf("unable to add user: %v", err)
}
_, err = api.StartQuotaScan(user, http.StatusCreated)
if err != nil {
t.Errorf("unable to start quota scan: %v", err)
}
_, err = api.RemoveUser(user, http.StatusOK)
if err != nil {
t.Errorf("unable to remove user: %v", err)
}
}
func TestGetVersion(t *testing.T) {
_, _, err := api.GetVersion(http.StatusOK)
if err != nil {
t.Errorf("unable to get sftp version: %v", err)
}
_, _, err = api.GetVersion(http.StatusInternalServerError)
if err == nil {
t.Errorf("get version request must succeed, we requested to check a wrong status code")
}
}
func TestGetConnections(t *testing.T) {
_, _, err := api.GetConnections(http.StatusOK)
if err != nil {
t.Errorf("unable to get sftp connections: %v", err)
}
_, _, err = api.GetConnections(http.StatusInternalServerError)
if err == nil {
t.Errorf("get sftp connections request must succeed, we requested to check a wrong status code")
}
}
func TestCloseActiveConnection(t *testing.T) {
_, err := api.CloseConnection("non_existent_id", http.StatusNotFound)
if err != nil {
t.Errorf("unexpected error closing non existent sftp connection: %v", err)
}
}
// test using mock http server
func TestBasicUserHandlingMock(t *testing.T) {
user := getTestUser()
userAsJSON := getUserAsJSON(t, user)
req, _ := http.NewRequest(http.MethodPost, userPath, bytes.NewBuffer(userAsJSON))
rr := executeRequest(req)
checkResponseCode(t, http.StatusOK, rr.Code)
err := render.DecodeJSON(rr.Body, &user)
if err != nil {
t.Errorf("Error get user: %v", err)
}
req, _ = http.NewRequest(http.MethodPost, userPath, bytes.NewBuffer(userAsJSON))
rr = executeRequest(req)
checkResponseCode(t, http.StatusInternalServerError, rr.Code)
user.MaxSessions = 10
user.UploadBandwidth = 128
userAsJSON = getUserAsJSON(t, user)
req, _ = http.NewRequest(http.MethodPut, userPath+"/"+strconv.FormatInt(user.ID, 10), bytes.NewBuffer(userAsJSON))
rr = executeRequest(req)
checkResponseCode(t, http.StatusOK, rr.Code)
req, _ = http.NewRequest(http.MethodGet, userPath+"/"+strconv.FormatInt(user.ID, 10), nil)
rr = executeRequest(req)
checkResponseCode(t, http.StatusOK, rr.Code)
var updatedUser dataprovider.User
err = render.DecodeJSON(rr.Body, &updatedUser)
if err != nil {
t.Errorf("Error decoding updated user: %v", err)
}
if user.MaxSessions != updatedUser.MaxSessions || user.UploadBandwidth != updatedUser.UploadBandwidth {
t.Errorf("Error modifying user actual: %v, %v", updatedUser.MaxSessions, updatedUser.UploadBandwidth)
}
req, _ = http.NewRequest(http.MethodDelete, userPath+"/"+strconv.FormatInt(user.ID, 10), nil)
rr = executeRequest(req)
checkResponseCode(t, http.StatusOK, rr.Code)
}
func TestGetUserByIdInvalidParamsMock(t *testing.T) {
req, _ := http.NewRequest(http.MethodGet, userPath+"/0", nil)
rr := executeRequest(req)
checkResponseCode(t, http.StatusNotFound, rr.Code)
req, _ = http.NewRequest(http.MethodGet, userPath+"/a", nil)
rr = executeRequest(req)
checkResponseCode(t, http.StatusBadRequest, rr.Code)
}
func TestAddUserNoUsernameMock(t *testing.T) {
user := getTestUser()
user.Username = ""
userAsJSON := getUserAsJSON(t, user)
req, _ := http.NewRequest(http.MethodPost, userPath, bytes.NewBuffer(userAsJSON))
rr := executeRequest(req)
checkResponseCode(t, http.StatusBadRequest, rr.Code)
}
func TestAddUserInvalidHomeDirMock(t *testing.T) {
user := getTestUser()
user.HomeDir = "relative_path"
userAsJSON := getUserAsJSON(t, user)
req, _ := http.NewRequest(http.MethodPost, userPath, bytes.NewBuffer(userAsJSON))
rr := executeRequest(req)
checkResponseCode(t, http.StatusBadRequest, rr.Code)
}
func TestAddUserInvalidPermsMock(t *testing.T) {
user := getTestUser()
user.Permissions = []string{}
userAsJSON := getUserAsJSON(t, user)
req, _ := http.NewRequest(http.MethodPost, userPath, bytes.NewBuffer(userAsJSON))
rr := executeRequest(req)
checkResponseCode(t, http.StatusBadRequest, rr.Code)
}
func TestAddUserInvalidJsonMock(t *testing.T) {
req, _ := http.NewRequest(http.MethodPost, userPath, bytes.NewBuffer([]byte("invalid json")))
rr := executeRequest(req)
checkResponseCode(t, http.StatusBadRequest, rr.Code)
}
func TestUpdateUserInvalidJsonMock(t *testing.T) {
user := getTestUser()
userAsJSON := getUserAsJSON(t, user)
req, _ := http.NewRequest(http.MethodPost, userPath, bytes.NewBuffer(userAsJSON))
rr := executeRequest(req)
checkResponseCode(t, http.StatusOK, rr.Code)
err := render.DecodeJSON(rr.Body, &user)
if err != nil {
t.Errorf("Error get user: %v", err)
}
req, _ = http.NewRequest(http.MethodPut, userPath+"/"+strconv.FormatInt(user.ID, 10), bytes.NewBuffer([]byte("Invalid json")))
rr = executeRequest(req)
checkResponseCode(t, http.StatusBadRequest, rr.Code)
req, _ = http.NewRequest(http.MethodDelete, userPath+"/"+strconv.FormatInt(user.ID, 10), nil)
rr = executeRequest(req)
checkResponseCode(t, http.StatusOK, rr.Code)
}
func TestUpdateUserInvalidParamsMock(t *testing.T) {
user := getTestUser()
userAsJSON := getUserAsJSON(t, user)
req, _ := http.NewRequest(http.MethodPost, userPath, bytes.NewBuffer(userAsJSON))
rr := executeRequest(req)
checkResponseCode(t, http.StatusOK, rr.Code)
err := render.DecodeJSON(rr.Body, &user)
if err != nil {
t.Errorf("Error get user: %v", err)
}
user.HomeDir = ""
userAsJSON = getUserAsJSON(t, user)
req, _ = http.NewRequest(http.MethodPut, userPath+"/"+strconv.FormatInt(user.ID, 10), bytes.NewBuffer(userAsJSON))
rr = executeRequest(req)
checkResponseCode(t, http.StatusBadRequest, rr.Code)
userID := user.ID
user.ID = 0
userAsJSON = getUserAsJSON(t, user)
req, _ = http.NewRequest(http.MethodPut, userPath+"/"+strconv.FormatInt(userID, 10), bytes.NewBuffer(userAsJSON))
rr = executeRequest(req)
checkResponseCode(t, http.StatusBadRequest, rr.Code)
user.ID = userID
req, _ = http.NewRequest(http.MethodPut, userPath+"/0", bytes.NewBuffer(userAsJSON))
rr = executeRequest(req)
checkResponseCode(t, http.StatusNotFound, rr.Code)
req, _ = http.NewRequest(http.MethodPut, userPath+"/a", bytes.NewBuffer(userAsJSON))
rr = executeRequest(req)
checkResponseCode(t, http.StatusBadRequest, rr.Code)
req, _ = http.NewRequest(http.MethodDelete, userPath+"/"+strconv.FormatInt(user.ID, 10), nil)
rr = executeRequest(req)
checkResponseCode(t, http.StatusOK, rr.Code)
}
func TestGetUsersMock(t *testing.T) {
user := getTestUser()
userAsJSON := getUserAsJSON(t, user)
req, _ := http.NewRequest(http.MethodPost, userPath, bytes.NewBuffer(userAsJSON))
rr := executeRequest(req)
checkResponseCode(t, http.StatusOK, rr.Code)
err := render.DecodeJSON(rr.Body, &user)
if err != nil {
t.Errorf("Error get user: %v", err)
}
req, _ = http.NewRequest(http.MethodGet, userPath+"?limit=510&offset=0&order=ASC&username="+defaultUsername, nil)
rr = executeRequest(req)
checkResponseCode(t, http.StatusOK, rr.Code)
var users []dataprovider.User
err = render.DecodeJSON(rr.Body, &users)
if err != nil {
t.Errorf("Error decoding users: %v", err)
}
if len(users) != 1 {
t.Errorf("1 user is expected")
}
req, _ = http.NewRequest(http.MethodGet, userPath+"?limit=a&offset=0&order=ASC", nil)
rr = executeRequest(req)
checkResponseCode(t, http.StatusBadRequest, rr.Code)
req, _ = http.NewRequest(http.MethodGet, userPath+"?limit=1&offset=a&order=ASC", nil)
rr = executeRequest(req)
checkResponseCode(t, http.StatusBadRequest, rr.Code)
req, _ = http.NewRequest(http.MethodGet, userPath+"?limit=1&offset=0&order=ASCa", nil)
rr = executeRequest(req)
checkResponseCode(t, http.StatusBadRequest, rr.Code)
req, _ = http.NewRequest(http.MethodDelete, userPath+"/"+strconv.FormatInt(user.ID, 10), nil)
rr = executeRequest(req)
checkResponseCode(t, http.StatusOK, rr.Code)
}
func TestDeleteUserInvalidParamsMock(t *testing.T) {
req, _ := http.NewRequest(http.MethodDelete, userPath+"/0", nil)
rr := executeRequest(req)
checkResponseCode(t, http.StatusNotFound, rr.Code)
req, _ = http.NewRequest(http.MethodDelete, userPath+"/a", nil)
rr = executeRequest(req)
checkResponseCode(t, http.StatusBadRequest, rr.Code)
}
func TestGetQuotaScansMock(t *testing.T) {
req, err := http.NewRequest("GET", quotaScanPath, nil)
if err != nil {
t.Errorf("error get quota scan: %v", err)
}
rr := executeRequest(req)
checkResponseCode(t, http.StatusOK, rr.Code)
}
func TestStartQuotaScanMock(t *testing.T) {
user := getTestUser()
userAsJSON := getUserAsJSON(t, user)
req, _ := http.NewRequest(http.MethodPost, userPath, bytes.NewBuffer(userAsJSON))
rr := executeRequest(req)
checkResponseCode(t, http.StatusOK, rr.Code)
err := render.DecodeJSON(rr.Body, &user)
if err != nil {
t.Errorf("Error get user: %v", err)
}
_, err = os.Stat(user.HomeDir)
if err == nil {
os.Remove(user.HomeDir)
}
// simulate a duplicate quota scan
userAsJSON = getUserAsJSON(t, user)
sftpd.AddQuotaScan(user.Username)
req, _ = http.NewRequest(http.MethodPost, quotaScanPath, bytes.NewBuffer(userAsJSON))
rr = executeRequest(req)
checkResponseCode(t, http.StatusConflict, rr.Code)
sftpd.RemoveQuotaScan(user.Username)
userAsJSON = getUserAsJSON(t, user)
req, _ = http.NewRequest(http.MethodPost, quotaScanPath, bytes.NewBuffer(userAsJSON))
rr = executeRequest(req)
checkResponseCode(t, http.StatusCreated, rr.Code)
req, _ = http.NewRequest(http.MethodGet, quotaScanPath, nil)
rr = executeRequest(req)
checkResponseCode(t, http.StatusOK, rr.Code)
var scans []sftpd.ActiveQuotaScan
err = render.DecodeJSON(rr.Body, &scans)
if err != nil {
t.Errorf("Error get active scans: %v", err)
}
for len(scans) > 0 {
req, _ = http.NewRequest(http.MethodGet, quotaScanPath, nil)
rr = executeRequest(req)
checkResponseCode(t, http.StatusOK, rr.Code)
err = render.DecodeJSON(rr.Body, &scans)
if err != nil {
t.Errorf("Error get active scans: %v", err)
break
}
}
_, err = os.Stat(user.HomeDir)
if err != nil && os.IsNotExist(err) {
os.MkdirAll(user.HomeDir, 0777)
}
req, _ = http.NewRequest(http.MethodPost, quotaScanPath, bytes.NewBuffer(userAsJSON))
rr = executeRequest(req)
checkResponseCode(t, http.StatusCreated, rr.Code)
req, _ = http.NewRequest(http.MethodDelete, userPath+"/"+strconv.FormatInt(user.ID, 10), nil)
rr = executeRequest(req)
checkResponseCode(t, http.StatusOK, rr.Code)
}
func TestStartQuotaScanBadUserMock(t *testing.T) {
user := getTestUser()
userAsJSON := getUserAsJSON(t, user)
req, _ := http.NewRequest(http.MethodPost, quotaScanPath, bytes.NewBuffer(userAsJSON))
rr := executeRequest(req)
checkResponseCode(t, http.StatusNotFound, rr.Code)
}
func TestStartQuotaScanNonExistentUserMock(t *testing.T) {
req, _ := http.NewRequest(http.MethodPost, quotaScanPath, bytes.NewBuffer([]byte("invalid json")))
rr := executeRequest(req)
checkResponseCode(t, http.StatusBadRequest, rr.Code)
}
func TestGetVersionMock(t *testing.T) {
req, _ := http.NewRequest(http.MethodGet, versionPath, nil)
rr := executeRequest(req)
checkResponseCode(t, http.StatusOK, rr.Code)
}
func TestGetConnectionsMock(t *testing.T) {
req, _ := http.NewRequest(http.MethodGet, activeConnectionsPath, nil)
rr := executeRequest(req)
checkResponseCode(t, http.StatusOK, rr.Code)
}
func TestDeleteActiveConnectionMock(t *testing.T) {
req, _ := http.NewRequest(http.MethodDelete, activeConnectionsPath+"/connectionID", nil)
rr := executeRequest(req)
checkResponseCode(t, http.StatusNotFound, rr.Code)
}
func TestNotFoundMock(t *testing.T) {
req, _ := http.NewRequest(http.MethodGet, "/non/existing/path", nil)
rr := executeRequest(req)
checkResponseCode(t, http.StatusNotFound, rr.Code)
}
func TestMethodNotAllowedMock(t *testing.T) {
req, _ := http.NewRequest(http.MethodPost, activeConnectionsPath, nil)
rr := executeRequest(req)
checkResponseCode(t, http.StatusMethodNotAllowed, rr.Code)
}
func waitTCPListening(address string) {
for {
conn, err := net.Dial("tcp", address)
if err != nil {
logger.WarnToConsole("tcp server %v not listening: %v\n", address, err)
time.Sleep(100 * time.Millisecond)
continue
}
logger.InfoToConsole("tcp server %v now listening\n", address)
defer conn.Close()
break
}
}
func getTestUser() dataprovider.User {
return dataprovider.User{
Username: defaultUsername,
Password: defaultPassword,
HomeDir: filepath.Join(homeBasePath, defaultUsername),
Permissions: defaultPerms,
}
}
func getUserAsJSON(t *testing.T, user dataprovider.User) []byte {
json, err := json.Marshal(user)
if err != nil {
t.Errorf("error get user as json: %v", err)
return []byte("{}")
}
return json
}
func executeRequest(req *http.Request) *httptest.ResponseRecorder {
rr := httptest.NewRecorder()
testServer.Config.Handler.ServeHTTP(rr, req)
return rr
}
func checkResponseCode(t *testing.T, expected, actual int) {
if expected != actual {
t.Errorf("Expected response code %d. Got %d", expected, actual)
}
}


@@ -1,330 +0,0 @@
package api
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"path"
"strconv"
"strings"
"time"
"github.com/drakkan/sftpgo/dataprovider"
"github.com/drakkan/sftpgo/sftpd"
"github.com/drakkan/sftpgo/utils"
"github.com/go-chi/render"
)
var (
httpBaseURL = "http://127.0.0.1:8080"
)
// SetBaseURL sets the base url to use for HTTP requests, default is "http://127.0.0.1:8080"
func SetBaseURL(url string) {
httpBaseURL = url
}
// gets an HTTP Client with a timeout
func getHTTPClient() *http.Client {
return &http.Client{
Timeout: 15 * time.Second,
}
}
func buildURLRelativeToBase(paths ...string) string {
// we need to use path.Join and not filepath.Join
// since filepath.Join will use backslash separator on Windows
p := path.Join(paths...)
return fmt.Sprintf("%s/%s", strings.TrimRight(httpBaseURL, "/"), strings.TrimLeft(p, "/"))
}
// AddUser adds a new user and checks the received HTTP Status code against expectedStatusCode.
func AddUser(user dataprovider.User, expectedStatusCode int) (dataprovider.User, []byte, error) {
var newUser dataprovider.User
var body []byte
userAsJSON, err := json.Marshal(user)
if err != nil {
return newUser, body, err
}
resp, err := getHTTPClient().Post(buildURLRelativeToBase(userPath), "application/json", bytes.NewBuffer(userAsJSON))
if err != nil {
return newUser, body, err
}
defer resp.Body.Close()
err = checkResponse(resp.StatusCode, expectedStatusCode)
if expectedStatusCode != http.StatusOK {
body, _ = getResponseBody(resp)
return newUser, body, err
}
if err == nil {
err = render.DecodeJSON(resp.Body, &newUser)
} else {
body, _ = getResponseBody(resp)
}
if err == nil {
err = checkUser(user, newUser)
}
return newUser, body, err
}
// UpdateUser updates an existing user and checks the received HTTP Status code against expectedStatusCode.
func UpdateUser(user dataprovider.User, expectedStatusCode int) (dataprovider.User, []byte, error) {
var newUser dataprovider.User
var body []byte
userAsJSON, err := json.Marshal(user)
if err != nil {
return user, body, err
}
req, err := http.NewRequest(http.MethodPut, buildURLRelativeToBase(userPath, strconv.FormatInt(user.ID, 10)),
bytes.NewBuffer(userAsJSON))
if err != nil {
return user, body, err
}
resp, err := getHTTPClient().Do(req)
if err != nil {
return user, body, err
}
defer resp.Body.Close()
body, _ = getResponseBody(resp)
err = checkResponse(resp.StatusCode, expectedStatusCode)
if expectedStatusCode != http.StatusOK {
return newUser, body, err
}
if err == nil {
newUser, body, err = GetUserByID(user.ID, expectedStatusCode)
}
if err == nil {
err = checkUser(user, newUser)
}
return newUser, body, err
}
// RemoveUser removes an existing user and checks the received HTTP Status code against expectedStatusCode.
func RemoveUser(user dataprovider.User, expectedStatusCode int) ([]byte, error) {
var body []byte
req, err := http.NewRequest(http.MethodDelete, buildURLRelativeToBase(userPath, strconv.FormatInt(user.ID, 10)), nil)
if err != nil {
return body, err
}
resp, err := getHTTPClient().Do(req)
if err != nil {
return body, err
}
defer resp.Body.Close()
body, _ = getResponseBody(resp)
return body, checkResponse(resp.StatusCode, expectedStatusCode)
}
// GetUserByID gets an user by database id and checks the received HTTP Status code against expectedStatusCode.
func GetUserByID(userID int64, expectedStatusCode int) (dataprovider.User, []byte, error) {
var user dataprovider.User
var body []byte
resp, err := getHTTPClient().Get(buildURLRelativeToBase(userPath, strconv.FormatInt(userID, 10)))
if err != nil {
return user, body, err
}
defer resp.Body.Close()
err = checkResponse(resp.StatusCode, expectedStatusCode)
if err == nil && expectedStatusCode == http.StatusOK {
err = render.DecodeJSON(resp.Body, &user)
} else {
body, _ = getResponseBody(resp)
}
return user, body, err
}
// GetUsers allows to get a list of users and checks the received HTTP Status code against expectedStatusCode.
// The number of results can be limited specifying a limit.
// Some results can be skipped specifying an offset.
// The results can be filtered specifying an username, the username filter is an exact match
func GetUsers(limit int64, offset int64, username string, expectedStatusCode int) ([]dataprovider.User, []byte, error) {
var users []dataprovider.User
var body []byte
url, err := url.Parse(buildURLRelativeToBase(userPath))
if err != nil {
return users, body, err
}
q := url.Query()
if limit > 0 {
q.Add("limit", strconv.FormatInt(limit, 10))
}
if offset > 0 {
q.Add("offset", strconv.FormatInt(offset, 10))
}
if len(username) > 0 {
q.Add("username", username)
}
url.RawQuery = q.Encode()
resp, err := getHTTPClient().Get(url.String())
if err != nil {
return users, body, err
}
defer resp.Body.Close()
err = checkResponse(resp.StatusCode, expectedStatusCode)
if err == nil && expectedStatusCode == http.StatusOK {
err = render.DecodeJSON(resp.Body, &users)
} else {
body, _ = getResponseBody(resp)
}
return users, body, err
}
// GetQuotaScans gets active quota scans and checks the received HTTP Status code against expectedStatusCode.
func GetQuotaScans(expectedStatusCode int) ([]sftpd.ActiveQuotaScan, []byte, error) {
var quotaScans []sftpd.ActiveQuotaScan
var body []byte
resp, err := getHTTPClient().Get(buildURLRelativeToBase(quotaScanPath))
if err != nil {
return quotaScans, body, err
}
defer resp.Body.Close()
err = checkResponse(resp.StatusCode, expectedStatusCode)
if err == nil && expectedStatusCode == http.StatusOK {
err = render.DecodeJSON(resp.Body, &quotaScans)
} else {
body, _ = getResponseBody(resp)
}
return quotaScans, body, err
}
// StartQuotaScan start a new quota scan for the given user and checks the received HTTP Status code against expectedStatusCode.
func StartQuotaScan(user dataprovider.User, expectedStatusCode int) ([]byte, error) {
var body []byte
userAsJSON, err := json.Marshal(user)
if err != nil {
return body, err
}
resp, err := getHTTPClient().Post(buildURLRelativeToBase(quotaScanPath), "application/json", bytes.NewBuffer(userAsJSON))
if err != nil {
return body, err
}
defer resp.Body.Close()
body, _ = getResponseBody(resp)
return body, checkResponse(resp.StatusCode, expectedStatusCode)
}
// GetConnections returns status and stats for active SFTP/SCP connections
func GetConnections(expectedStatusCode int) ([]sftpd.ConnectionStatus, []byte, error) {
var connections []sftpd.ConnectionStatus
var body []byte
resp, err := getHTTPClient().Get(buildURLRelativeToBase(activeConnectionsPath))
if err != nil {
return connections, body, err
}
defer resp.Body.Close()
err = checkResponse(resp.StatusCode, expectedStatusCode)
if err == nil && expectedStatusCode == http.StatusOK {
err = render.DecodeJSON(resp.Body, &connections)
} else {
body, _ = getResponseBody(resp)
}
return connections, body, err
}
// CloseConnection closes an active connection identified by connectionID
func CloseConnection(connectionID string, expectedStatusCode int) ([]byte, error) {
var body []byte
req, err := http.NewRequest(http.MethodDelete, buildURLRelativeToBase(activeConnectionsPath, connectionID), nil)
if err != nil {
return body, err
}
resp, err := getHTTPClient().Do(req)
if err != nil {
return body, err
}
defer resp.Body.Close()
err = checkResponse(resp.StatusCode, expectedStatusCode)
body, _ = getResponseBody(resp)
return body, err
}
// GetVersion returns version details
func GetVersion(expectedStatusCode int) (utils.VersionInfo, []byte, error) {
var version utils.VersionInfo
var body []byte
resp, err := getHTTPClient().Get(buildURLRelativeToBase(versionPath))
if err != nil {
return version, body, err
}
defer resp.Body.Close()
err = checkResponse(resp.StatusCode, expectedStatusCode)
if err == nil && expectedStatusCode == http.StatusOK {
err = render.DecodeJSON(resp.Body, &version)
} else {
body, _ = getResponseBody(resp)
}
return version, body, err
}
func checkResponse(actual int, expected int) error {
if expected != actual {
return fmt.Errorf("wrong status code: got %v want %v", actual, expected)
}
return nil
}
func getResponseBody(resp *http.Response) ([]byte, error) {
return ioutil.ReadAll(resp.Body)
}
func checkUser(expected dataprovider.User, actual dataprovider.User) error {
if len(actual.Password) > 0 {
return errors.New("User password must not be visible")
}
if len(actual.PublicKeys) > 0 {
return errors.New("User public keys must not be visible")
}
if expected.ID <= 0 {
if actual.ID <= 0 {
return errors.New("actual user ID must be > 0")
}
} else {
if actual.ID != expected.ID {
return errors.New("user ID mismatch")
}
}
for _, v := range expected.Permissions {
if !utils.IsStringInSlice(v, actual.Permissions) {
return errors.New("Permissions contents mismatch")
}
}
return compareEqualsUserFields(expected, actual)
}
func compareEqualsUserFields(expected dataprovider.User, actual dataprovider.User) error {
if expected.Username != actual.Username {
return errors.New("Username mismatch")
}
if expected.HomeDir != actual.HomeDir {
return errors.New("HomeDir mismatch")
}
if expected.UID != actual.UID {
return errors.New("UID mismatch")
}
if expected.GID != actual.GID {
return errors.New("GID mismatch")
}
if expected.MaxSessions != actual.MaxSessions {
return errors.New("MaxSessions mismatch")
}
if expected.QuotaSize != actual.QuotaSize {
return errors.New("QuotaSize mismatch")
}
if expected.QuotaFiles != actual.QuotaFiles {
return errors.New("QuotaFiles mismatch")
}
if len(expected.Permissions) != len(actual.Permissions) {
return errors.New("Permissions mismatch")
}
if expected.UploadBandwidth != actual.UploadBandwidth {
return errors.New("UploadBandwidth mismatch")
}
if expected.DownloadBandwidth != actual.DownloadBandwidth {
return errors.New("DownloadBandwidth mismatch")
}
return nil
}


@@ -1,228 +0,0 @@
package api
import (
"context"
"fmt"
"net/http"
"net/http/httptest"
"testing"
"github.com/drakkan/sftpgo/dataprovider"
"github.com/go-chi/chi"
)
const (
invalidURL = "http://foo\x7f.com/"
inactiveURL = "http://127.0.0.1:12345"
)
func TestGetRespStatus(t *testing.T) {
var err error
err = &dataprovider.MethodDisabledError{}
respStatus := getRespStatus(err)
if respStatus != http.StatusForbidden {
t.Errorf("wrong resp status extected: %d got: %d", http.StatusForbidden, respStatus)
}
err = fmt.Errorf("generic error")
respStatus = getRespStatus(err)
if respStatus != http.StatusInternalServerError {
t.Errorf("wrong resp status extected: %d got: %d", http.StatusInternalServerError, respStatus)
}
}
func TestCheckResponse(t *testing.T) {
err := checkResponse(http.StatusOK, http.StatusCreated)
if err == nil {
t.Errorf("check must fail")
}
err = checkResponse(http.StatusBadRequest, http.StatusBadRequest)
if err != nil {
t.Errorf("test must succeed, error: %v", err)
}
}
func TestCheckUser(t *testing.T) {
expected := dataprovider.User{}
actual := dataprovider.User{}
actual.Password = "password"
err := checkUser(expected, actual)
if err == nil {
t.Errorf("actual password must be nil")
}
actual.Password = ""
actual.PublicKeys = []string{"pub key"}
err = checkUser(expected, actual)
if err == nil {
t.Errorf("actual public key must be nil")
}
actual.PublicKeys = []string{}
err = checkUser(expected, actual)
if err == nil {
t.Errorf("actual ID must be > 0")
}
expected.ID = 1
actual.ID = 2
err = checkUser(expected, actual)
if err == nil {
t.Errorf("actual ID must be equal to expected ID")
}
expected.ID = 2
actual.ID = 2
expected.Permissions = []string{dataprovider.PermCreateDirs, dataprovider.PermDelete, dataprovider.PermDownload}
actual.Permissions = []string{dataprovider.PermCreateDirs, dataprovider.PermCreateSymlinks}
err = checkUser(expected, actual)
if err == nil {
t.Errorf("Permissions are not equal")
}
expected.Permissions = append(expected.Permissions, dataprovider.PermRename)
err = checkUser(expected, actual)
if err == nil {
t.Errorf("Permissions are not equal")
}
}
func TestCompareUserFields(t *testing.T) {
expected := dataprovider.User{}
actual := dataprovider.User{}
expected.Username = "test"
err := compareEqualsUserFields(expected, actual)
if err == nil {
t.Errorf("Username does not match")
}
expected.Username = ""
expected.HomeDir = "homedir"
err = compareEqualsUserFields(expected, actual)
if err == nil {
t.Errorf("HomeDir does not match")
}
expected.HomeDir = ""
expected.UID = 1
err = compareEqualsUserFields(expected, actual)
if err == nil {
t.Errorf("UID does not match")
}
expected.UID = 0
expected.GID = 1
err = compareEqualsUserFields(expected, actual)
if err == nil {
t.Errorf("GID does not match")
}
expected.GID = 0
expected.MaxSessions = 2
err = compareEqualsUserFields(expected, actual)
if err == nil {
t.Errorf("MaxSessions do not match")
}
expected.MaxSessions = 0
expected.QuotaSize = 4096
err = compareEqualsUserFields(expected, actual)
if err == nil {
t.Errorf("QuotaSize does not match")
}
expected.QuotaSize = 0
expected.QuotaFiles = 2
err = compareEqualsUserFields(expected, actual)
if err == nil {
t.Errorf("QuotaFiles do not match")
}
expected.QuotaFiles = 0
expected.Permissions = []string{dataprovider.PermCreateDirs}
err = compareEqualsUserFields(expected, actual)
if err == nil {
t.Errorf("Permissions are not equal")
}
expected.Permissions = nil
expected.UploadBandwidth = 64
err = compareEqualsUserFields(expected, actual)
if err == nil {
t.Errorf("UploadBandwidth does not match")
}
expected.UploadBandwidth = 0
expected.DownloadBandwidth = 128
err = compareEqualsUserFields(expected, actual)
if err == nil {
t.Errorf("DownloadBandwidth does not match")
}
}
func TestApiCallsWithBadURL(t *testing.T) {
oldBaseURL := httpBaseURL
SetBaseURL(invalidURL)
u := dataprovider.User{}
_, _, err := UpdateUser(u, http.StatusBadRequest)
if err == nil {
t.Errorf("request with invalid URL must fail")
}
_, err = RemoveUser(u, http.StatusNotFound)
if err == nil {
t.Errorf("request with invalid URL must fail")
}
_, _, err = GetUsers(1, 0, "", http.StatusBadRequest)
if err == nil {
t.Errorf("request with invalid URL must fail")
}
_, err = CloseConnection("non_existent_id", http.StatusNotFound)
if err == nil {
t.Errorf("request with invalid URL must fail")
}
SetBaseURL(oldBaseURL)
}
func TestApiCallToNotListeningServer(t *testing.T) {
oldBaseURL := httpBaseURL
SetBaseURL(inactiveURL)
u := dataprovider.User{}
_, _, err := AddUser(u, http.StatusBadRequest)
if err == nil {
t.Errorf("request to an inactive URL must fail")
}
_, _, err = UpdateUser(u, http.StatusNotFound)
if err == nil {
t.Errorf("request to an inactive URL must fail")
}
_, err = RemoveUser(u, http.StatusNotFound)
if err == nil {
t.Errorf("request to an inactive URL must fail")
}
_, _, err = GetUserByID(-1, http.StatusNotFound)
if err == nil {
t.Errorf("request to an inactive URL must fail")
}
_, _, err = GetUsers(100, 0, "", http.StatusOK)
if err == nil {
t.Errorf("request to an inactive URL must fail")
}
_, _, err = GetQuotaScans(http.StatusOK)
if err == nil {
t.Errorf("request to an inactive URL must fail")
}
_, err = StartQuotaScan(u, http.StatusNotFound)
if err == nil {
t.Errorf("request to an inactive URL must fail")
}
_, _, err = GetConnections(http.StatusOK)
if err == nil {
t.Errorf("request to an inactive URL must fail")
}
_, err = CloseConnection("non_existent_id", http.StatusNotFound)
if err == nil {
t.Errorf("request to an inactive URL must fail")
}
_, _, err = GetVersion(http.StatusOK)
if err == nil {
t.Errorf("request to an inactive URL must fail")
}
SetBaseURL(oldBaseURL)
}
func TestCloseConnectionHandler(t *testing.T) {
req, _ := http.NewRequest(http.MethodDelete, activeConnectionsPath+"/connectionID", nil)
rctx := chi.NewRouteContext()
rctx.URLParams.Add("connectionID", "")
req = req.WithContext(context.WithValue(req.Context(), chi.RouteCtxKey, rctx))
rr := httptest.NewRecorder()
handleCloseConnection(rr, req)
if rr.Code != http.StatusBadRequest {
t.Errorf("Expected response code 400. Got %d", rr.Code)
}
}


@@ -1,44 +0,0 @@
package api
import (
"net/http"
"github.com/drakkan/sftpgo/dataprovider"
"github.com/drakkan/sftpgo/logger"
"github.com/drakkan/sftpgo/sftpd"
"github.com/drakkan/sftpgo/utils"
"github.com/go-chi/render"
)
func getQuotaScans(w http.ResponseWriter, r *http.Request) {
render.JSON(w, r, sftpd.GetQuotaScans())
}
func startQuotaScan(w http.ResponseWriter, r *http.Request) {
var u dataprovider.User
err := render.DecodeJSON(r.Body, &u)
if err != nil {
sendAPIResponse(w, r, err, "", http.StatusBadRequest)
return
}
user, err := dataprovider.UserExists(dataProvider, u.Username)
if err != nil {
sendAPIResponse(w, r, err, "", http.StatusNotFound)
return
}
if sftpd.AddQuotaScan(user.Username) {
sendAPIResponse(w, r, err, "Scan started", http.StatusCreated)
go func() {
numFiles, size, _, err := utils.ScanDirContents(user.HomeDir)
if err != nil {
logger.Warn(logSender, "error scanning user home dir %v: %v", user.HomeDir, err)
} else {
err := dataprovider.UpdateUserQuota(dataProvider, user, numFiles, size, true)
logger.Debug(logSender, "user dir scanned, user: %v, dir: %v, error: %v", user.Username, user.HomeDir, err)
}
sftpd.RemoveQuotaScan(user.Username)
}()
} else {
sendAPIResponse(w, r, err, "Another scan is already in progress", http.StatusConflict)
}
}


@@ -1,86 +0,0 @@
package api
import (
"net/http"
"github.com/drakkan/sftpgo/logger"
"github.com/drakkan/sftpgo/sftpd"
"github.com/drakkan/sftpgo/utils"
"github.com/go-chi/chi"
"github.com/go-chi/chi/middleware"
"github.com/go-chi/render"
)
// GetHTTPRouter returns the configured HTTP handler
func GetHTTPRouter() http.Handler {
return router
}
func initializeRouter() {
router = chi.NewRouter()
router.Use(middleware.RequestID)
router.Use(middleware.RealIP)
router.Use(logger.NewStructuredLogger(logger.GetLogger()))
router.Use(middleware.Recoverer)
router.NotFound(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
sendAPIResponse(w, r, nil, "Not Found", http.StatusNotFound)
}))
router.MethodNotAllowed(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
sendAPIResponse(w, r, nil, "Method not allowed", http.StatusMethodNotAllowed)
}))
router.Get(versionPath, func(w http.ResponseWriter, r *http.Request) {
render.JSON(w, r, utils.GetAppVersion())
})
router.Get(activeConnectionsPath, func(w http.ResponseWriter, r *http.Request) {
render.JSON(w, r, sftpd.GetConnectionsStats())
})
router.Delete(activeConnectionsPath+"/{connectionID}", func(w http.ResponseWriter, r *http.Request) {
handleCloseConnection(w, r)
})
router.Get(quotaScanPath, func(w http.ResponseWriter, r *http.Request) {
getQuotaScans(w, r)
})
router.Post(quotaScanPath, func(w http.ResponseWriter, r *http.Request) {
startQuotaScan(w, r)
})
router.Get(userPath, func(w http.ResponseWriter, r *http.Request) {
getUsers(w, r)
})
router.Post(userPath, func(w http.ResponseWriter, r *http.Request) {
addUser(w, r)
})
router.Get(userPath+"/{userID}", func(w http.ResponseWriter, r *http.Request) {
getUserByID(w, r)
})
router.Put(userPath+"/{userID}", func(w http.ResponseWriter, r *http.Request) {
updateUser(w, r)
})
router.Delete(userPath+"/{userID}", func(w http.ResponseWriter, r *http.Request) {
deleteUser(w, r)
})
}
func handleCloseConnection(w http.ResponseWriter, r *http.Request) {
connectionID := chi.URLParam(r, "connectionID")
if connectionID == "" {
sendAPIResponse(w, r, nil, "connectionID is mandatory", http.StatusBadRequest)
return
}
if sftpd.CloseActiveConnection(connectionID) {
sendAPIResponse(w, r, nil, "Connection closed", http.StatusOK)
} else {
sendAPIResponse(w, r, nil, "Not Found", http.StatusNotFound)
}
}


@@ -1,689 +0,0 @@
openapi: 3.0.1
info:
title: SFTPGo
description: 'SFTPGo REST API'
version: 1.0.0
servers:
- url: /api/v1
paths:
/version:
get:
tags:
- version
summary: Get version details
operationId: get_version
responses:
200:
description: successful operation
content:
application/json:
schema:
type: array
items:
$ref : '#/components/schemas/VersionInfo'
/connection:
get:
tags:
- connections
summary: Get the active users and info about their uploads/downloads
operationId: get_connections
responses:
200:
description: successful operation
content:
application/json:
schema:
type: array
items:
$ref : '#/components/schemas/ConnectionStatus'
/connection/{connectionID}:
delete:
tags:
- connections
summary: Terminate an active connection
operationId: close_connection
parameters:
- name: connectionID
in: path
description: ID of the connection to close
required: true
schema:
type: string
responses:
200:
description: successful operation
content:
application/json:
schema:
$ref: '#/components/schemas/ApiResponse'
example:
status: 200
message: "Connection closed"
error: ""
400:
description: Bad request
content:
application/json:
schema:
$ref: '#/components/schemas/ApiResponse'
example:
status: 400
message: ""
error: "Error description if any"
404:
description: Not Found
content:
application/json:
schema:
$ref: '#/components/schemas/ApiResponse'
example:
status: 404
message: ""
error: "Error description if any"
500:
description: Internal Server Error
content:
application/json:
schema:
$ref: '#/components/schemas/ApiResponse'
example:
status: 500
message: ""
error: "Error description if any"
/quota_scan:
get:
tags:
- quota
summary: Get the active quota scans
operationId: get_quota_scans
responses:
200:
description: successful operation
content:
application/json:
schema:
type: array
items:
$ref : '#/components/schemas/QuotaScan'
post:
tags:
- quota
summary: start a new quota scan
description: A quota scan update the number of files and their total size for the given user
operationId: start_quota_scan
requestBody:
required: true
content:
application/json:
schema:
$ref : '#/components/schemas/User'
responses:
201:
description: successful operation
content:
application/json:
schema:
$ref: '#/components/schemas/ApiResponse'
example:
status: 201
message: "Scan started"
error: ""
400:
description: Bad request
content:
application/json:
schema:
$ref: '#/components/schemas/ApiResponse'
example:
status: 400
message: ""
error: "Error description if any"
403:
description: Forbidden
content:
application/json:
schema:
$ref: '#/components/schemas/ApiResponse'
example:
status: 403
message: ""
error: "Error description if any"
404:
description: Not Found
content:
application/json:
schema:
$ref: '#/components/schemas/ApiResponse'
example:
status: 404
message: ""
error: "Error description if any"
409:
description: Another scan is already in progress for this user
content:
application/json:
schema:
$ref: '#/components/schemas/ApiResponse'
example:
status: 409
message: "Another scan is already in progress"
error: "Error description if any"
500:
description: Internal Server Error
content:
application/json:
schema:
$ref: '#/components/schemas/ApiResponse'
example:
status: 500
message: ""
error: "Error description if any"
/user:
get:
tags:
- users
summary: Returns an array with one or more users
description: For security reasons password and public key are empty in the response
operationId: get_users
parameters:
- in: query
name: offset
schema:
type: integer
minimum: 0
default: 0
required: false
- in: query
name: limit
schema:
type: integer
minimum: 1
maximum: 500
default: 100
required: false
description: The maximum number of items to return. Max value is 500, default is 100
- in: query
name: order
required: false
description: Ordering users by username
schema:
type: string
enum:
- ASC
- DESC
example: ASC
- in: query
name: username
required: false
description: Filter by username, exact match, case sensitive
schema:
type: string
responses:
200:
description: successful operation
content:
application/json:
schema:
type: array
items:
$ref: '#/components/schemas/User'
400:
description: Bad request
content:
application/json:
schema:
$ref: '#/components/schemas/ApiResponse'
example:
status: 400
message: ""
error: "Error description if any"
403:
description: Forbidden
content:
application/json:
schema:
$ref: '#/components/schemas/ApiResponse'
example:
status: 403
message: ""
error: "Error description if any"
500:
description: Internal Server Error
content:
application/json:
schema:
$ref: '#/components/schemas/ApiResponse'
example:
status: 500
message: ""
error: "Error description if any"
post:
tags:
- users
summary: Adds a new SFTP/SCP user
operationId: add_user
requestBody:
required: true
content:
application/json:
schema:
$ref: '#/components/schemas/User'
responses:
200:
description: successful operation
content:
application/json:
schema:
$ref: '#/components/schemas/User'
400:
description: Bad request
content:
application/json:
schema:
$ref: '#/components/schemas/ApiResponse'
example:
status: 400
message: ""
error: "Error description if any"
403:
description: Forbidden
content:
application/json:
schema:
$ref: '#/components/schemas/ApiResponse'
example:
status: 403
message: ""
error: "Error description if any"
500:
description: Internal Server Error
content:
application/json:
schema:
$ref: '#/components/schemas/ApiResponse'
example:
status: 500
message: ""
error: "Error description if any"
/user/{userID}:
get:
tags:
- users
summary: Find user by ID
description: For security reasons password and public key are empty in the response
operationId: get_user_by_id
parameters:
- name: userID
in: path
description: ID of the user to retrieve
required: true
schema:
type: integer
format: int32
responses:
200:
description: successful operation
content:
application/json:
schema:
$ref: '#/components/schemas/User'
400:
description: Bad request
content:
application/json:
schema:
$ref: '#/components/schemas/ApiResponse'
example:
status: 400
message: ""
error: "Error description if any"
403:
description: Forbidden
content:
application/json:
schema:
$ref: '#/components/schemas/ApiResponse'
example:
status: 403
message: ""
error: "Error description if any"
404:
description: Not Found
content:
application/json:
schema:
$ref: '#/components/schemas/ApiResponse'
example:
status: 404
message: ""
error: "Error description if any"
500:
description: Internal Server Error
content:
application/json:
schema:
$ref: '#/components/schemas/ApiResponse'
example:
status: 500
message: ""
error: "Error description if any"
put:
tags:
- users
summary: Update an existing user
operationId: update_user
parameters:
- name: userID
in: path
description: ID of the user to update
required: true
schema:
type: integer
format: int32
requestBody:
required: true
content:
application/json:
schema:
$ref: '#/components/schemas/User'
responses:
200:
description: successful operation
content:
application/json:
schema:
$ref: '#/components/schemas/ApiResponse'
example:
status: 200
message: "User updated"
error: ""
400:
description: Bad request
content:
application/json:
schema:
$ref: '#/components/schemas/ApiResponse'
example:
status: 400
message: ""
error: "Error description if any"
403:
description: Forbidden
content:
application/json:
schema:
$ref: '#/components/schemas/ApiResponse'
example:
status: 403
message: ""
error: "Error description if any"
404:
description: Not Found
content:
application/json:
schema:
$ref: '#/components/schemas/ApiResponse'
example:
status: 404
message: ""
error: "Error description if any"
500:
description: Internal Server Error
content:
application/json:
schema:
$ref: '#/components/schemas/ApiResponse'
example:
status: 500
message: ""
error: "Error description if any"
delete:
tags:
- users
summary: Delete an existing user
operationId: delete_user
parameters:
- name: userID
in: path
description: ID of the user to delete
required: true
schema:
type: integer
format: int32
responses:
200:
description: successful operation
content:
application/json:
schema:
$ref: '#/components/schemas/ApiResponse'
example:
status: 200
message: "User deleted"
error: ""
400:
description: Bad request
content:
application/json:
schema:
$ref: '#/components/schemas/ApiResponse'
example:
status: 400
message: ""
error: "Error description if any"
403:
description: Forbidden
content:
application/json:
schema:
$ref: '#/components/schemas/ApiResponse'
example:
status: 403
message: ""
error: "Error description if any"
404:
description: Not Found
content:
application/json:
schema:
$ref: '#/components/schemas/ApiResponse'
example:
status: 404
message: ""
error: "Error description if any"
500:
description: Internal Server Error
content:
application/json:
schema:
$ref: '#/components/schemas/ApiResponse'
example:
status: 500
message: ""
error: "Error description if any"
components:
schemas:
Permission:
type: string
enum:
- '*'
- list
- download
- upload
- delete
- rename
- create_dirs
- create_symlinks
description: >
Permissions:
* `*` - all permissions are granted
* `list` - listing items is allowed
* `download` - downloading files is allowed
* `upload` - uploading files is allowed
* `delete` - deleting files or directories is allowed
* `rename` - renaming files or directories is allowed
* `create_dirs` - creating directories is allowed
* `create_symlinks` - creating symbolic links is allowed
User:
type: object
properties:
id:
type: integer
format: int32
minimum: 1
username:
type: string
password:
type: string
nullable: true
description: a password or at least one public key is mandatory. If the password has no known hashing algorithm prefix it will be stored using argon2id. You can send a password hashed as bcrypt or pbkdf2 and it will be stored as is. For security reasons this field is omitted when you search/get users
public_keys:
type: array
items:
type: string
nullable: true
description: a password or at least one public key is mandatory. For security reasons this field is omitted when you search/get users.
home_dir:
type: string
description: path to the user home directory. The user cannot upload or download files outside this directory. SFTPGo tries to automatically create this folder if missing. Must be an absolute path
uid:
type: integer
format: int32
minimum: 0
maximum: 65535
description: if you run sftpgo as the root user the created files and directories will be assigned to this uid. 0 means no change, the owner will be the user that runs sftpgo. Ignored on Windows
gid:
type: integer
format: int32
minimum: 0
maximum: 65535
description: if you run sftpgo as the root user the created files and directories will be assigned to this gid. 0 means no change, the group will be the one of the user that runs sftpgo. Ignored on Windows
max_sessions:
type: integer
format: int32
description: limit the number of sessions that a user can open. 0 means unlimited
quota_size:
type: integer
format: int64
description: quota as size. 0 means unlimited. Please note that quota is updated if files are added/removed via SFTP/SCP; otherwise a quota scan is needed
quota_files:
type: integer
format: int32
description: quota as number of files. 0 means unlimited. Please note that quota is updated if files are added/removed via SFTP/SCP; otherwise a quota scan is needed
permissions:
type: array
items:
$ref: '#/components/schemas/Permission'
minItems: 1
used_quota_size:
type: integer
format: int64
used_quota_file:
type: integer
format: int32
last_quota_update:
type: integer
format: int64
description: last quota update as unix timestamp in milliseconds
upload_bandwidth:
type: integer
format: int32
description: Maximum upload bandwidth as KB/s, 0 means unlimited
download_bandwidth:
type: integer
format: int32
description: Maximum download bandwidth as KB/s, 0 means unlimited
Transfer:
type: object
properties:
operation_type:
type: string
enum:
- upload
- download
path:
type: string
description: SFTP/SCP file path for the upload/download
start_time:
type: integer
format: int64
description: start time as unix timestamp in milliseconds
size:
type: integer
format: int64
description: bytes transferred
last_activity:
type: integer
format: int64
description: last transfer activity as unix timestamp in milliseconds
ConnectionStatus:
type: object
properties:
username:
type: string
description: connected username
connection_id:
type: string
description: unique connection identifier
client_version:
type: string
description: SFTP/SCP client version
remote_address:
type: string
description: Remote address for the connected SFTP/SCP client
connection_time:
type: integer
format: int64
description: connection time as unix timestamp in milliseconds
last_activity:
type: integer
format: int64
description: last client activity as unix timestamp in milliseconds
protocol:
type: string
enum:
- SFTP
- SCP
active_transfers:
type: array
items:
$ref: '#/components/schemas/Transfer'
QuotaScan:
type: object
properties:
username:
type: string
description: username with an active scan
start_time:
type: integer
format: int64
description: scan start time as unix timestamp in milliseconds
ApiResponse:
type: object
properties:
status:
type: integer
format: int32
minimum: 200
maximum: 500
example: 200
description: HTTP Status code, for example 200 OK, 400 Bad request and so on
message:
type: string
nullable: true
description: additional message if any
error:
type: string
nullable: true
description: error description if any
VersionInfo:
type: object
properties:
version:
type: string
build_date:
type: string
commit_hash:
type: string
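The schemas above are enough to sketch a client. As a minimal illustration, the Go program below calls GET /api/v1/version and decodes the documented array of VersionInfo objects; the host and port are assumptions for a local instance and are not specified by this document.

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// VersionInfo mirrors the schema defined in the spec above.
type VersionInfo struct {
	Version    string `json:"version"`
	BuildDate  string `json:"build_date"`
	CommitHash string `json:"commit_hash"`
}

func main() {
	// 127.0.0.1:8080 is an assumed local binding for the HTTP API.
	resp, err := http.Get("http://127.0.0.1:8080/api/v1/version")
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()

	// Per the spec, the endpoint returns a JSON array of VersionInfo.
	var versions []VersionInfo
	if err := json.NewDecoder(resp.Body).Decode(&versions); err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	for _, v := range versions {
		fmt.Printf("SFTPGo %s (built %s, commit %s)\n", v.Version, v.BuildDate, v.CommitHash)
	}
}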

cmd/gen.go Normal file

@@ -0,0 +1,12 @@
package cmd
import "github.com/spf13/cobra"
var genCmd = &cobra.Command{
Use: "gen",
Short: "A collection of useful generators",
}
func init() {
rootCmd.AddCommand(genCmd)
}

cmd/gencompletion.go Normal file

@@ -0,0 +1,67 @@
package cmd
import (
"os"
"github.com/rs/zerolog"
"github.com/spf13/cobra"
"github.com/drakkan/sftpgo/logger"
)
var genCompletionCmd = &cobra.Command{
Use: "completion [bash|zsh|fish|powershell]",
Short: "Generate shell completion script to the stdout",
Long: `To load completions:
Bash:
$ source <(sftpgo gen completion bash)
# To load completions for each session, execute once:
Linux:
$ sftpgo gen completion bash > /etc/bash_completion.d/sftpgo-completion.bash
MacOS:
$ sftpgo gen completion bash > /usr/local/etc/bash_completion.d/sftpgo-completion.bash
Zsh:
$ source <(sftpgo gen completion zsh)
# To load completions for each session, execute once:
$ sftpgo gen completion zsh > "${fpath[1]}/_sftpgo"
Fish:
$ sftpgo gen completion fish | source
# To load completions for each session, execute once:
$ sftpgo gen completion fish > ~/.config/fish/completions/sftpgo.fish
`,
DisableFlagsInUseLine: true,
ValidArgs: []string{"bash", "zsh", "fish", "powershell"},
Args: cobra.ExactValidArgs(1),
Run: func(cmd *cobra.Command, args []string) {
var err error
logger.DisableLogger()
logger.EnableConsoleLogger(zerolog.DebugLevel)
switch args[0] {
case "bash":
err = cmd.Root().GenBashCompletion(os.Stdout)
case "zsh":
err = cmd.Root().GenZshCompletion(os.Stdout)
case "fish":
err = cmd.Root().GenFishCompletion(os.Stdout, true)
case "powershell":
err = cmd.Root().GenPowerShellCompletion(os.Stdout)
}
if err != nil {
logger.WarnToConsole("Unable to generate shell completion script: %v", err)
os.Exit(1)
}
},
}
func init() {
genCmd.AddCommand(genCompletionCmd)
}

cmd/genman.go Normal file

@@ -0,0 +1,52 @@
package cmd
import (
"fmt"
"os"
"github.com/rs/zerolog"
"github.com/spf13/cobra"
"github.com/spf13/cobra/doc"
"github.com/drakkan/sftpgo/logger"
"github.com/drakkan/sftpgo/version"
)
var (
manDir string
genManCmd = &cobra.Command{
Use: "man",
Short: "Generate man pages for SFTPGo CLI",
Long: `This command automatically generates up-to-date man pages of SFTPGo's
command-line interface. By default, it creates the man page files
in the "man" directory under the current directory.
`,
Run: func(cmd *cobra.Command, args []string) {
logger.DisableLogger()
logger.EnableConsoleLogger(zerolog.DebugLevel)
if _, err := os.Stat(manDir); os.IsNotExist(err) {
err = os.MkdirAll(manDir, os.ModePerm)
if err != nil {
logger.WarnToConsole("Unable to generate man page files: %v", err)
os.Exit(1)
}
}
header := &doc.GenManHeader{
Section: "1",
Manual: "SFTPGo Manual",
Source: fmt.Sprintf("SFTPGo %v", version.Get().Version),
}
cmd.Root().DisableAutoGenTag = true
err := doc.GenManTree(cmd.Root(), header, manDir)
if err != nil {
logger.WarnToConsole("Unable to generate man page files: %v", err)
os.Exit(1)
}
},
}
)
func init() {
genManCmd.Flags().StringVarP(&manDir, "dir", "d", "man", "The directory to write the man pages")
genCmd.AddCommand(genManCmd)
}

cmd/initprovider.go Normal file

@@ -0,0 +1,64 @@
package cmd
import (
"os"
"github.com/rs/zerolog"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"github.com/drakkan/sftpgo/config"
"github.com/drakkan/sftpgo/dataprovider"
"github.com/drakkan/sftpgo/logger"
"github.com/drakkan/sftpgo/utils"
)
var (
initProviderCmd = &cobra.Command{
Use: "initprovider",
Short: "Initializes and/or updates the configured data provider",
Long: `This command reads the data provider connection details from the specified
configuration file and creates the initial structure or updates the existing one,
as needed.
Some data providers, such as bolt and memory, do not require an initialization
but they could require an update to the existing data after upgrading SFTPGo.
For SQLite/bolt providers the database file will be auto-created if missing.
For PostgreSQL and MySQL providers you need to create the configured database,
this command will create/update the required tables as needed.
To initialize/update the data provider from the configuration directory simply use:
$ sftpgo initprovider
Please take a look at the usage below to customize the options.`,
Run: func(cmd *cobra.Command, args []string) {
logger.DisableLogger()
logger.EnableConsoleLogger(zerolog.DebugLevel)
configDir = utils.CleanDirInput(configDir)
err := config.LoadConfig(configDir, configFile)
if err != nil {
logger.WarnToConsole("Unable to initialize data provider, config load error: %v", err)
return
}
providerConf := config.GetProviderConf()
logger.InfoToConsole("Initializing provider: %#v config file: %#v", providerConf.Driver, viper.ConfigFileUsed())
err = dataprovider.InitializeDatabase(providerConf, configDir)
if err == nil {
logger.InfoToConsole("Data provider successfully initialized/updated")
} else if err == dataprovider.ErrNoInitRequired {
logger.InfoToConsole("%v", err.Error())
} else {
logger.WarnToConsole("Unable to initialize/update the data provider: %v", err)
os.Exit(1)
}
},
}
)
func init() {
rootCmd.AddCommand(initProviderCmd)
addConfigFlags(initProviderCmd)
}
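For reference, the command above reads the data provider settings from the configuration file. A minimal sketch of the relevant section, assuming the default sftpgo.json layout (key names as in the stock configuration; every value here is a placeholder):

{
  "data_provider": {
    "driver": "postgresql",
    "name": "sftpgo",
    "host": "127.0.0.1",
    "port": 5432,
    "username": "sftpgo",
    "password": "your-password-here"
  }
}

With PostgreSQL or MySQL the database itself must already exist; initprovider then creates or updates the required tables inside it, as the command help above states.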

cmd/install_windows.go Normal file

@@ -0,0 +1,97 @@
package cmd
import (
"fmt"
"os"
"strconv"
"github.com/spf13/cobra"
"github.com/drakkan/sftpgo/service"
"github.com/drakkan/sftpgo/utils"
)
var (
installCmd = &cobra.Command{
Use: "install",
Short: "Install SFTPGo as Windows Service",
Long: `To install the SFTPGo Windows Service with the default values for the command
line flags simply use:
sftpgo service install
Please take a look at the usage below to customize the startup options`,
Run: func(cmd *cobra.Command, args []string) {
s := service.Service{
ConfigDir: utils.CleanDirInput(configDir),
ConfigFile: configFile,
LogFilePath: logFilePath,
LogMaxSize: logMaxSize,
LogMaxBackups: logMaxBackups,
LogMaxAge: logMaxAge,
LogCompress: logCompress,
LogVerbose: logVerbose,
Shutdown: make(chan bool),
}
winService := service.WindowsService{
Service: s,
}
serviceArgs := []string{"service", "start"}
customFlags := getCustomServeFlags()
if len(customFlags) > 0 {
serviceArgs = append(serviceArgs, customFlags...)
}
err := winService.Install(serviceArgs...)
if err != nil {
fmt.Printf("Error installing service: %v\r\n", err)
os.Exit(1)
} else {
fmt.Printf("Service installed!\r\n")
}
},
}
)
func init() {
serviceCmd.AddCommand(installCmd)
addServeFlags(installCmd)
}
func getCustomServeFlags() []string {
result := []string{}
if configDir != defaultConfigDir {
configDir = utils.CleanDirInput(configDir)
result = append(result, "--"+configDirFlag)
result = append(result, configDir)
}
if configFile != defaultConfigName {
result = append(result, "--"+configFileFlag)
result = append(result, configFile)
}
if logFilePath != defaultLogFile {
result = append(result, "--"+logFilePathFlag)
result = append(result, logFilePath)
}
if logMaxSize != defaultLogMaxSize {
result = append(result, "--"+logMaxSizeFlag)
result = append(result, strconv.Itoa(logMaxSize))
}
if logMaxBackups != defaultLogMaxBackup {
result = append(result, "--"+logMaxBackupFlag)
result = append(result, strconv.Itoa(logMaxBackups))
}
if logMaxAge != defaultLogMaxAge {
result = append(result, "--"+logMaxAgeFlag)
result = append(result, strconv.Itoa(logMaxAge))
}
if logVerbose != defaultLogVerbose {
result = append(result, "--"+logVerboseFlag+"=false")
}
if logCompress != defaultLogCompress {
result = append(result, "--"+logCompressFlag+"=true")
}
if profiler != defaultProfiler {
result = append(result, "--"+profilerFlag+"=true")
}
return result
}

cmd/portable.go Normal file

@@ -0,0 +1,305 @@
// +build !noportable
package cmd
import (
"encoding/base64"
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
"strings"
"github.com/spf13/cobra"
"github.com/drakkan/sftpgo/common"
"github.com/drakkan/sftpgo/dataprovider"
"github.com/drakkan/sftpgo/service"
"github.com/drakkan/sftpgo/sftpd"
"github.com/drakkan/sftpgo/version"
"github.com/drakkan/sftpgo/vfs"
)
var (
directoryToServe string
portableSFTPDPort int
portableAdvertiseService bool
portableAdvertiseCredentials bool
portableUsername string
portablePassword string
portableLogFile string
portableLogVerbose bool
portablePublicKeys []string
portablePermissions []string
portableSSHCommands []string
portableAllowedExtensions []string
portableDeniedExtensions []string
portableFsProvider int
portableS3Bucket string
portableS3Region string
portableS3AccessKey string
portableS3AccessSecret string
portableS3Endpoint string
portableS3StorageClass string
portableS3KeyPrefix string
portableS3ULPartSize int
portableS3ULConcurrency int
portableGCSBucket string
portableGCSCredentialsFile string
portableGCSAutoCredentials int
portableGCSStorageClass string
portableGCSKeyPrefix string
portableFTPDPort int
portableFTPSCert string
portableFTPSKey string
portableWebDAVPort int
portableWebDAVCert string
portableWebDAVKey string
portableCmd = &cobra.Command{
Use: "portable",
Short: "Serve a single directory",
Long: `To serve the current working directory with auto-generated credentials simply
use:
$ sftpgo portable
Please take a look at the usage below to customize the serving parameters`,
Run: func(cmd *cobra.Command, args []string) {
portableDir := directoryToServe
fsProvider := dataprovider.FilesystemProvider(portableFsProvider)
if !filepath.IsAbs(portableDir) {
if fsProvider == dataprovider.LocalFilesystemProvider {
portableDir, _ = filepath.Abs(portableDir)
} else {
portableDir = os.TempDir()
}
}
permissions := make(map[string][]string)
permissions["/"] = portablePermissions
portableGCSCredentials := ""
if fsProvider == dataprovider.GCSFilesystemProvider && len(portableGCSCredentialsFile) > 0 {
fi, err := os.Stat(portableGCSCredentialsFile)
if err != nil {
fmt.Printf("Invalid GCS credentials file: %v\n", err)
os.Exit(1)
}
if fi.Size() > 1048576 {
fmt.Printf("Invalid GCS credentials file: %#v is too big %v/1048576 bytes\n", portableGCSCredentialsFile,
fi.Size())
os.Exit(1)
}
creds, err := ioutil.ReadFile(portableGCSCredentialsFile)
if err != nil {
fmt.Printf("Unable to read credentials file: %v\n", err)
}
portableGCSCredentials = base64.StdEncoding.EncodeToString(creds)
portableGCSAutoCredentials = 0
}
if portableFTPDPort >= 0 && len(portableFTPSCert) > 0 && len(portableFTPSKey) > 0 {
_, err := common.NewCertManager(portableFTPSCert, portableFTPSKey, "FTP portable")
if err != nil {
fmt.Printf("Unable to load FTPS key pair, cert file %#v key file %#v error: %v\n",
portableFTPSCert, portableFTPSKey, err)
os.Exit(1)
}
}
if portableWebDAVPort > 0 && len(portableWebDAVCert) > 0 && len(portableWebDAVKey) > 0 {
_, err := common.NewCertManager(portableWebDAVCert, portableWebDAVKey, "WebDAV portable")
if err != nil {
fmt.Printf("Unable to load WebDAV key pair, cert file %#v key file %#v error: %v\n",
portableWebDAVCert, portableWebDAVKey, err)
os.Exit(1)
}
}
service := service.Service{
ConfigDir: filepath.Clean(defaultConfigDir),
ConfigFile: defaultConfigName,
LogFilePath: portableLogFile,
LogMaxSize: defaultLogMaxSize,
LogMaxBackups: defaultLogMaxBackup,
LogMaxAge: defaultLogMaxAge,
LogCompress: defaultLogCompress,
LogVerbose: portableLogVerbose,
Profiler: defaultProfiler,
Shutdown: make(chan bool),
PortableMode: 1,
PortableUser: dataprovider.User{
Username: portableUsername,
Password: portablePassword,
PublicKeys: portablePublicKeys,
Permissions: permissions,
HomeDir: portableDir,
Status: 1,
FsConfig: dataprovider.Filesystem{
Provider: dataprovider.FilesystemProvider(portableFsProvider),
S3Config: vfs.S3FsConfig{
Bucket: portableS3Bucket,
Region: portableS3Region,
AccessKey: portableS3AccessKey,
AccessSecret: portableS3AccessSecret,
Endpoint: portableS3Endpoint,
StorageClass: portableS3StorageClass,
KeyPrefix: portableS3KeyPrefix,
UploadPartSize: int64(portableS3ULPartSize),
UploadConcurrency: portableS3ULConcurrency,
},
GCSConfig: vfs.GCSFsConfig{
Bucket: portableGCSBucket,
Credentials: portableGCSCredentials,
AutomaticCredentials: portableGCSAutoCredentials,
StorageClass: portableGCSStorageClass,
KeyPrefix: portableGCSKeyPrefix,
},
},
Filters: dataprovider.UserFilters{
FileExtensions: parseFileExtensionsFilters(),
},
},
}
if err := service.StartPortableMode(portableSFTPDPort, portableFTPDPort, portableWebDAVPort, portableSSHCommands, portableAdvertiseService,
portableAdvertiseCredentials, portableFTPSCert, portableFTPSKey, portableWebDAVCert, portableWebDAVKey); err == nil {
service.Wait()
if service.Error == nil {
os.Exit(0)
}
}
os.Exit(1)
},
}
)
func init() {
version.AddFeature("+portable")
portableCmd.Flags().StringVarP(&directoryToServe, "directory", "d", ".", `Path to the directory to serve.
This can be an absolute path or a path
relative to the current directory
`)
portableCmd.Flags().IntVarP(&portableSFTPDPort, "sftpd-port", "s", 0, "0 means a random unprivileged port")
portableCmd.Flags().IntVar(&portableFTPDPort, "ftpd-port", -1, `0 means a random unprivileged port,
< 0 disabled`)
portableCmd.Flags().IntVar(&portableWebDAVPort, "webdav-port", -1, `0 means a random unprivileged port,
< 0 disabled`)
portableCmd.Flags().StringSliceVarP(&portableSSHCommands, "ssh-commands", "c", sftpd.GetDefaultSSHCommands(),
`SSH commands to enable.
"*" means any supported SSH command
including scp
`)
portableCmd.Flags().StringVarP(&portableUsername, "username", "u", "", `Leave empty to use an auto-generated
value`)
portableCmd.Flags().StringVarP(&portablePassword, "password", "p", "", `Leave empty to use an auto-generated
value`)
portableCmd.Flags().StringVarP(&portableLogFile, logFilePathFlag, "l", "", "Leave empty to disable logging")
portableCmd.Flags().BoolVarP(&portableLogVerbose, logVerboseFlag, "v", false, "Enable verbose logs")
portableCmd.Flags().StringSliceVarP(&portablePublicKeys, "public-key", "k", []string{}, "")
portableCmd.Flags().StringSliceVarP(&portablePermissions, "permissions", "g", []string{"list", "download"},
`User's permissions. "*" means any
permission`)
portableCmd.Flags().StringArrayVar(&portableAllowedExtensions, "allowed-extensions", []string{},
`Allowed file extensions, case
insensitive. The format is
/dir::ext1,ext2.
For example: "/somedir::.jpg,.png"`)
portableCmd.Flags().StringArrayVar(&portableDeniedExtensions, "denied-extensions", []string{},
`Denied file extensions, case
insensitive. The format is
/dir::ext1,ext2.
For example: "/somedir::.jpg,.png"`)
portableCmd.Flags().BoolVarP(&portableAdvertiseService, "advertise-service", "S", false,
`Advertise SFTP/FTP service using
multicast DNS`)
portableCmd.Flags().BoolVarP(&portableAdvertiseCredentials, "advertise-credentials", "C", false,
`If the SFTP/FTP service is
advertised via multicast DNS, this
flag allows putting the username and
password inside the advertised TXT record`)
portableCmd.Flags().IntVarP(&portableFsProvider, "fs-provider", "f", int(dataprovider.LocalFilesystemProvider), `0 means local filesystem,
1 Amazon S3 compatible,
2 Google Cloud Storage`)
portableCmd.Flags().StringVar(&portableS3Bucket, "s3-bucket", "", "")
portableCmd.Flags().StringVar(&portableS3Region, "s3-region", "", "")
portableCmd.Flags().StringVar(&portableS3AccessKey, "s3-access-key", "", "")
portableCmd.Flags().StringVar(&portableS3AccessSecret, "s3-access-secret", "", "")
portableCmd.Flags().StringVar(&portableS3Endpoint, "s3-endpoint", "", "")
portableCmd.Flags().StringVar(&portableS3StorageClass, "s3-storage-class", "", "")
portableCmd.Flags().StringVar(&portableS3KeyPrefix, "s3-key-prefix", "", `Allows restricting access to the
virtual folder identified by this
prefix and its contents`)
portableCmd.Flags().IntVar(&portableS3ULPartSize, "s3-upload-part-size", 5, `The buffer size for multipart uploads
(MB)`)
portableCmd.Flags().IntVar(&portableS3ULConcurrency, "s3-upload-concurrency", 2, `How many parts are uploaded in
parallel`)
portableCmd.Flags().StringVar(&portableGCSBucket, "gcs-bucket", "", "")
portableCmd.Flags().StringVar(&portableGCSStorageClass, "gcs-storage-class", "", "")
portableCmd.Flags().StringVar(&portableGCSKeyPrefix, "gcs-key-prefix", "", `Allows restricting access to the
virtual folder identified by this
prefix and its contents`)
portableCmd.Flags().StringVar(&portableGCSCredentialsFile, "gcs-credentials-file", "", `Google Cloud Storage JSON credentials
file`)
portableCmd.Flags().IntVar(&portableGCSAutoCredentials, "gcs-automatic-credentials", 1, `0 means explicit credentials using
a JSON credentials file, 1 automatic
`)
portableCmd.Flags().StringVar(&portableFTPSCert, "ftpd-cert", "", "Path to the certificate file for FTPS")
portableCmd.Flags().StringVar(&portableFTPSKey, "ftpd-key", "", "Path to the key file for FTPS")
portableCmd.Flags().StringVar(&portableWebDAVCert, "webdav-cert", "", `Path to the certificate file for WebDAV
over HTTPS`)
portableCmd.Flags().StringVar(&portableWebDAVKey, "webdav-key", "", `Path to the key file for WebDAV over
HTTPS`)
rootCmd.AddCommand(portableCmd)
}
func parseFileExtensionsFilters() []dataprovider.ExtensionsFilter {
var extensions []dataprovider.ExtensionsFilter
for _, val := range portableAllowedExtensions {
p, exts := getExtensionsFilterValues(strings.TrimSpace(val))
if len(p) > 0 {
extensions = append(extensions, dataprovider.ExtensionsFilter{
Path: path.Clean(p),
AllowedExtensions: exts,
DeniedExtensions: []string{},
})
}
}
for _, val := range portableDeniedExtensions {
p, exts := getExtensionsFilterValues(strings.TrimSpace(val))
if len(p) > 0 {
found := false
for index, e := range extensions {
if path.Clean(e.Path) == path.Clean(p) {
extensions[index].DeniedExtensions = append(extensions[index].DeniedExtensions, exts...)
found = true
break
}
}
if !found {
extensions = append(extensions, dataprovider.ExtensionsFilter{
Path: path.Clean(p),
AllowedExtensions: []string{},
DeniedExtensions: exts,
})
}
}
}
return extensions
}
func getExtensionsFilterValues(value string) (string, []string) {
if strings.Contains(value, "::") {
dirExts := strings.Split(value, "::")
if len(dirExts) > 1 {
dir := strings.TrimSpace(dirExts[0])
exts := []string{}
for _, e := range strings.Split(dirExts[1], ",") {
cleanedExt := strings.TrimSpace(e)
if len(cleanedExt) > 0 {
exts = append(exts, cleanedExt)
}
}
if len(dir) > 0 && len(exts) > 0 {
return dir, exts
}
}
}
return "", nil
}
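The /dir::ext1,ext2 format accepted by these helpers is easiest to see with a concrete input. The standalone sketch below re-implements the unexported parsing logic for illustration only; it mirrors getExtensionsFilterValues rather than calling it.

package main

import (
	"fmt"
	"strings"
)

// parse mirrors getExtensionsFilterValues above: it splits a
// "/dir::ext1,ext2" value into a directory and its extensions,
// trimming whitespace and dropping empty entries.
func parse(value string) (string, []string) {
	if strings.Contains(value, "::") {
		dirExts := strings.Split(value, "::")
		if len(dirExts) > 1 {
			dir := strings.TrimSpace(dirExts[0])
			exts := []string{}
			for _, e := range strings.Split(dirExts[1], ",") {
				cleaned := strings.TrimSpace(e)
				if len(cleaned) > 0 {
					exts = append(exts, cleaned)
				}
			}
			if len(dir) > 0 && len(exts) > 0 {
				return dir, exts
			}
		}
	}
	return "", nil
}

func main() {
	dir, exts := parse("/somedir:: .jpg, .png")
	fmt.Println(dir, exts) // prints: /somedir [.jpg .png]
}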

cmd/portable_disabled.go Normal file

@@ -0,0 +1,9 @@
// +build noportable
package cmd
import "github.com/drakkan/sftpgo/version"
func init() {
version.AddFeature("-portable")
}

cmd/reload_windows.go Normal file

@@ -0,0 +1,35 @@
package cmd
import (
"fmt"
"os"
"github.com/spf13/cobra"
"github.com/drakkan/sftpgo/service"
)
var (
reloadCmd = &cobra.Command{
Use: "reload",
Short: "Reload the SFTPGo Windows Service sending a \"paramchange\" request",
Run: func(cmd *cobra.Command, args []string) {
s := service.WindowsService{
Service: service.Service{
Shutdown: make(chan bool),
},
}
err := s.Reload()
if err != nil {
fmt.Printf("Error sending reload signal: %v\r\n", err)
os.Exit(1)
} else {
fmt.Printf("Reload signal sent!\r\n")
}
},
}
)
func init() {
serviceCmd.AddCommand(reloadCmd)
}


@@ -1,29 +1,68 @@
// Package cmd provides Command Line Interface support
package cmd
import (
"fmt"
"os"
"github.com/drakkan/sftpgo/utils"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"github.com/drakkan/sftpgo/config"
"github.com/drakkan/sftpgo/version"
)
const (
logSender = "cmd"
configDirFlag = "config-dir"
configDirKey = "config_dir"
configFileFlag = "config-file"
configFileKey = "config_file"
logFilePathFlag = "log-file-path"
logFilePathKey = "log_file_path"
logMaxSizeFlag = "log-max-size"
logMaxSizeKey = "log_max_size"
logMaxBackupFlag = "log-max-backups"
logMaxBackupKey = "log_max_backups"
logMaxAgeFlag = "log-max-age"
logMaxAgeKey = "log_max_age"
logCompressFlag = "log-compress"
logCompressKey = "log_compress"
logVerboseFlag = "log-verbose"
logVerboseKey = "log_verbose"
profilerFlag = "profiler"
profilerKey = "profiler"
defaultConfigDir = "."
defaultConfigName = config.DefaultConfigName
defaultLogFile = "sftpgo.log"
defaultLogMaxSize = 10
defaultLogMaxBackup = 5
defaultLogMaxAge = 28
defaultLogCompress = false
defaultLogVerbose = true
defaultProfiler = false
)
var (
configDir string
configFile string
logFilePath string
logMaxSize int
logMaxBackups int
logMaxAge int
logCompress bool
logVerbose bool
profiler bool
rootCmd = &cobra.Command{
Use: "sftpgo",
Short: "Full featured and highly configurable SFTP server",
Short: "Fully featured and highly configurable SFTP server",
}
)
func init() {
version := utils.GetAppVersion()
rootCmd.Flags().BoolP("version", "v", false, "")
rootCmd.Version = version.GetVersionAsString()
rootCmd.SetVersionTemplate(`{{printf "SFTPGo version: "}}{{printf "%s" .Version}}
rootCmd.Version = version.GetAsString()
rootCmd.SetVersionTemplate(`{{printf "SFTPGo "}}{{printf "%s" .Version}}
`)
}
@@ -35,3 +74,102 @@ func Execute() {
os.Exit(1)
}
}
func addConfigFlags(cmd *cobra.Command) {
viper.SetDefault(configDirKey, defaultConfigDir)
viper.BindEnv(configDirKey, "SFTPGO_CONFIG_DIR") //nolint:errcheck // err is not nil only if the key to bind is missing
cmd.Flags().StringVarP(&configDir, configDirFlag, "c", viper.GetString(configDirKey),
`Location for SFTPGo config dir. This directory
should contain the "sftpgo" configuration file
or the configured config-file and it is used as
the base for files with a relative path (e.g. the
private keys for the SFTP server, the SQLite
database if you use SQLite as data provider).
This flag can be set using SFTPGO_CONFIG_DIR
env var too.`)
viper.BindPFlag(configDirKey, cmd.Flags().Lookup(configDirFlag)) //nolint:errcheck
viper.SetDefault(configFileKey, defaultConfigName)
viper.BindEnv(configFileKey, "SFTPGO_CONFIG_FILE") //nolint:errcheck
cmd.Flags().StringVarP(&configFile, configFileFlag, "f", viper.GetString(configFileKey),
`Name for SFTPGo configuration file. It must be
the name of a file stored in config-dir not the
absolute path to the configuration file. The
specified file name must have no extension; we
automatically load JSON, YAML, TOML, HCL and
Java properties. Therefore if you set "sftpgo"
then "sftpgo.json", "sftpgo.yaml" and so on
are searched.
This flag can be set using SFTPGO_CONFIG_FILE
env var too.`)
viper.BindPFlag(configFileKey, cmd.Flags().Lookup(configFileFlag)) //nolint:errcheck
}
func addServeFlags(cmd *cobra.Command) {
addConfigFlags(cmd)
viper.SetDefault(logFilePathKey, defaultLogFile)
viper.BindEnv(logFilePathKey, "SFTPGO_LOG_FILE_PATH") //nolint:errcheck
cmd.Flags().StringVarP(&logFilePath, logFilePathFlag, "l", viper.GetString(logFilePathKey),
`Location for the log file. Leave empty to write
logs to the standard output. This flag can be
set using SFTPGO_LOG_FILE_PATH env var too.
`)
viper.BindPFlag(logFilePathKey, cmd.Flags().Lookup(logFilePathFlag)) //nolint:errcheck
viper.SetDefault(logMaxSizeKey, defaultLogMaxSize)
viper.BindEnv(logMaxSizeKey, "SFTPGO_LOG_MAX_SIZE") //nolint:errcheck
cmd.Flags().IntVarP(&logMaxSize, logMaxSizeFlag, "s", viper.GetInt(logMaxSizeKey),
`Maximum size in megabytes of the log file
before it gets rotated. This flag can be set
using SFTPGO_LOG_MAX_SIZE env var too. It is
unused if log-file-path is empty.
`)
viper.BindPFlag(logMaxSizeKey, cmd.Flags().Lookup(logMaxSizeFlag)) //nolint:errcheck
viper.SetDefault(logMaxBackupKey, defaultLogMaxBackup)
viper.BindEnv(logMaxBackupKey, "SFTPGO_LOG_MAX_BACKUPS") //nolint:errcheck
cmd.Flags().IntVarP(&logMaxBackups, "log-max-backups", "b", viper.GetInt(logMaxBackupKey),
`Maximum number of old log files to retain.
This flag can be set using SFTPGO_LOG_MAX_BACKUPS
env var too. It is unused if log-file-path is
empty.`)
viper.BindPFlag(logMaxBackupKey, cmd.Flags().Lookup(logMaxBackupFlag)) //nolint:errcheck
viper.SetDefault(logMaxAgeKey, defaultLogMaxAge)
viper.BindEnv(logMaxAgeKey, "SFTPGO_LOG_MAX_AGE") //nolint:errcheck
cmd.Flags().IntVarP(&logMaxAge, "log-max-age", "a", viper.GetInt(logMaxAgeKey),
`Maximum number of days to retain old log files.
This flag can be set using SFTPGO_LOG_MAX_AGE env
var too. It is unused if log-file-path is empty.
`)
viper.BindPFlag(logMaxAgeKey, cmd.Flags().Lookup(logMaxAgeFlag)) //nolint:errcheck
viper.SetDefault(logCompressKey, defaultLogCompress)
viper.BindEnv(logCompressKey, "SFTPGO_LOG_COMPRESS") //nolint:errcheck
cmd.Flags().BoolVarP(&logCompress, logCompressFlag, "z", viper.GetBool(logCompressKey),
`Determines whether the rotated log files
should be compressed using gzip. This flag can
be set using SFTPGO_LOG_COMPRESS env var too.
It is unused if log-file-path is empty.
`)
viper.BindPFlag(logCompressKey, cmd.Flags().Lookup(logCompressFlag)) //nolint:errcheck
viper.SetDefault(logVerboseKey, defaultLogVerbose)
viper.BindEnv(logVerboseKey, "SFTPGO_LOG_VERBOSE") //nolint:errcheck
cmd.Flags().BoolVarP(&logVerbose, logVerboseFlag, "v", viper.GetBool(logVerboseKey),
`Enable verbose logs. This flag can be set
using SFTPGO_LOG_VERBOSE env var too.
`)
viper.BindPFlag(logVerboseKey, cmd.Flags().Lookup(logVerboseFlag)) //nolint:errcheck
viper.SetDefault(profilerKey, defaultProfiler)
viper.BindEnv(profilerKey, "SFTPGO_PROFILER") //nolint:errcheck
cmd.Flags().BoolVarP(&profiler, profilerFlag, "p", viper.GetBool(profilerKey),
`Enable the built-in profiler. The profiler will
be accessible via HTTP/HTTPS using the base URL
"/debug/pprof/".
This flag can be set using SFTPGO_PROFILER env
var too.`)
viper.BindPFlag(profilerKey, cmd.Flags().Lookup(profilerFlag)) //nolint:errcheck
}

cmd/rotatelogs_windows.go Normal file

@@ -0,0 +1,35 @@
package cmd
import (
"fmt"
"os"
"github.com/spf13/cobra"
"github.com/drakkan/sftpgo/service"
)
var (
rotateLogCmd = &cobra.Command{
Use: "rotatelogs",
Short: "Signal to the running service to rotate the logs",
Run: func(cmd *cobra.Command, args []string) {
s := service.WindowsService{
Service: service.Service{
Shutdown: make(chan bool),
},
}
err := s.RotateLogFile()
if err != nil {
fmt.Printf("Error sending rotate log file signal to the service: %v\r\n", err)
os.Exit(1)
} else {
fmt.Printf("Rotate log file signal sent!\r\n")
}
},
}
)
func init() {
serviceCmd.AddCommand(rotateLogCmd)
}


@@ -1,181 +1,49 @@
package cmd
import (
"fmt"
"net/http"
"os"
"time"
"github.com/drakkan/sftpgo/api"
"github.com/drakkan/sftpgo/config"
"github.com/drakkan/sftpgo/dataprovider"
"github.com/drakkan/sftpgo/logger"
"github.com/drakkan/sftpgo/sftpd"
"github.com/rs/zerolog"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
const (
configDirFlag = "config-dir"
configDirKey = "config_dir"
configFileFlag = "config-file"
configFileKey = "config_file"
logFilePathFlag = "log-file-path"
logFilePathKey = "log_file_path"
logMaxSizeFlag = "log-max-size"
logMaxSizeKey = "log_max_size"
logMaxBackupFlag = "log-max-backups"
logMaxBackupKey = "log_max_backups"
logMaxAgeFlag = "log-max-age"
logMaxAgeKey = "log_max_age"
logCompressFlag = "log-compress"
logCompressKey = "log_compress"
logVerboseFlag = "log-verbose"
logVerboseKey = "log_verbose"
"github.com/drakkan/sftpgo/service"
"github.com/drakkan/sftpgo/utils"
)
var (
configDir string
configFile string
logFilePath string
logMaxSize int
logMaxBackups int
logMaxAge int
logCompress bool
logVerbose bool
testVar string
serveCmd = &cobra.Command{
serveCmd = &cobra.Command{
Use: "serve",
Short: "Start the SFTP Server",
Long: `To start the SFTP Server with the default values for the command line flags simply use:
Long: `To start SFTPGo with the default values for the command line flags simply
use:
sftpgo serve
$ sftpgo serve
Please take a look at the usage below to customize the startup options`,
Run: func(cmd *cobra.Command, args []string) {
startServe()
service := service.Service{
ConfigDir: utils.CleanDirInput(configDir),
ConfigFile: configFile,
LogFilePath: logFilePath,
LogMaxSize: logMaxSize,
LogMaxBackups: logMaxBackups,
LogMaxAge: logMaxAge,
LogCompress: logCompress,
LogVerbose: logVerbose,
Profiler: profiler,
Shutdown: make(chan bool),
}
if err := service.Start(); err == nil {
service.Wait()
if service.Error == nil {
os.Exit(0)
}
}
os.Exit(1)
},
}
)
func init() {
rootCmd.AddCommand(serveCmd)
viper.SetDefault(configDirKey, ".")
viper.BindEnv(configDirKey, "SFTPGO_CONFIG_DIR")
serveCmd.Flags().StringVarP(&configDir, configDirFlag, "c", viper.GetString(configDirKey),
"Location for SFTPGo config dir. This directory should contain the \"sftpgo\" configuration file or the configured "+
"config-file and it is used as the base for files with a relative path (eg. the private keys for the SFTP server, "+
"the SQLite database if you use SQLite as data provider). This flag can be set using SFTPGO_CONFIG_DIR env var too.")
viper.BindPFlag(configDirKey, serveCmd.Flags().Lookup(configDirFlag))
viper.SetDefault(configFileKey, config.DefaultConfigName)
viper.BindEnv(configFileKey, "SFTPGO_CONFIG_FILE")
serveCmd.Flags().StringVarP(&configFile, configFileFlag, "f", viper.GetString(configFileKey),
"Name for SFTPGo configuration file. It must be the name of a file stored in config-dir not the absolute path to the "+
"configuration file. The specified file name must have no extension we automatically load JSON, YAML, TOML, HCL and "+
"Java properties. Therefore if you set \"sftpgo\" then \"sftpgo.json\", \"sftpgo.yaml\" and so on are searched. "+
"This flag can be set using SFTPGO_CONFIG_FILE env var too.")
viper.BindPFlag(configFileKey, serveCmd.Flags().Lookup(configFileFlag))
viper.SetDefault(logFilePathKey, "sftpgo.log")
viper.BindEnv(logFilePathKey, "SFTPGO_LOG_FILE_PATH")
serveCmd.Flags().StringVarP(&logFilePath, logFilePathFlag, "l", viper.GetString(logFilePathKey),
"Location for the log file. This flag can be set using SFTPGO_LOG_FILE_PATH env var too.")
viper.BindPFlag(logFilePathKey, serveCmd.Flags().Lookup(logFilePathFlag))
viper.SetDefault(logMaxSizeKey, 10)
viper.BindEnv(logMaxSizeKey, "SFTPGO_LOG_MAX_SIZE")
serveCmd.Flags().IntVarP(&logMaxSize, logMaxSizeFlag, "s", viper.GetInt(logMaxSizeKey),
"Maximum size in megabytes of the log file before it gets rotated. This flag can be set using SFTPGO_LOG_MAX_SIZE "+
"env var too.")
viper.BindPFlag(logMaxSizeKey, serveCmd.Flags().Lookup(logMaxSizeFlag))
viper.SetDefault(logMaxBackupKey, 5)
viper.BindEnv(logMaxBackupKey, "SFTPGO_LOG_MAX_BACKUPS")
serveCmd.Flags().IntVarP(&logMaxBackups, "log-max-backups", "b", viper.GetInt(logMaxBackupKey),
"Maximum number of old log files to retain. This flag can be set using SFTPGO_LOG_MAX_BACKUPS env var too.")
viper.BindPFlag(logMaxBackupKey, serveCmd.Flags().Lookup(logMaxBackupFlag))
viper.SetDefault(logMaxAgeKey, 28)
viper.BindEnv(logMaxAgeKey, "SFTPGO_LOG_MAX_AGE")
serveCmd.Flags().IntVarP(&logMaxAge, "log-max-age", "a", viper.GetInt(logMaxAgeKey),
"Maximum number of days to retain old log files. This flag can be set using SFTPGO_LOG_MAX_AGE env var too.")
viper.BindPFlag(logMaxAgeKey, serveCmd.Flags().Lookup(logMaxAgeFlag))
viper.SetDefault(logCompressKey, false)
viper.BindEnv(logCompressKey, "SFTPGO_LOG_COMPRESS")
serveCmd.Flags().BoolVarP(&logCompress, logCompressFlag, "z", viper.GetBool(logCompressKey), "Determine if the rotated "+
"log files should be compressed using gzip. This flag can be set using SFTPGO_LOG_COMPRESS env var too.")
viper.BindPFlag(logCompressKey, serveCmd.Flags().Lookup(logCompressFlag))
viper.SetDefault(logVerboseKey, true)
viper.BindEnv(logVerboseKey, "SFTPGO_LOG_VERBOSE")
serveCmd.Flags().BoolVarP(&logVerbose, logVerboseFlag, "v", viper.GetBool(logVerboseKey), "Enable verbose logs. "+
"This flag can be set using SFTPGO_LOG_VERBOSE env var too.")
viper.BindPFlag(logVerboseKey, serveCmd.Flags().Lookup(logVerboseFlag))
}
func startServe() {
logLevel := zerolog.DebugLevel
if !logVerbose {
logLevel = zerolog.InfoLevel
}
logger.InitLogger(logFilePath, logMaxSize, logMaxBackups, logMaxAge, logCompress, logLevel)
logger.Info(logSender, "starting SFTPGo, config dir: %v, config file: %v, log max size: %v log max backups: %v "+
"log max age: %v log verbose: %v, log compress: %v", configDir, configFile, logMaxSize, logMaxBackups, logMaxAge,
logVerbose, logCompress)
config.LoadConfig(configDir, configFile)
providerConf := config.GetProviderConf()
err := dataprovider.Initialize(providerConf, configDir)
if err != nil {
logger.Error(logSender, "error initializing data provider: %v", err)
logger.ErrorToConsole("error initializing data provider: %v", err)
os.Exit(1)
}
dataProvider := dataprovider.GetProvider()
sftpdConf := config.GetSFTPDConfig()
httpdConf := config.GetHTTPDConfig()
sftpd.SetDataProvider(dataProvider)
shutdown := make(chan bool)
go func() {
logger.Debug(logSender, "initializing SFTP server with config %+v", sftpdConf)
if err := sftpdConf.Initialize(configDir); err != nil {
logger.Error(logSender, "could not start SFTP server: %v", err)
logger.ErrorToConsole("could not start SFTP server: %v", err)
}
shutdown <- true
}()
if httpdConf.BindPort > 0 {
router := api.GetHTTPRouter()
api.SetDataProvider(dataProvider)
go func() {
logger.Debug(logSender, "initializing HTTP server with config %+v", httpdConf)
s := &http.Server{
Addr: fmt.Sprintf("%s:%d", httpdConf.BindAddress, httpdConf.BindPort),
Handler: router,
ReadTimeout: 300 * time.Second,
WriteTimeout: 300 * time.Second,
MaxHeaderBytes: 1 << 20, // 1MB
}
if err := s.ListenAndServe(); err != nil {
logger.Error(logSender, "could not start HTTP server: %v", err)
logger.ErrorToConsole("could not start HTTP server: %v", err)
}
shutdown <- true
}()
} else {
logger.Debug(logSender, "HTTP server not started, disabled in config file")
logger.DebugToConsole("HTTP server not started, disabled in config file")
}
<-shutdown
addServeFlags(serveCmd)
}

cmd/service_windows.go Normal file

@@ -0,0 +1,16 @@
package cmd
import (
"github.com/spf13/cobra"
)
var (
serviceCmd = &cobra.Command{
Use: "service",
Short: "Manage SFTPGo Windows Service",
}
)
func init() {
rootCmd.AddCommand(serviceCmd)
}

cmd/start_windows.go Normal file

@@ -0,0 +1,52 @@
package cmd
import (
"fmt"
"os"
"path/filepath"
"github.com/spf13/cobra"
"github.com/drakkan/sftpgo/service"
"github.com/drakkan/sftpgo/utils"
)
var (
startCmd = &cobra.Command{
Use: "start",
Short: "Start SFTPGo Windows Service",
Run: func(cmd *cobra.Command, args []string) {
configDir = utils.CleanDirInput(configDir)
if !filepath.IsAbs(logFilePath) && utils.IsFileInputValid(logFilePath) {
logFilePath = filepath.Join(configDir, logFilePath)
}
s := service.Service{
ConfigDir: configDir,
ConfigFile: configFile,
LogFilePath: logFilePath,
LogMaxSize: logMaxSize,
LogMaxBackups: logMaxBackups,
LogMaxAge: logMaxAge,
LogCompress: logCompress,
LogVerbose: logVerbose,
Profiler: profiler,
Shutdown: make(chan bool),
}
winService := service.WindowsService{
Service: s,
}
err := winService.RunService()
if err != nil {
fmt.Printf("Error starting service: %v\r\n", err)
os.Exit(1)
} else {
fmt.Printf("Service started!\r\n")
}
},
}
)
func init() {
serviceCmd.AddCommand(startCmd)
addServeFlags(startCmd)
}

cmd/status_windows.go Normal file

@@ -0,0 +1,35 @@
package cmd
import (
"fmt"
"os"
"github.com/spf13/cobra"
"github.com/drakkan/sftpgo/service"
)
var (
statusCmd = &cobra.Command{
Use: "status",
Short: "Retrieve the status for the SFTPGo Windows Service",
Run: func(cmd *cobra.Command, args []string) {
s := service.WindowsService{
Service: service.Service{
Shutdown: make(chan bool),
},
}
status, err := s.Status()
if err != nil {
fmt.Printf("Error querying service status: %v\r\n", err)
os.Exit(1)
} else {
fmt.Printf("Service status: %#v\r\n", status.String())
}
},
}
)
func init() {
serviceCmd.AddCommand(statusCmd)
}

cmd/stop_windows.go Normal file

@@ -0,0 +1,35 @@
package cmd
import (
"fmt"
"os"
"github.com/spf13/cobra"
"github.com/drakkan/sftpgo/service"
)
var (
stopCmd = &cobra.Command{
Use: "stop",
Short: "Stop SFTPGo Windows Service",
Run: func(cmd *cobra.Command, args []string) {
s := service.WindowsService{
Service: service.Service{
Shutdown: make(chan bool),
},
}
err := s.Stop()
if err != nil {
fmt.Printf("Error stopping service: %v\r\n", err)
os.Exit(1)
} else {
fmt.Printf("Service stopped!\r\n")
}
},
}
)
func init() {
serviceCmd.AddCommand(stopCmd)
}

cmd/uninstall_windows.go Normal file

@@ -0,0 +1,35 @@
package cmd
import (
"fmt"
"os"
"github.com/spf13/cobra"
"github.com/drakkan/sftpgo/service"
)
var (
uninstallCmd = &cobra.Command{
Use: "uninstall",
Short: "Uninstall SFTPGo Windows Service",
Run: func(cmd *cobra.Command, args []string) {
s := service.WindowsService{
Service: service.Service{
Shutdown: make(chan bool),
},
}
err := s.Uninstall()
if err != nil {
fmt.Printf("Error removing service: %v\r\n", err)
os.Exit(1)
} else {
fmt.Printf("Service uninstalled\r\n")
}
},
}
)
func init() {
serviceCmd.AddCommand(uninstallCmd)
}

common/actions.go Normal file

@@ -0,0 +1,157 @@
package common
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"net/http"
"net/url"
"os"
"os/exec"
"path/filepath"
"strings"
"time"
"github.com/drakkan/sftpgo/dataprovider"
"github.com/drakkan/sftpgo/httpclient"
"github.com/drakkan/sftpgo/logger"
"github.com/drakkan/sftpgo/utils"
)
var (
errUnconfiguredAction = errors.New("no hook is configured for this action")
errNoHook = errors.New("unable to execute action, no hook defined")
errUnexpectedHTTResponse = errors.New("unexpected HTTP response code")
)
// ProtocolActions defines the actions to execute on file operations and SSH commands
type ProtocolActions struct {
// Valid values are download, upload, pre-delete, delete, rename, ssh_cmd. Empty slice to disable
ExecuteOn []string `json:"execute_on" mapstructure:"execute_on"`
// Absolute path to an external program or an HTTP URL
Hook string `json:"hook" mapstructure:"hook"`
}
// actionNotification defines a notification for a Protocol Action
type actionNotification struct {
Action string `json:"action"`
Username string `json:"username"`
Path string `json:"path"`
TargetPath string `json:"target_path,omitempty"`
SSHCmd string `json:"ssh_cmd,omitempty"`
FileSize int64 `json:"file_size,omitempty"`
FsProvider int `json:"fs_provider"`
Bucket string `json:"bucket,omitempty"`
Endpoint string `json:"endpoint,omitempty"`
Status int `json:"status"`
Protocol string `json:"protocol"`
}
// SSHCommandActionNotification executes the defined action for the specified SSH command
func SSHCommandActionNotification(user *dataprovider.User, filePath, target, sshCmd string, err error) {
action := newActionNotification(user, operationSSHCmd, filePath, target, sshCmd, ProtocolSSH, 0, err)
go action.execute() //nolint:errcheck
}
func newActionNotification(user *dataprovider.User, operation, filePath, target, sshCmd, protocol string, fileSize int64,
err error) actionNotification {
bucket := ""
endpoint := ""
status := 1
if user.FsConfig.Provider == dataprovider.S3FilesystemProvider {
bucket = user.FsConfig.S3Config.Bucket
endpoint = user.FsConfig.S3Config.Endpoint
} else if user.FsConfig.Provider == dataprovider.GCSFilesystemProvider {
bucket = user.FsConfig.GCSConfig.Bucket
}
if err == ErrQuotaExceeded {
status = 2
} else if err != nil {
status = 0
}
return actionNotification{
Action: operation,
Username: user.Username,
Path: filePath,
TargetPath: target,
SSHCmd: sshCmd,
FileSize: fileSize,
FsProvider: int(user.FsConfig.Provider),
Bucket: bucket,
Endpoint: endpoint,
Status: status,
Protocol: protocol,
}
}
func (a *actionNotification) asJSON() []byte {
res, _ := json.Marshal(a)
return res
}
func (a *actionNotification) asEnvVars() []string {
return []string{fmt.Sprintf("SFTPGO_ACTION=%v", a.Action),
fmt.Sprintf("SFTPGO_ACTION_USERNAME=%v", a.Username),
fmt.Sprintf("SFTPGO_ACTION_PATH=%v", a.Path),
fmt.Sprintf("SFTPGO_ACTION_TARGET=%v", a.TargetPath),
fmt.Sprintf("SFTPGO_ACTION_SSH_CMD=%v", a.SSHCmd),
fmt.Sprintf("SFTPGO_ACTION_FILE_SIZE=%v", a.FileSize),
fmt.Sprintf("SFTPGO_ACTION_FS_PROVIDER=%v", a.FsProvider),
fmt.Sprintf("SFTPGO_ACTION_BUCKET=%v", a.Bucket),
fmt.Sprintf("SFTPGO_ACTION_ENDPOINT=%v", a.Endpoint),
fmt.Sprintf("SFTPGO_ACTION_STATUS=%v", a.Status),
fmt.Sprintf("SFTPGO_ACTION_PROTOCOL=%v", a.Protocol),
}
}
func (a *actionNotification) executeNotificationCommand() error {
if !filepath.IsAbs(Config.Actions.Hook) {
err := fmt.Errorf("invalid notification command %#v", Config.Actions.Hook)
logger.Warn(a.Protocol, "", "unable to execute notification command: %v", err)
return err
}
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
cmd := exec.CommandContext(ctx, Config.Actions.Hook, a.Action, a.Username, a.Path, a.TargetPath, a.SSHCmd)
cmd.Env = append(os.Environ(), a.asEnvVars()...)
startTime := time.Now()
err := cmd.Run()
logger.Debug(a.Protocol, "", "executed command %#v with arguments: %#v, %#v, %#v, %#v, %#v, elapsed: %v, error: %v",
Config.Actions.Hook, a.Action, a.Username, a.Path, a.TargetPath, a.SSHCmd, time.Since(startTime), err)
return err
}
func (a *actionNotification) execute() error {
if !utils.IsStringInSlice(a.Action, Config.Actions.ExecuteOn) {
return errUnconfiguredAction
}
if len(Config.Actions.Hook) == 0 {
logger.Warn(a.Protocol, "", "Unable to send notification, no hook is defined")
return errNoHook
}
if strings.HasPrefix(Config.Actions.Hook, "http") {
var url *url.URL
url, err := url.Parse(Config.Actions.Hook)
if err != nil {
logger.Warn(a.Protocol, "", "Invalid hook %#v for operation %#v: %v", Config.Actions.Hook, a.Action, err)
return err
}
startTime := time.Now()
httpClient := httpclient.GetHTTPClient()
resp, err := httpClient.Post(url.String(), "application/json", bytes.NewBuffer(a.asJSON()))
respCode := 0
if err == nil {
respCode = resp.StatusCode
resp.Body.Close()
if respCode != http.StatusOK {
err = errUnexpectedHTTResponse
}
}
logger.Debug(a.Protocol, "", "notified operation %#v to URL: %v status code: %v, elapsed: %v err: %v",
a.Action, url.String(), respCode, time.Since(startTime), err)
return err
}
return a.executeNotificationCommand()
}
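To illustrate the HTTP side of the hook contract implemented above: the sender POSTs the JSON produced by asJSON and treats any response other than 200 as errUnexpectedHTTResponse. A minimal sketch of a receiver follows; the struct simply mirrors the actionNotification JSON tags, and the listen address is an assumption for the example.

package main

import (
	"encoding/json"
	"log"
	"net/http"
)

// notification mirrors the actionNotification JSON tags above.
type notification struct {
	Action     string `json:"action"`
	Username   string `json:"username"`
	Path       string `json:"path"`
	TargetPath string `json:"target_path,omitempty"`
	SSHCmd     string `json:"ssh_cmd,omitempty"`
	FileSize   int64  `json:"file_size,omitempty"`
	FsProvider int    `json:"fs_provider"`
	Bucket     string `json:"bucket,omitempty"`
	Endpoint   string `json:"endpoint,omitempty"`
	Status     int    `json:"status"`
	Protocol   string `json:"protocol"`
}

func main() {
	http.HandleFunc("/hook", func(w http.ResponseWriter, r *http.Request) {
		var n notification
		if err := json.NewDecoder(r.Body).Decode(&n); err != nil {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		log.Printf("action=%s user=%s path=%s status=%d", n.Action, n.Username, n.Path, n.Status)
		// Reply 200: the sender treats any other status code as an error.
		w.WriteHeader(http.StatusOK)
	})
	log.Fatal(http.ListenAndServe(":8000", nil)) // assumed port for this sketch
}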

common/actions_test.go Normal file

@@ -0,0 +1,181 @@
package common
import (
"errors"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"runtime"
"testing"
"github.com/stretchr/testify/assert"
"github.com/drakkan/sftpgo/dataprovider"
"github.com/drakkan/sftpgo/vfs"
)
func TestNewActionNotification(t *testing.T) {
user := &dataprovider.User{
Username: "username",
}
user.FsConfig.Provider = dataprovider.LocalFilesystemProvider
user.FsConfig.S3Config = vfs.S3FsConfig{
Bucket: "s3bucket",
Endpoint: "endpoint",
}
user.FsConfig.GCSConfig = vfs.GCSFsConfig{
Bucket: "gcsbucket",
}
a := newActionNotification(user, operationDownload, "path", "target", "", ProtocolSFTP, 123, errors.New("fake error"))
assert.Equal(t, user.Username, a.Username)
assert.Equal(t, 0, len(a.Bucket))
assert.Equal(t, 0, len(a.Endpoint))
assert.Equal(t, 0, a.Status)
user.FsConfig.Provider = dataprovider.S3FilesystemProvider
a = newActionNotification(user, operationDownload, "path", "target", "", ProtocolSSH, 123, nil)
assert.Equal(t, "s3bucket", a.Bucket)
assert.Equal(t, "endpoint", a.Endpoint)
assert.Equal(t, 1, a.Status)
user.FsConfig.Provider = dataprovider.GCSFilesystemProvider
a = newActionNotification(user, operationDownload, "path", "target", "", ProtocolSCP, 123, ErrQuotaExceeded)
assert.Equal(t, "gcsbucket", a.Bucket)
assert.Equal(t, 0, len(a.Endpoint))
assert.Equal(t, 2, a.Status)
}
func TestActionHTTP(t *testing.T) {
actionsCopy := Config.Actions
Config.Actions = ProtocolActions{
ExecuteOn: []string{operationDownload},
Hook: fmt.Sprintf("http://%v", httpAddr),
}
user := &dataprovider.User{
Username: "username",
}
a := newActionNotification(user, operationDownload, "path", "target", "", ProtocolSFTP, 123, nil)
err := a.execute()
assert.NoError(t, err)
Config.Actions.Hook = "http://invalid:1234"
err = a.execute()
assert.Error(t, err)
Config.Actions.Hook = fmt.Sprintf("http://%v/404", httpAddr)
err = a.execute()
if assert.Error(t, err) {
assert.EqualError(t, err, errUnexpectedHTTResponse.Error())
}
Config.Actions = actionsCopy
}
func TestActionCMD(t *testing.T) {
if runtime.GOOS == osWindows {
t.Skip("this test is not available on Windows")
}
actionsCopy := Config.Actions
hookCmd, err := exec.LookPath("true")
assert.NoError(t, err)
Config.Actions = ProtocolActions{
ExecuteOn: []string{operationDownload},
Hook: hookCmd,
}
user := &dataprovider.User{
Username: "username",
}
a := newActionNotification(user, operationDownload, "path", "target", "", ProtocolSFTP, 123, nil)
err = a.execute()
assert.NoError(t, err)
SSHCommandActionNotification(user, "path", "target", "sha1sum", nil)
Config.Actions = actionsCopy
}
func TestWrongActions(t *testing.T) {
actionsCopy := Config.Actions
badCommand := "/bad/command"
if runtime.GOOS == osWindows {
badCommand = "C:\\bad\\command"
}
Config.Actions = ProtocolActions{
ExecuteOn: []string{operationUpload},
Hook: badCommand,
}
user := &dataprovider.User{
Username: "username",
}
a := newActionNotification(user, operationUpload, "", "", "", ProtocolSFTP, 123, nil)
err := a.execute()
assert.Error(t, err, "action with bad command must fail")
a.Action = operationDelete
err = a.execute()
assert.EqualError(t, err, errUnconfiguredAction.Error())
Config.Actions.Hook = "http://foo\x7f.com/"
a.Action = operationUpload
err = a.execute()
assert.Error(t, err, "action with bad url must fail")
Config.Actions.Hook = ""
err = a.execute()
if assert.Error(t, err) {
assert.EqualError(t, err, errNoHook.Error())
}
Config.Actions.Hook = "relative path"
err = a.execute()
if assert.Error(t, err) {
assert.EqualError(t, err, fmt.Sprintf("invalid notification command %#v", Config.Actions.Hook))
}
Config.Actions = actionsCopy
}
func TestPreDeleteAction(t *testing.T) {
if runtime.GOOS == osWindows {
t.Skip("this test is not available on Windows")
}
actionsCopy := Config.Actions
hookCmd, err := exec.LookPath("true")
assert.NoError(t, err)
Config.Actions = ProtocolActions{
ExecuteOn: []string{operationPreDelete},
Hook: hookCmd,
}
homeDir := filepath.Join(os.TempDir(), "test_user")
err = os.MkdirAll(homeDir, os.ModePerm)
assert.NoError(t, err)
user := dataprovider.User{
Username: "username",
HomeDir: homeDir,
}
user.Permissions = make(map[string][]string)
user.Permissions["/"] = []string{dataprovider.PermAny}
fs := vfs.NewOsFs("id", homeDir, nil)
c := NewBaseConnection("id", ProtocolSFTP, user, fs)
testfile := filepath.Join(user.HomeDir, "testfile")
err = ioutil.WriteFile(testfile, []byte("test"), os.ModePerm)
assert.NoError(t, err)
info, err := os.Stat(testfile)
assert.NoError(t, err)
err = c.RemoveFile(testfile, "testfile", info)
assert.NoError(t, err)
assert.FileExists(t, testfile)
os.RemoveAll(homeDir)
Config.Actions = actionsCopy
}

common/common.go Normal file

@@ -0,0 +1,749 @@
// Package common defines code shared among file transfer packages and protocols
package common
import (
"context"
"errors"
"fmt"
"net"
"net/http"
"net/url"
"os"
"os/exec"
"path/filepath"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/pires/go-proxyproto"
"github.com/drakkan/sftpgo/dataprovider"
"github.com/drakkan/sftpgo/httpclient"
"github.com/drakkan/sftpgo/logger"
"github.com/drakkan/sftpgo/metrics"
"github.com/drakkan/sftpgo/utils"
)
// constants
const (
logSender = "common"
uploadLogSender = "Upload"
downloadLogSender = "Download"
renameLogSender = "Rename"
rmdirLogSender = "Rmdir"
mkdirLogSender = "Mkdir"
symlinkLogSender = "Symlink"
removeLogSender = "Remove"
chownLogSender = "Chown"
chmodLogSender = "Chmod"
chtimesLogSender = "Chtimes"
truncateLogSender = "Truncate"
operationDownload = "download"
operationUpload = "upload"
operationDelete = "delete"
operationPreDelete = "pre-delete"
operationRename = "rename"
operationSSHCmd = "ssh_cmd"
chtimesFormat = "2006-01-02T15:04:05" // YYYY-MM-DDTHH:MM:SS
idleTimeoutCheckInterval = 3 * time.Minute
)
// Stat flags
const (
StatAttrUIDGID = 1
StatAttrPerms = 2
StatAttrTimes = 4
StatAttrSize = 8
)
// Transfer types
const (
TransferUpload = iota
TransferDownload
)
// Supported protocols
const (
ProtocolSFTP = "SFTP"
ProtocolSCP = "SCP"
ProtocolSSH = "SSH"
ProtocolFTP = "FTP"
ProtocolWebDAV = "DAV"
)
// Upload modes
const (
UploadModeStandard = iota
UploadModeAtomic
UploadModeAtomicWithResume
)
// error definitions
var (
ErrPermissionDenied = errors.New("permission denied")
ErrNotExist = errors.New("no such file or directory")
ErrOpUnsupported = errors.New("operation unsupported")
ErrGenericFailure = errors.New("failure")
ErrQuotaExceeded = errors.New("denying write due to space limit")
ErrSkipPermissionsCheck = errors.New("permission check skipped")
ErrConnectionDenied = errors.New("You are not allowed to connect")
errNoTransfer = errors.New("requested transfer not found")
errTransferMismatch = errors.New("transfer mismatch")
)
var (
// Config is the configuration for the supported protocols
Config Configuration
// Connections is the list of active connections
Connections ActiveConnections
// QuotaScans is the list of active quota scans
QuotaScans ActiveScans
idleTimeoutTicker *time.Ticker
idleTimeoutTickerDone chan bool
supportedProtocols = []string{ProtocolSFTP, ProtocolSCP, ProtocolSSH, ProtocolFTP, ProtocolWebDAV}
)
// Initialize sets the common configuration
func Initialize(c Configuration) {
Config = c
Config.idleLoginTimeout = 2 * time.Minute
Config.idleTimeoutAsDuration = time.Duration(Config.IdleTimeout) * time.Minute
if Config.IdleTimeout > 0 {
startIdleTimeoutTicker(idleTimeoutCheckInterval)
}
}
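// Example (illustrative sketch, not part of the original file): a caller
// could wire the common configuration at service startup like this:
//
//	common.Initialize(common.Configuration{
//		IdleTimeout: 15,                      // minutes, 0 disables the idle checks
//		UploadMode:  common.UploadModeAtomic,
//	})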
func startIdleTimeoutTicker(duration time.Duration) {
stopIdleTimeoutTicker()
idleTimeoutTicker = time.NewTicker(duration)
idleTimeoutTickerDone = make(chan bool)
go func() {
for {
select {
case <-idleTimeoutTickerDone:
return
case <-idleTimeoutTicker.C:
Connections.checkIdleConnections()
}
}
}()
}
func stopIdleTimeoutTicker() {
if idleTimeoutTicker != nil {
idleTimeoutTicker.Stop()
idleTimeoutTickerDone <- true
idleTimeoutTicker = nil
}
}
// ActiveTransfer defines the interface for the current active transfers
type ActiveTransfer interface {
GetID() uint64
GetType() int
GetSize() int64
GetVirtualPath() string
GetStartTime() time.Time
SignalClose()
Truncate(fsPath string, size int64) (int64, error)
GetRealFsPath(fsPath string) string
}
// ActiveConnection defines the interface for the current active connections
type ActiveConnection interface {
GetID() string
GetUsername() string
GetRemoteAddress() string
GetClientVersion() string
GetProtocol() string
GetConnectionTime() time.Time
GetLastActivity() time.Time
GetCommand() string
Disconnect() error
AddTransfer(t ActiveTransfer)
RemoveTransfer(t ActiveTransfer)
GetTransfers() []ConnectionTransfer
}
// StatAttributes defines the attributes for set stat commands
type StatAttributes struct {
Mode os.FileMode
Atime time.Time
Mtime time.Time
UID int
GID int
Flags int
Size int64
}
// ConnectionTransfer defines the transfer details to expose
type ConnectionTransfer struct {
ID uint64 `json:"-"`
OperationType string `json:"operation_type"`
StartTime int64 `json:"start_time"`
Size int64 `json:"size"`
VirtualPath string `json:"path"`
}
func (t *ConnectionTransfer) getConnectionTransferAsString() string {
result := ""
switch t.OperationType {
case operationUpload:
result += "UL "
case operationDownload:
result += "DL "
}
result += fmt.Sprintf("%#v ", t.VirtualPath)
if t.Size > 0 {
elapsed := time.Since(utils.GetTimeFromMsecSinceEpoch(t.StartTime))
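// Size is in bytes and the elapsed time is in milliseconds, so this value is numerically SI KB/s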
speed := float64(t.Size) / float64(utils.GetTimeAsMsSinceEpoch(time.Now())-t.StartTime)
result += fmt.Sprintf("Size: %#v Elapsed: %#v Speed: \"%.1f KB/s\"", utils.ByteCountSI(t.Size),
utils.GetDurationAsString(elapsed), speed)
}
return result
}
// Configuration defines configuration parameters common to all supported protocols
type Configuration struct {
// Maximum idle timeout in minutes. If a client is idle for longer than this setting it will be disconnected.
// 0 means disabled
IdleTimeout int `json:"idle_timeout" mapstructure:"idle_timeout"`
// UploadMode 0 means standard: the files are uploaded directly to the requested path.
// 1 means atomic: the files are uploaded to a temporary path and renamed to the requested path
// when the client ends the upload. Atomic mode avoids problems such as a web server that
// serves partial files while they are being uploaded.
// In atomic mode, if there is an upload error the temporary file is deleted, so the requested
// upload path will not contain a partial file.
// 2 means atomic with resume support: as atomic, but if there is an upload error the temporary
// file is renamed to the requested path and not deleted; this way a client can reconnect and resume
// the upload.
UploadMode int `json:"upload_mode" mapstructure:"upload_mode"`
// Actions to execute for SFTP file operations and SSH commands
Actions ProtocolActions `json:"actions" mapstructure:"actions"`
// SetstatMode 0 means "normal mode": requests for changing permissions and owner/group are executed.
// 1 means "ignore mode": requests for changing permissions and owner/group are silently ignored.
SetstatMode int `json:"setstat_mode" mapstructure:"setstat_mode"`
// Support for HAProxy PROXY protocol.
// If you are running SFTPGo behind a proxy server such as HAProxy, AWS ELB or NGINX, you can enable
// the proxy protocol. It provides a convenient way to safely transport connection information
// such as a client's address across multiple layers of NAT or TCP proxies to get the real
// client IP address instead of the proxy IP. Both protocol versions 1 and 2 are supported.
// - 0 means disabled
// - 1 means proxy protocol enabled. Proxy header will be used and requests without proxy header will be accepted.
// - 2 means proxy protocol required. Proxy header will be used and requests without proxy header will be rejected.
// If the proxy protocol is enabled in SFTPGo then you have to enable the protocol in your proxy configuration too,
// for example for HAProxy add "send-proxy" or "send-proxy-v2" to each server configuration line.
ProxyProtocol int `json:"proxy_protocol" mapstructure:"proxy_protocol"`
// List of IP addresses and IP ranges allowed to send the proxy header.
// If proxy protocol is set to 1 and we receive a proxy header from an IP that is not in the list then the
// connection will be accepted and the header will be ignored.
// If proxy protocol is set to 2 and we receive a proxy header from an IP that is not in the list then the
// connection will be rejected.
ProxyAllowed []string `json:"proxy_allowed" mapstructure:"proxy_allowed"`
// Absolute path to an external program or an HTTP URL to invoke after a user connects
// and before they try to log in. It allows you to reject the connection based on the source
// IP address. Leave empty to disable.
PostConnectHook string `json:"post_connect_hook" mapstructure:"post_connect_hook"`
idleTimeoutAsDuration time.Duration
idleLoginTimeout time.Duration
}
// IsAtomicUploadEnabled returns true if atomic upload is enabled
func (c *Configuration) IsAtomicUploadEnabled() bool {
return c.UploadMode == UploadModeAtomic || c.UploadMode == UploadModeAtomicWithResume
}
// GetProxyListener returns a wrapper for the given listener that supports the
// HAProxy Proxy Protocol or nil if the proxy protocol is not configured
func (c *Configuration) GetProxyListener(listener net.Listener) (*proxyproto.Listener, error) {
var proxyListener *proxyproto.Listener
var err error
if c.ProxyProtocol > 0 {
var policyFunc func(upstream net.Addr) (proxyproto.Policy, error)
if c.ProxyProtocol == 1 && len(c.ProxyAllowed) > 0 {
policyFunc, err = proxyproto.LaxWhiteListPolicy(c.ProxyAllowed)
if err != nil {
return nil, err
}
}
if c.ProxyProtocol == 2 {
if len(c.ProxyAllowed) == 0 {
policyFunc = func(upstream net.Addr) (proxyproto.Policy, error) {
return proxyproto.REQUIRE, nil
}
} else {
policyFunc, err = proxyproto.StrictWhiteListPolicy(c.ProxyAllowed)
if err != nil {
return nil, err
}
}
}
proxyListener = &proxyproto.Listener{
Listener: listener,
Policy: policyFunc,
}
}
return proxyListener, nil
}
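// Example (illustrative sketch, not part of the original file): wrapping a
// plain TCP listener, assuming a Configuration value named config with
// ProxyProtocol > 0:
//
//	listener, err := net.Listen("tcp", ":2121")
//	if err != nil {
//		return err
//	}
//	proxyListener, err := config.GetProxyListener(listener)
//	if err != nil {
//		return err // invalid proxy_allowed entries
//	}
//	if proxyListener != nil {
//		// accept connections from proxyListener instead of the plain listener
//	}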
// ExecutePostConnectHook executes the post connect hook if defined
func (c *Configuration) ExecutePostConnectHook(remoteAddr, protocol string) error {
if len(c.PostConnectHook) == 0 {
return nil
}
ip := utils.GetIPFromRemoteAddress(remoteAddr)
if strings.HasPrefix(c.PostConnectHook, "http") {
hookURL, err := url.Parse(c.PostConnectHook)
if err != nil {
logger.Warn(protocol, "", "Login from ip %#v denied, invalid post connect hook %#v: %v",
ip, c.PostConnectHook, err)
return err
}
httpClient := httpclient.GetHTTPClient()
q := hookURL.Query()
q.Add("ip", ip)
q.Add("protocol", protocol)
hookURL.RawQuery = q.Encode()
resp, err := httpClient.Get(hookURL.String())
if err != nil {
logger.Warn(protocol, "", "Login from ip %#v denied, error executing post connect hook: %v", ip, err)
return err
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
logger.Warn(protocol, "", "Login from ip %#v denied, post connect hook response code: %v", ip, resp.StatusCode)
return errUnexpectedHTTResponse
}
return nil
}
if !filepath.IsAbs(c.PostConnectHook) {
err := fmt.Errorf("invalid post connect hook %#v", c.PostConnectHook)
logger.Warn(protocol, "", "Login from ip %#v denied: %v", ip, err)
return err
}
ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
defer cancel()
cmd := exec.CommandContext(ctx, c.PostConnectHook)
cmd.Env = append(os.Environ(),
fmt.Sprintf("SFTPGO_CONNECTION_IP=%v", ip),
fmt.Sprintf("SFTPGO_CONNECTION_PROTOCOL=%v", protocol))
err := cmd.Run()
if err != nil {
logger.Warn(protocol, "", "Login from ip %#v denied, connect hook error: %v", ip, err)
}
return err
}
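// Example (illustrative sketch, not part of the original file): the HTTP
// variant of the hook receives "ip" and "protocol" as query parameters and
// must reply with status 200 to allow the connection. A hypothetical hook
// server could look like this:
//
//	http.HandleFunc("/hook", func(w http.ResponseWriter, r *http.Request) {
//		if r.URL.Query().Get("ip") == "203.0.113.7" { // hypothetical blocked address
//			w.WriteHeader(http.StatusForbidden)
//			return
//		}
//		w.WriteHeader(http.StatusOK)
//	})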
// SSHConnection defines an ssh connection.
// Each SSH connection can open several channels for SFTP or SSH commands
type SSHConnection struct {
id string
conn net.Conn
lastActivity int64
}
// NewSSHConnection returns a new SSHConnection
func NewSSHConnection(id string, conn net.Conn) *SSHConnection {
return &SSHConnection{
id: id,
conn: conn,
lastActivity: time.Now().UnixNano(),
}
}
// GetID returns the ID for this SSHConnection
func (c *SSHConnection) GetID() string {
return c.id
}
// UpdateLastActivity updates last activity for this connection
func (c *SSHConnection) UpdateLastActivity() {
atomic.StoreInt64(&c.lastActivity, time.Now().UnixNano())
}
// GetLastActivity returns the last connection activity
func (c *SSHConnection) GetLastActivity() time.Time {
return time.Unix(0, atomic.LoadInt64(&c.lastActivity))
}
// Close closes the underlying network connection
func (c *SSHConnection) Close() error {
return c.conn.Close()
}
// ActiveConnections holds the current active connections with the associated transfers
type ActiveConnections struct {
sync.RWMutex
connections []ActiveConnection
sshConnections []*SSHConnection
}
// GetActiveSessions returns the number of active sessions for the given username.
// We return the open sessions for any protocol
func (conns *ActiveConnections) GetActiveSessions(username string) int {
conns.RLock()
defer conns.RUnlock()
numSessions := 0
for _, c := range conns.connections {
if c.GetUsername() == username {
numSessions++
}
}
return numSessions
}
// Add adds a new connection to the active ones
func (conns *ActiveConnections) Add(c ActiveConnection) {
conns.Lock()
defer conns.Unlock()
conns.connections = append(conns.connections, c)
metrics.UpdateActiveConnectionsSize(len(conns.connections))
logger.Debug(c.GetProtocol(), c.GetID(), "connection added, num open connections: %v", len(conns.connections))
}
// Swap replaces an existing connection with the given one.
// This method is useful if you need to change some connection details;
// for example, FTP uses it to update the connection once the user
// authenticates
func (conns *ActiveConnections) Swap(c ActiveConnection) error {
conns.Lock()
defer conns.Unlock()
for idx, conn := range conns.connections {
if conn.GetID() == c.GetID() {
conns.connections[idx] = c
return nil
}
}
return errors.New("connection to swap not found")
}
// Remove removes a connection from the active ones
func (conns *ActiveConnections) Remove(connectionID string) {
conns.Lock()
defer conns.Unlock()
var c ActiveConnection
indexToRemove := -1
for i, conn := range conns.connections {
if conn.GetID() == connectionID {
indexToRemove = i
c = conn
break
}
}
if indexToRemove >= 0 {
conns.connections[indexToRemove] = conns.connections[len(conns.connections)-1]
conns.connections[len(conns.connections)-1] = nil
conns.connections = conns.connections[:len(conns.connections)-1]
metrics.UpdateActiveConnectionsSize(len(conns.connections))
logger.Debug(c.GetProtocol(), c.GetID(), "connection removed, num open connections: %v", len(conns.connections))
} else {
logger.Warn(logSender, "", "connection to remove with id %#v not found!", connectionID)
}
}
// Close closes an active connection.
// It returns true on success
func (conns *ActiveConnections) Close(connectionID string) bool {
conns.RLock()
result := false
for _, c := range conns.connections {
if c.GetID() == connectionID {
defer func(conn ActiveConnection) {
err := conn.Disconnect()
logger.Debug(conn.GetProtocol(), conn.GetID(), "close connection requested, close err: %v", err)
}(c)
result = true
break
}
}
conns.RUnlock()
return result
}
// AddSSHConnection adds a new ssh connection to the active ones
func (conns *ActiveConnections) AddSSHConnection(c *SSHConnection) {
conns.Lock()
defer conns.Unlock()
conns.sshConnections = append(conns.sshConnections, c)
logger.Debug(logSender, c.GetID(), "ssh connection added, num open connections: %v", len(conns.sshConnections))
}
// RemoveSSHConnection removes a connection from the active ones
func (conns *ActiveConnections) RemoveSSHConnection(connectionID string) {
conns.Lock()
defer conns.Unlock()
var c *SSHConnection
indexToRemove := -1
for i, conn := range conns.sshConnections {
if conn.GetID() == connectionID {
indexToRemove = i
c = conn
break
}
}
if indexToRemove >= 0 {
conns.sshConnections[indexToRemove] = conns.sshConnections[len(conns.sshConnections)-1]
conns.sshConnections[len(conns.sshConnections)-1] = nil
conns.sshConnections = conns.sshConnections[:len(conns.sshConnections)-1]
logger.Debug(logSender, c.GetID(), "ssh connection removed, num open ssh connections: %v", len(conns.sshConnections))
} else {
logger.Warn(logSender, "", "ssh connection to remove with id %#v not found!", connectionID)
}
}
func (conns *ActiveConnections) checkIdleConnections() {
conns.RLock()
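// the disconnects below are deferred so they run only after RUnlock:
// closing a connection can trigger Remove, which takes the write lock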
for _, sshConn := range conns.sshConnections {
idleTime := time.Since(sshConn.GetLastActivity())
if idleTime > Config.idleTimeoutAsDuration {
// we close an ssh connection if it has no active connections associated with it
idToMatch := fmt.Sprintf("_%v_", sshConn.GetID())
toClose := true
for _, conn := range conns.connections {
if strings.Contains(conn.GetID(), idToMatch) {
toClose = false
break
}
}
if toClose {
defer func(c *SSHConnection) {
err := c.Close()
logger.Debug(logSender, c.GetID(), "close idle SSH connection, idle time: %v, close err: %v",
time.Since(c.GetLastActivity()), err)
}(sshConn)
}
}
}
for _, c := range conns.connections {
idleTime := time.Since(c.GetLastActivity())
isUnauthenticatedFTPUser := (c.GetProtocol() == ProtocolFTP && len(c.GetUsername()) == 0)
if idleTime > Config.idleTimeoutAsDuration || (isUnauthenticatedFTPUser && idleTime > Config.idleLoginTimeout) {
defer func(conn ActiveConnection, isFTPNoAuth bool) {
err := conn.Disconnect()
logger.Debug(conn.GetProtocol(), conn.GetID(), "close idle connection, idle time: %v, username: %#v close err: %v",
time.Since(conn.GetLastActivity()), conn.GetUsername(), err)
if isFTPNoAuth {
ip := utils.GetIPFromRemoteAddress(c.GetRemoteAddress())
logger.ConnectionFailedLog("", ip, dataprovider.LoginMethodNoAuthTryed, c.GetProtocol(), "client idle")
metrics.AddNoAuthTryed()
dataprovider.ExecutePostLoginHook("", dataprovider.LoginMethodNoAuthTryed, ip, c.GetProtocol(),
dataprovider.ErrNoAuthTryed)
}
}(c, isUnauthenticatedFTPUser)
}
}
conns.RUnlock()
}
// GetStats returns stats for active connections
func (conns *ActiveConnections) GetStats() []ConnectionStatus {
conns.RLock()
defer conns.RUnlock()
stats := make([]ConnectionStatus, 0, len(conns.connections))
for _, c := range conns.connections {
stat := ConnectionStatus{
Username: c.GetUsername(),
ConnectionID: c.GetID(),
ClientVersion: c.GetClientVersion(),
RemoteAddress: c.GetRemoteAddress(),
ConnectionTime: utils.GetTimeAsMsSinceEpoch(c.GetConnectionTime()),
LastActivity: utils.GetTimeAsMsSinceEpoch(c.GetLastActivity()),
Protocol: c.GetProtocol(),
Command: c.GetCommand(),
Transfers: c.GetTransfers(),
}
stats = append(stats, stat)
}
return stats
}
// ConnectionStatus represents the status of an active connection
type ConnectionStatus struct {
// Logged in username
Username string `json:"username"`
// Unique identifier for the connection
ConnectionID string `json:"connection_id"`
// Client's version string
ClientVersion string `json:"client_version,omitempty"`
// Remote address for this connection
RemoteAddress string `json:"remote_address"`
// Connection time as unix timestamp in milliseconds
ConnectionTime int64 `json:"connection_time"`
// Last activity as unix timestamp in milliseconds
LastActivity int64 `json:"last_activity"`
// Protocol for this connection
Protocol string `json:"protocol"`
// active uploads/downloads
Transfers []ConnectionTransfer `json:"active_transfers,omitempty"`
// SSH command or WebDAV method
Command string `json:"command,omitempty"`
}
// GetConnectionDuration returns the connection duration as a string
func (c ConnectionStatus) GetConnectionDuration() string {
elapsed := time.Since(utils.GetTimeFromMsecSinceEpoch(c.ConnectionTime))
return utils.GetDurationAsString(elapsed)
}
// GetConnectionInfo returns connection info.
// Protocol, Client Version and RemoteAddress are returned.
// For SSH commands the issued command is returned too.
func (c ConnectionStatus) GetConnectionInfo() string {
result := fmt.Sprintf("%v. Client: %#v From: %#v", c.Protocol, c.ClientVersion, c.RemoteAddress)
if c.Protocol == ProtocolSSH && len(c.Command) > 0 {
result += fmt.Sprintf(". Command: %#v", c.Command)
}
if c.Protocol == ProtocolWebDAV && len(c.Command) > 0 {
result += fmt.Sprintf(". Method: %#v", c.Command)
}
return result
}
// GetTransfersAsString returns the active transfers as a string
func (c ConnectionStatus) GetTransfersAsString() string {
result := ""
for _, t := range c.Transfers {
if len(result) > 0 {
result += ". "
}
result += t.getConnectionTransferAsString()
}
return result
}
// ActiveQuotaScan defines an active quota scan for a user home dir
type ActiveQuotaScan struct {
// Username to which the quota scan refers
Username string `json:"username"`
// quota scan start time as unix timestamp in milliseconds
StartTime int64 `json:"start_time"`
}
// ActiveVirtualFolderQuotaScan defines an active quota scan for a virtual folder
type ActiveVirtualFolderQuotaScan struct {
// folder path to which the quota scan refers
MappedPath string `json:"mapped_path"`
// quota scan start time as unix timestamp in milliseconds
StartTime int64 `json:"start_time"`
}
// ActiveScans holds the active quota scans
type ActiveScans struct {
sync.RWMutex
UserHomeScans []ActiveQuotaScan
FolderScans []ActiveVirtualFolderQuotaScan
}
// GetUsersQuotaScans returns the active quota scans for users home directories
func (s *ActiveScans) GetUsersQuotaScans() []ActiveQuotaScan {
s.RLock()
defer s.RUnlock()
scans := make([]ActiveQuotaScan, len(s.UserHomeScans))
copy(scans, s.UserHomeScans)
return scans
}
// AddUserQuotaScan adds a user to the ones with active quota scans.
// Returns false if the user has a quota scan already running
func (s *ActiveScans) AddUserQuotaScan(username string) bool {
s.Lock()
defer s.Unlock()
for _, scan := range s.UserHomeScans {
if scan.Username == username {
return false
}
}
s.UserHomeScans = append(s.UserHomeScans, ActiveQuotaScan{
Username: username,
StartTime: utils.GetTimeAsMsSinceEpoch(time.Now()),
})
return true
}
// RemoveUserQuotaScan removes a user from the ones with active quota scans.
// Returns false if the user has no active quota scans
func (s *ActiveScans) RemoveUserQuotaScan(username string) bool {
s.Lock()
defer s.Unlock()
indexToRemove := -1
for i, scan := range s.UserHomeScans {
if scan.Username == username {
indexToRemove = i
break
}
}
if indexToRemove >= 0 {
s.UserHomeScans[indexToRemove] = s.UserHomeScans[len(s.UserHomeScans)-1]
s.UserHomeScans = s.UserHomeScans[:len(s.UserHomeScans)-1]
return true
}
return false
}
// GetVFoldersQuotaScans returns the active quota scans for virtual folders
func (s *ActiveScans) GetVFoldersQuotaScans() []ActiveVirtualFolderQuotaScan {
s.RLock()
defer s.RUnlock()
scans := make([]ActiveVirtualFolderQuotaScan, len(s.FolderScans))
copy(scans, s.FolderScans)
return scans
}
// AddVFolderQuotaScan adds a virtual folder to the ones with active quota scans.
// Returns false if the folder has a quota scan already running
func (s *ActiveScans) AddVFolderQuotaScan(folderPath string) bool {
s.Lock()
defer s.Unlock()
for _, scan := range s.FolderScans {
if scan.MappedPath == folderPath {
return false
}
}
s.FolderScans = append(s.FolderScans, ActiveVirtualFolderQuotaScan{
MappedPath: folderPath,
StartTime: utils.GetTimeAsMsSinceEpoch(time.Now()),
})
return true
}
// RemoveVFolderQuotaScan removes a folder from the ones with active quota scans.
// Returns false if the folder has no active quota scans
func (s *ActiveScans) RemoveVFolderQuotaScan(folderPath string) bool {
s.Lock()
defer s.Unlock()
indexToRemove := -1
for i, scan := range s.FolderScans {
if scan.MappedPath == folderPath {
indexToRemove = i
break
}
}
if indexToRemove >= 0 {
s.FolderScans[indexToRemove] = s.FolderScans[len(s.FolderScans)-1]
s.FolderScans = s.FolderScans[:len(s.FolderScans)-1]
return true
}
return false
}
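// Example (illustrative sketch, not part of the original file): a quota scan
// worker could use the add/remove pair as a mutual exclusion guard:
//
//	if common.QuotaScans.AddUserQuotaScan(username) {
//		defer common.QuotaScans.RemoveUserQuotaScan(username)
//		// ... perform the scan and update the provider ...
//	}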

516
common/common_test.go Normal file

@@ -0,0 +1,516 @@
package common
import (
"fmt"
"net"
"net/http"
"os"
"os/exec"
"runtime"
"strings"
"sync/atomic"
"testing"
"time"
"github.com/rs/zerolog"
"github.com/spf13/viper"
"github.com/stretchr/testify/assert"
"github.com/drakkan/sftpgo/dataprovider"
"github.com/drakkan/sftpgo/httpclient"
"github.com/drakkan/sftpgo/logger"
"github.com/drakkan/sftpgo/vfs"
)
const (
logSenderTest = "common_test"
httpAddr = "127.0.0.1:9999"
httpProxyAddr = "127.0.0.1:7777"
configDir = ".."
osWindows = "windows"
userTestUsername = "common_test_username"
userTestPwd = "common_test_pwd"
)
type providerConf struct {
Config dataprovider.Config `json:"data_provider" mapstructure:"data_provider"`
}
type fakeConnection struct {
*BaseConnection
command string
}
func (c *fakeConnection) AddUser(user dataprovider.User) error {
fs, err := user.GetFilesystem(c.GetID())
if err != nil {
return err
}
c.BaseConnection.User = user
c.BaseConnection.Fs = fs
return nil
}
func (c *fakeConnection) Disconnect() error {
Connections.Remove(c.GetID())
return nil
}
func (c *fakeConnection) GetClientVersion() string {
return ""
}
func (c *fakeConnection) GetCommand() string {
return c.command
}
func (c *fakeConnection) GetRemoteAddress() string {
return ""
}
type customNetConn struct {
net.Conn
id string
isClosed bool
}
func (c *customNetConn) Close() error {
Connections.RemoveSSHConnection(c.id)
c.isClosed = true
return c.Conn.Close()
}
func TestMain(m *testing.M) {
logfilePath := "common_test.log"
logger.InitLogger(logfilePath, 5, 1, 28, false, zerolog.DebugLevel)
viper.SetEnvPrefix("sftpgo")
replacer := strings.NewReplacer(".", "__")
viper.SetEnvKeyReplacer(replacer)
viper.SetConfigName("sftpgo")
viper.AutomaticEnv()
viper.AllowEmptyEnv(true)
driver, err := initializeDataprovider(-1)
if err != nil {
logger.WarnToConsole("error initializing data provider: %v", err)
os.Exit(1)
}
logger.InfoToConsole("Starting COMMON tests, provider: %v", driver)
Initialize(Configuration{})
httpConfig := httpclient.Config{
Timeout: 5,
}
httpConfig.Initialize(configDir)
go func() {
// start a test HTTP server to receive action notifications
http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
fmt.Fprintf(w, "OK\n")
})
http.HandleFunc("/404", func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusNotFound)
fmt.Fprintf(w, "Not found\n")
})
if err := http.ListenAndServe(httpAddr, nil); err != nil {
logger.ErrorToConsole("could not start HTTP notification server: %v", err)
os.Exit(1)
}
}()
go func() {
Config.ProxyProtocol = 2
listener, err := net.Listen("tcp", httpProxyAddr)
if err != nil {
logger.ErrorToConsole("error creating listener for proxy protocol server: %v", err)
os.Exit(1)
}
proxyListener, err := Config.GetProxyListener(listener)
if err != nil {
logger.ErrorToConsole("error creating proxy protocol listener: %v", err)
os.Exit(1)
}
Config.ProxyProtocol = 0
s := &http.Server{}
if err := s.Serve(proxyListener); err != nil {
logger.ErrorToConsole("could not start HTTP proxy protocol server: %v", err)
os.Exit(1)
}
}()
waitTCPListening(httpAddr)
waitTCPListening(httpProxyAddr)
exitCode := m.Run()
os.Remove(logfilePath) //nolint:errcheck
os.Exit(exitCode)
}
func waitTCPListening(address string) {
for {
conn, err := net.Dial("tcp", address)
if err != nil {
logger.WarnToConsole("tcp server %v not listening: %v\n", address, err)
time.Sleep(100 * time.Millisecond)
continue
}
logger.InfoToConsole("tcp server %v now listening\n", address)
conn.Close()
break
}
}
func initializeDataprovider(trackQuota int) (string, error) {
configDir := ".."
viper.AddConfigPath(configDir)
if err := viper.ReadInConfig(); err != nil {
return "", err
}
var cfg providerConf
if err := viper.Unmarshal(&cfg); err != nil {
return "", err
}
if trackQuota >= 0 && trackQuota <= 2 {
cfg.Config.TrackQuota = trackQuota
}
return cfg.Config.Driver, dataprovider.Initialize(cfg.Config, configDir)
}
func closeDataprovider() error {
return dataprovider.Close()
}
func TestSSHConnections(t *testing.T) {
conn1, conn2 := net.Pipe()
now := time.Now()
sshConn1 := NewSSHConnection("id1", conn1)
sshConn2 := NewSSHConnection("id2", conn2)
assert.Equal(t, "id1", sshConn1.GetID())
assert.Equal(t, "id2", sshConn2.GetID())
sshConn1.UpdateLastActivity()
assert.GreaterOrEqual(t, sshConn1.GetLastActivity().UnixNano(), now.UnixNano())
Connections.AddSSHConnection(sshConn1)
Connections.AddSSHConnection(sshConn2)
Connections.RLock()
assert.Len(t, Connections.sshConnections, 2)
Connections.RUnlock()
Connections.RemoveSSHConnection(sshConn1.id)
Connections.RLock()
assert.Len(t, Connections.sshConnections, 1)
Connections.RUnlock()
Connections.RemoveSSHConnection(sshConn1.id)
Connections.RLock()
assert.Len(t, Connections.sshConnections, 1)
Connections.RUnlock()
Connections.RemoveSSHConnection(sshConn2.id)
Connections.RLock()
assert.Len(t, Connections.sshConnections, 0)
Connections.RUnlock()
assert.NoError(t, sshConn1.Close())
assert.NoError(t, sshConn2.Close())
}
func TestIdleConnections(t *testing.T) {
configCopy := Config
Config.IdleTimeout = 1
Initialize(Config)
conn1, conn2 := net.Pipe()
customConn1 := &customNetConn{
Conn: conn1,
id: "id1",
}
customConn2 := &customNetConn{
Conn: conn2,
id: "id2",
}
sshConn1 := NewSSHConnection(customConn1.id, customConn1)
sshConn2 := NewSSHConnection(customConn2.id, customConn2)
username := "test_user"
user := dataprovider.User{
Username: username,
}
c := NewBaseConnection(sshConn1.id+"_1", ProtocolSFTP, user, nil)
c.lastActivity = time.Now().Add(-24 * time.Hour).UnixNano()
fakeConn := &fakeConnection{
BaseConnection: c,
}
// both ssh connections are expired but they should be removed only
// if there are no associated connections
sshConn1.lastActivity = c.lastActivity
sshConn2.lastActivity = c.lastActivity
Connections.AddSSHConnection(sshConn1)
Connections.Add(fakeConn)
assert.Equal(t, Connections.GetActiveSessions(username), 1)
c = NewBaseConnection(sshConn2.id+"_1", ProtocolSSH, user, nil)
fakeConn = &fakeConnection{
BaseConnection: c,
}
Connections.AddSSHConnection(sshConn2)
Connections.Add(fakeConn)
assert.Equal(t, Connections.GetActiveSessions(username), 2)
cFTP := NewBaseConnection("id2", ProtocolFTP, dataprovider.User{}, nil)
cFTP.lastActivity = time.Now().UnixNano()
fakeConn = &fakeConnection{
BaseConnection: cFTP,
}
Connections.Add(fakeConn)
assert.Equal(t, Connections.GetActiveSessions(username), 2)
assert.Len(t, Connections.GetStats(), 3)
Connections.RLock()
assert.Len(t, Connections.sshConnections, 2)
Connections.RUnlock()
startIdleTimeoutTicker(100 * time.Millisecond)
assert.Eventually(t, func() bool { return Connections.GetActiveSessions(username) == 1 }, 1*time.Second, 200*time.Millisecond)
assert.Eventually(t, func() bool {
Connections.RLock()
defer Connections.RUnlock()
return len(Connections.sshConnections) == 1
}, 1*time.Second, 200*time.Millisecond)
stopIdleTimeoutTicker()
assert.Len(t, Connections.GetStats(), 2)
c.lastActivity = time.Now().Add(-24 * time.Hour).UnixNano()
cFTP.lastActivity = time.Now().Add(-24 * time.Hour).UnixNano()
sshConn2.lastActivity = c.lastActivity
startIdleTimeoutTicker(100 * time.Millisecond)
assert.Eventually(t, func() bool { return len(Connections.GetStats()) == 0 }, 1*time.Second, 200*time.Millisecond)
assert.Eventually(t, func() bool {
Connections.RLock()
defer Connections.RUnlock()
return len(Connections.sshConnections) == 0
}, 1*time.Second, 200*time.Millisecond)
stopIdleTimeoutTicker()
assert.True(t, customConn1.isClosed)
assert.True(t, customConn2.isClosed)
Config = configCopy
}
func TestCloseConnection(t *testing.T) {
c := NewBaseConnection("id", ProtocolSFTP, dataprovider.User{}, nil)
fakeConn := &fakeConnection{
BaseConnection: c,
}
Connections.Add(fakeConn)
assert.Len(t, Connections.GetStats(), 1)
res := Connections.Close(fakeConn.GetID())
assert.True(t, res)
assert.Eventually(t, func() bool { return len(Connections.GetStats()) == 0 }, 300*time.Millisecond, 50*time.Millisecond)
res = Connections.Close(fakeConn.GetID())
assert.False(t, res)
Connections.Remove(fakeConn.GetID())
}
func TestSwapConnection(t *testing.T) {
c := NewBaseConnection("id", ProtocolFTP, dataprovider.User{}, nil)
fakeConn := &fakeConnection{
BaseConnection: c,
}
Connections.Add(fakeConn)
if assert.Len(t, Connections.GetStats(), 1) {
assert.Equal(t, "", Connections.GetStats()[0].Username)
}
c = NewBaseConnection("id", ProtocolFTP, dataprovider.User{
Username: userTestUsername,
}, nil)
fakeConn = &fakeConnection{
BaseConnection: c,
}
err := Connections.Swap(fakeConn)
assert.NoError(t, err)
if assert.Len(t, Connections.GetStats(), 1) {
assert.Equal(t, userTestUsername, Connections.GetStats()[0].Username)
}
res := Connections.Close(fakeConn.GetID())
assert.True(t, res)
assert.Eventually(t, func() bool { return len(Connections.GetStats()) == 0 }, 300*time.Millisecond, 50*time.Millisecond)
err = Connections.Swap(fakeConn)
assert.Error(t, err)
}
func TestAtomicUpload(t *testing.T) {
configCopy := Config
Config.UploadMode = UploadModeStandard
assert.False(t, Config.IsAtomicUploadEnabled())
Config.UploadMode = UploadModeAtomic
assert.True(t, Config.IsAtomicUploadEnabled())
Config.UploadMode = UploadModeAtomicWithResume
assert.True(t, Config.IsAtomicUploadEnabled())
Config = configCopy
}
func TestConnectionStatus(t *testing.T) {
username := "test_user"
user := dataprovider.User{
Username: username,
}
fs := vfs.NewOsFs("", os.TempDir(), nil)
c1 := NewBaseConnection("id1", ProtocolSFTP, user, fs)
fakeConn1 := &fakeConnection{
BaseConnection: c1,
}
t1 := NewBaseTransfer(nil, c1, nil, "/p1", "/r1", TransferUpload, 0, 0, 0, true, fs)
t1.BytesReceived = 123
t2 := NewBaseTransfer(nil, c1, nil, "/p2", "/r2", TransferDownload, 0, 0, 0, true, fs)
t2.BytesSent = 456
c2 := NewBaseConnection("id2", ProtocolSSH, user, nil)
fakeConn2 := &fakeConnection{
BaseConnection: c2,
command: "md5sum",
}
c3 := NewBaseConnection("id3", ProtocolWebDAV, user, nil)
fakeConn3 := &fakeConnection{
BaseConnection: c3,
command: "PROPFIND",
}
t3 := NewBaseTransfer(nil, c3, nil, "/p2", "/r2", TransferDownload, 0, 0, 0, true, fs)
Connections.Add(fakeConn1)
Connections.Add(fakeConn2)
Connections.Add(fakeConn3)
stats := Connections.GetStats()
assert.Len(t, stats, 3)
for _, stat := range stats {
assert.Equal(t, stat.Username, username)
assert.True(t, strings.HasPrefix(stat.GetConnectionInfo(), stat.Protocol))
assert.True(t, strings.HasPrefix(stat.GetConnectionDuration(), "00:"))
if stat.ConnectionID == "SFTP_id1" {
assert.Len(t, stat.Transfers, 2)
assert.Greater(t, len(stat.GetTransfersAsString()), 0)
for _, tr := range stat.Transfers {
if tr.OperationType == operationDownload {
assert.True(t, strings.HasPrefix(tr.getConnectionTransferAsString(), "DL"))
} else if tr.OperationType == operationUpload {
assert.True(t, strings.HasPrefix(tr.getConnectionTransferAsString(), "UL"))
}
}
} else if stat.ConnectionID == "DAV_id3" {
assert.Len(t, stat.Transfers, 1)
assert.Greater(t, len(stat.GetTransfersAsString()), 0)
} else {
assert.Equal(t, 0, len(stat.GetTransfersAsString()))
}
}
err := t1.Close()
assert.NoError(t, err)
err = t2.Close()
assert.NoError(t, err)
err = fakeConn3.SignalTransfersAbort()
assert.NoError(t, err)
assert.Equal(t, int32(1), atomic.LoadInt32(&t3.AbortTransfer))
err = t3.Close()
assert.NoError(t, err)
err = fakeConn3.SignalTransfersAbort()
assert.Error(t, err)
Connections.Remove(fakeConn1.GetID())
Connections.Remove(fakeConn2.GetID())
Connections.Remove(fakeConn3.GetID())
stats = Connections.GetStats()
assert.Len(t, stats, 0)
}
func TestQuotaScans(t *testing.T) {
username := "username"
assert.True(t, QuotaScans.AddUserQuotaScan(username))
assert.False(t, QuotaScans.AddUserQuotaScan(username))
if assert.Len(t, QuotaScans.GetUsersQuotaScans(), 1) {
assert.Equal(t, QuotaScans.GetUsersQuotaScans()[0].Username, username)
}
assert.True(t, QuotaScans.RemoveUserQuotaScan(username))
assert.False(t, QuotaScans.RemoveUserQuotaScan(username))
assert.Len(t, QuotaScans.GetUsersQuotaScans(), 0)
folderName := "/folder"
assert.True(t, QuotaScans.AddVFolderQuotaScan(folderName))
assert.False(t, QuotaScans.AddVFolderQuotaScan(folderName))
if assert.Len(t, QuotaScans.GetVFoldersQuotaScans(), 1) {
assert.Equal(t, QuotaScans.GetVFoldersQuotaScans()[0].MappedPath, folderName)
}
assert.True(t, QuotaScans.RemoveVFolderQuotaScan(folderName))
assert.False(t, QuotaScans.RemoveVFolderQuotaScan(folderName))
assert.Len(t, QuotaScans.GetVFoldersQuotaScans(), 0)
}
func TestProxyProtocolVersion(t *testing.T) {
c := Configuration{
ProxyProtocol: 1,
}
proxyListener, err := c.GetProxyListener(nil)
assert.NoError(t, err)
assert.Nil(t, proxyListener.Policy)
c.ProxyProtocol = 2
proxyListener, err = c.GetProxyListener(nil)
assert.NoError(t, err)
assert.NotNil(t, proxyListener.Policy)
c.ProxyProtocol = 1
c.ProxyAllowed = []string{"invalid"}
_, err = c.GetProxyListener(nil)
assert.Error(t, err)
c.ProxyProtocol = 2
_, err = c.GetProxyListener(nil)
assert.Error(t, err)
}
func TestProxyProtocol(t *testing.T) {
httpClient := httpclient.GetHTTPClient()
resp, err := httpClient.Get(fmt.Sprintf("http://%v", httpProxyAddr))
if assert.NoError(t, err) {
defer resp.Body.Close()
assert.Equal(t, http.StatusBadRequest, resp.StatusCode)
}
}
func TestPostConnectHook(t *testing.T) {
Config.PostConnectHook = ""
remoteAddr := &net.IPAddr{
IP: net.ParseIP("127.0.0.1"),
Zone: "",
}
assert.NoError(t, Config.ExecutePostConnectHook(remoteAddr.String(), ProtocolFTP))
Config.PostConnectHook = "http://foo\x7f.com/"
assert.Error(t, Config.ExecutePostConnectHook(remoteAddr.String(), ProtocolSFTP))
Config.PostConnectHook = "http://invalid:1234/"
assert.Error(t, Config.ExecutePostConnectHook(remoteAddr.String(), ProtocolSFTP))
Config.PostConnectHook = fmt.Sprintf("http://%v/404", httpAddr)
assert.Error(t, Config.ExecutePostConnectHook(remoteAddr.String(), ProtocolFTP))
Config.PostConnectHook = fmt.Sprintf("http://%v", httpAddr)
assert.NoError(t, Config.ExecutePostConnectHook(remoteAddr.String(), ProtocolFTP))
Config.PostConnectHook = "invalid"
assert.Error(t, Config.ExecutePostConnectHook(remoteAddr.String(), ProtocolFTP))
if runtime.GOOS == osWindows {
Config.PostConnectHook = "C:\\bad\\command"
assert.Error(t, Config.ExecutePostConnectHook(remoteAddr.String(), ProtocolSFTP))
} else {
Config.PostConnectHook = "/invalid/path"
assert.Error(t, Config.ExecutePostConnectHook(remoteAddr.String(), ProtocolSFTP))
hookCmd, err := exec.LookPath("true")
assert.NoError(t, err)
Config.PostConnectHook = hookCmd
assert.NoError(t, Config.ExecutePostConnectHook(remoteAddr.String(), ProtocolSFTP))
}
Config.PostConnectHook = ""
}

957
common/connection.go Normal file

@@ -0,0 +1,957 @@
package common
import (
"errors"
"fmt"
"os"
"path"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/pkg/sftp"
"github.com/drakkan/sftpgo/dataprovider"
"github.com/drakkan/sftpgo/logger"
"github.com/drakkan/sftpgo/utils"
"github.com/drakkan/sftpgo/vfs"
)
// BaseConnection defines common fields for a connection using any supported protocol
type BaseConnection struct {
// Unique identifier for the connection
ID string
// user associated with this connection if any
User dataprovider.User
// start time for this connection
startTime time.Time
protocol string
Fs vfs.Fs
sync.RWMutex
// last activity for this connection
lastActivity int64
transferID uint64
activeTransfers []ActiveTransfer
}
// NewBaseConnection returns a new BaseConnection
func NewBaseConnection(ID, protocol string, user dataprovider.User, fs vfs.Fs) *BaseConnection {
connID := ID
if utils.IsStringInSlice(protocol, supportedProtocols) {
connID = fmt.Sprintf("%v_%v", protocol, ID)
}
return &BaseConnection{
ID: connID,
User: user,
startTime: time.Now(),
protocol: protocol,
Fs: fs,
lastActivity: time.Now().UnixNano(),
transferID: 0,
}
}
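// Example (illustrative sketch, not part of the original file): for supported
// protocols the given ID gets a protocol prefix, so log lines and stats can be
// correlated by protocol:
//
//	c := common.NewBaseConnection("id1", common.ProtocolSFTP, user, fs)
//	fmt.Println(c.GetID()) // prints "SFTP_id1"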
// Log outputs a log entry to the configured logger
func (c *BaseConnection) Log(level logger.LogLevel, format string, v ...interface{}) {
logger.Log(level, c.protocol, c.ID, format, v...)
}
// GetTransferID returns a unique transfer ID for this connection
func (c *BaseConnection) GetTransferID() uint64 {
return atomic.AddUint64(&c.transferID, 1)
}
// GetID returns the connection ID
func (c *BaseConnection) GetID() string {
return c.ID
}
// GetUsername returns the authenticated username associated with this connection if any
func (c *BaseConnection) GetUsername() string {
return c.User.Username
}
// GetProtocol returns the protocol for the connection
func (c *BaseConnection) GetProtocol() string {
return c.protocol
}
// SetProtocol sets the protocol for this connection
func (c *BaseConnection) SetProtocol(protocol string) {
c.protocol = protocol
if utils.IsStringInSlice(c.protocol, supportedProtocols) {
c.ID = fmt.Sprintf("%v_%v", c.protocol, c.ID)
}
}
// GetConnectionTime returns the initial connection time
func (c *BaseConnection) GetConnectionTime() time.Time {
return c.startTime
}
// UpdateLastActivity updates last activity for this connection
func (c *BaseConnection) UpdateLastActivity() {
atomic.StoreInt64(&c.lastActivity, time.Now().UnixNano())
}
// GetLastActivity returns the last connection activity
func (c *BaseConnection) GetLastActivity() time.Time {
return time.Unix(0, atomic.LoadInt64(&c.lastActivity))
}
// AddTransfer associates a new transfer to this connection
func (c *BaseConnection) AddTransfer(t ActiveTransfer) {
c.Lock()
defer c.Unlock()
c.activeTransfers = append(c.activeTransfers, t)
c.Log(logger.LevelDebug, "transfer added, id: %v, active transfers: %v", t.GetID(), len(c.activeTransfers))
}
// RemoveTransfer removes the specified transfer from the active ones
func (c *BaseConnection) RemoveTransfer(t ActiveTransfer) {
c.Lock()
defer c.Unlock()
indexToRemove := -1
for i, v := range c.activeTransfers {
if v.GetID() == t.GetID() {
indexToRemove = i
break
}
}
if indexToRemove >= 0 {
c.activeTransfers[indexToRemove] = c.activeTransfers[len(c.activeTransfers)-1]
c.activeTransfers[len(c.activeTransfers)-1] = nil
c.activeTransfers = c.activeTransfers[:len(c.activeTransfers)-1]
c.Log(logger.LevelDebug, "transfer removed, id: %v active transfers: %v", t.GetID(), len(c.activeTransfers))
} else {
c.Log(logger.LevelWarn, "transfer to remove not found!")
}
}
// GetTransfers returns the active transfers
func (c *BaseConnection) GetTransfers() []ConnectionTransfer {
c.RLock()
defer c.RUnlock()
transfers := make([]ConnectionTransfer, 0, len(c.activeTransfers))
for _, t := range c.activeTransfers {
var operationType string
switch t.GetType() {
case TransferDownload:
operationType = operationDownload
case TransferUpload:
operationType = operationUpload
}
transfers = append(transfers, ConnectionTransfer{
ID: t.GetID(),
OperationType: operationType,
StartTime: utils.GetTimeAsMsSinceEpoch(t.GetStartTime()),
Size: t.GetSize(),
VirtualPath: t.GetVirtualPath(),
})
}
return transfers
}
// SignalTransfersAbort signals to the active transfers to exit as soon as possible
func (c *BaseConnection) SignalTransfersAbort() error {
c.RLock()
defer c.RUnlock()
if len(c.activeTransfers) == 0 {
return errors.New("no active transfer found")
}
for _, t := range c.activeTransfers {
t.SignalClose()
}
return nil
}
func (c *BaseConnection) getRealFsPath(fsPath string) string {
c.RLock()
defer c.RUnlock()
for _, t := range c.activeTransfers {
if p := t.GetRealFsPath(fsPath); len(p) > 0 {
return p
}
}
return fsPath
}
func (c *BaseConnection) truncateOpenHandle(fsPath string, size int64) (int64, error) {
c.RLock()
defer c.RUnlock()
for _, t := range c.activeTransfers {
initialSize, err := t.Truncate(fsPath, size)
if err != errTransferMismatch {
return initialSize, err
}
}
return 0, errNoTransfer
}
// ListDir reads the directory named by fsPath and returns a list of directory entries
func (c *BaseConnection) ListDir(fsPath, virtualPath string) ([]os.FileInfo, error) {
if !c.User.HasPerm(dataprovider.PermListItems, virtualPath) {
return nil, c.GetPermissionDeniedError()
}
files, err := c.Fs.ReadDir(fsPath)
if err != nil {
c.Log(logger.LevelWarn, "error listing directory: %+v", err)
return nil, c.GetFsError(err)
}
return c.User.AddVirtualDirs(files, virtualPath), nil
}
// CreateDir creates a new directory at the specified fsPath
func (c *BaseConnection) CreateDir(fsPath, virtualPath string) error {
if !c.User.HasPerm(dataprovider.PermCreateDirs, path.Dir(virtualPath)) {
return c.GetPermissionDeniedError()
}
if c.User.IsVirtualFolder(virtualPath) {
c.Log(logger.LevelWarn, "mkdir not allowed %#v is a virtual folder", virtualPath)
return c.GetPermissionDeniedError()
}
if err := c.Fs.Mkdir(fsPath); err != nil {
c.Log(logger.LevelWarn, "error creating dir: %#v error: %+v", fsPath, err)
return c.GetFsError(err)
}
vfs.SetPathPermissions(c.Fs, fsPath, c.User.GetUID(), c.User.GetGID())
logger.CommandLog(mkdirLogSender, fsPath, "", c.User.Username, "", c.ID, c.protocol, -1, -1, "", "", "", -1)
return nil
}
// IsRemoveFileAllowed returns an error if removing this file is not allowed
func (c *BaseConnection) IsRemoveFileAllowed(fsPath, virtualPath string) error {
if !c.User.HasPerm(dataprovider.PermDelete, path.Dir(virtualPath)) {
return c.GetPermissionDeniedError()
}
if !c.User.IsFileAllowed(virtualPath) {
c.Log(logger.LevelDebug, "removing file %#v is not allowed", fsPath)
return c.GetPermissionDeniedError()
}
return nil
}
// RemoveFile removes a file at the specified fsPath
func (c *BaseConnection) RemoveFile(fsPath, virtualPath string, info os.FileInfo) error {
if err := c.IsRemoveFileAllowed(fsPath, virtualPath); err != nil {
return err
}
size := info.Size()
action := newActionNotification(&c.User, operationPreDelete, fsPath, "", "", c.protocol, size, nil)
actionErr := action.execute()
if actionErr == nil {
c.Log(logger.LevelDebug, "remove for path %#v handled by pre-delete action", fsPath)
} else {
if err := c.Fs.Remove(fsPath, false); err != nil {
c.Log(logger.LevelWarn, "failed to remove a file/symlink %#v: %+v", fsPath, err)
return c.GetFsError(err)
}
}
logger.CommandLog(removeLogSender, fsPath, "", c.User.Username, "", c.ID, c.protocol, -1, -1, "", "", "", -1)
if info.Mode()&os.ModeSymlink != os.ModeSymlink {
vfolder, err := c.User.GetVirtualFolderForPath(path.Dir(virtualPath))
if err == nil {
dataprovider.UpdateVirtualFolderQuota(vfolder.BaseVirtualFolder, -1, -size, false) //nolint:errcheck
if vfolder.IsIncludedInUserQuota() {
dataprovider.UpdateUserQuota(c.User, -1, -size, false) //nolint:errcheck
}
} else {
dataprovider.UpdateUserQuota(c.User, -1, -size, false) //nolint:errcheck
}
}
if actionErr != nil {
action := newActionNotification(&c.User, operationDelete, fsPath, "", "", c.protocol, size, nil)
go action.execute() //nolint:errcheck
}
return nil
}
// IsRemoveDirAllowed returns an error if removing this directory is not allowed
func (c *BaseConnection) IsRemoveDirAllowed(fsPath, virtualPath string) error {
if c.Fs.GetRelativePath(fsPath) == "/" {
c.Log(logger.LevelWarn, "removing root dir is not allowed")
return c.GetPermissionDeniedError()
}
if c.User.IsVirtualFolder(virtualPath) {
c.Log(logger.LevelWarn, "removing a virtual folder is not allowed: %#v", virtualPath)
return c.GetPermissionDeniedError()
}
if c.User.HasVirtualFoldersInside(virtualPath) {
c.Log(logger.LevelWarn, "removing a directory with a virtual folder inside is not allowed: %#v", virtualPath)
return c.GetOpUnsupportedError()
}
if c.User.IsMappedPath(fsPath) {
c.Log(logger.LevelWarn, "removing a directory mapped as virtual folder is not allowed: %#v", fsPath)
return c.GetPermissionDeniedError()
}
if !c.User.HasPerm(dataprovider.PermDelete, path.Dir(virtualPath)) {
return c.GetPermissionDeniedError()
}
return nil
}
// RemoveDir removes a directory at the specified fsPath
func (c *BaseConnection) RemoveDir(fsPath, virtualPath string) error {
if err := c.IsRemoveDirAllowed(fsPath, virtualPath); err != nil {
return err
}
var fi os.FileInfo
var err error
if fi, err = c.Fs.Lstat(fsPath); err != nil {
// see #149
if c.Fs.IsNotExist(err) && c.Fs.HasVirtualFolders() {
return nil
}
c.Log(logger.LevelWarn, "failed to remove a dir %#v: stat error: %+v", fsPath, err)
return c.GetFsError(err)
}
if !fi.IsDir() || fi.Mode()&os.ModeSymlink == os.ModeSymlink {
c.Log(logger.LevelDebug, "cannot remove %#v is not a directory", fsPath)
return c.GetGenericError(nil)
}
if err := c.Fs.Remove(fsPath, true); err != nil {
c.Log(logger.LevelWarn, "failed to remove directory %#v: %+v", fsPath, err)
return c.GetFsError(err)
}
logger.CommandLog(rmdirLogSender, fsPath, "", c.User.Username, "", c.ID, c.protocol, -1, -1, "", "", "", -1)
return nil
}
// Rename renames (moves) fsSourcePath to fsTargetPath
func (c *BaseConnection) Rename(fsSourcePath, fsTargetPath, virtualSourcePath, virtualTargetPath string) error {
if c.User.IsMappedPath(fsSourcePath) {
c.Log(logger.LevelWarn, "renaming a directory mapped as virtual folder is not allowed: %#v", fsSourcePath)
return c.GetPermissionDeniedError()
}
if c.User.IsMappedPath(fsTargetPath) {
c.Log(logger.LevelWarn, "renaming to a directory mapped as virtual folder is not allowed: %#v", fsTargetPath)
return c.GetPermissionDeniedError()
}
srcInfo, err := c.Fs.Lstat(fsSourcePath)
if err != nil {
return c.GetFsError(err)
}
if !c.isRenamePermitted(fsSourcePath, virtualSourcePath, virtualTargetPath, srcInfo) {
return c.GetPermissionDeniedError()
}
initialSize := int64(-1)
if dstInfo, err := c.Fs.Lstat(fsTargetPath); err == nil {
if dstInfo.IsDir() {
c.Log(logger.LevelWarn, "attempted to rename %#v overwriting an existing directory %#v",
fsSourcePath, fsTargetPath)
return c.GetOpUnsupportedError()
}
// we are overwriting an existing file/symlink
if dstInfo.Mode().IsRegular() {
initialSize = dstInfo.Size()
}
if !c.User.HasPerm(dataprovider.PermOverwrite, path.Dir(virtualTargetPath)) {
c.Log(logger.LevelDebug, "renaming is not allowed, %#v -> %#v. Target exists but the user "+
"has no overwrite permission", virtualSourcePath, virtualTargetPath)
return c.GetPermissionDeniedError()
}
}
if srcInfo.IsDir() {
if c.User.HasVirtualFoldersInside(virtualSourcePath) {
c.Log(logger.LevelDebug, "renaming the folder %#v is not supported: it has virtual folders inside it",
virtualSourcePath)
return c.GetOpUnsupportedError()
}
if err = c.checkRecursiveRenameDirPermissions(fsSourcePath, fsTargetPath); err != nil {
c.Log(logger.LevelDebug, "error checking recursive permissions before renaming %#v: %+v", fsSourcePath, err)
return c.GetFsError(err)
}
}
if !c.hasSpaceForRename(virtualSourcePath, virtualTargetPath, initialSize, fsSourcePath) {
c.Log(logger.LevelInfo, "denying cross rename due to space limit")
return c.GetGenericError(ErrQuotaExceeded)
}
if err := c.Fs.Rename(fsSourcePath, fsTargetPath); err != nil {
c.Log(logger.LevelWarn, "failed to rename %#v -> %#v: %+v", fsSourcePath, fsTargetPath, err)
return c.GetFsError(err)
}
if dataprovider.GetQuotaTracking() > 0 {
c.updateQuotaAfterRename(virtualSourcePath, virtualTargetPath, fsTargetPath, initialSize) //nolint:errcheck
}
logger.CommandLog(renameLogSender, fsSourcePath, fsTargetPath, c.User.Username, "", c.ID, c.protocol, -1, -1,
"", "", "", -1)
action := newActionNotification(&c.User, operationRename, fsSourcePath, fsTargetPath, "", c.protocol, 0, nil)
// the returned error is used in test cases only; we already log the error inside action.execute
go action.execute() //nolint:errcheck
return nil
}
// CreateSymlink creates fsTargetPath as a symbolic link to fsSourcePath
func (c *BaseConnection) CreateSymlink(fsSourcePath, fsTargetPath, virtualSourcePath, virtualTargetPath string) error {
if c.Fs.GetRelativePath(fsSourcePath) == "/" {
c.Log(logger.LevelWarn, "symlinking root dir is not allowed")
return c.GetPermissionDeniedError()
}
if c.User.IsVirtualFolder(virtualTargetPath) {
c.Log(logger.LevelWarn, "symlinking a virtual folder is not allowed")
return c.GetPermissionDeniedError()
}
if !c.User.HasPerm(dataprovider.PermCreateSymlinks, path.Dir(virtualTargetPath)) {
return c.GetPermissionDeniedError()
}
if c.isCrossFoldersRequest(virtualSourcePath, virtualTargetPath) {
c.Log(logger.LevelWarn, "cross folder symlink is not supported, src: %v dst: %v", virtualSourcePath, virtualTargetPath)
return c.GetOpUnsupportedError()
}
if c.User.IsMappedPath(fsSourcePath) {
c.Log(logger.LevelWarn, "symlinking a directory mapped as virtual folder is not allowed: %#v", fsSourcePath)
return c.GetPermissionDeniedError()
}
if c.User.IsMappedPath(fsTargetPath) {
c.Log(logger.LevelWarn, "symlinking to a directory mapped as virtual folder is not allowed: %#v", fsTargetPath)
return c.GetPermissionDeniedError()
}
if err := c.Fs.Symlink(fsSourcePath, fsTargetPath); err != nil {
c.Log(logger.LevelWarn, "failed to create symlink %#v -> %#v: %+v", fsSourcePath, fsTargetPath, err)
return c.GetFsError(err)
}
logger.CommandLog(symlinkLogSender, fsSourcePath, fsTargetPath, c.User.Username, "", c.ID, c.protocol, -1, -1, "", "", "", -1)
return nil
}
func (c *BaseConnection) getPathForSetStatPerms(fsPath, virtualPath string) string {
pathForPerms := virtualPath
if fi, err := c.Fs.Lstat(fsPath); err == nil {
if fi.IsDir() {
pathForPerms = path.Dir(virtualPath)
}
}
return pathForPerms
}
// DoStat executes a Stat if mode is 0, or an Lstat if mode is 1
func (c *BaseConnection) DoStat(fsPath string, mode int) (os.FileInfo, error) {
if mode == 1 {
return c.Fs.Lstat(c.getRealFsPath(fsPath))
}
return c.Fs.Stat(c.getRealFsPath(fsPath))
}
// SetStat sets the StatAttributes for the specified fsPath
func (c *BaseConnection) SetStat(fsPath, virtualPath string, attributes *StatAttributes) error {
if Config.SetstatMode == 1 {
return nil
}
pathForPerms := c.getPathForSetStatPerms(fsPath, virtualPath)
if attributes.Flags&StatAttrPerms != 0 {
if !c.User.HasPerm(dataprovider.PermChmod, pathForPerms) {
return c.GetPermissionDeniedError()
}
if err := c.Fs.Chmod(c.getRealFsPath(fsPath), attributes.Mode); err != nil {
c.Log(logger.LevelWarn, "failed to chmod path %#v, mode: %v, err: %+v", fsPath, attributes.Mode.String(), err)
return c.GetFsError(err)
}
logger.CommandLog(chmodLogSender, fsPath, "", c.User.Username, attributes.Mode.String(), c.ID, c.protocol,
-1, -1, "", "", "", -1)
}
if attributes.Flags&StatAttrUIDGID != 0 {
if !c.User.HasPerm(dataprovider.PermChown, pathForPerms) {
return c.GetPermissionDeniedError()
}
if err := c.Fs.Chown(c.getRealFsPath(fsPath), attributes.UID, attributes.GID); err != nil {
c.Log(logger.LevelWarn, "failed to chown path %#v, uid: %v, gid: %v, err: %+v", fsPath, attributes.UID,
attributes.GID, err)
return c.GetFsError(err)
}
logger.CommandLog(chownLogSender, fsPath, "", c.User.Username, "", c.ID, c.protocol, attributes.UID, attributes.GID,
"", "", "", -1)
}
if attributes.Flags&StatAttrTimes != 0 {
if !c.User.HasPerm(dataprovider.PermChtimes, pathForPerms) {
return c.GetPermissionDeniedError()
}
if err := c.Fs.Chtimes(c.getRealFsPath(fsPath), attributes.Atime, attributes.Mtime); err != nil {
c.Log(logger.LevelWarn, "failed to chtimes for path %#v, access time: %v, modification time: %v, err: %+v",
fsPath, attributes.Atime, attributes.Mtime, err)
return c.GetFsError(err)
}
accessTimeString := attributes.Atime.Format(chtimesFormat)
modificationTimeString := attributes.Mtime.Format(chtimesFormat)
logger.CommandLog(chtimesLogSender, fsPath, "", c.User.Username, "", c.ID, c.protocol, -1, -1,
accessTimeString, modificationTimeString, "", -1)
}
if attributes.Flags&StatAttrSize != 0 {
if !c.User.HasPerm(dataprovider.PermOverwrite, pathForPerms) {
return c.GetPermissionDeniedError()
}
if err := c.truncateFile(fsPath, virtualPath, attributes.Size); err != nil {
c.Log(logger.LevelWarn, "failed to truncate path %#v, size: %v, err: %+v", fsPath, attributes.Size, err)
return c.GetFsError(err)
}
logger.CommandLog(truncateLogSender, fsPath, "", c.User.Username, "", c.ID, c.protocol, -1, -1, "", "", "", attributes.Size)
}
return nil
}
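// Example (illustrative sketch, not part of the original file): Flags is a
// bitmask of the StatAttr* values, so a combined chmod+chtimes request could
// be expressed as:
//
//	attrs := &common.StatAttributes{
//		Flags: common.StatAttrPerms | common.StatAttrTimes,
//		Mode:  0o644,
//		Atime: time.Now(),
//		Mtime: time.Now(),
//	}
//	err := conn.SetStat(fsPath, virtualPath, attrs)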
func (c *BaseConnection) truncateFile(fsPath, virtualPath string, size int64) error {
// check first if we have an open transfer for the given path and try to truncate the already opened file.
// If we find no transfer we truncate by path.
var initialSize int64
var err error
initialSize, err = c.truncateOpenHandle(fsPath, size)
if err == errNoTransfer {
c.Log(logger.LevelDebug, "file path %#v not found in active transfers, execute trucate by path", fsPath)
var info os.FileInfo
info, err = c.Fs.Stat(fsPath)
if err != nil {
return err
}
initialSize = info.Size()
err = c.Fs.Truncate(fsPath, size)
}
if err == nil && vfs.IsLocalOsFs(c.Fs) {
sizeDiff := initialSize - size
vfolder, err := c.User.GetVirtualFolderForPath(path.Dir(virtualPath))
if err == nil {
dataprovider.UpdateVirtualFolderQuota(vfolder.BaseVirtualFolder, 0, -sizeDiff, false) //nolint:errcheck
if vfolder.IsIncludedInUserQuota() {
dataprovider.UpdateUserQuota(c.User, 0, -sizeDiff, false) //nolint:errcheck
}
} else {
dataprovider.UpdateUserQuota(c.User, 0, -sizeDiff, false) //nolint:errcheck
}
}
return err
}
func (c *BaseConnection) checkRecursiveRenameDirPermissions(sourcePath, targetPath string) error {
dstPerms := []string{
dataprovider.PermCreateDirs,
dataprovider.PermUpload,
dataprovider.PermCreateSymlinks,
}
err := c.Fs.Walk(sourcePath, func(walkedPath string, info os.FileInfo, err error) error {
if err != nil {
return err
}
dstPath := strings.Replace(walkedPath, sourcePath, targetPath, 1)
virtualSrcPath := c.Fs.GetRelativePath(walkedPath)
virtualDstPath := c.Fs.GetRelativePath(dstPath)
// Walk scans the directory tree in order; by checking the parent directory permissions we are sure that
// everything inside the parent path has already been checked. If the current dir has no subdirs with
// their own permissions defined and it has all the required permissions, we can stop scanning
if !c.User.HasPermissionsInside(path.Dir(virtualSrcPath)) && !c.User.HasPermissionsInside(path.Dir(virtualDstPath)) {
if c.User.HasPerm(dataprovider.PermRename, path.Dir(virtualSrcPath)) &&
c.User.HasPerm(dataprovider.PermRename, path.Dir(virtualDstPath)) {
return ErrSkipPermissionsCheck
}
if c.User.HasPerm(dataprovider.PermDelete, path.Dir(virtualSrcPath)) &&
c.User.HasPerms(dstPerms, path.Dir(virtualDstPath)) {
return ErrSkipPermissionsCheck
}
}
if !c.isRenamePermitted(walkedPath, virtualSrcPath, virtualDstPath, info) {
c.Log(logger.LevelInfo, "rename %#v -> %#v is not allowed, virtual destination path: %#v",
walkedPath, dstPath, virtualDstPath)
return os.ErrPermission
}
return nil
})
if err == ErrSkipPermissionsCheck {
err = nil
}
return err
}
func (c *BaseConnection) isRenamePermitted(fsSourcePath, virtualSourcePath, virtualTargetPath string, fi os.FileInfo) bool {
if c.Fs.GetRelativePath(fsSourcePath) == "/" {
c.Log(logger.LevelWarn, "renaming root dir is not allowed")
return false
}
if c.User.IsVirtualFolder(virtualSourcePath) || c.User.IsVirtualFolder(virtualTargetPath) {
c.Log(logger.LevelWarn, "renaming a virtual folder is not allowed")
return false
}
if !c.User.IsFileAllowed(virtualSourcePath) || !c.User.IsFileAllowed(virtualTargetPath) {
if fi != nil && fi.Mode().IsRegular() {
c.Log(logger.LevelDebug, "renaming file is not allowed, source: %#v target: %#v",
virtualSourcePath, virtualTargetPath)
return false
}
}
if c.User.HasPerm(dataprovider.PermRename, path.Dir(virtualSourcePath)) &&
c.User.HasPerm(dataprovider.PermRename, path.Dir(virtualTargetPath)) {
return true
}
if !c.User.HasPerm(dataprovider.PermDelete, path.Dir(virtualSourcePath)) {
return false
}
if fi != nil {
if fi.IsDir() {
return c.User.HasPerm(dataprovider.PermCreateDirs, path.Dir(virtualTargetPath))
} else if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
return c.User.HasPerm(dataprovider.PermCreateSymlinks, path.Dir(virtualTargetPath))
}
}
return c.User.HasPerm(dataprovider.PermUpload, path.Dir(virtualTargetPath))
}
func (c *BaseConnection) hasSpaceForRename(virtualSourcePath, virtualTargetPath string, initialSize int64,
fsSourcePath string) bool {
if dataprovider.GetQuotaTracking() == 0 {
return true
}
sourceFolder, errSrc := c.User.GetVirtualFolderForPath(path.Dir(virtualSourcePath))
dstFolder, errDst := c.User.GetVirtualFolderForPath(path.Dir(virtualTargetPath))
if errSrc != nil && errDst != nil {
// rename inside the user home dir
return true
}
if errSrc == nil && errDst == nil {
// rename between virtual folders
if sourceFolder.MappedPath == dstFolder.MappedPath {
// rename inside the same virtual folder
return true
}
}
if errSrc != nil && dstFolder.IsIncludedInUserQuota() {
// rename between user root dir and a virtual folder included in user quota
return true
}
quotaResult := c.HasSpace(true, virtualTargetPath)
return c.hasSpaceForCrossRename(quotaResult, initialSize, fsSourcePath)
}
// hasSpaceForCrossRename checks the quota after a rename between different folders
func (c *BaseConnection) hasSpaceForCrossRename(quotaResult vfs.QuotaCheckResult, initialSize int64, sourcePath string) bool {
if !quotaResult.HasSpace && initialSize == -1 {
// we are over quota and this is not a file replace
return false
}
fi, err := c.Fs.Lstat(sourcePath)
if err != nil {
c.Log(logger.LevelWarn, "cross rename denied, stat error for path %#v: %v", sourcePath, err)
return false
}
var sizeDiff int64
var filesDiff int
if fi.Mode().IsRegular() {
sizeDiff = fi.Size()
filesDiff = 1
if initialSize != -1 {
sizeDiff -= initialSize
filesDiff = 0
}
} else if fi.IsDir() {
filesDiff, sizeDiff, err = c.Fs.GetDirSize(sourcePath)
if err != nil {
c.Log(logger.LevelWarn, "cross rename denied, error getting size for directory %#v: %v", sourcePath, err)
return false
}
}
if !quotaResult.HasSpace && initialSize != -1 {
// we are over quota but we are overwriting an existing file so we check if the quota size after the rename is ok
if quotaResult.QuotaSize == 0 {
return true
}
c.Log(logger.LevelDebug, "cross rename overwrite, source %#v, used size %v, size to add %v",
sourcePath, quotaResult.UsedSize, sizeDiff)
quotaResult.UsedSize += sizeDiff
return quotaResult.GetRemainingSize() >= 0
}
if quotaResult.QuotaFiles > 0 {
remainingFiles := quotaResult.GetRemainingFiles()
c.Log(logger.LevelDebug, "cross rename, source %#v remaining file %v to add %v", sourcePath,
remainingFiles, filesDiff)
if remainingFiles < filesDiff {
return false
}
}
if quotaResult.QuotaSize > 0 {
remainingSize := quotaResult.GetRemainingSize()
c.Log(logger.LevelDebug, "cross rename, source %#v remaining size %v to add %v", sourcePath,
remainingSize, sizeDiff)
if remainingSize < sizeDiff {
return false
}
}
return true
}
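// Editor's sketch (hypothetical helper, not part of the source): the
// overwrite branch above in plain arithmetic. Replacing a 300 byte file
// with a 500 byte source while over quota only adds the 200 byte
// difference before re-checking, so the rename succeeds only if used
// size plus that difference still fits; a quota of 0 means unlimited.
func exampleFitsAfterOverwrite(usedSize, quotaSize, sourceSize, replacedSize int64) bool {
    if quotaSize == 0 {
        return true
    }
    usedSize += sourceSize - replacedSize
    return quotaSize-usedSize >= 0
}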
// GetMaxWriteSize returns the allowed size for an upload or an error
// if not enough space is available for a resume/append
func (c *BaseConnection) GetMaxWriteSize(quotaResult vfs.QuotaCheckResult, isResume bool, fileSize int64) (int64, error) {
maxWriteSize := quotaResult.GetRemainingSize()
if isResume {
if !c.Fs.IsUploadResumeSupported() {
return 0, c.GetOpUnsupportedError()
}
if c.User.Filters.MaxUploadFileSize > 0 && c.User.Filters.MaxUploadFileSize <= fileSize {
return 0, ErrQuotaExceeded
}
if c.User.Filters.MaxUploadFileSize > 0 {
maxUploadSize := c.User.Filters.MaxUploadFileSize - fileSize
if maxUploadSize < maxWriteSize || maxWriteSize == 0 {
maxWriteSize = maxUploadSize
}
}
} else {
if maxWriteSize > 0 {
maxWriteSize += fileSize
}
if c.User.Filters.MaxUploadFileSize > 0 && (c.User.Filters.MaxUploadFileSize < maxWriteSize || maxWriteSize == 0) {
maxWriteSize = c.User.Filters.MaxUploadFileSize
}
}
return maxWriteSize, nil
}
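// Editor's sketch (hypothetical helper, not part of the source): the resume
// branch of GetMaxWriteSize condensed. With a MaxUploadFileSize of 100 and
// an existing file of 40 bytes, at most 60 more bytes may be written; a
// remaining quota of 50 would be stricter and win. A zero result means
// the write size is unlimited.
func exampleMaxWriteSizeOnResume(remainingQuota, maxUploadFileSize, fileSize int64) int64 {
    maxWriteSize := remainingQuota
    if maxUploadFileSize > 0 {
        maxUploadSize := maxUploadFileSize - fileSize
        if maxUploadSize < maxWriteSize || maxWriteSize == 0 {
            maxWriteSize = maxUploadSize
        }
    }
    return maxWriteSize
}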
// HasSpace checks user's quota usage
func (c *BaseConnection) HasSpace(checkFiles bool, requestPath string) vfs.QuotaCheckResult {
result := vfs.QuotaCheckResult{
HasSpace: true,
AllowedSize: 0,
AllowedFiles: 0,
UsedSize: 0,
UsedFiles: 0,
QuotaSize: 0,
QuotaFiles: 0,
}
if dataprovider.GetQuotaTracking() == 0 {
return result
}
var err error
var vfolder vfs.VirtualFolder
vfolder, err = c.User.GetVirtualFolderForPath(path.Dir(requestPath))
if err == nil && !vfolder.IsIncludedInUserQuota() {
if vfolder.HasNoQuotaRestrictions(checkFiles) {
return result
}
result.QuotaSize = vfolder.QuotaSize
result.QuotaFiles = vfolder.QuotaFiles
result.UsedFiles, result.UsedSize, err = dataprovider.GetUsedVirtualFolderQuota(vfolder.MappedPath)
} else {
if c.User.HasNoQuotaRestrictions(checkFiles) {
return result
}
result.QuotaSize = c.User.QuotaSize
result.QuotaFiles = c.User.QuotaFiles
result.UsedFiles, result.UsedSize, err = dataprovider.GetUsedQuota(c.User.Username)
}
if err != nil {
c.Log(logger.LevelWarn, "error getting used quota for %#v request path %#v: %v", c.User.Username, requestPath, err)
result.HasSpace = false
return result
}
result.AllowedFiles = result.QuotaFiles - result.UsedFiles
result.AllowedSize = result.QuotaSize - result.UsedSize
if (checkFiles && result.QuotaFiles > 0 && result.UsedFiles >= result.QuotaFiles) ||
(result.QuotaSize > 0 && result.UsedSize >= result.QuotaSize) {
c.Log(logger.LevelDebug, "quota exceed for user %#v, request path %#v, num files: %v/%v, size: %v/%v check files: %v",
c.User.Username, requestPath, result.UsedFiles, result.QuotaFiles, result.UsedSize, result.QuotaSize, checkFiles)
result.HasSpace = false
return result
}
return result
}
func (c *BaseConnection) isCrossFoldersRequest(virtualSourcePath, virtualTargetPath string) bool {
sourceFolder, errSrc := c.User.GetVirtualFolderForPath(virtualSourcePath)
dstFolder, errDst := c.User.GetVirtualFolderForPath(virtualTargetPath)
if errSrc != nil && errDst != nil {
return false
}
if errSrc == nil && errDst == nil {
return sourceFolder.MappedPath != dstFolder.MappedPath
}
return true
}
func (c *BaseConnection) updateQuotaMoveBetweenVFolders(sourceFolder, dstFolder vfs.VirtualFolder, initialSize,
filesSize int64, numFiles int) {
if sourceFolder.MappedPath == dstFolder.MappedPath {
// both files are inside the same virtual folder
if initialSize != -1 {
dataprovider.UpdateVirtualFolderQuota(dstFolder.BaseVirtualFolder, -numFiles, -initialSize, false) //nolint:errcheck
if dstFolder.IsIncludedInUserQuota() {
dataprovider.UpdateUserQuota(c.User, -numFiles, -initialSize, false) //nolint:errcheck
}
}
return
}
// files are inside different virtual folders
dataprovider.UpdateVirtualFolderQuota(sourceFolder.BaseVirtualFolder, -numFiles, -filesSize, false) //nolint:errcheck
if sourceFolder.IsIncludedInUserQuota() {
dataprovider.UpdateUserQuota(c.User, -numFiles, -filesSize, false) //nolint:errcheck
}
if initialSize == -1 {
dataprovider.UpdateVirtualFolderQuota(dstFolder.BaseVirtualFolder, numFiles, filesSize, false) //nolint:errcheck
if dstFolder.IsIncludedInUserQuota() {
dataprovider.UpdateUserQuota(c.User, numFiles, filesSize, false) //nolint:errcheck
}
} else {
// we cannot have a directory here, initialSize != -1 only for files
dataprovider.UpdateVirtualFolderQuota(dstFolder.BaseVirtualFolder, 0, filesSize-initialSize, false) //nolint:errcheck
if dstFolder.IsIncludedInUserQuota() {
dataprovider.UpdateUserQuota(c.User, 0, filesSize-initialSize, false) //nolint:errcheck
}
}
}
func (c *BaseConnection) updateQuotaMoveFromVFolder(sourceFolder vfs.VirtualFolder, initialSize, filesSize int64, numFiles int) {
// move between a virtual folder and the user home dir
dataprovider.UpdateVirtualFolderQuota(sourceFolder.BaseVirtualFolder, -numFiles, -filesSize, false) //nolint:errcheck
if sourceFolder.IsIncludedInUserQuota() {
dataprovider.UpdateUserQuota(c.User, -numFiles, -filesSize, false) //nolint:errcheck
}
if initialSize == -1 {
dataprovider.UpdateUserQuota(c.User, numFiles, filesSize, false) //nolint:errcheck
} else {
// we cannot have a directory here, initialSize != -1 only for files
dataprovider.UpdateUserQuota(c.User, 0, filesSize-initialSize, false) //nolint:errcheck
}
}
func (c *BaseConnection) updateQuotaMoveToVFolder(dstFolder vfs.VirtualFolder, initialSize, filesSize int64, numFiles int) {
// move between the user home dir and a virtual folder
dataprovider.UpdateUserQuota(c.User, -numFiles, -filesSize, false) //nolint:errcheck
if initialSize == -1 {
dataprovider.UpdateVirtualFolderQuota(dstFolder.BaseVirtualFolder, numFiles, filesSize, false) //nolint:errcheck
if dstFolder.IsIncludedInUserQuota() {
dataprovider.UpdateUserQuota(c.User, numFiles, filesSize, false) //nolint:errcheck
}
} else {
// we cannot have a directory here, initialSize != -1 only for files
dataprovider.UpdateVirtualFolderQuota(dstFolder.BaseVirtualFolder, 0, filesSize-initialSize, false) //nolint:errcheck
if dstFolder.IsIncludedInUserQuota() {
dataprovider.UpdateUserQuota(c.User, 0, filesSize-initialSize, false) //nolint:errcheck
}
}
}
func (c *BaseConnection) updateQuotaAfterRename(virtualSourcePath, virtualTargetPath, targetPath string, initialSize int64) error {
// we don't allow overwriting an existing directory, so targetPath can be:
// - a new file (a symlink is treated as a new file here)
// - a file overwriting an existing one
// - a new directory
// initialSize != -1 only when overwriting files
sourceFolder, errSrc := c.User.GetVirtualFolderForPath(path.Dir(virtualSourcePath))
dstFolder, errDst := c.User.GetVirtualFolderForPath(path.Dir(virtualTargetPath))
if errSrc != nil && errDst != nil {
// both files are contained inside the user home dir
if initialSize != -1 {
// we cannot have a directory here
dataprovider.UpdateUserQuota(c.User, -1, -initialSize, false) //nolint:errcheck
}
return nil
}
filesSize := int64(0)
numFiles := 1
if fi, err := c.Fs.Stat(targetPath); err == nil {
if fi.Mode().IsDir() {
numFiles, filesSize, err = c.Fs.GetDirSize(targetPath)
if err != nil {
c.Log(logger.LevelWarn, "failed to update quota after rename, error scanning moved folder %#v: %v",
targetPath, err)
return err
}
} else {
filesSize = fi.Size()
}
} else {
c.Log(logger.LevelWarn, "failed to update quota after rename, file %#v stat error: %+v", targetPath, err)
return err
}
if errSrc == nil && errDst == nil {
c.updateQuotaMoveBetweenVFolders(sourceFolder, dstFolder, initialSize, filesSize, numFiles)
}
if errSrc == nil && errDst != nil {
c.updateQuotaMoveFromVFolder(sourceFolder, initialSize, filesSize, numFiles)
}
if errSrc != nil && errDst == nil {
c.updateQuotaMoveToVFolder(dstFolder, initialSize, filesSize, numFiles)
}
return nil
}
// GetPermissionDeniedError returns an appropriate permission denied error for the connection protocol
func (c *BaseConnection) GetPermissionDeniedError() error {
switch c.protocol {
case ProtocolSFTP:
return sftp.ErrSSHFxPermissionDenied
case ProtocolWebDAV:
return os.ErrPermission
default:
return ErrPermissionDenied
}
}
// GetNotExistError returns an appropriate not exist error for the connection protocol
func (c *BaseConnection) GetNotExistError() error {
switch c.protocol {
case ProtocolSFTP:
return sftp.ErrSSHFxNoSuchFile
case ProtocolWebDAV:
return os.ErrNotExist
default:
return ErrNotExist
}
}
// GetOpUnsupportedError returns an appropriate operation not supported error for the connection protocol
func (c *BaseConnection) GetOpUnsupportedError() error {
switch c.protocol {
case ProtocolSFTP:
return sftp.ErrSSHFxOpUnsupported
default:
return ErrOpUnsupported
}
}
// GetGenericError returns an appropriate generic error for the connection protocol
func (c *BaseConnection) GetGenericError(err error) error {
switch c.protocol {
case ProtocolSFTP:
return sftp.ErrSSHFxFailure
default:
if err == ErrPermissionDenied || err == ErrNotExist || err == ErrOpUnsupported || err == ErrQuotaExceeded {
return err
}
return ErrGenericFailure
}
}
// GetFsError converts a filesystem error to a protocol error
func (c *BaseConnection) GetFsError(err error) error {
if c.Fs.IsNotExist(err) {
return c.GetNotExistError()
} else if c.Fs.IsPermission(err) {
return c.GetPermissionDeniedError()
} else if err != nil {
return c.GetGenericError(err)
}
return nil
}

common/connection_test.go (new file, 1208 lines; diff too large to display)

common/tlsutils.go (new file, 54 lines)

@@ -0,0 +1,54 @@
package common
import (
"crypto/tls"
"sync"
"github.com/drakkan/sftpgo/logger"
)
// CertManager defines a TLS certificate manager
type CertManager struct {
certPath string
keyPath string
sync.RWMutex
cert *tls.Certificate
}
// LoadCertificate loads the configured x509 key pair
func (m *CertManager) LoadCertificate(logSender string) error {
newCert, err := tls.LoadX509KeyPair(m.certPath, m.keyPath)
if err != nil {
logger.Warn(logSender, "", "unable to load X509 key pair, cert file %#v key file %#v error: %v",
m.certPath, m.keyPath, err)
return err
}
logger.Debug(logSender, "", "TLS certificate %#v successfully loaded", m.certPath)
m.Lock()
defer m.Unlock()
m.cert = &newCert
return nil
}
// GetCertificateFunc returns a function to retrieve the loaded certificate
func (m *CertManager) GetCertificateFunc() func(*tls.ClientHelloInfo) (*tls.Certificate, error) {
return func(clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) {
m.RLock()
defer m.RUnlock()
return m.cert, nil
}
}
// NewCertManager creates a new certificate manager
func NewCertManager(certificateFile, certificateKeyFile, logSender string) (*CertManager, error) {
manager := &CertManager{
cert: nil,
certPath: certificateFile,
keyPath: certificateKeyFile,
}
err := manager.LoadCertificate(logSender)
if err != nil {
return nil, err
}
return manager, nil
}
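
A minimal usage sketch (an editor's addition; the paths, port and log sender are placeholders): the function returned by GetCertificateFunc plugs directly into crypto/tls, and since the certificate is read under a read lock, a later LoadCertificate call can swap it without restarting the listener.

package main

import (
    "crypto/tls"
    "net/http"

    "github.com/drakkan/sftpgo/common"
)

func main() {
    // placeholder paths: point these at a real certificate/key pair
    certMgr, err := common.NewCertManager("/tmp/test.crt", "/tmp/test.key", "example")
    if err != nil {
        panic(err)
    }
    srv := &http.Server{
        Addr:      ":8443",
        TLSConfig: &tls.Config{GetCertificate: certMgr.GetCertificateFunc()},
    }
    // empty cert/key arguments: the certificate comes from GetCertificate
    if err := srv.ListenAndServeTLS("", ""); err != nil {
        panic(err)
    }
}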

common/tlsutils_test.go (new file, 69 lines)

@@ -0,0 +1,69 @@
package common
import (
"crypto/tls"
"io/ioutil"
"os"
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
)
const (
httpsCert = `-----BEGIN CERTIFICATE-----
MIICHTCCAaKgAwIBAgIUHnqw7QnB1Bj9oUsNpdb+ZkFPOxMwCgYIKoZIzj0EAwIw
RTELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoMGElu
dGVybmV0IFdpZGdpdHMgUHR5IEx0ZDAeFw0yMDAyMDQwOTUzMDRaFw0zMDAyMDEw
OTUzMDRaMEUxCzAJBgNVBAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEwHwYD
VQQKDBhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQwdjAQBgcqhkjOPQIBBgUrgQQA
IgNiAARCjRMqJ85rzMC998X5z761nJ+xL3bkmGVqWvrJ51t5OxV0v25NsOgR82CA
NXUgvhVYs7vNFN+jxtb2aj6Xg+/2G/BNxkaFspIVCzgWkxiz7XE4lgUwX44FCXZM
3+JeUbKjUzBRMB0GA1UdDgQWBBRhLw+/o3+Z02MI/d4tmaMui9W16jAfBgNVHSME
GDAWgBRhLw+/o3+Z02MI/d4tmaMui9W16jAPBgNVHRMBAf8EBTADAQH/MAoGCCqG
SM49BAMCA2kAMGYCMQDqLt2lm8mE+tGgtjDmtFgdOcI72HSbRQ74D5rYTzgST1rY
/8wTi5xl8TiFUyLMUsICMQC5ViVxdXbhuG7gX6yEqSkMKZICHpO8hqFwOD/uaFVI
dV4vKmHUzwK/eIx+8Ay3neE=
-----END CERTIFICATE-----`
httpsKey = `-----BEGIN EC PARAMETERS-----
BgUrgQQAIg==
-----END EC PARAMETERS-----
-----BEGIN EC PRIVATE KEY-----
MIGkAgEBBDCfMNsN6miEE3rVyUPwElfiJSWaR5huPCzUenZOfJT04GAcQdWvEju3
UM2lmBLIXpGgBwYFK4EEACKhZANiAARCjRMqJ85rzMC998X5z761nJ+xL3bkmGVq
WvrJ51t5OxV0v25NsOgR82CANXUgvhVYs7vNFN+jxtb2aj6Xg+/2G/BNxkaFspIV
CzgWkxiz7XE4lgUwX44FCXZM3+JeUbI=
-----END EC PRIVATE KEY-----`
)
func TestLoadCertificate(t *testing.T) {
certPath := filepath.Join(os.TempDir(), "test.crt")
keyPath := filepath.Join(os.TempDir(), "test.key")
err := ioutil.WriteFile(certPath, []byte(httpsCert), os.ModePerm)
assert.NoError(t, err)
err = ioutil.WriteFile(keyPath, []byte(httpsKey), os.ModePerm)
assert.NoError(t, err)
certManager, err := NewCertManager(certPath, keyPath, logSenderTest)
assert.NoError(t, err)
certFunc := certManager.GetCertificateFunc()
if assert.NotNil(t, certFunc) {
hello := &tls.ClientHelloInfo{
ServerName: "localhost",
CipherSuites: []uint16{tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305},
}
cert, err := certFunc(hello)
assert.NoError(t, err)
assert.Equal(t, certManager.cert, cert)
}
err = os.Remove(certPath)
assert.NoError(t, err)
err = os.Remove(keyPath)
assert.NoError(t, err)
}
func TestLoadInvalidCert(t *testing.T) {
certManager, err := NewCertManager("test.crt", "test.key", logSenderTest)
assert.Error(t, err)
assert.Nil(t, certManager)
}

common/transfer.go (new file, 290 lines)

@@ -0,0 +1,290 @@
package common
import (
"errors"
"os"
"path"
"sync"
"sync/atomic"
"time"
"github.com/drakkan/sftpgo/dataprovider"
"github.com/drakkan/sftpgo/logger"
"github.com/drakkan/sftpgo/metrics"
"github.com/drakkan/sftpgo/vfs"
)
var (
// ErrTransferClosed defines the error returned for a closed transfer
ErrTransferClosed = errors.New("transfer already closed")
)
// BaseTransfer contains the transfer details common to all protocols, for an upload or a download.
type BaseTransfer struct { //nolint:maligned
ID uint64
Fs vfs.Fs
File *os.File
Connection *BaseConnection
cancelFn func()
fsPath string
start time.Time
transferType int
MinWriteOffset int64
InitialSize int64
isNewFile bool
requestPath string
BytesSent int64
BytesReceived int64
MaxWriteSize int64
AbortTransfer int32
sync.Mutex
ErrTransfer error
}
// NewBaseTransfer returns a new BaseTransfer and adds it to the given connection
func NewBaseTransfer(file *os.File, conn *BaseConnection, cancelFn func(), fsPath, requestPath string, transferType int,
minWriteOffset, initialSize, maxWriteSize int64, isNewFile bool, fs vfs.Fs) *BaseTransfer {
t := &BaseTransfer{
ID: conn.GetTransferID(),
File: file,
Connection: conn,
cancelFn: cancelFn,
fsPath: fsPath,
start: time.Now(),
transferType: transferType,
MinWriteOffset: minWriteOffset,
InitialSize: initialSize,
isNewFile: isNewFile,
requestPath: requestPath,
BytesSent: 0,
BytesReceived: 0,
MaxWriteSize: maxWriteSize,
AbortTransfer: 0,
Fs: fs,
}
conn.AddTransfer(t)
return t
}
// GetID returns the transfer ID
func (t *BaseTransfer) GetID() uint64 {
return t.ID
}
// GetType returns the transfer type
func (t *BaseTransfer) GetType() int {
return t.transferType
}
// GetSize returns the transferred size
func (t *BaseTransfer) GetSize() int64 {
if t.transferType == TransferDownload {
return atomic.LoadInt64(&t.BytesSent)
}
return atomic.LoadInt64(&t.BytesReceived)
}
// GetStartTime returns the start time
func (t *BaseTransfer) GetStartTime() time.Time {
return t.start
}
// SignalClose signals that the transfer should be closed.
// For some protocols, for example WebDAV, we have no
// access to the network connection, so we use this method
// to make the next read or write fail
func (t *BaseTransfer) SignalClose() {
atomic.StoreInt32(&(t.AbortTransfer), 1)
}
// GetVirtualPath returns the transfer virtual path
func (t *BaseTransfer) GetVirtualPath() string {
return t.requestPath
}
// GetFsPath returns the transfer filesystem path
func (t *BaseTransfer) GetFsPath() string {
return t.fsPath
}
// GetRealFsPath returns the real transfer filesystem path.
// If atomic uploads are enabled this differs from fsPath
func (t *BaseTransfer) GetRealFsPath(fsPath string) string {
if fsPath == t.GetFsPath() {
if t.File != nil {
return t.File.Name()
}
return t.fsPath
}
return ""
}
// SetCancelFn sets the cancel function for the transfer
func (t *BaseTransfer) SetCancelFn(cancelFn func()) {
t.cancelFn = cancelFn
}
// Truncate changes the size of the opened file.
// Supported for local fs only
func (t *BaseTransfer) Truncate(fsPath string, size int64) (int64, error) {
if fsPath == t.GetFsPath() {
if t.File != nil {
initialSize := t.InitialSize
err := t.File.Truncate(size)
if err == nil {
t.Lock()
t.InitialSize = size
if t.MaxWriteSize > 0 {
sizeDiff := initialSize - size
t.MaxWriteSize += sizeDiff
metrics.TransferCompleted(atomic.LoadInt64(&t.BytesSent), atomic.LoadInt64(&t.BytesReceived), t.transferType, t.ErrTransfer)
atomic.StoreInt64(&t.BytesReceived, 0)
}
t.Unlock()
}
t.Connection.Log(logger.LevelDebug, "file %#v truncated to size %v max write size %v new initial size %v err: %v",
fsPath, size, t.MaxWriteSize, t.InitialSize, err)
return initialSize, err
}
if size == 0 && atomic.LoadInt64(&t.BytesSent) == 0 {
// for cloud providers the file is always truncated to zero, we don't support append/resume for uploads
return 0, nil
}
return 0, ErrOpUnsupported
}
return 0, errTransferMismatch
}
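// Editor's note (worked example, not in the original source): with the values
// used by TestTruncate in common/transfer_test.go below - MaxWriteSize 100,
// initial size 5, truncate to size 2 - sizeDiff = 5 - 2 = 3 and the new
// MaxWriteSize becomes 103.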
// TransferError is called if there is an unexpected error.
// For example network or client issues
func (t *BaseTransfer) TransferError(err error) {
t.Lock()
defer t.Unlock()
if t.ErrTransfer != nil {
return
}
t.ErrTransfer = err
if t.cancelFn != nil {
t.cancelFn()
}
elapsed := time.Since(t.start).Nanoseconds() / 1000000
t.Connection.Log(logger.LevelWarn, "Unexpected error for transfer, path: %#v, error: \"%v\" bytes sent: %v, "+
"bytes received: %v transfer running since %v ms", t.fsPath, t.ErrTransfer, atomic.LoadInt64(&t.BytesSent),
atomic.LoadInt64(&t.BytesReceived), elapsed)
}
// Close is called when the transfer completes.
// It logs the transfer info, updates the user quota (for uploads)
// and executes any defined action.
// If there is an error no action will be executed and, in atomic mode,
// we try to delete the temporary file
func (t *BaseTransfer) Close() error {
defer t.Connection.RemoveTransfer(t)
var err error
numFiles := 0
if t.isNewFile {
numFiles = 1
}
metrics.TransferCompleted(atomic.LoadInt64(&t.BytesSent), atomic.LoadInt64(&t.BytesReceived), t.transferType, t.ErrTransfer)
if t.ErrTransfer == ErrQuotaExceeded && t.File != nil {
// if quota is exceeded we try to remove the partial file for uploads to the local filesystem
err = os.Remove(t.File.Name())
if err == nil {
numFiles--
atomic.StoreInt64(&t.BytesReceived, 0)
t.MinWriteOffset = 0
}
t.Connection.Log(logger.LevelWarn, "upload denied due to space limit, delete temporary file: %#v, deletion error: %v",
t.File.Name(), err)
} else if t.transferType == TransferUpload && t.File != nil && t.File.Name() != t.fsPath {
if t.ErrTransfer == nil || Config.UploadMode == UploadModeAtomicWithResume {
err = os.Rename(t.File.Name(), t.fsPath)
t.Connection.Log(logger.LevelDebug, "atomic upload completed, rename: %#v -> %#v, error: %v",
t.File.Name(), t.fsPath, err)
} else {
err = os.Remove(t.File.Name())
t.Connection.Log(logger.LevelWarn, "atomic upload completed with error: \"%v\", delete temporary file: %#v, "+
"deletion error: %v", t.ErrTransfer, t.File.Name(), err)
if err == nil {
numFiles--
atomic.StoreInt64(&t.BytesReceived, 0)
t.MinWriteOffset = 0
}
}
}
elapsed := time.Since(t.start).Nanoseconds() / 1000000
if t.transferType == TransferDownload {
logger.TransferLog(downloadLogSender, t.fsPath, elapsed, atomic.LoadInt64(&t.BytesSent), t.Connection.User.Username,
t.Connection.ID, t.Connection.protocol)
action := newActionNotification(&t.Connection.User, operationDownload, t.fsPath, "", "", t.Connection.protocol,
atomic.LoadInt64(&t.BytesSent), t.ErrTransfer)
go action.execute() //nolint:errcheck
} else {
fileSize := atomic.LoadInt64(&t.BytesReceived) + t.MinWriteOffset
info, err := t.Fs.Stat(t.fsPath)
if err == nil {
fileSize = info.Size()
}
t.Connection.Log(logger.LevelDebug, "upload file size %v stat error %v", fileSize, err)
t.updateQuota(numFiles, fileSize)
logger.TransferLog(uploadLogSender, t.fsPath, elapsed, atomic.LoadInt64(&t.BytesReceived), t.Connection.User.Username,
t.Connection.ID, t.Connection.protocol)
action := newActionNotification(&t.Connection.User, operationUpload, t.fsPath, "", "", t.Connection.protocol,
fileSize, t.ErrTransfer)
go action.execute() //nolint:errcheck
}
if t.ErrTransfer != nil {
t.Connection.Log(logger.LevelWarn, "transfer error: %v, path: %#v", t.ErrTransfer, t.fsPath)
if err == nil {
err = t.ErrTransfer
}
}
return err
}
func (t *BaseTransfer) updateQuota(numFiles int, fileSize int64) bool {
// S3 uploads are atomic, if there is an error nothing is uploaded
if t.File == nil && t.ErrTransfer != nil {
return false
}
sizeDiff := fileSize - t.InitialSize
if t.transferType == TransferUpload && (numFiles != 0 || sizeDiff > 0) {
vfolder, err := t.Connection.User.GetVirtualFolderForPath(path.Dir(t.requestPath))
if err == nil {
dataprovider.UpdateVirtualFolderQuota(vfolder.BaseVirtualFolder, numFiles, //nolint:errcheck
sizeDiff, false)
if vfolder.IsIncludedInUserQuota() {
dataprovider.UpdateUserQuota(t.Connection.User, numFiles, sizeDiff, false) //nolint:errcheck
}
} else {
dataprovider.UpdateUserQuota(t.Connection.User, numFiles, sizeDiff, false) //nolint:errcheck
}
return true
}
return false
}
// HandleThrottle manages bandwidth throttling
func (t *BaseTransfer) HandleThrottle() {
var wantedBandwidth int64
var transferredBytes int64
if t.transferType == TransferDownload {
wantedBandwidth = t.Connection.User.DownloadBandwidth
transferredBytes = atomic.LoadInt64(&t.BytesSent)
} else {
wantedBandwidth = t.Connection.User.UploadBandwidth
transferredBytes = atomic.LoadInt64(&t.BytesReceived)
}
if wantedBandwidth > 0 {
// real and wanted elapsed as milliseconds, bytes as kilobytes
realElapsed := time.Since(t.start).Nanoseconds() / 1000000
// transferredBytes / 1000 gives KB; bandwidth is in KB/s, so we multiply by 1000 to get milliseconds
wantedElapsed := 1000 * (transferredBytes / 1000) / wantedBandwidth
if wantedElapsed > realElapsed {
toSleep := time.Duration(wantedElapsed - realElapsed)
time.Sleep(toSleep * time.Millisecond)
}
}
}
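
To make the arithmetic above concrete, a standalone editor's sketch with made-up numbers (not part of the source): bandwidth limits are expressed in KB/s, so for 131072 bytes at 50 KB/s the wanted elapsed time is 1000 * (131072 / 1000) / 50 = 2620 ms; if only 1000 ms have really passed, the transfer sleeps for the remaining 1620 ms.

package main

import "fmt"

// throttleSleepMs mirrors HandleThrottle above: transferred bytes are
// converted to KB, divided by the allowed KB/s, and scaled to
// milliseconds; the result is how long the transfer must pause.
func throttleSleepMs(transferredBytes, wantedBandwidthKBs, realElapsedMs int64) int64 {
    wantedElapsed := 1000 * (transferredBytes / 1000) / wantedBandwidthKBs
    if wantedElapsed > realElapsedMs {
        return wantedElapsed - realElapsedMs
    }
    return 0
}

func main() {
    fmt.Println(throttleSleepMs(131072, 50, 1000)) // prints 1620
}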

common/transfer_test.go (new file, 254 lines)

@@ -0,0 +1,254 @@
package common
import (
"errors"
"io/ioutil"
"os"
"path/filepath"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/drakkan/sftpgo/dataprovider"
"github.com/drakkan/sftpgo/vfs"
)
func TestTransferUpdateQuota(t *testing.T) {
conn := NewBaseConnection("", ProtocolSFTP, dataprovider.User{}, nil)
transfer := BaseTransfer{
Connection: conn,
transferType: TransferUpload,
BytesReceived: 123,
Fs: vfs.NewOsFs("", os.TempDir(), nil),
}
errFake := errors.New("fake error")
transfer.TransferError(errFake)
assert.False(t, transfer.updateQuota(1, 0))
err := transfer.Close()
if assert.Error(t, err) {
assert.EqualError(t, err, errFake.Error())
}
mappedPath := filepath.Join(os.TempDir(), "vdir")
vdirPath := "/vdir"
conn.User.VirtualFolders = append(conn.User.VirtualFolders, vfs.VirtualFolder{
BaseVirtualFolder: vfs.BaseVirtualFolder{
MappedPath: mappedPath,
},
VirtualPath: vdirPath,
QuotaFiles: -1,
QuotaSize: -1,
})
transfer.ErrTransfer = nil
transfer.BytesReceived = 1
transfer.requestPath = "/vdir/file"
assert.True(t, transfer.updateQuota(1, 0))
err = transfer.Close()
assert.NoError(t, err)
}
func TestTransferThrottling(t *testing.T) {
u := dataprovider.User{
Username: "test",
UploadBandwidth: 50,
DownloadBandwidth: 40,
}
fs := vfs.NewOsFs("", os.TempDir(), nil)
testFileSize := int64(131072)
wantedUploadElapsed := 1000 * (testFileSize / 1000) / u.UploadBandwidth
wantedDownloadElapsed := 1000 * (testFileSize / 1000) / u.DownloadBandwidth
// some tolerance
wantedUploadElapsed -= wantedDownloadElapsed / 10
wantedDownloadElapsed -= wantedDownloadElapsed / 10
conn := NewBaseConnection("id", ProtocolSCP, u, nil)
transfer := NewBaseTransfer(nil, conn, nil, "", "", TransferUpload, 0, 0, 0, true, fs)
transfer.BytesReceived = testFileSize
transfer.Connection.UpdateLastActivity()
startTime := transfer.Connection.GetLastActivity()
transfer.HandleThrottle()
elapsed := time.Since(startTime).Nanoseconds() / 1000000
assert.GreaterOrEqual(t, elapsed, wantedUploadElapsed, "upload bandwidth throttling not respected")
err := transfer.Close()
assert.NoError(t, err)
transfer = NewBaseTransfer(nil, conn, nil, "", "", TransferDownload, 0, 0, 0, true, fs)
transfer.BytesSent = testFileSize
transfer.Connection.UpdateLastActivity()
startTime = transfer.Connection.GetLastActivity()
transfer.HandleThrottle()
elapsed = time.Since(startTime).Nanoseconds() / 1000000
assert.GreaterOrEqual(t, elapsed, wantedDownloadElapsed, "download bandwidth throttling not respected")
err = transfer.Close()
assert.NoError(t, err)
}
func TestRealPath(t *testing.T) {
testFile := filepath.Join(os.TempDir(), "afile.txt")
fs := vfs.NewOsFs("123", os.TempDir(), nil)
u := dataprovider.User{
Username: "user",
HomeDir: os.TempDir(),
}
u.Permissions = make(map[string][]string)
u.Permissions["/"] = []string{dataprovider.PermAny}
file, err := os.Create(testFile)
require.NoError(t, err)
conn := NewBaseConnection(fs.ConnectionID(), ProtocolSFTP, u, fs)
transfer := NewBaseTransfer(file, conn, nil, testFile, "/transfer_test_file", TransferUpload, 0, 0, 0, true, fs)
rPath := transfer.GetRealFsPath(testFile)
assert.Equal(t, testFile, rPath)
rPath = conn.getRealFsPath(testFile)
assert.Equal(t, testFile, rPath)
err = transfer.Close()
assert.NoError(t, err)
err = file.Close()
assert.NoError(t, err)
transfer.File = nil
rPath = transfer.GetRealFsPath(testFile)
assert.Equal(t, testFile, rPath)
rPath = transfer.GetRealFsPath("")
assert.Empty(t, rPath)
err = os.Remove(testFile)
assert.NoError(t, err)
assert.Len(t, conn.GetTransfers(), 0)
}
func TestTruncate(t *testing.T) {
testFile := filepath.Join(os.TempDir(), "transfer_test_file")
fs := vfs.NewOsFs("123", os.TempDir(), nil)
u := dataprovider.User{
Username: "user",
HomeDir: os.TempDir(),
}
u.Permissions = make(map[string][]string)
u.Permissions["/"] = []string{dataprovider.PermAny}
file, err := os.Create(testFile)
if !assert.NoError(t, err) {
assert.FailNow(t, "unable to open test file")
}
_, err = file.Write([]byte("hello"))
assert.NoError(t, err)
conn := NewBaseConnection(fs.ConnectionID(), ProtocolSFTP, u, fs)
transfer := NewBaseTransfer(file, conn, nil, testFile, "/transfer_test_file", TransferUpload, 0, 5, 100, false, fs)
err = conn.SetStat(testFile, "/transfer_test_file", &StatAttributes{
Size: 2,
Flags: StatAttrSize,
})
assert.NoError(t, err)
assert.Equal(t, int64(103), transfer.MaxWriteSize)
err = transfer.Close()
assert.NoError(t, err)
err = file.Close()
assert.NoError(t, err)
fi, err := os.Stat(testFile)
if assert.NoError(t, err) {
assert.Equal(t, int64(2), fi.Size())
}
transfer = NewBaseTransfer(file, conn, nil, testFile, "/transfer_test_file", TransferUpload, 0, 0, 100, true, fs)
// file.Stat will fail on a closed file
err = conn.SetStat(testFile, "/transfer_test_file", &StatAttributes{
Size: 2,
Flags: StatAttrSize,
})
assert.Error(t, err)
err = transfer.Close()
assert.NoError(t, err)
transfer = NewBaseTransfer(nil, conn, nil, testFile, "", TransferUpload, 0, 0, 0, true, fs)
_, err = transfer.Truncate("mismatch", 0)
assert.EqualError(t, err, errTransferMismatch.Error())
_, err = transfer.Truncate(testFile, 0)
assert.NoError(t, err)
_, err = transfer.Truncate(testFile, 1)
assert.EqualError(t, err, ErrOpUnsupported.Error())
err = transfer.Close()
assert.NoError(t, err)
err = os.Remove(testFile)
assert.NoError(t, err)
assert.Len(t, conn.GetTransfers(), 0)
}
func TestTransferErrors(t *testing.T) {
isCancelled := false
cancelFn := func() {
isCancelled = true
}
testFile := filepath.Join(os.TempDir(), "transfer_test_file")
fs := vfs.NewOsFs("id", os.TempDir(), nil)
u := dataprovider.User{
Username: "test",
HomeDir: os.TempDir(),
}
err := ioutil.WriteFile(testFile, []byte("test data"), os.ModePerm)
assert.NoError(t, err)
file, err := os.Open(testFile)
if !assert.NoError(t, err) {
assert.FailNow(t, "unable to open test file")
}
conn := NewBaseConnection("id", ProtocolSFTP, u, fs)
transfer := NewBaseTransfer(file, conn, nil, testFile, "/transfer_test_file", TransferUpload, 0, 0, 0, true, fs)
assert.Nil(t, transfer.cancelFn)
assert.Equal(t, testFile, transfer.GetFsPath())
transfer.SetCancelFn(cancelFn)
errFake := errors.New("err fake")
transfer.BytesReceived = 9
transfer.TransferError(ErrQuotaExceeded)
assert.True(t, isCancelled)
transfer.TransferError(errFake)
assert.Error(t, transfer.ErrTransfer, ErrQuotaExceeded.Error())
// the file is closed by the embedding struct before calling Close
err = file.Close()
assert.NoError(t, err)
err = transfer.Close()
if assert.Error(t, err) {
assert.Error(t, err, ErrQuotaExceeded.Error())
}
assert.NoFileExists(t, testFile)
err = ioutil.WriteFile(testFile, []byte("test data"), os.ModePerm)
assert.NoError(t, err)
file, err = os.Open(testFile)
if !assert.NoError(t, err) {
assert.FailNow(t, "unable to open test file")
}
fsPath := filepath.Join(os.TempDir(), "test_file")
transfer = NewBaseTransfer(file, conn, nil, fsPath, "/test_file", TransferUpload, 0, 0, 0, true, fs)
transfer.BytesReceived = 9
transfer.TransferError(errFake)
assert.Error(t, transfer.ErrTransfer, errFake.Error())
// the file is closed by the embedding struct before calling Close
err = file.Close()
assert.NoError(t, err)
err = transfer.Close()
if assert.Error(t, err) {
assert.Error(t, err, errFake.Error())
}
assert.NoFileExists(t, testFile)
err = ioutil.WriteFile(testFile, []byte("test data"), os.ModePerm)
assert.NoError(t, err)
file, err = os.Open(testFile)
if !assert.NoError(t, err) {
assert.FailNow(t, "unable to open test file")
}
transfer = NewBaseTransfer(file, conn, nil, fsPath, "/test_file", TransferUpload, 0, 0, 0, true, fs)
transfer.BytesReceived = 9
// the file is closed by the embedding struct before calling Close
err = file.Close()
assert.NoError(t, err)
err = transfer.Close()
assert.NoError(t, err)
assert.NoFileExists(t, testFile)
assert.FileExists(t, fsPath)
err = os.Remove(fsPath)
assert.NoError(t, err)
assert.Len(t, conn.GetTransfers(), 0)
}

config/config.go (modified)

@@ -1,24 +1,26 @@
// Package config manages the configuration
package config
import (
"fmt"
"strings"
"github.com/drakkan/sftpgo/api"
"github.com/spf13/viper"
"github.com/drakkan/sftpgo/common"
"github.com/drakkan/sftpgo/dataprovider"
"github.com/drakkan/sftpgo/ftpd"
"github.com/drakkan/sftpgo/httpclient"
"github.com/drakkan/sftpgo/httpd"
"github.com/drakkan/sftpgo/logger"
"github.com/drakkan/sftpgo/sftpd"
"github.com/spf13/viper"
"github.com/drakkan/sftpgo/utils"
"github.com/drakkan/sftpgo/version"
"github.com/drakkan/sftpgo/webdavd"
)
const (
logSender = "config"
defaultBanner = "SFTPGo"
logSender = "config"
// DefaultConfigName defines the name for the default config file.
// This is the file name without extension, we use viper and so we
// support all the config files format supported by viper
@@ -28,33 +30,83 @@ const (
)
var (
globalConf globalConfig
defaultSFTPDBanner = fmt.Sprintf("SFTPGo_%v", version.Get().Version)
defaultFTPDBanner = fmt.Sprintf("SFTPGo %v ready", version.Get().Version)
)
type globalConfig struct {
Common common.Configuration `json:"common" mapstructure:"common"`
SFTPD sftpd.Configuration `json:"sftpd" mapstructure:"sftpd"`
FTPD ftpd.Configuration `json:"ftpd" mapstructure:"ftpd"`
WebDAVD webdavd.Configuration `json:"webdavd" mapstructure:"webdavd"`
ProviderConf dataprovider.Config `json:"data_provider" mapstructure:"data_provider"`
HTTPDConfig httpd.Conf `json:"httpd" mapstructure:"httpd"`
HTTPConfig httpclient.Config `json:"http" mapstructure:"http"`
}
func init() {
// create a default configuration to use if no config file is provided
globalConf = globalConfig{
Common: common.Configuration{
IdleTimeout: 15,
UploadMode: 0,
Actions: common.ProtocolActions{
ExecuteOn: []string{},
Hook: "",
},
SetstatMode: 0,
ProxyProtocol: 0,
ProxyAllowed: []string{},
},
SFTPD: sftpd.Configuration{
Banner: defaultSFTPDBanner,
BindPort: 2022,
BindAddress: "",
MaxAuthTries: 0,
HostKeys: []string{},
KexAlgorithms: []string{},
Ciphers: []string{},
MACs: []string{},
TrustedUserCAKeys: []string{},
LoginBannerFile: "",
EnabledSSHCommands: sftpd.GetDefaultSSHCommands(),
KeyboardInteractiveHook: "",
PasswordAuthentication: true,
},
FTPD: ftpd.Configuration{
BindPort: 0,
BindAddress: "",
Banner: defaultFTPDBanner,
BannerFile: "",
ActiveTransfersPortNon20: false,
ForcePassiveIP: "",
PassivePortRange: ftpd.PortRange{
Start: 50000,
End: 50100,
},
CertificateFile: "",
CertificateKeyFile: "",
},
WebDAVD: webdavd.Configuration{
BindPort: 0,
BindAddress: "",
CertificateFile: "",
CertificateKeyFile: "",
Cors: webdavd.Cors{
Enabled: false,
AllowedOrigins: []string{},
AllowedMethods: []string{},
AllowedHeaders: []string{},
ExposedHeaders: []string{},
AllowCredentials: false,
MaxAge: 0,
},
Cache: webdavd.Cache{
Enabled: true,
ExpirationTime: 0,
MaxSize: 50,
},
},
ProviderConf: dataprovider.Config{
Driver: "sqlite",
@@ -64,14 +116,47 @@ func init() {
Username: "",
Password: "",
ConnectionString: "",
SQLTablesPrefix: "",
ManageUsers: 1,
SSLMode: 0,
TrackQuota: 1,
PoolSize: 0,
UsersBaseDir: "",
Actions: dataprovider.UserActions{
ExecuteOn: []string{},
Hook: "",
},
ExternalAuthHook: "",
ExternalAuthScope: 0,
CredentialsPath: "credentials",
PreLoginHook: "",
PostLoginHook: "",
PostLoginScope: 0,
CheckPasswordHook: "",
CheckPasswordScope: 0,
PasswordHashing: dataprovider.PasswordHashing{
Argon2Options: dataprovider.Argon2Options{
Memory: 65536,
Iterations: 1,
Parallelism: 2,
},
},
UpdateMode: 0,
},
HTTPDConfig: httpd.Conf{
BindPort: 8080,
BindAddress: "127.0.0.1",
TemplatesPath: "templates",
StaticFilesPath: "static",
BackupsPath: "backups",
AuthUserFile: "",
CertificateFile: "",
CertificateKeyFile: "",
},
HTTPConfig: httpclient.Config{
Timeout: 20,
CACertificates: nil,
SkipTLSVerify: false,
},
}
@@ -79,9 +164,18 @@ func init() {
replacer := strings.NewReplacer(".", "__")
viper.SetEnvKeyReplacer(replacer)
viper.SetConfigName(DefaultConfigName)
setViperAdditionalConfigPaths()
viper.AddConfigPath(".")
viper.AutomaticEnv()
viper.AllowEmptyEnv(true)
}
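// Editor's note (not in the source): thanks to the "." -> "__" key replacer
// above, any nested option can be overridden from the environment; assuming
// the SFTPGO_ env prefix configured elsewhere in this package, a key such as
// data_provider.track_quota would map to SFTPGO_DATA_PROVIDER__TRACK_QUOTA.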
// GetCommonConfig returns the common protocols configuration
func GetCommonConfig() common.Configuration {
return globalConf.Common
}
// SetCommonConfig sets the common protocols configuration
func SetCommonConfig(config common.Configuration) {
globalConf.Common = config
}
// GetSFTPDConfig returns the configuration for the SFTP server
@@ -89,16 +183,62 @@ func GetSFTPDConfig() sftpd.Configuration {
return globalConf.SFTPD
}
// SetSFTPDConfig sets the configuration for the SFTP server
func SetSFTPDConfig(config sftpd.Configuration) {
globalConf.SFTPD = config
}
// GetFTPDConfig returns the configuration for the FTP server
func GetFTPDConfig() ftpd.Configuration {
return globalConf.FTPD
}
// SetFTPDConfig sets the configuration for the FTP server
func SetFTPDConfig(config ftpd.Configuration) {
globalConf.FTPD = config
}
// GetWebDAVDConfig returns the configuration for the WebDAV server
func GetWebDAVDConfig() webdavd.Configuration {
return globalConf.WebDAVD
}
// SetWebDAVDConfig sets the configuration for the WebDAV server
func SetWebDAVDConfig(config webdavd.Configuration) {
globalConf.WebDAVD = config
}
// GetHTTPDConfig returns the configuration for the HTTP server
func GetHTTPDConfig() httpd.Conf {
return globalConf.HTTPDConfig
}
// SetHTTPDConfig sets the configuration for the HTTP server
func SetHTTPDConfig(config httpd.Conf) {
globalConf.HTTPDConfig = config
}
// GetProviderConf returns the configuration for the data provider
func GetProviderConf() dataprovider.Config {
return globalConf.ProviderConf
}
// SetProviderConf sets the configuration for the data provider
func SetProviderConf(config dataprovider.Config) {
globalConf.ProviderConf = config
}
// GetHTTPConfig returns the configuration for HTTP clients
func GetHTTPConfig() httpclient.Config {
return globalConf.HTTPConfig
}
func getRedactedGlobalConf() globalConfig {
conf := globalConf
conf.ProviderConf.Password = "[redacted]"
return conf
}
// LoadConfig loads the configuration
// configDir will be added to the configuration search paths.
// The search path contains by default the current directory and on linux it contains
@@ -107,28 +247,104 @@ func GetProviderConf() dataprovider.Config {
func LoadConfig(configDir, configName string) error {
var err error
viper.AddConfigPath(configDir)
setViperAdditionalConfigPaths()
viper.AddConfigPath(".")
viper.SetConfigName(configName)
if err = viper.ReadInConfig(); err != nil {
logger.Warn(logSender, "error loading configuration file: %v. Default configuration will be used: %+v", err, globalConf)
logger.Warn(logSender, "", "error loading configuration file: %v. Default configuration will be used: %+v",
err, getRedactedGlobalConf())
logger.WarnToConsole("error loading configuration file: %v. Default configuration will be used.", err)
return err
}
err = viper.Unmarshal(&globalConf)
if err != nil {
logger.Warn(logSender, "error parsing configuration file: %v. Default configuration will be used: %+v", err, globalConf)
logger.Warn(logSender, "", "error parsing configuration file: %v. Default configuration will be used: %+v",
err, getRedactedGlobalConf())
logger.WarnToConsole("error parsing configuration file: %v. Default configuration will be used.", err)
return err
}
checkCommonParamsCompatibility()
if strings.TrimSpace(globalConf.SFTPD.Banner) == "" {
globalConf.SFTPD.Banner = defaultSFTPDBanner
}
if strings.TrimSpace(globalConf.FTPD.Banner) == "" {
globalConf.FTPD.Banner = defaultFTPDBanner
}
if len(globalConf.ProviderConf.UsersBaseDir) > 0 && !utils.IsFileInputValid(globalConf.ProviderConf.UsersBaseDir) {
err = fmt.Errorf("invalid users base dir %#v will be ignored", globalConf.ProviderConf.UsersBaseDir)
globalConf.ProviderConf.UsersBaseDir = ""
logger.Warn(logSender, "", "Configuration error: %v", err)
logger.WarnToConsole("Configuration error: %v", err)
}
logger.Debug(logSender, "config file used: '%v', config loaded: %+v", viper.ConfigFileUsed(), globalConf)
if globalConf.Common.UploadMode < 0 || globalConf.Common.UploadMode > 2 {
err = fmt.Errorf("invalid upload_mode 0, 1 and 2 are supported, configured: %v reset upload_mode to 0",
globalConf.Common.UploadMode)
globalConf.Common.UploadMode = 0
logger.Warn(logSender, "", "Configuration error: %v", err)
logger.WarnToConsole("Configuration error: %v", err)
}
if globalConf.Common.ProxyProtocol < 0 || globalConf.Common.ProxyProtocol > 2 {
err = fmt.Errorf("invalid proxy_protocol 0, 1 and 2 are supported, configured: %v reset proxy_protocol to 0",
globalConf.Common.ProxyProtocol)
globalConf.Common.ProxyProtocol = 0
logger.Warn(logSender, "", "Configuration error: %v", err)
logger.WarnToConsole("Configuration error: %v", err)
}
if globalConf.ProviderConf.ExternalAuthScope < 0 || globalConf.ProviderConf.ExternalAuthScope > 7 {
err = fmt.Errorf("invalid external_auth_scope: %v reset to 0", globalConf.ProviderConf.ExternalAuthScope)
globalConf.ProviderConf.ExternalAuthScope = 0
logger.Warn(logSender, "", "Configuration error: %v", err)
logger.WarnToConsole("Configuration error: %v", err)
}
if len(globalConf.ProviderConf.CredentialsPath) == 0 {
err = fmt.Errorf("invalid credentials path, reset to \"credentials\"")
globalConf.ProviderConf.CredentialsPath = "credentials"
logger.Warn(logSender, "", "Configuration error: %v", err)
logger.WarnToConsole("Configuration error: %v", err)
}
checkHostKeyCompatibility()
logger.Debug(logSender, "", "config file used: '%#v', config loaded: %+v", viper.ConfigFileUsed(), getRedactedGlobalConf())
return err
}
func checkHostKeyCompatibility() {
// we copy deprecated fields to new ones to keep backward compatibility so lint is disabled
if len(globalConf.SFTPD.Keys) > 0 && len(globalConf.SFTPD.HostKeys) == 0 { //nolint:staticcheck
logger.Warn(logSender, "", "keys is deprecated, please use host_keys")
logger.WarnToConsole("keys is deprecated, please use host_keys")
for _, k := range globalConf.SFTPD.Keys { //nolint:staticcheck
globalConf.SFTPD.HostKeys = append(globalConf.SFTPD.HostKeys, k.PrivateKey)
}
}
}
func checkCommonParamsCompatibility() {
// we copy deprecated fields to new ones to keep backward compatibility so lint is disabled
if globalConf.SFTPD.IdleTimeout > 0 { //nolint:staticcheck
logger.Warn(logSender, "", "sftpd.idle_timeout is deprecated, please use common.idle_timeout")
logger.WarnToConsole("sftpd.idle_timeout is deprecated, please use common.idle_timeout")
globalConf.Common.IdleTimeout = globalConf.SFTPD.IdleTimeout //nolint:staticcheck
}
if len(globalConf.SFTPD.Actions.Hook) > 0 && len(globalConf.Common.Actions.Hook) == 0 { //nolint:staticcheck
logger.Warn(logSender, "", "sftpd.actions is deprecated, please use common.actions")
logger.WarnToConsole("sftpd.actions is deprecated, please use common.actions")
globalConf.Common.Actions.ExecuteOn = globalConf.SFTPD.Actions.ExecuteOn //nolint:staticcheck
globalConf.Common.Actions.Hook = globalConf.SFTPD.Actions.Hook //nolint:staticcheck
}
if globalConf.SFTPD.SetstatMode > 0 && globalConf.Common.SetstatMode == 0 { //nolint:staticcheck
logger.Warn(logSender, "", "sftpd.setstat_mode is deprecated, please use common.setstat_mode")
logger.WarnToConsole("sftpd.setstat_mode is deprecated, please use common.setstat_mode")
globalConf.Common.SetstatMode = globalConf.SFTPD.SetstatMode //nolint:staticcheck
}
if globalConf.SFTPD.UploadMode > 0 && globalConf.Common.UploadMode == 0 { //nolint:staticcheck
logger.Warn(logSender, "", "sftpd.upload_mode is deprecated, please use common.upload_mode")
logger.WarnToConsole("sftpd.upload_mode is deprecated, please use common.upload_mode")
globalConf.Common.UploadMode = globalConf.SFTPD.UploadMode //nolint:staticcheck
}
if globalConf.SFTPD.ProxyProtocol > 0 && globalConf.Common.ProxyProtocol == 0 { //nolint:staticcheck
logger.Warn(logSender, "", "sftpd.proxy_protocol is deprecated, please use common.proxy_protocol")
logger.WarnToConsole("sftpd.proxy_protocol is deprecated, please use common.proxy_protocol")
globalConf.Common.ProxyProtocol = globalConf.SFTPD.ProxyProtocol //nolint:staticcheck
globalConf.Common.ProxyAllowed = globalConf.SFTPD.ProxyAllowed //nolint:staticcheck
}
}
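
To illustrate the backward-compatibility shim above with a configuration fragment (an editor's example, not taken from the source tree): a legacy file containing {"sftpd": {"idle_timeout": 21, "upload_mode": 2, "proxy_protocol": 1}} is loaded as if it had been written as {"common": {"idle_timeout": 21, "upload_mode": 2, "proxy_protocol": 1}}, with the deprecated sftpd values copied into the common section and a deprecation warning logged for each.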

config/config_test.go (modified)

@@ -8,92 +8,275 @@ import (
"strings"
"testing"
"github.com/drakkan/sftpgo/api"
"github.com/stretchr/testify/assert"
"github.com/drakkan/sftpgo/common"
"github.com/drakkan/sftpgo/config"
"github.com/drakkan/sftpgo/dataprovider"
"github.com/drakkan/sftpgo/ftpd"
"github.com/drakkan/sftpgo/httpclient"
"github.com/drakkan/sftpgo/httpd"
"github.com/drakkan/sftpgo/sftpd"
"github.com/drakkan/sftpgo/utils"
)
const (
tempConfigName = "temp"
configName = "sftpgo"
)
func TestLoadConfigTest(t *testing.T) {
configDir := ".."
err := config.LoadConfig(configDir, "")
if err != nil {
t.Errorf("error loading config")
}
emptyHTTPDConf := api.HTTPDConf{}
if config.GetHTTPDConfig() == emptyHTTPDConf {
t.Errorf("error loading httpd conf")
}
emptyProviderConf := dataprovider.Config{}
if config.GetProviderConf() == emptyProviderConf {
t.Errorf("error loading provider conf")
}
emptySFTPDConf := sftpd.Configuration{}
if config.GetSFTPDConfig().BindPort == emptySFTPDConf.BindPort {
t.Errorf("error loading SFTPD conf")
}
err := config.LoadConfig(configDir, configName)
assert.NoError(t, err)
assert.NotEqual(t, httpd.Conf{}, config.GetHTTPDConfig())
assert.NotEqual(t, dataprovider.Config{}, config.GetProviderConf())
assert.NotEqual(t, sftpd.Configuration{}, config.GetSFTPDConfig())
assert.NotEqual(t, httpclient.Config{}, config.GetHTTPConfig())
confName := tempConfigName + ".json"
configFilePath := filepath.Join(configDir, confName)
err = config.LoadConfig(configDir, tempConfigName)
assert.NotNil(t, err)
err = ioutil.WriteFile(configFilePath, []byte("{invalid json}"), os.ModePerm)
assert.NoError(t, err)
err = config.LoadConfig(configDir, tempConfigName)
assert.NotNil(t, err)
err = ioutil.WriteFile(configFilePath, []byte("{\"sftpd\": {\"bind_port\": \"a\"}}"), os.ModePerm)
assert.NoError(t, err)
err = config.LoadConfig(configDir, tempConfigName)
assert.NotNil(t, err)
err = os.Remove(configFilePath)
assert.NoError(t, err)
}
func TestEmptyBanner(t *testing.T) {
configDir := ".."
confName := tempConfigName + ".json"
configFilePath := filepath.Join(configDir, confName)
config.LoadConfig(configDir, "")
err := config.LoadConfig(configDir, configName)
assert.NoError(t, err)
sftpdConf := config.GetSFTPDConfig()
sftpdConf.Banner = " "
c := make(map[string]sftpd.Configuration)
c["sftpd"] = sftpdConf
jsonConf, _ := json.Marshal(c)
err = ioutil.WriteFile(configFilePath, jsonConf, os.ModePerm)
assert.NoError(t, err)
err = config.LoadConfig(configDir, tempConfigName)
assert.NoError(t, err)
sftpdConf = config.GetSFTPDConfig()
assert.NotEmpty(t, strings.TrimSpace(sftpdConf.Banner))
err = os.Remove(configFilePath)
assert.NoError(t, err)
ftpdConf := config.GetFTPDConfig()
ftpdConf.Banner = " "
c1 := make(map[string]ftpd.Configuration)
c1["ftpd"] = ftpdConf
jsonConf, _ = json.Marshal(c1)
err = ioutil.WriteFile(configFilePath, jsonConf, os.ModePerm)
assert.NoError(t, err)
err = config.LoadConfig(configDir, tempConfigName)
assert.NoError(t, err)
ftpdConf = config.GetFTPDConfig()
assert.NotEmpty(t, strings.TrimSpace(ftpdConf.Banner))
err = os.Remove(configFilePath)
assert.NoError(t, err)
}
func TestInvalidUploadMode(t *testing.T) {
configDir := ".."
confName := tempConfigName + ".json"
configFilePath := filepath.Join(configDir, confName)
config.LoadConfig(configDir, "")
err := config.LoadConfig(configDir, configName)
assert.NoError(t, err)
commonConf := config.GetCommonConfig()
commonConf.UploadMode = 10
c := make(map[string]common.Configuration)
c["common"] = commonConf
jsonConf, err := json.Marshal(c)
assert.NoError(t, err)
err = ioutil.WriteFile(configFilePath, jsonConf, os.ModePerm)
assert.NoError(t, err)
err = config.LoadConfig(configDir, tempConfigName)
assert.NotNil(t, err)
err = os.Remove(configFilePath)
assert.NoError(t, err)
}
func TestInvalidExternalAuthScope(t *testing.T) {
configDir := ".."
confName := tempConfigName + ".json"
configFilePath := filepath.Join(configDir, confName)
err := config.LoadConfig(configDir, configName)
assert.NoError(t, err)
providerConf := config.GetProviderConf()
providerConf.ExternalAuthScope = 10
c := make(map[string]dataprovider.Config)
c["data_provider"] = providerConf
jsonConf, err := json.Marshal(c)
assert.NoError(t, err)
err = ioutil.WriteFile(configFilePath, jsonConf, os.ModePerm)
assert.NoError(t, err)
err = config.LoadConfig(configDir, tempConfigName)
assert.NotNil(t, err)
err = os.Remove(configFilePath)
assert.NoError(t, err)
}
func TestInvalidCredentialsPath(t *testing.T) {
configDir := ".."
confName := tempConfigName + ".json"
configFilePath := filepath.Join(configDir, confName)
err := config.LoadConfig(configDir, configName)
assert.NoError(t, err)
providerConf := config.GetProviderConf()
providerConf.CredentialsPath = ""
c := make(map[string]dataprovider.Config)
c["data_provider"] = providerConf
jsonConf, err := json.Marshal(c)
assert.NoError(t, err)
err = ioutil.WriteFile(configFilePath, jsonConf, os.ModePerm)
assert.NoError(t, err)
err = config.LoadConfig(configDir, tempConfigName)
assert.NotNil(t, err)
err = os.Remove(configFilePath)
assert.NoError(t, err)
}
func TestInvalidProxyProtocol(t *testing.T) {
configDir := ".."
confName := tempConfigName + ".json"
configFilePath := filepath.Join(configDir, confName)
err := config.LoadConfig(configDir, configName)
assert.NoError(t, err)
commonConf := config.GetCommonConfig()
commonConf.ProxyProtocol = 10
c := make(map[string]common.Configuration)
c["common"] = commonConf
jsonConf, err := json.Marshal(c)
assert.NoError(t, err)
err = ioutil.WriteFile(configFilePath, jsonConf, os.ModePerm)
assert.NoError(t, err)
err = config.LoadConfig(configDir, tempConfigName)
assert.NotNil(t, err)
err = os.Remove(configFilePath)
assert.NoError(t, err)
}
func TestInvalidUsersBaseDir(t *testing.T) {
configDir := ".."
confName := tempConfigName + ".json"
configFilePath := filepath.Join(configDir, confName)
err := config.LoadConfig(configDir, configName)
assert.NoError(t, err)
providerConf := config.GetProviderConf()
providerConf.UsersBaseDir = "."
c := make(map[string]dataprovider.Config)
c["data_provider"] = providerConf
jsonConf, err := json.Marshal(c)
assert.NoError(t, err)
err = ioutil.WriteFile(configFilePath, jsonConf, os.ModePerm)
assert.NoError(t, err)
err = config.LoadConfig(configDir, tempConfigName)
assert.NotNil(t, err)
err = os.Remove(configFilePath)
assert.NoError(t, err)
}
func TestCommonParamsCompatibility(t *testing.T) {
configDir := ".."
confName := tempConfigName + ".json"
configFilePath := filepath.Join(configDir, confName)
err := config.LoadConfig(configDir, configName)
assert.NoError(t, err)
sftpdConf := config.GetSFTPDConfig()
sftpdConf.IdleTimeout = 21 //nolint:staticcheck
sftpdConf.Actions.Hook = "http://hook"
sftpdConf.Actions.ExecuteOn = []string{"upload"}
sftpdConf.SetstatMode = 1 //nolint:staticcheck
sftpdConf.UploadMode = common.UploadModeAtomicWithResume //nolint:staticcheck
sftpdConf.ProxyProtocol = 1 //nolint:staticcheck
sftpdConf.ProxyAllowed = []string{"192.168.1.1"} //nolint:staticcheck
c := make(map[string]sftpd.Configuration)
c["sftpd"] = sftpdConf
jsonConf, err := json.Marshal(c)
assert.NoError(t, err)
err = ioutil.WriteFile(configFilePath, jsonConf, os.ModePerm)
assert.NoError(t, err)
err = config.LoadConfig(configDir, tempConfigName)
assert.NoError(t, err)
commonConf := config.GetCommonConfig()
assert.Equal(t, 21, commonConf.IdleTimeout)
assert.Equal(t, "http://hook", commonConf.Actions.Hook)
assert.Len(t, commonConf.Actions.ExecuteOn, 1)
assert.True(t, utils.IsStringInSlice("upload", commonConf.Actions.ExecuteOn))
assert.Equal(t, 1, commonConf.SetstatMode)
assert.Equal(t, 1, commonConf.ProxyProtocol)
assert.Len(t, commonConf.ProxyAllowed, 1)
assert.True(t, utils.IsStringInSlice("192.168.1.1", commonConf.ProxyAllowed))
err = os.Remove(configFilePath)
assert.NoError(t, err)
}
func TestHostKeyCompatibility(t *testing.T) {
configDir := ".."
confName := tempConfigName + ".json"
configFilePath := filepath.Join(configDir, confName)
err := config.LoadConfig(configDir, configName)
assert.NoError(t, err)
sftpdConf := config.GetSFTPDConfig()
sftpdConf.Keys = []sftpd.Key{ //nolint:staticcheck
{
PrivateKey: "rsa",
},
{
PrivateKey: "ecdsa",
},
}
c := make(map[string]sftpd.Configuration)
c["sftpd"] = sftpdConf
jsonConf, err := json.Marshal(c)
assert.NoError(t, err)
err = ioutil.WriteFile(configFilePath, jsonConf, os.ModePerm)
assert.NoError(t, err)
err = config.LoadConfig(configDir, tempConfigName)
assert.NoError(t, err)
sftpdConf = config.GetSFTPDConfig()
assert.Equal(t, 2, len(sftpdConf.HostKeys))
assert.True(t, utils.IsStringInSlice("rsa", sftpdConf.HostKeys))
assert.True(t, utils.IsStringInSlice("ecdsa", sftpdConf.HostKeys))
err = os.Remove(configFilePath)
assert.NoError(t, err)
}
func TestSetGetConfig(t *testing.T) {
sftpdConf := config.GetSFTPDConfig()
sftpdConf.MaxAuthTries = 10
config.SetSFTPDConfig(sftpdConf)
assert.Equal(t, sftpdConf.MaxAuthTries, config.GetSFTPDConfig().MaxAuthTries)
dataProviderConf := config.GetProviderConf()
dataProviderConf.Host = "test host"
config.SetProviderConf(dataProviderConf)
assert.Equal(t, dataProviderConf.Host, config.GetProviderConf().Host)
httpdConf := config.GetHTTPDConfig()
httpdConf.BindAddress = "0.0.0.0"
config.SetHTTPDConfig(httpdConf)
assert.Equal(t, httpdConf.BindAddress, config.GetHTTPDConfig().BindAddress)
commonConf := config.GetCommonConfig()
commonConf.IdleTimeout = 10
config.SetCommonConfig(commonConf)
assert.Equal(t, commonConf.IdleTimeout, config.GetCommonConfig().IdleTimeout)
ftpdConf := config.GetFTPDConfig()
ftpdConf.CertificateFile = "cert"
ftpdConf.CertificateKeyFile = "key"
config.SetFTPDConfig(ftpdConf)
assert.Equal(t, ftpdConf.CertificateFile, config.GetFTPDConfig().CertificateFile)
assert.Equal(t, ftpdConf.CertificateKeyFile, config.GetFTPDConfig().CertificateKeyFile)
webDavConf := config.GetWebDAVDConfig()
webDavConf.CertificateFile = "dav_cert"
webDavConf.CertificateKeyFile = "dav_key"
config.SetWebDAVDConfig(webDavConf)
assert.Equal(t, webDavConf.CertificateFile, config.GetWebDAVDConfig().CertificateFile)
assert.Equal(t, webDavConf.CertificateKeyFile, config.GetWebDAVDConfig().CertificateKeyFile)
}

File diff suppressed because it is too large

@@ -0,0 +1,17 @@
// +build nobolt
package dataprovider
import (
"errors"
"github.com/drakkan/sftpgo/version"
)
func init() {
version.AddFeature("-bolt")
}
func initializeBoltProvider(basePath string) error {
return errors.New("bolt disabled at build time")
}
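
The same stub pattern is repeated below for the nomysql and nopgsql tags: the file only compiles when the tag is set, its init() registers the feature as disabled, and the initializer becomes a hard error. A minimal, self-contained sketch of the feature-registration side (names here are illustrative, not sftpgo's version package):

package main

import (
	"fmt"
	"strings"
)

// features collects "+name"/"-name" markers; each provider file
// registers its marker from init(), as the stub above does with "-bolt".
var features []string

func addFeature(f string) { features = append(features, f) }

func init() { addFeature("+bolt") } // a build with -tags nobolt would register "-bolt" instead

func main() {
	// the collected markers end up in the reported version string
	fmt.Println("features:", strings.Join(features, " "))
}
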

File diff suppressed because it is too large

dataprovider/memory.go Normal file

@@ -0,0 +1,675 @@
package dataprovider
import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"sort"
"sync"
"time"
"github.com/drakkan/sftpgo/logger"
"github.com/drakkan/sftpgo/utils"
"github.com/drakkan/sftpgo/vfs"
)
var (
errMemoryProviderClosed = errors.New("memory provider is closed")
)
type memoryProviderHandle struct {
// configuration file to use for loading users
configFile string
sync.Mutex
isClosed bool
// slice with ordered usernames
usernames []string
// mapping between ID and username
usersIdx map[int64]string
// map for users, username is the key
users map[string]User
// map for virtual folders, MappedPath is the key
vfolders map[string]vfs.BaseVirtualFolder
// slice with ordered folder mapped paths
vfoldersPaths []string
}
// MemoryProvider auth provider for a memory store
type MemoryProvider struct {
dbHandle *memoryProviderHandle
}
func initializeMemoryProvider(basePath string) error {
logSender = fmt.Sprintf("dataprovider_%v", MemoryDataProviderName)
configFile := ""
if utils.IsFileInputValid(config.Name) {
configFile = config.Name
if !filepath.IsAbs(configFile) {
configFile = filepath.Join(basePath, configFile)
}
}
provider = MemoryProvider{
dbHandle: &memoryProviderHandle{
isClosed: false,
usernames: []string{},
usersIdx: make(map[int64]string),
users: make(map[string]User),
vfolders: make(map[string]vfs.BaseVirtualFolder),
vfoldersPaths: []string{},
configFile: configFile,
},
}
return provider.reloadConfig()
}
func (p MemoryProvider) checkAvailability() error {
p.dbHandle.Lock()
defer p.dbHandle.Unlock()
if p.dbHandle.isClosed {
return errMemoryProviderClosed
}
return nil
}
func (p MemoryProvider) close() error {
p.dbHandle.Lock()
defer p.dbHandle.Unlock()
if p.dbHandle.isClosed {
return errMemoryProviderClosed
}
p.dbHandle.isClosed = true
return nil
}
func (p MemoryProvider) validateUserAndPass(username, password, ip, protocol string) (User, error) {
var user User
if len(password) == 0 {
return user, errors.New("Credentials cannot be null or empty")
}
user, err := p.userExists(username)
if err != nil {
providerLog(logger.LevelWarn, "error authenticating user %#v, error: %v", username, err)
return user, err
}
return checkUserAndPass(user, password, ip, protocol)
}
func (p MemoryProvider) validateUserAndPubKey(username string, pubKey []byte) (User, string, error) {
var user User
if len(pubKey) == 0 {
return user, "", errors.New("Credentials cannot be null or empty")
}
user, err := p.userExists(username)
if err != nil {
providerLog(logger.LevelWarn, "error authenticating user %#v, error: %v", username, err)
return user, "", err
}
return checkUserAndPubKey(user, pubKey)
}
func (p MemoryProvider) getUserByID(ID int64) (User, error) {
p.dbHandle.Lock()
defer p.dbHandle.Unlock()
if p.dbHandle.isClosed {
return User{}, errMemoryProviderClosed
}
if val, ok := p.dbHandle.usersIdx[ID]; ok {
return p.userExistsInternal(val)
}
return User{}, &RecordNotFoundError{err: fmt.Sprintf("user with ID %v does not exist", ID)}
}
func (p MemoryProvider) updateLastLogin(username string) error {
p.dbHandle.Lock()
defer p.dbHandle.Unlock()
if p.dbHandle.isClosed {
return errMemoryProviderClosed
}
user, err := p.userExistsInternal(username)
if err != nil {
return err
}
user.LastLogin = utils.GetTimeAsMsSinceEpoch(time.Now())
p.dbHandle.users[user.Username] = user
return nil
}
func (p MemoryProvider) updateQuota(username string, filesAdd int, sizeAdd int64, reset bool) error {
p.dbHandle.Lock()
defer p.dbHandle.Unlock()
if p.dbHandle.isClosed {
return errMemoryProviderClosed
}
user, err := p.userExistsInternal(username)
if err != nil {
providerLog(logger.LevelWarn, "unable to update quota for user %#v error: %v", username, err)
return err
}
if reset {
user.UsedQuotaSize = sizeAdd
user.UsedQuotaFiles = filesAdd
} else {
user.UsedQuotaSize += sizeAdd
user.UsedQuotaFiles += filesAdd
}
user.LastQuotaUpdate = utils.GetTimeAsMsSinceEpoch(time.Now())
providerLog(logger.LevelDebug, "quota updated for user %#v, files increment: %v size increment: %v is reset? %v",
username, filesAdd, sizeAdd, reset)
p.dbHandle.users[user.Username] = user
return nil
}
func (p MemoryProvider) getUsedQuota(username string) (int, int64, error) {
p.dbHandle.Lock()
defer p.dbHandle.Unlock()
if p.dbHandle.isClosed {
return 0, 0, errMemoryProviderClosed
}
user, err := p.userExistsInternal(username)
if err != nil {
providerLog(logger.LevelWarn, "unable to get quota for user %#v error: %v", username, err)
return 0, 0, err
}
return user.UsedQuotaFiles, user.UsedQuotaSize, err
}
func (p MemoryProvider) addUser(user User) error {
p.dbHandle.Lock()
defer p.dbHandle.Unlock()
if p.dbHandle.isClosed {
return errMemoryProviderClosed
}
err := validateUser(&user)
if err != nil {
return err
}
_, err = p.userExistsInternal(user.Username)
if err == nil {
return fmt.Errorf("username %#v already exists", user.Username)
}
user.ID = p.getNextID()
user.LastQuotaUpdate = 0
user.UsedQuotaSize = 0
user.UsedQuotaFiles = 0
user.LastLogin = 0
user.VirtualFolders = p.joinVirtualFoldersFields(user)
p.dbHandle.users[user.Username] = user
p.dbHandle.usersIdx[user.ID] = user.Username
p.dbHandle.usernames = append(p.dbHandle.usernames, user.Username)
sort.Strings(p.dbHandle.usernames)
return nil
}
func (p MemoryProvider) updateUser(user User) error {
p.dbHandle.Lock()
defer p.dbHandle.Unlock()
if p.dbHandle.isClosed {
return errMemoryProviderClosed
}
err := validateUser(&user)
if err != nil {
return err
}
u, err := p.userExistsInternal(user.Username)
if err != nil {
return err
}
for _, oldFolder := range u.VirtualFolders {
p.removeUserFromFolderMapping(oldFolder.MappedPath, u.Username)
}
user.VirtualFolders = p.joinVirtualFoldersFields(user)
user.LastQuotaUpdate = u.LastQuotaUpdate
user.UsedQuotaSize = u.UsedQuotaSize
user.UsedQuotaFiles = u.UsedQuotaFiles
user.LastLogin = u.LastLogin
p.dbHandle.users[user.Username] = user
return nil
}
func (p MemoryProvider) deleteUser(user User) error {
p.dbHandle.Lock()
defer p.dbHandle.Unlock()
if p.dbHandle.isClosed {
return errMemoryProviderClosed
}
u, err := p.userExistsInternal(user.Username)
if err != nil {
return err
}
for _, oldFolder := range u.VirtualFolders {
p.removeUserFromFolderMapping(oldFolder.MappedPath, u.Username)
}
delete(p.dbHandle.users, user.Username)
delete(p.dbHandle.usersIdx, user.ID)
// this could be more efficient
p.dbHandle.usernames = []string{}
for username := range p.dbHandle.users {
p.dbHandle.usernames = append(p.dbHandle.usernames, username)
}
sort.Strings(p.dbHandle.usernames)
return nil
}
func (p MemoryProvider) dumpUsers() ([]User, error) {
p.dbHandle.Lock()
defer p.dbHandle.Unlock()
users := make([]User, 0, len(p.dbHandle.usernames))
var err error
if p.dbHandle.isClosed {
return users, errMemoryProviderClosed
}
for _, username := range p.dbHandle.usernames {
user := p.dbHandle.users[username]
err = addCredentialsToUser(&user)
if err != nil {
return users, err
}
users = append(users, user)
}
return users, err
}
func (p MemoryProvider) dumpFolders() ([]vfs.BaseVirtualFolder, error) {
p.dbHandle.Lock()
defer p.dbHandle.Unlock()
folders := make([]vfs.BaseVirtualFolder, 0, len(p.dbHandle.vfoldersPaths))
if p.dbHandle.isClosed {
return folders, errMemoryProviderClosed
}
for _, f := range p.dbHandle.vfolders {
folders = append(folders, f)
}
return folders, nil
}
func (p MemoryProvider) getUsers(limit int, offset int, order string, username string) ([]User, error) {
users := make([]User, 0, limit)
var err error
p.dbHandle.Lock()
defer p.dbHandle.Unlock()
if p.dbHandle.isClosed {
return users, errMemoryProviderClosed
}
if limit <= 0 {
return users, err
}
if len(username) > 0 {
if offset == 0 {
user, err := p.userExistsInternal(username)
if err == nil {
users = append(users, HideUserSensitiveData(&user))
}
}
return users, err
}
itNum := 0
if order == OrderASC {
for _, username := range p.dbHandle.usernames {
itNum++
if itNum <= offset {
continue
}
user := p.dbHandle.users[username]
users = append(users, HideUserSensitiveData(&user))
if len(users) >= limit {
break
}
}
} else {
for i := len(p.dbHandle.usernames) - 1; i >= 0; i-- {
itNum++
if itNum <= offset {
continue
}
username := p.dbHandle.usernames[i]
user := p.dbHandle.users[username]
users = append(users, HideUserSensitiveData(&user))
if len(users) >= limit {
break
}
}
}
return users, err
}
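
getUsers pages through the pre-sorted usernames slice rather than the map, which is what keeps the ordering stable across calls. A standalone sketch of the same offset/limit windowing (illustrative helper, not sftpgo API):

package main

import (
	"fmt"
	"sort"
)

// page mirrors the loop above: walk a sorted copy, skip offset
// entries, stop after limit results.
func page(items []string, limit, offset int, asc bool) []string {
	if limit <= 0 {
		return nil
	}
	sorted := append([]string(nil), items...)
	sort.Strings(sorted)
	if !asc {
		for i, j := 0, len(sorted)-1; i < j; i, j = i+1, j-1 {
			sorted[i], sorted[j] = sorted[j], sorted[i]
		}
	}
	var out []string
	for i, item := range sorted {
		if i < offset {
			continue
		}
		out = append(out, item)
		if len(out) >= limit {
			break
		}
	}
	return out
}

func main() {
	users := []string{"carol", "alice", "dave", "bob"}
	fmt.Println(page(users, 2, 1, true)) // [bob carol]
}
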
func (p MemoryProvider) userExists(username string) (User, error) {
p.dbHandle.Lock()
defer p.dbHandle.Unlock()
if p.dbHandle.isClosed {
return User{}, errMemoryProviderClosed
}
return p.userExistsInternal(username)
}
func (p MemoryProvider) userExistsInternal(username string) (User, error) {
if val, ok := p.dbHandle.users[username]; ok {
return val.getACopy(), nil
}
return User{}, &RecordNotFoundError{err: fmt.Sprintf("username %#v does not exist", username)}
}
func (p MemoryProvider) updateFolderQuota(mappedPath string, filesAdd int, sizeAdd int64, reset bool) error {
p.dbHandle.Lock()
defer p.dbHandle.Unlock()
if p.dbHandle.isClosed {
return errMemoryProviderClosed
}
folder, err := p.folderExistsInternal(mappedPath)
if err != nil {
providerLog(logger.LevelWarn, "unable to update quota for folder %#v error: %v", mappedPath, err)
return err
}
if reset {
folder.UsedQuotaSize = sizeAdd
folder.UsedQuotaFiles = filesAdd
} else {
folder.UsedQuotaSize += sizeAdd
folder.UsedQuotaFiles += filesAdd
}
folder.LastQuotaUpdate = utils.GetTimeAsMsSinceEpoch(time.Now())
p.dbHandle.vfolders[mappedPath] = folder
return nil
}
func (p MemoryProvider) getUsedFolderQuota(mappedPath string) (int, int64, error) {
p.dbHandle.Lock()
defer p.dbHandle.Unlock()
if p.dbHandle.isClosed {
return 0, 0, errMemoryProviderClosed
}
folder, err := p.folderExistsInternal(mappedPath)
if err != nil {
providerLog(logger.LevelWarn, "unable to get quota for folder %#v error: %v", mappedPath, err)
return 0, 0, err
}
return folder.UsedQuotaFiles, folder.UsedQuotaSize, err
}
func (p MemoryProvider) joinVirtualFoldersFields(user User) []vfs.VirtualFolder {
var folders []vfs.VirtualFolder
for _, folder := range user.VirtualFolders {
f, err := p.addOrGetFolderInternal(folder.MappedPath, user.Username, folder.UsedQuotaSize, folder.UsedQuotaFiles,
folder.LastQuotaUpdate)
if err == nil {
folder.UsedQuotaFiles = f.UsedQuotaFiles
folder.UsedQuotaSize = f.UsedQuotaSize
folder.LastQuotaUpdate = f.LastQuotaUpdate
folder.ID = f.ID
folders = append(folders, folder)
}
}
return folders
}
func (p MemoryProvider) removeUserFromFolderMapping(mappedPath, username string) {
folder, err := p.folderExistsInternal(mappedPath)
if err == nil {
var usernames []string
for _, user := range folder.Users {
if user != username {
usernames = append(usernames, user)
}
}
folder.Users = usernames
p.dbHandle.vfolders[folder.MappedPath] = folder
}
}
func (p MemoryProvider) updateFoldersMappingInternal(folder vfs.BaseVirtualFolder) {
p.dbHandle.vfolders[folder.MappedPath] = folder
if !utils.IsStringInSlice(folder.MappedPath, p.dbHandle.vfoldersPaths) {
p.dbHandle.vfoldersPaths = append(p.dbHandle.vfoldersPaths, folder.MappedPath)
sort.Strings(p.dbHandle.vfoldersPaths)
}
}
func (p MemoryProvider) addOrGetFolderInternal(mappedPath, username string, usedQuotaSize int64, usedQuotaFiles int, lastQuotaUpdate int64) (vfs.BaseVirtualFolder, error) {
folder, err := p.folderExistsInternal(mappedPath)
if _, ok := err.(*RecordNotFoundError); ok {
folder := vfs.BaseVirtualFolder{
ID: p.getNextFolderID(),
MappedPath: mappedPath,
UsedQuotaSize: usedQuotaSize,
UsedQuotaFiles: usedQuotaFiles,
LastQuotaUpdate: lastQuotaUpdate,
Users: []string{username},
}
p.updateFoldersMappingInternal(folder)
return folder, nil
}
if err == nil && !utils.IsStringInSlice(username, folder.Users) {
folder.Users = append(folder.Users, username)
p.updateFoldersMappingInternal(folder)
}
return folder, err
}
func (p MemoryProvider) folderExistsInternal(mappedPath string) (vfs.BaseVirtualFolder, error) {
if val, ok := p.dbHandle.vfolders[mappedPath]; ok {
return val, nil
}
return vfs.BaseVirtualFolder{}, &RecordNotFoundError{err: fmt.Sprintf("folder %#v does not exist", mappedPath)}
}
func (p MemoryProvider) getFolders(limit, offset int, order, folderPath string) ([]vfs.BaseVirtualFolder, error) {
folders := make([]vfs.BaseVirtualFolder, 0, limit)
var err error
p.dbHandle.Lock()
defer p.dbHandle.Unlock()
if p.dbHandle.isClosed {
return folders, errMemoryProviderClosed
}
if limit <= 0 {
return folders, err
}
if len(folderPath) > 0 {
if offset == 0 {
var folder vfs.BaseVirtualFolder
folder, err = p.folderExistsInternal(folderPath)
if err == nil {
folders = append(folders, folder)
}
}
return folders, err
}
itNum := 0
if order == OrderASC {
for _, mappedPath := range p.dbHandle.vfoldersPaths {
itNum++
if itNum <= offset {
continue
}
folder := p.dbHandle.vfolders[mappedPath]
folders = append(folders, folder)
if len(folders) >= limit {
break
}
}
} else {
for i := len(p.dbHandle.vfoldersPaths) - 1; i >= 0; i-- {
itNum++
if itNum <= offset {
continue
}
mappedPath := p.dbHandle.vfoldersPaths[i]
folder := p.dbHandle.vfolders[mappedPath]
folders = append(folders, folder)
if len(folders) >= limit {
break
}
}
}
return folders, err
}
func (p MemoryProvider) getFolderByPath(mappedPath string) (vfs.BaseVirtualFolder, error) {
p.dbHandle.Lock()
defer p.dbHandle.Unlock()
if p.dbHandle.isClosed {
return vfs.BaseVirtualFolder{}, errMemoryProviderClosed
}
return p.folderExistsInternal(mappedPath)
}
func (p MemoryProvider) addFolder(folder vfs.BaseVirtualFolder) error {
p.dbHandle.Lock()
defer p.dbHandle.Unlock()
if p.dbHandle.isClosed {
return errMemoryProviderClosed
}
err := validateFolder(&folder)
if err != nil {
return err
}
_, err = p.folderExistsInternal(folder.MappedPath)
if err == nil {
return fmt.Errorf("folder %#v already exists", folder.MappedPath)
}
folder.ID = p.getNextFolderID()
p.dbHandle.vfolders[folder.MappedPath] = folder
p.dbHandle.vfoldersPaths = append(p.dbHandle.vfoldersPaths, folder.MappedPath)
sort.Strings(p.dbHandle.vfoldersPaths)
return nil
}
func (p MemoryProvider) deleteFolder(folder vfs.BaseVirtualFolder) error {
p.dbHandle.Lock()
defer p.dbHandle.Unlock()
if p.dbHandle.isClosed {
return errMemoryProviderClosed
}
_, err := p.folderExistsInternal(folder.MappedPath)
if err != nil {
return err
}
for _, username := range folder.Users {
user, err := p.userExistsInternal(username)
if err == nil {
var folders []vfs.VirtualFolder
for _, userFolder := range user.VirtualFolders {
if folder.MappedPath != userFolder.MappedPath {
folders = append(folders, userFolder)
}
}
user.VirtualFolders = folders
p.dbHandle.users[user.Username] = user
}
}
delete(p.dbHandle.vfolders, folder.MappedPath)
p.dbHandle.vfoldersPaths = []string{}
for mappedPath := range p.dbHandle.vfolders {
p.dbHandle.vfoldersPaths = append(p.dbHandle.vfoldersPaths, mappedPath)
}
sort.Strings(p.dbHandle.vfoldersPaths)
return nil
}
func (p MemoryProvider) getNextID() int64 {
nextID := int64(1)
for id := range p.dbHandle.usersIdx {
if id >= nextID {
nextID = id + 1
}
}
return nextID
}
func (p MemoryProvider) getNextFolderID() int64 {
nextID := int64(1)
for _, v := range p.dbHandle.vfolders {
if v.ID >= nextID {
nextID = v.ID + 1
}
}
return nextID
}
func (p MemoryProvider) clear() {
p.dbHandle.Lock()
defer p.dbHandle.Unlock()
p.dbHandle.usernames = []string{}
p.dbHandle.usersIdx = make(map[int64]string)
p.dbHandle.users = make(map[string]User)
p.dbHandle.vfoldersPaths = []string{}
p.dbHandle.vfolders = make(map[string]vfs.BaseVirtualFolder)
}
func (p MemoryProvider) reloadConfig() error {
if len(p.dbHandle.configFile) == 0 {
providerLog(logger.LevelDebug, "no users configuration file defined")
return nil
}
providerLog(logger.LevelDebug, "loading users from file: %#v", p.dbHandle.configFile)
fi, err := os.Stat(p.dbHandle.configFile)
if err != nil {
providerLog(logger.LevelWarn, "error loading users: %v", err)
return err
}
if fi.Size() == 0 {
err = errors.New("users configuration file is invalid, its size must be > 0")
providerLog(logger.LevelWarn, "error loading users: %v", err)
return err
}
if fi.Size() > 10485760 {
err = errors.New("users configuration file is invalid, its size must be <= 10485760 bytes")
providerLog(logger.LevelWarn, "error loading users: %v", err)
return err
}
content, err := ioutil.ReadFile(p.dbHandle.configFile)
if err != nil {
providerLog(logger.LevelWarn, "error loading users: %v", err)
return err
}
var dump BackupData
err = json.Unmarshal(content, &dump)
if err != nil {
providerLog(logger.LevelWarn, "error loading users: %v", err)
return err
}
p.clear()
for _, folder := range dump.Folders {
_, err := p.getFolderByPath(folder.MappedPath)
if err == nil {
logger.Debug(logSender, "", "folder %#v already exists, restore not needed", folder.MappedPath)
continue
}
folder.Users = nil
err = p.addFolder(folder)
if err != nil {
providerLog(logger.LevelWarn, "error adding folder %#v: %v", folder.MappedPath, err)
return err
}
}
for _, user := range dump.Users {
u, err := p.userExists(user.Username)
if err == nil {
user.ID = u.ID
err = p.updateUser(user)
if err != nil {
providerLog(logger.LevelWarn, "error updating user %#v: %v", user.Username, err)
return err
}
} else {
err = p.addUser(user)
if err != nil {
providerLog(logger.LevelWarn, "error adding user %#v: %v", user.Username, err)
return err
}
}
}
providerLog(logger.LevelDebug, "user and folders loaded from file: %#v", p.dbHandle.configFile)
return nil
}
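
reloadConfig accepts the same JSON shape produced by the backup/dump facility: an object with users and folders arrays. A hedged sketch of generating a minimal users file (struct fields assumed from the User fields referenced above, trimmed to a tiny subset):

package main

import (
	"encoding/json"
	"fmt"
)

// minimalUser approximates the handful of User fields a memory
// provider users file actually needs; field names are assumptions.
type minimalUser struct {
	Username    string              `json:"username"`
	HomeDir     string              `json:"home_dir"`
	Status      int                 `json:"status"`
	Permissions map[string][]string `json:"permissions"`
}

type minimalDump struct {
	Users   []minimalUser            `json:"users"`
	Folders []map[string]interface{} `json:"folders,omitempty"`
}

func main() {
	dump := minimalDump{Users: []minimalUser{{
		Username:    "alice",
		HomeDir:     "/srv/sftpgo/data/alice",
		Status:      1,
		Permissions: map[string][]string{"/": {"*"}},
	}}}
	b, _ := json.MarshalIndent(dump, "", "  ")
	// point the memory provider's configuration file at a file with this content
	fmt.Println(string(b))
}
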
// initializeDatabase does nothing, no initialization is needed for the memory provider
func (p MemoryProvider) initializeDatabase() error {
return ErrNoInitRequired
}
func (p MemoryProvider) migrateDatabase() error {
return ErrNoInitRequired
}


@@ -1,12 +1,43 @@
// +build !nomysql
package dataprovider
import (
"context"
"database/sql"
"fmt"
"runtime"
"strings"
"time"
// we import go-sql-driver/mysql here to be able to disable MySQL support using a build tag
_ "github.com/go-sql-driver/mysql"
"github.com/drakkan/sftpgo/logger"
"github.com/drakkan/sftpgo/version"
"github.com/drakkan/sftpgo/vfs"
)
const (
mysqlUsersTableSQL = "CREATE TABLE `{{users}}` (`id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY, " +
"`username` varchar(255) NOT NULL UNIQUE, `password` varchar(255) NULL, `public_keys` longtext NULL, " +
"`home_dir` varchar(255) NOT NULL, `uid` integer NOT NULL, `gid` integer NOT NULL, `max_sessions` integer NOT NULL, " +
" `quota_size` bigint NOT NULL, `quota_files` integer NOT NULL, `permissions` longtext NOT NULL, " +
"`used_quota_size` bigint NOT NULL, `used_quota_files` integer NOT NULL, `last_quota_update` bigint NOT NULL, " +
"`upload_bandwidth` integer NOT NULL, `download_bandwidth` integer NOT NULL, `expiration_date` bigint(20) NOT NULL, " +
"`last_login` bigint(20) NOT NULL, `status` int(11) NOT NULL, `filters` longtext DEFAULT NULL, " +
"`filesystem` longtext DEFAULT NULL);"
mysqlSchemaTableSQL = "CREATE TABLE `{{schema_version}}` (`id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY, `version` integer NOT NULL);"
mysqlV2SQL = "ALTER TABLE `{{users}}` ADD COLUMN `virtual_folders` longtext NULL;"
mysqlV3SQL = "ALTER TABLE `{{users}}` MODIFY `password` longtext NULL;"
mysqlV4SQL = "CREATE TABLE `{{folders}}` (`id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY, `path` varchar(512) NOT NULL UNIQUE," +
"`used_quota_size` bigint NOT NULL, `used_quota_files` integer NOT NULL, `last_quota_update` bigint NOT NULL);" +
"ALTER TABLE `{{users}}` MODIFY `home_dir` varchar(512) NOT NULL;" +
"ALTER TABLE `{{users}}` DROP COLUMN `virtual_folders`;" +
"CREATE TABLE `{{folders_mapping}}` (`id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY, `virtual_path` varchar(512) NOT NULL, " +
"`quota_size` bigint NOT NULL, `quota_files` integer NOT NULL, `folder_id` integer NOT NULL, `user_id` integer NOT NULL);" +
"ALTER TABLE `{{folders_mapping}}` ADD CONSTRAINT `unique_mapping` UNIQUE (`user_id`, `folder_id`);" +
"ALTER TABLE `{{folders_mapping}}` ADD CONSTRAINT `folders_mapping_folder_id_fk_folders_id` FOREIGN KEY (`folder_id`) REFERENCES `{{folders}}` (`id`) ON DELETE CASCADE;" +
"ALTER TABLE `{{folders_mapping}}` ADD CONSTRAINT `folders_mapping_user_id_fk_users_id` FOREIGN KEY (`user_id`) REFERENCES `{{users}}` (`id`) ON DELETE CASCADE;"
)
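
The {{users}}, {{folders}} and {{schema_version}} placeholders in these statements are swapped for the configured table names with strings.Replace before execution, as initializeDatabase and the migration helpers below show. A minimal sketch of the substitution:

package main

import (
	"fmt"
	"strings"
)

func main() {
	const tpl = "CREATE TABLE `{{users}}` (`id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY);"
	sqlTableUsers := "users" // taken from the data provider configuration
	fmt.Println(strings.Replace(tpl, "{{users}}", sqlTableUsers, 1))
}
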
// MySQLProvider auth provider for MySQL/MariaDB database
@@ -14,34 +45,50 @@ type MySQLProvider struct {
dbHandle *sql.DB
}
func init() {
version.AddFeature("+mysql")
}
func initializeMySQLProvider() error {
var err error
logSender = fmt.Sprintf("dataprovider_%v", MySQLDataProviderName)
dbHandle, err := sql.Open("mysql", getMySQLConnectionString(false))
if err == nil {
providerLog(logger.LevelDebug, "mysql database handle created, connection string: %#v, pool size: %v",
getMySQLConnectionString(true), config.PoolSize)
dbHandle.SetMaxOpenConns(config.PoolSize)
dbHandle.SetConnMaxLifetime(1800 * time.Second)
provider = MySQLProvider{dbHandle: dbHandle}
} else {
logger.Warn(logSender, "error creating mysql database handler, connection string: '%v', error: %v", connectionString, err)
providerLog(logger.LevelWarn, "error creating mysql database handler, connection string: %#v, error: %v",
getMySQLConnectionString(true), err)
}
return err
}
func getMySQLConnectionString(redactedPwd bool) string {
var connectionString string
if len(config.ConnectionString) == 0 {
password := config.Password
if redactedPwd {
password = "[redacted]"
}
connectionString = fmt.Sprintf("%v:%v@tcp([%v]:%v)/%v?charset=utf8&interpolateParams=true&timeout=10s&tls=%v&writeTimeout=10s&readTimeout=10s",
config.Username, password, config.Host, config.Port, config.Name, getSSLMode())
} else {
connectionString = config.ConnectionString
}
return connectionString
}
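
getMySQLConnectionString takes a redactedPwd flag so the connection string can be logged without leaking the password; the PostgreSQL provider below does the same. A standalone sketch of the idea (DSN format illustrative):

package main

import "fmt"

// buildDSN returns the same string for logging and for connecting,
// differing only in the password field.
func buildDSN(user, password, host string, redacted bool) string {
	if redacted {
		password = "[redacted]"
	}
	return fmt.Sprintf("%v:%v@tcp(%v:3306)/sftpgo", user, password, host)
}

func main() {
	fmt.Println(buildDSN("sftpgo", "secret", "127.0.0.1", true))  // safe to log
	fmt.Println(buildDSN("sftpgo", "secret", "127.0.0.1", false)) // used to connect
}
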
func (p MySQLProvider) checkAvailability() error {
return sqlCommonCheckAvailability(p.dbHandle)
}
func (p MySQLProvider) validateUserAndPass(username, password, ip, protocol string) (User, error) {
return sqlCommonValidateUserAndPass(username, password, ip, protocol, p.dbHandle)
}
func (p MySQLProvider) validateUserAndPubKey(username string, publicKey []byte) (User, string, error) {
return sqlCommonValidateUserAndPubKey(username, publicKey, p.dbHandle)
}
@@ -50,27 +97,17 @@ func (p MySQLProvider) getUserByID(ID int64) (User, error) {
}
func (p MySQLProvider) updateQuota(username string, filesAdd int, sizeAdd int64, reset bool) error {
return sqlCommonUpdateQuota(username, filesAdd, sizeAdd, reset, p.dbHandle)
}
func (p MySQLProvider) getUsedQuota(username string) (int, int64, error) {
return sqlCommonGetUsedQuota(username, p.dbHandle)
}
func (p MySQLProvider) updateLastLogin(username string) error {
return sqlCommonUpdateLastLogin(username, p.dbHandle)
}
func (p MySQLProvider) userExists(username string) (User, error) {
return sqlCommonCheckUserExists(username, p.dbHandle)
}
@@ -87,6 +124,128 @@ func (p MySQLProvider) deleteUser(user User) error {
return sqlCommonDeleteUser(user, p.dbHandle)
}
func (p MySQLProvider) dumpUsers() ([]User, error) {
return sqlCommonDumpUsers(p.dbHandle)
}
func (p MySQLProvider) getUsers(limit int, offset int, order string, username string) ([]User, error) {
return sqlCommonGetUsers(limit, offset, order, username, p.dbHandle)
}
func (p MySQLProvider) dumpFolders() ([]vfs.BaseVirtualFolder, error) {
return sqlCommonDumpFolders(p.dbHandle)
}
func (p MySQLProvider) getFolders(limit, offset int, order, folderPath string) ([]vfs.BaseVirtualFolder, error) {
return sqlCommonGetFolders(limit, offset, order, folderPath, p.dbHandle)
}
func (p MySQLProvider) getFolderByPath(mappedPath string) (vfs.BaseVirtualFolder, error) {
ctx, cancel := context.WithTimeout(context.Background(), defaultSQLQueryTimeout)
defer cancel()
return sqlCommonCheckFolderExists(ctx, mappedPath, p.dbHandle)
}
func (p MySQLProvider) addFolder(folder vfs.BaseVirtualFolder) error {
return sqlCommonAddFolder(folder, p.dbHandle)
}
func (p MySQLProvider) deleteFolder(folder vfs.BaseVirtualFolder) error {
return sqlCommonDeleteFolder(folder, p.dbHandle)
}
func (p MySQLProvider) updateFolderQuota(mappedPath string, filesAdd int, sizeAdd int64, reset bool) error {
return sqlCommonUpdateFolderQuota(mappedPath, filesAdd, sizeAdd, reset, p.dbHandle)
}
func (p MySQLProvider) getUsedFolderQuota(mappedPath string) (int, int64, error) {
return sqlCommonGetFolderUsedQuota(mappedPath, p.dbHandle)
}
func (p MySQLProvider) close() error {
return p.dbHandle.Close()
}
func (p MySQLProvider) reloadConfig() error {
return nil
}
// initializeDatabase creates the initial database structure
func (p MySQLProvider) initializeDatabase() error {
dbVersion, err := sqlCommonGetDatabaseVersion(p.dbHandle, false)
if err == nil && dbVersion.Version > 0 {
return ErrNoInitRequired
}
sqlUsers := strings.Replace(mysqlUsersTableSQL, "{{users}}", sqlTableUsers, 1)
tx, err := p.dbHandle.Begin()
if err != nil {
return err
}
_, err = tx.Exec(sqlUsers)
if err != nil {
sqlCommonRollbackTransaction(tx)
return err
}
_, err = tx.Exec(strings.Replace(mysqlSchemaTableSQL, "{{schema_version}}", sqlTableSchemaVersion, 1))
if err != nil {
sqlCommonRollbackTransaction(tx)
return err
}
_, err = tx.Exec(strings.Replace(initialDBVersionSQL, "{{schema_version}}", sqlTableSchemaVersion, 1))
if err != nil {
sqlCommonRollbackTransaction(tx)
return err
}
return tx.Commit()
}
func (p MySQLProvider) migrateDatabase() error {
dbVersion, err := sqlCommonGetDatabaseVersion(p.dbHandle, true)
if err != nil {
return err
}
if dbVersion.Version == sqlDatabaseVersion {
providerLog(logger.LevelDebug, "sql database is up to date, current version: %v", dbVersion.Version)
return ErrNoInitRequired
}
switch dbVersion.Version {
case 1:
err = updateMySQLDatabaseFrom1To2(p.dbHandle)
if err != nil {
return err
}
err = updateMySQLDatabaseFrom2To3(p.dbHandle)
if err != nil {
return err
}
return updateMySQLDatabaseFrom3To4(p.dbHandle)
case 2:
err = updateMySQLDatabaseFrom2To3(p.dbHandle)
if err != nil {
return err
}
return updateMySQLDatabaseFrom3To4(p.dbHandle)
case 3:
return updateMySQLDatabaseFrom3To4(p.dbHandle)
default:
return fmt.Errorf("Database version not handled: %v", dbVersion.Version)
}
}
func updateMySQLDatabaseFrom1To2(dbHandle *sql.DB) error {
logger.InfoToConsole("updating database version: 1 -> 2")
providerLog(logger.LevelInfo, "updating database version: 1 -> 2")
sql := strings.Replace(mysqlV2SQL, "{{users}}", sqlTableUsers, 1)
return sqlCommonExecSQLAndUpdateDBVersion(dbHandle, []string{sql}, 2)
}
func updateMySQLDatabaseFrom2To3(dbHandle *sql.DB) error {
logger.InfoToConsole("updating database version: 2 -> 3")
providerLog(logger.LevelInfo, "updating database version: 2 -> 3")
sql := strings.Replace(mysqlV3SQL, "{{users}}", sqlTableUsers, 1)
return sqlCommonExecSQLAndUpdateDBVersion(dbHandle, []string{sql}, 3)
}
func updateMySQLDatabaseFrom3To4(dbHandle *sql.DB) error {
return sqlCommonUpdateDatabaseFrom3To4(mysqlV4SQL, dbHandle)
}
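
migrateDatabase upgrades one schema version at a time, each case falling through to the next step until the target version is reached; unknown versions are rejected. A generic sketch of the same chaining (illustrative, not sftpgo code):

package main

import "fmt"

// steps[v] upgrades the schema from version v to v+1.
var steps = map[int]func() error{
	1: func() error { fmt.Println("1 -> 2"); return nil },
	2: func() error { fmt.Println("2 -> 3"); return nil },
	3: func() error { fmt.Println("3 -> 4"); return nil },
}

func migrate(current, target int) error {
	for v := current; v < target; v++ {
		step, ok := steps[v]
		if !ok {
			return fmt.Errorf("database version not handled: %v", v)
		}
		if err := step(); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	if err := migrate(1, 4); err != nil {
		fmt.Println("migration failed:", err)
	}
}
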


@@ -0,0 +1,17 @@
// +build nomysql
package dataprovider
import (
"errors"
"github.com/drakkan/sftpgo/version"
)
func init() {
version.AddFeature("-mysql")
}
func initializeMySQLProvider() error {
return errors.New("MySQL disabled at build time")
}


@@ -1,11 +1,42 @@
// +build !nopgsql
package dataprovider
import (
"context"
"database/sql"
"fmt"
"runtime"
"strings"
// we import lib/pq here to be able to disable PostgreSQL support using a build tag
_ "github.com/lib/pq"
"github.com/drakkan/sftpgo/logger"
"github.com/drakkan/sftpgo/version"
"github.com/drakkan/sftpgo/vfs"
)
const (
pgsqlUsersTableSQL = `CREATE TABLE "{{users}}" ("id" serial NOT NULL PRIMARY KEY, "username" varchar(255) NOT NULL UNIQUE,
"password" varchar(255) NULL, "public_keys" text NULL, "home_dir" varchar(255) NOT NULL, "uid" integer NOT NULL,
"gid" integer NOT NULL, "max_sessions" integer NOT NULL, "quota_size" bigint NOT NULL, "quota_files" integer NOT NULL,
"permissions" text NOT NULL, "used_quota_size" bigint NOT NULL, "used_quota_files" integer NOT NULL,
"last_quota_update" bigint NOT NULL, "upload_bandwidth" integer NOT NULL, "download_bandwidth" integer NOT NULL,
"expiration_date" bigint NOT NULL, "last_login" bigint NOT NULL, "status" integer NOT NULL, "filters" text NULL,
"filesystem" text NULL);`
pgsqlSchemaTableSQL = `CREATE TABLE "{{schema_version}}" ("id" serial NOT NULL PRIMARY KEY, "version" integer NOT NULL);`
pgsqlV2SQL = `ALTER TABLE "{{users}}" ADD COLUMN "virtual_folders" text NULL;`
pgsqlV3SQL = `ALTER TABLE "{{users}}" ALTER COLUMN "password" TYPE text USING "password"::text;`
pgsqlV4SQL = `CREATE TABLE "{{folders}}" ("id" serial NOT NULL PRIMARY KEY, "path" varchar(512) NOT NULL UNIQUE, "used_quota_size" bigint NOT NULL, "used_quota_files" integer NOT NULL, "last_quota_update" bigint NOT NULL);
ALTER TABLE "{{users}}" ALTER COLUMN "home_dir" TYPE varchar(512) USING "home_dir"::varchar(512);
ALTER TABLE "{{users}}" DROP COLUMN "virtual_folders" CASCADE;
CREATE TABLE "{{folders_mapping}}" ("id" serial NOT NULL PRIMARY KEY, "virtual_path" varchar(512) NOT NULL, "quota_size" bigint NOT NULL, "quota_files" integer NOT NULL, "folder_id" integer NOT NULL, "user_id" integer NOT NULL);
ALTER TABLE "{{folders_mapping}}" ADD CONSTRAINT "unique_mapping" UNIQUE ("user_id", "folder_id");
ALTER TABLE "{{folders_mapping}}" ADD CONSTRAINT "folders_mapping_folder_id_fk_folders_id" FOREIGN KEY ("folder_id") REFERENCES "{{folders}}" ("id") MATCH SIMPLE ON UPDATE NO ACTION ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED;
ALTER TABLE "{{folders_mapping}}" ADD CONSTRAINT "folders_mapping_user_id_fk_users_id" FOREIGN KEY ("user_id") REFERENCES "{{users}}" ("id") MATCH SIMPLE ON UPDATE NO ACTION ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED;
CREATE INDEX "folders_mapping_folder_id_idx" ON "{{folders_mapping}}" ("folder_id");
CREATE INDEX "folders_mapping_user_id_idx" ON "{{folders_mapping}}" ("user_id");
`
)
// PGSQLProvider auth provider for PostgreSQL database
@@ -13,33 +44,50 @@ type PGSQLProvider struct {
dbHandle *sql.DB
}
func init() {
version.AddFeature("+pgsql")
}
func initializePGSQLProvider() error {
var err error
logSender = fmt.Sprintf("dataprovider_%v", PGSQLDataProviderName)
dbHandle, err := sql.Open("postgres", getPGSQLConnectionString(false))
if err == nil {
providerLog(logger.LevelDebug, "postgres database handle created, connection string: %#v, pool size: %v",
getPGSQLConnectionString(true), config.PoolSize)
dbHandle.SetMaxOpenConns(config.PoolSize)
provider = PGSQLProvider{dbHandle: dbHandle}
} else {
logger.Warn(logSender, "error creating postgres database handler, connection string: '%v', error: %v", connectionString, err)
providerLog(logger.LevelWarn, "error creating postgres database handler, connection string: %#v, error: %v",
getPGSQLConnectionString(true), err)
}
return err
}
func getPGSQLConnectionString(redactedPwd bool) string {
var connectionString string
if len(config.ConnectionString) == 0 {
password := config.Password
if redactedPwd {
password = "[redacted]"
}
connectionString = fmt.Sprintf("host='%v' port=%v dbname='%v' user='%v' password='%v' sslmode=%v connect_timeout=10",
config.Host, config.Port, config.Name, config.Username, password, getSSLMode())
} else {
connectionString = config.ConnectionString
}
return connectionString
}
func (p PGSQLProvider) checkAvailability() error {
return sqlCommonCheckAvailability(p.dbHandle)
}
func (p PGSQLProvider) validateUserAndPass(username, password, ip, protocol string) (User, error) {
return sqlCommonValidateUserAndPass(username, password, ip, protocol, p.dbHandle)
}
func (p PGSQLProvider) validateUserAndPubKey(username string, publicKey []byte) (User, string, error) {
return sqlCommonValidateUserAndPubKey(username, publicKey, p.dbHandle)
}
@@ -48,27 +96,17 @@ func (p PGSQLProvider) getUserByID(ID int64) (User, error) {
}
func (p PGSQLProvider) updateQuota(username string, filesAdd int, sizeAdd int64, reset bool) error {
return sqlCommonUpdateQuota(username, filesAdd, sizeAdd, reset, p.dbHandle)
}
func (p PGSQLProvider) getUsedQuota(username string) (int, int64, error) {
return sqlCommonGetUsedQuota(username, p.dbHandle)
}
func (p PGSQLProvider) updateLastLogin(username string) error {
return sqlCommonUpdateLastLogin(username, p.dbHandle)
}
func (p PGSQLProvider) userExists(username string) (User, error) {
return sqlCommonCheckUserExists(username, p.dbHandle)
}
@@ -85,6 +123,128 @@ func (p PGSQLProvider) deleteUser(user User) error {
return sqlCommonDeleteUser(user, p.dbHandle)
}
func (p PGSQLProvider) dumpUsers() ([]User, error) {
return sqlCommonDumpUsers(p.dbHandle)
}
func (p PGSQLProvider) getUsers(limit int, offset int, order string, username string) ([]User, error) {
return sqlCommonGetUsers(limit, offset, order, username, p.dbHandle)
}
func (p PGSQLProvider) dumpFolders() ([]vfs.BaseVirtualFolder, error) {
return sqlCommonDumpFolders(p.dbHandle)
}
func (p PGSQLProvider) getFolders(limit, offset int, order, folderPath string) ([]vfs.BaseVirtualFolder, error) {
return sqlCommonGetFolders(limit, offset, order, folderPath, p.dbHandle)
}
func (p PGSQLProvider) getFolderByPath(mappedPath string) (vfs.BaseVirtualFolder, error) {
ctx, cancel := context.WithTimeout(context.Background(), defaultSQLQueryTimeout)
defer cancel()
return sqlCommonCheckFolderExists(ctx, mappedPath, p.dbHandle)
}
func (p PGSQLProvider) addFolder(folder vfs.BaseVirtualFolder) error {
return sqlCommonAddFolder(folder, p.dbHandle)
}
func (p PGSQLProvider) deleteFolder(folder vfs.BaseVirtualFolder) error {
return sqlCommonDeleteFolder(folder, p.dbHandle)
}
func (p PGSQLProvider) updateFolderQuota(mappedPath string, filesAdd int, sizeAdd int64, reset bool) error {
return sqlCommonUpdateFolderQuota(mappedPath, filesAdd, sizeAdd, reset, p.dbHandle)
}
func (p PGSQLProvider) getUsedFolderQuota(mappedPath string) (int, int64, error) {
return sqlCommonGetFolderUsedQuota(mappedPath, p.dbHandle)
}
func (p PGSQLProvider) close() error {
return p.dbHandle.Close()
}
func (p PGSQLProvider) reloadConfig() error {
return nil
}
// initializeDatabase creates the initial database structure
func (p PGSQLProvider) initializeDatabase() error {
dbVersion, err := sqlCommonGetDatabaseVersion(p.dbHandle, false)
if err == nil && dbVersion.Version > 0 {
return ErrNoInitRequired
}
sqlUsers := strings.Replace(pgsqlUsersTableSQL, "{{users}}", sqlTableUsers, 1)
tx, err := p.dbHandle.Begin()
if err != nil {
return err
}
_, err = tx.Exec(sqlUsers)
if err != nil {
sqlCommonRollbackTransaction(tx)
return err
}
_, err = tx.Exec(strings.Replace(pgsqlSchemaTableSQL, "{{schema_version}}", sqlTableSchemaVersion, 1))
if err != nil {
sqlCommonRollbackTransaction(tx)
return err
}
_, err = tx.Exec(strings.Replace(initialDBVersionSQL, "{{schema_version}}", sqlTableSchemaVersion, 1))
if err != nil {
sqlCommonRollbackTransaction(tx)
return err
}
return tx.Commit()
}
func (p PGSQLProvider) migrateDatabase() error {
dbVersion, err := sqlCommonGetDatabaseVersion(p.dbHandle, true)
if err != nil {
return err
}
if dbVersion.Version == sqlDatabaseVersion {
providerLog(logger.LevelDebug, "sql database is up to date, current version: %v", dbVersion.Version)
return ErrNoInitRequired
}
switch dbVersion.Version {
case 1:
err = updatePGSQLDatabaseFrom1To2(p.dbHandle)
if err != nil {
return err
}
err = updatePGSQLDatabaseFrom2To3(p.dbHandle)
if err != nil {
return err
}
return updatePGSQLDatabaseFrom3To4(p.dbHandle)
case 2:
err = updatePGSQLDatabaseFrom2To3(p.dbHandle)
if err != nil {
return err
}
return updatePGSQLDatabaseFrom3To4(p.dbHandle)
case 3:
return updatePGSQLDatabaseFrom3To4(p.dbHandle)
default:
return fmt.Errorf("Database version not handled: %v", dbVersion.Version)
}
}
func updatePGSQLDatabaseFrom1To2(dbHandle *sql.DB) error {
logger.InfoToConsole("updating database version: 1 -> 2")
providerLog(logger.LevelInfo, "updating database version: 1 -> 2")
sql := strings.Replace(pgsqlV2SQL, "{{users}}", sqlTableUsers, 1)
return sqlCommonExecSQLAndUpdateDBVersion(dbHandle, []string{sql}, 2)
}
func updatePGSQLDatabaseFrom2To3(dbHandle *sql.DB) error {
logger.InfoToConsole("updating database version: 2 -> 3")
providerLog(logger.LevelInfo, "updating database version: 2 -> 3")
sql := strings.Replace(pgsqlV3SQL, "{{users}}", sqlTableUsers, 1)
return sqlCommonExecSQLAndUpdateDBVersion(dbHandle, []string{sql}, 3)
}
func updatePGSQLDatabaseFrom3To4(dbHandle *sql.DB) error {
return sqlCommonUpdateDatabaseFrom3To4(pgsqlV4SQL, dbHandle)
}


@@ -0,0 +1,17 @@
// +build nopgsql
package dataprovider
import (
"errors"
"github.com/drakkan/sftpgo/version"
)
func init() {
version.AddFeature("-pgsql")
}
func initializePGSQLProvider() error {
return errors.New("PostgreSQL disabled at build time")
}


@@ -1,117 +1,180 @@
package dataprovider
import (
"context"
"database/sql"
"encoding/json"
"errors"
"strings"
"time"
"github.com/drakkan/sftpgo/logger"
"github.com/drakkan/sftpgo/utils"
"github.com/drakkan/sftpgo/vfs"
)
const (
sqlDatabaseVersion = 4
initialDBVersionSQL = "INSERT INTO {{schema_version}} (version) VALUES (1);"
defaultSQLQueryTimeout = 10 * time.Second
longSQLQueryTimeout = 60 * time.Second
)
var errSQLFoldersAssosaction = errors.New("unable to associate virtual folders to user")
type sqlQuerier interface {
PrepareContext(ctx context.Context, query string) (*sql.Stmt, error)
}
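
sqlQuerier is the one method shared by *sql.DB and *sql.Tx that the helpers in this file need, so the same code can run with or without a transaction. A compile-time sketch of the trick:

package main

import (
	"context"
	"database/sql"
	"fmt"
)

// querier mirrors sqlQuerier above.
type querier interface {
	PrepareContext(ctx context.Context, query string) (*sql.Stmt, error)
}

func main() {
	// compile-time proof that both handle types satisfy the interface
	var _ querier = (*sql.DB)(nil)
	var _ querier = (*sql.Tx)(nil)
	fmt.Println("helpers accept a plain handle or a transaction")
}
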
func getUserByUsername(username string, dbHandle sqlQuerier) (User, error) {
var user User
ctx, cancel := context.WithTimeout(context.Background(), defaultSQLQueryTimeout)
defer cancel()
q := getUserByUsernameQuery()
stmt, err := dbHandle.PrepareContext(ctx, q)
if err != nil {
providerLog(logger.LevelWarn, "error preparing database query %#v: %v", q, err)
return user, err
}
defer stmt.Close()
row := stmt.QueryRowContext(ctx, username)
user, err = getUserFromDbRow(row, nil)
if err != nil {
return user, err
}
return getUserWithVirtualFolders(user, dbHandle)
}
func sqlCommonValidateUserAndPass(username, password, ip, protocol string, dbHandle *sql.DB) (User, error) {
var user User
if len(password) == 0 {
return user, errors.New("Credentials cannot be null or empty")
}
user, err := getUserByUsername(username, dbHandle)
if err != nil {
providerLog(logger.LevelWarn, "error authenticating user: %v, error: %v", username, err)
return user, err
}
return checkUserAndPass(user, password, ip, protocol)
}
func sqlCommonValidateUserAndPubKey(username string, pubKey []byte, dbHandle *sql.DB) (User, string, error) {
var user User
if len(pubKey) == 0 {
return user, "", errors.New("Credentials cannot be null or empty")
}
user, err := getUserByUsername(username, dbHandle)
if err != nil {
providerLog(logger.LevelWarn, "error authenticating user: %v, error: %v", username, err)
return user, "", err
}
return checkUserAndPubKey(user, pubKey)
}
func sqlCommonCheckAvailability(dbHandle *sql.DB) error {
ctx, cancel := context.WithTimeout(context.Background(), defaultSQLQueryTimeout)
defer cancel()
return dbHandle.PingContext(ctx)
}
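
sql.Open only validates its arguments and never dials, so this bounded PingContext is what actually proves the database is reachable. A usage sketch (connection string illustrative):

package main

import (
	"context"
	"database/sql"
	"fmt"
	"time"

	// blank import registers the driver, as in mysql.go above
	_ "github.com/go-sql-driver/mysql"
)

func main() {
	db, err := sql.Open("mysql", "sftpgo:secret@tcp(127.0.0.1:3306)/sftpgo")
	if err != nil {
		fmt.Println("open:", err)
		return
	}
	defer db.Close()
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	// fails fast if the server is unreachable instead of hanging
	fmt.Println("ping:", db.PingContext(ctx))
}
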
func sqlCommonGetUserByID(ID int64, dbHandle *sql.DB) (User, error) {
var user User
ctx, cancel := context.WithTimeout(context.Background(), defaultSQLQueryTimeout)
defer cancel()
q := getUserByIDQuery()
stmt, err := dbHandle.PrepareContext(ctx, q)
if err != nil {
providerLog(logger.LevelWarn, "error preparing database query %#v: %v", q, err)
return user, err
}
defer stmt.Close()
row := stmt.QueryRowContext(ctx, ID)
user, err = getUserFromDbRow(row, nil)
if err != nil {
return user, err
}
return getUserWithVirtualFolders(user, dbHandle)
}
func sqlCommonUpdateQuota(username string, filesAdd int, sizeAdd int64, reset bool, dbHandle *sql.DB) error {
ctx, cancel := context.WithTimeout(context.Background(), defaultSQLQueryTimeout)
defer cancel()
q := getUpdateQuotaQuery(reset)
stmt, err := dbHandle.PrepareContext(ctx, q)
if err != nil {
providerLog(logger.LevelWarn, "error preparing database query %#v: %v", q, err)
return err
}
defer stmt.Close()
_, err = stmt.ExecContext(ctx, sizeAdd, filesAdd, utils.GetTimeAsMsSinceEpoch(time.Now()), username)
if err == nil {
logger.Debug(logSender, "quota updated for user %v, files increment: %v size increment: %v is reset? %v",
providerLog(logger.LevelDebug, "quota updated for user %#v, files increment: %v size increment: %v is reset? %v",
username, filesAdd, sizeAdd, reset)
} else {
logger.Warn(logSender, "error updating quota for username %v: %v", username, err)
providerLog(logger.LevelWarn, "error updating quota for user %#v: %v", username, err)
}
return err
}
func sqlCommonGetUsedQuota(username string, dbHandle *sql.DB) (int, int64, error) {
ctx, cancel := context.WithTimeout(context.Background(), defaultSQLQueryTimeout)
defer cancel()
q := getQuotaQuery()
stmt, err := dbHandle.PrepareContext(ctx, q)
if err != nil {
providerLog(logger.LevelWarn, "error preparing database query %#v: %v", q, err)
return 0, 0, err
}
defer stmt.Close()
var usedFiles int
var usedSize int64
err = stmt.QueryRowContext(ctx, username).Scan(&usedSize, &usedFiles)
if err != nil {
logger.Warn(logSender, "error getting user quota: %v, error: %v", username, err)
providerLog(logger.LevelWarn, "error getting quota for user: %v, error: %v", username, err)
return 0, 0, err
}
return usedFiles, usedSize, err
}
func sqlCommonUpdateLastLogin(username string, dbHandle *sql.DB) error {
ctx, cancel := context.WithTimeout(context.Background(), defaultSQLQueryTimeout)
defer cancel()
q := getUpdateLastLoginQuery()
stmt, err := dbHandle.PrepareContext(ctx, q)
if err != nil {
providerLog(logger.LevelWarn, "error preparing database query %#v: %v", q, err)
return err
}
defer stmt.Close()
_, err = stmt.ExecContext(ctx, utils.GetTimeAsMsSinceEpoch(time.Now()), username)
if err == nil {
providerLog(logger.LevelDebug, "last login updated for user %#v", username)
} else {
providerLog(logger.LevelWarn, "error updating last login for user %#v: %v", username, err)
}
return err
}
func sqlCommonCheckUserExists(username string, dbHandle *sql.DB) (User, error) {
var user User
ctx, cancel := context.WithTimeout(context.Background(), defaultSQLQueryTimeout)
defer cancel()
q := getUserByUsernameQuery()
stmt, err := dbHandle.PrepareContext(ctx, q)
if err != nil {
providerLog(logger.LevelWarn, "error preparing database query %#v: %v", q, err)
return user, err
}
defer stmt.Close()
row := stmt.QueryRowContext(ctx, username)
user, err = getUserFromDbRow(row, nil)
if err != nil {
return user, err
}
return getUserWithVirtualFolders(user, dbHandle)
}
func sqlCommonAddUser(user User, dbHandle *sql.DB) error {
@@ -119,24 +182,53 @@ func sqlCommonAddUser(user User, dbHandle *sql.DB) error {
if err != nil {
return err
}
ctx, cancel := context.WithTimeout(context.Background(), defaultSQLQueryTimeout)
defer cancel()
tx, err := dbHandle.BeginTx(ctx, nil)
if err != nil {
return err
}
q := getAddUserQuery()
stmt, err := tx.PrepareContext(ctx, q)
if err != nil {
providerLog(logger.LevelWarn, "error preparing database query %#v: %v", q, err)
sqlCommonRollbackTransaction(tx)
return err
}
defer stmt.Close()
permissions, err := user.GetPermissionsAsJSON()
if err != nil {
sqlCommonRollbackTransaction(tx)
return err
}
publicKeys, err := user.GetPublicKeysAsJSON()
if err != nil {
sqlCommonRollbackTransaction(tx)
return err
}
filters, err := user.GetFiltersAsJSON()
if err != nil {
sqlCommonRollbackTransaction(tx)
return err
}
fsConfig, err := user.GetFsConfigAsJSON()
if err != nil {
sqlCommonRollbackTransaction(tx)
return err
}
_, err = stmt.ExecContext(ctx, user.Username, user.Password, string(publicKeys), user.HomeDir, user.UID, user.GID, user.MaxSessions, user.QuotaSize,
user.QuotaFiles, string(permissions), user.UploadBandwidth, user.DownloadBandwidth, user.Status, user.ExpirationDate, string(filters),
string(fsConfig))
if err != nil {
sqlCommonRollbackTransaction(tx)
return err
}
err = generateVirtualFoldersMapping(ctx, user, tx)
if err != nil {
sqlCommonRollbackTransaction(tx)
return err
}
return tx.Commit()
}
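
sqlCommonAddUser (and sqlCommonUpdateUser below) repeats the rollback-on-error step after every statement; a common way to factor that out, shown here only as a sketch and not as sftpgo code, is a small transaction wrapper:

package main

import (
	"context"
	"database/sql"
	"fmt"
)

// withTx runs fn inside a transaction, rolling back if fn fails and
// committing otherwise.
func withTx(ctx context.Context, db *sql.DB, fn func(*sql.Tx) error) error {
	tx, err := db.BeginTx(ctx, nil)
	if err != nil {
		return err
	}
	if err := fn(tx); err != nil {
		if rbErr := tx.Rollback(); rbErr != nil {
			return fmt.Errorf("%v (rollback failed: %v)", err, rbErr)
		}
		return err
	}
	return tx.Commit()
}

func main() {
	// usage: err := withTx(ctx, db, func(tx *sql.Tx) error { ...; return nil })
	var _ = withTx
	fmt.Println("sketch only")
}
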
func sqlCommonUpdateUser(user User, dbHandle *sql.DB) error {
@@ -144,69 +236,151 @@ func sqlCommonUpdateUser(user User, dbHandle *sql.DB) error {
if err != nil {
return err
}
ctx, cancel := context.WithTimeout(context.Background(), defaultSQLQueryTimeout)
defer cancel()
tx, err := dbHandle.BeginTx(ctx, nil)
if err != nil {
return err
}
q := getUpdateUserQuery()
stmt, err := tx.PrepareContext(ctx, q)
if err != nil {
providerLog(logger.LevelWarn, "error preparing database query %#v: %v", q, err)
sqlCommonRollbackTransaction(tx)
return err
}
defer stmt.Close()
permissions, err := user.GetPermissionsAsJSON()
if err != nil {
sqlCommonRollbackTransaction(tx)
return err
}
publicKeys, err := user.GetPublicKeysAsJSON()
if err != nil {
sqlCommonRollbackTransaction(tx)
return err
}
filters, err := user.GetFiltersAsJSON()
if err != nil {
sqlCommonRollbackTransaction(tx)
return err
}
fsConfig, err := user.GetFsConfigAsJSON()
if err != nil {
sqlCommonRollbackTransaction(tx)
return err
}
_, err = stmt.ExecContext(ctx, user.Password, string(publicKeys), user.HomeDir, user.UID, user.GID, user.MaxSessions, user.QuotaSize,
user.QuotaFiles, string(permissions), user.UploadBandwidth, user.DownloadBandwidth, user.Status, user.ExpirationDate,
string(filters), string(fsConfig), user.ID)
if err != nil {
sqlCommonRollbackTransaction(tx)
return err
}
err = generateVirtualFoldersMapping(ctx, user, tx)
if err != nil {
sqlCommonRollbackTransaction(tx)
return err
}
return tx.Commit()
}
func sqlCommonDeleteUser(user User, dbHandle *sql.DB) error {
ctx, cancel := context.WithTimeout(context.Background(), defaultSQLQueryTimeout)
defer cancel()
q := getDeleteUserQuery()
stmt, err := dbHandle.PrepareContext(ctx, q)
if err != nil {
providerLog(logger.LevelWarn, "error preparing database query %#v: %v", q, err)
return err
}
defer stmt.Close()
_, err = stmt.ExecContext(ctx, user.ID)
return err
}
func sqlCommonDumpUsers(dbHandle sqlQuerier) ([]User, error) {
users := make([]User, 0, 100)
ctx, cancel := context.WithTimeout(context.Background(), longSQLQueryTimeout)
defer cancel()
q := getDumpUsersQuery()
stmt, err := dbHandle.PrepareContext(ctx, q)
if err != nil {
providerLog(logger.LevelWarn, "error preparing database query %#v: %v", q, err)
return nil, err
}
defer stmt.Close()
rows, err := stmt.QueryContext(ctx)
if err != nil {
return users, err
}
defer rows.Close()
for rows.Next() {
u, err := getUserFromDbRow(nil, rows)
if err != nil {
return users, err
}
err = addCredentialsToUser(&u)
if err != nil {
return users, err
}
users = append(users, u)
}
return getUsersWithVirtualFolders(users, dbHandle)
}
func sqlCommonGetUsers(limit int, offset int, order string, username string, dbHandle sqlQuerier) ([]User, error) {
users := make([]User, 0, limit)
ctx, cancel := context.WithTimeout(context.Background(), defaultSQLQueryTimeout)
defer cancel()
q := getUsersQuery(order, username)
stmt, err := dbHandle.PrepareContext(ctx, q)
if err != nil {
providerLog(logger.LevelWarn, "error preparing database query %#v: %v", q, err)
return nil, err
}
defer stmt.Close()
var rows *sql.Rows
if len(username) > 0 {
rows, err = stmt.QueryContext(ctx, username, limit, offset) //nolint:rowserrcheck // rows.Err() is checked
} else {
rows, err = stmt.QueryContext(ctx, limit, offset) //nolint:rowserrcheck // rows.Err() is checked
}
if err == nil {
defer rows.Close()
for rows.Next() {
u, err := getUserFromDbRow(nil, rows)
if err != nil {
return users, err
}
users = append(users, HideUserSensitiveData(&u))
}
}
err = rows.Err()
if err != nil {
return users, err
}
return getUsersWithVirtualFolders(users, dbHandle)
}
func updateUserPermissionsFromDb(user *User, permissions string) error {
var err error
perms := make(map[string][]string)
err = json.Unmarshal([]byte(permissions), &perms)
if err == nil {
user.Permissions = perms
} else {
// compatibility layer: until version 0.9.4 permissions were a string list
var list []string
err = json.Unmarshal([]byte(permissions), &list)
if err != nil {
return err
}
perms["/"] = list
user.Permissions = perms
}
return err
}
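
updateUserPermissionsFromDb first tries the current per-directory map form and only then falls back to the pre-0.9.4 flat list. A standalone sketch of the same dual-format decoding (illustrative):

package main

import (
	"encoding/json"
	"fmt"
)

// parsePermissions accepts either {"/": ["*"]} (current format) or
// ["list","download"] (pre-0.9.4) and normalizes to the map form.
func parsePermissions(raw string) (map[string][]string, error) {
	perms := make(map[string][]string)
	if err := json.Unmarshal([]byte(raw), &perms); err == nil {
		return perms, nil
	}
	var list []string
	if err := json.Unmarshal([]byte(raw), &list); err != nil {
		return nil, err
	}
	perms["/"] = list
	return perms, nil
}

func main() {
	oldFmt, _ := parsePermissions(`["list","download"]`)
	newFmt, _ := parsePermissions(`{"/":["*"]}`)
	fmt.Println(oldFmt, newFmt)
}
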
func getUserFromDbRow(row *sql.Row, rows *sql.Rows) (User, error) {
@@ -214,16 +388,17 @@ func getUserFromDbRow(row *sql.Row, rows *sql.Rows) (User, error) {
var permissions sql.NullString
var password sql.NullString
var publicKey sql.NullString
var filters sql.NullString
var fsConfig sql.NullString
var err error
if row != nil {
err = row.Scan(&user.ID, &user.Username, &password, &publicKey, &user.HomeDir, &user.UID, &user.GID, &user.MaxSessions,
&user.QuotaSize, &user.QuotaFiles, &permissions, &user.UsedQuotaSize, &user.UsedQuotaFiles, &user.LastQuotaUpdate,
&user.UploadBandwidth, &user.DownloadBandwidth)
&user.UploadBandwidth, &user.DownloadBandwidth, &user.ExpirationDate, &user.LastLogin, &user.Status, &filters, &fsConfig)
} else {
err = rows.Scan(&user.ID, &user.Username, &password, &publicKey, &user.HomeDir, &user.UID, &user.GID, &user.MaxSessions,
&user.QuotaSize, &user.QuotaFiles, &permissions, &user.UsedQuotaSize, &user.UsedQuotaFiles, &user.LastQuotaUpdate,
&user.UploadBandwidth, &user.DownloadBandwidth)
&user.UploadBandwidth, &user.DownloadBandwidth, &user.ExpirationDate, &user.LastLogin, &user.Status, &filters, &fsConfig)
}
if err != nil {
if err == sql.ErrNoRows {
@@ -234,6 +409,9 @@ func getUserFromDbRow(row *sql.Row, rows *sql.Rows) (User, error) {
if password.Valid {
user.Password = password.String
}
// we can have an empty string or invalid JSON in a null string,
// so we do a relaxed test if the field is optional: for example, we
// populate public keys only if unmarshal does not return an error
if publicKey.Valid {
var list []string
err = json.Unmarshal([]byte(publicKey.String), &list)
@@ -242,11 +420,523 @@ func getUserFromDbRow(row *sql.Row, rows *sql.Rows) (User, error) {
}
}
if permissions.Valid {
err = updateUserPermissionsFromDb(&user, permissions.String)
if err != nil {
return user, err
}
}
if filters.Valid {
var userFilters UserFilters
err = json.Unmarshal([]byte(filters.String), &userFilters)
if err == nil {
user.Filters = userFilters
}
}
if fsConfig.Valid {
var fs Filesystem
err = json.Unmarshal([]byte(fsConfig.String), &fs)
if err == nil {
user.FsConfig = fs
}
}
return user, err
}
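// Example (editor's sketch, not part of the upstream diff): the relaxed
// NullString handling above means an empty or invalid JSON value in an
// optional column is skipped without failing the whole row.
func exampleRelaxedNullString() {
pk := sql.NullString{String: "not json", Valid: true}
var list []string
if err := json.Unmarshal([]byte(pk.String), &list); err == nil {
fmt.Println("public keys:", list)
} else {
fmt.Println("invalid JSON ignored, the user still loads")
}
}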
func sqlCommonCheckFolderExists(ctx context.Context, name string, dbHandle sqlQuerier) (vfs.BaseVirtualFolder, error) {
var folder vfs.BaseVirtualFolder
q := getFolderByPathQuery()
stmt, err := dbHandle.PrepareContext(ctx, q)
if err != nil {
providerLog(logger.LevelWarn, "error preparing database query %#v: %v", q, err)
return folder, err
}
defer stmt.Close()
row := stmt.QueryRowContext(ctx, name)
err = row.Scan(&folder.ID, &folder.MappedPath, &folder.UsedQuotaSize, &folder.UsedQuotaFiles, &folder.LastQuotaUpdate)
if err == sql.ErrNoRows {
return folder, &RecordNotFoundError{err: err.Error()}
}
return folder, err
}
func sqlCommonAddOrGetFolder(ctx context.Context, name string, usedQuotaSize int64, usedQuotaFiles int, lastQuotaUpdate int64, dbHandle sqlQuerier) (vfs.BaseVirtualFolder, error) {
folder, err := sqlCommonCheckFolderExists(ctx, name, dbHandle)
if _, ok := err.(*RecordNotFoundError); ok {
f := vfs.BaseVirtualFolder{
MappedPath: name,
UsedQuotaSize: usedQuotaSize,
UsedQuotaFiles: usedQuotaFiles,
LastQuotaUpdate: lastQuotaUpdate,
}
err = sqlCommonAddFolder(f, dbHandle)
if err != nil {
return folder, err
}
return sqlCommonCheckFolderExists(ctx, name, dbHandle)
}
return folder, err
}
func sqlCommonAddFolder(folder vfs.BaseVirtualFolder, dbHandle sqlQuerier) error {
err := validateFolder(&folder)
if err != nil {
return err
}
ctx, cancel := context.WithTimeout(context.Background(), defaultSQLQueryTimeout)
defer cancel()
q := getAddFolderQuery()
stmt, err := dbHandle.PrepareContext(ctx, q)
if err != nil {
providerLog(logger.LevelWarn, "error preparing database query %#v: %v", q, err)
return err
}
defer stmt.Close()
_, err = stmt.ExecContext(ctx, folder.MappedPath, folder.UsedQuotaSize, folder.UsedQuotaFiles, folder.LastQuotaUpdate)
return err
}
func sqlCommonDeleteFolder(folder vfs.BaseVirtualFolder, dbHandle sqlQuerier) error {
ctx, cancel := context.WithTimeout(context.Background(), defaultSQLQueryTimeout)
defer cancel()
q := getDeleteFolderQuery()
stmt, err := dbHandle.PrepareContext(ctx, q)
if err != nil {
providerLog(logger.LevelWarn, "error preparing database query %#v: %v", q, err)
return err
}
defer stmt.Close()
_, err = stmt.ExecContext(ctx, folder.ID)
return err
}
func sqlCommonDumpFolders(dbHandle sqlQuerier) ([]vfs.BaseVirtualFolder, error) {
folders := make([]vfs.BaseVirtualFolder, 0, 50)
ctx, cancel := context.WithTimeout(context.Background(), longSQLQueryTimeout)
defer cancel()
q := getDumpFoldersQuery()
stmt, err := dbHandle.PrepareContext(ctx, q)
if err != nil {
providerLog(logger.LevelWarn, "error preparing database query %#v: %v", q, err)
return nil, err
}
defer stmt.Close()
rows, err := stmt.QueryContext(ctx)
if err != nil {
return folders, err
}
defer rows.Close()
for rows.Next() {
var folder vfs.BaseVirtualFolder
err = rows.Scan(&folder.ID, &folder.MappedPath, &folder.UsedQuotaSize, &folder.UsedQuotaFiles, &folder.LastQuotaUpdate)
if err != nil {
return folders, err
}
folders = append(folders, folder)
}
err = rows.Err()
if err != nil {
return folders, err
}
return getVirtualFoldersWithUsers(folders, dbHandle)
}
func sqlCommonGetFolders(limit, offset int, order, folderPath string, dbHandle sqlQuerier) ([]vfs.BaseVirtualFolder, error) {
folders := make([]vfs.BaseVirtualFolder, 0, limit)
ctx, cancel := context.WithTimeout(context.Background(), defaultSQLQueryTimeout)
defer cancel()
q := getFoldersQuery(order, folderPath)
stmt, err := dbHandle.PrepareContext(ctx, q)
if err != nil {
providerLog(logger.LevelWarn, "error preparing database query %#v: %v", q, err)
return nil, err
}
defer stmt.Close()
var rows *sql.Rows
if len(folderPath) > 0 {
rows, err = stmt.QueryContext(ctx, folderPath, limit, offset) //nolint:rowserrcheck // rows.Err() is checked
} else {
rows, err = stmt.QueryContext(ctx, limit, offset) //nolint:rowserrcheck // rows.Err() is checked
}
if err != nil {
return folders, err
}
defer rows.Close()
for rows.Next() {
var folder vfs.BaseVirtualFolder
err = rows.Scan(&folder.ID, &folder.MappedPath, &folder.UsedQuotaSize, &folder.UsedQuotaFiles, &folder.LastQuotaUpdate)
if err != nil {
return folders, err
}
folders = append(folders, folder)
}
err = rows.Err()
if err != nil {
return folders, err
}
return getVirtualFoldersWithUsers(folders, dbHandle)
}
func sqlCommonClearFolderMapping(ctx context.Context, user User, dbHandle sqlQuerier) error {
q := getClearFolderMappingQuery()
stmt, err := dbHandle.PrepareContext(ctx, q)
if err != nil {
providerLog(logger.LevelWarn, "error preparing database query %#v: %v", q, err)
return err
}
defer stmt.Close()
_, err = stmt.ExecContext(ctx, user.Username)
return err
}
func sqlCommonAddFolderMapping(ctx context.Context, user User, folder vfs.VirtualFolder, dbHandle sqlQuerier) error {
q := getAddFolderMappingQuery()
stmt, err := dbHandle.PrepareContext(ctx, q)
if err != nil {
providerLog(logger.LevelWarn, "error preparing database query %#v: %v", q, err)
return err
}
defer stmt.Close()
_, err = stmt.ExecContext(ctx, folder.VirtualPath, folder.QuotaSize, folder.QuotaFiles, folder.ID, user.Username)
return err
}
func generateVirtualFoldersMapping(ctx context.Context, user User, dbHandle sqlQuerier) error {
err := sqlCommonClearFolderMapping(ctx, user, dbHandle)
if err != nil {
return err
}
for _, vfolder := range user.VirtualFolders {
f, err := sqlCommonAddOrGetFolder(ctx, vfolder.MappedPath, 0, 0, 0, dbHandle)
if err != nil {
return err
}
vfolder.BaseVirtualFolder = f
err = sqlCommonAddFolderMapping(ctx, user, vfolder, dbHandle)
if err != nil {
return err
}
}
return err
}
func getUserWithVirtualFolders(user User, dbHandle sqlQuerier) (User, error) {
users, err := getUsersWithVirtualFolders([]User{user}, dbHandle)
if err != nil {
return user, err
}
if len(users) == 0 {
return user, errSQLFoldersAssosaction
}
return users[0], err
}
func getUsersWithVirtualFolders(users []User, dbHandle sqlQuerier) ([]User, error) {
var err error
usersVirtualFolders := make(map[int64][]vfs.VirtualFolder)
if len(users) == 0 {
return users, err
}
ctx, cancel := context.WithTimeout(context.Background(), defaultSQLQueryTimeout)
defer cancel()
q := getRelatedFoldersForUsersQuery(users)
stmt, err := dbHandle.PrepareContext(ctx, q)
if err != nil {
providerLog(logger.LevelWarn, "error preparing database query %#v: %v", q, err)
return nil, err
}
defer stmt.Close()
rows, err := stmt.QueryContext(ctx)
if err != nil {
return nil, err
}
defer rows.Close()
for rows.Next() {
var folder vfs.VirtualFolder
var userID int64
err = rows.Scan(&folder.ID, &folder.MappedPath, &folder.UsedQuotaSize, &folder.UsedQuotaFiles,
&folder.LastQuotaUpdate, &folder.VirtualPath, &folder.QuotaSize, &folder.QuotaFiles, &userID)
if err != nil {
return users, err
}
usersVirtualFolders[userID] = append(usersVirtualFolders[userID], folder)
}
err = rows.Err()
if err != nil {
return users, err
}
if len(usersVirtualFolders) == 0 {
return users, err
}
for idx := range users {
ref := &users[idx]
ref.VirtualFolders = usersVirtualFolders[ref.ID]
}
return users, err
}
func getVirtualFoldersWithUsers(folders []vfs.BaseVirtualFolder, dbHandle sqlQuerier) ([]vfs.BaseVirtualFolder, error) {
var err error
vFoldersUsers := make(map[int64][]string)
if len(folders) == 0 {
return folders, err
}
ctx, cancel := context.WithTimeout(context.Background(), defaultSQLQueryTimeout)
defer cancel()
q := getRelatedUsersForFoldersQuery(folders)
stmt, err := dbHandle.PrepareContext(ctx, q)
if err != nil {
providerLog(logger.LevelWarn, "error preparing database query %#v: %v", q, err)
return nil, err
}
defer stmt.Close()
rows, err := stmt.QueryContext(ctx)
if err != nil {
return nil, err
}
defer rows.Close()
for rows.Next() {
var username string
var folderID int64
err = rows.Scan(&folderID, &username)
if err != nil {
return folders, err
}
vFoldersUsers[folderID] = append(vFoldersUsers[folderID], username)
}
err = rows.Err()
if err != nil {
return folders, err
}
if len(vFoldersUsers) == 0 {
return folders, err
}
for idx := range folders {
ref := &folders[idx]
ref.Users = vFoldersUsers[ref.ID]
}
return folders, err
}
func sqlCommonUpdateFolderQuota(mappedPath string, filesAdd int, sizeAdd int64, reset bool, dbHandle *sql.DB) error {
ctx, cancel := context.WithTimeout(context.Background(), defaultSQLQueryTimeout)
defer cancel()
q := getUpdateFolderQuotaQuery(reset)
stmt, err := dbHandle.PrepareContext(ctx, q)
if err != nil {
providerLog(logger.LevelWarn, "error preparing database query %#v: %v", q, err)
return err
}
defer stmt.Close()
_, err = stmt.ExecContext(ctx, sizeAdd, filesAdd, utils.GetTimeAsMsSinceEpoch(time.Now()), mappedPath)
if err == nil {
providerLog(logger.LevelDebug, "quota updated for folder %#v, files increment: %v size increment: %v is reset? %v",
mappedPath, filesAdd, sizeAdd, reset)
} else {
providerLog(logger.LevelWarn, "error updating quota for folder %#v: %v", mappedPath, err)
}
return err
}
func sqlCommonGetFolderUsedQuota(mappedPath string, dbHandle *sql.DB) (int, int64, error) {
ctx, cancel := context.WithTimeout(context.Background(), defaultSQLQueryTimeout)
defer cancel()
q := getQuotaFolderQuery()
stmt, err := dbHandle.PrepareContext(ctx, q)
if err != nil {
providerLog(logger.LevelWarn, "error preparing database query %#v: %v", q, err)
return 0, 0, err
}
defer stmt.Close()
var usedFiles int
var usedSize int64
err = stmt.QueryRowContext(ctx, mappedPath).Scan(&usedSize, &usedFiles)
if err != nil {
providerLog(logger.LevelWarn, "error getting quota for folder: %v, error: %v", mappedPath, err)
return 0, 0, err
}
return usedFiles, usedSize, err
}
func sqlCommonRollbackTransaction(tx *sql.Tx) {
err := tx.Rollback()
if err != nil {
providerLog(logger.LevelWarn, "error rolling back transaction: %v", err)
}
}
func sqlCommonGetDatabaseVersion(dbHandle *sql.DB, showInitWarn bool) (schemaVersion, error) {
var result schemaVersion
ctx, cancel := context.WithTimeout(context.Background(), defaultSQLQueryTimeout)
defer cancel()
q := getDatabaseVersionQuery()
stmt, err := dbHandle.PrepareContext(ctx, q)
if err != nil {
providerLog(logger.LevelWarn, "error preparing database query %#v: %v", q, err)
if showInitWarn && strings.Contains(err.Error(), sqlTableSchemaVersion) {
logger.WarnToConsole("database query error, did you forgot to run the \"initprovider\" command?")
}
return result, err
}
defer stmt.Close()
row := stmt.QueryRowContext(ctx)
err = row.Scan(&result.Version)
return result, err
}
func sqlCommonUpdateDatabaseVersion(ctx context.Context, dbHandle sqlQuerier, version int) error {
q := getUpdateDBVersionQuery()
stmt, err := dbHandle.PrepareContext(ctx, q)
if err != nil {
providerLog(logger.LevelWarn, "error preparing database query %#v: %v", q, err)
return err
}
defer stmt.Close()
_, err = stmt.ExecContext(ctx, version)
return err
}
func sqlCommonExecSQLAndUpdateDBVersion(dbHandle *sql.DB, sql []string, newVersion int) error {
ctx, cancel := context.WithTimeout(context.Background(), longSQLQueryTimeout)
defer cancel()
tx, err := dbHandle.BeginTx(ctx, nil)
if err != nil {
return err
}
for _, q := range sql {
if len(strings.TrimSpace(q)) == 0 {
continue
}
_, err = tx.ExecContext(ctx, q)
if err != nil {
sqlCommonRollbackTransaction(tx)
return err
}
}
err = sqlCommonUpdateDatabaseVersion(ctx, tx, newVersion)
if err != nil {
sqlCommonRollbackTransaction(tx)
return err
}
return tx.Commit()
}
func sqlCommonGetCompatVirtualFolders(dbHandle *sql.DB) ([]userCompactVFolders, error) {
users := []userCompactVFolders{}
ctx, cancel := context.WithTimeout(context.Background(), defaultSQLQueryTimeout)
defer cancel()
q := getCompatVirtualFoldersQuery()
stmt, err := dbHandle.PrepareContext(ctx, q)
if err != nil {
providerLog(logger.LevelWarn, "error preparing database query %#v: %v", q, err)
return nil, err
}
defer stmt.Close()
rows, err := stmt.QueryContext(ctx)
if err != nil {
return nil, err
}
defer rows.Close()
for rows.Next() {
var user userCompactVFolders
var virtualFolders sql.NullString
err = rows.Scan(&user.ID, &user.Username, &virtualFolders)
if err != nil {
return nil, err
}
if virtualFolders.Valid {
var list []virtualFoldersCompact
err = json.Unmarshal([]byte(virtualFolders.String), &list)
if err == nil && len(list) > 0 {
user.VirtualFolders = list
users = append(users, user)
}
}
}
return users, rows.Err()
}
func sqlCommonRestoreCompatVirtualFolders(ctx context.Context, users []userCompactVFolders, dbHandle sqlQuerier) ([]string, error) {
foldersToScan := []string{}
for _, user := range users {
for _, vfolder := range user.VirtualFolders {
providerLog(logger.LevelInfo, "restoring virtual folder: %+v for user %#v", vfolder, user.Username)
// -1 means included in user quota, 0 means unlimited
quotaSize := int64(-1)
quotaFiles := -1
if vfolder.ExcludeFromQuota {
quotaFiles = 0
quotaSize = 0
}
b, err := sqlCommonAddOrGetFolder(ctx, vfolder.MappedPath, 0, 0, 0, dbHandle)
if err != nil {
providerLog(logger.LevelWarn, "error restoring virtual folder for user %#v: %v", user.Username, err)
return foldersToScan, err
}
u := User{
ID: user.ID,
Username: user.Username,
}
f := vfs.VirtualFolder{
BaseVirtualFolder: b,
VirtualPath: vfolder.VirtualPath,
QuotaSize: quotaSize,
QuotaFiles: quotaFiles,
}
err = sqlCommonAddFolderMapping(ctx, u, f, dbHandle)
if err != nil {
providerLog(logger.LevelWarn, "error adding virtual folder mapping for user %#v: %v", user.Username, err)
return foldersToScan, err
}
if !utils.IsStringInSlice(vfolder.MappedPath, foldersToScan) {
foldersToScan = append(foldersToScan, vfolder.MappedPath)
}
providerLog(logger.LevelInfo, "virtual folder: %+v for user %#v successfully restored", vfolder, user.Username)
}
}
return foldersToScan, nil
}
func sqlCommonUpdateDatabaseFrom3To4(sqlV4 string, dbHandle *sql.DB) error {
logger.InfoToConsole("updating database version: 3 -> 4")
providerLog(logger.LevelInfo, "updating database version: 3 -> 4")
users, err := sqlCommonGetCompatVirtualFolders(dbHandle)
if err != nil {
return err
}
sql := strings.ReplaceAll(sqlV4, "{{users}}", sqlTableUsers)
sql = strings.ReplaceAll(sql, "{{folders}}", sqlTableFolders)
sql = strings.ReplaceAll(sql, "{{folders_mapping}}", sqlTableFoldersMapping)
ctx, cancel := context.WithTimeout(context.Background(), longSQLQueryTimeout)
defer cancel()
tx, err := dbHandle.BeginTx(ctx, nil)
if err != nil {
return err
}
for _, q := range strings.Split(sql, ";") {
if len(strings.TrimSpace(q)) == 0 {
continue
}
_, err = tx.ExecContext(ctx, q)
if err != nil {
sqlCommonRollbackTransaction(tx)
return err
}
}
foldersToScan, err := sqlCommonRestoreCompatVirtualFolders(ctx, users, tx)
if err != nil {
sqlCommonRollbackTransaction(tx)
return err
}
err = sqlCommonUpdateDatabaseVersion(ctx, tx, 4)
if err != nil {
sqlCommonRollbackTransaction(tx)
return err
}
err = tx.Commit()
if err == nil {
go updateVFoldersQuotaAfterRestore(foldersToScan)
}
return err
}


// +build !nosqlite
package dataprovider
import (
"context"
"database/sql"
"errors"
"fmt"
"os"
"path/filepath"
"strings"
// we import go-sqlite3 here to be able to disable SQLite support using a build tag
_ "github.com/mattn/go-sqlite3"
"github.com/drakkan/sftpgo/logger"
"github.com/drakkan/sftpgo/utils"
"github.com/drakkan/sftpgo/version"
"github.com/drakkan/sftpgo/vfs"
)
const (
sqliteUsersTableSQL = `CREATE TABLE "{{users}}" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "username" varchar(255)
NOT NULL UNIQUE, "password" varchar(255) NULL, "public_keys" text NULL, "home_dir" varchar(255) NOT NULL, "uid" integer NOT NULL,
"gid" integer NOT NULL, "max_sessions" integer NOT NULL, "quota_size" bigint NOT NULL, "quota_files" integer NOT NULL,
"permissions" text NOT NULL, "used_quota_size" bigint NOT NULL, "used_quota_files" integer NOT NULL,
"last_quota_update" bigint NOT NULL, "upload_bandwidth" integer NOT NULL, "download_bandwidth" integer NOT NULL,
"expiration_date" bigint NOT NULL, "last_login" bigint NOT NULL, "status" integer NOT NULL, "filters" text NULL,
"filesystem" text NULL);`
sqliteSchemaTableSQL = `CREATE TABLE "{{schema_version}}" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "version" integer NOT NULL);`
sqliteV2SQL = `ALTER TABLE "{{users}}" ADD COLUMN "virtual_folders" text NULL;`
sqliteV3SQL = `CREATE TABLE "new__users" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "username" varchar(255) NOT NULL UNIQUE,
"password" text NULL, "public_keys" text NULL, "home_dir" varchar(255) NOT NULL, "uid" integer NOT NULL,
"gid" integer NOT NULL, "max_sessions" integer NOT NULL, "quota_size" bigint NOT NULL, "quota_files" integer NOT NULL,
"permissions" text NOT NULL, "used_quota_size" bigint NOT NULL, "used_quota_files" integer NOT NULL, "last_quota_update" bigint NOT NULL,
"upload_bandwidth" integer NOT NULL, "download_bandwidth" integer NOT NULL, "expiration_date" bigint NOT NULL, "last_login" bigint NOT NULL,
"status" integer NOT NULL, "filters" text NULL, "filesystem" text NULL, "virtual_folders" text NULL);
INSERT INTO "new__users" ("id", "username", "public_keys", "home_dir", "uid", "gid", "max_sessions", "quota_size", "quota_files",
"permissions", "used_quota_size", "used_quota_files", "last_quota_update", "upload_bandwidth", "download_bandwidth", "expiration_date",
"last_login", "status", "filters", "filesystem", "virtual_folders", "password") SELECT "id", "username", "public_keys", "home_dir",
"uid", "gid", "max_sessions", "quota_size", "quota_files", "permissions", "used_quota_size", "used_quota_files", "last_quota_update",
"upload_bandwidth", "download_bandwidth", "expiration_date", "last_login", "status", "filters", "filesystem", "virtual_folders",
"password" FROM "{{users}}";
DROP TABLE "{{users}}";
ALTER TABLE "new__users" RENAME TO "{{users}}";`
sqliteV4SQL = `CREATE TABLE "{{folders}}" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "path" varchar(512) NOT NULL UNIQUE,
"used_quota_size" bigint NOT NULL, "used_quota_files" integer NOT NULL, "last_quota_update" bigint NOT NULL);
CREATE TABLE "{{folders_mapping}}" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "virtual_path" varchar(512) NOT NULL,
"quota_size" bigint NOT NULL, "quota_files" integer NOT NULL, "folder_id" integer NOT NULL REFERENCES "{{folders}}" ("id")
ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED, "user_id" integer NOT NULL REFERENCES "{{users}}" ("id") ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
CONSTRAINT "unique_mapping" UNIQUE ("user_id", "folder_id"));
CREATE TABLE "new__users" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "username" varchar(255) NOT NULL UNIQUE, "password" text NULL,
"public_keys" text NULL, "home_dir" varchar(512) NOT NULL, "uid" integer NOT NULL, "gid" integer NOT NULL, "max_sessions" integer NOT NULL,
"quota_size" bigint NOT NULL, "quota_files" integer NOT NULL, "permissions" text NOT NULL, "used_quota_size" bigint NOT NULL,
"used_quota_files" integer NOT NULL, "last_quota_update" bigint NOT NULL, "upload_bandwidth" integer NOT NULL, "download_bandwidth" integer NOT NULL,
"expiration_date" bigint NOT NULL, "last_login" bigint NOT NULL, "status" integer NOT NULL, "filters" text NULL, "filesystem" text NULL);
INSERT INTO "new__users" ("id", "username", "password", "public_keys", "home_dir", "uid", "gid", "max_sessions", "quota_size", "quota_files",
"permissions", "used_quota_size", "used_quota_files", "last_quota_update", "upload_bandwidth", "download_bandwidth", "expiration_date",
"last_login", "status", "filters", "filesystem") SELECT "id", "username", "password", "public_keys", "home_dir", "uid", "gid", "max_sessions",
"quota_size", "quota_files", "permissions", "used_quota_size", "used_quota_files", "last_quota_update", "upload_bandwidth", "download_bandwidth",
"expiration_date", "last_login", "status", "filters", "filesystem" FROM "{{users}}";
DROP TABLE "{{users}}";
ALTER TABLE "new__users" RENAME TO "{{users}}";
CREATE INDEX "folders_mapping_folder_id_idx" ON "{{folders_mapping}}" ("folder_id");
CREATE INDEX "folders_mapping_user_id_idx" ON "{{folders_mapping}}" ("user_id");
`
)
// SQLiteProvider auth provider for SQLite database
type SQLiteProvider struct {
dbHandle *sql.DB
}
func init() {
version.AddFeature("+sqlite")
}
func initializeSQLiteProvider(basePath string) error {
var err error
var connectionString string
logSender = fmt.Sprintf("dataprovider_%v", SQLiteDataProviderName)
if len(config.ConnectionString) == 0 {
dbPath := config.Name
if !utils.IsFileInputValid(dbPath) {
return fmt.Errorf("Invalid database path: %#v", dbPath)
}
if !filepath.IsAbs(dbPath) {
dbPath = filepath.Join(basePath, dbPath)
}
fi, err := os.Stat(dbPath)
if err != nil {
logger.Warn(logSender, "sqlite database file does not exists, please be sure to create and initialize"+
" a database before starting sftpgo")
return err
}
if fi.Size() == 0 {
return errors.New("sqlite database file is invalid, please be sure to create and initialize" +
" a database before starting sftpgo")
}
connectionString = fmt.Sprintf("file:%v?cache=shared", dbPath)
connectionString = fmt.Sprintf("file:%v?cache=shared&_foreign_keys=1", dbPath)
} else {
connectionString = config.ConnectionString
}
dbHandle, err := sql.Open("sqlite3", connectionString)
if err == nil {
providerLog(logger.LevelDebug, "sqlite database handle created, connection string: %#v", connectionString)
dbHandle.SetMaxOpenConns(1)
provider = SQLiteProvider{dbHandle: dbHandle}
} else {
logger.Warn(logSender, "error creating sqlite database handler, connection string: '%v', error: %v", connectionString, err)
providerLog(logger.LevelWarn, "error creating sqlite database handler, connection string: %#v, error: %v",
connectionString, err)
}
return err
}
func (p SQLiteProvider) checkAvailability() error {
return sqlCommonCheckAvailability(p.dbHandle)
}
func (p SQLiteProvider) validateUserAndPass(username, password, ip, protocol string) (User, error) {
return sqlCommonValidateUserAndPass(username, password, ip, protocol, p.dbHandle)
}
func (p SQLiteProvider) validateUserAndPubKey(username string, publicKey []byte) (User, string, error) {
return sqlCommonValidateUserAndPubKey(username, publicKey, p.dbHandle)
}
func (p SQLiteProvider) getUserByID(ID int64) (User, error) {
return sqlCommonGetUserByID(ID, p.dbHandle)
}
func (p SQLiteProvider) updateQuota(username string, filesAdd int, sizeAdd int64, reset bool) error {
// we keep only 1 open connection (SetMaxOpenConns(1)) so a transaction is not
// needed here and could even block further database access, since it would try
// to open a new connection
return sqlCommonUpdateQuota(username, filesAdd, sizeAdd, reset, p.dbHandle)
}
func (p SQLiteProvider) getUsedQuota(username string) (int, int64, error) {
return sqlCommonGetUsedQuota(username, p.dbHandle)
}
func (p SQLiteProvider) updateLastLogin(username string) error {
return sqlCommonUpdateLastLogin(username, p.dbHandle)
}
func (p SQLiteProvider) userExists(username string) (User, error) {
return sqlCommonCheckUserExists(username, p.dbHandle)
}
func (p SQLiteProvider) deleteUser(user User) error {
return sqlCommonDeleteUser(user, p.dbHandle)
}
func (p SQLiteProvider) dumpUsers() ([]User, error) {
return sqlCommonDumpUsers(p.dbHandle)
}
func (p SQLiteProvider) getUsers(limit int, offset int, order string, username string) ([]User, error) {
return sqlCommonGetUsers(limit, offset, order, username, p.dbHandle)
}
func (p SQLiteProvider) dumpFolders() ([]vfs.BaseVirtualFolder, error) {
return sqlCommonDumpFolders(p.dbHandle)
}
func (p SQLiteProvider) getFolders(limit, offset int, order, folderPath string) ([]vfs.BaseVirtualFolder, error) {
return sqlCommonGetFolders(limit, offset, order, folderPath, p.dbHandle)
}
func (p SQLiteProvider) getFolderByPath(mappedPath string) (vfs.BaseVirtualFolder, error) {
ctx, cancel := context.WithTimeout(context.Background(), defaultSQLQueryTimeout)
defer cancel()
return sqlCommonCheckFolderExists(ctx, mappedPath, p.dbHandle)
}
func (p SQLiteProvider) addFolder(folder vfs.BaseVirtualFolder) error {
return sqlCommonAddFolder(folder, p.dbHandle)
}
func (p SQLiteProvider) deleteFolder(folder vfs.BaseVirtualFolder) error {
return sqlCommonDeleteFolder(folder, p.dbHandle)
}
func (p SQLiteProvider) updateFolderQuota(mappedPath string, filesAdd int, sizeAdd int64, reset bool) error {
return sqlCommonUpdateFolderQuota(mappedPath, filesAdd, sizeAdd, reset, p.dbHandle)
}
func (p SQLiteProvider) getUsedFolderQuota(mappedPath string) (int, int64, error) {
return sqlCommonGetFolderUsedQuota(mappedPath, p.dbHandle)
}
func (p SQLiteProvider) close() error {
return p.dbHandle.Close()
}
func (p SQLiteProvider) reloadConfig() error {
return nil
}
// initializeDatabase creates the initial database structure
func (p SQLiteProvider) initializeDatabase() error {
dbVersion, err := sqlCommonGetDatabaseVersion(p.dbHandle, false)
if err == nil && dbVersion.Version > 0 {
return ErrNoInitRequired
}
sqlUsers := strings.Replace(sqliteUsersTableSQL, "{{users}}", sqlTableUsers, 1)
tx, err := p.dbHandle.Begin()
if err != nil {
return err
}
_, err = tx.Exec(sqlUsers)
if err != nil {
sqlCommonRollbackTransaction(tx)
return err
}
_, err = tx.Exec(strings.Replace(sqliteSchemaTableSQL, "{{schema_version}}", sqlTableSchemaVersion, 1))
if err != nil {
sqlCommonRollbackTransaction(tx)
return err
}
_, err = tx.Exec(strings.Replace(initialDBVersionSQL, "{{schema_version}}", sqlTableSchemaVersion, 1))
if err != nil {
sqlCommonRollbackTransaction(tx)
return err
}
return tx.Commit()
}
func (p SQLiteProvider) migrateDatabase() error {
dbVersion, err := sqlCommonGetDatabaseVersion(p.dbHandle, true)
if err != nil {
return err
}
if dbVersion.Version == sqlDatabaseVersion {
providerLog(logger.LevelDebug, "sql database is up to date, current version: %v", dbVersion.Version)
return ErrNoInitRequired
}
switch dbVersion.Version {
case 1:
err = updateSQLiteDatabaseFrom1To2(p.dbHandle)
if err != nil {
return err
}
err = updateSQLiteDatabaseFrom2To3(p.dbHandle)
if err != nil {
return err
}
return updateSQLiteDatabaseFrom3To4(p.dbHandle)
case 2:
err = updateSQLiteDatabaseFrom2To3(p.dbHandle)
if err != nil {
return err
}
return updateSQLiteDatabaseFrom3To4(p.dbHandle)
case 3:
return updateSQLiteDatabaseFrom3To4(p.dbHandle)
default:
return fmt.Errorf("Database version not handled: %v", dbVersion.Version)
}
}
func updateSQLiteDatabaseFrom1To2(dbHandle *sql.DB) error {
logger.InfoToConsole("updating database version: 1 -> 2")
providerLog(logger.LevelInfo, "updating database version: 1 -> 2")
sql := strings.Replace(sqliteV2SQL, "{{users}}", sqlTableUsers, 1)
return sqlCommonExecSQLAndUpdateDBVersion(dbHandle, []string{sql}, 2)
}
func updateSQLiteDatabaseFrom2To3(dbHandle *sql.DB) error {
logger.InfoToConsole("updating database version: 2 -> 3")
providerLog(logger.LevelInfo, "updating database version: 2 -> 3")
sql := strings.ReplaceAll(sqliteV3SQL, "{{users}}", sqlTableUsers)
return sqlCommonExecSQLAndUpdateDBVersion(dbHandle, []string{sql}, 3)
}
func updateSQLiteDatabaseFrom3To4(dbHandle *sql.DB) error {
return sqlCommonUpdateDatabaseFrom3To4(sqliteV4SQL, dbHandle)
}


// +build nosqlite
package dataprovider
import (
"errors"
"github.com/drakkan/sftpgo/version"
)
func init() {
version.AddFeature("-sqlite")
}
func initializeSQLiteProvider(basePath string) error {
return errors.New("SQLite disabled at build time")
}


package dataprovider
import "fmt"
import (
"fmt"
"strconv"
"strings"
"github.com/drakkan/sftpgo/vfs"
)
const (
selectUserFields = "id,username,password,public_keys,home_dir,uid,gid,max_sessions,quota_size,quota_files,permissions," +
"used_quota_size,used_quota_files,last_quota_update,upload_bandwidth,download_bandwidth"
selectUserFields = "id,username,password,public_keys,home_dir,uid,gid,max_sessions,quota_size,quota_files,permissions,used_quota_size," +
"used_quota_files,last_quota_update,upload_bandwidth,download_bandwidth,expiration_date,last_login,status,filters,filesystem"
selectFolderFields = "id,path,used_quota_size,used_quota_files,last_quota_update"
)
func getSQLPlaceholders() []string {
var placeholders []string
for i := 1; i <= 20; i++ {
if config.Driver == PGSQLDataProviderName {
placeholders = append(placeholders, fmt.Sprintf("$%v", i))
} else {
placeholders = append(placeholders, "?")
}
}
return placeholders
}
func getUserByUsernameQuery() string {
return fmt.Sprintf(`SELECT %v FROM %v WHERE username = %v`, selectUserFields, sqlTableUsers, sqlPlaceholders[0])
}
func getUserByIDQuery() string {
return fmt.Sprintf(`SELECT %v FROM %v WHERE id = %v`, selectUserFields, sqlTableUsers, sqlPlaceholders[0])
}
func getUsersQuery(order string, username string) string {
if len(username) > 0 {
return fmt.Sprintf(`SELECT %v FROM %v WHERE username = %v ORDER BY username %v LIMIT %v OFFSET %v`,
selectUserFields, sqlTableUsers, sqlPlaceholders[0], order, sqlPlaceholders[1], sqlPlaceholders[2])
}
return fmt.Sprintf(`SELECT %v FROM %v ORDER BY username %v LIMIT %v OFFSET %v`, selectUserFields, sqlTableUsers,
order, sqlPlaceholders[0], sqlPlaceholders[1])
}
func getDumpUsersQuery() string {
return fmt.Sprintf(`SELECT %v FROM %v`, selectUserFields, sqlTableUsers)
}
func getDumpFoldersQuery() string {
return fmt.Sprintf(`SELECT %v FROM %v`, selectFolderFields, sqlTableFolders)
}
func getUpdateQuotaQuery(reset bool) string {
if reset {
return fmt.Sprintf(`UPDATE %v SET used_quota_size = %v,used_quota_files = %v,last_quota_update = %v
WHERE username = %v`, sqlTableUsers, sqlPlaceholders[0], sqlPlaceholders[1], sqlPlaceholders[2], sqlPlaceholders[3])
}
return fmt.Sprintf(`UPDATE %v SET used_quota_size = used_quota_size + %v,used_quota_files = used_quota_files + %v,last_quota_update = %v
WHERE username = %v`, sqlTableUsers, sqlPlaceholders[0], sqlPlaceholders[1], sqlPlaceholders[2], sqlPlaceholders[3])
}
func getUpdateLastLoginQuery() string {
return fmt.Sprintf(`UPDATE %v SET last_login = %v WHERE username = %v`, sqlTableUsers, sqlPlaceholders[0], sqlPlaceholders[1])
}
func getQuotaQuery() string {
return fmt.Sprintf(`SELECT used_quota_size,used_quota_files FROM %v WHERE username = %v`, sqlTableUsers,
sqlPlaceholders[0])
}
func getAddUserQuery() string {
return fmt.Sprintf(`INSERT INTO %v (username,password,public_keys,home_dir,uid,gid,max_sessions,quota_size,quota_files,permissions,
used_quota_size,used_quota_files,last_quota_update,upload_bandwidth,download_bandwidth,status,last_login,expiration_date,filters,
filesystem)
VALUES (%v,%v,%v,%v,%v,%v,%v,%v,%v,%v,0,0,0,%v,%v,%v,0,%v,%v,%v)`, sqlTableUsers, sqlPlaceholders[0], sqlPlaceholders[1],
sqlPlaceholders[2], sqlPlaceholders[3], sqlPlaceholders[4], sqlPlaceholders[5], sqlPlaceholders[6], sqlPlaceholders[7],
sqlPlaceholders[8], sqlPlaceholders[9], sqlPlaceholders[10], sqlPlaceholders[11], sqlPlaceholders[12], sqlPlaceholders[13],
sqlPlaceholders[14], sqlPlaceholders[15])
}
func getUpdateUserQuery() string {
return fmt.Sprintf(`UPDATE %v SET password=%v,public_keys=%v,home_dir=%v,uid=%v,gid=%v,max_sessions=%v,quota_size=%v,
quota_files=%v,permissions=%v,upload_bandwidth=%v,download_bandwidth=%v,status=%v,expiration_date=%v,filters=%v,filesystem=%v
WHERE id = %v`, sqlTableUsers, sqlPlaceholders[0], sqlPlaceholders[1], sqlPlaceholders[2], sqlPlaceholders[3],
sqlPlaceholders[4], sqlPlaceholders[5], sqlPlaceholders[6], sqlPlaceholders[7], sqlPlaceholders[8], sqlPlaceholders[9],
sqlPlaceholders[10], sqlPlaceholders[11], sqlPlaceholders[12], sqlPlaceholders[13], sqlPlaceholders[14], sqlPlaceholders[15])
}
func getDeleteUserQuery() string {
return fmt.Sprintf(`DELETE FROM %v WHERE id = %v`, sqlTableUsers, sqlPlaceholders[0])
}
func getFolderByPathQuery() string {
return fmt.Sprintf(`SELECT %v FROM %v WHERE path = %v`, selectFolderFields, sqlTableFolders, sqlPlaceholders[0])
}
func getAddFolderQuery() string {
return fmt.Sprintf(`INSERT INTO %v (path,used_quota_size,used_quota_files,last_quota_update) VALUES (%v,%v,%v,%v)`,
sqlTableFolders, sqlPlaceholders[0], sqlPlaceholders[1], sqlPlaceholders[2], sqlPlaceholders[3])
}
func getDeleteFolderQuery() string {
return fmt.Sprintf(`DELETE FROM %v WHERE id = %v`, sqlTableFolders, sqlPlaceholders[0])
}
func getClearFolderMappingQuery() string {
return fmt.Sprintf(`DELETE FROM %v WHERE user_id = (SELECT id FROM %v WHERE username = %v)`, sqlTableFoldersMapping,
sqlTableUsers, sqlPlaceholders[0])
}
func getAddFolderMappingQuery() string {
return fmt.Sprintf(`INSERT INTO %v (virtual_path,quota_size,quota_files,folder_id,user_id)
VALUES (%v,%v,%v,%v,(SELECT id FROM %v WHERE username = %v))`, sqlTableFoldersMapping, sqlPlaceholders[0],
sqlPlaceholders[1], sqlPlaceholders[2], sqlPlaceholders[3], sqlTableUsers, sqlPlaceholders[4])
}
func getFoldersQuery(order, folderPath string) string {
if len(folderPath) > 0 {
return fmt.Sprintf(`SELECT %v FROM %v WHERE path = %v ORDER BY path %v LIMIT %v OFFSET %v`,
selectFolderFields, sqlTableFolders, sqlPlaceholders[0], order, sqlPlaceholders[1], sqlPlaceholders[2])
}
return fmt.Sprintf(`SELECT %v FROM %v ORDER BY path %v LIMIT %v OFFSET %v`, selectFolderFields, sqlTableFolders,
order, sqlPlaceholders[0], sqlPlaceholders[1])
}
func getUpdateFolderQuotaQuery(reset bool) string {
if reset {
return fmt.Sprintf(`UPDATE %v SET used_quota_size = %v,used_quota_files = %v,last_quota_update = %v
WHERE path = %v`, sqlTableFolders, sqlPlaceholders[0], sqlPlaceholders[1], sqlPlaceholders[2], sqlPlaceholders[3])
}
return fmt.Sprintf(`UPDATE %v SET used_quota_size = used_quota_size + %v,used_quota_files = used_quota_files + %v,last_quota_update = %v
WHERE path = %v`, sqlTableFolders, sqlPlaceholders[0], sqlPlaceholders[1], sqlPlaceholders[2], sqlPlaceholders[3])
}
func getQuotaFolderQuery() string {
return fmt.Sprintf(`SELECT used_quota_size,used_quota_files FROM %v WHERE path = %v`, sqlTableFolders,
sqlPlaceholders[0])
}
func getRelatedFoldersForUsersQuery(users []User) string {
var sb strings.Builder
for _, u := range users {
if sb.Len() == 0 {
sb.WriteString("(")
} else {
sb.WriteString(",")
}
sb.WriteString(strconv.FormatInt(u.ID, 10))
}
if sb.Len() > 0 {
sb.WriteString(")")
}
return fmt.Sprintf(`SELECT f.id,f.path,f.used_quota_size,f.used_quota_files,f.last_quota_update,fm.virtual_path,fm.quota_size,fm.quota_files,fm.user_id
FROM %v f INNER JOIN %v fm ON f.id = fm.folder_id WHERE fm.user_id IN %v ORDER BY fm.user_id`, sqlTableFolders,
sqlTableFoldersMapping, sb.String())
}
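// Example (editor's sketch, not part of the upstream diff): the IN clause is
// built by hand from numeric IDs that come from the database, so three users
// with IDs 1, 5 and 9 yield the fragment "IN (1,5,9)".
func exampleRelatedFoldersQuery() {
users := []User{{ID: 1}, {ID: 5}, {ID: 9}}
q := getRelatedFoldersForUsersQuery(users)
fmt.Println(strings.Contains(q, "IN (1,5,9)")) // true
}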
func getRelatedUsersForFoldersQuery(folders []vfs.BaseVirtualFolder) string {
var sb strings.Builder
for _, f := range folders {
if sb.Len() == 0 {
sb.WriteString("(")
} else {
sb.WriteString(",")
}
sb.WriteString(strconv.FormatInt(f.ID, 10))
}
if sb.Len() > 0 {
sb.WriteString(")")
}
return fmt.Sprintf(`SELECT fm.folder_id,u.username FROM %v fm INNER JOIN %v u ON fm.user_id = u.id
WHERE fm.folder_id IN %v ORDER BY fm.folder_id`, sqlTableFoldersMapping, sqlTableUsers, sb.String())
}
func getDatabaseVersionQuery() string {
return fmt.Sprintf("SELECT version from %v LIMIT 1", sqlTableSchemaVersion)
}
func getUpdateDBVersionQuery() string {
return fmt.Sprintf(`UPDATE %v SET version=%v`, sqlTableSchemaVersion, sqlPlaceholders[0])
}
func getCompatVirtualFoldersQuery() string {
return fmt.Sprintf(`SELECT id,username,virtual_folders FROM %v`, sqlTableUsers)
}


package dataprovider
import (
"encoding/json"
"errors"
"fmt"
"net"
"os"
"path"
"path/filepath"
"strconv"
"strings"
"time"
"github.com/drakkan/sftpgo/logger"
"github.com/drakkan/sftpgo/utils"
"github.com/drakkan/sftpgo/vfs"
)
// Available permissions for SFTP users
const (
// all permissions are granted
PermAny = "*"
// list items is allowed
PermListItems = "list"
// download files is allowed
PermDownload = "download"
// upload files is allowed
PermUpload = "upload"
// overwrite an existing file, while uploading, is allowed
// upload permission is required to allow file overwrite
PermOverwrite = "overwrite"
// delete files or directories is allowed
PermDelete = "delete"
// rename files or directories is allowed
PermRename = "rename"
// create directories is allowed
PermCreateDirs = "create_dirs"
// create symbolic links is allowed
PermCreateSymlinks = "create_symlinks"
// changing file or directory permissions is allowed
PermChmod = "chmod"
// changing file or directory owner and group is allowed
PermChown = "chown"
// changing file or directory access and modification time is allowed
PermChtimes = "chtimes"
)
// Available login methods
const (
LoginMethodNoAuthTryed = "no_auth_tryed"
LoginMethodPassword = "password"
SSHLoginMethodPublicKey = "publickey"
SSHLoginMethodKeyboardInteractive = "keyboard-interactive"
SSHLoginMethodKeyAndPassword = "publickey+password"
SSHLoginMethodKeyAndKeyboardInt = "publickey+keyboard-interactive"
)
var (
errNoMatchingVirtualFolder = errors.New("no matching virtual folder found")
)
// CachedUser adds fields useful for caching to a SFTPGo user
type CachedUser struct {
User User
Expiration time.Time
Password string
}
// IsExpired returns true if the cached user is expired
func (c CachedUser) IsExpired() bool {
if c.Expiration.IsZero() {
return false
}
return c.Expiration.Before(time.Now())
}
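// Example (editor's sketch, not part of the upstream diff): a zero Expiration
// means the cached entry never expires, otherwise it expires once the
// timestamp is in the past.
func exampleCachedUser() {
expired := CachedUser{Expiration: time.Now().Add(-time.Minute)}
fmt.Println(expired.IsExpired()) // true
fmt.Println(CachedUser{}.IsExpired()) // false, no expiration set
}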
// ExtensionsFilter defines filters based on file extensions.
// These restrictions do not apply to file listings for performance reasons, so
// a denied file cannot be downloaded/overwritten/renamed but it will still be
// listed among the files.
// System commands such as Git and rsync interact with the filesystem directly
// and are not aware of these restrictions, so they are not allowed inside
// paths with extension filters
type ExtensionsFilter struct {
// SFTP/SCP path, if no other specific filter is defined, the filter applies to
// sub directories too.
// For example if filters are defined for the paths "/" and "/sub" then the
// filters for "/" are applied to any file outside the "/sub" directory
Path string `json:"path"`
// only files with these, case insensitive, extensions are allowed.
// Shell like expansion is not supported so you have to specify ".jpg" and
// not "*.jpg"
AllowedExtensions []string `json:"allowed_extensions,omitempty"`
// files with these, case insensitive, extensions are not allowed.
// Denied file extensions are evaluated before the allowed ones
DeniedExtensions []string `json:"denied_extensions,omitempty"`
}
// UserFilters defines additional restrictions for a user
type UserFilters struct {
// only clients connecting from these IP/Mask are allowed.
// IP/Mask must be in CIDR notation as defined in RFC 4632 and RFC 4291
// for example "192.0.2.0/24" or "2001:db8::/32"
AllowedIP []string `json:"allowed_ip,omitempty"`
// clients connecting from these IP/Mask are not allowed.
// Denied rules will be evaluated before allowed ones
DeniedIP []string `json:"denied_ip,omitempty"`
// these login methods are not allowed.
// If null or empty any available login method is allowed
DeniedLoginMethods []string `json:"denied_login_methods,omitempty"`
// these protocols are not allowed.
// If null or empty any available protocol is allowed
DeniedProtocols []string `json:"denied_protocols,omitempty"`
// filters based on file extensions.
// Please note that these restrictions can be easily bypassed.
FileExtensions []ExtensionsFilter `json:"file_extensions,omitempty"`
// max size allowed for a single upload, 0 means unlimited
MaxUploadFileSize int64 `json:"max_upload_file_size,omitempty"`
}
// FilesystemProvider defines the supported storages
type FilesystemProvider int
// supported values for FilesystemProvider
const (
LocalFilesystemProvider FilesystemProvider = iota // Local
S3FilesystemProvider // Amazon S3 compatible
GCSFilesystemProvider // Google Cloud Storage
)
// Filesystem defines cloud storage filesystem details
type Filesystem struct {
Provider FilesystemProvider `json:"provider"`
S3Config vfs.S3FsConfig `json:"s3config,omitempty"`
GCSConfig vfs.GCSFsConfig `json:"gcsconfig,omitempty"`
}
// User defines a SFTPGo user
type User struct {
// Database unique identifier
ID int64 `json:"id"`
// 1 enabled, 0 disabled (login is not allowed)
Status int `json:"status"`
// Username
Username string `json:"username"`
// Account expiration date as unix timestamp in milliseconds. An expired account cannot login.
// 0 means no expiration
ExpirationDate int64 `json:"expiration_date"`
// Password used for password authentication.
// For users created using the SFTPGo REST API the password is stored using the argon2id hashing algo.
// Checking passwords stored with bcrypt, pbkdf2, md5crypt and sha512crypt is supported too.
// Currently, as a fallback, clear text passwords are checked too, but you should not store
// passwords as clear text; this support could be removed at any time, so please don't depend on it.
Password string `json:"password,omitempty"`
// PublicKeys used for public key authentication. At least one of a password or a public key is mandatory
PublicKeys []string `json:"public_keys,omitempty"`
// The user cannot upload or download files outside this directory. Must be an absolute path
HomeDir string `json:"home_dir"`
// Mapping between virtual paths and filesystem paths outside the home directory.
// Supported for local filesystem only
VirtualFolders []vfs.VirtualFolder `json:"virtual_folders,omitempty"`
// If sftpgo runs as root system user then the created files and directories will be assigned to this system UID
UID int `json:"uid"`
// If sftpgo runs as root system user then the created files and directories will be assigned to this system GID
GID int `json:"gid"`
// Maximum concurrent sessions. 0 means unlimited
MaxSessions int `json:"max_sessions"`
// Maximum size allowed as bytes. 0 means unlimited
QuotaSize int64 `json:"quota_size"`
// Maximum number of files allowed. 0 means unlimited
QuotaFiles int `json:"quota_files"`
// List of the granted permissions
Permissions map[string][]string `json:"permissions"`
// Used quota as bytes
UsedQuotaSize int64 `json:"used_quota_size"`
// Used quota as number of files
UsedQuotaFiles int `json:"used_quota_files"`
// Last quota update as unix timestamp in milliseconds
LastQuotaUpdate int64 `json:"last_quota_update"`
// Maximum upload bandwidth as KB/s, 0 means unlimited
UploadBandwidth int64 `json:"upload_bandwidth"`
// Maximum download bandwidth as KB/s, 0 means unlimited
DownloadBandwidth int64 `json:"download_bandwidth"`
// Last login as unix timestamp in milliseconds
LastLogin int64 `json:"last_login"`
// Additional restrictions
Filters UserFilters `json:"filters"`
// Filesystem configuration details
FsConfig Filesystem `json:"filesystem"`
}
// GetFilesystem returns the filesystem for this user
func (u *User) GetFilesystem(connectionID string) (vfs.Fs, error) {
if u.FsConfig.Provider == S3FilesystemProvider {
return vfs.NewS3Fs(connectionID, u.GetHomeDir(), u.FsConfig.S3Config)
} else if u.FsConfig.Provider == GCSFilesystemProvider {
config := u.FsConfig.GCSConfig
config.CredentialFile = u.getGCSCredentialsFilePath()
return vfs.NewGCSFs(connectionID, u.GetHomeDir(), config)
}
return vfs.NewOsFs(connectionID, u.GetHomeDir(), u.VirtualFolders), nil
}
// GetPermissionsForPath returns the permissions for the given path.
// The path must be an SFTP path
func (u *User) GetPermissionsForPath(p string) []string {
permissions := []string{}
if perms, ok := u.Permissions["/"]; ok {
// if only root permissions are defined returns them unconditionally
if len(u.Permissions) == 1 {
return perms
}
// fallback permissions
permissions = perms
}
dirsForPath := utils.GetDirsForSFTPPath(p)
// dirsForPath contains all the dirs for a given path in reverse order
// for example if the path is: /1/2/3/4 it contains:
// [ "/1/2/3/4", "/1/2/3", "/1/2", "/1", "/" ]
// so the first match is the one we are interested in
for _, val := range dirsForPath {
if perms, ok := u.Permissions[val]; ok {
permissions = perms
break
}
}
return permissions
}
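// Example (editor's sketch, not part of the upstream diff): nearest-ancestor
// permission lookup. With root permissions plus an override on "/sub", a path
// inside "/sub" gets the override while everything else falls back to "/".
func examplePermissionsForPath() {
u := User{Permissions: map[string][]string{
"/":    {PermAny},
"/sub": {PermDownload, PermUpload},
}}
fmt.Println(u.GetPermissionsForPath("/sub/file.txt")) // [download upload]
fmt.Println(u.GetPermissionsForPath("/other/file.txt")) // [*]
}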
// GetVirtualFolderForPath returns the virtual folder containing the specified sftp path.
// If the path is not inside a virtual folder an error is returned
func (u *User) GetVirtualFolderForPath(sftpPath string) (vfs.VirtualFolder, error) {
var folder vfs.VirtualFolder
if len(u.VirtualFolders) == 0 || u.FsConfig.Provider != LocalFilesystemProvider {
return folder, errNoMatchingVirtualFolder
}
dirsForPath := utils.GetDirsForSFTPPath(sftpPath)
for _, val := range dirsForPath {
for _, v := range u.VirtualFolders {
if v.VirtualPath == val {
return v, nil
}
}
}
return folder, errNoMatchingVirtualFolder
}
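// Example (editor's sketch, not part of the upstream diff): the lookup above
// walks parent directories, so any path inside a mapped virtual folder
// resolves to that folder. Only the local filesystem provider supports this.
func exampleVirtualFolderForPath() {
u := User{VirtualFolders: []vfs.VirtualFolder{{
BaseVirtualFolder: vfs.BaseVirtualFolder{MappedPath: "/srv/data"},
VirtualPath:       "/data",
}}}
if f, err := u.GetVirtualFolderForPath("/data/sub/file.txt"); err == nil {
fmt.Println(f.MappedPath) // /srv/data
}
}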
// AddVirtualDirs adds virtual folders, if defined, to the given files list
func (u *User) AddVirtualDirs(list []os.FileInfo, sftpPath string) []os.FileInfo {
if len(u.VirtualFolders) == 0 {
return list
}
for _, v := range u.VirtualFolders {
if path.Dir(v.VirtualPath) == sftpPath {
fi := vfs.NewFileInfo(v.VirtualPath, true, 0, time.Now(), false)
found := false
for index, f := range list {
if f.Name() == fi.Name() {
list[index] = fi
found = true
break
}
}
if !found {
list = append(list, fi)
}
}
}
return list
}
// IsMappedPath returns true if the specified filesystem path has a virtual folder mapping.
// The filesystem path must be cleaned before calling this method
func (u *User) IsMappedPath(fsPath string) bool {
for _, v := range u.VirtualFolders {
if fsPath == v.MappedPath {
return true
}
}
return false
}
// IsVirtualFolder returns true if the specified sftp path is a virtual folder
func (u *User) IsVirtualFolder(sftpPath string) bool {
for _, v := range u.VirtualFolders {
if sftpPath == v.VirtualPath {
return true
}
}
return false
}
// HasVirtualFoldersInside returns true if there are virtual folders inside the
// specified SFTP path. We assume that paths are cleaned
func (u *User) HasVirtualFoldersInside(sftpPath string) bool {
if sftpPath == "/" && len(u.VirtualFolders) > 0 {
return true
}
for _, v := range u.VirtualFolders {
if len(v.VirtualPath) > len(sftpPath) {
if strings.HasPrefix(v.VirtualPath, sftpPath+"/") {
return true
}
}
}
return false
}
// HasPermissionsInside returns true if the specified sftpPath has permissions
// defined on itself or on any of its subdirectories
func (u *User) HasPermissionsInside(sftpPath string) bool {
for dir := range u.Permissions {
if dir == sftpPath {
return true
} else if len(dir) > len(sftpPath) {
if strings.HasPrefix(dir, sftpPath+"/") {
return true
}
}
}
return false
}
// HasOverlappedMappedPaths returns true if this user has virtual folders with overlapped mapped paths
func (u *User) HasOverlappedMappedPaths() bool {
if len(u.VirtualFolders) <= 1 {
return false
}
for _, v1 := range u.VirtualFolders {
for _, v2 := range u.VirtualFolders {
if v1.VirtualPath == v2.VirtualPath {
continue
}
if isMappedDirOverlapped(v1.MappedPath, v2.MappedPath) {
return true
}
}
}
return false
}
// HasPerm returns true if the user has the given permission or any permission
func (u *User) HasPerm(permission, path string) bool {
perms := u.GetPermissionsForPath(path)
if utils.IsStringInSlice(PermAny, perms) {
return true
}
return utils.IsStringInSlice(permission, perms)
}
// HasPerms return true if the user has all the given permissions
func (u *User) HasPerms(permissions []string, path string) bool {
perms := u.GetPermissionsForPath(path)
if utils.IsStringInSlice(PermAny, perms) {
return true
}
for _, permission := range permissions {
if !utils.IsStringInSlice(permission, perms) {
return false
}
}
return true
}
// HasNoQuotaRestrictions returns true if no quota restrictions need to be applied
func (u *User) HasNoQuotaRestrictions(checkFiles bool) bool {
if u.QuotaSize == 0 && (!checkFiles || u.QuotaFiles == 0) {
return true
}
return false
}
// IsLoginMethodAllowed returns true if the specified login method is allowed
func (u *User) IsLoginMethodAllowed(loginMethod string, partialSuccessMethods []string) bool {
if len(u.Filters.DeniedLoginMethods) == 0 {
return true
}
if len(partialSuccessMethods) == 1 {
for _, method := range u.GetNextAuthMethods(partialSuccessMethods, true) {
if method == loginMethod {
return true
}
}
}
if utils.IsStringInSlice(loginMethod, u.Filters.DeniedLoginMethods) {
return false
}
return true
}
// GetNextAuthMethods returns the list of authentication methods that
// can continue a multi-step authentication
func (u *User) GetNextAuthMethods(partialSuccessMethods []string, isPasswordAuthEnabled bool) []string {
var methods []string
if len(partialSuccessMethods) != 1 {
return methods
}
if partialSuccessMethods[0] != SSHLoginMethodPublicKey {
return methods
}
for _, method := range u.GetAllowedLoginMethods() {
if method == SSHLoginMethodKeyAndPassword && isPasswordAuthEnabled {
methods = append(methods, LoginMethodPassword)
}
if method == SSHLoginMethodKeyAndKeyboardInt {
methods = append(methods, SSHLoginMethodKeyboardInteractive)
}
}
return methods
}
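// Example (editor's sketch, not part of the upstream diff; it assumes that
// ValidSSHLoginMethods, defined elsewhere in this package, lists the login
// methods declared above): a user that denies every single-step method can
// still authenticate with publickey followed by a second factor.
func exampleNextAuthMethods() {
u := User{Filters: UserFilters{DeniedLoginMethods: []string{
LoginMethodPassword, SSHLoginMethodPublicKey, SSHLoginMethodKeyboardInteractive,
}}}
fmt.Println(u.GetNextAuthMethods([]string{SSHLoginMethodPublicKey}, true))
// expected: [password keyboard-interactive]
}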
// IsPartialAuth returns true if the specified login method is a step for
// a multi-step Authentication.
// We support publickey+password and publickey+keyboard-interactive, so
// only publickey can return partial success.
// We can have partial success if only multi-step Auth methods are enabled
func (u *User) IsPartialAuth(loginMethod string) bool {
if loginMethod != SSHLoginMethodPublicKey {
return false
}
for _, method := range u.GetAllowedLoginMethods() {
if !utils.IsStringInSlice(method, SSHMultiStepsLoginMethods) {
return false
}
}
return true
}
// GetAllowedLoginMethods returns the allowed login methods
func (u *User) GetAllowedLoginMethods() []string {
var allowedMethods []string
for _, method := range ValidSSHLoginMethods {
if !utils.IsStringInSlice(method, u.Filters.DeniedLoginMethods) {
allowedMethods = append(allowedMethods, method)
}
}
return allowedMethods
}
// IsFileAllowed returns true if the specified file is allowed by the file restrictions filters
func (u *User) IsFileAllowed(sftpPath string) bool {
if len(u.Filters.FileExtensions) == 0 {
return true
}
dirsForPath := utils.GetDirsForSFTPPath(path.Dir(sftpPath))
var filter ExtensionsFilter
for _, dir := range dirsForPath {
for _, f := range u.Filters.FileExtensions {
if f.Path == dir {
filter = f
break
}
}
if len(filter.Path) > 0 {
break
}
}
if len(filter.Path) > 0 {
toMatch := strings.ToLower(sftpPath)
for _, denied := range filter.DeniedExtensions {
if strings.HasSuffix(toMatch, denied) {
return false
}
}
for _, allowed := range filter.AllowedExtensions {
if strings.HasSuffix(toMatch, allowed) {
return true
}
}
return len(filter.AllowedExtensions) == 0
}
return true
}
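// Example (editor's sketch, not part of the upstream diff): denied extensions
// are evaluated before allowed ones, and the filter attached to the closest
// parent directory wins.
func exampleIsFileAllowed() {
u := User{Filters: UserFilters{FileExtensions: []ExtensionsFilter{{
Path:              "/uploads",
AllowedExtensions: []string{".jpg", ".png"},
DeniedExtensions:  []string{".png"},
}}}}
fmt.Println(u.IsFileAllowed("/uploads/photo.jpg")) // true
fmt.Println(u.IsFileAllowed("/uploads/photo.png")) // false, denied wins
fmt.Println(u.IsFileAllowed("/docs/readme.md")) // true, no filter matches
}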
// IsLoginFromAddrAllowed returns true if the login is allowed from the specified remoteAddr.
// If AllowedIP is defined only the specified IP/Mask can login.
// If DeniedIP is defined the specified IP/Mask cannot login.
// If an IP is both allowed and denied then login will be denied
func (u *User) IsLoginFromAddrAllowed(remoteAddr string) bool {
if len(u.Filters.AllowedIP) == 0 && len(u.Filters.DeniedIP) == 0 {
return true
}
remoteIP := net.ParseIP(utils.GetIPFromRemoteAddress(remoteAddr))
// if remoteIP is invalid we allow login, this should never happen
if remoteIP == nil {
logger.Warn(logSender, "", "login allowed for invalid IP. remote address: %#v", remoteAddr)
return true
}
for _, IPMask := range u.Filters.DeniedIP {
_, IPNet, err := net.ParseCIDR(IPMask)
if err != nil {
return false
}
if IPNet.Contains(remoteIP) {
return false
}
}
for _, IPMask := range u.Filters.AllowedIP {
_, IPNet, err := net.ParseCIDR(IPMask)
if err != nil {
return false
}
if IPNet.Contains(remoteIP) {
return true
}
}
return len(u.Filters.AllowedIP) == 0
}
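// Example (editor's sketch, not part of the upstream diff; it assumes that
// utils.GetIPFromRemoteAddress strips the port from "host:port"): deny rules
// win over allow rules, and once AllowedIP is set any other address is
// rejected.
func exampleLoginFromAddr() {
u := User{Filters: UserFilters{
AllowedIP: []string{"192.0.2.0/24"},
DeniedIP:  []string{"192.0.2.7/32"},
}}
fmt.Println(u.IsLoginFromAddrAllowed("192.0.2.10:2022")) // true
fmt.Println(u.IsLoginFromAddrAllowed("192.0.2.7:2022")) // false, denied
fmt.Println(u.IsLoginFromAddrAllowed("198.51.100.1:22")) // false, not allowed
}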
// GetPermissionsAsJSON returns the permissions as json byte array
func (u *User) GetPermissionsAsJSON() ([]byte, error) {
return json.Marshal(u.Permissions)
}
// GetPublicKeysAsJSON returns the public keys as json byte array
func (u *User) GetPublicKeysAsJSON() ([]byte, error) {
return json.Marshal(u.PublicKeys)
}
// GetFiltersAsJSON returns the filters as json byte array
func (u *User) GetFiltersAsJSON() ([]byte, error) {
return json.Marshal(u.Filters)
}
// GetFsConfigAsJSON returns the filesystem config as json byte array
func (u *User) GetFsConfigAsJSON() ([]byte, error) {
return json.Marshal(u.FsConfig)
}
// GetUID returns a validated uid, suitable for use with os.Chown
func (u *User) GetUID() int {
if u.UID <= 0 || u.UID > 65535 {
return -1
}
return u.UID
}
// HasQuotaRestrictions returns true if quota restrictions are defined
func (u *User) HasQuotaRestrictions() bool {
return u.QuotaFiles > 0 || u.QuotaSize > 0
}
// GetQuotaSummary returns used quota and limits if defined
func (u *User) GetQuotaSummary() string {
var result string
result = "Files: " + strconv.Itoa(u.UsedQuotaFiles)
if u.QuotaFiles > 0 {
result += "/" + strconv.Itoa(u.QuotaFiles)
}
return "/" + filepath.ToSlash(rel)
if u.UsedQuotaSize > 0 || u.QuotaSize > 0 {
result += ". Size: " + utils.ByteCountSI(u.UsedQuotaSize)
if u.QuotaSize > 0 {
result += "/" + utils.ByteCountSI(u.QuotaSize)
}
}
return result
}
// GetPermissionsAsString returns the user's permissions as comma separated string
func (u *User) GetPermissionsAsString() string {
result := ""
for dir, perms := range u.Permissions {
var dirPerms string
for _, p := range perms {
if len(dirPerms) > 0 {
dirPerms += ", "
}
dirPerms += p
}
dp := fmt.Sprintf("%#v: %#v", dir, dirPerms)
if dir == "/" {
if len(result) > 0 {
result = dp + ", " + result
} else {
result = dp
}
} else {
if len(result) > 0 {
result += ", "
}
result += dp
}
}
return result
}
// GetBandwidthAsString returns bandwidth limits if defined
func (u *User) GetBandwidthAsString() string {
result := "Download: "
if u.DownloadBandwidth > 0 {
result += utils.ByteCountSI(u.DownloadBandwidth*1000) + "/s."
} else {
result += "unlimited."
}
result += " Upload: "
if u.UploadBandwidth > 0 {
result += utils.ByteCountSI(u.UploadBandwidth*1000) + "/s."
} else {
result += "unlimited."
}
return result
}
// GetInfoString returns user's info as string.
// Storage provider, number of public keys, max sessions, uid,
// gid, denied and allowed IP/Mask are returned
func (u *User) GetInfoString() string {
var result string
if u.LastLogin > 0 {
t := utils.GetTimeFromMsecSinceEpoch(u.LastLogin)
result += fmt.Sprintf("Last login: %v ", t.Format("2006-01-02 15:04:05")) // YYYY-MM-DD HH:MM:SS
}
if u.FsConfig.Provider == S3FilesystemProvider {
result += "Storage: S3 "
} else if u.FsConfig.Provider == GCSFilesystemProvider {
result += "Storage: GCS "
}
if len(u.PublicKeys) > 0 {
result += fmt.Sprintf("Public keys: %v ", len(u.PublicKeys))
}
if u.MaxSessions > 0 {
result += fmt.Sprintf("Max sessions: %v ", u.MaxSessions)
}
if u.UID > 0 {
result += fmt.Sprintf("UID: %v ", u.UID)
}
if u.GID > 0 {
result += fmt.Sprintf("GID: %v ", u.GID)
}
if len(u.Filters.DeniedIP) > 0 {
result += fmt.Sprintf("Denied IP/Mask: %v ", len(u.Filters.DeniedIP))
}
if len(u.Filters.AllowedIP) > 0 {
result += fmt.Sprintf("Allowed IP/Mask: %v ", len(u.Filters.AllowedIP))
}
return result
}
// GetExpirationDateAsString returns expiration date formatted as YYYY-MM-DD
func (u *User) GetExpirationDateAsString() string {
if u.ExpirationDate > 0 {
t := utils.GetTimeFromMsecSinceEpoch(u.ExpirationDate)
return t.Format("2006-01-02")
}
return ""
}
// GetAllowedIPAsString returns the allowed IP as comma separated string
func (u User) GetAllowedIPAsString() string {
result := ""
for _, IPMask := range u.Filters.AllowedIP {
if len(result) > 0 {
result += ","
}
result += IPMask
}
return result
}
// GetDeniedIPAsString returns the denied IP as comma separated string
func (u User) GetDeniedIPAsString() string {
result := ""
for _, IPMask := range u.Filters.DeniedIP {
if len(result) > 0 {
result += ","
}
result += IPMask
}
return result
}
func (u *User) getACopy() User {
pubKeys := make([]string, len(u.PublicKeys))
copy(pubKeys, u.PublicKeys)
virtualFolders := make([]vfs.VirtualFolder, len(u.VirtualFolders))
copy(virtualFolders, u.VirtualFolders)
permissions := make(map[string][]string)
for k, v := range u.Permissions {
perms := make([]string, len(v))
copy(perms, v)
permissions[k] = perms
}
filters := UserFilters{}
filters.MaxUploadFileSize = u.Filters.MaxUploadFileSize
filters.AllowedIP = make([]string, len(u.Filters.AllowedIP))
copy(filters.AllowedIP, u.Filters.AllowedIP)
filters.DeniedIP = make([]string, len(u.Filters.DeniedIP))
copy(filters.DeniedIP, u.Filters.DeniedIP)
filters.DeniedLoginMethods = make([]string, len(u.Filters.DeniedLoginMethods))
copy(filters.DeniedLoginMethods, u.Filters.DeniedLoginMethods)
filters.FileExtensions = make([]ExtensionsFilter, len(u.Filters.FileExtensions))
copy(filters.FileExtensions, u.Filters.FileExtensions)
filters.DeniedProtocols = make([]string, len(u.Filters.DeniedProtocols))
copy(filters.DeniedProtocols, u.Filters.DeniedProtocols)
fsConfig := Filesystem{
Provider: u.FsConfig.Provider,
S3Config: vfs.S3FsConfig{
Bucket: u.FsConfig.S3Config.Bucket,
Region: u.FsConfig.S3Config.Region,
AccessKey: u.FsConfig.S3Config.AccessKey,
AccessSecret: u.FsConfig.S3Config.AccessSecret,
Endpoint: u.FsConfig.S3Config.Endpoint,
StorageClass: u.FsConfig.S3Config.StorageClass,
KeyPrefix: u.FsConfig.S3Config.KeyPrefix,
UploadPartSize: u.FsConfig.S3Config.UploadPartSize,
UploadConcurrency: u.FsConfig.S3Config.UploadConcurrency,
},
GCSConfig: vfs.GCSFsConfig{
Bucket: u.FsConfig.GCSConfig.Bucket,
CredentialFile: u.FsConfig.GCSConfig.CredentialFile,
AutomaticCredentials: u.FsConfig.GCSConfig.AutomaticCredentials,
StorageClass: u.FsConfig.GCSConfig.StorageClass,
KeyPrefix: u.FsConfig.GCSConfig.KeyPrefix,
},
}
return User{
ID: u.ID,
Username: u.Username,
Password: u.Password,
PublicKeys: pubKeys,
HomeDir: u.HomeDir,
VirtualFolders: virtualFolders,
UID: u.UID,
GID: u.GID,
MaxSessions: u.MaxSessions,
QuotaSize: u.QuotaSize,
QuotaFiles: u.QuotaFiles,
Permissions: permissions,
UsedQuotaSize: u.UsedQuotaSize,
UsedQuotaFiles: u.UsedQuotaFiles,
LastQuotaUpdate: u.LastQuotaUpdate,
UploadBandwidth: u.UploadBandwidth,
DownloadBandwidth: u.DownloadBandwidth,
Status: u.Status,
ExpirationDate: u.ExpirationDate,
LastLogin: u.LastLogin,
Filters: filters,
FsConfig: fsConfig,
}
}
func (u *User) getNotificationFieldsAsSlice(action string) []string {
return []string{action, u.Username,
strconv.FormatInt(u.ID, 10),
strconv.FormatInt(int64(u.Status), 10),
strconv.FormatInt(u.ExpirationDate, 10),
u.HomeDir,
strconv.FormatInt(int64(u.UID), 10),
strconv.FormatInt(int64(u.GID), 10),
}
}
func (u *User) getNotificationFieldsAsEnvVars(action string) []string {
return []string{fmt.Sprintf("SFTPGO_USER_ACTION=%v", action),
fmt.Sprintf("SFTPGO_USER_USERNAME=%v", u.Username),
fmt.Sprintf("SFTPGO_USER_PASSWORD=%v", u.Password),
fmt.Sprintf("SFTPGO_USER_ID=%v", u.ID),
fmt.Sprintf("SFTPGO_USER_STATUS=%v", u.Status),
fmt.Sprintf("SFTPGO_USER_EXPIRATION_DATE=%v", u.ExpirationDate),
fmt.Sprintf("SFTPGO_USER_HOME_DIR=%v", u.HomeDir),
fmt.Sprintf("SFTPGO_USER_UID=%v", u.UID),
fmt.Sprintf("SFTPGO_USER_GID=%v", u.GID),
fmt.Sprintf("SFTPGO_USER_QUOTA_FILES=%v", u.QuotaFiles),
fmt.Sprintf("SFTPGO_USER_QUOTA_SIZE=%v", u.QuotaSize),
fmt.Sprintf("SFTPGO_USER_UPLOAD_BANDWIDTH=%v", u.UploadBandwidth),
fmt.Sprintf("SFTPGO_USER_DOWNLOAD_BANDWIDTH=%v", u.DownloadBandwidth),
fmt.Sprintf("SFTPGO_USER_MAX_SESSIONS=%v", u.MaxSessions),
fmt.Sprintf("SFTPGO_USER_FS_PROVIDER=%v", u.FsConfig.Provider)}
}
func (u *User) getGCSCredentialsFilePath() string {
return filepath.Join(credentialsDirPath, fmt.Sprintf("%v_gcs_credentials.json", u.Username))
}

50 docker/README.md Normal file

@@ -0,0 +1,50 @@
# Official Docker images
SFTPGo provides official Docker images. They are available [here](https://github.com/users/drakkan/packages/container/package/sftpgo).
## Start a SFTPGo server instance
Starting a SFTPGo instance is simple:
```shell
docker run --name some-sftpgo -p 127.0.0.1:8080:8080 -p 2022:2022 -d "ghcr.io/drakkan/sftpgo:edge"
```
Now visit [http://localhost:8080/](http://localhost:8080/) and create a new SFTPGo user. The SFTP service is available on port 2022.
## Logs
The logs are available through Docker's container log:
```shell
docker logs some-sftpgo
```
## Configuration
The runtime configuration can be customized via environment variables that you can set by passing the `-e` option to the `docker run` command, or inside the `environment` section if you are using [docker stack deploy](https://docs.docker.com/engine/reference/commandline/stack_deploy/) or [docker-compose](https://github.com/docker/compose).
Please take a look [here](../docs/full-configuration.md#environment-variables) to learn how to configure SFTPGo via environment variables.
## Where to Store Data
Important note: There are several ways to store data used by applications that run in Docker containers. We encourage users of the SFTPGo images to familiarize themselves with the options available, including:
- Let Docker manage the storage for SFTPGo data by [writing them to disk on the host system using its own internal volume management](https://docs.docker.com/engine/tutorials/dockervolumes/#adding-a-data-volume). This is the default and is easy and fairly transparent to the user. The downside is that the files may be hard to locate for tools and applications that run directly on the host system, i.e. outside containers.
- Create a data directory on the host system (outside the container) and [mount this to a directory visible from inside the container](https://docs.docker.com/engine/tutorials/dockervolumes/#mount-a-host-directory-as-a-data-volume). This places the SFTPGo files in a known location on the host system, and makes it easy for tools and applications on the host system to access the files. The downside is that the user needs to make sure that the directory exists, and that e.g. directory permissions and other security mechanisms on the host system are set up correctly. The SFTPGo images run using `1000` as uid and gid.
The Docker documentation is a good starting point for understanding the different storage options and variations, and there are multiple blogs and forum postings that discuss and give advice in this area. We will simply show the basic procedure here for the latter option above:
1. Create a data directory on a suitable volume on your host system, e.g. `/my/own/sftpgodata`.
2. Start your SFTPGo container like this:
```shell
docker run --name some-sftpgo \
-p 127.0.0.1:8080:8090 \
-p 2022:2022 \
--mount type=bind,source=/my/own/sftpgodata,target=/var/lib/sftpgo \
-e SFTPGO_HTTPD__BIND_PORT=8090 \
-d "ghcr.io/drakkan/sftpgo:edge"
```
The `--mount type=bind,source=/my/own/sftpgodata,target=/var/lib/sftpgo` part of the command mounts the `/my/own/sftpgodata` directory from the underlying host system as `/var/lib/sftpgo` inside the container, where SFTPGo will store its data.


@@ -0,0 +1,8 @@
FROM debian:latest
LABEL maintainer="nicola.murino@gmail.com"
RUN apt-get update && apt-get install -y curl python3-requests python3-pygments
RUN curl https://raw.githubusercontent.com/drakkan/sftpgo/master/examples/rest-api-cli/sftpgo_api_cli --output /usr/bin/sftpgo_api_cli
ENTRYPOINT ["python3", "/usr/bin/sftpgo_api_cli" ]
CMD []


@@ -0,0 +1,50 @@
FROM golang:alpine as builder
RUN apk add --no-cache git gcc g++ ca-certificates \
&& go get -v -d github.com/drakkan/sftpgo
WORKDIR /go/src/github.com/drakkan/sftpgo
ARG TAG
ARG FEATURES
# Use --build-arg TAG=LATEST for latest tag. Use e.g. --build-arg TAG=v1.0.0 for a specific tag/commit. Otherwise HEAD (master) is built.
RUN git checkout $(if [ "${TAG}" = LATEST ]; then echo `git rev-list --tags --max-count=1`; elif [ -n "${TAG}" ]; then echo "${TAG}"; else echo HEAD; fi)
RUN go build $(if [ -n "${FEATURES}" ]; then echo "-tags ${FEATURES}"; fi) -ldflags "-s -w -X github.com/drakkan/sftpgo/version.commit=`git describe --always --dirty` -X github.com/drakkan/sftpgo/version.date=`date -u +%FT%TZ`" -v -o /go/bin/sftpgo
FROM alpine:latest
RUN apk add --no-cache ca-certificates su-exec \
&& mkdir -p /data /etc/sftpgo /srv/sftpgo/config /srv/sftpgo/web /srv/sftpgo/backups
# git and rsync are optional, uncomment the next line to add support for them if needed.
#RUN apk add --no-cache git rsync
COPY --from=builder /go/bin/sftpgo /bin/
COPY --from=builder /go/src/github.com/drakkan/sftpgo/sftpgo.json /etc/sftpgo/sftpgo.json
COPY --from=builder /go/src/github.com/drakkan/sftpgo/templates /srv/sftpgo/web/templates
COPY --from=builder /go/src/github.com/drakkan/sftpgo/static /srv/sftpgo/web/static
COPY docker-entrypoint.sh /bin/entrypoint.sh
RUN chmod +x /bin/entrypoint.sh
VOLUME [ "/data", "/srv/sftpgo/config", "/srv/sftpgo/backups" ]
EXPOSE 2022 8080
# uncomment the following settings to enable FTP support
#ENV SFTPGO_FTPD__BIND_PORT=2121
#ENV SFTPGO_FTPD__FORCE_PASSIVE_IP=<your FTP visible IP here>
#EXPOSE 2121
# we need to expose the passive ports range too
#EXPOSE 50000-50100
# it is a good idea to provide certificates to enable FTPS too
#ENV SFTPGO_FTPD__CERTIFICATE_FILE=/srv/sftpgo/config/mycert.crt
#ENV SFTPGO_FTPD__CERTIFICATE_KEY_FILE=/srv/sftpgo/config/mycert.key
# uncomment the following setting to enable WebDAV support
#ENV SFTPGO_WEBDAVD__BIND_PORT=8090
# it is a good idea to provide certificates to enable WebDAV over HTTPS
#ENV SFTPGO_WEBDAVD__CERTIFICATE_FILE=${CONFIG_DIR}/mycert.crt
#ENV SFTPGO_WEBDAVD__CERTIFICATE_KEY_FILE=${CONFIG_DIR}/mycert.key
ENTRYPOINT ["/bin/entrypoint.sh"]
CMD ["serve"]


@@ -0,0 +1,61 @@
# SFTPGo with Docker and Alpine
:warning: The recommended way to run SFTPGo on Docker is to use the official [images](https://github.com/users/drakkan/packages/container/package/sftpgo). The documentation here is now obsolete.
This Dockerfile is made to build an image to host multiple instances of SFTPGo, each started with a different user.
## Example
> 1003 is a custom uid:gid for this instance of SFTPGo
```bash
# Prereq on docker host
sudo groupadd -g 1003 sftpgrp && \
sudo useradd -u 1003 -g 1003 sftpuser -d /home/sftpuser/ && \
sudo -u sftpuser mkdir /home/sftpuser/{conf,data} && \
curl https://raw.githubusercontent.com/drakkan/sftpgo/master/sftpgo.json -o /home/sftpuser/conf/sftpgo.json
# Edit sftpgo.json as you need
# Get and build SFTPGo image.
# Add --build-arg TAG=LATEST to build the latest tag or e.g. TAG=v1.0.0 for a specific tag/commit.
# Add --build-arg FEATURES=<build features comma separated> to specify the features to build.
git clone https://github.com/drakkan/sftpgo.git && \
cd sftpgo && \
sudo docker build -t sftpgo docker/sftpgo/alpine/
# Initialize the configured provider. For PostgreSQL and MySQL providers you need to create the configured database and the "initprovider" command will create the required tables.
sudo docker run --name sftpgo \
-e PUID=1003 \
-e GUID=1003 \
-v /home/sftpuser/conf/:/srv/sftpgo/config \
sftpgo initprovider -c /srv/sftpgo/config
# Start the image
sudo docker rm sftpgo && sudo docker run --name sftpgo \
-e SFTPGO_LOG_FILE_PATH= \
-e SFTPGO_CONFIG_DIR=/srv/sftpgo/config \
-e SFTPGO_HTTPD__TEMPLATES_PATH=/srv/sftpgo/web/templates \
-e SFTPGO_HTTPD__STATIC_FILES_PATH=/srv/sftpgo/web/static \
-e SFTPGO_HTTPD__BACKUPS_PATH=/srv/sftpgo/backups \
-p 8080:8080 \
-p 2022:2022 \
-e PUID=1003 \
-e GUID=1003 \
-v /home/sftpuser/conf/:/srv/sftpgo/config \
-v /home/sftpuser/data:/data \
-v /home/sftpuser/backups:/srv/sftpgo/backups \
sftpgo
```
If you want to enable FTP/S you also need to publish the FTP port and the FTP passive port range defined in your `Dockerfile`, by adding, for example, the following options to the `docker run` command: `-p 2121:2121 -p 50000-50100:50000-50100`. The same goes for WebDAV: you need to publish the configured port.
The `entrypoint.sh` script makes sure to fix the permissions of the directories and to start the process as the requested user.
Several images can be run with different parameters.
## Custom systemd script
An example systemd unit is available [here](sftpgo.service); it uses the `Environment` parameters to set `PUID` and `GUID`.
The `WorkingDirectory` must exist and contain an environment file for each SFTPGo instance, named like `sftpgo-${PUID}.env`.


@@ -0,0 +1,7 @@
#!/bin/sh
set -eu
chown -R "${PUID}:${GUID}" /data /etc/sftpgo /srv/sftpgo/config /srv/sftpgo/backups \
&& exec su-exec "${PUID}:${GUID}" \
/bin/sftpgo "$@"


@@ -0,0 +1,35 @@
[Unit]
Description=SFTPGo server
After=docker.service
[Service]
User=root
Group=root
WorkingDirectory=/etc/sftpgo
Environment=PUID=1003
Environment=GUID=1003
EnvironmentFile=-/etc/sysconfig/sftpgo.env
ExecStartPre=-docker kill sftpgo
ExecStartPre=-docker rm sftpgo
ExecStart=docker run --name sftpgo \
--env-file sftpgo-${PUID}.env \
-e PUID=${PUID} \
-e GUID=${GUID} \
-e SFTPGO_LOG_FILE_PATH= \
-e SFTPGO_CONFIG_DIR=/srv/sftpgo/config \
-e SFTPGO_HTTPD__TEMPLATES_PATH=/srv/sftpgo/web/templates \
-e SFTPGO_HTTPD__STATIC_FILES_PATH=/srv/sftpgo/web/static \
-e SFTPGO_HTTPD__BACKUPS_PATH=/srv/sftpgo/backups \
-p 8080:8080 \
-p 2022:2022 \
-v /home/sftpuser/conf/:/srv/sftpgo/config \
-v /home/sftpuser/data:/data \
-v /home/sftpuser/backups:/srv/sftpgo/backups \
sftpgo
ExecStop=docker stop sftpgo
SyslogIdentifier=sftpgo
Restart=always
RestartSec=10s
[Install]
WantedBy=multi-user.target


@@ -0,0 +1,93 @@
# we use a multi stage build to have a separate build and run env
FROM golang:latest as buildenv
LABEL maintainer="nicola.murino@gmail.com"
RUN go get -v -d github.com/drakkan/sftpgo
WORKDIR /go/src/github.com/drakkan/sftpgo
ARG TAG
ARG FEATURES
# Use --build-arg TAG=LATEST for latest tag. Use e.g. --build-arg TAG=v1.0.0 for a specific tag/commit. Otherwise HEAD (master) is built.
RUN git checkout $(if [ "${TAG}" = LATEST ]; then echo `git rev-list --tags --max-count=1`; elif [ -n "${TAG}" ]; then echo "${TAG}"; else echo HEAD; fi)
RUN go build $(if [ -n "${FEATURES}" ]; then echo "-tags ${FEATURES}"; fi) -ldflags "-s -w -X github.com/drakkan/sftpgo/version.commit=`git describe --always --dirty` -X github.com/drakkan/sftpgo/version.date=`date -u +%FT%TZ`" -v -o sftpgo
# now define the run environment
FROM debian:latest
# ca-certificates is needed for Cloud Storage Support and for HTTPS/FTPS.
RUN apt-get update && apt-get install -y ca-certificates && apt-get clean
# git and rsync are optional, uncomment the next line to add support for them if needed.
#RUN apt-get update && apt-get install -y git rsync && apt-get clean
ARG BASE_DIR=/app
ARG DATA_REL_DIR=data
ARG CONFIG_REL_DIR=config
ARG BACKUP_REL_DIR=backups
ARG USERNAME=sftpgo
ARG GROUPNAME=sftpgo
ARG UID=515
ARG GID=515
ARG WEB_REL_PATH=web
# HOME_DIR for sftpgo itself
ENV HOME_DIR=${BASE_DIR}/${USERNAME}
# DATA_DIR, this is a volume that you can use to hold the users' home dirs
ENV DATA_DIR=${BASE_DIR}/${DATA_REL_DIR}
# CONFIG_DIR, this is a volume to persist the daemon private keys, configuration file, etc.
ENV CONFIG_DIR=${BASE_DIR}/${CONFIG_REL_DIR}
# BACKUPS_DIR, this is a volume to store backups done using "dumpdata" REST API
ENV BACKUPS_DIR=${BASE_DIR}/${BACKUP_REL_DIR}
ENV WEB_DIR=${BASE_DIR}/${WEB_REL_PATH}
RUN mkdir -p ${DATA_DIR} ${CONFIG_DIR} ${WEB_DIR} ${BACKUPS_DIR}
RUN groupadd --system -g ${GID} ${GROUPNAME}
RUN useradd --system --create-home --no-log-init --home-dir ${HOME_DIR} --comment "SFTPGo user" --shell /usr/sbin/nologin --gid ${GID} --uid ${UID} ${USERNAME}
WORKDIR ${HOME_DIR}
RUN mkdir -p bin .config/sftpgo
ENV PATH ${HOME_DIR}/bin:$PATH
COPY --from=buildenv /go/src/github.com/drakkan/sftpgo/sftpgo bin/sftpgo
# default config file to use if no config file is found inside the CONFIG_DIR volume.
# You can override each configuration option via env vars too
COPY --from=buildenv /go/src/github.com/drakkan/sftpgo/sftpgo.json .config/sftpgo/
COPY --from=buildenv /go/src/github.com/drakkan/sftpgo/templates ${WEB_DIR}/templates
COPY --from=buildenv /go/src/github.com/drakkan/sftpgo/static ${WEB_DIR}/static
RUN chown -R ${UID}:${GID} ${DATA_DIR} ${BACKUPS_DIR}
# run as non root user
USER ${USERNAME}
EXPOSE 2022 8080
# the defined volumes must have write access for the UID and GID defined above
VOLUME [ "$DATA_DIR", "$CONFIG_DIR", "$BACKUPS_DIR" ]
# override some default configuration options using env vars
ENV SFTPGO_CONFIG_DIR=${CONFIG_DIR}
# setting SFTPGO_LOG_FILE_PATH to an empty string will log to stdout
ENV SFTPGO_LOG_FILE_PATH=""
ENV SFTPGO_HTTPD__BIND_ADDRESS=""
ENV SFTPGO_HTTPD__TEMPLATES_PATH=${WEB_DIR}/templates
ENV SFTPGO_HTTPD__STATIC_FILES_PATH=${WEB_DIR}/static
ENV SFTPGO_DATA_PROVIDER__USERS_BASE_DIR=${DATA_DIR}
ENV SFTPGO_HTTPD__BACKUPS_PATH=${BACKUPS_DIR}
# uncomment the following settings to enable FTP support
#ENV SFTPGO_FTPD__BIND_PORT=2121
#ENV SFTPGO_FTPD__FORCE_PASSIVE_IP=<your FTP visible IP here>
#EXPOSE 2121
# we need to expose the passive ports range too
#EXPOSE 50000-50100
# it is a good idea to provide certificates to enable FTPS too
#ENV SFTPGO_FTPD__CERTIFICATE_FILE=${CONFIG_DIR}/mycert.crt
#ENV SFTPGO_FTPD__CERTIFICATE_KEY_FILE=${CONFIG_DIR}/mycert.key
# uncomment the following setting to enable WebDAV support
#ENV SFTPGO_WEBDAVD__BIND_PORT=8090
# it is a good idea to provide certificates to enable WebDAV over HTTPS
#ENV SFTPGO_WEBDAVD__CERTIFICATE_FILE=${CONFIG_DIR}/mycert.crt
#ENV SFTPGO_WEBDAVD__CERTIFICATE_KEY_FILE=${CONFIG_DIR}/mycert.key
ENTRYPOINT ["sftpgo"]
CMD ["serve"]


@@ -0,0 +1,59 @@
# Dockerfile based on Debian stable
:warning: The recommended way to run SFTPGo on Docker is to use the official [images](https://github.com/users/drakkan/packages/container/package/sftpgo). The documentation here is now obsolete.
Please read the comments inside the `Dockerfile` to learn how to customize things for your setup.
You can build the container image using `docker build`, for example:
```bash
docker build -t="drakkan/sftpgo" .
```
This will build master of github.com/drakkan/sftpgo.
To build the latest tag you can add `--build-arg TAG=LATEST` and to build a specific tag/commit you can use for example `TAG=v1.0.0`, like this:
```bash
docker build -t="drakkan/sftpgo" --build-arg TAG=v1.0.0 .
```
To specify the features to build you can add `--build-arg FEATURES=<build features comma separated>`. For example you can disable SQLite and S3 support like this:
```bash
docker build -t="drakkan/sftpgo" --build-arg FEATURES=nosqlite,nos3 .
```
Please take a look at the [build from source](./../../../docs/build-from-source.md) documentation for the complete list of the features that can be disabled.
Now create the required folders on the host system, for example:
```bash
sudo mkdir -p /srv/sftpgo/data /srv/sftpgo/config /srv/sftpgo/backups
```
and give write access to them to the UID/GID defined inside the `Dockerfile`. You can choose to create a new user, on the host system, with a matching UID/GID pair, or simply do something like this:
```bash
sudo chown -R <UID>:<GID> /srv/sftpgo/data /srv/sftpgo/config /srv/sftpgo/backups
```
Download the default configuration file and edit it as you need:
```bash
sudo curl https://raw.githubusercontent.com/drakkan/sftpgo/master/sftpgo.json -o /srv/sftpgo/config/sftpgo.json
```
Initialize the configured provider. For PostgreSQL and MySQL providers you need to create the configured database and the `initprovider` command will create the required tables:
```bash
docker run --name sftpgo --mount type=bind,source=/srv/sftpgo/config,target=/app/config drakkan/sftpgo initprovider -c /app/config
```
and finally you can run the image using something like this:
```bash
docker rm sftpgo && docker run --name sftpgo -p 8080:8080 -p 2022:2022 --mount type=bind,source=/srv/sftpgo/data,target=/app/data --mount type=bind,source=/srv/sftpgo/config,target=/app/config --mount type=bind,source=/srv/sftpgo/backups,target=/app/backups drakkan/sftpgo
```
If you want to enable FTP/S you also need to publish the FTP port and the FTP passive port range defined in your `Dockerfile`, by adding, for example, the following options to the `docker run` command: `-p 2121:2121 -p 50000-50100:50000-50100`. The same goes for WebDAV: you need to publish the configured port.

70 docs/account.md Normal file

@@ -0,0 +1,70 @@
# Account's configuration properties
For each account, the following properties can be configured:
- `username`
- `password` used for password authentication. For users created using SFTPGo REST API, if the password has no known hashing algo prefix, it will be stored using argon2id. SFTPGo supports checking passwords stored with bcrypt, pbkdf2, md5crypt and sha512crypt too. For pbkdf2 the supported format is `$<algo>$<iterations>$<salt>$<hashed pwd base64 encoded>`, where algo is `pbkdf2-sha1` or `pbkdf2-sha256` or `pbkdf2-sha512` or `$pbkdf2-b64salt-sha256$`. For example the `pbkdf2-sha256` of the word `password` using 150000 iterations and `E86a9YMX3zC7` as salt must be stored as `$pbkdf2-sha256$150000$E86a9YMX3zC7$R5J62hsSq+pYw00hLLPKBbcGXmq7fj5+/M0IFoYtZbo=`. In the pbkdf2 variant with `b64salt` the salt is base64 encoded. For bcrypt the format must be the one supported by golang's [crypto/bcrypt](https://godoc.org/golang.org/x/crypto/bcrypt) package, for example the password `secret` with cost `14` must be stored as `$2a$14$ajq8Q7fbtFRQvXpdCq7Jcuy.Rx1h/L4J60Otx.gyNLbAYctGMJ9tK`. For md5crypt and sha512crypt we support the format used in `/etc/shadow` with the `$1$` and `$6$` prefix, this is useful if you are migrating from Unix system user accounts. We support Apache md5crypt (`$apr1$` prefix) too. Using the REST API you can send a password hashed as bcrypt, pbkdf2, md5crypt or sha512crypt and it will be stored as is. A verification sketch for the pbkdf2 format is shown after this list.
- `public_keys` array of public keys. At least one public key or the password is mandatory.
- `status` 1 means "active", 0 "inactive". An inactive account cannot login.
- `expiration_date` expiration date as unix timestamp in milliseconds. An expired account cannot login. 0 means no expiration.
- `home_dir` the user cannot upload or download files outside this directory. Must be an absolute path. A local home directory is required for Cloud Storage Backends too: in this case it will store temporary files.
- `virtual_folders` list of mappings between virtual SFTP/SCP paths and local filesystem paths outside the user home directory. More information can be found [here](./virtual-folders.md)
- `uid`, `gid`. If SFTPGo runs as root system user then the created files and directories will be assigned to this system uid/gid. Ignored on windows or if SFTPGo runs as non root user: in this case files and directories for all SFTP users will be owned by the system user that runs SFTPGo.
- `max_sessions` maximum concurrent sessions. 0 means unlimited.
- `quota_size` maximum size allowed as bytes. 0 means unlimited.
- `quota_files` maximum number of files allowed. 0 means unlimited.
- `permissions` for SFTP paths. The following per directory permissions are supported:
- `*` all permissions are granted
- `list` listing items is allowed
- `download` downloading files is allowed
- `upload` uploading files is allowed
- `overwrite` overwriting existing files while uploading is allowed. The `upload` permission is required to allow file overwrite
- `delete` deleting files or directories is allowed
- `rename` renaming files or directories is allowed if this permission is granted on both the source and the target path. You can enable rename in a more controlled way by granting the `delete` permission on the source directory and the `upload`/`create_dirs`/`create_symlinks` permissions on the target directory
- `create_dirs` creating directories is allowed
- `create_symlinks` creating symbolic links is allowed
- `chmod` changing file or directory permissions is allowed. On Windows, only the 0200 bit (owner writable) of mode is used; it controls whether the file's read-only attribute is set or cleared. The other bits are currently unused. Use mode 0400 for a read-only file and 0600 for a readable+writable file.
- `chown` changing file or directory owner and group is allowed. Changing owner and group is not supported on Windows.
- `chtimes` changing file or directory access and modification time is allowed
- `upload_bandwidth` maximum upload bandwidth as KB/s, 0 means unlimited.
- `download_bandwidth` maximum download bandwidth as KB/s, 0 means unlimited.
- `allowed_ip`, List of IP/Mask allowed to login. Any IP address not contained in this list cannot login. IP/Mask must be in CIDR notation as defined in RFC 4632 and RFC 4291, for example "192.0.2.0/24" or "2001:db8::/32"
- `denied_ip`, List of IP/Mask not allowed to login. If an IP address is both allowed and denied then login will be denied
- `max_upload_file_size`, max allowed size, as bytes, for a single file upload. The upload will be aborted if/when the size of the file being sent exceeds this limit. 0 means unlimited. This restriction does not apply for SSH system commands such as `git` and `rsync`
- `denied_login_methods`, List of login methods not allowed. To enable multi-step authentication you have to allow only multi-step login methods. If password login method is denied or no password is set then FTP and WebDAV users cannot login. The following login methods are supported:
- `publickey`
- `password`
- `keyboard-interactive`
- `publickey+password`
- `publickey+keyboard-interactive`
- `denied_protocols`, list of protocols not allowed. The following protocols are supported:
- `SSH`
- `FTP`
- `DAV`
- `file_extensions`, list of structs. These restrictions do not apply to file listing for performance reasons, so a denied file cannot be downloaded/overwritten/renamed but it will still appear in directory listings. Please note that these restrictions can be easily bypassed. Each struct contains the following fields:
- `allowed_extensions`, list of case-insensitive allowed file extensions. Shell-like expansion is not supported, so you have to specify `.jpg` and not `*.jpg`. Any file that does not end with one of these suffixes will be denied
- `denied_extensions`, list of case-insensitive denied file extensions. Denied file extensions are evaluated before the allowed ones
- `path`, SFTP/SCP path. If no other specific filter is defined, the filter applies to subdirectories too. For example, if filters are defined for the paths `/` and `/sub`, then the filters for `/` are applied to any file outside the `/sub` directory
- `fs_provider`, filesystem to serve via SFTP. Local filesystem, S3 Compatible Object Storage and Google Cloud Storage are supported
- `s3_bucket`, required for S3 filesystem
- `s3_region`, required for S3 filesystem. Must match the region for your bucket. You can find here the list of available [AWS regions](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-available-regions). For example if your bucket is at `Frankfurt` you have to set the region to `eu-central-1`
- `s3_access_key`
- `s3_access_secret`, if provided it is stored encrypted (AES-256-GCM). You can leave access key and access secret blank to use credentials from environment
- `s3_endpoint`, specifies a S3 endpoint (server) different from AWS. It is not required if you are connecting to AWS
- `s3_storage_class`, leave blank to use the default or specify a valid AWS [storage class](https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html)
- `s3_key_prefix`, allows you to restrict access to the folder identified by this prefix and its contents
- `s3_upload_part_size`, the buffer size for multipart uploads (MB). Zero means the default (5 MB). Minimum is 5
- `s3_upload_concurrency` how many parts are uploaded in parallel
- `gcs_bucket`, required for GCS filesystem
- `gcs_credentials`, Google Cloud Storage JSON credentials base64 encoded
- `gcs_automatic_credentials`, integer. Set to 1 to use Application Default Credentials strategy or set to 0 to use explicit credentials via `gcs_credentials`
- `gcs_storage_class`
- `gcs_key_prefix`, allows you to restrict access to the folder identified by this prefix and its contents
These properties are stored inside the data provider.
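To make the pbkdf2 format above concrete, here is a minimal Go sketch that verifies a password stored in the plain-salt `$pbkdf2-sha256$` variant. It is an illustration of the documented format, not SFTPGo's own implementation, and it does not handle the `b64salt` variant:
```go
package main

import (
	"crypto/sha256"
	"crypto/subtle"
	"encoding/base64"
	"fmt"
	"strconv"
	"strings"

	"golang.org/x/crypto/pbkdf2"
)

// verifyPbkdf2Sha256 checks a cleartext password against a stored hash
// in the format $pbkdf2-sha256$<iterations>$<salt>$<derived key base64>.
func verifyPbkdf2Sha256(password, stored string) (bool, error) {
	parts := strings.Split(stored, "$")
	if len(parts) != 5 || parts[1] != "pbkdf2-sha256" {
		return false, fmt.Errorf("unsupported format")
	}
	iterations, err := strconv.Atoi(parts[2])
	if err != nil {
		return false, err
	}
	expected, err := base64.StdEncoding.DecodeString(parts[4])
	if err != nil {
		return false, err
	}
	// derive a key of the same length using the stored salt and iterations
	derived := pbkdf2.Key([]byte(password), []byte(parts[3]), iterations, len(expected), sha256.New)
	return subtle.ConstantTimeCompare(derived, expected) == 1, nil
}

func main() {
	stored := "$pbkdf2-sha256$150000$E86a9YMX3zC7$R5J62hsSq+pYw00hLLPKBbcGXmq7fj5+/M0IFoYtZbo="
	ok, err := verifyPbkdf2Sha256("password", stored)
	fmt.Println(ok, err) // expected: true <nil> for the example above
}
```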
If you want to use your existing accounts, you have these options:
- If your accounts are already stored inside a supported database, you can create a database view. Since a view is read only, you have to disable user management and quota tracking so SFTPGo will never try to write to the view
- you can import your users inside SFTPGo. Take a look at [sftpgo_api_cli](../examples/rest-api-cli#convert-users-from-other-stores "SFTPGo API CLI example"), it can convert and import users from Linux system users and Pure-FTPd/ProFTPD virtual users
- you can use an external authentication program

47 docs/build-from-source.md Normal file

@@ -0,0 +1,47 @@
# Build SFTPGo from source
You can install the package to your [\$GOPATH](https://github.com/golang/go/wiki/GOPATH "GOPATH") with the [go tool](https://golang.org/cmd/go/ "go command") from shell:
```bash
go get -u github.com/drakkan/sftpgo
```
Or you can download the sources and use `go build`.
Make sure [Git](https://git-scm.com/downloads) is installed on your machine and in your system's `PATH`.
The following build tags are available:
- `nogcs`, disable Google Cloud Storage backend, default enabled
- `nos3`, disable S3 Compatible Object Storage backends, default enabled
- `nobolt`, disable Bolt data provider, default enabled
- `nomysql`, disable MySQL data provider, default enabled
- `nopgsql`, disable PostgreSQL data provider, default enabled
- `nosqlite`, disable SQLite data provider, default enabled
- `noportable`, disable portable mode, default enabled
- `nometrics`, disable Prometheus metrics, default enabled
If no build tag is specified the build will include the default features.
The optional [SQLite driver](https://github.com/mattn/go-sqlite3 "go-sqlite3") is a `CGO` package and so it requires a `C` compiler at build time.
On Linux and macOS, a compiler is easy to install or already installed. On Windows, you need to download [MinGW-w64](https://sourceforge.net/projects/mingw-w64/files/) and build SFTPGo from its command prompt.
The compiler is a build time only dependency. It is not required at runtime.
Version info, such as git commit and build date, can be embedded setting the following string variables at build time:
- `github.com/drakkan/sftpgo/version.commit`
- `github.com/drakkan/sftpgo/version.date`
For example, you can build using the following command:
```bash
go build -tags nogcs,nos3,nosqlite -ldflags "-s -w -X github.com/drakkan/sftpgo/version.commit=`git describe --always --dirty` -X github.com/drakkan/sftpgo/version.date=`date -u +%FT%TZ`" -o sftpgo
```
You should get a version that includes git commit, build date and available features like this one:
```bash
$ ./sftpgo -v
SFTPGo 0.9.6-dev-b30614e-dirty-2020-06-19T11:04:56Z +metrics -gcs -s3 +bolt +mysql +pgsql -sqlite +portable
```


@@ -0,0 +1,45 @@
# Check password hook
This hook allows you to externally check the provided password. Its main use case is to easily support things like password+OTP for protocols without keyboard interactive support, such as FTP and WebDAV. You can ask your users to login using a string consisting of a fixed password and a One Time Token; you verify the token inside the hook and ask SFTPGo to verify the fixed part.
The same thing can be achieved using [External authentication](./external-auth.md) but using this hook is simpler in some use cases.
The `check password hook` can be defined as the absolute path of your program or an HTTP URL.
The expected response is a JSON serialized struct containing the following keys:
- `status` integer. 0 means KO, 1 means OK, 2 means partial success
- `to_verify` string. For `status` = 2 SFTPGo will check this password against the one stored inside the SFTPGo data provider
If the hook defines an external program it can read the following environment variables:
- `SFTPGO_AUTHD_USERNAME`
- `SFTPGO_AUTHD_PASSWORD`
- `SFTPGO_AUTHD_IP`
- `SFTPGO_AUTHD_PROTOCOL`, possible values are `SSH`, `FTP`, `DAV`
Previous global environment variables aren't cleared when the script is called. The content of these variables is _not_ quoted. They may contain special characters. They are under the control of a possibly malicious remote user.
The program must write, on its standard output, the expected JSON serialized response described above.
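As a concrete illustration, here is a minimal Go sketch of such a program for the password+OTP case. It assumes the last 6 characters of the received password are the one time token and uses a hypothetical `isValidOTP` check; it is not SFTPGo's own code:
```go
package main

import (
	"encoding/json"
	"os"
)

type response struct {
	Status   int    `json:"status"`
	ToVerify string `json:"to_verify,omitempty"`
}

// isValidOTP is a hypothetical check against your OTP backend.
func isValidOTP(token string) bool {
	return len(token) == 6
}

func main() {
	out := json.NewEncoder(os.Stdout)
	password := os.Getenv("SFTPGO_AUTHD_PASSWORD")
	if len(password) <= 6 || !isValidOTP(password[len(password)-6:]) {
		out.Encode(response{Status: 0}) // KO
		return
	}
	// the token is valid: ask SFTPGo to verify the fixed part (status 2)
	out.Encode(response{Status: 2, ToVerify: password[:len(password)-6]})
}
```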
If the hook is an HTTP URL then it will be invoked as HTTP POST. The request body will contain a JSON serialized struct with the following fields:
- `username`
- `password`
- `ip`
- `protocol`, possible values are `SSH`, `FTP`, `DAV`
If authentication succeeds the HTTP response code must be 200 and the response body must contain the expected JSON serialized response described above.
The program hook must finish within 30 seconds; the HTTP hook timeout will use the global configuration for HTTP clients.
You can also restrict the hook scope using the `check_password_scope` configuration key:
- `0` means all supported protocols.
- `1` means SSH only
- `2` means FTP only
- `4` means WebDAV only
You can combine the scopes. For example, 6 means FTP and WebDAV.
An example check password program allowing 2FA using password + one time token can be found inside the source tree [checkpwd](../examples/OTP/authy/checkpwd) directory.

89 docs/custom-actions.md Normal file

@@ -0,0 +1,89 @@
# Custom Actions
The `actions` struct inside the "common" configuration section allows you to configure the actions for file operations and SSH commands.
The `hook` can be defined as the absolute path of your program or an HTTP URL.
The `upload` condition includes both uploads to new files and overwrites of existing files. If an upload is aborted due to quota limits, SFTPGo tries to remove the partial file, so if the notification reports a zero size file and a quota exceeded error, the file has been deleted. The `ssh_cmd` condition will be triggered after a command is successfully executed via SSH. `scp` will trigger the `download` and `upload` conditions and not `ssh_cmd`.
The notification will indicate whether an error was detected, for example when only a partial file was uploaded.
The `pre-delete` action, if defined, will be called just before files deletion. If the external command completes with a zero exit status or the HTTP notification response code is `200` then SFTPGo will assume that the file was already deleted/moved and so it will not try to remove the file and it will not execute the hook defined for the `delete` action.
If the `hook` defines a path to an external program, then this program is invoked with the following arguments:
- `action`, string, possible values are: `download`, `upload`, `pre-delete`,`delete`, `rename`, `ssh_cmd`
- `username`
- `path` is the full filesystem path, can be empty for some ssh commands
- `target_path`, non-empty for `rename` action and for `sftpgo-copy` SSH command
- `ssh_cmd`, non-empty for `ssh_cmd` action
The external program can also read the following environment variables:
- `SFTPGO_ACTION`
- `SFTPGO_ACTION_USERNAME`
- `SFTPGO_ACTION_PATH`
- `SFTPGO_ACTION_TARGET`, non-empty for `rename` `SFTPGO_ACTION`
- `SFTPGO_ACTION_SSH_CMD`, non-empty for `ssh_cmd` `SFTPGO_ACTION`
- `SFTPGO_ACTION_FILE_SIZE`, non-empty for `upload`, `download` and `delete` `SFTPGO_ACTION`
- `SFTPGO_ACTION_FS_PROVIDER`, `0` for local filesystem, `1` for S3 backend, `2` for Google Cloud Storage (GCS) backend
- `SFTPGO_ACTION_BUCKET`, non-empty for S3 and GCS backends
- `SFTPGO_ACTION_ENDPOINT`, non-empty for S3 backend if configured
- `SFTPGO_ACTION_STATUS`, integer. 0 means a generic error occurred. 1 means no error, 2 means quota exceeded error
- `SFTPGO_ACTION_PROTOCOL`, string. Possible values are `SSH`, `SFTP`, `SCP`, `FTP`, `DAV`
Previous global environment variables aren't cleared when the script is called.
The program must finish within 30 seconds.
If the `hook` defines an HTTP URL then this URL will be invoked as HTTP POST. The request body will contain a JSON serialized struct with the following fields:
- `action`
- `username`
- `path`
- `target_path`, not null for `rename` action
- `ssh_cmd`, not null for `ssh_cmd` action
- `file_size`, not null for `upload`, `download`, `delete` actions
- `fs_provider`, `0` for local filesystem, `1` for S3 backend, `2` for Google Cloud Storage (GCS) backend
- `bucket`, not null for S3 and GCS backends
- `endpoint`, not null for S3 backend if configured
- `status`, integer. 0 means a generic error occurred. 1 means no error, 2 means quota exceeded error
- `protocol`, string. Possible values are `SSH`, `FTP`, `DAV`
The HTTP request will use the global configuration for HTTP clients.
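A minimal Go sketch of an HTTP endpoint that receives this notification could look like the following; the port and path are arbitrary here and the struct simply mirrors the JSON fields listed above:
```go
package main

import (
	"encoding/json"
	"log"
	"net/http"
)

// actionNotification mirrors the JSON fields documented above.
type actionNotification struct {
	Action     string `json:"action"`
	Username   string `json:"username"`
	Path       string `json:"path"`
	TargetPath string `json:"target_path"`
	SSHCmd     string `json:"ssh_cmd"`
	FileSize   int64  `json:"file_size"`
	FsProvider int    `json:"fs_provider"`
	Bucket     string `json:"bucket"`
	Endpoint   string `json:"endpoint"`
	Status     int    `json:"status"`
	Protocol   string `json:"protocol"`
}

func main() {
	http.HandleFunc("/hook", func(w http.ResponseWriter, r *http.Request) {
		var n actionNotification
		if err := json.NewDecoder(r.Body).Decode(&n); err != nil {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		log.Printf("action %q by %q on %q (status %d)", n.Action, n.Username, n.Path, n.Status)
		w.WriteHeader(http.StatusOK)
	})
	log.Fatal(http.ListenAndServe(":8000", nil))
}
```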
The `actions` struct inside the "data_provider" configuration section allows you to configure actions on user add, update, delete.
Actions will not be fired for internal updates, such as the last login or the user quota fields, or after external authentication.
If the `hook` defines a path to an external program, then this program is invoked with the following arguments:
- `action`, string, possible values are: `add`, `update`, `delete`
- `username`
- `ID`
- `status`
- `expiration_date`
- `home_dir`
- `uid`
- `gid`
The external program can also read the following environment variables:
- `SFTPGO_USER_ACTION`
- `SFTPGO_USER_USERNAME`
- `SFTPGO_USER_PASSWORD`, hashed password as stored inside the data provider, can be empty if the user does not login using a password
- `SFTPGO_USER_ID`
- `SFTPGO_USER_STATUS`
- `SFTPGO_USER_EXPIRATION_DATE`
- `SFTPGO_USER_HOME_DIR`
- `SFTPGO_USER_UID`
- `SFTPGO_USER_GID`
- `SFTPGO_USER_QUOTA_FILES`
- `SFTPGO_USER_QUOTA_SIZE`
- `SFTPGO_USER_UPLOAD_BANDWIDTH`
- `SFTPGO_USER_DOWNLOAD_BANDWIDTH`
- `SFTPGO_USER_MAX_SESSIONS`
- `SFTPGO_USER_FS_PROVIDER`
Previous global environment variables aren't cleared when the script is called.
The program must finish within 15 seconds.
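For example, a minimal Go sketch of such a program could just log the event using the environment variables listed above (an illustration, not shipped with SFTPGo):
```go
package main

import (
	"log"
	"os"
)

func main() {
	// the SFTPGO_USER_* variables are set by SFTPGo before invoking the hook
	log.Printf("user action %q for user %q, home dir %q",
		os.Getenv("SFTPGO_USER_ACTION"),
		os.Getenv("SFTPGO_USER_USERNAME"),
		os.Getenv("SFTPGO_USER_HOME_DIR"))
}
```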
If the `hook` defines an HTTP URL then this URL will be invoked as HTTP POST. The action is added to the query string, for example `<hook>?action=update`, and the user is sent serialized as JSON inside the POST body with sensitive fields removed.
The HTTP request will use the global configuration for HTTP clients.

55 docs/dynamic-user-mod.md Normal file

@@ -0,0 +1,55 @@
# Dynamic user creation or modification
Dynamic user creation or modification is supported via an external program or an HTTP URL that can be invoked just before the user login.
To enable dynamic user modification, you must set the absolute path of your program or an HTTP URL using the `pre_login_hook` key in your configuration file.
The external program can read the following environment variables to get info about the user trying to login:
- `SFTPGO_LOGIND_USER`, it contains the user trying to login serialized as JSON. A JSON serialized user id equal to zero means the user does not exist inside SFTPGo
- `SFTPGO_LOGIND_METHOD`, possible values are: `password`, `publickey` and `keyboard-interactive`
- `SFTPGO_LOGIND_IP`, ip address of the user trying to login
- `SFTPGO_LOGIND_PROTOCOL`, possible values are `SSH`, `FTP`, `DAV`
The program must write, on its standard output:
- an empty string (or no response at all) if the user should not be created/updated
- or the SFTPGo user, JSON serialized, if you want to create or update the given user
If the hook is an HTTP URL then it will be invoked as HTTP POST. The login method, the used protocol and the ip address of the user trying to login are added to the query string, for example `<http_url>?login_method=password&ip=1.2.3.4&protocol=SSH`.
The request body will contain the user trying to login serialized as JSON. If no modification is needed the HTTP response code must be 204, otherwise the response code must be 200 and the response body a valid SFTPGo user serialized as JSON.
Actions defined for user updates will not be executed in this case, and an already logged in user with the same username will not be disconnected; you have to handle these things yourself.
The JSON response can include only the fields to update instead of the full user. For example, if you want to disable the user, you can return a response like this:
```json
{"status": 0}
```
Please note that if you want to create a new user, the pre-login hook response must include all the mandatory user fields.
The program hook must finish within 30 seconds; the HTTP hook will use the global configuration for HTTP clients.
If an error happens while executing the hook then login will be denied.
"Dynamic user creation or modification" and "External Authentication" are mutually exclusive, they are quite similar, the difference is that "External Authentication" returns an already authenticated user while using "Dynamic users modification" you simply create or update a user. The authentication will be checked inside SFTPGo.
In other words while using "External Authentication" the external program receives the credentials of the user trying to login (for example the cleartext password) and it needs to validate them. While using "Dynamic users modification" the pre-login program receives the user stored inside the dataprovider (it includes the hashed password if any) and it can modify it, after the modification SFTPGo will check the credentials of the user trying to login.
Let's see a very basic example. Our sample program will grant access to the existing user `test_user` only in the time range 10:00-18:00. Other users will not be modified since the program will terminate with no output.
```shell
#!/bin/bash
CURRENT_TIME=`date +%H:%M`
if [[ "$SFTPGO_LOGIND_USER" =~ "\"test_user\"" ]]
then
if [[ $CURRENT_TIME > "18:00" || $CURRENT_TIME < "10:00" ]]
then
echo '{"status":0}'
else
echo '{"status":1}'
fi
fi
```
Please note that this is a demo program and it might not work in all cases. For example, the username should be obtained by parsing the JSON serialized user and not by searching the username inside the JSON as shown here.
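The same check can be implemented as an HTTP hook. Here is a minimal Go sketch; the port and path are arbitrary and, unlike the shell demo, it obtains the username by parsing the JSON serialized user:
```go
package main

import (
	"encoding/json"
	"log"
	"net/http"
	"time"
)

func preLogin(w http.ResponseWriter, r *http.Request) {
	var user struct {
		Username string `json:"username"`
	}
	if err := json.NewDecoder(r.Body).Decode(&user); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	if user.Username != "test_user" {
		w.WriteHeader(http.StatusNoContent) // 204: no modification
		return
	}
	status := 1
	if hour := time.Now().Hour(); hour < 10 || hour >= 18 {
		status = 0 // disable the user outside 10:00-18:00
	}
	json.NewEncoder(w).Encode(map[string]int{"status": status})
}

func main() {
	http.HandleFunc("/prelogin", preLogin)
	log.Fatal(http.ListenAndServe(":8000", nil))
}
```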

58 docs/external-auth.md Normal file

@@ -0,0 +1,58 @@
# External Authentication
To enable external authentication, you must set the absolute path of your authentication program or an HTTP URL using the `external_auth_hook` key in your configuration file.
The external program can read the following environment variables to get info about the user trying to authenticate:
- `SFTPGO_AUTHD_USERNAME`
- `SFTPGO_AUTHD_IP`
- `SFTPGO_AUTHD_PROTOCOL`, possible values are `SSH`, `FTP`, `DAV`
- `SFTPGO_AUTHD_PASSWORD`, not empty for password authentication
- `SFTPGO_AUTHD_PUBLIC_KEY`, not empty for public key authentication
- `SFTPGO_AUTHD_KEYBOARD_INTERACTIVE`, not empty for keyboard interactive authentication
Previous global environment variables aren't cleared when the script is called. The content of these variables is _not_ quoted. They may contain special characters. They are under the control of a possibly malicious remote user.
The program must write, on its standard output, a valid SFTPGo user serialized as JSON if the authentication succeeds or a user with an empty username if the authentication fails.
If the hook is an HTTP URL then it will be invoked as HTTP POST. The request body will contain a JSON serialized struct with the following fields:
- `username`
- `ip`
- `protocol`, possible values are `SSH`, `FTP`, `DAV`
- `password`, not empty for password authentication
- `public_key`, not empty for public key authentication
- `keyboard_interactive`, not empty for keyboard interactive authentication
If authentication succeeds the HTTP response code must be 200 and the response body a valid SFTPGo user serialized as JSON. If the authentication fails the HTTP response code must be != 200 or the response body must be empty.
If the authentication succeeds, the user will be automatically added/updated inside the defined data provider. Actions defined for users added/updated will not be executed in this case, and an already logged in user with the same username will not be disconnected; you have to handle these things yourself.
The program hook must finish within 30 seconds; the HTTP hook timeout will use the global configuration for HTTP clients.
This method is slower than built-in authentication, but it's very flexible as anyone can easily write their own authentication hooks.
You can also restrict the authentication scope for the hook using the `external_auth_scope` configuration key:
- `0` means all supported authentication scopes. The external hook will be used for password, public key and keyboard interactive authentication
- `1` means passwords only
- `2` means public keys only
- `4` means keyboard interactive only
You can combine the scopes. For example, 3 means password and public key, 5 means password and keyboard interactive, and so on.
Let's see a very basic example. Our sample authentication program will only accept user `test_user` with any password or public key.
```shell
#!/bin/sh
if test "$SFTPGO_AUTHD_USERNAME" = "test_user"; then
echo '{"status":1,"username":"test_user","expiration_date":0,"home_dir":"/tmp/test_user","uid":0,"gid":0,"max_sessions":0,"quota_size":0,"quota_files":100000,"permissions":{"/":["*"],"/somedir":["list","download"]},"upload_bandwidth":0,"download_bandwidth":0,"filters":{"allowed_ip":[],"denied_ip":[]},"public_keys":[]}'
else
echo '{"username":""}'
fi
```
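The same check as an HTTP hook could look like the following minimal Go sketch; the port and path are arbitrary, and a non-200 response signals an authentication failure:
```go
package main

import (
	"encoding/json"
	"log"
	"net/http"
)

func auth(w http.ResponseWriter, r *http.Request) {
	var req struct {
		Username string `json:"username"`
	}
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	if req.Username != "test_user" {
		// a non-200 response code means authentication failure
		w.WriteHeader(http.StatusForbidden)
		return
	}
	// return a valid SFTPGo user serialized as JSON
	json.NewEncoder(w).Encode(map[string]interface{}{
		"status":      1,
		"username":    "test_user",
		"home_dir":    "/tmp/test_user",
		"permissions": map[string][]string{"/": {"*"}},
	})
}

func main() {
	http.HandleFunc("/auth", auth)
	log.Fatal(http.ListenAndServe(":8000", nil))
}
```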
An example authentication program that authenticates against an LDAP server can be found inside the source tree [ldapauth](../examples/ldapauth) directory.
An example server, usable as an HTTP authentication hook, that authenticates against an LDAP server can be found inside the source tree [ldapauthserver](../examples/ldapauthserver) directory.
If you have an external authentication hook that could be useful to others too, please let us know and/or please send a pull request.

197 docs/full-configuration.md Normal file

@@ -0,0 +1,197 @@
# Configuring SFTPGo
## Command line options
The SFTPGo executable can be used this way:
```console
Usage:
sftpgo [command]
Available Commands:
gen A collection of useful generators
help Help about any command
initprovider Initializes and/or updates the configured data provider
portable Serve a single directory
serve Start the SFTP Server
Flags:
-h, --help help for sftpgo
-v, --version
Use "sftpgo [command] --help" for more information about a command
```
The `serve` command supports the following flags:
- `--config-dir` string. Location of the config dir. This directory should contain the configuration file and is used as the base directory for any files that use a relative path (eg. the private keys for the SFTP server, the SQLite or bbolt database if you use SQLite or bbolt as data provider). The default value is "." or the value of `SFTPGO_CONFIG_DIR` environment variable.
- `--config-file` string. Name of the configuration file. It must be the name of a file stored in `config-dir`, not the absolute path to the configuration file. The specified file name must have no extension because we automatically append JSON, YAML, TOML, HCL and Java extensions when we search for the file. The default value is "sftpgo" (and therefore `sftpgo.json`, `sftpgo.yaml` and so on are searched) or the value of `SFTPGO_CONFIG_FILE` environment variable.
- `--log-compress` boolean. Determine if the rotated log files should be compressed using gzip. Default `false` or the value of `SFTPGO_LOG_COMPRESS` environment variable (1 or `true`, 0 or `false`). It is unused if `log-file-path` is empty.
- `--log-file-path` string. Location for the log file, default "sftpgo.log" or the value of `SFTPGO_LOG_FILE_PATH` environment variable. Leave empty to write logs to the standard error.
- `--log-max-age` int. Maximum number of days to retain old log files. Default 28 or the value of `SFTPGO_LOG_MAX_AGE` environment variable. It is unused if `log-file-path` is empty.
- `--log-max-backups` int. Maximum number of old log files to retain. Default 5 or the value of `SFTPGO_LOG_MAX_BACKUPS` environment variable. It is unused if `log-file-path` is empty.
- `--log-max-size` int. Maximum size in megabytes of the log file before it gets rotated. Default 10 or the value of `SFTPGO_LOG_MAX_SIZE` environment variable. It is unused if `log-file-path` is empty.
- `--log-verbose` boolean. Enable verbose logs. Default `true` or the value of `SFTPGO_LOG_VERBOSE` environment variable (1 or `true`, 0 or `false`).
Log file can be rotated on demand by sending a `SIGUSR1` signal on Unix based systems and using the command `sftpgo service rotatelogs` on Windows.
If you don't configure any private host key, the daemon will use `id_rsa` and `id_ecdsa` in the configuration directory. If these files don't exist, the daemon will attempt to autogenerate them (if the user that executes SFTPGo has write access to the `config-dir`). The server supports any private key format supported by [`crypto/ssh`](https://github.com/golang/crypto/blob/master/ssh/keys.go#L33).
The `gen` command allows you to generate completion scripts for your shell and man pages. Currently the man pages' visual representation is wrong; take a look at this upstream [bug](https://github.com/spf13/cobra/issues/1049) for more details.
## Configuration file
The configuration file contains the following sections:
- **"common"**, configuration parameters shared among all the supported protocols
- `idle_timeout`, integer. Time in minutes after which an idle client will be disconnected. 0 means disabled. Default: 15
- `upload_mode` integer. 0 means standard: the files are uploaded directly to the requested path. 1 means atomic: files are uploaded to a temporary path and renamed to the requested path when the client ends the upload. Atomic mode avoids problems such as a web server that serves partial files when the files are being uploaded. In atomic mode, if there is an upload error, the temporary file is deleted and so the requested upload path will not contain a partial file. 2 means atomic with resume support: same as atomic but if there is an upload error, the temporary file is renamed to the requested path and not deleted. This way, a client can reconnect and resume the upload.
- `actions`, struct. It contains the command to execute and/or the HTTP URL to notify and the trigger conditions. See [Custom Actions](./custom-actions.md) for more details
- `execute_on`, list of strings. Valid values are `download`, `upload`, `pre-delete`, `delete`, `rename`, `ssh_cmd`. Leave empty to disable actions.
- `hook`, string. Absolute path to the command to execute or HTTP URL to notify.
- `setstat_mode`, integer. 0 means "normal mode": requests for changing permissions, owner/group and access/modification times are executed. 1 means "ignore mode": requests for changing permissions, owner/group and access/modification times are silently ignored.
- `proxy_protocol`, integer. Support for [HAProxy PROXY protocol](https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt). If you are running SFTPGo behind a proxy server such as HAProxy, AWS ELB or NGINX, you can enable the proxy protocol. It provides a convenient way to safely transport connection information such as a client's address across multiple layers of NAT or TCP proxies to get the real client IP address instead of the proxy IP. Both protocol versions 1 and 2 are supported. If the proxy protocol is enabled in SFTPGo then you have to enable the protocol in your proxy configuration too. For example, for HAProxy, add `send-proxy` or `send-proxy-v2` to each server configuration line. The following modes are supported:
- 0, disabled
- 1, enabled. Proxy header will be used and requests without proxy header will be accepted
- 2, required. Proxy header will be used and requests without proxy header will be rejected
- `proxy_allowed`, List of IP addresses and IP ranges allowed to send the proxy header:
- If `proxy_protocol` is set to 1 and we receive a proxy header from an IP that is not in the list then the connection will be accepted and the header will be ignored
- If `proxy_protocol` is set to 2 and we receive a proxy header from an IP that is not in the list then the connection will be rejected
- `post_connect_hook`, string. Absolute path to the command to execute or HTTP URL to notify. See [Post connect hook](./post-connect-hook.md) for more details. Leave empty to disable
- **"sftpd"**, the configuration for the SFTP server
- `bind_port`, integer. The port used for serving SFTP requests. Default: 2022
- `bind_address`, string. Leave blank to listen on all available network interfaces. Default: ""
- `idle_timeout`, integer. Deprecated, please use the same key in `common` section.
- `max_auth_tries` integer. Maximum number of authentication attempts permitted per connection. If set to a negative number, the number of attempts is unlimited. If set to zero, the number of attempts is limited to 6.
- `banner`, string. Identification string used by the server. Leave empty to use the default banner. Default `SFTPGo_<version>`, for example `SSH-2.0-SFTPGo_0.9.5`
- `upload_mode` integer. Deprecated, please use the same key in `common` section.
- `actions`, struct. Deprecated, please use the same key in `common` section.
- `keys`, struct array. Deprecated, please use `host_keys`.
- `private_key`, path to the private key file. It can be a path relative to the config dir or an absolute one.
- `host_keys`, list of strings. It contains the daemon's private host keys. Each host key can be defined as a path relative to the configuration directory or an absolute one. If empty, the daemon will search or try to generate `id_rsa` and `id_ecdsa` keys inside the configuration directory. If you configure absolute paths to files named `id_rsa` and/or `id_ecdsa` then SFTPGo will try to generate these keys using the default settings.
- `kex_algorithms`, list of strings. Available KEX (Key Exchange) algorithms in preference order. Leave empty to use default values. The supported values can be found here: [`crypto/ssh`](https://github.com/golang/crypto/blob/master/ssh/common.go#L46 "Supported kex algos")
- `ciphers`, list of strings. Allowed ciphers. Leave empty to use default values. The supported values can be found here: [crypto/ssh](https://github.com/golang/crypto/blob/master/ssh/common.go#L28 "Supported ciphers")
- `macs`, list of strings. Available MAC (message authentication code) algorithms in preference order. Leave empty to use default values. The supported values can be found here: [crypto/ssh](https://github.com/golang/crypto/blob/master/ssh/common.go#L84 "Supported MACs")
- `trusted_user_ca_keys`, list of public keys paths of certificate authorities that are trusted to sign user certificates for authentication. The paths can be absolute or relative to the configuration directory.
- `login_banner_file`, path to the login banner file. The contents of the specified file, if any, are sent to the remote user before authentication is allowed. It can be a path relative to the config dir or an absolute one. Leave empty to disable login banner.
- `setstat_mode`, integer. Deprecated, please use the same key in `common` section.
- `enabled_ssh_commands`, list of enabled SSH commands. `*` enables all supported commands. More information can be found [here](./ssh-commands.md).
- `keyboard_interactive_auth_hook`, string. Absolute path to an external program or an HTTP URL to invoke for keyboard interactive authentication. See [Keyboard Interactive Authentication](./keyboard-interactive.md) for more details.
- `password_authentication`, boolean. Set to false to disable password authentication. This setting will disable multi-step authentication method using public key + password too. It is useful for public key only configurations if you need to manage old clients that will not attempt to authenticate with public keys if the password login method is advertised. Default: true.
- `proxy_protocol`, integer. Deprecated, please use the same key in `common` section.
- `proxy_allowed`, list of strings. Deprecated, please use the same key in `common` section.
- **"ftpd"**, the configuration for the FTP server
- `bind_port`, integer. The port used for serving FTP requests. 0 means disabled. Default: 0.
- `bind_address`, string. Leave blank to listen on all available network interfaces. Default: "".
- `banner`, string. Greeting banner displayed when a connection first comes in. Leave empty to use the default banner. Default `SFTPGo <version> ready`, for example `SFTPGo 1.0.0-dev ready`.
- `banner_file`, path to the banner file. The contents of the specified file, if any, are displayed when someone connects to the server. It can be a path relative to the config dir or an absolute one. If set, it overrides the banner string provided by the `banner` option. Leave empty to disable.
- `active_transfers_port_non_20`, boolean. Do not impose port 20 for active data transfers. Enabling this option allows running SFTPGo with fewer privileges. Default: false.
- `force_passive_ip`, ip address. External IP address to expose for passive connections. Leave empty to autodetect. Default: "".
- `passive_port_range`, struct containing the keys `start` and `end`. Port range for data connections. Random if not specified. Default range: 50000-50100.
- `certificate_file`, string. Certificate for FTPS. This can be an absolute path or a path relative to the config dir.
- `certificate_key_file`, string. Private key matching the above certificate. This can be an absolute path or a path relative to the config dir. If both the certificate and the private key are provided, the server will accept both plain FTP and explicit FTP over TLS. Certificate and key files can be reloaded on demand by sending a `SIGHUP` signal on Unix based systems and a `paramchange` request to the running service on Windows.
- `tls_mode`, integer. 0 means accept both cleartext and encrypted sessions. 1 means TLS is required for both control and data connections. Do not enable this blindly: make sure a proper TLS configuration is in place, otherwise no login will be allowed when `tls_mode` is 1.
- **"webdavd"**, the configuration for the WebDAV server, more info [here](./webdav.md)
- `bind_port`, integer. The port used for serving WebDAV requests. 0 means disabled. Default: 0.
- `bind_address`, string. Leave blank to listen on all available network interfaces. Default: "".
- `certificate_file`, string. Certificate for WebDAV over HTTPS. This can be an absolute path or a path relative to the config dir.
- `certificate_key_file`, string. Private key matching the above certificate. This can be an absolute path or a path relative to the config dir. If both the certificate and the private key are provided, the server will expect HTTPS connections. Certificate and key files can be reloaded on demand by sending a `SIGHUP` signal on Unix based systems and a `paramchange` request to the running service on Windows.
- `cors`, struct containing the CORS configuration. SFTPGo uses [Go CORS handler](https://github.com/rs/cors); please refer to the upstream documentation for the meaning of the fields and their default values.
- `enabled`, boolean, set to true to enable CORS.
- `allowed_origins`, list of strings.
- `allowed_methods`, list of strings.
- `allowed_headers`, list of strings.
- `exposed_headers`, list of strings.
- `allow_credentials`, boolean.
- `max_age`, integer.
- `cache` struct containing cache configuration for the authenticated users.
- `enabled`, boolean, set to true to enable user caching. Default: true.
- `expiration_time`, integer. Expiration time, in minutes, for the cached users. 0 means unlimited. Default: 0.
- `max_size`, integer. Maximum number of users to cache. 0 means unlimited. Default: 50.
- **"data_provider"**, the configuration for the data provider
- `driver`, string. Supported drivers are `sqlite`, `mysql`, `postgresql`, `bolt`, `memory`
- `name`, string. Database name. For driver `sqlite` this can be the database name relative to the config dir or the absolute path to the SQLite database. For driver `memory` this is the (optional) path relative to the config dir or the absolute path to the users dump, obtained using the `dumpdata` REST API, to load. This dump will be loaded at startup and can be reloaded on demand by sending a `SIGHUP` signal on Unix based systems and a `paramchange` request to the running service on Windows. The `memory` provider will not modify the provided file, so quota usage and last login will not be persisted.
- `host`, string. Database host. Leave empty for drivers `sqlite`, `bolt` and `memory`
- `port`, integer. Database port. Leave empty for drivers `sqlite`, `bolt` and `memory`
- `username`, string. Database user. Leave empty for drivers `sqlite`, `bolt` and `memory`
- `password`, string. Database password. Leave empty for drivers `sqlite`, `bolt` and `memory`
- `sslmode`, integer. Used for drivers `mysql` and `postgresql`. 0 disables SSL/TLS connections, 1 requires SSL, 2 sets the SSL mode to `verify-ca` for driver `postgresql` and `skip-verify` for driver `mysql`, 3 sets the SSL mode to `verify-full` for driver `postgresql` and `preferred` for driver `mysql`
- `connectionstring`, string. Provide a custom database connection string. If not empty, this connection string will be used instead of building one using the previous parameters. Leave empty for drivers `bolt` and `memory`
- `sql_tables_prefix`, string. Prefix for SQL tables
- `manage_users`, integer. Set to 0 to disable user management, 1 to enable
- `track_quota`, integer. Set the preferred mode to track users quota between the following choices:
- 0, disable quota tracking. REST API to scan users home directories/virtual folders and update quota will do nothing
- 1, quota is updated each time a user uploads or deletes a file, even if the user has no quota restrictions
- 2, quota is updated each time a user uploads or deletes a file, but only for users with quota restrictions and for virtual folders. With this configuration, the `quota_scan` and `folder_quota_scan` REST APIs can still be used to periodically update space usage for users without quota restrictions and for folders
- `pool_size`, integer. Sets the maximum number of open connections for the `mysql` and `postgresql` drivers. Default: 0 (unlimited)
- `users_base_dir`, string. Users default base directory. If no home dir is defined while adding a new user, and this value is a valid absolute path, then the user home dir will be automatically defined as the path obtained by joining the base dir and the username
- `actions`, struct. It contains the command to execute and/or the HTTP URL to notify and the trigger conditions. See [Custom Actions](./custom-actions.md) for more details
- `execute_on`, list of strings. Valid values are `add`, `update`, `delete`. `update` action will not be fired for internal updates such as the last login or the user quota fields.
- `hook`, string. Absolute path to the command to execute or HTTP URL to notify.
- `external_auth_program`, string. Deprecated, please use `external_auth_hook`.
- `external_auth_hook`, string. Absolute path to an external program or an HTTP URL to invoke for users authentication. See [External Authentication](./external-auth.md) for more details. Leave empty to disable.
- `external_auth_scope`, integer. 0 means all supported authentication scopes (passwords, public keys and keyboard interactive). 1 means passwords only. 2 means public keys only. 4 means keyboard interactive only. The flags can be combined, for example 6 means public keys and keyboard interactive
- `credentials_path`, string. It defines the directory for storing user provided credential files such as Google Cloud Storage credentials. This can be an absolute path or a path relative to the config dir
- `pre_login_program`, string. Deprecated, please use `pre_login_hook`.
- `pre_login_hook`, string. Absolute path to an external program or an HTTP URL to invoke to modify user details just before the login. See [Dynamic user modification](./dynamic-user-mod.md) for more details. Leave empty to disable.
- `post_login_hook`, string. Absolute path to an external program or an HTTP URL to invoke to notify a successful or failed login. See [Post-login hook](./post-login-hook.md) for more details. Leave empty to disable.
- `post_login_scope`, defines the scope for the post-login hook. 0 means notify both failed and successful logins. 1 means notify failed logins. 2 means notify successful logins.
- `check_password_hook`, string. Absolute path to an external program or an HTTP URL to invoke to check the user provided password. See [Check password hook](./check-password-hook.md) for more details. Leave empty to disable.
- `check_password_scope`, defines the scope for the check password hook. 0 means all protocols, 1 means SSH, 2 means FTP, 4 means WebDAV. You can combine the scopes, for example 6 means FTP and WebDAV.
- `password_hashing`, struct. It contains the configuration parameters used to generate the password hash. SFTPGo can verify passwords in several formats and uses the `argon2id` algorithm to hash plain-text passwords before storing them inside the data provider. These options allow you to customize how the hash is generated.
- `argon2_options`, struct containing the options for the argon2id hashing algorithm. The `memory` and `iterations` parameters control the computational cost of hashing the password. The higher these figures are, the greater the cost of generating the hash, the longer the runtime, and the greater the cost for any attacker trying to guess the password. If the code is running on a machine with multiple cores, you can decrease the runtime without reducing the cost by increasing the `parallelism` parameter, which controls the number of threads the work is spread across.
- `memory`, unsigned integer. The amount of memory used by the algorithm (in kibibytes). Default: 65536.
- `iterations`, unsigned integer. The number of iterations over the memory. Default: 1.
- `parallelism`, unsigned 8-bit integer. The number of threads (or lanes) used by the algorithm. Default: 2.
- `update_mode`, integer. Defines how the database will be initialized/updated. 0 means automatically. 1 means manually, using the `initprovider` sub-command.
- **"httpd"**, the configuration for the HTTP server used to serve REST API and to expose the built-in web interface
- `bind_port`, integer. The port used for serving HTTP requests. Set to 0 to disable HTTP server. Default: 8080
- `bind_address`, string. Leave blank to listen on all available network interfaces. Default: "127.0.0.1"
- `templates_path`, string. Path to the HTML web templates. This can be an absolute path or a path relative to the config dir
- `static_files_path`, string. Path to the static files for the web interface. This can be an absolute path or a path relative to the config dir. If both `templates_path` and `static_files_path` are empty the built-in web interface will be disabled
- `backups_path`, string. Path to the backup directory. This can be an absolute path or a path relative to the config dir. We don't allow backups in arbitrary paths for security reasons
- `auth_user_file`, string. Path to a file used to store usernames and passwords for basic authentication. This can be an absolute path or a path relative to the config dir. We support HTTP basic authentication, and the file format must conform to the one generated using the Apache `htpasswd` tool. The supported password formats are bcrypt (`$2y$` prefix) and md5 crypt (`$apr1$` prefix). If empty, HTTP authentication is disabled.
- `certificate_file`, string. Certificate for HTTPS. This can be an absolute path or a path relative to the config dir.
- `certificate_key_file`, string. Private key matching the above certificate. This can be an absolute path or a path relative to the config dir. If both the certificate and the private key are provided, the server will expect HTTPS connections. Certificate and key files can be reloaded on demand sending a `SIGHUP` signal on Unix based systems and a `paramchange` request to the running service on Windows.
- **"http"**, the configuration for HTTP clients. HTTP clients are used for executing hooks such as the ones used for custom actions, external authentication and pre-login user modifications
- `timeout`, integer. Timeout specifies a time limit, in seconds, for requests.
- `ca_certificates`, list of strings. List of paths to extra CA certificates to trust. The paths can be absolute or relative to the config dir. Adding trusted CA certificates is a convenient way to use self-signed certificates without defeating the purpose of using TLS.
- `skip_tls_verify`, boolean. If enabled, the HTTP client accepts any TLS certificate presented by the server and any host name in that certificate. In this mode, TLS is susceptible to man-in-the-middle attacks. This should be used only for testing.
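To make the structure concrete, here is a minimal, partial sketch combining a few of the keys documented above (all values are illustrative examples, not recommendations):
```json
{
  "sftpd": {
    "bind_port": 2022,
    "max_auth_tries": 3,
    "host_keys": ["id_rsa", "id_ecdsa"]
  },
  "data_provider": {
    "driver": "postgresql",
    "name": "sftpgo.db",
    "host": "127.0.0.1",
    "port": 5432,
    "username": "sftpgo",
    "password": "sftpgo_pg_pwd",
    "track_quota": 2
  },
  "httpd": {
    "bind_port": 8080,
    "bind_address": "127.0.0.1"
  }
}
```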
A full example showing the default config (in JSON format) can be found [here](../sftpgo.json).
If you want to use a private host key with an algorithm/setting different from the auto generated RSA/ECDSA keys, or more than two private keys, you can generate your own keys and replace the empty `host_keys` list with something like this:
```json
"host_keys": [
"id_rsa",
"id_ecdsa",
"id_ed25519"
]
```
where `id_rsa`, `id_ecdsa` and `id_ed25519`, in this example, are files containing your generated keys. You can use absolute paths or paths relative to the configuration directory.
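For example, an Ed25519 key could be generated with OpenSSH's `ssh-keygen` (the output path is just an example; use your config dir or any absolute path you then reference in `host_keys`):
```shell
# generate an Ed25519 host key without a passphrase
ssh-keygen -t ed25519 -f /etc/sftpgo/id_ed25519 -N ""
```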
If you want the default host keys to be generated in a directory different from the config dir, please specify absolute paths to files named `id_rsa` or `id_ecdsa` like this:
```json
"host_keys": [
"/etc/sftpgo/keys/id_rsa",
"/etc/sftpgo/keys/id_ecdsa"
]
```
then SFTPGo will try to create `id_rsa` and `id_ecdsa`, if they are missing, inside the existing directory `/etc/sftpgo/keys`.
The configuration can be read from JSON, TOML, YAML, HCL, envfile and Java properties config files. If your `config-file` flag is set to `sftpgo` (default value), you need to create a configuration file called `sftpgo.json` or `sftpgo.yaml` and so on inside `config-dir`.
## Environment variables
You can also override all the available configuration options using environment variables. SFTPGo will check for environment variables with a name matching the key uppercased and prefixed with `SFTPGO_`. You need to use `__` to traverse a struct.
Let's see some examples:
- To set sftpd `bind_port`, you need to define the env var `SFTPGO_SFTPD__BIND_PORT`
- To set the `execute_on` actions, you need to define the env var `SFTPGO_COMMON__ACTIONS__EXECUTE_ON`. For example `SFTPGO_COMMON__ACTIONS__EXECUTE_ON=upload,download`
Please note that in order to override configuration options with environment variables, you need a configuration file containing the options to override; this is a [viper bug](https://github.com/spf13/viper/issues/584). For example, you can deploy the default configuration file and then override the options you want to customize using environment variables.
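As a quick sketch, assuming the default configuration file is already deployed in `/etc/sftpgo` and that you start SFTPGo with the `serve` sub-command, the two examples above could be applied like this:
```shell
# override the SFTP port and the common actions via environment variables
export SFTPGO_SFTPD__BIND_PORT=2222
export SFTPGO_COMMON__ACTIONS__EXECUTE_ON=upload,download
sftpgo serve -c /etc/sftpgo
```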

# Google Cloud Storage backend
To connect SFTPGo to Google Cloud Storage you can use the Application Default Credentials (ADC) strategy to find your application's credentials automatically, or you can explicitly provide a JSON credentials file obtained from the Google Cloud Console. Take a look [here](https://cloud.google.com/docs/authentication/production#providing_credentials_to_your_application) for details.
By specifying a different `key_prefix`, you can assign different "folders" of the same bucket to different users. This is similar to a chroot directory for a local filesystem. Each SFTP/SCP user can only access the assigned folder and its contents. The folder identified by `key_prefix` does not need to be pre-created.
You can optionally specify a [storage class](https://cloud.google.com/storage/docs/storage-classes) too. Leave it blank to use the default storage class.
The configured bucket must exist.
Google Cloud Storage is exposed over HTTPS, so if you are running SFTPGo as a Docker image please be sure to uncomment the line that installs `ca-certificates` inside your `Dockerfile`, to be able to properly verify certificate authorities.
This backend is very similar to the [S3](./s3.md) backend, and it has the same limitations.

# Tutorials
Here we collect step-by-step tutorials. SFTPGo users are encouraged to contribute!
- [SFTPGo with PostgreSQL data provider and S3 backend](./postgresql-s3.md)
- [Expose Web Admin and REST API over HTTPS and password protected](./rest-api-https-auth.md)

# SFTPGo with PostgreSQL data provider and S3 backend
This tutorial shows the installation of SFTPGo on Ubuntu 20.04 (Focal Fossa) with PostgreSQL data provider and S3 backend. SFTPGo will run as an unprivileged (non-root) user. We assume that you want to serve a single S3 bucket and you want to assign different "virtual folders" of this bucket to different SFTPGo virtual users.
## Preliminary Note
Before proceeding further you need to have a basic minimal installation of Ubuntu 20.04.
## Install PostgreSQL
Before installing any packages on the Ubuntu system, update and upgrade all packages using the `apt` commands below.
```shell
sudo apt update
sudo apt upgrade
```
Install PostgreSQL with this `apt` command.
```shell
sudo apt -y install postgresql
```
Once the installation is complete, start the PostgreSQL service and enable it to start at boot.
```shell
sudo systemctl start postgresql
sudo systemctl enable postgresql
```
Next, check the PostgreSQL service using the following command.
```shell
systemctl status postgresql
```
## Configure PostgreSQL
PostgreSQL uses roles for user authentication and authorization, much like Unix-style permissions. By default, PostgreSQL creates a new user called `postgres` for basic authentication.
In this step, we will create a new PostgreSQL user for SFTPGo.
Log in to the PostgreSQL shell using the command below.
```shell
sudo -i -u postgres psql
```
Next, create a new role `sftpgo` with the password `sftpgo_pg_pwd` using the following query.
```sql
create user "sftpgo" with encrypted password 'sftpgo_pg_pwd';
```
Next, create a new database `sftpgo.db` for the SFTPGo service using the following queries.
```sql
create database "sftpgo.db";
grant all privileges on database "sftpgo.db" to "sftpgo";
```
Exit from the PostgreSQL shell by typing `\q`.
## Install SFTPGo
To install SFTPGo you can use the PPA [here](https://launchpad.net/~sftpgo/+archive/ubuntu/sftpgo).
Start by adding the PPA.
```shell
sudo add-apt-repository ppa:sftpgo/sftpgo
sudo apt-get update
```
Next install SFTPGo.
```shell
sudo apt install sftpgo
```
After installation, SFTPGo should already be running with the default configuration and configured to start automatically at boot. Check its status using the following command.
```shell
systemctl status sftpgo
```
## Configure AWS credentials
We assume that you want to serve a single S3 bucket and to assign different "virtual folders" of this bucket to different SFTPGo virtual users. In this case it is very convenient to configure a credentials file, so SFTPGo will automatically use it and you don't need to specify the same AWS credentials for each user.
You can manually create the `/var/lib/sftpgo/.aws/credentials` file and write your AWS credentials like this.
```shell
[default]
aws_access_key_id=AKIAIOSFODNN7EXAMPLE
aws_secret_access_key=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
```
Alternatively, you can install the `AWS CLI` and manage the credentials using this tool.
```shell
sudo apt install awscli
```
Now set your credentials, region, and output format with the following command.
```shell
aws configure
```
Confirm that you can list your bucket contents with the following command.
```shell
aws s3 ls s3://mybucket
```
The AWS CLI will create the credentials file in `~/.aws/credentials`. The SFTPGo service runs as the `sftpgo` system user, whose home directory is `/var/lib/sftpgo`, so you need to copy the credentials file to the sftpgo home directory and assign it the proper permissions.
```shell
sudo mkdir /var/lib/sftpgo/.aws
sudo cp ~/.aws/credentials /var/lib/sftpgo/.aws/
sudo chown -R sftpgo:sftpgo /var/lib/sftpgo/.aws
```
## Configure SFTPGo
Now open the SFTPGo configuration.
```shell
sudo vi /etc/sftpgo/sftpgo.json
```
Search for the `data_provider` section and change it as follows.
```json
"data_provider": {
"driver": "postgresql",
"name": "sftpgo.db",
"host": "127.0.0.1",
"port": 5432,
"username": "sftpgo",
"password": "sftpgo_pg_pwd",
...
}
```
This sets the PostgreSQL connection parameters and, via the `users_base_dir` key (elided above), a default base directory for new users.
If you want to connect to PostgreSQL over a Unix domain socket, set the `host` configuration key to `/var/run/postgresql` instead of `127.0.0.1`.
You can further customize your configuration adding custom actions and other hooks. A full explanation of all configuration parameters can be found [here](../full-configuration.md).
Next, initialize the data provider with the following command.
```shell
$ sudo su - sftpgo -s /bin/bash -c 'sftpgo initprovider -c /etc/sftpgo'
2020-10-09T21:07:50.000 INF Initializing provider: "postgresql" config file: "/etc/sftpgo/sftpgo.json"
2020-10-09T21:07:50.000 INF updating database version: 1 -> 2
2020-10-09T21:07:50.000 INF updating database version: 2 -> 3
2020-10-09T21:07:50.000 INF updating database version: 3 -> 4
2020-10-09T21:07:50.000 INF Data provider successfully initialized/updated
```
The default sftpgo systemd service will start after the network target. In this setup it is more appropriate to start it after the PostgreSQL service, so edit the service using the following command.
```shell
sudo systemctl edit sftpgo.service
```
And override the unit definition with the following snippet.
```shell
[Unit]
After=postgresql.service
```
Confirm that `sftpgo.service` will start after `postgresql.service` with the next command.
```shell
$ systemctl show sftpgo.service | grep After=
After=postgresql.service systemd-journald.socket system.slice -.mount systemd-tmpfiles-setup.service network.target sysinit.target basic.target
```
Next, restart the sftpgo service to use the new configuration and check that it is running.
```shell
sudo systemctl restart sftpgo
systemctl status sftpgo
```
## Add virtual users
The easiest way to add virtual users is to use the built-in Web interface.
You can expose the Web Admin interface over the network by replacing `"bind_address": "127.0.0.1"` in the `httpd` configuration section with `"bind_address": ""`, then apply the change by restarting the SFTPGo service with the following command.
```shell
sudo systemctl restart sftpgo
```
Now open the Web Admin URL.
[http://127.0.0.1:8080/web](http://127.0.0.1:8080/web)
Click `Add` and fill in the user details; the minimum required parameters are:
- `Username`
- `Password` or `Public keys`
- `Permissions`
- `Home Dir` can be empty since we defined a default base dir
- Select `Amazon S3 (Compatible)` as storage and then set `Bucket`, `Region` and optionally a `Key Prefix` if you want to restrict the user to a specific virtual folder in the bucket. The specified virtual folder does not need to be pre-created. You can leave `Access Key` and `Access Secret` empty since we defined global credentials for the `sftpgo` user and we use this system user to run the SFTPGo service.
You are done! Now you can connect to your SFTPGo instance using any compatible `sftp` client on port `2022`.
You can mix S3 users with local users, but please be aware that we are running the service as the unprivileged `sftpgo` system user, so if you set storage to `local` for an SFTPGo virtual user, the home directory for that user must be owned by the `sftpgo` system user. If you don't specify a home directory, the default is `/var/lib/sftpgo/users/<username>`, which should be appropriate.
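For example, a first connectivity test from a client machine could look like this (replace the username and host with your own):
```shell
sftp -P 2022 user@your-server
```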

# Expose Web Admin and REST API over HTTPS and password protected
This tutorial shows how to expose the SFTPGo web interface and REST API over HTTPS and password protect them.
## Preliminary Note
Before proceeding further you need to have an SFTPGo instance already configured and running.
We assume:
- you are running SFTPGo as service using the dedicated `sftpgo` system user
- the SFTPGo configuration directory is `/etc/sftpgo`
- you are running SFTPGo on Ubuntu 20.04, however these instructions can be easily adapted to other Linux variants.
## Authentication Setup
First install the `htpasswd` tool. We use this tool to create the users for the Web Admin/REST API.
```shell
sudo apt install apache2-utils
```
Create a user for web based authentication.
```shell
sudo htpasswd -B -c /etc/sftpgo/httpauth sftpgoweb
```
If you want to create additional users, omit the `-c` option.
```shell
sudo htpasswd -B /etc/sftpgo/httpauth anotheruser
```
Next open the SFTPGo configuration.
```shell
sudo vi /etc/sftpgo/sftpgo.json
```
Search for the `httpd` section and change it as follows.
```json
"httpd": {
"bind_port": 8080,
"bind_address": "",
"templates_path": "templates",
"static_files_path": "static",
"backups_path": "backups",
"auth_user_file": "/etc/sftpgo/httpauth",
"certificate_file": "",
"certificate_key_file": ""
}
```
Setting an empty `bind_address` means that the service will listen on all available network interfaces and so it will be exposed over the network.
Now restart the SFTPGo service to apply the changes.
```shell
sudo systemctl restart sftpgo
```
You are done! Now log in to the Web Admin interface using the username and password created above.
## Creation of a Self-Signed Certificate
For demonstration purposes we use a self-signed certificate here. These certificates are easy to make and do not cost money. However, they do not provide all of the security properties that certificates signed by a public Certificate Authority (CA) aim to provide; you are encouraged to use a certificate signed by a public CA.
When creating a new SSL certificate, you specify its validity period by changing the value 365 (as it appears in the command below) to the preferred number of days. As created below, the certificate auto-expires after one year.
```shell
sudo mkdir /etc/sftpgo/ssl
sudo openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout /etc/sftpgo/ssl/sftpgo.key -out /etc/sftpgo/ssl/sftpgo.crt
```
The above command creates both the self-signed SSL certificate and the server key to safeguard it, placing both into the `/etc/sftpgo/ssl` directory. Answer the questions to create the certificate and the key for HTTPS.
Assign the proper permissions to the generated certificates.
```shell
sudo chown -R sftpgo:sftpgo /etc/sftpgo/ssl
```
## HTTPS Setup
Open the SFTPGo configuration.
```shell
sudo vi /etc/sftpgo/sftpgo.json
```
Search for the `httpd` section and change it as follows.
```json
"httpd": {
"bind_port": 8080,
"bind_address": "",
"templates_path": "templates",
"static_files_path": "static",
"backups_path": "backups",
"auth_user_file": "/etc/sftpgo/httpauth",
"certificate_file": "/etc/sftpgo/ssl/sftpgo.crt",
"certificate_key_file": "/etc/sftpgo/ssl/sftpgo.key"
}
```
Now restart the SFTPGo service to apply the changes.
```shell
sudo systemctl restart sftpgo
```
You are done! Now SFTPGo web admin and REST API are exposed over HTTPS and password protected.
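As a quick check from the command line, something like the following should work (`-k` is needed because the certificate is self-signed; the `/api/v1/version` endpoint is used here as an example, see the OpenAPI schema for the authoritative paths):
```shell
curl -k -u sftpgoweb https://127.0.0.1:8080/api/v1/version
```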
You can easily replace the self-signed certificate used here with a properly signed certificate.
The certificate could change frequently if you use something like [Let's Encrypt](https://letsencrypt.org/). SFTPGo supports hot certificate reloading using the following command.
```shell
sudo systemctl reload sftpgo
```

# Keyboard Interactive Authentication
Keyboard interactive authentication is, in general, a series of questions asked by the server with responses provided by the client.
This authentication method is typically used for multi-factor authentication.
There are no restrictions on the number of questions asked on a particular authentication stage; there are also no restrictions on the number of stages involving different sets of questions.
To enable keyboard interactive authentication, you must set the absolute path of your authentication program or an HTTP URL using the `keyboard_interactive_auth_hook` key in your configuration file.
The external program can read the following environment variables to get info about the user trying to authenticate:
- `SFTPGO_AUTHD_USERNAME`
- `SFTPGO_AUTHD_IP`
- `SFTPGO_AUTHD_PASSWORD`, this is the hashed password as stored inside the data provider
Previous global environment variables aren't cleared when the script is called. The content of these variables is _not_ quoted. They may contain special characters.
The program must write the questions on its standard output, in a single line, using the following JSON-serialized struct:
- `instruction`, string. A short description to show to the user that is trying to authenticate. Can be empty or omitted
- `questions`, list of questions to be asked to the user
- `echos`, list of boolean flags corresponding to the questions (so the lengths of both lists must be the same), indicating whether the user's reply to a particular question should be echoed on the screen while they are typing: true if it should be echoed, false if it should be hidden.
- `check_password`, optional integer. Ask exactly one question and set this field to 1 if the expected answer is the user password and you want SFTPGo to check it for you. If the password is correct, the response returned to the program is `OK`. If the password is wrong, the program will be terminated and an authentication error will be returned to the user that is trying to authenticate.
- `auth_result`, integer. Set this field to 1 to indicate successful authentication. 0 is ignored. Any other value means authentication error. If this field is found and it is different from 0, SFTPGo will not read any other questions from the external program, and it will finalize the authentication.
SFTPGo writes the user answers to the program standard input, one per line, in the same order as the questions.
Please be sure that your program receives the answers for all the issued questions before asking for the next ones.
Keyboard interactive authentication can be chained to the external authentication.
The authentication must finish within 60 seconds.
Let's see a very basic example. Our sample keyboard interactive authentication program will ask for 2 sets of questions and accept the user if the answer to the last question is `answer3`.
```shell
#!/bin/sh
echo '{"questions":["Question1: ","Question2: "],"instruction":"This is a sample for keyboard interactive authentication","echos":[true,false]}'
read ANSWER1
read ANSWER2
echo '{"questions":["Question3: "],"instruction":"","echos":[true]}'
read ANSWER3
if test "$ANSWER3" = "answer3"; then
echo '{"auth_result":1}'
else
echo '{"auth_result":-1}'
fi
```
and here is an example where SFTPGo checks the user password for you:
```shell
#!/bin/sh
echo '{"questions":["Password: "],"instruction":"This is a sample for keyboard interactive authentication","echos":[false],"check_password":1}'
read ANSWER1
if test "$ANSWER1" != "OK"; then
exit 1
fi
echo '{"questions":["One time token: "],"instruction":"","echos":[false]}'
read ANSWER2
if test "$ANSWER2" = "token"; then
echo '{"auth_result":1}'
else
echo '{"auth_result":-1}'
fi
```
If the hook is an HTTP URL then it will be invoked as HTTP POST multiple times for each login request.
The request body will contain a JSON struct with the following fields:
- `request_id`, string. Unique request identifier
- `username`, string
- `ip`, string
- `password`, string. This is the hashed password as stored inside the data provider
- `answers`, list of strings. It will be null for the first request
- `questions`, list of strings. It will contain the previously asked questions. It will be null for the first request
The HTTP response code must be 200 and the body must contain the same JSON struct described for the program.
Let's see a basic sample. The configured hook is `http://127.0.0.1:8000/keyIntHookPwd`; as soon as the user tries to login, SFTPGo makes this HTTP POST request:
```shell
POST /keyIntHookPwd HTTP/1.1
Host: 127.0.0.1:8000
User-Agent: Go-http-client/1.1
Content-Length: 189
Content-Type: application/json
Accept-Encoding: gzip
{"request_id":"bq1r5r7cdrpd2qtn25ng","username":"a","ip":"127.0.0.1","password":"$pbkdf2-sha512$150000$ClOPkLNujMTL$XktKy0xuJsOfMYBz+f2bIyPTdbvDTSnJ1q+7+zp/HPq5Qojwp6kcpSIiVHiwvbi8P6HFXI/D3UJv9BLcnQFqPA=="}
```
as you can see, in this first request `answers` and `questions` are null.
Here is the response that instructs SFTPGo to ask for the user password and to check it:
```shell
HTTP/1.1 200 OK
Date: Tue, 31 Mar 2020 21:15:24 GMT
Server: WSGIServer/0.2 CPython/3.8.2
Content-Type: application/json
X-Frame-Options: SAMEORIGIN
Content-Length: 143
{"questions": ["Password: "], "check_password": 1, "instruction": "This is a sample for keyboard interactive authentication", "echos": [false]}
```
The user enters the correct password, so SFTPGo makes a new HTTP POST. Please note that the `request_id` is the same as in the previous request; this time the asked `questions` and the user's `answers` are not null:
```shell
POST /keyIntHookPwd HTTP/1.1
Host: 127.0.0.1:8000
User-Agent: Go-http-client/1.1
Content-Length: 233
Content-Type: application/json
Accept-Encoding: gzip
{"request_id":"bq1r5r7cdrpd2qtn25ng","username":"a","ip":"127.0.0.1","password":"$pbkdf2-sha512$150000$ClOPkLNujMTL$XktKy0xuJsOfMYBz+f2bIyPTdbvDTSnJ1q+7+zp/HPq5Qojwp6kcpSIiVHiwvbi8P6HFXI/D3UJv9BLcnQFqPA==","answers":["OK"],"questions":["Password: "]}
```
Here is the HTTP response that instructs SFTPGo to ask for a new question:
```shell
HTTP/1.1 200 OK
Date: Tue, 31 Mar 2020 21:15:27 GMT
Server: WSGIServer/0.2 CPython/3.8.2
Content-Type: application/json
X-Frame-Options: SAMEORIGIN
Content-Length: 66
{"questions": ["Question2: "], "instruction": "", "echos": [true]}
```
As soon as the user answers this question, SFTPGo will make a new HTTP POST request with the user's answer:
```shell
POST /keyIntHookPwd HTTP/1.1
Host: 127.0.0.1:8000
User-Agent: Go-http-client/1.1
Content-Length: 239
Content-Type: application/json
Accept-Encoding: gzip
{"request_id":"bq1r5r7cdrpd2qtn25ng","username":"a","ip":"127.0.0.1","password":"$pbkdf2-sha512$150000$ClOPkLNujMTL$XktKy0xuJsOfMYBz+f2bIyPTdbvDTSnJ1q+7+zp/HPq5Qojwp6kcpSIiVHiwvbi8P6HFXI/D3UJv9BLcnQFqPA==","answers":["answer2"],"questions":["Question2: "]}
```
Here is the final HTTP response that allows the user to log in:
```shell
HTTP/1.1 200 OK
Date: Tue, 31 Mar 2020 21:15:29 GMT
Server: WSGIServer/0.2 CPython/3.8.2
Content-Type: application/json
X-Frame-Options: SAMEORIGIN
Content-Length: 18
{"auth_result": 1}
```
An example keyboard interactive program allowing authentication using [Twilio Authy 2FA](https://www.twilio.com/docs/authy) can be found inside the source tree [authy](../examples/OTP/authy) directory.

# Logs
The log file is a stream of JSON structs. Each struct has a `sender` field that identifies the log type.
The logs can be divided into the following categories:
- **"app logs"**, internal logs used to debug SFTPGo:
- `sender` string. This is generally the package name that emits the log
- `time` string. Date/time with millisecond precision
- `level` string
- `message` string
- **"transfer logs"**, SFTP/SCP transfer logs:
- `sender` string. `Upload` or `Download`
- `time` string. Date/time with millisecond precision
- `level` string
- `elapsed_ms`, int64. Elapsed time, as milliseconds, for the upload/download
- `size_bytes`, int64. Size, as bytes, of the download/upload
- `username`, string
- `file_path` string
- `connection_id` string. Unique connection identifier
- `protocol` string. `SFTP` or `SCP`
- **"command logs"**, SFTP/SCP command logs:
- `sender` string. `Rename`, `Rmdir`, `Mkdir`, `Symlink`, `Remove`, `Chmod`, `Chown`, `Chtimes`, `Truncate`, `SSHCommand`
- `level` string
- `username`, string
- `file_path` string
- `target_path` string
- `filemode` string. Valid for sender `Chmod` otherwise empty
- `uid` integer. Valid for sender `Chown` otherwise -1
- `gid` integer. Valid for sender `Chown` otherwise -1
- `access_time` datetime as YYYY-MM-DDTHH:MM:SS. Valid for sender `Chtimes` otherwise empty
- `modification_time` datetime as YYYY-MM-DDTHH:MM:SS. Valid for sender `Chtimes` otherwise empty
- `size` int64. Valid for sender `Truncate` otherwise -1
- `ssh_command`, string. Valid for sender `SSHCommand` otherwise empty
- `connection_id` string. Unique connection identifier
- `protocol` string. `SFTP`, `SCP` or `SSH`
- **"http logs"**, REST API logs:
- `sender` string. `httpd`
- `level` string
- `remote_addr` string. IP and port of the remote client
- `proto` string, for example `HTTP/1.1`
- `method` string. HTTP method (`GET`, `POST`, `PUT`, `DELETE` etc.)
- `user_agent` string
- `uri` string. Full uri
- `resp_status` integer. HTTP response status code
- `resp_size` integer. Size in bytes of the HTTP response
- `elapsed_ms` int64. Elapsed time, as milliseconds, to complete the request
- `request_id` string. Unique request identifier
- **"connection failed logs"**, logs for failed attempts to initialize a connection. A connection can fail for an authentication error or other errors such as a client abort or a timeout if the login does not happen in two minutes
- `sender` string. `connection_failed`
- `level` string
- `username`, string. Can be empty if the connection is closed before an authentication attempt
- `client_ip` string.
- `protocol` string. Possible values are `SSH`, `FTP`, `DAV`
- `login_type` string. Can be `publickey`, `password`, `keyboard-interactive`, `publickey+password`, `publickey+keyboard-interactive` or `no_auth_tryed`
- `error` string. Optional error description
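Since each log line is a self-contained JSON struct, standard tools can filter the stream. For example, a small sketch with `jq` (assuming it is installed and the log file is `sftpgo.log`) that keeps only transfer logs:
```shell
# print only upload/download records, one compact JSON object per line
jq -c 'select(.sender == "Upload" or .sender == "Download")' sftpgo.log
```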

# Metrics
SFTPGo exposes [Prometheus](https://prometheus.io/) metrics at the `/metrics` HTTP endpoint.
Several counters and gauges are available, for example:
- Total uploads and downloads
- Total upload and download size
- Total upload and download errors
- Total executed SSH commands
- Total SSH command errors
- Number of active connections
- Data provider availability
- Total successful and failed logins using password, public key, keyboard interactive authentication or supported multi-step authentications
- Total HTTP requests served and totals for response code
- Go's runtime details about GC, number of goroutines and OS threads
- Process information like CPU, memory, file descriptor usage and start time
Please check the `/metrics` page for more details.
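For a quick look at the raw metrics, you can fetch the endpoint directly (assuming the `httpd` service listens on its default `127.0.0.1:8080`):
```shell
curl http://127.0.0.1:8080/metrics
```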

# Performance
SFTPGo can easily saturate a Gigabit connection on low end hardware with no special configuration; this is generally enough for most use cases.
For Multi-Gig connections, some performance improvements and comparisons with OpenSSH have been discussed [here](https://github.com/drakkan/sftpgo/issues/69); most of them have been included in the master branch. To summarize:
- In the current state, with all performance improvements applied, SFTP performance is very close to OpenSSH, however CPU usage is higher. SCP performance matches OpenSSH.
- The main bottlenecks are the encryption and the message authentication, so if you can use a fast cipher with implicit message authentication, such as `aes128-gcm@openssh.com`, you will get a big performance boost.
- The SCP protocol is much simpler than SFTP, so SFTPGo's multi-platform SCP implementation performs better than its SFTP one.
- Load balancing with HAProxy can greatly improve performance if the CPU does not become the bottleneck.
## Benchmark
### Hardware specification
**Server** ||
--- | --- |
OS| Debian 10.2 x64 |
CPU| Ryzen5 3600 |
RAM| 64GB 2400MHz ECC |
Disk| Ramdisk |
Ethernet| Mellanox ConnectX-3 40GbE|
**Client** ||
--- | --- |
OS| Ubuntu 19.10 x64 |
CPU| Threadripper 1920X |
RAM| 64GB 2400MHz ECC |
Disk| Ramdisk |
Ethernet| Mellanox ConnectX-3 40GbE|
### Test configurations
- `Baseline`: SFTPGo version 0.9.6.
- `Devel`: SFTPGo commit b0ed1905918b9dcc22f9a20e89e354313f491734, compiled with Golang 1.14.2. This is basically the same as v1.0.0 as far as performance is concerned.
- `Optimized`: Various [optimizations](#Optimizations-applied) applied on top of `Devel`.
- `Balanced`: Two optimized instances, running on localhost, load balanced by HAProxy 2.1.3.
- `OpenSSH`: OpenSSH_7.9p1 Debian-10+deb10u2, OpenSSL 1.1.1d 10 Sep 2019
The server's CPU is in Eco mode; you can expect better results in certain cases with a stronger CPU, especially for multi-stream HAProxy balanced load.
#### Cipher aes128-ctr
The Message Authentication Code (MAC) used is `hmac-sha2-256`.
##### SFTP
Download:
Stream|Baseline MB/s|Devel MB/s|Optimized MB/s|Balanced MB/s|OpenSSH MB/s|
---|---|---|---|---|---|
1|150|243|319|412|452|
2|267|452|600|740|735|
3|351|637|802|991|1045|
4|414|811|1002|1192|1265|
8|536|1451|1742|1552|1798|
Upload:
Stream|Baseline MB/s|Devel MB/s|Optimized MB/s|Balanced MB/s|OpenSSH MB/s|
---|---|---|---|---|---|
1|172|273|343|407|426|
2|284|469|595|673|738|
3|368|644|820|881|1090|
4|446|851|1041|1026|1244|
8|605|1210|1368|1273|1820|
##### SCP
Download:
Stream|Baseline MB/s|Devel MB/s|Optimized MB/s|Balanced MB/s|OpenSSH MB/s|
---|---|---|---|---|---|
1|220|369|525|611|558|
2|437|659|941|1048|856|
3|635|1000|1365|1363|1201|
4|787|1272|1664|1610|1415|
8|1297|2129|2690|2100|1959|
Upload:
Stream|Baseline MB/s|Devel MB/s|Optimized MB/s|Balanced MB/s|OpenSSH MB/s|
---|---|---|---|---|---|
1|208|312|400|458|508|
2|360|516|647|745|926|
3|476|678|861|935|1254|
4|576|836|1080|1099|1569|
8|857|1161|1416|1433|2271|
#### Cipher aes128-gcm@openssh.com
With this cipher, message authentication is implicit; no SHA256 computation is needed.
##### SFTP
Download:
Stream|Baseline MB/s|Devel MB/s|Optimized MB/s|Balanced MB/s|OpenSSH MB/s|
---|---|---|---|---|---|
1|332|423|<--|583|443|
2|533|755|<--|970|809|
3|666|1045|<--|1249|1098|
4|762|1276|<--|1461|1351|
8|886|2064|<--|1825|1933|
Upload:
Stream|Baseline MB/s|Devel MB/s|Optimized MB/s|Balanced MB/s|OpenSSH MB/s|
---|---|---|---|---|---|
1|348|410|<--|527|469|
2|596|729|<--|842|930|
3|778|974|<--|1088|1341|
4|886|1192|<--|1232|1494|
8|1042|1578|<--|1433|1893|
##### SCP
Download:
Stream|Baseline MB/s|Devel MB/s|Optimized MB/s|Balanced MB/s|OpenSSH MB/s|
---|---|---|---|---|---|
1|776|793|<--|832|578|
2|1343|1415|<--|1435|938|
3|1815|1878|<--|1877|1279|
4|2192|2205|<--|2056|1567|
8|3237|3287|<--|2493|2036|
Upload:
Stream|Baseline MB/s|Devel MB/s|Optimized MB/s|Balanced MB/s|OpenSSH MB/s|
---|---|---|---|---|---|
1|528|545|<--|608|584|
2|872|849|<--|975|1019|
3|1121|1138|<--|1217|1412|
4|1367|1387|<--|1368|1755|
8|1733|1744|<--|1664|2510|
### Optimizations applied
- AES-CTR optimization of the Go compiler for x86_64: there is a [patch](https://go-review.googlesource.com/c/go/+/51670) that hasn't been merged yet; you can apply it yourself.
### HAProxy configuration
Here is the relevant HAProxy configuration used for the `Balanced` test configuration:
```console
frontend sftp
bind :2222
mode tcp
timeout client 600s
default_backend sftpgo
backend sftpgo
mode tcp
balance roundrobin
timeout connect 10s
timeout server 600s
timeout queue 30s
option tcp-check
tcp-check expect string SSH-2.0-
server sftpgo1 127.0.0.1:2022 check send-proxy-v2 weight 10 inter 10s rise 2 fall 3
server sftpgo2 127.0.0.1:2024 check send-proxy-v2 weight 10 inter 10s rise 2 fall 3
```

# Portable mode
SFTPGo allows you to share a single directory on demand using the `portable` subcommand:
```console
sftpgo portable --help
To serve the current working directory with auto generated credentials simply
use:
$ sftpgo portable
Please take a look at the usage below to customize the serving parameters
Usage:
sftpgo portable [flags]
Flags:
-C, --advertise-credentials If the SFTP/FTP service is
advertised via multicast DNS, this
flag allows to put username/password
inside the advertised TXT record
-S, --advertise-service Advertise SFTP/FTP service using
multicast DNS
--allowed-extensions stringArray Allowed file extensions case
insensitive. The format is
/dir::ext1,ext2.
For example: "/somedir::.jpg,.png"
--denied-extensions stringArray Denied file extensions case
insensitive. The format is
/dir::ext1,ext2.
For example: "/somedir::.jpg,.png"
-d, --directory string Path to the directory to serve.
This can be an absolute path or a path
relative to the current directory
(default ".")
-f, --fs-provider int 0 means local filesystem,
1 Amazon S3 compatible,
2 Google Cloud Storage
--ftpd-cert string Path to the certificate file for FTPS
--ftpd-key string Path to the key file for FTPS
--ftpd-port int 0 means a random unprivileged port,
< 0 disabled (default -1)
--gcs-automatic-credentials int 0 means explicit credentials using
a JSON credentials file, 1 automatic
(default 1)
--gcs-bucket string
--gcs-credentials-file string Google Cloud Storage JSON credentials
file
--gcs-key-prefix string Allows to restrict access to the
virtual folder identified by this
prefix and its contents
--gcs-storage-class string
-h, --help help for portable
-l, --log-file-path string Leave empty to disable logging
-v, --log-verbose Enable verbose logs
-p, --password string Leave empty to use an auto generated
value
-g, --permissions strings User's permissions. "*" means any
permission (default [list,download])
-k, --public-key strings
--s3-access-key string
--s3-access-secret string
--s3-bucket string
--s3-endpoint string
--s3-key-prefix string Allows to restrict access to the
virtual folder identified by this
prefix and its contents
--s3-region string
--s3-storage-class string
--s3-upload-concurrency int How many parts are uploaded in
parallel (default 2)
--s3-upload-part-size int The buffer size for multipart uploads
(MB) (default 5)
-s, --sftpd-port int 0 means a random unprivileged port
-c, --ssh-commands strings SSH commands to enable.
"*" means any supported SSH command
including scp
(default [md5sum,sha1sum,cd,pwd,scp])
-u, --username string Leave empty to use an auto generated
value
--webdav-cert string Path to the certificate file for WebDAV
over HTTPS
--webdav-key string Path to the key file for WebDAV over
HTTPS
--webdav-port int 0 means a random unprivileged port,
< 0 disabled (default -1)
```
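As a minimal sketch using the flags above, sharing a local directory with fixed credentials and restricted permissions could look like this (all values are examples):
```shell
# share ./shared allowing only listing and downloads; add -s to pin the SFTP port
sftpgo portable -d ./shared -u user -p secret -g list,download
```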
In portable mode, SFTPGo can advertise the SFTP/FTP services and, optionally, the credentials via multicast DNS, so there is a standard way to discover the service and to automatically connect to it.
Here is an example of the advertised SFTP service including credentials as seen using `avahi-browse`:
```console
= enp0s31f6 IPv4 SFTPGo portable 53705 SFTP File Transfer local
hostname = [p1.local]
address = [192.168.1.230]
port = [53705]
txt = ["password=EWOo6pJe" "user=user" "version=0.9.3-dev-b409523-dirty-2019-10-26T13:43:32Z"]
```

# Post-connect hook
This hook is executed as soon as a new connection is established. It notifies the connection's IP address and protocol. Based on the received response, the connection is accepted or rejected. Combining this hook with the [Post-login hook](./post-login-hook.md), you can implement your own (even per-protocol) blacklist/whitelist of IP addresses.
Please keep in mind that you can easily configure a specialized program such as [Fail2ban](http://www.fail2ban.org/) for brute force protection. Executing a hook for each connection can be heavy.
The `post-connect-hook` can be defined as the absolute path of your program or an HTTP URL.
If the hook defines an external program it can read the following environment variables:
- `SFTPGO_CONNECTION_IP`
- `SFTPGO_CONNECTION_PROTOCOL`
If the external command completes with a zero exit status the connection will be accepted otherwise rejected.
Previous global environment variables aren't cleared when the script is called.
The program must finish within 20 seconds.
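Here is a minimal sketch of such a program; the rejected address is just an example:
```shell
#!/bin/sh
# reject connections from one specific IP address, accept everything else
if test "$SFTPGO_CONNECTION_IP" = "192.0.2.1"; then
  exit 1
fi
exit 0
```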
If the hook defines an HTTP URL then this URL will be invoked as HTTP GET with the following query parameters:
- `ip`
- `protocol`
The connection is accepted if the HTTP response code is `200` otherwise rejected.
The HTTP request will use the global configuration for HTTP clients.

# Post-login hook
This hook is executed after a login or after closing a connection for authentication timeout. Defining an appropriate `post_login_scope` you can get notifications for failed logins, successful logins or both.
Combining this hook with the [Post-connect hook](./post-connect-hook.md), you can implement your own (even per-protocol) blacklist/whitelist of IP addresses.
Please keep in mind that you can easily configure a specialized program such as [Fail2ban](http://www.fail2ban.org/) for brute force protection. Executing a hook after each login can be heavy.
The `post-login-hook` can be defined as the absolute path of your program or an HTTP URL.
If the hook defines an external program it can read the following environment variables:
- `SFTPGO_LOGIND_USER`, username, can be empty if the connection is closed for authentication timeout
- `SFTPGO_LOGIND_IP`
- `SFTPGO_LOGIND_METHOD`, possible values are `publickey`, `password`, `keyboard-interactive`, `publickey+password`, `publickey+keyboard-interactive` or `no_auth_tryed`
- `SFTPGO_LOGIND_STATUS`, 1 means login OK, 0 means login KO
- `SFTPGO_LOGIND_PROTOCOL`, possible values are `SSH`, `FTP`, `DAV`
Previous global environment variables aren't cleared when the script is called.
The program must finish within 20 seconds.
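A minimal sketch of such a program, appending failed logins to a log file of your choice (the path is an example):
```shell
#!/bin/sh
# SFTPGO_LOGIND_STATUS is 0 for failed logins, 1 for successful ones
if test "$SFTPGO_LOGIND_STATUS" = "0"; then
  echo "failed login user=$SFTPGO_LOGIND_USER ip=$SFTPGO_LOGIND_IP method=$SFTPGO_LOGIND_METHOD" >> /var/log/sftpgo-failed-logins.log
fi
exit 0
```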
If the hook is an HTTP URL then it will be invoked as HTTP POST. The request body will contain a JSON serialized struct with the following fields:
- `username`
- `login_method`
- `ip`
- `protocol`
- `status`
The HTTP request will use the global configuration for HTTP clients.
The `post_login_scope` supports the following configuration values:
- `0` means notify both failed and successful logins
- `1` means notify failed logins. Connections closed for authentication timeout are notified as failed connections. You will get an empty username in this case
- `2` means notify successful logins

# Profiling SFTPGo
The built-in profiler lets you collect CPU profiles, traces, allocations and heap profiles that help you identify and correct specific bottlenecks.
You can enable the built-in profiler using the `--profiler` command flag.
Profiling data are exposed via HTTP/HTTPS in the format expected by the [pprof](https://github.com/google/pprof/blob/master/doc/README.md) visualization tool. You can find the index page at the URL `/debug/pprof/`.
The following profiles are available; you can obtain them via HTTP GET requests:
- `allocs`, a sampling of all past memory allocations
- `block`, stack traces that led to blocking on synchronization primitives
- `goroutine`, stack traces of all current goroutines
- `heap`, a sampling of memory allocations of live objects. You can specify the `gc` GET parameter to run GC before taking the heap sample
- `mutex`, stack traces of holders of contended mutexes
- `profile`, CPU profile. You can specify the duration in the `seconds` GET parameter. After you get the profile file, use the `go tool pprof` command to investigate the profile
- `threadcreate`, stack traces that led to the creation of new OS threads
- `trace`, a trace of execution of the current program. You can specify the duration in the `seconds` GET parameter. After you get the trace file, use the `go tool trace` command to investigate the trace
For example you can:
- download a 30-second CPU profile from the URL `/debug/pprof/profile?seconds=30`
- download a sampling of memory allocations of live objects from the URL `/debug/pprof/heap?gc=1`
- download a sampling of all past memory allocations from the URL `/debug/pprof/allocs`
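For example, a typical CPU profiling session, assuming the profiler is enabled and `httpd` listens on its default `127.0.0.1:8080`, could look like this:
```shell
# capture a 30-second CPU profile, then inspect it interactively
curl -o cpu.pprof "http://127.0.0.1:8080/debug/pprof/profile?seconds=30"
go tool pprof cpu.pprof
```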

# REST API
SFTPGo exposes a REST API to manage, backup, and restore users and folders, and to get real time reports of the active connections, with the ability to forcibly close a connection.
If quota tracking is enabled in the configuration file, then the used size and number of files are updated each time a file is added/removed. If files are added/removed outside of SFTP/SCP, or if you change `track_quota` from `2` to `1`, you can rescan a user's home dir and update the used quota using the REST API.
The REST API can be protected using HTTP basic authentication and exposed via HTTPS. If you need more advanced security features, you can set up a reverse proxy using an HTTP server such as Apache or NGINX.
For example, you can keep SFTPGo listening on localhost and expose it externally configuring a reverse proxy using Apache HTTP Server this way:
```shell
ProxyPass /api/v1 http://127.0.0.1:8080/api/v1
ProxyPassReverse /api/v1 http://127.0.0.1:8080/api/v1
```
and you can add authentication with something like this:
```shell
<Location /api/v1>
AuthType Digest
AuthName "Private"
AuthDigestDomain "/api/v1"
AuthDigestProvider file
AuthUserFile "/etc/httpd/conf/auth_digest"
Require valid-user
</Location>
```
and, of course, you can configure the web server to use HTTPS.
The OpenAPI 3 schema for the exposed API can be found inside the source tree: [openapi.yaml](../httpd/schema/openapi.yaml "OpenAPI 3 specs").
A sample CLI client for the REST API can be found inside the source tree [rest-api-cli](../examples/rest-api-cli) directory.
You can also generate your own REST client in your preferred programming language, or even bash scripts, using an OpenAPI generator such as [swagger-codegen](https://github.com/swagger-api/swagger-codegen) or [OpenAPI Generator](https://openapi-generator.tech/).
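As a hedged example of a direct call, assuming HTTP basic authentication is configured as described above (the `version` endpoint is used for illustration; see `openapi.yaml` for the authoritative paths):
```shell
curl -u admin:password "http://127.0.0.1:8080/api/v1/version"
```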

# S3 Compatible Object Storage backends
To connect SFTPGo to AWS, you need to specify credentials, a `bucket` and a `region`. Here is the list of available [AWS regions](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-available-regions). For example, if your bucket is in Frankfurt, you have to set the region to `eu-central-1`. You can specify an AWS [storage class](https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) too. Leave it blank to use the default AWS storage class. An endpoint is required if you are connecting to an S3-compatible storage such as [MinIO](https://min.io/).
The AWS SDK has different options for credentials ([more details](https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html)). We support:
1. Providing [Access Keys](https://docs.aws.amazon.com/general/latest/gr/aws-sec-cred-types.html#access-keys-and-secret-access-keys).
2. Use IAM roles for Amazon EC2
3. Use IAM roles for tasks if your application uses an ECS task definition
So, you need to provide access keys to activate option 1, or leave them blank to use the other ways to specify credentials.
Most S3 backends require HTTPS connections, so if you are running SFTPGo as a Docker image please be sure to uncomment the line that installs `ca-certificates` inside your `Dockerfile`, to be able to properly verify certificate authorities.
By specifying a different `key_prefix`, you can assign different "folders" of the same bucket to different users. This is similar to a chroot directory for a local filesystem. Each SFTP/SCP user can only access the assigned folder and its contents. The folder identified by `key_prefix` does not need to be pre-created.
SFTPGo uses multipart uploads and parallel downloads for storing and retrieving files from S3.
For multipart uploads you can customize the parts size and the upload concurrency. Please note that if the upload bandwidth between the SFTP client and SFTPGo is greater than the upload bandwidth between SFTPGo and S3, then the SFTP client has to wait for the upload of the last parts to S3 after it ends the file upload to SFTPGo, and it may time out. Keep this in mind if you customize these parameters.
The configured bucket must exist.
Some SFTP commands don't work over S3:
- `symlink` and `chtimes` will fail
- `chown` and `chmod` are silently ignored
- `truncate` is not supported
- opening a file for both reading and writing at the same time is not supported
- upload resume is not supported
- upload mode `atomic` is ignored since S3 uploads are already atomic
Other notes:
- `rename` is a two-step operation: a server-side copy followed by a deletion. So it is not atomic, as it is for the local filesystem.
- We don't support renaming non-empty directories: we would have to rename all the contents too, and this could take a long time. Think about directories with thousands of files: we would have to make an AWS API call for each one.
- For server-side encryption, you have to configure the mapped bucket to automatically encrypt objects.
- A local home directory is still required to store temporary files.
- Clients that require advanced filesystem-like features, such as `sshfs`, are not supported.

docs/service.md Normal file

@@ -0,0 +1,145 @@
# Running SFTPGo as a service
Download a binary SFTPGo [release](https://github.com/drakkan/sftpgo/releases) or a build artifact for the [latest commit](https://github.com/drakkan/sftpgo/actions) or build SFTPGo yourself.
Run the following instructions from the directory that contains the sftpgo binary and the accompanying files.
## Linux
The easiest way to run SFTPGo as a service is to download and install the pre-compiled deb/rpm package, or to use one of the Arch Linux PKGBUILDs we maintain.
This section describes the procedure to use if you prefer to build SFTPGo yourself, or if you want to download and configure a pre-built release tarball.
A `systemd` sample [service](../init/sftpgo.service "systemd service") can be found inside the source tree.
Here are some basic instructions to run SFTPGo as a service using a dedicated `sftpgo` system account.
Please run the following commands from the directory where you downloaded/compiled SFTPGo:
```bash
# create the sftpgo user and group
sudo groupadd --system sftpgo
sudo useradd --system \
--gid sftpgo \
--no-create-home \
--home-dir /var/lib/sftpgo \
--shell /usr/sbin/nologin \
--comment "SFTPGo user" \
sftpgo
# create the required directories
sudo mkdir -p /etc/sftpgo \
/var/lib/sftpgo \
/usr/share/sftpgo
# install the sftpgo executable
sudo install -Dm755 sftpgo /usr/bin/sftpgo
# install the default configuration file, edit it if required
sudo install -Dm644 sftpgo.json /etc/sftpgo/
# override some configuration keys using environment variables
sudo sh -c 'echo "SFTPGO_HTTPD__TEMPLATES_PATH=/usr/share/sftpgo/templates" > /etc/sftpgo/sftpgo.env'
sudo sh -c 'echo "SFTPGO_HTTPD__STATIC_FILES_PATH=/usr/share/sftpgo/static" >> /etc/sftpgo/sftpgo.env'
sudo sh -c 'echo "SFTPGO_HTTPD__BACKUPS_PATH=/var/lib/sftpgo/backups" >> /etc/sftpgo/sftpgo.env'
sudo sh -c 'echo "SFTPGO_DATA_PROVIDER__CREDENTIALS_PATH=/var/lib/sftpgo/credentials" >> /etc/sftpgo/sftpgo.env'
# if you use a file based data provider such as sqlite or bolt, consider setting the database path too, for example:
#sudo sh -c 'echo "SFTPGO_DATA_PROVIDER__NAME=/var/lib/sftpgo/sftpgo.db" >> /etc/sftpgo/sftpgo.env'
# also export the database path in the current shell, so that the initprovider command below works with the SQLite provider:
#export SFTPGO_DATA_PROVIDER__NAME=/var/lib/sftpgo/sftpgo.db
# install static files and templates for the web UI
sudo cp -r static templates /usr/share/sftpgo/
# set files and directory permissions
sudo chown -R sftpgo:sftpgo /etc/sftpgo /var/lib/sftpgo
sudo chmod 750 /etc/sftpgo /var/lib/sftpgo
sudo chmod 640 /etc/sftpgo/sftpgo.json /etc/sftpgo/sftpgo.env
# initialize the configured data provider
# if you want to use MySQL or PostgreSQL you need to create the configured database before running the initprovider command
sudo -E su - sftpgo -m -s /bin/bash -c 'sftpgo initprovider -c /etc/sftpgo'
# install the systemd service
sudo install -Dm644 init/sftpgo.service /etc/systemd/system
# start the service
sudo systemctl start sftpgo
# verify that the service is started
sudo systemctl status sftpgo
# automatically start sftpgo on boot
sudo systemctl enable sftpgo
# optional, install the REST API CLI. It requires python-requests to run
sudo install -Dm755 examples/rest-api-cli/sftpgo_api_cli /usr/bin/sftpgo_api_cli
# optional, create shell completion script, for example for bash
sudo sh -c '/usr/bin/sftpgo gen completion bash > /usr/share/bash-completion/completions/sftpgo'
# optional, create man pages
sudo /usr/bin/sftpgo gen man -d /usr/share/man/man1
```
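If SFTPGo logs to standard output (i.e., no log file is configured), the systemd journal collects its output and you can inspect it like this; service start/stop events are recorded there in any case:
```bash
# show the service logs collected by the systemd journal
sudo journalctl -u sftpgo
# follow new log entries as they arrive
sudo journalctl -u sftpgo -f
```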
## macOS
For macOS, a `launchd` sample [service](../init/com.github.drakkan.sftpgo.plist "launchd plist") can be found inside the source tree. The `launchd` plist assumes that SFTPGo has `/usr/local/opt/sftpgo` as base directory.
Here are some basic instructions to run SFTPGo as a service. Please run the following commands from the directory where you downloaded SFTPGo:
```bash
# create the required directories
sudo mkdir -p /usr/local/opt/sftpgo/init \
/usr/local/opt/sftpgo/var/lib \
/usr/local/opt/sftpgo/usr/share \
/usr/local/opt/sftpgo/var/log \
/usr/local/opt/sftpgo/etc \
/usr/local/opt/sftpgo/bin
# install sftpgo executable
sudo cp sftpgo /usr/local/opt/sftpgo/bin/
# install the launchd service
sudo cp init/com.github.drakkan.sftpgo.plist /usr/local/opt/sftpgo/init/
sudo chown root:wheel /usr/local/opt/sftpgo/init/com.github.drakkan.sftpgo.plist
# install the default configuration file, edit it if required
sudo cp sftpgo.json /usr/local/opt/sftpgo/etc/
# install static files and templates for the web UI
sudo cp -r static templates /usr/local/opt/sftpgo/usr/share/
# initialize the configured data provider
# if you want to use MySQL or PostgreSQL you need to create the configured database before running the initprovider command
sudo /usr/local/opt/sftpgo/bin/sftpgo initprovider -c /usr/local/opt/sftpgo/etc/
# add sftpgo to the launch daemons
sudo ln -s /usr/local/opt/sftpgo/init/com.github.drakkan.sftpgo.plist /Library/LaunchDaemons/com.github.drakkan.sftpgo.plist
# start the service and enable it to start on boot
sudo launchctl load -w /Library/LaunchDaemons/com.github.drakkan.sftpgo.plist
# verify that the service is started
sudo launchctl list com.github.drakkan.sftpgo
# optional, install the REST API CLI. It requires the python-requests module, which is not installed by default
sudo cp examples/rest-api-cli/sftpgo_api_cli /usr/local/opt/sftpgo/bin/
```
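To stop the service and remove it from the launch daemons, you can reverse the steps above:
```bash
# stop the service and disable it at boot
sudo launchctl unload -w /Library/LaunchDaemons/com.github.drakkan.sftpgo.plist
# remove the symlink created during installation
sudo rm /Library/LaunchDaemons/com.github.drakkan.sftpgo.plist
```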
## Windows
On Windows, you can register SFTPGo as a Windows Service. Take a look at the CLI usage to learn how to do this:
```powershell
PS> sftpgo.exe service --help
Manage SFTPGo Windows Service

Usage:
  sftpgo service [command]

Available Commands:
  install     Install SFTPGo as Windows Service
  reload      Reload the SFTPGo Windows Service sending a "paramchange" request
  rotatelogs  Signal to the running service to rotate the logs
  start       Start SFTPGo Windows Service
  status      Retrieve the status for the SFTPGo Windows Service
  stop        Stop SFTPGo Windows Service
  uninstall   Uninstall SFTPGo Windows Service

Flags:
  -h, --help   help for service

Use "sftpgo service [command] --help" for more information about a command.
```
The `install` subcommand accepts the same flags that are valid for `serve`.
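For example, assuming you want to store the configuration and logs under the installation directory (the paths here are illustrative), you could install the service like this:
```powershell
PS> sftpgo.exe service install --config-dir "C:\Program Files\SFTPGo" --log-file-path "C:\Program Files\SFTPGo\sftpgo.log"
```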
After installing SFTPGo as a Windows Service, please remember to allow network access to the SFTPGo executable using something like this:
```powershell
PS> netsh advfirewall firewall add rule name="SFTPGo Service" dir=in action=allow program="C:\Program Files\SFTPGo\sftpgo.exe"
```
Alternatively, you can allow access through the Windows Firewall GUI.
The Windows installer will register the service and allow network access for it automatically.

docs/ssh-commands.md Normal file

@@ -0,0 +1,46 @@
# SSH commands
Some SSH commands are implemented directly inside SFTPGo, while for others we use system commands that need to be installed and in your system's `PATH`.
For system commands we have no direct control over file creation/deletion, so there are some limitations:
- we cannot allow them if the target directory contains virtual folders or file extension filters
- system commands work only on the local filesystem
- we cannot avoid leaking real filesystem paths
- quota checking is suboptimal
- the maximum size restriction on a single file is not respected
If quota is enabled and SFTPGo receives a system command, the used size and number of files are checked when the command starts, not while new files are created or deleted. While the command is running, the number of files is not checked; the remaining size is calculated as the difference between the maximum allowed quota and the used one, and it is checked against the bytes transferred via SSH. The command is aborted if it uploads more bytes than the remaining allowed size calculated at the command start. However, we only see the bytes that the remote command sends to the local one via SSH. These bytes contain both protocol commands and files, so the size of the files differs from the size transferred via SSH: for example, a command can send compressed files, or a protocol command (a few bytes) could delete a big file. To mitigate these issues, quotas are recalculated at the command end with a full scan of the directory specified for the system command. This could be heavy for big directories. If you need both system commands and quotas, consider disabling quota restrictions and periodically updating quota usage yourself using the REST API.
For these reasons, system command usage should be limited as much as possible. We currently support the following system commands:
- `git-receive-pack`, `git-upload-pack`, `git-upload-archive`. These commands enable support for Git repositories over SSH. They need to be installed and in your system's `PATH`.
- `rsync`. The `rsync` command needs to be installed and in your system's `PATH`.
At least the following permissions are required to run system commands:
- `list`
- `download`
- `upload`
- `create_dirs`
- `overwrite`
- `delete`
For `rsync` we cannot prevent it from creating symlinks, so if the `create_symlinks` permission is granted we add the `--safe-links` option, if not already set, to the received `rsync` command. This should prevent the creation of symlinks that point outside the home directory.
If the user cannot create symlinks, we add the `--munge-links` option, if not already set, to the received `rsync` command. This should make symlinks unusable (but manually recoverable).
SFTPGo supports the following built-in SSH commands:
- `scp`. SFTPGo implements the SCP protocol itself, so we can support it for cloud filesystems too and avoid the limitations of the other system commands. SCP between two remote hosts is supported using the `-3` scp option.
- `md5sum`, `sha1sum`, `sha256sum`, `sha384sum`, `sha512sum`. Useful to check message digests for uploaded files.
- `cd`, `pwd`. Some SFTP clients do not support the SFTP SSH_FXP_REALPATH packet type, so they use `cd` and `pwd` SSH commands to get the initial directory. Currently `cd` does nothing and `pwd` always returns the `/` path.
- `sftpgo-copy`. This is a built-in copy implementation. It allows server-side copies of files and directories. The first argument is the source file/directory and the second one is the destination, for example `sftpgo-copy <src> <dst>`. The command will fail if the destination exists. Copying directories that span virtual folders is not supported. Only the local filesystem is supported: a recursive copy on Cloud Storage filesystems would require a new request for every file anyway, so a real server-side copy is not possible.
- `sftpgo-remove`. This is a built-in remove implementation. It allows removing single files and recursively removing directories. The first argument is the file/directory to remove, for example `sftpgo-remove <dst>`. Only the local filesystem is supported: a recursive remove on Cloud Storage filesystems would require a new request for every file anyway, so a server-side remove is not possible.
The following SSH commands are enabled by default:
- `md5sum`
- `sha1sum`
- `cd`
- `pwd`
- `scp`
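This default set can be changed through the `enabled_ssh_commands` key in the `sftpd` section of the configuration file. A minimal sketch showing only the relevant fragment of `sftpgo.json`:
```json
{
  "sftpd": {
    "enabled_ssh_commands": ["md5sum", "sha1sum", "cd", "pwd", "scp"]
  }
}
```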

docs/virtual-folders.md Normal file

@@ -0,0 +1,31 @@
# Virtual Folders
A virtual folder is a mapping between an SFTP/SCP virtual path and a filesystem path outside the user home directory.
The specified paths must be absolute and the virtual path cannot be "/"; it must be a subdirectory.
The parent directory of the specified virtual path must exist. SFTPGo will try to automatically create any missing parent directories for the configured virtual folders at user login.
For each virtual folder, the following properties can be configured:
- `mapped_path`, the full absolute path to the filesystem path to expose as virtual folder
- `virtual_path`, the SFTP/SCP absolute path to use to expose the mapped path
- `quota_size`, maximum size allowed, as bytes. `0` means unlimited, `-1` means the folder is included in the user quota
- `quota_files`, maximum number of files allowed. `0` means unlimited, `-1` means the folder is included in the user quota
For example, if you configure `/tmp/mapped` or `C:\mapped` as the mapped path and `/vfolder` as the virtual path, then SFTP/SCP users can access the mapped path via the `/vfolder` SFTP path.
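For reference, here is a minimal sketch of how such a mapping could look inside a user definition; the field names follow the properties listed above, and the exact schema is described in the OpenAPI spec:
```json
{
  "username": "user1",
  "virtual_folders": [
    {
      "virtual_path": "/vfolder",
      "mapped_path": "/tmp/mapped",
      "quota_size": 0,
      "quota_files": 0
    }
  ]
}
```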
The same virtual folder, identified by the `mapped_path`, can be shared among users and different folder quota limits for each user are supported.
Folder quota limits can also be included inside the user quota, but in this case the folder is considered "private" and sharing it with other users will break the user quota calculation.
You don't need to create virtual folders inside the data provider to associate them with users: any missing virtual folder will be automatically created when you add/update a user. You only have to create the folder on the filesystem.
Using the REST API you can:
- monitor folders quota usage
- scan quota for folders
- inspect the relationships among users and folders
- delete a virtual folder. SFTPGo removes folders from the data provider, no files deletion will occur
If you remove a folder from the data provider, any user relationships will be cleared. If the deleted folder is included inside the user quota, you need to perform a user quota scan to update the quota. An orphan virtual folder is not automatically deleted, since adding it again later would require a quota scan that could be quite expensive; you can easily list orphan folders using the REST API and delete them if they are no longer needed.
Overlapping virtual paths are not allowed for the same user; overlapping mapped paths are allowed only if quota tracking is globally disabled in the configuration file (`track_quota` must be set to `0`).
Virtual folders are supported for local filesystem only.

docs/web-admin.md Normal file

@@ -0,0 +1,8 @@
# Web Admin
You can easily build your own interface using the exposed REST API. SFTPGo also provides a very basic built-in web interface that allows you to manage users and connections.
With the default `httpd` configuration, the web admin is available at the following URL:
[http://127.0.0.1:8080/web](http://127.0.0.1:8080/web)
The web interface can be protected using HTTP basic authentication and exposed via HTTPS. If you need more advanced security features, you can set up a reverse proxy as explained for the [REST API](./rest-api.md).

docs/webdav.md Normal file

@@ -0,0 +1,28 @@
# WebDAV
The experimental `WebDAV` support can be enabled by setting a `bind_port` inside the `webdavd` configuration section.
Each user has his own path like `http/s://<SFTPGo ip>:<WebDAV port>/<username>` and must authenticate using password credentials.
WebDAV is quite a different protocol from SCP/FTP: there is no session concept, and each command is a separate HTTP request that must be authenticated. Performance can be greatly improved by enabling caching for authenticated users (it is enabled by default), so that SFTPGo doesn't need to do a data provider query and a password check for each request.
If you enable quota support, a data provider query is still required after each file upload, to update the user quota.
The caching configuration allows you to set:
- `expiration_time`, in minutes. If a user has been cached for more than the specified minutes, it will be removed from the cache and a new data provider query will be performed. Please note that the `last_login` field will not be updated, and the `external_auth_hook`, `pre_login_hook` and `check_password_hook` will not be executed, if the user is obtained from the cache.
- `max_size`, the maximum number of users to cache. When this limit is reached, the user with the oldest expiration date is removed from the cache. `0` means no limit; however, the cache size cannot exceed the number of users, so if you have a small number of users you can leave this setting at `0`.
Users are automatically removed from the cache after an update/delete.
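As a sketch, the two settings above could look like this inside the `webdavd` section of the configuration file; the exact nesting is an assumption here, so check the default `sftpgo.json` for the authoritative layout:
```json
{
  "webdavd": {
    "cache": {
      "expiration_time": 0,
      "max_size": 50
    }
  }
}
```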
WebDAV should work as expected for most use cases but there are some minor issues and some missing features.
Known issues:
- removing a directory tree on Cloud Storage backends could generate a `not found` error when removing the last (virtual) directory. This happens if the client walks the directory tree itself and removes files and directories one by one instead of issuing a single remove command
- the [WebDAV library](https://pkg.go.dev/golang.org/x/net/webdav?tab=doc) we use opens a file to execute a `stat` and sometimes reads some bytes to detect the content type. We are unable to distinguish a `stat` from a `download` for now, so to properly list a directory you need to grant both `list` and `download` permissions
- the WebDAV library does not always return a proper error code/message; most of the time it simply returns `Method not Allowed`. I'll try to improve the library's error codes in the future
- if an object within a directory cannot be accessed, for example due to OS permission issues or because it is a missing mapped path for a virtual folder, the directory listing will fail. In SFTP/FTP the directory listing will succeed and you'll only get an error if you try to access the problematic file/directory
We plan to add [Dead Properties](https://tools.ietf.org/html/rfc4918#section-3) support in future releases. We need a design decision here: probably the best solution is to store dead properties inside the data provider, but this could significantly increase its size. Alternatively, we could store them on disk for the local filesystem and as metadata for Cloud Storage, but this means a separate `HEAD` request would be needed to retrieve the dead properties for an S3 file; for big folders this would mean a lot of requests to the Cloud Provider, so I don't like this solution. Another option is to expose a hook and let you implement `dead properties` outside SFTPGo.
If you find any other quirks or problems, please let us know by opening a GitHub issue, thank you!


@@ -0,0 +1,58 @@
# Authy
These examples show how to integrate the [Twilio Authy API](https://www.twilio.com/docs/authy/api) for One-Time Password logins.
The examples assume that the user has the free [Authy app](https://authy.com/) installed and uses it to generate offline [TOTP](https://en.wikipedia.org/wiki/Time-based_One-time_Password_algorithm) codes (soft tokens).
You first need to [create an Authy Application in the Twilio Console](https://twilio.com/console/authy/applications?_ga=2.205553366.451688189.1597667213-1526360003.1597667213), then you can create a new Authy user and store a reference to it in the matching SFTPGo account.
Verify that your Authy application is successfully registered:
```bash
export AUTHY_API_KEY=<your api key here>
curl 'https://api.authy.com/protected/json/app/details' -H "X-Authy-API-Key: $AUTHY_API_KEY"
```
Now create an Authy user:
```bash
curl -XPOST "https://api.authy.com/protected/json/users/new" \
-H "X-Authy-API-Key: $AUTHY_API_KEY" \
--data-urlencode user[email]="user@domain.com" \
--data-urlencode user[cellphone]="317-338-9302" \
--data-urlencode user[country_code]="54"
```
The response is something like this:
```json
{"message":"User created successfully.","user":{"id":xxxxxxxx},"success":true}
```
Save the user id somewhere and add a reference to it in the matching SFTPGo account.
After this step you can use the Authy app installed on your phone to generate TOTP codes.
Now you can verify the token using an HTTP GET request:
```bash
export TOKEN=<TOTP you read from Authy app>
export AUTHY_ID=<user id>
curl -i "https://api.authy.com/protected/json/verify/${TOKEN}/${AUTHY_ID}" \
-H "X-Authy-API-Key: $AUTHY_API_KEY"
```
So, inside your hook you need to check:
- the HTTP response code for the verify request: it must be `200`
- the JSON response body: it must contain the key `success` with the value `true` (as a string)
If these conditions are met, the token is valid and you can allow the user to log in.
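For example, a shell-based hook could perform this check like this; a minimal sketch, assuming `jq` is available and the `TOKEN`, `AUTHY_ID` and `AUTHY_API_KEY` variables are set as above:
```bash
response=$(curl -s -w '\n%{http_code}' \
  "https://api.authy.com/protected/json/verify/${TOKEN}/${AUTHY_ID}" \
  -H "X-Authy-API-Key: $AUTHY_API_KEY")
# the last line holds the HTTP status code, the rest is the JSON body
http_code=$(echo "$response" | tail -n1)
body=$(echo "$response" | sed '$d')
if [ "$http_code" = "200" ] && [ "$(echo "$body" | jq -r .success)" = "true" ]; then
  echo "token is valid, allow the login"
else
  echo "token is invalid, deny the login"
fi
```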
We provide the following examples:
- [Keyboard interactive authentication](./keyint/README.md) for 2FA using password + Authy one time token.
- [External authentication](./extauth/README.md) using Authy one time tokens as passwords.
- [Check password hook](./checkpwd/README.md) for 2FA using a password consisting of a fixed string and a One Time Token.
Please note that these are sample programs, not intended for production use. You should write your own hook based on them, and you should prefer HTTP-based hooks if performance is a concern.


@@ -0,0 +1,3 @@
# Authy 2FA via check password hook
This example shows how to implement 2FA via the check password hook using a password consisting of a fixed part and an Authy TOTP token. The hook checks the TOTP token using the Authy API, and SFTPGo checks the fixed part. Please read the [sample code](./main.go), it should be self-explanatory.


@@ -0,0 +1,3 @@
module github.com/drakkan/sftpgo/authy/checkpwd

go 1.15


@@ -0,0 +1,106 @@
package main

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
	"os"
	"time"
)

type userMapping struct {
	SFTPGoUsername string
	AuthyID        int64
	AuthyAPIKey    string
}

type checkPasswordResponse struct {
	// 0 KO, 1 OK, 2 partial success
	Status int `json:"status"`
	// for status == 2 this is the password that SFTPGo will check against the one stored
	// inside the data provider
	ToVerify string `json:"to_verify"`
}

var (
	mapping []userMapping
)

func init() {
	// this is for demo only, you probably want to get this mapping dynamically, for example using a database query
	mapping = append(mapping, userMapping{
		SFTPGoUsername: "<SFTPGo username>",
		AuthyID:        1234567,
		AuthyAPIKey:    "<your api key>",
	})
}

// printResponse writes the JSON response for SFTPGo to stdout and exits:
// a positive status accepts the check, otherwise the login is denied
func printResponse(status int, toVerify string) {
	r := checkPasswordResponse{
		Status:   status,
		ToVerify: toVerify,
	}
	resp, _ := json.Marshal(r)
	fmt.Printf("%v\n", string(resp))
	if status > 0 {
		os.Exit(0)
	} else {
		os.Exit(1)
	}
}

func main() {
	// get credentials from env vars
	username := os.Getenv("SFTPGO_AUTHD_USERNAME")
	password := os.Getenv("SFTPGO_AUTHD_PASSWORD")
	for _, m := range mapping {
		if m.SFTPGoUsername == username {
			// the Authy token length is 7, we assume that we have the password followed by the token
			pwdLen := len(password)
			if pwdLen <= 7 {
				printResponse(0, "")
			}
			pwd := password[:pwdLen-7]
			authyToken := password[pwdLen-7:]
			// now verify the authy token and instruct SFTPGo to check the password if the token is OK
			url := fmt.Sprintf("https://api.authy.com/protected/json/verify/%v/%v", authyToken, m.AuthyID)
			req, err := http.NewRequest(http.MethodGet, url, nil)
			if err != nil {
				log.Fatal(err)
			}
			req.Header.Set("X-Authy-API-Key", m.AuthyAPIKey)
			httpClient := &http.Client{
				Timeout: 10 * time.Second,
			}
			resp, err := httpClient.Do(req)
			if err != nil {
				printResponse(0, "")
			}
			defer resp.Body.Close()
			if resp.StatusCode != http.StatusOK {
				// status code 200 is expected
				printResponse(0, "")
			}
			var authyResponse map[string]interface{}
			respBody, err := ioutil.ReadAll(resp.Body)
			if err != nil {
				printResponse(0, "")
			}
			if err := json.Unmarshal(respBody, &authyResponse); err != nil {
				printResponse(0, "")
			}
			// use the two-value type assertion so an unexpected payload cannot panic
			if success, ok := authyResponse["success"].(string); ok && success == "true" {
				printResponse(2, pwd)
			}
			printResponse(0, "")
		}
	}
	// no mapping found
	printResponse(0, "")
}


@@ -0,0 +1,3 @@
# Authy external authentication
This example shows how to use an Authy TOTP token as the password for SFTPGo users. Please read the [sample code](./main.go), it should be self-explanatory.


@@ -0,0 +1,3 @@
module github.com/drakkan/sftpgo/authy/extauth

go 1.15


@@ -0,0 +1,109 @@
package main

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
	"os"
	"path/filepath"
	"time"
)

type userMapping struct {
	SFTPGoUsername string
	AuthyID        int64
	AuthyAPIKey    string
}

// we assume that the SFTPGo user already exists, we only check the one time token.
// If you need to create the SFTPGo user, more fields are needed here
type minimalSFTPGoUser struct {
	Status      int                 `json:"status,omitempty"`
	Username    string              `json:"username"`
	HomeDir     string              `json:"home_dir,omitempty"`
	Permissions map[string][]string `json:"permissions"`
}

var (
	mapping []userMapping
)

func init() {
	// this is for demo only, you probably want to get this mapping dynamically, for example using a database query
	mapping = append(mapping, userMapping{
		SFTPGoUsername: "<SFTPGo username>",
		AuthyID:        1234567,
		AuthyAPIKey:    "<your api key>",
	})
}

// printResponse writes the JSON user definition to stdout and exits:
// an empty username denies the login
func printResponse(username string) {
	u := minimalSFTPGoUser{
		Username: username,
		Status:   1,
		HomeDir:  filepath.Join(os.TempDir(), username),
	}
	u.Permissions = make(map[string][]string)
	u.Permissions["/"] = []string{"*"}
	resp, _ := json.Marshal(u)
	fmt.Printf("%v\n", string(resp))
	if len(username) > 0 {
		os.Exit(0)
	} else {
		os.Exit(1)
	}
}

func main() {
	// get credentials from env vars
	username := os.Getenv("SFTPGO_AUTHD_USERNAME")
	password := os.Getenv("SFTPGO_AUTHD_PASSWORD")
	if len(password) == 0 {
		// login method is not password
		printResponse("")
		return
	}
	for _, m := range mapping {
		if m.SFTPGoUsername == username {
			// mapping found, we can now verify the token
			url := fmt.Sprintf("https://api.authy.com/protected/json/verify/%v/%v", password, m.AuthyID)
			req, err := http.NewRequest(http.MethodGet, url, nil)
			if err != nil {
				log.Fatal(err)
			}
			req.Header.Set("X-Authy-API-Key", m.AuthyAPIKey)
			httpClient := &http.Client{
				Timeout: 10 * time.Second,
			}
			resp, err := httpClient.Do(req)
			if err != nil {
				printResponse("")
			}
			defer resp.Body.Close()
			if resp.StatusCode != http.StatusOK {
				// status code 200 is expected
				printResponse("")
			}
			var authyResponse map[string]interface{}
			respBody, err := ioutil.ReadAll(resp.Body)
			if err != nil {
				printResponse("")
			}
			if err := json.Unmarshal(respBody, &authyResponse); err != nil {
				printResponse("")
			}
			// use the two-value type assertion so an unexpected payload cannot panic
			if success, ok := authyResponse["success"].(string); ok && success == "true" {
				printResponse(username)
			}
			printResponse("")
		}
	}
	// no mapping found
	printResponse("")
}

Some files were not shown because too many files have changed in this diff.