Mirror of https://github.com/drakkan/sftpgo.git, synced 2025-12-08 07:10:56 +03:00
s3: upload concurrency is now configurable
Please note that if the upload bandwidth between the SFTP client and SFTPGo is greater than the upload bandwidth between SFTPGo and S3, then the SFTP client has to wait for the last parts to be uploaded to S3 after it has finished sending the file to SFTPGo, and it may time out. Keep this in mind if you customize the part size and upload concurrency.
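For context, these two settings correspond to the part size and concurrency options of the multipart uploader in the AWS SDK for Go. The sketch below only illustrates that mapping; the helper name and surrounding code are assumptions, not the SFTPGo source:

package example

import (
	"io"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3/s3manager"
)

// uploadToS3 is a hypothetical helper, not the SFTPGo implementation: it shows
// how a part size (in MB) and an upload concurrency value map to the options
// of the AWS SDK for Go multipart uploader.
func uploadToS3(bucket, key string, body io.Reader, partSizeMB int64, concurrency int) error {
	sess := session.Must(session.NewSession())
	uploader := s3manager.NewUploader(sess, func(u *s3manager.Uploader) {
		if partSizeMB > 0 {
			u.PartSize = partSizeMB * 1024 * 1024 // each part is buffered; the SDK minimum is 5 MB
		}
		if concurrency > 0 {
			u.Concurrency = concurrency // number of parts uploaded to S3 in parallel
		}
	})
	_, err := uploader.Upload(&s3manager.UploadInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
		Body:   body,
	})
	return err
}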
@@ -482,7 +482,10 @@ func compareS3Config(expected *dataprovider.User, actual *dataprovider.User) err
		return errors.New("S3 storage class mismatch")
	}
	if expected.FsConfig.S3Config.UploadPartSize != actual.FsConfig.S3Config.UploadPartSize {
		return errors.New("S3 upload part size class mismatch")
		return errors.New("S3 upload part size mismatch")
	}
	if expected.FsConfig.S3Config.UploadConcurrency != actual.FsConfig.S3Config.UploadConcurrency {
		return errors.New("S3 upload concurrency mismatch")
	}
	if expected.FsConfig.S3Config.KeyPrefix != actual.FsConfig.S3Config.KeyPrefix &&
		expected.FsConfig.S3Config.KeyPrefix+"/" != actual.FsConfig.S3Config.KeyPrefix {
@@ -400,6 +400,12 @@ func TestAddUserInvalidFsConfig(t *testing.T) {
	if err != nil {
		t.Errorf("unexpected error adding user with invalid fs config: %v", err)
	}
	u.FsConfig.S3Config.UploadPartSize = 0
	u.FsConfig.S3Config.UploadConcurrency = -1
	_, _, err = httpd.AddUser(u, http.StatusBadRequest)
	if err != nil {
		t.Errorf("unexpected error adding user with invalid fs config: %v", err)
	}
	u = getTestUser()
	u.FsConfig.Provider = 2
	u.FsConfig.GCSConfig.Bucket = ""
@@ -646,6 +652,7 @@ func TestUserS3Config(t *testing.T) {
	user.FsConfig.S3Config.AccessKey = "Server-Access-Key"
	user.FsConfig.S3Config.AccessSecret = "Server-Access-Secret"
	user.FsConfig.S3Config.Endpoint = "http://127.0.0.1:9000"
	user.FsConfig.S3Config.UploadPartSize = 8
	user, _, err = httpd.UpdateUser(user, http.StatusOK)
	if err != nil {
		t.Errorf("unable to update user: %v", err)
@@ -668,6 +675,7 @@ func TestUserS3Config(t *testing.T) {
	user.FsConfig.S3Config.AccessKey = "Server-Access-Key1"
	user.FsConfig.S3Config.Endpoint = "http://localhost:9000"
	user.FsConfig.S3Config.KeyPrefix = "somedir/subdir"
	user.FsConfig.S3Config.UploadConcurrency = 5
	user, _, err = httpd.UpdateUser(user, http.StatusOK)
	if err != nil {
		t.Errorf("unable to update user: %v", err)
@@ -679,6 +687,8 @@ func TestUserS3Config(t *testing.T) {
	user.FsConfig.S3Config.AccessSecret = ""
	user.FsConfig.S3Config.Endpoint = ""
	user.FsConfig.S3Config.KeyPrefix = ""
	user.FsConfig.S3Config.UploadPartSize = 0
	user.FsConfig.S3Config.UploadConcurrency = 0
	user, _, err = httpd.UpdateUser(user, http.StatusOK)
	if err != nil {
		t.Errorf("unable to update user: %v", err)
@@ -691,6 +701,8 @@ func TestUserS3Config(t *testing.T) {
	user.FsConfig.S3Config.AccessSecret = ""
	user.FsConfig.S3Config.Endpoint = ""
	user.FsConfig.S3Config.KeyPrefix = "somedir/subdir"
	user.FsConfig.S3Config.UploadPartSize = 6
	user.FsConfig.S3Config.UploadConcurrency = 4
	user, _, err = httpd.UpdateUser(user, http.StatusOK)
	if err != nil {
		t.Errorf("unable to update user: %v", err)
@@ -2017,6 +2029,7 @@ func TestWebUserS3Mock(t *testing.T) {
	user.FsConfig.S3Config.StorageClass = "Standard"
	user.FsConfig.S3Config.KeyPrefix = "somedir/subdir/"
	user.FsConfig.S3Config.UploadPartSize = 5
	user.FsConfig.S3Config.UploadConcurrency = 4
	form := make(url.Values)
	form.Set("username", user.Username)
	form.Set("home_dir", user.HomeDir)
@@ -2050,8 +2063,16 @@ func TestWebUserS3Mock(t *testing.T) {
	req.Header.Set("Content-Type", contentType)
	rr = executeRequest(req)
	checkResponseCode(t, http.StatusOK, rr.Code)
	// now add the user
	// test invalid s3_concurrency
	form.Set("s3_upload_part_size", strconv.FormatInt(user.FsConfig.S3Config.UploadPartSize, 10))
	form.Set("s3_upload_concurrency", "a")
	b, contentType, _ = getMultipartFormData(form, "", "")
	req, _ = http.NewRequest(http.MethodPost, webUserPath+"/"+strconv.FormatInt(user.ID, 10), &b)
	req.Header.Set("Content-Type", contentType)
	rr = executeRequest(req)
	checkResponseCode(t, http.StatusOK, rr.Code)
	// now add the user
	form.Set("s3_upload_concurrency", strconv.Itoa(user.FsConfig.S3Config.UploadConcurrency))
	b, contentType, _ = getMultipartFormData(form, "", "")
	req, _ = http.NewRequest(http.MethodPost, webUserPath+"/"+strconv.FormatInt(user.ID, 10), &b)
	req.Header.Set("Content-Type", contentType)
@@ -2072,9 +2093,6 @@ func TestWebUserS3Mock(t *testing.T) {
	if updateUser.ExpirationDate != 1577836800000 {
		t.Errorf("invalid expiration date: %v", updateUser.ExpirationDate)
	}
	if updateUser.FsConfig.Provider != user.FsConfig.Provider {
		t.Error("fs provider mismatch")
	}
	if updateUser.FsConfig.S3Config.Bucket != user.FsConfig.S3Config.Bucket {
		t.Error("s3 bucket mismatch")
	}
@@ -2099,6 +2117,9 @@ func TestWebUserS3Mock(t *testing.T) {
	if updateUser.FsConfig.S3Config.UploadPartSize != user.FsConfig.S3Config.UploadPartSize {
		t.Error("s3 upload part size mismatch")
	}
	if updateUser.FsConfig.S3Config.UploadConcurrency != user.FsConfig.S3Config.UploadConcurrency {
		t.Error("s3 upload concurrency mismatch")
	}
	if len(updateUser.Filters.FileExtensions) != 2 {
		t.Errorf("unexpected extensions filter: %+v", updateUser.Filters.FileExtensions)
	}
@@ -369,6 +369,11 @@ func TestCompareUserFsConfig(t *testing.T) {
		t.Errorf("S3 upload part size does not match")
	}
	expected.FsConfig.S3Config.UploadPartSize = 0
	expected.FsConfig.S3Config.UploadConcurrency = 3
	err = compareUserFsConfig(expected, actual)
	if err == nil {
		t.Errorf("S3 upload concurrency does not match")
	}
}

func TestCompareUserGCSConfig(t *testing.T) {
@@ -1009,7 +1009,7 @@ components:
        description: the buffer size (in MB) to use for multipart uploads. The minimum allowed part size is 5MB, and if this value is set to zero, the default value (5MB) for the AWS SDK will be used. The minimum allowed value is 5.
      upload_concurrency:
        type: integer
        description: the number of parts to upload in parallel. If this value is set to zero, 2 will be used
        description: the number of parts to upload in parallel. If this value is set to zero, the default value (2) will be used
      key_prefix:
        type: string
        description: key_prefix is similar to a chroot directory for a local filesystem. If specified the SFTP user will only see contents that starts with this prefix and so you can restrict access to a specific virtual folder. The prefix, if not empty, must not start with "/" and must end with "/". If empty the whole bucket contents will be available
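As a rough worked example of these two fields: with upload_part_size set to 8 and upload_concurrency set to 4, roughly 8 MB × 4 = 32 MB of file data can be buffered or in flight towards S3 at any time for a single upload, since each concurrent part holds its own buffer of the configured size.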
@@ -333,6 +333,10 @@ func getFsConfigFromUserPostFields(r *http.Request) (dataprovider.Filesystem, er
		if err != nil {
			return fs, err
		}
		fs.S3Config.UploadConcurrency, err = strconv.Atoi(r.Form.Get("s3_upload_concurrency"))
		if err != nil {
			return fs, err
		}
	} else if fs.Provider == 2 {
		fs.GCSConfig.Bucket = r.Form.Get("gcs_bucket")
		fs.GCSConfig.StorageClass = r.Form.Get("gcs_storage_class")