add support for data transfer bandwidth limits

with a total limit or separate settings for uploads and downloads, and
overrides based on the client's IP address.

Limits can be reset using the REST API
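
As a rough sketch, the new limits can be expressed on a user as follows (field names and MB units are inferred from the tests below; the username, example values, and import paths are illustrative assumptions, not a complete reference):

package main

import (
	"fmt"

	"github.com/drakkan/sftpgo/v2/dataprovider"
	"github.com/sftpgo/sdk"
)

func main() {
	// A user-level TotalDataTransfer (in MB) would set a combined upload+download
	// cap; here only per-IP overrides are configured, mirroring TestGetUsersForQuotaCheck.
	user := dataprovider.User{
		BaseUser: sdk.BaseUser{
			Username: "example_user",
		},
		Filters: dataprovider.UserFilters{
			BaseUserFilters: sdk.BaseUserFilters{
				// separate upload/download limits, in MB, applied to clients
				// connecting from the listed source networks
				DataTransferLimits: []sdk.DataTransferLimit{
					{
						Sources:              []string{"172.16.0.0/16"},
						UploadDataTransfer:   50,
						DownloadDataTransfer: 80,
					},
				},
			},
		},
	}

	// limits resolved for a client IP are returned in bytes; 0 means no limit
	ul, dl, total := user.GetDataTransferLimits("172.16.2.3")
	fmt.Println(ul, dl, total) // 52428800 83886080 0, per the assertions in the tests below
	ul, dl, total = user.GetDataTransferLimits("127.1.1.1")
	fmt.Println(ul, dl, total) // 0 0 0: no override matches and no user-level limit is set
}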

Signed-off-by: Nicola Murino <nicola.murino@gmail.com>
Author: Nicola Murino
Date: 2022-01-30 11:42:36 +01:00
parent aca71bff7a
commit 1df1225eed
55 changed files with 3573 additions and 643 deletions

@@ -60,7 +60,7 @@ func TestTransfersCheckerDiskQuota(t *testing.T) {
BaseConnection: conn1,
}
transfer1 := NewBaseTransfer(nil, conn1, nil, filepath.Join(user.HomeDir, "file1"), filepath.Join(user.HomeDir, "file1"),
"/file1", TransferUpload, 0, 0, 120, 0, true, fsUser)
"/file1", TransferUpload, 0, 0, 120, 0, true, fsUser, dataprovider.TransferQuota{})
transfer1.BytesReceived = 150
Connections.Add(fakeConn1)
// the transferschecker will do nothing if there is only one ongoing transfer
@@ -73,7 +73,7 @@ func TestTransfersCheckerDiskQuota(t *testing.T) {
BaseConnection: conn2,
}
transfer2 := NewBaseTransfer(nil, conn2, nil, filepath.Join(user.HomeDir, "file2"), filepath.Join(user.HomeDir, "file2"),
"/file2", TransferUpload, 0, 0, 120, 40, true, fsUser)
"/file2", TransferUpload, 0, 0, 120, 40, true, fsUser, dataprovider.TransferQuota{})
transfer1.BytesReceived = 50
transfer2.BytesReceived = 60
Connections.Add(fakeConn2)
@@ -84,7 +84,7 @@ func TestTransfersCheckerDiskQuota(t *testing.T) {
BaseConnection: conn3,
}
transfer3 := NewBaseTransfer(nil, conn3, nil, filepath.Join(user.HomeDir, "file3"), filepath.Join(user.HomeDir, "file3"),
"/file3", TransferDownload, 0, 0, 120, 0, true, fsUser)
"/file3", TransferDownload, 0, 0, 120, 0, true, fsUser, dataprovider.TransferQuota{})
transfer3.BytesReceived = 60 // this value will be ignored, this is a download
Connections.Add(fakeConn3)
@@ -145,7 +145,7 @@ func TestTransfersCheckerDiskQuota(t *testing.T) {
}
transfer4 := NewBaseTransfer(nil, conn4, nil, filepath.Join(os.TempDir(), folderName, "file1"),
filepath.Join(os.TempDir(), folderName, "file1"), path.Join(vdirPath, "/file1"), TransferUpload, 0, 0,
-100, 0, true, fsFolder)
+100, 0, true, fsFolder, dataprovider.TransferQuota{})
Connections.Add(fakeConn4)
connID5 := xid.New().String()
conn5 := NewBaseConnection(connID5, ProtocolSFTP, "", "", user)
@@ -154,7 +154,7 @@ func TestTransfersCheckerDiskQuota(t *testing.T) {
}
transfer5 := NewBaseTransfer(nil, conn5, nil, filepath.Join(os.TempDir(), folderName, "file2"),
filepath.Join(os.TempDir(), folderName, "file2"), path.Join(vdirPath, "/file2"), TransferUpload, 0, 0,
-100, 0, true, fsFolder)
+100, 0, true, fsFolder, dataprovider.TransferQuota{})
Connections.Add(fakeConn5)
transfer4.BytesReceived = 50
@@ -188,6 +188,17 @@ func TestTransfersCheckerDiskQuota(t *testing.T) {
assert.NoError(t, err)
}
err = transfer1.Close()
assert.NoError(t, err)
err = transfer2.Close()
assert.NoError(t, err)
err = transfer3.Close()
assert.NoError(t, err)
err = transfer4.Close()
assert.NoError(t, err)
err = transfer5.Close()
assert.NoError(t, err)
Connections.Remove(fakeConn1.GetID())
Connections.Remove(fakeConn2.GetID())
Connections.Remove(fakeConn3.GetID())
@@ -207,6 +218,118 @@ func TestTransfersCheckerDiskQuota(t *testing.T) {
assert.NoError(t, err)
}
func TestTransferCheckerTransferQuota(t *testing.T) {
username := "transfers_check_username"
user := dataprovider.User{
BaseUser: sdk.BaseUser{
Username: username,
Password: "test_pwd",
HomeDir: filepath.Join(os.TempDir(), username),
Status: 1,
TotalDataTransfer: 1,
Permissions: map[string][]string{
"/": {dataprovider.PermAny},
},
},
}
err := dataprovider.AddUser(&user, "", "")
assert.NoError(t, err)
connID1 := xid.New().String()
fsUser, err := user.GetFilesystemForPath("/file1", connID1)
assert.NoError(t, err)
conn1 := NewBaseConnection(connID1, ProtocolSFTP, "", "192.168.1.1", user)
fakeConn1 := &fakeConnection{
BaseConnection: conn1,
}
transfer1 := NewBaseTransfer(nil, conn1, nil, filepath.Join(user.HomeDir, "file1"), filepath.Join(user.HomeDir, "file1"),
"/file1", TransferUpload, 0, 0, 0, 0, true, fsUser, dataprovider.TransferQuota{AllowedTotalSize: 100})
transfer1.BytesReceived = 150
Connections.Add(fakeConn1)
// the transferschecker will do nothing if there is only one ongoing transfer
Connections.checkTransfers()
assert.Nil(t, transfer1.errAbort)
connID2 := xid.New().String()
conn2 := NewBaseConnection(connID2, ProtocolSFTP, "", "127.0.0.1", user)
fakeConn2 := &fakeConnection{
BaseConnection: conn2,
}
transfer2 := NewBaseTransfer(nil, conn2, nil, filepath.Join(user.HomeDir, "file2"), filepath.Join(user.HomeDir, "file2"),
"/file2", TransferUpload, 0, 0, 0, 0, true, fsUser, dataprovider.TransferQuota{AllowedTotalSize: 100})
transfer2.BytesReceived = 150
Connections.Add(fakeConn2)
Connections.checkTransfers()
assert.Nil(t, transfer1.errAbort)
assert.Nil(t, transfer2.errAbort)
// now test overquota
transfer1.BytesReceived = 1024*1024 + 1
transfer2.BytesReceived = 0
Connections.checkTransfers()
assert.True(t, conn1.IsQuotaExceededError(transfer1.errAbort))
assert.Nil(t, transfer2.errAbort)
transfer1.errAbort = nil
transfer1.BytesReceived = 1024*1024 + 1
transfer2.BytesReceived = 1024
Connections.checkTransfers()
assert.True(t, conn1.IsQuotaExceededError(transfer1.errAbort))
assert.True(t, conn2.IsQuotaExceededError(transfer2.errAbort))
transfer1.BytesReceived = 0
transfer2.BytesReceived = 0
transfer1.errAbort = nil
transfer2.errAbort = nil
err = transfer1.Close()
assert.NoError(t, err)
err = transfer2.Close()
assert.NoError(t, err)
Connections.Remove(fakeConn1.GetID())
Connections.Remove(fakeConn2.GetID())
connID3 := xid.New().String()
conn3 := NewBaseConnection(connID3, ProtocolSFTP, "", "", user)
fakeConn3 := &fakeConnection{
BaseConnection: conn3,
}
transfer3 := NewBaseTransfer(nil, conn3, nil, filepath.Join(user.HomeDir, "file1"), filepath.Join(user.HomeDir, "file1"),
"/file1", TransferDownload, 0, 0, 0, 0, true, fsUser, dataprovider.TransferQuota{AllowedDLSize: 100})
transfer3.BytesSent = 150
Connections.Add(fakeConn3)
connID4 := xid.New().String()
conn4 := NewBaseConnection(connID4, ProtocolSFTP, "", "", user)
fakeConn4 := &fakeConnection{
BaseConnection: conn4,
}
transfer4 := NewBaseTransfer(nil, conn4, nil, filepath.Join(user.HomeDir, "file2"), filepath.Join(user.HomeDir, "file2"),
"/file2", TransferDownload, 0, 0, 0, 0, true, fsUser, dataprovider.TransferQuota{AllowedDLSize: 100})
transfer4.BytesSent = 150
Connections.Add(fakeConn4)
Connections.checkTransfers()
assert.Nil(t, transfer3.errAbort)
assert.Nil(t, transfer4.errAbort)
transfer3.BytesSent = 512 * 1024
transfer4.BytesSent = 512*1024 + 1
Connections.checkTransfers()
if assert.Error(t, transfer3.errAbort) {
assert.Contains(t, transfer3.errAbort.Error(), ErrReadQuotaExceeded.Error())
}
if assert.Error(t, transfer4.errAbort) {
assert.Contains(t, transfer4.errAbort.Error(), ErrReadQuotaExceeded.Error())
}
Connections.Remove(fakeConn3.GetID())
Connections.Remove(fakeConn4.GetID())
stats := Connections.GetStats()
assert.Len(t, stats, 0)
err = dataprovider.DeleteUser(user.Username, "", "")
assert.NoError(t, err)
err = os.RemoveAll(user.GetHomeDir())
assert.NoError(t, err)
}
func TestAggregateTransfers(t *testing.T) {
checker := transfersCheckerMem{}
checker.AddTransfer(dataprovider.ActiveTransfer{
@@ -221,7 +344,7 @@ func TestAggregateTransfers(t *testing.T) {
CreatedAt: util.GetTimeAsMsSinceEpoch(time.Now()),
UpdatedAt: util.GetTimeAsMsSinceEpoch(time.Now()),
})
-usersToFetch, aggregations := checker.aggregateTransfers()
+usersToFetch, aggregations := checker.aggregateUploadTransfers()
assert.Len(t, usersToFetch, 0)
assert.Len(t, aggregations, 1)
@@ -238,9 +361,9 @@ func TestAggregateTransfers(t *testing.T) {
UpdatedAt: util.GetTimeAsMsSinceEpoch(time.Now()),
})
-usersToFetch, aggregations = checker.aggregateTransfers()
+usersToFetch, aggregations = checker.aggregateUploadTransfers()
assert.Len(t, usersToFetch, 0)
-assert.Len(t, aggregations, 2)
+assert.Len(t, aggregations, 1)
checker.AddTransfer(dataprovider.ActiveTransfer{
ID: 1,
@@ -255,9 +378,9 @@ func TestAggregateTransfers(t *testing.T) {
UpdatedAt: util.GetTimeAsMsSinceEpoch(time.Now()),
})
-usersToFetch, aggregations = checker.aggregateTransfers()
+usersToFetch, aggregations = checker.aggregateUploadTransfers()
assert.Len(t, usersToFetch, 0)
-assert.Len(t, aggregations, 3)
+assert.Len(t, aggregations, 2)
checker.AddTransfer(dataprovider.ActiveTransfer{
ID: 1,
@@ -272,9 +395,9 @@ func TestAggregateTransfers(t *testing.T) {
UpdatedAt: util.GetTimeAsMsSinceEpoch(time.Now()),
})
-usersToFetch, aggregations = checker.aggregateTransfers()
+usersToFetch, aggregations = checker.aggregateUploadTransfers()
assert.Len(t, usersToFetch, 0)
-assert.Len(t, aggregations, 4)
+assert.Len(t, aggregations, 3)
checker.AddTransfer(dataprovider.ActiveTransfer{
ID: 1,
@@ -289,13 +412,13 @@ func TestAggregateTransfers(t *testing.T) {
UpdatedAt: util.GetTimeAsMsSinceEpoch(time.Now()),
})
-usersToFetch, aggregations = checker.aggregateTransfers()
+usersToFetch, aggregations = checker.aggregateUploadTransfers()
assert.Len(t, usersToFetch, 1)
val, ok := usersToFetch["user"]
assert.True(t, ok)
assert.False(t, val)
-assert.Len(t, aggregations, 4)
-aggregate, ok := aggregations["user0"]
+assert.Len(t, aggregations, 3)
+aggregate, ok := aggregations[0]
assert.True(t, ok)
assert.Len(t, aggregate, 2)
@@ -312,13 +435,13 @@ func TestAggregateTransfers(t *testing.T) {
UpdatedAt: util.GetTimeAsMsSinceEpoch(time.Now()),
})
-usersToFetch, aggregations = checker.aggregateTransfers()
+usersToFetch, aggregations = checker.aggregateUploadTransfers()
assert.Len(t, usersToFetch, 1)
val, ok = usersToFetch["user"]
assert.True(t, ok)
assert.False(t, val)
-assert.Len(t, aggregations, 4)
-aggregate, ok = aggregations["user0"]
+assert.Len(t, aggregations, 3)
+aggregate, ok = aggregations[0]
assert.True(t, ok)
assert.Len(t, aggregate, 3)
@@ -335,16 +458,16 @@ func TestAggregateTransfers(t *testing.T) {
UpdatedAt: util.GetTimeAsMsSinceEpoch(time.Now()),
})
-usersToFetch, aggregations = checker.aggregateTransfers()
+usersToFetch, aggregations = checker.aggregateUploadTransfers()
assert.Len(t, usersToFetch, 1)
val, ok = usersToFetch["user"]
assert.True(t, ok)
assert.True(t, val)
-assert.Len(t, aggregations, 4)
-aggregate, ok = aggregations["user0"]
+assert.Len(t, aggregations, 3)
+aggregate, ok = aggregations[0]
assert.True(t, ok)
assert.Len(t, aggregate, 3)
aggregate, ok = aggregations["userfolder0"]
aggregate, ok = aggregations[1]
assert.True(t, ok)
assert.Len(t, aggregate, 2)
@@ -361,20 +484,67 @@ func TestAggregateTransfers(t *testing.T) {
UpdatedAt: util.GetTimeAsMsSinceEpoch(time.Now()),
})
-usersToFetch, aggregations = checker.aggregateTransfers()
+usersToFetch, aggregations = checker.aggregateUploadTransfers()
assert.Len(t, usersToFetch, 1)
val, ok = usersToFetch["user"]
assert.True(t, ok)
assert.True(t, val)
-assert.Len(t, aggregations, 4)
-aggregate, ok = aggregations["user0"]
+assert.Len(t, aggregations, 3)
+aggregate, ok = aggregations[0]
assert.True(t, ok)
assert.Len(t, aggregate, 4)
aggregate, ok = aggregations["userfolder0"]
aggregate, ok = aggregations[1]
assert.True(t, ok)
assert.Len(t, aggregate, 2)
}
func TestDataTransferExceeded(t *testing.T) {
user := dataprovider.User{
BaseUser: sdk.BaseUser{
TotalDataTransfer: 1,
},
}
transfer := dataprovider.ActiveTransfer{
CurrentULSize: 0,
CurrentDLSize: 0,
}
user.UsedDownloadDataTransfer = 1024 * 1024
user.UsedUploadDataTransfer = 512 * 1024
checker := transfersCheckerMem{}
res := checker.isDataTransferExceeded(user, transfer, 100, 100)
assert.False(t, res)
transfer.CurrentULSize = 1
res = checker.isDataTransferExceeded(user, transfer, 100, 100)
assert.True(t, res)
user.UsedDownloadDataTransfer = 512*1024 - 100
user.UsedUploadDataTransfer = 512*1024 - 100
res = checker.isDataTransferExceeded(user, transfer, 100, 100)
assert.False(t, res)
res = checker.isDataTransferExceeded(user, transfer, 101, 100)
assert.True(t, res)
user.TotalDataTransfer = 0
user.DownloadDataTransfer = 1
user.UsedDownloadDataTransfer = 512 * 1024
transfer.CurrentULSize = 0
transfer.CurrentDLSize = 100
res = checker.isDataTransferExceeded(user, transfer, 0, 512*1024)
assert.False(t, res)
res = checker.isDataTransferExceeded(user, transfer, 0, 512*1024+1)
assert.True(t, res)
user.DownloadDataTransfer = 0
user.UploadDataTransfer = 1
user.UsedUploadDataTransfer = 512 * 1024
transfer.CurrentULSize = 0
transfer.CurrentDLSize = 0
res = checker.isDataTransferExceeded(user, transfer, 512*1024+1, 0)
assert.False(t, res)
transfer.CurrentULSize = 1
res = checker.isDataTransferExceeded(user, transfer, 512*1024+1, 0)
assert.True(t, res)
}
func TestGetUsersForQuotaCheck(t *testing.T) {
usersToFetch := make(map[string]bool)
for i := 0; i < 50; i++ {
@@ -407,6 +577,17 @@ func TestGetUsersForQuotaCheck(t *testing.T) {
QuotaSize: 100,
},
},
Filters: dataprovider.UserFilters{
BaseUserFilters: sdk.BaseUserFilters{
DataTransferLimits: []sdk.DataTransferLimit{
{
Sources: []string{"172.16.0.0/16"},
UploadDataTransfer: 50,
DownloadDataTransfer: 80,
},
},
},
},
}
err = dataprovider.AddUser(&user, "", "")
assert.NoError(t, err)
@@ -434,6 +615,14 @@ func TestGetUsersForQuotaCheck(t *testing.T) {
assert.Len(t, user.VirtualFolders, 0, user.Username)
}
}
ul, dl, total := user.GetDataTransferLimits("127.1.1.1")
assert.Equal(t, int64(0), ul)
assert.Equal(t, int64(0), dl)
assert.Equal(t, int64(0), total)
ul, dl, total = user.GetDataTransferLimits("172.16.2.3")
assert.Equal(t, int64(50*1024*1024), ul)
assert.Equal(t, int64(80*1024*1024), dl)
assert.Equal(t, int64(0), total)
}
for i := 0; i < 40; i++ {
@@ -447,3 +636,87 @@ func TestGetUsersForQuotaCheck(t *testing.T) {
assert.NoError(t, err)
assert.Len(t, users, 0)
}
func TestDBTransferChecker(t *testing.T) {
if !isDbTransferCheckerSupported() {
t.Skip("this test is not supported with the current database provider")
}
providerConf := dataprovider.GetProviderConfig()
err := dataprovider.Close()
assert.NoError(t, err)
providerConf.IsShared = 1
err = dataprovider.Initialize(providerConf, configDir, true)
assert.NoError(t, err)
c := getTransfersChecker(1)
checker, ok := c.(*transfersCheckerDB)
assert.True(t, ok)
assert.True(t, checker.lastCleanup.IsZero())
transfer1 := dataprovider.ActiveTransfer{
ID: 1,
Type: TransferDownload,
ConnID: xid.New().String(),
Username: "user1",
FolderName: "folder1",
IP: "127.0.0.1",
}
checker.AddTransfer(transfer1)
transfers, err := dataprovider.GetActiveTransfers(time.Now().Add(24 * time.Hour))
assert.NoError(t, err)
assert.Len(t, transfers, 0)
transfers, err = dataprovider.GetActiveTransfers(time.Now().Add(-periodicTimeoutCheckInterval * 2))
assert.NoError(t, err)
var createdAt, updatedAt int64
if assert.Len(t, transfers, 1) {
transfer := transfers[0]
assert.Equal(t, transfer1.ID, transfer.ID)
assert.Equal(t, transfer1.Type, transfer.Type)
assert.Equal(t, transfer1.ConnID, transfer.ConnID)
assert.Equal(t, transfer1.Username, transfer.Username)
assert.Equal(t, transfer1.IP, transfer.IP)
assert.Equal(t, transfer1.FolderName, transfer.FolderName)
assert.Greater(t, transfer.CreatedAt, int64(0))
assert.Greater(t, transfer.UpdatedAt, int64(0))
assert.Equal(t, int64(0), transfer.CurrentDLSize)
assert.Equal(t, int64(0), transfer.CurrentULSize)
createdAt = transfer.CreatedAt
updatedAt = transfer.UpdatedAt
}
time.Sleep(100 * time.Millisecond)
checker.UpdateTransferCurrentSizes(100, 150, transfer1.ID, transfer1.ConnID)
transfers, err = dataprovider.GetActiveTransfers(time.Now().Add(-periodicTimeoutCheckInterval * 2))
assert.NoError(t, err)
if assert.Len(t, transfers, 1) {
transfer := transfers[0]
assert.Equal(t, int64(150), transfer.CurrentDLSize)
assert.Equal(t, int64(100), transfer.CurrentULSize)
assert.Equal(t, createdAt, transfer.CreatedAt)
assert.Greater(t, transfer.UpdatedAt, updatedAt)
}
res := checker.GetOverquotaTransfers()
assert.Len(t, res, 0)
checker.RemoveTransfer(transfer1.ID, transfer1.ConnID)
transfers, err = dataprovider.GetActiveTransfers(time.Now().Add(-periodicTimeoutCheckInterval * 2))
assert.NoError(t, err)
assert.Len(t, transfers, 0)
err = dataprovider.Close()
assert.NoError(t, err)
res = checker.GetOverquotaTransfers()
assert.Len(t, res, 0)
providerConf.IsShared = 0
err = dataprovider.Initialize(providerConf, configDir, true)
assert.NoError(t, err)
}
func isDbTransferCheckerSupported() bool {
// SQLite shares the implementation with other SQL-based provider but it makes no sense
// to use it outside test cases
switch dataprovider.GetProviderStatus().Driver {
case dataprovider.MySQLDataProviderName, dataprovider.PGSQLDataProviderName,
dataprovider.CockroachDataProviderName, dataprovider.SQLiteDataProviderName:
return true
default:
return false
}
}