add support for conditional resuming of uploads

Signed-off-by: Nicola Murino <nicola.murino@gmail.com>
Author: Nicola Murino
Date: 2023-10-22 16:09:30 +02:00
Parent: f1e52d99ba
Commit: e3c4ee0833
23 changed files with 260 additions and 53 deletions


@@ -264,6 +264,23 @@ func (fs *AzureBlobFs) Create(name string, flag, checks int) (File, *PipeWriter,
         metric.AZTransferCompleted(r.GetReadedBytes(), 0, err)
     }()
+    if checks&CheckResume != 0 {
+        readCh := make(chan error, 1)
+
+        go func() {
+            // use := so the goroutine does not write the enclosing err concurrently
+            err := fs.downloadToWriter(name, p)
+            readCh <- err
+        }()
+
+        err = <-readCh
+        if err != nil {
+            cancelFn()
+            p.Close()
+            fsLog(fs, logger.LevelDebug, "download before resume failed, writer closed and read cancelled")
+            return nil, nil, nil, err
+        }
+    }
     return nil, p, cancelFn, nil
 }
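The CheckResume branch replays the blob's current content into the upload pipe before any new data is accepted, so a resumed upload starts from exactly the bytes already stored. A minimal caller-side sketch, not part of this diff: CheckResume, Create, Close and GetWrittenBytes come from the hunks on this page, while the WriteAt call and the surrounding flow are assumptions about how the transfer layer drives the PipeWriter.

    // Sketch: resume an upload. Create with CheckResume first streams the
    // existing blob into the returned PipeWriter, then we append new bytes.
    _, p, cancelFn, err := fs.Create(name, 0, CheckResume)
    if err != nil {
        return err // the pre-resume download failed, nothing was uploaded
    }
    // GetWrittenBytes now reflects the replayed content, i.e. the resume offset
    if _, err := p.WriteAt(newData, p.GetWrittenBytes()); err != nil {
        cancelFn()
        p.Close()
        return err
    }
    return p.Close() // finalize the upload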
@@ -458,6 +475,12 @@ func (*AzureBlobFs) IsUploadResumeSupported() bool {
     return false
 }
 
+// IsConditionalUploadResumeSupported returns whether resuming uploads is
+// supported for the specified size
+func (*AzureBlobFs) IsConditionalUploadResumeSupported(size int64) bool {
+    return size <= resumeMaxSize
+}
+
 // IsAtomicUploadSupported returns true if atomic upload is supported.
 // Azure Blob uploads are already atomic, we don't need to upload to a temporary
 // file
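IsUploadResumeSupported stays false for this backend: unconditional resume is never offered, only the size-bounded variant added above. A caller deciding whether to honor a resume request could combine the two checks roughly like this (a sketch; the Fs interface shape and the helper name are assumptions, only the two method names come from the diff):

    // canResume reports whether the backend can resume an upload at offset:
    // either unconditionally, or conditionally when the data already stored
    // is small enough to be re-downloaded first (size <= resumeMaxSize here).
    func canResume(fs Fs, offset int64) bool {
        return fs.IsUploadResumeSupported() || fs.IsConditionalUploadResumeSupported(offset)
    }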
@@ -965,7 +988,7 @@ func (fs *AzureBlobFs) handleMultipartDownload(ctx context.Context, blockBlob *b
         fsLog(fs, logger.LevelError, "unable to get blob properties, download aborted: %+v", err)
         return err
     }
-    if readMetadata > 0 {
+    if readMetadata > 0 && pipeReader != nil {
         pipeReader.setMetadataFromPointerVal(props.Metadata)
     }
     contentLength := util.GetIntFromPointer(props.ContentLength)
@@ -1172,6 +1195,19 @@ func (fs *AzureBlobFs) getCopyOptions() *blob.StartCopyFromURLOptions {
     return copyOptions
 }
 
+func (fs *AzureBlobFs) downloadToWriter(name string, w *PipeWriter) error {
+    fsLog(fs, logger.LevelDebug, "starting download before resuming upload, path %q", name)
+    ctx, cancelFn := context.WithTimeout(context.Background(), preResumeTimeout)
+    defer cancelFn()
+
+    blockBlob := fs.containerClient.NewBlockBlobClient(name)
+    err := fs.handleMultipartDownload(ctx, blockBlob, 0, w, nil)
+    fsLog(fs, logger.LevelDebug, "download before resuming upload completed, path %q size: %d, err: %+v",
+        name, w.GetWrittenBytes(), err)
+    metric.AZTransferCompleted(w.GetWrittenBytes(), 1, err)
+    return err
+}
+
 func (fs *AzureBlobFs) getStorageID() string {
     if fs.config.Endpoint != "" {
         if !strings.HasSuffix(fs.config.Endpoint, "/") {
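downloadToWriter reuses handleMultipartDownload with a nil pipeReader, which is why the metadata guard in the earlier hunk is needed, and it bounds the whole pre-resume download with preResumeTimeout. Both preResumeTimeout and resumeMaxSize are defined elsewhere in this commit; their shape is roughly as follows, with values that are illustrative assumptions, not taken from the diff:

    // Assumed supporting definitions (needs the "time" import): the timeout
    // caps how long the pre-resume download may run, resumeMaxSize caps how
    // much existing data we are willing to re-download before appending.
    const (
        preResumeTimeout = 5 * time.Minute
        resumeMaxSize    = 10 * 1024 * 1024 // 10 MiB
    )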