Skip to content
This repository was archived by the owner on Jul 31, 2025. It is now read-only.

Commit 26be2b4

Browse files
committed
service/s3/s3manager: Move part buffer pool upwards to allow reuse.
1 parent f69a35e commit 26be2b4

File tree

2 files changed

+31
-8
lines changed

2 files changed

+31
-8
lines changed

CHANGELOG_PENDING.md

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,7 @@
11
### SDK Features
22

33
### SDK Enhancements
4+
* `service/s3/s3manager`: Allow reuse of Uploader buffer `sync.Pool` amongst multiple Upload calls ([#2863](https://github.com/aws/aws-sdk-go/pull/2863))
5+
* The `sync.Pool` used for the reuse of `[]byte` slices when handling streaming payloads will now be shared across multiple Upload calls when the upload part size remains constant.
46

57
### SDK Bugs

service/s3/s3manager/upload.go

Lines changed: 29 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -165,6 +165,9 @@ type Uploader struct {
165165

166166
// Defines the buffer strategy used when uploading a part
167167
BufferProvider ReadSeekerWriteToProvider
168+
169+
// partPool allows for the reuse of streaming payload part buffers between upload calls
170+
partPool *partPool
168171
}
169172

170173
// NewUploader creates a new Uploader instance to upload objects to S3. Pass In
@@ -201,6 +204,8 @@ func newUploader(client s3iface.S3API, options ...func(*Uploader)) *Uploader {
201204
option(u)
202205
}
203206

207+
u.partPool = newPartPool(u.PartSize)
208+
204209
return u
205210
}
206211

@@ -283,6 +288,13 @@ func (u Uploader) UploadWithContext(ctx aws.Context, input *UploadInput, opts ..
283288
for _, opt := range opts {
284289
opt(&i.cfg)
285290
}
291+
292+
// If PartSize was changed or partPool was never set up then we need to allocate a new pool
293+
// so that we return []byte slices of the correct size
294+
if i.cfg.partPool == nil || i.cfg.partPool.partSize != i.cfg.PartSize {
295+
i.cfg.partPool = newPartPool(i.cfg.PartSize)
296+
}
297+
286298
i.cfg.RequestOptions = append(i.cfg.RequestOptions, request.WithAppendUserAgent("S3Manager"))
287299

288300
return i.upload()
@@ -352,8 +364,6 @@ type uploader struct {
352364

353365
readerPos int64 // current reader position
354366
totalSize int64 // set to -1 if the size is not known
355-
356-
bufferPool sync.Pool
357367
}
358368

359369
// internal logic for deciding whether to upload a single part or use a
@@ -393,10 +403,6 @@ func (u *uploader) init() error {
393403
u.cfg.MaxUploadParts = MaxUploadParts
394404
}
395405

396-
u.bufferPool = sync.Pool{
397-
New: func() interface{} { return make([]byte, u.cfg.PartSize) },
398-
}
399-
400406
// Try to get the total size for some optimizations
401407
return u.initSize()
402408
}
@@ -466,12 +472,12 @@ func (u *uploader) nextReader() (io.ReadSeeker, int, func(), error) {
466472
return reader, int(n), cleanup, err
467473

468474
default:
469-
part := u.bufferPool.Get().([]byte)
475+
part := u.cfg.partPool.Get().([]byte)
470476
n, err := readFillBuf(r, part)
471477
u.readerPos += int64(n)
472478

473479
cleanup := func() {
474-
u.bufferPool.Put(part)
480+
u.cfg.partPool.Put(part)
475481
}
476482

477483
return bytes.NewReader(part[0:n]), n, cleanup, err
@@ -751,3 +757,18 @@ func (u *multiuploader) complete() *s3.CompleteMultipartUploadOutput {
751757

752758
return resp
753759
}
760+
761+
// partPool is a sync.Pool of []byte slices sized to a fixed upload part
// size. sync.Pool is embedded so callers use Get/Put directly; partSize
// records the slice length produced by New, allowing callers to detect a
// PartSize change and rebuild the pool (see UploadWithContext) so Get
// always yields correctly sized buffers.
type partPool struct {
	partSize int64
	sync.Pool
}

// newPartPool returns a partPool whose Get allocates a fresh []byte of
// exactly partSize bytes whenever the pool has no idle buffer to reuse.
func newPartPool(partSize int64) *partPool {
	p := &partPool{partSize: partSize}

	p.New = func() interface{} {
		return make([]byte, p.partSize)
	}

	return p
}

0 commit comments

Comments
 (0)