Skip to content
This repository was archived by the owner on Jul 31, 2025. It is now read-only.

Commit fe72a52

Browse files
authored
service/s3/s3manager: Move part buffer pool upwards to allow reuse. (#2863)
1 parent 02207b1 commit fe72a52

File tree

2 files changed

+29
-6
lines changed

2 files changed

+29
-6
lines changed

CHANGELOG_PENDING.md

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,8 @@
11
### SDK Features
22

33
### SDK Enhancements
4+
* `service/s3/s3manager`: Allow reuse of Uploader buffer `sync.Pool` amongst multiple Upload calls ([#2863](https://github.com/aws/aws-sdk-go/pull/2863))
5+
* The `sync.Pool` used for the reuse of `[]byte` slices when handling streaming payloads will now be shared across multiple Upload calls when the upload part size remains constant.
46

57
### SDK Bugs
68
* `internal/ini`: Fix ini parser to handle empty values [#2860](https://github.com/aws/aws-sdk-go/pull/2860)

service/s3/s3manager/upload.go

Lines changed: 27 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -165,6 +165,9 @@ type Uploader struct {
165165

166166
// Defines the buffer strategy used when uploading a part
167167
BufferProvider ReadSeekerWriteToProvider
168+
169+
// partPool allows reuse of streaming payload part buffers between upload calls
170+
partPool *partPool
168171
}
169172

170173
// NewUploader creates a new Uploader instance to upload objects to S3. Pass In
@@ -201,6 +204,8 @@ func newUploader(client s3iface.S3API, options ...func(*Uploader)) *Uploader {
201204
option(u)
202205
}
203206

207+
u.partPool = newPartPool(u.PartSize)
208+
204209
return u
205210
}
206211

@@ -283,6 +288,7 @@ func (u Uploader) UploadWithContext(ctx aws.Context, input *UploadInput, opts ..
283288
for _, opt := range opts {
284289
opt(&i.cfg)
285290
}
291+
286292
i.cfg.RequestOptions = append(i.cfg.RequestOptions, request.WithAppendUserAgent("S3Manager"))
287293

288294
return i.upload()
@@ -352,8 +358,6 @@ type uploader struct {
352358

353359
readerPos int64 // current reader position
354360
totalSize int64 // set to -1 if the size is not known
355-
356-
bufferPool sync.Pool
357361
}
358362

359363
// internal logic for deciding whether to upload a single part or use a
@@ -393,8 +397,10 @@ func (u *uploader) init() error {
393397
u.cfg.MaxUploadParts = MaxUploadParts
394398
}
395399

396-
u.bufferPool = sync.Pool{
397-
New: func() interface{} { return make([]byte, u.cfg.PartSize) },
400+
// If PartSize was changed or partPool was never set up then we need to allocate a new pool
401+
// so that we return []byte slices of the correct size
402+
if u.cfg.partPool == nil || u.cfg.partPool.partSize != u.cfg.PartSize {
403+
u.cfg.partPool = newPartPool(u.cfg.PartSize)
398404
}
399405

400406
// Try to get the total size for some optimizations
@@ -466,12 +472,12 @@ func (u *uploader) nextReader() (io.ReadSeeker, int, func(), error) {
466472
return reader, int(n), cleanup, err
467473

468474
default:
469-
part := u.bufferPool.Get().([]byte)
475+
part := u.cfg.partPool.Get().([]byte)
470476
n, err := readFillBuf(r, part)
471477
u.readerPos += int64(n)
472478

473479
cleanup := func() {
474-
u.bufferPool.Put(part)
480+
u.cfg.partPool.Put(part)
475481
}
476482

477483
return bytes.NewReader(part[0:n]), n, cleanup, err
@@ -751,3 +757,18 @@ func (u *multiuploader) complete() *s3.CompleteMultipartUploadOutput {
751757

752758
return resp
753759
}
760+
761+
// partPool is a sync.Pool specialized for streaming-payload part
// buffers: every []byte it produces via New has length partSize, so a
// pool can be shared across Upload calls for as long as the configured
// part size stays the same.
type partPool struct {
	partSize int64
	sync.Pool
}

// newPartPool returns a partPool whose Get allocates a fresh []byte of
// exactly partSize bytes whenever the pool has no buffer to reuse.
func newPartPool(partSize int64) *partPool {
	pool := &partPool{partSize: partSize}

	// Capture partSize directly; it is fixed for the lifetime of the pool.
	pool.New = func() interface{} {
		return make([]byte, partSize)
	}

	return pool
}

0 commit comments

Comments
 (0)