@@ -162,6 +162,9 @@ type Uploader struct {
 
 	// Defines the buffer strategy used when uploading a part
 	BufferProvider ReadSeekerWriteToProvider
+
+	// partPool allows for the reuse of streaming payload part buffers between upload calls
+	partPool *partPool
 }
 
 // NewUploader creates a new Uploader instance to upload objects to S3. Pass In
@@ -179,8 +182,12 @@ type Uploader struct {
 //     u.PartSize = 64 * 1024 * 1024 // 64MB per part
 // })
 func NewUploader(cfg aws.Config, options ...func(*Uploader)) *Uploader {
+	return newUploader(s3.New(cfg), options...)
+}
+
+func newUploader(client s3iface.ClientAPI, options ...func(*Uploader)) *Uploader {
 	u := &Uploader{
-		S3:                s3.New(cfg),
+		S3:                client,
 		PartSize:          DefaultUploadPartSize,
 		Concurrency:       DefaultUploadConcurrency,
 		LeavePartsOnError: false,
@@ -192,6 +199,8 @@ func NewUploader(cfg aws.Config, options ...func(*Uploader)) *Uploader {
 		option(u)
 	}
 
+	u.partPool = newPartPool(u.PartSize)
+
 	return u
 }
 
@@ -214,20 +223,7 @@ func NewUploader(cfg aws.Config, options ...func(*Uploader)) *Uploader {
 //     u.PartSize = 64 * 1024 * 1024 // 64MB per part
 // })
 func NewUploaderWithClient(svc s3iface.ClientAPI, options ...func(*Uploader)) *Uploader {
-	u := &Uploader{
-		S3:                svc,
-		PartSize:          DefaultUploadPartSize,
-		Concurrency:       DefaultUploadConcurrency,
-		LeavePartsOnError: false,
-		MaxUploadParts:    MaxUploadParts,
-		BufferProvider:    defaultUploadBufferProvider(),
-	}
-
-	for _, option := range options {
-		option(u)
-	}
-
-	return u
+	return newUploader(svc, options...)
 }
 
 // Upload uploads an object to S3, intelligently buffering large files into
@@ -287,6 +283,7 @@ func (u Uploader) UploadWithContext(ctx context.Context, input *UploadInput, opt
 	for _, opt := range opts {
 		opt(&i.cfg)
 	}
+
 	i.cfg.RequestOptions = append(i.cfg.RequestOptions, request.WithAppendUserAgent("S3Manager"))
 
 	return i.upload()
@@ -391,6 +388,15 @@ func (u *uploader) init() error {
 	if u.cfg.PartSize == 0 {
 		u.cfg.PartSize = DefaultUploadPartSize
 	}
+	if u.cfg.MaxUploadParts == 0 {
+		u.cfg.MaxUploadParts = MaxUploadParts
+	}
+
+	// If PartSize was changed or partPool was never set up, then we need to allocate a new pool
+	// so that we return []byte slices of the correct size
+	if u.cfg.partPool == nil || u.cfg.partPool.partSize != u.cfg.PartSize {
+		u.cfg.partPool = newPartPool(u.cfg.PartSize)
+	}
 
 	// Try to get the total size for some optimizations
 	return u.initSize()
@@ -460,11 +466,13 @@ func (u *uploader) nextReader() (io.ReadSeeker, int, func(), error) {
 		return reader, int(n), cleanup, err
 
 	default:
-		part := make([]byte, u.cfg.PartSize)
+		part := u.cfg.partPool.Get().([]byte)
 		n, err := readFillBuf(r, part)
 		u.readerPos += int64(n)
 
-		cleanup := func() {}
+		cleanup := func() {
+			u.cfg.partPool.Put(part)
+		}
 
 		return bytes.NewReader(part[0:n]), n, cleanup, err
 	}
@@ -751,3 +759,18 @@ func (u *multiuploader) complete() *s3.CompleteMultipartUploadOutput {
 
 	return resp.CompleteMultipartUploadOutput
 }
+
+type partPool struct {
+	partSize int64
+	sync.Pool
+}
+
+func newPartPool(partSize int64) *partPool {
+	p := &partPool{partSize: partSize}
+
+	p.New = func() interface{} {
+		return make([]byte, p.partSize)
+	}
+
+	return p
+}
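
For readers skimming the diff, here is the buffer-reuse pattern in isolation. This is a minimal, self-contained sketch rather than the SDK's code; the 5MB pool size and the example payload are illustrative assumptions. Because sync.Pool is embedded in partPool, its Get, Put, and New members are promoted, which is why nextReader can call u.cfg.partPool.Get() and the cleanup closure can return the buffer with Put:

package main

import (
	"bytes"
	"fmt"
	"sync"
)

// partPool mirrors the type added above: a sync.Pool whose New function
// hands back []byte slices of one fixed part size.
type partPool struct {
	partSize int64
	sync.Pool
}

func newPartPool(partSize int64) *partPool {
	p := &partPool{partSize: partSize}
	p.New = func() interface{} {
		// Only runs when the pool is empty; otherwise Get returns a
		// previously Put buffer, avoiding a fresh allocation per part.
		return make([]byte, p.partSize)
	}
	return p
}

func main() {
	pool := newPartPool(5 * 1024 * 1024) // illustrative: S3's 5MB minimum part size

	part := pool.Get().([]byte)        // reused if available, allocated otherwise
	n := copy(part, "example payload") // stand-in for readFillBuf filling the part
	fmt.Println(bytes.NewReader(part[:n]).Len()) // 15

	pool.Put(part) // hand the buffer back, as the new cleanup closure does
}

This is also why init() compares partPool.partSize against cfg.PartSize: if an option changed PartSize after construction, reusing the old pool would hand back wrong-sized buffers, so a fresh pool is allocated instead.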