@@ -348,7 +348,10 @@ export class MongoSyncBucketStorage
           // 1. We can calculate the document size accurately without serializing again.
           // 2. We can delay parsing the results until it's needed.
           // We manually use bson.deserialize below
-          raw: true
+          raw: true,
+
+          // Limit the time for the operation to complete, to avoid getting connection timeouts
+          maxTimeMS: lib_mongo.db.MONGO_OPERATION_TIMEOUT_MS
         }
       ) as unknown as mongo.FindCursor<Buffer>;
 
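A minimal sketch (not the project's code) of the two find options used in the hunk above, written against the MongoDB Node.js driver. The database and collection names and the timeout constant are assumptions for illustration; the project reads the timeout from `lib_mongo.db.MONGO_OPERATION_TIMEOUT_MS`.

```ts
import { MongoClient } from 'mongodb';
import * as bson from 'bson';

const OPERATION_TIMEOUT_MS = 30_000; // stand-in for lib_mongo.db.MONGO_OPERATION_TIMEOUT_MS

async function readRawBucketData(client: MongoClient): Promise<void> {
  const cursor = client
    .db('powersync') // hypothetical database name
    .collection('bucket_data') // hypothetical collection name
    .find(
      {},
      {
        // Return undecoded BSON buffers: document sizes can be measured
        // without re-serializing, and parsing can be deferred until needed.
        raw: true,
        // Bound server-side execution time so a slow query fails fast with
        // MaxTimeMSExpired instead of surfacing as a connection timeout.
        maxTimeMS: OPERATION_TIMEOUT_MS
      }
    );

  // With `raw: true` the driver yields Buffers at runtime even though the
  // static types still say Document, hence the cast in the diff above.
  for await (const raw of cursor as unknown as AsyncIterable<Buffer>) {
    const doc = bson.deserialize(raw); // parse lazily, only when needed
    void doc;
  }
}
```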
@@ -357,7 +360,9 @@ export class MongoSyncBucketStorage
       // to the lower of the batch count and size limits.
       // This is similar to using `singleBatch: true` in the find options, but allows
       // detecting "hasMore".
-      let { data, hasMore: batchHasMore } = await readSingleBatch(cursor);
+      let { data, hasMore: batchHasMore } = await readSingleBatch(cursor).catch((e) => {
+        throw lib_mongo.mapQueryError(e, 'while reading bucket data');
+      });
       if (data.length == batchLimit) {
         // Limit reached - could have more data, despite the cursor being drained.
         batchHasMore = true;
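A hypothetical sketch of the error-mapping pattern in the hunk above. The real mapper is `lib_mongo.mapQueryError`, whose implementation is not shown in this diff; this only illustrates the idea of attaching query context and recognizing the MaxTimeMSExpired error that `maxTimeMS` can produce.

```ts
import { MongoServerError } from 'mongodb';

function mapQueryErrorSketch(e: unknown, context: string): Error {
  if (e instanceof MongoServerError && e.codeName === 'MaxTimeMSExpired') {
    // Raised by the server when maxTimeMS is exceeded (error code 50).
    return new Error(`Query timed out ${context}`);
  }
  if (e instanceof Error) {
    return new Error(`Query error ${context}: ${e.message}`);
  }
  return new Error(`Unknown query error ${context}`);
}

// Usage mirrors the diff: rethrow with context from a .catch handler.
// await readSingleBatch(cursor).catch((e) => {
//   throw mapQueryErrorSketch(e, 'while reading bucket data');
// });
```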
@@ -486,9 +491,12 @@ export class MongoSyncBucketStorage
           }
         }
       ],
-      { session: undefined, readConcern: 'snapshot' }
+      { session: undefined, readConcern: 'snapshot', maxTimeMS: lib_mongo.db.MONGO_OPERATION_TIMEOUT_MS }
     )
-      .toArray();
+      .toArray()
+      .catch((e) => {
+        throw lib_mongo.mapQueryError(e, 'while reading checksums');
+      });
 
     return new Map<string, storage.PartialChecksum>(
       aggregate.map((doc) => {
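A sketch of the aggregation pattern in the hunk above, under the same assumptions (hypothetical names, and a placeholder pipeline standing in for the elided checksum pipeline). The driver options shown are real: `maxTimeMS` bounds the pipeline on the server, and the `.catch` on `toArray()` rewrites the error with context before it propagates.

```ts
import { MongoClient } from 'mongodb';

async function readChecksums(client: MongoClient, timeoutMs: number) {
  return await client
    .db('powersync') // hypothetical database name
    .collection('bucket_data') // hypothetical collection name
    .aggregate(
      // Placeholder pipeline; the real checksum pipeline is not shown here.
      [{ $group: { _id: '$_id.b', count: { $sum: 1 } } }],
      { session: undefined, readConcern: 'snapshot', maxTimeMS: timeoutMs }
    )
    .toArray()
    .catch((e) => {
      // The diff delegates this mapping to lib_mongo.mapQueryError.
      throw new Error(`while reading checksums: ${e instanceof Error ? e.message : e}`);
    });
}
```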