Commit 558e6b6

goffrie authored and carllerche committed
Avoid reclaiming frames for dead streams. (#262)
In `clear_queue` we drop all the queued frames for a stream, but this doesn't account for a frame that is still buffered inside the `FramedWrite`. This can lead to a panic when `reclaim_frame` tries to recover a frame onto a stream that has already been destroyed, or, more generally, cause incorrect behaviour. Instead, keep track of which frame is currently in flight; then, when `clear_queue` is called for a stream with an in-flight data frame, mark the frame to be dropped instead of reclaimed.
1 parent 11f9141 commit 558e6b6
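
For orientation, the sketch below restates the bookkeeping this commit introduces in a simplified, self-contained form. The names (`StreamKey`, `Prioritize`, `clear_queue`, `reclaim_frame`) mirror the real code but are hypothetical stand-ins here; the actual implementation is in the `src/proto/streams/prioritize.rs` diff below, which additionally records the owning stream at the moment a `DATA` frame is buffered into the codec.

// Sketch only -- simplified stand-ins for the real types in prioritize.rs.
type StreamKey = usize;

#[derive(Debug, PartialEq, Eq, Clone, Copy)]
enum InFlightData {
    Nothing,               // no DATA frame is buffered in the codec
    DataFrame(StreamKey),  // a DATA frame for this stream is buffered
    Drop,                  // the stream was cleared; discard the frame instead of reclaiming it
}

struct Prioritize {
    in_flight_data_frame: InFlightData,
}

impl Prioritize {
    // Called when a stream's queued frames are cleared (e.g. after RST_STREAM).
    fn clear_queue(&mut self, stream: StreamKey) {
        if self.in_flight_data_frame == InFlightData::DataFrame(stream) {
            // The stream may be destroyed before the codec finishes writing,
            // so the buffered frame must never be handed back to it.
            self.in_flight_data_frame = InFlightData::Drop;
        }
    }

    // Called when the codec hands back a partially written DATA frame.
    // Returns true if the frame should be pushed back onto the stream's queue.
    fn reclaim_frame(&mut self, stream: StreamKey) -> bool {
        match std::mem::replace(&mut self.in_flight_data_frame, InFlightData::Nothing) {
            InFlightData::Nothing => panic!("wasn't expecting a frame to reclaim"),
            InFlightData::Drop => false, // stream was cleared; drop the frame
            InFlightData::DataFrame(key) => {
                debug_assert_eq!(key, stream);
                true
            }
        }
    }
}

fn main() {
    let mut p = Prioritize { in_flight_data_frame: InFlightData::Nothing };

    // A DATA frame for stream 1 is buffered in the codec...
    p.in_flight_data_frame = InFlightData::DataFrame(1);
    // ...then the stream is reset and its queue is cleared.
    p.clear_queue(1);
    // When the codec hands the frame back, it is dropped rather than reclaimed.
    assert!(!p.reclaim_frame(1));
}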

File tree: 3 files changed (203 additions & 5 deletions)


src/proto/streams/prioritize.rs

Lines changed: 38 additions & 3 deletions
@@ -8,7 +8,7 @@ use codec::UserError::*;
 
 use bytes::buf::Take;
 
-use std::{cmp, fmt};
+use std::{cmp, fmt, mem};
 use std::io;
 
 /// # Warning
@@ -48,6 +48,19 @@ pub(super) struct Prioritize {
 
     /// Stream ID of the last stream opened.
     last_opened_id: StreamId,
+
+    /// What `DATA` frame is currently being sent in the codec.
+    in_flight_data_frame: InFlightData,
+}
+
+#[derive(Debug, Eq, PartialEq)]
+enum InFlightData {
+    /// There is no `DATA` frame in flight.
+    Nothing,
+    /// There is a `DATA` frame in flight belonging to the given stream.
+    DataFrame(store::Key),
+    /// There was a `DATA` frame, but the stream's queue was since cleared.
+    Drop,
 }
 
 pub(crate) struct Prioritized<B> {
@@ -79,7 +92,8 @@ impl Prioritize {
             pending_capacity: store::Queue::new(),
             pending_open: store::Queue::new(),
             flow: flow,
-            last_opened_id: StreamId::ZERO
+            last_opened_id: StreamId::ZERO,
+            in_flight_data_frame: InFlightData::Nothing,
         }
     }
 
@@ -456,6 +470,10 @@ impl Prioritize {
                 Some(frame) => {
                     trace!("writing frame={:?}", frame);
 
+                    debug_assert_eq!(self.in_flight_data_frame, InFlightData::Nothing);
+                    if let Frame::Data(ref frame) = frame {
+                        self.in_flight_data_frame = InFlightData::DataFrame(frame.payload().stream);
+                    }
                     dst.buffer(frame).ok().expect("invalid frame");
 
                     // Ensure the codec is ready to try the loop again.
@@ -503,12 +521,23 @@
            trace!(
                " -> reclaimed; frame={:?}; sz={}",
                frame,
-               frame.payload().remaining()
+               frame.payload().inner.get_ref().remaining()
            );
 
            let mut eos = false;
            let key = frame.payload().stream;
 
+           match mem::replace(&mut self.in_flight_data_frame, InFlightData::Nothing) {
+               InFlightData::Nothing => panic!("wasn't expecting a frame to reclaim"),
+               InFlightData::Drop => {
+                   trace!("not reclaiming frame for cancelled stream");
+                   return false;
+               }
+               InFlightData::DataFrame(k) => {
+                   debug_assert_eq!(k, key);
+               }
+           }
+
            let mut frame = frame.map(|prioritized| {
                // TODO: Ensure fully written
                eos = prioritized.end_of_stream;
@@ -558,6 +587,12 @@
 
        stream.buffered_send_data = 0;
        stream.requested_send_capacity = 0;
+       if let InFlightData::DataFrame(key) = self.in_flight_data_frame {
+           if stream.key() == key {
+               // This stream could get cleaned up now - don't allow the buffered frame to get reclaimed.
+               self.in_flight_data_frame = InFlightData::Drop;
+           }
+       }
    }
 
    fn pop_frame<B>(

tests/stream_states.rs

Lines changed: 67 additions & 0 deletions
@@ -878,3 +878,70 @@ fn rst_while_closing() {
 
     client.join(srv).wait().expect("wait");
 }
+
+#[test]
+fn rst_with_buffered_data() {
+    use futures::future::lazy;
+
+    // Data is buffered in `FramedWrite` and the stream is reset locally before
+    // the data is fully flushed. Given that resetting a stream requires
+    // clearing all associated state for that stream, this test ensures that the
+    // buffered up frame is correctly handled.
+    let _ = ::env_logger::try_init();
+
+    // This allows the settings + headers frame through
+    let (io, srv) = mock::new_with_write_capacity(73);
+
+    // Synchronize the client / server on response
+    let (tx, rx) = ::futures::sync::oneshot::channel();
+
+    let srv = srv.assert_client_handshake()
+        .unwrap()
+        .recv_settings()
+        .recv_frame(
+            frames::headers(1)
+                .request("POST", "https://example.com/")
+        )
+        .buffer_bytes(128)
+        .send_frame(frames::headers(1).response(204).eos())
+        .send_frame(frames::reset(1).cancel())
+        .wait_for(rx)
+        .unbounded_bytes()
+        .recv_frame(
+            frames::data(1, vec![0; 16_384]))
+        .close()
+        ;
+
+    // A large body
+    let body = vec![0; 2 * frame::DEFAULT_INITIAL_WINDOW_SIZE as usize];
+
+    let client = client::handshake(io)
+        .expect("handshake")
+        .and_then(|(mut client, conn)| {
+            let request = Request::builder()
+                .method(Method::POST)
+                .uri("https://example.com/")
+                .body(())
+                .unwrap();
+
+            // Send the request
+            let (resp, mut stream) = client.send_request(request, false)
+                .expect("send_request");
+
+            // Send the data
+            stream.send_data(body.into(), true).unwrap();
+
+            conn.drive({
+                resp.then(|res| {
+                    Ok::<_, ()>(())
+                })
+            })
+        })
+        .and_then(move |(conn, _)| {
+            tx.send(()).unwrap();
+            conn.unwrap()
+        });
+
+
+    client.join(srv).wait().expect("wait");
+}

tests/support/mock.rs

Lines changed: 98 additions & 2 deletions
@@ -10,7 +10,7 @@ use futures::task::{self, Task};
 use tokio_io::{AsyncRead, AsyncWrite};
 use tokio_io::io::read_exact;
 
-use std::{cmp, fmt, io};
+use std::{cmp, fmt, io, usize};
 use std::io::ErrorKind::WouldBlock;
 use std::sync::{Arc, Mutex};
 
@@ -32,22 +32,44 @@ pub struct Pipe {
 
 #[derive(Debug)]
 struct Inner {
+    /// Data written by the test case to the h2 lib.
     rx: Vec<u8>,
+
+    /// Notify when data is ready to be received.
     rx_task: Option<Task>,
+
+    /// Data written by the `h2` library to be read by the test case.
     tx: Vec<u8>,
+
+    /// Notify when data is written. This notifies the test case waiters.
     tx_task: Option<Task>,
+
+    /// Number of bytes that can be written before `write` returns `NotReady`.
+    tx_rem: usize,
+
+    /// Task to notify when write capacity becomes available.
+    tx_rem_task: Option<Task>,
+
+    /// True when the pipe is closed.
     closed: bool,
 }
 
 const PREFACE: &'static [u8] = b"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n";
 
 /// Create a new mock and handle
 pub fn new() -> (Mock, Handle) {
+    new_with_write_capacity(usize::MAX)
+}
+
+/// Create a new mock and handle allowing up to `cap` bytes to be written.
+pub fn new_with_write_capacity(cap: usize) -> (Mock, Handle) {
     let inner = Arc::new(Mutex::new(Inner {
         rx: vec![],
         rx_task: None,
         tx: vec![],
         tx_task: None,
+        tx_rem: cap,
+        tx_rem_task: None,
         closed: false,
     }));
 
@@ -303,14 +325,24 @@ impl io::Read for Mock {
 impl AsyncRead for Mock {}
 
 impl io::Write for Mock {
-    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+    fn write(&mut self, mut buf: &[u8]) -> io::Result<usize> {
         let mut me = self.pipe.inner.lock().unwrap();
 
         if me.closed {
             return Err(io::Error::new(io::ErrorKind::BrokenPipe, "mock closed"));
         }
 
+        if me.tx_rem == 0 {
+            me.tx_rem_task = Some(task::current());
+            return Err(io::ErrorKind::WouldBlock.into());
+        }
+
+        if buf.len() > me.tx_rem {
+            buf = &buf[..me.tx_rem];
+        }
+
         me.tx.extend(buf);
+        me.tx_rem -= buf.len();
 
         if let Some(task) = me.tx_task.take() {
             task.notify();
@@ -477,6 +509,70 @@ pub trait HandleFutureExt {
         }))
     }
 
+    fn buffer_bytes(self, num: usize) -> Box<Future<Item = Handle, Error = Self::Error>>
+        where Self: Sized + 'static,
+              Self: Future<Item = Handle>,
+              Self::Error: fmt::Debug,
+    {
+        use futures::future::poll_fn;
+
+        Box::new(self.and_then(move |mut handle| {
+            // Set tx_rem to num
+            {
+                let mut i = handle.codec.get_mut().inner.lock().unwrap();
+                i.tx_rem = num;
+            }
+
+            let mut handle = Some(handle);
+
+            poll_fn(move || {
+                {
+                    let mut inner = handle.as_mut().unwrap()
+                        .codec.get_mut().inner.lock().unwrap();
+
+                    if inner.tx_rem == 0 {
+                        inner.tx_rem = usize::MAX;
+                    } else {
+                        inner.tx_task = Some(task::current());
+                        return Ok(Async::NotReady);
+                    }
+                }
+
+                Ok(handle.take().unwrap().into())
+            })
+        }))
+    }
+
+    fn unbounded_bytes(self) -> Box<Future<Item = Handle, Error = Self::Error>>
+        where Self: Sized + 'static,
+              Self: Future<Item = Handle>,
+              Self::Error: fmt::Debug,
+    {
+        Box::new(self.and_then(|mut handle| {
+            {
+                let mut i = handle.codec.get_mut().inner.lock().unwrap();
+                i.tx_rem = usize::MAX;
+
+                if let Some(task) = i.tx_rem_task.take() {
+                    task.notify();
+                }
+            }
+
+            Ok(handle.into())
+        }))
+    }
+
+    fn then_notify(self, tx: oneshot::Sender<()>) -> Box<Future<Item = Handle, Error = Self::Error>>
+        where Self: Sized + 'static,
+              Self: Future<Item = Handle>,
+              Self::Error: fmt::Debug,
+    {
+        Box::new(self.map(move |handle| {
+            tx.send(()).unwrap();
+            handle
+        }))
+    }
+
     fn wait_for<F>(self, other: F) -> Box<Future<Item = Self::Item, Error = Self::Error>>
     where
         F: Future + 'static,
