diff --git a/src/server/conn.rs b/src/server/conn.rs
index f0379d950c..897ec640e2 100644
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -58,6 +58,11 @@ use crate::error::{Kind, Parse};
 #[cfg(feature = "http1")]
 use crate::upgrade::Upgraded;
 
+#[cfg(all(feature = "backports", feature = "http1"))]
+pub mod http1;
+#[cfg(all(feature = "backports", feature = "http2"))]
+pub mod http2;
+
 cfg_feature! {
     #![any(feature = "http1", feature = "http2")]
 
@@ -327,7 +332,7 @@ impl<E> Http<E> {
         self
     }
 
-    /// Set a timeout for reading client request headers. If a client does not 
+    /// Set a timeout for reading client request headers. If a client does not
     /// transmit the entire header within this time, the connection is closed.
     ///
     /// Default is None.
@@ -809,7 +814,12 @@ where
         let mut conn = Some(self);
         futures_util::future::poll_fn(move |cx| {
             ready!(conn.as_mut().unwrap().poll_without_shutdown(cx))?;
-            Poll::Ready(conn.take().unwrap().try_into_parts().ok_or_else(crate::Error::new_without_shutdown_not_h1))
+            Poll::Ready(
+                conn.take()
+                    .unwrap()
+                    .try_into_parts()
+                    .ok_or_else(crate::Error::new_without_shutdown_not_h1),
+            )
         })
     }
 
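// A minimal usage sketch of the backported module layout above (not part of the
// patch). It assumes a hyper build with the `backports`, `http1`, and `tokio`
// features enabled, matching the `cfg` gates added to conn.rs; the IO type and
// service are placeholders.

use hyper::server::conn::http1;
use hyper::service::service_fn;
use hyper::{Body, Request, Response};
use tokio::io::{AsyncRead, AsyncWrite};

async fn serve_one<I>(io: I) -> hyper::Result<()>
where
    I: AsyncRead + AsyncWrite + Unpin + 'static,
{
    // A trivial service; any `HttpService<Body>` works here.
    let svc = service_fn(|_req: Request<Body>| async {
        Ok::<_, hyper::Error>(Response::new(Body::from("hello")))
    });

    // Each accepted IO object gets its own connection future to drive.
    http1::Builder::new().serve_connection(io, svc).await
}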
diff --git a/src/server/conn/http1.rs b/src/server/conn/http1.rs
new file mode 100644
index 0000000000..b2e54976e7
--- /dev/null
+++ b/src/server/conn/http1.rs
@@ -0,0 +1,446 @@
+//! HTTP/1 Server Connections
+
+use std::error::Error as StdError;
+use std::fmt;
+use std::time::Duration;
+
+use bytes::Bytes;
+use tokio::io::{AsyncRead, AsyncWrite};
+
+use crate::body::{Body as IncomingBody, HttpBody as Body};
+use crate::common::{task, Future, Pin, Poll, Unpin};
+use crate::proto;
+use crate::service::HttpService;
+
+type Http1Dispatcher<T, B, S> = proto::h1::Dispatcher<
+    proto::h1::dispatch::Server<S, IncomingBody>,
+    B,
+    T,
+    proto::ServerTransaction,
+>;
+
+pin_project_lite::pin_project! {
+    /// A future binding an http1 connection with a Service.
+    ///
+    /// Polling this future will drive HTTP forward.
+    #[must_use = "futures do nothing unless polled"]
+    pub struct Connection<T, S>
+    where
+        S: HttpService<IncomingBody>,
+    {
+        conn: Http1Dispatcher<T, S::ResBody, S>,
+    }
+}
+
+/// A configuration builder for HTTP/1 server connections.
+#[derive(Clone, Debug)]
+pub struct Builder {
+    h1_half_close: bool,
+    h1_keep_alive: bool,
+    h1_title_case_headers: bool,
+    h1_preserve_header_case: bool,
+    h1_header_read_timeout: Option<Duration>,
+    h1_writev: Option<bool>,
+    max_buf_size: Option<usize>,
+    pipeline_flush: bool,
+}
+
+/// Deconstructed parts of a `Connection`.
+///
+/// This allows taking apart a `Connection` at a later time, in order to
+/// reclaim the IO object, and additional related pieces.
+#[derive(Debug)]
+pub struct Parts<T, S> {
+    /// The original IO object used in the handshake.
+    pub io: T,
+    /// A buffer of bytes that have been read but not processed as HTTP.
+    ///
+    /// If the client sent additional bytes after its last request, and
+    /// this connection "ended" with an upgrade, the read buffer will contain
+    /// those bytes.
+    ///
+    /// You will want to check for any existing bytes if you plan to continue
+    /// communicating on the IO object.
+    pub read_buf: Bytes,
+    /// The `Service` used to serve this connection.
+    pub service: S,
+    _inner: (),
+}
+
+// ===== impl Connection =====
+
+impl<I, S> fmt::Debug for Connection<I, S>
+where
+    S: HttpService<IncomingBody>,
+{
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("Connection").finish()
+    }
+}
+
+impl<I, B, S> Connection<I, S>
+where
+    S: HttpService<IncomingBody, ResBody = B>,
+    S::Error: Into<Box<dyn StdError + Send + Sync>>,
+    I: AsyncRead + AsyncWrite + Unpin,
+    B: Body + 'static,
+    B::Error: Into<Box<dyn StdError + Send + Sync>>,
+{
+    /// Start a graceful shutdown process for this connection.
+    ///
+    /// This `Connection` should continue to be polled until shutdown
+    /// can finish.
+    ///
+    /// # Note
+    ///
+    /// This should only be called while the `Connection` future is still
+    /// pending. If called after `Connection::poll` has resolved, this does
+    /// nothing.
+    pub fn graceful_shutdown(mut self: Pin<&mut Self>) {
+        self.conn.disable_keep_alive();
+    }
+
+    /// Return the inner IO object, and additional information.
+    ///
+    /// If the IO object has been "rewound" the io will not contain those bytes rewound.
+    /// This should only be called after `poll_without_shutdown` signals
+    /// that the connection is "done". Otherwise, it may not have finished
+    /// flushing all necessary HTTP bytes.
+    ///
+    /// # Panics
+    /// This method will panic if this connection is using an h2 protocol.
+    pub fn into_parts(self) -> Parts<I, S> {
+        let (io, read_buf, dispatch) = self.conn.into_inner();
+        Parts {
+            io,
+            read_buf,
+            service: dispatch.into_service(),
+            _inner: (),
+        }
+    }
+
+    /// Poll the connection for completion, but without calling `shutdown`
+    /// on the underlying IO.
+    ///
+    /// This is useful to allow running a connection while doing an HTTP
+    /// upgrade. Once the upgrade is completed, the connection would be "done",
+    /// but it is not desired to actually shutdown the IO object. Instead you
+    /// would take it back using `into_parts`.
+    pub fn poll_without_shutdown(&mut self, cx: &mut task::Context<'_>) -> Poll<crate::Result<()>>
+    where
+        S: Unpin,
+        S::Future: Unpin,
+        B: Unpin,
+    {
+        self.conn.poll_without_shutdown(cx)
+    }
+
+    /// Prevent shutdown of the underlying IO object at the end of servicing the request,
+    /// instead run `into_parts`. This is a convenience wrapper over `poll_without_shutdown`.
+    ///
+    /// # Error
+    ///
+    /// This errors if the underlying connection protocol is not HTTP/1.
+    pub fn without_shutdown(self) -> impl Future<Output = crate::Result<Parts<I, S>>>
+    where
+        S: Unpin,
+        S::Future: Unpin,
+        B: Unpin,
+    {
+        let mut zelf = Some(self);
+        futures_util::future::poll_fn(move |cx| {
+            ready!(zelf.as_mut().unwrap().conn.poll_without_shutdown(cx))?;
+            Poll::Ready(Ok(zelf.take().unwrap().into_parts()))
+        })
+    }
+
+    /// Enable this connection to support higher-level HTTP upgrades.
+    ///
+    /// See [the `upgrade` module](crate::upgrade) for more.
+    pub fn with_upgrades(self) -> upgrades::UpgradeableConnection<I, S>
+    where
+        I: Send,
+    {
+        upgrades::UpgradeableConnection { inner: Some(self) }
+    }
+}
+
+impl<I, B, S> Future for Connection<I, S>
+where
+    S: HttpService<IncomingBody, ResBody = B>,
+    S::Error: Into<Box<dyn StdError + Send + Sync>>,
+    I: AsyncRead + AsyncWrite + Unpin + 'static,
+    B: Body + 'static,
+    B::Error: Into<Box<dyn StdError + Send + Sync>>,
+{
+    type Output = crate::Result<()>;
+
+    fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
+        match ready!(Pin::new(&mut self.conn).poll(cx)) {
+            Ok(done) => {
+                match done {
+                    proto::Dispatched::Shutdown => {}
+                    proto::Dispatched::Upgrade(pending) => {
+                        // With no `Send` bound on `I`, we can't try to do
+                        // upgrades here. In case a user was trying to use
+                        // `Body::on_upgrade` with this API, send a special
+                        // error letting them know about that.
+                        pending.manual();
+                    }
+                };
+                return Poll::Ready(Ok(()));
+            }
+            Err(e) => Poll::Ready(Err(e)),
+        }
+    }
+}
+
+// ===== impl Builder =====
+
+impl Builder {
+    /// Create a new connection builder.
+    pub fn new() -> Self {
+        Self {
+            h1_half_close: false,
+            h1_keep_alive: true,
+            h1_title_case_headers: false,
+            h1_preserve_header_case: false,
+            h1_header_read_timeout: None,
+            h1_writev: None,
+            max_buf_size: None,
+            pipeline_flush: false,
+        }
+    }
+
+    /// Set whether HTTP/1 connections should support half-closures.
+    ///
+    /// Clients can choose to shutdown their write-side while waiting
+    /// for the server to respond. Setting this to `true` will
+    /// prevent closing the connection immediately if `read`
+    /// detects an EOF in the middle of a request.
+    ///
+    /// Default is `false`.
+    pub fn half_close(&mut self, val: bool) -> &mut Self {
+        self.h1_half_close = val;
+        self
+    }
+
+    /// Enables or disables HTTP/1 keep-alive.
+    ///
+    /// Default is true.
+    pub fn keep_alive(&mut self, val: bool) -> &mut Self {
+        self.h1_keep_alive = val;
+        self
+    }
+
+    /// Set whether HTTP/1 connections will write header names as title case at
+    /// the socket level.
+    ///
+    /// Default is false.
+    pub fn title_case_headers(&mut self, enabled: bool) -> &mut Self {
+        self.h1_title_case_headers = enabled;
+        self
+    }
+
+    /// Set whether to support preserving original header cases.
+    ///
+    /// Currently, this will record the original cases received, and store them
+    /// in a private extension on the `Request`. It will also look for and use
+    /// such an extension in any provided `Response`.
+    ///
+    /// Since the relevant extension is still private, there is no way to
+    /// interact with the original cases. The only effect this can have now is
+    /// to forward the cases in a proxy-like fashion.
+    ///
+    /// Default is false.
+    pub fn preserve_header_case(&mut self, enabled: bool) -> &mut Self {
+        self.h1_preserve_header_case = enabled;
+        self
+    }
+
+    /// Set a timeout for reading client request headers. If a client does not
+    /// transmit the entire header within this time, the connection is closed.
+    ///
+    /// Default is None.
+    pub fn header_read_timeout(&mut self, read_timeout: Duration) -> &mut Self {
+        self.h1_header_read_timeout = Some(read_timeout);
+        self
+    }
+
+    /// Set whether HTTP/1 connections should try to use vectored writes,
+    /// or always flatten into a single buffer.
+    ///
+    /// Note that setting this to false may mean more copies of body data,
+    /// but may also improve performance when an IO transport doesn't
+    /// support vectored writes well, such as most TLS implementations.
+    ///
+    /// Setting this to true will force hyper to use queued strategy
+    /// which may eliminate unnecessary cloning on some TLS backends.
+    ///
+    /// Default is `auto`. In this mode hyper will try to guess which
+    /// mode to use.
+    pub fn writev(&mut self, val: bool) -> &mut Self {
+        self.h1_writev = Some(val);
+        self
+    }
+
+    /// Set the maximum buffer size for the connection.
+    ///
+    /// Default is ~400kb.
+    ///
+    /// # Panics
+    ///
+    /// The minimum value allowed is 8192. This method panics if the passed `max` is less than the minimum.
+    pub fn max_buf_size(&mut self, max: usize) -> &mut Self {
+        assert!(
+            max >= proto::h1::MINIMUM_MAX_BUFFER_SIZE,
+            "the max_buf_size cannot be smaller than the minimum that h1 specifies."
+        );
+        self.max_buf_size = Some(max);
+        self
+    }
+
+    /// Aggregates flushes to better support pipelined responses.
+    ///
+    /// Experimental, may have bugs.
+    ///
+    /// Default is false.
+    pub fn pipeline_flush(&mut self, enabled: bool) -> &mut Self {
+        self.pipeline_flush = enabled;
+        self
+    }
+
+    // /// Set the timer used in background tasks.
+    // pub fn timer<M>(&mut self, timer: M) -> &mut Self
+    // where
+    //     M: Timer + Send + Sync + 'static,
+    // {
+    //     self.timer = Time::Timer(Arc::new(timer));
+    //     self
+    // }
+
+    /// Bind a connection together with a [`Service`](crate::service::Service).
+    ///
+    /// This returns a Future that must be polled in order for HTTP to be
+    /// driven on the connection.
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// # use hyper::{Body as Incoming, Request, Response};
+    /// # use hyper::service::Service;
+    /// # use hyper::server::conn::http1::Builder;
+    /// # use tokio::io::{AsyncRead, AsyncWrite};
+    /// # async fn run<I, S>(some_io: I, some_service: S)
+    /// # where
+    /// #     I: AsyncRead + AsyncWrite + Unpin + Send + 'static,
+    /// #     S: Service<Request<Incoming>, Response=hyper::Response<Incoming>> + Send + 'static,
+    /// #     S::Error: Into<Box<dyn std::error::Error + Send + Sync>>,
+    /// #     S::Future: Send,
+    /// # {
+    /// let http = Builder::new();
+    /// let conn = http.serve_connection(some_io, some_service);
+    ///
+    /// if let Err(e) = conn.await {
+    ///     eprintln!("server connection error: {}", e);
+    /// }
+    /// # }
+    /// # fn main() {}
+    /// ```
+    pub fn serve_connection<I, S>(&self, io: I, service: S) -> Connection<I, S>
+    where
+        S: HttpService<IncomingBody>,
+        S::Error: Into<Box<dyn StdError + Send + Sync>>,
+        S::ResBody: 'static,
+        <S::ResBody as Body>::Error: Into<Box<dyn StdError + Send + Sync>>,
+        I: AsyncRead + AsyncWrite + Unpin,
+    {
+        let mut conn = proto::Conn::new(io);
+        if !self.h1_keep_alive {
+            conn.disable_keep_alive();
+        }
+        if self.h1_half_close {
+            conn.set_allow_half_close();
+        }
+        if self.h1_title_case_headers {
+            conn.set_title_case_headers();
+        }
+        if self.h1_preserve_header_case {
+            conn.set_preserve_header_case();
+        }
+        if let Some(header_read_timeout) = self.h1_header_read_timeout {
+            conn.set_http1_header_read_timeout(header_read_timeout);
+        }
+        if let Some(writev) = self.h1_writev {
+            if writev {
+                conn.set_write_strategy_queue();
+            } else {
+                conn.set_write_strategy_flatten();
+            }
+        }
+        conn.set_flush_pipeline(self.pipeline_flush);
+        if let Some(max) = self.max_buf_size {
+            conn.set_max_buf_size(max);
+        }
+        let sd = proto::h1::dispatch::Server::new(service);
+        let proto = proto::h1::Dispatcher::new(sd, conn);
+        Connection { conn: proto }
+    }
+}
+
+mod upgrades {
+    use crate::upgrade::Upgraded;
+
+    use super::*;
+
+    // A future binding a connection with a Service with Upgrade support.
+    //
+    // This type is unnameable outside the crate.
+    #[must_use = "futures do nothing unless polled"]
+    #[allow(missing_debug_implementations)]
+    pub struct UpgradeableConnection<T, S>
+    where
+        S: HttpService<IncomingBody>,
+    {
+        pub(super) inner: Option<Connection<T, S>>,
+    }
+
+    impl<I, B, S> UpgradeableConnection<I, S>
+    where
+        S: HttpService<IncomingBody, ResBody = B>,
+        S::Error: Into<Box<dyn StdError + Send + Sync>>,
+        I: AsyncRead + AsyncWrite + Unpin,
+        B: Body + 'static,
+        B::Error: Into<Box<dyn StdError + Send + Sync>>,
+    {
+        /// Start a graceful shutdown process for this connection.
+        ///
+        /// This `Connection` should continue to be polled until shutdown
+        /// can finish.
+        pub fn graceful_shutdown(mut self: Pin<&mut Self>) {
+            Pin::new(self.inner.as_mut().unwrap()).graceful_shutdown()
+        }
+    }
+
+    impl<I, B, S> Future for UpgradeableConnection<I, S>
+    where
+        S: HttpService<IncomingBody, ResBody = B>,
+        S::Error: Into<Box<dyn StdError + Send + Sync>>,
+        I: AsyncRead + AsyncWrite + Unpin + Send + 'static,
+        B: Body + 'static,
+        B::Error: Into<Box<dyn StdError + Send + Sync>>,
+    {
+        type Output = crate::Result<()>;
+
+        fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
+            match ready!(Pin::new(&mut self.inner.as_mut().unwrap().conn).poll(cx)) {
+                Ok(proto::Dispatched::Shutdown) => Poll::Ready(Ok(())),
+                Ok(proto::Dispatched::Upgrade(pending)) => {
+                    let (io, buf, _) = self.inner.take().unwrap().conn.into_inner();
+                    pending.fulfill(Upgraded::new(io, buf));
+                    Poll::Ready(Ok(()))
+                }
+                Err(e) => Poll::Ready(Err(e)),
+            }
+        }
+    }
+}
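// A sketch of the graceful shutdown flow described in the docs above (not part
// of the patch; the shutdown channel, IO type, and service are placeholders).
// `graceful_shutdown` takes `Pin<&mut Self>`, so the connection is pinned first
// and then polled to completion after shutdown has been requested.

use hyper::server::conn::http1;
use hyper::service::service_fn;
use hyper::{Body, Request, Response};
use tokio::io::{AsyncRead, AsyncWrite};
use tokio::sync::oneshot;

async fn serve_until_shutdown<I>(io: I, mut shutdown: oneshot::Receiver<()>)
where
    I: AsyncRead + AsyncWrite + Unpin + 'static,
{
    let svc = service_fn(|_req: Request<Body>| async {
        Ok::<_, hyper::Error>(Response::new(Body::empty()))
    });

    let conn = http1::Builder::new().serve_connection(io, svc);
    tokio::pin!(conn);

    tokio::select! {
        // The connection finished on its own.
        res = conn.as_mut() => {
            if let Err(e) = res {
                eprintln!("connection error: {}", e);
            }
        }
        // Shutdown was requested: stop accepting new requests, then keep
        // polling until in-flight work is flushed.
        _ = &mut shutdown => {
            conn.as_mut().graceful_shutdown();
            if let Err(e) = conn.as_mut().await {
                eprintln!("error during graceful shutdown: {}", e);
            }
        }
    }
}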
diff --git a/src/server/conn/http2.rs b/src/server/conn/http2.rs
new file mode 100644
index 0000000000..978c646e10
--- /dev/null
+++ b/src/server/conn/http2.rs
@@ -0,0 +1,257 @@
+//! HTTP/2 Server Connections
+
+use std::error::Error as StdError;
+use std::fmt;
+use std::time::Duration;
+
+use pin_project_lite::pin_project;
+use tokio::io::{AsyncRead, AsyncWrite};
+
+use crate::body::{Body as IncomingBody, HttpBody as Body};
+use crate::common::exec::ConnStreamExec;
+use crate::common::{task, Future, Pin, Poll, Unpin};
+use crate::proto;
+use crate::service::HttpService;
+
+pin_project! {
+    /// A future binding an HTTP/2 connection with a Service.
+    ///
+    /// Polling this future will drive HTTP forward.
+    #[must_use = "futures do nothing unless polled"]
+    pub struct Connection<T, S, E>
+    where
+        S: HttpService<IncomingBody>,
+    {
+        conn: proto::h2::Server<T, S, S::ResBody, E>,
+    }
+}
+
+/// A configuration builder for HTTP/2 server connections.
+#[derive(Clone, Debug)]
+pub struct Builder<E> {
+    exec: E,
+    h2_builder: proto::h2::server::Config,
+}
+
+// ===== impl Connection =====
+
+impl<I, S, E> fmt::Debug for Connection<I, S, E>
+where
+    S: HttpService<IncomingBody>,
+{
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("Connection").finish()
+    }
+}
+
+impl<I, B, S, E> Connection<I, S, E>
+where
+    S: HttpService<IncomingBody, ResBody = B>,
+    S::Error: Into<Box<dyn StdError + Send + Sync>>,
+    I: AsyncRead + AsyncWrite + Unpin,
+    B: Body + 'static,
+    B::Error: Into<Box<dyn StdError + Send + Sync>>,
+    E: ConnStreamExec<S::Future, B>,
+{
+    /// Start a graceful shutdown process for this connection.
+    ///
+    /// This `Connection` should continue to be polled until shutdown
+    /// can finish.
+    ///
+    /// # Note
+    ///
+    /// This should only be called while the `Connection` future is still
+    /// pending. If called after `Connection::poll` has resolved, this does
+    /// nothing.
+    pub fn graceful_shutdown(mut self: Pin<&mut Self>) {
+        self.conn.graceful_shutdown();
+    }
+}
+
+impl<I, B, S, E> Future for Connection<I, S, E>
+where
+    S: HttpService<IncomingBody, ResBody = B>,
+    S::Error: Into<Box<dyn StdError + Send + Sync>>,
+    I: AsyncRead + AsyncWrite + Unpin + 'static,
+    B: Body + 'static,
+    B::Error: Into<Box<dyn StdError + Send + Sync>>,
+    E: ConnStreamExec<S::Future, B>,
+{
+    type Output = crate::Result<()>;
+
+    fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
+        match ready!(Pin::new(&mut self.conn).poll(cx)) {
+            Ok(_done) => {
+                //TODO: the proto::h2::Server no longer needs to return
+                //the Dispatched enum
+                Poll::Ready(Ok(()))
+            }
+            Err(e) => Poll::Ready(Err(e)),
+        }
+    }
+}
+
+// ===== impl Builder =====
+
+impl<E> Builder<E> {
+    /// Create a new connection builder.
+    ///
+    /// This starts with the default options, and an executor.
+    pub fn new(exec: E) -> Self {
+        Self {
+            exec: exec,
+            h2_builder: Default::default(),
+        }
+    }
+
+    /// Sets the [`SETTINGS_INITIAL_WINDOW_SIZE`][spec] option for HTTP2
+    /// stream-level flow control.
+    ///
+    /// Passing `None` will do nothing.
+    ///
+    /// If not set, hyper will use a default.
+    ///
+    /// [spec]: https://http2.github.io/http2-spec/#SETTINGS_INITIAL_WINDOW_SIZE
+    pub fn initial_stream_window_size(&mut self, sz: impl Into<Option<u32>>) -> &mut Self {
+        if let Some(sz) = sz.into() {
+            self.h2_builder.adaptive_window = false;
+            self.h2_builder.initial_stream_window_size = sz;
+        }
+        self
+    }
+
+    /// Sets the max connection-level flow control for HTTP2.
+    ///
+    /// Passing `None` will do nothing.
+    ///
+    /// If not set, hyper will use a default.
+    pub fn initial_connection_window_size(&mut self, sz: impl Into<Option<u32>>) -> &mut Self {
+        if let Some(sz) = sz.into() {
+            self.h2_builder.adaptive_window = false;
+            self.h2_builder.initial_conn_window_size = sz;
+        }
+        self
+    }
+
+    /// Sets whether to use an adaptive flow control.
+    ///
+    /// Enabling this will override the limits set in
+    /// `initial_stream_window_size` and
+    /// `initial_connection_window_size`.
+    pub fn adaptive_window(&mut self, enabled: bool) -> &mut Self {
+        use proto::h2::SPEC_WINDOW_SIZE;
+
+        self.h2_builder.adaptive_window = enabled;
+        if enabled {
+            self.h2_builder.initial_conn_window_size = SPEC_WINDOW_SIZE;
+            self.h2_builder.initial_stream_window_size = SPEC_WINDOW_SIZE;
+        }
+        self
+    }
+
+    /// Sets the maximum frame size to use for HTTP2.
+    ///
+    /// Passing `None` will do nothing.
+    ///
+    /// If not set, hyper will use a default.
+    pub fn max_frame_size(&mut self, sz: impl Into<Option<u32>>) -> &mut Self {
+        if let Some(sz) = sz.into() {
+            self.h2_builder.max_frame_size = sz;
+        }
+        self
+    }
+
+    /// Sets the [`SETTINGS_MAX_CONCURRENT_STREAMS`][spec] option for HTTP2
+    /// connections.
+    ///
+    /// Default is no limit (`std::u32::MAX`). Passing `None` will do nothing.
+    ///
+    /// [spec]: https://http2.github.io/http2-spec/#SETTINGS_MAX_CONCURRENT_STREAMS
+    pub fn max_concurrent_streams(&mut self, max: impl Into<Option<u32>>) -> &mut Self {
+        self.h2_builder.max_concurrent_streams = max.into();
+        self
+    }
+
+    /// Sets an interval at which HTTP2 Ping frames should be sent to keep a
+    /// connection alive.
+    ///
+    /// Pass `None` to disable HTTP2 keep-alive.
+    ///
+    /// Default is currently disabled.
+    ///
+    /// # Cargo Feature
+    ///
+    pub fn keep_alive_interval(&mut self, interval: impl Into<Option<Duration>>) -> &mut Self {
+        self.h2_builder.keep_alive_interval = interval.into();
+        self
+    }
+
+    /// Sets a timeout for receiving an acknowledgement of the keep-alive ping.
+    ///
+    /// If the ping is not acknowledged within the timeout, the connection will
+    /// be closed. Does nothing if `keep_alive_interval` is disabled.
+    ///
+    /// Default is 20 seconds.
+    ///
+    /// # Cargo Feature
+    ///
+    pub fn keep_alive_timeout(&mut self, timeout: Duration) -> &mut Self {
+        self.h2_builder.keep_alive_timeout = timeout;
+        self
+    }
+
+    /// Set the maximum write buffer size for each HTTP/2 stream.
+    ///
+    /// Default is currently ~400KB, but may change.
+    ///
+    /// # Panics
+    ///
+    /// The value must be no larger than `u32::MAX`.
+    pub fn max_send_buf_size(&mut self, max: usize) -> &mut Self {
+        assert!(max <= std::u32::MAX as usize);
+        self.h2_builder.max_send_buffer_size = max;
+        self
+    }
+
+    /// Enables the [extended CONNECT protocol].
+    ///
+    /// [extended CONNECT protocol]: https://datatracker.ietf.org/doc/html/rfc8441#section-4
+    pub fn enable_connect_protocol(&mut self) -> &mut Self {
+        self.h2_builder.enable_connect_protocol = true;
+        self
+    }
+
+    /// Sets the max size of received header frames.
+    ///
+    /// Default is currently ~16MB, but may change.
+    pub fn max_header_list_size(&mut self, max: u32) -> &mut Self {
+        self.h2_builder.max_header_list_size = max;
+        self
+    }
+
+    // /// Set the timer used in background tasks.
+    // pub fn timer<M>(&mut self, timer: M) -> &mut Self
+    // where
+    //     M: Timer + Send + Sync + 'static,
+    // {
+    //     self.timer = Time::Timer(Arc::new(timer));
+    //     self
+    // }
+
+    /// Bind a connection together with a [`Service`](crate::service::Service).
+    ///
+    /// This returns a Future that must be polled in order for HTTP to be
+    /// driven on the connection.
+    pub fn serve_connection<S, I, Bd>(&self, io: I, service: S) -> Connection<I, S, E>
+    where
+        S: HttpService<IncomingBody, ResBody = Bd>,
+        S::Error: Into<Box<dyn StdError + Send + Sync>>,
+        Bd: Body + 'static,
+        Bd::Error: Into<Box<dyn StdError + Send + Sync>>,
+        I: AsyncRead + AsyncWrite + Unpin,
+        E: ConnStreamExec<S::Future, Bd>,
+    {
+        let proto = proto::h2::Server::new(io, service, &self.h2_builder, self.exec.clone());
+        Connection { conn: proto }
+    }
+}
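// A usage sketch for the HTTP/2 builder above (not part of the patch). Unlike
// the http1 builder, this one needs an executor to spawn per-stream tasks; the
// `TokioExecutor` here mirrors the one defined in the tests below, and the IO
// type and service are placeholders.

use std::time::Duration;

use hyper::server::conn::http2;
use hyper::service::service_fn;
use hyper::{Body, Request, Response};
use tokio::io::{AsyncRead, AsyncWrite};

#[derive(Clone)]
struct TokioExecutor;

impl<F> hyper::rt::Executor<F> for TokioExecutor
where
    F: std::future::Future + Send + 'static,
    F::Output: Send + 'static,
{
    fn execute(&self, fut: F) {
        tokio::spawn(fut);
    }
}

async fn serve_h2<I>(io: I) -> hyper::Result<()>
where
    I: AsyncRead + AsyncWrite + Unpin + 'static,
{
    let svc = service_fn(|_req: Request<Body>| async {
        Ok::<_, hyper::Error>(Response::new(Body::from("hello")))
    });

    http2::Builder::new(TokioExecutor)
        // Send PING frames every 30 seconds to keep idle connections alive.
        .keep_alive_interval(Duration::from_secs(30))
        .serve_connection(io, svc)
        .await
}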
diff --git a/tests/server.rs b/tests/server.rs
index d5e09f9795..67b48d344f 100644
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -2641,6 +2641,144 @@ async fn http2_keep_alive_count_server_pings() {
         .expect("timed out waiting for pings");
 }
 
+// Tests for backported 1.0 APIs
+mod backports {
+    use super::*;
+    use hyper::server::conn::{http1, http2};
+
+    #[tokio::test]
+    async fn http_connect() {
+        let listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
+        let addr = listener.local_addr().unwrap();
+
+        let (tx, rx) = oneshot::channel();
+
+        thread::spawn(move || {
+            let mut tcp = connect(&addr);
+            tcp.write_all(
+                b"\
+                CONNECT localhost:80 HTTP/1.1\r\n\
+                \r\n\
+                eagerly optimistic\
+                ",
+            )
+            .expect("write 1");
+            let mut buf = [0; 256];
+            tcp.read(&mut buf).expect("read 1");
+
+            let expected = "HTTP/1.1 200 OK\r\n";
+            assert_eq!(s(&buf[..expected.len()]), expected);
+            let _ = tx.send(());
+
+            let n = tcp.read(&mut buf).expect("read 2");
+            assert_eq!(s(&buf[..n]), "foo=bar");
+            tcp.write_all(b"bar=foo").expect("write 2");
+        });
+
+        let (socket, _) = listener.accept().await.unwrap();
+        let conn = http1::Builder::new().serve_connection(
+            socket,
+            service_fn(|_| {
+                // In 1.0 we would use `http_body_util::Empty::<Bytes>::new()` to construct
+                // an empty body
+                let res = Response::builder().status(200).body(Body::empty()).unwrap();
+                future::ready(Ok::<_, hyper::Error>(res))
+            }),
+        );
+
+        let parts = conn.without_shutdown().await.unwrap();
+        assert_eq!(parts.read_buf, "eagerly optimistic");
+
+        // wait so that we don't write until other side saw the 200 response
+        rx.await.unwrap();
+
+        let mut io = parts.io;
+        io.write_all(b"foo=bar").await.unwrap();
+        let mut vec = vec![];
+        io.read_to_end(&mut vec).await.unwrap();
+        assert_eq!(vec, b"bar=foo");
+    }
+
+    #[tokio::test]
+    async fn h2_connect() {
+        let listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
+        let addr = listener.local_addr().unwrap();
+
+        let conn = connect_async(addr).await;
+
+        let (h2, connection) = h2::client::handshake(conn).await.unwrap();
+        tokio::spawn(async move {
+            connection.await.unwrap();
+        });
+        let mut h2 = h2.ready().await.unwrap();
+
+        async fn connect_and_recv_bread(
+            h2: &mut SendRequest<Bytes>,
+        ) -> (RecvStream, SendStream<Bytes>) {
+            let request = Request::connect("localhost").body(()).unwrap();
+            let (response, send_stream) = h2.send_request(request, false).unwrap();
+            let response = response.await.unwrap();
+            assert_eq!(response.status(), StatusCode::OK);
+
+            let mut body = response.into_body();
+            let bytes = body.data().await.unwrap().unwrap();
+            assert_eq!(&bytes[..], b"Bread?");
+            let _ = body.flow_control().release_capacity(bytes.len());
+
+            (body, send_stream)
+        }
+
+        tokio::spawn(async move {
+            let (mut recv_stream, mut send_stream) = connect_and_recv_bread(&mut h2).await;
+
+            send_stream.send_data("Baguette!".into(), true).unwrap();
+
+            assert!(recv_stream.data().await.unwrap().unwrap().is_empty());
+        });
+
+        // In 1.0 the `Body` struct is renamed to `IncomingBody`
+        let svc = service_fn(move |req: Request<Body>| {
+            let on_upgrade = hyper::upgrade::on(req);
+
+            tokio::spawn(async move {
+                let mut upgraded = on_upgrade.await.expect("on_upgrade");
+                upgraded.write_all(b"Bread?").await.unwrap();
+
+                let mut vec = vec![];
+                upgraded.read_to_end(&mut vec).await.unwrap();
+                assert_eq!(s(&vec), "Baguette!");
+
+                upgraded.shutdown().await.unwrap();
+            });
+
+            future::ok::<_, hyper::Error>(
+                // In 1.0 we would use `http_body_util::Empty::<Bytes>::new()` to construct
+                // an empty body
+                Response::builder().status(200).body(Body::empty()).unwrap(),
+            )
+        });
+
+        let (socket, _) = listener.accept().await.unwrap();
+        http2::Builder::new(TokioExecutor)
+            .serve_connection(socket, svc)
+            .await
+            .unwrap();
+    }
+
+    #[derive(Clone)]
+    /// An Executor that uses the tokio runtime.
+    pub struct TokioExecutor;
+
+    impl<F> hyper::rt::Executor<F> for TokioExecutor
+    where
+        F: std::future::Future + Send + 'static,
+        F::Output: Send + 'static,
+    {
+        fn execute(&self, fut: F) {
+            tokio::task::spawn(fut);
+        }
+    }
+}
+
 // -------------------------------------------------
 // the Server that is used to run all the tests with
 // -------------------------------------------------
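// A library-level sketch of the `without_shutdown` / `Parts` flow exercised by
// the `http_connect` test above (not part of the patch; the IO type is a
// placeholder). Note that `without_shutdown` requires the service future to be
// `Unpin`, which is why this uses `std::future::ready` instead of an `async`
// block.

use hyper::server::conn::http1;
use hyper::service::service_fn;
use hyper::{Body, Request, Response};
use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt};

async fn reclaim_io_after_connect<I>(io: I) -> Result<(), Box<dyn std::error::Error>>
where
    I: AsyncRead + AsyncWrite + Unpin + 'static,
{
    let svc = service_fn(|_req: Request<Body>| {
        std::future::ready(Ok::<_, hyper::Error>(
            Response::builder().status(200).body(Body::empty()).unwrap(),
        ))
    });

    let conn = http1::Builder::new().serve_connection(io, svc);
    // Resolves once the connection is "done", without shutting the IO down.
    let mut parts = conn.without_shutdown().await?;

    // `parts.read_buf` holds any bytes the client sent past its request head;
    // a real tunnel would feed them to the next protocol before reading more.
    parts.io.write_all(b"tunnel established").await?;
    Ok(())
}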