From b38f14c534aca230f32430854b7fda77e7e402c4 Mon Sep 17 00:00:00 2001 From: David Pacheco Date: Mon, 9 Jan 2023 13:22:57 -0800 Subject: [PATCH 01/47] switch block-style to line-style comments --- dropshot/examples/basic.rs | 70 +- dropshot/examples/file_server.rs | 98 +- dropshot/examples/https.rs | 86 +- dropshot/examples/index.rs | 38 +- dropshot/examples/module-basic.rs | 80 +- dropshot/examples/module-shared-context.rs | 76 +- dropshot/examples/multiple-servers.rs | 258 ++-- dropshot/examples/pagination-basic.rs | 80 +- .../examples/pagination-multiple-resources.rs | 68 +- .../examples/pagination-multiple-sorts.rs | 308 +++-- dropshot/examples/petstore.rs | 4 +- dropshot/examples/schema-with-example.rs | 28 +- dropshot/examples/self-referential.rs | 8 +- dropshot/examples/websocket.rs | 44 +- dropshot/examples/well-tagged.rs | 40 +- dropshot/src/api_description.rs | 280 ++--- dropshot/src/config.rs | 104 +- dropshot/src/error.rs | 268 ++-- dropshot/src/from_map.rs | 104 +- dropshot/src/handler.rs | 998 +++++++-------- dropshot/src/http_util.rs | 158 ++- dropshot/src/lib.rs | 1093 ++++++++--------- dropshot/src/logging.rs | 196 ++- dropshot/src/pagination.rs | 658 +++++----- dropshot/src/router.rs | 504 ++++---- dropshot/src/server.rs | 214 ++-- dropshot/src/test_util.rs | 464 +++---- dropshot/src/to_map.rs | 16 +- dropshot/src/type_util.rs | 46 +- dropshot/src/websocket.rs | 136 +- dropshot/tests/common/mod.rs | 20 +- dropshot/tests/test_config.rs | 68 +- dropshot/tests/test_demo.rs | 210 ++-- dropshot/tests/test_openapi.rs | 28 +- dropshot/tests/test_pagination.rs | 302 ++--- dropshot/tests/test_tls.rs | 16 +- dropshot_endpoint/src/lib.rs | 28 +- rustfmt.toml | 4 + 38 files changed, 3039 insertions(+), 4162 deletions(-) diff --git a/dropshot/examples/basic.rs b/dropshot/examples/basic.rs index 139bf3d9c..c11adbbcb 100644 --- a/dropshot/examples/basic.rs +++ b/dropshot/examples/basic.rs @@ -1,7 +1,5 @@ // Copyright 2020 Oxide Computer Company -/*! - * Example use of Dropshot. - */ +//! Example use of Dropshot. use dropshot::endpoint; use dropshot::ApiDescription; @@ -23,85 +21,63 @@ use std::sync::Arc; #[tokio::main] async fn main() -> Result<(), String> { - /* - * We must specify a configuration with a bind address. We'll use 127.0.0.1 - * since it's available and won't expose this server outside the host. We - * request port 0, which allows the operating system to pick any available - * port. - */ + // We must specify a configuration with a bind address. We'll use 127.0.0.1 + // since it's available and won't expose this server outside the host. We + // request port 0, which allows the operating system to pick any available + // port. let config_dropshot: ConfigDropshot = Default::default(); - /* - * For simplicity, we'll configure an "info"-level logger that writes to - * stderr assuming that it's a terminal. - */ + // For simplicity, we'll configure an "info"-level logger that writes to + // stderr assuming that it's a terminal. let config_logging = ConfigLogging::StderrTerminal { level: ConfigLoggingLevel::Info }; let log = config_logging .to_logger("example-basic") .map_err(|error| format!("failed to create logger: {}", error))?; - /* - * Build a description of the API. - */ + // Build a description of the API. let mut api = ApiDescription::new(); api.register(example_api_get_counter).unwrap(); api.register(example_api_put_counter).unwrap(); - /* - * The functions that implement our API endpoints will share this context. 
- */ + // The functions that implement our API endpoints will share this context. let api_context = ExampleContext::new(); - /* - * Set up the server. - */ + // Set up the server. let server = HttpServerStarter::new(&config_dropshot, api, api_context, &log) .map_err(|error| format!("failed to create server: {}", error))? .start(); - /* - * Wait for the server to stop. Note that there's not any code to shut down - * this server, so we should never get past this point. - */ + // Wait for the server to stop. Note that there's not any code to shut down + // this server, so we should never get past this point. server.await } -/** - * Application-specific example context (state shared by handler functions) - */ +/// Application-specific example context (state shared by handler functions) struct ExampleContext { - /** counter that can be manipulated by requests to the HTTP API */ + /// counter that can be manipulated by requests to the HTTP API counter: AtomicU64, } impl ExampleContext { - /** - * Return a new ExampleContext. - */ + /// Return a new ExampleContext. pub fn new() -> ExampleContext { ExampleContext { counter: AtomicU64::new(0) } } } -/* - * HTTP API interface - */ +// HTTP API interface -/** - * `CounterValue` represents the value of the API's counter, either as the - * response to a GET request to fetch the counter or as the body of a PUT - * request to update the counter. - */ +/// `CounterValue` represents the value of the API's counter, either as the +/// response to a GET request to fetch the counter or as the body of a PUT +/// request to update the counter. #[derive(Deserialize, Serialize, JsonSchema)] struct CounterValue { counter: u64, } -/** - * Fetch the current value of the counter. - */ +/// Fetch the current value of the counter. #[endpoint { method = GET, path = "/counter", @@ -116,10 +92,8 @@ async fn example_api_get_counter( })) } -/** - * Update the current value of the counter. Note that the special value of 10 - * is not allowed (just to demonstrate how to generate an error). - */ +/// Update the current value of the counter. Note that the special value of 10 +/// is not allowed (just to demonstrate how to generate an error). #[endpoint { method = PUT, path = "/counter", diff --git a/dropshot/examples/file_server.rs b/dropshot/examples/file_server.rs index cd0f9da15..59296bf3c 100644 --- a/dropshot/examples/file_server.rs +++ b/dropshot/examples/file_server.rs @@ -16,69 +16,51 @@ use serde::Deserialize; use std::path::PathBuf; use std::sync::Arc; -/** - * Our context is simply the root of the directory we want to serve. - */ +/// Our context is simply the root of the directory we want to serve. struct FileServerContext { base: PathBuf, } #[tokio::main] async fn main() -> Result<(), String> { - /* - * We must specify a configuration with a bind address. We'll use 127.0.0.1 - * since it's available and won't expose this server outside the host. We - * request port 0, which allows the operating system to pick any available - * port. - */ + // We must specify a configuration with a bind address. We'll use 127.0.0.1 + // since it's available and won't expose this server outside the host. We + // request port 0, which allows the operating system to pick any available + // port. let config_dropshot = Default::default(); - /* - * For simplicity, we'll configure an "info"-level logger that writes to - * stderr assuming that it's a terminal. - */ + // For simplicity, we'll configure an "info"-level logger that writes to + // stderr assuming that it's a terminal. 
    let config_logging =
         ConfigLogging::StderrTerminal { level: ConfigLoggingLevel::Info };
     let log = config_logging
         .to_logger("example-basic")
         .map_err(|error| format!("failed to create logger: {}", error))?;
 
-    /*
-     * Build a description of the API -- in this case it's not much of an API!.
-     */
+    // Build a description of the API -- in this case it's not much of an API!
     let mut api = ApiDescription::new();
     api.register(static_content).unwrap();
 
-    /*
-     * Specify the directory we want to serve.
-     */
+    // Specify the directory we want to serve.
     let context = FileServerContext { base: PathBuf::from(".") };
 
-    /*
-     * Set up the server.
-     */
+    // Set up the server.
     let server = HttpServerStarter::new(&config_dropshot, api, context, &log)
         .map_err(|error| format!("failed to create server: {}", error))?
         .start();
 
-    /*
-     * Wait for the server to stop.  Note that there's not any code to shut down
-     * this server, so we should never get past this point.
-     */
+    // Wait for the server to stop. Note that there's not any code to shut down
+    // this server, so we should never get past this point.
     server.await
 }
 
-/**
- * Dropshot deserializes the input path into this Vec.
- */
+/// Dropshot deserializes the input path into this Vec.
 #[derive(Deserialize, JsonSchema)]
 struct AllPath {
     path: Vec<String>,
 }
 
-/**
- * Serve files from the specified root path.
- */
+/// Serve files from the specified root path.
 #[endpoint {
     method = GET,
@@ -100,25 +82,23 @@ async fn static_content(
     let path = path.into_inner().path;
     let mut entry = rqctx.context().base.clone();
     for component in &path {
-        /* The previous iteration needs to have resulted in a directory. */
+        // The previous iteration needs to have resulted in a directory.
         if !entry.is_dir() {
             return Err(HttpError::for_bad_request(
                 None,
                 format!("expected directory: {:?}", entry),
             ));
         }
-        /* Dropshot won't ever give us dot-components. */
+        // Dropshot won't ever give us dot-components.
         assert_ne!(component, ".");
         assert_ne!(component, "..");
         entry.push(component);
 
-        /*
-         * We explicitly prohibit consumers from following symlinks to prevent
-         * showing data outside of the intended directory.
-         * TODO-security There's a time-of-check to time-of-use race here!
-         * Someone could replace "entry" with a symlink immediately after we
-         * check.
-         */
+        // We explicitly prohibit consumers from following symlinks to prevent
+        // showing data outside of the intended directory.
+        // TODO-security There's a time-of-check to time-of-use race here!
+        // Someone could replace "entry" with a symlink immediately after we
+        // check.
         let m = entry.symlink_metadata().map_err(|e| {
             HttpError::for_bad_request(
                 None,
@@ -133,10 +113,8 @@ async fn static_content(
         }
     }
 
-    /*
-     * If the entry is a directory, we serve a listing of its contents.  If it's
-     * regular file we serve the file.
-     */
+    // If the entry is a directory, we serve a listing of its contents. If it's
+    // a regular file we serve the file.
     if entry.is_dir() {
         let body = dir_body(&entry).await.map_err(|e| {
             HttpError::for_bad_request(
@@ -158,7 +136,7 @@ async fn static_content(
         })?;
         let file_stream = hyper_staticfile::FileBytesStream::new(file);
 
-        /* Derive the MIME type from the file name */
+        // Derive the MIME type from the file name
         let content_type = mime_guess::from_path(&entry)
             .first()
             .map_or_else(|| "text/plain".to_string(), |m| m.to_string());
@@ -170,10 +148,8 @@ async fn static_content(
     }
 }
 
-/**
- * Generate a simple HTML listing of files within the directory.
- * See the note below regarding the handling of trailing slashes.
- */
+/// Generate a simple HTML listing of files within the directory.
+/// See the note below regarding the handling of trailing slashes.
 async fn dir_body(dir_path: &PathBuf) -> Result<String, std::io::Error> {
     let dir_link = dir_path.to_string_lossy();
     let mut dir = tokio::fs::read_dir(&dir_path).await?;
@@ -195,18 +171,16 @@ async fn dir_body(dir_path: &PathBuf) -> Result<String, std::io::Error> {
     while let Some(entry) = dir.next_entry().await? {
         let name = entry.file_name();
         let name = name.to_string_lossy();
-        /*
-         * Note that Dropshot handles paths with and without trailing slashes
-         * as identical.  This is important with respect to relative paths as
-         * the destination of a relative path is different depending on whether
-         * or not a trailing slash is present in the browser's location bar.
-         * For example, a relative url of "bar" would go from the location
-         * "localhost:123/foo" to "localhost:123/bar" and from the location
-         * "localhost:123/foo/" to "localhost:123/foo/bar".  More robust
-         * handling would require distinct handling of the trailing slash
-         * and a redirect in the case of its absence when navigating to a
-         * directory.
-         */
+        // Note that Dropshot handles paths with and without trailing slashes
+        // as identical. This is important with respect to relative paths as
+        // the destination of a relative path is different depending on whether
+        // or not a trailing slash is present in the browser's location bar.
+        // For example, a relative url of "bar" would go from the location
+        // "localhost:123/foo" to "localhost:123/bar" and from the location
+        // "localhost:123/foo/" to "localhost:123/foo/bar". More robust
+        // handling would require distinct handling of the trailing slash
+        // and a redirect in the case of its absence when navigating to a
+        // directory.
         body.push_str(
             format!(
                 r#"
  • {}
  • "#, diff --git a/dropshot/examples/https.rs b/dropshot/examples/https.rs index 60c946d0e..524458723 100644 --- a/dropshot/examples/https.rs +++ b/dropshot/examples/https.rs @@ -1,8 +1,6 @@ // Copyright 2020 Oxide Computer Company -/*! - * Example use of Dropshot with TLS enabled - */ +//! Example use of Dropshot with TLS enabled use dropshot::endpoint; use dropshot::ApiDescription; @@ -25,10 +23,8 @@ use std::sync::atomic::Ordering; use std::sync::Arc; use tempfile::NamedTempFile; -/* - * This function would not be used in a normal application. It is used to - * generate temporary keys and certificates for the purpose of this demo. - */ +// This function would not be used in a normal application. It is used to +// generate temporary keys and certificates for the purpose of this demo. fn generate_keys() -> Result<(NamedTempFile, NamedTempFile), String> { let keypair = rcgen::generate_simple_self_signed(vec!["localhost".to_string()]) @@ -57,20 +53,16 @@ fn generate_keys() -> Result<(NamedTempFile, NamedTempFile), String> { #[tokio::main] async fn main() -> Result<(), String> { - /* - * Begin by generating TLS certificates and keys. A normal application would - * just pass the paths to these via ConfigDropshot. - */ + // Begin by generating TLS certificates and keys. A normal application would + // just pass the paths to these via ConfigDropshot. let (cert_file, key_file) = generate_keys()?; - /* - * We must specify a configuration with a bind address. We'll use 127.0.0.1 - * since it's available and won't expose this server outside the host. We - * request port 0, which allows the operating system to pick any available - * port. - * - * In addition, we'll make this an HTTPS server. - */ + // We must specify a configuration with a bind address. We'll use 127.0.0.1 + // since it's available and won't expose this server outside the host. We + // request port 0, which allows the operating system to pick any available + // port. + // + // In addition, we'll make this an HTTPS server. let config_dropshot = ConfigDropshot { tls: Some(ConfigTls::AsFile { cert_file: cert_file.path().to_path_buf(), @@ -79,77 +71,57 @@ async fn main() -> Result<(), String> { ..Default::default() }; - /* - * For simplicity, we'll configure an "info"-level logger that writes to - * stderr assuming that it's a terminal. - */ + // For simplicity, we'll configure an "info"-level logger that writes to + // stderr assuming that it's a terminal. let config_logging = ConfigLogging::StderrTerminal { level: ConfigLoggingLevel::Info }; let log = config_logging .to_logger("example-basic") .map_err(|error| format!("failed to create logger: {}", error))?; - /* - * Build a description of the API. - */ + // Build a description of the API. let mut api = ApiDescription::new(); api.register(example_api_get_counter).unwrap(); api.register(example_api_put_counter).unwrap(); - /* - * The functions that implement our API endpoints will share this context. - */ + // The functions that implement our API endpoints will share this context. let api_context = ExampleContext::new(); - /* - * Set up the server. - */ + // Set up the server. let server = HttpServerStarter::new(&config_dropshot, api, api_context, &log) .map_err(|error| format!("failed to create server: {}", error))? .start(); - /* - * Wait for the server to stop. Note that there's not any code to shut down - * this server, so we should never get past this point. - */ + // Wait for the server to stop. 
Note that there's not any code to shut down + // this server, so we should never get past this point. server.await } -/** - * Application-specific example context (state shared by handler functions) - */ +/// Application-specific example context (state shared by handler functions) struct ExampleContext { - /** counter that can be manipulated by requests to the HTTP API */ + /// counter that can be manipulated by requests to the HTTP API counter: AtomicU64, } impl ExampleContext { - /** - * Return a new ExampleContext. - */ + /// Return a new ExampleContext. pub fn new() -> ExampleContext { ExampleContext { counter: AtomicU64::new(0) } } } -/* - * HTTP API interface - */ +// HTTP API interface -/** - * `CounterValue` represents the value of the API's counter, either as the - * response to a GET request to fetch the counter or as the body of a PUT - * request to update the counter. - */ +/// `CounterValue` represents the value of the API's counter, either as the +/// response to a GET request to fetch the counter or as the body of a PUT +/// request to update the counter. #[derive(Deserialize, Serialize, JsonSchema)] struct CounterValue { counter: u64, } -/** - * Fetch the current value of the counter. - */ +/// Fetch the current value of the counter. #[endpoint { method = GET, path = "/counter", @@ -164,10 +136,8 @@ async fn example_api_get_counter( })) } -/** - * Update the current value of the counter. Note that the special value of 10 - * is not allowed (just to demonstrate how to generate an error). - */ +/// Update the current value of the counter. Note that the special value of 10 +/// is not allowed (just to demonstrate how to generate an error). #[endpoint { method = PUT, path = "/counter", diff --git a/dropshot/examples/index.rs b/dropshot/examples/index.rs index 1f1773812..827d74cfa 100644 --- a/dropshot/examples/index.rs +++ b/dropshot/examples/index.rs @@ -1,7 +1,5 @@ // Copyright 2021 Oxide Computer Company -/*! - * Example use of Dropshot for matching wildcard paths to serve static content. - */ +//! Example use of Dropshot for matching wildcard paths to serve static content. use dropshot::ApiDescription; use dropshot::ConfigDropshot; @@ -19,41 +17,31 @@ use std::sync::Arc; #[tokio::main] async fn main() -> Result<(), String> { - /* - * We must specify a configuration with a bind address. We'll use 127.0.0.1 - * since it's available and won't expose this server outside the host. We - * request port 0, which allows the operating system to pick any available - * port. - */ + // We must specify a configuration with a bind address. We'll use 127.0.0.1 + // since it's available and won't expose this server outside the host. We + // request port 0, which allows the operating system to pick any available + // port. let config_dropshot: ConfigDropshot = Default::default(); - /* - * For simplicity, we'll configure an "info"-level logger that writes to - * stderr assuming that it's a terminal. - */ + // For simplicity, we'll configure an "info"-level logger that writes to + // stderr assuming that it's a terminal. let config_logging = ConfigLogging::StderrTerminal { level: ConfigLoggingLevel::Info }; let log = config_logging .to_logger("example-basic") .map_err(|error| format!("failed to create logger: {}", error))?; - /* - * Build a description of the API. - */ + // Build a description of the API. let mut api = ApiDescription::new(); api.register(index).unwrap(); - /* - * Set up the server. - */ + // Set up the server. 
let server = HttpServerStarter::new(&config_dropshot, api, (), &log) .map_err(|error| format!("failed to create server: {}", error))? .start(); - /* - * Wait for the server to stop. Note that there's not any code to shut down - * this server, so we should never get past this point. - */ + // Wait for the server to stop. Note that there's not any code to shut down + // this server, so we should never get past this point. server.await } @@ -62,9 +50,7 @@ struct AllPath { path: Vec, } -/** - * Return static content for all paths. - */ +/// Return static content for all paths. #[endpoint { method = GET, diff --git a/dropshot/examples/module-basic.rs b/dropshot/examples/module-basic.rs index 3b486d7c7..f6407afe2 100644 --- a/dropshot/examples/module-basic.rs +++ b/dropshot/examples/module-basic.rs @@ -1,7 +1,5 @@ // Copyright 2020 Oxide Computer Company -/*! - * Example use of Dropshot. - */ +//! Example use of Dropshot. use dropshot::ApiDescription; use dropshot::ConfigLogging; @@ -14,86 +12,64 @@ use std::sync::atomic::AtomicU64; #[tokio::main] async fn main() -> Result<(), String> { - /* - * We must specify a configuration with a bind address. We'll use 127.0.0.1 - * since it's available and won't expose this server outside the host. We - * request port 0, which allows the operating system to pick any available - * port. - */ + // We must specify a configuration with a bind address. We'll use 127.0.0.1 + // since it's available and won't expose this server outside the host. We + // request port 0, which allows the operating system to pick any available + // port. let config_dropshot = Default::default(); - /* - * For simplicity, we'll configure an "info"-level logger that writes to - * stderr assuming that it's a terminal. - */ + // For simplicity, we'll configure an "info"-level logger that writes to + // stderr assuming that it's a terminal. let config_logging = ConfigLogging::StderrTerminal { level: ConfigLoggingLevel::Info }; let log = config_logging .to_logger("example-basic") .map_err(|error| format!("failed to create logger: {}", error))?; - /* - * Build a description of the API. - */ + // Build a description of the API. let mut api = ApiDescription::new(); api.register(routes::example_api_get_counter).unwrap(); api.register(routes::example_api_put_counter).unwrap(); - /* - * The functions that implement our API endpoints will share this context. - */ + // The functions that implement our API endpoints will share this context. let api_context = ExampleContext::new(); - /* - * Set up the server. - */ + // Set up the server. let server = HttpServerStarter::new(&config_dropshot, api, api_context, &log) .map_err(|error| format!("failed to create server: {}", error))? .start(); - /* - * Wait for the server to stop. Note that there's not any code to shut down - * this server, so we should never get past this point. - */ + // Wait for the server to stop. Note that there's not any code to shut down + // this server, so we should never get past this point. server.await } -/** - * Application-specific example context (state shared by handler functions) - */ +/// Application-specific example context (state shared by handler functions) pub struct ExampleContext { - /** counter that can be manipulated by requests to the HTTP API */ + /// counter that can be manipulated by requests to the HTTP API pub counter: AtomicU64, } impl ExampleContext { - /** - * Return a new ExampleContext. - */ + /// Return a new ExampleContext. 
pub fn new() -> ExampleContext { ExampleContext { counter: AtomicU64::new(0) } } } -/* - * HTTP API interface - */ +// HTTP API interface -/** - * `CounterValue` represents the value of the API's counter, either as the - * response to a GET request to fetch the counter or as the body of a PUT - * request to update the counter. - */ +/// `CounterValue` represents the value of the API's counter, either as the +/// response to a GET request to fetch the counter or as the body of a PUT +/// request to update the counter. #[derive(Deserialize, Serialize, JsonSchema)] pub struct CounterValue { counter: u64, } -/** - * The routes module might be imported from another crate that publishes - * mountable routes - */ +/// The routes module might be imported from another crate that publishes +/// mountable routes pub mod routes { use crate::{CounterValue, ExampleContext}; use dropshot::endpoint; @@ -105,11 +81,9 @@ pub mod routes { use std::sync::atomic::Ordering; use std::sync::Arc; - /** - * Fetch the current value of the counter. - * NOTE: The endpoint macro inherits its module visibility from - * the endpoint async function definition - */ + /// Fetch the current value of the counter. + /// NOTE: The endpoint macro inherits its module visibility from + /// the endpoint async function definition #[endpoint { method = GET, path = "/counter", @@ -124,10 +98,8 @@ pub mod routes { })) } - /** - * Update the current value of the counter. Note that the special value of 10 - * is not allowed (just to demonstrate how to generate an error). - */ + /// Update the current value of the counter. Note that the special value of 10 + /// is not allowed (just to demonstrate how to generate an error). #[endpoint { method = PUT, path = "/counter", diff --git a/dropshot/examples/module-shared-context.rs b/dropshot/examples/module-shared-context.rs index 367c48ee2..94d108402 100644 --- a/dropshot/examples/module-shared-context.rs +++ b/dropshot/examples/module-shared-context.rs @@ -1,8 +1,6 @@ // Copyright 2021 Oxide Computer Company -/*! - * Example use of Dropshot where a client wants to act on - * a custom context object that outlives endpoint functions. - */ +//! Example use of Dropshot where a client wants to act on +//! a custom context object that outlives endpoint functions. use dropshot::endpoint; use dropshot::ApiDescription; @@ -22,38 +20,28 @@ use std::sync::Arc; #[tokio::main] async fn main() -> Result<(), String> { - /* - * We must specify a configuration with a bind address. We'll use 127.0.0.1 - * since it's available and won't expose this server outside the host. We - * request port 0, which allows the operating system to pick any available - * port. - */ + // We must specify a configuration with a bind address. We'll use 127.0.0.1 + // since it's available and won't expose this server outside the host. We + // request port 0, which allows the operating system to pick any available + // port. let config_dropshot = Default::default(); - /* - * For simplicity, we'll configure an "info"-level logger that writes to - * stderr assuming that it's a terminal. - */ + // For simplicity, we'll configure an "info"-level logger that writes to + // stderr assuming that it's a terminal. let config_logging = ConfigLogging::StderrTerminal { level: ConfigLoggingLevel::Info }; let log = config_logging .to_logger("example-basic") .map_err(|error| format!("failed to create logger: {}", error))?; - /* - * Build a description of the API. - */ + // Build a description of the API. 
let mut api = ApiDescription::new(); api.register(example_api_get_counter).unwrap(); - /* - * The functions that implement our API endpoints will share this context. - */ + // The functions that implement our API endpoints will share this context. let api_context = Arc::new(ExampleContext::new()); - /* - * Set up the server. - */ + // Set up the server. let server = HttpServerStarter::new( &config_dropshot, api, @@ -63,17 +51,15 @@ async fn main() -> Result<(), String> { .map_err(|error| format!("failed to create server: {}", error))? .start(); - /* - * Wait for the server to stop. Note that there's not any code to shut down - * this server, so we should never get past this point. - * - * Even with the endpoints acting on the `ExampleContext` object, - * we can still hold a reference and act on the object beyond the lifetime - * of those endpoints. - * - * In this example, we increment the counter every five seconds, - * regardless of received HTTP requests. - */ + // Wait for the server to stop. Note that there's not any code to shut down + // this server, so we should never get past this point. + // + // Even with the endpoints acting on the `ExampleContext` object, + // we can still hold a reference and act on the object beyond the lifetime + // of those endpoints. + // + // In this example, we increment the counter every five seconds, + // regardless of received HTTP requests. futures::pin_mut!(server); loop { let sleep = @@ -88,38 +74,28 @@ async fn main() -> Result<(), String> { Ok(()) } -/** - * Application-specific example context (state shared by handler functions) - */ +/// Application-specific example context (state shared by handler functions) pub struct ExampleContext { - /** counter that can be read by requests to the HTTP API */ + /// counter that can be read by requests to the HTTP API pub counter: AtomicU64, } impl ExampleContext { - /** - * Return a new ExampleContext. - */ + /// Return a new ExampleContext. pub fn new() -> ExampleContext { ExampleContext { counter: AtomicU64::new(0) } } } -/* - * HTTP API interface - */ +// HTTP API interface -/** - * `CounterValue` represents the value of the API's counter. - */ +/// `CounterValue` represents the value of the API's counter. #[derive(Deserialize, Serialize, JsonSchema)] pub struct CounterValue { counter: u64, } -/** - * Fetch the current value of the counter. - */ +/// Fetch the current value of the counter. #[endpoint { method = GET, path = "/counter", diff --git a/dropshot/examples/multiple-servers.rs b/dropshot/examples/multiple-servers.rs index 936feaf0c..8a46de776 100644 --- a/dropshot/examples/multiple-servers.rs +++ b/dropshot/examples/multiple-servers.rs @@ -1,73 +1,71 @@ // Copyright 2023 Oxide Computer Company -/*! - * Example use of Dropshot with multiple servers sharing context. - * - * This example initially starts two servers named "A" and "B" listening on - * `127.0.0.1:12345` and `127.0.0.1:12346`. 
On either address, a client can - * query the list of currently-running servers: - * - * ```text - * sh$ curl -X GET http://127.0.0.1:12345/servers | jq - * [ - * { - * "name": "B", - * "bind_addr": "127.0.0.1:12346" - * }, - * { - * "name": "A", - * "bind_addr": "127.0.0.1:12345" - * } - * ] - * ``` - * - * start a new server, as long as the name and bind address aren't already in - * use: - * - * ```text - * sh$ curl -X POST -H 'Content-Type: application/json' http://127.0.0.1:12345/servers/C -d '"127.0.0.1:12347"' | jq - * { - * "name": "C", - * "bind_addr": "127.0.0.1:12347" - * } - * sh$ % curl -X GET http://127.0.0.1:12345/servers | jq - * [ - * { - * "name": "B", - * "bind_addr": "127.0.0.1:12346" - * }, - * { - * "name": "C", - * "bind_addr": "127.0.0.1:12347" - * }, - * { - * "name": "A", - * "bind_addr": "127.0.0.1:12345" - * } - * ] - * ``` - * - * or stop a running server by name: - * - * ```text - * sh$ % curl -X DELETE http://127.0.0.1:12347/servers/B - * sh$ curl -X GET http://127.0.0.1:12345/servers | jq - * [ - * { - * "name": "C", - * "bind_addr": "127.0.0.1:12347" - * }, - * { - * "name": "A", - * "bind_addr": "127.0.0.1:12345" - * } - * ] - * ``` - * - * The final example shows deleting server "B" via server C's address, and then - * querying server "A" to show that "B" is gone. The logfiles of the running - * process will also note the shutdown of server B. - */ +//! Example use of Dropshot with multiple servers sharing context. +//! +//! This example initially starts two servers named "A" and "B" listening on +//! `127.0.0.1:12345` and `127.0.0.1:12346`. On either address, a client can +//! query the list of currently-running servers: +//! +//! ```text +//! sh$ curl -X GET http://127.0.0.1:12345/servers | jq +//! [ +//! { +//! "name": "B", +//! "bind_addr": "127.0.0.1:12346" +//! }, +//! { +//! "name": "A", +//! "bind_addr": "127.0.0.1:12345" +//! } +//! ] +//! ``` +//! +//! start a new server, as long as the name and bind address aren't already in +//! use: +//! +//! ```text +//! sh$ curl -X POST -H 'Content-Type: application/json' http://127.0.0.1:12345/servers/C -d '"127.0.0.1:12347"' | jq +//! { +//! "name": "C", +//! "bind_addr": "127.0.0.1:12347" +//! } +//! sh$ % curl -X GET http://127.0.0.1:12345/servers | jq +//! [ +//! { +//! "name": "B", +//! "bind_addr": "127.0.0.1:12346" +//! }, +//! { +//! "name": "C", +//! "bind_addr": "127.0.0.1:12347" +//! }, +//! { +//! "name": "A", +//! "bind_addr": "127.0.0.1:12345" +//! } +//! ] +//! ``` +//! +//! or stop a running server by name: +//! +//! ```text +//! sh$ % curl -X DELETE http://127.0.0.1:12347/servers/B +//! sh$ curl -X GET http://127.0.0.1:12345/servers | jq +//! [ +//! { +//! "name": "C", +//! "bind_addr": "127.0.0.1:12347" +//! }, +//! { +//! "name": "A", +//! "bind_addr": "127.0.0.1:12345" +//! } +//! ] +//! ``` +//! +//! The final example shows deleting server "B" via server C's address, and then +//! querying server "A" to show that "B" is gone. The logfiles of the running +//! process will also note the shutdown of server B. use dropshot::endpoint; use dropshot::ApiDescription; @@ -102,16 +100,12 @@ use tokio::sync::Mutex; #[tokio::main] async fn main() -> Result<(), String> { - /* - * Initial set of servers to start. Once they're running, we may add or - * remove servers based on client requests. - */ + // Initial set of servers to start. Once they're running, we may add or + // remove servers based on client requests. 
    let initial_servers = [("A", "127.0.0.1:12345"), ("B", "127.0.0.1:12346")];
 
-    /*
-     * We keep the set of running servers in a `FuturesUnordered` to allow us to
-     * drive them all concurrently.
-     */
+    // We keep the set of running servers in a `FuturesUnordered` to allow us to
+    // drive them all concurrently.
     let mut running_servers = FuturesUnordered::new();
     let (running_servers_tx, mut running_servers_rx) = mpsc::channel(8);
 
@@ -122,21 +116,17 @@ async fn main() -> Result<(), String> {
         shared_context.start_server(name, bind_address).await?;
     }
 
-    /*
-     * Explicitly drop `shared_context` so we can detect when all servers are
-     * gone via `running_servers_rx` (which returns `None` when all transmitters
-     * are dropped).
-     */
+    // Explicitly drop `shared_context` so we can detect when all servers are
+    // gone via `running_servers_rx` (which returns `None` when all transmitters
+    // are dropped).
     mem::drop(shared_context);
 
-    /*
-     * Loop until all servers are shut down.
-     *
-     * If we receive a new server on `running_servers_rx`, we added it to
-     * `running_servers`.  If `running_servers_rx` indicates the channel is
-     * closed, we know all server contexts have been dropped and no servers
-     * remain.
-     */
+    // Loop until all servers are shut down.
+    //
+    // If we receive a new server on `running_servers_rx`, we add it to
+    // `running_servers`. If `running_servers_rx` indicates the channel is
+    // closed, we know all server contexts have been dropped and no servers
+    // remain.
     loop {
         tokio::select! {
             maybe_new_server = running_servers_rx.recv() => {
@@ -158,9 +148,7 @@
 
 type ServerShutdownFuture = BoxFuture<'static, Result<(), String>>;
 
-/**
- * Application-specific server context (state shared by handler functions)
- */
+/// Application-specific server context (state shared by handler functions)
 struct MultiServerContext {
     // All running servers have the same underlying `shared` context.
     shared: Arc<SharedMultiServerContext>,
@@ -177,9 +165,7 @@ impl Drop for MultiServerContext {
     }
 }
 
-/**
- * Context shared by all running servers.
- */
+/// Context shared by all running servers.
 struct SharedMultiServerContext {
     servers: Mutex<HashMap<String, HttpServer<MultiServerContext>>>,
     started_server_shutdown_handles: mpsc::Sender<ServerShutdownFuture>,
 }
 
@@ -205,36 +191,28 @@ impl SharedMultiServerContext {
             Entry::Vacant(slot) => slot,
         };
 
-        /*
-         * For simplicity, we'll configure an "info"-level logger that writes to
-         * stderr assuming that it's a terminal.
-         */
+        // For simplicity, we'll configure an "info"-level logger that writes to
+        // stderr assuming that it's a terminal.
         let config_logging =
             ConfigLogging::StderrTerminal { level: ConfigLoggingLevel::Info };
         let log = config_logging
             .to_logger(format!("example-multiserver-{name}"))
             .map_err(|error| format!("failed to create logger: {}", error))?;
 
-        /*
-         * Build a description of the API.
-         *
-         * TODO: Could `ApiDescription` implement `Clone`, or could we pass an
-         * `Arc<ApiDescription>` instead?
-         */
+        // Build a description of the API.
+        //
+        // TODO: Could `ApiDescription` implement `Clone`, or could we pass an
+        // `Arc<ApiDescription>` instead?
         let mut api = ApiDescription::new();
         api.register(api_get_servers).unwrap();
         api.register(api_start_server).unwrap();
         api.register(api_stop_server).unwrap();
 
-        /*
-         * Configure the server with the requested bind address.
-         */
+        // Configure the server with the requested bind address.
         let config_dropshot =
             ConfigDropshot { bind_address, ..Default::default() };
 
-        /*
-         * Set up the server.
-         */
+        // Set up the server.
let context = MultiServerContext { shared: Arc::clone(self), name: name.to_string(), @@ -248,19 +226,15 @@ impl SharedMultiServerContext { slot.insert(server); - /* - * Explicitly drop `servers`, releasing the lock, before we potentially - * block waiting to tell `main()` about this new server. - */ + // Explicitly drop `servers`, releasing the lock, before we potentially + // block waiting to tell `main()` about this new server. mem::drop(servers); - /* - * Tell `main()` about this new running server, allowing it to wait for - * its shutdown. - * - * Ignore the result of this `send()`: we can't unwrap due to missing - * `Debug` impls, but if `main()` is gone we don't care. - */ + // Tell `main()` about this new running server, allowing it to wait for + // its shutdown. + // + // Ignore the result of this `send()`: we can't unwrap due to missing + // `Debug` impls, but if `main()` is gone we don't care. _ = self .started_server_shutdown_handles .send(shutdown_handle.boxed()) @@ -270,9 +244,7 @@ impl SharedMultiServerContext { } } -/* - * HTTP API interface - */ +// HTTP API interface #[derive(Debug, Serialize, JsonSchema)] struct ServerDescription { @@ -280,9 +252,7 @@ struct ServerDescription { bind_addr: SocketAddr, } -/** - * Fetch the current list of running servers. - */ +/// Fetch the current list of running servers. #[endpoint { method = GET, path = "/servers", @@ -309,9 +279,7 @@ struct PathName { name: String, } -/** - * Start a new running server. - */ +/// Start a new running server. #[endpoint { method = POST, path = "/servers/{name}", @@ -326,12 +294,10 @@ async fn api_start_server( let bind_addr = body.into_inner(); api_context.shared.start_server(&name, bind_addr).await.map_err(|err| { - /* - * `for_bad_request` _might_ not be right (e.g., we might have some - * spurious OS error starting the server), but it's likely right (the - * most likely cause for failure is a duplicate name or already-in-use - * bind address), and we're being lazy with errors in this example. - */ + // `for_bad_request` _might_ not be right (e.g., we might have some + // spurious OS error starting the server), but it's likely right (the + // most likely cause for failure is a duplicate name or already-in-use + // bind address), and we're being lazy with errors in this example. HttpError::for_bad_request( Some("StartServerFailed".to_string()), format!("failed to start server {name:?}: {err}"), @@ -341,9 +307,7 @@ async fn api_start_server( Ok(HttpResponseCreated(ServerDescription { name, bind_addr })) } -/** - * Stop a running server by name. - */ +/// Stop a running server by name. #[endpoint { method = DELETE, path = "/servers/{name}", @@ -363,14 +327,12 @@ async fn api_stop_server( ) })?; - /* - * We want to shut down `server`, but it might be the very server handling - * this request! Move the shutdown onto a background task to allow this - * request to complete; otherwise, we deadlock. - * - * We can safely discard the result of `close()` because `main()` also gets - * it via the shutdown handle it received when `server` was created. - */ + // We want to shut down `server`, but it might be the very server handling + // this request! Move the shutdown onto a background task to allow this + // request to complete; otherwise, we deadlock. + // + // We can safely discard the result of `close()` because `main()` also gets + // it via the shutdown handle it received when `server` was created. 
tokio::spawn(server.close()); Ok(HttpResponseDeleted()) diff --git a/dropshot/examples/pagination-basic.rs b/dropshot/examples/pagination-basic.rs index 4935654f4..b34c8541e 100644 --- a/dropshot/examples/pagination-basic.rs +++ b/dropshot/examples/pagination-basic.rs @@ -1,20 +1,18 @@ // Copyright 2020 Oxide Computer Company -/*! - * Example showing a relatively simple use of the pagination API - * - * When you run this program, it will start an HTTP server on an available local - * port. See the log entry to see what port it ran on. Then use curl to use - * it, like this: - * - * ```ignore - * $ curl localhost:50568/projects - * ``` - * - * (Replace 50568 with whatever port your server is listening on.) - * - * Try passing different values of the `limit` query parameter. Try passing the - * next page token from the response as a query parameter, too. - */ +//! Example showing a relatively simple use of the pagination API +//! +//! When you run this program, it will start an HTTP server on an available local +//! port. See the log entry to see what port it ran on. Then use curl to use +//! it, like this: +//! +//! ```ignore +//! $ curl localhost:50568/projects +//! ``` +//! +//! (Replace 50568 with whatever port your server is listening on.) +//! +//! Try passing different values of the `limit` query parameter. Try passing the +//! next page token from the response as a query parameter, too. use dropshot::endpoint; use dropshot::ApiDescription; @@ -39,39 +37,33 @@ use std::net::SocketAddr; use std::ops::Bound; use std::sync::Arc; -/** - * Object returned by our paginated endpoint - * - * Like anything returned by Dropshot, we must implement `JsonSchema` and - * `Serialize`. We also implement `Clone` to simplify the example. - */ +/// Object returned by our paginated endpoint +/// +/// Like anything returned by Dropshot, we must implement `JsonSchema` and +/// `Serialize`. We also implement `Clone` to simplify the example. #[derive(Clone, JsonSchema, Serialize)] struct Project { name: String, // lots more fields } -/** - * Parameters describing the client's position in a scan through all projects - * - * This implementation only needs the name of the last project seen, as we only - * support listing projects in ascending order by name. - * - * This must be `Serialize` so that Dropshot can turn it into a page token to - * include with each page of results, and it must be `Deserialize` to get it - * back in a querystring. - */ +/// Parameters describing the client's position in a scan through all projects +/// +/// This implementation only needs the name of the last project seen, as we only +/// support listing projects in ascending order by name. +/// +/// This must be `Serialize` so that Dropshot can turn it into a page token to +/// include with each page of results, and it must be `Deserialize` to get it +/// back in a querystring. #[derive(Deserialize, JsonSchema, Serialize)] struct ProjectPage { name: String, } -/** - * API endpoint for listing projects - * - * This implementation stores all the projects in a BTreeMap, which makes it - * very easy to fetch a particular range of items based on the key. - */ +/// API endpoint for listing projects +/// +/// This implementation stores all the projects in a BTreeMap, which makes it +/// very easy to fetch a particular range of items based on the key. #[endpoint { method = GET, path = "/projects" @@ -85,14 +77,14 @@ async fn example_list_projects( let tree = rqctx.context(); let projects = match &pag_params.page { WhichPage::First(..) 
=> { - /* Return a list of the first "limit" projects. */ + // Return a list of the first "limit" projects. tree.iter() .take(limit) .map(|(_, project)| project.clone()) .collect() } WhichPage::Next(ProjectPage { name: last_seen }) => { - /* Return a list of the first "limit" projects after this name. */ + // Return a list of the first "limit" projects after this name. tree.range((Bound::Excluded(last_seen.clone()), Bound::Unbounded)) .take(limit) .map(|(_, project)| project.clone()) @@ -116,9 +108,7 @@ async fn main() -> Result<(), String> { .map_err(|e| format!("failed to parse \"port\" argument: {}", e))? .unwrap_or(0); - /* - * Create 1000 projects up front. - */ + // Create 1000 projects up front. let mut tree = BTreeMap::new(); for n in 1..1000 { let name = format!("project{:03}", n); @@ -126,9 +116,7 @@ async fn main() -> Result<(), String> { tree.insert(name, project); } - /* - * Run the Dropshot server. - */ + // Run the Dropshot server. let ctx = tree; let config_dropshot = ConfigDropshot { bind_address: SocketAddr::from((Ipv4Addr::LOCALHOST, port)), diff --git a/dropshot/examples/pagination-multiple-resources.rs b/dropshot/examples/pagination-multiple-resources.rs index 2184a4829..d5730e21e 100644 --- a/dropshot/examples/pagination-multiple-resources.rs +++ b/dropshot/examples/pagination-multiple-resources.rs @@ -1,9 +1,7 @@ // Copyright 2020 Oxide Computer Company -/*! - * Example that shows a paginated API that uses the same pagination fields on - * multiple resources. See the other pagination examples for more information - * about how to run this. - */ +//! Example that shows a paginated API that uses the same pagination fields on +//! multiple resources. See the other pagination examples for more information +//! about how to run this. use dropshot::endpoint; use dropshot::ApiDescription; @@ -31,10 +29,8 @@ use std::ops::Bound; use std::sync::Arc; use uuid::Uuid; -/* - * Example API data model: we have three resources, each having an "id" and - * "name". We'll have one endpoint for each resource to list it. - */ +// Example API data model: we have three resources, each having an "id" and +// "name". We'll have one endpoint for each resource to list it. #[derive(Clone, JsonSchema, Serialize)] struct Project { @@ -57,12 +53,10 @@ struct Instance { // lots more instance-like fields } -/* - * In an API with many resources sharing the same identifying fields, we might - * define a trait to get those fields. Then we could define pagination in terms - * of that trait. To avoid hand-writing the impls, we use a macro. (This might - * be better as a "derive" procedural macro.) - */ +// In an API with many resources sharing the same identifying fields, we might +// define a trait to get those fields. Then we could define pagination in terms +// of that trait. To avoid hand-writing the impls, we use a macro. (This might +// be better as a "derive" procedural macro.) trait HasIdentity { fn id(&self) -> &Uuid; fn name(&self) -> &str; @@ -85,9 +79,7 @@ impl_HasIdentity!(Project); impl_HasIdentity!(Disk); impl_HasIdentity!(Instance); -/* - * Pagination-related types - */ +// Pagination-related types #[derive(Deserialize, Clone, JsonSchema, Serialize)] struct ExScanParams { #[serde(default = "default_sort_mode")] @@ -155,13 +147,11 @@ fn scan_params(p: &WhichPage) -> ExScanParams { } } -/* - * Paginated endpoints to list each type of resource. 
- * - * These could be commonized further (to the point where each of these endpoint - * functions is just a one-line call to a generic function), but we implement - * them separately here for clarity. - */ +// Paginated endpoints to list each type of resource. +// +// These could be commonized further (to the point where each of these endpoint +// functions is just a one-line call to a generic function), but we implement +// them separately here for clarity. #[endpoint { method = GET, @@ -274,9 +264,7 @@ where } } -/* - * General Dropshot-server boilerplate - */ +// General Dropshot-server boilerplate #[tokio::main] async fn main() -> Result<(), String> { @@ -287,9 +275,7 @@ async fn main() -> Result<(), String> { .map_err(|e| format!("failed to parse \"port\" argument: {}", e))? .unwrap_or(0); - /* - * Run the Dropshot server. - */ + // Run the Dropshot server. let ctx = DataCollection::new(); let config_dropshot = ConfigDropshot { bind_address: SocketAddr::from((Ipv4Addr::LOCALHOST, port)), @@ -311,11 +297,9 @@ async fn main() -> Result<(), String> { server.await } -/** - * Tracks a (static) collection of Projects indexed in two different ways to - * demonstrate an endpoint that provides multiple ways to scan a large - * collection. - */ +/// Tracks a (static) collection of Projects indexed in two different ways to +/// demonstrate an endpoint that provides multiple ways to scan a large +/// collection. struct DataCollection { projects_by_name: BTreeMap>, projects_by_id: BTreeMap>, @@ -328,10 +312,8 @@ struct DataCollection { type ItemIter<'a, T> = Box> + 'a>; impl DataCollection { - /** - * Constructs an example collection of projects, disks, and instances to - * back the API endpoints - */ + /// Constructs an example collection of projects, disks, and instances to + /// back the API endpoints pub fn new() -> DataCollection { let mut data = DataCollection { projects_by_id: BTreeMap::new(), @@ -399,10 +381,8 @@ impl DataCollection { self.make_iter(iter) } - /** - * Helper function to turn the initial iterators produced above into what we - * actually need to provide consumers. - */ + /// Helper function to turn the initial iterators produced above into what we + /// actually need to provide consumers. fn make_iter<'a, K, I, T>(&'a self, iter: I) -> ItemIter<'a, T> where I: Iterator)> + 'a, diff --git a/dropshot/examples/pagination-multiple-sorts.rs b/dropshot/examples/pagination-multiple-sorts.rs index e475877ff..0a49c834d 100644 --- a/dropshot/examples/pagination-multiple-sorts.rs +++ b/dropshot/examples/pagination-multiple-sorts.rs @@ -1,94 +1,92 @@ // Copyright 2020 Oxide Computer Company -/*! - * Example of an API endpoint that supports pagination using several different - * fields as the sorting key. - * - * When you run this program, it will start an HTTP server on an available local - * port. See the log for example URLs to use. Try passing different values of - * the `limit` query parameter. Try passing the `next_page` token from the - * response as a query parameter called `page_token`, too. - * - * For background, see src/pagination.rs. This example uses a resource called a - * "Project", which only has a "name" and an "mtime" (modification time). The - * server creates 1,000 projects on startup and provides one API endpoint to - * page through them. 
- * - * Initially, a client just invokes the API to list the first page of results - * using the default sort order (we'll use limit=3 to keep the result set - * short): - * - * ```ignore - * $ curl -s http://127.0.0.1:50800/projects?limit=3 | json - * { - * "next_page": "eyJ2IjoidjEiLCJwYWdlX3N0YXJ0Ijp7Im5hbWUiOlsiYXNjZW5kaW5nIiwicHJvamVjdDAwMyJdfX0=", - * "items": [ - * { - * "name": "project001", - * "mtime": "2020-07-13T17:35:00Z" - * }, - * { - * "name": "project002", - * "mtime": "2020-07-13T17:34:59.999Z" - * }, - * { - * "name": "project003", - * "mtime": "2020-07-13T17:34:59.998Z" - * } - * ] - * } - * ``` - * - * This should be pretty self-explanatory: we have three projects here and - * they're sorted in ascending order by name. The "next_page" token is used to - * fetch the next page of results as follows: - * - * ```ignore - * $ curl -s http://127.0.0.1:50800/projects?limit=3'&'page_token=eyJ2IjoidjEiLCJwYWdlX3N0YXJ0Ijp7Im5hbWUiOlsiYXNjZW5kaW5nIiwicHJvamVjdDAwMyJdfX0= | json - * { - * "next_page": "eyJ2IjoidjEiLCJwYWdlX3N0YXJ0Ijp7Im5hbWUiOlsiYXNjZW5kaW5nIiwicHJvamVjdDAwNiJdfX0=", - * "items": [ - * { - * "name": "project004", - * "mtime": "2020-07-13T17:34:59.997Z" - * }, - * { - * "name": "project005", - * "mtime": "2020-07-13T17:34:59.996Z" - * }, - * { - * "name": "project006", - * "mtime": "2020-07-13T17:34:59.995Z" - * } - * ] - * } - * ``` - * - * Now we have the next three projects and a new token. We can continue this - * way until we've listed all the projects. - * - * What does that page token look like? It's implementation-defined, so you - * shouldn't rely on the structure. In this case, it's a base64-encoded, - * versioned JSON structure describing the scan and the client's position in the - * scan: - * - * ```ignore - * $ echo -n 'eyJ2IjoidjEiLCJwYWdlX3N0YXJ0Ijp7Im5hbWUiOlsiYXNjZW5kaW5nIiwicHJvamVjdDAwNiJdfX0=' | base64 -d | json - * { - * "v": "v1", - * "page_start": { - * "name": [ - * "ascending", - * "project006" - * ] - * } - * } - * ``` - * - * This token says that we're scanning in ascending order of "name" and the last - * one we saw was "project006". Again, this is subject to change and should not - * be relied upon. We mention it here just to help explain how the pagination - * mechanism works. - */ +//! Example of an API endpoint that supports pagination using several different +//! fields as the sorting key. +//! +//! When you run this program, it will start an HTTP server on an available local +//! port. See the log for example URLs to use. Try passing different values of +//! the `limit` query parameter. Try passing the `next_page` token from the +//! response as a query parameter called `page_token`, too. +//! +//! For background, see src/pagination.rs. This example uses a resource called a +//! "Project", which only has a "name" and an "mtime" (modification time). The +//! server creates 1,000 projects on startup and provides one API endpoint to +//! page through them. +//! +//! Initially, a client just invokes the API to list the first page of results +//! using the default sort order (we'll use limit=3 to keep the result set +//! short): +//! +//! ```ignore +//! $ curl -s http://127.0.0.1:50800/projects?limit=3 | json +//! { +//! "next_page": "eyJ2IjoidjEiLCJwYWdlX3N0YXJ0Ijp7Im5hbWUiOlsiYXNjZW5kaW5nIiwicHJvamVjdDAwMyJdfX0=", +//! "items": [ +//! { +//! "name": "project001", +//! "mtime": "2020-07-13T17:35:00Z" +//! }, +//! { +//! "name": "project002", +//! "mtime": "2020-07-13T17:34:59.999Z" +//! }, +//! { +//! "name": "project003", +//! 
"mtime": "2020-07-13T17:34:59.998Z" +//! } +//! ] +//! } +//! ``` +//! +//! This should be pretty self-explanatory: we have three projects here and +//! they're sorted in ascending order by name. The "next_page" token is used to +//! fetch the next page of results as follows: +//! +//! ```ignore +//! $ curl -s http://127.0.0.1:50800/projects?limit=3'&'page_token=eyJ2IjoidjEiLCJwYWdlX3N0YXJ0Ijp7Im5hbWUiOlsiYXNjZW5kaW5nIiwicHJvamVjdDAwMyJdfX0= | json +//! { +//! "next_page": "eyJ2IjoidjEiLCJwYWdlX3N0YXJ0Ijp7Im5hbWUiOlsiYXNjZW5kaW5nIiwicHJvamVjdDAwNiJdfX0=", +//! "items": [ +//! { +//! "name": "project004", +//! "mtime": "2020-07-13T17:34:59.997Z" +//! }, +//! { +//! "name": "project005", +//! "mtime": "2020-07-13T17:34:59.996Z" +//! }, +//! { +//! "name": "project006", +//! "mtime": "2020-07-13T17:34:59.995Z" +//! } +//! ] +//! } +//! ``` +//! +//! Now we have the next three projects and a new token. We can continue this +//! way until we've listed all the projects. +//! +//! What does that page token look like? It's implementation-defined, so you +//! shouldn't rely on the structure. In this case, it's a base64-encoded, +//! versioned JSON structure describing the scan and the client's position in the +//! scan: +//! +//! ```ignore +//! $ echo -n 'eyJ2IjoidjEiLCJwYWdlX3N0YXJ0Ijp7Im5hbWUiOlsiYXNjZW5kaW5nIiwicHJvamVjdDAwNiJdfX0=' | base64 -d | json +//! { +//! "v": "v1", +//! "page_start": { +//! "name": [ +//! "ascending", +//! "project006" +//! ] +//! } +//! } +//! ``` +//! +//! This token says that we're scanning in ascending order of "name" and the last +//! one we saw was "project006". Again, this is subject to change and should not +//! be relied upon. We mention it here just to help explain how the pagination +//! mechanism works. use chrono::offset::TimeZone; use chrono::DateTime; @@ -122,12 +120,10 @@ use std::sync::Arc; #[macro_use] extern crate slog; -/** - * Item returned by our paginated endpoint - * - * Like anything returned by Dropshot, we must implement `JsonSchema` and - * `Serialize`. We also implement `Clone` to simplify the example. - */ +/// Item returned by our paginated endpoint +/// +/// Like anything returned by Dropshot, we must implement `JsonSchema` and +/// `Serialize`. We also implement `Clone` to simplify the example. #[derive(Clone, JsonSchema, Serialize)] struct Project { name: String, @@ -135,19 +131,17 @@ struct Project { // lots more fields } -/** - * Specifies how the client wants to page through results (typically: what - * field(s) to sort by and whether the sort should be ascending or descending) - * - * It's up to the consumer (e.g., this example) to decide exactly which modes - * are supported here and what each one means. This type represents an - * interface that's part of the OpenAPI specification for the service. - * - * NOTE: To be useful, this field must be deserializable using the - * `serde_querystring` module. You can test this by writing test code to - * serialize it using `serde_querystring`. That code could fail at runtime for - * certain types of values (e.g., enum variants that contain data). - */ +/// Specifies how the client wants to page through results (typically: what +/// field(s) to sort by and whether the sort should be ascending or descending) +/// +/// It's up to the consumer (e.g., this example) to decide exactly which modes +/// are supported here and what each one means. This type represents an +/// interface that's part of the OpenAPI specification for the service. 
///
/// NOTE: To be useful, this field must be deserializable using the
/// `serde_querystring` module. You can test this by writing test code to
/// serialize it using `serde_querystring`. That code could fail at runtime for
/// certain types of values (e.g., enum variants that contain data).
 #[derive(Clone, Deserialize, JsonSchema, Serialize)]
 struct ProjectScanParams {
     #[serde(default = "default_project_sort")]
@@ -161,31 +155,29 @@ fn default_project_sort() -> ProjectSort {
 #[derive(Deserialize, Clone, JsonSchema, Serialize)]
 #[serde(rename_all = "kebab-case")]
 enum ProjectSort {
-    /** by name ascending */
+    /// by name ascending
     ByNameAscending,
-    /** by name descending */
+    /// by name descending
     ByNameDescending,
-    /** by mtime ascending, then by name ascending */
+    /// by mtime ascending, then by name ascending
     ByMtimeAscending,
-    /** by mtime descending, then by name descending */
+    /// by mtime descending, then by name descending
     ByMtimeDescending,
 }

/// Specifies the scan mode and the client's current position in the scan
///
/// Dropshot uses this information to construct a page token that's sent to the
/// client with each page of results. The client provides that page token in a
/// subsequent request for the next page of results. Your endpoint is expected
/// to use this information to resume the scan where the previous request left
/// off.
///
/// The most common robust and scalable implementation is to have this structure
/// include the scan mode (see above) and the last value seen for the key field(s)
/// (i.e., the fields that the results are sorted by). When you get this
/// selector back, you find the object having the next value after the one stored
/// in the token and start returning results from there.
 #[derive(Deserialize, Serialize)]
 #[serde(rename_all = "kebab-case")]
 enum ProjectScanPageSelector {
@@ -193,11 +185,9 @@ enum ProjectScanPageSelector {
     MtimeName(PaginationOrder, DateTime<Utc>, String),
 }

/// Given a project (typically representing the last project in a page of
/// results) and scan mode, return a page selector that can be sent to the client
/// to request the next page of results.
 fn page_selector_for(
     last_item: &Project,
     scan_params: &ProjectScanParams,
@@ -222,12 +212,10 @@ fn page_selector_for(
     }
 }

-/**
- * API endpoint for listing projects
- *
- * This implementation stores all the projects in a BTreeMap, which makes it
- * very easy to fetch a particular range of items based on the key.
- */
+/// API endpoint for listing projects
+///
+/// This implementation stores all the projects in a BTreeMap, which makes it
+/// very easy to fetch a particular range of items based on the key.
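+///
+/// For example, resuming an ascending scan by name comes down to a range
+/// lookup in the map -- a sketch of what the iterator helpers below do:
+///
+/// ```ignore
+/// // everything strictly after the last name the client saw
+/// let iter = self
+///     .by_name
+///     .range((Bound::Excluded(last_seen.clone()), Bound::Unbounded));
+/// ```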
 #[endpoint {
     method = GET,
     path = "/projects"
 }]
@@ -303,9 +291,7 @@ async fn main() -> Result<(), String> {
         .map_err(|e| format!("failed to parse \"port\" argument: {}", e))?
         .unwrap_or(0);

-    /*
-     * Run the Dropshot server.
-     */
+    // Run the Dropshot server.
     let ctx = ProjectCollection::new();
     let config_dropshot = ConfigDropshot {
         bind_address: SocketAddr::from((Ipv4Addr::LOCALHOST, port)),
@@ -322,9 +308,7 @@ async fn main() -> Result<(), String> {
         .map_err(|error| format!("failed to create server: {}", error))?
         .start();

-    /*
-     * Print out some example requests to start with.
-     */
+    // Print out some example requests to start with.
     print_example_requests(log, &server.local_addr());

     server.await
@@ -350,11 +334,9 @@ fn print_example_requests(log: slog::Logger, addr: &SocketAddr) {
     }
 }

-/**
- * Tracks a (static) collection of Projects indexed in two different ways to
- * demonstrate an endpoint that provides multiple ways to scan a large
- * collection.
- */
+/// Tracks a (static) collection of Projects indexed in two different ways to
+/// demonstrate an endpoint that provides multiple ways to scan a large
+/// collection.
 struct ProjectCollection {
     by_name: BTreeMap<String, Arc<Project>>,
     by_mtime: BTreeMap<(DateTime<Utc>, String), Arc<Project>>,
 }

 type ProjectIter<'a> = Box<dyn Iterator<Item = Arc<Project>> + 'a>;

 impl ProjectCollection {
-    /** Constructs an example collection of projects to back the API endpoint */
+    /// Constructs an example collection of projects to back the API endpoint
     pub fn new() -> ProjectCollection {
         let mut data = ProjectCollection {
             by_name: BTreeMap::new(),
@@ -379,12 +361,10 @@ impl ProjectCollection {
                 name: name.clone(),
                 mtime: Utc.timestamp_millis_opt(timestamp).unwrap(),
             });
-            /*
-             * To make this dataset at least somewhat interesting in terms of
-             * exercising different pagination parameters, we'll make the mtimes
-             * decrease with the names, and we'll have some objects with the same
-             * mtime.
-             */
+            // To make this dataset at least somewhat interesting in terms of
+            // exercising different pagination parameters, we'll make the mtimes
+            // decrease with the names, and we'll have some objects with the same
+            // mtime.
             if n % 10 != 0 {
                 timestamp = timestamp - 1;
             }
@@ -395,9 +375,7 @@ impl ProjectCollection {
         data
     }

-    /*
-     * Iterate by name (ascending, descending)
-     */
+    // Iterate by name (ascending, descending)

     pub fn iter_by_name_asc(&self) -> ProjectIter {
         self.make_iter(self.by_name.iter())
@@ -419,9 +397,7 @@ impl ProjectCollection {
         self.make_iter(iter)
     }

-    /*
-     * Iterate by mtime (ascending, descending)
-     */
+    // Iterate by mtime (ascending, descending)

     pub fn iter_by_mtime_asc(&self) -> ProjectIter {
         self.make_iter(self.by_mtime.iter())
@@ -452,10 +428,8 @@ impl ProjectCollection {
         self.make_iter(iter)
     }

-    /**
-     * Helper function to turn the initial iterators produced above into what we
-     * actually need to provide consumers.
-     */
+    /// Helper function to turn the initial iterators produced above into what we
+    /// actually need to provide consumers.
 fn make_iter<'a, K, I>(&'a self, iter: I) -> ProjectIter<'a>
     where
         I: Iterator<Item = (K, &'a Arc<Project>)> + 'a,
diff --git a/dropshot/examples/petstore.rs b/dropshot/examples/petstore.rs
index 5040cd2dc..ae0c19631 100644
--- a/dropshot/examples/petstore.rs
+++ b/dropshot/examples/petstore.rs
@@ -7,9 +7,7 @@ use serde::{Deserialize, Serialize};
 use std::sync::Arc;

 fn main() -> Result<(), String> {
-    /*
-     * Build a description of the API.
-     */
+    // Build a description of the API.
     let mut api = ApiDescription::new();
     api.register(get_pet_by_id).unwrap();
     api.register(update_pet_with_form).unwrap();
diff --git a/dropshot/examples/schema-with-example.rs b/dropshot/examples/schema-with-example.rs
index f5bf1cb1e..dc306fcf3 100644
--- a/dropshot/examples/schema-with-example.rs
+++ b/dropshot/examples/schema-with-example.rs
@@ -1,8 +1,6 @@
-/*!
-* Example use of dropshot to output OpenAPI compatible JSON. This program
-* specifically illustrates how to add examples to each schema using schemars,
-* and how that will be reflected in the resultant JSON generated when ran.
-*/
+//! Example use of dropshot to output OpenAPI compatible JSON. This program
+//! specifically illustrates how to add examples to each schema using schemars,
+//! and how that will be reflected in the resultant JSON generated when run.

 use dropshot::{
     endpoint, ApiDescription, HttpError, HttpResponseOk, RequestContext,
@@ -11,17 +9,15 @@ use schemars::JsonSchema;
 use serde::{Deserialize, Serialize};
 use std::sync::Arc;

-/*
- * Define 2 structs here - Bar is nested inside Foo and should result in an
- * example that looks like:
- *
- * {
- *   "id": 1,
- *   "bar": {
- *     "id: 2
- *   }
- * }
- */
+// Define 2 structs here - Bar is nested inside Foo and should result in an
+// example that looks like:
+//
+// {
+//   "id": 1,
+//   "bar": {
+//     "id": 2
+//   }
+// }

 #[derive(Deserialize, Serialize, JsonSchema)]
 #[schemars(example = "foo_example")]
diff --git a/dropshot/examples/self-referential.rs b/dropshot/examples/self-referential.rs
index 78e726c7b..0261f3671 100644
--- a/dropshot/examples/self-referential.rs
+++ b/dropshot/examples/self-referential.rs
@@ -62,9 +62,7 @@ async fn main() -> Result<(), String> {
     shutdown.await
 }

-/**
- * Application-specific example context (state shared by handler functions)
- */
+/// Application-specific example context (state shared by handler functions)
 struct ExampleContext {
     counter: AtomicU64,
 }
@@ -75,9 +73,7 @@ impl ExampleContext {
     }
 }

-/*
- * HTTP API interface
- */
+// HTTP API interface

 #[derive(Deserialize, Serialize, JsonSchema)]
 struct CounterValue {
diff --git a/dropshot/examples/websocket.rs b/dropshot/examples/websocket.rs
index 615e42553..a79c79a80 100644
--- a/dropshot/examples/websocket.rs
+++ b/dropshot/examples/websocket.rs
@@ -1,7 +1,5 @@
 // Copyright 2022 Oxide Computer Company
-/*!
- * Example use of Dropshot with a websocket endpoint.
- */
+//! Example use of Dropshot with a websocket endpoint.

 use dropshot::channel;
 use dropshot::ApiDescription;
@@ -21,57 +19,43 @@ use tokio_tungstenite::tungstenite::Message;

 #[tokio::main]
 async fn main() -> Result<(), String> {
-    /*
-     * We must specify a configuration with a bind address. We'll use 127.0.0.1
-     * since it's available and won't expose this server outside the host. We
-     * request port 0, which allows the operating system to pick any available
-     * port.
-     */
+    // We must specify a configuration with a bind address. We'll use 127.0.0.1
+    // since it's available and won't expose this server outside the host.
We + // request port 0, which allows the operating system to pick any available + // port. let config_dropshot: ConfigDropshot = Default::default(); - /* - * For simplicity, we'll configure an "info"-level logger that writes to - * stderr assuming that it's a terminal. - */ + // For simplicity, we'll configure an "info"-level logger that writes to + // stderr assuming that it's a terminal. let config_logging = ConfigLogging::StderrTerminal { level: ConfigLoggingLevel::Info }; let log = config_logging .to_logger("example-basic") .map_err(|error| format!("failed to create logger: {}", error))?; - /* - * Build a description of the API. - */ + // Build a description of the API. let mut api = ApiDescription::new(); api.register(example_api_websocket_counter).unwrap(); - /* - * Set up the server. - */ + // Set up the server. let server = HttpServerStarter::new(&config_dropshot, api, (), &log) .map_err(|error| format!("failed to create server: {}", error))? .start(); - /* - * Wait for the server to stop. Note that there's not any code to shut down - * this server, so we should never get past this point. - */ + // Wait for the server to stop. Note that there's not any code to shut down + // this server, so we should never get past this point. server.await } -/* - * HTTP API interface - */ +// HTTP API interface #[derive(Deserialize, JsonSchema)] struct QueryParams { start: Option, } -/** - * An eternally-increasing sequence of bytes, wrapping on overflow, starting - * from the value given for the query parameter "start." - */ +/// An eternally-increasing sequence of bytes, wrapping on overflow, starting +/// from the value given for the query parameter "start." #[channel { protocol = WEBSOCKETS, path = "/counter", diff --git a/dropshot/examples/well-tagged.rs b/dropshot/examples/well-tagged.rs index 976caa483..85693bf2c 100644 --- a/dropshot/examples/well-tagged.rs +++ b/dropshot/examples/well-tagged.rs @@ -1,11 +1,9 @@ // Copyright 2022 Oxide Computer Company -/*! - * Example of an API that applies a rigorous tag policy in which each endpoint - * must use exactly one of the predetermined tags. Tags are often used by - * documentation generators; Dropshot's tag policies are intended to make - * proper tagging innate. - */ +//! Example of an API that applies a rigorous tag policy in which each endpoint +//! must use exactly one of the predetermined tags. Tags are often used by +//! documentation generators; Dropshot's tag policies are intended to make +//! proper tagging innate. use std::sync::Arc; @@ -50,27 +48,21 @@ async fn get_fryism( #[tokio::main] async fn main() -> Result<(), String> { - /* - * We must specify a configuration with a bind address. We'll use 127.0.0.1 - * since it's available and won't expose this server outside the host. We - * request port 0, which allows the operating system to pick any available - * port. - */ + // We must specify a configuration with a bind address. We'll use 127.0.0.1 + // since it's available and won't expose this server outside the host. We + // request port 0, which allows the operating system to pick any available + // port. let config_dropshot = Default::default(); - /* - * For simplicity, we'll configure an "info"-level logger that writes to - * stderr assuming that it's a terminal. - */ + // For simplicity, we'll configure an "info"-level logger that writes to + // stderr assuming that it's a terminal. 
 let config_logging =
         ConfigLogging::StderrTerminal { level: ConfigLoggingLevel::Info };
     let log = config_logging
         .to_logger("example-basic")
         .map_err(|error| format!("failed to create logger: {}", error))?;

-    /*
-     * Build a description of the API -- in this case it's not much of an API!.
-     */
+    // Build a description of the API -- in this case it's not much of an API!
     let mut api = ApiDescription::new().tag_config(TagConfig {
         allow_other_tags: false,
         endpoint_tag_policy: EndpointTagPolicy::ExactlyOne,
@@ -108,16 +100,12 @@ async fn main() -> Result<(), String> {
     api.register(get_barneyism).unwrap();
     api.register(get_fryism).unwrap();

-    /*
-     * Set up the server.
-     */
+    // Set up the server.
     let server = HttpServerStarter::new(&config_dropshot, api, (), &log)
         .map_err(|error| format!("failed to create server: {}", error))?
         .start();

-    /*
-     * Wait for the server to stop. Note that there's not any code to shut down
-     * this server, so we should never get past this point.
-     */
+    // Wait for the server to stop. Note that there's not any code to shut down
+    // this server, so we should never get past this point.
     server.await
 }
diff --git a/dropshot/src/api_description.rs b/dropshot/src/api_description.rs
index e4771bc32..7ef06f5ca 100644
--- a/dropshot/src/api_description.rs
+++ b/dropshot/src/api_description.rs
@@ -1,7 +1,5 @@
 // Copyright 2022 Oxide Computer Company
-/*!
- * Describes the endpoints and handler functions in your API
- */
+//! Describes the endpoints and handler functions in your API

 use crate::handler::HttpHandlerFunc;
 use crate::handler::HttpResponse;
@@ -27,12 +25,10 @@ use std::collections::BTreeMap;
 use std::collections::HashMap;
 use std::collections::HashSet;

-/**
- * ApiEndpoint represents a single API endpoint associated with an
- * ApiDescription. It has a handler, HTTP method (e.g. GET, POST), and a path--
- * provided explicitly--as well as parameters and a description which can be
- * inferred from function parameter types and doc comments (respectively).
- */
+/// ApiEndpoint represents a single API endpoint associated with an
+/// ApiDescription. It has a handler, HTTP method (e.g. GET, POST), and a path--
+/// provided explicitly--as well as parameters and a description which can be
+/// inferred from function parameter types and doc comments (respectively).
 #[derive(Debug)]
 pub struct ApiEndpoint<Context: ServerContext> {
     pub operation_id: String,
@@ -111,11 +107,9 @@ impl<'a, Context: ServerContext> ApiEndpoint<Context> {
     }
 }

-/**
- * ApiEndpointParameter represents the discrete path and query parameters for a
- * given API endpoint. These are typically derived from the members of stucts
- * used as parameters to handler functions.
- */
+/// ApiEndpointParameter represents the discrete path and query parameters for a
+/// given API endpoint. These are typically derived from the members of structs
+/// used as parameters to handler functions.
 #[derive(Debug)]
 pub struct ApiEndpointParameter {
     pub metadata: ApiEndpointParameterMetadata,
@@ -181,11 +175,11 @@ pub enum ApiEndpointParameterMetadata {

 #[derive(Debug, Clone)]
 pub enum ApiEndpointBodyContentType {
-    /** application/octet-stream */
+    /// application/octet-stream
     Bytes,
-    /** application/json */
+    /// application/json
     Json,
-    /** application/x-www-form-urlencoded */
+    /// application/x-www-form-urlencoded
     UrlEncoded,
 }

@@ -222,9 +216,7 @@ pub struct ApiEndpointHeader {
     pub required: bool,
 }

-/**
- * Metadata for an API endpoint response: type information and status code.
- */
+/// Metadata for an API endpoint response: type information and status code.
 #[derive(Debug, Default)]
 pub struct ApiEndpointResponse {
     pub schema: Option<ApiSchemaGenerator>,
@@ -233,9 +225,7 @@ pub struct ApiEndpointResponse {
     pub description: Option<String>,
 }

-/**
- * Wrapper for both dynamically generated and pre-generated schemas.
- */
+/// Wrapper for both dynamically generated and pre-generated schemas.
 pub enum ApiSchemaGenerator {
     Gen {
         name: fn() -> String,
@@ -259,13 +249,11 @@ impl std::fmt::Debug for ApiSchemaGenerator {
     }
 }

-/**
- * An ApiDescription represents the endpoints and handler functions in your API.
- * Other metadata could also be provided here. This object can be used to
- * generate an OpenAPI spec or to run an HTTP server implementing the API.
- */
+/// An ApiDescription represents the endpoints and handler functions in your API.
+/// Other metadata could also be provided here. This object can be used to
+/// generate an OpenAPI spec or to run an HTTP server implementing the API.
 pub struct ApiDescription<Context: ServerContext> {
-    /** In practice, all the information we need is encoded in the router. */
+    /// In practice, all the information we need is encoded in the router.
     router: HttpRouter<Context>,
     tag_config: TagConfig,
 }
@@ -283,9 +271,7 @@ impl<Context: ServerContext> ApiDescription<Context> {
         self
     }

-    /**
-     * Register a new API endpoint.
-     */
+    /// Register a new API endpoint.
     pub fn register<T>(&mut self, endpoint: T) -> Result<(), String>
     where
         T: Into<ApiEndpoint<Context>>,
@@ -302,11 +288,9 @@ impl<Context: ServerContext> ApiDescription<Context> {
         Ok(())
     }

-    /**
-     * Validate that the tags conform to the tags policy.
-     */
+    /// Validate that the tags conform to the tags policy.
     fn validate_tags(&self, e: &ApiEndpoint<Context>) -> Result<(), String> {
-        /* Don't care about endpoints that don't appear in the OpenAPI */
+        // Don't care about endpoints that don't appear in the OpenAPI
         if !e.visible {
             return Ok(());
         }
@@ -332,10 +316,8 @@ impl<Context: ServerContext> ApiDescription<Context> {
         Ok(())
     }

-    /**
-     * Validate that the parameters specified in the path match the parameters
-     * specified by the path parameter arguments to the handler function.
-     */
+    /// Validate that the parameters specified in the path match the parameters
+    /// specified by the path parameter arguments to the handler function.
     fn validate_path_parameters(
         &self,
         e: &ApiEndpoint<Context>,
@@ -392,9 +374,7 @@ impl<Context: ServerContext> ApiDescription<Context> {
         Ok(())
     }

-    /**
-     * Validate that we have a single body parameter.
-     */
+    /// Validate that we have a single body parameter.
     fn validate_body_parameters(
         &self,
         e: &ApiEndpoint<Context>,
@@ -419,11 +399,9 @@ impl<Context: ServerContext> ApiDescription<Context> {
         Ok(())
     }

-    /**
-     * Validate that named parameters have appropriate types and their aren't
-     * duplicates. Parameters must have scalar types except in the case of the
-     * received for a wildcard path which must be an array of String.
-     */
+    /// Validate that named parameters have appropriate types and there aren't
+    /// duplicates. Parameters must have scalar types except in the case of the
+    /// value received for a wildcard path, which must be an array of String.
     fn validate_named_parameters(
         &self,
         e: &ApiEndpoint<Context>,
@@ -449,13 +427,13 @@ impl<Context: ServerContext> ApiDescription<Context> {
         .collect::<HashSet<_>>();

         for param in &e.parameters {
-            /* Skip anything that's not a path or query parameter (i.e. body) */
+            // Skip anything that's not a path or query parameter (i.e. body)
             match &param.metadata {
                 ApiEndpointParameterMetadata::Path(_)
                 | ApiEndpointParameterMetadata::Query(_) => (),
                 _ => continue,
             }
-            /* Only body parameters should have unresolved schemas */
+            // Only body parameters should have unresolved schemas
             let (schema, dependencies) = match &param.schema {
                 ApiSchemaGenerator::Static { schema, dependencies } => {
                     (schema, dependencies)
@@ -494,15 +472,13 @@ impl<Context: ServerContext> ApiDescription<Context> {
         Ok(())
     }

-    /**
-     * Build the OpenAPI definition describing this API. Returns an
-     * [`OpenApiDefinition`] which can be used to specify the contents of the
-     * definition and select an output format.
-     *
-     * The arguments to this function will be used for the mandatory `title` and
-     * `version` properties that the `Info` object in an OpenAPI definition must
-     * contain.
-     */
+    /// Build the OpenAPI definition describing this API. Returns an
+    /// [`OpenApiDefinition`] which can be used to specify the contents of the
+    /// definition and select an output format.
+    ///
+    /// The arguments to this function will be used for the mandatory `title` and
+    /// `version` properties that the `Info` object in an OpenAPI definition must
+    /// contain.
     pub fn openapi<S1, S2>(
         &self,
         title: S1,
         version: S2,
@@ -515,17 +491,15 @@ impl<Context: ServerContext> ApiDescription<Context> {
         OpenApiDefinition::new(self, title.as_ref(), version.as_ref())
     }

-    /**
-     * Internal routine for constructing the OpenAPI definition describing this
-     * API in its JSON form.
-     */
+    /// Internal routine for constructing the OpenAPI definition describing this
+    /// API in its JSON form.
     fn gen_openapi(&self, info: openapiv3::Info) -> openapiv3::OpenAPI {
         let mut openapi = openapiv3::OpenAPI::default();

         openapi.openapi = "3.0.3".to_string();
         openapi.info = info;

-        /* Gather up the ad hoc tags from endpoints */
+        // Gather up the ad hoc tags from endpoints
         let endpoint_tags = (&self.router)
             .into_iter()
             .flat_map(|(_, _, endpoint)| {
@@ -538,7 +512,7 @@ impl<Context: ServerContext> ApiDescription<Context> {
             .into_iter()
             .map(|tag| openapiv3::Tag { name: tag, ..Default::default() });

-        /* Bundle those with the explicit tags provided by the consumer */
+        // Bundle those with the explicit tags provided by the consumer
         openapi.tags = self
             .tag_config
             .tag_definitions
@@ -558,7 +532,7 @@ impl<Context: ServerContext> ApiDescription<Context> {
             .chain(endpoint_tags)
             .collect();

-        /* Sort the tags for stability */
+        // Sort the tags for stability
         openapi.tags.sort_by(|a, b| a.name.cmp(&b.name));

         let settings = schemars::gen::SchemaSettings::openapi3();
@@ -889,19 +863,15 @@ impl<Context: ServerContext> ApiDescription<Context> {
         openapi
     }

-    /*
-     * TODO-cleanup is there a way to make this available only within this
-     * crate? Once we do that, we don't need to consume the ApiDescription to
-     * do this.
-     */
+    // TODO-cleanup is there a way to make this available only within this
+    // crate? Once we do that, we don't need to consume the ApiDescription to
+    // do this.
     pub fn into_router(self) -> HttpRouter<Context> {
         self.router
     }
 }

-/**
- * Returns true iff the schema represents the void schema that matches no data.
- */
+/// Returns true iff the schema represents the void schema that matches no data.
 fn is_empty(schema: &schemars::schema::Schema) -> bool {
     if let schemars::schema::Schema::Bool(false) = schema {
         return true;
@@ -957,27 +927,21 @@ fn is_empty(schema: &schemars::schema::Schema) -> bool {
     false
 }

-/**
- * Convert from JSON Schema into OpenAPI.
- */
-/*
- * TODO Initially this seemed like it was going to be a win, but the versions
- * of JSON Schema that the schemars and openapiv3 crates adhere to are just
- * different enough to make the conversion a real pain in the neck. A better
- * approach might be a derive(OpenAPI)-like thing, or even a generic
- * derive(schema) that we could then marshall into OpenAPI.
- * The schemars crate also seems a bit inflexible when it comes to how the
- * schema is generated wrt references vs. inline types.
- */
+/// Convert from JSON Schema into OpenAPI.
+// TODO Initially this seemed like it was going to be a win, but the versions
+// of JSON Schema that the schemars and openapiv3 crates adhere to are just
+// different enough to make the conversion a real pain in the neck. A better
+// approach might be a derive(OpenAPI)-like thing, or even a generic
+// derive(schema) that we could then marshall into OpenAPI.
+// The schemars crate also seems a bit inflexible when it comes to how the
+// schema is generated wrt references vs. inline types.
 fn j2oas_schema(
     name: Option<&String>,
     schema: &schemars::schema::Schema,
 ) -> openapiv3::ReferenceOr<openapiv3::Schema> {
     match schema {
-        /*
-         * The permissive, "match anything" schema. We'll typically see this
-         * when consumers use a type such as serde_json::Value.
-         */
+        // The permissive, "match anything" schema. We'll typically see this
+        // when consumers use a type such as serde_json::Value.
         schemars::schema::Schema::Bool(true) => {
             openapiv3::ReferenceOr::Item(openapiv3::Schema {
                 schema_data: openapiv3::SchemaData::default(),
@@ -1397,13 +1361,11 @@ fn j2oas_object(
     }
 }

-/**
- * This object is used to specify configuration for building an OpenAPI
- * definition document. It is constructed using [`ApiDescription::openapi()`].
- * Additional optional properties may be added and then the OpenAPI definition
- * document may be generated via [`write()`](`OpenApiDefinition::write`) or
- * [`json()`](`OpenApiDefinition::json`).
- */
+/// This object is used to specify configuration for building an OpenAPI
+/// definition document. It is constructed using [`ApiDescription::openapi()`].
+/// Additional optional properties may be added and then the OpenAPI definition
+/// document may be generated via [`write()`](`OpenApiDefinition::write`) or
+/// [`json()`](`OpenApiDefinition::json`).
 pub struct OpenApiDefinition<'a, Context: ServerContext> {
     api: &'a ApiDescription<Context>,
     info: openapiv3::Info,
@@ -1423,25 +1385,21 @@ impl<'a, Context: ServerContext> OpenApiDefinition<'a, Context> {
         OpenApiDefinition { api, info }
     }

-    /**
-     * Provide a short description of the API. CommonMark syntax may be
-     * used for rich text representation.
-     *
-     * This routine will set the `description` field of the `Info` object in the
-     * OpenAPI definition.
-     */
+    /// Provide a short description of the API. CommonMark syntax may be
+    /// used for rich text representation.
+    ///
+    /// This routine will set the `description` field of the `Info` object in the
+    /// OpenAPI definition.
     pub fn description<S: AsRef<str>>(&mut self, description: S) -> &mut Self {
         self.info.description = Some(description.as_ref().to_string());
         self
     }

-    /**
-     * Include a Terms of Service URL for the API. Must be in the format of a
-     * URL.
-     *
-     * This routine will set the `termsOfService` field of the `Info` object in
-     * the OpenAPI definition.
-     */
+    /// Include a Terms of Service URL for the API. Must be in the format of a
+    /// URL.
+    ///
+    /// This routine will set the `termsOfService` field of the `Info` object in
+    /// the OpenAPI definition.
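+    ///
+    /// A hypothetical builder chain (the title, version, and URL here are
+    /// invented for illustration):
+    ///
+    /// ```ignore
+    /// api.openapi("Example API", "1.0.0")
+    ///     .terms_of_service("https://example.com/terms")
+    ///     .write(&mut std::io::stdout())
+    ///     .map_err(|e| e.to_string())?;
+    /// ```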
 pub fn terms_of_service<S: AsRef<str>>(&mut self, url: S) -> &mut Self {
         self.info.terms_of_service = Some(url.as_ref().to_string());
         self
     }
@@ -1454,36 +1412,30 @@ impl<'a, Context: ServerContext> OpenApiDefinition<'a, Context> {
         self.info.contact.as_mut().unwrap()
     }

-    /**
-     * Set the identifying name of the contact person or organisation
-     * responsible for the API.
-     *
-     * This routine will set the `name` property of the `Contact` object within
-     * the `Info` object in the OpenAPI definition.
-     */
+    /// Set the identifying name of the contact person or organisation
+    /// responsible for the API.
+    ///
+    /// This routine will set the `name` property of the `Contact` object within
+    /// the `Info` object in the OpenAPI definition.
     pub fn contact_name<S: AsRef<str>>(&mut self, name: S) -> &mut Self {
         self.contact_mut().name = Some(name.as_ref().to_string());
         self
     }

-    /**
-     * Set a contact URL for the API. Must be in the format of a URL.
-     *
-     * This routine will set the `url` property of the `Contact` object within
-     * the `Info` object in the OpenAPI definition.
-     */
+    /// Set a contact URL for the API. Must be in the format of a URL.
+    ///
+    /// This routine will set the `url` property of the `Contact` object within
+    /// the `Info` object in the OpenAPI definition.
     pub fn contact_url<S: AsRef<str>>(&mut self, url: S) -> &mut Self {
         self.contact_mut().url = Some(url.as_ref().to_string());
         self
     }

-    /**
-     * Set the email address of the contact person or organisation responsible
-     * for the API. Must be in the format of an email address.
-     *
-     * This routine will set the `email` property of the `Contact` object within
-     * the `Info` object in the OpenAPI definition.
-     */
+    /// Set the email address of the contact person or organisation responsible
+    /// for the API. Must be in the format of an email address.
+    ///
+    /// This routine will set the `email` property of the `Contact` object within
+    /// the `Info` object in the OpenAPI definition.
     pub fn contact_email<S: AsRef<str>>(&mut self, email: S) -> &mut Self {
         self.contact_mut().email = Some(email.as_ref().to_string());
         self
     }

     fn license_mut(&mut self, name: &str) -> &mut openapiv3::License {
         if self.info.license.is_none() {
             self.info.license = Some(openapiv3::License::default())
         }
@@ -1499,13 +1451,11 @@ impl<'a, Context: ServerContext> OpenApiDefinition<'a, Context> {
         self.info.license.as_mut().unwrap()
     }

-    /**
-     * Provide the name of the licence used for the API, and a URL (must be in
-     * URL format) displaying the licence text.
-     *
-     * This routine will set the `name` and optional `url` properties of the
-     * `License` object within the `Info` object in the OpenAPI definition.
-     */
+    /// Provide the name of the licence used for the API, and a URL (must be in
+    /// URL format) displaying the licence text.
+    ///
+    /// This routine will set the `name` and optional `url` properties of the
+    /// `License` object within the `Info` object in the OpenAPI definition.
     pub fn license<S1, S2>(&mut self, name: S1, url: S2) -> &mut Self
     where
         S1: AsRef<str>,
         S2: AsRef<str>,
     {
         self.license_mut(name.as_ref()).url = Some(url.as_ref().to_string());
         self
     }

-    /**
-     * Provide the name of the licence used for the API.
-     *
-     * This routine will set the `name` property of the License object within
-     * the `Info` object in the OpenAPI definition.
-     */
+    /// Provide the name of the licence used for the API.
+    ///
+    /// This routine will set the `name` property of the License object within
+    /// the `Info` object in the OpenAPI definition.
     pub fn license_name<S: AsRef<str>>(&mut self, name: S) -> &mut Self {
         self.license_mut(name.as_ref());
         self
     }

-    /**
-     * Build a JSON object containing the OpenAPI definition for this API.
- */
+    /// Build a JSON object containing the OpenAPI definition for this API.
     pub fn json(&self) -> serde_json::Result<serde_json::Value> {
         serde_json::to_value(&self.api.gen_openapi(self.info.clone()))
     }

-    /**
-     * Build a JSON object containing the OpenAPI definition for this API and
-     * write it to the provided stream.
-     */
+    /// Build a JSON object containing the OpenAPI definition for this API and
+    /// write it to the provided stream.
     pub fn write(
         &self,
         out: &mut dyn std::io::Write,
     ) -> serde_json::Result<()> {
@@ -1548,14 +1492,12 @@ impl<'a, Context: ServerContext> OpenApiDefinition<'a, Context> {
     }
 }

-/**
- * Configuration used describe OpenAPI tags and to validate per-endpoint tags.
- * Consumers may use this ensure that--for example--endpoints pick a tag from a
- * known set, or that each endpoint has at least one tag.
- */
+/// Configuration used to describe OpenAPI tags and to validate per-endpoint tags.
+/// Consumers may use this to ensure that--for example--endpoints pick a tag from a
+/// known set, or that each endpoint has at least one tag.
 #[derive(Debug, Serialize, Deserialize)]
 pub struct TagConfig {
-    /** Are endpoints allowed to use tags not specified in this config? */
+    /// Are endpoints allowed to use tags not specified in this config?
     pub allow_other_tags: bool,
     pub endpoint_tag_policy: EndpointTagPolicy,
     pub tag_definitions: HashMap<String, TagDetails>,
 }
@@ -1571,34 +1513,32 @@ impl Default for TagConfig {
     }
 }

-/** Endpoint tagging policy */
+/// Endpoint tagging policy
 #[derive(Debug, Serialize, Deserialize, PartialEq, Eq)]
 pub enum EndpointTagPolicy {
-    /** Any number of tags is permitted */
+    /// Any number of tags is permitted
     Any,
-    /** At least one tag is required and more are allowed */
+    /// At least one tag is required and more are allowed
     AtLeastOne,
-    /** There must be exactly one tag */
+    /// There must be exactly one tag
     ExactlyOne,
 }

-/** Details for a named tag */
+/// Details for a named tag
 #[derive(Debug, Default, Serialize, Deserialize)]
 pub struct TagDetails {
     pub description: Option<String>,
     pub external_docs: Option<TagExternalDocs>,
 }

-/** External docs description */
+/// External docs description
 #[derive(Debug, Serialize, Deserialize)]
 pub struct TagExternalDocs {
     pub description: Option<String>,
     pub url: String,
 }

-/**
- * Dropshot/Progenitor features used by endpoints which are not a part of the base OpenAPI spec.
- */
+/// Dropshot/Progenitor features used by endpoints which are not a part of the base OpenAPI spec.
 #[derive(Copy, Clone, Debug, Eq, PartialEq)]
 pub enum ExtensionMode {
     None,
@@ -1639,7 +1579,7 @@ mod test {
     use std::str::from_utf8;
     use std::sync::Arc;

-    use crate as dropshot; /* for "endpoint" macro */
+    use crate as dropshot; // for "endpoint" macro

 #[derive(Deserialize, JsonSchema)]
 #[allow(dead_code)]
@@ -1943,10 +1883,8 @@ mod test {
     #[test]
     fn test_tags_set() {
-        /*
-         * Validate that pre-defined tags and ad-hoc tags are all accounted
-         * for and aren't duplicated.
-         */
+        // Validate that pre-defined tags and ad-hoc tags are all accounted
+        // for and aren't duplicated.
         let mut api = ApiDescription::new().tag_config(TagConfig {
             allow_other_tags: true,
             endpoint_tag_policy: EndpointTagPolicy::AtLeastOne,
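As a sketch of how these types fit together (the tag name and descriptive text
here are invented; the field names come from the definitions above), a consumer
might configure tag validation like this:

```ignore
use std::collections::HashMap;

let mut api = ApiDescription::new().tag_config(TagConfig {
    allow_other_tags: false,
    endpoint_tag_policy: EndpointTagPolicy::ExactlyOne,
    tag_definitions: HashMap::from([(
        "simpsons".to_string(),
        TagDetails {
            description: Some("quotes from the Simpsons".to_string()),
            external_docs: Some(TagExternalDocs {
                description: None,
                url: "https://example.com/docs/simpsons".to_string(),
            }),
        },
    )]),
});
```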
diff --git a/dropshot/src/config.rs b/dropshot/src/config.rs
index a32e3ebc8..74ad41e53 100644
--- a/dropshot/src/config.rs
+++ b/dropshot/src/config.rs
@@ -1,63 +1,59 @@
 // Copyright 2020 Oxide Computer Company
-/*!
- * Configuration for Dropshot
- */
+//! Configuration for Dropshot

 use serde::Deserialize;
 use serde::Serialize;
 use std::net::SocketAddr;
 use std::path::PathBuf;

/// Configuration for a Dropshot server.
///
/// This type implements [`serde::Deserialize`] and [`serde::Serialize`] and it
/// can be composed with the consumer's configuration (whatever format that's
/// in). For example, consumers could define a custom `MyAppConfig` for an app
/// that contains a Dropshot server:
///
/// ```
/// use dropshot::ConfigDropshot;
/// use serde::Deserialize;
///
/// #[derive(Deserialize)]
/// struct MyAppConfig {
///     http_api_server: ConfigDropshot,
///     /* ... (other app-specific config) */
/// }
///
/// fn main() -> Result<(), String> {
///     let my_config: MyAppConfig = toml::from_str(
///         r##"
///             [http_api_server]
///             bind_address = "127.0.0.1:12345"
///             request_body_max_bytes = 1024
///             ## Optional, to enable TLS
///             [http_api_server.tls]
///             type = "AsFile"
///             cert_file = "/path/to/certs.pem"
///             key_file = "/path/to/key.pem"
///
///
///             ## ... (other app-specific config)
///         "##
///     ).map_err(|error| format!("parsing config: {}", error))?;
///
///     let dropshot_config: &ConfigDropshot = &my_config.http_api_server;
///     /* ... (use the config to create a server) */
///     Ok(())
/// }
/// ```
 #[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
 #[serde(default)]
 pub struct ConfigDropshot {
-    /** IP address and TCP port to which to bind for accepting connections */
+    /// IP address and TCP port to which to bind for accepting connections
     pub bind_address: SocketAddr,
-    /** maximum allowed size of a request body, defaults to 1024 */
+    /// maximum allowed size of a request body, defaults to 1024
     pub request_body_max_bytes: usize,
-    /** If present, enables TLS with the given configuration */
+    /// If present, enables TLS with the given configuration
     pub tls: Option<ConfigTls>,
 }

@@ -65,15 +61,13 @@ pub struct ConfigDropshot {
 #[serde(tag = "type")]
 pub enum ConfigTls {
     AsFile {
        /// Path to a PEM file containing a certificate chain for the
        /// server to identify itself with.
The first certificate is the + /// end-entity certificate, and the remaining are intermediate + /// certificates on the way to a trusted CA. cert_file: PathBuf, - /** Path to a PEM-encoded PKCS #8 file containing the private key the - * server will use. - */ + /// Path to a PEM-encoded PKCS #8 file containing the private key the + /// server will use. key_file: PathBuf, }, AsBytes { diff --git a/dropshot/src/error.rs b/dropshot/src/error.rs index 5fb33bfb2..90ca6a124 100644 --- a/dropshot/src/error.rs +++ b/dropshot/src/error.rs @@ -1,48 +1,46 @@ // Copyright 2020 Oxide Computer Company -/*! - * Generic server error handling facilities - * - * Error handling in an API - * ------------------------ - * - * Our approach for managing errors within the API server balances several - * goals: - * - * * Every incoming HTTP request should conclude with a response, which is - * either successful (200-level or 300-level status code) or a failure - * (400-level for client errors, 500-level for server errors). - * * There are several different sources of errors within an API server: - * * The HTTP layer of the server may generate an error. In this case, it - * may be just as easy to generate the appropriate HTTP response (with a - * 400-level or 500-level status code) as it would be to generate an Error - * object of some kind. - * * An HTTP-agnostic layer of the API server code base may generate an - * error. It would be nice (but not essential) if these layers did not - * need to know about HTTP-specific things like status codes, particularly - * since they may not map straightforwardly. For example, a NotFound - * error from the model may not result in a 404 out the API -- it might - * just mean that something in the model layer needs to create an object - * before using it. - * * A library that's not part of the API server code base may generate an - * error. This would include standard library interfaces returning - * `std::io::Error` and Hyper returning `hyper::Error`, for examples. - * * We'd like to take advantage of Rust's built-in error handling control flow - * tools, like Results and the '?' operator. - * - * Dropshot itself is concerned only with HTTP errors. We define `HttpError`, - * which provides a status code, error code (via an Enum), external message (for - * sending in the response), optional metadata, and an internal message (for the - * log file or other instrumentation). The HTTP layers of the request-handling - * stack may use this struct directly. **The set of possible error codes here - * is part of a service's OpenAPI contract, as is the schema for any metadata.** - * By the time an error bubbles up to the top of the request handling stack, it - * must be an HttpError. - * - * For the HTTP-agnostic layers of an API server (i.e., consumers of Dropshot), - * we recommend a separate enum to represent their errors in an HTTP-agnostic - * way. Consumers can provide a `From` implementation that converts these - * errors into HttpErrors. - */ +//! Generic server error handling facilities +//! +//! Error handling in an API +//! ------------------------ +//! +//! Our approach for managing errors within the API server balances several +//! goals: +//! +//! * Every incoming HTTP request should conclude with a response, which is +//! either successful (200-level or 300-level status code) or a failure +//! (400-level for client errors, 500-level for server errors). +//! * There are several different sources of errors within an API server: +//! 
//!   * The HTTP layer of the server may generate an error. In this case, it
//!     may be just as easy to generate the appropriate HTTP response (with a
//!     400-level or 500-level status code) as it would be to generate an Error
//!     object of some kind.
//!   * An HTTP-agnostic layer of the API server code base may generate an
//!     error. It would be nice (but not essential) if these layers did not
//!     need to know about HTTP-specific things like status codes, particularly
//!     since they may not map straightforwardly. For example, a NotFound
//!     error from the model may not result in a 404 out of the API -- it might
//!     just mean that something in the model layer needs to create an object
//!     before using it.
//!   * A library that's not part of the API server code base may generate an
//!     error. This would include standard library interfaces returning
//!     `std::io::Error` and Hyper returning `hyper::Error`, for example.
//! * We'd like to take advantage of Rust's built-in error handling control flow
//!   tools, like Results and the '?' operator.
//!
//! Dropshot itself is concerned only with HTTP errors. We define `HttpError`,
//! which provides a status code, error code (via an Enum), external message (for
//! sending in the response), optional metadata, and an internal message (for the
//! log file or other instrumentation). The HTTP layers of the request-handling
//! stack may use this struct directly. **The set of possible error codes here
//! is part of a service's OpenAPI contract, as is the schema for any metadata.**
//! By the time an error bubbles up to the top of the request handling stack, it
//! must be an HttpError.
//!
//! For the HTTP-agnostic layers of an API server (i.e., consumers of Dropshot),
//! we recommend a separate enum to represent their errors in an HTTP-agnostic
//! way. Consumers can provide a `From` implementation that converts these
//! errors into HttpErrors.

 use hyper::Error as HyperError;
 use schemars::JsonSchema;
@@ -51,67 +49,57 @@ use serde::Serialize;
 use std::error::Error;
 use std::fmt;

/// `HttpError` represents an error generated as part of handling an API
/// request. When these bubble up to the top of the request handling stack
/// (which is most of the time that they're generated), these are turned into an
/// HTTP response, which includes:
///
/// * a status code, which is likely either 400-level (indicating a client
///   error, like bad input) or 500-level (indicating a server error).
/// * a structured (JSON) body, which includes:
///   * a string error code, which identifies the underlying error condition
///     so that clients can potentially make programmatic decisions based on
///     the error type
///   * a string error message, which is the human-readable summary of the
///     issue, intended to make sense for API users (i.e., not API server
///     developers)
///   * optionally: additional metadata describing the issue. For a
///     validation error, this could include information about which
///     parameter was invalid and why. This should conform to a schema
///     associated with the error code.
///
/// It's easy to go overboard with the error codes and metadata. Generally, we
/// should avoid creating specific codes and metadata unless there's a good
/// reason for a client to care.
///
/// Besides that, `HttpError`s also have an internal error message, which may
/// differ from the error message that gets reported to users. For example, if
/// the request fails because an internal database is unreachable, the client may
/// just see "internal error", while the server log would include more details
/// like "failed to acquire connection to database at 10.1.2.3".
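///
/// As a sketch, handlers typically construct these with the convenience
/// constructors below (the message text here is invented):
///
/// ```ignore
/// // 400-level: one message serves both internal and external uses
/// let err = HttpError::for_bad_request(None, String::from("invalid input"));
///
/// // 404: the client sees the standard "Not Found" label, while the
/// // detailed internal message goes to the server log
/// let err = HttpError::for_not_found(None, String::from("no such project"));
/// ```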
 #[derive(Debug)]
 pub struct HttpError {
-    /*
-     * TODO-coverage add coverage in the test suite for error_code
-     * TODO-robustness should error_code just be required? It'll be confusing
-     * to clients if it's missing sometimes. Should this class be parametrized
-     * by some enum type?
-     * TODO-polish add cause chain for a complete log message?
-     */
-    /** HTTP status code for this error */
+    // TODO-coverage add coverage in the test suite for error_code
+    // TODO-robustness should error_code just be required? It'll be confusing
+    // to clients if it's missing sometimes. Should this class be parametrized
+    // by some enum type?
+    // TODO-polish add cause chain for a complete log message?
+    /// HTTP status code for this error
     pub status_code: http::StatusCode,
-    /**
-     * Optional string error code for this error. Callers are advised to
-     * use an enum to populate this field.
-     */
+    /// Optional string error code for this error. Callers are advised to
+    /// use an enum to populate this field.
     pub error_code: Option<String>,
-    /** Error message to be sent to API client for this error */
+    /// Error message to be sent to API client for this error
     pub external_message: String,
-    /** Error message recorded in the log for this error */
+    /// Error message recorded in the log for this error
     pub internal_message: String,
 }
/// Body of an HTTP response for an `HttpError`. This type can be used to
/// deserialize an HTTP response corresponding to an error in order to access the
/// error code, message, etc.
// TODO: does this need to be pub if it's going to be expressed in the OpenAPI
// output?
 #[derive(Debug, Deserialize, Serialize, JsonSchema)]
 #[schemars(rename = "Error")]
 #[schemars(description = "Error information from a response.")]
 pub struct HttpErrorResponseBody {
@@ -127,10 +115,8 @@ pub struct HttpErrorResponseBody {

 impl From<HyperError> for HttpError {
     fn from(error: HyperError) -> Self {
-        /*
-         * TODO-correctness dig deeper into the various cases to make sure this
-         * is a valid way to represent it.
-         */
+        // TODO-correctness dig deeper into the various cases to make sure this
+        // is a valid way to represent it.
         HttpError::for_bad_request(
             None,
             format!("error processing request: {}", error),
@@ -140,10 +126,8 @@ impl From<http::Error> for HttpError {
     fn from(error: http::Error) -> Self {
-        /*
-         * TODO-correctness dig deeper into the various cases to make sure this
-         * is a valid way to represent it.
-         */
+        // TODO-correctness dig deeper into the various cases to make sure this
+        // is a valid way to represent it.
         HttpError::for_bad_request(
             None,
             format!("error processing request: {}", error),
         )
     }
 }

 impl HttpError {
-    /**
-     * Generates an `HttpError` for any 400-level client error with a custom
-     * `message` used for both the internal and external message. The
-     * expectation here is that for most 400-level errors, there's no need for a
-     * separate internal message.
-     */
+    /// Generates an `HttpError` for any 400-level client error with a custom
+    /// `message` used for both the internal and external message. The
+    /// expectation here is that for most 400-level errors, there's no need for a
+    /// separate internal message.
     pub fn for_client_error(
         error_code: Option<String>,
         status_code: http::StatusCode,
         message: String,
     ) -> Self {
@@ -172,10 +154,8 @@ impl HttpError {
         }
     }

-    /**
-     * Generates an `HttpError` for a 500 "Internal Server Error" error with the
-     * given `internal_message` for the internal message.
-     */
+    /// Generates an `HttpError` for a 500 "Internal Server Error" error with the
+    /// given `internal_message` for the internal message.
     pub fn for_internal_error(internal_message: String) -> Self {
         let status_code = http::StatusCode::INTERNAL_SERVER_ERROR;
         HttpError {
@@ -189,10 +169,8 @@ impl HttpError {
         }
     }

-    /**
-     * Generates an `HttpError` for a 503 "Service Unavailable" error with the
-     * given `internal_message` for the internal message.
-     */
+    /// Generates an `HttpError` for a 503 "Service Unavailable" error with the
+    /// given `internal_message` for the internal message.
     pub fn for_unavail(
         error_code: Option<String>,
         internal_message: String,
@@ -209,11 +187,9 @@ impl HttpError {
         }
     }

-    /**
-     * Generates a 400 "Bad Request" error with the given `message` used for
-     * both the internal and external message. This is a convenience wrapper
-     * around [`HttpError::for_client_error`].
-     */
+    /// Generates a 400 "Bad Request" error with the given `message` used for
+    /// both the internal and external message. This is a convenience wrapper
+    /// around [`HttpError::for_client_error`].
     pub fn for_bad_request(
         error_code: Option<String>,
         message: String,
@@ -225,26 +201,22 @@ impl HttpError {
         )
     }

-    /**
-     * Generates an `HttpError` for the given HTTP `status_code` where the
-     * internal and external messages for the error come from the standard label
-     * for this status code (e.g., the message for status code 404 is "Not
-     * Found").
-     */
+    /// Generates an `HttpError` for the given HTTP `status_code` where the
+    /// internal and external messages for the error come from the standard label
+    /// for this status code (e.g., the message for status code 404 is "Not
+    /// Found").
     pub fn for_status(
         error_code: Option<String>,
         status_code: http::StatusCode,
     ) -> Self {
-        /* TODO-polish This should probably be our own message. */
+        // TODO-polish This should probably be our own message.
         let message = status_code.canonical_reason().unwrap().to_string();
         HttpError::for_client_error(error_code, status_code, message)
     }

-    /**
-     * Generates an `HttpError` for a 404 "Not Found" error with a custom
-     * internal message `internal_message`. The external message will be "Not
-     * Found" (i.e., the standard label for status code 404).
-     */
+    /// Generates an `HttpError` for a 404 "Not Found" error with a custom
+    /// internal message `internal_message`. The external message will be "Not
+    /// Found" (i.e., the standard label for status code 404).
     pub fn for_not_found(
         error_code: Option<String>,
         internal_message: String,
@@ -260,24 +232,20 @@ impl HttpError {
         }
     }

-    /**
-     * Generates an HTTP response for the given `HttpError`, using `request_id`
-     * for the response's request id.
-     */
+    /// Generates an HTTP response for the given `HttpError`, using `request_id`
+    /// for the response's request id.
     pub fn into_response(
         self,
         request_id: &str,
     ) -> hyper::Response<hyper::Body> {
        // TODO-hardening: consider handling the operational errors where the
        // Serde serialization fails or the response construction fails. In
        // those cases, we should probably try to report this as a serious
        // problem (e.g., to the log) and send back a 500-level response. (Of
        // course, that could fail in the same way, but it's less likely because
        // there's only one possible set of input and we can test it. We'll
        // probably have to use unwrap() there and make sure we've tested that
        // code at least once!)
         hyper::Response::builder()
             .status(self.status_code)
             .header(
diff --git a/dropshot/src/from_map.rs b/dropshot/src/from_map.rs
index 13d78c1ba..ee601b569 100644
--- a/dropshot/src/from_map.rs
+++ b/dropshot/src/from_map.rs
@@ -14,11 +14,9 @@ use std::collections::BTreeMap;
 use std::fmt::Debug;
 use std::fmt::Display;

-/**
- * Deserialize a BTreeMap<String, Z> into a type, invoking
- * String::parse() for all values according to the required type. MapValue may
- * be either a single String or a sequence of Strings.
- */
+/// Deserialize a BTreeMap<String, Z> into a type, invoking
+/// String::parse() for all values according to the required type. MapValue may
+/// be either a single String or a sequence of Strings.
 pub(crate) fn from_map<'a, T, Z>(
     map: &'a BTreeMap<String, Z>,
 ) -> Result<T, String>
 where
@@ -48,10 +46,8 @@ impl MapValue for String {
     }
 }

-/**
- * Deserializer for BTreeMap<String, Z> that interprets the values. It has
- * two modes: about to iterate over the map or about to process a single value.
- */
+/// Deserializer for BTreeMap<String, Z> that interprets the values.
It has +/// two modes: about to iterate over the map or about to process a single value. #[derive(Debug)] enum MapDeserializer<'de, Z: MapValue + Debug + Clone + 'static> { Map(&'de BTreeMap), @@ -66,10 +62,8 @@ where MapDeserializer::Map(input) } - /** - * Helper function to extract pattern match for Value. Fail if we're - * expecting a Map or return the result of the provided function. - */ + /// Helper function to extract pattern match for Value. Fail if we're + /// expecting a Map or return the result of the provided function. fn value(&self, deserialize: F) -> Result where F: FnOnce(&Z) -> Result, @@ -104,9 +98,7 @@ impl serde::de::Error for MapError { impl std::error::Error for MapError {} -/** - * Stub out Deserializer trait functions that aren't applicable. - */ +/// Stub out Deserializer trait functions that aren't applicable. macro_rules! de_unimp { ($i:ident $(, $p:ident : $t:ty )*) => { fn $i(self $(, $p: $t)*, _visitor: V) -> Result @@ -121,11 +113,9 @@ macro_rules! de_unimp { }; } -/* - * Generate handlers for primitive types using FromStr::parse() to deserialize - * from the string form. Note that for integral types parse does not accept - * prefixes such as "0x", but we could add this easily with a custom handler. - */ +// Generate handlers for primitive types using FromStr::parse() to deserialize +// from the string form. Note that for integral types parse does not accept +// prefixes such as "0x", but we could add this easily with a custom handler. macro_rules! de_value { ($i:ident) => { paste! { @@ -199,10 +189,8 @@ where { self.deserialize_map(visitor) } - /* - * This will only be called when deserializing a structure that contains a - * flattened structure. See `deserialize_any` below for details. - */ + // This will only be called when deserializing a structure that contains a + // flattened structure. See `deserialize_any` below for details. fn deserialize_map(self, visitor: V) -> Result where V: Visitor<'de>, @@ -251,27 +239,25 @@ where self.value(|raw_value| visitor.visit_str(raw_value.as_value()?)) } - /* - * We really shouldn't have to implement this, and we can't actually do so - * properly, but due to the way that serde currently handles flattened - * structs this will be called for all members in flattened (i.e. non- - * root) structs - * - * See serde-rs/serde#1183 for details. The macro for serde::Deserialize - * can't know the members of flattened structs (those are in a different - * scope) so serde forces all items to be deserialized, saves them in a - * map, and then deserialized them into the flattened structures without - * interpretation (as opposed to the interpretation of strings that we - * do in *this* Deserializer for a similar map). This is generally true - * for all non-self-describing formats. - * - * A better approach in serde might be to defer type assignment with a - * new function analogous to deserialize_any, but with the option of - * returning the raw data, frozen for future processing. The serde - * internal `FlatMapDeserializer` would then need to be able to send - * those "frozen" values back to the Deserializer to be processed with - * type information. - */ + // We really shouldn't have to implement this, and we can't actually do so + // properly, but due to the way that serde currently handles flattened + // structs this will be called for all members in flattened (i.e. non- + // root) structs + // + // See serde-rs/serde#1183 for details. 
The macro for serde::Deserialize + // can't know the members of flattened structs (those are in a different + // scope) so serde forces all items to be deserialized, saves them in a + // map, and then deserialized them into the flattened structures without + // interpretation (as opposed to the interpretation of strings that we + // do in *this* Deserializer for a similar map). This is generally true + // for all non-self-describing formats. + // + // A better approach in serde might be to defer type assignment with a + // new function analogous to deserialize_any, but with the option of + // returning the raw data, frozen for future processing. The serde + // internal `FlatMapDeserializer` would then need to be able to send + // those "frozen" values back to the Deserializer to be processed with + // type information. fn deserialize_any(self, visitor: V) -> Result where V: Visitor<'de>, @@ -307,9 +293,7 @@ where } } -/* - * Deserializer component for processing enums. - */ +// Deserializer component for processing enums. impl<'de, Z> EnumAccess<'de> for &mut MapDeserializer<'de, Z> where Z: MapValue + Debug + Clone + 'static, @@ -328,9 +312,7 @@ where } } -/* - * Deserializer component for processing enum variants. - */ +// Deserializer component for processing enum variants. impl<'de, Z> VariantAccess<'de> for &mut MapDeserializer<'de, Z> where Z: MapValue + Clone + Debug + 'static, @@ -371,13 +353,11 @@ where } } -/* - * Deserializer component for iterating over the Map. - */ +// Deserializer component for iterating over the Map. struct MapMapAccess { - /** Iterator through the Map */ + /// Iterator through the Map iter: Box>, - /** Pending value in a key-value pair */ + /// Pending value in a key-value pair value: Option, } @@ -396,9 +376,9 @@ where { match self.iter.next() { Some((key, value)) => { - /* Save the value for later. */ + // Save the value for later. self.value.replace(value); - /* Create a Deserializer for that single value. */ + // Create a Deserializer for that single value. let mut deserializer = MapDeserializer::Value(key); seed.deserialize(&mut deserializer).map(Some) } @@ -414,10 +394,8 @@ where let mut deserializer = MapDeserializer::Value(value); seed.deserialize(&mut deserializer) } - /* - * This means we were called without a corresponding call to - * next_key_seed() which should not be possible. - */ + // This means we were called without a corresponding call to + // next_key_seed() which should not be possible. None => unreachable!(), } } diff --git a/dropshot/src/handler.rs b/dropshot/src/handler.rs index c05abc3ab..ed92026ea 100644 --- a/dropshot/src/handler.rs +++ b/dropshot/src/handler.rs @@ -1,37 +1,35 @@ // Copyright 2020 Oxide Computer Company -/*! - * Interface for implementing HTTP endpoint handler functions. - * - * For information about supported endpoint function signatures, argument types, - * extractors, and return types, see the top-level documentation for this crate. - * As documented there, we support several different sets of function arguments - * and return types. - * - * We allow for variation in the function arguments not so much for programmer - * convenience (since parsing the query string or JSON body could be implemented - * in a line or two of code each, with the right helper functions) but rather so - * that the type signature of the handler function can be programmatically - * analyzed to generate an OpenAPI snippet for this endpoint. 
This approach of - * treating the server implementation as the source of truth for the API - * specification ensures that--at least in many important ways--the - * implementation cannot diverge from the spec. - * - * Just like we want API input types to be represented in function arguments, we - * want API response types to be represented in function return values so that - * OpenAPI tooling can identify them at build time. The more specific a type - * returned by the handler function, the more can be validated at build-time, - * and the more specific an OpenAPI schema can be generated from the source - * alone. - * - * We go through considerable effort below to make this interface possible. - * Both the interface (primarily) and the implementation (less so) are inspired - * by Actix-Web. The Actix implementation is significantly more general (and - * commensurately complex). It would be possible to implement richer facilities - * here, like extractors for backend server state, headers, and so on; allowing - * for server and request parameters to be omitted; and so on; but those other - * facilities don't seem that valuable right now since they largely don't affect - * OpenAPI document generation. - */ +//! Interface for implementing HTTP endpoint handler functions. +//! +//! For information about supported endpoint function signatures, argument types, +//! extractors, and return types, see the top-level documentation for this crate. +//! As documented there, we support several different sets of function arguments +//! and return types. +//! +//! We allow for variation in the function arguments not so much for programmer +//! convenience (since parsing the query string or JSON body could be implemented +//! in a line or two of code each, with the right helper functions) but rather so +//! that the type signature of the handler function can be programmatically +//! analyzed to generate an OpenAPI snippet for this endpoint. This approach of +//! treating the server implementation as the source of truth for the API +//! specification ensures that--at least in many important ways--the +//! implementation cannot diverge from the spec. +//! +//! Just like we want API input types to be represented in function arguments, we +//! want API response types to be represented in function return values so that +//! OpenAPI tooling can identify them at build time. The more specific a type +//! returned by the handler function, the more can be validated at build-time, +//! and the more specific an OpenAPI schema can be generated from the source +//! alone. +//! +//! We go through considerable effort below to make this interface possible. +//! Both the interface (primarily) and the implementation (less so) are inspired +//! by Actix-Web. The Actix implementation is significantly more general (and +//! commensurately complex). It would be possible to implement richer facilities +//! here, like extractors for backend server state, headers, and so on; allowing +//! for server and request parameters to be omitted; and so on; but those other +//! facilities don't seem that valuable right now since they largely don't affect +//! OpenAPI document generation. use super::error::HttpError; use super::http_util::http_extract_path_params; @@ -76,55 +74,45 @@ use std::marker::PhantomData; use std::num::NonZeroU32; use std::sync::Arc; -/** - * Type alias for the result returned by HTTP handler functions. - */ +/// Type alias for the result returned by HTTP handler functions. 
pub type HttpHandlerResult = Result, HttpError>; -/** - * Handle for various interfaces useful during request processing. - */ -/* - * TODO-cleanup What's the right way to package up "request"? The only time we - * need it to be mutable is when we're reading the body (e.g., as part of the - * JSON extractor). In order to support that, we wrap it in something that - * supports interior mutability. It also needs to be thread-safe, since we're - * using async/await. That brings us to Arc>, but it seems like - * overkill since it will only really be used by one thread at a time (at all, - * let alone mutably) and there will never be contention on the Mutex. - */ +/// Handle for various interfaces useful during request processing. +// TODO-cleanup What's the right way to package up "request"? The only time we +// need it to be mutable is when we're reading the body (e.g., as part of the +// JSON extractor). In order to support that, we wrap it in something that +// supports interior mutability. It also needs to be thread-safe, since we're +// using async/await. That brings us to Arc>, but it seems like +// overkill since it will only really be used by one thread at a time (at all, +// let alone mutably) and there will never be contention on the Mutex. #[derive(Debug)] pub struct RequestContext { - /** shared server state */ + /// shared server state pub server: Arc>, - /** HTTP request details */ + /// HTTP request details pub request: Arc>>, - /** HTTP request routing variables */ + /// HTTP request routing variables pub path_variables: VariableSet, - /** expected request body mime type */ + /// expected request body mime type pub body_content_type: ApiEndpointBodyContentType, - /** unique id assigned to this request */ + /// unique id assigned to this request pub request_id: String, - /** logger for this specific request */ + /// logger for this specific request pub log: Logger, } impl RequestContext { - /** - * Returns the server context state. - */ + /// Returns the server context state. pub fn context(&self) -> &Context { &self.server.private } - /** - * Returns the appropriate count of items to return for a paginated request - * - * This first looks at any client-requested limit and clamps it based on the - * server-configured maximum page size. If the client did not request any - * particular limit, this function returns the server-configured default - * page size. - */ + /// Returns the appropriate count of items to return for a paginated request + /// + /// This first looks at any client-requested limit and clamps it based on the + /// server-configured maximum page size. If the client did not request any + /// particular limit, this function returns the server-configured default + /// page size. pub fn page_limit( &self, pag_params: &PaginationParams, @@ -137,28 +125,22 @@ impl RequestContext { Ok(pag_params .limit - /* - * Compare the client-provided limit to the configured max for the - * server and take the smaller one. - */ + // Compare the client-provided limit to the configured max for the + // server and take the smaller one. .map(|limit| min(limit, server_config.page_max_nitems)) - /* - * If no limit was provided by the client, use the configured - * default. - */ + // If no limit was provided by the client, use the configured + // default. .unwrap_or(server_config.page_default_nitems)) } } -/** - * Helper trait for extracting the underlying Context type from the - * first argument to an endpoint. This trait exists to help the - * endpoint macro parse this argument. 
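To make the clamping rule in `page_limit()` above concrete, here is a minimal sketch. The configuration field names `page_max_nitems` and `page_default_nitems` come from the code above; the standalone function itself is hypothetical.

```rust
use std::cmp::min;
use std::num::NonZeroU32;

// Honor a client-requested limit up to the server-configured maximum;
// fall back to the server-configured default when no limit was given.
fn effective_page_limit(
    client_limit: Option<NonZeroU32>,
    page_max_nitems: NonZeroU32,
    page_default_nitems: NonZeroU32,
) -> NonZeroU32 {
    client_limit
        .map(|limit| min(limit, page_max_nitems))
        .unwrap_or(page_default_nitems)
}
```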
- * - * The first argument to an endpoint handler must be of the form: - * `Arc>` where `T` is a caller-supplied - * value that implements `ServerContext`. - */ +/// Helper trait for extracting the underlying Context type from the +/// first argument to an endpoint. This trait exists to help the +/// endpoint macro parse this argument. +/// +/// The first argument to an endpoint handler must be of the form: +/// `Arc>` where `T` is a caller-supplied +/// value that implements `ServerContext`. pub trait RequestContextArgument { type Context; } @@ -169,26 +151,22 @@ impl RequestContextArgument type Context = T; } -/** - * `Extractor` defines an interface allowing a type to be constructed from a - * `RequestContext`. Unlike most traits, `Extractor` essentially defines only a - * constructor function, not instance functions. - * - * The extractors that we provide (`Query`, `Path`, `TypedBody`, `UntypedBody`, and - * `WebsocketUpgrade`) implement `Extractor` in order to construct themselves from - * the request. For example, `Extractor` is implemented for `Query` with a - * function that reads the query string from the request, parses it, and - * constructs a `Query` with it. - * - * We also define implementations of `Extractor` for tuples of types that - * themselves implement `Extractor`. See the implementation of - * `HttpRouteHandler` for more on why this needed. - */ +/// `Extractor` defines an interface allowing a type to be constructed from a +/// `RequestContext`. Unlike most traits, `Extractor` essentially defines only a +/// constructor function, not instance functions. +/// +/// The extractors that we provide (`Query`, `Path`, `TypedBody`, `UntypedBody`, and +/// `WebsocketUpgrade`) implement `Extractor` in order to construct themselves from +/// the request. For example, `Extractor` is implemented for `Query` with a +/// function that reads the query string from the request, parses it, and +/// constructs a `Query` with it. +/// +/// We also define implementations of `Extractor` for tuples of types that +/// themselves implement `Extractor`. See the implementation of +/// `HttpRouteHandler` for more on why this needed. #[async_trait] pub trait Extractor: Send + Sync + Sized { - /** - * Construct an instance of this type from a `RequestContext`. - */ + /// Construct an instance of this type from a `RequestContext`. async fn from_request( rqctx: Arc>, ) -> Result; @@ -198,19 +176,15 @@ pub trait Extractor: Send + Sync + Sized { ) -> ExtractorMetadata; } -/** - * Metadata associated with an extractor including parameters and whether or not - * the associated endpoint is paginated. - */ +/// Metadata associated with an extractor including parameters and whether or not +/// the associated endpoint is paginated. pub struct ExtractorMetadata { pub extension_mode: ExtensionMode, pub parameters: Vec, } -/** - * `impl_derived_for_tuple!` defines implementations of `Extractor` for tuples - * whose elements themselves implement `Extractor`. - */ +/// `impl_derived_for_tuple!` defines implementations of `Extractor` for tuples +/// whose elements themselves implement `Extractor`. macro_rules! impl_extractor_for_tuple { ($( $T:ident),*) => { #[async_trait] @@ -248,23 +222,21 @@ impl_extractor_for_tuple!(T1); impl_extractor_for_tuple!(T1, T2); impl_extractor_for_tuple!(T1, T2, T3); -/** - * `HttpHandlerFunc` is a trait providing a single function, `handle_request()`, - * which takes an HTTP request and produces an HTTP response (or - * `HttpError`). 
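As a sketch of how extractor tuples surface in practice, consider an endpoint that takes both a query-string extractor and a body extractor. The two arguments after the request context together form the `FuncParams` tuple `(Query<ListFilter>, TypedBody<WidgetUpdate>)`. The types and route here are hypothetical; the signature follows the `Arc`-wrapped form used throughout this diff.

```rust
use dropshot::{endpoint, HttpError, HttpResponseOk, Query, RequestContext, TypedBody};
use schemars::JsonSchema;
use serde::Deserialize;
use std::sync::Arc;

#[derive(Deserialize, JsonSchema)]
struct ListFilter {
    name: Option<String>,
}

#[derive(Deserialize, JsonSchema)]
struct WidgetUpdate {
    value: u64,
}

// Both extractor arguments are constructed via Extractor::from_request();
// the tuple implementation simply invokes each element's implementation.
#[endpoint {
    method = PUT,
    path = "/widgets"
}]
async fn widget_put(
    _rqctx: Arc<RequestContext<()>>,
    _query: Query<ListFilter>,
    body: TypedBody<WidgetUpdate>,
) -> Result<HttpResponseOk<u64>, HttpError> {
    Ok(HttpResponseOk(body.into_inner().value))
}
```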
- * - * As described above, handler functions can have a number of different - * signatures. They all consume a reference to the current request context. - * They may also consume some number of extractor arguments. The - * `HttpHandlerFunc` trait is parametrized by the type `FuncParams`, which is - * expected to be a tuple describing these extractor arguments. - * - * Below, we define implementations of `HttpHandlerFunc` for various function - * types. In this way, we can treat functions with different signatures as - * different kinds of `HttpHandlerFunc`. However, since the signature shows up - * in the `FuncParams` type parameter, we'll need additional abstraction to - * treat different handlers interchangeably. See `RouteHandler` below. - */ +/// `HttpHandlerFunc` is a trait providing a single function, `handle_request()`, +/// which takes an HTTP request and produces an HTTP response (or +/// `HttpError`). +/// +/// As described above, handler functions can have a number of different +/// signatures. They all consume a reference to the current request context. +/// They may also consume some number of extractor arguments. The +/// `HttpHandlerFunc` trait is parametrized by the type `FuncParams`, which is +/// expected to be a tuple describing these extractor arguments. +/// +/// Below, we define implementations of `HttpHandlerFunc` for various function +/// types. In this way, we can treat functions with different signatures as +/// different kinds of `HttpHandlerFunc`. However, since the signature shows up +/// in the `FuncParams` type parameter, we'll need additional abstraction to +/// treat different handlers interchangeably. See `RouteHandler` below. #[async_trait] pub trait HttpHandlerFunc: Send + Sync + 'static @@ -280,88 +252,84 @@ where ) -> HttpHandlerResult; } -/** - * Defines an implementation of the `HttpHandlerFunc` trait for functions - * matching one of the supported signatures for HTTP endpoint handler functions. - * We use a macro to do this because we need to provide different - * implementations for functions that take 0 arguments, 1 argument, 2 arguments, - * etc., but the implementations are almost identical. - */ -/* - * For background: as the module-level documentation explains, we want to - * support API endpoint handler functions that vary in their signature so that - * the signature can accurately reflect details about their expected input and - * output instead of a generic `Request -> Response` description. The - * `HttpHandlerFunc` trait defines an interface for invoking one of these - * functions. This macro defines an implementation of `HttpHandlerFunc` that - * says how to take any of these HTTP endpoint handler function and provide that - * uniform interface for callers. The implementation essentially does three - * things: - * - * 1. Converts the uniform arguments of `handle_request()` into the appropriate - * arguments for the underlying function. This is easier than it sounds at - * this point because we require that one of the arguments be a tuple whose - * types correspond to the argument types for the function, so we just need - * to unpack them from the tuple into function arguments. - * - * 2. Converts a call to the `handle_request()` method into a call to the - * underlying function. - * - * 3. Converts the return type of the underlying function into the uniform - * return type expected by callers of `handle_request()`. This, too, is - * easier than it sounds because we require that the return value implement - * `HttpResponse`. 
- * - * As mentioned above, we're implementing the trait `HttpHandlerFunc` on _any_ - * type `FuncType` that matches the trait bounds below. In particular, it must - * take a request context argument and whatever other type parameters have been - * passed to this macro. - * - * The function's return type deserves further explanation. (Actually, these - * functions all return a `Future`, but for convenience when we say "return - * type" in the comments here we're referring to the output type of the returned - * future.) Again, as described above, we'd like to allow HTTP endpoint - * functions to return a variety of different return types that are ultimately - * converted into `Result, HttpError>`. To do that, the trait - * bounds below say that the function must produce a `Result` where `ResponseType` is a type that implements `HttpResponse`. - * We provide a few implementations of the trait `HttpTypedResponse` that - * includes a HTTP status code and structured output. In addition we allow for - * functions to hand-craft a `Response`. For both we implement - * `HttpResponse` (trivially in the latter case). - * - * 1. Handler function - * | - * | returns: - * v - * 2. Result - * | - * | This may fail with an HttpError which we return immediately. - * | On success, this will be Ok(ResponseType) for some specific - * | ResponseType that implements HttpResponse. We'll end up - * | invoking: - * v - * 3. ResponseType::to_result() - * | - * | This is a type-specific conversion from `ResponseType` into - * | `Response` that's allowed to fail with an `HttpError`. - * v - * 4. Result, HttpError> - * - * Note that the handler function may fail due to an internal error *or* the - * conversion to JSON may successively fail in the call to - * `serde_json::to_string()`. - * - * The `HttpResponse` trait lets us handle both generic responses via - * `Response` as well as more structured responses via structures - * implementing `HttpResponse`. The latter gives us a typed - * structure as well as response code that we use to generate rich OpenAPI - * content. - * - * Note: the macro parameters really ought to be `$i:literal` and `$T:ident`, - * however that causes us to run afoul of issue dtolnay/async-trait#46. The - * workaround is to make both parameters `tt` (token tree). - */ +/// Defines an implementation of the `HttpHandlerFunc` trait for functions +/// matching one of the supported signatures for HTTP endpoint handler functions. +/// We use a macro to do this because we need to provide different +/// implementations for functions that take 0 arguments, 1 argument, 2 arguments, +/// etc., but the implementations are almost identical. +// For background: as the module-level documentation explains, we want to +// support API endpoint handler functions that vary in their signature so that +// the signature can accurately reflect details about their expected input and +// output instead of a generic `Request -> Response` description. The +// `HttpHandlerFunc` trait defines an interface for invoking one of these +// functions. This macro defines an implementation of `HttpHandlerFunc` that +// says how to take any of these HTTP endpoint handler function and provide that +// uniform interface for callers. The implementation essentially does three +// things: +// +// 1. Converts the uniform arguments of `handle_request()` into the appropriate +// arguments for the underlying function. 
This is easier than it sounds at +// this point because we require that one of the arguments be a tuple whose +// types correspond to the argument types for the function, so we just need +// to unpack them from the tuple into function arguments. +// +// 2. Converts a call to the `handle_request()` method into a call to the +// underlying function. +// +// 3. Converts the return type of the underlying function into the uniform +// return type expected by callers of `handle_request()`. This, too, is +// easier than it sounds because we require that the return value implement +// `HttpResponse`. +// +// As mentioned above, we're implementing the trait `HttpHandlerFunc` on _any_ +// type `FuncType` that matches the trait bounds below. In particular, it must +// take a request context argument and whatever other type parameters have been +// passed to this macro. +// +// The function's return type deserves further explanation. (Actually, these +// functions all return a `Future`, but for convenience when we say "return +// type" in the comments here we're referring to the output type of the returned +// future.) Again, as described above, we'd like to allow HTTP endpoint +// functions to return a variety of different return types that are ultimately +// converted into `Result, HttpError>`. To do that, the trait +// bounds below say that the function must produce a `Result` where `ResponseType` is a type that implements `HttpResponse`. +// We provide a few implementations of the trait `HttpTypedResponse` that +// includes a HTTP status code and structured output. In addition we allow for +// functions to hand-craft a `Response`. For both we implement +// `HttpResponse` (trivially in the latter case). +// +// 1. Handler function +// | +// | returns: +// v +// 2. Result +// | +// | This may fail with an HttpError which we return immediately. +// | On success, this will be Ok(ResponseType) for some specific +// | ResponseType that implements HttpResponse. We'll end up +// | invoking: +// v +// 3. ResponseType::to_result() +// | +// | This is a type-specific conversion from `ResponseType` into +// | `Response` that's allowed to fail with an `HttpError`. +// v +// 4. Result, HttpError> +// +// Note that the handler function may fail due to an internal error *or* the +// conversion to JSON may successively fail in the call to +// `serde_json::to_string()`. +// +// The `HttpResponse` trait lets us handle both generic responses via +// `Response` as well as more structured responses via structures +// implementing `HttpResponse`. The latter gives us a typed +// structure as well as response code that we use to generate rich OpenAPI +// content. +// +// Note: the macro parameters really ought to be `$i:literal` and `$T:ident`, +// however that causes us to run afoul of issue dtolnay/async-trait#46. The +// workaround is to make both parameters `tt` (token tree). macro_rules! impl_HttpHandlerFunc_for_func_with_params { ($(($i:tt, $T:tt)),*) => { @@ -395,41 +363,33 @@ impl_HttpHandlerFunc_for_func_with_params!((0, T0)); impl_HttpHandlerFunc_for_func_with_params!((0, T1), (1, T2)); impl_HttpHandlerFunc_for_func_with_params!((0, T1), (1, T2), (2, T3)); -/** - * `RouteHandler` abstracts an `HttpHandlerFunc` in a - * way that allows callers to invoke the handler without knowing the handler's - * function signature. - * - * The "Route" in `RouteHandler` refers to the fact that this structure is used - * to record that a specific handler has been attached to a specific HTTP route. 
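A sketch of the two supported return styles described above: a typed response that flows through `to_result()` and feeds OpenAPI generation, and a hand-crafted `Response<Body>` for free-form output. Both handlers are hypothetical.

```rust
use dropshot::{HttpError, HttpResponseOk, RequestContext};
use http::StatusCode;
use hyper::{Body, Response};
use std::sync::Arc;

// Typed: the 200 status and the u64 body schema are knowable at build time.
async fn typed_handler(
    _rqctx: Arc<RequestContext<()>>,
) -> Result<HttpResponseOk<u64>, HttpError> {
    Ok(HttpResponseOk(42))
}

// Free-form: to_result() is trivial and no response metadata is produced.
async fn freeform_handler(
    _rqctx: Arc<RequestContext<()>>,
) -> Result<Response<Body>, HttpError> {
    Ok(Response::builder()
        .status(StatusCode::NO_CONTENT)
        .body(Body::empty())
        .unwrap())
}
```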
- */ +/// `RouteHandler` abstracts an `HttpHandlerFunc` in a +/// way that allows callers to invoke the handler without knowing the handler's +/// function signature. +/// +/// The "Route" in `RouteHandler` refers to the fact that this structure is used +/// to record that a specific handler has been attached to a specific HTTP route. #[async_trait] pub trait RouteHandler: Debug + Send + Sync { - /** - * Returns a description of this handler. This might be a function name, - * for example. This is not guaranteed to be unique. - */ + /// Returns a description of this handler. This might be a function name, + /// for example. This is not guaranteed to be unique. fn label(&self) -> &str; - /** - * Handle an incoming HTTP request. - */ + /// Handle an incoming HTTP request. async fn handle_request( &self, rqctx: RequestContext, ) -> HttpHandlerResult; } -/** - * `HttpRouteHandler` is the only type that implements `RouteHandler`. The - * reason both exist is that we need `HttpRouteHandler::new()` to consume an - * arbitrary kind of `HttpHandlerFunc` and return an object that's - * _not_ parametrized by `FuncParams`. In fact, the resulting - * `HttpRouteHandler` _is_ parametrized by `FuncParams`, but we returned it - * as a `RouteHandler` that does not have those type parameters, allowing the - * caller to ignore the differences between different handler function type - * signatures. - */ +/// `HttpRouteHandler` is the only type that implements `RouteHandler`. The +/// reason both exist is that we need `HttpRouteHandler::new()` to consume an +/// arbitrary kind of `HttpHandlerFunc` and return an object that's +/// _not_ parametrized by `FuncParams`. In fact, the resulting +/// `HttpRouteHandler` _is_ parametrized by `FuncParams`, but we returned it +/// as a `RouteHandler` that does not have those type parameters, allowing the +/// caller to ignore the differences between different handler function type +/// signatures. pub struct HttpRouteHandler where Context: ServerContext, @@ -437,20 +397,18 @@ where FuncParams: Extractor, ResponseType: HttpResponse + Send + Sync + 'static, { - /** the actual HttpHandlerFunc used to implement this route */ + /// the actual HttpHandlerFunc used to implement this route handler: HandlerType, - /** debugging label for the handler */ + /// debugging label for the handler label: String, - /** - * In order to define `new()` below, we need a type parameter `HandlerType` - * that implements `HttpHandlerFunc`, which means we also need a - * `FuncParams` type parameter. However, this type parameter would be - * unconstrained, which makes Rust upset. Use of PhantomData - * here causes the compiler to behave as though this struct referred to a - * `FuncParams`, which allows us to use the type parameter below. - */ + /// In order to define `new()` below, we need a type parameter `HandlerType` + /// that implements `HttpHandlerFunc`, which means we also need a + /// `FuncParams` type parameter. However, this type parameter would be + /// unconstrained, which makes Rust upset. Use of PhantomData + /// here causes the compiler to behave as though this struct referred to a + /// `FuncParams`, which allows us to use the type parameter below. phantom: PhantomData<(FuncParams, ResponseType, Context)>, } @@ -484,24 +442,22 @@ where &self, rqctx_raw: RequestContext, ) -> HttpHandlerResult { - /* - * This is where the magic happens: in the code below, `funcparams` has - * type `FuncParams`, which is a tuple type describing the extractor - * arguments to the handler function. 
This could be `()`, `(Query)`,
+        // `(TypedBody)`, `(Query, TypedBody)`, or any other
+        // combination of extractors we decide to support in the future.
+        // Whatever it is must implement `Extractor`, which means we can invoke
+        // `Extractor::from_request()` to construct the argument tuple,
+        // generally from information available in the `request` object. We
+        // pass this down to the `HttpHandlerFunc`, for which there's a
+        // different implementation for each value of `FuncParams`. The
+        // `HttpHandlerFunc` for each `FuncParams` just pulls the arguments out
+        // of the `funcparams` tuple and makes them actual function arguments
+        // for the actual handler function. From this point down, all of this
+        // is resolved statically.
        let rqctx = Arc::new(rqctx_raw);
        let funcparams = Extractor::from_request(Arc::clone(&rqctx)).await?;
        let future = self.handler.handle_request(rqctx, funcparams);
@@ -509,9 +465,7 @@ where
     }
 }
 
-/*
- * Public interfaces
- */
+// Public interfaces
 
 impl
     HttpRouteHandler
@@ -521,20 +475,16 @@ where
     FuncParams: Extractor + 'static,
     ResponseType: HttpResponse + Send + Sync + 'static,
 {
-    /**
-     * Given a function matching one of the supported API handler function
-     * signatures, return a RouteHandler that can be used to respond to HTTP
-     * requests using this function.
-     */
+    /// Given a function matching one of the supported API handler function
+    /// signatures, return a RouteHandler that can be used to respond to HTTP
+    /// requests using this function.
     pub fn new(handler: HandlerType) -> Box> {
         HttpRouteHandler::new_with_name(handler, "")
     }
 
-    /**
-     * Given a function matching one of the supported API handler function
-     * signatures, return a RouteHandler that can be used to respond to HTTP
-     * requests using this function.
-     */
+    /// Given a function matching one of the supported API handler function
+    /// signatures, return a RouteHandler that can be used to respond to HTTP
+    /// requests using this function.
     pub fn new_with_name(
        handler: HandlerType,
        label: &str,
@@ -547,38 +497,28 @@ where
     }
 }
 
-/*
- * Extractors
- */
+// Extractors
 
-/*
- * Query: query string extractor
- */
+// Query: query string extractor
 
-/**
- * `Query` is an extractor used to deserialize an instance of
- * `QueryType` from an HTTP request's query string. `QueryType` is any
- * structure of yours that implements `serde::Deserialize`. See this module's
- * documentation for more information.
- */
+/// `Query` is an extractor used to deserialize an instance of
+/// `QueryType` from an HTTP request's query string. 
`QueryType` is any - * structure of yours that implements `serde::Deserialize`. See this module's - * documentation for more information. - */ +/// `Query` is an extractor used to deserialize an instance of +/// `QueryType` from an HTTP request's query string. `QueryType` is any +/// structure of yours that implements `serde::Deserialize`. See this module's +/// documentation for more information. #[derive(Debug)] pub struct Query { inner: QueryType, } impl Query { - /* - * TODO drop this in favor of Deref? + Display and Debug for convenience? - */ + // TODO drop this in favor of Deref? + Display and Debug for convenience? pub fn into_inner(self) -> QueryType { self.inner } } -/** - * Given an HTTP request, pull out the query string and attempt to deserialize - * it as an instance of `QueryType`. - */ +/// Given an HTTP request, pull out the query string and attempt to deserialize +/// it as an instance of `QueryType`. fn http_request_load_query( request: &Request, ) -> Result, HttpError> @@ -586,9 +526,7 @@ where QueryType: DeserializeOwned + JsonSchema + Send + Sync, { let raw_query_string = request.uri().query().unwrap_or(""); - /* - * TODO-correctness: are query strings defined to be urlencoded in this way? - */ + // TODO-correctness: are query strings defined to be urlencoded in this way? match serde_urlencoded::from_str(raw_query_string) { Ok(q) => Ok(Query { inner: q }), Err(e) => Err(HttpError::for_bad_request( @@ -598,14 +536,12 @@ where } } -/* - * The `Extractor` implementation for Query describes how to construct - * an instance of `Query` from an HTTP request: namely, by parsing - * the query string to an instance of `QueryType`. - * TODO-cleanup We shouldn't have to use the "'static" bound on `QueryType` - * here. It seems like we ought to be able to use 'async_trait, but that - * doesn't seem to be defined. - */ +// The `Extractor` implementation for Query describes how to construct +// an instance of `Query` from an HTTP request: namely, by parsing +// the query string to an instance of `QueryType`. +// TODO-cleanup We shouldn't have to use the "'static" bound on `QueryType` +// here. It seems like we ought to be able to use 'async_trait, but that +// doesn't seem to be defined. #[async_trait] impl Extractor for Query where @@ -625,35 +561,27 @@ where } } -/* - * Path: path parameter string extractor - */ +// Path: path parameter string extractor -/** - * `Path` is an extractor used to deserialize an instance of - * `PathType` from an HTTP request's path parameters. `PathType` is any - * structure of yours that implements `serde::Deserialize`. See this module's - * documentation for more information. - */ +/// `Path` is an extractor used to deserialize an instance of +/// `PathType` from an HTTP request's path parameters. `PathType` is any +/// structure of yours that implements `serde::Deserialize`. See this module's +/// documentation for more information. #[derive(Debug)] pub struct Path { inner: PathType, } impl Path { - /* - * TODO drop this in favor of Deref? + Display and Debug for convenience? - */ + // TODO drop this in favor of Deref? + Display and Debug for convenience? pub fn into_inner(self) -> PathType { self.inner } } -/* - * The `Extractor` implementation for Path describes how to construct - * an instance of `Path` from an HTTP request: namely, by extracting - * parameters from the query string. 
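A sketch of `Path` in use: the struct's fields are populated from the variables matched in the route template rather than from the query string. The endpoint and types are hypothetical.

```rust
use dropshot::{endpoint, HttpError, HttpResponseOk, Path, RequestContext};
use schemars::JsonSchema;
use serde::Deserialize;
use std::sync::Arc;

#[derive(Deserialize, JsonSchema)]
struct ProjectPathParams {
    project_id: String,
}

// The {project_id} variable in the path template supplies the value for
// the field of the same name.
#[endpoint {
    method = GET,
    path = "/projects/{project_id}"
}]
async fn project_get(
    _rqctx: Arc<RequestContext<()>>,
    path: Path<ProjectPathParams>,
) -> Result<HttpResponseOk<String>, HttpError> {
    Ok(HttpResponseOk(path.into_inner().project_id))
}
```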
- */ +// The `Extractor` implementation for Path describes how to construct +// an instance of `Path` from an HTTP request: namely, by extracting +// parameters from the query string. #[async_trait] impl Extractor for Path where @@ -673,20 +601,16 @@ where } } -/** - * Convenience function to generate parameter metadata from types implementing - * `JsonSchema` for use with `Query` and `Path` `Extractors`. - */ +/// Convenience function to generate parameter metadata from types implementing +/// `JsonSchema` for use with `Query` and `Path` `Extractors`. fn get_metadata( loc: &ApiEndpointParameterLocation, ) -> ExtractorMetadata where ParamType: JsonSchema, { - /* - * Generate the type for `ParamType` then pluck out each member of - * the structure to encode as an individual parameter. - */ + // Generate the type for `ParamType` then pluck out each member of + // the structure to encode as an individual parameter. let mut generator = schemars::gen::SchemaGenerator::new( schemars::gen::SchemaSettings::openapi3(), ); @@ -711,9 +635,7 @@ where None => ExtensionMode::None, }; - /* - * Convert our collection of struct members list of parameters. - */ + // Convert our collection of struct members list of parameters. let parameters = schema2struct(&schema, &generator, true) .into_iter() .map(|struct_member| { @@ -747,9 +669,7 @@ fn schema_extensions( } } -/** - * Used to visit all schemas and collect all dependencies. - */ +/// Used to visit all schemas and collect all dependencies. struct ReferenceVisitor<'a> { generator: &'a schemars::gen::SchemaGenerator, dependencies: indexmap::IndexMap, @@ -801,29 +721,25 @@ pub(crate) struct StructMember { pub required: bool, } -/** - * This helper function produces a list of the structure members for the - * given schema. For each it returns: - * (name: &String, schema: &Schema, required: bool) - * - * If the input schema is not a flat structure the result will be a runtime - * failure reflective of a programming error (likely an invalid type specified - * in a handler function). - * - * This function is invoked recursively on subschemas. - */ +/// This helper function produces a list of the structure members for the +/// given schema. For each it returns: +/// (name: &String, schema: &Schema, required: bool) +/// +/// If the input schema is not a flat structure the result will be a runtime +/// failure reflective of a programming error (likely an invalid type specified +/// in a handler function). +/// +/// This function is invoked recursively on subschemas. pub(crate) fn schema2struct( schema: &schemars::schema::Schema, generator: &schemars::gen::SchemaGenerator, required: bool, ) -> Vec { - /* - * We ignore schema.metadata, which includes things like doc comments, and - * schema.extensions. We call these out explicitly rather than eliding them - * as .. since we match all other fields in the structure. - */ + // We ignore schema.metadata, which includes things like doc comments, and + // schema.extensions. We call these out explicitly rather than eliding them + // as .. since we match all other fields in the structure. match schema { - /* We expect references to be on their own. */ + // We expect references to be on their own. schemars::schema::Schema::Object(schemars::schema::SchemaObject { metadata: _, instance_type: None, @@ -843,7 +759,7 @@ pub(crate) fn schema2struct( required, ), - /* Match objects and subschemas. */ + // Match objects and subschemas. 
schemars::schema::Schema::Object(schemars::schema::SchemaObject { metadata: _, instance_type: Some(schemars::schema::SingleOrVec::Single(_)), @@ -860,10 +776,8 @@ pub(crate) fn schema2struct( }) => { let mut results = Vec::new(); - /* - * If there's a top-level object, add its members to the list of - * parameters. - */ + // If there's a top-level object, add its members to the list of + // parameters. if let Some(object) = object { results.extend(object.properties.iter().map( |(name, schema)| { @@ -880,13 +794,11 @@ pub(crate) fn schema2struct( )); } - /* - * We might see subschemas here in the case of flattened enums - * or flattened structures that have associated doc comments. - */ + // We might see subschemas here in the case of flattened enums + // or flattened structures that have associated doc comments. if let Some(subschemas) = subschemas { match subschemas.as_ref() { - /* We expect any_of in the case of an enum. */ + // We expect any_of in the case of an enum. schemars::schema::SubschemaValidation { all_of: None, any_of: Some(schemas), @@ -896,16 +808,14 @@ pub(crate) fn schema2struct( then_schema: None, else_schema: None, } => results.extend(schemas.iter().flat_map(|subschema| { - /* Note that these will be tagged as optional. */ + // Note that these will be tagged as optional. schema2struct(subschema, generator, false) })), - /* - * With an all_of, there should be a single element. We - * typically see this in the case where there is a doc - * comment on a structure as OpenAPI 3.0.x doesn't have - * a description field directly on schemas. - */ + // With an all_of, there should be a single element. We + // typically see this in the case where there is a doc + // comment on a structure as OpenAPI 3.0.x doesn't have + // a description field directly on schemas. schemars::schema::SubschemaValidation { all_of: Some(subschemas), any_of: None, @@ -920,7 +830,7 @@ pub(crate) fn schema2struct( }), ), - /* We don't expect any other types of subschemas. */ + // We don't expect any other types of subschemas. invalid => panic!("invalid subschema {:#?}", invalid), } } @@ -928,22 +838,18 @@ pub(crate) fn schema2struct( results } - /* The generated schema should be an object. */ + // The generated schema should be an object. invalid => panic!("invalid type {:#?}", invalid), } } -/* - * TypedBody: body extractor for formats that can be deserialized to a specific - * type. Only JSON is currently supported. - */ +// TypedBody: body extractor for formats that can be deserialized to a specific +// type. Only JSON is currently supported. -/** - * `TypedBody` is an extractor used to deserialize an instance of - * `BodyType` from an HTTP request body. `BodyType` is any structure of yours - * that implements `serde::Deserialize`. See this module's documentation for - * more information. - */ +/// `TypedBody` is an extractor used to deserialize an instance of +/// `BodyType` from an HTTP request body. `BodyType` is any structure of yours +/// that implements `serde::Deserialize`. See this module's documentation for +/// more information. #[derive(Debug)] pub struct TypedBody { inner: BodyType, @@ -952,18 +858,14 @@ pub struct TypedBody { impl TypedBody { - /* - * TODO drop this in favor of Deref? + Display and Debug for convenience? - */ + // TODO drop this in favor of Deref? + Display and Debug for convenience? 
pub fn into_inner(self) -> BodyType { self.inner } } -/** - * Given an HTTP request, attempt to read the body, parse it according - * to the content type, and deserialize it to an instance of `BodyType`. - */ +/// Given an HTTP request, attempt to read the body, parse it according +/// to the content type, and deserialize it to an instance of `BodyType`. async fn http_request_load_body( rqctx: Arc>, ) -> Result, HttpError> @@ -1029,14 +931,12 @@ where Ok(TypedBody { inner: content }) } -/* - * The `Extractor` implementation for TypedBody describes how to - * construct an instance of `TypedBody` from an HTTP request: namely, - * by reading the request body and parsing it as JSON into type `BodyType`. - * TODO-cleanup We shouldn't have to use the "'static" bound on `BodyType` here. - * It seems like we ought to be able to use 'async_trait, but that doesn't seem - * to be defined. - */ +// The `Extractor` implementation for TypedBody describes how to +// construct an instance of `TypedBody` from an HTTP request: namely, +// by reading the request body and parsing it as JSON into type `BodyType`. +// TODO-cleanup We shouldn't have to use the "'static" bound on `BodyType` here. +// It seems like we ought to be able to use 'async_trait, but that doesn't seem +// to be defined. #[async_trait] impl Extractor for TypedBody where @@ -1065,34 +965,24 @@ where } } -/* - * UntypedBody: body extractor for a plain array of bytes of a body. - */ +// UntypedBody: body extractor for a plain array of bytes of a body. -/** - * `UntypedBody` is an extractor for reading in the contents of the HTTP request - * body and making the raw bytes directly available to the consumer. - */ +/// `UntypedBody` is an extractor for reading in the contents of the HTTP request +/// body and making the raw bytes directly available to the consumer. #[derive(Debug)] pub struct UntypedBody { content: Bytes, } impl UntypedBody { - /** - * Returns a byte slice of the underlying body content. - */ - /* - * TODO drop this in favor of Deref? + Display and Debug for convenience? - */ + /// Returns a byte slice of the underlying body content. + // TODO drop this in favor of Deref? + Display and Debug for convenience? pub fn as_bytes(&self) -> &[u8] { &self.content } - /** - * Convenience wrapper to convert the body to a UTF-8 string slice, - * returning a 400-level error if the body is not valid UTF-8. - */ + /// Convenience wrapper to convert the body to a UTF-8 string slice, + /// returning a 400-level error if the body is not valid UTF-8. pub fn as_str(&self) -> Result<&str, HttpError> { std::str::from_utf8(self.as_bytes()).map_err(|e| { HttpError::for_bad_request( @@ -1143,35 +1033,25 @@ impl Extractor for UntypedBody { } } -/* - * Response Type Conversion - * - * See the discussion on macro `impl_HttpHandlerFunc_for_func_with_params` for a - * great deal of context on this. - */ +// Response Type Conversion +// +// See the discussion on macro `impl_HttpHandlerFunc_for_func_with_params` for a +// great deal of context on this. -/** - * HttpResponse must produce a `Result, HttpError>` and generate - * the response metadata. Typically one should use `Response` or an - * implementation of `HttpTypedResponse`. - */ +/// HttpResponse must produce a `Result, HttpError>` and generate +/// the response metadata. Typically one should use `Response` or an +/// implementation of `HttpTypedResponse`. pub trait HttpResponse { - /** - * Generate the response to the HTTP call. - */ + /// Generate the response to the HTTP call. 
fn to_result(self) -> HttpHandlerResult; - /** - * Extract status code and structure metadata for the non-error response. - * Type information for errors is handled generically across all endpoints. - */ + /// Extract status code and structure metadata for the non-error response. + /// Type information for errors is handled generically across all endpoints. fn response_metadata() -> ApiEndpointResponse; } -/** - * `Response` is used for free-form responses. The implementation of - * `to_result()` is trivial, and we don't have any typed metadata to return. - */ +/// `Response` is used for free-form responses. The implementation of +/// `to_result()` is trivial, and we don't have any typed metadata to return. impl HttpResponse for Response { fn to_result(self) -> HttpHandlerResult { Ok(self) @@ -1191,21 +1071,17 @@ impl From for FreeformBody { } } -/** - * An "empty" type used to represent responses that have no associated data - * payload. This isn't intended for general use, but must be pub since it's - * used as the Body type for certain responses. - */ +/// An "empty" type used to represent responses that have no associated data +/// payload. This isn't intended for general use, but must be pub since it's +/// used as the Body type for certain responses. #[doc(hidden)] pub struct Empty; -/* - * Specific Response Types - * - * The `HttpTypedResponse` trait and the concrete types below are provided so - * that handler functions can return types that indicate at compile time the - * kind of HTTP response body they produce. - */ +// Specific Response Types +// +// The `HttpTypedResponse` trait and the concrete types below are provided so +// that handler functions can return types that indicate at compile time the +// kind of HTTP response body they produce. /// Adapter trait that allows both concrete types that implement [JsonSchema] /// and the [FreeformBody] type to add their content to a response builder @@ -1277,11 +1153,9 @@ where } } -/** - * The `HttpCodedResponse` trait is used for all of the specific response types - * that we provide. We use it in particular to encode the success status code - * and the type information of the return value. - */ +/// The `HttpCodedResponse` trait is used for all of the specific response types +/// that we provide. We use it in particular to encode the success status code +/// and the type information of the return value. pub trait HttpCodedResponse: Into + Send + Sync + 'static { @@ -1289,20 +1163,16 @@ pub trait HttpCodedResponse: const STATUS_CODE: StatusCode; const DESCRIPTION: &'static str; - /** - * Convenience method to produce a response based on the input - * `body_object` (whose specific type is defined by the implementing type) - * and the STATUS_CODE specified by the implementing type. This is a default - * trait method to allow callers to avoid redundant type specification. - */ + /// Convenience method to produce a response based on the input + /// `body_object` (whose specific type is defined by the implementing type) + /// and the STATUS_CODE specified by the implementing type. This is a default + /// trait method to allow callers to avoid redundant type specification. fn for_object(body: Self::Body) -> HttpHandlerResult { body.to_response(Response::builder().status(Self::STATUS_CODE)) } } -/** - * Provide results and metadata generation for all implementing types. - */ +/// Provide results and metadata generation for all implementing types. 
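Concretely, returning one of the provided coded types pins both the runtime status and the documented response. A sketch with a hypothetical `Widget` type:

```rust
use dropshot::{HttpError, HttpResponseCreated, RequestContext};
use schemars::JsonSchema;
use serde::Serialize;
use std::sync::Arc;

#[derive(Serialize, JsonSchema)]
struct Widget {
    name: String,
}

// HttpResponseCreated's STATUS_CODE (201) is applied by for_object(), and
// the same constant and body schema feed the OpenAPI response metadata.
async fn widget_create(
    _rqctx: Arc<RequestContext<()>>,
) -> Result<HttpResponseCreated<Widget>, HttpError> {
    Ok(HttpResponseCreated(Widget { name: String::from("w1") }))
}
```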
impl HttpResponse for T where T: HttpCodedResponse, @@ -1326,16 +1196,12 @@ fn make_subschema_for( gen.subschema_for::() } -/** - * `HttpResponseCreated` wraps an object of any serializable type. - * It denotes an HTTP 201 "Created" response whose body is generated by - * serializing the object. - */ -/* - * TODO-cleanup should ApiObject move into this submodule? It'd be nice if we - * could restrict this to an ApiObject::View (by having T: ApiObject and the - * field having type T::View). - */ +/// `HttpResponseCreated` wraps an object of any serializable type. +/// It denotes an HTTP 201 "Created" response whose body is generated by +/// serializing the object. +// TODO-cleanup should ApiObject move into this submodule? It'd be nice if we +// could restrict this to an ApiObject::View (by having T: ApiObject and the +// field having type T::View). pub struct HttpResponseCreated( pub T, ); @@ -1350,16 +1216,14 @@ impl From> for HttpHandlerResult { fn from(response: HttpResponseCreated) -> HttpHandlerResult { - /* TODO-correctness (or polish?): add Location header */ + // TODO-correctness (or polish?): add Location header HttpResponseCreated::for_object(response.0) } } -/** - * `HttpResponseAccepted` wraps an object of any - * serializable type. It denotes an HTTP 202 "Accepted" response whose body is - * generated by serializing the object. - */ +/// `HttpResponseAccepted` wraps an object of any +/// serializable type. It denotes an HTTP 202 "Accepted" response whose body is +/// generated by serializing the object. pub struct HttpResponseAccepted( pub T, ); @@ -1378,11 +1242,9 @@ impl } } -/** - * `HttpResponseOk` wraps an object of any serializable type. It - * denotes an HTTP 200 "OK" response whose body is generated by serializing the - * object. - */ +/// `HttpResponseOk` wraps an object of any serializable type. It +/// denotes an HTTP 200 "OK" response whose body is generated by serializing the +/// object. pub struct HttpResponseOk( pub T, ); @@ -1401,10 +1263,8 @@ impl From> } } -/** - * `HttpResponseDeleted` represents an HTTP 204 "No Content" response, intended - * for use when an API operation has successfully deleted an object. - */ +/// `HttpResponseDeleted` represents an HTTP 204 "No Content" response, intended +/// for use when an API operation has successfully deleted an object. pub struct HttpResponseDeleted(); impl HttpCodedResponse for HttpResponseDeleted { @@ -1418,11 +1278,9 @@ impl From for HttpHandlerResult { } } -/** - * `HttpResponseUpdatedNoContent` represents an HTTP 204 "No Content" response, - * intended for use when an API operation has successfully updated an object and - * has nothing to return. - */ +/// `HttpResponseUpdatedNoContent` represents an HTTP 204 "No Content" response, +/// intended for use when an API operation has successfully updated an object and +/// has nothing to return. pub struct HttpResponseUpdatedNoContent(); impl HttpCodedResponse for HttpResponseUpdatedNoContent { @@ -1436,45 +1294,41 @@ impl From for HttpHandlerResult { } } -/** Describes headers associated with a 300-level response. */ +/// Describes headers associated with a 300-level response. #[derive(JsonSchema, Serialize)] #[doc(hidden)] pub struct RedirectHeaders { - /** HTTP "Location" header */ - /* - * What type should we use to represent header values? - * - * It's tempting to use `http::HeaderValue` here. But in HTTP, header - * values can contain bytes that aren't valid Rust strings. See - * `http::header::HeaderValue`. 
We could propagate this nonsense all the - * way to the OpenAPI spec, encoding the Location header as, say, - * base64-encoded bytes. This sounds really annoying to consumers. It's - * also a fair bit more work to implement. We'd need to create a separate - * type for this field so that we can impl `Serialize` and `JsonSchema` on - * it, and we'd need to also impl serialization of byte sequences in - * `MapSerializer`. Ugh. - * - * We just use `String`. This might contain values that aren't valid in - * HTTP response headers. But we can at least validate that at runtime, and - * it sure is easier to implement! - */ + /// HTTP "Location" header + // What type should we use to represent header values? + // + // It's tempting to use `http::HeaderValue` here. But in HTTP, header + // values can contain bytes that aren't valid Rust strings. See + // `http::header::HeaderValue`. We could propagate this nonsense all the + // way to the OpenAPI spec, encoding the Location header as, say, + // base64-encoded bytes. This sounds really annoying to consumers. It's + // also a fair bit more work to implement. We'd need to create a separate + // type for this field so that we can impl `Serialize` and `JsonSchema` on + // it, and we'd need to also impl serialization of byte sequences in + // `MapSerializer`. Ugh. + // + // We just use `String`. This might contain values that aren't valid in + // HTTP response headers. But we can at least validate that at runtime, and + // it sure is easier to implement! location: String, } -/** See `http_response_found()` */ +/// See `http_response_found()` pub type HttpResponseFound = HttpResponseHeaders; -/** - * `http_response_found` returns an HTTP 302 "Found" response with no response - * body. - * - * The sole argument will become the value of the `Location` header. This is - * where you want to redirect the client to. - * - * Per MDN and RFC 9110 S15.4.3, you might want to use 307 ("Temporary - * Redirect") or 303 ("See Other") instead. - */ +/// `http_response_found` returns an HTTP 302 "Found" response with no response +/// body. +/// +/// The sole argument will become the value of the `Location` header. This is +/// where you want to redirect the client to. +/// +/// Per MDN and RFC 9110 S15.4.3, you might want to use 307 ("Temporary +/// Redirect") or 303 ("See Other") instead. pub fn http_response_found( location: String, ) -> Result { @@ -1496,11 +1350,9 @@ fn http_redirect_error( )) } -/** - * This internal type impls HttpCodedResponse. Consumers should use - * `HttpResponseFound` instead, which includes metadata about the `Location` - * header. - */ +/// This internal type impls HttpCodedResponse. Consumers should use +/// `HttpResponseFound` instead, which includes metadata about the `Location` +/// header. #[doc(hidden)] pub struct HttpResponseFoundStatus; impl HttpCodedResponse for HttpResponseFoundStatus { @@ -1514,22 +1366,20 @@ impl From for HttpHandlerResult { } } -/** See `http_response_see_other()` */ +/// See `http_response_see_other()` pub type HttpResponseSeeOther = HttpResponseHeaders; -/** - * `http_response_see_other` returns an HTTP 303 "See Other" response with no - * response body. - * - * The sole argument will become the value of the `Location` header. This is - * where you want to redirect the client to. - * - * Use this (as opposed to 307 "Temporary Redirect") when you want the client to - * follow up with a GET, rather than whatever method they used to make the - * current request. 
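A sketch of the redirect helpers in a handler; the target location is illustrative.

```rust
use dropshot::{http_response_see_other, HttpError, HttpResponseSeeOther, RequestContext};
use std::sync::Arc;

// After a successful POST, send the client to a confirmation page, which
// it will fetch with GET.
async fn form_submit(
    _rqctx: Arc<RequestContext<()>>,
) -> Result<HttpResponseSeeOther, HttpError> {
    http_response_see_other(String::from("/submitted"))
}
```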
This is intended to be used after a PUT or POST to show a - * confirmation page or the like. - */ +/// `http_response_see_other` returns an HTTP 303 "See Other" response with no +/// response body. +/// +/// The sole argument will become the value of the `Location` header. This is +/// where you want to redirect the client to. +/// +/// Use this (as opposed to 307 "Temporary Redirect") when you want the client to +/// follow up with a GET, rather than whatever method they used to make the +/// current request. This is intended to be used after a PUT or POST to show a +/// confirmation page or the like. pub fn http_response_see_other( location: String, ) -> Result { @@ -1541,11 +1391,9 @@ pub fn http_response_see_other( )) } -/** - * This internal type impls HttpCodedResponse. Consumers should use - * `HttpResponseSeeOther` instead, which includes metadata about the `Location` - * header. - */ +/// This internal type impls HttpCodedResponse. Consumers should use +/// `HttpResponseSeeOther` instead, which includes metadata about the `Location` +/// header. #[doc(hidden)] pub struct HttpResponseSeeOtherStatus; impl HttpCodedResponse for HttpResponseSeeOtherStatus { @@ -1559,20 +1407,18 @@ impl From for HttpHandlerResult { } } -/** See `http_response_temporary_redirect()` */ +/// See `http_response_temporary_redirect()` pub type HttpResponseTemporaryRedirect = HttpResponseHeaders; -/** - * `http_response_temporary_redirect` represents an HTTP 307 "Temporary - * Redirect" response with no response body. - * - * The sole argument will become the value of the `Location` header. This is - * where you want to redirect the client to. - * - * Use this (as opposed to 303 "See Other") when you want the client to use the - * same request method and body when it makes the follow-up request. - */ +/// `http_response_temporary_redirect` represents an HTTP 307 "Temporary +/// Redirect" response with no response body. +/// +/// The sole argument will become the value of the `Location` header. This is +/// where you want to redirect the client to. +/// +/// Use this (as opposed to 303 "See Other") when you want the client to use the +/// same request method and body when it makes the follow-up request. pub fn http_response_temporary_redirect( location: String, ) -> Result { @@ -1584,11 +1430,9 @@ pub fn http_response_temporary_redirect( )) } -/** - * This internal type impls HttpCodedResponse. Consumers should use - * `HttpResponseTemporaryRedirect` instead, which includes metadata about the - * `Location` header. - */ +/// This internal type impls HttpCodedResponse. Consumers should use +/// `HttpResponseTemporaryRedirect` instead, which includes metadata about the +/// `Location` header. #[doc(hidden)] pub struct HttpResponseTemporaryRedirectStatus; impl HttpCodedResponse for HttpResponseTemporaryRedirectStatus { @@ -1605,16 +1449,14 @@ impl From for HttpHandlerResult { #[derive(Serialize, JsonSchema)] pub struct NoHeaders {} -/** - * `HttpResponseHeaders` is a wrapper for responses that include both - * structured and unstructured headers. The first type parameter is a - * `HttpTypedResponse` that provides the structure of the response body. - * The second type parameter is an optional struct that enumerates named - * headers that are included in the response. In addition to those (optional) - * named headers, consumers may add additional headers via the `headers_mut` - * interface. Unnamed headers override named headers in the case of naming - * conflicts. 
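A sketch of both header mechanisms together; the header struct, its field, and the values are hypothetical.

```rust
use dropshot::{HttpError, HttpResponseHeaders, HttpResponseOk, RequestContext};
use schemars::JsonSchema;
use serde::Serialize;
use std::sync::Arc;

#[derive(Serialize, JsonSchema)]
struct CacheHeaders {
    #[serde(rename = "cache-control")]
    cache_control: String,
}

async fn cached_get(
    _rqctx: Arc<RequestContext<()>>,
) -> Result<HttpResponseHeaders<HttpResponseOk<String>, CacheHeaders>, HttpError> {
    // Named headers are serialized from the struct; ad-hoc ones are added
    // through headers_mut() and win on naming conflicts.
    let mut response = HttpResponseHeaders::new(
        HttpResponseOk(String::from("hello")),
        CacheHeaders { cache_control: String::from("no-store") },
    );
    response.headers_mut().insert(
        http::header::X_CONTENT_TYPE_OPTIONS,
        http::HeaderValue::from_static("nosniff"),
    );
    Ok(response)
}
```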
- */ +/// `HttpResponseHeaders` is a wrapper for responses that include both +/// structured and unstructured headers. The first type parameter is a +/// `HttpTypedResponse` that provides the structure of the response body. +/// The second type parameter is an optional struct that enumerates named +/// headers that are included in the response. In addition to those (optional) +/// named headers, consumers may add additional headers via the `headers_mut` +/// interface. Unnamed headers override named headers in the case of naming +/// conflicts. pub struct HttpResponseHeaders< T: HttpCodedResponse, H: JsonSchema + Serialize + Send + Sync + 'static = NoHeaders, @@ -1657,9 +1499,9 @@ impl< fn to_result(self) -> HttpHandlerResult { let HttpResponseHeaders { body, structured_headers, other_headers } = self; - /* Compute the body. */ + // Compute the body. let mut result = body.into()?; - /* Add in both the structured and other headers. */ + // Add in both the structured and other headers. let headers = result.headers_mut(); let header_map = to_map(&structured_headers).map_err(|e| { HttpError::for_internal_error(format!( @@ -1715,11 +1557,9 @@ impl< fn schema_extract_description( schema: &schemars::schema::Schema, ) -> (Option, schemars::schema::Schema) { - /* - * Because the OpenAPI v3.0.x Schema cannot include a description with - * a reference, we may see a schema with a description and an `all_of` - * with a single subschema. In this case, we flatten the trivial subschema. - */ + // Because the OpenAPI v3.0.x Schema cannot include a description with + // a reference, we may see a schema with a description and an `all_of` + // with a single subschema. In this case, we flatten the trivial subschema. if let schemars::schema::Schema::Object(schemars::schema::SchemaObject { metadata, instance_type: None, @@ -1822,10 +1662,8 @@ mod test { ) { assert_eq!(actual.extension_mode, extension_mode); - /* - * This is order-dependent. We might not really care if the order - * changes, but it will be interesting to understand why if it does. - */ + // This is order-dependent. We might not really care if the order + // changes, but it will be interesting to understand why if it does. actual.parameters.iter().zip(parameters.iter()).for_each( |(param, (name, required))| { if let ApiEndpointParameter { diff --git a/dropshot/src/http_util.rs b/dropshot/src/http_util.rs index c043ecb5a..91ce1b728 100644 --- a/dropshot/src/http_util.rs +++ b/dropshot/src/http_util.rs @@ -1,7 +1,5 @@ // Copyright 2020 Oxide Computer Company -/*! - * General-purpose HTTP-related facilities - */ +//! 
General-purpose HTTP-related facilities use bytes::BufMut; use bytes::Bytes; @@ -12,22 +10,20 @@ use super::error::HttpError; use crate::from_map::from_map; use crate::router::VariableSet; -/** header name for conveying request ids ("x-request-id") */ +/// header name for conveying request ids ("x-request-id") pub const HEADER_REQUEST_ID: &str = "x-request-id"; -/** MIME type for raw bytes */ +/// MIME type for raw bytes pub const CONTENT_TYPE_OCTET_STREAM: &str = "application/octet-stream"; -/** MIME type for plain JSON data */ +/// MIME type for plain JSON data pub const CONTENT_TYPE_JSON: &str = "application/json"; -/** MIME type for newline-delimited JSON data */ +/// MIME type for newline-delimited JSON data pub const CONTENT_TYPE_NDJSON: &str = "application/x-ndjson"; -/** MIME type for form/urlencoded data */ +/// MIME type for form/urlencoded data pub const CONTENT_TYPE_URL_ENCODED: &str = "application/x-www-form-urlencoded"; -/** - * Reads the rest of the body from the request up to the given number of bytes. - * If the body fits within the specified cap, a buffer is returned with all the - * bytes read. If not, an error is returned. - */ +/// Reads the rest of the body from the request up to the given number of bytes. +/// If the body fits within the specified cap, a buffer is returned with all the +/// bytes read. If not, an error is returned. pub async fn http_read_body( body: &mut T, cap: usize, @@ -35,18 +31,16 @@ pub async fn http_read_body( where T: HttpBody + std::marker::Unpin, { - /* - * This looks a lot like the implementation of hyper::body::to_bytes(), but - * applies the requested cap. We've skipped the optimization for the - * 1-buffer case for now, as it seems likely this implementation will change - * anyway. - * TODO should this use some Stream interface instead? - * TODO why does this look so different in type signature (Data=Bytes, - * std::marker::Unpin, &mut T) - * TODO Error type shouldn't have to be hyper Error -- Into should - * work too? - * TODO do we need to use saturating_add() here? - */ + // This looks a lot like the implementation of hyper::body::to_bytes(), but + // applies the requested cap. We've skipped the optimization for the + // 1-buffer case for now, as it seems likely this implementation will change + // anyway. + // TODO should this use some Stream interface instead? + // TODO why does this look so different in type signature (Data=Bytes, + // std::marker::Unpin, &mut T) + // TODO Error type shouldn't have to be hyper Error -- Into should + // work too? + // TODO do we need to use saturating_add() here? let mut parts = std::vec::Vec::new(); let mut nbytesread: usize = 0; while let Some(maybebuf) = body.data().await { @@ -66,95 +60,81 @@ where parts.put(buf); } - /* - * Read the trailers as well, even though we're not going to do anything - * with them. - */ + // Read the trailers as well, even though we're not going to do anything + // with them. body.trailers().await?; - /* - * TODO-correctness why does the is_end_stream() assertion fail and the next - * one panic? - */ + // TODO-correctness why does the is_end_stream() assertion fail and the next + // one panic? // assert!(body.is_end_stream()); // assert!(body.data().await.is_none()); // assert!(body.trailers().await?.is_none()); Ok(parts.into()) } -/** - * Reads the rest of the body from the request, dropping all the bytes. This is - * useful after encountering error conditions. - */ +/// Reads the rest of the body from the request, dropping all the bytes. 
This is +/// useful after encountering error conditions. pub async fn http_dump_body(body: &mut T) -> Result where T: HttpBody + std::marker::Unpin, { - /* - * TODO should this use some Stream interface instead? - * TODO-hardening: does this actually cap the amount of data that will be - * read? What if the underlying implementation chooses to wait for a much - * larger number of bytes? - * TODO better understand pin_mut!() - * TODO do we need to use saturating_add() here? - */ + // TODO should this use some Stream interface instead? + // TODO-hardening: does this actually cap the amount of data that will be + // read? What if the underlying implementation chooses to wait for a much + // larger number of bytes? + // TODO better understand pin_mut!() + // TODO do we need to use saturating_add() here? let mut nbytesread: usize = 0; while let Some(maybebuf) = body.data().await { let buf = maybebuf?; nbytesread += buf.len(); } - /* - * TODO-correctness why does the is_end_stream() assertion fail? - */ + // TODO-correctness why does the is_end_stream() assertion fail? // assert!(body.is_end_stream()); Ok(nbytesread) } -/** - * Given a set of variables (most immediately from a RequestContext, likely - * generated by the HttpRouter when routing an incoming request), extract them - * into an instance of type T. This is a convenience function that reports an - * appropriate error when the extraction fails. - * - * Note that if this function fails, either there was a type error (e.g., a path - * parameter was supposed to be a UUID, but wasn't), in which case we should - * report a 400-level error; or the caller attempted to extract a parameter - * (using a field in T) that wasn't populated in `path_params`. This latter - * case is a programmer error -- this invocation can never work with this type - * for this HTTP handler. Ideally, we'd catch this at build time, but we don't - * currently do that. However, we _do_ currently catch this at server startup - * time, so this case should be impossible. - * - * TODO-cleanup: It would be better to fail to build when the struct's - * parameters don't match up precisely with the path parameters - * TODO-cleanup It would also be nice to know if the struct could not possibly - * be correctly constructed from path parameters because the struct contains - * values that could not be represented in path parameters (e.g., nested - * structs). One approach to doing this would be to skip serde altogether here - * for `T` and instead define our own trait. We could define a "derive" macro - * that would do something similar to serde, but only allows field values that - * implement FromStr. Then we'd at least know at build time that the consumer - * gave us a type that could conceivably be represented by the path parameters. - * TODO-testing: Add automated tests. - */ +/// Given a set of variables (most immediately from a RequestContext, likely +/// generated by the HttpRouter when routing an incoming request), extract them +/// into an instance of type T. This is a convenience function that reports an +/// appropriate error when the extraction fails. +/// +/// Note that if this function fails, either there was a type error (e.g., a path +/// parameter was supposed to be a UUID, but wasn't), in which case we should +/// report a 400-level error; or the caller attempted to extract a parameter +/// (using a field in T) that wasn't populated in `path_params`. 
This latter +/// case is a programmer error -- this invocation can never work with this type +/// for this HTTP handler. Ideally, we'd catch this at build time, but we don't +/// currently do that. However, we _do_ currently catch this at server startup +/// time, so this case should be impossible. +/// +/// TODO-cleanup: It would be better to fail to build when the struct's +/// parameters don't match up precisely with the path parameters +/// TODO-cleanup It would also be nice to know if the struct could not possibly +/// be correctly constructed from path parameters because the struct contains +/// values that could not be represented in path parameters (e.g., nested +/// structs). One approach to doing this would be to skip serde altogether here +/// for `T` and instead define our own trait. We could define a "derive" macro +/// that would do something similar to serde, but only allows field values that +/// implement FromStr. Then we'd at least know at build time that the consumer +/// gave us a type that could conceivably be represented by the path parameters. +/// TODO-testing: Add automated tests. pub fn http_extract_path_params( path_params: &VariableSet, ) -> Result { from_map(path_params).map_err(|message| { - /* - * TODO-correctness We'd like to assert that the error here is a bad - * type, not a missing field. If it's a missing field, then we somehow - * allowed somebody to register a handler function for a path where the - * handler function's path parameters are inconsistent with the actual - * path registered. Unfortunately, we don't have a way to - * programmatically distinguish these values at this point. In fact, - * even with our own deserializer, we'd also have to build our - * own serde::de::Error impl in order to distinguish this particular - * case. For now, we resort to parsing the error message. - * TODO-correctness The error message produced in the type-error case - * (that end users will see) does not indicate which path parameter was - * invalid. That's pretty bad for end users. - */ + // TODO-correctness We'd like to assert that the error here is a bad + // type, not a missing field. If it's a missing field, then we somehow + // allowed somebody to register a handler function for a path where the + // handler function's path parameters are inconsistent with the actual + // path registered. Unfortunately, we don't have a way to + // programmatically distinguish these values at this point. In fact, + // even with our own deserializer, we'd also have to build our + // own serde::de::Error impl in order to distinguish this particular + // case. For now, we resort to parsing the error message. + // TODO-correctness The error message produced in the type-error case + // (that end users will see) does not indicate which path parameter was + // invalid. That's pretty bad for end users. assert!(!message.starts_with("missing field: ")); HttpError::for_bad_request( None, diff --git a/dropshot/src/lib.rs b/dropshot/src/lib.rs index 12fd1fb62..6de0f7635 100644 --- a/dropshot/src/lib.rs +++ b/dropshot/src/lib.rs @@ -1,554 +1,547 @@ // Copyright 2020 Oxide Computer Company -/*! - * Dropshot is a general-purpose crate for exposing REST APIs from a Rust - * program. Planned highlights include: - * - * * Suitability for production use on a largely untrusted network. - * Dropshot-based systems should be high-performing, reliable, debuggable, and - * secure against basic denial of service attacks (intentional or otherwise). 
- * - * * First-class OpenAPI support, in the form of precise OpenAPI specs generated - * directly from code. This works because the functions that serve HTTP - * resources consume arguments and return values of specific types from which - * a schema can be statically generated. - * - * * Ease of integrating into a diverse team. An important use case for - * Dropshot consumers is to have a team of engineers where individuals might - * add a few endpoints at a time to a complex server, and it should be - * relatively easy to do this. Part of this means an emphasis on the - * principle of least surprise: like Rust itself, we may choose abstractions - * that require more time to learn up front in order to make it harder to - * accidentally build systems that will not perform, will crash in corner - * cases, etc. - * - * By "REST API", we primarily mean an API built atop existing HTTP primitives, - * organized into hierarchical resources, and providing consistent, idempotent - * mechanisms to create, update, list, and delete those resources. "REST" can - * mean a range of things depending on who you talk to, and some people are - * dogmatic about what is or isn't RESTy. We find such dogma not only - * unhelpful, but poorly defined. (Consider such a simple case as trying to - * update a resource in a REST API. Popular APIs sometimes use `PUT`, `PATCH`, - * or `POST` for the verb; and JSON Merge Patch or JSON Patch as the format. - * (sometimes without even knowing it!). There's hardly a clear standard, yet - * this is a really basic operation for any REST API.) - * - * For a discussion of alternative crates considered, see Oxide RFD 10. - * - * We hope Dropshot will be fairly general-purpose, but it's primarily intended - * to address the needs of the Oxide control plane. - * - * - * ## Usage - * - * The bare minimum might look like this: - * - * ```no_run - * use dropshot::ApiDescription; - * use dropshot::ConfigDropshot; - * use dropshot::ConfigLogging; - * use dropshot::ConfigLoggingLevel; - * use dropshot::HttpServerStarter; - * use std::sync::Arc; - * - * #[tokio::main] - * async fn main() -> Result<(), String> { - * // Set up a logger. - * let log = - * ConfigLogging::StderrTerminal { - * level: ConfigLoggingLevel::Info, - * } - * .to_logger("minimal-example") - * .map_err(|e| e.to_string())?; - * - * // Describe the API. - * let api = ApiDescription::new(); - * // Register API functions -- see detailed example or ApiDescription docs. - * - * // Start the server. - * let server = - * HttpServerStarter::new( - * &ConfigDropshot { - * bind_address: "127.0.0.1:0".parse().unwrap(), - * request_body_max_bytes: 1024, - * tls: None, - * }, - * api, - * Arc::new(()), - * &log, - * ) - * .map_err(|error| format!("failed to start server: {}", error))? - * .start(); - * - * server.await - * } - * ``` - * - * This server returns a 404 for all resources because no API functions were - * registered. See `examples/basic.rs` for a simple, documented example that - * provides a few resources using shared state. - * - * For a given `ApiDescription`, you can also print out an OpenAPI spec - * describing the API. See [`ApiDescription::openapi`]. - * - * - * ## API Handler Functions - * - * HTTP talks about **resources**. For a REST API, we often talk about - * **endpoints** or **operations**, which are identified by a combination of the - * HTTP method and the URI path. 
- * - * Example endpoints for a resource called a "project" might include: - * - * * `GET /projects` (list projects) - * * `POST /projects` (one way to create a project) - * * `GET /projects/my_project` (fetch one project) - * * `PUT /projects/my_project` (update (or possibly create) a project) - * * `DELETE /projects/my_project` (delete a project) - * - * With Dropshot, an incoming request for a given API endpoint is handled by a - * particular Rust function. That function is called an **entrypoint**, an - * **endpoint handler**, or a **handler function**. When you set up a Dropshot - * server, you configure the set of available API endpoints and which functions - * will handle each one by setting up an [`ApiDescription`]. - * - * Typically, you define an endpoint with a handler function by using the - * [`endpoint`] macro. Here's an example of a single endpoint that lists - * a hardcoded project: - * - * ``` - * use dropshot::endpoint; - * use dropshot::ApiDescription; - * use dropshot::HttpError; - * use dropshot::HttpResponseOk; - * use dropshot::RequestContext; - * use http::Method; - * use schemars::JsonSchema; - * use serde::Serialize; - * use std::sync::Arc; - * - * /** Represents a project in our API */ - * #[derive(Serialize, JsonSchema)] - * struct Project { - * /** name of the project */ - * name: String, - * } - * - * /** Fetch a project. */ - * #[endpoint { - * method = GET, - * path = "/projects/project1", - * }] - * async fn myapi_projects_get_project( - * rqctx: Arc>, - * ) -> Result, HttpError> - * { - * let project = Project { name: String::from("project1") }; - * Ok(HttpResponseOk(project)) - * } - * - * fn main() { - * let mut api = ApiDescription::new(); - * - * /* - * * Register our endpoint and its handler function. The "endpoint" macro - * * specifies the HTTP method and URI path that identify the endpoint, - * * allowing this metadata to live right alongside the handler function. - * */ - * api.register(myapi_projects_get_project).unwrap(); - * - * /* ... (use `api` to set up an `HttpServer` ) */ - * } - * - * ``` - * - * There's quite a lot going on here: - * - * * The `endpoint` macro specifies the HTTP method and URI path. When we - * invoke `ApiDescription::register()`, this information is used to register - * the endpoint that will be handled by our function. - * * The signature of our function indicates that on success, it returns a - * `HttpResponseOk`. This means that the function will - * return an HTTP 200 status code ("OK") with an object of type `Project`. - * * The function itself has a Rustdoc comment that will be used to document - * this _endpoint_ in the OpenAPI schema. - * - * From this information, Dropshot can generate an OpenAPI specification for - * this API that describes the endpoint (which OpenAPI calls an "operation"), - * its documentation, the possible responses that it can return, and the schema - * for each type of response (which can also include documentation). This is - * largely known statically, though generated at runtime. - * - * - * ### `#[endpoint { ... }]` attribute parameters - * - * The `endpoint` attribute accepts parameters the affect the operation of - * the endpoint as well as metadata that appears in the OpenAPI description - * of it. 
- * - * ```ignore - * #[endpoint { - * // Required fields - * method = { DELETE | GET | PATCH | POST | PUT }, - * path = "/path/name/with/{named}/{variables}", - * - * // Optional fields - * tags = [ "all", "your", "OpenAPI", "tags" ], - * }] - * ``` - * - * This is where you specify the HTTP method and path (including path variables) - * for the API endpoint. These are used as part of endpoint registration and - * appear in the OpenAPI spec output. - * - * The tags field is used to categorize API endpoints and only impacts the - * OpenAPI spec output. - * - * - * ### Function parameters - * - * In general, a handler function looks like this: - * - * ```ignore - * async fn f( - * rqctx: Arc>, - * [query_params: Query,] - * [path_params: Path
<P>
    ,]
- * [body_param: TypedBody<J>,]
- * [body_param: UntypedBody,]
- * ) -> Result<HttpResponse*, HttpError>
- * ```
- *
- * Other than the RequestContext, parameters may appear in any order.
- *
- * The `Context` type is caller-provided context which is provided when
- * the server is created.
- *
- * The types `Query`, `Path`, `TypedBody`, and `UntypedBody` are called
- * **Extractors** because they cause information to be pulled out of the request
- * and made available to the handler function.
- *
- * * [`Query`]`<Q>` extracts parameters from a query string, deserializing them
- * into an instance of type `Q`. `Q` must implement `serde::Deserialize` and
- * `schemars::JsonSchema`.
- * * [`Path`]`
<P>
` extracts parameters from HTTP path, deserializing them into
- * an instance of type `P`. `P` must implement `serde::Deserialize` and
- * `schemars::JsonSchema`.
- * * [`TypedBody`]`<J>` extracts content from the request body by parsing the
- * body as JSON (or form/url-encoded) and deserializing it into an instance
- * of type `J`. `J` must implement `serde::Deserialize` and `schemars::JsonSchema`.
- * * [`UntypedBody`] extracts the raw bytes of the request body.
- *
- * If the handler takes a `Query`, `Path
<P>
    `, `TypedBody`, or - * `UntypedBody`, and the corresponding extraction cannot be completed, the - * request fails with status code 400 and an error message reflecting a - * validation error. - * - * As with any serde-deserializable type, you can make fields optional by having - * the corresponding property of the type be an `Option`. Here's an example of - * an endpoint that takes two arguments via query parameters: "limit", a - * required u32, and "marker", an optional string: - * - * ``` - * use http::StatusCode; - * use dropshot::HttpError; - * use dropshot::TypedBody; - * use dropshot::Query; - * use dropshot::RequestContext; - * use hyper::Body; - * use hyper::Response; - * use schemars::JsonSchema; - * use serde::Deserialize; - * use std::sync::Arc; - * - * #[derive(Deserialize, JsonSchema)] - * struct MyQueryArgs { - * limit: u32, - * marker: Option - * } - * - * struct MyContext {} - * - * async fn myapi_projects_get( - * rqctx: Arc>, - * query: Query) - * -> Result, HttpError> - * { - * let query_args = query.into_inner(); - * let context: &MyContext = rqctx.context(); - * let limit: u32 = query_args.limit; - * let marker: Option = query_args.marker; - * Ok(Response::builder() - * .status(StatusCode::OK) - * .body(format!("limit = {}, marker = {:?}\n", limit, marker).into())?) - * } - * ``` - * - * ### Endpoint function return types - * - * Endpoint handler functions are async, so they always return a `Future`. When - * we say "return type" below, we use that as shorthand for the output of the - * future. - * - * An endpoint function must return a type that implements `HttpResponse`. - * Typically this should be a type that implements `HttpTypedResponse` (either - * one of the Dropshot-provided ones or one of your own creation). - * - * The more specific a type returned by the handler function, the more can be - * validated at build-time, and the more specific an OpenAPI schema can be - * generated from the source code. For example, a POST to an endpoint - * "/projects" might return `Result, HttpError>`. - * As you might expect, on success, this turns into an HTTP 201 "Created" - * response whose body is constructed by serializing the `Project`. In this - * example, OpenAPI tooling can identify at build time that this function - * produces a 201 "Created" response on success with a body whose schema matches - * `Project` (which we already said implements `Serialize`), and there would be - * no way to violate this contract at runtime. - * - * These are the implementations of `HttpTypedResponse` with their associated - * HTTP response code - * on the HTTP method: - * - * | Return Type | HTTP status code | - * | ----------- | ---------------- | - * | [`HttpResponseOk`] | 200 | - * | [`HttpResponseCreated`] | 201 | - * | [`HttpResponseAccepted`] | 202 | - * | [`HttpResponseDeleted`] | 204 | - * | [`HttpResponseUpdatedNoContent`] | 204 | - * - * In situations where the response schema is not fixed, the endpoint should - * return `Response`, which also implements `HttpResponse`. Note that - * the OpenAPI spec will not include any status code or type information in - * this case. - * - * ## What about generic handlers that run on all requests? - * - * There's no mechanism in Dropshot for this. Instead, it's recommended that - * users commonize code using regular Rust functions and calling them. See the - * design notes in the README for more on this. 
- * - * - * ## Support for paginated resources - * - * "Pagination" here refers to the interface pattern where HTTP resources (or - * API endpoints) that provide a list of the items in a collection return a - * relatively small maximum number of items per request, often called a "page" - * of results. Each page includes some metadata that the client can use to make - * another request for the next page of results. The client can repeat this - * until they've gotten all the results. Limiting the number of results - * returned per request helps bound the resource utilization and time required - * for any request, which in turn facilities horizontal scalability, high - * availability, and protection against some denial of service attacks - * (intentional or otherwise). For more background, see the comments in - * dropshot/src/pagination.rs. - * - * Pagination support in Dropshot implements this common pattern: - * - * * This server exposes an **API endpoint** that returns the **items** - * contained within a **collection**. - * * The client is not allowed to list the entire collection in one request. - * Instead, they list the collection using a sequence of requests to the one - * endpoint. We call this sequence of requests a **scan** of the collection, - * and we sometimes say that the client **pages through** the collection. - * * The initial request in the scan may specify the **scan parameters**, which - * typically specify how the results are to be sorted (i.e., by which - * field(s) and whether the sort is ascending or descending), any filters to - * apply, etc. - * * Each request returns a **page** of results at a time, along with a **page - * token** that's provided with the next request as a query parameter. - * * The scan parameters cannot change between requests that are part of the - * same scan. - * * With all requests: there's a default limit (e.g., 100 items returned at a - * time). Clients can request a higher limit using a query parameter (e.g., - * `limit=1000`). This limit is capped by a hard limit on the server. If the - * client asks for more than the hard limit, the server can use the hard limit - * or reject the request. - * - * As an example, imagine that we have an API endpoint called `"/animals"`. Each - * item returned is an `Animal` object that might look like this: - * - * ```json - * { - * "name": "aardvark", - * "class": "mammal", - * "max_weight": "80", /* kilograms, typical */ - * } - * ``` - * - * There are at least 1.5 million known species of animal -- too many to return - * in one API call! Our API supports paginating them by `"name"`, which we'll - * say is a unique field in our data set. - * - * The first request to the API fetches `"/animals"` (with no querystring - * parameters) and returns: - * - * ```json - * { - * "page_token": "abc123...", - * "items": [ - * { - * "name": "aardvark", - * "class": "mammal", - * "max_weight": "80", - * }, - * ... - * { - * "name": "badger", - * "class": "mammal", - * "max_weight": "12", - * } - * ] - * } - * ``` - * - * The subsequent request to the API fetches `"/animals?page_token=abc123..."`. - * The page token `"abc123..."` is an opaque token to the client, but typically - * encodes the scan parameters and the value of the last item seen - * (`"name=badger"`). The client knows it has completed the scan when it - * receives a response with no `page_token` in it. - * - * Our API endpoint can also support scanning in reverse order. 
In this case, - * when the client makes the first request, it should fetch - * `"/animals?sort=name-descending"`. Now the first result might be `"zebra"`. - * Again, the page token must include the scan parameters so that in subsequent - * requests, the API endpoint knows that we're scanning backwards, not forwards, - * from the value we were given. It's not allowed to change directions or sort - * order in the middle of a scan. (You can always start a new scan, but you - * can't pick up from where you were in the previous scan.) - * - * It's also possible to support sorting by multiple fields. For example, we - * could support `sort=class-name`, which we could define to mean that we'll - * sort the results first by the animal's class, then by name. Thus we'd get - * all the amphibians in sorted order, then all the mammals, then all the - * reptiles. The main requirement is that the combination of fields used for - * pagination must be unique. We cannot paginate by the animal's class alone. - * (To see why: there are over 6,000 mammals. If the page size is, say, 1000, - * then the page_token would say `"mammal"`, but there's not enough information - * there to see where we are within the list of mammals. It doesn't matter - * whether there are 2 mammals or 6,000 because clients can limit the page size - * to just one item if they want and that ought to work.) - * - * - * ### Dropshot interfaces for pagination - * - * We can think of pagination in two parts: the input (handling the pagination - * query parameters) and the output (emitting a page of results, including the - * page token). - * - * For input, a paginated API endpoint's handler function should accept a - * [`Query`]`<`[`PaginationParams`]`>`, where - * `ScanParams` is a consumer-defined type specifying the parameters of the scan - * (typically including the sort fields, sort order, and filter options) and - * `PageSelector` is a consumer-defined type describing the page token. The - * PageSelector will be serialized to JSON and base64-encoded to construct the - * page token. This will be automatically parsed on the way back in. - * - * For output, a paginated API endpoint's handler function can return - * `Result<`[`HttpResponseOk`]<[`ResultsPage`]`, HttpError>` where `T: - * Serialize` is the item listed by the endpoint. You can also use your own - * structure that contains a [`ResultsPage`] (possibly using - * `#[serde(flatten)]`), if that's the behavior you want. - * - * There are several complete, documented examples in the "examples" directory. 
- * - * - * ### Advanced usage notes - * - * It's possible to accept additional query parameters besides the pagination - * parameters by having your API endpoint handler function take two different - * arguments using `Query`, like this: - * - * ``` - * use dropshot::HttpError; - * use dropshot::HttpResponseOk; - * use dropshot::PaginationParams; - * use dropshot::Query; - * use dropshot::RequestContext; - * use dropshot::ResultsPage; - * use dropshot::endpoint; - * use schemars::JsonSchema; - * use serde::Deserialize; - * use std::sync::Arc; - * # use serde::Serialize; - * # #[derive(Debug, Deserialize, JsonSchema)] - * # enum MyScanParams { A }; - * # #[derive(Debug, Deserialize, JsonSchema, Serialize)] - * # enum MyPageSelector { A(String) }; - * #[derive(Deserialize, JsonSchema)] - * struct MyExtraQueryParams { - * do_extra_stuff: bool, - * } - * - * #[endpoint { - * method = GET, - * path = "/list_stuff" - * }] - * async fn my_list_api( - * rqctx: Arc>, - * pag_params: Query>, - * extra_params: Query, - * ) -> Result>, HttpError> - * { - * # unimplemented!(); - * /* ... */ - * } - * ``` - * - * You might expect that instead of doing this, you could define your own - * structure that includes a `PaginationParams` using `#[serde(flatten)]`, and - * this ought to work, but it currently doesn't due to serde_urlencoded#33, - * which is really serde#1183. - * - * ### DTrace probes - * - * Dropshot optionally exposes two DTrace probes, `request_start` and - * `request_finish`. These provide detailed information about each request, - * such as their ID, the local and remote IPs, and the response information. - * See the [`RequestInfo`] and [`ResponseInfo`] types for a complete listing - * of what's available. - * - * These probes are implemented via the [`usdt`] crate. They require a nightly - * toolchain if built on MacOS (which requires the unstable `asm_sym` feature). - * Otherwise a stable compiler >= v1.59 is required in order to present the - * necessary features. Given these constraints, usdt functionality is behind - * the feature flag `"usdt-probes"`. - * - * > *Important:* The probes are internally registered with the DTrace kernel - * module, making them visible via `dtrace(1M)`. This is done when an `HttpServer` - * object is created, but it's possible that registration fails. The result of - * registration is stored in the server after creation, and can be accessed with - * the [`HttpServer::probe_registration()`] method. This allows callers to decide - * how to handle failures, but ensures that probes are always enabled if possible. - * - * Once in place, the probes can be seen via DTrace. 
For example, running: - * - * ```text - * $ cargo +nightly run --example basic --features usdt-probes - * ``` - * - * And making several requests to it with `curl`, we can see the DTrace - * probes with an invocation like: - * - * ```text - * ## dtrace -Zq -n 'dropshot*:::request-* { printf("%s\n", copyinstr(arg0)); }' - * {"ok":{"id":"b793c62e-60e4-45c5-9274-198a04d9abb1","local_addr":"127.0.0.1:61028","remote_addr":"127.0.0.1:34286","method":"GET","path":"/counter","query":null}} - * {"ok":{"id":"b793c62e-60e4-45c5-9274-198a04d9abb1","local_addr":"127.0.0.1:61028","remote_addr":"127.0.0.1:34286","status_code":200,"message":""}} - * {"ok":{"id":"9050e30a-1ce3-4d6f-be1c-69a11c618800","local_addr":"127.0.0.1:61028","remote_addr":"127.0.0.1:41101","method":"PUT","path":"/counter","query":null}} - * {"ok":{"id":"9050e30a-1ce3-4d6f-be1c-69a11c618800","local_addr":"127.0.0.1:61028","remote_addr":"127.0.0.1:41101","status_code":400,"message":"do not like the number 10"}} - * {"ok":{"id":"a53696af-543d-452f-81b6-5a045dd9921d","local_addr":"127.0.0.1:61028","remote_addr":"127.0.0.1:57376","method":"PUT","path":"/counter","query":null}} - * {"ok":{"id":"a53696af-543d-452f-81b6-5a045dd9921d","local_addr":"127.0.0.1:61028","remote_addr":"127.0.0.1:57376","status_code":204,"message":""}} - * ``` - */ +//! Dropshot is a general-purpose crate for exposing REST APIs from a Rust +//! program. Planned highlights include: +//! +//! * Suitability for production use on a largely untrusted network. +//! Dropshot-based systems should be high-performing, reliable, debuggable, and +//! secure against basic denial of service attacks (intentional or otherwise). +//! +//! * First-class OpenAPI support, in the form of precise OpenAPI specs generated +//! directly from code. This works because the functions that serve HTTP +//! resources consume arguments and return values of specific types from which +//! a schema can be statically generated. +//! +//! * Ease of integrating into a diverse team. An important use case for +//! Dropshot consumers is to have a team of engineers where individuals might +//! add a few endpoints at a time to a complex server, and it should be +//! relatively easy to do this. Part of this means an emphasis on the +//! principle of least surprise: like Rust itself, we may choose abstractions +//! that require more time to learn up front in order to make it harder to +//! accidentally build systems that will not perform, will crash in corner +//! cases, etc. +//! +//! By "REST API", we primarily mean an API built atop existing HTTP primitives, +//! organized into hierarchical resources, and providing consistent, idempotent +//! mechanisms to create, update, list, and delete those resources. "REST" can +//! mean a range of things depending on who you talk to, and some people are +//! dogmatic about what is or isn't RESTy. We find such dogma not only +//! unhelpful, but poorly defined. (Consider such a simple case as trying to +//! update a resource in a REST API. Popular APIs sometimes use `PUT`, `PATCH`, +//! or `POST` for the verb; and JSON Merge Patch or JSON Patch as the format. +//! (sometimes without even knowing it!). There's hardly a clear standard, yet +//! this is a really basic operation for any REST API.) +//! +//! For a discussion of alternative crates considered, see Oxide RFD 10. +//! +//! We hope Dropshot will be fairly general-purpose, but it's primarily intended +//! to address the needs of the Oxide control plane. +//! +//! +//! ## Usage +//! +//! 
The bare minimum might look like this: +//! +//! ```no_run +//! use dropshot::ApiDescription; +//! use dropshot::ConfigDropshot; +//! use dropshot::ConfigLogging; +//! use dropshot::ConfigLoggingLevel; +//! use dropshot::HttpServerStarter; +//! use std::sync::Arc; +//! +//! #[tokio::main] +//! async fn main() -> Result<(), String> { +//! // Set up a logger. +//! let log = +//! ConfigLogging::StderrTerminal { +//! level: ConfigLoggingLevel::Info, +//! } +//! .to_logger("minimal-example") +//! .map_err(|e| e.to_string())?; +//! +//! // Describe the API. +//! let api = ApiDescription::new(); +//! // Register API functions -- see detailed example or ApiDescription docs. +//! +//! // Start the server. +//! let server = +//! HttpServerStarter::new( +//! &ConfigDropshot { +//! bind_address: "127.0.0.1:0".parse().unwrap(), +//! request_body_max_bytes: 1024, +//! tls: None, +//! }, +//! api, +//! Arc::new(()), +//! &log, +//! ) +//! .map_err(|error| format!("failed to start server: {}", error))? +//! .start(); +//! +//! server.await +//! } +//! ``` +//! +//! This server returns a 404 for all resources because no API functions were +//! registered. See `examples/basic.rs` for a simple, documented example that +//! provides a few resources using shared state. +//! +//! For a given `ApiDescription`, you can also print out an OpenAPI spec +//! describing the API. See [`ApiDescription::openapi`]. +//! +//! +//! ## API Handler Functions +//! +//! HTTP talks about **resources**. For a REST API, we often talk about +//! **endpoints** or **operations**, which are identified by a combination of the +//! HTTP method and the URI path. +//! +//! Example endpoints for a resource called a "project" might include: +//! +//! * `GET /projects` (list projects) +//! * `POST /projects` (one way to create a project) +//! * `GET /projects/my_project` (fetch one project) +//! * `PUT /projects/my_project` (update (or possibly create) a project) +//! * `DELETE /projects/my_project` (delete a project) +//! +//! With Dropshot, an incoming request for a given API endpoint is handled by a +//! particular Rust function. That function is called an **entrypoint**, an +//! **endpoint handler**, or a **handler function**. When you set up a Dropshot +//! server, you configure the set of available API endpoints and which functions +//! will handle each one by setting up an [`ApiDescription`]. +//! +//! Typically, you define an endpoint with a handler function by using the +//! [`endpoint`] macro. Here's an example of a single endpoint that lists +//! a hardcoded project: +//! +//! ``` +//! use dropshot::endpoint; +//! use dropshot::ApiDescription; +//! use dropshot::HttpError; +//! use dropshot::HttpResponseOk; +//! use dropshot::RequestContext; +//! use http::Method; +//! use schemars::JsonSchema; +//! use serde::Serialize; +//! use std::sync::Arc; +//! +//! /** Represents a project in our API */ +//! #[derive(Serialize, JsonSchema)] +//! struct Project { +//! /** name of the project */ +//! name: String, +//! } +//! +//! /** Fetch a project. */ +//! #[endpoint { +//! method = GET, +//! path = "/projects/project1", +//! }] +//! async fn myapi_projects_get_project( +//! rqctx: Arc>, +//! ) -> Result, HttpError> +//! { +//! let project = Project { name: String::from("project1") }; +//! Ok(HttpResponseOk(project)) +//! } +//! +//! fn main() { +//! let mut api = ApiDescription::new(); +//! +//! /* +//! * Register our endpoint and its handler function. The "endpoint" macro +//! 
+//! * specifies the HTTP method and URI path that identify the endpoint,
+//! * allowing this metadata to live right alongside the handler function.
+//! */
+//! api.register(myapi_projects_get_project).unwrap();
+//!
+//! /* ... (use `api` to set up an `HttpServer` ) */
+//! }
+//! ```
+//!
+//! There's quite a lot going on here:
+//!
+//! * The `endpoint` macro specifies the HTTP method and URI path. When we
+//! invoke `ApiDescription::register()`, this information is used to register
+//! the endpoint that will be handled by our function.
+//! * The signature of our function indicates that on success, it returns a
+//! `HttpResponseOk<Project>`. This means that the function will
+//! return an HTTP 200 status code ("OK") with an object of type `Project`.
+//! * The function itself has a Rustdoc comment that will be used to document
+//! this _endpoint_ in the OpenAPI schema.
+//!
+//! From this information, Dropshot can generate an OpenAPI specification for
+//! this API that describes the endpoint (which OpenAPI calls an "operation"),
+//! its documentation, the possible responses that it can return, and the schema
+//! for each type of response (which can also include documentation). This is
+//! largely known statically, though generated at runtime.
+//!
+//!
+//! ### `#[endpoint { ... }]` attribute parameters
+//!
+//! The `endpoint` attribute accepts parameters that affect the operation of
+//! the endpoint as well as metadata that appears in the OpenAPI description
+//! of it.
+//!
+//! ```ignore
+//! #[endpoint {
+//! // Required fields
+//! method = { DELETE | GET | PATCH | POST | PUT },
+//! path = "/path/name/with/{named}/{variables}",
+//!
+//! // Optional fields
+//! tags = [ "all", "your", "OpenAPI", "tags" ],
+//! }]
+//! ```
+//!
+//! This is where you specify the HTTP method and path (including path variables)
+//! for the API endpoint. These are used as part of endpoint registration and
+//! appear in the OpenAPI spec output.
+//!
+//! The tags field is used to categorize API endpoints and only impacts the
+//! OpenAPI spec output.
+//!
+//!
+//! ### Function parameters
+//!
+//! In general, a handler function looks like this:
+//!
+//! ```ignore
+//! async fn f(
+//! rqctx: Arc<RequestContext<Context>>,
+//! [query_params: Query<Q>,]
+//! [path_params: Path
<P>
    ,]
+//! [body_param: TypedBody<J>,]
+//! [body_param: UntypedBody,]
+//! ) -> Result<HttpResponse*, HttpError>
+//! ```
+//!
+//! Other than the RequestContext, parameters may appear in any order.
+//!
+//! The `Context` type is caller-provided context which is provided when
+//! the server is created.
+//!
+//! The types `Query`, `Path`, `TypedBody`, and `UntypedBody` are called
+//! **Extractors** because they cause information to be pulled out of the request
+//! and made available to the handler function.
+//!
+//! * [`Query`]`<Q>` extracts parameters from a query string, deserializing them
+//! into an instance of type `Q`. `Q` must implement `serde::Deserialize` and
+//! `schemars::JsonSchema`.
+//! * [`Path`]`
<P>
` extracts parameters from HTTP path, deserializing them into
+//! an instance of type `P`. `P` must implement `serde::Deserialize` and
+//! `schemars::JsonSchema`.
+//! * [`TypedBody`]`<J>` extracts content from the request body by parsing the
+//! body as JSON (or form/url-encoded) and deserializing it into an instance
+//! of type `J`. `J` must implement `serde::Deserialize` and `schemars::JsonSchema`.
+//! * [`UntypedBody`] extracts the raw bytes of the request body.
+//!
+//! If the handler takes a `Query`, `Path
<P>
    `, `TypedBody`, or +//! `UntypedBody`, and the corresponding extraction cannot be completed, the +//! request fails with status code 400 and an error message reflecting a +//! validation error. +//! +//! As with any serde-deserializable type, you can make fields optional by having +//! the corresponding property of the type be an `Option`. Here's an example of +//! an endpoint that takes two arguments via query parameters: "limit", a +//! required u32, and "marker", an optional string: +//! +//! ``` +//! use http::StatusCode; +//! use dropshot::HttpError; +//! use dropshot::TypedBody; +//! use dropshot::Query; +//! use dropshot::RequestContext; +//! use hyper::Body; +//! use hyper::Response; +//! use schemars::JsonSchema; +//! use serde::Deserialize; +//! use std::sync::Arc; +//! +//! #[derive(Deserialize, JsonSchema)] +//! struct MyQueryArgs { +//! limit: u32, +//! marker: Option +//! } +//! +//! struct MyContext {} +//! +//! async fn myapi_projects_get( +//! rqctx: Arc>, +//! query: Query) +//! -> Result, HttpError> +//! { +//! let query_args = query.into_inner(); +//! let context: &MyContext = rqctx.context(); +//! let limit: u32 = query_args.limit; +//! let marker: Option = query_args.marker; +//! Ok(Response::builder() +//! .status(StatusCode::OK) +//! .body(format!("limit = {}, marker = {:?}\n", limit, marker).into())?) +//! } +//! ``` +//! +//! ### Endpoint function return types +//! +//! Endpoint handler functions are async, so they always return a `Future`. When +//! we say "return type" below, we use that as shorthand for the output of the +//! future. +//! +//! An endpoint function must return a type that implements `HttpResponse`. +//! Typically this should be a type that implements `HttpTypedResponse` (either +//! one of the Dropshot-provided ones or one of your own creation). +//! +//! The more specific a type returned by the handler function, the more can be +//! validated at build-time, and the more specific an OpenAPI schema can be +//! generated from the source code. For example, a POST to an endpoint +//! "/projects" might return `Result, HttpError>`. +//! As you might expect, on success, this turns into an HTTP 201 "Created" +//! response whose body is constructed by serializing the `Project`. In this +//! example, OpenAPI tooling can identify at build time that this function +//! produces a 201 "Created" response on success with a body whose schema matches +//! `Project` (which we already said implements `Serialize`), and there would be +//! no way to violate this contract at runtime. +//! +//! These are the implementations of `HttpTypedResponse` with their associated +//! HTTP response code +//! on the HTTP method: +//! +//! | Return Type | HTTP status code | +//! | ----------- | ---------------- | +//! | [`HttpResponseOk`] | 200 | +//! | [`HttpResponseCreated`] | 201 | +//! | [`HttpResponseAccepted`] | 202 | +//! | [`HttpResponseDeleted`] | 204 | +//! | [`HttpResponseUpdatedNoContent`] | 204 | +//! +//! In situations where the response schema is not fixed, the endpoint should +//! return `Response`, which also implements `HttpResponse`. Note that +//! the OpenAPI spec will not include any status code or type information in +//! this case. +//! +//! ## What about generic handlers that run on all requests? +//! +//! There's no mechanism in Dropshot for this. Instead, it's recommended that +//! users commonize code using regular Rust functions and calling them. See the +//! design notes in the README for more on this. +//! +//! +//! 
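+//! As a concrete illustration of the typed-response types above, here is a
+//! sketch of a creation endpoint that pairs [`HttpResponseCreated`] with a
+//! serializable body type. (This sketch is illustrative only and is not one
+//! of the crate's own examples; the `Widget` type, `/widgets` path, and
+//! `widget_create` handler are hypothetical.)
+//!
+//! ```
+//! use dropshot::endpoint;
+//! use dropshot::HttpError;
+//! use dropshot::HttpResponseCreated;
+//! use dropshot::RequestContext;
+//! use dropshot::TypedBody;
+//! use schemars::JsonSchema;
+//! use serde::Deserialize;
+//! use serde::Serialize;
+//! use std::sync::Arc;
+//!
+//! /** A widget (hypothetical resource, for illustration only) */
+//! #[derive(Deserialize, Serialize, JsonSchema)]
+//! struct Widget {
+//!     name: String,
+//! }
+//!
+//! /** Create a widget. */
+//! #[endpoint {
+//!     method = POST,
+//!     path = "/widgets",
+//! }]
+//! async fn widget_create(
+//!     _rqctx: Arc<RequestContext<()>>,
+//!     body: TypedBody<Widget>,
+//! ) -> Result<HttpResponseCreated<Widget>, HttpError> {
+//!     // Echo the new widget back in a 201 "Created" response; a real
+//!     // handler would persist it first.
+//!     Ok(HttpResponseCreated(body.into_inner()))
+//! }
+//! ```
+//!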
+//! ## Support for paginated resources
+//!
+//! "Pagination" here refers to the interface pattern where HTTP resources (or
+//! API endpoints) that provide a list of the items in a collection return a
+//! relatively small maximum number of items per request, often called a "page"
+//! of results. Each page includes some metadata that the client can use to make
+//! another request for the next page of results. The client can repeat this
+//! until they've gotten all the results. Limiting the number of results
+//! returned per request helps bound the resource utilization and time required
+//! for any request, which in turn facilitates horizontal scalability, high
+//! availability, and protection against some denial of service attacks
+//! (intentional or otherwise). For more background, see the comments in
+//! dropshot/src/pagination.rs.
+//!
+//! Pagination support in Dropshot implements this common pattern:
+//!
+//! * This server exposes an **API endpoint** that returns the **items**
+//! contained within a **collection**.
+//! * The client is not allowed to list the entire collection in one request.
+//! Instead, they list the collection using a sequence of requests to the one
+//! endpoint. We call this sequence of requests a **scan** of the collection,
+//! and we sometimes say that the client **pages through** the collection.
+//! * The initial request in the scan may specify the **scan parameters**, which
+//! typically specify how the results are to be sorted (i.e., by which
+//! field(s) and whether the sort is ascending or descending), any filters to
+//! apply, etc.
+//! * Each request returns a **page** of results at a time, along with a **page
+//! token** that's provided with the next request as a query parameter.
+//! * The scan parameters cannot change between requests that are part of the
+//! same scan.
+//! * With all requests: there's a default limit (e.g., 100 items returned at a
+//! time). Clients can request a higher limit using a query parameter (e.g.,
+//! `limit=1000`). This limit is capped by a hard limit on the server. If the
+//! client asks for more than the hard limit, the server can use the hard limit
+//! or reject the request.
+//!
+//! As an example, imagine that we have an API endpoint called `"/animals"`. Each
+//! item returned is an `Animal` object that might look like this:
+//!
+//! ```json
+//! {
+//! "name": "aardvark",
+//! "class": "mammal",
+//! "max_weight": "80", /* kilograms, typical */
+//! }
+//! ```
+//!
+//! There are at least 1.5 million known species of animal -- too many to return
+//! in one API call! Our API supports paginating them by `"name"`, which we'll
+//! say is a unique field in our data set.
+//!
+//! The first request to the API fetches `"/animals"` (with no querystring
+//! parameters) and returns:
+//!
+//! ```json
+//! {
+//! "page_token": "abc123...",
+//! "items": [
+//! {
+//! "name": "aardvark",
+//! "class": "mammal",
+//! "max_weight": "80",
+//! },
+//! ...
+//! {
+//! "name": "badger",
+//! "class": "mammal",
+//! "max_weight": "12",
+//! }
+//! ]
+//! }
+//! ```
+//!
+//! The subsequent request to the API fetches `"/animals?page_token=abc123..."`.
+//! The page token `"abc123..."` is an opaque token to the client, but typically
+//! encodes the scan parameters and the value of the last item seen
+//! (`"name=badger"`). The client knows it has completed the scan when it
+//! receives a response with no `page_token` in it.
+//!
+//! Our API endpoint can also support scanning in reverse order. In this case,
when the client makes the first request, it should fetch +//! `"/animals?sort=name-descending"`. Now the first result might be `"zebra"`. +//! Again, the page token must include the scan parameters so that in subsequent +//! requests, the API endpoint knows that we're scanning backwards, not forwards, +//! from the value we were given. It's not allowed to change directions or sort +//! order in the middle of a scan. (You can always start a new scan, but you +//! can't pick up from where you were in the previous scan.) +//! +//! It's also possible to support sorting by multiple fields. For example, we +//! could support `sort=class-name`, which we could define to mean that we'll +//! sort the results first by the animal's class, then by name. Thus we'd get +//! all the amphibians in sorted order, then all the mammals, then all the +//! reptiles. The main requirement is that the combination of fields used for +//! pagination must be unique. We cannot paginate by the animal's class alone. +//! (To see why: there are over 6,000 mammals. If the page size is, say, 1000, +//! then the page_token would say `"mammal"`, but there's not enough information +//! there to see where we are within the list of mammals. It doesn't matter +//! whether there are 2 mammals or 6,000 because clients can limit the page size +//! to just one item if they want and that ought to work.) +//! +//! +//! ### Dropshot interfaces for pagination +//! +//! We can think of pagination in two parts: the input (handling the pagination +//! query parameters) and the output (emitting a page of results, including the +//! page token). +//! +//! For input, a paginated API endpoint's handler function should accept a +//! [`Query`]`<`[`PaginationParams`]`>`, where +//! `ScanParams` is a consumer-defined type specifying the parameters of the scan +//! (typically including the sort fields, sort order, and filter options) and +//! `PageSelector` is a consumer-defined type describing the page token. The +//! PageSelector will be serialized to JSON and base64-encoded to construct the +//! page token. This will be automatically parsed on the way back in. +//! +//! For output, a paginated API endpoint's handler function can return +//! `Result<`[`HttpResponseOk`]<[`ResultsPage`]`, HttpError>` where `T: +//! Serialize` is the item listed by the endpoint. You can also use your own +//! structure that contains a [`ResultsPage`] (possibly using +//! `#[serde(flatten)]`), if that's the behavior you want. +//! +//! There are several complete, documented examples in the "examples" directory. +//! +//! +//! ### Advanced usage notes +//! +//! It's possible to accept additional query parameters besides the pagination +//! parameters by having your API endpoint handler function take two different +//! arguments using `Query`, like this: +//! +//! ``` +//! use dropshot::HttpError; +//! use dropshot::HttpResponseOk; +//! use dropshot::PaginationParams; +//! use dropshot::Query; +//! use dropshot::RequestContext; +//! use dropshot::ResultsPage; +//! use dropshot::endpoint; +//! use schemars::JsonSchema; +//! use serde::Deserialize; +//! use std::sync::Arc; +//! # use serde::Serialize; +//! # #[derive(Debug, Deserialize, JsonSchema)] +//! # enum MyScanParams { A }; +//! # #[derive(Debug, Deserialize, JsonSchema, Serialize)] +//! # enum MyPageSelector { A(String) }; +//! #[derive(Deserialize, JsonSchema)] +//! struct MyExtraQueryParams { +//! do_extra_stuff: bool, +//! } +//! +//! #[endpoint { +//! method = GET, +//! path = "/list_stuff" +//! }] +//! 
async fn my_list_api( +//! rqctx: Arc>, +//! pag_params: Query>, +//! extra_params: Query, +//! ) -> Result>, HttpError> +//! { +//! # unimplemented!(); +//! /* ... */ +//! } +//! ``` +//! +//! You might expect that instead of doing this, you could define your own +//! structure that includes a `PaginationParams` using `#[serde(flatten)]`, and +//! this ought to work, but it currently doesn't due to serde_urlencoded#33, +//! which is really serde#1183. +//! +//! ### DTrace probes +//! +//! Dropshot optionally exposes two DTrace probes, `request_start` and +//! `request_finish`. These provide detailed information about each request, +//! such as their ID, the local and remote IPs, and the response information. +//! See the [`RequestInfo`] and [`ResponseInfo`] types for a complete listing +//! of what's available. +//! +//! These probes are implemented via the [`usdt`] crate. They require a nightly +//! toolchain if built on MacOS (which requires the unstable `asm_sym` feature). +//! Otherwise a stable compiler >= v1.59 is required in order to present the +//! necessary features. Given these constraints, usdt functionality is behind +//! the feature flag `"usdt-probes"`. +//! +//! > *Important:* The probes are internally registered with the DTrace kernel +//! module, making them visible via `dtrace(1M)`. This is done when an `HttpServer` +//! object is created, but it's possible that registration fails. The result of +//! registration is stored in the server after creation, and can be accessed with +//! the [`HttpServer::probe_registration()`] method. This allows callers to decide +//! how to handle failures, but ensures that probes are always enabled if possible. +//! +//! Once in place, the probes can be seen via DTrace. For example, running: +//! +//! ```text +//! $ cargo +nightly run --example basic --features usdt-probes +//! ``` +//! +//! And making several requests to it with `curl`, we can see the DTrace +//! probes with an invocation like: +//! +//! ```text +//! ## dtrace -Zq -n 'dropshot*:::request-* { printf("%s\n", copyinstr(arg0)); }' +//! {"ok":{"id":"b793c62e-60e4-45c5-9274-198a04d9abb1","local_addr":"127.0.0.1:61028","remote_addr":"127.0.0.1:34286","method":"GET","path":"/counter","query":null}} +//! {"ok":{"id":"b793c62e-60e4-45c5-9274-198a04d9abb1","local_addr":"127.0.0.1:61028","remote_addr":"127.0.0.1:34286","status_code":200,"message":""}} +//! {"ok":{"id":"9050e30a-1ce3-4d6f-be1c-69a11c618800","local_addr":"127.0.0.1:61028","remote_addr":"127.0.0.1:41101","method":"PUT","path":"/counter","query":null}} +//! {"ok":{"id":"9050e30a-1ce3-4d6f-be1c-69a11c618800","local_addr":"127.0.0.1:61028","remote_addr":"127.0.0.1:41101","status_code":400,"message":"do not like the number 10"}} +//! {"ok":{"id":"a53696af-543d-452f-81b6-5a045dd9921d","local_addr":"127.0.0.1:61028","remote_addr":"127.0.0.1:57376","method":"PUT","path":"/counter","query":null}} +//! {"ok":{"id":"a53696af-543d-452f-81b6-5a045dd9921d","local_addr":"127.0.0.1:61028","remote_addr":"127.0.0.1:57376","status_code":204,"message":""}} +//! ``` -/* - * Clippy's style advice is definitely valuable, but not worth the trouble for - * automated enforcement. - */ +// Clippy's style advice is definitely valuable, but not worth the trouble for +// automated enforcement. #![allow(clippy::style)] -/* - * The `usdt` crate requires nightly, enabled if our consumer is enabling - * DTrace probes. - */ +// The `usdt` crate requires nightly, enabled if our consumer is enabling +// DTrace probes. 

-/*
- * Clippy's style advice is definitely valuable, but not worth the trouble for
- * automated enforcement.
- */
+// Clippy's style advice is definitely valuable, but not worth the trouble for
+// automated enforcement.
 #![allow(clippy::style)]

-/*
- * The `usdt` crate requires nightly, enabled if our consumer is enabling
- * DTrace probes.
- */
+// The `usdt` crate requires nightly, enabled if our consumer is enabling
+// DTrace probes.
 #![cfg_attr(all(feature = "usdt-probes", not(usdt_stable_asm)), feature(asm))]
 #![cfg_attr(
     all(
@@ -678,9 +671,7 @@ pub use websocket::WebsocketConnectionRaw;
 pub use websocket::WebsocketEndpointResult;
 pub use websocket::WebsocketUpgrade;

-/*
- * Users of the `endpoint` macro need the following macros:
- */
+// Users of the `endpoint` macro need the following macros:
 pub use handler::RequestContextArgument;
 pub use http::Method;

diff --git a/dropshot/src/logging.rs b/dropshot/src/logging.rs
index 8a5d50326..5f3b903c0 100644
--- a/dropshot/src/logging.rs
+++ b/dropshot/src/logging.rs
@@ -1,9 +1,7 @@
 // Copyright 2020 Oxide Computer Company
-/*!
- * Provides basic facilities for configuring logging and creating loggers, all
- * using Slog.  None of these facilities are required for this crate, but
- * they're provided because they're commonly wanted by consumers of this crate.
- */
+//! Provides basic facilities for configuring logging and creating loggers, all
+//! using Slog.  None of these facilities are required for this crate, but
+//! they're provided because they're commonly wanted by consumers of this crate.

 use camino::Utf8PathBuf;
 use serde::Deserialize;
@@ -15,16 +13,14 @@ use std::fs::OpenOptions;
 use std::io::LineWriter;
 use std::{io, path::Path};

-/**
- * Represents the logging configuration for a server.  This is expected to be a
- * top-level block in a TOML config file, although that's not required.
- */
+/// Represents the logging configuration for a server.  This is expected to be a
+/// top-level block in a TOML config file, although that's not required.
 #[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
 #[serde(rename_all = "kebab-case", tag = "mode")]
 pub enum ConfigLogging {
-    /** Pretty-printed output to stderr, assumed to support terminal escapes. */
+    /// Pretty-printed output to stderr, assumed to support terminal escapes.
     StderrTerminal { level: ConfigLoggingLevel },
-    /** Bunyan-formatted output to a specified file. */
+    /// Bunyan-formatted output to a specified file.
     File {
         level: ConfigLoggingLevel,
         path: Utf8PathBuf,
@@ -32,9 +28,7 @@ pub enum ConfigLogging {
     },
 }

-/**
- * Log messages have a level that's used for filtering in the usual way.
- */
+/// Log messages have a level that's used for filtering in the usual way.
 #[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
 #[serde(rename_all = "lowercase")]
 pub enum ConfigLoggingLevel {
@@ -59,24 +53,20 @@ impl From<&ConfigLoggingLevel> for Level {
     }
 }

-/**
- * Specifies the behavior when logging to a file that already exists.
- */
+/// Specifies the behavior when logging to a file that already exists.
 #[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
 #[serde(rename_all = "lowercase")]
 pub enum ConfigLoggingIfExists {
-    /** Fail to create the log */
+    /// Fail to create the log
     Fail,
-    /** Truncate the existing file */
+    /// Truncate the existing file
     Truncate,
-    /** Append to the existing file */
+    /// Append to the existing file
     Append,
 }

 impl ConfigLogging {
-    /**
-     * Create a root logger based on the requested configuration.
-     */
+    /// Create a root logger based on the requested configuration.
     pub fn to_logger<S: AsRef<str>>(
         &self,
         log_name: S,
@@ -117,13 +107,11 @@ impl ConfigLogging {
     }
 }

-/*
- * TODO-hardening We use an async drain to take care of synchronization.  That's
- * mainly because the other two documented options use a std::sync::Mutex, which
- * is not futures-aware and is likely to foul up our executor.  However, we have
- * not verified that the async implementation behaves reasonably under
- * backpressure, and it definitely makes things harder to debug.
- */
+// TODO-hardening We use an async drain to take care of synchronization.  That's
+// mainly because the other two documented options use a std::sync::Mutex, which
+// is not futures-aware and is likely to foul up our executor.  However, we have
+// not verified that the async implementation behaves reasonably under
+// backpressure, and it definitely makes things harder to debug.
 fn async_root_logger<T>(level: &ConfigLoggingLevel, drain: T) -> slog::Logger
 where
     T: slog::Drain + Send + 'static,
@@ -146,19 +134,15 @@ fn log_drain_for_file(
     // Buffer writes to the file around newlines to minimize syscalls.
     let file = LineWriter::new(open_options.open(path)?);

-    /*
-     * Record a message to the stderr so that a reader who doesn't already know
-     * how logging is configured knows where the rest of the log messages went.
-     */
+    // Record a message to stderr so that a reader who doesn't already know
+    // how logging is configured knows where the rest of the log messages went.
     eprintln!("note: configured to log to \"{}\"", path.display());

-    /*
-     * Using leak() here is dubious.  However, we really want the logger's name
-     * to be dynamically generated from the test name.  Unfortunately, the
-     * bunyan interface requires that it be a `&'static str`.  The correct
-     * approach is to fix that interface.
-     * TODO-cleanup
-     */
+    // Using leak() here is dubious.  However, we really want the logger's name
+    // to be dynamically generated from the test name.  Unfortunately, the
+    // bunyan interface requires that it be a `&'static str`.  The correct
+    // approach is to fix that interface.
+    // TODO-cleanup
     let log_name_box = Box::new(log_name);
     let log_name_leaked = Box::leak(log_name_box);
     Ok(slog_bunyan::with_name(log_name_leaked, file).build().fuse())
@@ -178,9 +162,7 @@ mod test {
     use std::path::Path;
     use std::{io, path::PathBuf};

-    /**
-     * Generates a temporary filesystem path unique for the given label.
-     */
+    /// Generates a temporary filesystem path unique for the given label.
     fn temp_path(label: &str) -> PathBuf {
         let arg0str = std::env::args().next().expect("expected process arg0");
         let arg0 = Path::new(&arg0str)
@@ -194,9 +176,7 @@ mod test {
         pathbuf
     }

-    /**
-     * Load a configuration and create a logger from it.
-     */
+    /// Load a configuration and create a logger from it.
     fn read_config_and_create_logger(
         label: &str,
         contents: &str,
@@ -209,9 +189,7 @@ mod test {
         result
     }

-    /*
-     * Bad value for "log_mode"
-     */
+    // Bad value for "log_mode"

     #[test]
     fn test_config_bad_log_mode() {
@@ -225,12 +203,10 @@ mod test {
         ));
     }

-    /*
-     * Bad "mode = stderr-terminal" config
-     *
-     * TODO-coverage: consider adding tests for all variants of missing or
-     * invalid properties for all log modes
-     */
+    // Bad "mode = stderr-terminal" config
+    //
+    // TODO-coverage: consider adding tests for all variants of missing or
+    // invalid properties for all log modes

     #[test]
     fn test_config_bad_terminal_no_level() {
@@ -258,17 +234,15 @@ mod test {
         );
     }

-    /*
-     * Working "mode = stderr-terminal" config
-     *
-     * TODO-coverage: It would be nice to redirect our own stderr to a file (or
-     * something else we can collect) and then use the logger that we get below.
-     * Then we could verify that it contains the content we expect.
-     * Unfortunately, while Rust has private functions to redirect stdout and
-     * stderr, there's no exposed function for doing that, nor is there a way to
-     * provide a specific stream to a terminal logger.  (We could always
-     * implement our own.)
-     */
+    // Working "mode = stderr-terminal" config
+    //
+    // TODO-coverage: It would be nice to redirect our own stderr to a file (or
+    // something else we can collect) and then use the logger that we get below.
+    // Then we could verify that it contains the content we expect.
+    // Unfortunately, while Rust has private functions to redirect stdout and
+    // stderr, there's no exposed function for doing that, nor is there a way to
+    // provide a specific stream to a terminal logger.  (We could always
+    // implement our own.)
     #[test]
     fn test_config_stderr_terminal() {
         let config = r##"
@@ -280,9 +254,7 @@ mod test {
         config.to_logger("test-logger").unwrap();
     }

-    /*
-     * Bad "mode = file" configurations
-     */
+    // Bad "mode = file" configurations

     #[test]
     fn test_config_bad_file_no_file() {
@@ -310,11 +282,9 @@ mod test {
         assert_eq!(error, "missing field `level`");
     }

-    /**
-     * `LogTest` and `LogTestCleanup` are used for the tests that create various
-     * files on the filesystem to commonize code and make sure everything gets
-     * cleaned up as expected.
-     */
+    /// `LogTest` and `LogTestCleanup` are used for the tests that create various
+    /// files on the filesystem to commonize code and make sure everything gets
+    /// cleaned up as expected.
     struct LogTest {
         directory: PathBuf,
         cleanup_list: Vec<LogTestCleanup>,
@@ -327,14 +297,12 @@ mod test {
     }

     impl LogTest {
-        /**
-         * The setup for a logger test creates a temporary directory with the
-         * given label and returns a `LogTest` with that directory in the
-         * cleanup list so that on teardown the temporary directory will be
-         * removed.  The temporary directory must be empty by the time the
-         * `LogTest` is torn down except for files and directories created with
-         * `will_create_dir()` and `will_create_file()`.
-         */
+        /// The setup for a logger test creates a temporary directory with the
+        /// given label and returns a `LogTest` with that directory in the
+        /// cleanup list so that on teardown the temporary directory will be
+        /// removed.  The temporary directory must be empty by the time the
+        /// `LogTest` is torn down except for files and directories created with
+        /// `will_create_dir()` and `will_create_file()`.
        fn setup(label: &str) -> LogTest {
            let directory_path = temp_path(label);

@@ -353,14 +321,12 @@ mod test {
            }
        }

-        /**
-         * Records that the caller intends to create a directory with relative
-         * path "path" underneath the root directory for this log test.  Returns
-         * the path to this directory.  This directory will be removed during
-         * teardown.  Directories and files must be recorded in the order they
-         * would be created so that the order can be reversed at teardown
-         * (without needing any kind of recursive removal).
-         */
+        /// Records that the caller intends to create a directory with relative
+        /// path "path" underneath the root directory for this log test.  Returns
+        /// the path to this directory.  This directory will be removed during
+        /// teardown.  Directories and files must be recorded in the order they
+        /// would be created so that the order can be reversed at teardown
+        /// (without needing any kind of recursive removal).
        fn will_create_dir(&mut self, path: &str) -> PathBuf {
            let mut pathbuf = self.directory.clone();
            pathbuf.push(path);
@@ -368,14 +334,12 @@ mod test {
            pathbuf
        }

-        /**
-         * Records that the caller intends to create a file with relative path
-         * "path" underneath the root directory for this log test.  Returns a
-         * the path to this file.  This file will be removed during teardown.
-         * Directories and files must be recorded in the order they would be
-         * created so that the order can be reversed at teardown (without
-         * needing any kind of recursive removal).
-         */
+        /// Records that the caller intends to create a file with relative path
+        /// "path" underneath the root directory for this log test.  Returns
+        /// the path to this file.  This file will be removed during teardown.
+        /// Directories and files must be recorded in the order they would be
+        /// created so that the order can be reversed at teardown (without
+        /// needing any kind of recursive removal).
        fn will_create_file(&mut self, path: &str) -> PathBuf {
            let mut pathbuf = self.directory.clone();
            pathbuf.push(path);
@@ -401,10 +365,8 @@ mod test {

    #[test]
    fn test_config_bad_file_bad_path_type() {
-        /*
-         * We create a path as a directory so that when we subsequently try to
-         * use it as a file, we won't be able to.
-         */
+        // We create a path as a directory so that when we subsequently try to
+        // use it as a file, we won't be able to.
        let mut logtest = LogTest::setup("bad_file_bad_path_type_dir");
        let path = logtest.will_create_dir("log_file_as_dir");
        fs::create_dir(&path).unwrap();
@@ -467,11 +429,9 @@ mod test {
        assert_eq!(error.kind(), std::io::ErrorKind::AlreadyExists);
    }

-    /*
-     * Working "mode = file" configuration.  The following test exercises
-     * successful file-based configurations for all three values of "if_exists",
-     * different log levels, and the bunyan log format.
-     */
+    // Working "mode = file" configuration.  The following test exercises
+    // successful file-based configurations for all three values of "if_exists",
+    // different log levels, and the bunyan log format.

    #[test]
    fn test_config_file() {
@@ -483,7 +443,7 @@ mod test {
        let escaped_path =
            logpath.display().to_string().escape_default().to_string();

-        /* The first attempt should succeed.  The log file doesn't exist yet. */
+        // The first attempt should succeed.  The log file doesn't exist yet.
        let config = format!(
            r#"
            mode = "file"
@@ -495,17 +455,15 @@ mod test {
        );

        {
-            /*
-             * Construct the logger in a block so that it's flushed by the time
-             * we proceed.
-             */
+            // Construct the logger in a block so that it's flushed by the time
+            // we proceed.
            let log = read_config_and_create_logger("file", &config).unwrap();
            debug!(log, "message1_debug");
            warn!(log, "message1_warn");
            error!(log, "message1_error");
        }

-        /* Try again with if_exists = "append".  This should also work. */
+        // Try again with if_exists = "append".  This should also work.
        let config = format!(
            r#"
            mode = "file"
@@ -517,7 +475,7 @@ mod test {
        );

        {
-            /* See above. */
+            // See above.
            let log = read_config_and_create_logger("file", &config).unwrap();
            warn!(log, "message2");
        }
@@ -545,10 +503,8 @@ mod test {
        assert_eq!(log_records[1].msg, "message1_error");
        assert_eq!(log_records[2].msg, "message2");

-        /*
-         * Try again with if_exists = "truncate".  This should also work, but
-         * remove the contents that are already there.
-         */
+        // Try again with if_exists = "truncate".  This should also work, but
+        // remove the contents that are already there.
let time_before = time_after; let time_after = chrono::offset::Utc::now(); let config = format!( @@ -562,7 +518,7 @@ mod test { ); { - /* See above. */ + // See above. let log = read_config_and_create_logger("file", &config).unwrap(); debug!(log, "message3_debug"); warn!(log, "message3_warn"); diff --git a/dropshot/src/pagination.rs b/dropshot/src/pagination.rs index 219497fa8..a854b50b6 100644 --- a/dropshot/src/pagination.rs +++ b/dropshot/src/pagination.rs @@ -1,103 +1,101 @@ // Copyright 2022 Oxide Computer Company -/*! - * Detailed end-user documentation for pagination lives in the Dropshot top- - * level block comment. Here we discuss some of the design choices. - * - * ## Background: patterns for pagination - * - * [In their own API design guidelines, Google describes an approach similar to - * the one we use][1]. There are many ways to implement the page token with - * many different tradeoffs. The one described in the Dropshot top-level block - * comment has a lot of nice properties: - * - * * For APIs backed by a database of some kind, it's usually straightforward to - * use an existing primary key or other unique, sortable field (or combination - * of fields) as the token. - * - * * If the client scans all the way through the collection, they will see every - * object that existed both before the scan and after the scan and was not - * renamed during the scan. (This isn't true for schemes that use a simple - * numeric offset as the token.) - * - * * There's no server-side state associated with the token, so it's no problem - * if the server crashes between requests or if subsequent requests are - * handled by a different instance. (This isn't true for schemes that store - * the result set on the server.) - * - * * It's often straightforward to support a reversed-order scan as well -- this - * may just be a matter of flipping the inequality used for a database query. - * - * * It's easy to support sorting by a single field, and with some care it's - * possible to support queries on multiple different fields, even at the same - * time. An API can support listing by any unique, sortable combination of - * fields. For example, say our Projects have a modification time ("mtime") - * as well. We could support listing projects alphabetically by name _or_ in - * order of most recently modified. For the latter, since the modification - * time is generally not unique, and the marker must be unique, we'd really be - * listing by an ("mtime" descending, "name" ascending) tuple. - * - * The interfaces here are intended to support this sort of use case. For APIs - * backed by traditional RDBMS databases, see [this post for background on - * various ways to page through a large set of data][2]. (What we're describing - * here leverages what this post calls "keyset pagination".) - * - * Another consideration in designing pagination is whether the token ought to - * be explicit and meaningful to the user or just an opaque token (likely - * encoded in some way). It can be convenient for developers to use APIs where - * the token is explicitly intended to be one of the fields of the object (e.g., - * so that you could list animals starting in the middle by just requesting - * `?animal_name=moose`), but this puts constraints on the server because - * clients may come to depend on specific fields being supported and sorted in a - * certain way. Dropshot takes the approach of using an encoded token that - * includes information about the whole scan (e.g., the sort order). 
This makes - * it possible to identify cases that might otherwise result in confusing - * behavior (e.g., a client lists projects in ascending order, but then asks for - * the next page in descending order). The token also includes a version number - * so that it can be evolved in the future. - * - * - * ## Background: Why paginate HTTP APIs in the first place? - * - * Pagination helps ensure that the cost of a request in terms of resource - * utilization remains O(1) -- that is, it can be bounded above by a constant - * rather than scaling proportionally with any of the request parameters. This - * simplifies utilization monitoring, capacity planning, and scale-out - * activities for the service, since operators can think of the service in terms - * of one unit that needs to be scaled up. (It's still a very complex process, - * but it would be significantly harder if requests weren't O(1).) - * - * Similarly, pagination helps ensure that the time required for a request is - * O(1) under normal conditions. This makes it easier to define expectations - * for service latency and to monitor that latency to determine if those - * expectations are violated. Generally, if latency increases, then the service - * is unhealthy, and a crisp definition of "unhealthy" is important to operate a - * service with high availability. If requests weren't O(1), an increase in - * latency might just reflect a changing workload that's still performing within - * expectations -- e.g., clients listing larger collections than they were - * before, but still getting results promptly. That would make it much harder - * to see when the service really is unhealthy. - * - * Finally, bounding requests to O(1) work is a critical mitigation for common - * (if basic) denial-of-service (DoS) attacks because it requires that clients - * consume resources commensurate with the server costs that they're imposing. - * If a service exposes an API that does work proportional to some parameter, - * then it's cheap to launch a DoS on the service by just invoking that API with - * a large parameter. By contrast, if the client has to do work that scales - * linearly with the work the server has to do, then the client's costs go up in - * order to scale up the attack. - * - * Along these lines, connections and requests consume finite server resources - * like open file descriptors and database connections. If a service is built - * so that requests are all supposed to take about the same amount of time (or - * at least that there's a constant upper bound), then it may be possible to use - * a simple timeout scheme to cancel requests that are taking too long, as might - * happen if a malicious client finds some way to cause requests to hang or take - * a very long time. - * - * [1]: https://cloud.google.com/apis/design/design_patterns#list_pagination - * [2]: https://www.citusdata.com/blog/2016/03/30/five-ways-to-paginate/ - */ +//! Detailed end-user documentation for pagination lives in the Dropshot top- +//! level block comment. Here we discuss some of the design choices. +//! +//! ## Background: patterns for pagination +//! +//! [In their own API design guidelines, Google describes an approach similar to +//! the one we use][1]. There are many ways to implement the page token with +//! many different tradeoffs. The one described in the Dropshot top-level block +//! comment has a lot of nice properties: +//! +//! * For APIs backed by a database of some kind, it's usually straightforward to +//! 
use an existing primary key or other unique, sortable field (or combination +//! of fields) as the token. +//! +//! * If the client scans all the way through the collection, they will see every +//! object that existed both before the scan and after the scan and was not +//! renamed during the scan. (This isn't true for schemes that use a simple +//! numeric offset as the token.) +//! +//! * There's no server-side state associated with the token, so it's no problem +//! if the server crashes between requests or if subsequent requests are +//! handled by a different instance. (This isn't true for schemes that store +//! the result set on the server.) +//! +//! * It's often straightforward to support a reversed-order scan as well -- this +//! may just be a matter of flipping the inequality used for a database query. +//! +//! * It's easy to support sorting by a single field, and with some care it's +//! possible to support queries on multiple different fields, even at the same +//! time. An API can support listing by any unique, sortable combination of +//! fields. For example, say our Projects have a modification time ("mtime") +//! as well. We could support listing projects alphabetically by name _or_ in +//! order of most recently modified. For the latter, since the modification +//! time is generally not unique, and the marker must be unique, we'd really be +//! listing by an ("mtime" descending, "name" ascending) tuple. +//! +//! The interfaces here are intended to support this sort of use case. For APIs +//! backed by traditional RDBMS databases, see [this post for background on +//! various ways to page through a large set of data][2]. (What we're describing +//! here leverages what this post calls "keyset pagination".) +//! +//! Another consideration in designing pagination is whether the token ought to +//! be explicit and meaningful to the user or just an opaque token (likely +//! encoded in some way). It can be convenient for developers to use APIs where +//! the token is explicitly intended to be one of the fields of the object (e.g., +//! so that you could list animals starting in the middle by just requesting +//! `?animal_name=moose`), but this puts constraints on the server because +//! clients may come to depend on specific fields being supported and sorted in a +//! certain way. Dropshot takes the approach of using an encoded token that +//! includes information about the whole scan (e.g., the sort order). This makes +//! it possible to identify cases that might otherwise result in confusing +//! behavior (e.g., a client lists projects in ascending order, but then asks for +//! the next page in descending order). The token also includes a version number +//! so that it can be evolved in the future. +//! +//! +//! ## Background: Why paginate HTTP APIs in the first place? +//! +//! Pagination helps ensure that the cost of a request in terms of resource +//! utilization remains O(1) -- that is, it can be bounded above by a constant +//! rather than scaling proportionally with any of the request parameters. This +//! simplifies utilization monitoring, capacity planning, and scale-out +//! activities for the service, since operators can think of the service in terms +//! of one unit that needs to be scaled up. (It's still a very complex process, +//! but it would be significantly harder if requests weren't O(1).) +//! +//! Similarly, pagination helps ensure that the time required for a request is +//! O(1) under normal conditions. This makes it easier to define expectations +//! 
for service latency and to monitor that latency to determine if those
+//! expectations are violated.  Generally, if latency increases, then the service
+//! is unhealthy, and a crisp definition of "unhealthy" is important to operate a
+//! service with high availability.  If requests weren't O(1), an increase in
+//! latency might just reflect a changing workload that's still performing within
+//! expectations -- e.g., clients listing larger collections than they were
+//! before, but still getting results promptly.  That would make it much harder
+//! to see when the service really is unhealthy.
+//!
+//! Finally, bounding requests to O(1) work is a critical mitigation for common
+//! (if basic) denial-of-service (DoS) attacks because it requires that clients
+//! consume resources commensurate with the server costs that they're imposing.
+//! If a service exposes an API that does work proportional to some parameter,
+//! then it's cheap to launch a DoS on the service by just invoking that API with
+//! a large parameter.  By contrast, if the client has to do work that scales
+//! linearly with the work the server has to do, then the client's costs go up in
+//! order to scale up the attack.
+//!
+//! Along these lines, connections and requests consume finite server resources
+//! like open file descriptors and database connections.  If a service is built
+//! so that requests are all supposed to take about the same amount of time (or
+//! at least that there's a constant upper bound), then it may be possible to use
+//! a simple timeout scheme to cancel requests that are taking too long, as might
+//! happen if a malicious client finds some way to cause requests to hang or take
+//! a very long time.
+//!
+//! [1]: https://cloud.google.com/apis/design/design_patterns#list_pagination
+//! [2]: https://www.citusdata.com/blog/2016/03/30/five-ways-to-paginate/

 use crate::error::HttpError;
 use crate::from_map::from_map;
@@ -114,17 +112,15 @@ use std::collections::BTreeMap;
 use std::fmt::Debug;
 use std::num::NonZeroU32;

-/**
- * A page of results from a paginated API
- *
- * This structure is intended for use both on the server side (to generate the
- * results page) and on the client side (to parse it).
- */
+/// A page of results from a paginated API
+///
+/// This structure is intended for use both on the server side (to generate the
+/// results page) and on the client side (to parse it).
 #[derive(Debug, Deserialize, Serialize)]
 pub struct ResultsPage<ItemType> {
-    /** token used to fetch the next page of results (if any) */
+    /// token used to fetch the next page of results (if any)
     pub next_page: Option<String>,
-    /** list of items on this page of results */
+    /// list of items on this page of results
     pub items: Vec<ItemType>,
 }

@@ -143,24 +139,20 @@ where
 }
 }

-/**
- * A single page of results
- */
+/// A single page of results
 #[derive(JsonSchema)]
 pub struct ResultsPageSchema<ItemType> {
-    /** token used to fetch the next page of results (if any) */
+    /// token used to fetch the next page of results (if any)
     pub next_page: Option<String>,
-    /** list of items on this page of results */
+    /// list of items on this page of results
     pub items: Vec<ItemType>,
 }

 impl<ItemType> ResultsPage<ItemType> {
-    /**
-     * Construct a new results page from the list of `items`.  `page_selector`
-     * is a function used to construct the page token that clients will provide
-     * to fetch the next page of results.  `scan_params` is provided to the
-     * `page_selector` function, since the token may depend on the type of scan.
-     */
+    /// Construct a new results page from the list of `items`.  `page_selector`
+    /// is a function used to construct the page token that clients will provide
+    /// to fetch the next page of results.  `scan_params` is provided to the
+    /// `page_selector` function, since the token may depend on the type of scan.
     pub fn new<F, ScanParams, PageSelector>(
         items: Vec<ItemType>,
         scan_params: &ScanParams,
@@ -182,63 +174,57 @@ impl<ItemType> ResultsPage<ItemType> {
     }
 }

-/**
- * Querystring parameters provided by clients when scanning a paginated
- * collection
- *
- * To build an API endpoint that paginates results, you have your handler
- * function accept a `Query<PaginationParams<ScanParams, PageSelector>>` and
- * return a [`ResultsPage`].  You define your own `ScanParams` and
- * `PageSelector` types.
- *
- * `ScanParams` describes the set of querystring parameters that your endpoint
- * accepts for the _first_ request of the scan (typically: filters and sort
- * options).  This must be deserializable from a querystring.
- *
- * `PageSelector` describes the information your endpoint needs for requests
- * after the first one.  Typically this would include an id of some sort for the
- * last item on the previous page as well as any parameters related to filtering
- * or sorting so that your function can apply those, too.  The entire
- * `PageSelector` will be serialized to an opaque string and included in the
- * [`ResultsPage`].  The client is expected to provide this string as the
- * `"page_token"` querystring parameter in the subsequent request.
- * `PageSelector` must implement both [`Deserialize`] and [`Serialize`].
- * (Unlike `ScanParams`, `PageSelector` will not be deserialized directly from
- * the querystring.)
- *
- * There are several complete, documented examples in `dropshot/examples`.
- *
- * **NOTE:** Your choices of `ScanParams` and `PageSelector` determine the
- * querystring parameters accepted by your endpoint and the structure of the
- * page token, respectively.  Both of these are part of your API's public
- * interface, though the page token won't appear in the OpenAPI spec.  Be
- * careful when designing these structures to consider what you might want to
- * support in the future.
- */
+/// Querystring parameters provided by clients when scanning a paginated
+/// collection
+///
+/// To build an API endpoint that paginates results, you have your handler
+/// function accept a `Query<PaginationParams<ScanParams, PageSelector>>` and
+/// return a [`ResultsPage`].  You define your own `ScanParams` and
+/// `PageSelector` types.
+///
+/// `ScanParams` describes the set of querystring parameters that your endpoint
+/// accepts for the _first_ request of the scan (typically: filters and sort
+/// options).  This must be deserializable from a querystring.
+///
+/// `PageSelector` describes the information your endpoint needs for requests
+/// after the first one.  Typically this would include an id of some sort for the
+/// last item on the previous page as well as any parameters related to filtering
+/// or sorting so that your function can apply those, too.  The entire
+/// `PageSelector` will be serialized to an opaque string and included in the
+/// [`ResultsPage`].  The client is expected to provide this string as the
+/// `"page_token"` querystring parameter in the subsequent request.
+/// `PageSelector` must implement both [`Deserialize`] and [`Serialize`].
+/// (Unlike `ScanParams`, `PageSelector` will not be deserialized directly from
+/// the querystring.)
+///
+/// There are several complete, documented examples in `dropshot/examples`.
+///
+/// **NOTE:** Your choices of `ScanParams` and `PageSelector` determine the
+/// querystring parameters accepted by your endpoint and the structure of the
+/// page token, respectively.  Both of these are part of your API's public
+/// interface, though the page token won't appear in the OpenAPI spec.  Be
+/// careful when designing these structures to consider what you might want to
+/// support in the future.
 #[derive(Debug, Deserialize)]
 pub struct PaginationParams<ScanParams, PageSelector>
 where
     ScanParams: DeserializeOwned,
     PageSelector: DeserializeOwned + Serialize,
 {
-    /**
-     * Specifies whether this is the first request in a scan or a subsequent
-     * request, as well as the parameters provided
-     *
-     * See [`WhichPage`] for details.  Note that this field is flattened by
-     * serde, so you have to look at the variants of [`WhichPage`] to see what
-     * query parameters are actually processed here.
-     */
+    /// Specifies whether this is the first request in a scan or a subsequent
+    /// request, as well as the parameters provided
+    ///
+    /// See [`WhichPage`] for details.  Note that this field is flattened by
+    /// serde, so you have to look at the variants of [`WhichPage`] to see what
+    /// query parameters are actually processed here.
     #[serde(flatten, deserialize_with = "deserialize_whichpage")]
     pub page: WhichPage<ScanParams, PageSelector>,

-    /**
-     * Client-requested limit on page size (optional)
-     *
-     * Consumers should use
-     * [`RequestContext`][crate::handler::RequestContext::page_limit()]
-     * to access this value.
-     */
+    /// Client-requested limit on page size (optional)
+    ///
+    /// Consumers should use
+    /// [`RequestContext`][crate::handler::RequestContext::page_limit()]
+    /// to access this value.
    pub(crate) limit: Option<NonZeroU32>,
 }

@@ -259,14 +245,12 @@ where
    fn json_schema(
        gen: &mut schemars::gen::SchemaGenerator,
    ) -> schemars::schema::Schema {
-        /*
-         * We use `SchemaPaginationParams to generate an intuitive schema and
-         * we use the JSON schema extensions mechanism to communicate the fact
-         * that this is a pagination parameter.  We'll later use this to tag
-         * its associated operation as paginated.
-         * TODO we would ideally like to verify that both parameters *and*
-         * response structure are properly configured for pagination.
-         */
+        // We use `SchemaPaginationParams` to generate an intuitive schema and
+        // we use the JSON schema extensions mechanism to communicate the fact
+        // that this is a pagination parameter.  We'll later use this to tag
+        // its associated operation as paginated.
+        // TODO we would ideally like to verify that both parameters *and*
+        // response structure are properly configured for pagination.
        let mut schema = SchemaPaginationParams::<ScanParams>::json_schema(gen)
            .into_object();
        schema
@@ -276,31 +260,27 @@ where
    }
 }

-/*
- * This is the API consumer-visible interface for paginated endpoints.  We use
- * this solely to generate the schema.  User-specified parameters appear before
- * pagination boilerplate.
- */
+// This is the API consumer-visible interface for paginated endpoints.  We use
+// this solely to generate the schema.  User-specified parameters appear before
+// pagination boilerplate.
 #[derive(JsonSchema)]
 #[allow(dead_code)]
 struct SchemaPaginationParams<ScanParams> {
     #[schemars(flatten)]
     params: Option<ScanParams>,
-    /** Maximum number of items returned by a single call */
+    /// Maximum number of items returned by a single call
     limit: Option<NonZeroU32>,
-    /** Token returned by previous call to retrieve the subsequent page */
+    /// Token returned by previous call to retrieve the subsequent page
     page_token: Option<String>,
 }

-/*
- * Deserialize `WhichPage` for `PaginationParams`.  In REST APIs, callers
- * typically provide either the parameters to resume a scan (in our case, just
- * "page_token") or the parameters to begin a new one (which can be
- * any set of parameters that our consumer wants).  There's generally no
- * separate field to indicate which case they're requesting.  We deserialize into
- * a generic map first and then either interpret the page token or deserialize
- * the map into ScanParams.
- */
+// Deserialize `WhichPage` for `PaginationParams`.  In REST APIs, callers
+// typically provide either the parameters to resume a scan (in our case, just
+// "page_token") or the parameters to begin a new one (which can be
+// any set of parameters that our consumer wants).  There's generally no
+// separate field to indicate which case they're requesting.  We deserialize into
+// a generic map first and then either interpret the page token or deserialize
+// the map into ScanParams.
 fn deserialize_whichpage<'de, D, ScanParams, PageSelector>(
     deserializer: D,
 ) -> Result<WhichPage<ScanParams, PageSelector>, D::Error>
 where
@@ -325,45 +305,35 @@ where
     }
 }

-/**
- * Describes whether the client is beginning a new scan or resuming an existing
- * one
- *
- * In either case, this type provides access to consumer-defined parameters for
- * the particular type of request.  See [`PaginationParams`] for more
- * information.
- */
+/// Describes whether the client is beginning a new scan or resuming an existing
+/// one
+///
+/// In either case, this type provides access to consumer-defined parameters for
+/// the particular type of request.  See [`PaginationParams`] for more
+/// information.
 #[derive(Debug)]
 pub enum WhichPage<ScanParams, PageSelector> {
-    /**
-     * Indicates that the client is beginning a new scan
-     *
-     * `ScanParams` are the consumer-defined parameters for beginning a new scan
-     * (e.g., filters, sort options, etc.)
-     */
+    /// Indicates that the client is beginning a new scan
+    ///
+    /// `ScanParams` are the consumer-defined parameters for beginning a new scan
+    /// (e.g., filters, sort options, etc.)
     First(ScanParams),

-    /**
-     * Indicates that the client is resuming a previous scan
-     *
-     * `PageSelector` are the consumer-defined parameters for resuming a
-     * previous scan (e.g., any scan parameters, plus a marker to indicate the
-     * last result seen by the client).
-     */
+    /// Indicates that the client is resuming a previous scan
+    ///
+    /// `PageSelector` are the consumer-defined parameters for resuming a
+    /// previous scan (e.g., any scan parameters, plus a marker to indicate the
+    /// last result seen by the client).
     Next(PageSelector),
 }

-/**
- * `ScanParams` for use with `PaginationParams` when the API endpoint has no
- * scan parameters (i.e., it always iterates items in the collection in the same
- * way).
- */
+/// `ScanParams` for use with `PaginationParams` when the API endpoint has no
+/// scan parameters (i.e., it always iterates items in the collection in the same
+/// way).
 #[derive(Debug, Deserialize, JsonSchema)]
 pub struct EmptyScanParams {}

-/**
- * The order in which the client wants to page through the requested collection
- */
+/// The order in which the client wants to page through the requested collection
 #[derive(Copy, Clone, Debug, Deserialize, JsonSchema, PartialEq, Serialize)]
 #[serde(rename_all = "lowercase")]
 pub enum PaginationOrder {
     Ascending,
     Descending,
 }

-/*
- * Token and querystring serialization and deserialization
- *
- * Page tokens essentially take the consumer's PageSelector struct, add a
- * version number, serialize that as JSON, and base64-encode the result.  This
- * token is returned in any response from a paginated API, and the client will
- * pass it back as a query parameter for subsequent pagination requests.  This
- * approach allows us to rev the serialized form if needed (see
- * `PaginationVersion`) and add other metadata in a backwards-compatiable way.
- * It also emphasizes to clients that the token should be treated as opaque,
- * though it's obviously not resistant to tampering.
- */
+// Token and querystring serialization and deserialization
+//
+// Page tokens essentially take the consumer's PageSelector struct, add a
+// version number, serialize that as JSON, and base64-encode the result.  This
+// token is returned in any response from a paginated API, and the client will
+// pass it back as a query parameter for subsequent pagination requests.  This
+// approach allows us to rev the serialized form if needed (see
+// `PaginationVersion`) and add other metadata in a backwards-compatible way.
+// It also emphasizes to clients that the token should be treated as opaque,
+// though it's obviously not resistant to tampering.
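+//
+// As a concrete illustration (with a hypothetical consumer page selector that
+// serializes as `{"last_seen":"moose"}`), a v1 page token is the base64
+// encoding of JSON shaped like this:
+//
+//     {"v":"v1","page_start":{"last_seen":"moose"}}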

-/**
- * Maximum length of a page token once the consumer-provided type is serialized
- * and the result is base64-encoded
- *
- * We impose a maximum length primarily to prevent a client from making us parse
- * extremely large strings.  We apply this limit when we create tokens to avoid
- * handing out a token that can't be used.
- *
- * Note that these tokens are passed in the HTTP request line (before the
- * headers), and many HTTP implementations impose a limit as low as 8KiB on the
- * size of the request line and headers together, so it's a good idea to keep
- * this as small as we can.
- */
+/// Maximum length of a page token once the consumer-provided type is serialized
+/// and the result is base64-encoded
+///
+/// We impose a maximum length primarily to prevent a client from making us parse
+/// extremely large strings.  We apply this limit when we create tokens to avoid
+/// handing out a token that can't be used.
+///
+/// Note that these tokens are passed in the HTTP request line (before the
+/// headers), and many HTTP implementations impose a limit as low as 8KiB on the
+/// size of the request line and headers together, so it's a good idea to keep
+/// this as small as we can.
 const MAX_TOKEN_LENGTH: usize = 512;

-/**
- * Version for the pagination token serialization format
- *
- * This may seem like overkill, but it allows us to rev this in a future version
- * of Dropshot without breaking any ongoing scans when the change is deployed.
- * If we rev this, we might need to provide a way for clients to request at
- * runtime which version of token to generate so that if they do a rolling
- * upgrade of multiple instances, they can configure the instances to generate
- * v1 tokens until the rollout is complete, then switch on the new token
- * version.  Obviously, it would be better to avoid revving this version if
- * possible!
- *
- * Note that consumers still need to consider compatibility if they change their
- * own `ScanParams` or `PageSelector` types.
- */
+/// Version for the pagination token serialization format
+///
+/// This may seem like overkill, but it allows us to rev this in a future version
+/// of Dropshot without breaking any ongoing scans when the change is deployed.
+/// If we rev this, we might need to provide a way for clients to request at
+/// runtime which version of token to generate so that if they do a rolling
+/// upgrade of multiple instances, they can configure the instances to generate
+/// v1 tokens until the rollout is complete, then switch on the new token
+/// version.  Obviously, it would be better to avoid revving this version if
+/// possible!
+///
+/// Note that consumers still need to consider compatibility if they change their
+/// own `ScanParams` or `PageSelector` types.
 #[derive(Copy, Clone, Debug, Deserialize, JsonSchema, PartialEq, Serialize)]
 #[serde(rename_all = "lowercase")]
 enum PaginationVersion {
     V1,
 }

-/**
- * Parts of the pagination token that actually get serialized
- */
+/// Parts of the pagination token that actually get serialized
 #[derive(Debug, Deserialize, Serialize)]
 struct SerializedToken<PageSelector> {
     v: PaginationVersion,
     page_start: PageSelector,
 }

-/**
- * Construct a serialized page token from a consumer's page selector
- */
+/// Construct a serialized page token from a consumer's page selector
 fn serialize_page_token<PageSelector: Serialize>(
     page_start: PageSelector,
 ) -> Result<String, HttpError> {
@@ -450,14 +410,12 @@
         base64::encode_engine(json_bytes, &FastPortable::from(&URL_SAFE, PAD))
     };

-    /*
-     * TODO-robustness is there a way for us to know at compile-time that
-     * this won't be a problem?  What if we say that PageSelector has to be
-     * Sized?  That won't guarantee that this will work, but wouldn't that
-     * mean that if it ever works, then it will always work?  But would that
-     * interface be a pain to use, given that variable-length strings are
-     * very common in the token?
-     */
+    // TODO-robustness is there a way for us to know at compile-time that
+    // this won't be a problem?  What if we say that PageSelector has to be
+    // Sized?  That won't guarantee that this will work, but wouldn't that
+    // mean that if it ever works, then it will always work?  But would that
+    // interface be a pain to use, given that variable-length strings are
+    // very common in the token?
    if token_bytes.len() > MAX_TOKEN_LENGTH {
        return Err(HttpError::for_internal_error(format!(
            "serialized token is too large ({} bytes, max is {})",
@@ -469,10 +427,8 @@
    Ok(token_bytes)
 }

-/**
- * Deserialize a token from the given string into the consumer's page selector
- * type
- */
+/// Deserialize a token from the given string into the consumer's page selector
+/// type
 fn deserialize_page_token<PageSelector: DeserializeOwned>(
     token_str: &str,
 ) -> Result<PageSelector, String> {
@@ -487,17 +443,15 @@
    )
    .map_err(|e| format!("failed to parse pagination token: {}", e))?;

-    /*
-     * TODO-debugging: we don't want the user to have to know about the
-     * internal structure of the token, so the error message here doesn't
-     * say anything about that.  However, it would be nice if we could
-     * create an internal error message that included the serde_json error,
-     * which would have more context for someone looking at the server logs
-     * to figure out what happened with this request.  Our own `HttpError`
-     * supports this, but it seems like serde only preserves the to_string()
-     * output of the error anyway.  It's not clear how else we could
-     * propagate this information out.
-     */
+    // TODO-debugging: we don't want the user to have to know about the
+    // internal structure of the token, so the error message here doesn't
+    // say anything about that.  However, it would be nice if we could
+    // create an internal error message that included the serde_json error,
+    // which would have more context for someone looking at the server logs
+    // to figure out what happened with this request.  Our own `HttpError`
+    // supports this, but it seems like serde only preserves the to_string()
+    // output of the error anyway.  It's not clear how else we could
+    // propagate this information out.
    let deserialized: SerializedToken<PageSelector> =
        serde_json::from_slice(&json_bytes).map_err(|_| {
            String::from("failed to parse pagination token: corrupted token")
@@ -540,27 +494,21 @@ mod test {
            x: u16,
        }

-        /*
-         * The most basic functionality is that if we serialize something and
-         * then deserialize the result of that, we get back the original thing.
-         */
+        // The most basic functionality is that if we serialize something and
+        // then deserialize the result of that, we get back the original thing.
        let before = MyToken { x: 1025 };
        let serialized = serialize_page_token(&before).unwrap();
        let after: MyToken = deserialize_page_token(&serialized).unwrap();
        assert_eq!(after.x, 1025);

-        /*
-         * We should also sanity-check that if we try to deserialize it as the
-         * wrong type, that will fail.
-         */
+        // We should also sanity-check that if we try to deserialize it as the
+        // wrong type, that will fail.
        let error =
            deserialize_page_token::<String>(&serialized).unwrap_err();
        assert!(error.contains("corrupted token"));

-        /*
-         * Try serializing the maximum possible size.  (This was empirically
-         * determined at the time of this writing.)
-         */
+        // Try serializing the maximum possible size.  (This was empirically
+        // determined at the time of this writing.)
        #[derive(Debug, Deserialize, Serialize)]
        struct TokenWithStr {
            s: String,
        }
        let input =
            TokenWithStr { s: String::from_utf8(vec![b'e'; 352]).unwrap() };
        let serialized = serialize_page_token(&input).unwrap();
        assert_eq!(serialized.len(), MAX_TOKEN_LENGTH);
        let output: TokenWithStr = deserialize_page_token(&serialized).unwrap();
        assert_eq!(input.s, output.s);

-        /*
-         * Error cases make up the rest of this test.
-         *
-         * Start by attempting to serialize a token larger than the maximum
-         * allowed size.
-         */
+        // Error cases make up the rest of this test.
+        //
+        // Start by attempting to serialize a token larger than the maximum
+        // allowed size.
        let input =
            TokenWithStr { s: String::from_utf8(vec![b'e'; 353]).unwrap() };
        let error = serialize_page_token(&input).unwrap_err();
@@ -587,30 +533,30 @@ mod test {
            .internal_message
            .contains("serialized token is too large"));

-        /* Non-base64 */
+        // Non-base64
        let error =
            deserialize_page_token::<MyToken>("not base 64").unwrap_err();
        assert!(error.contains("failed to parse"));

-        /* Non-JSON */
+        // Non-JSON
        let error =
            deserialize_page_token::<MyToken>(&base64::encode("{"))
                .unwrap_err();
        assert!(error.contains("corrupted token"));

-        /* Wrong top-level JSON type */
+        // Wrong top-level JSON type
        let error =
            deserialize_page_token::<MyToken>(&base64::encode("[]"))
                .unwrap_err();
        assert!(error.contains("corrupted token"));

-        /* Structure does not match our general Dropshot schema. */
+        // Structure does not match our general Dropshot schema.
        let error =
            deserialize_page_token::<MyToken>(&base64::encode("{}"))
                .unwrap_err();
        assert!(error.contains("corrupted token"));

-        /* Bad version */
+        // Bad version
        let error = deserialize_page_token::<MyToken>(&base64::encode(
            "{\"v\":11}",
        ))
        .unwrap_err();
        assert!(error.contains("corrupted token"));
    }

-    /*
-     * It's worth testing parsing around PaginationParams and WhichPage because
-     * is a little non-trivial, owing to the use of untagged enums (which rely
-     * on the ordering of fields), some optional fields, an extra layer of
-     * indirection using `TryFrom`, etc.
-     *
-     * This is also the primary place where we test things like non-positive
-     * values of "limit" being rejected, so even though the implementation in
-     * our code is trivial, this functions more like an integration or system
-     * test for those parameters.
-     */
+    // It's worth testing parsing around PaginationParams and WhichPage because
+    // it's a little non-trivial, owing to the use of untagged enums (which rely
+    // on the ordering of fields), some optional fields, an extra layer of
+    // indirection using `TryFrom`, etc.
+    //
+    // This is also the primary place where we test things like non-positive
+    // values of "limit" being rejected, so even though the implementation in
+    // our code is trivial, this functions more like an integration or system
+    // test for those parameters.
#[test] fn test_pagparams_parsing() { #[derive(Debug, Deserialize, Serialize)] @@ -653,9 +597,7 @@ mod test { the_page: u8, } - /* - * "First page" cases - */ + // "First page" cases fn parse_as_first_page( querystring: &str, @@ -670,7 +612,7 @@ mod test { (scan_params, limit) } - /* basic case: optional boolean specified, limit unspecified */ + // basic case: optional boolean specified, limit unspecified let (scan, limit) = parse_as_first_page::( "the_field=name&only_good=true&how_many=42&really=false", ); @@ -680,7 +622,7 @@ mod test { assert_eq!(scan.really, false); assert_eq!(limit, None); - /* optional boolean specified but false, limit unspecified */ + // optional boolean specified but false, limit unspecified let (scan, limit) = parse_as_first_page::( "the_field=&only_good=false&how_many=42&really=false", ); @@ -690,7 +632,7 @@ mod test { assert_eq!(scan.really, false); assert_eq!(limit, None); - /* optional boolean unspecified, limit is valid */ + // optional boolean unspecified, limit is valid let (scan, limit) = parse_as_first_page::( "the_field=name&limit=3&how_many=42&really=false", ); @@ -700,13 +642,13 @@ mod test { assert_eq!(scan.really, false); assert_eq!(limit.unwrap().get(), 3); - /* empty query string when all parameters are optional */ + // empty query string when all parameters are optional let (scan, limit) = parse_as_first_page::(""); assert_eq!(scan.the_field, None); assert_eq!(scan.only_good, None); assert_eq!(limit, None); - /* extra parameters are fine */ + // extra parameters are fine let (scan, limit) = parse_as_first_page::( "the_field=name&limit=17&boomtown=okc&how_many=42", ); @@ -715,13 +657,11 @@ mod test { assert_eq!(scan.how_many, Some(42)); assert_eq!(limit.unwrap().get(), 17); - /* - * Error cases, including errors parsing first page parameters. - * - * TODO-polish The actual error messages for the following cases are - * pretty poor, so we don't test them here, but we should clean these - * up. - */ + // Error cases, including errors parsing first page parameters. + // + // TODO-polish The actual error messages for the following cases are + // pretty poor, so we don't test them here, but we should clean these + // up. fn parse_as_error(querystring: &str) -> serde_urlencoded::de::Error { serde_urlencoded::from_str::< PaginationParams, @@ -729,22 +669,18 @@ mod test { .unwrap_err() } - /* missing required field ("the_field") */ + // missing required field ("the_field") parse_as_error(""); - /* invalid limit (number out of range) */ + // invalid limit (number out of range) parse_as_error("the_field=name&limit=0"); parse_as_error("the_field=name&limit=-3"); - /* invalid limit (not a number) */ + // invalid limit (not a number) parse_as_error("the_field=name&limit=abcd"); - /* - * Invalid page token (bad base64 length) - * Other test cases for deserializing tokens are tested elsewhere. - */ + // Invalid page token (bad base64 length) + // Other test cases for deserializing tokens are tested elsewhere. 
parse_as_error("page_token=q"); - /* - * "Next page" cases - */ + // "Next page" cases fn parse_as_next_page( querystring: &str, @@ -759,7 +695,7 @@ mod test { (page_selector, limit) } - /* basic case */ + // basic case let token = serialize_page_token(&MyPageSelector { the_page: 123 }).unwrap(); let (page_selector, limit) = @@ -767,16 +703,14 @@ mod test { assert_eq!(page_selector.the_page, 123); assert_eq!(limit, None); - /* limit is also accepted */ + // limit is also accepted let (page_selector, limit) = parse_as_next_page(&format!("page_token={}&limit=12", token)); assert_eq!(page_selector.the_page, 123); assert_eq!(limit.unwrap().get(), 12); - /* - * Having parameters appropriate to the scan params doesn't change the - * way this is interpreted. - */ + // Having parameters appropriate to the scan params doesn't change the + // way this is interpreted. let (page_selector, limit) = parse_as_next_page(&format!( "the_field=name&page_token={}&limit=3", token @@ -784,15 +718,13 @@ mod test { assert_eq!(page_selector.the_page, 123); assert_eq!(limit.unwrap().get(), 3); - /* invalid limits (same as above) */ + // invalid limits (same as above) parse_as_error(&format!("page_token={}&limit=0", token)); parse_as_error(&format!("page_token={}&limit=-3", token)); - /* - * We ought not to promise much about what happens if the user's - * ScanParams has a "page_token" field. In practice, ours always takes - * precedence (and it's not clear how else this could work). - */ + // We ought not to promise much about what happens if the user's + // ScanParams has a "page_token" field. In practice, ours always takes + // precedence (and it's not clear how else this could work). #[derive(Debug, Deserialize)] #[allow(dead_code)] struct SketchyScanParams { @@ -815,11 +747,9 @@ mod test { #[test] fn test_results_page() { - /* - * It would be a neat paginated fibonacci API if the page selector was - * just the last two numbers! Dropshot doesn't support that and it's - * not clear that's a practical use case anyway. - */ + // It would be a neat paginated fibonacci API if the page selector was + // just the last two numbers! Dropshot doesn't support that and it's + // not clear that's a practical use case anyway. let items = vec![1, 1, 2, 3, 5, 8, 13]; let dummy_scan_params = 21; #[derive(Debug, Deserialize, Serialize)] diff --git a/dropshot/src/router.rs b/dropshot/src/router.rs index 936d8d6ad..60c843f0f 100644 --- a/dropshot/src/router.rs +++ b/dropshot/src/router.rs @@ -1,7 +1,5 @@ // Copyright 2021 Oxide Computer Company -/*! - * Routes incoming HTTP requests to handler functions - */ +//! Routes incoming HTTP requests to handler functions use super::error::HttpError; use super::handler::RouteHandler; @@ -17,114 +15,106 @@ use percent_encoding::percent_decode_str; use std::collections::BTreeMap; use std::collections::BTreeSet; -/** - * `HttpRouter` is a simple data structure for routing incoming HTTP requests to - * specific handler functions based on the request method and URI path. For - * examples, see the basic test below. - * - * Routes are registered and looked up according to a path, like `"/foo/bar"`. - * Paths are split into segments separated by one or more '/' characters. When - * registering a route, a path segment may be either a literal string or a - * variable. Variables are specified by wrapping the segment in braces. - * - * For example, a handler registered for `"/foo/bar"` will match only - * `"/foo/bar"` (after normalization, that is -- it will also match - * `"/foo///bar"`). 
A handler registered for `"/foo/{bar}"` uses a - * variable for the second segment, so it will match `"/foo/123"` (with `"bar"` - * assigned to `"123"`) as well as `"/foo/bar456"` (with `"bar"` mapped to - * `"bar456"`). Only one segment is matched per variable, so `"/foo/{bar}"` - * will not match `"/foo/123/456"`. - * - * The implementation here is essentially a trie where edges represent segments - * of the URI path. ("Segments" here are chunks of the path separated by one or - * more "/" characters.) To register or look up the path `"/foo/bar/baz"`, we - * would start at the root and traverse edges for the literal strings `"foo"`, - * `"bar"`, and `"baz"`, arriving at a particular node. Each node has a set of - * handlers, each associated with one HTTP method. - * - * We make (and, in some cases, enforce) a number of simplifying assumptions. - * These could be relaxed, but it's not clear that's useful, and enforcing them - * makes it easier to catch some types of bugs: - * - * * A particular resource (node) may have child resources (edges) with either - * literal path segments or variable path segments, but not both. For - * example, you can't register both `"/projects/{id}"` and - * `"/projects/default"`. - * - * * If a given resource has an edge with a variable name, all routes through - * this node must use the same name for that variable. That is, you can't - * define routes for `"/projects/{id}"` and `"/projects/{project_id}/info"`. - * - * * A given path cannot use the same variable name twice. For example, you - * can't register path `"/projects/{id}/instances/{id}"`. - * - * * A given resource may have at most one handler for a given HTTP method. - * - * * The expectation is that during server initialization, - * `HttpRouter::insert()` will be invoked to register a number of route - * handlers. After that initialization period, the router will be - * read-only. This behavior isn't enforced by `HttpRouter`. - */ +/// `HttpRouter` is a simple data structure for routing incoming HTTP requests to +/// specific handler functions based on the request method and URI path. For +/// examples, see the basic test below. +/// +/// Routes are registered and looked up according to a path, like `"/foo/bar"`. +/// Paths are split into segments separated by one or more '/' characters. When +/// registering a route, a path segment may be either a literal string or a +/// variable. Variables are specified by wrapping the segment in braces. +/// +/// For example, a handler registered for `"/foo/bar"` will match only +/// `"/foo/bar"` (after normalization, that is -- it will also match +/// `"/foo///bar"`). A handler registered for `"/foo/{bar}"` uses a +/// variable for the second segment, so it will match `"/foo/123"` (with `"bar"` +/// assigned to `"123"`) as well as `"/foo/bar456"` (with `"bar"` mapped to +/// `"bar456"`). Only one segment is matched per variable, so `"/foo/{bar}"` +/// will not match `"/foo/123/456"`. +/// +/// The implementation here is essentially a trie where edges represent segments +/// of the URI path. ("Segments" here are chunks of the path separated by one or +/// more "/" characters.) To register or look up the path `"/foo/bar/baz"`, we +/// would start at the root and traverse edges for the literal strings `"foo"`, +/// `"bar"`, and `"baz"`, arriving at a particular node. Each node has a set of +/// handlers, each associated with one HTTP method. +/// +/// We make (and, in some cases, enforce) a number of simplifying assumptions. 
+/// These could be relaxed, but it's not clear that's useful, and enforcing them
+/// makes it easier to catch some types of bugs:
+///
+/// * A particular resource (node) may have child resources (edges) with either
+///   literal path segments or variable path segments, but not both. For
+///   example, you can't register both `"/projects/{id}"` and
+///   `"/projects/default"`.
+///
+/// * If a given resource has an edge with a variable name, all routes through
+///   this node must use the same name for that variable. That is, you can't
+///   define routes for `"/projects/{id}"` and `"/projects/{project_id}/info"`.
+///
+/// * A given path cannot use the same variable name twice. For example, you
+///   can't register path `"/projects/{id}/instances/{id}"`.
+///
+/// * A given resource may have at most one handler for a given HTTP method.
+///
+/// * The expectation is that during server initialization,
+///   `HttpRouter::insert()` will be invoked to register a number of route
+///   handlers. After that initialization period, the router will be
+///   read-only. This behavior isn't enforced by `HttpRouter`.
 #[derive(Debug)]
 pub struct HttpRouter<Context: ServerContext> {
-    /** root of the trie */
+    /// root of the trie
     root: Box<HttpRouterNode<Context>>,
 }
 
-/**
- * Each node in the tree represents a group of HTTP resources having the same
- * handler functions. As described above, these may correspond to exactly one
- * canonical path (e.g., `"/foo/bar"`) or a set of paths that differ by some
- * number of variable assignments (e.g., `"/projects/123/instances"` and
- * `"/projects/456/instances"`).
- *
- * Edges of the tree come in one of type types: edges for literal strings and
- * edges for variable strings. A given node has either literal string edges or
- * variable edges, but not both. However, we don't necessarily know what type
- * of outgoing edges a node will have when we create it.
- */
+/// Each node in the tree represents a group of HTTP resources having the same
+/// handler functions. As described above, these may correspond to exactly one
+/// canonical path (e.g., `"/foo/bar"`) or a set of paths that differ by some
+/// number of variable assignments (e.g., `"/projects/123/instances"` and
+/// `"/projects/456/instances"`).
+///
+/// Edges of the tree come in one of two types: edges for literal strings and
+/// edges for variable strings. A given node has either literal string edges or
+/// variable edges, but not both. However, we don't necessarily know what type
+/// of outgoing edges a node will have when we create it.
 #[derive(Debug)]
 struct HttpRouterNode<Context: ServerContext> {
-    /** Handlers, etc. for each of the HTTP methods defined for this node. */
+    /// Handlers, etc. for each of the HTTP methods defined for this node.
     methods: BTreeMap<String, ApiEndpoint<Context>>,
-    /** Edges linking to child nodes. */
+    /// Edges linking to child nodes.
     edges: Option<HttpRouterEdges<Context>>,
 }
 
 #[derive(Debug)]
 enum HttpRouterEdges<Context: ServerContext> {
-    /** Outgoing edges for literal paths. */
+    /// Outgoing edges for literal paths.
     Literals(BTreeMap<String, Box<HttpRouterNode<Context>>>),
-    /** Outgoing edge for variable-named paths. */
+    /// Outgoing edge for variable-named paths.
     VariableSingle(String, Box<HttpRouterNode<Context>>),
-    /** Outgoing edge that consumes all remaining components. */
+    /// Outgoing edge that consumes all remaining components.
     VariableRest(String, Box<HttpRouterNode<Context>>),
 }
 
-/**
- * `PathSegment` represents a segment in a URI path when the router is being
- * configured. Each segment may be either a literal string or a variable (the
- * latter indicated by being wrapped in braces).
Variables may consume a single - * /-delimited segment or several as defined by a regex (currently only `.*` is - * supported). - */ +/// `PathSegment` represents a segment in a URI path when the router is being +/// configured. Each segment may be either a literal string or a variable (the +/// latter indicated by being wrapped in braces). Variables may consume a single +/// /-delimited segment or several as defined by a regex (currently only `.*` is +/// supported). #[derive(Debug, PartialEq)] pub enum PathSegment { - /** a path segment for a literal string */ + /// a path segment for a literal string Literal(String), - /** a path segment for a variable */ + /// a path segment for a variable VarnameSegment(String), - /** a path segment that matches all remaining components for a variable */ + /// a path segment that matches all remaining components for a variable VarnameWildcard(String), } impl PathSegment { - /** - * Given a `&str` representing a path segment from a Uri, return a - * PathSegment. This is used to parse a sequence of path segments to the - * corresponding `PathSegment`, which basically means determining whether - * it's a variable or a literal. - */ + /// Given a `&str` representing a path segment from a Uri, return a + /// PathSegment. This is used to parse a sequence of path segments to the + /// corresponding `PathSegment`, which basically means determining whether + /// it's a variable or a literal. pub fn from(segment: &str) -> PathSegment { if segment.starts_with('{') || segment.ends_with('}') { assert!( @@ -146,12 +136,10 @@ impl PathSegment { (var, None) }; - /* - * Note that the only constraint on the variable name is that it is - * not empty. Consumers may choose odd names like '_' or 'type' - * that are not valid Rust identifiers and rename them with - * serde attributes during deserialization. - */ + // Note that the only constraint on the variable name is that it is + // not empty. Consumers may choose odd names like '_' or 'type' + // that are not valid Rust identifiers and rename them with + // serde attributes during deserialization. assert!( !var.is_empty(), "HTTP URI path segment variable name must not be empty", @@ -172,10 +160,8 @@ impl PathSegment { } } -/** - * Wrapper for a path that's the result of user input i.e. an HTTP query. - * We use this type to avoid confusion with paths used to define routes. - */ +/// Wrapper for a path that's the result of user input i.e. an HTTP query. +/// We use this type to avoid confusion with paths used to define routes. #[derive(Debug)] pub struct InputPath<'a>(&'a str); @@ -185,10 +171,8 @@ impl<'a> From<&'a str> for InputPath<'a> { } } -/** - * A value for a variable which may either be a single value or a list of - * values in the case of wildcard path matching. - */ +/// A value for a variable which may either be a single value or a list of +/// values in the case of wildcard path matching. #[derive(Debug, Clone, PartialEq, Eq)] pub enum VariableValue { String(String), @@ -217,13 +201,11 @@ impl MapValue for VariableValue { } } -/** - * `RouterLookupResult` represents the result of invoking - * `HttpRouter::lookup_route()`. A successful route lookup includes - * the handler, a mapping of variables in the configured path to the - * corresponding values in the actual path, and the expected body - * content type. - */ +/// `RouterLookupResult` represents the result of invoking +/// `HttpRouter::lookup_route()`. 
A successful route lookup includes +/// the handler, a mapping of variables in the configured path to the +/// corresponding values in the actual path, and the expected body +/// content type. #[derive(Debug)] pub struct RouterLookupResult<'a, Context: ServerContext> { pub handler: &'a dyn RouteHandler, @@ -238,18 +220,14 @@ impl HttpRouterNode { } impl HttpRouter { - /** - * Returns a new `HttpRouter` with no routes configured. - */ + /// Returns a new `HttpRouter` with no routes configured. pub fn new() -> Self { HttpRouter { root: Box::new(HttpRouterNode::new()) } } - /** - * Configure a route for HTTP requests based on the HTTP `method` and - * URI `path`. See the `HttpRouter` docs for information about how `path` - * is processed. Requests matching `path` will be resolved to `handler`. - */ + /// Configure a route for HTTP requests based on the HTTP `method` and + /// URI `path`. See the `HttpRouter` docs for information about how `path` + /// is processed. Requests matching `path` will be resolved to `handler`. pub fn insert(&mut self, endpoint: ApiEndpoint) { let method = endpoint.method.clone(); let path = endpoint.path.clone(); @@ -269,12 +247,10 @@ impl HttpRouter { HttpRouterEdges::Literals(BTreeMap::new()), ); match edges { - /* - * We do not allow both literal and variable edges from - * the same node. This could be supported (with some - * caveats about how matching would work), but it seems - * more likely to be a mistake. - */ + // We do not allow both literal and variable edges from + // the same node. This could be supported (with some + // caveats about how matching would work), but it seems + // more likely to be a mistake. HttpRouterEdges::VariableSingle(varname, _) | HttpRouterEdges::VariableRest(varname, _) => { panic!( @@ -301,10 +277,8 @@ impl HttpRouter { ), ); match edges { - /* - * See the analogous check above about combining literal - * and variable path segments from the same resource. - */ + // See the analogous check above about combining literal + // and variable path segments from the same resource. HttpRouterEdges::Literals(_) => panic!( "URI path \"{}\": attempted to register route for \ variable path segment (variable name: \"{}\") \ @@ -326,12 +300,10 @@ impl HttpRouter { ref mut node, ) => { if *new_varname != *varname { - /* - * Don't allow people to use different names for - * the same part of the path. Again, this could - * be supported, but it seems likely to be - * confusing and probably a mistake. - */ + // Don't allow people to use different names for + // the same part of the path. Again, this could + // be supported, but it seems likely to be + // confusing and probably a mistake. panic!( "URI path \"{}\": attempted to use \ variable name \"{}\", but a different \ @@ -423,14 +395,12 @@ impl HttpRouter { node.methods.insert(methodname, endpoint); } - /** - * Look up the route handler for an HTTP request having method `method` and - * URI path `path`. A successful lookup produces a `RouterLookupResult`, - * which includes both the handler that can process this request and a map - * of variables assigned based on the request path as part of the lookup. - * On failure, this returns an `HttpError` appropriate for the failure - * mode. - */ + /// Look up the route handler for an HTTP request having method `method` and + /// URI path `path`. A successful lookup produces a `RouterLookupResult`, + /// which includes both the handler that can process this request and a map + /// of variables assigned based on the request path as part of the lookup. 
+ /// On failure, this returns an `HttpError` appropriate for the failure + /// mode. pub fn lookup_route<'a, 'b>( &'a self, method: &'b Method, @@ -471,10 +441,8 @@ impl HttpRouter { varname.clone(), VariableValue::Components(rest), ); - /* - * There should be no outgoing edges since this is by - * definition a terminal node - */ + // There should be no outgoing edges since this is by + // definition a terminal node assert!(node.edges.is_none()); Some(node) } @@ -487,24 +455,20 @@ impl HttpRouter { })? } - /* - * The wildcard match consumes the implicit, empty path segment - */ + // The wildcard match consumes the implicit, empty path segment match &node.edges { Some(HttpRouterEdges::VariableRest(varname, new_node)) => { variables .insert(varname.clone(), VariableValue::Components(vec![])); - /* There should be no outgoing edges */ + // There should be no outgoing edges assert!(new_node.edges.is_none()); node = new_node; } _ => {} } - /* - * As a somewhat special case, if one requests a node with no handlers - * at all, report a 404. We could probably treat this as a 405 as well. - */ + // As a somewhat special case, if one requests a node with no handlers + // at all, report a 404. We could probably treat this as a 405 as well. if node.methods.is_empty() { return Err(HttpError::for_not_found( None, @@ -526,19 +490,15 @@ impl HttpRouter { } } -/** - * Insert a variable into the set after checking for duplicates. - */ +/// Insert a variable into the set after checking for duplicates. fn insert_var( path: &str, varnames: &mut BTreeSet, new_varname: &String, ) -> () { - /* - * Do not allow the same variable name to be used more than - * once in the path. Again, this could be supported (with - * some caveats), but it seems more likely to be a mistake. - */ + // Do not allow the same variable name to be used more than + // once in the path. Again, this could be supported (with + // some caveats), but it seems more likely to be a mistake. if varnames.contains(new_varname) { panic!( "URI path \"{}\": variable name \"{}\" is used more than once", @@ -556,16 +516,14 @@ impl<'a, Context: ServerContext> IntoIterator for &'a HttpRouter { } } -/** - * Route Interator implementation. We perform a preorder, depth first traversal - * of the tree starting from the root node. For each node, we enumerate the - * methods and then descend into its children (or single child in the case of - * path parameter variables). `method` holds the iterator over the current - * node's `methods`; `path` is a stack that represents the current collection - * of path segments and the iterators at each corresponding node. We start with - * the root node's `methods` iterator and a stack consisting of a - * blank string and an iterator over the root node's children. - */ +/// Route Interator implementation. We perform a preorder, depth first traversal +/// of the tree starting from the root node. For each node, we enumerate the +/// methods and then descend into its children (or single child in the case of +/// path parameter variables). `method` holds the iterator over the current +/// node's `methods`; `path` is a stack that represents the current collection +/// of path segments and the iterators at each corresponding node. We start with +/// the root node's `methods` iterator and a stack consisting of a +/// blank string and an iterator over the root node's children. 
pub struct HttpRouterIter<'a, Context: ServerContext> { method: Box)> + 'a>, @@ -585,12 +543,10 @@ impl<'a, Context: ServerContext> HttpRouterIter<'a, Context> { } } - /** - * Produce an iterator over `node`'s children. This is the null (empty) - * iterator if there are no children, a single (once) iterator for a - * path parameter variable, and a modified iterator in the case of - * literal, explicit path segments. - */ + /// Produce an iterator over `node`'s children. This is the null (empty) + /// iterator if there are no children, a single (once) iterator for a + /// path parameter variable, and a modified iterator in the case of + /// literal, explicit path segments. fn iter_node( node: &'a HttpRouterNode, ) -> Box> { @@ -615,9 +571,7 @@ impl<'a, Context: ServerContext> HttpRouterIter<'a, Context> { } } - /** - * Produce a human-readable path from the current vector of path segments. - */ + /// Produce a human-readable path from the current vector of path segments. fn path(&self) -> String { // Ignore the leading element as that's just a placeholder. let components: Vec = self.path[1..] @@ -673,62 +627,58 @@ impl<'a, Context: ServerContext> Iterator for HttpRouterIter<'a, Context> { } } -/** - * Helper function for taking a Uri path and producing a `Vec` of - * URL-decoded strings, each representing one segment of the path. The input is - * percent-encoded. Empty segments i.e. due to consecutive "/" characters or a - * leading "/" are omitted. - * - * Regarding "dot-segments" ("." and ".."), RFC 3986 section 3.3 says this: - * The path segments "." and "..", also known as dot-segments, are - * defined for relative reference within the path name hierarchy. They - * are intended for use at the beginning of a relative-path reference - * (Section 4.2) to indicate relative position within the hierarchical - * tree of names. This is similar to their role within some operating - * systems' file directory structures to indicate the current directory - * and parent directory, respectively. However, unlike in a file - * system, these dot-segments are only interpreted within the URI path - * hierarchy and are removed as part of the resolution process (Section - * 5.2). - * - * While nothing prohibits APIs from including dot-segments. We see no strong - * case for allowing them in paths, and plenty of pitfalls if we were to - * require consumers to consider them (e.g. "GET /../../../etc/passwd"). Note - * that consumers may be susceptible to other information leaks, for example - * if a client were able to follow a symlink to the root of the filesystem. As - * always, it is incumbent on the consumer and *critical* to validate input. - */ +/// Helper function for taking a Uri path and producing a `Vec` of +/// URL-decoded strings, each representing one segment of the path. The input is +/// percent-encoded. Empty segments i.e. due to consecutive "/" characters or a +/// leading "/" are omitted. +/// +/// Regarding "dot-segments" ("." and ".."), RFC 3986 section 3.3 says this: +/// The path segments "." and "..", also known as dot-segments, are +/// defined for relative reference within the path name hierarchy. They +/// are intended for use at the beginning of a relative-path reference +/// (Section 4.2) to indicate relative position within the hierarchical +/// tree of names. This is similar to their role within some operating +/// systems' file directory structures to indicate the current directory +/// and parent directory, respectively. 
However, unlike in a file +/// system, these dot-segments are only interpreted within the URI path +/// hierarchy and are removed as part of the resolution process (Section +/// 5.2). +/// +/// While nothing prohibits APIs from including dot-segments. We see no strong +/// case for allowing them in paths, and plenty of pitfalls if we were to +/// require consumers to consider them (e.g. "GET /../../../etc/passwd"). Note +/// that consumers may be susceptible to other information leaks, for example +/// if a client were able to follow a symlink to the root of the filesystem. As +/// always, it is incumbent on the consumer and *critical* to validate input. fn input_path_to_segments(path: &InputPath) -> Result, String> { - /* - * We're given the "path" portion of a URI and we want to construct an - * array of the segments of the path. Relevant references: - * - * RFC 7230 HTTP/1.1 Syntax and Routing - * (particularly: 2.7.3 on normalization) - * RFC 3986 Uniform Resource Identifier (URI): Generic Syntax - * (particularly: 6.2.2 on comparison) - * - * TODO-hardening We should revisit this. We want to consider a couple of - * things: - * - what it means (and what we should do) if the path does not begin with - * a leading "/" - * - how to handle paths that end in "/" (in some cases, ought this send a - * 300-level redirect?) - * - * It would seem obvious to reach for the Rust "url" crate. That crate - * parses complete URLs, which include a scheme and authority section that - * does not apply here. We could certainly make one up (e.g., - * "http://127.0.0.1") and construct a URL whose path matches the path we - * were given. However, while it seems natural that our internal - * representation would not be percent-encoded, the "url" crate - * percent-encodes any path that it's given. Further, we probably want to - * treat consecutive "/" characters as equivalent to a single "/", but that - * crate treats them separately (which is not unreasonable, since it's not - * clear that the above RFCs say anything about whether empty segments - * should be ignored). The net result is that that crate doesn't buy us - * much here, but it does create more work, so we'll just split it - * ourselves. - */ + // We're given the "path" portion of a URI and we want to construct an + // array of the segments of the path. Relevant references: + // + // RFC 7230 HTTP/1.1 Syntax and Routing + // (particularly: 2.7.3 on normalization) + // RFC 3986 Uniform Resource Identifier (URI): Generic Syntax + // (particularly: 6.2.2 on comparison) + // + // TODO-hardening We should revisit this. We want to consider a couple of + // things: + // - what it means (and what we should do) if the path does not begin with + // a leading "/" + // - how to handle paths that end in "/" (in some cases, ought this send a + // 300-level redirect?) + // + // It would seem obvious to reach for the Rust "url" crate. That crate + // parses complete URLs, which include a scheme and authority section that + // does not apply here. We could certainly make one up (e.g., + // "http://127.0.0.1") and construct a URL whose path matches the path we + // were given. However, while it seems natural that our internal + // representation would not be percent-encoded, the "url" crate + // percent-encodes any path that it's given. 
Further, we probably want to + // treat consecutive "/" characters as equivalent to a single "/", but that + // crate treats them separately (which is not unreasonable, since it's not + // clear that the above RFCs say anything about whether empty segments + // should be ignored). The net result is that that crate doesn't buy us + // much here, but it does create more work, so we'll just split it + // ourselves. path.0 .split('/') .filter(|segment| !segment.is_empty()) @@ -742,14 +692,12 @@ fn input_path_to_segments(path: &InputPath) -> Result, String> { .collect() } -/** - * Whereas in `input_path_to_segments()` we must accommodate any user input, when - * processing paths specified by the client program we can be more stringent and - * fail via a panic! rather than an error. We do not percent-decode the path - * meaning that programs may specify path segments that would require - * percent-encoding by clients. Paths *must* begin with a "/"; only the final - * segment may be empty i.e. the path may end with a "/". - */ +/// Whereas in `input_path_to_segments()` we must accommodate any user input, when +/// processing paths specified by the client program we can be more stringent and +/// fail via a panic! rather than an error. We do not percent-decode the path +/// meaning that programs may specify path segments that would require +/// percent-encoding by clients. Paths *must* begin with a "/"; only the final +/// segment may be empty i.e. the path may end with a "/". pub fn route_path_to_segments(path: &str) -> Vec<&str> { if !matches!(path.chars().next(), Some('/')) { panic!("route paths must begin with a '/': '{}'", path); @@ -1001,11 +949,9 @@ mod test { )); } - /* - * TODO: We allow a trailing slash after the wildcard specifier, but we may - * reconsider this if we decided to distinguish between the presence or - * absence of the trailing slash. - */ + // TODO: We allow a trailing slash after the wildcard specifier, but we may + // reconsider this if we decided to distinguish between the presence or + // absence of the trailing slash. #[test] fn test_slash_after_wildcard_is_fine_dot_dot_dot_for_now() { let mut router = HttpRouter::new(); @@ -1020,9 +966,7 @@ mod test { fn test_error_cases() { let mut router = HttpRouter::new(); - /* - * Check a few initial conditions. - */ + // Check a few initial conditions. let error = router.lookup_route(&Method::GET, "/".into()).unwrap_err(); assert_eq!(error.status_code, StatusCode::NOT_FOUND); let error = @@ -1036,10 +980,8 @@ mod test { .unwrap_err(); assert_eq!(error.status_code, StatusCode::NOT_FOUND); - /* - * Insert a route into the middle of the tree. This will let us look at - * parent nodes, sibling nodes, and child nodes. - */ + // Insert a route into the middle of the tree. This will let us look at + // parent nodes, sibling nodes, and child nodes. router.insert(new_endpoint(new_handler(), Method::GET, "/foo/bar")); assert!(router.lookup_route(&Method::GET, "/foo/bar".into()).is_ok()); assert!(router.lookup_route(&Method::GET, "/foo/bar/".into()).is_ok()); @@ -1052,11 +994,9 @@ mod test { .lookup_route(&Method::GET, "///foo///bar///".into()) .is_ok()); - /* - * TODO-cleanup: consider having a "build" step that constructs a - * read-only router and does validation like making sure that there's a - * GET route on all nodes? - */ + // TODO-cleanup: consider having a "build" step that constructs a + // read-only router and does validation like making sure that there's a + // GET route on all nodes? 
let error = router.lookup_route(&Method::GET, "/".into()).unwrap_err(); assert_eq!(error.status_code, StatusCode::NOT_FOUND); let error = @@ -1082,12 +1022,10 @@ mod test { fn test_router_basic() { let mut router = HttpRouter::new(); - /* - * Insert a handler at the root and verify that we get that handler - * back, even if we use different names that normalize to "/". - * Before we start, sanity-check that there's nothing at the root - * already. Other test cases examine the errors in more detail. - */ + // Insert a handler at the root and verify that we get that handler + // back, even if we use different names that normalize to "/". + // Before we start, sanity-check that there's nothing at the root + // already. Other test cases examine the errors in more detail. assert!(router.lookup_route(&Method::GET, "/".into()).is_err()); router.insert(new_endpoint(new_handler_named("h1"), Method::GET, "/")); let result = router.lookup_route(&Method::GET, "/".into()).unwrap(); @@ -1100,12 +1038,10 @@ mod test { assert_eq!(result.handler.label(), "h1"); assert!(result.variables.is_empty()); - /* - * Now insert a handler for a different method at the root. Verify that - * we get both this handler and the previous one if we ask for the - * corresponding method and that we get no handler for a different, - * third method. - */ + // Now insert a handler for a different method at the root. Verify that + // we get both this handler and the previous one if we ask for the + // corresponding method and that we get no handler for a different, + // third method. assert!(router.lookup_route(&Method::PUT, "/".into()).is_err()); router.insert(new_endpoint(new_handler_named("h2"), Method::PUT, "/")); let result = router.lookup_route(&Method::PUT, "/".into()).unwrap(); @@ -1116,11 +1052,9 @@ mod test { assert!(router.lookup_route(&Method::DELETE, "/".into()).is_err()); assert!(result.variables.is_empty()); - /* - * Now insert a handler one level deeper. Verify that all the previous - * handlers behave as we expect, and that we have one handler at the new - * path, whichever name we use for it. - */ + // Now insert a handler one level deeper. Verify that all the previous + // handlers behave as we expect, and that we have one handler at the new + // path, whichever name we use for it. assert!(router.lookup_route(&Method::GET, "/foo".into()).is_err()); router.insert(new_endpoint( new_handler_named("h3"), @@ -1155,10 +1089,8 @@ mod test { #[test] fn test_embedded_non_variable() { - /* - * This isn't an important use case today, but we'd like to know if we - * change the behavior, intentionally or otherwise. - */ + // This isn't an important use case today, but we'd like to know if we + // change the behavior, intentionally or otherwise. let mut router = HttpRouter::new(); assert!(router .lookup_route(&Method::GET, "/not{a}variable".into()) @@ -1183,9 +1115,7 @@ mod test { #[test] fn test_variables_basic() { - /* - * Basic test using a variable. - */ + // Basic test using a variable. let mut router = HttpRouter::new(); router.insert(new_endpoint( new_handler_named("h5"), @@ -1227,7 +1157,7 @@ mod test { *result.variables.get("project_id").unwrap(), VariableValue::String("p12345".to_string()) ); - /* Trick question! */ + // Trick question! let result = router .lookup_route(&Method::GET, "/projects/{project_id}".into()) .unwrap(); @@ -1240,9 +1170,7 @@ mod test { #[test] fn test_variables_multi() { - /* - * Exercise a case with multiple variables. - */ + // Exercise a case with multiple variables. 
let mut router = HttpRouter::new(); router.insert(new_endpoint( new_handler_named("h6"), @@ -1277,10 +1205,8 @@ mod test { #[test] fn test_empty_variable() { - /* - * Exercise a case where a broken implementation might erroneously - * assign a variable to the empty string. - */ + // Exercise a case where a broken implementation might erroneously + // assign a variable to the empty string. let mut router = HttpRouter::new(); router.insert(new_endpoint( new_handler_named("h7"), diff --git a/dropshot/src/server.rs b/dropshot/src/server.rs index 506a51a52..6ab40327d 100644 --- a/dropshot/src/server.rs +++ b/dropshot/src/server.rs @@ -1,7 +1,5 @@ // Copyright 2020 Oxide Computer Company -/*! - * Generic server-wide state and facilities - */ +//! Generic server-wide state and facilities use super::api_description::ApiDescription; use super::config::{ConfigDropshot, ConfigTls}; @@ -42,34 +40,30 @@ use uuid::Uuid; use slog::Logger; -/* TODO Replace this with something else? */ +// TODO Replace this with something else? type GenericError = Box; -/** - * Endpoint-accessible context associated with a server. - * - * Automatically implemented for all Send + Sync types. - */ +/// Endpoint-accessible context associated with a server. +/// +/// Automatically implemented for all Send + Sync types. pub trait ServerContext: Send + Sync + 'static {} impl ServerContext for T where T: Send + Sync {} -/** - * Stores shared state used by the Dropshot server. - */ +/// Stores shared state used by the Dropshot server. #[derive(Debug)] pub struct DropshotState { - /** caller-specific state */ + /// caller-specific state pub private: C, - /** static server configuration parameters */ + /// static server configuration parameters pub config: ServerConfig, - /** request router */ + /// request router pub router: HttpRouter, - /** server-wide log handle */ + /// server-wide log handle pub log: Logger, - /** bound local address for the server. */ + /// bound local address for the server. pub local_addr: SocketAddr, - /** Identifies how to accept TLS connections */ + /// Identifies how to accept TLS connections pub(crate) tls_acceptor: Option>>, } @@ -79,24 +73,20 @@ impl DropshotState { } } -/** - * Stores static configuration associated with the server - * TODO-cleanup merge with ConfigDropshot - */ +/// Stores static configuration associated with the server +/// TODO-cleanup merge with ConfigDropshot #[derive(Debug)] pub struct ServerConfig { - /** maximum allowed size of a request body */ + /// maximum allowed size of a request body pub request_body_max_bytes: usize, - /** maximum size of any page of results */ + /// maximum size of any page of results pub page_max_nitems: NonZeroU32, - /** default size for a page of results */ + /// default size for a page of results pub page_default_nitems: NonZeroU32, } -/** - * A thin wrapper around a Hyper Server object that exposes some interfaces that - * we find useful. - */ +/// A thin wrapper around a Hyper Server object that exposes some interfaces that +/// we find useful. pub struct HttpServerStarter { app_state: Arc>, local_addr: SocketAddr, @@ -111,7 +101,7 @@ impl HttpServerStarter { log: &Logger, ) -> Result, GenericError> { let server_config = ServerConfig { - /* We start aggressively to ensure test coverage. */ + // We start aggressively to ensure test coverage. 
request_body_max_bytes: config.request_body_max_bytes, page_max_nitems: NonZeroU32::new(10000).unwrap(), page_default_nitems: NonZeroU32::new(100).unwrap(), @@ -241,14 +231,12 @@ impl InnerHttpServerStarter { tokio::spawn(async { graceful.await }) } - /** - * Set up an HTTP server bound on the specified address that runs registered - * handlers. You must invoke `start()` on the returned instance of - * `HttpServerStarter` (and await the result) to actually start the server. - * - * TODO-cleanup We should be able to take a reference to the ApiDescription. - * We currently can't because we need to hang onto the router. - */ + /// Set up an HTTP server bound on the specified address that runs registered + /// handlers. You must invoke `start()` on the returned instance of + /// `HttpServerStarter` (and await the result) to actually start the server. + /// + /// TODO-cleanup We should be able to take a reference to the ApiDescription. + /// We currently can't because we need to hang onto the router. fn new( config: &ConfigDropshot, server_config: ServerConfig, @@ -259,7 +247,7 @@ impl InnerHttpServerStarter { let incoming = AddrIncoming::bind(&config.bind_address)?; let local_addr = incoming.local_addr(); - /* TODO-cleanup too many Arcs? */ + // TODO-cleanup too many Arcs? let app_state = Arc::new(DropshotState { private, config: server_config, @@ -595,11 +583,9 @@ impl HttpServer { Ok(()) } - /** - * Return the result of registering the server's DTrace USDT probes. - * - * See [`ProbeRegistration`] for details. - */ + /// Return the result of registering the server's DTrace USDT probes. + /// + /// See [`ProbeRegistration`] for details. pub fn probe_registration(&self) -> &ProbeRegistration { &self.probe_registration } @@ -628,12 +614,10 @@ impl HttpServer { } } -/* - * For graceful termination, the `close()` function is preferred, as it can - * report errors and wait for termination to complete. However, we impl - * `Drop` to attempt to shut down the server to handle less clean shutdowns - * (e.g., from failing tests). - */ +// For graceful termination, the `close()` function is preferred, as it can +// report errors and wait for termination to complete. However, we impl +// `Drop` to attempt to shut down the server to handle less clean shutdowns +// (e.g., from failing tests). impl Drop for CloseHandle { fn drop(&mut self) { if let Some(c) = self.close_channel.take() { @@ -658,12 +642,10 @@ impl FusedFuture for HttpServer { } } -/** - * Initial entry point for handling a new connection to the HTTP server. - * This is invoked by Hyper when a new connection is accepted. This function - * must return a Hyper Service object that will handle requests for this - * connection. - */ +/// Initial entry point for handling a new connection to the HTTP server. +/// This is invoked by Hyper when a new connection is accepted. This function +/// must return a Hyper Service object that will handle requests for this +/// connection. async fn http_connection_handle( server: Arc>, remote_addr: SocketAddr, @@ -672,23 +654,19 @@ async fn http_connection_handle( Ok(ServerRequestHandler::new(server, remote_addr)) } -/** - * Initial entry point for handling a new request to the HTTP server. This is - * invoked by Hyper when a new request is received. This function returns a - * Result that either represents a valid HTTP response or an error (which will - * also get turned into an HTTP response). - */ +/// Initial entry point for handling a new request to the HTTP server. 
This is +/// invoked by Hyper when a new request is received. This function returns a +/// Result that either represents a valid HTTP response or an error (which will +/// also get turned into an HTTP response). async fn http_request_handle_wrap( server: Arc>, remote_addr: SocketAddr, request: Request, ) -> Result, GenericError> { - /* - * This extra level of indirection makes error handling much more - * straightforward, since the request handling code can simply return early - * with an error and we'll treat it like an error from any of the endpoints - * themselves. - */ + // This extra level of indirection makes error handling much more + // straightforward, since the request handling code can simply return early + // with an error and we'll treat it like an error from any of the endpoints + // themselves. let request_id = generate_request_id(); let request_log = server.log.new(o!( "remote_addr" => remote_addr, @@ -740,7 +718,7 @@ async fn http_request_handle_wrap( } }); - /* TODO-debug: add request and response headers here */ + // TODO-debug: add request and response headers here info!(request_log, "request completed"; "response_code" => r.status().as_str().to_string(), "error_message_internal" => message_internal, @@ -751,7 +729,7 @@ async fn http_request_handle_wrap( } Ok(response) => { - /* TODO-debug: add request and response headers here */ + // TODO-debug: add request and response headers here info!(request_log, "request completed"; "response_code" => response.status().as_str().to_string() ); @@ -780,14 +758,12 @@ async fn http_request_handle( request_id: &str, request_log: Logger, ) -> Result, HttpError> { - /* - * TODO-hardening: is it correct to (and do we correctly) read the entire - * request body even if we decide it's too large and are going to send a 400 - * response? - * TODO-hardening: add a request read timeout as well so that we don't allow - * this to take forever. - * TODO-correctness: Do we need to dump the body on errors? - */ + // TODO-hardening: is it correct to (and do we correctly) read the entire + // request body even if we decide it's too large and are going to send a 400 + // response? + // TODO-hardening: add a request read timeout as well so that we don't allow + // this to take forever. + // TODO-correctness: Do we need to dump the body on errors? let method = request.method(); let uri = request.uri(); let lookup_result = @@ -808,48 +784,40 @@ async fn http_request_handle( Ok(response) } -/* - * This function should probably be parametrized by some name of the service - * that is expected to be unique within an organization. That way, it would be - * possible to determine from a given request id which service it was from. - * TODO should we encode more information here? Service? Instance? Time up to - * the hour? - */ +// This function should probably be parametrized by some name of the service +// that is expected to be unique within an organization. That way, it would be +// possible to determine from a given request id which service it was from. +// TODO should we encode more information here? Service? Instance? Time up to +// the hour? fn generate_request_id() -> String { format!("{}", Uuid::new_v4()) } -/** - * ServerConnectionHandler is a Hyper Service implementation that forwards - * incoming connections to `http_connection_handle()`, providing the server - * state object as an additional argument. We could use `make_service_fn` here - * using a closure to capture the state object, but the resulting code is a bit - * simpler without it. 
- */
+/// ServerConnectionHandler is a Hyper Service implementation that forwards
+/// incoming connections to `http_connection_handle()`, providing the server
+/// state object as an additional argument. We could use `make_service_fn` here
+/// using a closure to capture the state object, but the resulting code is a bit
+/// simpler without it.
 pub struct ServerConnectionHandler<C: ServerContext> {
-    /** backend state that will be made available to the connection handler */
+    /// backend state that will be made available to the connection handler
     server: Arc<DropshotState<C>>,
 }
 
 impl<C: ServerContext> ServerConnectionHandler<C> {
-    /**
-     * Create an ServerConnectionHandler with the given state object that
-     * will be made available to the handler.
-     */
+    /// Create a ServerConnectionHandler with the given state object that
+    /// will be made available to the handler.
     fn new(server: Arc<DropshotState<C>>) -> Self {
         ServerConnectionHandler { server }
     }
 }
 
 impl<C: ServerContext> Service<&AddrStream> for ServerConnectionHandler<C> {
-    /*
-     * Recall that a Service in this context is just something that takes a
-     * request (which could be anything) and produces a response (which could be
-     * anything). This being a connection handler, the request type is an
-     * AddrStream (which wraps a TCP connection) and the response type is
-     * another Service: one that accepts HTTP requests and produces HTTP
-     * responses.
-     */
+    // Recall that a Service in this context is just something that takes a
+    // request (which could be anything) and produces a response (which could be
+    // anything). This being a connection handler, the request type is an
+    // AddrStream (which wraps a TCP connection) and the response type is
+    // another Service: one that accepts HTTP requests and produces HTTP
+    // responses.
     type Response = ServerRequestHandler<C>;
     type Error = GenericError;
     type Future = BoxFuture<'static, Result<Self::Response, Self::Error>>;
@@ -863,39 +831,33 @@ impl<C: ServerContext> Service<&AddrStream> for ServerConnectionHandler<C> {
     }
 
     fn call(&mut self, conn: &AddrStream) -> Self::Future {
-        /*
-         * We're given a borrowed reference to the AddrStream, but our interface
-         * is async (which is good, so that we can support time-consuming
-         * operations as part of receiving requests). To avoid having to ensure
-         * that conn's lifetime exceeds that of this async operation, we simply
-         * copy the only useful information out of the conn: the SocketAddr. We
-         * may want to create our own connection type to encapsulate the socket
-         * address and any other per-connection state that we want to keep.
-         */
+        // We're given a borrowed reference to the AddrStream, but our interface
+        // is async (which is good, so that we can support time-consuming
+        // operations as part of receiving requests). To avoid having to ensure
+        // that conn's lifetime exceeds that of this async operation, we simply
+        // copy the only useful information out of the conn: the SocketAddr. We
+        // may want to create our own connection type to encapsulate the socket
+        // address and any other per-connection state that we want to keep.
        let server = Arc::clone(&self.server);
        let remote_addr = conn.remote_addr();
        Box::pin(http_connection_handle(server, remote_addr))
    }
 }
 
-/**
- * ServerRequestHandler is a Hyper Service implementation that forwards
- * incoming requests to `http_request_handle_wrap()`, including as an argument
- * the backend server state object. We could use `service_fn` here using a
- * closure to capture the server state object, but the resulting code is a bit
- * simpler without all that.
- */ +/// ServerRequestHandler is a Hyper Service implementation that forwards +/// incoming requests to `http_request_handle_wrap()`, including as an argument +/// the backend server state object. We could use `service_fn` here using a +/// closure to capture the server state object, but the resulting code is a bit +/// simpler without all that. pub struct ServerRequestHandler { - /** backend state that will be made available to the request handler */ + /// backend state that will be made available to the request handler server: Arc>, remote_addr: SocketAddr, } impl ServerRequestHandler { - /** - * Create a ServerRequestHandler object with the given state object that - * will be provided to the handler function. - */ + /// Create a ServerRequestHandler object with the given state object that + /// will be provided to the handler function. fn new(server: Arc>, remote_addr: SocketAddr) -> Self { ServerRequestHandler { server, remote_addr } } diff --git a/dropshot/src/test_util.rs b/dropshot/src/test_util.rs index 36b652e82..632e63bcc 100644 --- a/dropshot/src/test_util.rs +++ b/dropshot/src/test_util.rs @@ -1,8 +1,6 @@ // Copyright 2020 Oxide Computer Company -/*! - * Automated testing facilities. These are intended for use both by this crate - * and dependents of this crate. - */ +//! Automated testing facilities. These are intended for use both by this crate +//! and dependents of this crate. use camino::Utf8PathBuf; use chrono::DateTime; @@ -72,23 +70,19 @@ const ALLOWED_HEADERS: [AllowedHeader<'static>; 8] = [ AllowedHeader::new(TEST_HEADER_2), ]; -/** - * ClientTestContext encapsulates several facilities associated with using an - * HTTP client for testing. - */ +/// ClientTestContext encapsulates several facilities associated with using an +/// HTTP client for testing. pub struct ClientTestContext { - /** actual bind address of the HTTP server under test */ + /// actual bind address of the HTTP server under test pub bind_address: SocketAddr, - /** HTTP client, used for making requests against the test server */ + /// HTTP client, used for making requests against the test server pub client: Client, - /** logger for the test suite HTTP client */ + /// logger for the test suite HTTP client pub client_log: Logger, } impl ClientTestContext { - /** - * Set up a `ClientTestContext` for running tests against an API server. - */ + /// Set up a `ClientTestContext` for running tests against an API server. pub fn new(server_addr: SocketAddr, log: Logger) -> ClientTestContext { ClientTestContext { bind_address: server_addr, @@ -97,12 +91,10 @@ impl ClientTestContext { } } - /** - * Given the path for an API endpoint (e.g., "/projects"), return a Uri that - * we can use to invoke this endpoint from the client. This essentially - * appends the path to a base URL constructed from the server's IP address - * and port. - */ + /// Given the path for an API endpoint (e.g., "/projects"), return a Uri that + /// we can use to invoke this endpoint from the client. This essentially + /// appends the path to a base URL constructed from the server's IP address + /// and port. 
pub fn url(&self, path: &str) -> Uri { Uri::builder() .scheme("http") @@ -112,18 +104,16 @@ impl ClientTestContext { .expect("attempted to construct invalid URI") } - /** - * Execute an HTTP request against the test server and perform basic - * validation of the result, including: - * - * - the expected status code - * - the expected Date header (within reason) - * - for error responses: the expected body content - * - header names are in allowed list - * - any other semantics that can be verified in general - * - * The body will be JSON encoded. - */ + /// Execute an HTTP request against the test server and perform basic + /// validation of the result, including: + /// + /// - the expected status code + /// - the expected Date header (within reason) + /// - for error responses: the expected body content + /// - header names are in allowed list + /// - any other semantics that can be verified in general + /// + /// The body will be JSON encoded. pub async fn make_request( &self, method: Method, @@ -139,11 +129,9 @@ impl ClientTestContext { self.make_request_with_body(method, path, body, expected_status).await } - /** - * Execute an HTTP request against the test server and perform basic - * validation of the result like [`make_request`], but with a content - * type of "application/x-www-form-urlencoded". - */ + /// Execute an HTTP request against the test server and perform basic + /// validation of the result like [`make_request`], but with a content + /// type of "application/x-www-form-urlencoded". pub async fn make_request_url_encoded< RequestBodyType: Serialize + Debug, >( @@ -182,9 +170,7 @@ impl ClientTestContext { .await } - /** - * Fetches a resource for which we expect to get an error response. - */ + /// Fetches a resource for which we expect to get an error response. pub async fn make_request_error( &self, method: Method, @@ -196,11 +182,9 @@ impl ClientTestContext { .unwrap_err() } - /** - * Fetches a resource for which we expect to get an error response. - * TODO-cleanup the make_request_error* interfaces are slightly different - * than the non-error ones (and probably a bit more ergonomic). - */ + /// Fetches a resource for which we expect to get an error response. + /// TODO-cleanup the make_request_error* interfaces are slightly different + /// than the non-error ones (and probably a bit more ergonomic). pub async fn make_request_error_body( &self, method: Method, @@ -264,17 +248,15 @@ impl ClientTestContext { .await .expect("failed to make request to server"); - /* Check that we got the expected response code. */ + // Check that we got the expected response code. let status = response.status(); info!(self.client_log, "client received response"; "status" => ?status); assert_eq!(expected_status, status); - /* - * Check that we didn't have any unexpected headers. This could be more - * efficient by putting the allowed headers into a BTree or Hash, but - * right now the structure is tiny and it's convenient to have it - * statically-defined above. - */ + // Check that we didn't have any unexpected headers. This could be more + // efficient by putting the allowed headers into a BTree or Hash, but + // right now the structure is tiny and it's convenient to have it + // statically-defined above. let headers = response.headers(); for (header_name, header_value) in headers { let mut okay = false; @@ -300,18 +282,16 @@ impl ClientTestContext { } } - /* - * Sanity check the Date header in the response. 
Note that this - * assertion will fail spuriously in the unlikely event that the system - * clock is adjusted backwards in between when we sent the request and - * when we received the response, but we consider that case unlikely - * enough to be worth doing this check anyway. (We'll try to check for - * the clock reset condition, too, but we cannot catch all cases that - * would cause the Date header check to be incorrect.) - * - * Note that the Date header typically only has precision down to one - * second, so we don't want to try to do a more precise comparison. - */ + // Sanity check the Date header in the response. Note that this + // assertion will fail spuriously in the unlikely event that the system + // clock is adjusted backwards in between when we sent the request and + // when we received the response, but we consider that case unlikely + // enough to be worth doing this check anyway. (We'll try to check for + // the clock reset condition, too, but we cannot catch all cases that + // would cause the Date header check to be incorrect.) + // + // Note that the Date header typically only has precision down to one + // second, so we don't want to try to do a more precise comparison. let time_after = chrono::offset::Utc::now().timestamp(); let date_header = headers .get(http::header::DATE) @@ -327,10 +307,8 @@ impl ClientTestContext { assert!(time_request.timestamp() >= time_before - 1); assert!(time_request.timestamp() <= time_after + 1); - /* - * Validate that we have a request id header. - * TODO-coverage check that it's unique among requests we've issued - */ + // Validate that we have a request id header. + // TODO-coverage check that it's unique among requests we've issued let request_id_header = headers .get(crate::HEADER_REQUEST_ID) .expect("missing request id header") @@ -338,10 +316,8 @@ impl ClientTestContext { .expect("non-ASCII characters in request id") .to_string(); - /* - * For "204 No Content" responses, validate that we got no content in - * the body. - */ + // For "204 No Content" responses, validate that we got no content in + // the body. if status == StatusCode::NO_CONTENT { let body_bytes = to_bytes(response.body_mut()) .await @@ -349,19 +325,15 @@ impl ClientTestContext { assert_eq!(0, body_bytes.len()); } - /* - * If this was a successful response, there's nothing else to check - * here. Return the response so the caller can validate the content if - * they want. - */ + // If this was a successful response, there's nothing else to check + // here. Return the response so the caller can validate the content if + // they want. if !status.is_client_error() && !status.is_server_error() { return Ok(response); } - /* - * We got an error. Parse the response body to make sure it's valid and - * then return that. - */ + // We got an error. Parse the response body to make sure it's valid and + // then return that. let error_body: HttpErrorResponseBody = read_json(&mut response).await; info!(self.client_log, "client error"; "error_body" => ?error_body); assert_eq!(error_body.request_id, request_id_header); @@ -369,87 +341,81 @@ impl ClientTestContext { } } -/** - * Constructs a Logger for use by a test suite. If a file-based logger is - * requested, the file will be put in a temporary directory and the name will be - * unique for a given test name and is likely to be unique across multiple runs - * of this test. The file will also be deleted if the test succeeds, indicated - * by invoking [`LogContext::cleanup_successful`]. 
This way, you can debug a - * test failure from the failed instance rather than hoping the failure is - * reproducible. - * - * ## Example - * - * ``` - * # use dropshot::ConfigLoggingLevel; - * # - * # fn my_logging_config() -> ConfigLogging { - * # ConfigLogging::StderrTerminal { - * # level: ConfigLoggingLevel::Info, - * # } - * # } - * # - * # fn some_invariant() -> bool { - * # true - * # } - * # - * use dropshot::ConfigLogging; - * use dropshot::test_util::LogContext; - * - * #[macro_use] - * extern crate slog; /* for the `info!` macro below */ - * - * # fn main() { - * let log_config: ConfigLogging = my_logging_config(); - * let logctx = LogContext::new("my_test", &log_config); - * let log = &logctx.log; - * - * /* Run your test. Use the log like you normally would. */ - * info!(log, "the test is going great"); - * assert!(some_invariant()); - * - * /* Upon successful completion, invoke `cleanup_successful()`. */ - * logctx.cleanup_successful(); - * # } - * ``` - * - * If the test fails (e.g., the `some_invariant()` assertion fails), the log - * file will be retained. If the test gets as far as calling - * `cleanup_successful()`, the log file will be removed. - * - * Note that `cleanup_successful()` is not invoked automatically on `drop` - * because that would remove the file even if the test failed, which isn't what - * we want. You have to explicitly call `cleanup_successful`. Normally, you - * just do this as one of the last steps in your test. This pattern ensures - * that the log file sticks around if the test fails, but is removed if the test - * succeeds. - */ +/// Constructs a Logger for use by a test suite. If a file-based logger is +/// requested, the file will be put in a temporary directory and the name will be +/// unique for a given test name and is likely to be unique across multiple runs +/// of this test. The file will also be deleted if the test succeeds, indicated +/// by invoking [`LogContext::cleanup_successful`]. This way, you can debug a +/// test failure from the failed instance rather than hoping the failure is +/// reproducible. +/// +/// ## Example +/// +/// ``` +/// # use dropshot::ConfigLoggingLevel; +/// # +/// # fn my_logging_config() -> ConfigLogging { +/// # ConfigLogging::StderrTerminal { +/// # level: ConfigLoggingLevel::Info, +/// # } +/// # } +/// # +/// # fn some_invariant() -> bool { +/// # true +/// # } +/// # +/// use dropshot::ConfigLogging; +/// use dropshot::test_util::LogContext; +/// +/// #[macro_use] +/// extern crate slog; /* for the `info!` macro below */ +/// +/// # fn main() { +/// let log_config: ConfigLogging = my_logging_config(); +/// let logctx = LogContext::new("my_test", &log_config); +/// let log = &logctx.log; +/// +/// /* Run your test. Use the log like you normally would. */ +/// info!(log, "the test is going great"); +/// assert!(some_invariant()); +/// +/// /* Upon successful completion, invoke `cleanup_successful()`. */ +/// logctx.cleanup_successful(); +/// # } +/// ``` +/// +/// If the test fails (e.g., the `some_invariant()` assertion fails), the log +/// file will be retained. If the test gets as far as calling +/// `cleanup_successful()`, the log file will be removed. +/// +/// Note that `cleanup_successful()` is not invoked automatically on `drop` +/// because that would remove the file even if the test failed, which isn't what +/// we want. You have to explicitly call `cleanup_successful`. Normally, you +/// just do this as one of the last steps in your test. 
This pattern ensures +/// that the log file sticks around if the test fails, but is removed if the test +/// succeeds. pub struct LogContext { - /** general-purpose logger */ + /// general-purpose logger pub log: Logger, log_path: Option, } impl LogContext { - /** - * Sets up a LogContext. If `initial_config_logging` specifies a file-based - * log (i.e., [`ConfigLogging::File`]), then the requested path _must_ be - * the string `"UNUSED"` and it will be replaced with a file name (in a - * temporary directory) containing `test_name` and other information to make - * the filename likely to be unique across multiple runs (e.g., process id). - */ + /// Sets up a LogContext. If `initial_config_logging` specifies a file-based + /// log (i.e., [`ConfigLogging::File`]), then the requested path _must_ be + /// the string `"UNUSED"` and it will be replaced with a file name (in a + /// temporary directory) containing `test_name` and other information to make + /// the filename likely to be unique across multiple runs (e.g., process id). pub fn new( test_name: &str, initial_config_logging: &ConfigLogging, ) -> LogContext { - /* - * See above. If the caller requested a file path, assert that the path - * matches our sentinel (just to improve debuggability -- otherwise - * people might be pretty confused about where the logs went). Then - * override the path with one uniquely generated for this test. - * TODO-developer allow keeping the logs in successful cases with an - * environment variable or other flag. - */ + // See above. If the caller requested a file path, assert that the path + // matches our sentinel (just to improve debuggability -- otherwise + // people might be pretty confused about where the logs went). Then + // override the path with one uniquely generated for this test. + // TODO-developer allow keeping the logs in successful cases with an + // environment variable or other flag. let (log_path, log_config) = match initial_config_logging { ConfigLogging::File { level, path: dummy_path, if_exists } => { assert_eq!( @@ -477,9 +443,7 @@ impl LogContext { LogContext { log, log_path } } - /** - * Removes the log file, if this was a file-based logger. - */ + /// Removes the log file, if this was a file-based logger. pub fn cleanup_successful(self) { if let Some(ref log_path) = self.log_path { fs::remove_file(log_path).unwrap(); @@ -487,11 +451,9 @@ impl LogContext { } } -/** - * TestContext is used to manage a matched server and client for the common - * test-case pattern of setting up a logger, server, and client and tearing them - * all down at the end. - */ +/// TestContext is used to manage a matched server and client for the common +/// test-case pattern of setting up a logger, server, and client and tearing them +/// all down at the end. pub struct TestContext { pub client_testctx: ClientTestContext, pub server: HttpServer, @@ -500,15 +462,13 @@ pub struct TestContext { } impl TestContext { - /** - * Instantiate a TestContext by creating a new Dropshot server with `api`, - * `private`, `config_dropshot`, and `log`, and then creating a - * `ClientTestContext` with whatever address the server wound up bound to. - * - * This interfaces requires that `config_dropshot.bind_address.port()` be - * `0` to allow the server to bind to any available port. This is necessary - * in order for it to be used concurrently by many tests. 
-     */
+    /// Instantiate a TestContext by creating a new Dropshot server with `api`,
+    /// `private`, `config_dropshot`, and `log`, and then creating a
+    /// `ClientTestContext` with whatever address the server wound up bound to.
+    ///
+    /// This interface requires that `config_dropshot.bind_address.port()` be
+    /// `0` to allow the server to bind to any available port. This is necessary
+    /// in order for it to be used concurrently by many tests.
     pub fn new(
         api: ApiDescription<Context>,
         private: Context,
@@ -522,9 +482,7 @@ impl<Context: ServerContext> TestContext<Context> {
             "test suite only supports binding on port 0 (any available port)"
         );

-        /*
-         * Set up the server itself.
-         */
+        // Set up the server itself.
         let server =
             HttpServerStarter::new(&config_dropshot, api, private, &log)
                 .unwrap()
                 .start();
@@ -537,11 +495,9 @@ impl<Context: ServerContext> TestContext<Context> {
         TestContext { client_testctx, server, log, log_context }
     }

-    /**
-     * Requests a graceful shutdown of the server, waits for that to complete,
-     * and cleans up the associated log context (if any).
-     */
-    /* TODO-cleanup: is there an async analog to Drop? */
+    /// Requests a graceful shutdown of the server, waits for that to complete,
+    /// and cleans up the associated log context (if any).
+    // TODO-cleanup: is there an async analog to Drop?
     pub async fn teardown(self) {
         self.server.close().await.expect("server stopped with an error");
         if let Some(log_context) = self.log_context {
@@ -550,12 +506,10 @@ impl<Context: ServerContext> TestContext<Context> {
     }
 }

-/**
- * Given a Hyper Response whose body is expected to represent newline-separated
- * JSON, each line of which is expected to be parseable via Serde as type T,
- * asynchronously read the body of the response and parse it accordingly,
- * returning a vector of T.
- */
+/// Given a Hyper Response whose body is expected to represent newline-separated
+/// JSON, each line of which is expected to be parseable via Serde as type T,
+/// asynchronously read the body of the response and parse it accordingly,
+/// returning a vector of T.
 pub async fn read_ndjson<T: DeserializeOwned>(
     response: &mut Response<Body>,
 ) -> Vec<T> {
@@ -569,12 +523,10 @@ pub async fn read_ndjson(
     let body_string = String::from_utf8(body_bytes.as_ref().into())
         .expect("response contained non-UTF-8 bytes");

-    /*
-     * TODO-cleanup: Consider using serde_json::StreamDeserializer or maybe
-     * implementing an NDJSON-based Serde type?
-     * TODO-correctness: If we don't do that, this should split on (\r?\n)+ to
-     * be NDJSON-compatible.
-     */
+    // TODO-cleanup: Consider using serde_json::StreamDeserializer or maybe
+    // implementing an NDJSON-based Serde type?
+    // TODO-correctness: If we don't do that, this should split on (\r?\n)+ to
+    // be NDJSON-compatible.
     body_string
         .split('\n')
         .filter(|line| !line.is_empty())
         .map(|line| {
             serde_json::from_str(line)
                 .expect("failed to parse server body as expected type")
         })
         .collect::<Vec<T>>()
 }

-/**
- * Given a Hyper response whose body is expected to be a JSON object that should
- * be parseable via Serde as type T, asynchronously read the body of the
- * response and parse it, returning an instance of T.
- */
+/// Given a Hyper response whose body is expected to be a JSON object that should
+/// be parseable via Serde as type T, asynchronously read the body of the
+/// response and parse it, returning an instance of T.
 pub async fn read_json<T: DeserializeOwned>(
     response: &mut Response<Body>,
 ) -> T {
@@ -604,10 +554,8 @@ pub async fn read_json(
         .expect("failed to parse server body as expected type")
 }

-/**
- * Given a Hyper Response whose body is expected to be a UTF-8-encoded string,
- * asynchronously read the body.
- */ +/// Given a Hyper Response whose body is expected to be a UTF-8-encoded string, +/// asynchronously read the body. pub async fn read_string(response: &mut Response) -> String { let body_bytes = to_bytes(response.body_mut()).await.expect("error reading body"); @@ -615,9 +563,7 @@ pub async fn read_string(response: &mut Response) -> String { .expect("response contained non-UTF-8 bytes") } -/** - * Fetches a single resource from the API. - */ +/// Fetches a single resource from the API. pub async fn object_get( client: &ClientTestContext, object_url: &str, @@ -634,9 +580,7 @@ pub async fn object_get( read_json::(&mut response).await } -/** - * Fetches a list of resources from the API. - */ +/// Fetches a list of resources from the API. pub async fn objects_list( client: &ClientTestContext, list_url: &str, @@ -653,9 +597,7 @@ pub async fn objects_list( read_ndjson::(&mut response).await } -/** - * Fetches a page of resources from the API. - */ +/// Fetches a page of resources from the API. pub async fn objects_list_page( client: &ClientTestContext, list_url: &str, @@ -676,9 +618,7 @@ where read_json::>(&mut response).await } -/** - * Issues an HTTP POST to the specified collection URL to create an object. - */ +/// Issues an HTTP POST to the specified collection URL to create an object. pub async fn objects_post( client: &ClientTestContext, collection_url: &str, @@ -696,9 +636,7 @@ pub async fn objects_post( read_json::(&mut response).await } -/** - * Issues an HTTP PUT to the specified collection URL to update an object. - */ +/// Issues an HTTP PUT to the specified collection URL to update an object. pub async fn object_put( client: &ClientTestContext, object_url: &str, @@ -711,9 +649,7 @@ pub async fn object_put( .unwrap(); } -/** - * Issues an HTTP DELETE to the specified object URL to delete an object. - */ +/// Issues an HTTP DELETE to the specified object URL to delete an object. pub async fn object_delete(client: &ClientTestContext, object_url: &str) { client .make_request_no_body( @@ -725,9 +661,7 @@ pub async fn object_delete(client: &ClientTestContext, object_url: &str) { .unwrap(); } -/** - * Iterate a paginated collection. - */ +/// Iterate a paginated collection. pub async fn iter_collection( client: &ClientTestContext, collection_url: &str, @@ -760,10 +694,8 @@ pub async fn iter_collection( static TEST_SUITE_LOGGER_ID: AtomicU32 = AtomicU32::new(0); -/** - * Returns a unique path name in a temporary directory that includes the given - * `test_name`. - */ +/// Returns a unique path name in a temporary directory that includes the given +/// `test_name`. pub fn log_file_for_test(test_name: &str) -> Utf8PathBuf { let arg0 = { let arg0path = Utf8PathBuf::from(std::env::args().next().unwrap()); @@ -778,11 +710,9 @@ pub fn log_file_for_test(test_name: &str) -> Utf8PathBuf { pathbuf } -/** - * Load an object of type `T` (usually a hunk of configuration) from the string - * `contents`. `label` is used as an identifying string in a log message. It - * should be unique for each test. - */ +/// Load an object of type `T` (usually a hunk of configuration) from the string +/// `contents`. `label` is used as an identifying string in a log message. It +/// should be unique for each test. pub fn read_config( label: &str, contents: &str, @@ -792,14 +722,10 @@ pub fn read_config( result } -/* - * Bunyan testing facilities - */ +// Bunyan testing facilities -/** - * Represents a Bunyan log record. This form does not support any non-standard - * fields. 
"level" is not yet supported because we don't (yet) need it. - */ +/// Represents a Bunyan log record. This form does not support any non-standard +/// fields. "level" is not yet supported because we don't (yet) need it. #[derive(Deserialize)] pub struct BunyanLogRecord { pub time: DateTime, @@ -810,9 +736,7 @@ pub struct BunyanLogRecord { pub v: usize, } -/** - * Read a file containing a Bunyan-format log, returning an array of records. - */ +/// Read a file containing a Bunyan-format log, returning an array of records. pub fn read_bunyan_log(logpath: &Path) -> Vec { let log_contents = fs::read_to_string(logpath).unwrap(); log_contents @@ -822,9 +746,7 @@ pub fn read_bunyan_log(logpath: &Path) -> Vec { .collect::>() } -/** - * Analogous to a BunyanLogRecord, but where all fields are optional. - */ +/// Analogous to a BunyanLogRecord, but where all fields are optional. pub struct BunyanLogRecordSpec { pub name: Option, pub hostname: Option, @@ -832,11 +754,9 @@ pub struct BunyanLogRecordSpec { pub v: Option, } -/** - * Verify that the key fields of the log records emitted by `iter` match the - * corresponding values in `expected`. Fields that are `None` in `expected` - * will not be checked. - */ +/// Verify that the key fields of the log records emitted by `iter` match the +/// corresponding values in `expected`. Fields that are `None` in `expected` +/// will not be checked. pub fn verify_bunyan_records<'a, 'b, I>( iter: I, expected: &'a BunyanLogRecordSpec, @@ -859,11 +779,9 @@ pub fn verify_bunyan_records<'a, 'b, I>( } } -/** - * Verify that the Bunyan records emitted by `iter` are chronologically - * sequential and after `maybe_time_before` and before `maybe_time_after`, if - * those latter two parameters are specified. - */ +/// Verify that the Bunyan records emitted by `iter` are chronologically +/// sequential and after `maybe_time_before` and before `maybe_time_after`, if +/// those latter two parameters are specified. pub fn verify_bunyan_records_sequential<'a, 'b, I>( iter: I, maybe_time_before: Option<&'a DateTime>, @@ -912,9 +830,7 @@ mod test { } } - /* - * Tests various cases where verify_bunyan_records() should not panic. - */ + // Tests various cases where verify_bunyan_records() should not panic. #[test] fn test_bunyan_easy_cases() { let t1: DateTime = @@ -929,7 +845,7 @@ mod test { v: 1, }; - /* Test case: nothing to check. */ + // Test case: nothing to check. let records: Vec<&BunyanLogRecord> = vec![&r1]; let iter = records.iter().map(|x| *x); verify_bunyan_records( @@ -942,7 +858,7 @@ mod test { }, ); - /* Test case: check name, no problem. */ + // Test case: check name, no problem. let records: Vec<&BunyanLogRecord> = vec![&r1]; let iter = records.iter().map(|x| *x); verify_bunyan_records( @@ -955,7 +871,7 @@ mod test { }, ); - /* Test case: check hostname, no problem. */ + // Test case: check hostname, no problem. let records: Vec<&BunyanLogRecord> = vec![&r1]; let iter = records.iter().map(|x| *x); verify_bunyan_records( @@ -968,7 +884,7 @@ mod test { }, ); - /* Test case: check pid, no problem. */ + // Test case: check pid, no problem. let records: Vec<&BunyanLogRecord> = vec![&r1]; let iter = records.iter().map(|x| *x); verify_bunyan_records( @@ -981,7 +897,7 @@ mod test { }, ); - /* Test case: check hostname, no problem. */ + // Test case: check hostname, no problem. let records: Vec<&BunyanLogRecord> = vec![&r1]; let iter = records.iter().map(|x| *x); verify_bunyan_records( @@ -994,7 +910,7 @@ mod test { }, ); - /* Test case: check all, no problem. 
*/ + // Test case: check all, no problem. let records: Vec<&BunyanLogRecord> = vec![&r1]; let iter = records.iter().map(|x| *x); verify_bunyan_records( @@ -1007,7 +923,7 @@ mod test { }, ); - /* Test case: check multiple records, no problem. */ + // Test case: check multiple records, no problem. let records: Vec<&BunyanLogRecord> = vec![&r1, &r2]; let iter = records.iter().map(|x| *x); verify_bunyan_records( @@ -1021,9 +937,7 @@ mod test { ); } - /* - * Test cases exercising violations of each of the fields. - */ + // Test cases exercising violations of each of the fields. #[test] #[should_panic(expected = "assertion failed")] @@ -1093,10 +1007,8 @@ mod test { ); } - /* - * These cases exercise 0, 1, and 2 records with every valid combination - * of lower and upper bounds. - */ + // These cases exercise 0, 1, and 2 records with every valid combination + // of lower and upper bounds. #[test] fn test_bunyan_seq_easy_cases() { let t1: DateTime = @@ -1145,9 +1057,7 @@ mod test { verify_bunyan_records_sequential(v2.iter(), Some(&t1), Some(&t2)); } - /* - * Test case: no records, but the bounds themselves violate the constraint. - */ + // Test case: no records, but the bounds themselves violate the constraint. #[test] #[should_panic(expected = "assertion failed: should_be_before")] fn test_bunyan_seq_bounds_bad() { @@ -1159,9 +1069,7 @@ mod test { verify_bunyan_records_sequential(v0.iter(), Some(&t2), Some(&t1)); } - /* - * Test case: sole record appears before early bound. - */ + // Test case: sole record appears before early bound. #[test] #[should_panic(expected = "assertion failed: should_be_before")] fn test_bunyan_seq_lower_violated() { @@ -1180,9 +1088,7 @@ mod test { verify_bunyan_records_sequential(v1.iter(), Some(&t2), None); } - /* - * Test case: sole record appears after late bound. - */ + // Test case: sole record appears after late bound. #[test] #[should_panic(expected = "assertion failed: should_be_before")] fn test_bunyan_seq_upper_violated() { @@ -1201,9 +1107,7 @@ mod test { verify_bunyan_records_sequential(v1.iter(), None, Some(&t1)); } - /* - * Test case: two records out of order. - */ + // Test case: two records out of order. #[test] #[should_panic(expected = "assertion failed: should_be_before")] fn test_bunyan_seq_bad_order() { diff --git a/dropshot/src/to_map.rs b/dropshot/src/to_map.rs index 77c5aad82..9250f6913 100644 --- a/dropshot/src/to_map.rs +++ b/dropshot/src/to_map.rs @@ -7,9 +7,7 @@ use serde::{ Serialize, Serializer, }; -/** - * Serialize an instance of T into a `BTreeMap`. - */ +/// Serialize an instance of T into a `BTreeMap`. pub(crate) fn to_map(input: &T) -> Result, MapError> where T: Serialize, @@ -174,9 +172,7 @@ impl<'de, 'a, Input> Serializer for &'a mut MapSerializer<'de, Input> { } } -/** - * Used to serialize structs for `MapSerializer`. - */ +/// Used to serialize structs for `MapSerializer`. struct MapSerializeStruct { output: BTreeMap, } @@ -204,11 +200,9 @@ impl SerializeStruct for MapSerializeStruct { } } -/** - * A trivial `Serializer` used to extract a `String`. One could imagine - * extending this to convert other scalars into strings, but for now we'll just - * work with strings. - */ +/// A trivial `Serializer` used to extract a `String`. One could imagine +/// extending this to convert other scalars into strings, but for now we'll just +/// work with strings. 
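+///
+/// A minimal sketch of the intended use (hypothetical; the real call sites
+/// are internal to this module):
+///
+/// ```ignore
+/// use serde::Serialize;
+///
+/// let mut serializer = StringSerializer;
+/// let s: String = "hello".serialize(&mut serializer).unwrap();
+/// assert_eq!(s, "hello");
+/// ```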
struct StringSerializer; impl<'a> Serializer for &'a mut StringSerializer { type Ok = String; diff --git a/dropshot/src/type_util.rs b/dropshot/src/type_util.rs index 3e0e1faf5..63a460f20 100644 --- a/dropshot/src/type_util.rs +++ b/dropshot/src/type_util.rs @@ -1,8 +1,6 @@ // Copyright 2021 Oxide Computer Company -/*! - * Utility functions for working with JsonSchema types. - */ +//! Utility functions for working with JsonSchema types. use std::collections::HashSet; @@ -11,10 +9,8 @@ use schemars::schema::{ InstanceType, Schema, SchemaObject, SingleOrVec, SubschemaValidation, }; -/** - * Returns true iff the input schema is a boolean, floating-point number, - * string or integer. - */ +/// Returns true iff the input schema is a boolean, floating-point number, +/// string or integer. pub fn type_is_scalar( name: &String, schema: &Schema, @@ -31,9 +27,7 @@ pub fn type_is_scalar( }) } -/** - * Returns true iff the input schema is a string. - */ +/// Returns true iff the input schema is a string. pub fn type_is_string( name: &String, schema: &Schema, @@ -44,24 +38,20 @@ pub fn type_is_string( }) } -/** - * Helper function for scalar types. - */ +/// Helper function for scalar types. fn type_is_scalar_common( name: &String, schema: &Schema, dependencies: &IndexMap, type_check: fn(&InstanceType) -> bool, ) -> Result<(), String> { - /* Make sure we're examining a type and not a reference */ + // Make sure we're examining a type and not a reference let schema = type_resolve(schema, dependencies); match schema { - /* - * Types that have no subschemas, are not arrays, are not objects, are - * not references, and whose instance type matches the limited set of - * scalar types. - */ + // Types that have no subschemas, are not arrays, are not objects, are + // not references, and whose instance type matches the limited set of + // scalar types. Schema::Object(SchemaObject { instance_type: Some(SingleOrVec::Single(instance_type)), subschemas: None, @@ -71,9 +61,7 @@ fn type_is_scalar_common( .. }) if type_check(instance_type.as_ref()) => Ok(()), - /* - * Handle subschemas. - */ + // Handle subschemas. Schema::Object(SchemaObject { instance_type: None, format: None, @@ -100,12 +88,10 @@ fn type_is_scalar_common( } } -/** - * Determine if a collection of subschemas are scalar (and meet the criteria of - * the `type_check` parameter). For `allOf` and `anyOf` subschemas, we proceed - * only if there is a lone subschema which we check recursively. For `oneOf` - * subschemas, we check that each subschema is scalar. - */ +/// Determine if a collection of subschemas are scalar (and meet the criteria of +/// the `type_check` parameter). For `allOf` and `anyOf` subschemas, we proceed +/// only if there is a lone subschema which we check recursively. For `oneOf` +/// subschemas, we check that each subschema is scalar. 
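+///
+/// A sketch of an accepted shape: a `oneOf` in which every variant is itself
+/// a string schema passes a string-ness `type_check`, e.g.
+///
+/// ```text
+/// oneOf:
+///   - { type: string, enum: [ "a" ] }
+///   - { type: string, enum: [ "b" ] }
+/// ```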
fn type_is_scalar_subschemas( name: &String, subschemas: &SubschemaValidation, @@ -160,7 +146,7 @@ pub fn type_is_string_enum( schema: &Schema, dependencies: &IndexMap, ) -> Result<(), String> { - /* Make sure we're examining a type and not a reference */ + // Make sure we're examining a type and not a reference let schema = type_resolve(schema, dependencies); match schema { @@ -302,7 +288,7 @@ mod tests { #[derive(JsonSchema)] struct ThingHolder { - /** This is my thing */ + /// This is my thing thing: Things, } diff --git a/dropshot/src/websocket.rs b/dropshot/src/websocket.rs index 1055660b9..1fabc0baa 100644 --- a/dropshot/src/websocket.rs +++ b/dropshot/src/websocket.rs @@ -1,12 +1,10 @@ // Copyright 2022 Oxide Computer Company -/*! - * Implements websocket upgrades as an Extractor for use in API route handler - * parameters to indicate that the given endpoint is meant to be upgraded to - * a websocket. - * - * This exposes a raw upgraded HTTP connection to a user-provided async future, - * which will be spawned to handle the incoming connection. - */ +//! Implements websocket upgrades as an Extractor for use in API route handler +//! parameters to indicate that the given endpoint is meant to be upgraded to +//! a websocket. +//! +//! This exposes a raw upgraded HTTP connection to a user-provided async future, +//! which will be spawned to handle the incoming connection. use crate::api_description::ExtensionMode; use crate::{ @@ -26,37 +24,29 @@ use slog::Logger; use std::future::Future; use std::sync::Arc; -/** - * WebsocketUpgrade is an Extractor used to upgrade and handle an HTTP request - * as a websocket when present in a Dropshot endpoint's function arguments. - * - * The consumer of this must call [WebsocketUpgrade::handle] for the connection - * to be upgraded. (This is done for you by `#[channel]`.) - */ +/// WebsocketUpgrade is an Extractor used to upgrade and handle an HTTP request +/// as a websocket when present in a Dropshot endpoint's function arguments. +/// +/// The consumer of this must call [WebsocketUpgrade::handle] for the connection +/// to be upgraded. (This is done for you by `#[channel]`.) #[derive(Debug)] pub struct WebsocketUpgrade(Option); -/** - * This is the return type of the websocket-handling future provided to - * [`dropshot_endpoint::channel`] - * (which in turn provides it to [WebsocketUpgrade::handle]). - */ +/// This is the return type of the websocket-handling future provided to +/// [`dropshot_endpoint::channel`] +/// (which in turn provides it to [WebsocketUpgrade::handle]). pub type WebsocketChannelResult = Result<(), Box>; -/** - * [WebsocketUpgrade::handle]'s return type. - * The `#[endpoint]` handler must return the value returned by - * [WebsocketUpgrade::handle]. (This is done for you by `#[channel]`.) - */ +/// [WebsocketUpgrade::handle]'s return type. +/// The `#[endpoint]` handler must return the value returned by +/// [WebsocketUpgrade::handle]. (This is done for you by `#[channel]`.) pub type WebsocketEndpointResult = Result, HttpError>; -/** - * The upgraded connection passed as the second argument to the websocket - * handler function. [`WebsocketConnection::into_inner`] can be used to - * access the raw upgraded connection, for passing to any implementation - * of the websockets protocol. - */ +/// The upgraded connection passed as the second argument to the websocket +/// handler function. 
[`WebsocketConnection::into_inner`] can be used to +/// access the raw upgraded connection, for passing to any implementation +/// of the websockets protocol. pub struct WebsocketConnection(WebsocketConnectionRaw); /// A type that implements [tokio::io::AsyncRead] + [tokio::io::AsyncWrite]. @@ -88,11 +78,9 @@ fn derive_accept_key(request_key: &[u8]) -> String { base64::encode(&sha1.finalize()) } -/** - * This `Extractor` implementation constructs an instance of `WebsocketUpgrade` - * from an HTTP request, and returns an error if the given request does not - * contain websocket upgrade headers. - */ +/// This `Extractor` implementation constructs an instance of `WebsocketUpgrade` +/// from an HTTP request, and returns an error if the given request does not +/// contain websocket upgrade headers. #[async_trait] impl Extractor for WebsocketUpgrade { async fn from_request( @@ -184,45 +172,43 @@ impl Extractor for WebsocketUpgrade { } impl WebsocketUpgrade { - /** - * Upgrade the HTTP connection to a websocket and spawn a user-provided - * async handler to service it. - * - * This function's return value should be the basis of the return value of - * your endpoint's function, as it sends the headers to tell the HTTP - * client that we are accepting the upgrade. - * - * `handler` is a closure that accepts a [`WebsocketConnection`] - * and returns a future that will be spawned by this function, - * in which the `WebsocketConnection`'s inner `Upgraded` connection may be - * used with your choice of websocket-handling code operating over an - * [`tokio::io::AsyncRead`] + [`tokio::io::AsyncWrite`] type - * (e.g. `tokio_tungstenite`). - * - * ``` - #[dropshot::endpoint { method = GET, path = "/my/ws/endpoint/{id}" }] - async fn my_ws_endpoint( - rqctx: std::sync::Arc>, - websock: dropshot::WebsocketUpgrade, - id: dropshot::Path, - ) -> dropshot::WebsocketEndpointResult { - let logger = rqctx.log.new(slog::o!()); - websock.handle(move |upgraded| async move { - slog::info!(logger, "Entered handler for ID {}", id.into_inner()); - use futures::stream::StreamExt; - let mut ws_stream = tokio_tungstenite::WebSocketStream::from_raw_socket( - upgraded.into_inner(), tokio_tungstenite::tungstenite::protocol::Role::Server, None - ).await; - slog::info!(logger, "Received from websocket: {:?}", ws_stream.next().await); - Ok(()) - }) - } - * ``` - * - * Note that as a consumer of this crate, you most likely do not want to - * call this function directly; rather, prefer to annotate your function - * with [`dropshot_endpoint::channel`] instead of `endpoint`. - */ + /// Upgrade the HTTP connection to a websocket and spawn a user-provided + /// async handler to service it. + /// + /// This function's return value should be the basis of the return value of + /// your endpoint's function, as it sends the headers to tell the HTTP + /// client that we are accepting the upgrade. + /// + /// `handler` is a closure that accepts a [`WebsocketConnection`] + /// and returns a future that will be spawned by this function, + /// in which the `WebsocketConnection`'s inner `Upgraded` connection may be + /// used with your choice of websocket-handling code operating over an + /// [`tokio::io::AsyncRead`] + [`tokio::io::AsyncWrite`] type + /// (e.g. `tokio_tungstenite`). 
+ /// + /// ``` + /// #[dropshot::endpoint { method = GET, path = "/my/ws/endpoint/{id}" }] + /// async fn my_ws_endpoint( + /// rqctx: std::sync::Arc>, + /// websock: dropshot::WebsocketUpgrade, + /// id: dropshot::Path, + /// ) -> dropshot::WebsocketEndpointResult { + /// let logger = rqctx.log.new(slog::o!()); + /// websock.handle(move |upgraded| async move { + /// slog::info!(logger, "Entered handler for ID {}", id.into_inner()); + /// use futures::stream::StreamExt; + /// let mut ws_stream = tokio_tungstenite::WebSocketStream::from_raw_socket( + /// upgraded.into_inner(), tokio_tungstenite::tungstenite::protocol::Role::Server, None + /// ).await; + /// slog::info!(logger, "Received from websocket: {:?}", ws_stream.next().await); + /// Ok(()) + /// }) + /// } + /// ``` + /// + /// Note that as a consumer of this crate, you most likely do not want to + /// call this function directly; rather, prefer to annotate your function + /// with [`dropshot_endpoint::channel`] instead of `endpoint`. pub fn handle(mut self, handler: C) -> WebsocketEndpointResult where C: FnOnce(WebsocketConnection) -> F + Send + 'static, diff --git a/dropshot/tests/common/mod.rs b/dropshot/tests/common/mod.rs index b74332b1e..3ebca24f2 100644 --- a/dropshot/tests/common/mod.rs +++ b/dropshot/tests/common/mod.rs @@ -1,7 +1,5 @@ // Copyright 2020 Oxide Computer Company -/*! - * Common facilities for automated testing. - */ +//! Common facilities for automated testing. use dropshot::test_util::LogContext; use dropshot::test_util::TestContext; @@ -18,15 +16,13 @@ pub fn test_setup( test_name: &str, api: ApiDescription, ) -> TestContext { - /* - * The IP address to which we bind can be any local IP, but we use - * 127.0.0.1 because we know it's present, it shouldn't expose this server - * on any external network, and we don't have to go looking for some other - * local IP (likely in a platform-specific way). We specify port 0 to - * request any available port. This is important because we may run - * multiple concurrent tests, so any fixed port could result in spurious - * failures due to port conflicts. - */ + // The IP address to which we bind can be any local IP, but we use + // 127.0.0.1 because we know it's present, it shouldn't expose this server + // on any external network, and we don't have to go looking for some other + // local IP (likely in a platform-specific way). We specify port 0 to + // request any available port. This is important because we may run + // multiple concurrent tests, so any fixed port could result in spurious + // failures due to port conflicts. let config_dropshot: ConfigDropshot = Default::default(); let logctx = create_log_context(test_name); diff --git a/dropshot/tests/test_config.rs b/dropshot/tests/test_config.rs index ff639add4..047570ba8 100644 --- a/dropshot/tests/test_config.rs +++ b/dropshot/tests/test_config.rs @@ -1,7 +1,5 @@ // Copyright 2020 Oxide Computer Company -/*! - * Tests for configuration file. - */ +//! Tests for configuration file. 
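+//!
+//! Most tests here follow one pattern: feed a snippet of TOML to the
+//! `read_config` helper and check the error it produces. A sketch (the TOML
+//! shown is illustrative; the helper and error text appear in the tests
+//! below):
+//!
+//! ```ignore
+//! let error = read_config::<ConfigDropshot>(
+//!     "bad_bind_address_garbage",
+//!     "bind_address = \"garbage\"",
+//! )
+//! .unwrap_err()
+//! .to_string();
+//! assert!(error.starts_with("invalid socket address syntax"));
+//! ```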
use dropshot::test_util::read_config; use dropshot::{ConfigDropshot, ConfigTls}; @@ -14,9 +12,7 @@ use tempfile::NamedTempFile; pub mod common; use common::create_log_context; -/* - * Bad values for "bind_address" - */ +// Bad values for "bind_address" #[test] fn test_config_bad_bind_address_port_too_small() { @@ -54,9 +50,7 @@ fn test_config_bad_bind_address_garbage() { .starts_with("invalid socket address syntax for key `bind_address`")); } -/* - * Bad values for "request_body_max_bytes" - */ +// Bad values for "request_body_max_bytes" #[test] fn test_config_bad_request_body_max_bytes_negative() { @@ -80,9 +74,7 @@ fn test_config_bad_request_body_max_bytes_too_large() { assert!(error.starts_with("")); } -/* - * Bad values for "key_file" - */ +// Bad values for "key_file" #[test] fn test_config_bad_key_file_garbage() { @@ -95,9 +87,7 @@ fn test_config_bad_key_file_garbage() { assert!(error.starts_with("invalid type: integer")); } -/* - * Bad values for "cert_file" - */ +// Bad values for "cert_file" #[test] fn test_config_bad_cert_file_garbage() { @@ -110,9 +100,7 @@ fn test_config_bad_cert_file_garbage() { assert!(error.starts_with("invalid type: integer")); } -/* - * Bad values for "tls" - */ +// Bad values for "tls" #[test] fn test_config_bad_tls_garbage() { @@ -186,41 +174,33 @@ where { let client = test_config.make_client(); - /* - * Make sure there is not currently a server running on our expected - * port so that when we subsequently create a server and run it we know - * we're getting the one we configured. - */ + // Make sure there is not currently a server running on our expected + // port so that when we subsequently create a server and run it we know + // we're getting the one we configured. let error = client.get(test_config.make_uri(bind_port)).await.unwrap_err(); assert!(error.is_connect()); - /* - * Now start a server with our configuration and make the request again. - * This should succeed in terms of making the request. (The request - * itself might fail with a 400-level or 500-level response code -- we - * don't want to depend on too much from the ApiServer here -- but we - * should have successfully made the request.) - */ + // Now start a server with our configuration and make the request again. + // This should succeed in terms of making the request. (The request + // itself might fail with a 400-level or 500-level response code -- we + // don't want to depend on too much from the ApiServer here -- but we + // should have successfully made the request.) let server = test_config.make_server(bind_port); client.get(test_config.make_uri(bind_port)).await.unwrap(); server.close().await.unwrap(); - /* - * Make another request to make sure it fails now that we've shut down - * the server. We need a new client to make sure our client-side connection - * starts from a clean slate. (Otherwise, a race during shutdown could - * cause us to successfully send a request packet, only to have the TCP - * stack return with ECONNRESET, which gets in the way of what we're trying - * to test here.) - */ + // Make another request to make sure it fails now that we've shut down + // the server. We need a new client to make sure our client-side connection + // starts from a clean slate. (Otherwise, a race during shutdown could + // cause us to successfully send a request packet, only to have the TCP + // stack return with ECONNRESET, which gets in the way of what we're trying + // to test here.) 
let client = test_config.make_client(); let error = client.get(test_config.make_uri(bind_port)).await.unwrap_err(); assert!(error.is_connect()); - /* - * Start a server on another TCP port and make sure we can reach that - * one (and NOT the one we just shut down). - */ + // Start a server on another TCP port and make sure we can reach that + // one (and NOT the one we just shut down). let server = test_config.make_server(bind_port + 1); client.get(test_config.make_uri(bind_port + 1)).await.unwrap(); let error = client.get(test_config.make_uri(bind_port)).await.unwrap_err(); @@ -336,7 +316,7 @@ async fn test_config_bind_address_https() { let (cert_file, key_file) = common::tls_key_to_file(&certs, &key); let test_config = ConfigBindServerHttps { log, certs, cert_file, key_file }; - /* This must be different than the bind_port used in the http test. */ + // This must be different than the bind_port used in the http test. let bind_port = 12217; test_config_bind_server::<_, ConfigBindServerHttps>(test_config, bind_port) .await; @@ -409,7 +389,7 @@ async fn test_config_bind_address_https_buffer() { let test_config = ConfigBindServerHttps { log, certs, serialized_certs, serialized_key }; - /* This must be different than the bind_port used in the http test. */ + // This must be different than the bind_port used in the http test. let bind_port = 12219; test_config_bind_server::<_, ConfigBindServerHttps>(test_config, bind_port) .await; diff --git a/dropshot/tests/test_demo.rs b/dropshot/tests/test_demo.rs index e0ed378f7..aabeb9756 100644 --- a/dropshot/tests/test_demo.rs +++ b/dropshot/tests/test_demo.rs @@ -1,19 +1,17 @@ // Copyright 2020 Oxide Computer Company -/*! - * Test cases for the "demo" handlers. These handlers exercise various - * supported configurations of the HTTP handler interface. We exercise them - * here to make sure that even if these aren't used at a given point, they still - * work. - * - * Note that the purpose is mainly to exercise the various possible function - * signatures that can be used to implement handler functions. We don't need to - * exercises very many cases (or error cases) of each one because the handlers - * themselves are not important, but we need to exercise enough to validate that - * the generic JSON and query parsing handles error cases. - * - * TODO-hardening: add test cases that exceed limits (e.g., query string length, - * JSON body length) - */ +//! Test cases for the "demo" handlers. These handlers exercise various +//! supported configurations of the HTTP handler interface. We exercise them +//! here to make sure that even if these aren't used at a given point, they still +//! work. +//! +//! Note that the purpose is mainly to exercise the various possible function +//! signatures that can be used to implement handler functions. We don't need to +//! exercises very many cases (or error cases) of each one because the handlers +//! themselves are not important, but we need to exercise enough to validate that +//! the generic JSON and query parsing handles error cases. +//! +//! TODO-hardening: add test cases that exceed limits (e.g., query string length, +//! JSON body length) use dropshot::channel; use dropshot::endpoint; @@ -80,10 +78,8 @@ fn demo_api() -> ApiDescription { api.register(demo_handler_307_temporary_redirect).unwrap(); api.register(demo_handler_websocket).unwrap(); - /* - * We don't need to exhaustively test these cases, as they're tested by unit - * tests. 
- */ + // We don't need to exhaustively test these cases, as they're tested by unit + // tests. let error = api.register(demo_handler_path_param_impossible).unwrap_err(); assert_eq!( error, @@ -94,10 +90,8 @@ fn demo_api() -> ApiDescription { api } -/* - * The "demo1" handler consumes neither query nor JSON body parameters. Here we - * test that such handlers work. There are no error cases for us to induce. - */ +// The "demo1" handler consumes neither query nor JSON body parameters. Here we +// test that such handlers work. There are no error cases for us to induce. #[tokio::test] async fn test_demo1() { let api = demo_api(); @@ -121,19 +115,17 @@ async fn test_demo1() { testctx.teardown().await; } -/* - * The "demo2query" handler consumes only query arguments. Here we make sure - * such handlers work and also exercise various error cases associated with bad - * query string parsing. - * TODO-hardening there are a lot more to check here, particularly around - * encoded values. - */ +// The "demo2query" handler consumes only query arguments. Here we make sure +// such handlers work and also exercise various error cases associated with bad +// query string parsing. +// TODO-hardening there are a lot more to check here, particularly around +// encoded values. #[tokio::test] async fn test_demo2query() { let api = demo_api(); let testctx = common::test_setup("demo2query", api); - /* Test case: optional field missing */ + // Test case: optional field missing let mut response = testctx .client_testctx .make_request( @@ -148,7 +140,7 @@ async fn test_demo2query() { assert_eq!(json.test1, "foo"); assert_eq!(json.test2, None); - /* Test case: both fields specified */ + // Test case: both fields specified let mut response = testctx .client_testctx .make_request( @@ -163,7 +155,7 @@ async fn test_demo2query() { assert_eq!(json.test1, "foo"); assert_eq!(json.test2, Some(10)); - /* Test case: required field missing */ + // Test case: required field missing let error = testctx .client_testctx .make_request( @@ -179,7 +171,7 @@ async fn test_demo2query() { "unable to parse query string: missing field `test1`" ); - /* Test case: typed field has bad value */ + // Test case: typed field has bad value let error = testctx .client_testctx .make_request( @@ -195,7 +187,7 @@ async fn test_demo2query() { "unable to parse query string: invalid digit found in string" ); - /* Test case: duplicated field name */ + // Test case: duplicated field name let error = testctx .client_testctx .make_request( @@ -214,17 +206,15 @@ async fn test_demo2query() { testctx.teardown().await; } -/* - * The "demo2json" handler consumes only a JSON object. Here we make sure such - * handlers work and also exercise various error cases associated with bad JSON - * handling. - */ +// The "demo2json" handler consumes only a JSON object. Here we make sure such +// handlers work and also exercise various error cases associated with bad JSON +// handling. 
#[tokio::test] async fn test_demo2json() { let api = demo_api(); let testctx = common::test_setup("demo2json", api); - /* Test case: optional field */ + // Test case: optional field let input = DemoJsonBody { test1: "bar".to_string(), test2: None }; let mut response = testctx .client_testctx @@ -240,7 +230,7 @@ async fn test_demo2json() { assert_eq!(json.test1, "bar"); assert_eq!(json.test2, None); - /* Test case: both fields populated */ + // Test case: both fields populated let input = DemoJsonBody { test1: "bar".to_string(), test2: Some(15) }; let mut response = testctx .client_testctx @@ -256,7 +246,7 @@ async fn test_demo2json() { assert_eq!(json.test1, "bar"); assert_eq!(json.test2, Some(15)); - /* Test case: no input specified */ + // Test case: no input specified let error = testctx .client_testctx .make_request( @@ -269,7 +259,7 @@ async fn test_demo2json() { .expect_err("expected failure"); assert!(error.message.starts_with("unable to parse JSON body")); - /* Test case: invalid JSON */ + // Test case: invalid JSON let error = testctx .client_testctx .make_request_with_body( @@ -282,7 +272,7 @@ async fn test_demo2json() { .expect_err("expected failure"); assert!(error.message.starts_with("unable to parse JSON body")); - /* Test case: bad type */ + // Test case: bad type let json_bad_type = "{ \"test1\": \"oops\", \"test2\": \"oops\" }"; let error = testctx .client_testctx @@ -301,16 +291,14 @@ async fn test_demo2json() { testctx.teardown().await; } -/* - * Handlers may also accept form/URL-encoded bodies. Here we test such - * bodies with both valid and invalid encodings. - */ +// Handlers may also accept form/URL-encoded bodies. Here we test such +// bodies with both valid and invalid encodings. #[tokio::test] async fn test_demo2urlencoded() { let api = demo_api(); let testctx = common::test_setup("demo2urlencoded", api); - /* Test case: optional field */ + // Test case: optional field let input = DemoJsonBody { test1: "bar".to_string(), test2: None }; let mut response = testctx .client_testctx @@ -326,7 +314,7 @@ async fn test_demo2urlencoded() { assert_eq!(json.test1, "bar"); assert_eq!(json.test2, None); - /* Test case: both fields populated */ + // Test case: both fields populated let input = DemoJsonBody { test1: "baz".to_string(), test2: Some(20) }; let mut response = testctx .client_testctx @@ -342,7 +330,7 @@ async fn test_demo2urlencoded() { assert_eq!(json.test1, "baz"); assert_eq!(json.test2, Some(20)); - /* Error case: wrong content type for endpoint */ + // Error case: wrong content type for endpoint let input = DemoJsonBody { test1: "qux".to_string(), test2: Some(30) }; let error = testctx .client_testctx @@ -359,7 +347,7 @@ async fn test_demo2urlencoded() { got \"application/json\"" )); - /* Error case: invalid encoding */ + // Error case: invalid encoding let error = testctx .client_testctx .make_request_with_body_url_encoded( @@ -372,7 +360,7 @@ async fn test_demo2urlencoded() { .expect_err("expected failure"); assert!(error.message.starts_with("unable to parse URL-encoded body")); - /* Error case: bad type */ + // Error case: bad type let error = testctx .client_testctx .make_request_with_body_url_encoded( @@ -388,18 +376,16 @@ async fn test_demo2urlencoded() { )); } -/* - * The "demo3" handler takes both query arguments and a JSON body. This test - * makes sure that both sets of parameters are received by the handler function - * and at least one error case from each of those sources is exercised. 
We - * don't need exhaustively re-test the query and JSON error handling paths. - */ +// The "demo3" handler takes both query arguments and a JSON body. This test +// makes sure that both sets of parameters are received by the handler function +// and at least one error case from each of those sources is exercised. We +// don't need exhaustively re-test the query and JSON error handling paths. #[tokio::test] async fn test_demo3json() { let api = demo_api(); let testctx = common::test_setup("demo3json", api); - /* Test case: everything filled in. */ + // Test case: everything filled in. let json_input = DemoJsonBody { test1: "bart".to_string(), test2: Some(0) }; let mut response = testctx @@ -418,7 +404,7 @@ async fn test_demo3json() { assert_eq!(json.query.test1, "martin"); assert_eq!(json.query.test2.unwrap(), 2); - /* Test case: error parsing query */ + // Test case: error parsing query let json_input = DemoJsonBody { test1: "bart".to_string(), test2: Some(0) }; let error = testctx .client_testctx @@ -435,7 +421,7 @@ async fn test_demo3json() { "unable to parse query string: missing field `test1`" ); - /* Test case: error parsing body */ + // Test case: error parsing body let error = testctx .client_testctx .make_request_with_body( @@ -451,26 +437,22 @@ async fn test_demo3json() { testctx.teardown().await; } -/* - * The "demo_path_param_string" handler takes just a single string path - * parameter. - */ +// The "demo_path_param_string" handler takes just a single string path +// parameter. #[tokio::test] async fn test_demo_path_param_string() { let api = demo_api(); let testctx = common::test_setup("demo_path_param_string", api); - /* - * Simple error cases. All of these should produce 404 "Not Found" errors. - */ + // Simple error cases. All of these should produce 404 "Not Found" errors. let bad_paths = vec![ - /* missing path parameter (won't match route) */ + // missing path parameter (won't match route) "/testing/demo_path_string", - /* missing path parameter (won't match route) */ + // missing path parameter (won't match route) "/testing/demo_path_string/", - /* missing path parameter (won't match route) */ + // missing path parameter (won't match route) "/testing/demo_path_string//", - /* extra path segment (won't match route) */ + // extra path segment (won't match route) "/testing/demo_path_string/okay/then", ]; @@ -488,9 +470,7 @@ async fn test_demo_path_param_string() { assert_eq!(error.message, "Not Found"); } - /* - * Success cases (use the path parameter). - */ + // Success cases (use the path parameter). let okay_paths = vec![ ("/testing/demo_path_string/okay", "okay"), ("/testing/demo_path_string/okay/", "okay"), @@ -526,18 +506,14 @@ async fn test_demo_path_param_string() { testctx.teardown().await; } -/* - * The "demo_path_param_uuid" handler takes just a single uuid path parameter. - */ +// The "demo_path_param_uuid" handler takes just a single uuid path parameter. #[tokio::test] async fn test_demo_path_param_uuid() { let api = demo_api(); let testctx = common::test_setup("demo_path_param_uuid", api); - /* - * Error case: not a valid uuid. The other error cases are the same as for - * the string-valued path parameter and they're tested above. - */ + // Error case: not a valid uuid. The other error cases are the same as for + // the string-valued path parameter and they're tested above. 
let error = testctx .client_testctx .make_request_with_body( @@ -550,9 +526,7 @@ async fn test_demo_path_param_uuid() { .unwrap_err(); assert!(error.message.starts_with("bad parameter in URL path:")); - /* - * Success case (use the Uuid) - */ + // Success case (use the Uuid) let uuid_str = "e7de8ccc-8938-43fa-8404-a040a0836ee4"; let valid_path = format!("/testing/demo_path_uuid/{}", uuid_str); let mut response = testctx @@ -571,18 +545,14 @@ async fn test_demo_path_param_uuid() { testctx.teardown().await; } -/* - * The "demo_path_param_u32" handler takes just a single u32 path parameter. - */ +// The "demo_path_param_u32" handler takes just a single u32 path parameter. #[tokio::test] async fn test_demo_path_param_u32() { let api = demo_api(); let testctx = common::test_setup("demo_path_param_u32", api); - /* - * Error case: not a valid u32. Other error cases are the same as for the - * string-valued path parameter and they're tested above. - */ + // Error case: not a valid u32. Other error cases are the same as for the + // string-valued path parameter and they're tested above. let error = testctx .client_testctx .make_request_with_body( @@ -595,9 +565,7 @@ async fn test_demo_path_param_u32() { .unwrap_err(); assert!(error.message.starts_with("bad parameter in URL path:")); - /* - * Success case (use the number) - */ + // Success case (use the number) let u32_str = "37"; let valid_path = format!("/testing/demo_path_u32/{}", u32_str); let mut response = testctx @@ -616,16 +584,14 @@ async fn test_demo_path_param_u32() { testctx.teardown().await; } -/* - * Test `UntypedBody`. - */ +// Test `UntypedBody`. #[tokio::test] async fn test_untyped_body() { let api = demo_api(); let testctx = common::test_setup("test_untyped_body", api); let client = &testctx.client_testctx; - /* Error case: body too large. */ + // Error case: body too large. let big_body = vec![0u8; 1025]; let error = client .make_request_with_body( @@ -641,7 +607,7 @@ async fn test_untyped_body() { "request body exceeded maximum size of 1024 bytes" ); - /* Error case: invalid UTF-8, when parsing as a UTF-8 string. */ + // Error case: invalid UTF-8, when parsing as a UTF-8 string. let bad_body = vec![0x80u8; 1]; let error = client .make_request_with_body( @@ -658,7 +624,7 @@ async fn test_untyped_body() { bytes from index 0" ); - /* Success case: invalid UTF-8, when not parsing. */ + // Success case: invalid UTF-8, when not parsing. 
let mut response = client .make_request_with_body( Method::PUT, @@ -672,7 +638,7 @@ async fn test_untyped_body() { assert_eq!(json.nbytes, 1); assert_eq!(json.as_utf8, None); - /* Success case: empty body */ + // Success case: empty body let mut response = client .make_request_with_body( Method::PUT, @@ -686,7 +652,7 @@ async fn test_untyped_body() { assert_eq!(json.nbytes, 0); assert_eq!(json.as_utf8, Some(String::from(""))); - /* Success case: non-empty content */ + // Success case: non-empty content let body: Vec = Vec::from(&b"t\xce\xbcv"[..]); let mut response = client .make_request_with_body( @@ -704,9 +670,7 @@ async fn test_untyped_body() { testctx.teardown().await; } -/* - * Test delete request - */ +// Test delete request #[tokio::test] async fn test_delete_request() { let api = demo_api(); @@ -718,9 +682,7 @@ async fn test_delete_request() { testctx.teardown().await; } -/* - * Test response headers - */ +// Test response headers #[tokio::test] async fn test_header_request() { let api = demo_api(); @@ -753,9 +715,7 @@ async fn test_header_request() { assert_eq!(headers, vec!["hi", "howdy"]); } -/* - * Test 302 "Found" response with an invalid header value - */ +// Test 302 "Found" response with an invalid header value #[tokio::test] async fn test_302_bogus() { let api = demo_api(); @@ -771,9 +731,7 @@ async fn test_302_bogus() { assert_eq!(error.message, "Internal Server Error"); } -/* - * Test 302 "Found" response - */ +// Test 302 "Found" response #[tokio::test] async fn test_302_found() { let api = demo_api(); @@ -798,9 +756,7 @@ async fn test_302_found() { assert_eq!(read_string(&mut response).await, ""); } -/* - * Test 303 "See Other" response - */ +// Test 303 "See Other" response #[tokio::test] async fn test_303_see_other() { let api = demo_api(); @@ -825,9 +781,7 @@ async fn test_303_see_other() { assert_eq!(read_string(&mut response).await, ""); } -/* - * Test 307 "Temporary Redirect" response - */ +// Test 307 "Temporary Redirect" response #[tokio::test] async fn test_307_temporary_redirect() { let api = demo_api(); @@ -852,10 +806,8 @@ async fn test_307_temporary_redirect() { assert_eq!(read_string(&mut response).await, ""); } -/* - * The "test_demo_websocket" handler upgrades to a websocket and exchanges - * greetings with the client. - */ +// The "test_demo_websocket" handler upgrades to a websocket and exchanges +// greetings with the client. #[tokio::test] async fn test_demo_websocket() { let api = demo_api(); @@ -874,9 +826,7 @@ async fn test_demo_websocket() { testctx.teardown().await; } -/* - * Demo handler functions - */ +// Demo handler functions type RequestCtx = Arc>; diff --git a/dropshot/tests/test_openapi.rs b/dropshot/tests/test_openapi.rs index 6e581f257..b56a0cfb8 100644 --- a/dropshot/tests/test_openapi.rs +++ b/dropshot/tests/test_openapi.rs @@ -43,12 +43,10 @@ struct QueryArgs { path = "/test/woman", tags = ["it"], }] -/** - * C-style comment - * - * This is a multi- - * line comment. - */ +/// C-style comment +/// +/// This is a multi- +/// line comment. async fn handler2( _rqctx: Arc>, _query: Query, @@ -172,10 +170,8 @@ async fn handler7( unimplemented!(); } -/* - * Test that we do not generate duplicate type definitions when the same type is - * returned by two different handler functions. - */ +// Test that we do not generate duplicate type definitions when the same type is +// returned by two different handler functions. 
/// Best non-duplicated type #[derive(JsonSchema, Serialize)] @@ -213,10 +209,8 @@ async fn handler9( unimplemented!(); } -/* - * Similarly, test that we do not generate duplicate type definitions when the - * same type is accepted as a typed body to two different handler functions. - */ +// Similarly, test that we do not generate duplicate type definitions when the +// same type is accepted as a typed body to two different handler functions. #[derive(Deserialize, JsonSchema)] struct NeverDuplicatedBodyTopLevel { @@ -253,10 +247,8 @@ async fn handler11( unimplemented!(); } -/* - * Finally, test that we do not generate duplicate type definitions when the - * same type is used in two different places. - */ +// Finally, test that we do not generate duplicate type definitions when the +// same type is used in two different places. #[derive(Deserialize, JsonSchema, Serialize)] #[allow(dead_code)] diff --git a/dropshot/tests/test_pagination.rs b/dropshot/tests/test_pagination.rs index 83c9379fe..f6cfec678 100644 --- a/dropshot/tests/test_pagination.rs +++ b/dropshot/tests/test_pagination.rs @@ -1,7 +1,5 @@ // Copyright 2020 Oxide Computer Company -/*! - * Test cases for API handler functions that use pagination. - */ +//! Test cases for API handler functions that use pagination. use chrono::DateTime; use chrono::Utc; @@ -56,14 +54,10 @@ extern crate lazy_static; pub mod common; -/* - * Common helpers - */ +// Common helpers -/** - * Given a test context and URL path, assert that a GET request to that path - * (with an empty body) produces a 400 response with the given error message. - */ +/// Given a test context and URL path, assert that a GET request to that path +/// (with an empty body) produces a 400 response with the given error message. async fn assert_error( client: &ClientTestContext, path: &str, @@ -76,10 +70,8 @@ async fn assert_error( assert_eq!(error.error_code, None); } -/** - * Given an array of integers, check that they're sequential starting at - * "offset". - */ +/// Given an array of integers, check that they're sequential starting at +/// "offset". fn assert_sequence_from(items: &Vec, offset: u16, count: u16) { let nchecked = AtomicU16::new(0); items.iter().enumerate().for_each(|(i, c)| { @@ -90,13 +82,11 @@ fn assert_sequence_from(items: &Vec, offset: u16, count: u16) { assert_eq!(count as usize, items.len()); } -/** - * Iterate the paginated collection using several different "limit" values to - * validate that it always produces the same collection (no dups or missing - * records around page breaks). - * TODO This should move into test_util so that consumers can use it to test - * their own APIs. - */ +/// Iterate the paginated collection using several different "limit" values to +/// validate that it always produces the same collection (no dups or missing +/// records around page breaks). +/// TODO This should move into test_util so that consumers can use it to test +/// their own APIs. async fn assert_collection_iter( client: &ClientTestContext, path: &str, @@ -105,7 +95,7 @@ async fn assert_collection_iter( where T: Clone + Debug + Eq + DeserializeOwned, { - /* Use a modest small number for our initial limit. */ + // Use a modest small number for our initial limit. 
let (itemsby100, npagesby100) = iter_collection::(&client, path, initial_params, 100).await; let expected_npages = itemsby100.len() / 100 @@ -113,30 +103,24 @@ where + (if itemsby100.len() % 100 != 0 { 1 } else { 0 }); assert_eq!(expected_npages, npagesby100); - /* - * Assert that there are between 100 and 10000 items. It's not really a - * problem for there to be a number outside this range. However, our goal - * here is to independently exercise a modest limit (small but larger than - * 1) and also to check it against a max-limit request that's expected to - * have all the results, and we can't do that easily unless this condition - * holds. We could skip these checks if it's useful to have tests that work - * that way, but for now we assert this so that we find out if we're somehow - * not testing what we expect. - */ + // Assert that there are between 100 and 10000 items. It's not really a + // problem for there to be a number outside this range. However, our goal + // here is to independently exercise a modest limit (small but larger than + // 1) and also to check it against a max-limit request that's expected to + // have all the results, and we can't do that easily unless this condition + // holds. We could skip these checks if it's useful to have tests that work + // that way, but for now we assert this so that we find out if we're somehow + // not testing what we expect. assert!(itemsby100.len() > 100); assert!(itemsby100.len() <= 10000); - /* - * Use a max limit to fetch everything at once to make sure it's the same. - */ + // Use a max limit to fetch everything at once to make sure it's the same. let (itemsbymax, npagesbymax) = iter_collection::(&client, path, initial_params, 10000).await; assert_eq!(2, npagesbymax); assert_eq!(itemsby100, itemsbymax); - /* - * Iterate by one to make sure that edge case works, too. - */ + // Iterate by one to make sure that edge case works, too. let (itemsby1, npagesby1) = iter_collection::(&client, path, initial_params, 1).await; assert_eq!(itemsby100.len() + 1, npagesby1); @@ -145,11 +129,9 @@ where itemsbymax } -/** - * Page selector for a set of "u16" values - * - * This is used for several resources below. - */ +/// Page selector for a set of "u16" values +/// +/// This is used for several resources below. #[derive(Debug, Deserialize, JsonSchema, Serialize)] struct IntegersPageSelector { last_seen: u16, @@ -159,10 +141,8 @@ fn page_selector_for(n: &u16, _p: &EmptyScanParams) -> IntegersPageSelector { IntegersPageSelector { last_seen: *n } } -/** - * Define an API with a couple of different endpoints that allow us to exercise - * various functionality. - */ +/// Define an API with a couple of different endpoints that allow us to exercise +/// various functionality. fn paginate_api() -> ApiDescription { let mut api = ApiDescription::new(); api.register(api_integers).unwrap(); @@ -183,14 +163,10 @@ fn range_u16(start: u16, limit: u16) -> Vec { } } -/* - * Basic tests - */ +// Basic tests -/** - * "/intapi": a collection of positive values of "u16" (excepting u16::MAX). - * The marker is simply the last number seen. - */ +/// "/intapi": a collection of positive values of "u16" (excepting u16::MAX). +/// The marker is simply the last number seen. #[endpoint { method = GET, path = "/intapi", @@ -266,26 +242,20 @@ async fn test_paginate_basic() { let testctx = common::test_setup("basic", api); let client = &testctx.client_testctx; - /* - * "First page" test cases - */ + // "First page" test cases - /* - * Test the default value of "limit". 
This test will have to be updated if - * we change the default count of items, but it's important to check that - * the default actually works and is reasonable. - */ + // Test the default value of "limit". This test will have to be updated if + // we change the default count of items, but it's important to check that + // the default actually works and is reasonable. let expected_default = 100; let page = objects_list_page::(&client, "/intapi").await; assert_sequence_from(&page.items, 1, expected_default); assert!(page.next_page.is_some()); - /* - * Test the maximum value of "limit" by providing a value much higher than - * we support and observing it get clamped. As with the previous test, this - * will have to be updated if we change the maximum count, but it's worth it - * to test this case. - */ + // Test the maximum value of "limit" by providing a value much higher than + // we support and observing it get clamped. As with the previous test, this + // will have to be updated if we change the maximum count, but it's worth it + // to test this case. let expected_max = 10000; let page = objects_list_page::( &client, @@ -294,10 +264,8 @@ async fn test_paginate_basic() { .await; assert_sequence_from(&page.items, 1, expected_max); - /* - * Limits in between the default and the max should also work. This - * exercises the `page_limit()` function. - */ + // Limits in between the default and the max should also work. This + // exercises the `page_limit()` function. let count = 2 * expected_default; assert!(count > expected_default); assert!(count < expected_max); @@ -306,13 +274,9 @@ async fn test_paginate_basic() { .await; assert_sequence_from(&page.items, 1, count); - /* - * "Next page" test cases - */ + // "Next page" test cases - /* - * Run the same few limit tests as above. - */ + // Run the same few limit tests as above. let next_page_start = page.items.last().unwrap() + 1; let next_page_token = page.next_page.unwrap(); @@ -344,9 +308,7 @@ async fn test_paginate_basic() { assert_sequence_from(&page.items, next_page_start, count); assert!(page.next_page.is_some()); - /* - * Loop through the entire collection. - */ + // Loop through the entire collection. let mut next_item = 1u16; let mut page = objects_list_page::( &client, @@ -383,14 +345,10 @@ async fn test_paginate_basic() { testctx.teardown().await; } -/* - * Tests for an empty collection - */ +// Tests for an empty collection -/** - * "/empty": an empty collection of u16s, useful for testing the case where the - * first request in a scan returns no results. - */ +/// "/empty": an empty collection of u16s, useful for testing the case where the +/// first request in a scan returns no results. #[endpoint { method = GET, path = "/empty", @@ -406,11 +364,9 @@ async fn api_empty( )?)) } -/* - * Tests various cases related to an empty collection, particularly making sure - * that basic parsing of query parameters still does what we expect and that we - * get a valid results page with no objects. - */ +// Tests various cases related to an empty collection, particularly making sure +// that basic parsing of query parameters still does what we expect and that we +// get a valid results page with no objects. 
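+//
+// A sketch of why the empty case is well-behaved: ResultsPage::new() derives
+// the next-page token from the last item in the page, so an empty Vec yields
+// next_page == None. (Illustrative helper only; the test below exercises the
+// same behavior end-to-end over HTTP.)
+#[allow(dead_code)]
+fn empty_page_has_no_token() -> Result<(), HttpError> {
+    let page = ResultsPage::new(
+        Vec::<u16>::new(),
+        &EmptyScanParams {},
+        page_selector_for,
+    )?;
+    assert!(page.next_page.is_none());
+    Ok(())
+}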
#[tokio::test] async fn test_paginate_empty() { let api = paginate_api(); @@ -444,15 +400,11 @@ async fn test_paginate_empty() { testctx.teardown().await; } -/* - * Test extra query parameters and response properties - */ +// Test extra query parameters and response properties -/** - * "/ints_extra": also a paginated collection of "u16" values. This - * API exercises consuming additional query parameters ("debug") and sending a - * more complex response type. - */ +/// "/ints_extra": also a paginated collection of "u16" values. This +/// API exercises consuming additional query parameters ("debug") and sending a +/// more complex response type. #[endpoint { method = GET, @@ -483,13 +435,13 @@ async fn api_with_extra_params( })) } -/* TODO-coverage check generated OpenAPI spec */ +// TODO-coverage check generated OpenAPI spec #[derive(Deserialize, JsonSchema)] struct ExtraQueryParams { debug: Option, } -/* TODO-coverage check generated OpenAPI spec */ +// TODO-coverage check generated OpenAPI spec #[derive(Debug, Deserialize, JsonSchema, Serialize)] struct ExtraResultsPage { debug_was_set: bool, @@ -504,7 +456,7 @@ async fn test_paginate_extra_params() { let testctx = common::test_setup("extra_params", api); let client = &testctx.client_testctx; - /* Test that the extra query parameter is optional. */ + // Test that the extra query parameter is optional. let page = object_get::(&client, "/ints_extra?limit=5").await; assert!(!page.debug_was_set); @@ -512,7 +464,7 @@ async fn test_paginate_extra_params() { assert_eq!(page.page.items, vec![1, 2, 3, 4, 5]); let token = page.page.next_page.unwrap(); - /* Provide a value for the extra query parameter in the FirstPage case. */ + // Provide a value for the extra query parameter in the FirstPage case. let page = object_get::( &client, "/ints_extra?limit=5&debug=true", @@ -523,7 +475,7 @@ async fn test_paginate_extra_params() { assert_eq!(page.page.items, vec![1, 2, 3, 4, 5]); assert!(page.page.next_page.is_some()); - /* Provide a value for the extra query parameter in the NextPage case. */ + // Provide a value for the extra query parameter in the NextPage case. let page = object_get::( &client, &format!("/ints_extra?page_token={}&debug=false&limit=7", token), @@ -537,18 +489,14 @@ async fn test_paginate_extra_params() { testctx.teardown().await; } -/* - * Test an endpoint that requires scan parameters. - */ +// Test an endpoint that requires scan parameters. #[derive(Deserialize, JsonSchema)] struct ReqScanParams { doit: bool, } -/** - * "/required": similar to "/intapi", but with a required start parameter - */ +/// "/required": similar to "/intapi", but with a required start parameter #[endpoint { method = GET, path = "/required", @@ -587,7 +535,7 @@ async fn test_paginate_with_required_params() { let testctx = common::test_setup("required_params", api); let client = &testctx.client_testctx; - /* Test that the extra query parameter is optional... */ + // Test that the extra query parameter is optional... let error = client .make_request_error( Method::GET, @@ -595,13 +543,11 @@ async fn test_paginate_with_required_params() { StatusCode::BAD_REQUEST, ) .await; - /* - * TODO-polish the message here is pretty poor. See comments in the - * automated tests in src/pagination.rs. - */ + // TODO-polish the message here is pretty poor. See comments in the + // automated tests in src/pagination.rs. assert!(error.message.starts_with("unable to parse query string")); - /* ... and that it's getting passed through to the handler function */ + // ... 
and that it's getting passed through to the handler function let error = client .make_request_error( Method::GET, @@ -618,11 +564,9 @@ async fn test_paginate_with_required_params() { testctx.teardown().await; } -/* - * Test an endpoint with scan options that returns custom structures. Our - * endpoint will return a list of words, with the marker being the last word - * seen. - */ +// Test an endpoint with scan options that returns custom structures. Our +// endpoint will return a list of words, with the marker being the last word +// seen. lazy_static! { static ref WORD_LIST: BTreeSet = make_word_list(); @@ -633,10 +577,8 @@ fn make_word_list() -> BTreeSet { word_list.lines().map(|s| s.to_string()).collect() } -/* - * The use of a structure here is kind of pointless except to exercise the case - * of endpoints that return a custom structure. - */ +// The use of a structure here is kind of pointless except to exercise the case +// of endpoints that return a custom structure. #[derive(Debug, Deserialize, Clone, Eq, JsonSchema, PartialEq, Serialize)] struct DictionaryWord { word: String, @@ -710,19 +652,17 @@ async fn api_dictionary( )?)) } -/* - * These tests exercise the behavior of a paginated API with filtering and - * multiple sort options. In some ways, these just test our test API. But it's - * an important validation that it's possible to build such an API that works - * the way we expect it to. - */ +// These tests exercise the behavior of a paginated API with filtering and +// multiple sort options. In some ways, these just test our test API. But it's +// an important validation that it's possible to build such an API that works +// the way we expect it to. #[tokio::test] async fn test_paginate_dictionary() { let api = paginate_api(); let testctx = common::test_setup("dictionary", api); let client = &testctx.client_testctx; - /* simple case */ + // simple case let page = objects_list_page::(&client, "/dictionary?limit=3") .await; @@ -739,7 +679,7 @@ async fn test_paginate_dictionary() { page.items.iter().map(|dw| dw.word.as_str()).collect::>(); assert_eq!(found_words, vec!["AAAS", "ABA", "AC",]); - /* Reverse the order. */ + // Reverse the order. let page = objects_list_page::( &client, "/dictionary?limit=3&order=descending", @@ -749,7 +689,7 @@ async fn test_paginate_dictionary() { page.items.iter().map(|dw| dw.word.as_str()).collect::>(); assert_eq!(found_words, vec!["zygote", "zucchini", "zounds",]); let token = page.next_page.unwrap(); - /* Critically, we don't have to pass order=descending again. */ + // Critically, we don't have to pass order=descending again. let page = objects_list_page::( &client, &format!("/dictionary?limit=3&page_token={}", token), @@ -759,7 +699,7 @@ async fn test_paginate_dictionary() { page.items.iter().map(|dw| dw.word.as_str()).collect::>(); assert_eq!(found_words, vec!["zooplankton", "zoom", "zoology",]); - /* Apply a filter. */ + // Apply a filter. let page = objects_list_page::( &client, "/dictionary?limit=3&min_length=12", @@ -784,10 +724,8 @@ async fn test_paginate_dictionary() { vec!["Bhagavadgita", "Brontosaurus", "Cantabrigian",] ); - /* - * Let's page through the filtered collection one item at a time. This is - * an edge case that only works if the marker is implemented correctly. - */ + // Let's page through the filtered collection one item at a time. This is + // an edge case that only works if the marker is implemented correctly. 
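    // (For context: the "marker" is the page selector that the server
    // serializes into the opaque `page_token`; for this endpoint it is
    // essentially the last word returned, along with the scan parameters.
    // Fetching one item at a time therefore round-trips that token on every
    // single item, which is what makes it a good edge-case test.)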
let (sortedby1, npagesby1) = iter_collection::( &client, "/dictionary", @@ -800,10 +738,8 @@ async fn test_paginate_dictionary() { assert_eq!(sortedby1[0].word, "Addressograph"); assert_eq!(sortedby1[sortedby1.len() - 1].word, "wholehearted"); - /* - * Page through it again one at a time, but in reverse order to make sure - * the marker works correctly in that direction as well. - */ + // Page through it again one at a time, but in reverse order to make sure + // the marker works correctly in that direction as well. let (rsortedby1, rnpagesby1) = iter_collection::( &client, "/dictionary", @@ -821,10 +757,8 @@ async fn test_paginate_dictionary() { .collect::>() ); - /* - * Fetch the whole thing in one go to make sure we didn't hit any edge cases - * around the markers. - */ + // Fetch the whole thing in one go to make sure we didn't hit any edge cases + // around the markers. let (sortedbybig, npagesbybig) = iter_collection::( &client, "/dictionary", @@ -833,11 +767,9 @@ async fn test_paginate_dictionary() { ) .await; assert_eq!(sortedby1, sortedbybig); - /* - * There's currently an extra request as part of any scan because Dropshot - * doesn't know not to put the marker in the response unless it sees an - * empty response. - */ + // There's currently an extra request as part of any scan because Dropshot + // doesn't know not to put the marker in the response unless it sees an + // empty response. assert_eq!(npagesbybig, 2); testctx.teardown().await; @@ -861,13 +793,11 @@ impl Drop for ExampleContext { } } -/** - * For one of the example programs that starts a Dropshot server on localhost - * using a TCP port provided as a command-line argument, this function starts - * the requested example on the requested TCP port and attempts to wait for the - * Dropshot server to become available. It returns a handle to the child - * process and a ClientTestContext for making requests against that server. - */ +/// For one of the example programs that starts a Dropshot server on localhost +/// using a TCP port provided as a command-line argument, this function starts +/// the requested example on the requested TCP port and attempts to wait for the +/// Dropshot server to become available. It returns a handle to the child +/// process and a ClientTestContext for making requests against that server. async fn start_example(path: &str, port: u16) -> ExampleContext { let logctx = LogContext::new( path, @@ -889,22 +819,18 @@ async fn start_example(path: &str, port: u16) -> ExampleContext { my_path }; - /* - * We redirect stderr to /dev/null to avoid spamming the user's terminal. - * It would be better to put this in some log file that we manage similarly - * to a LogContext so that it would be available for debugging when wanted - * but removed upon successful completion of the test. - */ + // We redirect stderr to /dev/null to avoid spamming the user's terminal. + // It would be better to put this in some log file that we manage similarly + // to a LogContext so that it would be available for debugging when wanted + // but removed upon successful completion of the test. let config = Exec::cmd(cmd_path).arg(port.to_string()).stderr(NullFile); let cmdline = config.to_cmdline_lossy(); info!(&log, "starting child process"; "cmdline" => &cmdline); let child = config.popen().unwrap(); - /* - * Wait up to 10 seconds for the actual HTTP server to start up. We'll - * continue trying to make requests against it until they fail for an - * HTTP-level error. 
- */ + // Wait up to 10 seconds for the actual HTTP server to start up. We'll + // continue trying to make requests against it until they fail for an + // HTTP-level error. let start = Instant::now(); let server_addr = SocketAddr::from((Ipv4Addr::LOCALHOST, port)); let client = ClientTestContext::new(server_addr, logctx.log.new(o!())); @@ -943,16 +869,12 @@ struct ExampleProject { name: String, } -/** - * Tests the "pagination-basic" example, which just lists 999 projects. - */ +/// Tests the "pagination-basic" example, which just lists 999 projects. #[tokio::test] async fn test_example_basic() { - /* - * We specify a port on which to run the example servers. It would be - * better to let them pick a port on startup (as they will do if we don't - * provide an argument) and use that. - */ + // We specify a port on which to run the example servers. It would be + // better to let them pick a port on startup (as they will do if we don't + // provide an argument) and use that. let mut exctx = start_example("pagination-basic", 12230).await; let client = &exctx.client; @@ -972,15 +894,13 @@ struct ExampleProjectMtime { mtime: DateTime, } -/** - * Tests the "pagination-multiple-sorts" example. - */ +/// Tests the "pagination-multiple-sorts" example. #[tokio::test] async fn test_example_multiple_sorts() { let mut exctx = start_example("pagination-multiple-sorts", 12231).await; let client = &exctx.client; - /* default sort */ + // default sort let byname = assert_collection_iter::(&client, "/projects", "") .await; @@ -988,7 +908,7 @@ async fn test_example_multiple_sorts() { assert_eq!(byname[0].name, "project001"); assert_eq!(byname[byname.len() - 1].name, "project999"); - /* ascending sort by name */ + // ascending sort by name let byname_asc = assert_collection_iter::( &client, "/projects", @@ -997,7 +917,7 @@ async fn test_example_multiple_sorts() { .await; assert_eq!(byname, byname_asc); - /* descending sort by name */ + // descending sort by name let byname_desc = assert_collection_iter::( &client, "/projects", @@ -1013,7 +933,7 @@ async fn test_example_multiple_sorts() { .collect::>() ); - /* ascending sort by mtime */ + // ascending sort by mtime let bymtime_asc = assert_collection_iter::( &client, "/projects", @@ -1029,7 +949,7 @@ async fn test_example_multiple_sorts() { ); }); - /* descending sort by mtime */ + // descending sort by mtime let bymtime_desc = assert_collection_iter::( &client, "/projects", @@ -1057,9 +977,7 @@ struct ExampleObject { name: String, } -/** - * Tests the "pagination-multiple-resources" example. - */ +/// Tests the "pagination-multiple-resources" example. #[tokio::test] async fn test_example_multiple_resources() { let mut exctx = start_example("pagination-multiple-resources", 12232).await; @@ -1067,7 +985,7 @@ async fn test_example_multiple_resources() { let resources = ["/projects", "/disks", "/instances"]; for resource in &resources[..] { - /* Scan parameters are not necessary. */ + // Scan parameters are not necessary. let no_args = objects_list_page::(&client, "/projects?limit=3") .await; diff --git a/dropshot/tests/test_tls.rs b/dropshot/tests/test_tls.rs index 32faf1374..4039173e1 100644 --- a/dropshot/tests/test_tls.rs +++ b/dropshot/tests/test_tls.rs @@ -1,8 +1,6 @@ // Copyright 2022 Oxide Computer Company -/*! - * Test cases for TLS support. This validates various behaviors of our TLS mode, - * including certificate loading and supported modes. - */ +//! Test cases for TLS support. This validates various behaviors of our TLS mode, +//! 
including certificate loading and supported modes. use dropshot::{ConfigDropshot, ConfigTls, HttpResponseOk, HttpServerStarter}; use slog::{o, Logger}; @@ -337,12 +335,10 @@ pub struct TlsCheckArgs { tls: bool, } -/* - * The same handler is used for both an HTTP and HTTPS server. - * Make sure that we can distinguish between the two. - * The intended version is determined by a query parameter - * that varies between both tests. - */ +// The same handler is used for both an HTTP and HTTPS server. +// Make sure that we can distinguish between the two. +// The intended version is determined by a query parameter +// that varies between both tests. #[dropshot::endpoint { method = GET, path = "/", diff --git a/dropshot_endpoint/src/lib.rs b/dropshot_endpoint/src/lib.rs index 5ee6709fa..a9e2cb3d2 100644 --- a/dropshot_endpoint/src/lib.rs +++ b/dropshot_endpoint/src/lib.rs @@ -4,10 +4,8 @@ //! attributes are used both to define an HTTP API and to generate an OpenAPI //! Spec (OAS) v3 document that describes the API. -/* - * Clippy's style advice is definitely valuable, but not worth the trouble for - * automated enforcement. - */ +// Clippy's style advice is definitely valuable, but not worth the trouble for +// automated enforcement. #![allow(clippy::style)] use quote::format_ident; @@ -1524,11 +1522,9 @@ mod tests { #[test] fn test_extract_summary_description() { - /** - * Javadoc summary - * Maybe there's another name for these... - * ... but Java is the first place I saw these types of comments. - */ + /// Javadoc summary + /// Maybe there's another name for these... + /// ... but Java is the first place I saw these types of comments. #[derive(Schema)] struct JavadocComments; assert_eq!( @@ -1543,11 +1539,9 @@ mod tests { ) ); - /** - * Javadoc summary - * - * Skip that blank. - */ + /// Javadoc summary + /// + /// Skip that blank. #[derive(Schema)] struct JavadocCommentsWithABlank; assert_eq!( @@ -1558,7 +1552,7 @@ mod tests { ) ); - /** Terse Javadoc summary */ + /// Terse Javadoc summary #[derive(Schema)] struct JavadocCommentsTerse; assert_eq!( @@ -1604,9 +1598,7 @@ mod tests { (Some("Just a Rustdoc summary".to_string()), None) ); - /** - * Just a Javadoc summary - */ + /// Just a Javadoc summary #[derive(Schema)] struct JustTheJavadocSummary; assert_eq!( diff --git a/rustfmt.toml b/rustfmt.toml index 11297fdbe..f0701faae 100644 --- a/rustfmt.toml +++ b/rustfmt.toml @@ -4,3 +4,7 @@ max_width = 80 use_small_heuristics = "max" edition = "2018" + +# Temp unstable features +unstable_features = true +normalize_comments = true From 3ac8c39e8f866160337ed0ad4e357623835e25a7 Mon Sep 17 00:00:00 2001 From: David Pacheco Date: Mon, 9 Jan 2023 13:32:00 -0800 Subject: [PATCH 02/47] do not update rustfmt.toml too --- rustfmt.toml | 4 ---- 1 file changed, 4 deletions(-) diff --git a/rustfmt.toml b/rustfmt.toml index f0701faae..11297fdbe 100644 --- a/rustfmt.toml +++ b/rustfmt.toml @@ -4,7 +4,3 @@ max_width = 80 use_small_heuristics = "max" edition = "2018" - -# Temp unstable features -unstable_features = true -normalize_comments = true From 3b481215fe9976af3a992524f39f6a12a1fd5dca Mon Sep 17 00:00:00 2001 From: David Pacheco Date: Mon, 9 Jan 2023 14:19:11 -0800 Subject: [PATCH 03/47] fix websocket.rs (thanks @ahl!) 
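The change below is whitespace-only: it re-indents the doc-comment example in
`impl WebsocketUpgrade` so the example code lines up consistently under the
`///` markers again.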
--- dropshot/src/websocket.rs | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/dropshot/src/websocket.rs b/dropshot/src/websocket.rs index 1fabc0baa..056b11377 100644 --- a/dropshot/src/websocket.rs +++ b/dropshot/src/websocket.rs @@ -189,20 +189,20 @@ impl WebsocketUpgrade { /// ``` /// #[dropshot::endpoint { method = GET, path = "/my/ws/endpoint/{id}" }] /// async fn my_ws_endpoint( - /// rqctx: std::sync::Arc>, - /// websock: dropshot::WebsocketUpgrade, - /// id: dropshot::Path, + /// rqctx: std::sync::Arc>, + /// websock: dropshot::WebsocketUpgrade, + /// id: dropshot::Path, /// ) -> dropshot::WebsocketEndpointResult { - /// let logger = rqctx.log.new(slog::o!()); - /// websock.handle(move |upgraded| async move { - /// slog::info!(logger, "Entered handler for ID {}", id.into_inner()); - /// use futures::stream::StreamExt; - /// let mut ws_stream = tokio_tungstenite::WebSocketStream::from_raw_socket( - /// upgraded.into_inner(), tokio_tungstenite::tungstenite::protocol::Role::Server, None - /// ).await; - /// slog::info!(logger, "Received from websocket: {:?}", ws_stream.next().await); - /// Ok(()) - /// }) + /// let logger = rqctx.log.new(slog::o!()); + /// websock.handle(move |upgraded| async move { + /// slog::info!(logger, "Entered handler for ID {}", id.into_inner()); + /// use futures::stream::StreamExt; + /// let mut ws_stream = tokio_tungstenite::WebSocketStream::from_raw_socket( + /// upgraded.into_inner(), tokio_tungstenite::tungstenite::protocol::Role::Server, None + /// ).await; + /// slog::info!(logger, "Received from websocket: {:?}", ws_stream.next().await); + /// Ok(()) + /// }) /// } /// ``` /// From 9df46f75e194c1ba52440eba05ba8d2b8f1f352d Mon Sep 17 00:00:00 2001 From: David Pacheco Date: Mon, 9 Jan 2023 13:55:35 -0800 Subject: [PATCH 04/47] move extractors and generic schema utilities out of handler.rs --- dropshot/src/extractor/mod.rs | 572 ++++++++++++++++ dropshot/src/handler.rs | 808 +---------------------- dropshot/src/lib.rs | 14 +- dropshot/src/schema_util.rs | 258 ++++++++ dropshot/src/type_util.rs | 2 +- dropshot/tests/fail/bad_endpoint4.stderr | 4 +- dropshot/tests/fail/bad_endpoint5.stderr | 2 +- 7 files changed, 847 insertions(+), 813 deletions(-) create mode 100644 dropshot/src/extractor/mod.rs create mode 100644 dropshot/src/schema_util.rs diff --git a/dropshot/src/extractor/mod.rs b/dropshot/src/extractor/mod.rs new file mode 100644 index 000000000..d80799947 --- /dev/null +++ b/dropshot/src/extractor/mod.rs @@ -0,0 +1,572 @@ +// Copyright 2022 Oxide Computer Company + +//! Extractor trait +//! +//! 
See top-level crate documentation for details + +use crate::api_description::ApiEndpointParameter; +use crate::api_description::ApiEndpointParameterLocation; +use crate::api_description::ApiSchemaGenerator; +use crate::api_description::{ApiEndpointBodyContentType, ExtensionMode}; +use crate::error::HttpError; +use crate::http_util::http_extract_path_params; +use crate::http_util::http_read_body; +use crate::http_util::CONTENT_TYPE_JSON; +use crate::pagination::PAGINATION_PARAM_SENTINEL; +use crate::schema_util::make_subschema_for; +use crate::schema_util::schema2struct; +use crate::schema_util::schema_extensions; +use crate::schema_util::ReferenceVisitor; +use crate::server::ServerContext; +use crate::websocket::WEBSOCKET_PARAM_SENTINEL; +use crate::RequestContext; + +use async_trait::async_trait; +use bytes::Bytes; +use hyper::Body; +use hyper::Request; +use schemars::schema::InstanceType; +use schemars::schema::SchemaObject; +use schemars::JsonSchema; +use serde::de::DeserializeOwned; +use std::fmt::Debug; +use std::sync::Arc; + +/// `Extractor` defines an interface allowing a type to be constructed from a +/// `RequestContext`. Unlike most traits, `Extractor` essentially defines only a +/// constructor function, not instance functions. +/// +/// The extractors that we provide (`Query`, `Path`, `TypedBody`, `UntypedBody`, and +/// `WebsocketUpgrade`) implement `Extractor` in order to construct themselves from +/// the request. For example, `Extractor` is implemented for `Query` with a +/// function that reads the query string from the request, parses it, and +/// constructs a `Query` with it. +/// +/// We also define implementations of `Extractor` for tuples of types that +/// themselves implement `Extractor`. See the implementation of +/// `HttpRouteHandler` for more on why this needed. +#[async_trait] +pub trait Extractor: Send + Sync + Sized { + /// Construct an instance of this type from a `RequestContext`. + async fn from_request( + rqctx: Arc>, + ) -> Result; + + fn metadata( + body_content_type: ApiEndpointBodyContentType, + ) -> ExtractorMetadata; +} + +/// Metadata associated with an extractor including parameters and whether or not +/// the associated endpoint is paginated. +pub struct ExtractorMetadata { + pub extension_mode: ExtensionMode, + pub parameters: Vec, +} + +/// `impl_derived_for_tuple!` defines implementations of `Extractor` for tuples +/// whose elements themselves implement `Extractor`. +macro_rules! 
impl_extractor_for_tuple { + ($( $T:ident),*) => { + #[async_trait] + impl< $($T: Extractor + 'static,)* > Extractor for ($($T,)*) + { + async fn from_request(_rqctx: Arc>) + -> Result<( $($T,)* ), HttpError> + { + futures::try_join!($($T::from_request(Arc::clone(&_rqctx)),)*) + } + + fn metadata(_body_content_type: ApiEndpointBodyContentType) -> ExtractorMetadata { + #[allow(unused_mut)] + let mut extension_mode = ExtensionMode::None; + #[allow(unused_mut)] + let mut parameters = vec![]; + $( + let mut metadata = $T::metadata(_body_content_type.clone()); + extension_mode = match (extension_mode, metadata.extension_mode) { + (ExtensionMode::None, x) | (x, ExtensionMode::None) => x, + (x, y) if x != y => { + panic!("incompatible extension modes in tuple: {:?} != {:?}", x, y); + } + (_, x) => x, + }; + parameters.append(&mut metadata.parameters); + )* + ExtractorMetadata { extension_mode, parameters } + } + } +}} + +impl_extractor_for_tuple!(); +impl_extractor_for_tuple!(T1); +impl_extractor_for_tuple!(T1, T2); +impl_extractor_for_tuple!(T1, T2, T3); + +// Query: query string extractor + +/// `Query` is an extractor used to deserialize an instance of +/// `QueryType` from an HTTP request's query string. `QueryType` is any +/// structure of yours that implements `serde::Deserialize`. See this module's +/// documentation for more information. +#[derive(Debug)] +pub struct Query { + inner: QueryType, +} + +impl Query { + // TODO drop this in favor of Deref? + Display and Debug for convenience? + pub fn into_inner(self) -> QueryType { + self.inner + } +} + +/// Given an HTTP request, pull out the query string and attempt to deserialize +/// it as an instance of `QueryType`. +fn http_request_load_query( + request: &Request, +) -> Result, HttpError> +where + QueryType: DeserializeOwned + JsonSchema + Send + Sync, +{ + let raw_query_string = request.uri().query().unwrap_or(""); + // TODO-correctness: are query strings defined to be urlencoded in this way? + match serde_urlencoded::from_str(raw_query_string) { + Ok(q) => Ok(Query { inner: q }), + Err(e) => Err(HttpError::for_bad_request( + None, + format!("unable to parse query string: {}", e), + )), + } +} + +// The `Extractor` implementation for Query describes how to construct +// an instance of `Query` from an HTTP request: namely, by parsing +// the query string to an instance of `QueryType`. +// TODO-cleanup We shouldn't have to use the "'static" bound on `QueryType` +// here. It seems like we ought to be able to use 'async_trait, but that +// doesn't seem to be defined. +#[async_trait] +impl Extractor for Query +where + QueryType: JsonSchema + DeserializeOwned + Send + Sync + 'static, +{ + async fn from_request( + rqctx: Arc>, + ) -> Result, HttpError> { + let request = rqctx.request.lock().await; + http_request_load_query(&request) + } + + fn metadata( + _body_content_type: ApiEndpointBodyContentType, + ) -> ExtractorMetadata { + get_metadata::(&ApiEndpointParameterLocation::Query) + } +} + +// Path: path parameter string extractor + +/// `Path` is an extractor used to deserialize an instance of +/// `PathType` from an HTTP request's path parameters. `PathType` is any +/// structure of yours that implements `serde::Deserialize`. See this module's +/// documentation for more information. +#[derive(Debug)] +pub struct Path { + inner: PathType, +} + +impl Path { + // TODO drop this in favor of Deref? + Display and Debug for convenience? 
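    // (For illustration, the `Deref` alternative mentioned above would look
    // roughly like this, with the struct's trait bounds elided; sketch only,
    // not part of this change:
    //
    //     impl<PathType> std::ops::Deref for Path<PathType> {
    //         type Target = PathType;
    //         fn deref(&self) -> &PathType {
    //             &self.inner
    //         }
    //     }
    //
    // which would let a handler write `path.field` directly instead of
    // calling `into_inner()`.)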
+ pub fn into_inner(self) -> PathType { + self.inner + } +} + +// The `Extractor` implementation for Path describes how to construct +// an instance of `Path` from an HTTP request: namely, by extracting +// parameters from the query string. +#[async_trait] +impl Extractor for Path +where + PathType: DeserializeOwned + JsonSchema + Send + Sync + 'static, +{ + async fn from_request( + rqctx: Arc>, + ) -> Result, HttpError> { + let params: PathType = http_extract_path_params(&rqctx.path_variables)?; + Ok(Path { inner: params }) + } + + fn metadata( + _body_content_type: ApiEndpointBodyContentType, + ) -> ExtractorMetadata { + get_metadata::(&ApiEndpointParameterLocation::Path) + } +} + +/// Convenience function to generate parameter metadata from types implementing +/// `JsonSchema` for use with `Query` and `Path` `Extractors`. +fn get_metadata( + loc: &ApiEndpointParameterLocation, +) -> ExtractorMetadata +where + ParamType: JsonSchema, +{ + // Generate the type for `ParamType` then pluck out each member of + // the structure to encode as an individual parameter. + let mut generator = schemars::gen::SchemaGenerator::new( + schemars::gen::SchemaSettings::openapi3(), + ); + let schema = generator.root_schema_for::().schema.into(); + + let extension_mode = match schema_extensions(&schema) { + Some(extensions) => { + let paginated = extensions + .get(&PAGINATION_PARAM_SENTINEL.to_string()) + .is_some(); + let websocket = + extensions.get(&WEBSOCKET_PARAM_SENTINEL.to_string()).is_some(); + match (paginated, websocket) { + (false, false) => ExtensionMode::None, + (false, true) => ExtensionMode::Websocket, + (true, false) => ExtensionMode::Paginated, + (true, true) => panic!( + "Cannot use websocket and pagination in the same endpoint!" + ), + } + } + None => ExtensionMode::None, + }; + + // Convert our collection of struct members list of parameters. + let parameters = schema2struct(&schema, &generator, true) + .into_iter() + .map(|struct_member| { + let mut s = struct_member.schema; + let mut visitor = ReferenceVisitor::new(&generator); + schemars::visit::visit_schema(&mut visitor, &mut s); + + ApiEndpointParameter::new_named( + loc, + struct_member.name, + struct_member.description, + struct_member.required, + ApiSchemaGenerator::Static { + schema: Box::new(s), + dependencies: visitor.dependencies(), + }, + Vec::new(), + ) + }) + .collect::>(); + + ExtractorMetadata { extension_mode, parameters } +} + +// TypedBody: body extractor for formats that can be deserialized to a specific +// type. Only JSON is currently supported. + +/// `TypedBody` is an extractor used to deserialize an instance of +/// `BodyType` from an HTTP request body. `BodyType` is any structure of yours +/// that implements `serde::Deserialize`. See this module's documentation for +/// more information. +#[derive(Debug)] +pub struct TypedBody { + inner: BodyType, +} + +impl + TypedBody +{ + // TODO drop this in favor of Deref? + Display and Debug for convenience? + pub fn into_inner(self) -> BodyType { + self.inner + } +} + +/// Given an HTTP request, attempt to read the body, parse it according +/// to the content type, and deserialize it to an instance of `BodyType`. 
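///
/// The request's declared `Content-Type` is compared case-insensitively
/// (with any parameters such as `charset` ignored) against the content type
/// the endpoint expects; on a mismatch this returns a 400 error rather than
/// attempting to parse the body.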
+async fn http_request_load_body( + rqctx: Arc>, +) -> Result, HttpError> +where + BodyType: JsonSchema + DeserializeOwned + Send + Sync, +{ + let server = &rqctx.server; + let mut request = rqctx.request.lock().await; + let body = http_read_body( + request.body_mut(), + server.config.request_body_max_bytes, + ) + .await?; + + // RFC 7231 §3.1.1.1: media types are case insensitive and may + // be followed by whitespace and/or a parameter (e.g., charset), + // which we currently ignore. + let content_type = request + .headers() + .get(http::header::CONTENT_TYPE) + .map(|hv| { + hv.to_str().map_err(|e| { + HttpError::for_bad_request( + None, + format!("invalid content type: {}", e), + ) + }) + }) + .unwrap_or(Ok(CONTENT_TYPE_JSON))?; + let end = content_type.find(';').unwrap_or_else(|| content_type.len()); + let mime_type = content_type[..end].trim_end().to_lowercase(); + let body_content_type = + ApiEndpointBodyContentType::from_mime_type(&mime_type) + .map_err(|e| HttpError::for_bad_request(None, e))?; + let expected_content_type = rqctx.body_content_type.clone(); + + use ApiEndpointBodyContentType::*; + let content: BodyType = match (expected_content_type, body_content_type) { + (Json, Json) => serde_json::from_slice(&body).map_err(|e| { + HttpError::for_bad_request( + None, + format!("unable to parse JSON body: {}", e), + ) + })?, + (UrlEncoded, UrlEncoded) => serde_urlencoded::from_bytes(&body) + .map_err(|e| { + HttpError::for_bad_request( + None, + format!("unable to parse URL-encoded body: {}", e), + ) + })?, + (expected, requested) => { + return Err(HttpError::for_bad_request( + None, + format!( + "expected content type \"{}\", got \"{}\"", + expected.mime_type(), + requested.mime_type() + ), + )) + } + }; + Ok(TypedBody { inner: content }) +} + +// The `Extractor` implementation for TypedBody describes how to +// construct an instance of `TypedBody` from an HTTP request: namely, +// by reading the request body and parsing it as JSON into type `BodyType`. +// TODO-cleanup We shouldn't have to use the "'static" bound on `BodyType` here. +// It seems like we ought to be able to use 'async_trait, but that doesn't seem +// to be defined. +#[async_trait] +impl Extractor for TypedBody +where + BodyType: JsonSchema + DeserializeOwned + Send + Sync + 'static, +{ + async fn from_request( + rqctx: Arc>, + ) -> Result, HttpError> { + http_request_load_body(rqctx).await + } + + fn metadata(content_type: ApiEndpointBodyContentType) -> ExtractorMetadata { + let body = ApiEndpointParameter::new_body( + content_type, + true, + ApiSchemaGenerator::Gen { + name: BodyType::schema_name, + schema: make_subschema_for::, + }, + vec![], + ); + ExtractorMetadata { + extension_mode: ExtensionMode::None, + parameters: vec![body], + } + } +} + +// UntypedBody: body extractor for a plain array of bytes of a body. + +/// `UntypedBody` is an extractor for reading in the contents of the HTTP request +/// body and making the raw bytes directly available to the consumer. +#[derive(Debug)] +pub struct UntypedBody { + content: Bytes, +} + +impl UntypedBody { + /// Returns a byte slice of the underlying body content. + // TODO drop this in favor of Deref? + Display and Debug for convenience? + pub fn as_bytes(&self) -> &[u8] { + &self.content + } + + /// Convenience wrapper to convert the body to a UTF-8 string slice, + /// returning a 400-level error if the body is not valid UTF-8. 
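    ///
    /// A sketch of typical use in a handler (the endpoint and path here are
    /// illustrative, not part of Dropshot):
    ///
    /// ```
    /// #[dropshot::endpoint { method = POST, path = "/echo" }]
    /// async fn echo_text(
    ///     _rqctx: std::sync::Arc<dropshot::RequestContext<()>>,
    ///     body: dropshot::UntypedBody,
    /// ) -> Result<dropshot::HttpResponseOk<String>, dropshot::HttpError> {
    ///     // Reject non-UTF-8 bodies with a 400 before echoing the text.
    ///     let text = body.as_str()?;
    ///     Ok(dropshot::HttpResponseOk(text.to_string()))
    /// }
    /// ```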
+ pub fn as_str(&self) -> Result<&str, HttpError> { + std::str::from_utf8(self.as_bytes()).map_err(|e| { + HttpError::for_bad_request( + None, + format!("failed to parse body as UTF-8 string: {}", e), + ) + }) + } +} + +#[async_trait] +impl Extractor for UntypedBody { + async fn from_request( + rqctx: Arc>, + ) -> Result { + let server = &rqctx.server; + let mut request = rqctx.request.lock().await; + let body_bytes = http_read_body( + request.body_mut(), + server.config.request_body_max_bytes, + ) + .await?; + Ok(UntypedBody { content: body_bytes }) + } + + fn metadata( + _content_type: ApiEndpointBodyContentType, + ) -> ExtractorMetadata { + ExtractorMetadata { + parameters: vec![ApiEndpointParameter::new_body( + ApiEndpointBodyContentType::Bytes, + true, + ApiSchemaGenerator::Static { + schema: Box::new( + SchemaObject { + instance_type: Some(InstanceType::String.into()), + format: Some(String::from("binary")), + ..Default::default() + } + .into(), + ), + dependencies: indexmap::IndexMap::default(), + }, + vec![], + )], + extension_mode: ExtensionMode::None, + } + } +} + +#[cfg(test)] +mod test { + use crate::api_description::ExtensionMode; + use crate::{ + api_description::ApiEndpointParameterMetadata, ApiEndpointParameter, + ApiEndpointParameterLocation, PaginationParams, + }; + use schemars::JsonSchema; + use serde::{Deserialize, Serialize}; + + use super::get_metadata; + use super::ExtractorMetadata; + + #[derive(Deserialize, Serialize, JsonSchema)] + #[allow(dead_code)] + struct A { + foo: String, + bar: u32, + baz: Option, + } + + #[derive(JsonSchema)] + #[allow(dead_code)] + struct B { + #[serde(flatten)] + page: T, + + limit: Option, + } + + #[derive(JsonSchema)] + #[allow(dead_code)] + #[schemars(untagged)] + enum C { + First(T), + Next { page_token: String }, + } + + fn compare( + actual: ExtractorMetadata, + extension_mode: ExtensionMode, + parameters: Vec<(&str, bool)>, + ) { + assert_eq!(actual.extension_mode, extension_mode); + + // This is order-dependent. We might not really care if the order + // changes, but it will be interesting to understand why if it does. + actual.parameters.iter().zip(parameters.iter()).for_each( + |(param, (name, required))| { + if let ApiEndpointParameter { + metadata: ApiEndpointParameterMetadata::Path(aname), + required: arequired, + .. 
+ } = param + { + assert_eq!(aname, name); + assert_eq!(arequired, required, "mismatched for {}", name); + } else { + panic!(); + } + }, + ); + } + + #[test] + fn test_metadata_simple() { + let params = get_metadata::(&ApiEndpointParameterLocation::Path); + let expected = vec![("bar", true), ("baz", false), ("foo", true)]; + + compare(params, ExtensionMode::None, expected); + } + + #[test] + fn test_metadata_flattened() { + let params = get_metadata::>(&ApiEndpointParameterLocation::Path); + let expected = vec![ + ("bar", true), + ("baz", false), + ("foo", true), + ("limit", false), + ]; + + compare(params, ExtensionMode::None, expected); + } + + #[test] + fn test_metadata_flattened_enum() { + let params = + get_metadata::>>(&ApiEndpointParameterLocation::Path); + let expected = vec![ + ("limit", false), + ("bar", false), + ("baz", false), + ("foo", false), + ("page_token", false), + ]; + + compare(params, ExtensionMode::None, expected); + } + + #[test] + fn test_metadata_pagination() { + let params = get_metadata::>( + &ApiEndpointParameterLocation::Path, + ); + let expected = vec![ + ("bar", false), + ("baz", false), + ("foo", false), + ("limit", false), + ("page_token", false), + ]; + + compare(params, ExtensionMode::Paginated, expected); + } +} diff --git a/dropshot/src/handler.rs b/dropshot/src/handler.rs index ed92026ea..e2344dfe8 100644 --- a/dropshot/src/handler.rs +++ b/dropshot/src/handler.rs @@ -32,34 +32,29 @@ //! OpenAPI document generation. use super::error::HttpError; -use super::http_util::http_extract_path_params; -use super::http_util::http_read_body; +use super::extractor::Extractor; use super::http_util::CONTENT_TYPE_JSON; use super::http_util::CONTENT_TYPE_OCTET_STREAM; use super::server::DropshotState; use super::server::ServerContext; use crate::api_description::ApiEndpointHeader; -use crate::api_description::ApiEndpointParameter; -use crate::api_description::ApiEndpointParameterLocation; use crate::api_description::ApiEndpointResponse; use crate::api_description::ApiSchemaGenerator; -use crate::api_description::{ApiEndpointBodyContentType, ExtensionMode}; +use crate::api_description::{ApiEndpointBodyContentType}; use crate::pagination::PaginationParams; -use crate::pagination::PAGINATION_PARAM_SENTINEL; use crate::router::VariableSet; +use crate::schema_util::make_subschema_for; +use crate::schema_util::schema2struct; +use crate::schema_util::ReferenceVisitor; use crate::to_map::to_map; -use crate::websocket::WEBSOCKET_PARAM_SENTINEL; use async_trait::async_trait; -use bytes::Bytes; use futures::lock::Mutex; use http::HeaderMap; use http::StatusCode; use hyper::Body; use hyper::Request; use hyper::Response; -use schemars::schema::InstanceType; -use schemars::schema::SchemaObject; use schemars::JsonSchema; use serde::de::DeserializeOwned; use serde::Serialize; @@ -151,77 +146,6 @@ impl RequestContextArgument type Context = T; } -/// `Extractor` defines an interface allowing a type to be constructed from a -/// `RequestContext`. Unlike most traits, `Extractor` essentially defines only a -/// constructor function, not instance functions. -/// -/// The extractors that we provide (`Query`, `Path`, `TypedBody`, `UntypedBody`, and -/// `WebsocketUpgrade`) implement `Extractor` in order to construct themselves from -/// the request. For example, `Extractor` is implemented for `Query` with a -/// function that reads the query string from the request, parses it, and -/// constructs a `Query` with it. 
-/// -/// We also define implementations of `Extractor` for tuples of types that -/// themselves implement `Extractor`. See the implementation of -/// `HttpRouteHandler` for more on why this needed. -#[async_trait] -pub trait Extractor: Send + Sync + Sized { - /// Construct an instance of this type from a `RequestContext`. - async fn from_request( - rqctx: Arc>, - ) -> Result; - - fn metadata( - body_content_type: ApiEndpointBodyContentType, - ) -> ExtractorMetadata; -} - -/// Metadata associated with an extractor including parameters and whether or not -/// the associated endpoint is paginated. -pub struct ExtractorMetadata { - pub extension_mode: ExtensionMode, - pub parameters: Vec, -} - -/// `impl_derived_for_tuple!` defines implementations of `Extractor` for tuples -/// whose elements themselves implement `Extractor`. -macro_rules! impl_extractor_for_tuple { - ($( $T:ident),*) => { - #[async_trait] - impl< $($T: Extractor + 'static,)* > Extractor for ($($T,)*) - { - async fn from_request(_rqctx: Arc>) - -> Result<( $($T,)* ), HttpError> - { - futures::try_join!($($T::from_request(Arc::clone(&_rqctx)),)*) - } - - fn metadata(_body_content_type: ApiEndpointBodyContentType) -> ExtractorMetadata { - #[allow(unused_mut)] - let mut extension_mode = ExtensionMode::None; - #[allow(unused_mut)] - let mut parameters = vec![]; - $( - let mut metadata = $T::metadata(_body_content_type.clone()); - extension_mode = match (extension_mode, metadata.extension_mode) { - (ExtensionMode::None, x) | (x, ExtensionMode::None) => x, - (x, y) if x != y => { - panic!("incompatible extension modes in tuple: {:?} != {:?}", x, y); - } - (_, x) => x, - }; - parameters.append(&mut metadata.parameters); - )* - ExtractorMetadata { extension_mode, parameters } - } - } -}} - -impl_extractor_for_tuple!(); -impl_extractor_for_tuple!(T1); -impl_extractor_for_tuple!(T1, T2); -impl_extractor_for_tuple!(T1, T2, T3); - /// `HttpHandlerFunc` is a trait providing a single function, `handle_request()`, /// which takes an HTTP request and produces an HTTP response (or /// `HttpError`). @@ -497,542 +421,6 @@ where } } -// Extractors - -// Query: query string extractor - -/// `Query` is an extractor used to deserialize an instance of -/// `QueryType` from an HTTP request's query string. `QueryType` is any -/// structure of yours that implements `serde::Deserialize`. See this module's -/// documentation for more information. -#[derive(Debug)] -pub struct Query { - inner: QueryType, -} - -impl Query { - // TODO drop this in favor of Deref? + Display and Debug for convenience? - pub fn into_inner(self) -> QueryType { - self.inner - } -} - -/// Given an HTTP request, pull out the query string and attempt to deserialize -/// it as an instance of `QueryType`. -fn http_request_load_query( - request: &Request, -) -> Result, HttpError> -where - QueryType: DeserializeOwned + JsonSchema + Send + Sync, -{ - let raw_query_string = request.uri().query().unwrap_or(""); - // TODO-correctness: are query strings defined to be urlencoded in this way? - match serde_urlencoded::from_str(raw_query_string) { - Ok(q) => Ok(Query { inner: q }), - Err(e) => Err(HttpError::for_bad_request( - None, - format!("unable to parse query string: {}", e), - )), - } -} - -// The `Extractor` implementation for Query describes how to construct -// an instance of `Query` from an HTTP request: namely, by parsing -// the query string to an instance of `QueryType`. -// TODO-cleanup We shouldn't have to use the "'static" bound on `QueryType` -// here. 
It seems like we ought to be able to use 'async_trait, but that -// doesn't seem to be defined. -#[async_trait] -impl Extractor for Query -where - QueryType: JsonSchema + DeserializeOwned + Send + Sync + 'static, -{ - async fn from_request( - rqctx: Arc>, - ) -> Result, HttpError> { - let request = rqctx.request.lock().await; - http_request_load_query(&request) - } - - fn metadata( - _body_content_type: ApiEndpointBodyContentType, - ) -> ExtractorMetadata { - get_metadata::(&ApiEndpointParameterLocation::Query) - } -} - -// Path: path parameter string extractor - -/// `Path` is an extractor used to deserialize an instance of -/// `PathType` from an HTTP request's path parameters. `PathType` is any -/// structure of yours that implements `serde::Deserialize`. See this module's -/// documentation for more information. -#[derive(Debug)] -pub struct Path { - inner: PathType, -} - -impl Path { - // TODO drop this in favor of Deref? + Display and Debug for convenience? - pub fn into_inner(self) -> PathType { - self.inner - } -} - -// The `Extractor` implementation for Path describes how to construct -// an instance of `Path` from an HTTP request: namely, by extracting -// parameters from the query string. -#[async_trait] -impl Extractor for Path -where - PathType: DeserializeOwned + JsonSchema + Send + Sync + 'static, -{ - async fn from_request( - rqctx: Arc>, - ) -> Result, HttpError> { - let params: PathType = http_extract_path_params(&rqctx.path_variables)?; - Ok(Path { inner: params }) - } - - fn metadata( - _body_content_type: ApiEndpointBodyContentType, - ) -> ExtractorMetadata { - get_metadata::(&ApiEndpointParameterLocation::Path) - } -} - -/// Convenience function to generate parameter metadata from types implementing -/// `JsonSchema` for use with `Query` and `Path` `Extractors`. -fn get_metadata( - loc: &ApiEndpointParameterLocation, -) -> ExtractorMetadata -where - ParamType: JsonSchema, -{ - // Generate the type for `ParamType` then pluck out each member of - // the structure to encode as an individual parameter. - let mut generator = schemars::gen::SchemaGenerator::new( - schemars::gen::SchemaSettings::openapi3(), - ); - let schema = generator.root_schema_for::().schema.into(); - - let extension_mode = match schema_extensions(&schema) { - Some(extensions) => { - let paginated = extensions - .get(&PAGINATION_PARAM_SENTINEL.to_string()) - .is_some(); - let websocket = - extensions.get(&WEBSOCKET_PARAM_SENTINEL.to_string()).is_some(); - match (paginated, websocket) { - (false, false) => ExtensionMode::None, - (false, true) => ExtensionMode::Websocket, - (true, false) => ExtensionMode::Paginated, - (true, true) => panic!( - "Cannot use websocket and pagination in the same endpoint!" - ), - } - } - None => ExtensionMode::None, - }; - - // Convert our collection of struct members list of parameters. 
- let parameters = schema2struct(&schema, &generator, true) - .into_iter() - .map(|struct_member| { - let mut s = struct_member.schema; - let mut visitor = ReferenceVisitor::new(&generator); - schemars::visit::visit_schema(&mut visitor, &mut s); - - ApiEndpointParameter::new_named( - loc, - struct_member.name, - struct_member.description, - struct_member.required, - ApiSchemaGenerator::Static { - schema: Box::new(s), - dependencies: visitor.dependencies(), - }, - Vec::new(), - ) - }) - .collect::>(); - - ExtractorMetadata { extension_mode, parameters } -} - -fn schema_extensions( - schema: &schemars::schema::Schema, -) -> Option<&schemars::Map> { - match schema { - schemars::schema::Schema::Bool(_) => None, - schemars::schema::Schema::Object(object) => Some(&object.extensions), - } -} - -/// Used to visit all schemas and collect all dependencies. -struct ReferenceVisitor<'a> { - generator: &'a schemars::gen::SchemaGenerator, - dependencies: indexmap::IndexMap, -} - -impl<'a> ReferenceVisitor<'a> { - fn new(generator: &'a schemars::gen::SchemaGenerator) -> Self { - Self { generator, dependencies: indexmap::IndexMap::new() } - } - - fn dependencies( - self, - ) -> indexmap::IndexMap { - self.dependencies - } -} - -impl<'a> schemars::visit::Visitor for ReferenceVisitor<'a> { - fn visit_schema_object(&mut self, schema: &mut SchemaObject) { - if let Some(refstr) = &schema.reference { - let definitions_path = &self.generator.settings().definitions_path; - let name = &refstr[definitions_path.len()..]; - - if !self.dependencies.contains_key(name) { - let mut refschema = self - .generator - .definitions() - .get(name) - .expect("invalid reference") - .clone(); - self.dependencies.insert( - name.to_string(), - schemars::schema::Schema::Bool(false), - ); - schemars::visit::visit_schema(self, &mut refschema); - self.dependencies.insert(name.to_string(), refschema); - } - } - - schemars::visit::visit_schema_object(self, schema); - } -} - -#[derive(Debug)] -pub(crate) struct StructMember { - pub name: String, - pub description: Option, - pub schema: schemars::schema::Schema, - pub required: bool, -} - -/// This helper function produces a list of the structure members for the -/// given schema. For each it returns: -/// (name: &String, schema: &Schema, required: bool) -/// -/// If the input schema is not a flat structure the result will be a runtime -/// failure reflective of a programming error (likely an invalid type specified -/// in a handler function). -/// -/// This function is invoked recursively on subschemas. -pub(crate) fn schema2struct( - schema: &schemars::schema::Schema, - generator: &schemars::gen::SchemaGenerator, - required: bool, -) -> Vec { - // We ignore schema.metadata, which includes things like doc comments, and - // schema.extensions. We call these out explicitly rather than eliding them - // as .. since we match all other fields in the structure. - match schema { - // We expect references to be on their own. - schemars::schema::Schema::Object(schemars::schema::SchemaObject { - metadata: _, - instance_type: None, - format: None, - enum_values: None, - const_value: None, - subschemas: None, - number: None, - string: None, - array: None, - object: None, - reference: Some(_), - extensions: _, - }) => schema2struct( - generator.dereference(schema).expect("invalid reference"), - generator, - required, - ), - - // Match objects and subschemas. 
- schemars::schema::Schema::Object(schemars::schema::SchemaObject { - metadata: _, - instance_type: Some(schemars::schema::SingleOrVec::Single(_)), - format: None, - enum_values: None, - const_value: None, - subschemas, - number: None, - string: None, - array: None, - object, - reference: None, - extensions: _, - }) => { - let mut results = Vec::new(); - - // If there's a top-level object, add its members to the list of - // parameters. - if let Some(object) = object { - results.extend(object.properties.iter().map( - |(name, schema)| { - let (description, schema) = - schema_extract_description(schema); - StructMember { - name: name.clone(), - description, - schema, - required: required - && object.required.contains(name), - } - }, - )); - } - - // We might see subschemas here in the case of flattened enums - // or flattened structures that have associated doc comments. - if let Some(subschemas) = subschemas { - match subschemas.as_ref() { - // We expect any_of in the case of an enum. - schemars::schema::SubschemaValidation { - all_of: None, - any_of: Some(schemas), - one_of: None, - not: None, - if_schema: None, - then_schema: None, - else_schema: None, - } => results.extend(schemas.iter().flat_map(|subschema| { - // Note that these will be tagged as optional. - schema2struct(subschema, generator, false) - })), - - // With an all_of, there should be a single element. We - // typically see this in the case where there is a doc - // comment on a structure as OpenAPI 3.0.x doesn't have - // a description field directly on schemas. - schemars::schema::SubschemaValidation { - all_of: Some(subschemas), - any_of: None, - one_of: None, - not: None, - if_schema: None, - then_schema: None, - else_schema: None, - } if subschemas.len() == 1 => results.extend( - subschemas.iter().flat_map(|subschema| { - schema2struct(subschema, generator, required) - }), - ), - - // We don't expect any other types of subschemas. - invalid => panic!("invalid subschema {:#?}", invalid), - } - } - - results - } - - // The generated schema should be an object. - invalid => panic!("invalid type {:#?}", invalid), - } -} - -// TypedBody: body extractor for formats that can be deserialized to a specific -// type. Only JSON is currently supported. - -/// `TypedBody` is an extractor used to deserialize an instance of -/// `BodyType` from an HTTP request body. `BodyType` is any structure of yours -/// that implements `serde::Deserialize`. See this module's documentation for -/// more information. -#[derive(Debug)] -pub struct TypedBody { - inner: BodyType, -} - -impl - TypedBody -{ - // TODO drop this in favor of Deref? + Display and Debug for convenience? - pub fn into_inner(self) -> BodyType { - self.inner - } -} - -/// Given an HTTP request, attempt to read the body, parse it according -/// to the content type, and deserialize it to an instance of `BodyType`. -async fn http_request_load_body( - rqctx: Arc>, -) -> Result, HttpError> -where - BodyType: JsonSchema + DeserializeOwned + Send + Sync, -{ - let server = &rqctx.server; - let mut request = rqctx.request.lock().await; - let body = http_read_body( - request.body_mut(), - server.config.request_body_max_bytes, - ) - .await?; - - // RFC 7231 §3.1.1.1: media types are case insensitive and may - // be followed by whitespace and/or a parameter (e.g., charset), - // which we currently ignore. 
- let content_type = request - .headers() - .get(http::header::CONTENT_TYPE) - .map(|hv| { - hv.to_str().map_err(|e| { - HttpError::for_bad_request( - None, - format!("invalid content type: {}", e), - ) - }) - }) - .unwrap_or(Ok(CONTENT_TYPE_JSON))?; - let end = content_type.find(';').unwrap_or_else(|| content_type.len()); - let mime_type = content_type[..end].trim_end().to_lowercase(); - let body_content_type = - ApiEndpointBodyContentType::from_mime_type(&mime_type) - .map_err(|e| HttpError::for_bad_request(None, e))?; - let expected_content_type = rqctx.body_content_type.clone(); - - use ApiEndpointBodyContentType::*; - let content: BodyType = match (expected_content_type, body_content_type) { - (Json, Json) => serde_json::from_slice(&body).map_err(|e| { - HttpError::for_bad_request( - None, - format!("unable to parse JSON body: {}", e), - ) - })?, - (UrlEncoded, UrlEncoded) => serde_urlencoded::from_bytes(&body) - .map_err(|e| { - HttpError::for_bad_request( - None, - format!("unable to parse URL-encoded body: {}", e), - ) - })?, - (expected, requested) => { - return Err(HttpError::for_bad_request( - None, - format!( - "expected content type \"{}\", got \"{}\"", - expected.mime_type(), - requested.mime_type() - ), - )) - } - }; - Ok(TypedBody { inner: content }) -} - -// The `Extractor` implementation for TypedBody describes how to -// construct an instance of `TypedBody` from an HTTP request: namely, -// by reading the request body and parsing it as JSON into type `BodyType`. -// TODO-cleanup We shouldn't have to use the "'static" bound on `BodyType` here. -// It seems like we ought to be able to use 'async_trait, but that doesn't seem -// to be defined. -#[async_trait] -impl Extractor for TypedBody -where - BodyType: JsonSchema + DeserializeOwned + Send + Sync + 'static, -{ - async fn from_request( - rqctx: Arc>, - ) -> Result, HttpError> { - http_request_load_body(rqctx).await - } - - fn metadata(content_type: ApiEndpointBodyContentType) -> ExtractorMetadata { - let body = ApiEndpointParameter::new_body( - content_type, - true, - ApiSchemaGenerator::Gen { - name: BodyType::schema_name, - schema: make_subschema_for::, - }, - vec![], - ); - ExtractorMetadata { - extension_mode: ExtensionMode::None, - parameters: vec![body], - } - } -} - -// UntypedBody: body extractor for a plain array of bytes of a body. - -/// `UntypedBody` is an extractor for reading in the contents of the HTTP request -/// body and making the raw bytes directly available to the consumer. -#[derive(Debug)] -pub struct UntypedBody { - content: Bytes, -} - -impl UntypedBody { - /// Returns a byte slice of the underlying body content. - // TODO drop this in favor of Deref? + Display and Debug for convenience? - pub fn as_bytes(&self) -> &[u8] { - &self.content - } - - /// Convenience wrapper to convert the body to a UTF-8 string slice, - /// returning a 400-level error if the body is not valid UTF-8. 
- pub fn as_str(&self) -> Result<&str, HttpError> { - std::str::from_utf8(self.as_bytes()).map_err(|e| { - HttpError::for_bad_request( - None, - format!("failed to parse body as UTF-8 string: {}", e), - ) - }) - } -} - -#[async_trait] -impl Extractor for UntypedBody { - async fn from_request( - rqctx: Arc>, - ) -> Result { - let server = &rqctx.server; - let mut request = rqctx.request.lock().await; - let body_bytes = http_read_body( - request.body_mut(), - server.config.request_body_max_bytes, - ) - .await?; - Ok(UntypedBody { content: body_bytes }) - } - - fn metadata( - _content_type: ApiEndpointBodyContentType, - ) -> ExtractorMetadata { - ExtractorMetadata { - parameters: vec![ApiEndpointParameter::new_body( - ApiEndpointBodyContentType::Bytes, - true, - ApiSchemaGenerator::Static { - schema: Box::new( - SchemaObject { - instance_type: Some(InstanceType::String.into()), - format: Some(String::from("binary")), - ..Default::default() - } - .into(), - ), - dependencies: indexmap::IndexMap::default(), - }, - vec![], - )], - extension_mode: ExtensionMode::None, - } - } -} - // Response Type Conversion // // See the discussion on macro `impl_HttpHandlerFunc_for_func_with_params` for a @@ -1190,12 +578,6 @@ where } } -fn make_subschema_for( - gen: &mut schemars::gen::SchemaGenerator, -) -> schemars::schema::Schema { - gen.subschema_for::() -} - /// `HttpResponseCreated` wraps an object of any serializable type. /// It denotes an HTTP 201 "Created" response whose body is generated by /// serializing the object. @@ -1553,183 +935,3 @@ impl< metadata } } - -fn schema_extract_description( - schema: &schemars::schema::Schema, -) -> (Option, schemars::schema::Schema) { - // Because the OpenAPI v3.0.x Schema cannot include a description with - // a reference, we may see a schema with a description and an `all_of` - // with a single subschema. In this case, we flatten the trivial subschema. 
- if let schemars::schema::Schema::Object(schemars::schema::SchemaObject { - metadata, - instance_type: None, - format: None, - enum_values: None, - const_value: None, - subschemas: Some(subschemas), - number: None, - string: None, - array: None, - object: None, - reference: None, - extensions: _, - }) = schema - { - if let schemars::schema::SubschemaValidation { - all_of: Some(subschemas), - any_of: None, - one_of: None, - not: None, - if_schema: None, - then_schema: None, - else_schema: None, - } = subschemas.as_ref() - { - match (subschemas.first(), subschemas.len()) { - (Some(subschema), 1) => { - let description = metadata - .as_ref() - .and_then(|m| m.as_ref().description.clone()); - return (description, subschema.clone()); - } - _ => (), - } - } - } - - match schema { - schemars::schema::Schema::Bool(_) => (None, schema.clone()), - - schemars::schema::Schema::Object(object) => { - let description = object - .metadata - .as_ref() - .and_then(|m| m.as_ref().description.clone()); - ( - description, - schemars::schema::SchemaObject { - metadata: None, - ..object.clone() - } - .into(), - ) - } - } -} - -#[cfg(test)] -mod test { - use crate::api_description::ExtensionMode; - use crate::{ - api_description::ApiEndpointParameterMetadata, ApiEndpointParameter, - ApiEndpointParameterLocation, PaginationParams, - }; - use schemars::JsonSchema; - use serde::{Deserialize, Serialize}; - - use super::get_metadata; - use super::ExtractorMetadata; - - #[derive(Deserialize, Serialize, JsonSchema)] - #[allow(dead_code)] - struct A { - foo: String, - bar: u32, - baz: Option, - } - - #[derive(JsonSchema)] - #[allow(dead_code)] - struct B { - #[serde(flatten)] - page: T, - - limit: Option, - } - - #[derive(JsonSchema)] - #[allow(dead_code)] - #[schemars(untagged)] - enum C { - First(T), - Next { page_token: String }, - } - - fn compare( - actual: ExtractorMetadata, - extension_mode: ExtensionMode, - parameters: Vec<(&str, bool)>, - ) { - assert_eq!(actual.extension_mode, extension_mode); - - // This is order-dependent. We might not really care if the order - // changes, but it will be interesting to understand why if it does. - actual.parameters.iter().zip(parameters.iter()).for_each( - |(param, (name, required))| { - if let ApiEndpointParameter { - metadata: ApiEndpointParameterMetadata::Path(aname), - required: arequired, - .. 
- } = param - { - assert_eq!(aname, name); - assert_eq!(arequired, required, "mismatched for {}", name); - } else { - panic!(); - } - }, - ); - } - - #[test] - fn test_metadata_simple() { - let params = get_metadata::(&ApiEndpointParameterLocation::Path); - let expected = vec![("bar", true), ("baz", false), ("foo", true)]; - - compare(params, ExtensionMode::None, expected); - } - - #[test] - fn test_metadata_flattened() { - let params = get_metadata::>(&ApiEndpointParameterLocation::Path); - let expected = vec![ - ("bar", true), - ("baz", false), - ("foo", true), - ("limit", false), - ]; - - compare(params, ExtensionMode::None, expected); - } - - #[test] - fn test_metadata_flattened_enum() { - let params = - get_metadata::>>(&ApiEndpointParameterLocation::Path); - let expected = vec![ - ("limit", false), - ("bar", false), - ("baz", false), - ("foo", false), - ("page_token", false), - ]; - - compare(params, ExtensionMode::None, expected); - } - - #[test] - fn test_metadata_pagination() { - let params = get_metadata::>( - &ApiEndpointParameterLocation::Path, - ); - let expected = vec![ - ("bar", false), - ("baz", false), - ("foo", false), - ("limit", false), - ("page_token", false), - ]; - - compare(params, ExtensionMode::Paginated, expected); - } -} diff --git a/dropshot/src/lib.rs b/dropshot/src/lib.rs index 6de0f7635..2a3803c16 100644 --- a/dropshot/src/lib.rs +++ b/dropshot/src/lib.rs @@ -595,12 +595,14 @@ pub enum ProbeRegistration { mod api_description; mod config; mod error; +mod extractor; mod from_map; mod handler; mod http_util; mod logging; mod pagination; mod router; +mod schema_util; mod server; mod to_map; mod type_util; @@ -627,11 +629,15 @@ pub use config::ConfigDropshot; pub use config::ConfigTls; pub use error::HttpError; pub use error::HttpErrorResponseBody; +pub use extractor::Extractor; +pub use extractor::ExtractorMetadata; +pub use extractor::Path; +pub use extractor::Query; +pub use extractor::TypedBody; +pub use extractor::UntypedBody; pub use handler::http_response_found; pub use handler::http_response_see_other; pub use handler::http_response_temporary_redirect; -pub use handler::Extractor; -pub use handler::ExtractorMetadata; pub use handler::FreeformBody; pub use handler::HttpCodedResponse; pub use handler::HttpResponse; @@ -645,11 +651,7 @@ pub use handler::HttpResponseSeeOther; pub use handler::HttpResponseTemporaryRedirect; pub use handler::HttpResponseUpdatedNoContent; pub use handler::NoHeaders; -pub use handler::Path; -pub use handler::Query; pub use handler::RequestContext; -pub use handler::TypedBody; -pub use handler::UntypedBody; pub use http_util::CONTENT_TYPE_JSON; pub use http_util::CONTENT_TYPE_NDJSON; pub use http_util::CONTENT_TYPE_OCTET_STREAM; diff --git a/dropshot/src/schema_util.rs b/dropshot/src/schema_util.rs new file mode 100644 index 000000000..d08da8dda --- /dev/null +++ b/dropshot/src/schema_util.rs @@ -0,0 +1,258 @@ +// Copyright 2022 Oxide Computer Company + +//! schemars helper functions + +use schemars::schema::SchemaObject; +use schemars::JsonSchema; + +#[derive(Debug)] +pub(crate) struct StructMember { + pub name: String, + pub description: Option, + pub schema: schemars::schema::Schema, + pub required: bool, +} + +/// This helper function produces a list of the structure members for the +/// given schema. 
For each it returns: +/// (name: &String, schema: &Schema, required: bool) +/// +/// If the input schema is not a flat structure the result will be a runtime +/// failure reflective of a programming error (likely an invalid type specified +/// in a handler function). +/// +/// This function is invoked recursively on subschemas. +pub(crate) fn schema2struct( + schema: &schemars::schema::Schema, + generator: &schemars::gen::SchemaGenerator, + required: bool, +) -> Vec { + // We ignore schema.metadata, which includes things like doc comments, and + // schema.extensions. We call these out explicitly rather than eliding them + // as .. since we match all other fields in the structure. + match schema { + // We expect references to be on their own. + schemars::schema::Schema::Object(schemars::schema::SchemaObject { + metadata: _, + instance_type: None, + format: None, + enum_values: None, + const_value: None, + subschemas: None, + number: None, + string: None, + array: None, + object: None, + reference: Some(_), + extensions: _, + }) => schema2struct( + generator.dereference(schema).expect("invalid reference"), + generator, + required, + ), + + // Match objects and subschemas. + schemars::schema::Schema::Object(schemars::schema::SchemaObject { + metadata: _, + instance_type: Some(schemars::schema::SingleOrVec::Single(_)), + format: None, + enum_values: None, + const_value: None, + subschemas, + number: None, + string: None, + array: None, + object, + reference: None, + extensions: _, + }) => { + let mut results = Vec::new(); + + // If there's a top-level object, add its members to the list of + // parameters. + if let Some(object) = object { + results.extend(object.properties.iter().map( + |(name, schema)| { + let (description, schema) = + schema_extract_description(schema); + StructMember { + name: name.clone(), + description, + schema, + required: required + && object.required.contains(name), + } + }, + )); + } + + // We might see subschemas here in the case of flattened enums + // or flattened structures that have associated doc comments. + if let Some(subschemas) = subschemas { + match subschemas.as_ref() { + // We expect any_of in the case of an enum. + schemars::schema::SubschemaValidation { + all_of: None, + any_of: Some(schemas), + one_of: None, + not: None, + if_schema: None, + then_schema: None, + else_schema: None, + } => results.extend(schemas.iter().flat_map(|subschema| { + // Note that these will be tagged as optional. + schema2struct(subschema, generator, false) + })), + + // With an all_of, there should be a single element. We + // typically see this in the case where there is a doc + // comment on a structure as OpenAPI 3.0.x doesn't have + // a description field directly on schemas. + schemars::schema::SubschemaValidation { + all_of: Some(subschemas), + any_of: None, + one_of: None, + not: None, + if_schema: None, + then_schema: None, + else_schema: None, + } if subschemas.len() == 1 => results.extend( + subschemas.iter().flat_map(|subschema| { + schema2struct(subschema, generator, required) + }), + ), + + // We don't expect any other types of subschemas. + invalid => panic!("invalid subschema {:#?}", invalid), + } + } + + results + } + + // The generated schema should be an object. 
+ invalid => panic!("invalid type {:#?}", invalid), + } +} + +pub(crate) fn make_subschema_for( + gen: &mut schemars::gen::SchemaGenerator, +) -> schemars::schema::Schema { + gen.subschema_for::() +} + +pub(crate) fn schema_extensions( + schema: &schemars::schema::Schema, +) -> Option<&schemars::Map> { + match schema { + schemars::schema::Schema::Bool(_) => None, + schemars::schema::Schema::Object(object) => Some(&object.extensions), + } +} + +/// Used to visit all schemas and collect all dependencies. +pub(crate) struct ReferenceVisitor<'a> { + generator: &'a schemars::gen::SchemaGenerator, + dependencies: indexmap::IndexMap, +} + +impl<'a> ReferenceVisitor<'a> { + pub fn new(generator: &'a schemars::gen::SchemaGenerator) -> Self { + Self { generator, dependencies: indexmap::IndexMap::new() } + } + + pub fn dependencies( + self, + ) -> indexmap::IndexMap { + self.dependencies + } +} + +impl<'a> schemars::visit::Visitor for ReferenceVisitor<'a> { + fn visit_schema_object(&mut self, schema: &mut SchemaObject) { + if let Some(refstr) = &schema.reference { + let definitions_path = &self.generator.settings().definitions_path; + let name = &refstr[definitions_path.len()..]; + + if !self.dependencies.contains_key(name) { + let mut refschema = self + .generator + .definitions() + .get(name) + .expect("invalid reference") + .clone(); + self.dependencies.insert( + name.to_string(), + schemars::schema::Schema::Bool(false), + ); + schemars::visit::visit_schema(self, &mut refschema); + self.dependencies.insert(name.to_string(), refschema); + } + } + + schemars::visit::visit_schema_object(self, schema); + } +} + +pub(crate) fn schema_extract_description( + schema: &schemars::schema::Schema, +) -> (Option, schemars::schema::Schema) { + // Because the OpenAPI v3.0.x Schema cannot include a description with + // a reference, we may see a schema with a description and an `all_of` + // with a single subschema. In this case, we flatten the trivial subschema. 
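// For illustration (assumed shape, not part of the original source): the
// pattern described above typically appears in schemars output as a
// description wrapping a lone `$ref` inside `allOf`, along the lines of:
//
//     {
//       "description": "doc comment on the field",
//       "allOf": [ { "$ref": "#/components/schemas/MyType" } ]
//     }
//
// The code below detects exactly that shape and returns the description
// together with the single inner subschema in place of the wrapper.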
+ if let schemars::schema::Schema::Object(schemars::schema::SchemaObject { + metadata, + instance_type: None, + format: None, + enum_values: None, + const_value: None, + subschemas: Some(subschemas), + number: None, + string: None, + array: None, + object: None, + reference: None, + extensions: _, + }) = schema + { + if let schemars::schema::SubschemaValidation { + all_of: Some(subschemas), + any_of: None, + one_of: None, + not: None, + if_schema: None, + then_schema: None, + else_schema: None, + } = subschemas.as_ref() + { + match (subschemas.first(), subschemas.len()) { + (Some(subschema), 1) => { + let description = metadata + .as_ref() + .and_then(|m| m.as_ref().description.clone()); + return (description, subschema.clone()); + } + _ => (), + } + } + } + + match schema { + schemars::schema::Schema::Bool(_) => (None, schema.clone()), + + schemars::schema::Schema::Object(object) => { + let description = object + .metadata + .as_ref() + .and_then(|m| m.as_ref().description.clone()); + ( + description, + schemars::schema::SchemaObject { + metadata: None, + ..object.clone() + } + .into(), + ) + } + } +} diff --git a/dropshot/src/type_util.rs b/dropshot/src/type_util.rs index 63a460f20..5c1495dea 100644 --- a/dropshot/src/type_util.rs +++ b/dropshot/src/type_util.rs @@ -235,7 +235,7 @@ mod tests { JsonSchema, }; - use crate::handler::schema2struct; + use crate::schema_util::schema2struct; use super::type_resolve; diff --git a/dropshot/tests/fail/bad_endpoint4.stderr b/dropshot/tests/fail/bad_endpoint4.stderr index d99a3626b..8d544b5ec 100644 --- a/dropshot/tests/fail/bad_endpoint4.stderr +++ b/dropshot/tests/fail/bad_endpoint4.stderr @@ -15,7 +15,7 @@ error[E0277]: the trait bound `QueryParams: schemars::JsonSchema` is not satisfi (T0, T1, T2, T3, T4, T5) and $N others note: required by a bound in `dropshot::Query` - --> src/handler.rs + --> src/extractor/mod.rs | | pub struct Query { | ^^^^^^^^^^ required by this bound in `dropshot::Query` @@ -38,7 +38,7 @@ error[E0277]: the trait bound `for<'de> QueryParams: serde::de::Deserialize<'de> and $N others = note: required for `QueryParams` to implement `serde::de::DeserializeOwned` note: required by a bound in `dropshot::Query` - --> src/handler.rs + --> src/extractor/mod.rs | | pub struct Query { | ^^^^^^^^^^^^^^^^ required by this bound in `dropshot::Query` diff --git a/dropshot/tests/fail/bad_endpoint5.stderr b/dropshot/tests/fail/bad_endpoint5.stderr index 799f4e82d..b8f0b4538 100644 --- a/dropshot/tests/fail/bad_endpoint5.stderr +++ b/dropshot/tests/fail/bad_endpoint5.stderr @@ -16,7 +16,7 @@ error[E0277]: the trait bound `for<'de> QueryParams: serde::de::Deserialize<'de> and $N others = note: required for `QueryParams` to implement `serde::de::DeserializeOwned` note: required by a bound in `dropshot::Query` - --> src/handler.rs + --> src/extractor/mod.rs | | pub struct Query { | ^^^^^^^^^^^^^^^^ required by this bound in `dropshot::Query` From db9770ffbb8bb145749102ba8421c7a0d662a972 Mon Sep 17 00:00:00 2001 From: David Pacheco Date: Mon, 9 Jan 2023 16:09:12 -0800 Subject: [PATCH 05/47] fix style --- dropshot/src/handler.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dropshot/src/handler.rs b/dropshot/src/handler.rs index e2344dfe8..5f03c8527 100644 --- a/dropshot/src/handler.rs +++ b/dropshot/src/handler.rs @@ -37,10 +37,10 @@ use super::http_util::CONTENT_TYPE_JSON; use super::http_util::CONTENT_TYPE_OCTET_STREAM; use super::server::DropshotState; use super::server::ServerContext; +use 
crate::api_description::ApiEndpointBodyContentType;
 use crate::api_description::ApiEndpointHeader;
 use crate::api_description::ApiEndpointResponse;
 use crate::api_description::ApiSchemaGenerator;
-use crate::api_description::{ApiEndpointBodyContentType};
 use crate::pagination::PaginationParams;
 use crate::router::VariableSet;
 use crate::schema_util::make_subschema_for;

From 41a1cd681c3382d01da36932c2b9aae7c7f97e36 Mon Sep 17 00:00:00 2001
From: David Pacheco
Date: Mon, 9 Jan 2023 16:10:14 -0800
Subject: [PATCH 06/47] WIP: making it work, need to rebase

---
 CHANGELOG.adoc                           |   2 +
 dropshot/examples/request-headers.rs     |   4 +-
 dropshot/src/api_description.rs          |  61 +------
 dropshot/src/extractor/common.rs         | 198 +++++++++++++++++++++++
 dropshot/src/extractor/mod.rs            | 117 ++++----------
 dropshot/src/handler.rs                  |  25 +--
 dropshot/src/lib.rs                      |   3 +-
 dropshot/src/websocket.rs                |  31 ++--
 dropshot/tests/fail/bad_endpoint3.stderr |  29 ----
 dropshot/tests/test_demo.rs              |   2 +-
 dropshot_endpoint/src/lib.rs             |  48 ++----
 11 files changed, 283 insertions(+), 237 deletions(-)
 create mode 100644 dropshot/src/extractor/common.rs

diff --git a/CHANGELOG.adoc b/CHANGELOG.adoc
index 52184c350..b78442ca1 100644
--- a/CHANGELOG.adoc
+++ b/CHANGELOG.adoc
@@ -17,6 +17,8 @@ https://github.com/oxidecomputer/dropshot/compare/v0.8.0\...HEAD[Full list of co

 === Breaking Changes

+// XXX-dap TODO need update here
+
 * https://github.com/oxidecomputer/dropshot/pull/504[#504] Dropshot allows TLS configuration to be supplied either by path or as bytes. For compatibility, the `AsFile` variant of `ConfigTls` contains the `cert_file` and `key_file` fields, and may be used similarly to the old variant.
 * https://github.com/oxidecomputer/dropshot/pull/502[#502] Dropshot exposes a `refresh_tls` method to update the TLS certificates being used by a running server. If you previously tried to access `DropshotState.tls`, you can access the `DropshotState.using_tls()` method instead.
 * https://github.com/oxidecomputer/dropshot/pull/540[#540] `ConfigDropshot` now uses a [`camino::Utf8PathBuf`](https://docs.rs/camino/1.1.1/camino/struct.Utf8PathBuf.html) for its file path. There is no change to the configuration format itself, just its representation in Rust.
diff --git a/dropshot/examples/request-headers.rs b/dropshot/examples/request-headers.rs
index 969511c07..b0ceba7a4 100644
--- a/dropshot/examples/request-headers.rs
+++ b/dropshot/examples/request-headers.rs
@@ -4,8 +4,8 @@
 //!
 //! The headers accessed here will not be recorded as inputs in the OpenAPI
 //! spec. This is not currently supported out-of-the-box with Dropshot, but it
-//! could be done by implementing you're own `Extractor` that pulls the headers
-//! out, similar to what's done here.
+//! could be done by implementing your own `SharedExtractor` that pulls the
+//! headers out, similar to what's done here.
 //!
 //! This example is based on the "basic.rs" one. See that one for more detailed
 //! comments on the common code.
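As a sketch of what the comment above suggests (assumptions: the `DemoHeader`
type and the "demo-header" name are hypothetical, and exact trait and import
paths may shift over the course of this series), a consumer-defined
`SharedExtractor` that surfaces a header as a typed handler argument might
look like:

use async_trait::async_trait;
use dropshot::{
    ApiEndpointBodyContentType, ExtensionMode, ExtractorMetadata, HttpError,
    RequestContext, ServerContext, SharedExtractor,
};

/// Holds the value of the request's "demo-header" header, if any.
#[derive(Debug)]
pub struct DemoHeader(pub Option<String>);

#[async_trait]
impl SharedExtractor for DemoHeader {
    async fn from_request<Context: ServerContext>(
        rqctx: &RequestContext<Context>,
    ) -> Result<DemoHeader, HttpError> {
        // At this point in the series, the request still lives behind a
        // Mutex in the context.
        let request = rqctx.request.lock().await;
        let value = request
            .headers()
            .get("demo-header")
            .and_then(|value| value.to_str().ok())
            .map(String::from);
        Ok(DemoHeader(value))
    }

    fn metadata(
        _body_content_type: ApiEndpointBodyContentType,
    ) -> ExtractorMetadata {
        // To have the header show up in the OpenAPI output, one would
        // describe it here instead of returning an empty parameter list.
        ExtractorMetadata {
            extension_mode: ExtensionMode::None,
            parameters: vec![],
        }
    }
}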
diff --git a/dropshot/src/api_description.rs b/dropshot/src/api_description.rs index 7ef06f5ca..cb52e619f 100644 --- a/dropshot/src/api_description.rs +++ b/dropshot/src/api_description.rs @@ -11,7 +11,7 @@ use crate::router::PathSegment; use crate::server::ServerContext; use crate::type_util::type_is_scalar; use crate::type_util::type_is_string_enum; -use crate::Extractor; +use crate::extractor::RequestExtractor; use crate::HttpErrorResponseBody; use crate::CONTENT_TYPE_JSON; use crate::CONTENT_TYPE_OCTET_STREAM; @@ -56,7 +56,7 @@ impl<'a, Context: ServerContext> ApiEndpoint { ) -> Self where HandlerType: HttpHandlerFunc, - FuncParams: Extractor + 'static, + FuncParams: RequestExtractor + 'static, ResponseType: HttpResponse + Send + Sync + 'static, { let body_content_type = @@ -280,7 +280,6 @@ impl ApiDescription { self.validate_tags(&e)?; self.validate_path_parameters(&e)?; - self.validate_body_parameters(&e)?; self.validate_named_parameters(&e)?; self.router.insert(e); @@ -374,32 +373,7 @@ impl ApiDescription { Ok(()) } - /// Validate that we have a single body parameter. - fn validate_body_parameters( - &self, - e: &ApiEndpoint, - ) -> Result<(), String> { - // Explicitly disallow any attempt to consume the body twice. - let nbodyextractors = e - .parameters - .iter() - .filter(|p| match p.metadata { - ApiEndpointParameterMetadata::Body(..) => true, - _ => false, - }) - .count(); - if nbodyextractors > 1 { - return Err(format!( - "only one body extractor can be used in a handler (this \ - function has {})", - nbodyextractors - )); - } - - Ok(()) - } - - /// Validate that named parameters have appropriate types and their aren't + /// Validate that named parameters have appropriate types and there are no /// duplicates. Parameters must have scalar types except in the case of the /// received for a wildcard path which must be an array of String. fn validate_named_parameters( @@ -1566,8 +1540,6 @@ mod test { use crate::Query; use crate::TagConfig; use crate::TagDetails; - use crate::TypedBody; - use crate::UntypedBody; use crate::CONTENT_TYPE_JSON; use http::Method; use hyper::Body; @@ -1717,31 +1689,8 @@ mod test { } } - #[test] - fn test_two_bodies() { - #[derive(Deserialize, JsonSchema)] - struct AStruct {} - - #[endpoint { - method = PUT, - path = "/testing/two_bodies" - }] - async fn test_twobodies_handler( - _: Arc>, - _: UntypedBody, - _: TypedBody, - ) -> Result, HttpError> { - unimplemented!(); - } - - let mut api = ApiDescription::new(); - let error = api.register(test_twobodies_handler).unwrap_err(); - assert_eq!( - error, - "only one body extractor can be used in a handler (this function \ - has 2)" - ); - } + // XXX-dap TODO-coverage need a test for trying to use two + // ExclusiveExtractors #[test] fn test_dup_names() { diff --git a/dropshot/src/extractor/common.rs b/dropshot/src/extractor/common.rs new file mode 100644 index 000000000..0bfd294ff --- /dev/null +++ b/dropshot/src/extractor/common.rs @@ -0,0 +1,198 @@ +// Copyright 2022 Oxide Computer Company + +// XXX-dap TODO-cleanup should the metadata into a separate, shared trait? + +use crate::api_description::ApiEndpointParameter; +use crate::api_description::{ApiEndpointBodyContentType, ExtensionMode}; +use crate::error::HttpError; +use crate::server::ServerContext; +use crate::RequestContext; + +use async_trait::async_trait; + +/// Metadata associated with an extractor including parameters and whether or not +/// the associated endpoint is paginated. 
+pub struct ExtractorMetadata {
+    pub extension_mode: ExtensionMode,
+    pub parameters: Vec,
+}
+
+/// Extractors that require exclusive access to the underlying `hyper::Request`
+///
+/// These extractors usually need to read the body of the request or else modify
+/// how the server treats the rest of it (e.g., websocket upgrade). There may
+/// be at most one of these associated with any request.
+#[async_trait]
+pub trait ExclusiveExtractor: Send + Sync + Sized {
+    /// Construct an instance of this type from a `RequestContext`.
+    async fn from_request(
+        rqctx: &RequestContext,
+    ) -> Result;
+
+    fn metadata(
+        body_content_type: ApiEndpointBodyContentType,
+    ) -> ExtractorMetadata;
+}
+
+/// Extractors that do _not_ require exclusive access to the underlying
+/// `hyper::Request`
+///
+/// These extractors usually look at immutable properties of the request that
+/// are known up front, like the URL. There may be any number of these
+/// associated with any request.
+#[async_trait]
+pub trait SharedExtractor: Send + Sync + Sized {
+    /// Construct an instance of this type from a `RequestContext`.
+    async fn from_request(
+        rqctx: &RequestContext,
+    ) -> Result;
+
+    fn metadata(
+        body_content_type: ApiEndpointBodyContentType,
+    ) -> ExtractorMetadata;
+}
+
+// A `SharedExtractor` can always be treated like an `ExclusiveExtractor`.
+#[async_trait]
+impl ExclusiveExtractor for S {
+    async fn from_request(
+        rqctx: &RequestContext,
+    ) -> Result {
+        ::from_request(rqctx).await
+    }
+
+    fn metadata(
+        body_content_type: ApiEndpointBodyContentType,
+    ) -> ExtractorMetadata {
+        ::metadata(body_content_type)
+    }
+}
+
+/// Top-level extractor for a given request
+///
+/// During request handling, we wind up needing to call a function with a
+/// variable number of arguments whose types are all extractors (either
+/// `SharedExtractor` or `ExclusiveExtractor`). We achieve this with a separate
+/// type called `RequestExtractor` that looks just like `ExclusiveExtractor`.
+/// We can impl this trait on a tuple of any number of types that themselves
+/// impl `SharedExtractor` or `ExclusiveExtractor` by delegating to each type's
+/// extractor implementation. There may be at most one `ExclusiveExtractor` in
+/// the tuple. We require it to be the last argument just to avoid having to
+/// define the power set of impls.
+///
+/// In practice, `RequestExtractor` is identical to `ExclusiveExtractor`. But
+/// we use them in different ways. `RequestExtractor` is private, only
+/// implemented on tuple types, and only used to kick off extraction.
+/// `ExclusiveExtractor` can be consumer-defined and would generally not be
+/// implemented on tuple types.
+#[async_trait]
+pub trait RequestExtractor: Send + Sync + Sized {
+    /// Construct an instance of this type from a `RequestContext`.
+ async fn from_request( + rqctx: &RequestContext, + ) -> Result; + + fn metadata( + body_content_type: ApiEndpointBodyContentType, + ) -> ExtractorMetadata; +} + +// Impl for zero-element tuple (used for request handlers with no extractors) +#[async_trait] +impl RequestExtractor for () { + async fn from_request( + _rqctx: &RequestContext, + ) -> Result { + Ok(()) + } + + fn metadata( + _body_content_type: ApiEndpointBodyContentType, + ) -> ExtractorMetadata { + ExtractorMetadata { + extension_mode: ExtensionMode::None, + parameters: vec![], + } + } +} + +// Impl for one-element tuple with an exclusive extractor +#[async_trait] +impl RequestExtractor for (X,) { + async fn from_request( + rqctx: &RequestContext, + ) -> Result { + Ok((X::from_request(rqctx).await?,)) + } + + fn metadata( + body_content_type: ApiEndpointBodyContentType, + ) -> ExtractorMetadata { + X::metadata(body_content_type) + } +} + +// XXX-dap TODO-doc update comment based on the change that uses the fact that +// SharedExtractor impls ExclusiveExtractor such that the last item in the +// tuple *must* be an exclusive extractor +/// Defines implementations of `RequestExtractor` for tuples of one or more +/// `SharedExtractor` followed by an `ExclusiveExtractor` +/// +/// As an example, `impl_rqextractor_for_tuple!(S1, S2)` defines an impl of +/// `RequestExtractor` for tuple `(S1, S2, X)` where `S1: SharedExtractor`, +/// `S2: SharedExtractor`, and `X: ExclusiveExtractor`, as well as a similar +/// impl for just `(S1, S2)`. +macro_rules! impl_rqextractor_for_tuple { + ($( $S:ident),+) => { + + // impl RequestExtractor for a tuple of shared extractors with an exclusive extractor + #[async_trait] + impl< X: ExclusiveExtractor + 'static, $($S: SharedExtractor + 'static,)+ > + RequestExtractor + for ($($S,)+ X) + { + async fn from_request(rqctx: &RequestContext) + -> Result<( $($S,)+ X ), HttpError> + { + futures::try_join!( + $($S::from_request(rqctx),)+ + X::from_request(rqctx) + ) + } + + fn metadata(_body_content_type: ApiEndpointBodyContentType) -> ExtractorMetadata { + #[allow(unused_mut)] + let mut extension_mode = ExtensionMode::None; + #[allow(unused_mut)] + let mut parameters = vec![]; + $( + let mut metadata = $S::metadata(_body_content_type.clone()); + extension_mode = match (extension_mode, metadata.extension_mode) { + (ExtensionMode::None, x) | (x, ExtensionMode::None) => x, + (x, y) if x != y => { + panic!("incompatible extension modes in tuple: {:?} != {:?}", x, y); + } + (_, x) => x, + }; + parameters.append(&mut metadata.parameters); + )+ + + let mut metadata = X::metadata(_body_content_type.clone()); + extension_mode = match (extension_mode, metadata.extension_mode) { + (ExtensionMode::None, x) | (x, ExtensionMode::None) => x, + (x, y) if x != y => { + panic!("incompatible extension modes in tuple: {:?} != {:?}", x, y); + } + (_, x) => x, + }; + parameters.append(&mut metadata.parameters); + + ExtractorMetadata { extension_mode, parameters } + } + } +}} + +// Implement `RequestExtractor` for any tuple consisting of 0-2 shared +// extractors and exactly one exclusive extractor. 
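// For illustration (hypothetical endpoint, not part of this change): the
// impls generated below mean a handler may take up to two shared extractors
// followed by at most one exclusive extractor, which must come last:
//
//     async fn example_handler(
//         rqctx: Arc<RequestContext<()>>,
//         query: Query<QueryParams>,    // shared
//         path: Path<PathParams>,       // shared
//         body: TypedBody<BodyParams>,  // exclusive -- must be last
//     ) -> Result<HttpResponseOk<String>, HttpError> { /* ... */ }
//
// A tuple like (TypedBody<...>, Query<...>) gets no RequestExtractor impl,
// so putting the exclusive extractor anywhere but last fails to compile.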
+impl_rqextractor_for_tuple!(S1); +impl_rqextractor_for_tuple!(S1, S2); diff --git a/dropshot/src/extractor/mod.rs b/dropshot/src/extractor/mod.rs index d80799947..721277f85 100644 --- a/dropshot/src/extractor/mod.rs +++ b/dropshot/src/extractor/mod.rs @@ -30,78 +30,15 @@ use schemars::schema::SchemaObject; use schemars::JsonSchema; use serde::de::DeserializeOwned; use std::fmt::Debug; -use std::sync::Arc; - -/// `Extractor` defines an interface allowing a type to be constructed from a -/// `RequestContext`. Unlike most traits, `Extractor` essentially defines only a -/// constructor function, not instance functions. -/// -/// The extractors that we provide (`Query`, `Path`, `TypedBody`, `UntypedBody`, and -/// `WebsocketUpgrade`) implement `Extractor` in order to construct themselves from -/// the request. For example, `Extractor` is implemented for `Query` with a -/// function that reads the query string from the request, parses it, and -/// constructs a `Query` with it. -/// -/// We also define implementations of `Extractor` for tuples of types that -/// themselves implement `Extractor`. See the implementation of -/// `HttpRouteHandler` for more on why this needed. -#[async_trait] -pub trait Extractor: Send + Sync + Sized { - /// Construct an instance of this type from a `RequestContext`. - async fn from_request( - rqctx: Arc>, - ) -> Result; - - fn metadata( - body_content_type: ApiEndpointBodyContentType, - ) -> ExtractorMetadata; -} -/// Metadata associated with an extractor including parameters and whether or not -/// the associated endpoint is paginated. -pub struct ExtractorMetadata { - pub extension_mode: ExtensionMode, - pub parameters: Vec, -} - -/// `impl_derived_for_tuple!` defines implementations of `Extractor` for tuples -/// whose elements themselves implement `Extractor`. -macro_rules! impl_extractor_for_tuple { - ($( $T:ident),*) => { - #[async_trait] - impl< $($T: Extractor + 'static,)* > Extractor for ($($T,)*) - { - async fn from_request(_rqctx: Arc>) - -> Result<( $($T,)* ), HttpError> - { - futures::try_join!($($T::from_request(Arc::clone(&_rqctx)),)*) - } +mod common; - fn metadata(_body_content_type: ApiEndpointBodyContentType) -> ExtractorMetadata { - #[allow(unused_mut)] - let mut extension_mode = ExtensionMode::None; - #[allow(unused_mut)] - let mut parameters = vec![]; - $( - let mut metadata = $T::metadata(_body_content_type.clone()); - extension_mode = match (extension_mode, metadata.extension_mode) { - (ExtensionMode::None, x) | (x, ExtensionMode::None) => x, - (x, y) if x != y => { - panic!("incompatible extension modes in tuple: {:?} != {:?}", x, y); - } - (_, x) => x, - }; - parameters.append(&mut metadata.parameters); - )* - ExtractorMetadata { extension_mode, parameters } - } - } -}} +pub use common::ExclusiveExtractor; +pub use common::ExtractorMetadata; +pub use common::RequestExtractor; +pub use common::SharedExtractor; -impl_extractor_for_tuple!(); -impl_extractor_for_tuple!(T1); -impl_extractor_for_tuple!(T1, T2); -impl_extractor_for_tuple!(T1, T2, T3); +// XXX-dap move these definitions to separate files? // Query: query string extractor @@ -140,19 +77,19 @@ where } } -// The `Extractor` implementation for Query describes how to construct -// an instance of `Query` from an HTTP request: namely, by parsing -// the query string to an instance of `QueryType`. 
+// The `SharedExtractor` implementation for Query describes how to +// construct an instance of `Query` from an HTTP request: namely, by +// parsing the query string to an instance of `QueryType`. // TODO-cleanup We shouldn't have to use the "'static" bound on `QueryType` // here. It seems like we ought to be able to use 'async_trait, but that // doesn't seem to be defined. #[async_trait] -impl Extractor for Query +impl SharedExtractor for Query where QueryType: JsonSchema + DeserializeOwned + Send + Sync + 'static, { async fn from_request( - rqctx: Arc>, + rqctx: &RequestContext, ) -> Result, HttpError> { let request = rqctx.request.lock().await; http_request_load_query(&request) @@ -183,16 +120,16 @@ impl Path { } } -// The `Extractor` implementation for Path describes how to construct -// an instance of `Path` from an HTTP request: namely, by extracting -// parameters from the query string. +// The `SharedExtractor` implementation for Path describes how to +// construct an instance of `Path` from an HTTP request: namely, by +// extracting parameters from the query string. #[async_trait] -impl Extractor for Path +impl SharedExtractor for Path where PathType: DeserializeOwned + JsonSchema + Send + Sync + 'static, { async fn from_request( - rqctx: Arc>, + rqctx: &RequestContext, ) -> Result, HttpError> { let params: PathType = http_extract_path_params(&rqctx.path_variables)?; Ok(Path { inner: params }) @@ -288,7 +225,7 @@ impl /// Given an HTTP request, attempt to read the body, parse it according /// to the content type, and deserialize it to an instance of `BodyType`. async fn http_request_load_body( - rqctx: Arc>, + rqctx: &RequestContext, ) -> Result, HttpError> where BodyType: JsonSchema + DeserializeOwned + Send + Sync, @@ -352,19 +289,19 @@ where Ok(TypedBody { inner: content }) } -// The `Extractor` implementation for TypedBody describes how to -// construct an instance of `TypedBody` from an HTTP request: namely, -// by reading the request body and parsing it as JSON into type `BodyType`. -// TODO-cleanup We shouldn't have to use the "'static" bound on `BodyType` here. -// It seems like we ought to be able to use 'async_trait, but that doesn't seem -// to be defined. +// The `ExclusiveExtractor` implementation for TypedBody describes how +// to construct an instance of `TypedBody` from an HTTP request: +// namely, by reading the request body and parsing it as JSON into type +// `BodyType`. TODO-cleanup We shouldn't have to use the "'static" bound on +// `BodyType` here. It seems like we ought to be able to use 'async_trait, but +// that doesn't seem to be defined. #[async_trait] -impl Extractor for TypedBody +impl ExclusiveExtractor for TypedBody where BodyType: JsonSchema + DeserializeOwned + Send + Sync + 'static, { async fn from_request( - rqctx: Arc>, + rqctx: &RequestContext, ) -> Result, HttpError> { http_request_load_body(rqctx).await } @@ -415,9 +352,9 @@ impl UntypedBody { } #[async_trait] -impl Extractor for UntypedBody { +impl ExclusiveExtractor for UntypedBody { async fn from_request( - rqctx: Arc>, + rqctx: &RequestContext, ) -> Result { let server = &rqctx.server; let mut request = rqctx.request.lock().await; diff --git a/dropshot/src/handler.rs b/dropshot/src/handler.rs index 5f03c8527..fce5574a5 100644 --- a/dropshot/src/handler.rs +++ b/dropshot/src/handler.rs @@ -32,7 +32,7 @@ //! OpenAPI document generation. 
use super::error::HttpError; -use super::extractor::Extractor; +use super::extractor::RequestExtractor; use super::http_util::CONTENT_TYPE_JSON; use super::http_util::CONTENT_TYPE_OCTET_STREAM; use super::server::DropshotState; @@ -166,7 +166,7 @@ pub trait HttpHandlerFunc: Send + Sync + 'static where Context: ServerContext, - FuncParams: Extractor, + FuncParams: RequestExtractor, ResponseType: HttpResponse + Send + Sync + 'static, { async fn handle_request( @@ -267,7 +267,8 @@ macro_rules! impl_HttpHandlerFunc_for_func_with_params { FutureType: Future> + Send + 'static, ResponseType: HttpResponse + Send + Sync + 'static, - $($T: Extractor + Send + Sync + 'static,)* + ($($T,)*): RequestExtractor, + $($T: Send + Sync + 'static,)* { async fn handle_request( &self, @@ -318,7 +319,7 @@ pub struct HttpRouteHandler where Context: ServerContext, HandlerType: HttpHandlerFunc, - FuncParams: Extractor, + FuncParams: RequestExtractor, ResponseType: HttpResponse + Send + Sync + 'static, { /// the actual HttpHandlerFunc used to implement this route @@ -341,7 +342,7 @@ impl Debug where Context: ServerContext, HandlerType: HttpHandlerFunc, - FuncParams: Extractor, + FuncParams: RequestExtractor, ResponseType: HttpResponse + Send + Sync + 'static, { fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult { @@ -355,7 +356,7 @@ impl RouteHandler where Context: ServerContext, HandlerType: HttpHandlerFunc, - FuncParams: Extractor + 'static, + FuncParams: RequestExtractor + 'static, ResponseType: HttpResponse + Send + Sync + 'static, { fn label(&self) -> &str { @@ -371,10 +372,10 @@ where // arguments to the handler function. This could be `()`, `(Query)`, // `(TypedBody)`, `(Query, TypedBody)`, or any other // combination of extractors we decide to support in the future. - // Whatever it is must implement `Extractor`, which means we can invoke - // `Extractor::from_request()` to construct the argument tuple, - // generally from information available in the `request` object. We - // pass this down to the `HttpHandlerFunc`, for which there's a + // Whatever it is must implement `RequestExtractor`, which means we can + // invoke `RequestExtractor::from_request()` to construct the argument + // tuple, generally from information available in the `request` object. + // We pass this down to the `HttpHandlerFunc`, for which there's a // different implementation for each value of `FuncParams`. The // `HttpHandlerFunc` for each `FuncParams` just pulls the arguments out // of the `funcparams` tuple and makes them actual function arguments @@ -383,7 +384,7 @@ where // actual handler function. From this point down, all of this is // resolved statically. 
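        // As a concrete (illustrative) example: for FuncParams =
        // (Query<Q>, TypedBody<B>), the next few lines behave roughly like
        //
        //     let funcparams =
        //         <(Query<Q>, TypedBody<B>) as RequestExtractor>
        //             ::from_request(&rqctx).await?;
        //     let (query, body) = funcparams;
        //     handler(rqctx, query, body).await
        //
        // with Query parsed from the request's query string and TypedBody
        // consuming the request body.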
let rqctx = Arc::new(rqctx_raw); - let funcparams = Extractor::from_request(Arc::clone(&rqctx)).await?; + let funcparams = RequestExtractor::from_request(&rqctx).await?; let future = self.handler.handle_request(rqctx, funcparams); future.await } @@ -396,7 +397,7 @@ impl where Context: ServerContext, HandlerType: HttpHandlerFunc, - FuncParams: Extractor + 'static, + FuncParams: RequestExtractor + 'static, ResponseType: HttpResponse + Send + Sync + 'static, { /// Given a function matching one of the supported API handler function diff --git a/dropshot/src/lib.rs b/dropshot/src/lib.rs index 2a3803c16..a8788f01c 100644 --- a/dropshot/src/lib.rs +++ b/dropshot/src/lib.rs @@ -629,10 +629,11 @@ pub use config::ConfigDropshot; pub use config::ConfigTls; pub use error::HttpError; pub use error::HttpErrorResponseBody; -pub use extractor::Extractor; +pub use extractor::ExclusiveExtractor; pub use extractor::ExtractorMetadata; pub use extractor::Path; pub use extractor::Query; +pub use extractor::SharedExtractor; pub use extractor::TypedBody; pub use extractor::UntypedBody; pub use handler::http_response_found; diff --git a/dropshot/src/websocket.rs b/dropshot/src/websocket.rs index 056b11377..71800060b 100644 --- a/dropshot/src/websocket.rs +++ b/dropshot/src/websocket.rs @@ -8,8 +8,8 @@ use crate::api_description::ExtensionMode; use crate::{ - ApiEndpointBodyContentType, Extractor, ExtractorMetadata, HttpError, - RequestContext, ServerContext, + ApiEndpointBodyContentType, ExclusiveExtractor, ExtractorMetadata, + HttpError, RequestContext, ServerContext, }; use async_trait::async_trait; use http::header; @@ -22,10 +22,10 @@ use serde_json::json; use sha1::{Digest, Sha1}; use slog::Logger; use std::future::Future; -use std::sync::Arc; -/// WebsocketUpgrade is an Extractor used to upgrade and handle an HTTP request -/// as a websocket when present in a Dropshot endpoint's function arguments. +/// WebsocketUpgrade is an ExclusiveExtractor used to upgrade and handle an HTTP +/// request as a websocket when present in a Dropshot endpoint's function +/// arguments. /// /// The consumer of this must call [WebsocketUpgrade::handle] for the connection /// to be upgraded. (This is done for you by `#[channel]`.) @@ -78,13 +78,13 @@ fn derive_accept_key(request_key: &[u8]) -> String { base64::encode(&sha1.finalize()) } -/// This `Extractor` implementation constructs an instance of `WebsocketUpgrade` -/// from an HTTP request, and returns an error if the given request does not -/// contain websocket upgrade headers. +/// This `ExclusiveExtractor` implementation constructs an instance of +/// `WebsocketUpgrade` from an HTTP request, and returns an error if the given +/// request does not contain websocket upgrade headers. 
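// For reference (illustrative; these are the standard RFC 6455 upgrade
// headers, the same set the mock request in this module's tests supplies):
// a request that passes the checks below looks like
//
//     GET /my/ws/endpoint HTTP/1.1
//     Connection: Upgrade
//     Upgrade: websocket
//     Sec-WebSocket-Version: 13
//     Sec-WebSocket-Key: aGFjayB0aGUgcGxhbmV0IQ==
//
// and a request missing any of these headers is rejected with an error.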
#[async_trait] -impl Extractor for WebsocketUpgrade { +impl ExclusiveExtractor for WebsocketUpgrade { async fn from_request( - rqctx: Arc>, + rqctx: &RequestContext, ) -> Result { let request = &mut *rqctx.request.lock().await; @@ -190,8 +190,8 @@ impl WebsocketUpgrade { /// #[dropshot::endpoint { method = GET, path = "/my/ws/endpoint/{id}" }] /// async fn my_ws_endpoint( /// rqctx: std::sync::Arc>, - /// websock: dropshot::WebsocketUpgrade, /// id: dropshot::Path, + /// websock: dropshot::WebsocketUpgrade, /// ) -> dropshot::WebsocketEndpointResult { /// let logger = rqctx.log.new(slog::o!()); /// websock.handle(move |upgraded| async move { @@ -295,7 +295,9 @@ impl JsonSchema for WebsocketUpgrade { mod tests { use crate::router::HttpRouter; use crate::server::{DropshotState, ServerConfig}; - use crate::{Extractor, HttpError, RequestContext, WebsocketUpgrade}; + use crate::{ + ExclusiveExtractor, HttpError, RequestContext, WebsocketUpgrade, + }; use futures::lock::Mutex; use http::Request; use hyper::Body; @@ -306,7 +308,7 @@ mod tests { async fn ws_upg_from_mock_rqctx() -> Result { let log = slog::Logger::root(slog::Discard, slog::o!()).new(slog::o!()); - let fut = WebsocketUpgrade::from_request(Arc::new(RequestContext { + let rqctx = RequestContext { server: Arc::new(DropshotState { private: (), config: ServerConfig { @@ -338,7 +340,8 @@ mod tests { body_content_type: Default::default(), request_id: "".to_string(), log: log.clone(), - })); + }; + let fut = WebsocketUpgrade::from_request(&rqctx); tokio::time::timeout(Duration::from_secs(1), fut) .await .expect("Deadlocked in WebsocketUpgrade constructor") diff --git a/dropshot/tests/fail/bad_endpoint3.stderr b/dropshot/tests/fail/bad_endpoint3.stderr index 1c0a1ce47..f6b90e23c 100644 --- a/dropshot/tests/fail/bad_endpoint3.stderr +++ b/dropshot/tests/fail/bad_endpoint3.stderr @@ -1,32 +1,3 @@ -error[E0277]: the trait bound `String: Extractor` is not satisfied - --> tests/fail/bad_endpoint3.rs:17:12 - | -17 | param: String, - | ^^^^^^ the trait `Extractor` is not implemented for `String` - | - = help: the following other types implement trait `Extractor`: - () - (T1, T2) - (T1, T2, T3) - (T1,) - TypedBody - UntypedBody - WebsocketUpgrade - dropshot::Path - dropshot::Query -note: required by a bound in `need_extractor` - --> tests/fail/bad_endpoint3.rs:11:1 - | -11 | / #[endpoint { -12 | | method = GET, -13 | | path = "/test", -14 | | }] - | |__^ required by this bound in `need_extractor` -... 
-17 | param: String, - | ------ required by a bound in this - = note: this error originates in the attribute macro `endpoint` (in Nightly builds, run with -Z macro-backtrace for more info) - error[E0277]: the trait bound `fn(Arc>, String) -> impl Future, HttpError>> { for ApiEndpoint<> as RequestContextArgument>::Context>>::from::bad_endpoint}: dropshot::handler::HttpHandlerFunc<_, _, _>` is not satisfied --> tests/fail/bad_endpoint3.rs:15:10 | diff --git a/dropshot/tests/test_demo.rs b/dropshot/tests/test_demo.rs index aabeb9756..acfd837a4 100644 --- a/dropshot/tests/test_demo.rs +++ b/dropshot/tests/test_demo.rs @@ -966,8 +966,8 @@ pub struct DemoUntypedQuery { }] async fn demo_handler_untyped_body( _rqctx: Arc>, - body: UntypedBody, query: Query, + body: UntypedBody, ) -> Result, HttpError> { let nbytes = body.as_bytes().len(); let as_utf8 = if query.into_inner().parse_str.unwrap_or(false) { diff --git a/dropshot_endpoint/src/lib.rs b/dropshot_endpoint/src/lib.rs index a9e2cb3d2..c83636b8b 100644 --- a/dropshot_endpoint/src/lib.rs +++ b/dropshot_endpoint/src/lib.rs @@ -173,7 +173,8 @@ fn do_channel( ChannelProtocol::WEBSOCKETS => { // here we construct a wrapper function and mutate the arguments a bit // for the outer layer: we replace WebsocketConnection, which is not - // an extractor, with WebsocketUpgrade, which is. + // an extractor, with WebsocketUpgrade, which is. We also move it + // to the end. let ItemFnForSignature { attrs, vis, mut sig, _block: body } = syn::parse2(item)?; @@ -219,6 +220,13 @@ fn do_channel( )); } + // XXX-dap TODO-cleanup This is a gross way to do it. + let mut input_pairs = + sig.inputs.clone().into_pairs().collect::>(); + let second_pair = input_pairs.remove(1); + input_pairs.push(second_pair); + sig.inputs = input_pairs.into_iter().collect(); + sig.output = syn::parse2(quote!(-> dropshot::WebsocketEndpointResult))?; @@ -425,12 +433,12 @@ fn do_endpoint_inner( .inputs .iter() .enumerate() - .map(|(index, arg)| { + .filter_map(|(index, arg)| { match arg { syn::FnArg::Receiver(_) => { // The compiler failure here is already comprehensible. arg_is_receiver = true; - quote! {} + Some(quote! {}) } syn::FnArg::Typed(pat) => { let span = pat.ty.span(); @@ -440,23 +448,15 @@ fn do_endpoint_inner( // The first parameter must be an Arc> // and fortunately we already have a trait that we can // use to validate this type. - quote_spanned! { span=> + Some(quote_spanned! { span=> const _: fn() = || { struct NeedRequestContext(<#ty as #dropshot::RequestContextArgument>::Context); }; - } + }) } else { - // Subsequent parameters must implement Extractor. - quote_spanned! 
{ span=> - const _: fn() = || { - fn need_extractor() - where - T: ?Sized + #dropshot::Extractor, - { - } - need_extractor::<#ty>(); - }; - } + // XXX-dap the remaining stuff must together impl + // `RequestExtractor` + None } } } @@ -935,14 +935,6 @@ mod tests { const _: fn() = || { struct NeedRequestContext( > as dropshot::RequestContextArgument>::Context) ; }; - const _: fn() = || { - fn need_extractor() - where - T: ?Sized + dropshot::Extractor, - { - } - need_extractor:: >(); - }; const _: fn() = || { trait ResultTrait { type T; @@ -1041,14 +1033,6 @@ mod tests { const _: fn() = || { struct NeedRequestContext( > as dropshot::RequestContextArgument>::Context) ; }; - const _: fn() = || { - fn need_extractor() - where - T: ?Sized + dropshot::Extractor, - { - } - need_extractor:: >(); - }; const _: fn() = || { trait ResultTrait { type T; From 833920fcfa40cf368a5884be0b587a3b44fed700 Mon Sep 17 00:00:00 2001 From: David Pacheco Date: Mon, 9 Jan 2023 16:46:43 -0800 Subject: [PATCH 07/47] update changelog with some todo items --- CHANGELOG.adoc | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/CHANGELOG.adoc b/CHANGELOG.adoc index b78442ca1..5436afa3c 100644 --- a/CHANGELOG.adoc +++ b/CHANGELOG.adoc @@ -18,6 +18,11 @@ https://github.com/oxidecomputer/dropshot/compare/v0.8.0\...HEAD[Full list of co === Breaking Changes // XXX-dap TODO need update here +// Extractor -> {Shared,Exclusive}Extractor +// type signature of from_request() changed +// both: accept &RequestContext instead of Arc +// now: no other change. future: exclusive one will get a hyper::Request +// exclusive extractors must appear last in the argument list * https://github.com/oxidecomputer/dropshot/pull/504[#504] Dropshot allows TLS configuration to be supplied either by path or as bytes. For compatibility, the `AsFile` variant of `ConfigTls` contains the `cert_file` and `key_file` fields, and may be used similarly to the old variant. * https://github.com/oxidecomputer/dropshot/pull/502[#502] Dropshot exposes a `refresh_tls` method to update the TLS certificates being used by a running server. If you previously tried to access `DropshotState.tls`, you can access the `DropshotState.using_tls()` method instead. From 067dd4a34d4822653fc582959298197660ced55d Mon Sep 17 00:00:00 2001 From: David Pacheco Date: Mon, 9 Jan 2023 19:49:38 -0800 Subject: [PATCH 08/47] prototype: remove request from RequestContext --- CHANGELOG.adoc | 4 ++ dropshot/examples/request-headers.rs | 3 +- dropshot/src/extractor/common.rs | 15 +++++-- dropshot/src/extractor/mod.rs | 49 +++++++++++++++++++---- dropshot/src/handler.rs | 58 ++++++++++++++++++++++------ dropshot/src/lib.rs | 2 + dropshot/src/server.rs | 6 ++- dropshot/src/websocket.rs | 29 ++++++-------- 8 files changed, 121 insertions(+), 45 deletions(-) diff --git a/CHANGELOG.adoc b/CHANGELOG.adoc index 5436afa3c..a2fb7d460 100644 --- a/CHANGELOG.adoc +++ b/CHANGELOG.adoc @@ -24,6 +24,10 @@ https://github.com/oxidecomputer/dropshot/compare/v0.8.0\...HEAD[Full list of co // now: no other change. future: exclusive one will get a hyper::Request // exclusive extractors must appear last in the argument list +// XXX-dap TODO more updates for RawRequest extractor +// also update crate-level docs and other places we talk about TypedBody, etc. +// maybe add an example? + * https://github.com/oxidecomputer/dropshot/pull/504[#504] Dropshot allows TLS configuration to be supplied either by path or as bytes. 
For compatibility, the `AsFile` variant of `ConfigTls` contains the `cert_file` and `key_file` fields, and may be used similarly to the old variant. * https://github.com/oxidecomputer/dropshot/pull/502[#502] Dropshot exposes a `refresh_tls` method to update the TLS certificates being used by a running server. If you previously tried to access `DropshotState.tls`, you can access the `DropshotState.using_tls()` method instead. * https://github.com/oxidecomputer/dropshot/pull/540[#540] `ConfigDropshot` now uses a [`camino::Utf8PathBuf`](https://docs.rs/camino/1.1.1/camino/struct.Utf8PathBuf.html) for its file path. There is no change to the configuration format itself, just its representation in Rust. diff --git a/dropshot/examples/request-headers.rs b/dropshot/examples/request-headers.rs index b0ceba7a4..80a360509 100644 --- a/dropshot/examples/request-headers.rs +++ b/dropshot/examples/request-headers.rs @@ -48,9 +48,8 @@ async fn main() -> Result<(), String> { async fn example_api_get_header_generic( rqctx: Arc>, ) -> Result, HttpError> { - let request = rqctx.request.lock().await; // Note that clients can provide multiple values for a header. See // http::HeaderMap for ways to get all of them. - let header = request.headers().get("demo-header"); + let header = rqctx.request.headers().get("demo-header"); Ok(HttpResponseOk(format!("value for header: {:?}", header))) } diff --git a/dropshot/src/extractor/common.rs b/dropshot/src/extractor/common.rs index 0bfd294ff..509f4887d 100644 --- a/dropshot/src/extractor/common.rs +++ b/dropshot/src/extractor/common.rs @@ -27,6 +27,7 @@ pub trait ExclusiveExtractor: Send + Sync + Sized { /// Construct an instance of this type from a `RequestContext`. async fn from_request( rqctx: &RequestContext, + request: hyper::Request, ) -> Result; fn metadata( @@ -57,6 +58,7 @@ pub trait SharedExtractor: Send + Sync + Sized { impl ExclusiveExtractor for S { async fn from_request( rqctx: &RequestContext, + _request: hyper::Request, ) -> Result { ::from_request(rqctx).await } @@ -90,6 +92,7 @@ pub trait RequestExtractor: Send + Sync + Sized { /// Construct an instance of this type from a `RequestContext`. async fn from_request( rqctx: &RequestContext, + request: hyper::Request, ) -> Result; fn metadata( @@ -102,6 +105,7 @@ pub trait RequestExtractor: Send + Sync + Sized { impl RequestExtractor for () { async fn from_request( _rqctx: &RequestContext, + _request: hyper::Request, ) -> Result { Ok(()) } @@ -121,8 +125,9 @@ impl RequestExtractor for () { impl RequestExtractor for (X,) { async fn from_request( rqctx: &RequestContext, + request: hyper::Request, ) -> Result { - Ok((X::from_request(rqctx).await?,)) + Ok((X::from_request(rqctx, request).await?,)) } fn metadata( @@ -151,12 +156,14 @@ macro_rules! 
impl_rqextractor_for_tuple { RequestExtractor for ($($S,)+ X) { - async fn from_request(rqctx: &RequestContext) - -> Result<( $($S,)+ X ), HttpError> + async fn from_request( + rqctx: &RequestContext, + request: hyper::Request + ) -> Result<( $($S,)+ X ), HttpError> { futures::try_join!( $($S::from_request(rqctx),)+ - X::from_request(rqctx) + X::from_request(rqctx, request) ) } diff --git a/dropshot/src/extractor/mod.rs b/dropshot/src/extractor/mod.rs index 721277f85..8c477a431 100644 --- a/dropshot/src/extractor/mod.rs +++ b/dropshot/src/extractor/mod.rs @@ -23,8 +23,6 @@ use crate::RequestContext; use async_trait::async_trait; use bytes::Bytes; -use hyper::Body; -use hyper::Request; use schemars::schema::InstanceType; use schemars::schema::SchemaObject; use schemars::JsonSchema; @@ -33,6 +31,7 @@ use std::fmt::Debug; mod common; +use crate::RequestHeader; pub use common::ExclusiveExtractor; pub use common::ExtractorMetadata; pub use common::RequestExtractor; @@ -61,7 +60,7 @@ impl Query { /// Given an HTTP request, pull out the query string and attempt to deserialize /// it as an instance of `QueryType`. fn http_request_load_query( - request: &Request, + request: &RequestHeader, ) -> Result, HttpError> where QueryType: DeserializeOwned + JsonSchema + Send + Sync, @@ -91,8 +90,7 @@ where async fn from_request( rqctx: &RequestContext, ) -> Result, HttpError> { - let request = rqctx.request.lock().await; - http_request_load_query(&request) + http_request_load_query(&rqctx.request) } fn metadata( @@ -226,12 +224,12 @@ impl /// to the content type, and deserialize it to an instance of `BodyType`. async fn http_request_load_body( rqctx: &RequestContext, + mut request: hyper::Request, ) -> Result, HttpError> where BodyType: JsonSchema + DeserializeOwned + Send + Sync, { let server = &rqctx.server; - let mut request = rqctx.request.lock().await; let body = http_read_body( request.body_mut(), server.config.request_body_max_bytes, @@ -302,8 +300,9 @@ where { async fn from_request( rqctx: &RequestContext, + request: hyper::Request, ) -> Result, HttpError> { - http_request_load_body(rqctx).await + http_request_load_body(rqctx, request).await } fn metadata(content_type: ApiEndpointBodyContentType) -> ExtractorMetadata { @@ -355,9 +354,9 @@ impl UntypedBody { impl ExclusiveExtractor for UntypedBody { async fn from_request( rqctx: &RequestContext, + mut request: hyper::Request, ) -> Result { let server = &rqctx.server; - let mut request = rqctx.request.lock().await; let body_bytes = http_read_body( request.body_mut(), server.config.request_body_max_bytes, @@ -391,6 +390,40 @@ impl ExclusiveExtractor for UntypedBody { } } +// RawRequest: extractor for the raw underlying hyper::Request + +/// `RawRequest` is an extractor providing access to the raw underlying +/// [`hyper::Request`]. 
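// Illustrative use (hypothetical endpoint, not part of this change): a
// handler can take RawRequest as its final, exclusive argument to deal with
// the request itself, e.g. to read the body without any parsing:
//
//     #[endpoint { method = POST, path = "/raw" }]
//     async fn raw_handler(
//         rqctx: Arc<RequestContext<()>>,
//         raw: RawRequest,
//     ) -> Result<HttpResponseOk<String>, HttpError> {
//         let request = raw.into_inner();
//         let bytes = hyper::body::to_bytes(request.into_body())
//             .await
//             .map_err(|e| HttpError::for_bad_request(None, e.to_string()))?;
//         Ok(HttpResponseOk(format!("read {} bytes", bytes.len())))
//     }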
+#[derive(Debug)] +pub struct RawRequest { + request: hyper::Request, +} + +impl RawRequest { + pub fn into_inner(self) -> hyper::Request { + self.request + } +} + +#[async_trait] +impl ExclusiveExtractor for RawRequest { + async fn from_request( + _rqctx: &RequestContext, + request: hyper::Request, + ) -> Result { + Ok(RawRequest { request }) + } + + fn metadata( + _content_type: ApiEndpointBodyContentType, + ) -> ExtractorMetadata { + ExtractorMetadata { + parameters: vec![], + extension_mode: ExtensionMode::None, + } + } +} + #[cfg(test)] mod test { use crate::api_description::ExtensionMode; diff --git a/dropshot/src/handler.rs b/dropshot/src/handler.rs index fce5574a5..e528474b2 100644 --- a/dropshot/src/handler.rs +++ b/dropshot/src/handler.rs @@ -49,11 +49,9 @@ use crate::schema_util::ReferenceVisitor; use crate::to_map::to_map; use async_trait::async_trait; -use futures::lock::Mutex; use http::HeaderMap; use http::StatusCode; use hyper::Body; -use hyper::Request; use hyper::Response; use schemars::JsonSchema; use serde::de::DeserializeOwned; @@ -73,19 +71,10 @@ use std::sync::Arc; pub type HttpHandlerResult = Result, HttpError>; /// Handle for various interfaces useful during request processing. -// TODO-cleanup What's the right way to package up "request"? The only time we -// need it to be mutable is when we're reading the body (e.g., as part of the -// JSON extractor). In order to support that, we wrap it in something that -// supports interior mutability. It also needs to be thread-safe, since we're -// using async/await. That brings us to Arc>, but it seems like -// overkill since it will only really be used by one thread at a time (at all, -// let alone mutably) and there will never be contention on the Mutex. #[derive(Debug)] pub struct RequestContext { /// shared server state pub server: Arc>, - /// HTTP request details - pub request: Arc>>, /// HTTP request routing variables pub path_variables: VariableSet, /// expected request body mime type @@ -94,6 +83,48 @@ pub struct RequestContext { pub request_id: String, /// logger for this specific request pub log: Logger, + + /// basic request information (method, URI, etc.) + pub request: RequestHeader, +} + +// This is deliberately as close to compatible with `hyper::Request` as +// reasonable. +#[derive(Debug)] +pub struct RequestHeader { + method: http::Method, + uri: http::Uri, + version: http::Version, + headers: http::HeaderMap, +} + +impl From<&hyper::Request> for RequestHeader { + fn from(request: &hyper::Request) -> Self { + RequestHeader { + method: request.method().clone(), + uri: request.uri().clone(), + version: request.version().clone(), + headers: request.headers().clone(), + } + } +} + +impl RequestHeader { + pub fn method(&self) -> &http::Method { + &self.method + } + + pub fn uri(&self) -> &http::Uri { + &self.uri + } + + pub fn version(&self) -> &http::Version { + &self.version + } + + pub fn headers(&self) -> &http::HeaderMap { + &self.headers + } } impl RequestContext { @@ -304,6 +335,7 @@ pub trait RouteHandler: Debug + Send + Sync { async fn handle_request( &self, rqctx: RequestContext, + request: hyper::Request, ) -> HttpHandlerResult; } @@ -366,6 +398,7 @@ where async fn handle_request( &self, rqctx_raw: RequestContext, + request: hyper::Request, ) -> HttpHandlerResult { // This is where the magic happens: in the code below, `funcparams` has // type `FuncParams`, which is a tuple type describing the extractor @@ -384,7 +417,8 @@ where // actual handler function. 
From this point down, all of this is // resolved statically. let rqctx = Arc::new(rqctx_raw); - let funcparams = RequestExtractor::from_request(&rqctx).await?; + let funcparams = + RequestExtractor::from_request(&rqctx, request).await?; let future = self.handler.handle_request(rqctx, funcparams); future.await } diff --git a/dropshot/src/lib.rs b/dropshot/src/lib.rs index a8788f01c..7373274d5 100644 --- a/dropshot/src/lib.rs +++ b/dropshot/src/lib.rs @@ -633,6 +633,7 @@ pub use extractor::ExclusiveExtractor; pub use extractor::ExtractorMetadata; pub use extractor::Path; pub use extractor::Query; +pub use extractor::RawRequest; pub use extractor::SharedExtractor; pub use extractor::TypedBody; pub use extractor::UntypedBody; @@ -653,6 +654,7 @@ pub use handler::HttpResponseTemporaryRedirect; pub use handler::HttpResponseUpdatedNoContent; pub use handler::NoHeaders; pub use handler::RequestContext; +pub use handler::RequestHeader; pub use http_util::CONTENT_TYPE_JSON; pub use http_util::CONTENT_TYPE_NDJSON; pub use http_util::CONTENT_TYPE_OCTET_STREAM; diff --git a/dropshot/src/server.rs b/dropshot/src/server.rs index 6ab40327d..0a250b937 100644 --- a/dropshot/src/server.rs +++ b/dropshot/src/server.rs @@ -38,6 +38,7 @@ use tokio::net::{TcpListener, TcpStream}; use tokio_rustls::{server::TlsStream, TlsAcceptor}; use uuid::Uuid; +use crate::RequestHeader; use slog::Logger; // TODO Replace this with something else? @@ -770,13 +771,14 @@ async fn http_request_handle( server.router.lookup_route(&method, uri.path().into())?; let rqctx = RequestContext { server: Arc::clone(&server), - request: Arc::new(Mutex::new(request)), + request: RequestHeader::from(&request), path_variables: lookup_result.variables, body_content_type: lookup_result.body_content_type, request_id: request_id.to_string(), log: request_log, }; - let mut response = lookup_result.handler.handle_request(rqctx).await?; + let mut response = + lookup_result.handler.handle_request(rqctx, request).await?; response.headers_mut().insert( HEADER_REQUEST_ID, http::header::HeaderValue::from_str(&request_id).unwrap(), diff --git a/dropshot/src/websocket.rs b/dropshot/src/websocket.rs index 71800060b..3a218ea85 100644 --- a/dropshot/src/websocket.rs +++ b/dropshot/src/websocket.rs @@ -85,9 +85,8 @@ fn derive_accept_key(request_key: &[u8]) -> String { impl ExclusiveExtractor for WebsocketUpgrade { async fn from_request( rqctx: &RequestContext, + request: hyper::Request, ) -> Result { - let request = &mut *rqctx.request.lock().await; - if !request .headers() .get(header::CONNECTION) @@ -296,9 +295,9 @@ mod tests { use crate::router::HttpRouter; use crate::server::{DropshotState, ServerConfig}; use crate::{ - ExclusiveExtractor, HttpError, RequestContext, WebsocketUpgrade, + ExclusiveExtractor, HttpError, RequestContext, RequestHeader, + WebsocketUpgrade, }; - use futures::lock::Mutex; use http::Request; use hyper::Body; use std::net::{IpAddr, Ipv6Addr, SocketAddr}; @@ -308,6 +307,13 @@ mod tests { async fn ws_upg_from_mock_rqctx() -> Result { let log = slog::Logger::root(slog::Discard, slog::o!()).new(slog::o!()); + let request = Request::builder() + .header(http::header::CONNECTION, "Upgrade") + .header(http::header::UPGRADE, "websocket") + .header(http::header::SEC_WEBSOCKET_VERSION, "13") + .header(http::header::SEC_WEBSOCKET_KEY, "aGFjayB0aGUgcGxhbmV0IQ==") + .body(Body::empty()) + .unwrap(); let rqctx = RequestContext { server: Arc::new(DropshotState { private: (), @@ -324,24 +330,13 @@ mod tests { ), tls_acceptor: None, }), - request: 
Arc::new(Mutex::new( - Request::builder() - .header(http::header::CONNECTION, "Upgrade") - .header(http::header::UPGRADE, "websocket") - .header(http::header::SEC_WEBSOCKET_VERSION, "13") - .header( - http::header::SEC_WEBSOCKET_KEY, - "aGFjayB0aGUgcGxhbmV0IQ==", - ) - .body(Body::empty()) - .unwrap(), - )), + request: RequestHeader::from(&request), path_variables: Default::default(), body_content_type: Default::default(), request_id: "".to_string(), log: log.clone(), }; - let fut = WebsocketUpgrade::from_request(&rqctx); + let fut = WebsocketUpgrade::from_request(&rqctx, request); tokio::time::timeout(Duration::from_secs(1), fut) .await .expect("Deadlocked in WebsocketUpgrade constructor") From e394128910321be45c80b31a7fc8115e5ab2f179 Mon Sep 17 00:00:00 2001 From: David Pacheco Date: Mon, 9 Jan 2023 19:53:14 -0800 Subject: [PATCH 09/47] add XXX --- dropshot/src/handler.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/dropshot/src/handler.rs b/dropshot/src/handler.rs index e528474b2..16558fb67 100644 --- a/dropshot/src/handler.rs +++ b/dropshot/src/handler.rs @@ -90,6 +90,7 @@ pub struct RequestContext { // This is deliberately as close to compatible with `hyper::Request` as // reasonable. +// XXX-dap TODO This could use a better name. #[derive(Debug)] pub struct RequestHeader { method: http::Method, From 20460f6ed47beb0ba9b02063269e204981fd784b Mon Sep 17 00:00:00 2001 From: David Pacheco Date: Mon, 9 Jan 2023 20:13:34 -0800 Subject: [PATCH 10/47] remove Arc around RequestContext --- CHANGELOG.adoc | 2 + dropshot/examples/basic.rs | 4 +- dropshot/examples/file_server.rs | 2 +- dropshot/examples/https.rs | 4 +- dropshot/examples/index.rs | 2 +- dropshot/examples/module-basic.rs | 4 +- dropshot/examples/module-shared-context.rs | 2 +- dropshot/examples/multiple-servers.rs | 6 +- dropshot/examples/pagination-basic.rs | 2 +- .../examples/pagination-multiple-resources.rs | 6 +- .../examples/pagination-multiple-sorts.rs | 2 +- dropshot/examples/petstore.rs | 6 +- dropshot/examples/request-headers.rs | 2 +- dropshot/examples/schema-with-example.rs | 2 +- dropshot/examples/self-referential.rs | 2 +- dropshot/examples/websocket.rs | 2 +- dropshot/examples/well-tagged.rs | 6 +- dropshot/src/api_description.rs | 6 +- dropshot/src/handler.rs | 15 ++-- dropshot/src/lib.rs | 8 +- dropshot/src/router.rs | 2 +- dropshot/src/server.rs | 2 +- dropshot/src/websocket.rs | 2 +- dropshot/tests/fail/bad_endpoint1.stderr | 2 +- dropshot/tests/fail/bad_endpoint10.rs | 2 +- dropshot/tests/fail/bad_endpoint10.stderr | 4 +- dropshot/tests/fail/bad_endpoint11.rs | 3 +- dropshot/tests/fail/bad_endpoint11.stderr | 14 +-- dropshot/tests/fail/bad_endpoint12.rs | 3 +- dropshot/tests/fail/bad_endpoint12.stderr | 8 +- dropshot/tests/fail/bad_endpoint13.rs | 3 +- dropshot/tests/fail/bad_endpoint13.stderr | 30 +++---- dropshot/tests/fail/bad_endpoint14.rs | 2 +- dropshot/tests/fail/bad_endpoint15.rs | 2 +- dropshot/tests/fail/bad_endpoint15.stderr | 4 +- dropshot/tests/fail/bad_endpoint16.rs | 2 +- dropshot/tests/fail/bad_endpoint2.stderr | 2 +- dropshot/tests/fail/bad_endpoint3.rs | 2 +- dropshot/tests/fail/bad_endpoint3.stderr | 4 +- dropshot/tests/fail/bad_endpoint4.rs | 3 +- dropshot/tests/fail/bad_endpoint4.stderr | 8 +- dropshot/tests/fail/bad_endpoint5.rs | 3 +- dropshot/tests/fail/bad_endpoint5.stderr | 4 +- dropshot/tests/fail/bad_endpoint6.rs | 3 +- dropshot/tests/fail/bad_endpoint6.stderr | 12 +-- dropshot/tests/fail/bad_endpoint7.rs | 3 +- dropshot/tests/fail/bad_endpoint7.stderr | 4 +- 
dropshot/tests/fail/bad_endpoint8.rs | 3 +- dropshot/tests/fail/bad_endpoint8.stderr | 14 +-- dropshot/tests/fail/bad_endpoint9.stderr | 4 +- dropshot/tests/fail/unused_endpoint.rs | 3 +- dropshot/tests/fail/unused_endpoint.stderr | 4 +- dropshot/tests/test_demo.rs | 6 +- dropshot/tests/test_openapi.rs | 50 +++++------ dropshot/tests/test_pagination.rs | 11 ++- dropshot/tests/test_pagination_schema.rs | 4 +- dropshot/tests/test_path_names.rs | 3 +- dropshot/tests/test_streaming.rs | 5 +- dropshot/tests/test_tls.rs | 2 +- dropshot_endpoint/src/lib.rs | 88 +++++++++---------- 60 files changed, 201 insertions(+), 214 deletions(-) diff --git a/CHANGELOG.adoc b/CHANGELOG.adoc index a2fb7d460..b6b9e44de 100644 --- a/CHANGELOG.adoc +++ b/CHANGELOG.adoc @@ -28,6 +28,8 @@ https://github.com/oxidecomputer/dropshot/compare/v0.8.0\...HEAD[Full list of co // also update crate-level docs and other places we talk about TypedBody, etc. // maybe add an example? +// XXX-dap TODO need update for removal of Arc around RequestContext + * https://github.com/oxidecomputer/dropshot/pull/504[#504] Dropshot allows TLS configuration to be supplied either by path or as bytes. For compatibility, the `AsFile` variant of `ConfigTls` contains the `cert_file` and `key_file` fields, and may be used similarly to the old variant. * https://github.com/oxidecomputer/dropshot/pull/502[#502] Dropshot exposes a `refresh_tls` method to update the TLS certificates being used by a running server. If you previously tried to access `DropshotState.tls`, you can access the `DropshotState.using_tls()` method instead. * https://github.com/oxidecomputer/dropshot/pull/540[#540] `ConfigDropshot` now uses a [`camino::Utf8PathBuf`](https://docs.rs/camino/1.1.1/camino/struct.Utf8PathBuf.html) for its file path. There is no change to the configuration format itself, just its representation in Rust. 
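For orientation, the handler-signature change that this patch applies across the tree, as a minimal sketch (the endpoint name and context type here are hypothetical, not code from the patch):

    // Before this patch: handlers received the request context behind an Arc.
    async fn my_endpoint(
        rqctx: Arc<RequestContext<MyContext>>,
    ) -> Result<HttpResponseOk<()>, HttpError> { /* ... */ }

    // After this patch: RequestContext is taken by value. The shared server
    // state it carries (DropshotState) is already behind its own Arc, so the
    // outer Arc was redundant.
    async fn my_endpoint(
        rqctx: RequestContext<MyContext>,
    ) -> Result<HttpResponseOk<()>, HttpError> { /* ... */ }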
diff --git a/dropshot/examples/basic.rs b/dropshot/examples/basic.rs index c11adbbcb..ee57df5fe 100644 --- a/dropshot/examples/basic.rs +++ b/dropshot/examples/basic.rs @@ -83,7 +83,7 @@ struct CounterValue { path = "/counter", }] async fn example_api_get_counter( - rqctx: Arc>, + rqctx: RequestContext, ) -> Result, HttpError> { let api_context = rqctx.context(); @@ -99,7 +99,7 @@ async fn example_api_get_counter( path = "/counter", }] async fn example_api_put_counter( - rqctx: Arc>, + rqctx: RequestContext, update: TypedBody, ) -> Result { let api_context = rqctx.context(); diff --git a/dropshot/examples/file_server.rs b/dropshot/examples/file_server.rs index 59296bf3c..e280ff44f 100644 --- a/dropshot/examples/file_server.rs +++ b/dropshot/examples/file_server.rs @@ -76,7 +76,7 @@ struct AllPath { unpublished = true, }] async fn static_content( - rqctx: Arc>, + rqctx: RequestContext, path: Path, ) -> Result, HttpError> { let path = path.into_inner().path; diff --git a/dropshot/examples/https.rs b/dropshot/examples/https.rs index 524458723..bb449f818 100644 --- a/dropshot/examples/https.rs +++ b/dropshot/examples/https.rs @@ -127,7 +127,7 @@ struct CounterValue { path = "/counter", }] async fn example_api_get_counter( - rqctx: Arc>, + rqctx: RequestContext, ) -> Result, HttpError> { let api_context = rqctx.context(); @@ -143,7 +143,7 @@ async fn example_api_get_counter( path = "/counter", }] async fn example_api_put_counter( - rqctx: Arc>, + rqctx: RequestContext, update: TypedBody, ) -> Result { let api_context = rqctx.context(); diff --git a/dropshot/examples/index.rs b/dropshot/examples/index.rs index 827d74cfa..5668afd47 100644 --- a/dropshot/examples/index.rs +++ b/dropshot/examples/index.rs @@ -66,7 +66,7 @@ struct AllPath { unpublished = true, }] async fn index( - _rqctx: Arc>, + _rqctx: RequestContext<()>, path: Path, ) -> Result, HttpError> { Ok(Response::builder() diff --git a/dropshot/examples/module-basic.rs b/dropshot/examples/module-basic.rs index f6407afe2..485639d36 100644 --- a/dropshot/examples/module-basic.rs +++ b/dropshot/examples/module-basic.rs @@ -89,7 +89,7 @@ pub mod routes { path = "/counter", }] pub async fn example_api_get_counter( - rqctx: Arc>, + rqctx: RequestContext, ) -> Result, HttpError> { let api_context = rqctx.context(); @@ -105,7 +105,7 @@ pub mod routes { path = "/counter", }] pub async fn example_api_put_counter( - rqctx: Arc>, + rqctx: RequestContext, update: TypedBody, ) -> Result { let api_context = rqctx.context(); diff --git a/dropshot/examples/module-shared-context.rs b/dropshot/examples/module-shared-context.rs index 94d108402..3d9ecdf04 100644 --- a/dropshot/examples/module-shared-context.rs +++ b/dropshot/examples/module-shared-context.rs @@ -101,7 +101,7 @@ pub struct CounterValue { path = "/counter", }] pub async fn example_api_get_counter( - rqctx: Arc>>, + rqctx: RequestContext>, ) -> Result, HttpError> { let api_context = rqctx.context(); diff --git a/dropshot/examples/multiple-servers.rs b/dropshot/examples/multiple-servers.rs index 8a46de776..1f11098c5 100644 --- a/dropshot/examples/multiple-servers.rs +++ b/dropshot/examples/multiple-servers.rs @@ -258,7 +258,7 @@ struct ServerDescription { path = "/servers", }] async fn api_get_servers( - rqctx: Arc>, + rqctx: RequestContext, ) -> Result>, HttpError> { let api_context = rqctx.context(); @@ -285,7 +285,7 @@ struct PathName { path = "/servers/{name}", }] async fn api_start_server( - rqctx: Arc>, + rqctx: RequestContext, path: Path, body: TypedBody, ) -> Result, HttpError> { @@ 
-313,7 +313,7 @@ async fn api_start_server( path = "/servers/{name}", }] async fn api_stop_server( - rqctx: Arc>, + rqctx: RequestContext, path: Path, ) -> Result { let api_context = rqctx.context(); diff --git a/dropshot/examples/pagination-basic.rs b/dropshot/examples/pagination-basic.rs index b34c8541e..ed4c36113 100644 --- a/dropshot/examples/pagination-basic.rs +++ b/dropshot/examples/pagination-basic.rs @@ -69,7 +69,7 @@ struct ProjectPage { path = "/projects" }] async fn example_list_projects( - rqctx: Arc>>, + rqctx: RequestContext>, query: Query>, ) -> Result>, HttpError> { let pag_params = query.into_inner(); diff --git a/dropshot/examples/pagination-multiple-resources.rs b/dropshot/examples/pagination-multiple-resources.rs index d5730e21e..f4afea277 100644 --- a/dropshot/examples/pagination-multiple-resources.rs +++ b/dropshot/examples/pagination-multiple-resources.rs @@ -158,7 +158,7 @@ fn scan_params(p: &WhichPage) -> ExScanParams { path = "/projects" }] async fn example_list_projects( - rqctx: Arc>, + rqctx: RequestContext, query: Query>, ) -> Result>, HttpError> { let pag_params = query.into_inner(); @@ -184,7 +184,7 @@ async fn example_list_projects( path = "/disks" }] async fn example_list_disks( - rqctx: Arc>, + rqctx: RequestContext, query: Query>, ) -> Result>, HttpError> { let pag_params = query.into_inner(); @@ -210,7 +210,7 @@ async fn example_list_disks( path = "/instances" }] async fn example_list_instances( - rqctx: Arc>, + rqctx: RequestContext, query: Query>, ) -> Result>, HttpError> { let pag_params = query.into_inner(); diff --git a/dropshot/examples/pagination-multiple-sorts.rs b/dropshot/examples/pagination-multiple-sorts.rs index 0a49c834d..7f673f741 100644 --- a/dropshot/examples/pagination-multiple-sorts.rs +++ b/dropshot/examples/pagination-multiple-sorts.rs @@ -221,7 +221,7 @@ fn page_selector_for( path = "/projects" }] async fn example_list_projects( - rqctx: Arc>, + rqctx: RequestContext, query: Query>, ) -> Result>, HttpError> { let pag_params = query.into_inner(); diff --git a/dropshot/examples/petstore.rs b/dropshot/examples/petstore.rs index ae0c19631..e8acb48a4 100644 --- a/dropshot/examples/petstore.rs +++ b/dropshot/examples/petstore.rs @@ -71,7 +71,7 @@ struct PathParams { }] /// Get the pet with the specified ID async fn get_pet_by_id( - rqctx: Arc>, + rqctx: RequestContext<()>, path_params: Path, ) -> Result, HttpError> { let pet = Pet { @@ -94,7 +94,7 @@ async fn get_pet_by_id( }] /// Add a new pet to the store async fn update_pet_with_form( - rqctx: Arc>, + rqctx: RequestContext<()>, body: TypedBody, ) -> Result, HttpError> { unimplemented!() @@ -125,7 +125,7 @@ struct FindByTagsPageSelector { }] /// Find pets by tags async fn find_pets_by_tags( - rqctx: Arc>, + rqctx: RequestContext<()>, query: Query< PaginationParams, >, diff --git a/dropshot/examples/request-headers.rs b/dropshot/examples/request-headers.rs index 80a360509..651dc745e 100644 --- a/dropshot/examples/request-headers.rs +++ b/dropshot/examples/request-headers.rs @@ -46,7 +46,7 @@ async fn main() -> Result<(), String> { path = "/header-example-generic", }] async fn example_api_get_header_generic( - rqctx: Arc>, + rqctx: RequestContext<()>, ) -> Result, HttpError> { // Note that clients can provide multiple values for a header. See // http::HeaderMap for ways to get all of them. 
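As a minimal sketch of the note above about multi-valued headers (a hypothetical helper, assuming only the http crate; inside a handler one would pass it rqctx.request.headers()):

    // HeaderMap::get_all returns every occurrence of a header, not just the
    // first, so values from a client that repeats a header are all visible.
    fn all_cookie_values(headers: &http::HeaderMap) -> Vec<&http::HeaderValue> {
        headers.get_all(http::header::COOKIE).iter().collect()
    }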
diff --git a/dropshot/examples/schema-with-example.rs b/dropshot/examples/schema-with-example.rs index dc306fcf3..1f54bccb0 100644 --- a/dropshot/examples/schema-with-example.rs +++ b/dropshot/examples/schema-with-example.rs @@ -66,7 +66,7 @@ fn main() -> Result<(), String> { }] /// Get a foo async fn get_foo( - _rqctx: Arc>, + _rqctx: RequestContext<()>, ) -> Result, HttpError> { let foo = foo_example(); Ok(HttpResponseOk(foo)) diff --git a/dropshot/examples/self-referential.rs b/dropshot/examples/self-referential.rs index 0261f3671..fb7d7c2f4 100644 --- a/dropshot/examples/self-referential.rs +++ b/dropshot/examples/self-referential.rs @@ -85,7 +85,7 @@ struct CounterValue { path = "/counter", }] async fn example_api_get_counter( - rqctx: Arc>>, + rqctx: RequestContext>, ) -> Result, HttpError> { let api_context = rqctx.context(); diff --git a/dropshot/examples/websocket.rs b/dropshot/examples/websocket.rs index a79c79a80..b009bc36b 100644 --- a/dropshot/examples/websocket.rs +++ b/dropshot/examples/websocket.rs @@ -61,7 +61,7 @@ struct QueryParams { path = "/counter", }] async fn example_api_websocket_counter( - _rqctx: Arc>, + _rqctx: RequestContext<()>, upgraded: WebsocketConnection, qp: Query, ) -> dropshot::WebsocketChannelResult { diff --git a/dropshot/examples/well-tagged.rs b/dropshot/examples/well-tagged.rs index 85693bf2c..30244dced 100644 --- a/dropshot/examples/well-tagged.rs +++ b/dropshot/examples/well-tagged.rs @@ -19,7 +19,7 @@ use dropshot::{ tags = ["simpsons"], }] async fn get_homerism( - _rqctx: Arc>, + _rqctx: RequestContext<()>, ) -> Result, HttpError> { unimplemented!() } @@ -30,7 +30,7 @@ async fn get_homerism( tags = ["simpsons"], }] async fn get_barneyism( - _rqctx: Arc>, + _rqctx: RequestContext<()>, ) -> Result, HttpError> { unimplemented!() } @@ -41,7 +41,7 @@ async fn get_barneyism( tags = ["futurama"], }] async fn get_fryism( - _rqctx: Arc>, + _rqctx: RequestContext<()>, ) -> Result, HttpError> { unimplemented!() } diff --git a/dropshot/src/api_description.rs b/dropshot/src/api_description.rs index cb52e619f..effcab6b8 100644 --- a/dropshot/src/api_description.rs +++ b/dropshot/src/api_description.rs @@ -1561,7 +1561,7 @@ mod test { } async fn test_badpath_handler( - _: Arc>, + _: RequestContext<()>, _: Path, ) -> Result, HttpError> { panic!("test handler is not supposed to run"); @@ -1627,7 +1627,7 @@ mod test { path = "I don't start with a slash" }] async fn test_badpath_handler( - _: Arc>, + _: RequestContext<()>, ) -> Result, HttpError> { unimplemented!(); } @@ -1708,7 +1708,7 @@ mod test { path = "/testing/{thing}" }] async fn test_dup_names_handler( - _: Arc>, + _: RequestContext<()>, _: Query, _: Path, ) -> Result, HttpError> { diff --git a/dropshot/src/handler.rs b/dropshot/src/handler.rs index 16558fb67..7b8ac4848 100644 --- a/dropshot/src/handler.rs +++ b/dropshot/src/handler.rs @@ -166,15 +166,13 @@ impl RequestContext { /// endpoint macro parse this argument. /// /// The first argument to an endpoint handler must be of the form: -/// `Arc>` where `T` is a caller-supplied +/// `RequestContext` where `T` is a caller-supplied /// value that implements `ServerContext`. pub trait RequestContextArgument { type Context; } -impl RequestContextArgument - for Arc> -{ +impl RequestContextArgument for RequestContext { type Context = T; } @@ -203,7 +201,7 @@ where { async fn handle_request( &self, - rqctx: Arc>, + rqctx: RequestContext, p: FuncParams, ) -> HttpHandlerResult; } @@ -294,7 +292,7 @@ macro_rules! 
impl_HttpHandlerFunc_for_func_with_params { HttpHandlerFunc for FuncType where Context: ServerContext, - FuncType: Fn(Arc>, $($T,)*) + FuncType: Fn(RequestContext, $($T,)*) -> FutureType + Send + Sync + 'static, FutureType: Future> + Send + 'static, @@ -304,7 +302,7 @@ macro_rules! impl_HttpHandlerFunc_for_func_with_params { { async fn handle_request( &self, - rqctx: Arc>, + rqctx: RequestContext, _param_tuple: ($($T,)*) ) -> HttpHandlerResult { @@ -398,7 +396,7 @@ where async fn handle_request( &self, - rqctx_raw: RequestContext, + rqctx: RequestContext, request: hyper::Request, ) -> HttpHandlerResult { // This is where the magic happens: in the code below, `funcparams` has @@ -417,7 +415,6 @@ where // is resolved statically.makes them actual function arguments for the // actual handler function. From this point down, all of this is // resolved statically. - let rqctx = Arc::new(rqctx_raw); let funcparams = RequestExtractor::from_request(&rqctx, request).await?; let future = self.handler.handle_request(rqctx, funcparams); diff --git a/dropshot/src/lib.rs b/dropshot/src/lib.rs index 7373274d5..517130a31 100644 --- a/dropshot/src/lib.rs +++ b/dropshot/src/lib.rs @@ -138,7 +138,7 @@ //! path = "/projects/project1", //! }] //! async fn myapi_projects_get_project( -//! rqctx: Arc>, +//! rqctx: RequestContext<()>, //! ) -> Result, HttpError> //! { //! let project = Project { name: String::from("project1") }; @@ -208,7 +208,7 @@ //! //! ```ignore //! async fn f( -//! rqctx: Arc>, +//! rqctx: RequestContext, //! [query_params: Query,] //! [path_params: Path
<P>
    ,] //! [body_param: TypedBody,] @@ -267,7 +267,7 @@ //! struct MyContext {} //! //! async fn myapi_projects_get( -//! rqctx: Arc>, +//! rqctx: RequestContext, //! query: Query) //! -> Result, HttpError> //! { @@ -482,7 +482,7 @@ //! path = "/list_stuff" //! }] //! async fn my_list_api( -//! rqctx: Arc>, +//! rqctx: RequestContext<()>, //! pag_params: Query>, //! extra_params: Query, //! ) -> Result>, HttpError> diff --git a/dropshot/src/router.rs b/dropshot/src/router.rs index 60c843f0f..03a9aff33 100644 --- a/dropshot/src/router.rs +++ b/dropshot/src/router.rs @@ -741,7 +741,7 @@ mod test { use std::sync::Arc; async fn test_handler( - _: Arc>, + _: RequestContext<()>, ) -> Result, HttpError> { panic!("test handler is not supposed to run"); } diff --git a/dropshot/src/server.rs b/dropshot/src/server.rs index 0a250b937..05fb28a83 100644 --- a/dropshot/src/server.rs +++ b/dropshot/src/server.rs @@ -940,7 +940,7 @@ mod test { path = "/handler", }] async fn handler( - _rqctx: Arc>, + _rqctx: RequestContext, ) -> Result, HttpError> { Ok(HttpResponseOk(3)) } diff --git a/dropshot/src/websocket.rs b/dropshot/src/websocket.rs index 3a218ea85..3eafcac11 100644 --- a/dropshot/src/websocket.rs +++ b/dropshot/src/websocket.rs @@ -188,7 +188,7 @@ impl WebsocketUpgrade { /// ``` /// #[dropshot::endpoint { method = GET, path = "/my/ws/endpoint/{id}" }] /// async fn my_ws_endpoint( - /// rqctx: std::sync::Arc>, + /// rqctx: dropshot::RequestContext<()>, /// id: dropshot::Path, /// websock: dropshot::WebsocketUpgrade, /// ) -> dropshot::WebsocketEndpointResult { diff --git a/dropshot/tests/fail/bad_endpoint1.stderr b/dropshot/tests/fail/bad_endpoint1.stderr index 7d1989a1f..373032809 100644 --- a/dropshot/tests/fail/bad_endpoint1.stderr +++ b/dropshot/tests/fail/bad_endpoint1.stderr @@ -1,6 +1,6 @@ error: Endpoint handlers must have the following signature: async fn( - rqctx: std::sync::Arc>, + rqctx: dropshot::RequestContext, [query_params: Query,] [path_params: Path
<P>
    ,] [body_param: TypedBody,] diff --git a/dropshot/tests/fail/bad_endpoint10.rs b/dropshot/tests/fail/bad_endpoint10.rs index ba0b2b14e..a75ae2f99 100644 --- a/dropshot/tests/fail/bad_endpoint10.rs +++ b/dropshot/tests/fail/bad_endpoint10.rs @@ -12,7 +12,7 @@ use std::sync::Arc; path = "/test", }] async fn bad_error_type( - _: Arc>, + _: RequestContext<()>, ) -> Result, String> { Ok(HttpResponseOk(())) } diff --git a/dropshot/tests/fail/bad_endpoint10.stderr b/dropshot/tests/fail/bad_endpoint10.stderr index f1e9e8fdb..fdb54d3da 100644 --- a/dropshot/tests/fail/bad_endpoint10.stderr +++ b/dropshot/tests/fail/bad_endpoint10.stderr @@ -13,7 +13,7 @@ note: required by a bound in `validate_result_error_type` 16 | ) -> Result, String> { | ^^^^^^ required by this bound in `validate_result_error_type` -error[E0277]: the trait bound `fn(Arc>) -> impl Future, String>> { for ApiEndpoint<> as RequestContextArgument>::Context>>::from::bad_error_type}: dropshot::handler::HttpHandlerFunc<_, _, _>` is not satisfied +error[E0277]: the trait bound `fn(RequestContext<()>) -> impl Future, String>> { for ApiEndpoint< as RequestContextArgument>::Context>>::from::bad_error_type}: dropshot::handler::HttpHandlerFunc<_, _, _>` is not satisfied --> tests/fail/bad_endpoint10.rs:14:10 | 10 | / #[endpoint { @@ -22,7 +22,7 @@ error[E0277]: the trait bound `fn(Arc>) -> impl Future` is not implemented for fn item `fn(Arc>) -> impl Future, String>> { for ApiEndpoint<> as RequestContextArgument>::Context>>::from::bad_error_type}` + | ^^^^^^^^^^^^^^ the trait `dropshot::handler::HttpHandlerFunc<_, _, _>` is not implemented for fn item `fn(RequestContext<()>) -> impl Future, String>> { for ApiEndpoint< as RequestContextArgument>::Context>>::from::bad_error_type}` | note: required by a bound in `ApiEndpoint::::new` --> src/api_description.rs diff --git a/dropshot/tests/fail/bad_endpoint11.rs b/dropshot/tests/fail/bad_endpoint11.rs index be526f7d0..3e509518f 100644 --- a/dropshot/tests/fail/bad_endpoint11.rs +++ b/dropshot/tests/fail/bad_endpoint11.rs @@ -4,12 +4,11 @@ use dropshot::endpoint; use dropshot::RequestContext; -use std::sync::Arc; #[endpoint { method = GET, path = "/test", }] -async fn bad_no_result(_: Arc>) {} +async fn bad_no_result(_: RequestContext<()>) {} fn main() {} diff --git a/dropshot/tests/fail/bad_endpoint11.stderr b/dropshot/tests/fail/bad_endpoint11.stderr index 1d4f19d8d..0dcb995bd 100644 --- a/dropshot/tests/fail/bad_endpoint11.stderr +++ b/dropshot/tests/fail/bad_endpoint11.stderr @@ -1,18 +1,18 @@ error: Endpoint handlers must have the following signature: async fn( - rqctx: std::sync::Arc>, + rqctx: dropshot::RequestContext, [query_params: Query,] [path_params: Path
<P>
    ,] [body_param: TypedBody,] [body_param: UntypedBody,] ) -> Result - --> tests/fail/bad_endpoint11.rs:13:1 + --> tests/fail/bad_endpoint11.rs:12:1 | -13 | async fn bad_no_result(_: Arc>) {} - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +12 | async fn bad_no_result(_: RequestContext<()>) {} + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ error: Endpoint must return a Result - --> tests/fail/bad_endpoint11.rs:13:1 + --> tests/fail/bad_endpoint11.rs:12:1 | -13 | async fn bad_no_result(_: Arc>) {} - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +12 | async fn bad_no_result(_: RequestContext<()>) {} + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/dropshot/tests/fail/bad_endpoint12.rs b/dropshot/tests/fail/bad_endpoint12.rs index fcd7fd323..2f0899cb8 100644 --- a/dropshot/tests/fail/bad_endpoint12.rs +++ b/dropshot/tests/fail/bad_endpoint12.rs @@ -5,14 +5,13 @@ use dropshot::endpoint; use dropshot::HttpError; use dropshot::RequestContext; -use std::sync::Arc; #[endpoint { method = GET, path = "/test", }] async fn bad_response_type( - _: Arc>, + _: RequestContext<()>, ) -> Result { Ok("aok".to_string()) } diff --git a/dropshot/tests/fail/bad_endpoint12.stderr b/dropshot/tests/fail/bad_endpoint12.stderr index c822b18ca..76a4b95ca 100644 --- a/dropshot/tests/fail/bad_endpoint12.stderr +++ b/dropshot/tests/fail/bad_endpoint12.stderr @@ -1,7 +1,7 @@ error[E0277]: the trait bound `String: HttpCodedResponse` is not satisfied - --> tests/fail/bad_endpoint12.rs:16:6 + --> tests/fail/bad_endpoint12.rs:15:6 | -16 | ) -> Result { +15 | ) -> Result { | ^^^^^^ the trait `HttpCodedResponse` is not implemented for `String` | = help: the following other types implement trait `HttpCodedResponse`: @@ -15,7 +15,7 @@ error[E0277]: the trait bound `String: HttpCodedResponse` is not satisfied dropshot::handler::HttpResponseTemporaryRedirectStatus = note: required for `String` to implement `HttpResponse` note: required for `Result` to implement `ResultTrait` - --> tests/fail/bad_endpoint12.rs:16:6 + --> tests/fail/bad_endpoint12.rs:15:6 | -16 | ) -> Result { +15 | ) -> Result { | ^^^^^^ diff --git a/dropshot/tests/fail/bad_endpoint13.rs b/dropshot/tests/fail/bad_endpoint13.rs index c8d088559..e8c89671a 100644 --- a/dropshot/tests/fail/bad_endpoint13.rs +++ b/dropshot/tests/fail/bad_endpoint13.rs @@ -6,7 +6,6 @@ use dropshot::endpoint; use dropshot::HttpError; use dropshot::HttpResponseOk; use dropshot::RequestContext; -use std::sync::Arc; trait Stuff { fn do_stuff(); @@ -17,7 +16,7 @@ trait Stuff { path = "/test", }] async fn bad_response_type( - _: Arc>, + _: RequestContext, ) -> Result, HttpError> { S::do_stuff(); panic!() diff --git a/dropshot/tests/fail/bad_endpoint13.stderr b/dropshot/tests/fail/bad_endpoint13.stderr index 1559b41d3..0a0930814 100644 --- a/dropshot/tests/fail/bad_endpoint13.stderr +++ b/dropshot/tests/fail/bad_endpoint13.stderr @@ -1,34 +1,34 @@ error: Endpoint handlers must have the following signature: async fn( - rqctx: std::sync::Arc>, + rqctx: dropshot::RequestContext, [query_params: Query,] [path_params: Path
<P>
    ,] [body_param: TypedBody,] [body_param: UntypedBody,] ) -> Result - --> tests/fail/bad_endpoint13.rs:19:1 + --> tests/fail/bad_endpoint13.rs:18:1 | -19 | / async fn bad_response_type( -20 | | _: Arc>, -21 | | ) -> Result, HttpError> { +18 | / async fn bad_response_type( +19 | | _: RequestContext, +20 | | ) -> Result, HttpError> { | |______________________________________________^ error: generics are not permitted for endpoint handlers - --> tests/fail/bad_endpoint13.rs:19:27 + --> tests/fail/bad_endpoint13.rs:18:27 | -19 | async fn bad_response_type( +18 | async fn bad_response_type( | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ error[E0412]: cannot find type `S` in this scope - --> tests/fail/bad_endpoint13.rs:20:27 + --> tests/fail/bad_endpoint13.rs:19:23 | -20 | _: Arc>, - | - ^ not found in this scope - | | - | help: you might be missing a type parameter: `` +19 | _: RequestContext, + | -^ not found in this scope + | | + | help: you might be missing a type parameter: `` error[E0412]: cannot find type `S` in this scope - --> tests/fail/bad_endpoint13.rs:20:27 + --> tests/fail/bad_endpoint13.rs:19:23 | -20 | _: Arc>, - | ^ not found in this scope +19 | _: RequestContext, + | ^ not found in this scope diff --git a/dropshot/tests/fail/bad_endpoint14.rs b/dropshot/tests/fail/bad_endpoint14.rs index f4aad6843..cb33a0255 100644 --- a/dropshot/tests/fail/bad_endpoint14.rs +++ b/dropshot/tests/fail/bad_endpoint14.rs @@ -21,7 +21,7 @@ struct PathParams { path = "/assets/{stuff:.*}", }] async fn must_be_unpublished( - _: Arc>, + _: RequestContext<()>, _: Path, ) -> Result, HttpError> { panic!() diff --git a/dropshot/tests/fail/bad_endpoint15.rs b/dropshot/tests/fail/bad_endpoint15.rs index da7f653f8..0499ce4f6 100644 --- a/dropshot/tests/fail/bad_endpoint15.rs +++ b/dropshot/tests/fail/bad_endpoint15.rs @@ -15,7 +15,7 @@ use std::time::Duration; path = "/test", }] async fn bad_endpoint( - _rqctx: Arc>, + _rqctx: RequestContext<()>, ) -> Result, HttpError> { let non_send_type = Rc::new(0); tokio::time::sleep(Duration::from_millis(1)).await; diff --git a/dropshot/tests/fail/bad_endpoint15.stderr b/dropshot/tests/fail/bad_endpoint15.stderr index eab06b3b8..4e5691b65 100644 --- a/dropshot/tests/fail/bad_endpoint15.stderr +++ b/dropshot/tests/fail/bad_endpoint15.stderr @@ -1,4 +1,4 @@ -error[E0277]: the trait bound `fn(Arc>) -> impl Future, HttpError>> { for ApiEndpoint<> as RequestContextArgument>::Context>>::from::bad_endpoint}: dropshot::handler::HttpHandlerFunc<_, _, _>` is not satisfied +error[E0277]: the trait bound `fn(RequestContext<()>) -> impl Future, HttpError>> { for ApiEndpoint< as RequestContextArgument>::Context>>::from::bad_endpoint}: dropshot::handler::HttpHandlerFunc<_, _, _>` is not satisfied --> tests/fail/bad_endpoint15.rs:17:10 | 13 | / #[endpoint { @@ -7,7 +7,7 @@ error[E0277]: the trait bound `fn(Arc>) -> impl Future` is not implemented for fn item `fn(Arc>) -> impl Future, HttpError>> { for ApiEndpoint<> as RequestContextArgument>::Context>>::from::bad_endpoint}` + | ^^^^^^^^^^^^ the trait `dropshot::handler::HttpHandlerFunc<_, _, _>` is not implemented for fn item `fn(RequestContext<()>) -> impl Future, HttpError>> { for ApiEndpoint< as RequestContextArgument>::Context>>::from::bad_endpoint}` | note: required by a bound in `ApiEndpoint::::new` --> src/api_description.rs diff --git a/dropshot/tests/fail/bad_endpoint16.rs b/dropshot/tests/fail/bad_endpoint16.rs index 6302c7f69..6730c5f4a 100644 --- a/dropshot/tests/fail/bad_endpoint16.rs +++ b/dropshot/tests/fail/bad_endpoint16.rs 
@@ -14,7 +14,7 @@ use std::sync::Arc; content_type = "foo/bar", }] async fn bad_endpoint( - _rqctx: Arc>, + _rqctx: RequestContext<()>, ) -> Result, HttpError> { Ok(HttpResponseOk(())) } diff --git a/dropshot/tests/fail/bad_endpoint2.stderr b/dropshot/tests/fail/bad_endpoint2.stderr index c71207619..c3428be8a 100644 --- a/dropshot/tests/fail/bad_endpoint2.stderr +++ b/dropshot/tests/fail/bad_endpoint2.stderr @@ -1,6 +1,6 @@ error: Endpoint handlers must have the following signature: async fn( - rqctx: std::sync::Arc>, + rqctx: dropshot::RequestContext, [query_params: Query,] [path_params: Path
<P>
    ,] [body_param: TypedBody,] diff --git a/dropshot/tests/fail/bad_endpoint3.rs b/dropshot/tests/fail/bad_endpoint3.rs index 062c80104..a53924315 100644 --- a/dropshot/tests/fail/bad_endpoint3.rs +++ b/dropshot/tests/fail/bad_endpoint3.rs @@ -13,7 +13,7 @@ use std::sync::Arc; path = "/test", }] async fn bad_endpoint( - _rqctx: Arc>, + _rqctx: RequestContext<()>, param: String, ) -> Result, HttpError> { Ok(HttpResponseOk(())) diff --git a/dropshot/tests/fail/bad_endpoint3.stderr b/dropshot/tests/fail/bad_endpoint3.stderr index f6b90e23c..8c3e5c8ec 100644 --- a/dropshot/tests/fail/bad_endpoint3.stderr +++ b/dropshot/tests/fail/bad_endpoint3.stderr @@ -1,4 +1,4 @@ -error[E0277]: the trait bound `fn(Arc>, String) -> impl Future, HttpError>> { for ApiEndpoint<> as RequestContextArgument>::Context>>::from::bad_endpoint}: dropshot::handler::HttpHandlerFunc<_, _, _>` is not satisfied +error[E0277]: the trait bound `fn(RequestContext<()>, String) -> impl Future, HttpError>> { for ApiEndpoint< as RequestContextArgument>::Context>>::from::bad_endpoint}: dropshot::handler::HttpHandlerFunc<_, _, _>` is not satisfied --> tests/fail/bad_endpoint3.rs:15:10 | 11 | / #[endpoint { @@ -7,7 +7,7 @@ error[E0277]: the trait bound `fn(Arc>, String) -> impl Futur 14 | | }] | |__- required by a bound introduced by this call 15 | async fn bad_endpoint( - | ^^^^^^^^^^^^ the trait `dropshot::handler::HttpHandlerFunc<_, _, _>` is not implemented for fn item `fn(Arc>, String) -> impl Future, HttpError>> { for ApiEndpoint<> as RequestContextArgument>::Context>>::from::bad_endpoint}` + | ^^^^^^^^^^^^ the trait `dropshot::handler::HttpHandlerFunc<_, _, _>` is not implemented for fn item `fn(RequestContext<()>, String) -> impl Future, HttpError>> { for ApiEndpoint< as RequestContextArgument>::Context>>::from::bad_endpoint}` | note: required by a bound in `ApiEndpoint::::new` --> src/api_description.rs diff --git a/dropshot/tests/fail/bad_endpoint4.rs b/dropshot/tests/fail/bad_endpoint4.rs index faaddf403..5a7b497bf 100644 --- a/dropshot/tests/fail/bad_endpoint4.rs +++ b/dropshot/tests/fail/bad_endpoint4.rs @@ -7,7 +7,6 @@ use dropshot::HttpError; use dropshot::HttpResponseOk; use dropshot::Query; use dropshot::RequestContext; -use std::sync::Arc; #[allow(dead_code)] struct QueryParams { @@ -20,7 +19,7 @@ struct QueryParams { path = "/test", }] async fn bad_endpoint( - _rqctx: Arc>, + _rqctx: RequestContext<()>, _params: Query, ) -> Result, HttpError> { Ok(HttpResponseOk(())) diff --git a/dropshot/tests/fail/bad_endpoint4.stderr b/dropshot/tests/fail/bad_endpoint4.stderr index 8d544b5ec..c9df930df 100644 --- a/dropshot/tests/fail/bad_endpoint4.stderr +++ b/dropshot/tests/fail/bad_endpoint4.stderr @@ -1,7 +1,7 @@ error[E0277]: the trait bound `QueryParams: schemars::JsonSchema` is not satisfied - --> tests/fail/bad_endpoint4.rs:24:14 + --> tests/fail/bad_endpoint4.rs:23:14 | -24 | _params: Query, +23 | _params: Query, | ^^^^^^^^^^^^^^^^^^ the trait `schemars::JsonSchema` is not implemented for `QueryParams` | = help: the following other types implement trait `schemars::JsonSchema`: @@ -21,9 +21,9 @@ note: required by a bound in `dropshot::Query` | ^^^^^^^^^^ required by this bound in `dropshot::Query` error[E0277]: the trait bound `for<'de> QueryParams: serde::de::Deserialize<'de>` is not satisfied - --> tests/fail/bad_endpoint4.rs:24:14 + --> tests/fail/bad_endpoint4.rs:23:14 | -24 | _params: Query, +23 | _params: Query, | ^^^^^^^^^^^^^^^^^^ the trait `for<'de> serde::de::Deserialize<'de>` is not implemented for 
`QueryParams` | = help: the following other types implement trait `serde::de::Deserialize<'de>`: diff --git a/dropshot/tests/fail/bad_endpoint5.rs b/dropshot/tests/fail/bad_endpoint5.rs index 69dedd221..696e01b1a 100644 --- a/dropshot/tests/fail/bad_endpoint5.rs +++ b/dropshot/tests/fail/bad_endpoint5.rs @@ -8,7 +8,6 @@ use dropshot::HttpResponseOk; use dropshot::Query; use dropshot::RequestContext; use schemars::JsonSchema; -use std::sync::Arc; #[derive(JsonSchema)] #[allow(dead_code)] @@ -22,7 +21,7 @@ struct QueryParams { path = "/test", }] async fn bad_endpoint( - _rqctx: Arc>, + _rqctx: RequestContext<()>, _params: Query, ) -> Result, HttpError> { Ok(HttpResponseOk(())) diff --git a/dropshot/tests/fail/bad_endpoint5.stderr b/dropshot/tests/fail/bad_endpoint5.stderr index b8f0b4538..d586a7062 100644 --- a/dropshot/tests/fail/bad_endpoint5.stderr +++ b/dropshot/tests/fail/bad_endpoint5.stderr @@ -1,7 +1,7 @@ error[E0277]: the trait bound `for<'de> QueryParams: serde::de::Deserialize<'de>` is not satisfied - --> tests/fail/bad_endpoint5.rs:26:14 + --> tests/fail/bad_endpoint5.rs:25:14 | -26 | _params: Query, +25 | _params: Query, | ^^^^^^^^^^^^^^^^^^ the trait `for<'de> serde::de::Deserialize<'de>` is not implemented for `QueryParams` | = help: the following other types implement trait `serde::de::Deserialize<'de>`: diff --git a/dropshot/tests/fail/bad_endpoint6.rs b/dropshot/tests/fail/bad_endpoint6.rs index 63ea21c85..df26133dc 100644 --- a/dropshot/tests/fail/bad_endpoint6.rs +++ b/dropshot/tests/fail/bad_endpoint6.rs @@ -8,7 +8,6 @@ use dropshot::HttpResponseOk; use dropshot::RequestContext; use schemars::JsonSchema; use serde::Serialize; -use std::sync::Arc; #[derive(JsonSchema, Serialize)] #[allow(dead_code)] @@ -22,7 +21,7 @@ struct Ret { path = "/test", }] async fn bad_endpoint( - _rqctx: Arc>, + _rqctx: RequestContext<()>, ) -> Result, HttpError> { // Validate that compiler errors show up with useful context and aren't // obscured by the macro. 
diff --git a/dropshot/tests/fail/bad_endpoint6.stderr b/dropshot/tests/fail/bad_endpoint6.stderr index f9d4b40e3..324e56809 100644 --- a/dropshot/tests/fail/bad_endpoint6.stderr +++ b/dropshot/tests/fail/bad_endpoint6.stderr @@ -1,21 +1,21 @@ error: expected identifier, found `"Oxide"` - --> tests/fail/bad_endpoint6.rs:29:29 + --> tests/fail/bad_endpoint6.rs:28:29 | -29 | Ok(HttpResponseOk(Ret { "Oxide".to_string(), 0x1de })) +28 | Ok(HttpResponseOk(Ret { "Oxide".to_string(), 0x1de })) | --- ^^^^^^^ expected identifier | | | while parsing this struct error: expected identifier, found `0x1de` - --> tests/fail/bad_endpoint6.rs:29:50 + --> tests/fail/bad_endpoint6.rs:28:50 | -29 | Ok(HttpResponseOk(Ret { "Oxide".to_string(), 0x1de })) +28 | Ok(HttpResponseOk(Ret { "Oxide".to_string(), 0x1de })) | --- ^^^^^ expected identifier | | | while parsing this struct error[E0063]: missing fields `x` and `y` in initializer of `Ret` - --> tests/fail/bad_endpoint6.rs:29:23 + --> tests/fail/bad_endpoint6.rs:28:23 | -29 | Ok(HttpResponseOk(Ret { "Oxide".to_string(), 0x1de })) +28 | Ok(HttpResponseOk(Ret { "Oxide".to_string(), 0x1de })) | ^^^ missing `x` and `y` diff --git a/dropshot/tests/fail/bad_endpoint7.rs b/dropshot/tests/fail/bad_endpoint7.rs index 21de8cd6e..08294e4fc 100644 --- a/dropshot/tests/fail/bad_endpoint7.rs +++ b/dropshot/tests/fail/bad_endpoint7.rs @@ -7,7 +7,6 @@ use dropshot::HttpError; use dropshot::HttpResponseOk; use dropshot::RequestContext; use schemars::JsonSchema; -use std::sync::Arc; #[derive(JsonSchema)] #[allow(dead_code)] @@ -21,7 +20,7 @@ struct Ret { path = "/test", }] async fn bad_endpoint( - _rqctx: Arc>, + _rqctx: RequestContext<()>, ) -> Result, HttpError> { Ok(HttpResponseOk(Ret { x: "Oxide".to_string(), diff --git a/dropshot/tests/fail/bad_endpoint7.stderr b/dropshot/tests/fail/bad_endpoint7.stderr index 34cc84017..b4ffe5a6e 100644 --- a/dropshot/tests/fail/bad_endpoint7.stderr +++ b/dropshot/tests/fail/bad_endpoint7.stderr @@ -1,7 +1,7 @@ error[E0277]: the trait bound `Ret: serde::ser::Serialize` is not satisfied - --> tests/fail/bad_endpoint7.rs:25:13 + --> tests/fail/bad_endpoint7.rs:24:13 | -25 | ) -> Result, HttpError> { +24 | ) -> Result, HttpError> { | ^^^^^^^^^^^^^^^^^^^ the trait `serde::ser::Serialize` is not implemented for `Ret` | = help: the following other types implement trait `serde::ser::Serialize`: diff --git a/dropshot/tests/fail/bad_endpoint8.rs b/dropshot/tests/fail/bad_endpoint8.rs index 7b990e83a..b1de23b10 100644 --- a/dropshot/tests/fail/bad_endpoint8.rs +++ b/dropshot/tests/fail/bad_endpoint8.rs @@ -8,7 +8,6 @@ use dropshot::HttpResponseOk; use dropshot::RequestContext; use schemars::JsonSchema; use serde::Serialize; -use std::sync::Arc; #[derive(JsonSchema, Serialize)] struct Ret {} @@ -18,7 +17,7 @@ struct Ret {} path = "/test", }] fn bad_endpoint( - _rqctx: Arc>, + _rqctx: RequestContext<()>, ) -> Result, HttpError> { Ok(HttpResponseOk(Ret {})) } diff --git a/dropshot/tests/fail/bad_endpoint8.stderr b/dropshot/tests/fail/bad_endpoint8.stderr index dc6067086..3337fa256 100644 --- a/dropshot/tests/fail/bad_endpoint8.stderr +++ b/dropshot/tests/fail/bad_endpoint8.stderr @@ -1,20 +1,20 @@ error: Endpoint handlers must have the following signature: async fn( - rqctx: std::sync::Arc>, + rqctx: dropshot::RequestContext, [query_params: Query,] [path_params: Path
<P>
    ,] [body_param: TypedBody,] [body_param: UntypedBody,] ) -> Result - --> tests/fail/bad_endpoint8.rs:20:1 + --> tests/fail/bad_endpoint8.rs:19:1 | -20 | / fn bad_endpoint( -21 | | _rqctx: Arc>, -22 | | ) -> Result, HttpError> { +19 | / fn bad_endpoint( +20 | | _rqctx: RequestContext<()>, +21 | | ) -> Result, HttpError> { | |___________________________________________^ error: endpoint handler functions must be async - --> tests/fail/bad_endpoint8.rs:20:1 + --> tests/fail/bad_endpoint8.rs:19:1 | -20 | fn bad_endpoint( +19 | fn bad_endpoint( | ^^ diff --git a/dropshot/tests/fail/bad_endpoint9.stderr b/dropshot/tests/fail/bad_endpoint9.stderr index 2772d6044..9d90d66e3 100644 --- a/dropshot/tests/fail/bad_endpoint9.stderr +++ b/dropshot/tests/fail/bad_endpoint9.stderr @@ -4,7 +4,7 @@ error[E0277]: the trait bound `dropshot::Query: RequestContextArgum 25 | _params: Query, | ^^^^^ the trait `RequestContextArgument` is not implemented for `dropshot::Query` | - = help: the trait `RequestContextArgument` is implemented for `Arc>` + = help: the trait `RequestContextArgument` is implemented for `RequestContext` error[E0277]: the trait bound `dropshot::Query: RequestContextArgument` is not satisfied --> tests/fail/bad_endpoint9.rs:20:1 @@ -15,5 +15,5 @@ error[E0277]: the trait bound `dropshot::Query: RequestContextArgum 23 | | }] | |__^ the trait `RequestContextArgument` is not implemented for `dropshot::Query` | - = help: the trait `RequestContextArgument` is implemented for `Arc>` + = help: the trait `RequestContextArgument` is implemented for `RequestContext` = note: this error originates in the attribute macro `endpoint` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/dropshot/tests/fail/unused_endpoint.rs b/dropshot/tests/fail/unused_endpoint.rs index 4e7b4d47f..03d00c3f0 100644 --- a/dropshot/tests/fail/unused_endpoint.rs +++ b/dropshot/tests/fail/unused_endpoint.rs @@ -8,7 +8,6 @@ use dropshot::endpoint; use dropshot::HttpError; use dropshot::HttpResponseOk; use dropshot::RequestContext; -use std::sync::Arc; // At some point we'd expect to see code like: // ``` @@ -21,7 +20,7 @@ use std::sync::Arc; path = "/test", }] async fn unused_endpoint( - _rqctx: Arc>, + _rqctx: RequestContext<()>, ) -> Result, HttpError> { Ok(HttpResponseOk(())) } diff --git a/dropshot/tests/fail/unused_endpoint.stderr b/dropshot/tests/fail/unused_endpoint.stderr index 0bc87f2db..d0553fd64 100644 --- a/dropshot/tests/fail/unused_endpoint.stderr +++ b/dropshot/tests/fail/unused_endpoint.stderr @@ -1,7 +1,7 @@ error: constant `unused_endpoint` is never used - --> tests/fail/unused_endpoint.rs:23:10 + --> tests/fail/unused_endpoint.rs:22:10 | -23 | async fn unused_endpoint( +22 | async fn unused_endpoint( | ^^^^^^^^^^^^^^^ | note: the lint level is defined here diff --git a/dropshot/tests/test_demo.rs b/dropshot/tests/test_demo.rs index acfd837a4..0111e0df5 100644 --- a/dropshot/tests/test_demo.rs +++ b/dropshot/tests/test_demo.rs @@ -828,7 +828,7 @@ async fn test_demo_websocket() { // Demo handler functions -type RequestCtx = Arc>; +type RequestCtx = RequestContext; #[endpoint { method = GET, @@ -965,7 +965,7 @@ pub struct DemoUntypedQuery { path = "/testing/untyped_body" }] async fn demo_handler_untyped_body( - _rqctx: Arc>, + _rqctx: RequestContext, query: Query, body: UntypedBody, ) -> Result, HttpError> { @@ -988,7 +988,7 @@ pub struct DemoPathImpossible { path = "/testing/demo_path_impossible/{different_param_name}", }] async fn demo_handler_path_param_impossible( - _rqctx: Arc>, + 
_rqctx: RequestContext, path_params: Path, ) -> Result, HttpError> { http_echo(&path_params.into_inner()) diff --git a/dropshot/tests/test_openapi.rs b/dropshot/tests/test_openapi.rs index b56a0cfb8..748df3fb2 100644 --- a/dropshot/tests/test_openapi.rs +++ b/dropshot/tests/test_openapi.rs @@ -12,7 +12,7 @@ use dropshot::{ use hyper::Body; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; -use std::{collections::HashMap, io::Cursor, str::from_utf8, sync::Arc}; +use std::{collections::HashMap, io::Cursor, str::from_utf8}; #[endpoint { method = GET, @@ -24,7 +24,7 @@ use std::{collections::HashMap, io::Cursor, str::from_utf8, sync::Arc}; /// This is a multi- /// line comment. async fn handler1( - _rqctx: Arc>, + _rqctx: RequestContext<()>, ) -> Result, HttpError> { Ok(HttpResponseOk(())) } @@ -48,7 +48,7 @@ struct QueryArgs { /// This is a multi- /// line comment. async fn handler2( - _rqctx: Arc>, + _rqctx: RequestContext<()>, _query: Query, ) -> Result { Ok(HttpResponseUpdatedNoContent()) @@ -66,7 +66,7 @@ struct PathArgs { tags = ["it"], }] async fn handler3( - _rqctx: Arc>, + _rqctx: RequestContext<()>, _path: Path, ) -> Result { Ok(HttpResponseDeleted()) @@ -109,7 +109,7 @@ struct Response {} tags = ["it"], }] async fn handler4( - _rqctx: Arc>, + _rqctx: RequestContext<()>, _body: TypedBody, ) -> Result, HttpError> { Ok(HttpResponseCreated(Response {})) @@ -121,7 +121,7 @@ async fn handler4( tags = [ "person", "woman", "man", "camera", "tv"] }] async fn handler5( - _rqctx: Arc>, + _rqctx: RequestContext<()>, _path: Path, _query: Query, _body: TypedBody, @@ -152,7 +152,7 @@ struct ExamplePageSelector { tags = ["it"], }] async fn handler6( - _rqctx: Arc>, + _rqctx: RequestContext<()>, _query: Query>, ) -> Result>, HttpError> { unimplemented!(); @@ -164,7 +164,7 @@ async fn handler6( tags = ["it"], }] async fn handler7( - _rqctx: Arc>, + _rqctx: RequestContext<()>, _dump: UntypedBody, ) -> Result { unimplemented!(); @@ -193,7 +193,7 @@ struct NeverDuplicatedResponseNextLevel { tags = ["it"], }] async fn handler8( - _rqctx: Arc>, + _rqctx: RequestContext<()>, ) -> Result, HttpError> { unimplemented!(); } @@ -204,7 +204,7 @@ async fn handler8( tags = ["it"], }] async fn handler9( - _rqctx: Arc>, + _rqctx: RequestContext<()>, ) -> Result, HttpError> { unimplemented!(); } @@ -229,7 +229,7 @@ struct NeverDuplicatedBodyNextLevel { tags = ["it"], }] async fn handler10( - _rqctx: Arc>, + _rqctx: RequestContext<()>, _b: TypedBody, ) -> Result { unimplemented!(); @@ -241,7 +241,7 @@ async fn handler10( tags = ["it"], }] async fn handler11( - _rqctx: Arc>, + _rqctx: RequestContext<()>, _b: TypedBody, ) -> Result { unimplemented!(); @@ -268,7 +268,7 @@ struct NeverDuplicatedNext { tags = ["it"], }] async fn handler12( - _rqctx: Arc>, + _rqctx: RequestContext<()>, _b: TypedBody, ) -> Result { unimplemented!(); @@ -280,7 +280,7 @@ async fn handler12( tags = ["it"], }] async fn handler13( - _rqctx: Arc>, + _rqctx: RequestContext<()>, ) -> Result, HttpError> { unimplemented!(); } @@ -297,7 +297,7 @@ struct AllPath { unpublished = true, }] async fn handler14( - _rqctx: Arc>, + _rqctx: RequestContext<()>, _path: Path, ) -> Result, HttpError> { unimplemented!(); @@ -309,7 +309,7 @@ async fn handler14( tags = ["it"], }] async fn handler15( - _rqctx: Arc>, + _rqctx: RequestContext<()>, ) -> Result, HttpError> { unimplemented!(); } @@ -320,7 +320,7 @@ async fn handler15( tags = ["it"], }] async fn handler16( - _rqctx: Arc>, + _rqctx: RequestContext<()>, ) -> Result, HttpError> { unimplemented!(); } 
@@ -344,7 +344,7 @@ struct Foo(String); tags = ["it"], }] async fn handler17( - _rqctx: Arc>, + _rqctx: RequestContext<()>, ) -> Result< HttpResponseHeaders, HttpError, @@ -358,7 +358,7 @@ async fn handler17( tags = ["it"], }] async fn handler18( - _rqctx: Arc>, + _rqctx: RequestContext<()>, ) -> Result, HttpError> { let (_, body) = Body::channel(); Ok(HttpResponseOk(body.into())) @@ -396,7 +396,7 @@ fn example_nested_object_with_example() -> NestedObjectWithExample { tags = ["it"], }] async fn handler19( - _rqctx: Arc>, + _rqctx: RequestContext<()>, ) -> Result, HttpError> { Ok(HttpResponseOk(example_object_with_example())) } @@ -408,7 +408,7 @@ async fn handler19( tags = ["it"] }] async fn handler20( - _rqctx: Arc>, + _rqctx: RequestContext<()>, _body: TypedBody, ) -> Result, HttpError> { Ok(HttpResponseCreated(Response {})) @@ -420,7 +420,7 @@ async fn handler20( tags = [ "it"], }] async fn handler21( - _rqctx: Arc>, + _rqctx: RequestContext<()>, ) -> Result { Ok(http_response_found(String::from("/path1")).unwrap()) } @@ -431,7 +431,7 @@ async fn handler21( tags = [ "it"], }] async fn handler22( - _rqctx: Arc>, + _rqctx: RequestContext<()>, ) -> Result { Ok(http_response_see_other(String::from("/path2")).unwrap()) } @@ -442,7 +442,7 @@ async fn handler22( tags = [ "it"], }] async fn handler23( - _rqctx: Arc>, + _rqctx: RequestContext<()>, ) -> Result { Ok(http_response_temporary_redirect(String::from("/path3")).unwrap()) } @@ -454,7 +454,7 @@ async fn handler23( deprecated = true, }] async fn handler24( - _rqctx: Arc>, + _rqctx: RequestContext<()>, ) -> Result { unimplemented!() } diff --git a/dropshot/tests/test_pagination.rs b/dropshot/tests/test_pagination.rs index f6cfec678..4bd0afbeb 100644 --- a/dropshot/tests/test_pagination.rs +++ b/dropshot/tests/test_pagination.rs @@ -39,7 +39,6 @@ use std::net::SocketAddr; use std::ops::Bound; use std::sync::atomic::AtomicU16; use std::sync::atomic::Ordering; -use std::sync::Arc; use std::time::Duration; use std::time::Instant; use subprocess::Exec; @@ -172,7 +171,7 @@ fn range_u16(start: u16, limit: u16) -> Vec { path = "/intapi", }] async fn api_integers( - rqctx: Arc>, + rqctx: RequestContext, query: Query>, ) -> Result>, HttpError> { let pag_params = query.into_inner(); @@ -354,7 +353,7 @@ async fn test_paginate_basic() { path = "/empty", }] async fn api_empty( - _rqctx: Arc>, + _rqctx: RequestContext, _query: Query>, ) -> Result>, HttpError> { Ok(HttpResponseOk(ResultsPage::new( @@ -411,7 +410,7 @@ async fn test_paginate_empty() { path = "/ints_extra", }] async fn api_with_extra_params( - rqctx: Arc>, + rqctx: RequestContext, query_pag: Query>, query_extra: Query, ) -> Result, HttpError> { @@ -502,7 +501,7 @@ struct ReqScanParams { path = "/required", }] async fn api_with_required_params( - rqctx: Arc>, + rqctx: RequestContext, query: Query>, ) -> Result>, HttpError> { let pag_params = query.into_inner(); @@ -608,7 +607,7 @@ struct DictionaryPageSelector { path = "/dictionary", }] async fn api_dictionary( - rqctx: Arc>, + rqctx: RequestContext, query: Query< PaginationParams, >, diff --git a/dropshot/tests/test_pagination_schema.rs b/dropshot/tests/test_pagination_schema.rs index a0dd8cb70..4556e6f15 100644 --- a/dropshot/tests/test_pagination_schema.rs +++ b/dropshot/tests/test_pagination_schema.rs @@ -6,7 +6,7 @@ use dropshot::{ }; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; -use std::{io::Cursor, str::from_utf8, sync::Arc}; +use std::{io::Cursor, str::from_utf8}; #[derive(JsonSchema, Serialize)] struct ResponseItem 
{ @@ -35,7 +35,7 @@ struct PageSelector { path = "/super_pages", }] async fn handler( - _rqctx: Arc>, + _rqctx: RequestContext<()>, _query: Query>, ) -> Result>, HttpError> { unimplemented!(); diff --git a/dropshot/tests/test_path_names.rs b/dropshot/tests/test_path_names.rs index 4d2fbc21c..a4e96ba07 100644 --- a/dropshot/tests/test_path_names.rs +++ b/dropshot/tests/test_path_names.rs @@ -5,7 +5,6 @@ use dropshot::{ }; use schemars::JsonSchema; use serde::Deserialize; -use std::sync::Arc; #[derive(JsonSchema, Deserialize)] #[allow(dead_code)] @@ -25,7 +24,7 @@ struct MyPath { path = "/{type}/{ref}/{@}", }] async fn handler( - _rqctx: Arc>, + _rqctx: RequestContext<()>, _path: Path, ) -> Result, HttpError> { Ok(HttpResponseOk(())) diff --git a/dropshot/tests/test_streaming.rs b/dropshot/tests/test_streaming.rs index cbba49fd3..d931cc46d 100644 --- a/dropshot/tests/test_streaming.rs +++ b/dropshot/tests/test_streaming.rs @@ -6,7 +6,6 @@ use dropshot::{endpoint, ApiDescription, HttpError, RequestContext}; use http::{Method, Response, StatusCode}; use hyper::{body::HttpBody, Body}; use hyper_staticfile::FileBytesStream; -use std::sync::Arc; use tokio::io::{AsyncSeekExt, AsyncWriteExt}; extern crate slog; @@ -28,7 +27,7 @@ const BUF_COUNT: usize = 128; path = "/streaming", }] async fn api_streaming( - _rqctx: Arc>, + _rqctx: RequestContext, ) -> Result, HttpError> { let mut file = tempfile::tempfile() .map_err(|_| { @@ -58,7 +57,7 @@ async fn api_streaming( path = "/not-streaming", }] async fn api_not_streaming( - _rqctx: Arc>, + _rqctx: RequestContext, ) -> Result, HttpError> { Ok(Response::builder() .status(StatusCode::OK) diff --git a/dropshot/tests/test_tls.rs b/dropshot/tests/test_tls.rs index 4039173e1..4008b53fd 100644 --- a/dropshot/tests/test_tls.rs +++ b/dropshot/tests/test_tls.rs @@ -344,7 +344,7 @@ pub struct TlsCheckArgs { path = "/", }] async fn tls_check_handler( - rqctx: Arc>, + rqctx: dropshot::RequestContext, query: dropshot::Query, ) -> Result, dropshot::HttpError> { if rqctx.server.using_tls() != query.into_inner().tls { diff --git a/dropshot_endpoint/src/lib.rs b/dropshot_endpoint/src/lib.rs index c83636b8b..82b1b95ce 100644 --- a/dropshot_endpoint/src/lib.rs +++ b/dropshot_endpoint/src/lib.rs @@ -81,7 +81,7 @@ struct ChannelMetadata { const DROPSHOT: &str = "dropshot"; const USAGE: &str = "Endpoint handlers must have the following signature: async fn( - rqctx: std::sync::Arc>, + rqctx: dropshot::RequestContext, [query_params: Query,] [path_params: Path
<P>
    ,] [body_param: TypedBody,] @@ -137,7 +137,7 @@ fn do_endpoint( /// that is spawned asynchronously and given the upgraded connection of /// the given `protocol` (i.e. `WEBSOCKETS`). /// -/// The first argument still must be an `Arc>`. +/// The first argument still must be a `RequestContext<_>`. /// /// The second argument passed to the handler function must be a /// [`dropshot::WebsocketConnection`]. @@ -216,7 +216,7 @@ fn do_channel( if found.is_none() { return Err(Error::new_spanned( &attr, - "An argument of type dropshot::WebsocketConnection must be provided immediately following Arc>.", + "An argument of type dropshot::WebsocketConnection must be provided immediately following RequestContext.", )); } @@ -445,7 +445,7 @@ fn do_endpoint_inner( let ty = pat.ty.as_ref().into_token_stream(); arg_types.push(ty.clone()); if index == 0 { - // The first parameter must be an Arc> + // The first parameter must be a RequestContext // and fortunately we already have a trait that we can // use to validate this type. Some(quote_spanned! { span=> @@ -741,7 +741,7 @@ mod tests { }, quote! { pub async fn handler_xyz( - _rqctx: Arc>, + _rqctx: RequestContext<()>, ) -> Result, HttpError> { Ok(()) } @@ -750,7 +750,7 @@ mod tests { .unwrap(); let expected = quote! { const _: fn() = || { - struct NeedRequestContext( > as dropshot::RequestContextArgument>::Context) ; + struct NeedRequestContext( as dropshot::RequestContextArgument>::Context) ; }; const _: fn() = || { trait ResultTrait { @@ -793,19 +793,19 @@ mod tests { impl From for dropshot::ApiEndpoint< - - > as dropshot::RequestContextArgument>::Context> + + as dropshot::RequestContextArgument>::Context> { fn from(_: handler_xyz) -> Self { pub async fn handler_xyz( - _rqctx: Arc>, + _rqctx: RequestContext<()>, ) -> Result, HttpError> { Ok(()) } const _: fn() = || { fn future_endpoint_must_be_send(_t: T) {} - fn check_future_bounds(arg0: Arc< RequestContext<()> >) { + fn check_future_bounds(arg0: RequestContext<()>) { future_endpoint_must_be_send(handler_xyz(arg0)); } }; @@ -833,7 +833,7 @@ mod tests { path = "/a/b/c" }, quote! { - pub async fn handler_xyz(_rqctx: std::sync::Arc>) -> + pub async fn handler_xyz(_rqctx: dropshot::RequestContext<()>) -> std::Result, dropshot::HttpError> { Ok(()) @@ -842,7 +842,7 @@ mod tests { ).unwrap(); let expected = quote! { const _: fn() = || { - struct NeedRequestContext( > as dropshot::RequestContextArgument>::Context) ; + struct NeedRequestContext( as dropshot::RequestContextArgument>::Context) ; }; const _: fn() = || { trait ResultTrait { @@ -883,9 +883,9 @@ mod tests { #[doc = "API Endpoint: handler_xyz"] pub const handler_xyz: handler_xyz = handler_xyz {}; - impl From for dropshot::ApiEndpoint< > as dropshot::RequestContextArgument>::Context> { + impl From for dropshot::ApiEndpoint< as dropshot::RequestContextArgument>::Context> { fn from(_: handler_xyz) -> Self { - pub async fn handler_xyz(_rqctx: std::sync::Arc>) -> + pub async fn handler_xyz(_rqctx: dropshot::RequestContext<()>) -> std::Result, dropshot::HttpError> { Ok(()) @@ -893,7 +893,7 @@ mod tests { const _: fn() = || { fn future_endpoint_must_be_send(_t: T) {} - fn check_future_bounds(arg0: std::sync::Arc< dropshot::RequestContext<()> >) { + fn check_future_bounds(arg0: dropshot::RequestContext<()>) { future_endpoint_must_be_send(handler_xyz(arg0)); } }; @@ -922,7 +922,7 @@ mod tests { }, quote! 
{ async fn handler_xyz( - _rqctx: Arc>, + _rqctx: RequestContext, q: Query, ) -> Result, HttpError> { @@ -933,7 +933,7 @@ mod tests { .unwrap(); let expected = quote! { const _: fn() = || { - struct NeedRequestContext( > as dropshot::RequestContextArgument>::Context) ; + struct NeedRequestContext( as dropshot::RequestContextArgument>::Context) ; }; const _: fn() = || { trait ResultTrait { @@ -976,12 +976,12 @@ mod tests { impl From for dropshot::ApiEndpoint< - > as dropshot::RequestContextArgument>::Context + as dropshot::RequestContextArgument>::Context > { fn from(_: handler_xyz) -> Self { async fn handler_xyz( - _rqctx: Arc>, + _rqctx: RequestContext, q: Query, ) -> Result, HttpError> @@ -991,7 +991,7 @@ mod tests { const _: fn() = || { fn future_endpoint_must_be_send(_t: T) {} - fn check_future_bounds(arg0: Arc< RequestContext >, arg1: Query) { + fn check_future_bounds(arg0: RequestContext, arg1: Query) { future_endpoint_must_be_send(handler_xyz(arg0, arg1)); } }; @@ -1020,7 +1020,7 @@ mod tests { }, quote! { pub(crate) async fn handler_xyz( - _rqctx: Arc>, + _rqctx: RequestContext<()>, q: Query, ) -> Result, HttpError> { @@ -1031,7 +1031,7 @@ mod tests { .unwrap(); let expected = quote! { const _: fn() = || { - struct NeedRequestContext( > as dropshot::RequestContextArgument>::Context) ; + struct NeedRequestContext( as dropshot::RequestContextArgument>::Context) ; }; const _: fn() = || { trait ResultTrait { @@ -1074,12 +1074,12 @@ mod tests { impl From for dropshot::ApiEndpoint< - > as dropshot::RequestContextArgument>::Context + as dropshot::RequestContextArgument>::Context > { fn from(_: handler_xyz) -> Self { pub(crate) async fn handler_xyz( - _rqctx: Arc>, + _rqctx: RequestContext<()>, q: Query, ) -> Result, HttpError> @@ -1089,7 +1089,7 @@ mod tests { const _: fn() = || { fn future_endpoint_must_be_send(_t: T) {} - fn check_future_bounds(arg0: Arc< RequestContext<()> >, arg1: Query) { + fn check_future_bounds(arg0: RequestContext<()>, arg1: Query) { future_endpoint_must_be_send(handler_xyz(arg0, arg1)); } }; @@ -1119,7 +1119,7 @@ mod tests { }, quote! { async fn handler_xyz( - _rqctx: Arc>, + _rqctx: RequestContext<()>, ) -> Result, HttpError> { Ok(()) } @@ -1128,7 +1128,7 @@ mod tests { .unwrap(); let expected = quote! { const _: fn() = || { - struct NeedRequestContext( > as dropshot::RequestContextArgument>::Context) ; + struct NeedRequestContext( as dropshot::RequestContextArgument>::Context) ; }; const _: fn() = || { trait ResultTrait { @@ -1171,19 +1171,19 @@ mod tests { impl From for dropshot::ApiEndpoint< - - > as dropshot::RequestContextArgument>::Context> + + as dropshot::RequestContextArgument>::Context> { fn from(_: handler_xyz) -> Self { async fn handler_xyz( - _rqctx: Arc>, + _rqctx: RequestContext<()>, ) -> Result, HttpError> { Ok(()) } const _: fn() = || { fn future_endpoint_must_be_send(_t: T) {} - fn check_future_bounds(arg0: Arc< RequestContext<()> >) { + fn check_future_bounds(arg0: RequestContext<()>) { future_endpoint_must_be_send(handler_xyz(arg0)); } }; @@ -1215,7 +1215,7 @@ mod tests { quote! { /** handle "xyz" requests */ async fn handler_xyz( - _rqctx: Arc>, + _rqctx: RequestContext<()>, ) -> Result, HttpError> { Ok(()) } @@ -1224,7 +1224,7 @@ mod tests { .unwrap(); let expected = quote! 
{ const _: fn() = || { - struct NeedRequestContext( > as dropshot::RequestContextArgument>::Context) ; + struct NeedRequestContext( as dropshot::RequestContextArgument>::Context) ; }; const _: fn() = || { trait ResultTrait { @@ -1267,20 +1267,20 @@ mod tests { impl From for dropshot::ApiEndpoint< - - > as dropshot::RequestContextArgument>::Context> + + as dropshot::RequestContextArgument>::Context> { fn from(_: handler_xyz) -> Self { #[doc = r#" handle "xyz" requests "#] async fn handler_xyz( - _rqctx: Arc>, + _rqctx: RequestContext<()>, ) -> Result, HttpError> { Ok(()) } const _: fn() = || { fn future_endpoint_must_be_send(_t: T) {} - fn check_future_bounds(arg0: Arc< RequestContext<()> >) { + fn check_future_bounds(arg0: RequestContext<()>) { future_endpoint_must_be_send(handler_xyz(arg0)); } }; @@ -1357,7 +1357,7 @@ mod tests { path = "/a/b/c", }, quote! { - fn handler_xyz(_rqctx: Arc) {} + fn handler_xyz(_rqctx: RequestContext) {} }, ) .unwrap(); @@ -1419,7 +1419,7 @@ mod tests { }, quote! { pub async fn handler_xyz( - _rqctx: Arc>, + _rqctx: RequestContext<()>, ) -> Result, HttpError> { Ok(()) } @@ -1429,7 +1429,7 @@ mod tests { let expected = quote! { const _: fn() = || { - struct NeedRequestContext( > as dropshot::RequestContextArgument>::Context) ; + struct NeedRequestContext( as dropshot::RequestContextArgument>::Context) ; }; const _: fn() = || { trait ResultTrait { @@ -1472,19 +1472,19 @@ mod tests { impl From for dropshot::ApiEndpoint< - - > as dropshot::RequestContextArgument>::Context> + + as dropshot::RequestContextArgument>::Context> { fn from(_: handler_xyz) -> Self { pub async fn handler_xyz( - _rqctx: Arc>, + _rqctx: RequestContext<()>, ) -> Result, HttpError> { Ok(()) } const _: fn() = || { fn future_endpoint_must_be_send(_t: T) {} - fn check_future_bounds(arg0: Arc< RequestContext<()> >) { + fn check_future_bounds(arg0: RequestContext<()>) { future_endpoint_must_be_send(handler_xyz(arg0)); } }; From 8ee3aa9a41f830618622f65992a5fc17989a69be Mon Sep 17 00:00:00 2001 From: David Pacheco Date: Wed, 11 Jan 2023 10:25:37 -0800 Subject: [PATCH 11/47] fix copyrights --- dropshot/examples/request-headers.rs | 2 +- dropshot/src/api_description.rs | 2 +- dropshot/src/extractor/common.rs | 2 +- dropshot/src/extractor/mod.rs | 2 +- dropshot/src/handler.rs | 2 +- dropshot/src/lib.rs | 2 +- dropshot/src/pagination.rs | 2 +- dropshot/src/schema_util.rs | 2 +- dropshot/src/type_util.rs | 2 +- dropshot/src/websocket.rs | 2 +- dropshot/tests/test_demo.rs | 2 +- dropshot_endpoint/src/lib.rs | 2 +- 12 files changed, 12 insertions(+), 12 deletions(-) diff --git a/dropshot/examples/request-headers.rs b/dropshot/examples/request-headers.rs index b0ceba7a4..104c75460 100644 --- a/dropshot/examples/request-headers.rs +++ b/dropshot/examples/request-headers.rs @@ -1,4 +1,4 @@ -// Copyright 2022 Oxide Computer Company +// Copyright 2023 Oxide Computer Company //! Example use of Dropshot with request headers //! diff --git a/dropshot/src/api_description.rs b/dropshot/src/api_description.rs index cb52e619f..7f861d3a5 100644 --- a/dropshot/src/api_description.rs +++ b/dropshot/src/api_description.rs @@ -1,4 +1,4 @@ -// Copyright 2022 Oxide Computer Company +// Copyright 2023 Oxide Computer Company //! 
Describes the endpoints and handler functions in your API use crate::handler::HttpHandlerFunc; diff --git a/dropshot/src/extractor/common.rs b/dropshot/src/extractor/common.rs index 0bfd294ff..00dc89d55 100644 --- a/dropshot/src/extractor/common.rs +++ b/dropshot/src/extractor/common.rs @@ -1,4 +1,4 @@ -// Copyright 2022 Oxide Computer Company +// Copyright 2023 Oxide Computer Company // XXX-dap TODO-cleanup should the metadata into a separate, shared trait? diff --git a/dropshot/src/extractor/mod.rs b/dropshot/src/extractor/mod.rs index 721277f85..b32fde52e 100644 --- a/dropshot/src/extractor/mod.rs +++ b/dropshot/src/extractor/mod.rs @@ -1,4 +1,4 @@ -// Copyright 2022 Oxide Computer Company +// Copyright 2023 Oxide Computer Company //! Extractor trait //! diff --git a/dropshot/src/handler.rs b/dropshot/src/handler.rs index fce5574a5..39036af26 100644 --- a/dropshot/src/handler.rs +++ b/dropshot/src/handler.rs @@ -1,4 +1,4 @@ -// Copyright 2020 Oxide Computer Company +// Copyright 2023 Oxide Computer Company //! Interface for implementing HTTP endpoint handler functions. //! //! For information about supported endpoint function signatures, argument types, diff --git a/dropshot/src/lib.rs b/dropshot/src/lib.rs index a8788f01c..5cd3a68a4 100644 --- a/dropshot/src/lib.rs +++ b/dropshot/src/lib.rs @@ -1,4 +1,4 @@ -// Copyright 2020 Oxide Computer Company +// Copyright 2023 Oxide Computer Company //! Dropshot is a general-purpose crate for exposing REST APIs from a Rust //! program. Planned highlights include: //! diff --git a/dropshot/src/pagination.rs b/dropshot/src/pagination.rs index a854b50b6..80b811d4f 100644 --- a/dropshot/src/pagination.rs +++ b/dropshot/src/pagination.rs @@ -1,4 +1,4 @@ -// Copyright 2022 Oxide Computer Company +// Copyright 2023 Oxide Computer Company //! Detailed end-user documentation for pagination lives in the Dropshot top- //! level block comment. Here we discuss some of the design choices. diff --git a/dropshot/src/schema_util.rs b/dropshot/src/schema_util.rs index d08da8dda..14904e757 100644 --- a/dropshot/src/schema_util.rs +++ b/dropshot/src/schema_util.rs @@ -1,4 +1,4 @@ -// Copyright 2022 Oxide Computer Company +// Copyright 2023 Oxide Computer Company //! schemars helper functions diff --git a/dropshot/src/type_util.rs b/dropshot/src/type_util.rs index 5c1495dea..fc6405f5a 100644 --- a/dropshot/src/type_util.rs +++ b/dropshot/src/type_util.rs @@ -1,4 +1,4 @@ -// Copyright 2021 Oxide Computer Company +// Copyright 2023 Oxide Computer Company //! Utility functions for working with JsonSchema types. diff --git a/dropshot/src/websocket.rs b/dropshot/src/websocket.rs index 71800060b..7bd83da29 100644 --- a/dropshot/src/websocket.rs +++ b/dropshot/src/websocket.rs @@ -1,4 +1,4 @@ -// Copyright 2022 Oxide Computer Company +// Copyright 2023 Oxide Computer Company //! Implements websocket upgrades as an Extractor for use in API route handler //! parameters to indicate that the given endpoint is meant to be upgraded to //! a websocket. diff --git a/dropshot/tests/test_demo.rs b/dropshot/tests/test_demo.rs index acfd837a4..b28c7856f 100644 --- a/dropshot/tests/test_demo.rs +++ b/dropshot/tests/test_demo.rs @@ -1,4 +1,4 @@ -// Copyright 2020 Oxide Computer Company +// Copyright 2023 Oxide Computer Company //! Test cases for the "demo" handlers. These handlers exercise various //! supported configurations of the HTTP handler interface. We exercise them //! 
here to make sure that even if these aren't used at a given point, they still diff --git a/dropshot_endpoint/src/lib.rs b/dropshot_endpoint/src/lib.rs index c83636b8b..546f22460 100644 --- a/dropshot_endpoint/src/lib.rs +++ b/dropshot_endpoint/src/lib.rs @@ -1,4 +1,4 @@ -// Copyright 2022 Oxide Computer Company +// Copyright 2023 Oxide Computer Company //! This package defines macro attributes associated with HTTP handlers. These //! attributes are used both to define an HTTP API and to generate an OpenAPI From fb3183179de98c070dab4dc17be9a177f58e64d3 Mon Sep 17 00:00:00 2001 From: David Pacheco Date: Wed, 11 Jan 2023 10:43:38 -0800 Subject: [PATCH 12/47] review feedback --- dropshot/src/extractor/common.rs | 34 +++++++++++++++++++------------- dropshot_endpoint/src/lib.rs | 2 +- 2 files changed, 21 insertions(+), 15 deletions(-) diff --git a/dropshot/src/extractor/common.rs b/dropshot/src/extractor/common.rs index 00dc89d55..71eb886bb 100644 --- a/dropshot/src/extractor/common.rs +++ b/dropshot/src/extractor/common.rs @@ -70,21 +70,27 @@ impl ExclusiveExtractor for S { /// Top-level extractor for a given request /// -/// During request handling, we wind up needing to call a function with a -/// variable number of arguments whose types are all extractors (either -/// `SharedExtractor` or `ExclusiveExtractor`). We achieve this with a separate -/// type called `RequestExtractor` that looks just like `ExclusiveExtractor`. -/// We can impl this trait on a tuple of any number of types that themselves -/// impl `SharedExtractor` or `ExclusiveExtractor` by delegating to each type's -/// extractor implementation. There may be at most one `ExclusiveExtractor` in -/// the tuple. We require it to be the last argument just to avoid having to -/// define the power set of impls. +/// During request handling, we must find and invoke the appropriate +/// consumer-defined handler function. While each of these functions takes a +/// fixed number of arguments, different handler functions may take a different +/// number of arguments. The arguments that can vary between handler functions +/// are all extractors, meaning that they impl `SharedExtractor` or +/// `ExclusiveExtractor`. /// -/// In practice, `RequestExtractor` is identical to `ExclusiveExtractor`. But -/// we use them in different ways. `RequestExtractor` is private, only -/// implemented on tuple types, and only used to kick off extraction. -/// `ExclusiveExtractor` can be consumer-defined and would generally not be -/// implemented on tuple types. +/// This trait helps us invoke various handler functions uniformly, despite them +/// accepting different arguments. To achieve this, we impl this trait for all +/// supported _tuples_ of argument types, which is essentially 0 or more +/// `SharedExtractor`s followed by at most one `ExclusiveExtractor`. This impl +/// essentially does the same thing as any other extractor, and it does it by +/// delegating to the impls of each tuple member. +/// +/// In practice, the trait `RequestExtractor` is identical to +/// `ExclusiveExtractor` and we could use `ExclusiveExtractor` directly. But +/// it's clearer to use distinct types, since they're used differently. To +/// summarize: `RequestExtractor` is private, only implemented on tuple types, +/// and only used to kick off extraction from the top level. +/// `ExclusiveExtractor` is public, implementing types can be consumer-defined, +/// and it would generally not be implemented on tuple types.
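+// (Editor's note, illustrative only: a handler taking, say, a `Query`
+// argument followed by a `TypedBody` argument exercises the two-element
+// tuple impl — one `SharedExtractor`, then the single `ExclusiveExtractor`,
+// which must come last in the argument list.)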
#[async_trait] pub trait RequestExtractor: Send + Sync + Sized { /// Construct an instance of this type from a `RequestContext`. diff --git a/dropshot_endpoint/src/lib.rs b/dropshot_endpoint/src/lib.rs index 546f22460..f6111a885 100644 --- a/dropshot_endpoint/src/lib.rs +++ b/dropshot_endpoint/src/lib.rs @@ -171,7 +171,7 @@ fn do_channel( } = from_tokenstream(&attr)?; match protocol { ChannelProtocol::WEBSOCKETS => { - // here we construct a wrapper function and mutate the arguments a bit + // Here we construct a wrapper function and mutate the arguments a bit // for the outer layer: we replace WebsocketConnection, which is not // an extractor, with WebsocketUpgrade, which is. We also move it // to the end. From 1dff4669e8712214ee4ff5e37d28dd819a7a370c Mon Sep 17 00:00:00 2001 From: David Pacheco Date: Mon, 9 Jan 2023 16:10:14 -0800 Subject: [PATCH 13/47] WIP: making it work, need to rebase --- CHANGELOG.adoc | 2 + dropshot/examples/request-headers.rs | 4 +- dropshot/src/api_description.rs | 61 +------ dropshot/src/extractor/common.rs | 198 +++++++++++++++++++++++ dropshot/src/extractor/mod.rs | 117 ++++---------- dropshot/src/handler.rs | 25 +-- dropshot/src/lib.rs | 3 +- dropshot/src/websocket.rs | 31 ++-- dropshot/tests/fail/bad_endpoint3.stderr | 29 ---- dropshot/tests/test_demo.rs | 2 +- dropshot_endpoint/src/lib.rs | 48 ++---- 11 files changed, 283 insertions(+), 237 deletions(-) create mode 100644 dropshot/src/extractor/common.rs diff --git a/CHANGELOG.adoc b/CHANGELOG.adoc index 52184c350..b78442ca1 100644 --- a/CHANGELOG.adoc +++ b/CHANGELOG.adoc @@ -17,6 +17,8 @@ https://github.com/oxidecomputer/dropshot/compare/v0.8.0\...HEAD[Full list of co === Breaking Changes +// XXX-dap TODO need update here + * https://github.com/oxidecomputer/dropshot/pull/504[#504] Dropshot allows TLS configuration to be supplied either by path or as bytes. For compatibility, the `AsFile` variant of `ConfigTls` contains the `cert_file` and `key_file` fields, and may be used similarly to the old variant. * https://github.com/oxidecomputer/dropshot/pull/502[#502] Dropshot exposes a `refresh_tls` method to update the TLS certificates being used by a running server. If you previously tried to access `DropshotState.tls`, you can access the `DropshotState.using_tls()` method instead. * https://github.com/oxidecomputer/dropshot/pull/540[#540] `ConfigDropshot` now uses a [`camino::Utf8PathBuf`](https://docs.rs/camino/1.1.1/camino/struct.Utf8PathBuf.html) for its file path. There is no change to the configuration format itself, just its representation in Rust. diff --git a/dropshot/examples/request-headers.rs b/dropshot/examples/request-headers.rs index 969511c07..b0ceba7a4 100644 --- a/dropshot/examples/request-headers.rs +++ b/dropshot/examples/request-headers.rs @@ -4,8 +4,8 @@ //! //! The headers accessed here will not be recorded as inputs in the OpenAPI //! spec. This is not currently supported out-of-the-box with Dropshot, but it -//! could be done by implementing you're own `Extractor` that pulls the headers -//! out, similar to what's done here. +//! could be done by implementing your own `SharedExtractor` that pulls the +//! headers out, similar to what's done here. //! //! This example is based on the "basic.rs" one. See that one for more detailed //! comments on the common code.
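[Editor's aside, not part of the patch: the comment above mentions implementing your own `SharedExtractor` to pull headers out. A minimal sketch under stated assumptions — the type name and choice of header are hypothetical, and `ExtensionMode` is assumed to be importable from the crate root — might look like this:]

use async_trait::async_trait;
use dropshot::{
    ApiEndpointBodyContentType, ExtensionMode, ExtractorMetadata, HttpError,
    RequestContext, ServerContext, SharedExtractor,
};

// Hypothetical extractor capturing the request's Accept header, if present.
struct AcceptHeader(Option<String>);

#[async_trait]
impl SharedExtractor for AcceptHeader {
    async fn from_request<Context: ServerContext>(
        rqctx: &RequestContext<Context>,
    ) -> Result<AcceptHeader, HttpError> {
        // A shared extractor only inspects the request: it briefly takes the
        // lock to read a header and does not consume the body.
        let request = rqctx.request.lock().await;
        Ok(AcceptHeader(
            request
                .headers()
                .get(http::header::ACCEPT)
                .and_then(|value| value.to_str().ok())
                .map(String::from),
        ))
    }

    fn metadata(
        _body_content_type: ApiEndpointBodyContentType,
    ) -> ExtractorMetadata {
        // As noted above, headers read this way are not recorded as inputs
        // in the OpenAPI spec, so no parameters are reported here.
        ExtractorMetadata {
            extension_mode: ExtensionMode::None,
            parameters: vec![],
        }
    }
}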
diff --git a/dropshot/src/api_description.rs b/dropshot/src/api_description.rs index 7ef06f5ca..cb52e619f 100644 --- a/dropshot/src/api_description.rs +++ b/dropshot/src/api_description.rs @@ -11,7 +11,7 @@ use crate::router::PathSegment; use crate::server::ServerContext; use crate::type_util::type_is_scalar; use crate::type_util::type_is_string_enum; -use crate::Extractor; +use crate::extractor::RequestExtractor; use crate::HttpErrorResponseBody; use crate::CONTENT_TYPE_JSON; use crate::CONTENT_TYPE_OCTET_STREAM; @@ -56,7 +56,7 @@ impl<'a, Context: ServerContext> ApiEndpoint { ) -> Self where HandlerType: HttpHandlerFunc, - FuncParams: Extractor + 'static, + FuncParams: RequestExtractor + 'static, ResponseType: HttpResponse + Send + Sync + 'static, { let body_content_type = @@ -280,7 +280,6 @@ impl ApiDescription { self.validate_tags(&e)?; self.validate_path_parameters(&e)?; - self.validate_body_parameters(&e)?; self.validate_named_parameters(&e)?; self.router.insert(e); @@ -374,32 +373,7 @@ impl ApiDescription { Ok(()) } - /// Validate that we have a single body parameter. - fn validate_body_parameters( - &self, - e: &ApiEndpoint, - ) -> Result<(), String> { - // Explicitly disallow any attempt to consume the body twice. - let nbodyextractors = e - .parameters - .iter() - .filter(|p| match p.metadata { - ApiEndpointParameterMetadata::Body(..) => true, - _ => false, - }) - .count(); - if nbodyextractors > 1 { - return Err(format!( - "only one body extractor can be used in a handler (this \ - function has {})", - nbodyextractors - )); - } - - Ok(()) - } - - /// Validate that named parameters have appropriate types and their aren't + /// Validate that named parameters have appropriate types and there are no /// duplicates. Parameters must have scalar types except in the case of the /// received for a wildcard path which must be an array of String. fn validate_named_parameters( @@ -1566,8 +1540,6 @@ mod test { use crate::Query; use crate::TagConfig; use crate::TagDetails; - use crate::TypedBody; - use crate::UntypedBody; use crate::CONTENT_TYPE_JSON; use http::Method; use hyper::Body; @@ -1717,31 +1689,8 @@ mod test { } } - #[test] - fn test_two_bodies() { - #[derive(Deserialize, JsonSchema)] - struct AStruct {} - - #[endpoint { - method = PUT, - path = "/testing/two_bodies" - }] - async fn test_twobodies_handler( - _: Arc>, - _: UntypedBody, - _: TypedBody, - ) -> Result, HttpError> { - unimplemented!(); - } - - let mut api = ApiDescription::new(); - let error = api.register(test_twobodies_handler).unwrap_err(); - assert_eq!( - error, - "only one body extractor can be used in a handler (this function \ - has 2)" - ); - } + // XXX-dap TODO-coverage need a test for trying to use two + // ExclusiveExtractors #[test] fn test_dup_names() { diff --git a/dropshot/src/extractor/common.rs b/dropshot/src/extractor/common.rs new file mode 100644 index 000000000..0bfd294ff --- /dev/null +++ b/dropshot/src/extractor/common.rs @@ -0,0 +1,198 @@ +// Copyright 2022 Oxide Computer Company + +// XXX-dap TODO-cleanup should the metadata into a separate, shared trait? + +use crate::api_description::ApiEndpointParameter; +use crate::api_description::{ApiEndpointBodyContentType, ExtensionMode}; +use crate::error::HttpError; +use crate::server::ServerContext; +use crate::RequestContext; + +use async_trait::async_trait; + +/// Metadata associated with an extractor including parameters and whether or not +/// the associated endpoint is paginated. 
+pub struct ExtractorMetadata { + pub extension_mode: ExtensionMode, + pub parameters: Vec, +} + +/// Extractors that require exclusive access to the underlying `hyper::Request` +/// +/// These extractors usually need to read the body of the request or else modify +/// how the server treats the rest of it (e.g., websocket upgrade). There may +/// be at most one of these associated with any request. +#[async_trait] +pub trait ExclusiveExtractor: Send + Sync + Sized { + /// Construct an instance of this type from a `RequestContext`. + async fn from_request( + rqctx: &RequestContext, + ) -> Result; + + fn metadata( + body_content_type: ApiEndpointBodyContentType, + ) -> ExtractorMetadata; +} + +/// Extractors that do _not_ require exclusive access to the underlying +/// `hyper::Request` +/// +/// These extractors usually look at immutable properties of the request that +/// are known up front, like the URL. There may be any number of these +/// associated with any request. +#[async_trait] +pub trait SharedExtractor: Send + Sync + Sized { + /// Construct an instance of this type from a `RequestContext`. + async fn from_request( + rqctx: &RequestContext, + ) -> Result; + + fn metadata( + body_content_type: ApiEndpointBodyContentType, + ) -> ExtractorMetadata; +} + +// A `SharedExtractor` can always be treated like an `ExclusiveExtractor`. +#[async_trait] +impl ExclusiveExtractor for S { + async fn from_request( + rqctx: &RequestContext, + ) -> Result { + ::from_request(rqctx).await + } + + fn metadata( + body_content_type: ApiEndpointBodyContentType, + ) -> ExtractorMetadata { + ::metadata(body_content_type) + } +} + +/// Top-level extractor for a given request +/// +/// During request handling, we wind up needing to call a function with a +/// variable number of arguments whose types are all extractors (either +/// `SharedExtractor` or `ExclusiveExtractor`). We achieve this with a separate +/// type called `RequestExtractor` that looks just like `ExclusiveExtractor`. +/// We can impl this trait on a tuple of any number of types that themselves +/// impl `SharedExtractor` or `ExclusiveExtractor` by delegating to each type's +/// extractor implementation. There may be at most one `ExclusiveExtractor` in +/// the tuple. We require it to be the last argument just to avoid having to +/// define the power set of impls. +/// +/// In practice, `RequestExtractor` is identical to `ExclusiveExtractor`. But +/// we use them in different ways. `RequestExtractor` is private, only +/// implemented on tuple types, and only used to kick off extraction. +/// `ExclusiveExtractor` can be consumer-defined and would generally not be +/// implemented on tuple types. +#[async_trait] +pub trait RequestExtractor: Send + Sync + Sized { + /// Construct an instance of this type from a `RequestContext`.
+ async fn from_request( + rqctx: &RequestContext, + ) -> Result; + + fn metadata( + body_content_type: ApiEndpointBodyContentType, + ) -> ExtractorMetadata; +} + +// Impl for zero-element tuple (used for request handlers with no extractors) +#[async_trait] +impl RequestExtractor for () { + async fn from_request( + _rqctx: &RequestContext, + ) -> Result { + Ok(()) + } + + fn metadata( + _body_content_type: ApiEndpointBodyContentType, + ) -> ExtractorMetadata { + ExtractorMetadata { + extension_mode: ExtensionMode::None, + parameters: vec![], + } + } +} + +// Impl for one-element tuple with an exclusive extractor +#[async_trait] +impl RequestExtractor for (X,) { + async fn from_request( + rqctx: &RequestContext, + ) -> Result { + Ok((X::from_request(rqctx).await?,)) + } + + fn metadata( + body_content_type: ApiEndpointBodyContentType, + ) -> ExtractorMetadata { + X::metadata(body_content_type) + } +} + +// XXX-dap TODO-doc update comment based on the change that uses the fact that +// SharedExtractor impls ExclusiveExtractor such that the last item in the +// tuple *must* be an exclusive extractor +/// Defines implementations of `RequestExtractor` for tuples of one or more +/// `SharedExtractor` followed by an `ExclusiveExtractor` +/// +/// As an example, `impl_rqextractor_for_tuple!(S1, S2)` defines an impl of +/// `RequestExtractor` for tuple `(S1, S2, X)` where `S1: SharedExtractor`, +/// `S2: SharedExtractor`, and `X: ExclusiveExtractor`, as well as a similar +/// impl for just `(S1, S2)`. +macro_rules! impl_rqextractor_for_tuple { + ($( $S:ident),+) => { + + // impl RequestExtractor for a tuple of shared extractors with an exclusive extractor + #[async_trait] + impl< X: ExclusiveExtractor + 'static, $($S: SharedExtractor + 'static,)+ > + RequestExtractor + for ($($S,)+ X) + { + async fn from_request(rqctx: &RequestContext) + -> Result<( $($S,)+ X ), HttpError> + { + futures::try_join!( + $($S::from_request(rqctx),)+ + X::from_request(rqctx) + ) + } + + fn metadata(_body_content_type: ApiEndpointBodyContentType) -> ExtractorMetadata { + #[allow(unused_mut)] + let mut extension_mode = ExtensionMode::None; + #[allow(unused_mut)] + let mut parameters = vec![]; + $( + let mut metadata = $S::metadata(_body_content_type.clone()); + extension_mode = match (extension_mode, metadata.extension_mode) { + (ExtensionMode::None, x) | (x, ExtensionMode::None) => x, + (x, y) if x != y => { + panic!("incompatible extension modes in tuple: {:?} != {:?}", x, y); + } + (_, x) => x, + }; + parameters.append(&mut metadata.parameters); + )+ + + let mut metadata = X::metadata(_body_content_type.clone()); + extension_mode = match (extension_mode, metadata.extension_mode) { + (ExtensionMode::None, x) | (x, ExtensionMode::None) => x, + (x, y) if x != y => { + panic!("incompatible extension modes in tuple: {:?} != {:?}", x, y); + } + (_, x) => x, + }; + parameters.append(&mut metadata.parameters); + + ExtractorMetadata { extension_mode, parameters } + } + } +}} + +// Implement `RequestExtractor` for any tuple consisting of 0-2 shared +// extractors and exactly one exclusive extractor. 
+impl_rqextractor_for_tuple!(S1); +impl_rqextractor_for_tuple!(S1, S2); diff --git a/dropshot/src/extractor/mod.rs b/dropshot/src/extractor/mod.rs index d80799947..721277f85 100644 --- a/dropshot/src/extractor/mod.rs +++ b/dropshot/src/extractor/mod.rs @@ -30,78 +30,15 @@ use schemars::schema::SchemaObject; use schemars::JsonSchema; use serde::de::DeserializeOwned; use std::fmt::Debug; -use std::sync::Arc; - -/// `Extractor` defines an interface allowing a type to be constructed from a -/// `RequestContext`. Unlike most traits, `Extractor` essentially defines only a -/// constructor function, not instance functions. -/// -/// The extractors that we provide (`Query`, `Path`, `TypedBody`, `UntypedBody`, and -/// `WebsocketUpgrade`) implement `Extractor` in order to construct themselves from -/// the request. For example, `Extractor` is implemented for `Query` with a -/// function that reads the query string from the request, parses it, and -/// constructs a `Query` with it. -/// -/// We also define implementations of `Extractor` for tuples of types that -/// themselves implement `Extractor`. See the implementation of -/// `HttpRouteHandler` for more on why this needed. -#[async_trait] -pub trait Extractor: Send + Sync + Sized { - /// Construct an instance of this type from a `RequestContext`. - async fn from_request( - rqctx: Arc>, - ) -> Result; - - fn metadata( - body_content_type: ApiEndpointBodyContentType, - ) -> ExtractorMetadata; -} -/// Metadata associated with an extractor including parameters and whether or not -/// the associated endpoint is paginated. -pub struct ExtractorMetadata { - pub extension_mode: ExtensionMode, - pub parameters: Vec, -} - -/// `impl_derived_for_tuple!` defines implementations of `Extractor` for tuples -/// whose elements themselves implement `Extractor`. -macro_rules! impl_extractor_for_tuple { - ($( $T:ident),*) => { - #[async_trait] - impl< $($T: Extractor + 'static,)* > Extractor for ($($T,)*) - { - async fn from_request(_rqctx: Arc>) - -> Result<( $($T,)* ), HttpError> - { - futures::try_join!($($T::from_request(Arc::clone(&_rqctx)),)*) - } +mod common; - fn metadata(_body_content_type: ApiEndpointBodyContentType) -> ExtractorMetadata { - #[allow(unused_mut)] - let mut extension_mode = ExtensionMode::None; - #[allow(unused_mut)] - let mut parameters = vec![]; - $( - let mut metadata = $T::metadata(_body_content_type.clone()); - extension_mode = match (extension_mode, metadata.extension_mode) { - (ExtensionMode::None, x) | (x, ExtensionMode::None) => x, - (x, y) if x != y => { - panic!("incompatible extension modes in tuple: {:?} != {:?}", x, y); - } - (_, x) => x, - }; - parameters.append(&mut metadata.parameters); - )* - ExtractorMetadata { extension_mode, parameters } - } - } -}} +pub use common::ExclusiveExtractor; +pub use common::ExtractorMetadata; +pub use common::RequestExtractor; +pub use common::SharedExtractor; -impl_extractor_for_tuple!(); -impl_extractor_for_tuple!(T1); -impl_extractor_for_tuple!(T1, T2); -impl_extractor_for_tuple!(T1, T2, T3); +// XXX-dap move these definitions to separate files? // Query: query string extractor @@ -140,19 +77,19 @@ where } } -// The `Extractor` implementation for Query describes how to construct -// an instance of `Query` from an HTTP request: namely, by parsing -// the query string to an instance of `QueryType`. 
+// The `SharedExtractor` implementation for Query describes how to +// construct an instance of `Query` from an HTTP request: namely, by +// parsing the query string to an instance of `QueryType`. // TODO-cleanup We shouldn't have to use the "'static" bound on `QueryType` // here. It seems like we ought to be able to use 'async_trait, but that // doesn't seem to be defined. #[async_trait] -impl Extractor for Query +impl SharedExtractor for Query where QueryType: JsonSchema + DeserializeOwned + Send + Sync + 'static, { async fn from_request( - rqctx: Arc>, + rqctx: &RequestContext, ) -> Result, HttpError> { let request = rqctx.request.lock().await; http_request_load_query(&request) @@ -183,16 +120,16 @@ impl Path { } } -// The `Extractor` implementation for Path describes how to construct -// an instance of `Path` from an HTTP request: namely, by extracting -// parameters from the query string. +// The `SharedExtractor` implementation for Path describes how to +// construct an instance of `Path` from an HTTP request: namely, by +// extracting parameters from the query string. #[async_trait] -impl Extractor for Path +impl SharedExtractor for Path where PathType: DeserializeOwned + JsonSchema + Send + Sync + 'static, { async fn from_request( - rqctx: Arc>, + rqctx: &RequestContext, ) -> Result, HttpError> { let params: PathType = http_extract_path_params(&rqctx.path_variables)?; Ok(Path { inner: params }) @@ -288,7 +225,7 @@ impl /// Given an HTTP request, attempt to read the body, parse it according /// to the content type, and deserialize it to an instance of `BodyType`. async fn http_request_load_body( - rqctx: Arc>, + rqctx: &RequestContext, ) -> Result, HttpError> where BodyType: JsonSchema + DeserializeOwned + Send + Sync, @@ -352,19 +289,19 @@ where Ok(TypedBody { inner: content }) } -// The `Extractor` implementation for TypedBody describes how to -// construct an instance of `TypedBody` from an HTTP request: namely, -// by reading the request body and parsing it as JSON into type `BodyType`. -// TODO-cleanup We shouldn't have to use the "'static" bound on `BodyType` here. -// It seems like we ought to be able to use 'async_trait, but that doesn't seem -// to be defined. +// The `ExclusiveExtractor` implementation for TypedBody describes how +// to construct an instance of `TypedBody` from an HTTP request: +// namely, by reading the request body and parsing it as JSON into type +// `BodyType`. TODO-cleanup We shouldn't have to use the "'static" bound on +// `BodyType` here. It seems like we ought to be able to use 'async_trait, but +// that doesn't seem to be defined. #[async_trait] -impl Extractor for TypedBody +impl ExclusiveExtractor for TypedBody where BodyType: JsonSchema + DeserializeOwned + Send + Sync + 'static, { async fn from_request( - rqctx: Arc>, + rqctx: &RequestContext, ) -> Result, HttpError> { http_request_load_body(rqctx).await } @@ -415,9 +352,9 @@ impl UntypedBody { } #[async_trait] -impl Extractor for UntypedBody { +impl ExclusiveExtractor for UntypedBody { async fn from_request( - rqctx: Arc>, + rqctx: &RequestContext, ) -> Result { let server = &rqctx.server; let mut request = rqctx.request.lock().await; diff --git a/dropshot/src/handler.rs b/dropshot/src/handler.rs index 5f03c8527..fce5574a5 100644 --- a/dropshot/src/handler.rs +++ b/dropshot/src/handler.rs @@ -32,7 +32,7 @@ //! OpenAPI document generation. 
use super::error::HttpError; -use super::extractor::Extractor; +use super::extractor::RequestExtractor; use super::http_util::CONTENT_TYPE_JSON; use super::http_util::CONTENT_TYPE_OCTET_STREAM; use super::server::DropshotState; @@ -166,7 +166,7 @@ pub trait HttpHandlerFunc: Send + Sync + 'static where Context: ServerContext, - FuncParams: Extractor, + FuncParams: RequestExtractor, ResponseType: HttpResponse + Send + Sync + 'static, { async fn handle_request( @@ -267,7 +267,8 @@ macro_rules! impl_HttpHandlerFunc_for_func_with_params { FutureType: Future> + Send + 'static, ResponseType: HttpResponse + Send + Sync + 'static, - $($T: Extractor + Send + Sync + 'static,)* + ($($T,)*): RequestExtractor, + $($T: Send + Sync + 'static,)* { async fn handle_request( &self, @@ -318,7 +319,7 @@ pub struct HttpRouteHandler where Context: ServerContext, HandlerType: HttpHandlerFunc, - FuncParams: Extractor, + FuncParams: RequestExtractor, ResponseType: HttpResponse + Send + Sync + 'static, { /// the actual HttpHandlerFunc used to implement this route @@ -341,7 +342,7 @@ impl Debug where Context: ServerContext, HandlerType: HttpHandlerFunc, - FuncParams: Extractor, + FuncParams: RequestExtractor, ResponseType: HttpResponse + Send + Sync + 'static, { fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult { @@ -355,7 +356,7 @@ impl RouteHandler where Context: ServerContext, HandlerType: HttpHandlerFunc, - FuncParams: Extractor + 'static, + FuncParams: RequestExtractor + 'static, ResponseType: HttpResponse + Send + Sync + 'static, { fn label(&self) -> &str { @@ -371,10 +372,10 @@ where // arguments to the handler function. This could be `()`, `(Query)`, // `(TypedBody)`, `(Query, TypedBody)`, or any other // combination of extractors we decide to support in the future. - // Whatever it is must implement `Extractor`, which means we can invoke - // `Extractor::from_request()` to construct the argument tuple, - // generally from information available in the `request` object. We - // pass this down to the `HttpHandlerFunc`, for which there's a + // Whatever it is must implement `RequestExtractor`, which means we can + // invoke `RequestExtractor::from_request()` to construct the argument + // tuple, generally from information available in the `request` object. + // We pass this down to the `HttpHandlerFunc`, for which there's a // different implementation for each value of `FuncParams`. The // `HttpHandlerFunc` for each `FuncParams` just pulls the arguments out // of the `funcparams` tuple and makes them actual function arguments @@ -383,7 +384,7 @@ where // actual handler function. From this point down, all of this is // resolved statically. 
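// (Editor's illustration: if a handler's extractor parameters are, say,
// `(Query<Q>, TypedBody<B>)`, then `FuncParams` is that tuple type, and the
// `from_request()` call below resolves to the corresponding tuple impl,
// which runs both extractors and hands the resulting pair to the handler.)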
let rqctx = Arc::new(rqctx_raw); - let funcparams = Extractor::from_request(Arc::clone(&rqctx)).await?; + let funcparams = RequestExtractor::from_request(&rqctx).await?; let future = self.handler.handle_request(rqctx, funcparams); future.await } @@ -396,7 +397,7 @@ impl where Context: ServerContext, HandlerType: HttpHandlerFunc, - FuncParams: Extractor + 'static, + FuncParams: RequestExtractor + 'static, ResponseType: HttpResponse + Send + Sync + 'static, { /// Given a function matching one of the supported API handler function diff --git a/dropshot/src/lib.rs b/dropshot/src/lib.rs index 2a3803c16..a8788f01c 100644 --- a/dropshot/src/lib.rs +++ b/dropshot/src/lib.rs @@ -629,10 +629,11 @@ pub use config::ConfigDropshot; pub use config::ConfigTls; pub use error::HttpError; pub use error::HttpErrorResponseBody; -pub use extractor::Extractor; +pub use extractor::ExclusiveExtractor; pub use extractor::ExtractorMetadata; pub use extractor::Path; pub use extractor::Query; +pub use extractor::SharedExtractor; pub use extractor::TypedBody; pub use extractor::UntypedBody; pub use handler::http_response_found; diff --git a/dropshot/src/websocket.rs b/dropshot/src/websocket.rs index c7908fe74..3a01fd817 100644 --- a/dropshot/src/websocket.rs +++ b/dropshot/src/websocket.rs @@ -8,8 +8,8 @@ use crate::api_description::ExtensionMode; use crate::{ - ApiEndpointBodyContentType, Extractor, ExtractorMetadata, HttpError, - RequestContext, ServerContext, + ApiEndpointBodyContentType, ExclusiveExtractor, ExtractorMetadata, + HttpError, RequestContext, ServerContext, }; use async_trait::async_trait; use base64::Engine; @@ -23,10 +23,10 @@ use serde_json::json; use sha1::{Digest, Sha1}; use slog::Logger; use std::future::Future; -use std::sync::Arc; -/// WebsocketUpgrade is an Extractor used to upgrade and handle an HTTP request -/// as a websocket when present in a Dropshot endpoint's function arguments. +/// WebsocketUpgrade is an ExclusiveExtractor used to upgrade and handle an HTTP +/// request as a websocket when present in a Dropshot endpoint's function +/// arguments. /// /// The consumer of this must call [WebsocketUpgrade::handle] for the connection /// to be upgraded. (This is done for you by `#[channel]`.) @@ -80,13 +80,13 @@ fn derive_accept_key(request_key: &[u8]) -> String { base64::engine::general_purpose::STANDARD.encode(&sha1.finalize()) } -/// This `Extractor` implementation constructs an instance of `WebsocketUpgrade` -/// from an HTTP request, and returns an error if the given request does not -/// contain websocket upgrade headers. +/// This `ExclusiveExtractor` implementation constructs an instance of +/// `WebsocketUpgrade` from an HTTP request, and returns an error if the given +/// request does not contain websocket upgrade headers. 
#[async_trait] -impl Extractor for WebsocketUpgrade { +impl ExclusiveExtractor for WebsocketUpgrade { async fn from_request( - rqctx: Arc>, + rqctx: &RequestContext, ) -> Result { let request = &mut *rqctx.request.lock().await; @@ -192,8 +192,8 @@ impl WebsocketUpgrade { /// #[dropshot::endpoint { method = GET, path = "/my/ws/endpoint/{id}" }] /// async fn my_ws_endpoint( /// rqctx: std::sync::Arc>, - /// websock: dropshot::WebsocketUpgrade, /// id: dropshot::Path, + /// websock: dropshot::WebsocketUpgrade, /// ) -> dropshot::WebsocketEndpointResult { /// let logger = rqctx.log.new(slog::o!()); /// websock.handle(move |upgraded| async move { @@ -297,7 +297,9 @@ impl JsonSchema for WebsocketUpgrade { mod tests { use crate::router::HttpRouter; use crate::server::{DropshotState, ServerConfig}; - use crate::{Extractor, HttpError, RequestContext, WebsocketUpgrade}; + use crate::{ + ExclusiveExtractor, HttpError, RequestContext, WebsocketUpgrade, + }; use futures::lock::Mutex; use http::Request; use hyper::Body; @@ -308,7 +310,7 @@ mod tests { async fn ws_upg_from_mock_rqctx() -> Result { let log = slog::Logger::root(slog::Discard, slog::o!()).new(slog::o!()); - let fut = WebsocketUpgrade::from_request(Arc::new(RequestContext { + let rqctx = RequestContext { server: Arc::new(DropshotState { private: (), config: ServerConfig { @@ -340,7 +342,8 @@ mod tests { body_content_type: Default::default(), request_id: "".to_string(), log: log.clone(), - })); + }; + let fut = WebsocketUpgrade::from_request(&rqctx); tokio::time::timeout(Duration::from_secs(1), fut) .await .expect("Deadlocked in WebsocketUpgrade constructor") diff --git a/dropshot/tests/fail/bad_endpoint3.stderr b/dropshot/tests/fail/bad_endpoint3.stderr index 1c0a1ce47..f6b90e23c 100644 --- a/dropshot/tests/fail/bad_endpoint3.stderr +++ b/dropshot/tests/fail/bad_endpoint3.stderr @@ -1,32 +1,3 @@ -error[E0277]: the trait bound `String: Extractor` is not satisfied - --> tests/fail/bad_endpoint3.rs:17:12 - | -17 | param: String, - | ^^^^^^ the trait `Extractor` is not implemented for `String` - | - = help: the following other types implement trait `Extractor`: - () - (T1, T2) - (T1, T2, T3) - (T1,) - TypedBody - UntypedBody - WebsocketUpgrade - dropshot::Path - dropshot::Query -note: required by a bound in `need_extractor` - --> tests/fail/bad_endpoint3.rs:11:1 - | -11 | / #[endpoint { -12 | | method = GET, -13 | | path = "/test", -14 | | }] - | |__^ required by this bound in `need_extractor` -... 
-17 | param: String, - | ------ required by a bound in this - = note: this error originates in the attribute macro `endpoint` (in Nightly builds, run with -Z macro-backtrace for more info) - error[E0277]: the trait bound `fn(Arc>, String) -> impl Future, HttpError>> { for ApiEndpoint<> as RequestContextArgument>::Context>>::from::bad_endpoint}: dropshot::handler::HttpHandlerFunc<_, _, _>` is not satisfied --> tests/fail/bad_endpoint3.rs:15:10 | diff --git a/dropshot/tests/test_demo.rs b/dropshot/tests/test_demo.rs index aabeb9756..acfd837a4 100644 --- a/dropshot/tests/test_demo.rs +++ b/dropshot/tests/test_demo.rs @@ -966,8 +966,8 @@ pub struct DemoUntypedQuery { }] async fn demo_handler_untyped_body( _rqctx: Arc>, - body: UntypedBody, query: Query, + body: UntypedBody, ) -> Result, HttpError> { let nbytes = body.as_bytes().len(); let as_utf8 = if query.into_inner().parse_str.unwrap_or(false) { diff --git a/dropshot_endpoint/src/lib.rs b/dropshot_endpoint/src/lib.rs index a9e2cb3d2..c83636b8b 100644 --- a/dropshot_endpoint/src/lib.rs +++ b/dropshot_endpoint/src/lib.rs @@ -173,7 +173,8 @@ fn do_channel( ChannelProtocol::WEBSOCKETS => { // here we construct a wrapper function and mutate the arguments a bit // for the outer layer: we replace WebsocketConnection, which is not - // an extractor, with WebsocketUpgrade, which is. + // an extractor, with WebsocketUpgrade, which is. We also move it + // to the end. let ItemFnForSignature { attrs, vis, mut sig, _block: body } = syn::parse2(item)?; @@ -219,6 +220,13 @@ fn do_channel( )); } + // XXX-dap TODO-cleanup This is a gross way to do it. + let mut input_pairs = + sig.inputs.clone().into_pairs().collect::>(); + let second_pair = input_pairs.remove(1); + input_pairs.push(second_pair); + sig.inputs = input_pairs.into_iter().collect(); + sig.output = syn::parse2(quote!(-> dropshot::WebsocketEndpointResult))?; @@ -425,12 +433,12 @@ fn do_endpoint_inner( .inputs .iter() .enumerate() - .map(|(index, arg)| { + .filter_map(|(index, arg)| { match arg { syn::FnArg::Receiver(_) => { // The compiler failure here is already comprehensible. arg_is_receiver = true; - quote! {} + Some(quote! {}) } syn::FnArg::Typed(pat) => { let span = pat.ty.span(); @@ -440,23 +448,15 @@ fn do_endpoint_inner( // The first parameter must be an Arc> // and fortunately we already have a trait that we can // use to validate this type. - quote_spanned! { span=> + Some(quote_spanned! { span=> const _: fn() = || { struct NeedRequestContext(<#ty as #dropshot::RequestContextArgument>::Context); }; - } + }) } else { - // Subsequent parameters must implement Extractor. - quote_spanned! 
{ span=> - const _: fn() = || { - fn need_extractor() - where - T: ?Sized + #dropshot::Extractor, - { - } - need_extractor::<#ty>(); - }; - } + // XXX-dap the remaining stuff must together impl + // `RequestExtractor` + None } } } @@ -935,14 +935,6 @@ mod tests { const _: fn() = || { struct NeedRequestContext( > as dropshot::RequestContextArgument>::Context) ; }; - const _: fn() = || { - fn need_extractor() - where - T: ?Sized + dropshot::Extractor, - { - } - need_extractor:: >(); - }; const _: fn() = || { trait ResultTrait { type T; @@ -1041,14 +1033,6 @@ mod tests { const _: fn() = || { struct NeedRequestContext( > as dropshot::RequestContextArgument>::Context) ; }; - const _: fn() = || { - fn need_extractor() - where - T: ?Sized + dropshot::Extractor, - { - } - need_extractor:: >(); - }; const _: fn() = || { trait ResultTrait { type T; From 1c7642b54fae6e3a0714999194b6a66e173e6c8e Mon Sep 17 00:00:00 2001 From: David Pacheco Date: Mon, 9 Jan 2023 16:46:43 -0800 Subject: [PATCH 14/47] update changelog with some todo items --- CHANGELOG.adoc | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/CHANGELOG.adoc b/CHANGELOG.adoc index b78442ca1..5436afa3c 100644 --- a/CHANGELOG.adoc +++ b/CHANGELOG.adoc @@ -18,6 +18,11 @@ https://github.com/oxidecomputer/dropshot/compare/v0.8.0\...HEAD[Full list of co === Breaking Changes // XXX-dap TODO need update here +// Extractor -> {Shared,Exclusive}Extractor +// type signature of from_request() changed +// both: accept &RequestContext instead of Arc +// now: no other change. future: exclusive one will get a hyper::Request +// exclusive extractors must appear last in the argument list * https://github.com/oxidecomputer/dropshot/pull/504[#504] Dropshot allows TLS configuration to be supplied either by path or as bytes. For compatibility, the `AsFile` variant of `ConfigTls` contains the `cert_file` and `key_file` fields, and may be used similarly to the old variant. * https://github.com/oxidecomputer/dropshot/pull/502[#502] Dropshot exposes a `refresh_tls` method to update the TLS certificates being used by a running server. If you previously tried to access `DropshotState.tls`, you can access the `DropshotState.using_tls()` method instead. From 167351961f4800d76e4e3ebe3c26294b4e17e25b Mon Sep 17 00:00:00 2001 From: David Pacheco Date: Wed, 11 Jan 2023 10:25:37 -0800 Subject: [PATCH 15/47] fix copyrights --- dropshot/examples/request-headers.rs | 2 +- dropshot/src/api_description.rs | 2 +- dropshot/src/extractor/common.rs | 2 +- dropshot/src/extractor/mod.rs | 2 +- dropshot/src/handler.rs | 2 +- dropshot/src/lib.rs | 2 +- dropshot/src/pagination.rs | 2 +- dropshot/src/schema_util.rs | 2 +- dropshot/src/type_util.rs | 2 +- dropshot/src/websocket.rs | 2 +- dropshot/tests/test_demo.rs | 2 +- dropshot_endpoint/src/lib.rs | 2 +- 12 files changed, 12 insertions(+), 12 deletions(-) diff --git a/dropshot/examples/request-headers.rs b/dropshot/examples/request-headers.rs index b0ceba7a4..104c75460 100644 --- a/dropshot/examples/request-headers.rs +++ b/dropshot/examples/request-headers.rs @@ -1,4 +1,4 @@ -// Copyright 2022 Oxide Computer Company +// Copyright 2023 Oxide Computer Company //! Example use of Dropshot with request headers //! diff --git a/dropshot/src/api_description.rs b/dropshot/src/api_description.rs index cb52e619f..7f861d3a5 100644 --- a/dropshot/src/api_description.rs +++ b/dropshot/src/api_description.rs @@ -1,4 +1,4 @@ -// Copyright 2022 Oxide Computer Company +// Copyright 2023 Oxide Computer Company //! 
Describes the endpoints and handler functions in your API use crate::handler::HttpHandlerFunc; diff --git a/dropshot/src/extractor/common.rs b/dropshot/src/extractor/common.rs index 0bfd294ff..00dc89d55 100644 --- a/dropshot/src/extractor/common.rs +++ b/dropshot/src/extractor/common.rs @@ -1,4 +1,4 @@ -// Copyright 2022 Oxide Computer Company +// Copyright 2023 Oxide Computer Company // XXX-dap TODO-cleanup should the metadata into a separate, shared trait? diff --git a/dropshot/src/extractor/mod.rs b/dropshot/src/extractor/mod.rs index 721277f85..b32fde52e 100644 --- a/dropshot/src/extractor/mod.rs +++ b/dropshot/src/extractor/mod.rs @@ -1,4 +1,4 @@ -// Copyright 2022 Oxide Computer Company +// Copyright 2023 Oxide Computer Company //! Extractor trait //! diff --git a/dropshot/src/handler.rs b/dropshot/src/handler.rs index fce5574a5..39036af26 100644 --- a/dropshot/src/handler.rs +++ b/dropshot/src/handler.rs @@ -1,4 +1,4 @@ -// Copyright 2020 Oxide Computer Company +// Copyright 2023 Oxide Computer Company //! Interface for implementing HTTP endpoint handler functions. //! //! For information about supported endpoint function signatures, argument types, diff --git a/dropshot/src/lib.rs b/dropshot/src/lib.rs index a8788f01c..5cd3a68a4 100644 --- a/dropshot/src/lib.rs +++ b/dropshot/src/lib.rs @@ -1,4 +1,4 @@ -// Copyright 2020 Oxide Computer Company +// Copyright 2023 Oxide Computer Company //! Dropshot is a general-purpose crate for exposing REST APIs from a Rust //! program. Planned highlights include: //! diff --git a/dropshot/src/pagination.rs b/dropshot/src/pagination.rs index 76e4f6dae..936231551 100644 --- a/dropshot/src/pagination.rs +++ b/dropshot/src/pagination.rs @@ -1,4 +1,4 @@ -// Copyright 2022 Oxide Computer Company +// Copyright 2023 Oxide Computer Company //! Detailed end-user documentation for pagination lives in the Dropshot top- //! level block comment. Here we discuss some of the design choices. diff --git a/dropshot/src/schema_util.rs b/dropshot/src/schema_util.rs index d08da8dda..14904e757 100644 --- a/dropshot/src/schema_util.rs +++ b/dropshot/src/schema_util.rs @@ -1,4 +1,4 @@ -// Copyright 2022 Oxide Computer Company +// Copyright 2023 Oxide Computer Company //! schemars helper functions diff --git a/dropshot/src/type_util.rs b/dropshot/src/type_util.rs index 5c1495dea..fc6405f5a 100644 --- a/dropshot/src/type_util.rs +++ b/dropshot/src/type_util.rs @@ -1,4 +1,4 @@ -// Copyright 2021 Oxide Computer Company +// Copyright 2023 Oxide Computer Company //! Utility functions for working with JsonSchema types. diff --git a/dropshot/src/websocket.rs b/dropshot/src/websocket.rs index 3a01fd817..1db1b1306 100644 --- a/dropshot/src/websocket.rs +++ b/dropshot/src/websocket.rs @@ -1,4 +1,4 @@ -// Copyright 2022 Oxide Computer Company +// Copyright 2023 Oxide Computer Company //! Implements websocket upgrades as an Extractor for use in API route handler //! parameters to indicate that the given endpoint is meant to be upgraded to //! a websocket. diff --git a/dropshot/tests/test_demo.rs b/dropshot/tests/test_demo.rs index acfd837a4..b28c7856f 100644 --- a/dropshot/tests/test_demo.rs +++ b/dropshot/tests/test_demo.rs @@ -1,4 +1,4 @@ -// Copyright 2020 Oxide Computer Company +// Copyright 2023 Oxide Computer Company //! Test cases for the "demo" handlers. These handlers exercise various //! supported configurations of the HTTP handler interface. We exercise them //! 
here to make sure that even if these aren't used at a given point, they still diff --git a/dropshot_endpoint/src/lib.rs b/dropshot_endpoint/src/lib.rs index c83636b8b..546f22460 100644 --- a/dropshot_endpoint/src/lib.rs +++ b/dropshot_endpoint/src/lib.rs @@ -1,4 +1,4 @@ -// Copyright 2022 Oxide Computer Company +// Copyright 2023 Oxide Computer Company //! This package defines macro attributes associated with HTTP handlers. These //! attributes are used both to define an HTTP API and to generate an OpenAPI From f77a40d4cd3b9e5d1fee07f996e69a9253fcaa52 Mon Sep 17 00:00:00 2001 From: David Pacheco Date: Wed, 11 Jan 2023 10:43:38 -0800 Subject: [PATCH 16/47] review feedback --- dropshot/src/extractor/common.rs | 34 +++++++++++++++++++------------- dropshot_endpoint/src/lib.rs | 2 +- 2 files changed, 21 insertions(+), 15 deletions(-) diff --git a/dropshot/src/extractor/common.rs b/dropshot/src/extractor/common.rs index 00dc89d55..71eb886bb 100644 --- a/dropshot/src/extractor/common.rs +++ b/dropshot/src/extractor/common.rs @@ -70,21 +70,27 @@ impl ExclusiveExtractor for S { /// Top-level extractor for a given request /// -/// During request handling, we wind up needing to call a function with a -/// variable number of arguments whose types are all extractors (either -/// `SharedExtractor` or `ExclusiveExtractor`). We achieve this with a separate -/// type called `RequestExtractor` that looks just like `ExclusiveExtractor`. -/// We can impl this trait on a tuple of any number of types that themselves -/// impl `SharedExtractor` or `ExclusiveExtractor` by delegating to each type's -/// extractor implementation. There may be at most one `ExclusiveExtractor` in -/// the tuple. We require it to be the last argument just to avoid having to -/// define the power set of impls. +/// During request handling, we must find and invoke the appropriate +/// consumer-defined handler function. While each of these functions takes a +/// fixed number of arguments, different handler functions may take a different +/// number of arguments. The arguments that can vary between handler functions +/// are all extractors, meaning that they impl `SharedExtractor` or +/// `ExclusiveExtractor`. /// -/// In practice, `RequestExtractor` is identical to `ExclusiveExtractor`. But -/// we use them in different ways. `RequestExtractor` is private, only -/// implemented on tuple types, and only used to kick off extraction. -/// `ExclusiveExtractor` can be consumer-defined and would generally not be -/// implemented on tuple types. +/// This trait helps us invoke various handler functions uniformly, despite them +/// accepting different arguments. To achieve this, we impl this trait for all +/// supported _tuples_ of argument types, which is essentially 0 or more +/// `SharedExtractor`s followed by at most one `ExclusiveExtractor`. This impl +/// essentially does the same thing as any other extractor, and it does it by +/// delegating to the impls of each tuple member. +/// +/// In practice, the trait `RequestExtractor` is identical to +/// `ExclusiveExtractor` and we could use `ExclusiveExtractor` directly. But +/// it's clearer to use distinct types, since they're used differently. To +/// summarize: `RequestExtractor` is private, only implemented on tuple types, +/// and only used to kick off extraction from the top level. +/// `ExclusiveExtractor` is public, implementing types can be consumer-defined, +/// and it would generally not be implemented on tuple types.
#[async_trait] pub trait RequestExtractor: Send + Sync + Sized { /// Construct an instance of this type from a `RequestContext`. diff --git a/dropshot_endpoint/src/lib.rs b/dropshot_endpoint/src/lib.rs index 546f22460..f6111a885 100644 --- a/dropshot_endpoint/src/lib.rs +++ b/dropshot_endpoint/src/lib.rs @@ -171,7 +171,7 @@ fn do_channel( } = from_tokenstream(&attr)?; match protocol { ChannelProtocol::WEBSOCKETS => { - // here we construct a wrapper function and mutate the arguments a bit + // Here we construct a wrapper function and mutate the arguments a bit // for the outer layer: we replace WebsocketConnection, which is not // an extractor, with WebsocketUpgrade, which is. We also move it // to the end. From f4dbd209dbb97ffc346d59280f2d8096b5c142af Mon Sep 17 00:00:00 2001 From: David Pacheco Date: Mon, 9 Jan 2023 16:10:14 -0800 Subject: [PATCH 17/47] WIP: making it work, need to rebase --- CHANGELOG.adoc | 2 + dropshot/examples/request-headers.rs | 4 +- dropshot/src/api_description.rs | 61 +------ dropshot/src/extractor/common.rs | 198 +++++++++++++++++++++++ dropshot/src/extractor/mod.rs | 117 ++++---------- dropshot/src/handler.rs | 25 +-- dropshot/src/lib.rs | 3 +- dropshot/src/websocket.rs | 31 ++-- dropshot/tests/fail/bad_endpoint3.stderr | 29 ---- dropshot/tests/test_demo.rs | 2 +- dropshot_endpoint/src/lib.rs | 48 ++---- 11 files changed, 283 insertions(+), 237 deletions(-) create mode 100644 dropshot/src/extractor/common.rs diff --git a/CHANGELOG.adoc b/CHANGELOG.adoc index 52184c350..b78442ca1 100644 --- a/CHANGELOG.adoc +++ b/CHANGELOG.adoc @@ -17,6 +17,8 @@ https://github.com/oxidecomputer/dropshot/compare/v0.8.0\...HEAD[Full list of co === Breaking Changes +// XXX-dap TODO need update here + * https://github.com/oxidecomputer/dropshot/pull/504[#504] Dropshot allows TLS configuration to be supplied either by path or as bytes. For compatibility, the `AsFile` variant of `ConfigTls` contains the `cert_file` and `key_file` fields, and may be used similarly to the old variant. * https://github.com/oxidecomputer/dropshot/pull/502[#502] Dropshot exposes a `refresh_tls` method to update the TLS certificates being used by a running server. If you previously tried to access `DropshotState.tls`, you can access the `DropshotState.using_tls()` method instead. * https://github.com/oxidecomputer/dropshot/pull/540[#540] `ConfigDropshot` now uses a [`camino::Utf8PathBuf`](https://docs.rs/camino/1.1.1/camino/struct.Utf8PathBuf.html) for its file path. There is no change to the configuration format itself, just its representation in Rust. diff --git a/dropshot/examples/request-headers.rs b/dropshot/examples/request-headers.rs index 969511c07..b0ceba7a4 100644 --- a/dropshot/examples/request-headers.rs +++ b/dropshot/examples/request-headers.rs @@ -4,8 +4,8 @@ //! //! The headers accessed here will not be recorded as inputs in the OpenAPI //! spec. This is not currently supported out-of-the-box with Dropshot, but it -//! could be done by implementing you're own `Extractor` that pulls the headers -//! out, similar to what's done here. +//! could be done by implementing your own `SharedExtractor` that pulls the +//! headers out, similar to what's done here. //! //! This example is based on the "basic.rs" one. See that one for more detailed //! comments on the common code.
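[Editor's aside, not part of the patch: a hypothetical endpoint illustrating the argument order the split traits imply — shared extractors may appear in any number and come first, while at most one exclusive extractor may appear and must come last. All names below are invented for illustration, and the first argument is written as in this series' surrounding tests:]

use dropshot::{
    endpoint, HttpError, HttpResponseOk, Path, RequestContext, TypedBody,
};
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use std::sync::Arc;

#[derive(Deserialize, JsonSchema)]
struct WidgetPath {
    id: String,
}

#[derive(Deserialize, Serialize, JsonSchema)]
struct Widget {
    name: String,
}

#[endpoint {
    method = PUT,
    path = "/widgets/{id}",
}]
async fn widget_put(
    _rqctx: Arc<RequestContext<()>>,
    path: Path<WidgetPath>,  // SharedExtractor: any number, first
    body: TypedBody<Widget>, // ExclusiveExtractor: at most one, last
) -> Result<HttpResponseOk<Widget>, HttpError> {
    // Echo the named widget back, ignoring the id for brevity.
    let _id = path.into_inner().id;
    Ok(HttpResponseOk(body.into_inner()))
}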
diff --git a/dropshot/src/api_description.rs b/dropshot/src/api_description.rs index e0161db4d..30d91a785 100644 --- a/dropshot/src/api_description.rs +++ b/dropshot/src/api_description.rs @@ -12,7 +12,7 @@ use crate::schema_util::j2oas_schema; use crate::server::ServerContext; use crate::type_util::type_is_scalar; use crate::type_util::type_is_string_enum; -use crate::Extractor; +use crate::extractor::RequestExtractor; use crate::HttpErrorResponseBody; use crate::CONTENT_TYPE_JSON; use crate::CONTENT_TYPE_OCTET_STREAM; @@ -57,7 +57,7 @@ impl<'a, Context: ServerContext> ApiEndpoint { ) -> Self where HandlerType: HttpHandlerFunc, - FuncParams: Extractor + 'static, + FuncParams: RequestExtractor + 'static, ResponseType: HttpResponse + Send + Sync + 'static, { let body_content_type = @@ -281,7 +281,6 @@ impl ApiDescription { self.validate_tags(&e)?; self.validate_path_parameters(&e)?; - self.validate_body_parameters(&e)?; self.validate_named_parameters(&e)?; self.router.insert(e); @@ -375,32 +374,7 @@ impl ApiDescription { Ok(()) } - /// Validate that we have a single body parameter. - fn validate_body_parameters( - &self, - e: &ApiEndpoint, - ) -> Result<(), String> { - // Explicitly disallow any attempt to consume the body twice. - let nbodyextractors = e - .parameters - .iter() - .filter(|p| match p.metadata { - ApiEndpointParameterMetadata::Body(..) => true, - _ => false, - }) - .count(); - if nbodyextractors > 1 { - return Err(format!( - "only one body extractor can be used in a handler (this \ - function has {})", - nbodyextractors - )); - } - - Ok(()) - } - - /// Validate that named parameters have appropriate types and their aren't + /// Validate that named parameters have appropriate types and there are no /// duplicates. Parameters must have scalar types except in the case of the /// received for a wildcard path which must be an array of String. fn validate_named_parameters( @@ -1131,8 +1105,6 @@ mod test { use crate::Query; use crate::TagConfig; use crate::TagDetails; - use crate::TypedBody; - use crate::UntypedBody; use crate::CONTENT_TYPE_JSON; use http::Method; use hyper::Body; @@ -1229,31 +1201,8 @@ mod test { api.register(test_badpath_handler).unwrap(); } - #[test] - fn test_two_bodies() { - #[derive(Deserialize, JsonSchema)] - struct AStruct {} - - #[endpoint { - method = PUT, - path = "/testing/two_bodies" - }] - async fn test_twobodies_handler( - _: Arc>, - _: UntypedBody, - _: TypedBody, - ) -> Result, HttpError> { - unimplemented!(); - } - - let mut api = ApiDescription::new(); - let error = api.register(test_twobodies_handler).unwrap_err(); - assert_eq!( - error, - "only one body extractor can be used in a handler (this function \ - has 2)" - ); - } + // XXX-dap TODO-coverage need a test for trying to use two + // ExclusiveExtractors #[test] fn test_dup_names() { diff --git a/dropshot/src/extractor/common.rs b/dropshot/src/extractor/common.rs new file mode 100644 index 000000000..0bfd294ff --- /dev/null +++ b/dropshot/src/extractor/common.rs @@ -0,0 +1,198 @@ +// Copyright 2022 Oxide Computer Company + +// XXX-dap TODO-cleanup should the metadata into a separate, shared trait? + +use crate::api_description::ApiEndpointParameter; +use crate::api_description::{ApiEndpointBodyContentType, ExtensionMode}; +use crate::error::HttpError; +use crate::server::ServerContext; +use crate::RequestContext; + +use async_trait::async_trait; + +/// Metadata associated with an extractor including parameters and whether or not +/// the associated endpoint is paginated. 
+pub struct ExtractorMetadata {
+    pub extension_mode: ExtensionMode,
+    pub parameters: Vec<ApiEndpointParameter>,
+}
+
+/// Extractors that require exclusive access to the underlying `hyper::Request`
+///
+/// These extractors usually need to read the body of the request or else modify
+/// how the server treats the rest of it (e.g., websocket upgrade). There may
+/// be at most one of these associated with any request.
+#[async_trait]
+pub trait ExclusiveExtractor: Send + Sync + Sized {
+    /// Construct an instance of this type from a `RequestContext`.
+    async fn from_request<Context: ServerContext>(
+        rqctx: &RequestContext<Context>,
+    ) -> Result<Self, HttpError>;
+
+    fn metadata(
+        body_content_type: ApiEndpointBodyContentType,
+    ) -> ExtractorMetadata;
+}
+
+/// Extractors that do _not_ require exclusive access to the underlying
+/// `hyper::Request`
+///
+/// These extractors usually look at immutable properties of the request that
+/// are known up front, like the URL. There may be any number of these
+/// associated with any request.
+#[async_trait]
+pub trait SharedExtractor: Send + Sync + Sized {
+    /// Construct an instance of this type from a `RequestContext`.
+    async fn from_request<Context: ServerContext>(
+        rqctx: &RequestContext<Context>,
+    ) -> Result<Self, HttpError>;
+
+    fn metadata(
+        body_content_type: ApiEndpointBodyContentType,
+    ) -> ExtractorMetadata;
+}
+
+// A `SharedExtractor` can always be treated like an `ExclusiveExtractor`.
+#[async_trait]
+impl<S: SharedExtractor> ExclusiveExtractor for S {
+    async fn from_request<Context: ServerContext>(
+        rqctx: &RequestContext<Context>,
+    ) -> Result<S, HttpError> {
+        <S as SharedExtractor>::from_request(rqctx).await
+    }
+
+    fn metadata(
+        body_content_type: ApiEndpointBodyContentType,
+    ) -> ExtractorMetadata {
+        <S as SharedExtractor>::metadata(body_content_type)
+    }
+}
+
+/// Top-level extractor for a given request
+///
+/// During request handling, we wind up needing to call a function with a
+/// variable number of arguments whose types are all extractors (either
+/// `SharedExtractor` or `ExclusiveExtractor`). We achieve this with a separate
+/// type called `RequestExtractor` that looks just like `ExclusiveExtractor`.
+/// We can impl this trait on a tuple of any number of types that themselves
+/// impl `SharedExtractor` or `ExclusiveExtractor` by delegating to each type's
+/// extractor implementation. There may be at most one `ExclusiveExtractor` in
+/// the tuple. We require it to be the last argument just to avoid having to
+/// define the power set of impls.
+///
+/// In practice, `RequestExtractor` is identical to `ExclusiveExtractor`. But
+/// we use them in different ways. `RequestExtractor` is private, only
+/// implemented on tuple types, and only used to kick off extraction.
+/// `ExclusiveExtractor` can be consumer-defined and would generally not be
+/// implemented on tuple types.
+#[async_trait]
+pub trait RequestExtractor: Send + Sync + Sized {
+    /// Construct an instance of this type from a `RequestContext`.
+    async fn from_request<Context: ServerContext>(
+        rqctx: &RequestContext<Context>,
+    ) -> Result<Self, HttpError>;
+
+    fn metadata(
+        body_content_type: ApiEndpointBodyContentType,
+    ) -> ExtractorMetadata;
+}
+
+// Impl for zero-element tuple (used for request handlers with no extractors)
+#[async_trait]
+impl RequestExtractor for () {
+    async fn from_request<Context: ServerContext>(
+        _rqctx: &RequestContext<Context>,
+    ) -> Result<Self, HttpError> {
+        Ok(())
+    }
+
+    fn metadata(
+        _body_content_type: ApiEndpointBodyContentType,
+    ) -> ExtractorMetadata {
+        ExtractorMetadata {
+            extension_mode: ExtensionMode::None,
+            parameters: vec![],
+        }
+    }
+}
+
+// Impl for one-element tuple with an exclusive extractor
+#[async_trait]
+impl<X: ExclusiveExtractor + 'static> RequestExtractor for (X,) {
+    async fn from_request<Context: ServerContext>(
+        rqctx: &RequestContext<Context>,
+    ) -> Result<Self, HttpError> {
+        Ok((X::from_request(rqctx).await?,))
+    }
+
+    fn metadata(
+        body_content_type: ApiEndpointBodyContentType,
+    ) -> ExtractorMetadata {
+        X::metadata(body_content_type)
+    }
+}
+
+// XXX-dap TODO-doc update comment based on the change that uses the fact that
+// SharedExtractor impls ExclusiveExtractor such that the last item in the
+// tuple *must* be an exclusive extractor
+/// Defines implementations of `RequestExtractor` for tuples of one or more
+/// `SharedExtractor` followed by an `ExclusiveExtractor`
+///
+/// As an example, `impl_rqextractor_for_tuple!(S1, S2)` defines an impl of
+/// `RequestExtractor` for tuple `(S1, S2, X)` where `S1: SharedExtractor`,
+/// `S2: SharedExtractor`, and `X: ExclusiveExtractor`, as well as a similar
+/// impl for just `(S1, S2)`.
+macro_rules! impl_rqextractor_for_tuple {
+    ($( $S:ident),+) => {
+
+    // impl RequestExtractor for a tuple of shared extractors with an exclusive extractor
+    #[async_trait]
+    impl< X: ExclusiveExtractor + 'static, $($S: SharedExtractor + 'static,)+ >
+        RequestExtractor
+        for ($($S,)+ X)
+    {
+        async fn from_request<Context: ServerContext>(rqctx: &RequestContext<Context>)
+            -> Result<( $($S,)+ X ), HttpError>
+        {
+            futures::try_join!(
+                $($S::from_request(rqctx),)+
+                X::from_request(rqctx)
+            )
+        }
+
+        fn metadata(_body_content_type: ApiEndpointBodyContentType) -> ExtractorMetadata {
+            #[allow(unused_mut)]
+            let mut extension_mode = ExtensionMode::None;
+            #[allow(unused_mut)]
+            let mut parameters = vec![];
+            $(
+                let mut metadata = $S::metadata(_body_content_type.clone());
+                extension_mode = match (extension_mode, metadata.extension_mode) {
+                    (ExtensionMode::None, x) | (x, ExtensionMode::None) => x,
+                    (x, y) if x != y => {
+                        panic!("incompatible extension modes in tuple: {:?} != {:?}", x, y);
+                    }
+                    (_, x) => x,
+                };
+                parameters.append(&mut metadata.parameters);
+            )+
+
+            let mut metadata = X::metadata(_body_content_type.clone());
+            extension_mode = match (extension_mode, metadata.extension_mode) {
+                (ExtensionMode::None, x) | (x, ExtensionMode::None) => x,
+                (x, y) if x != y => {
+                    panic!("incompatible extension modes in tuple: {:?} != {:?}", x, y);
+                }
+                (_, x) => x,
+            };
+            parameters.append(&mut metadata.parameters);
+
+            ExtractorMetadata { extension_mode, parameters }
+        }
+    }
+}}
+
+// Implement `RequestExtractor` for any tuple consisting of 0-2 shared
+// extractors and exactly one exclusive extractor.
+impl_rqextractor_for_tuple!(S1);
+impl_rqextractor_for_tuple!(S1, S2);
diff --git a/dropshot/src/extractor/mod.rs b/dropshot/src/extractor/mod.rs
index d80799947..721277f85 100644
--- a/dropshot/src/extractor/mod.rs
+++ b/dropshot/src/extractor/mod.rs
@@ -30,78 +30,15 @@ use schemars::schema::SchemaObject;
 use schemars::JsonSchema;
 use serde::de::DeserializeOwned;
 use std::fmt::Debug;
-use std::sync::Arc;
-
-/// `Extractor` defines an interface allowing a type to be constructed from a
-/// `RequestContext`. Unlike most traits, `Extractor` essentially defines only a
-/// constructor function, not instance functions.
-///
-/// The extractors that we provide (`Query`, `Path`, `TypedBody`, `UntypedBody`, and
-/// `WebsocketUpgrade`) implement `Extractor` in order to construct themselves from
-/// the request. For example, `Extractor` is implemented for `Query<Q>` with a
-/// function that reads the query string from the request, parses it, and
-/// constructs a `Query<Q>` with it.
-///
-/// We also define implementations of `Extractor` for tuples of types that
-/// themselves implement `Extractor`. See the implementation of
-/// `HttpRouteHandler` for more on why this needed.
-#[async_trait]
-pub trait Extractor: Send + Sync + Sized {
-    /// Construct an instance of this type from a `RequestContext`.
-    async fn from_request<Context: ServerContext>(
-        rqctx: Arc<RequestContext<Context>>,
-    ) -> Result<Self, HttpError>;
-
-    fn metadata(
-        body_content_type: ApiEndpointBodyContentType,
-    ) -> ExtractorMetadata;
-}
-/// Metadata associated with an extractor including parameters and whether or not
-/// the associated endpoint is paginated.
-pub struct ExtractorMetadata {
-    pub extension_mode: ExtensionMode,
-    pub parameters: Vec<ApiEndpointParameter>,
-}
-
-/// `impl_derived_for_tuple!` defines implementations of `Extractor` for tuples
-/// whose elements themselves implement `Extractor`.
-macro_rules! impl_extractor_for_tuple {
-    ($( $T:ident),*) => {
-    #[async_trait]
-    impl< $($T: Extractor + 'static,)* > Extractor for ($($T,)*)
-    {
-        async fn from_request<Context: ServerContext>(_rqctx: Arc<RequestContext<Context>>)
-            -> Result<( $($T,)* ), HttpError>
-        {
-            futures::try_join!($($T::from_request(Arc::clone(&_rqctx)),)*)
-        }
+mod common;

-        fn metadata(_body_content_type: ApiEndpointBodyContentType) -> ExtractorMetadata {
-            #[allow(unused_mut)]
-            let mut extension_mode = ExtensionMode::None;
-            #[allow(unused_mut)]
-            let mut parameters = vec![];
-            $(
-                let mut metadata = $T::metadata(_body_content_type.clone());
-                extension_mode = match (extension_mode, metadata.extension_mode) {
-                    (ExtensionMode::None, x) | (x, ExtensionMode::None) => x,
-                    (x, y) if x != y => {
-                        panic!("incompatible extension modes in tuple: {:?} != {:?}", x, y);
-                    }
-                    (_, x) => x,
-                };
-                parameters.append(&mut metadata.parameters);
-            )*
-            ExtractorMetadata { extension_mode, parameters }
-        }
-    }
-}}
+pub use common::ExclusiveExtractor;
+pub use common::ExtractorMetadata;
+pub use common::RequestExtractor;
+pub use common::SharedExtractor;

-impl_extractor_for_tuple!();
-impl_extractor_for_tuple!(T1);
-impl_extractor_for_tuple!(T1, T2);
-impl_extractor_for_tuple!(T1, T2, T3);
+// XXX-dap move these definitions to separate files?

 // Query: query string extractor

@@ -140,19 +77,19 @@ where
     }
 }

-// The `Extractor` implementation for Query<QueryType> describes how to construct
-// an instance of `Query<QueryType>` from an HTTP request: namely, by parsing
-// the query string to an instance of `QueryType`.
+// The `SharedExtractor` implementation for Query<QueryType> describes how to
+// construct an instance of `Query<QueryType>` from an HTTP request: namely, by
+// parsing the query string to an instance of `QueryType`.
 // TODO-cleanup We shouldn't have to use the "'static" bound on `QueryType`
 // here. It seems like we ought to be able to use 'async_trait, but that
 // doesn't seem to be defined.
 #[async_trait]
-impl<QueryType> Extractor for Query<QueryType>
+impl<QueryType> SharedExtractor for Query<QueryType>
 where
     QueryType: JsonSchema + DeserializeOwned + Send + Sync + 'static,
 {
     async fn from_request<Context: ServerContext>(
-        rqctx: Arc<RequestContext<Context>>,
+        rqctx: &RequestContext<Context>,
     ) -> Result<Query<QueryType>, HttpError> {
         let request = rqctx.request.lock().await;
         http_request_load_query(&request)
@@ -183,16 +120,16 @@ impl<PathType> Path<PathType> {
     }
 }

-// The `Extractor` implementation for Path<PathType> describes how to
-// construct an instance of `Path<PathType>` from an HTTP request: namely, by
-// extracting parameters from the query string.
+// The `SharedExtractor` implementation for Path<PathType> describes how to
+// construct an instance of `Path<PathType>` from an HTTP request: namely, by
+// extracting parameters from the query string.
 #[async_trait]
-impl<PathType> Extractor for Path<PathType>
+impl<PathType> SharedExtractor for Path<PathType>
 where
     PathType: DeserializeOwned + JsonSchema + Send + Sync + 'static,
 {
     async fn from_request<Context: ServerContext>(
-        rqctx: Arc<RequestContext<Context>>,
+        rqctx: &RequestContext<Context>,
     ) -> Result<Path<PathType>, HttpError> {
         let params: PathType = http_extract_path_params(&rqctx.path_variables)?;
         Ok(Path { inner: params })
@@ -288,7 +225,7 @@ impl
     /// Given an HTTP request, attempt to read the body, parse it according
     /// to the content type, and deserialize it to an instance of `BodyType`.
     async fn http_request_load_body<Context: ServerContext, BodyType>(
-        rqctx: Arc<RequestContext<Context>>,
+        rqctx: &RequestContext<Context>,
     ) -> Result<TypedBody<BodyType>, HttpError>
     where
         BodyType: JsonSchema + DeserializeOwned + Send + Sync,
@@ -352,19 +289,19 @@ where
     Ok(TypedBody { inner: content })
 }

-// The `Extractor` implementation for TypedBody<BodyType> describes how to
-// construct an instance of `TypedBody<BodyType>` from an HTTP request: namely,
-// by reading the request body and parsing it as JSON into type `BodyType`.
-// TODO-cleanup We shouldn't have to use the "'static" bound on `BodyType` here.
-// It seems like we ought to be able to use 'async_trait, but that doesn't seem
-// to be defined.
+// The `ExclusiveExtractor` implementation for TypedBody<BodyType> describes how
+// to construct an instance of `TypedBody<BodyType>` from an HTTP request:
+// namely, by reading the request body and parsing it as JSON into type
+// `BodyType`. TODO-cleanup We shouldn't have to use the "'static" bound on
+// `BodyType` here. It seems like we ought to be able to use 'async_trait, but
+// that doesn't seem to be defined.
 #[async_trait]
-impl<BodyType> Extractor for TypedBody<BodyType>
+impl<BodyType> ExclusiveExtractor for TypedBody<BodyType>
 where
     BodyType: JsonSchema + DeserializeOwned + Send + Sync + 'static,
 {
     async fn from_request<Context: ServerContext>(
-        rqctx: Arc<RequestContext<Context>>,
+        rqctx: &RequestContext<Context>,
     ) -> Result<TypedBody<BodyType>, HttpError> {
         http_request_load_body(rqctx).await
     }
@@ -415,9 +352,9 @@ impl UntypedBody {
 }

 #[async_trait]
-impl Extractor for UntypedBody {
+impl ExclusiveExtractor for UntypedBody {
     async fn from_request<Context: ServerContext>(
-        rqctx: Arc<RequestContext<Context>>,
+        rqctx: &RequestContext<Context>,
     ) -> Result<UntypedBody, HttpError> {
         let server = &rqctx.server;
         let mut request = rqctx.request.lock().await;
diff --git a/dropshot/src/handler.rs b/dropshot/src/handler.rs
index 5f03c8527..fce5574a5 100644
--- a/dropshot/src/handler.rs
+++ b/dropshot/src/handler.rs
@@ -32,7 +32,7 @@
 //! OpenAPI document generation.
 use super::error::HttpError;
-use super::extractor::Extractor;
+use super::extractor::RequestExtractor;
 use super::http_util::CONTENT_TYPE_JSON;
 use super::http_util::CONTENT_TYPE_OCTET_STREAM;
 use super::server::DropshotState;
@@ -166,7 +166,7 @@
 pub trait HttpHandlerFunc<Context, FuncParams, ResponseType>:
     Send + Sync + 'static
 where
     Context: ServerContext,
-    FuncParams: Extractor,
+    FuncParams: RequestExtractor,
     ResponseType: HttpResponse + Send + Sync + 'static,
 {
     async fn handle_request(
@@ -267,7 +267,8 @@ macro_rules! impl_HttpHandlerFunc_for_func_with_params {
             FutureType: Future<Output = Result<ResponseType, HttpError>>
                 + Send + 'static,
             ResponseType: HttpResponse + Send + Sync + 'static,
-            $($T: Extractor + Send + Sync + 'static,)*
+            ($($T,)*): RequestExtractor,
+            $($T: Send + Sync + 'static,)*
         {
             async fn handle_request(
                 &self,
@@ -318,7 +319,7 @@ pub struct HttpRouteHandler<Context, HandlerType, FuncParams, ResponseType>
 where
     Context: ServerContext,
     HandlerType: HttpHandlerFunc<Context, FuncParams, ResponseType>,
-    FuncParams: Extractor,
+    FuncParams: RequestExtractor,
     ResponseType: HttpResponse + Send + Sync + 'static,
 {
     /// the actual HttpHandlerFunc used to implement this route
@@ -341,7 +342,7 @@ impl<Context, HandlerType, FuncParams, ResponseType> Debug
 where
     Context: ServerContext,
     HandlerType: HttpHandlerFunc<Context, FuncParams, ResponseType>,
-    FuncParams: Extractor,
+    FuncParams: RequestExtractor,
     ResponseType: HttpResponse + Send + Sync + 'static,
 {
     fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult {
@@ -355,7 +356,7 @@ impl<Context, HandlerType, FuncParams, ResponseType> RouteHandler<Context>
 where
     Context: ServerContext,
     HandlerType: HttpHandlerFunc<Context, FuncParams, ResponseType>,
-    FuncParams: Extractor + 'static,
+    FuncParams: RequestExtractor + 'static,
     ResponseType: HttpResponse + Send + Sync + 'static,
 {
     fn label(&self) -> &str {
@@ -371,10 +372,10 @@ where
     // arguments to the handler function. This could be `()`, `(Query<Q>)`,
     // `(TypedBody<T>)`, `(Query<Q>, TypedBody<T>)`, or any other
     // combination of extractors we decide to support in the future.
-    // Whatever it is must implement `Extractor`, which means we can invoke
-    // `Extractor::from_request()` to construct the argument tuple,
-    // generally from information available in the `request` object. We
-    // pass this down to the `HttpHandlerFunc`, for which there's a
+    // Whatever it is must implement `RequestExtractor`, which means we can
+    // invoke `RequestExtractor::from_request()` to construct the argument
+    // tuple, generally from information available in the `request` object.
+    // We pass this down to the `HttpHandlerFunc`, for which there's a
     // different implementation for each value of `FuncParams`. The
     // `HttpHandlerFunc` for each `FuncParams` just pulls the arguments out
     // of the `funcparams` tuple and makes them actual function arguments
     // to the actual handler function. From this point down, all of this is
     // resolved statically.
         let rqctx = Arc::new(rqctx_raw);
-        let funcparams = Extractor::from_request(Arc::clone(&rqctx)).await?;
+        let funcparams = RequestExtractor::from_request(&rqctx).await?;
         let future = self.handler.handle_request(rqctx, funcparams);
         future.await
     }
@@ -396,7 +397,7 @@ impl<Context, HandlerType, FuncParams, ResponseType>
 where
     Context: ServerContext,
     HandlerType: HttpHandlerFunc<Context, FuncParams, ResponseType>,
-    FuncParams: Extractor + 'static,
+    FuncParams: RequestExtractor + 'static,
     ResponseType: HttpResponse + Send + Sync + 'static,
 {
     /// Given a function matching one of the supported API handler function
diff --git a/dropshot/src/lib.rs b/dropshot/src/lib.rs
index 2a3803c16..a8788f01c 100644
--- a/dropshot/src/lib.rs
+++ b/dropshot/src/lib.rs
@@ -629,10 +629,11 @@ pub use config::ConfigDropshot;
 pub use config::ConfigTls;
 pub use error::HttpError;
 pub use error::HttpErrorResponseBody;
-pub use extractor::Extractor;
+pub use extractor::ExclusiveExtractor;
 pub use extractor::ExtractorMetadata;
 pub use extractor::Path;
 pub use extractor::Query;
+pub use extractor::SharedExtractor;
 pub use extractor::TypedBody;
 pub use extractor::UntypedBody;
 pub use handler::http_response_found;
diff --git a/dropshot/src/websocket.rs b/dropshot/src/websocket.rs
index c7908fe74..3a01fd817 100644
--- a/dropshot/src/websocket.rs
+++ b/dropshot/src/websocket.rs
@@ -8,8 +8,8 @@
 use crate::api_description::ExtensionMode;
 use crate::{
-    ApiEndpointBodyContentType, Extractor, ExtractorMetadata, HttpError,
-    RequestContext, ServerContext,
+    ApiEndpointBodyContentType, ExclusiveExtractor, ExtractorMetadata,
+    HttpError, RequestContext, ServerContext,
 };
 use async_trait::async_trait;
 use base64::Engine;
@@ -23,10 +23,10 @@ use serde_json::json;
 use sha1::{Digest, Sha1};
 use slog::Logger;
 use std::future::Future;
-use std::sync::Arc;

-/// WebsocketUpgrade is an Extractor used to upgrade and handle an HTTP request
-/// as a websocket when present in a Dropshot endpoint's function arguments.
+/// WebsocketUpgrade is an ExclusiveExtractor used to upgrade and handle an HTTP
+/// request as a websocket when present in a Dropshot endpoint's function
+/// arguments.
 ///
 /// The consumer of this must call [WebsocketUpgrade::handle] for the connection
 /// to be upgraded. (This is done for you by `#[channel]`.)
@@ -80,13 +80,13 @@ fn derive_accept_key(request_key: &[u8]) -> String {
     base64::engine::general_purpose::STANDARD.encode(&sha1.finalize())
 }

-/// This `Extractor` implementation constructs an instance of `WebsocketUpgrade`
-/// from an HTTP request, and returns an error if the given request does not
-/// contain websocket upgrade headers.
+/// This `ExclusiveExtractor` implementation constructs an instance of
+/// `WebsocketUpgrade` from an HTTP request, and returns an error if the given
+/// request does not contain websocket upgrade headers.
 #[async_trait]
-impl Extractor for WebsocketUpgrade {
+impl ExclusiveExtractor for WebsocketUpgrade {
     async fn from_request<Context: ServerContext>(
-        rqctx: Arc<RequestContext<Context>>,
+        rqctx: &RequestContext<Context>,
     ) -> Result<Self, HttpError> {
         let request = &mut *rqctx.request.lock().await;
@@ -192,8 +192,8 @@ impl WebsocketUpgrade {
     /// #[dropshot::endpoint { method = GET, path = "/my/ws/endpoint/{id}" }]
     /// async fn my_ws_endpoint(
     ///     rqctx: std::sync::Arc<dropshot::RequestContext<()>>,
-    ///     websock: dropshot::WebsocketUpgrade,
     ///     id: dropshot::Path<String>,
+    ///     websock: dropshot::WebsocketUpgrade,
     /// ) -> dropshot::WebsocketEndpointResult {
     ///     let logger = rqctx.log.new(slog::o!());
     ///     websock.handle(move |upgraded| async move {
@@ -297,7 +297,9 @@ impl JsonSchema for WebsocketUpgrade {
 mod tests {
     use crate::router::HttpRouter;
     use crate::server::{DropshotState, ServerConfig};
-    use crate::{Extractor, HttpError, RequestContext, WebsocketUpgrade};
+    use crate::{
+        ExclusiveExtractor, HttpError, RequestContext, WebsocketUpgrade,
+    };
     use futures::lock::Mutex;
     use http::Request;
     use hyper::Body;
@@ -308,7 +310,7 @@ mod tests {
     async fn ws_upg_from_mock_rqctx() -> Result<WebsocketUpgrade, HttpError> {
         let log =
             slog::Logger::root(slog::Discard, slog::o!()).new(slog::o!());
-        let fut = WebsocketUpgrade::from_request(Arc::new(RequestContext {
+        let rqctx = RequestContext {
             server: Arc::new(DropshotState {
                 private: (),
                 config: ServerConfig {
@@ -340,7 +342,8 @@ mod tests {
             body_content_type: Default::default(),
             request_id: "".to_string(),
             log: log.clone(),
-        }));
+        };
+        let fut = WebsocketUpgrade::from_request(&rqctx);
         tokio::time::timeout(Duration::from_secs(1), fut)
             .await
             .expect("Deadlocked in WebsocketUpgrade constructor")
diff --git a/dropshot/tests/fail/bad_endpoint3.stderr b/dropshot/tests/fail/bad_endpoint3.stderr
index 1c0a1ce47..f6b90e23c 100644
--- a/dropshot/tests/fail/bad_endpoint3.stderr
+++ b/dropshot/tests/fail/bad_endpoint3.stderr
@@ -1,32 +1,3 @@
-error[E0277]: the trait bound `String: Extractor` is not satisfied
-  --> tests/fail/bad_endpoint3.rs:17:12
-   |
-17 |     param: String,
-   |            ^^^^^^ the trait `Extractor` is not implemented for `String`
-   |
-   = help: the following other types implement trait `Extractor`:
-             ()
-             (T1, T2)
-             (T1, T2, T3)
-             (T1,)
-             TypedBody<BodyType>
-             UntypedBody
-             WebsocketUpgrade
-             dropshot::Path<PathType>
-             dropshot::Query<QueryType>
-note: required by a bound in `need_extractor`
-  --> tests/fail/bad_endpoint3.rs:11:1
-   |
-11 | / #[endpoint {
-12 | |     method = GET,
-13 | |     path = "/test",
-14 | | }]
-   | |__^ required by this bound in `need_extractor`
-...
-17 |     param: String,
-   |            ------ required by a bound in this
-   = note: this error originates in the attribute macro `endpoint` (in Nightly builds, run with -Z macro-backtrace for more info)
-
 error[E0277]: the trait bound `fn(Arc>, String) -> impl Future, HttpError>> { for ApiEndpoint<> as RequestContextArgument>::Context>>::from::bad_endpoint}: dropshot::handler::HttpHandlerFunc<_, _, _>` is not satisfied
   --> tests/fail/bad_endpoint3.rs:15:10
    |
diff --git a/dropshot/tests/test_demo.rs b/dropshot/tests/test_demo.rs
index aabeb9756..acfd837a4 100644
--- a/dropshot/tests/test_demo.rs
+++ b/dropshot/tests/test_demo.rs
@@ -966,8 +966,8 @@ pub struct DemoUntypedQuery {
 }]
 async fn demo_handler_untyped_body(
     _rqctx: Arc<RequestContext<usize>>,
-    body: UntypedBody,
     query: Query<DemoUntypedQuery>,
+    body: UntypedBody,
 ) -> Result<Response<Body>, HttpError> {
     let nbytes = body.as_bytes().len();
     let as_utf8 = if query.into_inner().parse_str.unwrap_or(false) {
diff --git a/dropshot_endpoint/src/lib.rs b/dropshot_endpoint/src/lib.rs
index a9e2cb3d2..c83636b8b 100644
--- a/dropshot_endpoint/src/lib.rs
+++ b/dropshot_endpoint/src/lib.rs
@@ -173,7 +173,8 @@ fn do_channel(
         ChannelProtocol::WEBSOCKETS => {
             // here we construct a wrapper function and mutate the arguments a bit
             // for the outer layer: we replace WebsocketConnection, which is not
-            // an extractor, with WebsocketUpgrade, which is.
+            // an extractor, with WebsocketUpgrade, which is. We also move it
+            // to the end.

             let ItemFnForSignature { attrs, vis, mut sig, _block: body } =
                 syn::parse2(item)?;
@@ -219,6 +220,13 @@ fn do_channel(
                 ));
             }

+            // XXX-dap TODO-cleanup This is a gross way to do it.
+            let mut input_pairs =
+                sig.inputs.clone().into_pairs().collect::<Vec<_>>();
+            let second_pair = input_pairs.remove(1);
+            input_pairs.push(second_pair);
+            sig.inputs = input_pairs.into_iter().collect();
+
             sig.output =
                 syn::parse2(quote!(-> dropshot::WebsocketEndpointResult))?;

@@ -425,12 +433,12 @@ fn do_endpoint_inner(
         .inputs
         .iter()
         .enumerate()
-        .map(|(index, arg)| {
+        .filter_map(|(index, arg)| {
             match arg {
                 syn::FnArg::Receiver(_) => {
                     // The compiler failure here is already comprehensible.
                     arg_is_receiver = true;
-                    quote! {}
+                    Some(quote! {})
                 }
                 syn::FnArg::Typed(pat) => {
                     let span = pat.ty.span();
@@ -440,23 +448,15 @@ fn do_endpoint_inner(
                     // The first parameter must be an Arc<RequestContext<T>>
                     // and fortunately we already have a trait that we can
                     // use to validate this type.
-                    quote_spanned! { span=>
+                    Some(quote_spanned! { span=>
                         const _: fn() = || {
                             struct NeedRequestContext(<#ty as #dropshot::RequestContextArgument>::Context);
                         };
-                    }
+                    })
                 } else {
-                    // Subsequent parameters must implement Extractor.
-                    quote_spanned! { span=>
-                        const _: fn() = || {
-                            fn need_extractor<T>()
-                            where
-                                T: ?Sized + #dropshot::Extractor,
-                            {
-                            }
-                            need_extractor::<#ty>();
-                        };
-                    }
+                    // XXX-dap the remaining stuff must together impl
+                    // `RequestExtractor`
+                    None
                 }
             }
         }
@@ -935,14 +935,6 @@ mod tests {
             const _: fn() = || {
                 struct NeedRequestContext(<Arc<RequestContext<()> > as dropshot::RequestContextArgument>::Context) ;
             };
-            const _: fn() = || {
-                fn need_extractor<T>()
-                where
-                    T: ?Sized + dropshot::Extractor,
-                {
-                }
-                need_extractor::<Query<QueryParams> >();
-            };
             const _: fn() = || {
                 trait ResultTrait {
                     type T;
@@ -1041,14 +1033,6 @@ mod tests {
             const _: fn() = || {
                 struct NeedRequestContext(<Arc<RequestContext<()> > as dropshot::RequestContextArgument>::Context) ;
             };
-            const _: fn() = || {
-                fn need_extractor<T>()
-                where
-                    T: ?Sized + dropshot::Extractor,
-                {
-                }
-                need_extractor::<Query<QueryParams> >();
-            };
             const _: fn() = || {
                 trait ResultTrait {
                     type T;

From ec810eaccc43f4bea3c8ffd28c3d9f245ec57f13 Mon Sep 17 00:00:00 2001
From: David Pacheco
Date: Mon, 9 Jan 2023 16:46:43 -0800
Subject: [PATCH 18/47] update changelog with some todo items

---
 CHANGELOG.adoc | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/CHANGELOG.adoc b/CHANGELOG.adoc
index b78442ca1..5436afa3c 100644
--- a/CHANGELOG.adoc
+++ b/CHANGELOG.adoc
@@ -18,6 +18,11 @@ https://github.com/oxidecomputer/dropshot/compare/v0.8.0\...HEAD[Full list of co
 === Breaking Changes

 // XXX-dap TODO need update here
+// Extractor -> {Shared,Exclusive}Extractor
+// type signature of from_request() changed
+// both: accept &RequestContext<T> instead of Arc<RequestContext<T>>
+// now: no other change.  future: exclusive one will get a hyper::Request
+// exclusive extractors must appear last in the argument list

 * https://github.com/oxidecomputer/dropshot/pull/504[#504] Dropshot allows TLS configuration to be supplied either by path or as bytes. For compatibility, the `AsFile` variant of `ConfigTls` contains the `cert_file` and `key_file` fields, and may be used similarly to the old variant.
 * https://github.com/oxidecomputer/dropshot/pull/502[#502] Dropshot exposes a `refresh_tls` method to update the TLS certificates being used by a running server. If you previously tried to access `DropshotState.tls`, you can access the `DropshotState.using_tls()` method instead.
 * https://github.com/oxidecomputer/dropshot/pull/540[#540] `ConfigDropshot` now uses a [`camino::Utf8PathBuf`](https://docs.rs/camino/1.1.1/camino/struct.Utf8PathBuf.html) for its file path. There is no change to the configuration format itself, just its representation in Rust.

From 29c6b473e3f9ec51d91611b55d01858b2c4e7490 Mon Sep 17 00:00:00 2001
From: David Pacheco
Date: Wed, 11 Jan 2023 10:25:37 -0800
Subject: [PATCH 19/47] fix copyrights

---
 dropshot/examples/request-headers.rs | 2 +-
 dropshot/src/extractor/common.rs     | 2 +-
 dropshot/src/extractor/mod.rs        | 2 +-
 dropshot/src/handler.rs              | 2 +-
 dropshot/src/lib.rs                  | 2 +-
 dropshot/src/pagination.rs           | 2 +-
 dropshot/src/type_util.rs            | 2 +-
 dropshot/src/websocket.rs            | 2 +-
 dropshot/tests/test_demo.rs          | 2 +-
 dropshot_endpoint/src/lib.rs         | 2 +-
 10 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/dropshot/examples/request-headers.rs b/dropshot/examples/request-headers.rs
index b0ceba7a4..104c75460 100644
--- a/dropshot/examples/request-headers.rs
+++ b/dropshot/examples/request-headers.rs
@@ -1,4 +1,4 @@
-// Copyright 2022 Oxide Computer Company
+// Copyright 2023 Oxide Computer Company

 //! Example use of Dropshot with request headers
 //!
diff --git a/dropshot/src/extractor/common.rs b/dropshot/src/extractor/common.rs
index 0bfd294ff..00dc89d55 100644
--- a/dropshot/src/extractor/common.rs
+++ b/dropshot/src/extractor/common.rs
@@ -1,4 +1,4 @@
-// Copyright 2022 Oxide Computer Company
+// Copyright 2023 Oxide Computer Company

 // XXX-dap TODO-cleanup should the metadata move into a separate, shared trait?
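(To make the changelog notes from patch 18 concrete, a hypothetical endpoint under the new rules; `Widget`, `WidgetQuery`, and the route are invented for illustration. Shared extractors such as `Query` may appear anywhere before the final argument; the single exclusive extractor, here `TypedBody`, must come last. Note that handler signatures themselves are unchanged at this point in the series; only the extractors' `from_request` now borrows the `RequestContext` instead of taking an `Arc`.)

    use std::sync::Arc;

    use dropshot::{
        endpoint, HttpError, HttpResponseOk, Query, RequestContext, TypedBody,
    };
    use schemars::JsonSchema;
    use serde::{Deserialize, Serialize};

    #[derive(Deserialize, JsonSchema)]
    struct WidgetQuery {
        verbose: Option<bool>,
    }

    #[derive(Deserialize, Serialize, JsonSchema)]
    struct Widget {
        name: String,
    }

    #[endpoint { method = PUT, path = "/widgets" }]
    async fn put_widget(
        _rqctx: Arc<RequestContext<()>>,
        _query: Query<WidgetQuery>, // shared: may precede the body
        body: TypedBody<Widget>,    // exclusive: must be the last argument
    ) -> Result<HttpResponseOk<Widget>, HttpError> {
        Ok(HttpResponseOk(body.into_inner()))
    }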
diff --git a/dropshot/src/extractor/mod.rs b/dropshot/src/extractor/mod.rs index 721277f85..b32fde52e 100644 --- a/dropshot/src/extractor/mod.rs +++ b/dropshot/src/extractor/mod.rs @@ -1,4 +1,4 @@ -// Copyright 2022 Oxide Computer Company +// Copyright 2023 Oxide Computer Company //! Extractor trait //! diff --git a/dropshot/src/handler.rs b/dropshot/src/handler.rs index fce5574a5..39036af26 100644 --- a/dropshot/src/handler.rs +++ b/dropshot/src/handler.rs @@ -1,4 +1,4 @@ -// Copyright 2020 Oxide Computer Company +// Copyright 2023 Oxide Computer Company //! Interface for implementing HTTP endpoint handler functions. //! //! For information about supported endpoint function signatures, argument types, diff --git a/dropshot/src/lib.rs b/dropshot/src/lib.rs index a8788f01c..5cd3a68a4 100644 --- a/dropshot/src/lib.rs +++ b/dropshot/src/lib.rs @@ -1,4 +1,4 @@ -// Copyright 2020 Oxide Computer Company +// Copyright 2023 Oxide Computer Company //! Dropshot is a general-purpose crate for exposing REST APIs from a Rust //! program. Planned highlights include: //! diff --git a/dropshot/src/pagination.rs b/dropshot/src/pagination.rs index 76e4f6dae..936231551 100644 --- a/dropshot/src/pagination.rs +++ b/dropshot/src/pagination.rs @@ -1,4 +1,4 @@ -// Copyright 2022 Oxide Computer Company +// Copyright 2023 Oxide Computer Company //! Detailed end-user documentation for pagination lives in the Dropshot top- //! level block comment. Here we discuss some of the design choices. diff --git a/dropshot/src/type_util.rs b/dropshot/src/type_util.rs index 5c1495dea..fc6405f5a 100644 --- a/dropshot/src/type_util.rs +++ b/dropshot/src/type_util.rs @@ -1,4 +1,4 @@ -// Copyright 2021 Oxide Computer Company +// Copyright 2023 Oxide Computer Company //! Utility functions for working with JsonSchema types. diff --git a/dropshot/src/websocket.rs b/dropshot/src/websocket.rs index 3a01fd817..1db1b1306 100644 --- a/dropshot/src/websocket.rs +++ b/dropshot/src/websocket.rs @@ -1,4 +1,4 @@ -// Copyright 2022 Oxide Computer Company +// Copyright 2023 Oxide Computer Company //! Implements websocket upgrades as an Extractor for use in API route handler //! parameters to indicate that the given endpoint is meant to be upgraded to //! a websocket. diff --git a/dropshot/tests/test_demo.rs b/dropshot/tests/test_demo.rs index acfd837a4..b28c7856f 100644 --- a/dropshot/tests/test_demo.rs +++ b/dropshot/tests/test_demo.rs @@ -1,4 +1,4 @@ -// Copyright 2020 Oxide Computer Company +// Copyright 2023 Oxide Computer Company //! Test cases for the "demo" handlers. These handlers exercise various //! supported configurations of the HTTP handler interface. We exercise them //! here to make sure that even if these aren't used at a given point, they still diff --git a/dropshot_endpoint/src/lib.rs b/dropshot_endpoint/src/lib.rs index c83636b8b..546f22460 100644 --- a/dropshot_endpoint/src/lib.rs +++ b/dropshot_endpoint/src/lib.rs @@ -1,4 +1,4 @@ -// Copyright 2022 Oxide Computer Company +// Copyright 2023 Oxide Computer Company //! This package defines macro attributes associated with HTTP handlers. These //! 
attributes are used both to define an HTTP API and to generate an OpenAPI

From 16d7ac33b3f329c1657b2359dcde98c2f4adde08 Mon Sep 17 00:00:00 2001
From: David Pacheco
Date: Wed, 11 Jan 2023 10:43:38 -0800
Subject: [PATCH 20/47] review feedback

---
 dropshot/src/extractor/common.rs | 34 +++++++++++++++++++-------------
 dropshot_endpoint/src/lib.rs     |  2 +-
 2 files changed, 21 insertions(+), 15 deletions(-)

diff --git a/dropshot/src/extractor/common.rs b/dropshot/src/extractor/common.rs
index 00dc89d55..71eb886bb 100644
--- a/dropshot/src/extractor/common.rs
+++ b/dropshot/src/extractor/common.rs
@@ -70,21 +70,27 @@ impl<S: SharedExtractor> ExclusiveExtractor for S {

 /// Top-level extractor for a given request
 ///
-/// During request handling, we wind up needing to call a function with a
-/// variable number of arguments whose types are all extractors (either
-/// `SharedExtractor` or `ExclusiveExtractor`). We achieve this with a separate
-/// type called `RequestExtractor` that looks just like `ExclusiveExtractor`.
-/// We can impl this trait on a tuple of any number of types that themselves
-/// impl `SharedExtractor` or `ExclusiveExtractor` by delegating to each type's
-/// extractor implementation. There may be at most one `ExclusiveExtractor` in
-/// the tuple. We require it to be the last argument just to avoid having to
-/// define the power set of impls.
+/// During request handling, we must find and invoke the appropriate
+/// consumer-defined handler function. While each of these functions takes a
+/// fixed number of arguments, different handler functions may take a different
+/// number of arguments. The arguments that can vary between handler functions
+/// are all extractors, meaning that they impl `SharedExtractor` or
+/// `ExclusiveExtractor`.
 ///
-/// In practice, `RequestExtractor` is identical to `ExclusiveExtractor`. But
-/// we use them in different ways. `RequestExtractor` is private, only
-/// implemented on tuple types, and only used to kick off extraction.
-/// `ExclusiveExtractor` can be consumer-defined and would generally not be
-/// implemented on tuple types.
+/// This trait helps us invoke various handler functions uniformly, despite them
+/// accepting different arguments. To achieve this, we impl this trait for all
+/// supported _tuples_ of argument types, which is essentially 0 or more
+/// `SharedExtractor`s followed by at most one `ExclusiveExtractor`. This impl
+/// essentially does the same thing as any other extractor, and it does it by
+/// delegating to the impls of each tuple member.
+///
+/// In practice, the trait `RequestExtractor` is identical to
+/// `ExclusiveExtractor` and we could use `ExclusiveExtractor` directly. But
+/// it's clearer to use distinct types, since they're used differently. To
+/// summarize: `RequestExtractor` is private, only implemented on tuple types,
+/// and only used to kick off extraction from the top level.
+/// `ExclusiveExtractor` is public, implementing types can be consumer-defined,
+/// and it would generally not be implemented on tuple types.
 #[async_trait]
 pub trait RequestExtractor: Send + Sync + Sized {
     /// Construct an instance of this type from a `RequestContext`.
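(A crate-internal sketch of what the tuple scheme described above accepts; `RequestExtractor` is private, so this is illustrative only, and it assumes `Query`, `TypedBody`, `JsonSchema`, and `DeserializeOwned` are in scope. `Q` and `B` stand for any types usable with `Query`/`TypedBody`.)

    fn assert_request_extractor<T: RequestExtractor>() {}

    fn sketch<Q, B>()
    where
        Q: JsonSchema + DeserializeOwned + Send + Sync + 'static,
        B: JsonSchema + DeserializeOwned + Send + Sync + 'static,
    {
        // No extractors at all: the zero-element tuple impl.
        assert_request_extractor::<()>();
        // One shared extractor alone: Query is promoted to an
        // ExclusiveExtractor by the blanket impl, then matched by (X,).
        assert_request_extractor::<(Query<Q>,)>();
        // Shared extractors first, the one exclusive extractor last.
        assert_request_extractor::<(Query<Q>, TypedBody<B>)>();
        // (TypedBody<B>, Query<Q>) gets no impl: TypedBody is not a
        // SharedExtractor, so an exclusive extractor cannot come first.
    }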
diff --git a/dropshot_endpoint/src/lib.rs b/dropshot_endpoint/src/lib.rs
index 546f22460..f6111a885 100644
--- a/dropshot_endpoint/src/lib.rs
+++ b/dropshot_endpoint/src/lib.rs
@@ -171,7 +171,7 @@ fn do_channel(
     } = from_tokenstream(&attr)?;
     match protocol {
         ChannelProtocol::WEBSOCKETS => {
-            // here we construct a wrapper function and mutate the arguments a bit
+            // Here we construct a wrapper function and mutate the arguments a bit
             // for the outer layer: we replace WebsocketConnection, which is not
             // an extractor, with WebsocketUpgrade, which is. We also move it
             // to the end.

From c228e40140a8bef0cc5109f4824c5892135cd410 Mon Sep 17 00:00:00 2001
From: David Pacheco
Date: Wed, 11 Jan 2023 11:12:09 -0800
Subject: [PATCH 21/47] merge with "main"

---
 dropshot/src/api_description.rs | 580 +-------------------------------
 dropshot/src/schema_util.rs     | 584 ++++++++++++++++++++++++++++++++
 2 files changed, 585 insertions(+), 579 deletions(-)

diff --git a/dropshot/src/api_description.rs b/dropshot/src/api_description.rs
index 7f861d3a5..30d91a785 100644
--- a/dropshot/src/api_description.rs
+++ b/dropshot/src/api_description.rs
@@ -8,6 +8,7 @@ use crate::handler::RouteHandler;
 use crate::router::route_path_to_segments;
 use crate::router::HttpRouter;
 use crate::router::PathSegment;
+use crate::schema_util::j2oas_schema;
 use crate::server::ServerContext;
 use crate::type_util::type_is_scalar;
 use crate::type_util::type_is_string_enum;
@@ -901,440 +902,6 @@ fn is_empty(schema: &schemars::schema::Schema) -> bool {
     false
 }

-/// Convert from JSON Schema into OpenAPI.
-// TODO Initially this seemed like it was going to be a win, but the versions
-// of JSON Schema that the schemars and openapiv3 crates adhere to are just
-// different enough to make the conversion a real pain in the neck. A better
-// approach might be a derive(OpenAPI)-like thing, or even a generic
-// derive(schema) that we could then marshall into OpenAPI.
-// The schemars crate also seems a bit inflexible when it comes to how the
-// schema is generated wrt references vs. inline types.
-fn j2oas_schema(
-    name: Option<&String>,
-    schema: &schemars::schema::Schema,
-) -> openapiv3::ReferenceOr<openapiv3::Schema> {
-    match schema {
-        // The permissive, "match anything" schema. We'll typically see this
-        // when consumers use a type such as serde_json::Value.
-        schemars::schema::Schema::Bool(true) => {
-            openapiv3::ReferenceOr::Item(openapiv3::Schema {
-                schema_data: openapiv3::SchemaData::default(),
-                schema_kind: openapiv3::SchemaKind::Any(
-                    openapiv3::AnySchema::default(),
-                ),
-            })
-        }
-        schemars::schema::Schema::Bool(false) => {
-            panic!("We don't expect to see a schema that matches the null set")
-        }
-        schemars::schema::Schema::Object(obj) => j2oas_schema_object(name, obj),
-    }
-}
-
-fn j2oas_schema_object(
-    name: Option<&String>,
-    obj: &schemars::schema::SchemaObject,
-) -> openapiv3::ReferenceOr<openapiv3::Schema> {
-    if let Some(reference) = &obj.reference {
-        return openapiv3::ReferenceOr::Reference {
-            reference: reference.clone(),
-        };
-    }
-
-    let ty = match &obj.instance_type {
-        Some(schemars::schema::SingleOrVec::Single(ty)) => Some(ty.as_ref()),
-        Some(schemars::schema::SingleOrVec::Vec(_)) => {
-            panic!(
-                "a type array is unsupported by openapiv3:\n{}",
-                serde_json::to_string_pretty(obj)
-                    .unwrap_or_else(|_| "<can't serialize>".to_string())
-            )
-        }
-        None => None,
-    };
-
-    let kind = match (ty, &obj.subschemas) {
-        (Some(schemars::schema::InstanceType::Null), None) => {
-            openapiv3::SchemaKind::Type(openapiv3::Type::String(
-                openapiv3::StringType {
-                    enumeration: vec![None],
-                    ..Default::default()
-                },
-            ))
-        }
-        (Some(schemars::schema::InstanceType::Boolean), None) => {
-            openapiv3::SchemaKind::Type(openapiv3::Type::Boolean {})
-        }
-        (Some(schemars::schema::InstanceType::Object), None) => {
-            j2oas_object(&obj.object)
-        }
-        (Some(schemars::schema::InstanceType::Array), None) => {
-            j2oas_array(&obj.array)
-        }
-        (Some(schemars::schema::InstanceType::Number), None) => {
-            j2oas_number(&obj.format, &obj.number, &obj.enum_values)
-        }
-        (Some(schemars::schema::InstanceType::String), None) => {
-            j2oas_string(&obj.format, &obj.string, &obj.enum_values)
-        }
-        (Some(schemars::schema::InstanceType::Integer), None) => {
-            j2oas_integer(&obj.format, &obj.number, &obj.enum_values)
-        }
-        (None, Some(subschema)) => j2oas_subschemas(subschema),
-        (None, None) => {
-            openapiv3::SchemaKind::Any(openapiv3::AnySchema::default())
-        }
-        (Some(_), Some(_)) => panic!(
-            "a schema can't have both a type and subschemas:\n{}",
-            serde_json::to_string_pretty(&obj)
-                .unwrap_or_else(|_| "<can't serialize>".to_string())
-        ),
-    };
-
-    let mut data = openapiv3::SchemaData::default();
-
-    if matches!(
-        &obj.extensions.get("nullable"),
-        Some(serde_json::Value::Bool(true))
-    ) {
-        data.nullable = true;
-    }
-
-    if let Some(metadata) = &obj.metadata {
-        data.title = metadata.title.clone();
-        data.description = metadata.description.clone();
-        data.default = metadata.default.clone();
-        data.deprecated = metadata.deprecated;
-        data.read_only = metadata.read_only;
-        data.write_only = metadata.write_only;
-    }
-
-    if let Some(name) = name {
-        data.title = Some(name.clone());
-    }
-    if let Some(example) = obj.extensions.get("example") {
-        data.example = Some(example.clone());
-    }
-
-    openapiv3::ReferenceOr::Item(openapiv3::Schema {
-        schema_data: data,
-        schema_kind: kind,
-    })
-}
-
-fn j2oas_subschemas(
-    subschemas: &schemars::schema::SubschemaValidation,
-) -> openapiv3::SchemaKind {
-    match (
-        &subschemas.all_of,
-        &subschemas.any_of,
-        &subschemas.one_of,
-        &subschemas.not,
-    ) {
-        (Some(all_of), None, None, None) => openapiv3::SchemaKind::AllOf {
-            all_of: all_of
-                .iter()
-                .map(|schema| j2oas_schema(None, schema))
-                .collect::<Vec<_>>(),
-        },
-        (None, Some(any_of), None, None) => openapiv3::SchemaKind::AnyOf {
-            any_of: any_of
-                .iter()
-                .map(|schema| j2oas_schema(None, schema))
-                .collect::<Vec<_>>(),
-        },
-        (None, None, Some(one_of), None) => openapiv3::SchemaKind::OneOf {
-            one_of: one_of
-                .iter()
-                .map(|schema| j2oas_schema(None, schema))
-                .collect::<Vec<_>>(),
-        },
-        (None, None, None, Some(not)) => openapiv3::SchemaKind::Not {
-            not: Box::new(j2oas_schema(None, not)),
-        },
-        _ => panic!("invalid subschema {:#?}", subschemas),
-    }
-}
-
-fn j2oas_integer(
-    format: &Option<String>,
-    number: &Option<Box<schemars::schema::NumberValidation>>,
-    enum_values: &Option<Vec<serde_json::Value>>,
-) -> openapiv3::SchemaKind {
-    let format = match format.as_ref().map(|s| s.as_str()) {
-        None => openapiv3::VariantOrUnknownOrEmpty::Empty,
-        Some("int32") => openapiv3::VariantOrUnknownOrEmpty::Item(
-            openapiv3::IntegerFormat::Int32,
-        ),
-        Some("int64") => openapiv3::VariantOrUnknownOrEmpty::Item(
-            openapiv3::IntegerFormat::Int64,
-        ),
-        Some(other) => {
-            openapiv3::VariantOrUnknownOrEmpty::Unknown(other.to_string())
-        }
-    };
-
-    let (multiple_of, minimum, exclusive_minimum, maximum, exclusive_maximum) =
-        match number {
-            None => (None, None, false, None, false),
-            Some(number) => {
-                let multiple_of = number.multiple_of.map(|f| f as i64);
-                let (minimum, exclusive_minimum) =
-                    match (number.minimum, number.exclusive_minimum) {
-                        (None, None) => (None, false),
-                        (Some(f), None) => (Some(f as i64), false),
-                        (None, Some(f)) => (Some(f as i64), true),
-                        _ => panic!("invalid"),
-                    };
-                let (maximum, exclusive_maximum) =
-                    match (number.maximum, number.exclusive_maximum) {
-                        (None, None) => (None, false),
-                        (Some(f), None) => (Some(f as i64), false),
-                        (None, Some(f)) => (Some(f as i64), true),
-                        _ => panic!("invalid"),
-                    };
-
-                (
-                    multiple_of,
-                    minimum,
-                    exclusive_minimum,
-                    maximum,
-                    exclusive_maximum,
-                )
-            }
-        };
-
-    let enumeration = enum_values
-        .iter()
-        .flat_map(|v| {
-            v.iter().map(|vv| match vv {
-                serde_json::Value::Null => None,
-                serde_json::Value::Number(value) => {
-                    Some(value.as_i64().unwrap())
-                }
-                _ => panic!("unexpected enumeration value {:?}", vv),
-            })
-        })
-        .collect::<Vec<_>>();
-
-    openapiv3::SchemaKind::Type(openapiv3::Type::Integer(
-        openapiv3::IntegerType {
-            format,
-            multiple_of,
-            exclusive_minimum,
-            exclusive_maximum,
-            minimum,
-            maximum,
-            enumeration,
-        },
-    ))
-}
-
-fn j2oas_number(
-    format: &Option<String>,
-    number: &Option<Box<schemars::schema::NumberValidation>>,
-    enum_values: &Option<Vec<serde_json::Value>>,
-) -> openapiv3::SchemaKind {
-    let format = match format.as_ref().map(|s| s.as_str()) {
-        None => openapiv3::VariantOrUnknownOrEmpty::Empty,
-        Some("float") => openapiv3::VariantOrUnknownOrEmpty::Item(
-            openapiv3::NumberFormat::Float,
-        ),
-        Some("double") => openapiv3::VariantOrUnknownOrEmpty::Item(
-            openapiv3::NumberFormat::Double,
-        ),
-        Some(other) => {
-            openapiv3::VariantOrUnknownOrEmpty::Unknown(other.to_string())
-        }
-    };
-
-    let (multiple_of, minimum, exclusive_minimum, maximum, exclusive_maximum) =
-        match number {
-            None => (None, None, false, None, false),
-            Some(number) => {
-                let multiple_of = number.multiple_of;
-                let (minimum, exclusive_minimum) =
-                    match (number.minimum, number.exclusive_minimum) {
-                        (None, None) => (None, false),
-                        (s @ Some(_), None) => (s, false),
-                        (None, s @ Some(_)) => (s, true),
-                        _ => panic!("invalid"),
-                    };
-                let (maximum, exclusive_maximum) =
-                    match (number.maximum, number.exclusive_maximum) {
-                        (None, None) => (None, false),
-                        (s @ Some(_), None) => (s, false),
-                        (None, s @ Some(_)) => (s, true),
-                        _ => panic!("invalid"),
-                    };
-
-                (
-                    multiple_of,
-                    minimum,
-                    exclusive_minimum,
-                    maximum,
-                    exclusive_maximum,
-                )
-            }
-        };
-
-    let enumeration = enum_values
-        .iter()
-        .flat_map(|v| {
-            v.iter().map(|vv| match vv {
-                serde_json::Value::Null => None,
-                serde_json::Value::Number(value) => {
-                    Some(value.as_f64().unwrap())
-                }
-                _ => panic!("unexpected enumeration value {:?}", vv),
-            })
-        })
-        .collect::<Vec<_>>();
-
-    openapiv3::SchemaKind::Type(openapiv3::Type::Number(
-        openapiv3::NumberType {
-            format,
-            multiple_of,
-            exclusive_minimum,
-            exclusive_maximum,
-            minimum,
-            maximum,
-            enumeration,
-        },
-    ))
-}
-
-fn j2oas_string(
-    format: &Option<String>,
-    string: &Option<Box<schemars::schema::StringValidation>>,
-    enum_values: &Option<Vec<serde_json::Value>>,
-) -> openapiv3::SchemaKind {
-    let format = match format.as_ref().map(|s| s.as_str()) {
-        None => openapiv3::VariantOrUnknownOrEmpty::Empty,
-        Some("date") => openapiv3::VariantOrUnknownOrEmpty::Item(
-            openapiv3::StringFormat::Date,
-        ),
-        Some("date-time") => openapiv3::VariantOrUnknownOrEmpty::Item(
-            openapiv3::StringFormat::DateTime,
-        ),
-        Some("password") => openapiv3::VariantOrUnknownOrEmpty::Item(
-            openapiv3::StringFormat::Password,
-        ),
-        Some("byte") => openapiv3::VariantOrUnknownOrEmpty::Item(
-            openapiv3::StringFormat::Byte,
-        ),
-        Some("binary") => openapiv3::VariantOrUnknownOrEmpty::Item(
-            openapiv3::StringFormat::Binary,
-        ),
-        Some(other) => {
-            openapiv3::VariantOrUnknownOrEmpty::Unknown(other.to_string())
-        }
-    };
-
-    let (max_length, min_length, pattern) = match string.as_ref() {
-        None => (None, None, None),
-        Some(string) => (
-            string.max_length.map(|n| n as usize),
-            string.min_length.map(|n| n as usize),
-            string.pattern.clone(),
-        ),
-    };
-
-    let enumeration = enum_values
-        .iter()
-        .flat_map(|v| {
-            v.iter().map(|vv| match vv {
-                serde_json::Value::Null => None,
-                serde_json::Value::String(s) => Some(s.clone()),
-                _ => panic!("unexpected enumeration value {:?}", vv),
-            })
-        })
-        .collect::<Vec<_>>();
-
-    openapiv3::SchemaKind::Type(openapiv3::Type::String(
-        openapiv3::StringType {
-            format,
-            pattern,
-            enumeration,
-            min_length,
-            max_length,
-        },
-    ))
-}
-
-fn j2oas_array(
-    array: &Option<Box<schemars::schema::ArrayValidation>>,
-) -> openapiv3::SchemaKind {
-    let arr = array.as_ref().unwrap();
-
-    openapiv3::SchemaKind::Type(openapiv3::Type::Array(openapiv3::ArrayType {
-        items: match &arr.items {
-            Some(schemars::schema::SingleOrVec::Single(schema)) => {
-                Some(box_reference_or(j2oas_schema(None, &schema)))
-            }
-            Some(schemars::schema::SingleOrVec::Vec(_)) => {
-                panic!("OpenAPI v3.0.x cannot support tuple-like arrays")
-            }
-            None => None,
-        },
-        min_items: arr.min_items.map(|n| n as usize),
-        max_items: arr.max_items.map(|n| n as usize),
-        unique_items: arr.unique_items.unwrap_or(false),
-    }))
-}
-
-fn box_reference_or(
-    r: openapiv3::ReferenceOr<openapiv3::Schema>,
-) -> openapiv3::ReferenceOr<Box<openapiv3::Schema>> {
-    match r {
-        openapiv3::ReferenceOr::Item(schema) => {
-            openapiv3::ReferenceOr::boxed_item(schema)
-        }
-        openapiv3::ReferenceOr::Reference { reference } => {
-            openapiv3::ReferenceOr::Reference { reference }
-        }
-    }
-}
-
-fn j2oas_object(
-    object: &Option<Box<schemars::schema::ObjectValidation>>,
-) -> openapiv3::SchemaKind {
-    match object {
-        None => openapiv3::SchemaKind::Type(openapiv3::Type::Object(
-            openapiv3::ObjectType::default(),
-        )),
-        Some(obj) => openapiv3::SchemaKind::Type(openapiv3::Type::Object(
-            openapiv3::ObjectType {
-                properties: obj
-                    .properties
-                    .iter()
-                    .map(|(prop, schema)| {
-                        (
-                            prop.clone(),
-                            box_reference_or(j2oas_schema(None, schema)),
-                        )
-                    })
-                    .collect::<_>(),
-                required: obj.required.iter().cloned().collect::<_>(),
-                additional_properties: obj.additional_properties.as_ref().map(
-                    |schema| match schema.as_ref() {
-                        schemars::schema::Schema::Bool(b) => {
-                            openapiv3::AdditionalProperties::Any(*b)
-                        }
-                        schemars::schema::Schema::Object(obj) => {
-                            openapiv3::AdditionalProperties::Schema(Box::new(
-                                j2oas_schema_object(None, obj),
-                            ))
-                        }
-                    },
-                ),
-                min_properties: obj.min_properties.map(|n| n as usize),
-                max_properties: obj.max_properties.map(|n| n as usize),
-            },
-        )),
-    }
-}
-
 /// This object is used to specify configuration for building an OpenAPI
 /// definition document. It is constructed using [`ApiDescription::openapi()`].
 /// Additional optional properties may be added and then the OpenAPI definition
@@ -1528,8 +1095,6 @@ impl Default for ExtensionMode {

 #[cfg(test)]
 mod test {
-    use super::j2oas_schema;
-    use crate::api_description::j2oas_schema_object;
     use crate::endpoint;
     use crate::error::HttpError;
     use crate::handler::RequestContext;
@@ -1636,59 +1201,6 @@ mod test {
         api.register(test_badpath_handler).unwrap();
     }

-    #[test]
-    fn test_empty_struct() {
-        #[derive(JsonSchema)]
-        struct Empty {}
-
-        let settings = schemars::gen::SchemaSettings::openapi3();
-        let mut generator = schemars::gen::SchemaGenerator::new(settings);
-
-        let schema = Empty::json_schema(&mut generator);
-        let _ = j2oas_schema(None, &schema);
-    }
-
-    #[test]
-    fn test_garbage_barge_structure_conversion() {
-        #[allow(dead_code)]
-        #[derive(JsonSchema)]
-        struct SuperGarbage {
-            string: String,
-            strings: Vec<String>,
-            more_strings: [String; 3],
-            substruct: Substruct,
-            more: Option<Substruct>,
-            union: Union,
-            map: std::collections::BTreeMap<String, String>,
-        }
-
-        #[allow(dead_code)]
-        #[derive(JsonSchema)]
-        struct Substruct {
-            ii32: i32,
-            uu64: u64,
-            ff: f32,
-            dd: f64,
-            b: bool,
-        }
-
-        #[allow(dead_code)]
-        #[derive(JsonSchema)]
-        enum Union {
-            A { a: u32 },
-            B { b: f32 },
-        }
-
-        let settings = schemars::gen::SchemaSettings::openapi3();
-        let mut generator = schemars::gen::SchemaGenerator::new(settings);
-
-        let schema = SuperGarbage::json_schema(&mut generator);
-        let _ = j2oas_schema(None, &schema);
-        for (key, schema) in generator.definitions().iter() {
-            let _ = j2oas_schema(Some(key), schema);
-        }
-    }
-
     // XXX-dap TODO-coverage need a test for trying to use two
     // ExclusiveExtractors

@@ -1724,52 +1236,6 @@ mod test {
         );
     }

-    #[test]
-    fn test_additional_properties() {
-        #[allow(dead_code)]
-        #[derive(JsonSchema)]
-        enum Union {
-            A { a: u32 },
-        }
-        let settings = schemars::gen::SchemaSettings::openapi3();
-        let mut generator = schemars::gen::SchemaGenerator::new(settings);
-        let schema = Union::json_schema(&mut generator);
-        let _ = j2oas_schema(None, &schema);
-        for (key, schema) in generator.definitions().iter() {
-            let _ = j2oas_schema(Some(key), schema);
-        }
-    }
-
-    #[test]
-    fn test_nullable() {
-        #[allow(dead_code)]
-        #[derive(JsonSchema)]
-        struct Foo {
-            bar: String,
-        }
-        let settings = schemars::gen::SchemaSettings::openapi3();
-        let generator = schemars::gen::SchemaGenerator::new(settings);
-        let root_schema = generator.into_root_schema_for::<Option<Foo>>();
-        let schema = root_schema.schema;
-        let os = j2oas_schema_object(None, &schema);
-
-        assert_eq!(
-            os,
-            openapiv3::ReferenceOr::Item(openapiv3::Schema {
-                schema_data: openapiv3::SchemaData {
-                    title: Some("Nullable_Foo".to_string()),
-                    nullable: true,
-                    ..Default::default()
-                },
-                schema_kind: openapiv3::SchemaKind::AllOf {
-                    all_of: vec![openapiv3::ReferenceOr::Reference {
-                        reference: "#/components/schemas/Foo".to_string()
-                    }],
-                },
-            })
-        );
-    }
-
     #[test]
     fn test_tags_need_one() {
         let mut api = ApiDescription::new().tag_config(TagConfig {
@@ -1888,48 +1354,4 @@ mod test {
             .collect::<HashSet<_>>()
         )
     }
-
-    #[test]
-    #[should_panic]
-    fn test_bad_schema() {
-        #![allow(unused)]
-
-        #[derive(JsonSchema)]
-        #[schemars(tag = "which")]
= "which")] - enum Which { - This, - That, - } - - #[derive(JsonSchema)] - struct BlackSheep { - #[schemars(flatten)] - you_can_get_with: Which, - } - - let schema = schemars::schema_for!(BlackSheep).schema; - - let _ = j2oas_schema_object(None, &schema); - } - - #[test] - #[should_panic] - fn test_two_types() { - #![allow(unused)] - - #[derive(JsonSchema)] - enum One { - One, - } - - #[derive(JsonSchema)] - struct Uno { - #[schemars(flatten)] - one: One, - } - - let schema = schemars::schema_for!(Uno).schema; - - let _ = j2oas_schema_object(None, &schema); - } } diff --git a/dropshot/src/schema_util.rs b/dropshot/src/schema_util.rs index 14904e757..65975ee34 100644 --- a/dropshot/src/schema_util.rs +++ b/dropshot/src/schema_util.rs @@ -256,3 +256,587 @@ pub(crate) fn schema_extract_description( } } } + +/// Convert from JSON Schema into OpenAPI. +// TODO Initially this seemed like it was going to be a win, but the versions +// of JSON Schema that the schemars and openapiv3 crates adhere to are just +// different enough to make the conversion a real pain in the neck. A better +// approach might be a derive(OpenAPI)-like thing, or even a generic +// derive(schema) that we could then marshall into OpenAPI. +// The schemars crate also seems a bit inflexible when it comes to how the +// schema is generated wrt references vs. inline types. +pub(crate) fn j2oas_schema( + name: Option<&String>, + schema: &schemars::schema::Schema, +) -> openapiv3::ReferenceOr { + match schema { + // The permissive, "match anything" schema. We'll typically see this + // when consumers use a type such as serde_json::Value. + schemars::schema::Schema::Bool(true) => { + openapiv3::ReferenceOr::Item(openapiv3::Schema { + schema_data: openapiv3::SchemaData::default(), + schema_kind: openapiv3::SchemaKind::Any( + openapiv3::AnySchema::default(), + ), + }) + } + schemars::schema::Schema::Bool(false) => { + panic!("We don't expect to see a schema that matches the null set") + } + schemars::schema::Schema::Object(obj) => j2oas_schema_object(name, obj), + } +} + +fn j2oas_schema_object( + name: Option<&String>, + obj: &schemars::schema::SchemaObject, +) -> openapiv3::ReferenceOr { + if let Some(reference) = &obj.reference { + return openapiv3::ReferenceOr::Reference { + reference: reference.clone(), + }; + } + + let ty = match &obj.instance_type { + Some(schemars::schema::SingleOrVec::Single(ty)) => Some(ty.as_ref()), + Some(schemars::schema::SingleOrVec::Vec(_)) => { + panic!( + "a type array is unsupported by openapiv3:\n{}", + serde_json::to_string_pretty(obj) + .unwrap_or_else(|_| "".to_string()) + ) + } + None => None, + }; + + let kind = match (ty, &obj.subschemas) { + (Some(schemars::schema::InstanceType::Null), None) => { + openapiv3::SchemaKind::Type(openapiv3::Type::String( + openapiv3::StringType { + enumeration: vec![None], + ..Default::default() + }, + )) + } + (Some(schemars::schema::InstanceType::Boolean), None) => { + openapiv3::SchemaKind::Type(openapiv3::Type::Boolean {}) + } + (Some(schemars::schema::InstanceType::Object), None) => { + j2oas_object(&obj.object) + } + (Some(schemars::schema::InstanceType::Array), None) => { + j2oas_array(&obj.array) + } + (Some(schemars::schema::InstanceType::Number), None) => { + j2oas_number(&obj.format, &obj.number, &obj.enum_values) + } + (Some(schemars::schema::InstanceType::String), None) => { + j2oas_string(&obj.format, &obj.string, &obj.enum_values) + } + (Some(schemars::schema::InstanceType::Integer), None) => { + j2oas_integer(&obj.format, &obj.number, 
+        }
+        (None, Some(subschema)) => j2oas_subschemas(subschema),
+        (None, None) => {
+            openapiv3::SchemaKind::Any(openapiv3::AnySchema::default())
+        }
+        (Some(_), Some(_)) => panic!(
+            "a schema can't have both a type and subschemas:\n{}",
+            serde_json::to_string_pretty(&obj)
+                .unwrap_or_else(|_| "<can't serialize>".to_string())
+        ),
+    };
+
+    let mut data = openapiv3::SchemaData::default();
+
+    if matches!(
+        &obj.extensions.get("nullable"),
+        Some(serde_json::Value::Bool(true))
+    ) {
+        data.nullable = true;
+    }
+
+    if let Some(metadata) = &obj.metadata {
+        data.title = metadata.title.clone();
+        data.description = metadata.description.clone();
+        data.default = metadata.default.clone();
+        data.deprecated = metadata.deprecated;
+        data.read_only = metadata.read_only;
+        data.write_only = metadata.write_only;
+    }
+
+    if let Some(name) = name {
+        data.title = Some(name.clone());
+    }
+    if let Some(example) = obj.extensions.get("example") {
+        data.example = Some(example.clone());
+    }
+
+    openapiv3::ReferenceOr::Item(openapiv3::Schema {
+        schema_data: data,
+        schema_kind: kind,
+    })
+}
+
+fn j2oas_subschemas(
+    subschemas: &schemars::schema::SubschemaValidation,
+) -> openapiv3::SchemaKind {
+    match (
+        &subschemas.all_of,
+        &subschemas.any_of,
+        &subschemas.one_of,
+        &subschemas.not,
+    ) {
+        (Some(all_of), None, None, None) => openapiv3::SchemaKind::AllOf {
+            all_of: all_of
+                .iter()
+                .map(|schema| j2oas_schema(None, schema))
+                .collect::<Vec<_>>(),
+        },
+        (None, Some(any_of), None, None) => openapiv3::SchemaKind::AnyOf {
+            any_of: any_of
+                .iter()
+                .map(|schema| j2oas_schema(None, schema))
+                .collect::<Vec<_>>(),
+        },
+        (None, None, Some(one_of), None) => openapiv3::SchemaKind::OneOf {
+            one_of: one_of
+                .iter()
+                .map(|schema| j2oas_schema(None, schema))
+                .collect::<Vec<_>>(),
+        },
+        (None, None, None, Some(not)) => openapiv3::SchemaKind::Not {
+            not: Box::new(j2oas_schema(None, not)),
+        },
+        _ => panic!("invalid subschema {:#?}", subschemas),
+    }
+}
+
+fn j2oas_integer(
+    format: &Option<String>,
+    number: &Option<Box<schemars::schema::NumberValidation>>,
+    enum_values: &Option<Vec<serde_json::Value>>,
+) -> openapiv3::SchemaKind {
+    let format = match format.as_ref().map(|s| s.as_str()) {
+        None => openapiv3::VariantOrUnknownOrEmpty::Empty,
+        Some("int32") => openapiv3::VariantOrUnknownOrEmpty::Item(
+            openapiv3::IntegerFormat::Int32,
+        ),
+        Some("int64") => openapiv3::VariantOrUnknownOrEmpty::Item(
+            openapiv3::IntegerFormat::Int64,
+        ),
+        Some(other) => {
+            openapiv3::VariantOrUnknownOrEmpty::Unknown(other.to_string())
+        }
+    };
+
+    let (multiple_of, minimum, exclusive_minimum, maximum, exclusive_maximum) =
+        match number {
+            None => (None, None, false, None, false),
+            Some(number) => {
+                let multiple_of = number.multiple_of.map(|f| f as i64);
+                let (minimum, exclusive_minimum) =
+                    match (number.minimum, number.exclusive_minimum) {
+                        (None, None) => (None, false),
+                        (Some(f), None) => (Some(f as i64), false),
+                        (None, Some(f)) => (Some(f as i64), true),
+                        _ => panic!("invalid"),
+                    };
+                let (maximum, exclusive_maximum) =
+                    match (number.maximum, number.exclusive_maximum) {
+                        (None, None) => (None, false),
+                        (Some(f), None) => (Some(f as i64), false),
+                        (None, Some(f)) => (Some(f as i64), true),
+                        _ => panic!("invalid"),
+                    };
+
+                (
+                    multiple_of,
+                    minimum,
+                    exclusive_minimum,
+                    maximum,
+                    exclusive_maximum,
+                )
+            }
+        };
+
+    let enumeration = enum_values
+        .iter()
+        .flat_map(|v| {
+            v.iter().map(|vv| match vv {
+                serde_json::Value::Null => None,
+                serde_json::Value::Number(value) => {
+                    Some(value.as_i64().unwrap())
+                }
+                _ => panic!("unexpected enumeration value {:?}", vv),
+            })
+        })
+        .collect::<Vec<_>>();
+
+    openapiv3::SchemaKind::Type(openapiv3::Type::Integer(
+        openapiv3::IntegerType {
+            format,
+            multiple_of,
+            exclusive_minimum,
+            exclusive_maximum,
+            minimum,
+            maximum,
+            enumeration,
+        },
+    ))
+}
+
+fn j2oas_number(
+    format: &Option<String>,
+    number: &Option<Box<schemars::schema::NumberValidation>>,
+    enum_values: &Option<Vec<serde_json::Value>>,
+) -> openapiv3::SchemaKind {
+    let format = match format.as_ref().map(|s| s.as_str()) {
+        None => openapiv3::VariantOrUnknownOrEmpty::Empty,
+        Some("float") => openapiv3::VariantOrUnknownOrEmpty::Item(
+            openapiv3::NumberFormat::Float,
+        ),
+        Some("double") => openapiv3::VariantOrUnknownOrEmpty::Item(
+            openapiv3::NumberFormat::Double,
+        ),
+        Some(other) => {
+            openapiv3::VariantOrUnknownOrEmpty::Unknown(other.to_string())
+        }
+    };
+
+    let (multiple_of, minimum, exclusive_minimum, maximum, exclusive_maximum) =
+        match number {
+            None => (None, None, false, None, false),
+            Some(number) => {
+                let multiple_of = number.multiple_of;
+                let (minimum, exclusive_minimum) =
+                    match (number.minimum, number.exclusive_minimum) {
+                        (None, None) => (None, false),
+                        (s @ Some(_), None) => (s, false),
+                        (None, s @ Some(_)) => (s, true),
+                        _ => panic!("invalid"),
+                    };
+                let (maximum, exclusive_maximum) =
+                    match (number.maximum, number.exclusive_maximum) {
+                        (None, None) => (None, false),
+                        (s @ Some(_), None) => (s, false),
+                        (None, s @ Some(_)) => (s, true),
+                        _ => panic!("invalid"),
+                    };
+
+                (
+                    multiple_of,
+                    minimum,
+                    exclusive_minimum,
+                    maximum,
+                    exclusive_maximum,
+                )
+            }
+        };
+
+    let enumeration = enum_values
+        .iter()
+        .flat_map(|v| {
+            v.iter().map(|vv| match vv {
+                serde_json::Value::Null => None,
+                serde_json::Value::Number(value) => {
+                    Some(value.as_f64().unwrap())
+                }
+                _ => panic!("unexpected enumeration value {:?}", vv),
+            })
+        })
+        .collect::<Vec<_>>();
+
+    openapiv3::SchemaKind::Type(openapiv3::Type::Number(
+        openapiv3::NumberType {
+            format,
+            multiple_of,
+            exclusive_minimum,
+            exclusive_maximum,
+            minimum,
+            maximum,
+            enumeration,
+        },
+    ))
+}
+
+fn j2oas_string(
+    format: &Option<String>,
+    string: &Option<Box<schemars::schema::StringValidation>>,
+    enum_values: &Option<Vec<serde_json::Value>>,
+) -> openapiv3::SchemaKind {
+    let format = match format.as_ref().map(|s| s.as_str()) {
+        None => openapiv3::VariantOrUnknownOrEmpty::Empty,
+        Some("date") => openapiv3::VariantOrUnknownOrEmpty::Item(
+            openapiv3::StringFormat::Date,
+        ),
+        Some("date-time") => openapiv3::VariantOrUnknownOrEmpty::Item(
+            openapiv3::StringFormat::DateTime,
+        ),
+        Some("password") => openapiv3::VariantOrUnknownOrEmpty::Item(
+            openapiv3::StringFormat::Password,
+        ),
+        Some("byte") => openapiv3::VariantOrUnknownOrEmpty::Item(
+            openapiv3::StringFormat::Byte,
+        ),
+        Some("binary") => openapiv3::VariantOrUnknownOrEmpty::Item(
+            openapiv3::StringFormat::Binary,
+        ),
+        Some(other) => {
+            openapiv3::VariantOrUnknownOrEmpty::Unknown(other.to_string())
+        }
+    };
+
+    let (max_length, min_length, pattern) = match string.as_ref() {
+        None => (None, None, None),
+        Some(string) => (
+            string.max_length.map(|n| n as usize),
+            string.min_length.map(|n| n as usize),
+            string.pattern.clone(),
+        ),
+    };
+
+    let enumeration = enum_values
+        .iter()
+        .flat_map(|v| {
+            v.iter().map(|vv| match vv {
+                serde_json::Value::Null => None,
+                serde_json::Value::String(s) => Some(s.clone()),
+                _ => panic!("unexpected enumeration value {:?}", vv),
+            })
+        })
+        .collect::<Vec<_>>();
+
+    openapiv3::SchemaKind::Type(openapiv3::Type::String(
+        openapiv3::StringType {
+            format,
+            pattern,
+            enumeration,
+            min_length,
+            max_length,
+        },
+    ))
+}
+
+fn j2oas_array(
+    array: &Option<Box<schemars::schema::ArrayValidation>>,
+) -> openapiv3::SchemaKind {
+    let arr = array.as_ref().unwrap();
array.as_ref().unwrap(); + + openapiv3::SchemaKind::Type(openapiv3::Type::Array(openapiv3::ArrayType { + items: match &arr.items { + Some(schemars::schema::SingleOrVec::Single(schema)) => { + Some(box_reference_or(j2oas_schema(None, &schema))) + } + Some(schemars::schema::SingleOrVec::Vec(_)) => { + panic!("OpenAPI v3.0.x cannot support tuple-like arrays") + } + None => None, + }, + min_items: arr.min_items.map(|n| n as usize), + max_items: arr.max_items.map(|n| n as usize), + unique_items: arr.unique_items.unwrap_or(false), + })) +} + +fn box_reference_or( + r: openapiv3::ReferenceOr, +) -> openapiv3::ReferenceOr> { + match r { + openapiv3::ReferenceOr::Item(schema) => { + openapiv3::ReferenceOr::boxed_item(schema) + } + openapiv3::ReferenceOr::Reference { reference } => { + openapiv3::ReferenceOr::Reference { reference } + } + } +} + +fn j2oas_object( + object: &Option>, +) -> openapiv3::SchemaKind { + match object { + None => openapiv3::SchemaKind::Type(openapiv3::Type::Object( + openapiv3::ObjectType::default(), + )), + Some(obj) => openapiv3::SchemaKind::Type(openapiv3::Type::Object( + openapiv3::ObjectType { + properties: obj + .properties + .iter() + .map(|(prop, schema)| { + ( + prop.clone(), + box_reference_or(j2oas_schema(None, schema)), + ) + }) + .collect::<_>(), + required: obj.required.iter().cloned().collect::<_>(), + additional_properties: obj.additional_properties.as_ref().map( + |schema| match schema.as_ref() { + schemars::schema::Schema::Bool(b) => { + openapiv3::AdditionalProperties::Any(*b) + } + schemars::schema::Schema::Object(obj) => { + openapiv3::AdditionalProperties::Schema(Box::new( + j2oas_schema_object(None, obj), + )) + } + }, + ), + min_properties: obj.min_properties.map(|n| n as usize), + max_properties: obj.max_properties.map(|n| n as usize), + }, + )), + } +} + +#[cfg(test)] +mod test { + use super::j2oas_schema; + use super::j2oas_schema_object; + use schemars::JsonSchema; + + #[test] + fn test_empty_struct() { + #[derive(JsonSchema)] + struct Empty {} + + let settings = schemars::gen::SchemaSettings::openapi3(); + let mut generator = schemars::gen::SchemaGenerator::new(settings); + + let schema = Empty::json_schema(&mut generator); + let _ = j2oas_schema(None, &schema); + } + + #[test] + fn test_garbage_barge_structure_conversion() { + #[allow(dead_code)] + #[derive(JsonSchema)] + struct SuperGarbage { + string: String, + strings: Vec, + more_strings: [String; 3], + substruct: Substruct, + more: Option, + union: Union, + map: std::collections::BTreeMap, + } + + #[allow(dead_code)] + #[derive(JsonSchema)] + struct Substruct { + ii32: i32, + uu64: u64, + ff: f32, + dd: f64, + b: bool, + } + + #[allow(dead_code)] + #[derive(JsonSchema)] + enum Union { + A { a: u32 }, + B { b: f32 }, + } + + let settings = schemars::gen::SchemaSettings::openapi3(); + let mut generator = schemars::gen::SchemaGenerator::new(settings); + + let schema = SuperGarbage::json_schema(&mut generator); + let _ = j2oas_schema(None, &schema); + for (key, schema) in generator.definitions().iter() { + let _ = j2oas_schema(Some(key), schema); + } + } + + #[test] + fn test_additional_properties() { + #[allow(dead_code)] + #[derive(JsonSchema)] + enum Union { + A { a: u32 }, + } + let settings = schemars::gen::SchemaSettings::openapi3(); + let mut generator = schemars::gen::SchemaGenerator::new(settings); + let schema = Union::json_schema(&mut generator); + let _ = j2oas_schema(None, &schema); + for (key, schema) in generator.definitions().iter() { + let _ = j2oas_schema(Some(key), schema); 
+ } + } + + #[test] + fn test_nullable() { + #[allow(dead_code)] + #[derive(JsonSchema)] + struct Foo { + bar: String, + } + let settings = schemars::gen::SchemaSettings::openapi3(); + let generator = schemars::gen::SchemaGenerator::new(settings); + let root_schema = generator.into_root_schema_for::>(); + let schema = root_schema.schema; + let os = j2oas_schema_object(None, &schema); + + assert_eq!( + os, + openapiv3::ReferenceOr::Item(openapiv3::Schema { + schema_data: openapiv3::SchemaData { + title: Some("Nullable_Foo".to_string()), + nullable: true, + ..Default::default() + }, + schema_kind: openapiv3::SchemaKind::AllOf { + all_of: vec![openapiv3::ReferenceOr::Reference { + reference: "#/components/schemas/Foo".to_string() + }], + }, + }) + ); + } + + #[test] + #[should_panic] + fn test_bad_schema() { + #![allow(unused)] + + #[derive(JsonSchema)] + #[schemars(tag = "which")] + enum Which { + This, + That, + } + + #[derive(JsonSchema)] + struct BlackSheep { + #[schemars(flatten)] + you_can_get_with: Which, + } + + let schema = schemars::schema_for!(BlackSheep).schema; + + let _ = j2oas_schema_object(None, &schema); + } + + #[test] + #[should_panic] + fn test_two_types() { + #![allow(unused)] + + #[derive(JsonSchema)] + enum One { + One, + } + + #[derive(JsonSchema)] + struct Uno { + #[schemars(flatten)] + one: One, + } + + let schema = schemars::schema_for!(Uno).schema; + + let _ = j2oas_schema_object(None, &schema); + } +} From 08c9a9ae0b39bdba6acb47c9b234cb085da9af4c Mon Sep 17 00:00:00 2001 From: David Pacheco Date: Wed, 11 Jan 2023 11:24:56 -0800 Subject: [PATCH 22/47] fix style --- dropshot/src/api_description.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dropshot/src/api_description.rs b/dropshot/src/api_description.rs index 30d91a785..67b8b5e50 100644 --- a/dropshot/src/api_description.rs +++ b/dropshot/src/api_description.rs @@ -1,6 +1,7 @@ // Copyright 2023 Oxide Computer Company //! 
Describes the endpoints and handler functions in your API +use crate::extractor::RequestExtractor; use crate::handler::HttpHandlerFunc; use crate::handler::HttpResponse; use crate::handler::HttpRouteHandler; @@ -12,7 +13,6 @@ use crate::schema_util::j2oas_schema; use crate::server::ServerContext; use crate::type_util::type_is_scalar; use crate::type_util::type_is_string_enum; -use crate::extractor::RequestExtractor; use crate::HttpErrorResponseBody; use crate::CONTENT_TYPE_JSON; use crate::CONTENT_TYPE_OCTET_STREAM; From 38324b4d29efd72357fee2880cdb27360daa123d Mon Sep 17 00:00:00 2001 From: David Pacheco Date: Wed, 11 Jan 2023 12:17:48 -0800 Subject: [PATCH 23/47] update tests --- dropshot/tests/fail/bad_endpoint17.rs | 37 ++++++++++++++++ dropshot/tests/fail/bad_endpoint17.stderr | 38 ++++++++++++++++ dropshot/tests/fail/bad_endpoint3.rs | 1 + dropshot/tests/fail/bad_endpoint3.stderr | 35 ++++++++++++--- dropshot/tests/fail/bad_endpoint3b.rs | 33 ++++++++++++++ dropshot/tests/fail/bad_endpoint3b.stderr | 38 ++++++++++++++++ dropshot/tests/fail/bad_endpoint3c.rs | 34 +++++++++++++++ dropshot/tests/fail/bad_endpoint3c.stderr | 38 ++++++++++++++++ dropshot_endpoint/src/lib.rs | 53 ++++++++++++++++++++--- 9 files changed, 294 insertions(+), 13 deletions(-) create mode 100644 dropshot/tests/fail/bad_endpoint17.rs create mode 100644 dropshot/tests/fail/bad_endpoint17.stderr create mode 100644 dropshot/tests/fail/bad_endpoint3b.rs create mode 100644 dropshot/tests/fail/bad_endpoint3b.stderr create mode 100644 dropshot/tests/fail/bad_endpoint3c.rs create mode 100644 dropshot/tests/fail/bad_endpoint3c.stderr diff --git a/dropshot/tests/fail/bad_endpoint17.rs b/dropshot/tests/fail/bad_endpoint17.rs new file mode 100644 index 000000000..44dbc09f9 --- /dev/null +++ b/dropshot/tests/fail/bad_endpoint17.rs @@ -0,0 +1,37 @@ +// Copyright 2023 Oxide Computer Company + +#![allow(unused_imports)] + +use dropshot::endpoint; +use dropshot::HttpError; +use dropshot::HttpResponseOk; +use dropshot::RequestContext; +use dropshot::TypedBody; +use dropshot::UntypedBody; +use schemars::JsonSchema; +use serde::Deserialize; +use std::sync::Arc; + +#[allow(dead_code)] +#[derive(Deserialize, JsonSchema)] +struct Stuff { + x: String, +} + +// Test: two exclusive extractors. +// This winds up being tested implicitly by the fact that we test that middle +// parameters impl `SharedExtractor`. So this winds up being the same as a +// previous test case. However, it seems worth testing explicitly. +#[endpoint { + method = GET, + path = "/test", +}] +async fn two_exclusive_extractors( + _rqctx: Arc>, + _param1: TypedBody, + _param2: UntypedBody, +) -> Result, HttpError> { + Ok(HttpResponseOk(())) +} + +fn main() {} diff --git a/dropshot/tests/fail/bad_endpoint17.stderr b/dropshot/tests/fail/bad_endpoint17.stderr new file mode 100644 index 000000000..b02cf4af7 --- /dev/null +++ b/dropshot/tests/fail/bad_endpoint17.stderr @@ -0,0 +1,38 @@ +error[E0277]: the trait bound `TypedBody: SharedExtractor` is not satisfied + --> tests/fail/bad_endpoint17.rs:28:14 + | +28 | _param1: TypedBody, + | ^^^^^^^^^^^^^^^^ the trait `SharedExtractor` is not implemented for `TypedBody` + | + = help: the following other types implement trait `SharedExtractor`: + dropshot::Path + dropshot::Query +note: required by a bound in `need_shared_extractor` + --> tests/fail/bad_endpoint17.rs:22:1 + | +22 | / #[endpoint { +23 | | method = GET, +24 | | path = "/test", +25 | | }] + | |__^ required by this bound in `need_shared_extractor` +... 
+28 | _param1: TypedBody, + | --------- required by a bound in this + = note: this error originates in the attribute macro `endpoint` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `fn(Arc>, TypedBody, UntypedBody) -> impl Future, HttpError>> { for ApiEndpoint<> as RequestContextArgument>::Context>>::from::two_exclusive_extractors}: dropshot::handler::HttpHandlerFunc<_, _, _>` is not satisfied + --> tests/fail/bad_endpoint17.rs:26:10 + | +22 | / #[endpoint { +23 | | method = GET, +24 | | path = "/test", +25 | | }] + | |__- required by a bound introduced by this call +26 | async fn two_exclusive_extractors( + | ^^^^^^^^^^^^^^^^^^^^^^^^ the trait `dropshot::handler::HttpHandlerFunc<_, _, _>` is not implemented for fn item `fn(Arc>, TypedBody, UntypedBody) -> impl Future, HttpError>> { for ApiEndpoint<> as RequestContextArgument>::Context>>::from::two_exclusive_extractors}` + | +note: required by a bound in `ApiEndpoint::::new` + --> src/api_description.rs + | + | HandlerType: HttpHandlerFunc, + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `ApiEndpoint::::new` diff --git a/dropshot/tests/fail/bad_endpoint3.rs b/dropshot/tests/fail/bad_endpoint3.rs index 062c80104..7ae1a99e1 100644 --- a/dropshot/tests/fail/bad_endpoint3.rs +++ b/dropshot/tests/fail/bad_endpoint3.rs @@ -8,6 +8,7 @@ use dropshot::HttpResponseOk; use dropshot::RequestContext; use std::sync::Arc; +// Test: final parameter is neither an ExclusiveExtractor nor a SharedExtractor. #[endpoint { method = GET, path = "/test", diff --git a/dropshot/tests/fail/bad_endpoint3.stderr b/dropshot/tests/fail/bad_endpoint3.stderr index f6b90e23c..f27250cfd 100644 --- a/dropshot/tests/fail/bad_endpoint3.stderr +++ b/dropshot/tests/fail/bad_endpoint3.stderr @@ -1,12 +1,35 @@ +error[E0277]: the trait bound `String: SharedExtractor` is not satisfied + --> tests/fail/bad_endpoint3.rs:18:12 + | +18 | param: String, + | ^^^^^^ the trait `SharedExtractor` is not implemented for `String` + | + = help: the following other types implement trait `SharedExtractor`: + dropshot::Path + dropshot::Query + = note: required for `String` to implement `ExclusiveExtractor` +note: required by a bound in `need_exclusive_extractor` + --> tests/fail/bad_endpoint3.rs:12:1 + | +12 | / #[endpoint { +13 | | method = GET, +14 | | path = "/test", +15 | | }] + | |__^ required by this bound in `need_exclusive_extractor` +... 
+18 | param: String, + | ------ required by a bound in this + = note: this error originates in the attribute macro `endpoint` (in Nightly builds, run with -Z macro-backtrace for more info) + error[E0277]: the trait bound `fn(Arc>, String) -> impl Future, HttpError>> { for ApiEndpoint<> as RequestContextArgument>::Context>>::from::bad_endpoint}: dropshot::handler::HttpHandlerFunc<_, _, _>` is not satisfied - --> tests/fail/bad_endpoint3.rs:15:10 + --> tests/fail/bad_endpoint3.rs:16:10 | -11 | / #[endpoint { -12 | | method = GET, -13 | | path = "/test", -14 | | }] +12 | / #[endpoint { +13 | | method = GET, +14 | | path = "/test", +15 | | }] | |__- required by a bound introduced by this call -15 | async fn bad_endpoint( +16 | async fn bad_endpoint( | ^^^^^^^^^^^^ the trait `dropshot::handler::HttpHandlerFunc<_, _, _>` is not implemented for fn item `fn(Arc>, String) -> impl Future, HttpError>> { for ApiEndpoint<> as RequestContextArgument>::Context>>::from::bad_endpoint}` | note: required by a bound in `ApiEndpoint::::new` diff --git a/dropshot/tests/fail/bad_endpoint3b.rs b/dropshot/tests/fail/bad_endpoint3b.rs new file mode 100644 index 000000000..0937f5017 --- /dev/null +++ b/dropshot/tests/fail/bad_endpoint3b.rs @@ -0,0 +1,33 @@ +// Copyright 2023 Oxide Computer Company + +#![allow(unused_imports)] + +use dropshot::endpoint; +use dropshot::HttpError; +use dropshot::HttpResponseOk; +use dropshot::Query; +use dropshot::RequestContext; +use schemars::JsonSchema; +use serde::Deserialize; +use std::sync::Arc; + +#[allow(dead_code)] +#[derive(Deserialize, JsonSchema)] +struct QueryParams { + x: String, +} + +// Test: middle parameter is not a SharedExtractor. +#[endpoint { + method = GET, + path = "/test", +}] +async fn non_extractor_as_last_argument( + _rqctx: Arc>, + _param1: String, + _param2: Query, +) -> Result, HttpError> { + Ok(HttpResponseOk(())) +} + +fn main() {} diff --git a/dropshot/tests/fail/bad_endpoint3b.stderr b/dropshot/tests/fail/bad_endpoint3b.stderr new file mode 100644 index 000000000..b8d79806e --- /dev/null +++ b/dropshot/tests/fail/bad_endpoint3b.stderr @@ -0,0 +1,38 @@ +error[E0277]: the trait bound `std::string::String: SharedExtractor` is not satisfied + --> tests/fail/bad_endpoint3b.rs:27:14 + | +27 | _param1: String, + | ^^^^^^ the trait `SharedExtractor` is not implemented for `std::string::String` + | + = help: the following other types implement trait `SharedExtractor`: + dropshot::Path + dropshot::Query +note: required by a bound in `need_shared_extractor` + --> tests/fail/bad_endpoint3b.rs:21:1 + | +21 | / #[endpoint { +22 | | method = GET, +23 | | path = "/test", +24 | | }] + | |__^ required by this bound in `need_shared_extractor` +... 
+27 | _param1: String, + | ------ required by a bound in this + = note: this error originates in the attribute macro `endpoint` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `fn(Arc>, std::string::String, dropshot::Query) -> impl Future, HttpError>> { for ApiEndpoint<> as RequestContextArgument>::Context>>::from::non_extractor_as_last_argument}: dropshot::handler::HttpHandlerFunc<_, _, _>` is not satisfied + --> tests/fail/bad_endpoint3b.rs:25:10 + | +21 | / #[endpoint { +22 | | method = GET, +23 | | path = "/test", +24 | | }] + | |__- required by a bound introduced by this call +25 | async fn non_extractor_as_last_argument( + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ the trait `dropshot::handler::HttpHandlerFunc<_, _, _>` is not implemented for fn item `fn(Arc>, std::string::String, dropshot::Query) -> impl Future, HttpError>> { for ApiEndpoint<> as RequestContextArgument>::Context>>::from::non_extractor_as_last_argument}` + | +note: required by a bound in `ApiEndpoint::::new` + --> src/api_description.rs + | + | HandlerType: HttpHandlerFunc, + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `ApiEndpoint::::new` diff --git a/dropshot/tests/fail/bad_endpoint3c.rs b/dropshot/tests/fail/bad_endpoint3c.rs new file mode 100644 index 000000000..2b8193ebc --- /dev/null +++ b/dropshot/tests/fail/bad_endpoint3c.rs @@ -0,0 +1,34 @@ +// Copyright 2023 Oxide Computer Company + +#![allow(unused_imports)] + +use dropshot::endpoint; +use dropshot::HttpError; +use dropshot::HttpResponseOk; +use dropshot::Query; +use dropshot::TypedBody; +use dropshot::RequestContext; +use schemars::JsonSchema; +use serde::Deserialize; +use std::sync::Arc; + +#[allow(dead_code)] +#[derive(Deserialize, JsonSchema)] +struct Stuff { + x: String, +} + +// Test: exclusive extractor not as the last argument +#[endpoint { + method = GET, + path = "/test", +}] +async fn exclusive_extractor_not_last( + _rqctx: Arc>, + _param1: TypedBody, + _param2: Query, +) -> Result, HttpError> { + Ok(HttpResponseOk(())) +} + +fn main() {} diff --git a/dropshot/tests/fail/bad_endpoint3c.stderr b/dropshot/tests/fail/bad_endpoint3c.stderr new file mode 100644 index 000000000..f2d315066 --- /dev/null +++ b/dropshot/tests/fail/bad_endpoint3c.stderr @@ -0,0 +1,38 @@ +error[E0277]: the trait bound `TypedBody: SharedExtractor` is not satisfied + --> tests/fail/bad_endpoint3c.rs:28:14 + | +28 | _param1: TypedBody, + | ^^^^^^^^^^^^^^^^ the trait `SharedExtractor` is not implemented for `TypedBody` + | + = help: the following other types implement trait `SharedExtractor`: + dropshot::Path + dropshot::Query +note: required by a bound in `need_shared_extractor` + --> tests/fail/bad_endpoint3c.rs:22:1 + | +22 | / #[endpoint { +23 | | method = GET, +24 | | path = "/test", +25 | | }] + | |__^ required by this bound in `need_shared_extractor` +... 
+28 | _param1: TypedBody, + | --------- required by a bound in this + = note: this error originates in the attribute macro `endpoint` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `fn(Arc>, TypedBody, dropshot::Query) -> impl Future, HttpError>> { for ApiEndpoint<> as RequestContextArgument>::Context>>::from::exclusive_extractor_not_last}: dropshot::handler::HttpHandlerFunc<_, _, _>` is not satisfied + --> tests/fail/bad_endpoint3c.rs:26:10 + | +22 | / #[endpoint { +23 | | method = GET, +24 | | path = "/test", +25 | | }] + | |__- required by a bound introduced by this call +26 | async fn exclusive_extractor_not_last( + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ the trait `dropshot::handler::HttpHandlerFunc<_, _, _>` is not implemented for fn item `fn(Arc>, TypedBody, dropshot::Query) -> impl Future, HttpError>> { for ApiEndpoint<> as RequestContextArgument>::Context>>::from::exclusive_extractor_not_last}` + | +note: required by a bound in `ApiEndpoint::::new` + --> src/api_description.rs + | + | HandlerType: HttpHandlerFunc, + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `ApiEndpoint::::new` diff --git a/dropshot_endpoint/src/lib.rs b/dropshot_endpoint/src/lib.rs index f6111a885..5343a7487 100644 --- a/dropshot_endpoint/src/lib.rs +++ b/dropshot_endpoint/src/lib.rs @@ -433,12 +433,12 @@ fn do_endpoint_inner( .inputs .iter() .enumerate() - .filter_map(|(index, arg)| { + .map(|(index, arg)| { match arg { syn::FnArg::Receiver(_) => { // The compiler failure here is already comprehensible. arg_is_receiver = true; - Some(quote! {}) + quote! {} } syn::FnArg::Typed(pat) => { let span = pat.ty.span(); @@ -448,15 +448,38 @@ fn do_endpoint_inner( // The first parameter must be an Arc> // and fortunately we already have a trait that we can // use to validate this type. - Some(quote_spanned! { span=> + quote_spanned! { span=> const _: fn() = || { struct NeedRequestContext(<#ty as #dropshot::RequestContextArgument>::Context); }; - }) + } + } else if index < ast.sig.inputs.len() - 1 { + // Subsequent parameters aside from the last one must + // impl SharedExtractor. + quote_spanned! { span=> + const _: fn() = || { + fn need_shared_extractor() + where + T: ?Sized + #dropshot::SharedExtractor, + { + } + need_shared_extractor::<#ty>(); + }; + } } else { - // XXX-dap the remaining stuff must together impl - // `RequestExtractor` - None + // The final parameter must impl ExclusiveExtractor. + // (It's okay if it's another SharedExtractor. Those + // impl ExclusiveExtractor, too.) + quote_spanned! 
{ span=> + const _: fn() = || { + fn need_exclusive_extractor() + where + T: ?Sized + #dropshot::ExclusiveExtractor, + { + } + need_exclusive_extractor::<#ty>(); + }; + } } } } @@ -935,6 +958,14 @@ mod tests { const _: fn() = || { struct NeedRequestContext( > as dropshot::RequestContextArgument>::Context) ; }; + const _: fn() = || { + fn need_exclusive_extractor() + where + T: ?Sized + dropshot::ExclusiveExtractor, + { + } + need_exclusive_extractor:: >(); + }; const _: fn() = || { trait ResultTrait { type T; @@ -1033,6 +1064,14 @@ mod tests { const _: fn() = || { struct NeedRequestContext( > as dropshot::RequestContextArgument>::Context) ; }; + const _: fn() = || { + fn need_exclusive_extractor() + where + T: ?Sized + dropshot::ExclusiveExtractor, + { + } + need_exclusive_extractor:: >(); + }; const _: fn() = || { trait ResultTrait { type T; From b200e1094d4d1d7a36b3666810c86507c9de7b43 Mon Sep 17 00:00:00 2001 From: David Pacheco Date: Wed, 11 Jan 2023 12:20:46 -0800 Subject: [PATCH 24/47] remove TODO --- dropshot/src/api_description.rs | 3 --- 1 file changed, 3 deletions(-) diff --git a/dropshot/src/api_description.rs b/dropshot/src/api_description.rs index 67b8b5e50..a50573600 100644 --- a/dropshot/src/api_description.rs +++ b/dropshot/src/api_description.rs @@ -1201,9 +1201,6 @@ mod test { api.register(test_badpath_handler).unwrap(); } - // XXX-dap TODO-coverage need a test for trying to use two - // ExclusiveExtractors - #[test] fn test_dup_names() { #[derive(Deserialize, JsonSchema)] From 5bb93e774c7996df913667accfc2734e24235ef5 Mon Sep 17 00:00:00 2001 From: David Pacheco Date: Wed, 11 Jan 2023 12:26:27 -0800 Subject: [PATCH 25/47] rename tests --- dropshot/tests/fail/{bad_endpoint3c.rs => bad_endpoint18.rs} | 0 .../tests/fail/{bad_endpoint3c.stderr => bad_endpoint18.stderr} | 0 dropshot/tests/fail/{bad_endpoint3b.rs => bad_endpoint19.rs} | 0 .../tests/fail/{bad_endpoint3b.stderr => bad_endpoint19.stderr} | 0 4 files changed, 0 insertions(+), 0 deletions(-) rename dropshot/tests/fail/{bad_endpoint3c.rs => bad_endpoint18.rs} (100%) rename dropshot/tests/fail/{bad_endpoint3c.stderr => bad_endpoint18.stderr} (100%) rename dropshot/tests/fail/{bad_endpoint3b.rs => bad_endpoint19.rs} (100%) rename dropshot/tests/fail/{bad_endpoint3b.stderr => bad_endpoint19.stderr} (100%) diff --git a/dropshot/tests/fail/bad_endpoint3c.rs b/dropshot/tests/fail/bad_endpoint18.rs similarity index 100% rename from dropshot/tests/fail/bad_endpoint3c.rs rename to dropshot/tests/fail/bad_endpoint18.rs diff --git a/dropshot/tests/fail/bad_endpoint3c.stderr b/dropshot/tests/fail/bad_endpoint18.stderr similarity index 100% rename from dropshot/tests/fail/bad_endpoint3c.stderr rename to dropshot/tests/fail/bad_endpoint18.stderr diff --git a/dropshot/tests/fail/bad_endpoint3b.rs b/dropshot/tests/fail/bad_endpoint19.rs similarity index 100% rename from dropshot/tests/fail/bad_endpoint3b.rs rename to dropshot/tests/fail/bad_endpoint19.rs diff --git a/dropshot/tests/fail/bad_endpoint3b.stderr b/dropshot/tests/fail/bad_endpoint19.stderr similarity index 100% rename from dropshot/tests/fail/bad_endpoint3b.stderr rename to dropshot/tests/fail/bad_endpoint19.stderr From aa6f16e00aca770540faebcd11a145879cb0ac14 Mon Sep 17 00:00:00 2001 From: David Pacheco Date: Wed, 11 Jan 2023 12:28:00 -0800 Subject: [PATCH 26/47] fix output --- dropshot/tests/fail/bad_endpoint17.stderr | 28 +++++++++++------------ dropshot/tests/fail/bad_endpoint18.stderr | 6 ++--- dropshot/tests/fail/bad_endpoint19.stderr | 6 ++--- 3 files changed, 20 
insertions(+), 20 deletions(-) diff --git a/dropshot/tests/fail/bad_endpoint17.stderr b/dropshot/tests/fail/bad_endpoint17.stderr index b02cf4af7..bf40806dd 100644 --- a/dropshot/tests/fail/bad_endpoint17.stderr +++ b/dropshot/tests/fail/bad_endpoint17.stderr @@ -1,34 +1,34 @@ error[E0277]: the trait bound `TypedBody: SharedExtractor` is not satisfied - --> tests/fail/bad_endpoint17.rs:28:14 + --> tests/fail/bad_endpoint17.rs:31:14 | -28 | _param1: TypedBody, +31 | _param1: TypedBody, | ^^^^^^^^^^^^^^^^ the trait `SharedExtractor` is not implemented for `TypedBody` | = help: the following other types implement trait `SharedExtractor`: dropshot::Path dropshot::Query note: required by a bound in `need_shared_extractor` - --> tests/fail/bad_endpoint17.rs:22:1 + --> tests/fail/bad_endpoint17.rs:25:1 | -22 | / #[endpoint { -23 | | method = GET, -24 | | path = "/test", -25 | | }] +25 | / #[endpoint { +26 | | method = GET, +27 | | path = "/test", +28 | | }] | |__^ required by this bound in `need_shared_extractor` ... -28 | _param1: TypedBody, +31 | _param1: TypedBody, | --------- required by a bound in this = note: this error originates in the attribute macro `endpoint` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0277]: the trait bound `fn(Arc>, TypedBody, UntypedBody) -> impl Future, HttpError>> { for ApiEndpoint<> as RequestContextArgument>::Context>>::from::two_exclusive_extractors}: dropshot::handler::HttpHandlerFunc<_, _, _>` is not satisfied - --> tests/fail/bad_endpoint17.rs:26:10 + --> tests/fail/bad_endpoint17.rs:29:10 | -22 | / #[endpoint { -23 | | method = GET, -24 | | path = "/test", -25 | | }] +25 | / #[endpoint { +26 | | method = GET, +27 | | path = "/test", +28 | | }] | |__- required by a bound introduced by this call -26 | async fn two_exclusive_extractors( +29 | async fn two_exclusive_extractors( | ^^^^^^^^^^^^^^^^^^^^^^^^ the trait `dropshot::handler::HttpHandlerFunc<_, _, _>` is not implemented for fn item `fn(Arc>, TypedBody, UntypedBody) -> impl Future, HttpError>> { for ApiEndpoint<> as RequestContextArgument>::Context>>::from::two_exclusive_extractors}` | note: required by a bound in `ApiEndpoint::::new` diff --git a/dropshot/tests/fail/bad_endpoint18.stderr b/dropshot/tests/fail/bad_endpoint18.stderr index f2d315066..e333c7037 100644 --- a/dropshot/tests/fail/bad_endpoint18.stderr +++ b/dropshot/tests/fail/bad_endpoint18.stderr @@ -1,5 +1,5 @@ error[E0277]: the trait bound `TypedBody: SharedExtractor` is not satisfied - --> tests/fail/bad_endpoint3c.rs:28:14 + --> tests/fail/bad_endpoint18.rs:28:14 | 28 | _param1: TypedBody, | ^^^^^^^^^^^^^^^^ the trait `SharedExtractor` is not implemented for `TypedBody` @@ -8,7 +8,7 @@ error[E0277]: the trait bound `TypedBody: SharedExtractor` is not satisfi dropshot::Path dropshot::Query note: required by a bound in `need_shared_extractor` - --> tests/fail/bad_endpoint3c.rs:22:1 + --> tests/fail/bad_endpoint18.rs:22:1 | 22 | / #[endpoint { 23 | | method = GET, @@ -21,7 +21,7 @@ note: required by a bound in `need_shared_extractor` = note: this error originates in the attribute macro `endpoint` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0277]: the trait bound `fn(Arc>, TypedBody, dropshot::Query) -> impl Future, HttpError>> { for ApiEndpoint<> as RequestContextArgument>::Context>>::from::exclusive_extractor_not_last}: dropshot::handler::HttpHandlerFunc<_, _, _>` is not satisfied - --> tests/fail/bad_endpoint3c.rs:26:10 + --> tests/fail/bad_endpoint18.rs:26:10 | 22 | / #[endpoint { 23 | 
| method = GET, diff --git a/dropshot/tests/fail/bad_endpoint19.stderr b/dropshot/tests/fail/bad_endpoint19.stderr index b8d79806e..8b77ae6bf 100644 --- a/dropshot/tests/fail/bad_endpoint19.stderr +++ b/dropshot/tests/fail/bad_endpoint19.stderr @@ -1,5 +1,5 @@ error[E0277]: the trait bound `std::string::String: SharedExtractor` is not satisfied - --> tests/fail/bad_endpoint3b.rs:27:14 + --> tests/fail/bad_endpoint19.rs:27:14 | 27 | _param1: String, | ^^^^^^ the trait `SharedExtractor` is not implemented for `std::string::String` @@ -8,7 +8,7 @@ error[E0277]: the trait bound `std::string::String: SharedExtractor` is not sati dropshot::Path dropshot::Query note: required by a bound in `need_shared_extractor` - --> tests/fail/bad_endpoint3b.rs:21:1 + --> tests/fail/bad_endpoint19.rs:21:1 | 21 | / #[endpoint { 22 | | method = GET, @@ -21,7 +21,7 @@ note: required by a bound in `need_shared_extractor` = note: this error originates in the attribute macro `endpoint` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0277]: the trait bound `fn(Arc>, std::string::String, dropshot::Query) -> impl Future, HttpError>> { for ApiEndpoint<> as RequestContextArgument>::Context>>::from::non_extractor_as_last_argument}: dropshot::handler::HttpHandlerFunc<_, _, _>` is not satisfied - --> tests/fail/bad_endpoint3b.rs:25:10 + --> tests/fail/bad_endpoint19.rs:25:10 | 21 | / #[endpoint { 22 | | method = GET, From 0f956a563733ff51cb61a57944aafa9604038790 Mon Sep 17 00:00:00 2001 From: David Pacheco Date: Wed, 11 Jan 2023 12:34:26 -0800 Subject: [PATCH 27/47] copyright update was over-eager --- dropshot/src/pagination.rs | 2 +- dropshot/src/type_util.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/dropshot/src/pagination.rs b/dropshot/src/pagination.rs index 936231551..76e4f6dae 100644 --- a/dropshot/src/pagination.rs +++ b/dropshot/src/pagination.rs @@ -1,4 +1,4 @@ -// Copyright 2023 Oxide Computer Company +// Copyright 2022 Oxide Computer Company //! Detailed end-user documentation for pagination lives in the Dropshot top- //! level block comment. Here we discuss some of the design choices. diff --git a/dropshot/src/type_util.rs b/dropshot/src/type_util.rs index fc6405f5a..5c1495dea 100644 --- a/dropshot/src/type_util.rs +++ b/dropshot/src/type_util.rs @@ -1,4 +1,4 @@ -// Copyright 2023 Oxide Computer Company +// Copyright 2021 Oxide Computer Company //! Utility functions for working with JsonSchema types. From 59793940507575665f26dbdc70d127ab8fefe138 Mon Sep 17 00:00:00 2001 From: David Pacheco Date: Thu, 12 Jan 2023 11:05:42 -0800 Subject: [PATCH 28/47] update CHANGELOG --- CHANGELOG.adoc | 22 +++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/CHANGELOG.adoc b/CHANGELOG.adoc index 6c9d00d8c..f4ebff4eb 100644 --- a/CHANGELOG.adoc +++ b/CHANGELOG.adoc @@ -17,15 +17,19 @@ https://github.com/oxidecomputer/dropshot/compare/v0.8.0\...HEAD[Full list of co === Breaking Changes -// XXX-dap TODO need update here -// Extractor -> {Shared,Exclusive}Extractor -// type signature of from_request() changed -// both: accept &RequestContext instead of Arc -// now: no other change. future: exclusive one will get a hyper::Request -// exclusive extractors must appear last in the argument list - -* https://github.com/oxidecomputer/dropshot/pull/504[#504] Dropshot allows TLS configuration to be supplied either by path or as bytes. 
For compatibility, the `AsFile` variant of `ConfigTls` contains the `cert_file` and `key_file` fields, and may be used similarly to the old variant.
-* https://github.com/oxidecomputer/dropshot/pull/502[#502] Dropshot exposes a `refresh_tls` method to update the TLS certificates being used by a running server. If you previously tried to access `DropshotState.tls`, you can access the `DropshotState.using_tls()` method instead.
+There are a number of breaking changes in this release but we expect they will be easy to manage. **If you have any trouble updating to this release or want help with it, please do https://github.com/oxidecomputer/dropshot/discussions[start a discussion] or https://github.com/oxidecomputer/dropshot/issues/new[file an issue]!**
+
+* https://github.com/oxidecomputer/dropshot/pull/556[#556] Better type-safety around the use of extractors. It is now a compile-time error to define an endpoint that accepts two extractors that use the HTTP request body (e.g., to accept both a `TypedBody` and an `UntypedBody`, or two `TypedBody` arguments). Previously, this would have resulted in a runtime error. The main change is that the `Extractor` trait has been split into two separate traits: `SharedExtractor` and `ExclusiveExtractor`. Endpoint functions can still accept 0-3 extractors, but only one can be an `ExclusiveExtractor` and it must be the last one. The function signatures for `*Extractor::from_request` have also changed.
++
+**What you need to do:**
++
+1. For any endpoint functions that use a `TypedBody` or `UntypedBody` extractor, this extractor must be the last argument to the function. Otherwise, you will get a compile error about the extractor not impl'ing `SharedExtractor`.
+2. If you have your own type that impls `Extractor`, you will need to change that to either `ExclusiveExtractor` (if the impl needs a `mut` reference to the underlying `hyper::Request`, which is usually because it needs to read the request body) or `SharedExtractor`. If your extractor only needs to look at the URL or request headers and not the body, it can probably be a `SharedExtractor`. If it's an exclusive extractor, any function that accepts it must accept it as the last argument to the function.
+3. If you have your own type that impls `Extractor`, you will also need to change the type signature of the `from_request` method to accept a `&RequestContext` instead of `Arc<RequestContext>`. (This should not be a problem unless your extractor was hanging on to a reference via the Arc. We don't know a reason this would be useful. If you were doing this, please reach out. You can probably instead copy whatever information you need out of the `RequestContext` instead.)
+* https://github.com/oxidecomputer/dropshot/pull/504[#504] Dropshot now allows TLS configuration to be supplied either by path or as bytes. For compatibility, the `AsFile` variant of `ConfigTls` contains the `cert_file` and `key_file` fields, and may be used similarly to the old variant.
+* https://github.com/oxidecomputer/dropshot/pull/502[#502] Dropshot exposes a `refresh_tls` method to update the TLS certificates being used by a running server.
++
+**What you need to do:** If you previously tried to access `DropshotState.tls`, you can access the `DropshotState.using_tls()` method instead.
 * https://github.com/oxidecomputer/dropshot/pull/540[#540] `ConfigDropshot` now uses a https://docs.rs/camino/1.1.1/camino/struct.Utf8PathBuf.html[`camino::Utf8PathBuf`] for its file path.
There is no change to the configuration format itself, just its representation in Rust. === Other notable Changes From 0c727b9782e930c9cac530323b089fa1b9c882cd Mon Sep 17 00:00:00 2001 From: David Pacheco Date: Thu, 12 Jan 2023 11:06:19 -0800 Subject: [PATCH 29/47] bump for major version --- Cargo.lock | 4 ++-- dropshot/Cargo.toml | 4 ++-- dropshot_endpoint/Cargo.toml | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index fcfbd67d8..e5c242e80 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -289,7 +289,7 @@ dependencies = [ [[package]] name = "dropshot" -version = "0.8.1-dev" +version = "0.9.0-dev" dependencies = [ "async-stream", "async-trait", @@ -341,7 +341,7 @@ dependencies = [ [[package]] name = "dropshot_endpoint" -version = "0.8.1-dev" +version = "0.9.0-dev" dependencies = [ "proc-macro2", "quote", diff --git a/dropshot/Cargo.toml b/dropshot/Cargo.toml index 8b899668b..e99140f77 100644 --- a/dropshot/Cargo.toml +++ b/dropshot/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "dropshot" description = "expose REST APIs from a Rust program" -version = "0.8.1-dev" +version = "0.9.0-dev" authors = ["David Pacheco "] edition = "2018" license = "Apache-2.0" @@ -41,7 +41,7 @@ version = "0.4.23" features = [ "serde" ] [dependencies.dropshot_endpoint] -version = "^0.8.1-dev" +version = "^0.9.0-dev" path = "../dropshot_endpoint" [dependencies.hyper] diff --git a/dropshot_endpoint/Cargo.toml b/dropshot_endpoint/Cargo.toml index bf458f97b..681e092a0 100644 --- a/dropshot_endpoint/Cargo.toml +++ b/dropshot_endpoint/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "dropshot_endpoint" description = "macro used by dropshot consumers for registering handlers" -version = "0.8.1-dev" +version = "0.9.0-dev" authors = ["Adam H. Leventhal "] edition = "2018" license = "Apache-2.0" From 5ec8a451d1d0da72a729dde5d9eb0f5da5f4f6e3 Mon Sep 17 00:00:00 2001 From: David Pacheco Date: Thu, 12 Jan 2023 11:19:13 -0800 Subject: [PATCH 30/47] clean up docs, XXXs --- dropshot/src/extractor/common.rs | 12 +++++------- dropshot/src/extractor/mod.rs | 4 +--- dropshot/src/lib.rs | 18 ++++++++++-------- dropshot_endpoint/src/lib.rs | 23 +++++++++++++++-------- 4 files changed, 31 insertions(+), 26 deletions(-) diff --git a/dropshot/src/extractor/common.rs b/dropshot/src/extractor/common.rs index 71eb886bb..902ae95af 100644 --- a/dropshot/src/extractor/common.rs +++ b/dropshot/src/extractor/common.rs @@ -1,7 +1,5 @@ // Copyright 2023 Oxide Computer Company -// XXX-dap TODO-cleanup should the metadata into a separate, shared trait? - use crate::api_description::ApiEndpointParameter; use crate::api_description::{ApiEndpointBodyContentType, ExtensionMode}; use crate::error::HttpError; @@ -138,16 +136,16 @@ impl RequestExtractor for (X,) { } } -// XXX-dap TODO-doc update comment based on the change that uses the fact that -// SharedExtractor impls ExclusiveExtractor such that the last item in the -// tuple *must* be an exclusive extractor /// Defines implementations of `RequestExtractor` for tuples of one or more /// `SharedExtractor` followed by an `ExclusiveExtractor` /// /// As an example, `impl_rqextractor_for_tuple!(S1, S2)` defines an impl of /// `RequestExtractor` for tuple `(S1, S2, X)` where `S1: SharedExtractor`, -/// `S2: SharedExtractor`, and `X: ExclusiveExtractor`, as well as a similar -/// impl for just `(S1, S2)`. +/// `S2: SharedExtractor`, and `X: ExclusiveExtractor`. 
Note that any +/// `SharedExtractor` also impls `ExclusiveExtractor`, so it's not necessary to +/// impl this separately for `(S1, S2, S3)` (and indeed that would not be +/// possible, since it would overlap with the definition for `(S1, S2, X)`, even +/// if `SharedExtractor` did not impl `ExclusiveExtractor`). macro_rules! impl_rqextractor_for_tuple { ($( $S:ident),+) => { diff --git a/dropshot/src/extractor/mod.rs b/dropshot/src/extractor/mod.rs index b32fde52e..2eea64cf3 100644 --- a/dropshot/src/extractor/mod.rs +++ b/dropshot/src/extractor/mod.rs @@ -1,6 +1,6 @@ // Copyright 2023 Oxide Computer Company -//! Extractor trait +//! Extractor-related traits //! //! See top-level crate documentation for details @@ -38,8 +38,6 @@ pub use common::ExtractorMetadata; pub use common::RequestExtractor; pub use common::SharedExtractor; -// XXX-dap move these definitions to separate files? - // Query: query string extractor /// `Query` is an extractor used to deserialize an instance of diff --git a/dropshot/src/lib.rs b/dropshot/src/lib.rs index 5e5e46dbb..9308a4f29 100644 --- a/dropshot/src/lib.rs +++ b/dropshot/src/lib.rs @@ -216,10 +216,8 @@ //! ) -> Result //! ``` //! -//! Other than the RequestContext, parameters may appear in any order. -//! -//! The `Context` type is caller-provided context which is provided when -//! the server is created. +//! The `RequestContext` must appear first. The `Context` type is +//! caller-provided context which is provided when the server is created. //! //! The types `Query`, `Path`, `TypedBody`, and `UntypedBody` are called //! **Extractors** because they cause information to be pulled out of the request @@ -236,10 +234,14 @@ //! of type `J`. `J` must implement `serde::Deserialize` and `schemars::JsonSchema`. //! * [`UntypedBody`] extracts the raw bytes of the request body. //! -//! If the handler takes a `Query`, `Path

    `, `TypedBody`, or -//! `UntypedBody`, and the corresponding extraction cannot be completed, the -//! request fails with status code 400 and an error message reflecting a -//! validation error. +//! `Query` and `Path` impl `SharedExtractor`. `TypedBody` and `UntypedBody` +//! impl `ExclusiveExtractor`. Your function may accept 0-3 extractors, but +//! only one can be `ExclusiveExtractor`, and it must be the last one. +//! Otherwise, the order of extractor arguments does not matter. +//! +//! If the handler accepts any extractors and the corresponding extraction +//! cannot be completed, the request fails with status code 400 and an error +//! message reflecting the error (usually a validation error). //! //! As with any serde-deserializable type, you can make fields optional by having //! the corresponding property of the type be an `Option`. Here's an example of diff --git a/dropshot_endpoint/src/lib.rs b/dropshot_endpoint/src/lib.rs index 5343a7487..faeb929fb 100644 --- a/dropshot_endpoint/src/lib.rs +++ b/dropshot_endpoint/src/lib.rs @@ -220,12 +220,19 @@ fn do_channel( )); } - // XXX-dap TODO-cleanup This is a gross way to do it. - let mut input_pairs = - sig.inputs.clone().into_pairs().collect::>(); - let second_pair = input_pairs.remove(1); - input_pairs.push(second_pair); - sig.inputs = input_pairs.into_iter().collect(); + // Historically, we required that the `WebsocketConnection` argument + // be first after the `RequestContext`. However, we also require + // that any exclusive extractor (which includes the + // `WebsocketUpgrade` argument that we put in its place) appears + // last. We replaced the type above, but now we need to put it in + // the right spot. + sig.inputs = { + let mut input_pairs = + sig.inputs.clone().into_pairs().collect::>(); + let second_pair = input_pairs.remove(1); + input_pairs.push(second_pair); + input_pairs.into_iter().collect() + }; sig.output = syn::parse2(quote!(-> dropshot::WebsocketEndpointResult))?; @@ -424,8 +431,8 @@ fn do_endpoint_inner( // When the user attaches this proc macro to a function with the wrong type // signature, the resulting errors can be deeply inscrutable. To attempt to // make failures easier to understand, we inject code that asserts the types - // of the various parameters. We do this by calling a dummy function that - // requires a type that satisfies the trait Extractor. + // of the various parameters. We do this by calling dummy functions that + // require a type that satisfies SharedExtractor or ExclusiveExtractor. 
let mut arg_types = Vec::new(); let mut arg_is_receiver = false; let param_checks = ast From bda525f148cd4a1698297fe011ec4d068542cca2 Mon Sep 17 00:00:00 2001 From: David Pacheco Date: Thu, 12 Jan 2023 11:28:15 -0800 Subject: [PATCH 31/47] could use another conversion --- dropshot/src/handler.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/dropshot/src/handler.rs b/dropshot/src/handler.rs index 7b8ac4848..1f4a59ff5 100644 --- a/dropshot/src/handler.rs +++ b/dropshot/src/handler.rs @@ -110,6 +110,12 @@ impl From<&hyper::Request> for RequestHeader { } } +impl From> for RequestHeader { + fn from(request: hyper::Request) -> Self { + Self::from(&request) + } +} + impl RequestHeader { pub fn method(&self) -> &http::Method { &self.method From fabe335179a2719d7442d9e262b21efe543d5db9 Mon Sep 17 00:00:00 2001 From: David Pacheco Date: Mon, 9 Jan 2023 19:49:38 -0800 Subject: [PATCH 32/47] prototype: remove request from RequestContext --- CHANGELOG.adoc | 4 ++ dropshot/examples/request-headers.rs | 3 +- dropshot/src/extractor/common.rs | 15 +++++-- dropshot/src/extractor/mod.rs | 49 +++++++++++++++++++---- dropshot/src/handler.rs | 58 ++++++++++++++++++++++------ dropshot/src/lib.rs | 2 + dropshot/src/server.rs | 6 ++- dropshot/src/websocket.rs | 29 ++++++-------- 8 files changed, 121 insertions(+), 45 deletions(-) diff --git a/CHANGELOG.adoc b/CHANGELOG.adoc index f4ebff4eb..922033986 100644 --- a/CHANGELOG.adoc +++ b/CHANGELOG.adoc @@ -17,6 +17,10 @@ https://github.com/oxidecomputer/dropshot/compare/v0.8.0\...HEAD[Full list of co === Breaking Changes +// XXX-dap TODO more updates for RawRequest extractor +// also update crate-level docs and other places we talk about TypedBody, etc. +// maybe add an example? + There are a number of breaking changes in this release but we expect they will be easy to manage. **If you have any trouble updating to this release or want help with it, please do https://github.com/oxidecomputer/dropshot/discussions[start a discussion] or https://github.com/oxidecomputer/dropshot/issues/new[file an issue]!** * https://github.com/oxidecomputer/dropshot/pull/556[#556] Better type-safety around the use of extractors. It is now a compile-time error to define an endpoint that accepts two extractors that use the HTTP request body (e.g., to accept both a `TypedBody` and an `UntypedBody`, or two `TypedBody` arguments). Previously, this would have resulted in a runtime error. The main change is that the `Extractor` trait has been split into two separate traits: `SharedExtractor` and `ExclusiveExtractor`. Endpoint functions can still accept 0-3 extractors, but only one can be an `ExclusiveExtractor` and it must be the last one. The function signatures for `*Extractor::from_request` have also changed. diff --git a/dropshot/examples/request-headers.rs b/dropshot/examples/request-headers.rs index 104c75460..bae583118 100644 --- a/dropshot/examples/request-headers.rs +++ b/dropshot/examples/request-headers.rs @@ -48,9 +48,8 @@ async fn main() -> Result<(), String> { async fn example_api_get_header_generic( rqctx: Arc>, ) -> Result, HttpError> { - let request = rqctx.request.lock().await; // Note that clients can provide multiple values for a header. See // http::HeaderMap for ways to get all of them. 
- let header = request.headers().get("demo-header"); + let header = rqctx.request.headers().get("demo-header"); Ok(HttpResponseOk(format!("value for header: {:?}", header))) } diff --git a/dropshot/src/extractor/common.rs b/dropshot/src/extractor/common.rs index 902ae95af..2eec965af 100644 --- a/dropshot/src/extractor/common.rs +++ b/dropshot/src/extractor/common.rs @@ -25,6 +25,7 @@ pub trait ExclusiveExtractor: Send + Sync + Sized { /// Construct an instance of this type from a `RequestContext`. async fn from_request( rqctx: &RequestContext, + request: hyper::Request, ) -> Result; fn metadata( @@ -55,6 +56,7 @@ pub trait SharedExtractor: Send + Sync + Sized { impl ExclusiveExtractor for S { async fn from_request( rqctx: &RequestContext, + _request: hyper::Request, ) -> Result { ::from_request(rqctx).await } @@ -94,6 +96,7 @@ pub trait RequestExtractor: Send + Sync + Sized { /// Construct an instance of this type from a `RequestContext`. async fn from_request( rqctx: &RequestContext, + request: hyper::Request, ) -> Result; fn metadata( @@ -106,6 +109,7 @@ pub trait RequestExtractor: Send + Sync + Sized { impl RequestExtractor for () { async fn from_request( _rqctx: &RequestContext, + _request: hyper::Request, ) -> Result { Ok(()) } @@ -125,8 +129,9 @@ impl RequestExtractor for () { impl RequestExtractor for (X,) { async fn from_request( rqctx: &RequestContext, + request: hyper::Request, ) -> Result { - Ok((X::from_request(rqctx).await?,)) + Ok((X::from_request(rqctx, request).await?,)) } fn metadata( @@ -155,12 +160,14 @@ macro_rules! impl_rqextractor_for_tuple { RequestExtractor for ($($S,)+ X) { - async fn from_request(rqctx: &RequestContext) - -> Result<( $($S,)+ X ), HttpError> + async fn from_request( + rqctx: &RequestContext, + request: hyper::Request + ) -> Result<( $($S,)+ X ), HttpError> { futures::try_join!( $($S::from_request(rqctx),)+ - X::from_request(rqctx) + X::from_request(rqctx, request) ) } diff --git a/dropshot/src/extractor/mod.rs b/dropshot/src/extractor/mod.rs index 2eea64cf3..d659f683a 100644 --- a/dropshot/src/extractor/mod.rs +++ b/dropshot/src/extractor/mod.rs @@ -23,8 +23,6 @@ use crate::RequestContext; use async_trait::async_trait; use bytes::Bytes; -use hyper::Body; -use hyper::Request; use schemars::schema::InstanceType; use schemars::schema::SchemaObject; use schemars::JsonSchema; @@ -33,6 +31,7 @@ use std::fmt::Debug; mod common; +use crate::RequestHeader; pub use common::ExclusiveExtractor; pub use common::ExtractorMetadata; pub use common::RequestExtractor; @@ -59,7 +58,7 @@ impl Query { /// Given an HTTP request, pull out the query string and attempt to deserialize /// it as an instance of `QueryType`. fn http_request_load_query( - request: &Request, + request: &RequestHeader, ) -> Result, HttpError> where QueryType: DeserializeOwned + JsonSchema + Send + Sync, @@ -89,8 +88,7 @@ where async fn from_request( rqctx: &RequestContext, ) -> Result, HttpError> { - let request = rqctx.request.lock().await; - http_request_load_query(&request) + http_request_load_query(&rqctx.request) } fn metadata( @@ -224,12 +222,12 @@ impl /// to the content type, and deserialize it to an instance of `BodyType`. 
async fn http_request_load_body( rqctx: &RequestContext, + mut request: hyper::Request, ) -> Result, HttpError> where BodyType: JsonSchema + DeserializeOwned + Send + Sync, { let server = &rqctx.server; - let mut request = rqctx.request.lock().await; let body = http_read_body( request.body_mut(), server.config.request_body_max_bytes, @@ -300,8 +298,9 @@ where { async fn from_request( rqctx: &RequestContext, + request: hyper::Request, ) -> Result, HttpError> { - http_request_load_body(rqctx).await + http_request_load_body(rqctx, request).await } fn metadata(content_type: ApiEndpointBodyContentType) -> ExtractorMetadata { @@ -353,9 +352,9 @@ impl UntypedBody { impl ExclusiveExtractor for UntypedBody { async fn from_request( rqctx: &RequestContext, + mut request: hyper::Request, ) -> Result { let server = &rqctx.server; - let mut request = rqctx.request.lock().await; let body_bytes = http_read_body( request.body_mut(), server.config.request_body_max_bytes, @@ -389,6 +388,40 @@ impl ExclusiveExtractor for UntypedBody { } } +// RawRequest: extractor for the raw underlying hyper::Request + +/// `RawRequest` is an extractor providing access to the raw underlying +/// [`hyper::Request`]. +#[derive(Debug)] +pub struct RawRequest { + request: hyper::Request, +} + +impl RawRequest { + pub fn into_inner(self) -> hyper::Request { + self.request + } +} + +#[async_trait] +impl ExclusiveExtractor for RawRequest { + async fn from_request( + _rqctx: &RequestContext, + request: hyper::Request, + ) -> Result { + Ok(RawRequest { request }) + } + + fn metadata( + _content_type: ApiEndpointBodyContentType, + ) -> ExtractorMetadata { + ExtractorMetadata { + parameters: vec![], + extension_mode: ExtensionMode::None, + } + } +} + #[cfg(test)] mod test { use crate::api_description::ExtensionMode; diff --git a/dropshot/src/handler.rs b/dropshot/src/handler.rs index 39036af26..330f43ad6 100644 --- a/dropshot/src/handler.rs +++ b/dropshot/src/handler.rs @@ -49,11 +49,9 @@ use crate::schema_util::ReferenceVisitor; use crate::to_map::to_map; use async_trait::async_trait; -use futures::lock::Mutex; use http::HeaderMap; use http::StatusCode; use hyper::Body; -use hyper::Request; use hyper::Response; use schemars::JsonSchema; use serde::de::DeserializeOwned; @@ -73,19 +71,10 @@ use std::sync::Arc; pub type HttpHandlerResult = Result, HttpError>; /// Handle for various interfaces useful during request processing. -// TODO-cleanup What's the right way to package up "request"? The only time we -// need it to be mutable is when we're reading the body (e.g., as part of the -// JSON extractor). In order to support that, we wrap it in something that -// supports interior mutability. It also needs to be thread-safe, since we're -// using async/await. That brings us to Arc>, but it seems like -// overkill since it will only really be used by one thread at a time (at all, -// let alone mutably) and there will never be contention on the Mutex. #[derive(Debug)] pub struct RequestContext { /// shared server state pub server: Arc>, - /// HTTP request details - pub request: Arc>>, /// HTTP request routing variables pub path_variables: VariableSet, /// expected request body mime type @@ -94,6 +83,48 @@ pub struct RequestContext { pub request_id: String, /// logger for this specific request pub log: Logger, + + /// basic request information (method, URI, etc.) + pub request: RequestHeader, +} + +// This is deliberately as close to compatible with `hyper::Request` as +// reasonable. 
+#[derive(Debug)] +pub struct RequestHeader { + method: http::Method, + uri: http::Uri, + version: http::Version, + headers: http::HeaderMap, +} + +impl From<&hyper::Request> for RequestHeader { + fn from(request: &hyper::Request) -> Self { + RequestHeader { + method: request.method().clone(), + uri: request.uri().clone(), + version: request.version().clone(), + headers: request.headers().clone(), + } + } +} + +impl RequestHeader { + pub fn method(&self) -> &http::Method { + &self.method + } + + pub fn uri(&self) -> &http::Uri { + &self.uri + } + + pub fn version(&self) -> &http::Version { + &self.version + } + + pub fn headers(&self) -> &http::HeaderMap { + &self.headers + } } impl RequestContext { @@ -304,6 +335,7 @@ pub trait RouteHandler: Debug + Send + Sync { async fn handle_request( &self, rqctx: RequestContext, + request: hyper::Request, ) -> HttpHandlerResult; } @@ -366,6 +398,7 @@ where async fn handle_request( &self, rqctx_raw: RequestContext, + request: hyper::Request, ) -> HttpHandlerResult { // This is where the magic happens: in the code below, `funcparams` has // type `FuncParams`, which is a tuple type describing the extractor @@ -384,7 +417,8 @@ where // actual handler function. From this point down, all of this is // resolved statically. let rqctx = Arc::new(rqctx_raw); - let funcparams = RequestExtractor::from_request(&rqctx).await?; + let funcparams = + RequestExtractor::from_request(&rqctx, request).await?; let future = self.handler.handle_request(rqctx, funcparams); future.await } diff --git a/dropshot/src/lib.rs b/dropshot/src/lib.rs index 9308a4f29..83dc475cf 100644 --- a/dropshot/src/lib.rs +++ b/dropshot/src/lib.rs @@ -632,6 +632,7 @@ pub use extractor::ExclusiveExtractor; pub use extractor::ExtractorMetadata; pub use extractor::Path; pub use extractor::Query; +pub use extractor::RawRequest; pub use extractor::SharedExtractor; pub use extractor::TypedBody; pub use extractor::UntypedBody; @@ -652,6 +653,7 @@ pub use handler::HttpResponseTemporaryRedirect; pub use handler::HttpResponseUpdatedNoContent; pub use handler::NoHeaders; pub use handler::RequestContext; +pub use handler::RequestHeader; pub use http_util::CONTENT_TYPE_JSON; pub use http_util::CONTENT_TYPE_NDJSON; pub use http_util::CONTENT_TYPE_OCTET_STREAM; diff --git a/dropshot/src/server.rs b/dropshot/src/server.rs index 6ab40327d..0a250b937 100644 --- a/dropshot/src/server.rs +++ b/dropshot/src/server.rs @@ -38,6 +38,7 @@ use tokio::net::{TcpListener, TcpStream}; use tokio_rustls::{server::TlsStream, TlsAcceptor}; use uuid::Uuid; +use crate::RequestHeader; use slog::Logger; // TODO Replace this with something else? 
@@ -770,13 +771,14 @@ async fn http_request_handle( server.router.lookup_route(&method, uri.path().into())?; let rqctx = RequestContext { server: Arc::clone(&server), - request: Arc::new(Mutex::new(request)), + request: RequestHeader::from(&request), path_variables: lookup_result.variables, body_content_type: lookup_result.body_content_type, request_id: request_id.to_string(), log: request_log, }; - let mut response = lookup_result.handler.handle_request(rqctx).await?; + let mut response = + lookup_result.handler.handle_request(rqctx, request).await?; response.headers_mut().insert( HEADER_REQUEST_ID, http::header::HeaderValue::from_str(&request_id).unwrap(), diff --git a/dropshot/src/websocket.rs b/dropshot/src/websocket.rs index 1db1b1306..df63d2c23 100644 --- a/dropshot/src/websocket.rs +++ b/dropshot/src/websocket.rs @@ -87,9 +87,8 @@ fn derive_accept_key(request_key: &[u8]) -> String { impl ExclusiveExtractor for WebsocketUpgrade { async fn from_request( rqctx: &RequestContext, + request: hyper::Request, ) -> Result { - let request = &mut *rqctx.request.lock().await; - if !request .headers() .get(header::CONNECTION) @@ -298,9 +297,9 @@ mod tests { use crate::router::HttpRouter; use crate::server::{DropshotState, ServerConfig}; use crate::{ - ExclusiveExtractor, HttpError, RequestContext, WebsocketUpgrade, + ExclusiveExtractor, HttpError, RequestContext, RequestHeader, + WebsocketUpgrade, }; - use futures::lock::Mutex; use http::Request; use hyper::Body; use std::net::{IpAddr, Ipv6Addr, SocketAddr}; @@ -310,6 +309,13 @@ mod tests { async fn ws_upg_from_mock_rqctx() -> Result { let log = slog::Logger::root(slog::Discard, slog::o!()).new(slog::o!()); + let request = Request::builder() + .header(http::header::CONNECTION, "Upgrade") + .header(http::header::UPGRADE, "websocket") + .header(http::header::SEC_WEBSOCKET_VERSION, "13") + .header(http::header::SEC_WEBSOCKET_KEY, "aGFjayB0aGUgcGxhbmV0IQ==") + .body(Body::empty()) + .unwrap(); let rqctx = RequestContext { server: Arc::new(DropshotState { private: (), @@ -326,24 +332,13 @@ mod tests { ), tls_acceptor: None, }), - request: Arc::new(Mutex::new( - Request::builder() - .header(http::header::CONNECTION, "Upgrade") - .header(http::header::UPGRADE, "websocket") - .header(http::header::SEC_WEBSOCKET_VERSION, "13") - .header( - http::header::SEC_WEBSOCKET_KEY, - "aGFjayB0aGUgcGxhbmV0IQ==", - ) - .body(Body::empty()) - .unwrap(), - )), + request: RequestHeader::from(&request), path_variables: Default::default(), body_content_type: Default::default(), request_id: "".to_string(), log: log.clone(), }; - let fut = WebsocketUpgrade::from_request(&rqctx); + let fut = WebsocketUpgrade::from_request(&rqctx, request); tokio::time::timeout(Duration::from_secs(1), fut) .await .expect("Deadlocked in WebsocketUpgrade constructor") From f52ff850518bfa290ee3c193fbf03499bc0174e7 Mon Sep 17 00:00:00 2001 From: David Pacheco Date: Mon, 9 Jan 2023 19:53:14 -0800 Subject: [PATCH 33/47] add XXX --- dropshot/src/handler.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/dropshot/src/handler.rs b/dropshot/src/handler.rs index 330f43ad6..4f4fc3173 100644 --- a/dropshot/src/handler.rs +++ b/dropshot/src/handler.rs @@ -90,6 +90,7 @@ pub struct RequestContext { // This is deliberately as close to compatible with `hyper::Request` as // reasonable. +// XXX-dap TODO This could use a better name. 
#[derive(Debug)] pub struct RequestHeader { method: http::Method, From 21b0fbb82d06c3e093f873de0973c6d811d25074 Mon Sep 17 00:00:00 2001 From: David Pacheco Date: Thu, 12 Jan 2023 12:46:51 -0800 Subject: [PATCH 34/47] sync with "main" and "exclusive-extractors" --- CHANGELOG.adoc | 25 +- Cargo.lock | 38 +- dropshot/Cargo.toml | 16 +- dropshot/build.rs | 21 +- dropshot/examples/request-headers.rs | 2 +- dropshot/src/api_description.rs | 587 +--------------------- dropshot/src/extractor/common.rs | 48 +- dropshot/src/extractor/mod.rs | 6 +- dropshot/src/handler.rs | 2 +- dropshot/src/lib.rs | 41 +- dropshot/src/pagination.rs | 29 +- dropshot/src/schema_util.rs | 586 ++++++++++++++++++++- dropshot/src/websocket.rs | 8 +- dropshot/tests/fail/bad_endpoint17.rs | 37 ++ dropshot/tests/fail/bad_endpoint17.stderr | 38 ++ dropshot/tests/fail/bad_endpoint18.rs | 34 ++ dropshot/tests/fail/bad_endpoint18.stderr | 38 ++ dropshot/tests/fail/bad_endpoint19.rs | 33 ++ dropshot/tests/fail/bad_endpoint19.stderr | 38 ++ dropshot/tests/fail/bad_endpoint3.rs | 1 + dropshot/tests/fail/bad_endpoint3.stderr | 35 +- dropshot/tests/test_demo.rs | 2 +- dropshot_endpoint/Cargo.toml | 2 +- dropshot_endpoint/src/lib.rs | 80 ++- rust-toolchain.toml | 2 +- 25 files changed, 1028 insertions(+), 721 deletions(-) create mode 100644 dropshot/tests/fail/bad_endpoint17.rs create mode 100644 dropshot/tests/fail/bad_endpoint17.stderr create mode 100644 dropshot/tests/fail/bad_endpoint18.rs create mode 100644 dropshot/tests/fail/bad_endpoint18.stderr create mode 100644 dropshot/tests/fail/bad_endpoint19.rs create mode 100644 dropshot/tests/fail/bad_endpoint19.stderr diff --git a/CHANGELOG.adoc b/CHANGELOG.adoc index a2fb7d460..922033986 100644 --- a/CHANGELOG.adoc +++ b/CHANGELOG.adoc @@ -17,23 +17,30 @@ https://github.com/oxidecomputer/dropshot/compare/v0.8.0\...HEAD[Full list of co === Breaking Changes -// XXX-dap TODO need update here -// Extractor -> {Shared,Exclusive}Extractor -// type signature of from_request() changed -// both: accept &RequestContext instead of Arc -// now: no other change. future: exclusive one will get a hyper::Request -// exclusive extractors must appear last in the argument list - // XXX-dap TODO more updates for RawRequest extractor // also update crate-level docs and other places we talk about TypedBody, etc. // maybe add an example? -* https://github.com/oxidecomputer/dropshot/pull/504[#504] Dropshot allows TLS configuration to be supplied either by path or as bytes. For compatibility, the `AsFile` variant of `ConfigTls` contains the `cert_file` and `key_file` fields, and may be used similarly to the old variant. -* https://github.com/oxidecomputer/dropshot/pull/502[#502] Dropshot exposes a `refresh_tls` method to update the TLS certificates being used by a running server. If you previously tried to access `DropshotState.tls`, you can access the `DropshotState.using_tls()` method instead. +There are a number of breaking changes in this release but we expect they will be easy to manage. **If you have any trouble updating to this release or want help with it, please do https://github.com/oxidecomputer/dropshot/discussions[start a discussion] or https://github.com/oxidecomputer/dropshot/issues/new[file an issue]!** + +* https://github.com/oxidecomputer/dropshot/pull/556[#556] Better type-safety around the use of extractors. 
It is now a compile-time error to define an endpoint that accepts two extractors that use the HTTP request body (e.g., to accept both a `TypedBody` and an `UntypedBody`, or two `TypedBody` arguments). Previously, this would have resulted in a runtime error. The main change is that the `Extractor` trait has been split into two separate traits: `SharedExtractor` and `ExclusiveExtractor`. Endpoint functions can still accept 0-3 extractors, but only one can be an `ExclusiveExtractor` and it must be the last one. The function signatures for `*Extractor::from_request` have also changed.
++
+**What you need to do:**
++
+1. For any endpoint functions that use a `TypedBody` or `UntypedBody` extractor, this extractor must be the last argument to the function. Otherwise, you will get a compile error about the extractor not impl'ing `SharedExtractor`.
+2. If you have your own type that impls `Extractor`, you will need to change it to impl either `ExclusiveExtractor` (if the impl needs a `mut` reference to the underlying `hyper::Request`, usually because it needs to read the request body) or `SharedExtractor`. If your extractor only needs to look at the URL or request headers and not the body, it can probably be a `SharedExtractor`. If it's an exclusive extractor, any function that accepts it must accept it as the last argument.
+3. If you have your own type that impls `Extractor`, you will also need to change the type signature of the `from_request` method to accept a `&RequestContext` instead of an `Arc<RequestContext>`. (This should not be a problem unless your extractor was hanging on to a reference via the Arc. We don't know a reason this would be useful; if you were doing this, please reach out. You can probably copy whatever information you need out of the `RequestContext` instead.) A minimal before/after sketch appears below, after these changelog entries.
+* https://github.com/oxidecomputer/dropshot/pull/504[#504] Dropshot now allows TLS configuration to be supplied either by path or as bytes. For compatibility, the `AsFile` variant of `ConfigTls` contains the `cert_file` and `key_file` fields, and may be used similarly to the old variant.
+* https://github.com/oxidecomputer/dropshot/pull/502[#502] Dropshot exposes a `refresh_tls` method to update the TLS certificates being used by a running server.
++
+**What you need to do:** If you previously tried to access `DropshotState.tls`, you can access the `DropshotState.using_tls()` method instead.
+* https://github.com/oxidecomputer/dropshot/pull/540[#540] `ConfigDropshot` now uses a [`camino::Utf8PathBuf`](https://docs.rs/camino/1.1.1/camino/struct.Utf8PathBuf.html) for its file path. There is no change to the configuration format itself, just its representation in Rust.
 
 === Other notable Changes
 
+* https://github.com/oxidecomputer/dropshot/pull/522[#522] Dropshot's DTrace
+  probes can now be used with a stable compiler on all platforms. This requires
+  Rust >= 1.59 for most platforms, or >= 1.66 for macOS.
 * https://github.com/oxidecomputer/dropshot/pull/452[#452] Dropshot no longer enables the `slog` cargo features `max_level_trace` and `release_max_level_debug`. Previously, clients were unable to set a release log level of `trace`; now they can. However, clients that did not select their own max log levels will see behavior change from the levels Dropshot was choosing to the default levels of `slog` itself (`debug` for debug builds and `info` for release builds).
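Editorial aside: a minimal before/after sketch for the #556 migration notes above. The `AuthToken` extractor is hypothetical; the trait and method shapes are the ones described in items 2 and 3.

[source,rust]
----
use async_trait::async_trait;
use dropshot::{
    ApiEndpointBodyContentType, ExtensionMode, ExtractorMetadata, HttpError,
    RequestContext, ServerContext, SharedExtractor,
};

/// Hypothetical extractor that pulls a bearer token out of a header.
struct AuthToken(Option<String>);

// Before (0.8.x): `impl Extractor for AuthToken`, where `from_request`
// received an `Arc<RequestContext<Context>>`.
//
// After: a header-only extractor impls `SharedExtractor` and borrows the
// context instead of receiving an `Arc`.
#[async_trait]
impl SharedExtractor for AuthToken {
    async fn from_request<Context: ServerContext>(
        rqctx: &RequestContext<Context>,
    ) -> Result<Self, HttpError> {
        let token = rqctx
            .request
            .headers()
            .get(http::header::AUTHORIZATION)
            .and_then(|v| v.to_str().ok())
            .map(String::from);
        Ok(AuthToken(token))
    }

    fn metadata(
        _content_type: ApiEndpointBodyContentType,
    ) -> ExtractorMetadata {
        ExtractorMetadata {
            extension_mode: ExtensionMode::None,
            parameters: vec![],
        }
    }
}
----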
* https://github.com/oxidecomputer/dropshot/pull/451[#451] There are now response types to support 302 ("Found"), 303 ("See Other"), and 307 ("Temporary Redirect") HTTP response codes. See `HttpResponseFound`, `HttpResponseSeeOther`, and `HttpResponseTemporaryRedirect`. * https://github.com/oxidecomputer/dropshot/pull/503[#503] Add an optional `deprecated` field to the `#[endpoint]` macro. diff --git a/Cargo.lock b/Cargo.lock index 489ab0aad..e5c242e80 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -34,9 +34,9 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.60" +version = "0.1.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "677d1d8ab452a3936018a687b20e6f7cf5363d713b732b8884001317b0e48aa3" +checksum = "705339e0e4a9690e2908d2b3d049d85682cf19fbd5782494498fbf7003a6a282" dependencies = [ "proc-macro2", "quote", @@ -68,9 +68,9 @@ checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" [[package]] name = "base64" -version = "0.20.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ea22880d78093b0cbe17c89f64a7d457941e65759157ec6cb31a31d652b05e5" +checksum = "a4a4ddaa51a5bc52a6948f74c06d20aaaddb71924eab79b8c97a8c556e942d6a" [[package]] name = "bitflags" @@ -134,9 +134,9 @@ checksum = "dfb24e866b15a1af2a1b663f10c6b6b8f397a84aadb828f12e5b289ec23a3a3c" [[package]] name = "camino" -version = "1.1.1" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88ad0e1e3e88dd237a156ab9f571021b8a158caa0ae44b1968a241efb5144c1e" +checksum = "c77df041dc383319cc661b428b6961a005db4d6808d5e12536931b1ca9556055" dependencies = [ "serde", ] @@ -289,11 +289,11 @@ dependencies = [ [[package]] name = "dropshot" -version = "0.8.1-dev" +version = "0.9.0-dev" dependencies = [ "async-stream", "async-trait", - "base64 0.20.0", + "base64 0.21.0", "bytes", "camino", "chrono", @@ -341,7 +341,7 @@ dependencies = [ [[package]] name = "dropshot_endpoint" -version = "0.8.1-dev" +version = "0.9.0-dev" dependencies = [ "proc-macro2", "quote", @@ -1726,9 +1726,9 @@ checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" [[package]] name = "trybuild" -version = "1.0.73" +version = "1.0.75" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed01de3de062db82c0920b5cabe804f88d599a3f217932292597c678c903754d" +checksum = "f1212c215a87a183687a7cc7065901b1a98da6b37277d51a1b5faedbb4efd4f3" dependencies = [ "glob", "once_cell", @@ -1838,9 +1838,9 @@ dependencies = [ [[package]] name = "usdt" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39bf7190754941ac252f6fe9c1ff008c09c5fd0292b1732f319900c7fce365d0" +checksum = "2b4c48f9e522b977bbe938a0d7c4d36633d267ba0155aaa253fb57d0531be0fb" dependencies = [ "dtrace-parser", "serde", @@ -1851,9 +1851,9 @@ dependencies = [ [[package]] name = "usdt-attr-macro" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6c80eed594ef75117f363ee2c109b45e13507bdc4729f9d7aea434604ad1777" +checksum = "80e6ae4f982ae74dcbaa8eb17baf36ca0d464a3abc8a7172b3bd74c73e9505d6" dependencies = [ "dtrace-parser", "proc-macro2", @@ -1865,9 +1865,9 @@ dependencies = [ [[package]] name = "usdt-impl" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7c8b459b1b7997d655cf1bb142551a5b216a6b0e56e51ebd76ecbc0ff5fd1de" +checksum = 
"f53b4ca0b33aae466dc47b30b98adc4f88454928837af8010b6ed02d18474cb1" dependencies = [ "byteorder", "dof", @@ -1885,9 +1885,9 @@ dependencies = [ [[package]] name = "usdt-macro" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a13f229fd5cde35ccca2c0151c67a880f3cc13ba2992a05db55b47cc77a5ef3f" +checksum = "7cb093f9653dc91632621c754f9ed4ee25d14e46e0239b6ccaf74a6c0c2788bd" dependencies = [ "dtrace-parser", "proc-macro2", diff --git a/dropshot/Cargo.toml b/dropshot/Cargo.toml index 18f2433bd..e99140f77 100644 --- a/dropshot/Cargo.toml +++ b/dropshot/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "dropshot" description = "expose REST APIs from a Rust program" -version = "0.8.1-dev" +version = "0.9.0-dev" authors = ["David Pacheco "] edition = "2018" license = "Apache-2.0" @@ -12,10 +12,10 @@ categories = ["network-programming", "web-programming::http-server"] [dependencies] async-stream = "0.3.3" -async-trait = "0.1.60" -base64 = "0.20.0" +async-trait = "0.1.61" +base64 = "0.21.0" bytes = "1" -camino = { version = "1.1.1", features = ["serde1"] } +camino = { version = "1.1.2", features = ["serde1"] } futures = "0.3.25" hostname = "0.3.0" http = "0.2.8" @@ -41,7 +41,7 @@ version = "0.4.23" features = [ "serde" ] [dependencies.dropshot_endpoint] -version = "^0.8.1-dev" +version = "^0.9.0-dev" path = "../dropshot_endpoint" [dependencies.hyper] @@ -61,7 +61,7 @@ version = "1.19" features = [ "full" ] [dependencies.usdt] -version = "0.3.4" +version = "0.3.5" optional = true default-features = false @@ -82,7 +82,7 @@ libc = "0.2.139" mime_guess = "2.0.4" subprocess = "0.2.9" tempfile = "3.3" -trybuild = "1.0.73" +trybuild = "1.0.75" # Used by the https examples and tests pem = "1.1" rcgen = "0.10.0" @@ -109,4 +109,4 @@ features = [ "max_level_trace", "release_max_level_debug" ] version_check = "0.9.4" [features] -usdt-probes = ["usdt/asm"] +usdt-probes = [ "usdt/asm" ] diff --git a/dropshot/build.rs b/dropshot/build.rs index db67d2159..ac624cc42 100644 --- a/dropshot/build.rs +++ b/dropshot/build.rs @@ -1,4 +1,4 @@ -// Copyright 2022 Oxide Computer Company +// Copyright 2023 Oxide Computer Company // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -13,15 +13,22 @@ // limitations under the License. // +// NOTE: The `usdt` crate uses inline assembly, which prior to Rust 1.59 or 1.66 +// on macOS, required the use of nightly features. These crate-local features +// are used to determine which toolchain is used to compile Dropshot, and then +// include the correct `#![feature(...)]` directives on the basis of that. fn main() { println!("cargo:rerun-if-changed=build.rs"); - if version_check::is_min_version("1.59").unwrap_or(false) { - println!("cargo:rustc-cfg=usdt_stable_asm"); + if !version_check::is_min_version("1.59").unwrap_or(false) { + println!("cargo:rustc-cfg=usdt_need_asm"); } - // Once asm_sym is stablilized, add an additional check so that those - // building on macos can use the stable toolchain with any hassle. - // - // A matching rust-cfg option named `usdt_stable_asm_sym` seems appropriate. + // `feature(asm_sym)` is stable, which is required on macOS. 
+ #[cfg(target_os = "macos")] + if version_check::supports_feature("asm_sym").unwrap_or(false) + && !version_check::is_min_version("1.67").unwrap_or(false) + { + println!("cargo:rustc-cfg=usdt_need_asm_sym"); + } } diff --git a/dropshot/examples/request-headers.rs b/dropshot/examples/request-headers.rs index 80a360509..bae583118 100644 --- a/dropshot/examples/request-headers.rs +++ b/dropshot/examples/request-headers.rs @@ -1,4 +1,4 @@ -// Copyright 2022 Oxide Computer Company +// Copyright 2023 Oxide Computer Company //! Example use of Dropshot with request headers //! diff --git a/dropshot/src/api_description.rs b/dropshot/src/api_description.rs index cb52e619f..a50573600 100644 --- a/dropshot/src/api_description.rs +++ b/dropshot/src/api_description.rs @@ -1,6 +1,7 @@ -// Copyright 2022 Oxide Computer Company +// Copyright 2023 Oxide Computer Company //! Describes the endpoints and handler functions in your API +use crate::extractor::RequestExtractor; use crate::handler::HttpHandlerFunc; use crate::handler::HttpResponse; use crate::handler::HttpRouteHandler; @@ -8,10 +9,10 @@ use crate::handler::RouteHandler; use crate::router::route_path_to_segments; use crate::router::HttpRouter; use crate::router::PathSegment; +use crate::schema_util::j2oas_schema; use crate::server::ServerContext; use crate::type_util::type_is_scalar; use crate::type_util::type_is_string_enum; -use crate::extractor::RequestExtractor; use crate::HttpErrorResponseBody; use crate::CONTENT_TYPE_JSON; use crate::CONTENT_TYPE_OCTET_STREAM; @@ -901,440 +902,6 @@ fn is_empty(schema: &schemars::schema::Schema) -> bool { false } -/// Convert from JSON Schema into OpenAPI. -// TODO Initially this seemed like it was going to be a win, but the versions -// of JSON Schema that the schemars and openapiv3 crates adhere to are just -// different enough to make the conversion a real pain in the neck. A better -// approach might be a derive(OpenAPI)-like thing, or even a generic -// derive(schema) that we could then marshall into OpenAPI. -// The schemars crate also seems a bit inflexible when it comes to how the -// schema is generated wrt references vs. inline types. -fn j2oas_schema( - name: Option<&String>, - schema: &schemars::schema::Schema, -) -> openapiv3::ReferenceOr { - match schema { - // The permissive, "match anything" schema. We'll typically see this - // when consumers use a type such as serde_json::Value. 
- schemars::schema::Schema::Bool(true) => { - openapiv3::ReferenceOr::Item(openapiv3::Schema { - schema_data: openapiv3::SchemaData::default(), - schema_kind: openapiv3::SchemaKind::Any( - openapiv3::AnySchema::default(), - ), - }) - } - schemars::schema::Schema::Bool(false) => { - panic!("We don't expect to see a schema that matches the null set") - } - schemars::schema::Schema::Object(obj) => j2oas_schema_object(name, obj), - } -} - -fn j2oas_schema_object( - name: Option<&String>, - obj: &schemars::schema::SchemaObject, -) -> openapiv3::ReferenceOr { - if let Some(reference) = &obj.reference { - return openapiv3::ReferenceOr::Reference { - reference: reference.clone(), - }; - } - - let ty = match &obj.instance_type { - Some(schemars::schema::SingleOrVec::Single(ty)) => Some(ty.as_ref()), - Some(schemars::schema::SingleOrVec::Vec(_)) => { - panic!( - "a type array is unsupported by openapiv3:\n{}", - serde_json::to_string_pretty(obj) - .unwrap_or_else(|_| "".to_string()) - ) - } - None => None, - }; - - let kind = match (ty, &obj.subschemas) { - (Some(schemars::schema::InstanceType::Null), None) => { - openapiv3::SchemaKind::Type(openapiv3::Type::String( - openapiv3::StringType { - enumeration: vec![None], - ..Default::default() - }, - )) - } - (Some(schemars::schema::InstanceType::Boolean), None) => { - openapiv3::SchemaKind::Type(openapiv3::Type::Boolean {}) - } - (Some(schemars::schema::InstanceType::Object), None) => { - j2oas_object(&obj.object) - } - (Some(schemars::schema::InstanceType::Array), None) => { - j2oas_array(&obj.array) - } - (Some(schemars::schema::InstanceType::Number), None) => { - j2oas_number(&obj.format, &obj.number, &obj.enum_values) - } - (Some(schemars::schema::InstanceType::String), None) => { - j2oas_string(&obj.format, &obj.string, &obj.enum_values) - } - (Some(schemars::schema::InstanceType::Integer), None) => { - j2oas_integer(&obj.format, &obj.number, &obj.enum_values) - } - (None, Some(subschema)) => j2oas_subschemas(subschema), - (None, None) => { - openapiv3::SchemaKind::Any(openapiv3::AnySchema::default()) - } - (Some(_), Some(_)) => panic!( - "a schema can't have both a type and subschemas:\n{}", - serde_json::to_string_pretty(&obj) - .unwrap_or_else(|_| "".to_string()) - ), - }; - - let mut data = openapiv3::SchemaData::default(); - - if matches!( - &obj.extensions.get("nullable"), - Some(serde_json::Value::Bool(true)) - ) { - data.nullable = true; - } - - if let Some(metadata) = &obj.metadata { - data.title = metadata.title.clone(); - data.description = metadata.description.clone(); - data.default = metadata.default.clone(); - data.deprecated = metadata.deprecated; - data.read_only = metadata.read_only; - data.write_only = metadata.write_only; - } - - if let Some(name) = name { - data.title = Some(name.clone()); - } - if let Some(example) = obj.extensions.get("example") { - data.example = Some(example.clone()); - } - - openapiv3::ReferenceOr::Item(openapiv3::Schema { - schema_data: data, - schema_kind: kind, - }) -} - -fn j2oas_subschemas( - subschemas: &schemars::schema::SubschemaValidation, -) -> openapiv3::SchemaKind { - match ( - &subschemas.all_of, - &subschemas.any_of, - &subschemas.one_of, - &subschemas.not, - ) { - (Some(all_of), None, None, None) => openapiv3::SchemaKind::AllOf { - all_of: all_of - .iter() - .map(|schema| j2oas_schema(None, schema)) - .collect::>(), - }, - (None, Some(any_of), None, None) => openapiv3::SchemaKind::AnyOf { - any_of: any_of - .iter() - .map(|schema| j2oas_schema(None, schema)) - .collect::>(), - }, - (None, 
None, Some(one_of), None) => openapiv3::SchemaKind::OneOf { - one_of: one_of - .iter() - .map(|schema| j2oas_schema(None, schema)) - .collect::>(), - }, - (None, None, None, Some(not)) => openapiv3::SchemaKind::Not { - not: Box::new(j2oas_schema(None, not)), - }, - _ => panic!("invalid subschema {:#?}", subschemas), - } -} - -fn j2oas_integer( - format: &Option, - number: &Option>, - enum_values: &Option>, -) -> openapiv3::SchemaKind { - let format = match format.as_ref().map(|s| s.as_str()) { - None => openapiv3::VariantOrUnknownOrEmpty::Empty, - Some("int32") => openapiv3::VariantOrUnknownOrEmpty::Item( - openapiv3::IntegerFormat::Int32, - ), - Some("int64") => openapiv3::VariantOrUnknownOrEmpty::Item( - openapiv3::IntegerFormat::Int64, - ), - Some(other) => { - openapiv3::VariantOrUnknownOrEmpty::Unknown(other.to_string()) - } - }; - - let (multiple_of, minimum, exclusive_minimum, maximum, exclusive_maximum) = - match number { - None => (None, None, false, None, false), - Some(number) => { - let multiple_of = number.multiple_of.map(|f| f as i64); - let (minimum, exclusive_minimum) = - match (number.minimum, number.exclusive_minimum) { - (None, None) => (None, false), - (Some(f), None) => (Some(f as i64), false), - (None, Some(f)) => (Some(f as i64), true), - _ => panic!("invalid"), - }; - let (maximum, exclusive_maximum) = - match (number.maximum, number.exclusive_maximum) { - (None, None) => (None, false), - (Some(f), None) => (Some(f as i64), false), - (None, Some(f)) => (Some(f as i64), true), - _ => panic!("invalid"), - }; - - ( - multiple_of, - minimum, - exclusive_minimum, - maximum, - exclusive_maximum, - ) - } - }; - - let enumeration = enum_values - .iter() - .flat_map(|v| { - v.iter().map(|vv| match vv { - serde_json::Value::Null => None, - serde_json::Value::Number(value) => { - Some(value.as_i64().unwrap()) - } - _ => panic!("unexpected enumeration value {:?}", vv), - }) - }) - .collect::>(); - - openapiv3::SchemaKind::Type(openapiv3::Type::Integer( - openapiv3::IntegerType { - format, - multiple_of, - exclusive_minimum, - exclusive_maximum, - minimum, - maximum, - enumeration, - }, - )) -} - -fn j2oas_number( - format: &Option, - number: &Option>, - enum_values: &Option>, -) -> openapiv3::SchemaKind { - let format = match format.as_ref().map(|s| s.as_str()) { - None => openapiv3::VariantOrUnknownOrEmpty::Empty, - Some("float") => openapiv3::VariantOrUnknownOrEmpty::Item( - openapiv3::NumberFormat::Float, - ), - Some("double") => openapiv3::VariantOrUnknownOrEmpty::Item( - openapiv3::NumberFormat::Double, - ), - Some(other) => { - openapiv3::VariantOrUnknownOrEmpty::Unknown(other.to_string()) - } - }; - - let (multiple_of, minimum, exclusive_minimum, maximum, exclusive_maximum) = - match number { - None => (None, None, false, None, false), - Some(number) => { - let multiple_of = number.multiple_of; - let (minimum, exclusive_minimum) = - match (number.minimum, number.exclusive_minimum) { - (None, None) => (None, false), - (s @ Some(_), None) => (s, false), - (None, s @ Some(_)) => (s, true), - _ => panic!("invalid"), - }; - let (maximum, exclusive_maximum) = - match (number.maximum, number.exclusive_maximum) { - (None, None) => (None, false), - (s @ Some(_), None) => (s, false), - (None, s @ Some(_)) => (s, true), - _ => panic!("invalid"), - }; - - ( - multiple_of, - minimum, - exclusive_minimum, - maximum, - exclusive_maximum, - ) - } - }; - - let enumeration = enum_values - .iter() - .flat_map(|v| { - v.iter().map(|vv| match vv { - serde_json::Value::Null => None, - 
serde_json::Value::Number(value) => { - Some(value.as_f64().unwrap()) - } - _ => panic!("unexpected enumeration value {:?}", vv), - }) - }) - .collect::>(); - - openapiv3::SchemaKind::Type(openapiv3::Type::Number( - openapiv3::NumberType { - format, - multiple_of, - exclusive_minimum, - exclusive_maximum, - minimum, - maximum, - enumeration, - }, - )) -} - -fn j2oas_string( - format: &Option, - string: &Option>, - enum_values: &Option>, -) -> openapiv3::SchemaKind { - let format = match format.as_ref().map(|s| s.as_str()) { - None => openapiv3::VariantOrUnknownOrEmpty::Empty, - Some("date") => openapiv3::VariantOrUnknownOrEmpty::Item( - openapiv3::StringFormat::Date, - ), - Some("date-time") => openapiv3::VariantOrUnknownOrEmpty::Item( - openapiv3::StringFormat::DateTime, - ), - Some("password") => openapiv3::VariantOrUnknownOrEmpty::Item( - openapiv3::StringFormat::Password, - ), - Some("byte") => openapiv3::VariantOrUnknownOrEmpty::Item( - openapiv3::StringFormat::Byte, - ), - Some("binary") => openapiv3::VariantOrUnknownOrEmpty::Item( - openapiv3::StringFormat::Binary, - ), - Some(other) => { - openapiv3::VariantOrUnknownOrEmpty::Unknown(other.to_string()) - } - }; - - let (max_length, min_length, pattern) = match string.as_ref() { - None => (None, None, None), - Some(string) => ( - string.max_length.map(|n| n as usize), - string.min_length.map(|n| n as usize), - string.pattern.clone(), - ), - }; - - let enumeration = enum_values - .iter() - .flat_map(|v| { - v.iter().map(|vv| match vv { - serde_json::Value::Null => None, - serde_json::Value::String(s) => Some(s.clone()), - _ => panic!("unexpected enumeration value {:?}", vv), - }) - }) - .collect::>(); - - openapiv3::SchemaKind::Type(openapiv3::Type::String( - openapiv3::StringType { - format, - pattern, - enumeration, - min_length, - max_length, - }, - )) -} - -fn j2oas_array( - array: &Option>, -) -> openapiv3::SchemaKind { - let arr = array.as_ref().unwrap(); - - openapiv3::SchemaKind::Type(openapiv3::Type::Array(openapiv3::ArrayType { - items: match &arr.items { - Some(schemars::schema::SingleOrVec::Single(schema)) => { - Some(box_reference_or(j2oas_schema(None, &schema))) - } - Some(schemars::schema::SingleOrVec::Vec(_)) => { - panic!("OpenAPI v3.0.x cannot support tuple-like arrays") - } - None => None, - }, - min_items: arr.min_items.map(|n| n as usize), - max_items: arr.max_items.map(|n| n as usize), - unique_items: arr.unique_items.unwrap_or(false), - })) -} - -fn box_reference_or( - r: openapiv3::ReferenceOr, -) -> openapiv3::ReferenceOr> { - match r { - openapiv3::ReferenceOr::Item(schema) => { - openapiv3::ReferenceOr::boxed_item(schema) - } - openapiv3::ReferenceOr::Reference { reference } => { - openapiv3::ReferenceOr::Reference { reference } - } - } -} - -fn j2oas_object( - object: &Option>, -) -> openapiv3::SchemaKind { - match object { - None => openapiv3::SchemaKind::Type(openapiv3::Type::Object( - openapiv3::ObjectType::default(), - )), - Some(obj) => openapiv3::SchemaKind::Type(openapiv3::Type::Object( - openapiv3::ObjectType { - properties: obj - .properties - .iter() - .map(|(prop, schema)| { - ( - prop.clone(), - box_reference_or(j2oas_schema(None, schema)), - ) - }) - .collect::<_>(), - required: obj.required.iter().cloned().collect::<_>(), - additional_properties: obj.additional_properties.as_ref().map( - |schema| match schema.as_ref() { - schemars::schema::Schema::Bool(b) => { - openapiv3::AdditionalProperties::Any(*b) - } - schemars::schema::Schema::Object(obj) => { - 
openapiv3::AdditionalProperties::Schema(Box::new( - j2oas_schema_object(None, obj), - )) - } - }, - ), - min_properties: obj.min_properties.map(|n| n as usize), - max_properties: obj.max_properties.map(|n| n as usize), - }, - )), - } -} - /// This object is used to specify configuration for building an OpenAPI /// definition document. It is constructed using [`ApiDescription::openapi()`]. /// Additional optional properties may be added and then the OpenAPI definition @@ -1528,8 +1095,6 @@ impl Default for ExtensionMode { #[cfg(test)] mod test { - use super::j2oas_schema; - use crate::api_description::j2oas_schema_object; use crate::endpoint; use crate::error::HttpError; use crate::handler::RequestContext; @@ -1636,62 +1201,6 @@ mod test { api.register(test_badpath_handler).unwrap(); } - #[test] - fn test_empty_struct() { - #[derive(JsonSchema)] - struct Empty {} - - let settings = schemars::gen::SchemaSettings::openapi3(); - let mut generator = schemars::gen::SchemaGenerator::new(settings); - - let schema = Empty::json_schema(&mut generator); - let _ = j2oas_schema(None, &schema); - } - - #[test] - fn test_garbage_barge_structure_conversion() { - #[allow(dead_code)] - #[derive(JsonSchema)] - struct SuperGarbage { - string: String, - strings: Vec, - more_strings: [String; 3], - substruct: Substruct, - more: Option, - union: Union, - map: std::collections::BTreeMap, - } - - #[allow(dead_code)] - #[derive(JsonSchema)] - struct Substruct { - ii32: i32, - uu64: u64, - ff: f32, - dd: f64, - b: bool, - } - - #[allow(dead_code)] - #[derive(JsonSchema)] - enum Union { - A { a: u32 }, - B { b: f32 }, - } - - let settings = schemars::gen::SchemaSettings::openapi3(); - let mut generator = schemars::gen::SchemaGenerator::new(settings); - - let schema = SuperGarbage::json_schema(&mut generator); - let _ = j2oas_schema(None, &schema); - for (key, schema) in generator.definitions().iter() { - let _ = j2oas_schema(Some(key), schema); - } - } - - // XXX-dap TODO-coverage need a test for trying to use two - // ExclusiveExtractors - #[test] fn test_dup_names() { #[derive(Deserialize, JsonSchema)] @@ -1724,52 +1233,6 @@ mod test { ); } - #[test] - fn test_additional_properties() { - #[allow(dead_code)] - #[derive(JsonSchema)] - enum Union { - A { a: u32 }, - } - let settings = schemars::gen::SchemaSettings::openapi3(); - let mut generator = schemars::gen::SchemaGenerator::new(settings); - let schema = Union::json_schema(&mut generator); - let _ = j2oas_schema(None, &schema); - for (key, schema) in generator.definitions().iter() { - let _ = j2oas_schema(Some(key), schema); - } - } - - #[test] - fn test_nullable() { - #[allow(dead_code)] - #[derive(JsonSchema)] - struct Foo { - bar: String, - } - let settings = schemars::gen::SchemaSettings::openapi3(); - let generator = schemars::gen::SchemaGenerator::new(settings); - let root_schema = generator.into_root_schema_for::>(); - let schema = root_schema.schema; - let os = j2oas_schema_object(None, &schema); - - assert_eq!( - os, - openapiv3::ReferenceOr::Item(openapiv3::Schema { - schema_data: openapiv3::SchemaData { - title: Some("Nullable_Foo".to_string()), - nullable: true, - ..Default::default() - }, - schema_kind: openapiv3::SchemaKind::AllOf { - all_of: vec![openapiv3::ReferenceOr::Reference { - reference: "#/components/schemas/Foo".to_string() - }], - }, - }) - ); - } - #[test] fn test_tags_need_one() { let mut api = ApiDescription::new().tag_config(TagConfig { @@ -1888,48 +1351,4 @@ mod test { .collect::>() ) } - - #[test] - #[should_panic] - fn 
test_bad_schema() { - #![allow(unused)] - - #[derive(JsonSchema)] - #[schemars(tag = "which")] - enum Which { - This, - That, - } - - #[derive(JsonSchema)] - struct BlackSheep { - #[schemars(flatten)] - you_can_get_with: Which, - } - - let schema = schemars::schema_for!(BlackSheep).schema; - - let _ = j2oas_schema_object(None, &schema); - } - - #[test] - #[should_panic] - fn test_two_types() { - #![allow(unused)] - - #[derive(JsonSchema)] - enum One { - One, - } - - #[derive(JsonSchema)] - struct Uno { - #[schemars(flatten)] - one: One, - } - - let schema = schemars::schema_for!(Uno).schema; - - let _ = j2oas_schema_object(None, &schema); - } } diff --git a/dropshot/src/extractor/common.rs b/dropshot/src/extractor/common.rs index 509f4887d..2eec965af 100644 --- a/dropshot/src/extractor/common.rs +++ b/dropshot/src/extractor/common.rs @@ -1,6 +1,4 @@ -// Copyright 2022 Oxide Computer Company - -// XXX-dap TODO-cleanup should the metadata into a separate, shared trait? +// Copyright 2023 Oxide Computer Company use crate::api_description::ApiEndpointParameter; use crate::api_description::{ApiEndpointBodyContentType, ExtensionMode}; @@ -72,21 +70,27 @@ impl ExclusiveExtractor for S { /// Top-level extractor for a given request /// -/// During request handling, we wind up needing to call a function with a -/// variable number of arguments whose types are all extractors (either -/// `SharedExtractor` or `ExclusiveExtractor`). We achieve this with a separate -/// type called `RequestExtractor` that looks just like `ExclusiveExtractor`. -/// We can impl this trait on a tuple of any number of types that themselves -/// impl `SharedExtractor` or `ExclusiveExtractor` by delegating to each type's -/// extractor implementation. There may be at most one `ExclusiveExtractor` in -/// the tuple. We require it to be the last argument just to avoid having to -/// define the power set of impls. +/// During request handling, we must find and invoke the appropriate +/// consumer-defined handler function. While each of these functions takes a +/// fixed number of arguments, different handler functions may take a different +/// number of arguments. The arguments that can vary between handler functions +/// are all extractors, meaning that they impl `SharedExtractor` or +/// `ExclusiveExtractor`. +/// +/// This trait helps us invoke various handler functions uniformly, despite them +/// accepting different arguments. To achieve this, we impl this trait for all +/// supported _tuples_ of argument types, which is essentially 0 or more +/// `SharedExtractor`s followed by at most one `ExclusiveExtractor`. This impl +/// essentially does the same thing as any other extractor, and it does it by +/// delegating to the impls of each tuple member. /// -/// In practice, `RequestExtractor` is identical to `ExclusiveExtractor`. But -/// we use them in different ways. `RequestExtractor` is private, only -/// implemented on tuple types, and only used to kick off extraction. -/// `ExclusiveExtractor` can be consumer-defined and would generally not be -/// implemented on tuple types. +/// In practice, the trait `RequestExtractor` is identical to +/// `ExclusiveExtractor` and we could use `ExclusiveExtractor` directly. But +/// it's clearer to use distinct types, since they're used differently. To +/// summarize: `RequestExtractor` is private, only implemented on tuple types, +/// and only used to kick off extraction from the top level. 
+/// `ExclusiveExtractor` is public: implementing types can be consumer-defined,
+/// and it would generally not be implemented on tuple types.
 #[async_trait]
 pub trait RequestExtractor: Send + Sync + Sized {
     /// Construct an instance of this type from a `RequestContext`.
@@ -137,16 +141,16 @@ impl<X: ExclusiveExtractor + 'static> RequestExtractor for (X,) {
     }
 }
 
-// XXX-dap TODO-doc update comment based on the change that uses the fact that
-// SharedExtractor impls ExclusiveExtractor such that the last item in the
-// tuple *must* be an exclusive extractor
 /// Defines implementations of `RequestExtractor` for tuples of one or more
 /// `SharedExtractor` followed by an `ExclusiveExtractor`
 ///
 /// As an example, `impl_rqextractor_for_tuple!(S1, S2)` defines an impl of
 /// `RequestExtractor` for tuple `(S1, S2, X)` where `S1: SharedExtractor`,
-/// `S2: SharedExtractor`, and `X: ExclusiveExtractor`, as well as a similar
-/// impl for just `(S1, S2)`.
+/// `S2: SharedExtractor`, and `X: ExclusiveExtractor`. Note that any
+/// `SharedExtractor` also impls `ExclusiveExtractor`, so it's not necessary to
+/// impl this separately for `(S1, S2, S3)` (and indeed that would not be
+/// possible, since it would overlap with the definition for `(S1, S2, X)`, even
+/// if `SharedExtractor` did not impl `ExclusiveExtractor`).
 macro_rules! impl_rqextractor_for_tuple {
     ($( $S:ident),+) => {
diff --git a/dropshot/src/extractor/mod.rs b/dropshot/src/extractor/mod.rs
index 8c477a431..d659f683a 100644
--- a/dropshot/src/extractor/mod.rs
+++ b/dropshot/src/extractor/mod.rs
@@ -1,6 +1,6 @@
-// Copyright 2022 Oxide Computer Company
+// Copyright 2023 Oxide Computer Company
 
-//! Extractor trait
+//! Extractor-related traits
 //!
 //! See top-level crate documentation for details
 
@@ -37,8 +37,6 @@ pub use common::ExtractorMetadata;
 pub use common::RequestExtractor;
 pub use common::SharedExtractor;
 
-// XXX-dap move these definitions to separate files?
-
 // Query: query string extractor
 
 /// `Query` is an extractor used to deserialize an instance of
diff --git a/dropshot/src/handler.rs b/dropshot/src/handler.rs
index 16558fb67..4f4fc3173 100644
--- a/dropshot/src/handler.rs
+++ b/dropshot/src/handler.rs
@@ -1,4 +1,4 @@
-// Copyright 2020 Oxide Computer Company
+// Copyright 2023 Oxide Computer Company
 //! Interface for implementing HTTP endpoint handler functions.
 //!
 //! For information about supported endpoint function signatures, argument types,
diff --git a/dropshot/src/lib.rs b/dropshot/src/lib.rs
index 7373274d5..83dc475cf 100644
--- a/dropshot/src/lib.rs
+++ b/dropshot/src/lib.rs
@@ -1,4 +1,4 @@
-// Copyright 2020 Oxide Computer Company
+// Copyright 2023 Oxide Computer Company
 //! Dropshot is a general-purpose crate for exposing REST APIs from a Rust
 //! program. Planned highlights include:
 //!
@@ -216,10 +216,8 @@
 //! ) -> Result<HttpResponse*, HttpError>
 //! ```
 //!
-//! Other than the RequestContext, parameters may appear in any order.
-//!
-//! The `Context` type is caller-provided context which is provided when
-//! the server is created.
+//! The `RequestContext` must appear first. The `Context` type is
+//! caller-provided context which is provided when the server is created.
 //!
 //! The types `Query`, `Path`, `TypedBody`, and `UntypedBody` are called
 //! **Extractors** because they cause information to be pulled out of the request
@@ -236,10 +234,14 @@
 //! of type `J`. `J` must implement `serde::Deserialize` and `schemars::JsonSchema`.
 //! * [`UntypedBody`] extracts the raw bytes of the request body.
 //!
-//! If the handler takes a `Query<Q>`, `Path<P>`, `TypedBody<J>`, or
-//! `UntypedBody`, and the corresponding extraction cannot be completed, the
-//! request fails with status code 400 and an error message reflecting a
-//! validation error.
+//! `Query` and `Path` impl `SharedExtractor`. `TypedBody` and `UntypedBody`
+//! impl `ExclusiveExtractor`. Your function may accept 0-3 extractors, but
+//! only one can be `ExclusiveExtractor`, and it must be the last one.
+//! Otherwise, the order of extractor arguments does not matter.
+//!
+//! If the handler accepts any extractors and the corresponding extraction
+//! cannot be completed, the request fails with status code 400 and an error
+//! message reflecting the error (usually a validation error).
 //!
 //! As with any serde-deserializable type, you can make fields optional by having
 //! the corresponding property of the type be an `Option`. Here's an example of
@@ -505,11 +507,12 @@
 //! See the [`RequestInfo`] and [`ResponseInfo`] types for a complete listing
 //! of what's available.
 //!
-//! These probes are implemented via the [`usdt`] crate. They require a nightly
-//! toolchain if built on MacOS (which requires the unstable `asm_sym` feature).
-//! Otherwise a stable compiler >= v1.59 is required in order to present the
-//! necessary features. Given these constraints, usdt functionality is behind
-//! the feature flag `"usdt-probes"`.
+//! These probes are implemented via the [`usdt`] crate. They may require a
+//! nightly toolchain if built on macOS prior to Rust version 1.66. Otherwise a
+//! stable compiler >= v1.59 is required in order to present the necessary
+//! features. Given these constraints, USDT functionality is behind the feature
+//! flag `"usdt-probes"`, which may become a default feature of this crate in
+//! future releases.
 //!
 //! > *Important:* The probes are internally registered with the DTrace kernel
 //! module, making them visible via `dtrace(1M)`. This is done when an `HttpServer`
@@ -540,15 +543,11 @@
 // Clippy's style advice is definitely valuable, but not worth the trouble for
 // automated enforcement.
 #![allow(clippy::style)]
-// The `usdt` crate requires nightly, enabled if our consumer is enabling
+// The `usdt` crate may require nightly, enabled if our consumer is enabling
 // DTrace probes.
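Editorial aside: a hypothetical endpoint making the ordering rule above concrete. The types and path are invented; the extractor and response types are the ones this crate exports.

```rust
use dropshot::{
    endpoint, HttpError, HttpResponseUpdatedNoContent, Query, RequestContext,
    TypedBody,
};
use schemars::JsonSchema;
use serde::Deserialize;
use std::sync::Arc;

#[derive(Deserialize, JsonSchema)]
struct UpdateQuery {
    dry_run: Option<bool>,
}

#[derive(Deserialize, JsonSchema)]
struct UpdateBody {
    name: String,
}

// The shared extractor (`Query`) may appear anywhere after `rqctx`; the
// exclusive extractor (`TypedBody`) must be the final argument. Swapping
// the two extractor arguments fails to compile, because `TypedBody` does
// not impl `SharedExtractor`.
#[endpoint {
    method = PUT,
    path = "/project",
}]
async fn update_project(
    rqctx: Arc<RequestContext<()>>,
    query: Query<UpdateQuery>,
    body: TypedBody<UpdateBody>,
) -> Result<HttpResponseUpdatedNoContent, HttpError> {
    let _ = (rqctx, query.into_inner(), body.into_inner());
    Ok(HttpResponseUpdatedNoContent())
}
```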
-#![cfg_attr(all(feature = "usdt-probes", not(usdt_stable_asm)), feature(asm))] +#![cfg_attr(all(feature = "usdt-probes", usdt_need_asm), feature(asm))] #![cfg_attr( - all( - feature = "usdt-probes", - target_os = "macos", - not(usdt_stable_asm_sym) - ), + all(feature = "usdt-probes", target_os = "macos", usdt_need_asm_sym), feature(asm_sym) )] diff --git a/dropshot/src/pagination.rs b/dropshot/src/pagination.rs index a854b50b6..76e4f6dae 100644 --- a/dropshot/src/pagination.rs +++ b/dropshot/src/pagination.rs @@ -99,9 +99,8 @@ use crate::error::HttpError; use crate::from_map::from_map; -use base64::alphabet::URL_SAFE; -use base64::engine::fast_portable::FastPortable; -use base64::engine::fast_portable::PAD; +use base64::engine::general_purpose::URL_SAFE; +use base64::Engine; use schemars::JsonSchema; use serde::de::DeserializeOwned; use serde::Deserialize; @@ -407,7 +406,7 @@ fn serialize_page_token( )) })?; - base64::encode_engine(json_bytes, &FastPortable::from(&URL_SAFE, PAD)) + URL_SAFE.encode(json_bytes) }; // TODO-robustness is there a way for us to know at compile-time that @@ -437,11 +436,9 @@ fn deserialize_page_token( "failed to parse pagination token: too large", )); } - let json_bytes = base64::decode_engine( - token_str.as_bytes(), - &FastPortable::from(&URL_SAFE, PAD), - ) - .map_err(|e| format!("failed to parse pagination token: {}", e))?; + let json_bytes = URL_SAFE + .decode(token_str.as_bytes()) + .map_err(|e| format!("failed to parse pagination token: {}", e))?; // TODO-debugging: we don't want the user to have to know about the // internal structure of the token, so the error message here doesn't @@ -475,6 +472,8 @@ mod test { use super::ResultsPage; use super::WhichPage; use super::PAGINATION_PARAM_SENTINEL; + use base64::engine::general_purpose::URL_SAFE; + use base64::Engine; use schemars::JsonSchema; use serde::de::DeserializeOwned; use serde::Deserialize; @@ -540,26 +539,26 @@ mod test { // Non-JSON let error = - deserialize_page_token::(&base64::encode("{")) + deserialize_page_token::(&URL_SAFE.encode("{")) .unwrap_err(); assert!(error.contains("corrupted token")); // Wrong top-level JSON type let error = - deserialize_page_token::(&base64::encode("[]")) + deserialize_page_token::(&URL_SAFE.encode("[]")) .unwrap_err(); assert!(error.contains("corrupted token")); // Structure does not match our general Dropshot schema. let error = - deserialize_page_token::(&base64::encode("{}")) + deserialize_page_token::(&URL_SAFE.encode("{}")) .unwrap_err(); assert!(error.contains("corrupted token")); // Bad version - let error = deserialize_page_token::(&base64::encode( - "{\"v\":11}", - )) + let error = deserialize_page_token::( + &URL_SAFE.encode("{\"v\":11}"), + ) .unwrap_err(); assert!(error.contains("corrupted token")); } diff --git a/dropshot/src/schema_util.rs b/dropshot/src/schema_util.rs index d08da8dda..65975ee34 100644 --- a/dropshot/src/schema_util.rs +++ b/dropshot/src/schema_util.rs @@ -1,4 +1,4 @@ -// Copyright 2022 Oxide Computer Company +// Copyright 2023 Oxide Computer Company //! schemars helper functions @@ -256,3 +256,587 @@ pub(crate) fn schema_extract_description( } } } + +/// Convert from JSON Schema into OpenAPI. +// TODO Initially this seemed like it was going to be a win, but the versions +// of JSON Schema that the schemars and openapiv3 crates adhere to are just +// different enough to make the conversion a real pain in the neck. 
A better
+// approach might be a derive(OpenAPI)-like thing, or even a generic
+// derive(schema) that we could then marshall into OpenAPI.
+// The schemars crate also seems a bit inflexible when it comes to how the
+// schema is generated wrt references vs. inline types.
+pub(crate) fn j2oas_schema(
+    name: Option<&String>,
+    schema: &schemars::schema::Schema,
+) -> openapiv3::ReferenceOr<openapiv3::Schema> {
+    match schema {
+        // The permissive, "match anything" schema. We'll typically see this
+        // when consumers use a type such as serde_json::Value.
+        schemars::schema::Schema::Bool(true) => {
+            openapiv3::ReferenceOr::Item(openapiv3::Schema {
+                schema_data: openapiv3::SchemaData::default(),
+                schema_kind: openapiv3::SchemaKind::Any(
+                    openapiv3::AnySchema::default(),
+                ),
+            })
+        }
+        schemars::schema::Schema::Bool(false) => {
+            panic!("We don't expect to see a schema that matches the null set")
+        }
+        schemars::schema::Schema::Object(obj) => j2oas_schema_object(name, obj),
+    }
+}
+
+fn j2oas_schema_object(
+    name: Option<&String>,
+    obj: &schemars::schema::SchemaObject,
+) -> openapiv3::ReferenceOr<openapiv3::Schema> {
+    if let Some(reference) = &obj.reference {
+        return openapiv3::ReferenceOr::Reference {
+            reference: reference.clone(),
+        };
+    }
+
+    let ty = match &obj.instance_type {
+        Some(schemars::schema::SingleOrVec::Single(ty)) => Some(ty.as_ref()),
+        Some(schemars::schema::SingleOrVec::Vec(_)) => {
+            panic!(
+                "a type array is unsupported by openapiv3:\n{}",
+                serde_json::to_string_pretty(obj)
+                    .unwrap_or_else(|_| "<can't serialize>".to_string())
+            )
+        }
+        None => None,
+    };
+
+    let kind = match (ty, &obj.subschemas) {
+        (Some(schemars::schema::InstanceType::Null), None) => {
+            openapiv3::SchemaKind::Type(openapiv3::Type::String(
+                openapiv3::StringType {
+                    enumeration: vec![None],
+                    ..Default::default()
+                },
+            ))
+        }
+        (Some(schemars::schema::InstanceType::Boolean), None) => {
+            openapiv3::SchemaKind::Type(openapiv3::Type::Boolean {})
+        }
+        (Some(schemars::schema::InstanceType::Object), None) => {
+            j2oas_object(&obj.object)
+        }
+        (Some(schemars::schema::InstanceType::Array), None) => {
+            j2oas_array(&obj.array)
+        }
+        (Some(schemars::schema::InstanceType::Number), None) => {
+            j2oas_number(&obj.format, &obj.number, &obj.enum_values)
+        }
+        (Some(schemars::schema::InstanceType::String), None) => {
+            j2oas_string(&obj.format, &obj.string, &obj.enum_values)
+        }
+        (Some(schemars::schema::InstanceType::Integer), None) => {
+            j2oas_integer(&obj.format, &obj.number, &obj.enum_values)
+        }
+        (None, Some(subschema)) => j2oas_subschemas(subschema),
+        (None, None) => {
+            openapiv3::SchemaKind::Any(openapiv3::AnySchema::default())
+        }
+        (Some(_), Some(_)) => panic!(
+            "a schema can't have both a type and subschemas:\n{}",
+            serde_json::to_string_pretty(&obj)
+                .unwrap_or_else(|_| "<can't serialize>".to_string())
+        ),
+    };
+
+    let mut data = openapiv3::SchemaData::default();
+
+    if matches!(
+        &obj.extensions.get("nullable"),
+        Some(serde_json::Value::Bool(true))
+    ) {
+        data.nullable = true;
+    }
+
+    if let Some(metadata) = &obj.metadata {
+        data.title = metadata.title.clone();
+        data.description = metadata.description.clone();
+        data.default = metadata.default.clone();
+        data.deprecated = metadata.deprecated;
+        data.read_only = metadata.read_only;
+        data.write_only = metadata.write_only;
+    }
+
+    if let Some(name) = name {
+        data.title = Some(name.clone());
+    }
+    if let Some(example) = obj.extensions.get("example") {
+        data.example = Some(example.clone());
+    }
+
+    openapiv3::ReferenceOr::Item(openapiv3::Schema {
+        schema_data: data,
+        schema_kind: kind,
+    })
+}
+
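Editorial aside: a sketch of how this conversion entry point gets exercised, modeled directly on the tests at the bottom of this same file; the `Widget` type is invented.

```rust
// Hypothetical test-style usage (`j2oas_schema` is crate-private, so this
// only makes sense inside `schema_util.rs` itself).
#[cfg(test)]
mod j2oas_usage_sketch {
    use super::j2oas_schema;
    use schemars::JsonSchema;

    #[allow(dead_code)]
    #[derive(JsonSchema)]
    struct Widget {
        id: u32,
        label: Option<String>,
    }

    #[test]
    fn convert_widget() {
        // Generate an OpenAPI-flavored JSON Schema, then convert it.
        let settings = schemars::gen::SchemaSettings::openapi3();
        let mut generator = schemars::gen::SchemaGenerator::new(settings);
        let schema = Widget::json_schema(&mut generator);
        let _ = j2oas_schema(None, &schema);
        // Referenced definitions are converted separately, by name.
        for (name, schema) in generator.definitions().iter() {
            let _ = j2oas_schema(Some(name), schema);
        }
    }
}
```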
+fn j2oas_subschemas( + subschemas: &schemars::schema::SubschemaValidation, +) -> openapiv3::SchemaKind { + match ( + &subschemas.all_of, + &subschemas.any_of, + &subschemas.one_of, + &subschemas.not, + ) { + (Some(all_of), None, None, None) => openapiv3::SchemaKind::AllOf { + all_of: all_of + .iter() + .map(|schema| j2oas_schema(None, schema)) + .collect::>(), + }, + (None, Some(any_of), None, None) => openapiv3::SchemaKind::AnyOf { + any_of: any_of + .iter() + .map(|schema| j2oas_schema(None, schema)) + .collect::>(), + }, + (None, None, Some(one_of), None) => openapiv3::SchemaKind::OneOf { + one_of: one_of + .iter() + .map(|schema| j2oas_schema(None, schema)) + .collect::>(), + }, + (None, None, None, Some(not)) => openapiv3::SchemaKind::Not { + not: Box::new(j2oas_schema(None, not)), + }, + _ => panic!("invalid subschema {:#?}", subschemas), + } +} + +fn j2oas_integer( + format: &Option, + number: &Option>, + enum_values: &Option>, +) -> openapiv3::SchemaKind { + let format = match format.as_ref().map(|s| s.as_str()) { + None => openapiv3::VariantOrUnknownOrEmpty::Empty, + Some("int32") => openapiv3::VariantOrUnknownOrEmpty::Item( + openapiv3::IntegerFormat::Int32, + ), + Some("int64") => openapiv3::VariantOrUnknownOrEmpty::Item( + openapiv3::IntegerFormat::Int64, + ), + Some(other) => { + openapiv3::VariantOrUnknownOrEmpty::Unknown(other.to_string()) + } + }; + + let (multiple_of, minimum, exclusive_minimum, maximum, exclusive_maximum) = + match number { + None => (None, None, false, None, false), + Some(number) => { + let multiple_of = number.multiple_of.map(|f| f as i64); + let (minimum, exclusive_minimum) = + match (number.minimum, number.exclusive_minimum) { + (None, None) => (None, false), + (Some(f), None) => (Some(f as i64), false), + (None, Some(f)) => (Some(f as i64), true), + _ => panic!("invalid"), + }; + let (maximum, exclusive_maximum) = + match (number.maximum, number.exclusive_maximum) { + (None, None) => (None, false), + (Some(f), None) => (Some(f as i64), false), + (None, Some(f)) => (Some(f as i64), true), + _ => panic!("invalid"), + }; + + ( + multiple_of, + minimum, + exclusive_minimum, + maximum, + exclusive_maximum, + ) + } + }; + + let enumeration = enum_values + .iter() + .flat_map(|v| { + v.iter().map(|vv| match vv { + serde_json::Value::Null => None, + serde_json::Value::Number(value) => { + Some(value.as_i64().unwrap()) + } + _ => panic!("unexpected enumeration value {:?}", vv), + }) + }) + .collect::>(); + + openapiv3::SchemaKind::Type(openapiv3::Type::Integer( + openapiv3::IntegerType { + format, + multiple_of, + exclusive_minimum, + exclusive_maximum, + minimum, + maximum, + enumeration, + }, + )) +} + +fn j2oas_number( + format: &Option, + number: &Option>, + enum_values: &Option>, +) -> openapiv3::SchemaKind { + let format = match format.as_ref().map(|s| s.as_str()) { + None => openapiv3::VariantOrUnknownOrEmpty::Empty, + Some("float") => openapiv3::VariantOrUnknownOrEmpty::Item( + openapiv3::NumberFormat::Float, + ), + Some("double") => openapiv3::VariantOrUnknownOrEmpty::Item( + openapiv3::NumberFormat::Double, + ), + Some(other) => { + openapiv3::VariantOrUnknownOrEmpty::Unknown(other.to_string()) + } + }; + + let (multiple_of, minimum, exclusive_minimum, maximum, exclusive_maximum) = + match number { + None => (None, None, false, None, false), + Some(number) => { + let multiple_of = number.multiple_of; + let (minimum, exclusive_minimum) = + match (number.minimum, number.exclusive_minimum) { + (None, None) => (None, false), + (s @ Some(_), None) => 
(s, false), + (None, s @ Some(_)) => (s, true), + _ => panic!("invalid"), + }; + let (maximum, exclusive_maximum) = + match (number.maximum, number.exclusive_maximum) { + (None, None) => (None, false), + (s @ Some(_), None) => (s, false), + (None, s @ Some(_)) => (s, true), + _ => panic!("invalid"), + }; + + ( + multiple_of, + minimum, + exclusive_minimum, + maximum, + exclusive_maximum, + ) + } + }; + + let enumeration = enum_values + .iter() + .flat_map(|v| { + v.iter().map(|vv| match vv { + serde_json::Value::Null => None, + serde_json::Value::Number(value) => { + Some(value.as_f64().unwrap()) + } + _ => panic!("unexpected enumeration value {:?}", vv), + }) + }) + .collect::>(); + + openapiv3::SchemaKind::Type(openapiv3::Type::Number( + openapiv3::NumberType { + format, + multiple_of, + exclusive_minimum, + exclusive_maximum, + minimum, + maximum, + enumeration, + }, + )) +} + +fn j2oas_string( + format: &Option, + string: &Option>, + enum_values: &Option>, +) -> openapiv3::SchemaKind { + let format = match format.as_ref().map(|s| s.as_str()) { + None => openapiv3::VariantOrUnknownOrEmpty::Empty, + Some("date") => openapiv3::VariantOrUnknownOrEmpty::Item( + openapiv3::StringFormat::Date, + ), + Some("date-time") => openapiv3::VariantOrUnknownOrEmpty::Item( + openapiv3::StringFormat::DateTime, + ), + Some("password") => openapiv3::VariantOrUnknownOrEmpty::Item( + openapiv3::StringFormat::Password, + ), + Some("byte") => openapiv3::VariantOrUnknownOrEmpty::Item( + openapiv3::StringFormat::Byte, + ), + Some("binary") => openapiv3::VariantOrUnknownOrEmpty::Item( + openapiv3::StringFormat::Binary, + ), + Some(other) => { + openapiv3::VariantOrUnknownOrEmpty::Unknown(other.to_string()) + } + }; + + let (max_length, min_length, pattern) = match string.as_ref() { + None => (None, None, None), + Some(string) => ( + string.max_length.map(|n| n as usize), + string.min_length.map(|n| n as usize), + string.pattern.clone(), + ), + }; + + let enumeration = enum_values + .iter() + .flat_map(|v| { + v.iter().map(|vv| match vv { + serde_json::Value::Null => None, + serde_json::Value::String(s) => Some(s.clone()), + _ => panic!("unexpected enumeration value {:?}", vv), + }) + }) + .collect::>(); + + openapiv3::SchemaKind::Type(openapiv3::Type::String( + openapiv3::StringType { + format, + pattern, + enumeration, + min_length, + max_length, + }, + )) +} + +fn j2oas_array( + array: &Option>, +) -> openapiv3::SchemaKind { + let arr = array.as_ref().unwrap(); + + openapiv3::SchemaKind::Type(openapiv3::Type::Array(openapiv3::ArrayType { + items: match &arr.items { + Some(schemars::schema::SingleOrVec::Single(schema)) => { + Some(box_reference_or(j2oas_schema(None, &schema))) + } + Some(schemars::schema::SingleOrVec::Vec(_)) => { + panic!("OpenAPI v3.0.x cannot support tuple-like arrays") + } + None => None, + }, + min_items: arr.min_items.map(|n| n as usize), + max_items: arr.max_items.map(|n| n as usize), + unique_items: arr.unique_items.unwrap_or(false), + })) +} + +fn box_reference_or( + r: openapiv3::ReferenceOr, +) -> openapiv3::ReferenceOr> { + match r { + openapiv3::ReferenceOr::Item(schema) => { + openapiv3::ReferenceOr::boxed_item(schema) + } + openapiv3::ReferenceOr::Reference { reference } => { + openapiv3::ReferenceOr::Reference { reference } + } + } +} + +fn j2oas_object( + object: &Option>, +) -> openapiv3::SchemaKind { + match object { + None => openapiv3::SchemaKind::Type(openapiv3::Type::Object( + openapiv3::ObjectType::default(), + )), + Some(obj) => 
openapiv3::SchemaKind::Type(openapiv3::Type::Object( + openapiv3::ObjectType { + properties: obj + .properties + .iter() + .map(|(prop, schema)| { + ( + prop.clone(), + box_reference_or(j2oas_schema(None, schema)), + ) + }) + .collect::<_>(), + required: obj.required.iter().cloned().collect::<_>(), + additional_properties: obj.additional_properties.as_ref().map( + |schema| match schema.as_ref() { + schemars::schema::Schema::Bool(b) => { + openapiv3::AdditionalProperties::Any(*b) + } + schemars::schema::Schema::Object(obj) => { + openapiv3::AdditionalProperties::Schema(Box::new( + j2oas_schema_object(None, obj), + )) + } + }, + ), + min_properties: obj.min_properties.map(|n| n as usize), + max_properties: obj.max_properties.map(|n| n as usize), + }, + )), + } +} + +#[cfg(test)] +mod test { + use super::j2oas_schema; + use super::j2oas_schema_object; + use schemars::JsonSchema; + + #[test] + fn test_empty_struct() { + #[derive(JsonSchema)] + struct Empty {} + + let settings = schemars::gen::SchemaSettings::openapi3(); + let mut generator = schemars::gen::SchemaGenerator::new(settings); + + let schema = Empty::json_schema(&mut generator); + let _ = j2oas_schema(None, &schema); + } + + #[test] + fn test_garbage_barge_structure_conversion() { + #[allow(dead_code)] + #[derive(JsonSchema)] + struct SuperGarbage { + string: String, + strings: Vec, + more_strings: [String; 3], + substruct: Substruct, + more: Option, + union: Union, + map: std::collections::BTreeMap, + } + + #[allow(dead_code)] + #[derive(JsonSchema)] + struct Substruct { + ii32: i32, + uu64: u64, + ff: f32, + dd: f64, + b: bool, + } + + #[allow(dead_code)] + #[derive(JsonSchema)] + enum Union { + A { a: u32 }, + B { b: f32 }, + } + + let settings = schemars::gen::SchemaSettings::openapi3(); + let mut generator = schemars::gen::SchemaGenerator::new(settings); + + let schema = SuperGarbage::json_schema(&mut generator); + let _ = j2oas_schema(None, &schema); + for (key, schema) in generator.definitions().iter() { + let _ = j2oas_schema(Some(key), schema); + } + } + + #[test] + fn test_additional_properties() { + #[allow(dead_code)] + #[derive(JsonSchema)] + enum Union { + A { a: u32 }, + } + let settings = schemars::gen::SchemaSettings::openapi3(); + let mut generator = schemars::gen::SchemaGenerator::new(settings); + let schema = Union::json_schema(&mut generator); + let _ = j2oas_schema(None, &schema); + for (key, schema) in generator.definitions().iter() { + let _ = j2oas_schema(Some(key), schema); + } + } + + #[test] + fn test_nullable() { + #[allow(dead_code)] + #[derive(JsonSchema)] + struct Foo { + bar: String, + } + let settings = schemars::gen::SchemaSettings::openapi3(); + let generator = schemars::gen::SchemaGenerator::new(settings); + let root_schema = generator.into_root_schema_for::>(); + let schema = root_schema.schema; + let os = j2oas_schema_object(None, &schema); + + assert_eq!( + os, + openapiv3::ReferenceOr::Item(openapiv3::Schema { + schema_data: openapiv3::SchemaData { + title: Some("Nullable_Foo".to_string()), + nullable: true, + ..Default::default() + }, + schema_kind: openapiv3::SchemaKind::AllOf { + all_of: vec![openapiv3::ReferenceOr::Reference { + reference: "#/components/schemas/Foo".to_string() + }], + }, + }) + ); + } + + #[test] + #[should_panic] + fn test_bad_schema() { + #![allow(unused)] + + #[derive(JsonSchema)] + #[schemars(tag = "which")] + enum Which { + This, + That, + } + + #[derive(JsonSchema)] + struct BlackSheep { + #[schemars(flatten)] + you_can_get_with: Which, + } + + let schema = 
schemars::schema_for!(BlackSheep).schema; + + let _ = j2oas_schema_object(None, &schema); + } + + #[test] + #[should_panic] + fn test_two_types() { + #![allow(unused)] + + #[derive(JsonSchema)] + enum One { + One, + } + + #[derive(JsonSchema)] + struct Uno { + #[schemars(flatten)] + one: One, + } + + let schema = schemars::schema_for!(Uno).schema; + + let _ = j2oas_schema_object(None, &schema); + } +} diff --git a/dropshot/src/websocket.rs b/dropshot/src/websocket.rs index 3a218ea85..df63d2c23 100644 --- a/dropshot/src/websocket.rs +++ b/dropshot/src/websocket.rs @@ -1,4 +1,4 @@ -// Copyright 2022 Oxide Computer Company +// Copyright 2023 Oxide Computer Company //! Implements websocket upgrades as an Extractor for use in API route handler //! parameters to indicate that the given endpoint is meant to be upgraded to //! a websocket. @@ -12,6 +12,7 @@ use crate::{ HttpError, RequestContext, ServerContext, }; use async_trait::async_trait; +use base64::Engine; use http::header; use http::Response; use http::StatusCode; @@ -67,7 +68,8 @@ struct WebsocketUpgradeInner { ws_log: Logger, } -// Borrowed from tungstenite-0.17.3 (rather than taking a whole dependency for this one function) +// Originally copied from tungstenite-0.17.3 (rather than taking a whole +// dependency for this one function). fn derive_accept_key(request_key: &[u8]) -> String { // ... field is constructed by concatenating /key/ ... // ... with the string "258EAFA5-E914-47DA-95CA-C5AB0DC85B11" (RFC 6455) @@ -75,7 +77,7 @@ fn derive_accept_key(request_key: &[u8]) -> String { let mut sha1 = Sha1::default(); sha1.update(request_key); sha1.update(WS_GUID); - base64::encode(&sha1.finalize()) + base64::engine::general_purpose::STANDARD.encode(&sha1.finalize()) } /// This `ExclusiveExtractor` implementation constructs an instance of diff --git a/dropshot/tests/fail/bad_endpoint17.rs b/dropshot/tests/fail/bad_endpoint17.rs new file mode 100644 index 000000000..44dbc09f9 --- /dev/null +++ b/dropshot/tests/fail/bad_endpoint17.rs @@ -0,0 +1,37 @@ +// Copyright 2023 Oxide Computer Company + +#![allow(unused_imports)] + +use dropshot::endpoint; +use dropshot::HttpError; +use dropshot::HttpResponseOk; +use dropshot::RequestContext; +use dropshot::TypedBody; +use dropshot::UntypedBody; +use schemars::JsonSchema; +use serde::Deserialize; +use std::sync::Arc; + +#[allow(dead_code)] +#[derive(Deserialize, JsonSchema)] +struct Stuff { + x: String, +} + +// Test: two exclusive extractors. +// This winds up being tested implicitly by the fact that we test that middle +// parameters impl `SharedExtractor`. So this winds up being the same as a +// previous test case. However, it seems worth testing explicitly. 
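(Editorial aside on the `derive_accept_key` change in the websocket.rs hunk above: the derivation can be sanity-checked against the sample handshake in RFC 6455 §1.3. The sketch below is not part of the patch; it assumes the same `sha1` crate and `base64` 0.21 `Engine` API that the new code uses.)

```rust
use base64::Engine;
use sha1::{Digest, Sha1};

// Same scheme as dropshot/src/websocket.rs: SHA-1 over the client's key
// concatenated with the fixed GUID from RFC 6455, then base64-encoded.
fn derive_accept_key(request_key: &[u8]) -> String {
    const WS_GUID: &[u8] = b"258EAFA5-E914-47DA-95CA-C5AB0DC85B11";
    let mut sha1 = Sha1::default();
    sha1.update(request_key);
    sha1.update(WS_GUID);
    base64::engine::general_purpose::STANDARD.encode(sha1.finalize())
}

fn main() {
    // Sample values from RFC 6455 section 1.3.
    assert_eq!(
        derive_accept_key(b"dGhlIHNhbXBsZSBub25jZQ=="),
        "s3pPLMBiTxaQ9kYGzzhZRbK+xOo="
    );
}
```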
+#[endpoint { + method = GET, + path = "/test", +}] +async fn two_exclusive_extractors( + _rqctx: Arc>, + _param1: TypedBody, + _param2: UntypedBody, +) -> Result, HttpError> { + Ok(HttpResponseOk(())) +} + +fn main() {} diff --git a/dropshot/tests/fail/bad_endpoint17.stderr b/dropshot/tests/fail/bad_endpoint17.stderr new file mode 100644 index 000000000..bf40806dd --- /dev/null +++ b/dropshot/tests/fail/bad_endpoint17.stderr @@ -0,0 +1,38 @@ +error[E0277]: the trait bound `TypedBody: SharedExtractor` is not satisfied + --> tests/fail/bad_endpoint17.rs:31:14 + | +31 | _param1: TypedBody, + | ^^^^^^^^^^^^^^^^ the trait `SharedExtractor` is not implemented for `TypedBody` + | + = help: the following other types implement trait `SharedExtractor`: + dropshot::Path + dropshot::Query +note: required by a bound in `need_shared_extractor` + --> tests/fail/bad_endpoint17.rs:25:1 + | +25 | / #[endpoint { +26 | | method = GET, +27 | | path = "/test", +28 | | }] + | |__^ required by this bound in `need_shared_extractor` +... +31 | _param1: TypedBody, + | --------- required by a bound in this + = note: this error originates in the attribute macro `endpoint` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `fn(Arc>, TypedBody, UntypedBody) -> impl Future, HttpError>> { for ApiEndpoint<> as RequestContextArgument>::Context>>::from::two_exclusive_extractors}: dropshot::handler::HttpHandlerFunc<_, _, _>` is not satisfied + --> tests/fail/bad_endpoint17.rs:29:10 + | +25 | / #[endpoint { +26 | | method = GET, +27 | | path = "/test", +28 | | }] + | |__- required by a bound introduced by this call +29 | async fn two_exclusive_extractors( + | ^^^^^^^^^^^^^^^^^^^^^^^^ the trait `dropshot::handler::HttpHandlerFunc<_, _, _>` is not implemented for fn item `fn(Arc>, TypedBody, UntypedBody) -> impl Future, HttpError>> { for ApiEndpoint<> as RequestContextArgument>::Context>>::from::two_exclusive_extractors}` + | +note: required by a bound in `ApiEndpoint::::new` + --> src/api_description.rs + | + | HandlerType: HttpHandlerFunc, + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `ApiEndpoint::::new` diff --git a/dropshot/tests/fail/bad_endpoint18.rs b/dropshot/tests/fail/bad_endpoint18.rs new file mode 100644 index 000000000..2b8193ebc --- /dev/null +++ b/dropshot/tests/fail/bad_endpoint18.rs @@ -0,0 +1,34 @@ +// Copyright 2023 Oxide Computer Company + +#![allow(unused_imports)] + +use dropshot::endpoint; +use dropshot::HttpError; +use dropshot::HttpResponseOk; +use dropshot::Query; +use dropshot::TypedBody; +use dropshot::RequestContext; +use schemars::JsonSchema; +use serde::Deserialize; +use std::sync::Arc; + +#[allow(dead_code)] +#[derive(Deserialize, JsonSchema)] +struct Stuff { + x: String, +} + +// Test: exclusive extractor not as the last argument +#[endpoint { + method = GET, + path = "/test", +}] +async fn exclusive_extractor_not_last( + _rqctx: Arc>, + _param1: TypedBody, + _param2: Query, +) -> Result, HttpError> { + Ok(HttpResponseOk(())) +} + +fn main() {} diff --git a/dropshot/tests/fail/bad_endpoint18.stderr b/dropshot/tests/fail/bad_endpoint18.stderr new file mode 100644 index 000000000..e333c7037 --- /dev/null +++ b/dropshot/tests/fail/bad_endpoint18.stderr @@ -0,0 +1,38 @@ +error[E0277]: the trait bound `TypedBody: SharedExtractor` is not satisfied + --> tests/fail/bad_endpoint18.rs:28:14 + | +28 | _param1: TypedBody, + | ^^^^^^^^^^^^^^^^ the trait `SharedExtractor` is not implemented for `TypedBody` + | + = help: the 
following other types implement trait `SharedExtractor`: + dropshot::Path + dropshot::Query +note: required by a bound in `need_shared_extractor` + --> tests/fail/bad_endpoint18.rs:22:1 + | +22 | / #[endpoint { +23 | | method = GET, +24 | | path = "/test", +25 | | }] + | |__^ required by this bound in `need_shared_extractor` +... +28 | _param1: TypedBody, + | --------- required by a bound in this + = note: this error originates in the attribute macro `endpoint` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `fn(Arc>, TypedBody, dropshot::Query) -> impl Future, HttpError>> { for ApiEndpoint<> as RequestContextArgument>::Context>>::from::exclusive_extractor_not_last}: dropshot::handler::HttpHandlerFunc<_, _, _>` is not satisfied + --> tests/fail/bad_endpoint18.rs:26:10 + | +22 | / #[endpoint { +23 | | method = GET, +24 | | path = "/test", +25 | | }] + | |__- required by a bound introduced by this call +26 | async fn exclusive_extractor_not_last( + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ the trait `dropshot::handler::HttpHandlerFunc<_, _, _>` is not implemented for fn item `fn(Arc>, TypedBody, dropshot::Query) -> impl Future, HttpError>> { for ApiEndpoint<> as RequestContextArgument>::Context>>::from::exclusive_extractor_not_last}` + | +note: required by a bound in `ApiEndpoint::::new` + --> src/api_description.rs + | + | HandlerType: HttpHandlerFunc, + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `ApiEndpoint::::new` diff --git a/dropshot/tests/fail/bad_endpoint19.rs b/dropshot/tests/fail/bad_endpoint19.rs new file mode 100644 index 000000000..0937f5017 --- /dev/null +++ b/dropshot/tests/fail/bad_endpoint19.rs @@ -0,0 +1,33 @@ +// Copyright 2023 Oxide Computer Company + +#![allow(unused_imports)] + +use dropshot::endpoint; +use dropshot::HttpError; +use dropshot::HttpResponseOk; +use dropshot::Query; +use dropshot::RequestContext; +use schemars::JsonSchema; +use serde::Deserialize; +use std::sync::Arc; + +#[allow(dead_code)] +#[derive(Deserialize, JsonSchema)] +struct QueryParams { + x: String, +} + +// Test: middle parameter is not a SharedExtractor. +#[endpoint { + method = GET, + path = "/test", +}] +async fn non_extractor_as_last_argument( + _rqctx: Arc>, + _param1: String, + _param2: Query, +) -> Result, HttpError> { + Ok(HttpResponseOk(())) +} + +fn main() {} diff --git a/dropshot/tests/fail/bad_endpoint19.stderr b/dropshot/tests/fail/bad_endpoint19.stderr new file mode 100644 index 000000000..8b77ae6bf --- /dev/null +++ b/dropshot/tests/fail/bad_endpoint19.stderr @@ -0,0 +1,38 @@ +error[E0277]: the trait bound `std::string::String: SharedExtractor` is not satisfied + --> tests/fail/bad_endpoint19.rs:27:14 + | +27 | _param1: String, + | ^^^^^^ the trait `SharedExtractor` is not implemented for `std::string::String` + | + = help: the following other types implement trait `SharedExtractor`: + dropshot::Path + dropshot::Query +note: required by a bound in `need_shared_extractor` + --> tests/fail/bad_endpoint19.rs:21:1 + | +21 | / #[endpoint { +22 | | method = GET, +23 | | path = "/test", +24 | | }] + | |__^ required by this bound in `need_shared_extractor` +... 
+27 | _param1: String, + | ------ required by a bound in this + = note: this error originates in the attribute macro `endpoint` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0277]: the trait bound `fn(Arc>, std::string::String, dropshot::Query) -> impl Future, HttpError>> { for ApiEndpoint<> as RequestContextArgument>::Context>>::from::non_extractor_as_last_argument}: dropshot::handler::HttpHandlerFunc<_, _, _>` is not satisfied + --> tests/fail/bad_endpoint19.rs:25:10 + | +21 | / #[endpoint { +22 | | method = GET, +23 | | path = "/test", +24 | | }] + | |__- required by a bound introduced by this call +25 | async fn non_extractor_as_last_argument( + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ the trait `dropshot::handler::HttpHandlerFunc<_, _, _>` is not implemented for fn item `fn(Arc>, std::string::String, dropshot::Query) -> impl Future, HttpError>> { for ApiEndpoint<> as RequestContextArgument>::Context>>::from::non_extractor_as_last_argument}` + | +note: required by a bound in `ApiEndpoint::::new` + --> src/api_description.rs + | + | HandlerType: HttpHandlerFunc, + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `ApiEndpoint::::new` diff --git a/dropshot/tests/fail/bad_endpoint3.rs b/dropshot/tests/fail/bad_endpoint3.rs index 062c80104..7ae1a99e1 100644 --- a/dropshot/tests/fail/bad_endpoint3.rs +++ b/dropshot/tests/fail/bad_endpoint3.rs @@ -8,6 +8,7 @@ use dropshot::HttpResponseOk; use dropshot::RequestContext; use std::sync::Arc; +// Test: final parameter is neither an ExclusiveExtractor nor a SharedExtractor. #[endpoint { method = GET, path = "/test", diff --git a/dropshot/tests/fail/bad_endpoint3.stderr b/dropshot/tests/fail/bad_endpoint3.stderr index f6b90e23c..f27250cfd 100644 --- a/dropshot/tests/fail/bad_endpoint3.stderr +++ b/dropshot/tests/fail/bad_endpoint3.stderr @@ -1,12 +1,35 @@ +error[E0277]: the trait bound `String: SharedExtractor` is not satisfied + --> tests/fail/bad_endpoint3.rs:18:12 + | +18 | param: String, + | ^^^^^^ the trait `SharedExtractor` is not implemented for `String` + | + = help: the following other types implement trait `SharedExtractor`: + dropshot::Path + dropshot::Query + = note: required for `String` to implement `ExclusiveExtractor` +note: required by a bound in `need_exclusive_extractor` + --> tests/fail/bad_endpoint3.rs:12:1 + | +12 | / #[endpoint { +13 | | method = GET, +14 | | path = "/test", +15 | | }] + | |__^ required by this bound in `need_exclusive_extractor` +... 
+18 | param: String, + | ------ required by a bound in this + = note: this error originates in the attribute macro `endpoint` (in Nightly builds, run with -Z macro-backtrace for more info) + error[E0277]: the trait bound `fn(Arc>, String) -> impl Future, HttpError>> { for ApiEndpoint<> as RequestContextArgument>::Context>>::from::bad_endpoint}: dropshot::handler::HttpHandlerFunc<_, _, _>` is not satisfied - --> tests/fail/bad_endpoint3.rs:15:10 + --> tests/fail/bad_endpoint3.rs:16:10 | -11 | / #[endpoint { -12 | | method = GET, -13 | | path = "/test", -14 | | }] +12 | / #[endpoint { +13 | | method = GET, +14 | | path = "/test", +15 | | }] | |__- required by a bound introduced by this call -15 | async fn bad_endpoint( +16 | async fn bad_endpoint( | ^^^^^^^^^^^^ the trait `dropshot::handler::HttpHandlerFunc<_, _, _>` is not implemented for fn item `fn(Arc>, String) -> impl Future, HttpError>> { for ApiEndpoint<> as RequestContextArgument>::Context>>::from::bad_endpoint}` | note: required by a bound in `ApiEndpoint::::new` diff --git a/dropshot/tests/test_demo.rs b/dropshot/tests/test_demo.rs index acfd837a4..b28c7856f 100644 --- a/dropshot/tests/test_demo.rs +++ b/dropshot/tests/test_demo.rs @@ -1,4 +1,4 @@ -// Copyright 2020 Oxide Computer Company +// Copyright 2023 Oxide Computer Company //! Test cases for the "demo" handlers. These handlers exercise various //! supported configurations of the HTTP handler interface. We exercise them //! here to make sure that even if these aren't used at a given point, they still diff --git a/dropshot_endpoint/Cargo.toml b/dropshot_endpoint/Cargo.toml index bf458f97b..681e092a0 100644 --- a/dropshot_endpoint/Cargo.toml +++ b/dropshot_endpoint/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "dropshot_endpoint" description = "macro used by dropshot consumers for registering handlers" -version = "0.8.1-dev" +version = "0.9.0-dev" authors = ["Adam H. Leventhal "] edition = "2018" license = "Apache-2.0" diff --git a/dropshot_endpoint/src/lib.rs b/dropshot_endpoint/src/lib.rs index c83636b8b..faeb929fb 100644 --- a/dropshot_endpoint/src/lib.rs +++ b/dropshot_endpoint/src/lib.rs @@ -1,4 +1,4 @@ -// Copyright 2022 Oxide Computer Company +// Copyright 2023 Oxide Computer Company //! This package defines macro attributes associated with HTTP handlers. These //! attributes are used both to define an HTTP API and to generate an OpenAPI @@ -171,7 +171,7 @@ fn do_channel( } = from_tokenstream(&attr)?; match protocol { ChannelProtocol::WEBSOCKETS => { - // here we construct a wrapper function and mutate the arguments a bit + // Here we construct a wrapper function and mutate the arguments a bit // for the outer layer: we replace WebsocketConnection, which is not // an extractor, with WebsocketUpgrade, which is. We also move it // to the end. @@ -220,12 +220,19 @@ fn do_channel( )); } - // XXX-dap TODO-cleanup This is a gross way to do it. - let mut input_pairs = - sig.inputs.clone().into_pairs().collect::>(); - let second_pair = input_pairs.remove(1); - input_pairs.push(second_pair); - sig.inputs = input_pairs.into_iter().collect(); + // Historically, we required that the `WebsocketConnection` argument + // be first after the `RequestContext`. However, we also require + // that any exclusive extractor (which includes the + // `WebsocketUpgrade` argument that we put in its place) appears + // last. We replaced the type above, but now we need to put it in + // the right spot. 
+ sig.inputs = { + let mut input_pairs = + sig.inputs.clone().into_pairs().collect::>(); + let second_pair = input_pairs.remove(1); + input_pairs.push(second_pair); + input_pairs.into_iter().collect() + }; sig.output = syn::parse2(quote!(-> dropshot::WebsocketEndpointResult))?; @@ -424,8 +431,8 @@ fn do_endpoint_inner( // When the user attaches this proc macro to a function with the wrong type // signature, the resulting errors can be deeply inscrutable. To attempt to // make failures easier to understand, we inject code that asserts the types - // of the various parameters. We do this by calling a dummy function that - // requires a type that satisfies the trait Extractor. + // of the various parameters. We do this by calling dummy functions that + // require a type that satisfies SharedExtractor or ExclusiveExtractor. let mut arg_types = Vec::new(); let mut arg_is_receiver = false; let param_checks = ast @@ -433,12 +440,12 @@ fn do_endpoint_inner( .inputs .iter() .enumerate() - .filter_map(|(index, arg)| { + .map(|(index, arg)| { match arg { syn::FnArg::Receiver(_) => { // The compiler failure here is already comprehensible. arg_is_receiver = true; - Some(quote! {}) + quote! {} } syn::FnArg::Typed(pat) => { let span = pat.ty.span(); @@ -448,15 +455,38 @@ fn do_endpoint_inner( // The first parameter must be an Arc> // and fortunately we already have a trait that we can // use to validate this type. - Some(quote_spanned! { span=> + quote_spanned! { span=> const _: fn() = || { struct NeedRequestContext(<#ty as #dropshot::RequestContextArgument>::Context); }; - }) + } + } else if index < ast.sig.inputs.len() - 1 { + // Subsequent parameters aside from the last one must + // impl SharedExtractor. + quote_spanned! { span=> + const _: fn() = || { + fn need_shared_extractor() + where + T: ?Sized + #dropshot::SharedExtractor, + { + } + need_shared_extractor::<#ty>(); + }; + } } else { - // XXX-dap the remaining stuff must together impl - // `RequestExtractor` - None + // The final parameter must impl ExclusiveExtractor. + // (It's okay if it's another SharedExtractor. Those + // impl ExclusiveExtractor, too.) + quote_spanned! { span=> + const _: fn() = || { + fn need_exclusive_extractor() + where + T: ?Sized + #dropshot::ExclusiveExtractor, + { + } + need_exclusive_extractor::<#ty>(); + }; + } } } } @@ -935,6 +965,14 @@ mod tests { const _: fn() = || { struct NeedRequestContext( > as dropshot::RequestContextArgument>::Context) ; }; + const _: fn() = || { + fn need_exclusive_extractor() + where + T: ?Sized + dropshot::ExclusiveExtractor, + { + } + need_exclusive_extractor:: >(); + }; const _: fn() = || { trait ResultTrait { type T; @@ -1033,6 +1071,14 @@ mod tests { const _: fn() = || { struct NeedRequestContext( > as dropshot::RequestContextArgument>::Context) ; }; + const _: fn() = || { + fn need_exclusive_extractor() + where + T: ?Sized + dropshot::ExclusiveExtractor, + { + } + need_exclusive_extractor:: >(); + }; const _: fn() = || { trait ResultTrait { type T; diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 7439ca54d..c7c3c5387 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -4,5 +4,5 @@ # The intent is to keep this updated as new stable versions are relased. 
[toolchain] -channel = "1.66.0" +channel = "1.66.1" profile = "default" From 951fb6c4efb5ea71638c572ff55f2b90417eb5fc Mon Sep 17 00:00:00 2001 From: David Pacheco Date: Thu, 12 Jan 2023 13:01:40 -0800 Subject: [PATCH 35/47] rename the new pub type (requires moving the internal DTrace type with the same name) add demo test fix up docs a bit --- dropshot/src/dtrace.rs | 42 ++++++++++++++ dropshot/src/extractor/mod.rs | 4 +- dropshot/src/handler.rs | 11 ++-- dropshot/src/lib.rs | 67 ++++++----------------- dropshot/src/server.rs | 14 ++--- dropshot/src/websocket.rs | 4 +- dropshot/tests/fail/bad_endpoint1.stderr | 1 + dropshot/tests/fail/bad_endpoint11.stderr | 1 + dropshot/tests/fail/bad_endpoint13.stderr | 1 + dropshot/tests/fail/bad_endpoint2.stderr | 1 + dropshot/tests/fail/bad_endpoint8.stderr | 1 + dropshot/tests/test_demo.rs | 60 +++++++++++++++++++- dropshot_endpoint/src/lib.rs | 1 + 13 files changed, 139 insertions(+), 69 deletions(-) create mode 100644 dropshot/src/dtrace.rs diff --git a/dropshot/src/dtrace.rs b/dropshot/src/dtrace.rs new file mode 100644 index 000000000..3f6a9d415 --- /dev/null +++ b/dropshot/src/dtrace.rs @@ -0,0 +1,42 @@ +// Copyright 2023 Oxide Computer Company +//! DTrace probes and support + +#[derive(Debug, Clone, serde::Serialize)] +pub(crate) struct RequestInfo { + id: String, + local_addr: std::net::SocketAddr, + remote_addr: std::net::SocketAddr, + method: String, + path: String, + query: Option, +} + +#[derive(Debug, Clone, serde::Serialize)] +pub(crate) struct ResponseInfo { + id: String, + local_addr: std::net::SocketAddr, + remote_addr: std::net::SocketAddr, + status_code: u16, + message: String, +} + +#[cfg(feature = "usdt-probes")] +#[usdt::provider(provider = "dropshot")] +mod probes { + use super::{RequestInfo, ResponseInfo}; + fn request__start(_: &RequestInfo) {} + fn request__done(_: &ResponseInfo) {} +} + +/// The result of registering a server's DTrace USDT probes. +#[derive(Debug, Clone, PartialEq)] +pub enum ProbeRegistration { + /// The probes are explicitly disabled at compile time. + Disabled, + + /// Probes were successfully registered. + Succeeded, + + /// Registration failed, with an error message explaining the cause. + Failed(String), +} diff --git a/dropshot/src/extractor/mod.rs b/dropshot/src/extractor/mod.rs index d659f683a..a580c28a0 100644 --- a/dropshot/src/extractor/mod.rs +++ b/dropshot/src/extractor/mod.rs @@ -31,7 +31,7 @@ use std::fmt::Debug; mod common; -use crate::RequestHeader; +use crate::RequestInfo; pub use common::ExclusiveExtractor; pub use common::ExtractorMetadata; pub use common::RequestExtractor; @@ -58,7 +58,7 @@ impl Query { /// Given an HTTP request, pull out the query string and attempt to deserialize /// it as an instance of `QueryType`. fn http_request_load_query( - request: &RequestHeader, + request: &RequestInfo, ) -> Result, HttpError> where QueryType: DeserializeOwned + JsonSchema + Send + Sync, diff --git a/dropshot/src/handler.rs b/dropshot/src/handler.rs index 4f4fc3173..36056e50e 100644 --- a/dropshot/src/handler.rs +++ b/dropshot/src/handler.rs @@ -85,23 +85,22 @@ pub struct RequestContext { pub log: Logger, /// basic request information (method, URI, etc.) - pub request: RequestHeader, + pub request: RequestInfo, } // This is deliberately as close to compatible with `hyper::Request` as // reasonable. -// XXX-dap TODO This could use a better name. 
#[derive(Debug)] -pub struct RequestHeader { +pub struct RequestInfo { method: http::Method, uri: http::Uri, version: http::Version, headers: http::HeaderMap, } -impl From<&hyper::Request> for RequestHeader { +impl From<&hyper::Request> for RequestInfo { fn from(request: &hyper::Request) -> Self { - RequestHeader { + RequestInfo { method: request.method().clone(), uri: request.uri().clone(), version: request.version().clone(), @@ -110,7 +109,7 @@ impl From<&hyper::Request> for RequestHeader { } } -impl RequestHeader { +impl RequestInfo { pub fn method(&self) -> &http::Method { &self.method } diff --git a/dropshot/src/lib.rs b/dropshot/src/lib.rs index 83dc475cf..df01ca631 100644 --- a/dropshot/src/lib.rs +++ b/dropshot/src/lib.rs @@ -213,15 +213,16 @@ //! [path_params: Path

<P>,]
 //!     [body_param: TypedBody<J>,]
 //!     [body_param: UntypedBody,]
+//!     [raw_request: RawRequest,]
 //! ) -> Result<HttpResponse*, HttpError>
 //!
 //! The `RequestContext` must appear first. The `Context` type is
 //! caller-provided context which is provided when the server is created.
 //!
-//! The types `Query`, `Path`, `TypedBody`, and `UntypedBody` are called
-//! **Extractors** because they cause information to be pulled out of the request
-//! and made available to the handler function.
+//! The types `Query`, `Path`, `TypedBody`, `UntypedBody`, and `RawRequest` are
+//! called **Extractors** because they cause information to be pulled out of the
+//! request and made available to the handler function.
 //!
 //! * [`Query`]`<Q>` extracts parameters from a query string, deserializing them
 //!   into an instance of type `Q`. `Q` must implement `serde::Deserialize` and
 //!   `schemars::JsonSchema`.
 //! * [`TypedBody`]`<J>` extracts content from the request body by parsing the
 //!   body as JSON (or form/url-encoded) and deserializing it into an instance
 //!   of type `J`. `J` must implement `serde::Deserialize` and `schemars::JsonSchema`.
 //! * [`UntypedBody`] extracts the raw bytes of the request body.
+//! * [`RawRequest`] provides access to the underlying [`hyper::Request`]. The
+//!   hope is that this would generally not be needed. It can be useful to
+//!   implement functionality not provided by Dropshot.
 //!
-//! `Query` and `Path` impl `SharedExtractor`. `TypedBody` and `UntypedBody`
-//! impl `ExclusiveExtractor`. Your function may accept 0-3 extractors, but
-//! only one can be `ExclusiveExtractor`, and it must be the last one.
-//! Otherwise, the order of extractor arguments does not matter.
+//! `Query` and `Path` impl `SharedExtractor`. `TypedBody`, `UntypedBody`, and
+//! `RawRequest` impl `ExclusiveExtractor`. Your function may accept 0-3
+//! extractors, but only one can be `ExclusiveExtractor`, and it must be the
+//! last one. Otherwise, the order of extractor arguments does not matter.
 //!
 //! If the handler accepts any extractors and the corresponding extraction
 //! cannot be completed, the request fails with status code 400 and an error
@@ -504,8 +508,8 @@
 //! Dropshot optionally exposes two DTrace probes, `request_start` and
 //! `request_finish`. These provide detailed information about each request,
 //! such as their ID, the local and remote IPs, and the response information.
-//! See the [`RequestInfo`] and [`ResponseInfo`] types for a complete listing
-//! of what's available.
+//! See the `dropshot::dtrace::RequestInfo` and `dropshot::dtrae::ResponseInfo`
+//! types for a complete listing of what's available.
 //!
 //! These probes are implemented via the [`usdt`] crate. They may require a
 //! nightly toolchain if built on macOS prior to Rust version 1.66. Otherwise a
@@ -551,45 +555,9 @@
     feature(asm_sym)
 )]
 
-#[derive(Debug, Clone, serde::Serialize)]
-pub(crate) struct RequestInfo {
-    id: String,
-    local_addr: std::net::SocketAddr,
-    remote_addr: std::net::SocketAddr,
-    method: String,
-    path: String,
-    query: Option<String>,
-}
-
-#[derive(Debug, Clone, serde::Serialize)]
-pub(crate) struct ResponseInfo {
-    id: String,
-    local_addr: std::net::SocketAddr,
-    remote_addr: std::net::SocketAddr,
-    status_code: u16,
-    message: String,
-}
-
-#[cfg(feature = "usdt-probes")]
-#[usdt::provider(provider = "dropshot")]
-mod probes {
-    use crate::{RequestInfo, ResponseInfo};
-    fn request__start(_: &RequestInfo) {}
-    fn request__done(_: &ResponseInfo) {}
-}
-
-/// The result of registering a server's DTrace USDT probes.
-#[derive(Debug, Clone, PartialEq)] -pub enum ProbeRegistration { - /// The probes are explicitly disabled at compile time. - Disabled, - - /// Probes were successfully registered. - Succeeded, - - /// Registration failed, with an error message explaining the cause. - Failed(String), -} +// The macro used to define DTrace probes needs to be defined before anything +// that might use it. +mod dtrace; mod api_description; mod config; @@ -626,6 +594,7 @@ pub use api_description::TagDetails; pub use api_description::TagExternalDocs; pub use config::ConfigDropshot; pub use config::ConfigTls; +pub use dtrace::ProbeRegistration; pub use error::HttpError; pub use error::HttpErrorResponseBody; pub use extractor::ExclusiveExtractor; @@ -653,7 +622,7 @@ pub use handler::HttpResponseTemporaryRedirect; pub use handler::HttpResponseUpdatedNoContent; pub use handler::NoHeaders; pub use handler::RequestContext; -pub use handler::RequestHeader; +pub use handler::RequestInfo; pub use http_util::CONTENT_TYPE_JSON; pub use http_util::CONTENT_TYPE_NDJSON; pub use http_util::CONTENT_TYPE_OCTET_STREAM; diff --git a/dropshot/src/server.rs b/dropshot/src/server.rs index 0a250b937..380de0c8f 100644 --- a/dropshot/src/server.rs +++ b/dropshot/src/server.rs @@ -1,13 +1,13 @@ -// Copyright 2020 Oxide Computer Company +// Copyright 2023 Oxide Computer Company //! Generic server-wide state and facilities use super::api_description::ApiDescription; use super::config::{ConfigDropshot, ConfigTls}; +#[cfg(feature = "usdt-probes")] +use super::dtrace::probes; use super::error::HttpError; use super::handler::RequestContext; use super::http_util::HEADER_REQUEST_ID; -#[cfg(feature = "usdt-probes")] -use super::probes; use super::router::HttpRouter; use super::ProbeRegistration; @@ -38,7 +38,7 @@ use tokio::net::{TcpListener, TcpStream}; use tokio_rustls::{server::TlsStream, TlsAcceptor}; use uuid::Uuid; -use crate::RequestHeader; +use crate::RequestInfo; use slog::Logger; // TODO Replace this with something else? 
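(Editorial aside: `ProbeRegistration`, relocated above into the new `dtrace` module, is the value a consumer inspects to learn whether USDT probe registration worked. A minimal sketch of handling it follows — the enum variants are real, but how you obtain the value, e.g. via an accessor on the running server, is deliberately left abstract here.)

```rust
use dropshot::ProbeRegistration;

// Log the outcome of probe registration; `registration` would come from
// the server after startup (dropshot records the result of registering
// the probes when the server is started).
fn report_probes(registration: &ProbeRegistration) {
    match registration {
        ProbeRegistration::Succeeded => {
            println!("DTrace probes registered");
        }
        ProbeRegistration::Disabled => {
            println!("built without the usdt-probes feature");
        }
        ProbeRegistration::Failed(error) => {
            eprintln!("probe registration failed: {}", error);
        }
    }
}
```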
@@ -679,7 +679,7 @@ async fn http_request_handle_wrap( #[cfg(feature = "usdt-probes")] probes::request__start!(|| { let uri = request.uri(); - crate::RequestInfo { + crate::dtrace::RequestInfo { id: request_id.clone(), local_addr: server.local_addr, remote_addr, @@ -710,7 +710,7 @@ async fn http_request_handle_wrap( #[cfg(feature = "usdt-probes")] probes::request__done!(|| { - crate::ResponseInfo { + crate::dtrace::ResponseInfo { id: request_id.clone(), local_addr, remote_addr, @@ -771,7 +771,7 @@ async fn http_request_handle( server.router.lookup_route(&method, uri.path().into())?; let rqctx = RequestContext { server: Arc::clone(&server), - request: RequestHeader::from(&request), + request: RequestInfo::from(&request), path_variables: lookup_result.variables, body_content_type: lookup_result.body_content_type, request_id: request_id.to_string(), diff --git a/dropshot/src/websocket.rs b/dropshot/src/websocket.rs index df63d2c23..262dd9cac 100644 --- a/dropshot/src/websocket.rs +++ b/dropshot/src/websocket.rs @@ -297,7 +297,7 @@ mod tests { use crate::router::HttpRouter; use crate::server::{DropshotState, ServerConfig}; use crate::{ - ExclusiveExtractor, HttpError, RequestContext, RequestHeader, + ExclusiveExtractor, HttpError, RequestContext, RequestInfo, WebsocketUpgrade, }; use http::Request; @@ -332,7 +332,7 @@ mod tests { ), tls_acceptor: None, }), - request: RequestHeader::from(&request), + request: RequestInfo::from(&request), path_variables: Default::default(), body_content_type: Default::default(), request_id: "".to_string(), diff --git a/dropshot/tests/fail/bad_endpoint1.stderr b/dropshot/tests/fail/bad_endpoint1.stderr index 7d1989a1f..729ca36c1 100644 --- a/dropshot/tests/fail/bad_endpoint1.stderr +++ b/dropshot/tests/fail/bad_endpoint1.stderr @@ -5,6 +5,7 @@ error: Endpoint handlers must have the following signature: [path_params: Path

<P>,]
     [body_param: TypedBody<J>,]
     [body_param: UntypedBody,]
+    [raw_request: RawRequest,]
     ) -> Result<HttpResponse*, HttpError>
  --> tests/fail/bad_endpoint1.rs:20:1
   |
diff --git a/dropshot/tests/fail/bad_endpoint11.stderr b/dropshot/tests/fail/bad_endpoint11.stderr
index 1d4f19d8d..7d39b1b7f 100644
--- a/dropshot/tests/fail/bad_endpoint11.stderr
+++ b/dropshot/tests/fail/bad_endpoint11.stderr
@@ -5,6 +5,7 @@ error: Endpoint handlers must have the following signature:
     [path_params: Path<P>,]
     [body_param: TypedBody<J>,]
     [body_param: UntypedBody,]
+    [raw_request: RawRequest,]
     ) -> Result<HttpResponse*, HttpError>
  --> tests/fail/bad_endpoint11.rs:13:1
   |
diff --git a/dropshot/tests/fail/bad_endpoint13.stderr b/dropshot/tests/fail/bad_endpoint13.stderr
index 1559b41d3..d209cd2b1 100644
--- a/dropshot/tests/fail/bad_endpoint13.stderr
+++ b/dropshot/tests/fail/bad_endpoint13.stderr
@@ -5,6 +5,7 @@ error: Endpoint handlers must have the following signature:
     [path_params: Path<P>,]
     [body_param: TypedBody<J>,]
     [body_param: UntypedBody,]
+    [raw_request: RawRequest,]
     ) -> Result<HttpResponse*, HttpError>
  --> tests/fail/bad_endpoint13.rs:19:1
   |
diff --git a/dropshot/tests/fail/bad_endpoint2.stderr b/dropshot/tests/fail/bad_endpoint2.stderr
index c71207619..42e88eb52 100644
--- a/dropshot/tests/fail/bad_endpoint2.stderr
+++ b/dropshot/tests/fail/bad_endpoint2.stderr
@@ -5,6 +5,7 @@ error: Endpoint handlers must have the following signature:
     [path_params: Path<P>,]
     [body_param: TypedBody<J>,]
     [body_param: UntypedBody,]
+    [raw_request: RawRequest,]
     ) -> Result<HttpResponse*, HttpError>
  --> tests/fail/bad_endpoint2.rs:13:1
   |
diff --git a/dropshot/tests/fail/bad_endpoint8.stderr b/dropshot/tests/fail/bad_endpoint8.stderr
index dc6067086..f1fede5a6 100644
--- a/dropshot/tests/fail/bad_endpoint8.stderr
+++ b/dropshot/tests/fail/bad_endpoint8.stderr
@@ -5,6 +5,7 @@ error: Endpoint handlers must have the following signature:
     [path_params: Path<P>,]
     [body_param: TypedBody<J>,]
     [body_param: UntypedBody,]
+    [raw_request: RawRequest,]
     ) -> Result<HttpResponse*, HttpError>
  --> tests/fail/bad_endpoint8.rs:20:1
   |
diff --git a/dropshot/tests/test_demo.rs b/dropshot/tests/test_demo.rs
index b28c7856f..9155d1303 100644
--- a/dropshot/tests/test_demo.rs
+++ b/dropshot/tests/test_demo.rs
@@ -6,9 +6,9 @@
 //!
 //! Note that the purpose is mainly to exercise the various possible function
 //! signatures that can be used to implement handler functions. We don't need to
-//! exercises very many cases (or error cases) of each one because the handlers
-//! themselves are not important, but we need to exercise enough to validate that
-//! the generic JSON and query parsing handles error cases.
+//! exercise very many cases (or error cases) of each one because the handlers
+//! themselves are not important, but we need to exercise enough to validate
+//! that the generic JSON and query parsing handles error cases.
 //!
 //! TODO-hardening: add test cases that exceed limits (e.g., query string length,
 //! JSON body length)
@@ -34,6 +34,7 @@ use dropshot::HttpResponseTemporaryRedirect;
 use dropshot::HttpResponseUpdatedNoContent;
 use dropshot::Path;
 use dropshot::Query;
+use dropshot::RawRequest;
 use dropshot::RequestContext;
 use dropshot::TypedBody;
 use dropshot::UntypedBody;
@@ -70,6 +71,7 @@ fn demo_api() -> ApiDescription<usize> {
     api.register(demo_handler_path_param_uuid).unwrap();
     api.register(demo_handler_path_param_u32).unwrap();
     api.register(demo_handler_untyped_body).unwrap();
+    api.register(demo_handler_raw_request).unwrap();
     api.register(demo_handler_delete).unwrap();
     api.register(demo_handler_headers).unwrap();
     api.register(demo_handler_302_bogus).unwrap();
@@ -670,6 +672,32 @@ async fn test_untyped_body() {
     testctx.teardown().await;
 }
 
+// Test `RawRequest`.
+#[tokio::test]
+async fn test_raw_request() {
+    let api = demo_api();
+    let testctx = common::test_setup("test_raw_request", api);
+    let client = &testctx.client_testctx;
+
+    // Success case
+    let body = "you may know what you need but to get what you want \
+        better see that you keep what you have";
+    let mut response = client
+        .make_request_with_body(
+            Method::PUT,
+            "/testing/raw_request",
+            body.into(),
+            StatusCode::OK,
+        )
+        .await
+        .unwrap();
+    let json: DemoRaw = read_json(&mut response).await;
+    assert_eq!(json.nbytes, 90);
+    assert_eq!(json.method, "PUT");
+
+    testctx.teardown().await;
+}
+
 // Test delete request
 #[tokio::test]
 async fn test_delete_request() {
@@ -979,6 +1007,32 @@ async fn demo_handler_untyped_body(
     Ok(HttpResponseOk(DemoUntyped { nbytes, as_utf8 }))
 }
 
+#[derive(Deserialize, Serialize, JsonSchema)]
+pub struct DemoRaw {
+    pub nbytes: usize,
+    pub method: String,
+}
+
+#[endpoint {
+    method = PUT,
+    path = "/testing/raw_request"
+}]
+async fn demo_handler_raw_request(
+    _rqctx: Arc<RequestContext<usize>>,
+    raw_request: RawRequest,
+) -> Result<HttpResponseOk<DemoRaw>, HttpError> {
+    let request = raw_request.into_inner();
+
+    let (parts, body) = request.into_parts();
+    // This is not generally a good pattern because it allows untrusted
+    // consumers to use up all memory. This is just a narrow test.
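+    // (Editorial note, not in the original change: a bounded alternative
+    // would accumulate body chunks only up to a fixed limit and fail the
+    // request once the limit is exceeded, much as Dropshot's typed
+    // extractors honor the `request_body_max_bytes` configuration.)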
+ let whole_body = hyper::body::to_bytes(body).await.unwrap(); + Ok(HttpResponseOk(DemoRaw { + nbytes: whole_body.len(), + method: parts.method.to_string(), + })) +} + #[derive(Deserialize, Serialize, JsonSchema)] pub struct DemoPathImpossible { pub test1: String, diff --git a/dropshot_endpoint/src/lib.rs b/dropshot_endpoint/src/lib.rs index faeb929fb..429011a51 100644 --- a/dropshot_endpoint/src/lib.rs +++ b/dropshot_endpoint/src/lib.rs @@ -86,6 +86,7 @@ const USAGE: &str = "Endpoint handlers must have the following signature: [path_params: Path

<P>,]
     [body_param: TypedBody<J>,]
     [body_param: UntypedBody,]
+    [raw_request: RawRequest,]
     ) -> Result<HttpResponse*, HttpError>";
 
 /// This attribute transforms a handler function into a Dropshot endpoint

From 3a41c9930f27d71bcedfb569d66a1354658d4698 Mon Sep 17 00:00:00 2001
From: David Pacheco
Date: Thu, 12 Jan 2023 14:02:30 -0800
Subject: [PATCH 36/47] update changelog

---
 CHANGELOG.adoc | 15 ++++++++++++++-
 1 file changed, 14 insertions(+), 1 deletion(-)

diff --git a/CHANGELOG.adoc b/CHANGELOG.adoc
index 922033986..f4847c52a 100644
--- a/CHANGELOG.adoc
+++ b/CHANGELOG.adoc
@@ -29,7 +29,20 @@ There are a number of breaking changes in this release but we expect they will b
 +
 1. For any endpoint functions that use a `TypedBody` or `UntypedBody` extractor, this extractor must be the last argument to the function. Otherwise, you will get a compile error about the extractor not impl'ing `SharedExtractor`.
 2. If you have your own type that impls `Extractor`, you will need to change that to either `ExclusiveExtractor` (if the impl needs a `mut` reference to the underlying `hyper::Request`, which is usually because it needs to read the request body) or `SharedExtractor`. If your extractor only needs to look at the URL or request headers and not the body, it can probably be a `SharedExtractor`. If it's an exclusive extractor, any function that accepts it must accept it as the last argument to the function.
-3. If you have your own type that impls `Extractor`, you will also need to change the type signature of the `from_request` method to accept a `&RequestContext<Context>` instead of `Arc<RequestContext<Context>>`. (This should not be a problem unless your extractor was hanging on to a reference via the Arc. We don't know a reason this would be useful. If you were doing this, please reach out. You can probably instead copy whatever information you need out of the `RequestContext` instead.)
+3. If you have your own type that impls `Extractor`, you will also need to change the type signature of the `from_request` method to accept a `&RequestContext<Context>` instead of `Arc<RequestContext<Context>>`. (This should not be a problem unless your extractor was hanging on to a reference via the Arc. We don't know a reason this would be useful. If you were doing this, please reach out. You can probably copy whatever information you need out of the `RequestContext` instead.)
+* https://github.com/oxidecomputer/dropshot/pull/557[#557] Simpler, safer access to raw request. Prior to this change, the raw `hyper::Request` (`http::Request<hyper::Body>`) was accessible to endpoint functions via the `RequestContext`, but behind an `Arc<Mutex<hyper::Request<hyper::Body>>>`. This was a little strange because your endpoint function was usually the only one with a reference to this object. (You could get into trouble if you defined your own Extractor that cloned one of the `Arc` objects -- your extractor could deadlock with the handler.) After this change, the raw request is available only through a separate `RawRequest` extractor. This is an exclusive extractor, which means you cannot use it with `TypedBody` or `UntypedBody`. As a result, there is no way to wind up with multiple references to the request. There's no lock and no way to get into this sort of trouble.
++
+After this change, the `hyper::Request` is passed as a separate argument to `ExclusiveExtractor::from_request()`.
++
+**What you need to do:**
++
+1. If you have a request handler that accesses `rqctx.request`, it's typically doing `let request = rqctx.request.lock().await`.
+a. If that code is only accessing the HTTP method, URI, headers, or version, then you can replace that with `let request = &rqctx.request`. (That object has methods compatible with `http::Request` for accessing the method, URI, headers, and verison.)
+b. If that code is accessing other parts of the request (e.g., reading the body or doing a protocol upgrade), then you must instead add a `raw_request: RawRequest` argument to your endpoint function. Then you can use `let request = raw_request.into_inner()`.
+2. If you have an extractor that accesses `rqctx.request`, then it too is typically doing something like `let request = rqctx.request.lock().await`.
+a. If that code is only accessing the HTTP method, URI, headers, or version, then just like above you can replace that with `let request = &rqctx.request`. This can be done from a `SharedExtractor` or an `ExclusiveExtractor`.
+b. If that code is accessing other parts of the request (e.g., reading the body or doing a protocol upgrade), then this extractor must impl `ExclusiveExtractor` (not `SharedExtractor`). With `ExclusiveExtractor`, the `hyper::Request` is available as an argument to `from_request()`.
++
 * https://github.com/oxidecomputer/dropshot/pull/504[#504] Dropshot now allows TLS configuration to be supplied either by path or as bytes. For compatibility, the `AsFile` variant of `ConfigTls` contains the `cert_file` and `key_file` fields, and may be used similarly to the old variant.
 * https://github.com/oxidecomputer/dropshot/pull/502[#502] Dropshot exposes a `refresh_tls` method to update the TLS certificates being used by a running server.
 +

From 28fd3108325ac4e6363ac2801fc155b63bd3fc9d Mon Sep 17 00:00:00 2001
From: David Pacheco
Date: Thu, 12 Jan 2023 14:03:12 -0800
Subject: [PATCH 37/47] remove TODO

---
 CHANGELOG.adoc | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/CHANGELOG.adoc b/CHANGELOG.adoc
index f4847c52a..7552cef07 100644
--- a/CHANGELOG.adoc
+++ b/CHANGELOG.adoc
@@ -17,10 +17,6 @@ https://github.com/oxidecomputer/dropshot/compare/v0.8.0\...HEAD[Full list of co
 
 === Breaking Changes
 
-// XXX-dap TODO more updates for RawRequest extractor
-// also update crate-level docs and other places we talk about TypedBody, etc.
-// maybe add an example?
-
 There are a number of breaking changes in this release but we expect they will be easy to manage. **If you have any trouble updating to this release or want help with it, please do https://github.com/oxidecomputer/dropshot/discussions[start a discussion] or https://github.com/oxidecomputer/dropshot/issues/new[file an issue]!**
 
 * https://github.com/oxidecomputer/dropshot/pull/556[#556] Better type-safety around the use of extractors. It is now a compile-time error to define an endpoint that accepts two extractors that use the HTTP request body (e.g., to accept both a `TypedBody` and an `UntypedBody`, or two `TypedBody` arguments). Previously, this would have resulted in a runtime error. The main change is that the `Extractor` trait has been split into two separate traits: `SharedExtractor` and `ExclusiveExtractor`. Endpoint functions can still accept 0-3 extractors, but only one can be an `ExclusiveExtractor` and it must be the last one. The function signatures for `*Extractor::from_request` have also changed.
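(Editorial aside, to make the #556 rule above concrete. The endpoint below is a sketch, not taken from these patches; it uses the `Arc<RequestContext>` signature that still applies at this point in the series. The shared extractor may sit in the middle; the exclusive one must come last, and swapping them now fails at compile time rather than at runtime.)

```rust
use dropshot::{
    endpoint, HttpError, HttpResponseOk, Query, RequestContext, TypedBody,
};
use schemars::JsonSchema;
use serde::Deserialize;
use std::sync::Arc;

#[derive(Deserialize, JsonSchema)]
struct Filter {
    name: String,
}

#[derive(Deserialize, JsonSchema)]
struct Update {
    value: u64,
}

// Query<Filter> impls SharedExtractor; TypedBody<Update> impls
// ExclusiveExtractor and therefore must be the final argument.
#[endpoint {
    method = PUT,
    path = "/things",
}]
async fn update_things(
    _rqctx: Arc<RequestContext<()>>,
    query: Query<Filter>,
    body: TypedBody<Update>,
) -> Result<HttpResponseOk<u64>, HttpError> {
    let _name = query.into_inner().name;
    Ok(HttpResponseOk(body.into_inner().value))
}
```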
From 415c44494170c4ef44b06f8c53117ec02aee33e9 Mon Sep 17 00:00:00 2001
From: David Pacheco
Date: Thu, 12 Jan 2023 14:21:54 -0800
Subject: [PATCH 38/47] add compatibility method

---
 CHANGELOG.adoc              |  4 ++--
 dropshot/src/handler.rs     | 32 ++++++++++++++++++++++++++++++++
 dropshot/tests/test_demo.rs | 36 ++++++++++++++++++++++++++++++++++++
 3 files changed, 70 insertions(+), 2 deletions(-)

diff --git a/CHANGELOG.adoc b/CHANGELOG.adoc
index 7552cef07..8a7daf7ed 100644
--- a/CHANGELOG.adoc
+++ b/CHANGELOG.adoc
@@ -33,10 +33,10 @@ After this change, the `hyper::Request` is passed as a separate argument to `Exc
 **What you need to do:**
 +
 1. If you have a request handler that accesses `rqctx.request`, it's typically doing `let request = rqctx.request.lock().await`.
-a. If that code is only accessing the HTTP method, URI, headers, or version, then you can replace that with `let request = &rqctx.request`. (That object has methods compatible with `http::Request` for accessing the method, URI, headers, and verison.)
+a. If that code is only accessing the HTTP method, URI, headers, or version, then _you can skip this step_. However, it's recommended that you replace that with `let request = &rqctx.request`. (That object has methods compatible with `http::Request` for accessing the method, URI, headers, and version.)
 b. If that code is accessing other parts of the request (e.g., reading the body or doing a protocol upgrade), then you must instead add a `raw_request: RawRequest` argument to your endpoint function. Then you can use `let request = raw_request.into_inner()`.
 2. If you have an extractor that accesses `rqctx.request`, then it too is typically doing something like `let request = rqctx.request.lock().await`.
-a. If that code is only accessing the HTTP method, URI, headers, or version, then just like above you can replace that with `let request = &rqctx.request`. This can be done from a `SharedExtractor` or an `ExclusiveExtractor`.
+a. If that code is only accessing the HTTP method, URI, headers, or version, then just like above _you can skip this step_, but it's recommended that you replace that with `let request = &rqctx.request`. This can be done from a `SharedExtractor` or an `ExclusiveExtractor`.
 b. If that code is accessing other parts of the request (e.g., reading the body or doing a protocol upgrade), then this extractor must impl `ExclusiveExtractor` (not `SharedExtractor`). With `ExclusiveExtractor`, the `hyper::Request` is available as an argument to `from_request()`.
 +
 * https://github.com/oxidecomputer/dropshot/pull/504[#504] Dropshot now allows TLS configuration to be supplied either by path or as bytes. For compatibility, the `AsFile` variant of `ConfigTls` contains the `cert_file` and `key_file` fields, and may be used similarly to the old variant.
diff --git a/dropshot/src/handler.rs b/dropshot/src/handler.rs
index 36056e50e..b959a7f1c 100644
--- a/dropshot/src/handler.rs
+++ b/dropshot/src/handler.rs
@@ -125,6 +125,38 @@ impl RequestInfo {
     pub fn headers(&self) -> &http::HeaderMap {
         &self.headers
     }
+
+    /// Returns a reference to the `RequestInfo` itself
+    ///
+    /// This is provided for source compatibility. In previous versions of
+    /// Dropshot, `RequestContext.request` was an
+    /// `Arc<Mutex<hyper::Request<hyper::Body>>>`. Now, it's just
+    /// `RequestInfo`, which provides many of the same functions as
+    /// `hyper::Request` does. Consumers _should_ just use `rqctx.request`
+    /// instead of this function.
+    ///
+    /// For example, in previous versions of Dropshot, you might have:
+    ///
+    /// ```ignore
+    /// let request = rqctx.request.lock().await;
+    /// let headers = request.headers();
+    /// ```
+    ///
+    /// Now, you would do this:
+    ///
+    /// ```ignore
+    /// let headers = rqctx.request.headers();
+    /// ```
+    ///
+    /// This function allows the older code to continue to work.
+    #[deprecated(
+        since = "0.9.0",
+        note = "use `rqctx.request` directly instead of \
+                `rqctx.request.lock().await`"
+    )]
+    pub async fn lock(&self) -> &Self {
+        self
+    }
 }
 
 impl<Context: ServerContext> RequestContext<Context> {
diff --git a/dropshot/tests/test_demo.rs b/dropshot/tests/test_demo.rs
index 9155d1303..d3dc2bdbd 100644
--- a/dropshot/tests/test_demo.rs
+++ b/dropshot/tests/test_demo.rs
@@ -79,6 +79,7 @@ fn demo_api() -> ApiDescription<usize> {
     api.register(demo_handler_303_see_other).unwrap();
     api.register(demo_handler_307_temporary_redirect).unwrap();
     api.register(demo_handler_websocket).unwrap();
+    api.register(demo_handler_request_compat).unwrap();
 
     // We don't need to exhaustively test these cases, as they're tested by unit
     // tests.
@@ -854,6 +855,25 @@ async fn test_demo_websocket() {
     testctx.teardown().await;
 }
 
+#[tokio::test]
+async fn test_request_compat() {
+    let api = demo_api();
+    let testctx = common::test_setup("test_request_compat", api);
+    let mut response = testctx
+        .client_testctx
+        .make_request(
+            Method::GET,
+            "/testing/request_compat",
+            None as Option<()>,
+            StatusCode::OK,
+        )
+        .await
+        .expect("expected success");
+    let json: String = read_json(&mut response).await;
+    assert_eq!(json, "dummy");
+    testctx.teardown().await;
+}
+
 // Demo handler functions
 
 type RequestCtx = Arc<RequestContext<usize>>;
@@ -1137,6 +1157,22 @@ async fn demo_handler_websocket(
     Ok(())
 }
 
+#[endpoint {
+    method = GET,
+    path = "/testing/request_compat",
+}]
+async fn demo_handler_request_compat(
+    rqctx: RequestCtx,
+) -> Result<Response<Body>, HttpError> {
+    // Verifies that RequestInfo.lock() does what we expect.
+    #[allow(deprecated)]
+    let request = rqctx.request.lock().await;
+    let headers = request.headers();
+    let header_value = headers.get("server").and_then(|v| v.to_str().ok());
+    let value = header_value.unwrap_or("dummy");
+    http_echo(&value)
+}
+
 fn http_echo<T: Serialize>(t: &T) -> Result<Response<Body>, HttpError> {
     Ok(Response::builder()
         .header(http::header::CONTENT_TYPE, CONTENT_TYPE_JSON)

From 309f96d3b08019ba1100f5e7e254b7a76bb534c5 Mon Sep 17 00:00:00 2001
From: David Pacheco
Date: Thu, 12 Jan 2023 14:27:20 -0800
Subject: [PATCH 39/47] typo

---
 dropshot/src/lib.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/dropshot/src/lib.rs b/dropshot/src/lib.rs
index df01ca631..0e83b76ee 100644
--- a/dropshot/src/lib.rs
+++ b/dropshot/src/lib.rs
@@ -508,7 +508,7 @@
 //! Dropshot optionally exposes two DTrace probes, `request_start` and
 //! `request_finish`. These provide detailed information about each request,
 //! such as their ID, the local and remote IPs, and the response information.
-//! See the `dropshot::dtrace::RequestInfo` and `dropshot::dtrae::ResponseInfo`
+//! See the `dropshot::dtrace::RequestInfo` and `dropshot::dtrace::ResponseInfo`
 //! types for a complete listing of what's available.
 //!
 //! These probes are implemented via the [`usdt`] crate.
They may require a From 3e2dbdb6b89d8a75e4e1323c56e9843cb620c06a Mon Sep 17 00:00:00 2001 From: David Pacheco Date: Thu, 12 Jan 2023 14:33:30 -0800 Subject: [PATCH 40/47] fix dtrace probes --- dropshot/src/dtrace.rs | 24 ++++++++++++------------ dropshot/src/server.rs | 2 +- 2 files changed, 13 insertions(+), 13 deletions(-) diff --git a/dropshot/src/dtrace.rs b/dropshot/src/dtrace.rs index 3f6a9d415..a86eb1081 100644 --- a/dropshot/src/dtrace.rs +++ b/dropshot/src/dtrace.rs @@ -3,27 +3,27 @@ #[derive(Debug, Clone, serde::Serialize)] pub(crate) struct RequestInfo { - id: String, - local_addr: std::net::SocketAddr, - remote_addr: std::net::SocketAddr, - method: String, - path: String, - query: Option, + pub id: String, + pub local_addr: std::net::SocketAddr, + pub remote_addr: std::net::SocketAddr, + pub method: String, + pub path: String, + pub query: Option, } #[derive(Debug, Clone, serde::Serialize)] pub(crate) struct ResponseInfo { - id: String, - local_addr: std::net::SocketAddr, - remote_addr: std::net::SocketAddr, - status_code: u16, - message: String, + pub id: String, + pub local_addr: std::net::SocketAddr, + pub remote_addr: std::net::SocketAddr, + pub status_code: u16, + pub message: String, } #[cfg(feature = "usdt-probes")] #[usdt::provider(provider = "dropshot")] mod probes { - use super::{RequestInfo, ResponseInfo}; + use crate::dtrace::{RequestInfo, ResponseInfo}; fn request__start(_: &RequestInfo) {} fn request__done(_: &ResponseInfo) {} } diff --git a/dropshot/src/server.rs b/dropshot/src/server.rs index 380de0c8f..8cf8f4fd3 100644 --- a/dropshot/src/server.rs +++ b/dropshot/src/server.rs @@ -737,7 +737,7 @@ async fn http_request_handle_wrap( #[cfg(feature = "usdt-probes")] probes::request__done!(|| { - crate::ResponseInfo { + crate::dtrace::ResponseInfo { id: request_id.parse().unwrap(), local_addr, remote_addr, From 185f7f8e441843d9b9c794da150124408e7f464a Mon Sep 17 00:00:00 2001 From: David Pacheco Date: Wed, 18 Jan 2023 08:51:44 -0800 Subject: [PATCH 41/47] websocket args should be last, too --- CHANGELOG.adoc | 4 ++-- dropshot/examples/websocket.rs | 2 +- dropshot_endpoint/src/lib.rs | 23 ++++------------------- 3 files changed, 7 insertions(+), 22 deletions(-) diff --git a/CHANGELOG.adoc b/CHANGELOG.adoc index f4ebff4eb..697e3c8fc 100644 --- a/CHANGELOG.adoc +++ b/CHANGELOG.adoc @@ -23,9 +23,9 @@ There are a number of breaking changes in this release but we expect they will b + **What you need to do:** + -1. For any endpoint functions that use a `TypedBody` or `UntypedBody` extractor, this extractor must be the last argument to the function. Otherwise, you will get a compile error about the extractor not impl'ing `SharedExtractor`. +1. For any endpoint functions that use a `TypedBody`, `UntypedBody`, or `WebsocketConnection` extractor, this extractor must be the last argument to the function. Otherwise, you will get a compile error about the extractor not impl'ing `SharedExtractor`. 2. If you have your own type that impls `Extractor`, you will need to change that to either `ExclusiveExtractor` (if the impl needs a `mut` reference to the underlying `hyper::Request`, which is usually because it needs to read the request body) or `SharedExtractor`. If your extractor only needs to look at the URL or request headers and not the body, it can probably be a `SharedExtractor`. If it's an exclusive extractor, any function that accepts it must accept it as the last argument to the function. -3. 
If you have your own type that impls `Extractor`, you will also need to change the type signature of the `from_request` method to accept a `&RequestContext<Context>` instead of `Arc<RequestContext<Context>>`. (This should not be a problem unless your extractor was hanging on to a reference via the Arc. We don't know a reason this would be useful. If you were doing this, please reach out. You can probably instead copy whatever information you need out of the `RequestContext` instead.)
+3. If you have your own type that impls `Extractor`, you will also need to change the type signature of the `from_request` method to accept a `&RequestContext<Context>` instead of `Arc<RequestContext<Context>>`. (This should not be a problem unless your extractor was hanging on to a reference via the Arc. We don't know a reason this would be useful. If you were doing this, please https://github.com/oxidecomputer/dropshot/discussions[start a discussion] or https://github.com/oxidecomputer/dropshot/issues/new[file an issue]. In the meantime, you likely can copy whatever information you need out of the `RequestContext` rather than cloning the Arc.)
 * https://github.com/oxidecomputer/dropshot/pull/504[#504] Dropshot now allows TLS configuration to be supplied either by path or as bytes. For compatibility, the `AsFile` variant of `ConfigTls` contains the `cert_file` and `key_file` fields, and may be used similarly to the old variant.
 * https://github.com/oxidecomputer/dropshot/pull/502[#502] Dropshot exposes a `refresh_tls` method to update the TLS certificates being used by a running server.
 +
diff --git a/dropshot/examples/websocket.rs b/dropshot/examples/websocket.rs
index a79c79a80..1ba95db24 100644
--- a/dropshot/examples/websocket.rs
+++ b/dropshot/examples/websocket.rs
@@ -62,8 +62,8 @@ struct QueryParams {
 }]
 async fn example_api_websocket_counter(
     _rqctx: Arc<RequestContext<()>>,
-    upgraded: WebsocketConnection,
     qp: Query<QueryParams>,
+    upgraded: WebsocketConnection,
 ) -> dropshot::WebsocketChannelResult {
     let mut ws = tokio_tungstenite::WebSocketStream::from_raw_socket(
         upgraded.into_inner(),
diff --git a/dropshot_endpoint/src/lib.rs b/dropshot_endpoint/src/lib.rs
index faeb929fb..105f76a94 100644
--- a/dropshot_endpoint/src/lib.rs
+++ b/dropshot_endpoint/src/lib.rs
@@ -139,7 +139,7 @@ fn do_endpoint(
 ///
 /// The first argument still must be an `Arc<RequestContext<_>>`.
 ///
-/// The second argument passed to the handler function must be a
+/// The last argument passed to the handler function must be a
 /// [`dropshot::WebsocketConnection`].
 ///
 /// The function must return a [`dropshot::WebsocketChannelResult`] (which is
@@ -171,8 +171,7 @@ fn do_channel(
         ChannelProtocol::WEBSOCKETS => {
             // Here we construct a wrapper function and mutate the arguments a bit
             // for the outer layer: we replace WebsocketConnection, which is not
-            // an extractor, with WebsocketUpgrade, which is. We also move it
-            // to the end.
+            // an extractor, with WebsocketUpgrade, which is.
             let ItemFnForSignature { attrs, vis, mut sig, _block: body } =
                 syn::parse2(item)?;
@@ -190,7 +189,7 @@ fn do_channel(
                 }
             })
             .collect();
-            let found = sig.inputs.iter_mut().nth(1).and_then(|arg| {
+            let found = sig.inputs.iter_mut().last().and_then(|arg| {
                 if let syn::FnArg::Typed(syn::PatType { pat, ty, ..
}) = arg {
                     if let syn::Pat::Ident(syn::PatIdent {
                         ident,
@@ -216,24 +215,10 @@ fn do_channel(
             if found.is_none() {
                 return Err(Error::new_spanned(
                     &attr,
-                    "An argument of type dropshot::WebsocketConnection must be provided immediately following Arc<RequestContext<T>>.",
+                    "An argument of type dropshot::WebsocketConnection must be provided last.",
                 ));
             }
 
-            // Historically, we required that the `WebsocketConnection` argument
-            // be first after the `RequestContext`. However, we also require
-            // that any exclusive extractor (which includes the
-            // `WebsocketUpgrade` argument that we put in its place) appears
-            // last. We replaced the type above, but now we need to put it in
-            // the right spot.
-            sig.inputs = {
-                let mut input_pairs =
-                    sig.inputs.clone().into_pairs().collect::<Vec<_>>();
-                let second_pair = input_pairs.remove(1);
-                input_pairs.push(second_pair);
-                input_pairs.into_iter().collect()
-            };
-
             sig.output =
                 syn::parse2(quote!(-> dropshot::WebsocketEndpointResult))?;

From 00193d142e1ea4f9df4395b05b41be8441b9d94c Mon Sep 17 00:00:00 2001
From: David Pacheco
Date: Wed, 18 Jan 2023 09:00:05 -0800
Subject: [PATCH 42/47] found one straggler

---
 dropshot/src/websocket.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/dropshot/src/websocket.rs b/dropshot/src/websocket.rs
index 1db1b1306..7e739a35a 100644
--- a/dropshot/src/websocket.rs
+++ b/dropshot/src/websocket.rs
@@ -44,7 +44,7 @@ pub type WebsocketChannelResult =
 /// [WebsocketUpgrade::handle]. (This is done for you by `#[channel]`.)
 pub type WebsocketEndpointResult = Result<Response<Body>, HttpError>;
 
-/// The upgraded connection passed as the second argument to the websocket
+/// The upgraded connection passed as the last argument to the websocket
 /// handler function. [`WebsocketConnection::into_inner`] can be used to
 /// access the raw upgraded connection, for passing to any implementation
 /// of the websockets protocol.

From f21536cc8419ef8171a3a67137937c4c7b5fd2af Mon Sep 17 00:00:00 2001
From: David Pacheco
Date: Wed, 18 Jan 2023 09:40:17 -0800
Subject: [PATCH 43/47] draft CHANGELOG update

---
 CHANGELOG.adoc | 12 +++++++++++-
 1 file changed, 11 insertions(+), 1 deletion(-)

diff --git a/CHANGELOG.adoc b/CHANGELOG.adoc
index dca8ae206..58736db0d 100644
--- a/CHANGELOG.adoc
+++ b/CHANGELOG.adoc
@@ -19,13 +19,21 @@ https://github.com/oxidecomputer/dropshot/compare/v0.8.0\...HEAD[Full list of co
 
 There are a number of breaking changes in this release but we expect they will be easy to manage. **If you have any trouble updating to this release or want help with it, please do https://github.com/oxidecomputer/dropshot/discussions[start a discussion] or https://github.com/oxidecomputer/dropshot/issues/new[file an issue]!**
 
+* https://github.com/oxidecomputer/dropshot/pull/558[#558] Remove `Arc` around `RequestContext`. Previously, endpoint functions and extractors accepted `Arc<RequestContext<T>>`. They now accept just `RequestContext<T>`. This better reflects the intent that the `RequestContext` is provided for the duration of your endpoint function.
++
+We expect this to be an annoying (sorry) but otherwise easy change for consumers to make. If it's tricky for some reason, please file an issue.
++
+**What you need to do:**
++
+1. For every endpoint function, change the type of the first argument from `Arc<RequestContext<T>>` to `RequestContext<T>`. In case it's useful, the following vim command worked to convert most of the cases we've seen: `%s/Arc<RequestContext<\([^>]*\)>>/RequestContext<\1>/gc`.
 
 * https://github.com/oxidecomputer/dropshot/pull/556[#556] Better type-safety around the use of extractors. It is now a compile-time error to define an endpoint that accepts two extractors that use the HTTP request body (e.g., to accept both a `TypedBody` and an `UntypedBody`, or two `TypedBody` arguments). Previously, this would have resulted in a runtime error. The main change is that the `Extractor` trait has been split into two separate traits: `SharedExtractor` and `ExclusiveExtractor`. Endpoint functions can still accept 0-3 extractors, but only one can be an `ExclusiveExtractor` and it must be the last one. The function signatures for `*Extractor::from_request` have also changed.
 +
 **What you need to do:**
 +
 1. For any endpoint functions that use a `TypedBody`, `UntypedBody`, or `WebsocketConnection` extractor, this extractor must be the last argument to the function. Otherwise, you will get a compile error about the extractor not impl'ing `SharedExtractor`.
 2. If you have your own type that impls `Extractor`, you will need to change that to either `ExclusiveExtractor` (if the impl needs a `mut` reference to the underlying `hyper::Request`, which is usually because it needs to read the request body) or `SharedExtractor`. If your extractor only needs to look at the URL or request headers and not the body, it can probably be a `SharedExtractor`. If it's an exclusive extractor, any function that accepts it must accept it as the last argument to the function.
-3. If you have your own type that impls `Extractor`, you will also need to change the type signature of the `from_request` method to accept a `&RequestContext<T>` instead of `Arc<RequestContext<T>>`. (This should not be a problem unless your extractor was hanging on to a reference via the Arc. We don't know a reason this would be useful. If you were doing this, please https://github.com/oxidecomputer/dropshot/discussions[start a discussion] or https://github.com/oxidecomputer/dropshot/issues/new[file an issue]. In the meantime, you likely can copy whatever information you need out of the `RequestContext` rather than cloning the Arc.)
+3. Again if you have your own type that impls `Extractor`, having now updated it to either `SharedExtractor` or `ExclusiveExtractor`, you will also need to change the type signature of the `from_request` method to accept a `&RequestContext<T>` instead of `Arc<RequestContext<T>>`. (This should not be a problem unless your extractor was hanging on to a reference via the Arc. We don't know a reason this would be useful. If you were doing this, please https://github.com/oxidecomputer/dropshot/discussions[start a discussion] or https://github.com/oxidecomputer/dropshot/issues/new[file an issue]. In the meantime, you likely can copy whatever information you need out of the `RequestContext` rather than cloning the Arc.)
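To make the ordering rule in items 1 and 2 concrete, here is a hypothetical endpoint (all names invented for illustration) combining one shared and one exclusive extractor; swapping the last two arguments now fails to compile instead of failing at runtime:

[source,rust]
----
use dropshot::{
    endpoint, HttpError, HttpResponseOk, Query, RequestContext, TypedBody,
};
use schemars::JsonSchema;
use serde::Deserialize;

#[derive(Deserialize, JsonSchema)]
struct UpdateParams {
    dry_run: bool,
}

#[derive(Deserialize, JsonSchema)]
struct UpdateBody {
    value: u64,
}

#[endpoint {
    method = PUT,
    path = "/value",
}]
async fn value_put(
    rqctx: RequestContext<()>,
    query: Query<UpdateParams>,  // SharedExtractor: may appear anywhere
    body: TypedBody<UpdateBody>, // ExclusiveExtractor: must be last
) -> Result<HttpResponseOk<u64>, HttpError> {
    let _ = (rqctx.context(), query.into_inner().dry_run);
    Ok(HttpResponseOk(body.into_inner().value))
}
----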
 
 * https://github.com/oxidecomputer/dropshot/pull/557[#557] Simpler, safer access to raw request. Prior to this change, the raw `hyper::Request` (`http::Request<hyper::Body>`) was accessible to endpoint functions via the `RequestContext`, but behind an `Arc<Mutex<...>>`. This was a little strange because your endpoint function was usually the only one with a reference to this object. (You could get into trouble if you defined your own Extractor that cloned one of the `Arc` objects -- your extractor could deadlock with the handler.) After this change, the raw request is available only through a separate `RawRequest` extractor. This is an exclusive extractor, which means you cannot use it with `TypedBody` or `UntypedBody`. As a result, there is no way to wind up with multiple references to the request. There's no lock and no way to get into this sort of trouble.
 +
 After this change, the `hyper::Request` is passed as a separate argument to `ExclusiveExtractor::from_request()`.
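For illustration, a hypothetical handler using the new extractor might look like the sketch below (`RawRequest` is the extractor named above; its `into_inner()` accessor is assumed here). Because `RawRequest` is exclusive, it is the last argument, and the handler owns the raw request outright, with no lock or `Arc` involved:

[source,rust]
----
use dropshot::{endpoint, HttpError, HttpResponseOk, RawRequest, RequestContext};

#[endpoint {
    method = POST,
    path = "/inspect",
}]
async fn inspect_request(
    _rqctx: RequestContext<()>,
    raw: RawRequest, // exclusive extractor, so it comes last
) -> Result<HttpResponseOk<String>, HttpError> {
    // Take ownership of the underlying `hyper::Request` directly.
    let request = raw.into_inner();
    Ok(HttpResponseOk(format!("{} {}", request.method(), request.uri())))
}
----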
@@ -45,6 +53,8 @@ b. If that code is accessing other parts of the request (e.g., reading the body
 
 **What you need to do:** If you previously tried to access `DropshotState.tls`, you can access the `DropshotState.using_tls()` method instead.
 
 * https://github.com/oxidecomputer/dropshot/pull/540[#540] `ConfigDropshot` now uses a [`camino::Utf8PathBuf`](https://docs.rs/camino/1.1.1/camino/struct.Utf8PathBuf.html) for its file path. There is no change to the configuration format itself, just its representation in Rust.
 
+We realize this was a lot of breaking changes. We expect that most of these will affect few people (there don't seem to be a lot of custom extractor impls out there). The rest are pretty mechanical. We hope the result will be a safer, easier-to-use API.
+
 === Other notable Changes
 
 * https://github.com/oxidecomputer/dropshot/pull/522[#522] Dropshot's DTrace

From 7652cdf8baa589314e3660478b7fa05a2d3af750 Mon Sep 17 00:00:00 2001
From: David Pacheco
Date: Wed, 18 Jan 2023 10:18:42 -0800
Subject: [PATCH 44/47] fix warnings

---
 dropshot/examples/basic.rs               | 1 -
 dropshot/examples/file_server.rs         | 1 -
 dropshot/examples/https.rs               | 1 -
 dropshot/examples/index.rs               | 1 -
 dropshot/examples/module-basic.rs        | 1 -
 dropshot/examples/pagination-basic.rs    | 1 -
 dropshot/examples/petstore.rs            | 1 -
 dropshot/examples/request-headers.rs     | 1 -
 dropshot/examples/schema-with-example.rs | 1 -
 dropshot/examples/websocket.rs           | 1 -
 dropshot/examples/well-tagged.rs         | 2 --
 dropshot/src/api_description.rs          | 1 -
 dropshot/src/router.rs                   | 1 -
 dropshot/tests/test_demo.rs              | 1 -
 14 files changed, 15 deletions(-)

diff --git a/dropshot/examples/basic.rs b/dropshot/examples/basic.rs
index ee57df5fe..dbd7ea4d6 100644
--- a/dropshot/examples/basic.rs
+++ b/dropshot/examples/basic.rs
@@ -17,7 +17,6 @@ use serde::Deserialize;
 use serde::Serialize;
 use std::sync::atomic::AtomicU64;
 use std::sync::atomic::Ordering;
-use std::sync::Arc;
 
 #[tokio::main]
 async fn main() -> Result<(), String> {
diff --git a/dropshot/examples/file_server.rs b/dropshot/examples/file_server.rs
index e280ff44f..df9392496 100644
--- a/dropshot/examples/file_server.rs
+++ b/dropshot/examples/file_server.rs
@@ -14,7 +14,6 @@ use hyper::Body;
 use schemars::JsonSchema;
 use serde::Deserialize;
 use std::path::PathBuf;
-use std::sync::Arc;
 
 /// Our context is simply the root of the directory we want to serve.
 struct FileServerContext {
diff --git a/dropshot/examples/https.rs b/dropshot/examples/https.rs
index bb449f818..1abaac272 100644
--- a/dropshot/examples/https.rs
+++ b/dropshot/examples/https.rs
@@ -20,7 +20,6 @@ use serde::Serialize;
 use std::io::Write;
 use std::sync::atomic::AtomicU64;
 use std::sync::atomic::Ordering;
-use std::sync::Arc;
 use tempfile::NamedTempFile;
 
 // This function would not be used in a normal application. It is used to
diff --git a/dropshot/examples/index.rs b/dropshot/examples/index.rs
index 5668afd47..48d3b74e8 100644
--- a/dropshot/examples/index.rs
+++ b/dropshot/examples/index.rs
@@ -13,7 +13,6 @@ use http::{Response, StatusCode};
 use hyper::Body;
 use schemars::JsonSchema;
 use serde::Deserialize;
-use std::sync::Arc;
 
 #[tokio::main]
 async fn main() -> Result<(), String> {
diff --git a/dropshot/examples/module-basic.rs b/dropshot/examples/module-basic.rs
index 485639d36..4b95d6f04 100644
--- a/dropshot/examples/module-basic.rs
+++ b/dropshot/examples/module-basic.rs
@@ -79,7 +79,6 @@ pub mod routes {
     use dropshot::RequestContext;
     use dropshot::TypedBody;
     use std::sync::atomic::Ordering;
-    use std::sync::Arc;
 
     /// Fetch the current value of the counter.
     /// NOTE: The endpoint macro inherits its module visibility from
diff --git a/dropshot/examples/pagination-basic.rs b/dropshot/examples/pagination-basic.rs
index ed4c36113..342087e1d 100644
--- a/dropshot/examples/pagination-basic.rs
+++ b/dropshot/examples/pagination-basic.rs
@@ -35,7 +35,6 @@ use std::collections::BTreeMap;
 use std::net::Ipv4Addr;
 use std::net::SocketAddr;
 use std::ops::Bound;
-use std::sync::Arc;
 
 /// Object returned by our paginated endpoint
 ///
diff --git a/dropshot/examples/petstore.rs b/dropshot/examples/petstore.rs
index e8acb48a4..9b4d7fc34 100644
--- a/dropshot/examples/petstore.rs
+++ b/dropshot/examples/petstore.rs
@@ -4,7 +4,6 @@ use dropshot::{
 };
 use schemars::JsonSchema;
 use serde::{Deserialize, Serialize};
-use std::sync::Arc;
 
 fn main() -> Result<(), String> {
     // Build a description of the API.
diff --git a/dropshot/examples/request-headers.rs b/dropshot/examples/request-headers.rs
index e3ea37ee0..a32916d52 100644
--- a/dropshot/examples/request-headers.rs
+++ b/dropshot/examples/request-headers.rs
@@ -19,7 +19,6 @@ use dropshot::HttpError;
 use dropshot::HttpResponseOk;
 use dropshot::HttpServerStarter;
 use dropshot::RequestContext;
-use std::sync::Arc;
 
 #[tokio::main]
 async fn main() -> Result<(), String> {
diff --git a/dropshot/examples/schema-with-example.rs b/dropshot/examples/schema-with-example.rs
index 1f54bccb0..1322e5e64 100644
--- a/dropshot/examples/schema-with-example.rs
+++ b/dropshot/examples/schema-with-example.rs
@@ -7,7 +7,6 @@ use dropshot::{
 };
 use schemars::JsonSchema;
 use serde::{Deserialize, Serialize};
-use std::sync::Arc;
 
 // Define 2 structs here - Bar is nested inside Foo and should result in an
 // example that looks like:
diff --git a/dropshot/examples/websocket.rs b/dropshot/examples/websocket.rs
index 11386826b..5fa128cd9 100644
--- a/dropshot/examples/websocket.rs
+++ b/dropshot/examples/websocket.rs
@@ -13,7 +13,6 @@ use dropshot::WebsocketConnection;
 use futures::SinkExt;
 use schemars::JsonSchema;
 use serde::Deserialize;
-use std::sync::Arc;
 use tokio_tungstenite::tungstenite::protocol::Role;
 use tokio_tungstenite::tungstenite::Message;
 
diff --git a/dropshot/examples/well-tagged.rs b/dropshot/examples/well-tagged.rs
index 30244dced..9284ede6c 100644
--- a/dropshot/examples/well-tagged.rs
+++ b/dropshot/examples/well-tagged.rs
@@ -5,8 +5,6 @@
 //! documentation generators; Dropshot's tag policies are intended to make
 //! proper tagging innate.
 
-use std::sync::Arc;
-
 use dropshot::{
     endpoint, ApiDescription, ConfigLogging, ConfigLoggingLevel,
     EndpointTagPolicy, HttpError, HttpResponseOk, HttpServerStarter,
diff --git a/dropshot/src/api_description.rs b/dropshot/src/api_description.rs
index 68bd107a0..c93e83ad8 100644
--- a/dropshot/src/api_description.rs
+++ b/dropshot/src/api_description.rs
@@ -1114,7 +1114,6 @@ mod test {
     use serde::Deserialize;
     use std::collections::HashSet;
     use std::str::from_utf8;
-    use std::sync::Arc;
 
     use crate as dropshot; // for "endpoint" macro
diff --git a/dropshot/src/router.rs b/dropshot/src/router.rs
index 03a9aff33..b8149d5d9 100644
--- a/dropshot/src/router.rs
+++ b/dropshot/src/router.rs
@@ -738,7 +738,6 @@ mod test {
     use hyper::Response;
     use serde::Deserialize;
     use std::collections::BTreeMap;
-    use std::sync::Arc;
 
     async fn test_handler(
         _: RequestContext<()>,
diff --git a/dropshot/tests/test_demo.rs b/dropshot/tests/test_demo.rs
index a86d81118..644e3462e 100644
--- a/dropshot/tests/test_demo.rs
+++ b/dropshot/tests/test_demo.rs
@@ -50,7 +50,6 @@ use hyper::Response;
 use schemars::JsonSchema;
 use serde::Deserialize;
 use serde::Serialize;
-use std::sync::Arc;
 use tokio_tungstenite::tungstenite::protocol::Role;
 use tokio_tungstenite::tungstenite::Message;
 use tokio_tungstenite::WebSocketStream;

From ed997871350de84cd3d645aa59f32385454e0963 Mon Sep 17 00:00:00 2001
From: David Pacheco
Date: Wed, 18 Jan 2023 11:36:29 -0800
Subject: [PATCH 45/47] fix signature

---
 dropshot/src/handler.rs | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/dropshot/src/handler.rs b/dropshot/src/handler.rs
index 94379ae56..bbef74e1a 100644
--- a/dropshot/src/handler.rs
+++ b/dropshot/src/handler.rs
@@ -124,8 +124,8 @@ impl RequestInfo {
         &self.uri
     }
 
-    pub fn version(&self) -> &http::Version {
-        &self.version
+    pub fn version(&self) -> http::Version {
+        self.version
     }
 
     pub fn headers(&self) -> &http::HeaderMap {

From 4d44d6f80e77e984d0657bf4850ebceef03517f2 Mon Sep 17 00:00:00 2001
From: David Pacheco
Date: Wed, 18 Jan 2023 17:02:39 -0800
Subject: [PATCH 46/47] clippy

---
 dropshot/src/handler.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/dropshot/src/handler.rs b/dropshot/src/handler.rs
index bbef74e1a..38adc2ad9 100644
--- a/dropshot/src/handler.rs
+++ b/dropshot/src/handler.rs
@@ -103,7 +103,7 @@ impl From<&hyper::Request<hyper::Body>> for RequestInfo {
         RequestInfo {
             method: request.method().clone(),
             uri: request.uri().clone(),
-            version: request.version().clone(),
+            version: request.version(),
             headers: request.headers().clone(),
         }
     }
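The rationale behind these two small fixes is the same: `http::Version` is a tiny `Copy` enum, so returning it by value is as cheap as returning a reference, and the `.clone()` that clippy flagged was just a copy in disguise. A caller-side sketch (this assumes the `http` crate is a dependency and that `RequestContext` exposes the `request: RequestInfo` field introduced earlier in this series):

[source,rust]
----
use dropshot::{RequestContext, ServerContext};

// Hypothetical helper: since `http::Version` is `Copy`, `version()` can
// return it by value and the caller simply copies it.
fn log_version<C: ServerContext>(rqctx: &RequestContext<C>) {
    let version: http::Version = rqctx.request.version();
    println!("request used {:?}", version);
}
----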
From 7e1bfb619d96a97d863bc109b21a93c0210fbd4a Mon Sep 17 00:00:00 2001
From: David Pacheco
Date: Wed, 18 Jan 2023 18:20:24 -0800
Subject: [PATCH 47/47] missed updating the new tests

---
 dropshot/tests/fail/bad_endpoint17.rs     |  3 +-
 dropshot/tests/fail/bad_endpoint17.stderr | 47 ++++++++++++++++-------
 dropshot/tests/fail/bad_endpoint18.rs     |  3 +-
 dropshot/tests/fail/bad_endpoint18.stderr | 47 ++++++++++++++++-------
 dropshot/tests/fail/bad_endpoint19.rs     |  3 +-
 dropshot/tests/fail/bad_endpoint19.stderr | 47 ++++++++++++++++-------
 6 files changed, 102 insertions(+), 48 deletions(-)

diff --git a/dropshot/tests/fail/bad_endpoint17.rs b/dropshot/tests/fail/bad_endpoint17.rs
index 44dbc09f9..1b71fa67f 100644
--- a/dropshot/tests/fail/bad_endpoint17.rs
+++ b/dropshot/tests/fail/bad_endpoint17.rs
@@ -10,7 +10,6 @@ use dropshot::TypedBody;
 use dropshot::UntypedBody;
 use schemars::JsonSchema;
 use serde::Deserialize;
-use std::sync::Arc;
 
 #[allow(dead_code)]
 #[derive(Deserialize, JsonSchema)]
@@ -27,7 +26,7 @@ struct Stuff {
     path = "/test",
 }]
 async fn two_exclusive_extractors(
-    _rqctx: Arc<RequestContext<()>>,
+    _rqctx: RequestContext<()>,
     _param1: TypedBody<Stuff>,
     _param2: UntypedBody,
 ) -> Result<HttpResponseOk<String>, HttpError> {
diff --git a/dropshot/tests/fail/bad_endpoint17.stderr b/dropshot/tests/fail/bad_endpoint17.stderr
index e644a5af9..d752a12a7 100644
--- a/dropshot/tests/fail/bad_endpoint17.stderr
+++ b/dropshot/tests/fail/bad_endpoint17.stderr
@@ -1,19 +1,38 @@
-error[E0277]: the trait bound `Arc<RequestContext<()>>: RequestContextArgument` is not satisfied
-  --> tests/fail/bad_endpoint17.rs:30:13
+error[E0277]: the trait bound `TypedBody<Stuff>: SharedExtractor` is not satisfied
+  --> tests/fail/bad_endpoint17.rs:30:14
    |
-30 |     _rqctx: Arc<RequestContext<()>>,
-   |             ^^^ the trait `RequestContextArgument` is not implemented for `Arc<RequestContext<()>>`
+30 |     _param1: TypedBody<Stuff>,
+   |              ^^^^^^^^^^^^^^^^ the trait `SharedExtractor` is not implemented for `TypedBody<Stuff>`
    |
-   = help: the trait `RequestContextArgument` is implemented for `RequestContext<Context>`
+   = help: the following other types implement trait `SharedExtractor`:
+             dropshot::Path
+             dropshot::Query
+note: required by a bound in `need_shared_extractor`
+  --> tests/fail/bad_endpoint17.rs:24:1
+   |
+24 | / #[endpoint {
+25 | |     method = GET,
+26 | |     path = "/test",
+27 | | }]
+   | |__^ required by this bound in `need_shared_extractor`
+...
+30 |   _param1: TypedBody<Stuff>,
+   |   --------- required by a bound in this
+   = note: this error originates in the attribute macro `endpoint` (in Nightly builds, run with -Z macro-backtrace for more info)
 
-error[E0277]: the trait bound `Arc<RequestContext<()>>: RequestContextArgument` is not satisfied
-  --> tests/fail/bad_endpoint17.rs:25:1
+error[E0277]: the trait bound `fn(RequestContext<()>, TypedBody<Stuff>, UntypedBody) -> impl Future<Output = Result<HttpResponseOk<String>, HttpError>> {<impl From<two_exclusive_extractors> for ApiEndpoint<<RequestContext<()> as RequestContextArgument>::Context>>::from::two_exclusive_extractors}: dropshot::handler::HttpHandlerFunc<_, _, _>` is not satisfied
+  --> tests/fail/bad_endpoint17.rs:28:10
    |
-25 | / #[endpoint {
-26 | |     method = GET,
-27 | |     path = "/test",
-28 | | }]
-   | |__^ the trait `RequestContextArgument` is not implemented for `Arc<RequestContext<()>>`
+24 | / #[endpoint {
+25 | |     method = GET,
+26 | |     path = "/test",
+27 | | }]
+   | |__- required by a bound introduced by this call
+28 |   async fn two_exclusive_extractors(
+   |            ^^^^^^^^^^^^^^^^^^^^^^^^ the trait `dropshot::handler::HttpHandlerFunc<_, _, _>` is not implemented for fn item `fn(RequestContext<()>, TypedBody<Stuff>, UntypedBody) -> impl Future<Output = Result<HttpResponseOk<String>, HttpError>> {<impl From<two_exclusive_extractors> for ApiEndpoint<<RequestContext<()> as RequestContextArgument>::Context>>::from::two_exclusive_extractors}`
    |
-   = help: the trait `RequestContextArgument` is implemented for `RequestContext<Context>`
-   = note: this error originates in the attribute macro `endpoint` (in Nightly builds, run with -Z macro-backtrace for more info)
+note: required by a bound in `ApiEndpoint::<Context>::new`
+  --> src/api_description.rs
+   |
+   | HandlerType: HttpHandlerFunc<Context, FuncParams, ResponseType>,
+   | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `ApiEndpoint::<Context>::new`
diff --git a/dropshot/tests/fail/bad_endpoint18.rs b/dropshot/tests/fail/bad_endpoint18.rs
index 2b8193ebc..73219a7b0 100644
--- a/dropshot/tests/fail/bad_endpoint18.rs
+++ b/dropshot/tests/fail/bad_endpoint18.rs
@@ -10,7 +10,6 @@ use dropshot::TypedBody;
 use dropshot::RequestContext;
 use schemars::JsonSchema;
 use serde::Deserialize;
-use std::sync::Arc;
 
 #[allow(dead_code)]
 #[derive(Deserialize, JsonSchema)]
@@ -24,7 +23,7 @@ struct Stuff {
     path = "/test",
 }]
 async fn exclusive_extractor_not_last(
-    _rqctx: Arc<RequestContext<()>>,
+    _rqctx: RequestContext<()>,
     _param1: TypedBody<Stuff>,
     _param2: Query<Stuff>,
 ) -> Result<HttpResponseOk<String>, HttpError> {
diff --git a/dropshot/tests/fail/bad_endpoint18.stderr b/dropshot/tests/fail/bad_endpoint18.stderr
index 0120c42d6..bdad29c6d 100644
--- a/dropshot/tests/fail/bad_endpoint18.stderr
+++ b/dropshot/tests/fail/bad_endpoint18.stderr
@@ -1,19 +1,38 @@
-error[E0277]: the trait bound `Arc<RequestContext<()>>: RequestContextArgument` is not satisfied
-  --> tests/fail/bad_endpoint18.rs:27:13
+error[E0277]: the trait bound `TypedBody<Stuff>: SharedExtractor` is not satisfied
+  --> tests/fail/bad_endpoint18.rs:27:14
    |
-27 |     _rqctx: Arc<RequestContext<()>>,
-   |             ^^^ the trait `RequestContextArgument` is not implemented for `Arc<RequestContext<()>>`
+27 |     _param1: TypedBody<Stuff>,
+   |              ^^^^^^^^^^^^^^^^ the trait `SharedExtractor` is not implemented for `TypedBody<Stuff>`
    |
-   = help: the trait `RequestContextArgument` is implemented for `RequestContext<Context>`
+   = help: the following other types implement trait `SharedExtractor`:
+             dropshot::Path
+             dropshot::Query
+note: required by a bound in `need_shared_extractor`
+  --> tests/fail/bad_endpoint18.rs:21:1
+   |
+21 | / #[endpoint {
+22 | |     method = GET,
+23 | |     path = "/test",
+24 | | }]
+   | |__^ required by this bound in `need_shared_extractor`
+...
+27 |   _param1: TypedBody<Stuff>,
+   |   --------- required by a bound in this
+   = note: this error originates in the attribute macro `endpoint` (in Nightly builds, run with -Z macro-backtrace for more info)
 
-error[E0277]: the trait bound `Arc<RequestContext<()>>: RequestContextArgument` is not satisfied
-  --> tests/fail/bad_endpoint18.rs:22:1
+error[E0277]: the trait bound `fn(RequestContext<()>, TypedBody<Stuff>, dropshot::Query<Stuff>) -> impl Future<Output = Result<HttpResponseOk<String>, HttpError>> {<impl From<exclusive_extractor_not_last> for ApiEndpoint<<RequestContext<()> as RequestContextArgument>::Context>>::from::exclusive_extractor_not_last}: dropshot::handler::HttpHandlerFunc<_, _, _>` is not satisfied
+  --> tests/fail/bad_endpoint18.rs:25:10
    |
-22 | / #[endpoint {
-23 | |     method = GET,
-24 | |     path = "/test",
-25 | | }]
-   | |__^ the trait `RequestContextArgument` is not implemented for `Arc<RequestContext<()>>`
+21 | / #[endpoint {
+22 | |     method = GET,
+23 | |     path = "/test",
+24 | | }]
+   | |__- required by a bound introduced by this call
+25 |   async fn exclusive_extractor_not_last(
+   |            ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ the trait `dropshot::handler::HttpHandlerFunc<_, _, _>` is not implemented for fn item `fn(RequestContext<()>, TypedBody<Stuff>, dropshot::Query<Stuff>) -> impl Future<Output = Result<HttpResponseOk<String>, HttpError>> {<impl From<exclusive_extractor_not_last> for ApiEndpoint<<RequestContext<()> as RequestContextArgument>::Context>>::from::exclusive_extractor_not_last}`
    |
-   = help: the trait `RequestContextArgument` is implemented for `RequestContext<Context>`
-   = note: this error originates in the attribute macro `endpoint` (in Nightly builds, run with -Z macro-backtrace for more info)
+note: required by a bound in `ApiEndpoint::<Context>::new`
+  --> src/api_description.rs
+   |
+   | HandlerType: HttpHandlerFunc<Context, FuncParams, ResponseType>,
+   | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `ApiEndpoint::<Context>::new`
diff --git a/dropshot/tests/fail/bad_endpoint19.rs b/dropshot/tests/fail/bad_endpoint19.rs
index 0937f5017..08056b833 100644
--- a/dropshot/tests/fail/bad_endpoint19.rs
+++ b/dropshot/tests/fail/bad_endpoint19.rs
@@ -9,7 +9,6 @@ use dropshot::Query;
 use dropshot::RequestContext;
 use schemars::JsonSchema;
 use serde::Deserialize;
-use std::sync::Arc;
 
 #[allow(dead_code)]
 #[derive(Deserialize, JsonSchema)]
@@ -23,7 +22,7 @@ struct QueryParams {
     path = "/test",
 }]
 async fn non_extractor_as_last_argument(
-    _rqctx: Arc<RequestContext<()>>,
+    _rqctx: RequestContext<()>,
     _param1: String,
     _param2: Query<QueryParams>,
 ) -> Result<HttpResponseOk<String>, HttpError> {
diff --git a/dropshot/tests/fail/bad_endpoint19.stderr b/dropshot/tests/fail/bad_endpoint19.stderr
index 90cc714a7..6ef8a640a 100644
--- a/dropshot/tests/fail/bad_endpoint19.stderr
+++ b/dropshot/tests/fail/bad_endpoint19.stderr
@@ -1,19 +1,38 @@
-error[E0277]: the trait bound `Arc<RequestContext<()>>: RequestContextArgument` is not satisfied
-  --> tests/fail/bad_endpoint19.rs:26:13
+error[E0277]: the trait bound `std::string::String: SharedExtractor` is not satisfied
+  --> tests/fail/bad_endpoint19.rs:26:14
    |
-26 |     _rqctx: Arc<RequestContext<()>>,
-   |             ^^^ the trait `RequestContextArgument` is not implemented for `Arc<RequestContext<()>>`
+26 |     _param1: String,
+   |              ^^^^^^ the trait `SharedExtractor` is not implemented for `std::string::String`
    |
-   = help: the trait `RequestContextArgument` is implemented for `RequestContext<Context>`
+   = help: the following other types implement trait `SharedExtractor`:
+             dropshot::Path
+             dropshot::Query
+note: required by a bound in `need_shared_extractor`
+  --> tests/fail/bad_endpoint19.rs:20:1
+   |
+20 | / #[endpoint {
+21 | |     method = GET,
+22 | |     path = "/test",
+23 | | }]
+   | |__^ required by this bound in `need_shared_extractor`
+...
+26 |   _param1: String,
+   |   ------ required by a bound in this
+   = note: this error originates in the attribute macro `endpoint` (in Nightly builds, run with -Z macro-backtrace for more info)
 
-error[E0277]: the trait bound `Arc<RequestContext<()>>: RequestContextArgument` is not satisfied
-  --> tests/fail/bad_endpoint19.rs:21:1
+error[E0277]: the trait bound `fn(RequestContext<()>, std::string::String, dropshot::Query<QueryParams>) -> impl Future<Output = Result<HttpResponseOk<String>, HttpError>> {<impl From<non_extractor_as_last_argument> for ApiEndpoint<<RequestContext<()> as RequestContextArgument>::Context>>::from::non_extractor_as_last_argument}: dropshot::handler::HttpHandlerFunc<_, _, _>` is not satisfied
+  --> tests/fail/bad_endpoint19.rs:24:10
    |
-21 | / #[endpoint {
-22 | |     method = GET,
-23 | |     path = "/test",
-24 | | }]
-   | |__^ the trait `RequestContextArgument` is not implemented for `Arc<RequestContext<()>>`
+20 | / #[endpoint {
+21 | |     method = GET,
+22 | |     path = "/test",
+23 | | }]
+   | |__- required by a bound introduced by this call
+24 |   async fn non_extractor_as_last_argument(
+   |            ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ the trait `dropshot::handler::HttpHandlerFunc<_, _, _>` is not implemented for fn item `fn(RequestContext<()>, std::string::String, dropshot::Query<QueryParams>) -> impl Future<Output = Result<HttpResponseOk<String>, HttpError>> {<impl From<non_extractor_as_last_argument> for ApiEndpoint<<RequestContext<()> as RequestContextArgument>::Context>>::from::non_extractor_as_last_argument}`
    |
-   = help: the trait `RequestContextArgument` is implemented for `RequestContext<Context>`
-   = note: this error originates in the attribute macro `endpoint` (in Nightly builds, run with -Z macro-backtrace for more info)
+note: required by a bound in `ApiEndpoint::<Context>::new`
+  --> src/api_description.rs
+   |
+   | HandlerType: HttpHandlerFunc<Context, FuncParams, ResponseType>,
+   | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `ApiEndpoint::<Context>::new`