From 4ea0e1b212fa55a9b0493ea66a32dcf8e861e591 Mon Sep 17 00:00:00 2001 From: Nicolas Silva Date: Thu, 27 Oct 2022 16:18:04 +0200 Subject: [PATCH 1/4] Avoid integer overflow on multiplication in write_texture. With large depth values in copy_size, validate_linear_texture_data can run into integer overflows. This is avoided by validating the copy depth before calling validate_linear_texture_data in queue_write_texture. The other two validate_linear_texture_data call sites already have the copy size validated beforehand. --- wgpu-core/src/command/transfer.rs | 5 ++++- wgpu-core/src/device/queue.rs | 22 +++++++++++++--------- 2 files changed, 17 insertions(+), 10 deletions(-) diff --git a/wgpu-core/src/command/transfer.rs b/wgpu-core/src/command/transfer.rs index dd46e30a26..d0a338811d 100644 --- a/wgpu-core/src/command/transfer.rs +++ b/wgpu-core/src/command/transfer.rs @@ -219,7 +219,10 @@ pub(crate) fn validate_linear_texture_data( copy_size: &Extent3d, need_copy_aligned_rows: bool, ) -> Result<(BufferAddress, BufferAddress), TransferError> { - // Convert all inputs to BufferAddress (u64) to prevent overflow issues + // Convert all inputs to BufferAddress (u64) to avoid some of the overflow issues + // Note: u64 is not always enough to prevent overflow, especially when multiplying + // something with a potentially large depth value, so it is preferable to validate + // the copy size before calling this function (for example via `validate_texture_copy_range`). 
let copy_width = copy_size.width as BufferAddress; let copy_height = copy_size.height as BufferAddress; let copy_depth = copy_size.depth_or_array_layers as BufferAddress; diff --git a/wgpu-core/src/device/queue.rs b/wgpu-core/src/device/queue.rs index aea5598731..57f05592ee 100644 --- a/wgpu-core/src/device/queue.rs +++ b/wgpu-core/src/device/queue.rs @@ -600,6 +600,19 @@ impl Global { let (selector, dst_base, texture_format) = extract_texture_selector(destination, size, &*texture_guard)?; let format_desc = texture_format.describe(); + + let dst = texture_guard.get_mut(destination.texture).unwrap(); + if !dst.desc.usage.contains(wgt::TextureUsages::COPY_DST) { + return Err( + TransferError::MissingCopyDstUsageFlag(None, Some(destination.texture)).into(), + ); + } + + // Note: Doing the copy range validation early is important because it ensures that the + // dimensions are not going to cause overflow in other parts of the validation. + let (hal_copy_size, array_layer_count) = + validate_texture_copy_range(destination, &dst.desc, CopySide::Destination, size)?; + // Note: `_source_bytes_per_array_layer` is ignored since we // have a staging copy, and it can have a different value. 
let (_, _source_bytes_per_array_layer) = validate_linear_texture_data( @@ -642,13 +655,6 @@ impl Global { (size.depth_or_array_layers - 1) * block_rows_per_image + height_blocks; let stage_size = stage_bytes_per_row as u64 * block_rows_in_copy as u64; - let dst = texture_guard.get_mut(destination.texture).unwrap(); - if !dst.desc.usage.contains(wgt::TextureUsages::COPY_DST) { - return Err( - TransferError::MissingCopyDstUsageFlag(None, Some(destination.texture)).into(), - ); - } - let mut trackers = device.trackers.lock(); let encoder = device.pending_writes.activate(); @@ -702,8 +708,6 @@ impl Global { ) .ok_or(TransferError::InvalidTexture(destination.texture))?; - let (hal_copy_size, array_layer_count) = - validate_texture_copy_range(destination, &dst.desc, CopySide::Destination, size)?; dst.life_guard.use_at(device.active_submission_index + 1); let dst_raw = dst From 472b33d86bdabd308c71830996b00aaf3d17dffe Mon Sep 17 00:00:00 2001 From: Nicolas Silva Date: Fri, 28 Oct 2022 14:46:30 +0200 Subject: [PATCH 2/4] Add an entry in the changelog. --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 27200b170d..462ab3099e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -76,6 +76,7 @@ Bottom level categories: - Fixed the mipmap example by adding the missing WRITE_TIMESTAMP_INSIDE_PASSES feature. By @Olaroll in [#3081](https://github.com/gfx-rs/wgpu/pull/3081). - Avoid panicking in some interactions with invalid resources by @nical in (#3094)[https://github.com/gfx-rs/wgpu/pull/3094] - Remove `wgpu_types::Features::DEPTH24PLUS_STENCIL8`, making `wgpu::TextureFormat::Depth24PlusStencil8` available on all backends. 
By @Healthire in (#3151)[https://github.com/gfx-rs/wgpu/pull/3151] +- Fix an integer overflow in `queue_write_texture` by @nical in (#3146)[https://github.com/gfx-rs/wgpu/pull/3146] #### WebGPU - Use `log` instead of `println` in hello example by @JolifantoBambla in [#2858](https://github.com/gfx-rs/wgpu/pull/2858) From 2f41823424af7c37dc56a29295756dff0e01c3a5 Mon Sep 17 00:00:00 2001 From: Nicolas Silva Date: Fri, 28 Oct 2022 16:47:15 +0200 Subject: [PATCH 3/4] Add a test. --- wgpu/tests/queue_transfer.rs | 49 ++++++++++++++++++++++++++++++++++++ wgpu/tests/root.rs | 1 + 2 files changed, 50 insertions(+) create mode 100644 wgpu/tests/queue_transfer.rs diff --git a/wgpu/tests/queue_transfer.rs b/wgpu/tests/queue_transfer.rs new file mode 100644 index 0000000000..7724c291cd --- /dev/null +++ b/wgpu/tests/queue_transfer.rs @@ -0,0 +1,49 @@ +//! Tests for queue `write_texture` validation. + +use std::num::NonZeroU32; + +use crate::common::{fail, initialize_test, TestParameters}; + +#[test] +fn queue_write_texture_overflow() { + initialize_test(TestParameters::default(), |ctx| { + let texture = ctx.device.create_texture(&wgpu::TextureDescriptor { + label: None, + size: wgpu::Extent3d { + width: 146, + height: 25, + depth_or_array_layers: 192, + }, + mip_level_count: 1, + sample_count: 1, + dimension: wgpu::TextureDimension::D2, + format: wgpu::TextureFormat::Rgba32Float, + usage: wgpu::TextureUsages::COPY_DST, + }); + + let data = vec![255; 128]; + + fail(&ctx.device, || { + ctx.queue.write_texture( + wgpu::ImageCopyTexture { + texture: &texture, + mip_level: 0, + origin: wgpu::Origin3d::ZERO, + aspect: wgpu::TextureAspect::All, + }, + &data, + wgpu::ImageDataLayout { + offset: 0, + bytes_per_row: NonZeroU32::new(879161360), + //bytes_per_image: 4294967295, + rows_per_image: NonZeroU32::new(4294967295 / 879161360), + }, + wgpu::Extent3d { + width: 3056263286, + height: 64, + depth_or_array_layers: 1144576469, + }, + ); + }); + }); +} diff --git a/wgpu/tests/root.rs 
b/wgpu/tests/root.rs index 1deab376c3..3772e8d0f9 100644 --- a/wgpu/tests/root.rs +++ b/wgpu/tests/root.rs @@ -9,6 +9,7 @@ mod encoder; mod example_wgsl; mod instance; mod poll; +mod queue_transfer; mod resource_descriptor_accessor; mod resource_error; mod shader; From c162cb500a3f9508ba08a3123aa0b82156596dd2 Mon Sep 17 00:00:00 2001 From: Nicolas Silva Date: Mon, 7 Nov 2022 11:10:17 +0100 Subject: [PATCH 4/4] Store a reference to the error sink in the queue id. This allows us to make (some of) the queue methods forward errors instead of panicking. --- wgpu/src/backend/direct.rs | 63 ++++++++++++++++++++++++++++---------- 1 file changed, 47 insertions(+), 16 deletions(-) diff --git a/wgpu/src/backend/direct.rs b/wgpu/src/backend/direct.rs index 7428b2e080..b103658acd 100644 --- a/wgpu/src/backend/direct.rs +++ b/wgpu/src/backend/direct.rs @@ -95,7 +95,7 @@ impl Context { hal_device: hal::OpenDevice, desc: &crate::DeviceDescriptor, trace_dir: Option<&std::path::Path>, - ) -> Result<(Device, wgc::id::QueueId), crate::RequestDeviceError> { + ) -> Result<(Device, Queue), crate::RequestDeviceError> { let global = &self.0; let (device_id, error) = global.create_device_from_hal( *adapter, hal_device, desc, trace_dir, (), ); if let Some(err) = error { self.handle_error_fatal(err, "Adapter::create_device_from_hal"); } + let error_sink = Arc::new(Mutex::new(ErrorSinkRaw::new())); let device = Device { id: device_id, - error_sink: Arc::new(Mutex::new(ErrorSinkRaw::new())), + error_sink: error_sink.clone(), features: desc.features, }; - Ok((device, device_id)) + let queue = Queue { + id: device_id, + error_sink, + }; + Ok((device, queue)) } #[cfg(any(not(target_arch = "wasm32"), feature = "emscripten"))] @@ -808,6 +813,18 @@ pub struct Texture { error_sink: ErrorSink, } +#[derive(Debug)] +pub struct Queue { + id: wgc::id::QueueId, + error_sink: ErrorSink, +} + +impl Queue { + pub(crate) fn backend(&self) -> wgt::Backend { + self.id.backend() + } +} + #[derive(Debug)] 
pub(crate) struct CommandEncoder { id: wgc::id::CommandEncoderId, @@ -858,10 +875,17 @@ impl crate::GlobalId for CommandEncoder { } } +impl crate::GlobalId for Queue { + fn global_id(&self) -> Id { + use wgc::id::TypedId; + self.id.unzip() + } +} + impl crate::Context for Context { type AdapterId = wgc::id::AdapterId; type DeviceId = Device; - type QueueId = wgc::id::QueueId; + type QueueId = Queue; type ShaderModuleId = wgc::id::ShaderModuleId; type BindGroupLayoutId = wgc::id::BindGroupLayoutId; type BindGroupId = wgc::id::BindGroupId; @@ -951,12 +975,17 @@ impl crate::Context for Context { log::error!("Error in Adapter::request_device: {}", err); return ready(Err(crate::RequestDeviceError)); } + let error_sink = Arc::new(Mutex::new(ErrorSinkRaw::new())); let device = Device { id: device_id, - error_sink: Arc::new(Mutex::new(ErrorSinkRaw::new())), + error_sink: error_sink.clone(), features: desc.features, }; - ready(Ok((device, device_id))) + let queue = Queue { + id: device_id, + error_sink, + }; + ready(Ok((device, queue))) } fn adapter_is_surface_supported( @@ -2270,7 +2299,7 @@ impl crate::Context for Context { ) { let global = &self.0; match wgc::gfx_select!( - *queue => global.queue_write_buffer(*queue, buffer.id, offset, data) + *queue => global.queue_write_buffer(queue.id, buffer.id, offset, data) ) { Ok(()) => (), Err(err) => self.handle_error_fatal(err, "Queue::write_buffer"), @@ -2286,7 +2315,7 @@ impl crate::Context for Context { ) { let global = &self.0; match wgc::gfx_select!( - *queue => global.queue_validate_write_buffer(*queue, buffer.id, offset, size.get()) + *queue => global.queue_validate_write_buffer(queue.id, buffer.id, offset, size.get()) ) { Ok(()) => (), Err(err) => self.handle_error_fatal(err, "Queue::write_buffer_with"), @@ -2300,7 +2329,7 @@ impl crate::Context for Context { ) -> QueueWriteBuffer { let global = &self.0; match wgc::gfx_select!( - *queue => global.queue_create_staging_buffer(*queue, size, ()) + *queue => 
global.queue_create_staging_buffer(queue.id, size, ()) ) { Ok((buffer_id, ptr)) => QueueWriteBuffer { buffer_id, @@ -2322,10 +2351,12 @@ impl crate::Context for Context { ) { let global = &self.0; match wgc::gfx_select!( - *queue => global.queue_write_staging_buffer(*queue, buffer.id, offset, staging_buffer.buffer_id) + *queue => global.queue_write_staging_buffer(queue.id, buffer.id, offset, staging_buffer.buffer_id) ) { Ok(()) => (), - Err(err) => self.handle_error_fatal(err, "Queue::write_buffer_with"), + Err(err) => { + self.handle_error_nolabel(&queue.error_sink, err, "Queue::write_buffer_with"); + } } } @@ -2339,14 +2370,14 @@ impl crate::Context for Context { ) { let global = &self.0; match wgc::gfx_select!(*queue => global.queue_write_texture( - *queue, + queue.id, &map_texture_copy_view(texture), data, &data_layout, &size )) { Ok(()) => (), - Err(err) => self.handle_error_fatal(err, "Queue::write_texture"), + Err(err) => self.handle_error_nolabel(&queue.error_sink, err, "Queue::write_texture"), } } @@ -2358,7 +2389,7 @@ impl crate::Context for Context { let temp_command_buffers = command_buffers.collect::>(); let global = &self.0; - match wgc::gfx_select!(*queue => global.queue_submit(*queue, &temp_command_buffers)) { + match wgc::gfx_select!(*queue => global.queue_submit(queue.id, &temp_command_buffers)) { Ok(index) => index, Err(err) => self.handle_error_fatal(err, "Queue::submit"), } @@ -2367,7 +2398,7 @@ impl crate::Context for Context { fn queue_get_timestamp_period(&self, queue: &Self::QueueId) -> f32 { let global = &self.0; let res = wgc::gfx_select!(queue => global.queue_get_timestamp_period( - *queue + queue.id )); match res { Ok(v) => v, @@ -2385,7 +2416,7 @@ impl crate::Context for Context { let closure = wgc::device::queue::SubmittedWorkDoneClosure::from_rust(callback); let global = &self.0; - let res = wgc::gfx_select!(queue => global.queue_on_submitted_work_done(*queue, closure)); + let res = wgc::gfx_select!(queue => 
global.queue_on_submitted_work_done(queue.id, closure)); if let Err(cause) = res { self.handle_error_fatal(cause, "Queue::on_submitted_work_done"); }