Commit
[wgpu-core] fix trying to create 0 sized staging buffers when creating mapped_at_creation buffers

This issue was introduced by fabbca2.
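The fix threads `wgt::BufferSize` (an alias for `std::num::NonZeroU64`) through the staging-buffer path in place of the plain-`u64` `wgt::BufferAddress`, so a zero size is rejected at the API boundary and can never reach `create_buffer`. Below is a minimal standalone sketch of that pattern, not the actual wgpu code; `stage` is a hypothetical stand-in for `prepare_staging_buffer`:

    use std::num::NonZeroU64;

    // In wgpu-types, `BufferSize` is an alias for `NonZeroU64`.
    type BufferSize = NonZeroU64;

    // Hypothetical stand-in for `prepare_staging_buffer`: taking
    // `BufferSize` makes a zero-sized allocation unrepresentable.
    fn stage(size: BufferSize) {
        println!("allocating {} bytes", size.get());
    }

    fn write_buffer(data: &[u8]) {
        // Validate at the boundary: zero-length writes are dropped
        // early instead of reaching the allocation path.
        let Some(size) = BufferSize::new(data.len() as u64) else {
            println!("Ignoring write_buffer of size 0");
            return;
        };
        stage(size);
    }

    fn main() {
        write_buffer(&[]); // ignored
        write_buffer(&[1, 2, 3]); // allocates 3 bytes
    }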
teoxoy committed Jul 11, 2024
1 parent 349f182 commit a0c185a
Showing 4 changed files with 38 additions and 29 deletions.
52 changes: 29 additions & 23 deletions wgpu-core/src/device/queue.rs
@@ -314,19 +314,19 @@ impl<A: HalApi> PendingWrites<A> {

pub(crate) fn prepare_staging_buffer<A: HalApi>(
device: &Arc<Device<A>>,
- size: wgt::BufferAddress,
+ size: wgt::BufferSize,
instance_flags: wgt::InstanceFlags,
) -> Result<(StagingBuffer<A>, NonNull<u8>), DeviceError> {
profiling::scope!("prepare_staging_buffer");
let stage_desc = hal::BufferDescriptor {
label: hal_label(Some("(wgpu internal) Staging"), instance_flags),
- size,
+ size: size.get(),
usage: hal::BufferUses::MAP_WRITE | hal::BufferUses::COPY_SRC,
memory_flags: hal::MemoryFlags::TRANSIENT,
};

let buffer = unsafe { device.raw().create_buffer(&stage_desc)? };
- let mapping = unsafe { device.raw().map_buffer(&buffer, 0..size) }?;
+ let mapping = unsafe { device.raw().map_buffer(&buffer, 0..size.get()) }?;

let staging_buffer = StagingBuffer {
raw: Mutex::new(rank::STAGING_BUFFER_RAW, Some(buffer)),
@@ -344,7 +344,7 @@ impl<A: HalApi> StagingBuffer<A> {
unsafe {
device.flush_mapped_ranges(
self.raw.lock().as_ref().unwrap(),
- iter::once(0..self.size),
+ iter::once(0..self.size.get()),
)
};
}
@@ -435,10 +435,12 @@ impl Global {

buffer.same_device_as(queue.as_ref())?;

- if data_size == 0 {
+ let data_size = if let Some(data_size) = wgt::BufferSize::new(data_size) {
+     data_size
+ } else {
log::trace!("Ignoring write_buffer of size 0");
return Ok(());
- }
+ };

// Platform validation requires that the staging buffer always be
// freed, even if an error occurs. All paths from here must call
@@ -450,7 +452,11 @@ impl Global {

if let Err(flush_error) = unsafe {
profiling::scope!("copy");
- ptr::copy_nonoverlapping(data.as_ptr(), staging_buffer_ptr.as_ptr(), data.len());
+ ptr::copy_nonoverlapping(
+     data.as_ptr(),
+     staging_buffer_ptr.as_ptr(),
+     data_size.get() as usize,
+ );
staging_buffer.flush(device.raw())
} {
pending_writes.consume(staging_buffer);
Expand Down Expand Up @@ -487,7 +493,7 @@ impl Global {
let device = &queue.device;

let (staging_buffer, staging_buffer_ptr) =
- prepare_staging_buffer(device, buffer_size.get(), device.instance_flags)?;
+ prepare_staging_buffer(device, buffer_size, device.instance_flags)?;

let fid = hub.staging_buffers.prepare(id_in);
let id = fid.assign(Arc::new(staging_buffer));
@@ -549,7 +555,7 @@ impl Global {
_queue_id: QueueId,
buffer_id: id::BufferId,
buffer_offset: u64,
- buffer_size: u64,
+ buffer_size: wgt::BufferSize,
) -> Result<(), QueueWriteError> {
profiling::scope!("Queue::validate_write_buffer");
let hub = A::hub(self);
@@ -568,19 +574,19 @@
&self,
buffer: &Buffer<A>,
buffer_offset: u64,
- buffer_size: u64,
+ buffer_size: wgt::BufferSize,
) -> Result<(), TransferError> {
buffer.check_usage(wgt::BufferUsages::COPY_DST)?;
- if buffer_size % wgt::COPY_BUFFER_ALIGNMENT != 0 {
-     return Err(TransferError::UnalignedCopySize(buffer_size));
+ if buffer_size.get() % wgt::COPY_BUFFER_ALIGNMENT != 0 {
+     return Err(TransferError::UnalignedCopySize(buffer_size.get()));
}
if buffer_offset % wgt::COPY_BUFFER_ALIGNMENT != 0 {
return Err(TransferError::UnalignedBufferOffset(buffer_offset));
}
- if buffer_offset + buffer_size > buffer.size {
+ if buffer_offset + buffer_size.get() > buffer.size {
return Err(TransferError::BufferOverrun {
start_offset: buffer_offset,
- end_offset: buffer_offset + buffer_size,
+ end_offset: buffer_offset + buffer_size.get(),
buffer_size: buffer.size,
side: CopySide::Destination,
});
@@ -615,16 +621,15 @@ impl Global {

dst.same_device_as(queue.as_ref())?;

- let src_buffer_size = staging_buffer.size;
- self.queue_validate_write_buffer_impl(&dst, buffer_offset, src_buffer_size)?;
+ self.queue_validate_write_buffer_impl(&dst, buffer_offset, staging_buffer.size)?;

dst.use_at(device.active_submission_index.load(Ordering::Relaxed) + 1);

- let region = wgt::BufferSize::new(src_buffer_size).map(|size| hal::BufferCopy {
+ let region = hal::BufferCopy {
src_offset: 0,
dst_offset: buffer_offset,
- size,
- });
+ size: staging_buffer.size,
+ };
let inner_buffer = staging_buffer.raw.lock();
let barriers = iter::once(hal::BufferBarrier {
buffer: inner_buffer.as_ref().unwrap(),
Expand All @@ -637,7 +642,7 @@ impl Global {
encoder.copy_buffer_to_buffer(
inner_buffer.as_ref().unwrap(),
dst_raw,
- region.into_iter(),
+ iter::once(region),
);
}

@@ -648,7 +653,7 @@
{
dst.initialization_status
.write()
- .drain(buffer_offset..(buffer_offset + src_buffer_size));
+ .drain(buffer_offset..(buffer_offset + staging_buffer.size.get()));
}

Ok(())
Expand Down Expand Up @@ -760,7 +765,8 @@ impl Global {

let block_rows_in_copy =
(size.depth_or_array_layers - 1) * block_rows_per_image + height_blocks;
- let stage_size = stage_bytes_per_row as u64 * block_rows_in_copy as u64;
+ let stage_size =
+     wgt::BufferSize::new(stage_bytes_per_row as u64 * block_rows_in_copy as u64).unwrap();

let mut pending_writes = device.pending_writes.lock();
let pending_writes = pending_writes.as_mut().unwrap();
Expand Down Expand Up @@ -836,7 +842,7 @@ impl Global {
ptr::copy_nonoverlapping(
data.as_ptr().offset(data_layout.offset as isize),
staging_buffer_ptr.as_ptr(),
- stage_size as usize,
+ stage_size.get() as usize,
);
}
} else {
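A side effect visible in the `queue_write_staging_buffer` hunk above: with `staging_buffer.size` nonzero by construction, the copy region no longer needs to be built as an `Option` and drained with `into_iter()`; a plain `iter::once(region)` suffices. A standalone sketch of the before/after shapes, with `hal::BufferCopy` reduced to just its size field for illustration:

    use std::iter;
    use std::num::NonZeroU64;

    #[derive(Debug)]
    struct BufferCopy {
        size: NonZeroU64,
    }

    fn main() {
        // Before: the size was a plain u64, so the region had to be
        // an Option yielding zero or one copies.
        let raw_size: u64 = 16;
        let maybe_region = NonZeroU64::new(raw_size).map(|size| BufferCopy { size });
        for r in maybe_region.into_iter() {
            println!("before: {r:?}");
        }

        // After: the size is nonzero by construction, so exactly one
        // region always exists.
        let size = NonZeroU64::new(16).unwrap();
        let region = BufferCopy { size };
        for r in iter::once(region) {
            println!("after: {r:?}");
        }
    }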
11 changes: 7 additions & 4 deletions wgpu-core/src/device/resource.rs
@@ -591,13 +591,16 @@ impl<A: HalApi> Device<A> {
};
hal::BufferUses::MAP_WRITE
} else {
- let (staging_buffer, staging_buffer_ptr) =
-     queue::prepare_staging_buffer(self, desc.size, self.instance_flags)?;
+ let (staging_buffer, staging_buffer_ptr) = queue::prepare_staging_buffer(
+     self,
+     wgt::BufferSize::new(aligned_size).unwrap(),
+     self.instance_flags,
+ )?;

// Zero initialize memory and then mark the buffer as initialized
// (it's guaranteed that this is the case by the time the buffer is usable)
- unsafe { std::ptr::write_bytes(staging_buffer_ptr.as_ptr(), 0, buffer.size as usize) };
- buffer.initialization_status.write().drain(0..buffer.size);
+ unsafe { std::ptr::write_bytes(staging_buffer_ptr.as_ptr(), 0, aligned_size as usize) };
+ buffer.initialization_status.write().drain(0..aligned_size);

*buffer.map_state.lock() = resource::BufferMapState::Init {
staging_buffer,
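For context on the `resource.rs` hunk: `aligned_size` is already in scope there and is, presumably, the requested buffer size rounded up to `wgt::COPY_BUFFER_ALIGNMENT` (4 bytes); the fix zeroes and marks initialized that full aligned range rather than the caller-visible `buffer.size`. A hedged sketch of such rounding, using the usual power-of-two trick; the helper name `align_size` is invented here, not a wgpu API:

    // Matches wgpu-types: copy sizes and offsets must be 4-byte aligned.
    const COPY_BUFFER_ALIGNMENT: u64 = 4;

    // Hypothetical helper: round `size` up to the next multiple of
    // `COPY_BUFFER_ALIGNMENT`. Relies on the alignment being a power of two.
    fn align_size(size: u64) -> u64 {
        (size + COPY_BUFFER_ALIGNMENT - 1) & !(COPY_BUFFER_ALIGNMENT - 1)
    }

    fn main() {
        assert_eq!(align_size(1), 4);
        assert_eq!(align_size(4), 4);
        assert_eq!(align_size(5), 8);
        println!("ok");
    }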
2 changes: 1 addition & 1 deletion wgpu-core/src/resource.rs
@@ -867,7 +867,7 @@ impl<A: HalApi> Drop for DestroyedBuffer<A> {
pub struct StagingBuffer<A: HalApi> {
pub(crate) raw: Mutex<Option<A::Buffer>>,
pub(crate) device: Arc<Device<A>>,
- pub(crate) size: wgt::BufferAddress,
+ pub(crate) size: wgt::BufferSize,
pub(crate) is_coherent: bool,
}

2 changes: 1 addition & 1 deletion wgpu/src/backend/wgpu_core.rs
@@ -2203,7 +2203,7 @@ impl crate::Context for ContextWgpuCore {
size: wgt::BufferSize,
) -> Option<()> {
match wgc::gfx_select!(
- *queue => self.0.queue_validate_write_buffer(*queue, *buffer, offset, size.get())
+ *queue => self.0.queue_validate_write_buffer(*queue, *buffer, offset, size)
) {
Ok(()) => Some(()),
Err(err) => {
