Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Minimize moves by introducing WeakVec #6419

Merged
merged 2 commits on Oct 18, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
83 changes: 39 additions & 44 deletions wgpu-core/src/device/resource.rs
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,7 @@ use crate::{
UsageScopePool,
},
validation::{self, validate_color_attachment_bytes_per_sample},
weak_vec::WeakVec,
FastHashMap, LabelHelpers, PreHashedKey, PreHashedMap,
};

Expand All @@ -42,7 +43,7 @@ use wgt::{

use std::{
borrow::Cow,
mem::ManuallyDrop,
mem::{self, ManuallyDrop},
num::NonZeroU32,
sync::{
atomic::{AtomicBool, AtomicU64, Ordering},
Expand Down Expand Up @@ -150,8 +151,8 @@ pub struct Device {
}

/// Work items whose raw GPU resources must be destroyed later, once the
/// snatch lock can be taken (see `Device::deferred_resource_destruction`).
///
/// Each variant carries a batch of weak references; entries whose strong
/// count has dropped to zero are simply skipped at destruction time.
pub(crate) enum DeferredDestroy {
    /// Texture views whose raw handles should be destroyed when processed.
    TextureViews(WeakVec<TextureView>),
    /// Bind groups whose raw handles should be destroyed when processed.
    BindGroups(WeakVec<BindGroup>),
}

impl std::fmt::Debug for Device {
Expand Down Expand Up @@ -384,36 +385,42 @@ impl Device {
/// implementation of a reference-counted structure).
/// The snatch lock must not be held while this function is called.
pub(crate) fn deferred_resource_destruction(&self) {
while let Some(item) = self.deferred_destroy.lock().pop() {
let deferred_destroy = mem::take(&mut *self.deferred_destroy.lock());
for item in deferred_destroy {
match item {
DeferredDestroy::TextureView(view) => {
let Some(view) = view.upgrade() else {
continue;
};
let Some(raw_view) = view.raw.snatch(&mut self.snatchable_lock.write()) else {
continue;
};

resource_log!("Destroy raw {}", view.error_ident());

unsafe {
self.raw().destroy_texture_view(raw_view);
DeferredDestroy::TextureViews(views) => {
for view in views {
let Some(view) = view.upgrade() else {
continue;
};
let Some(raw_view) = view.raw.snatch(&mut self.snatchable_lock.write())
else {
continue;
};

resource_log!("Destroy raw {}", view.error_ident());

unsafe {
self.raw().destroy_texture_view(raw_view);
}
}
}
DeferredDestroy::BindGroup(bind_group) => {
let Some(bind_group) = bind_group.upgrade() else {
continue;
};
let Some(raw_bind_group) =
bind_group.raw.snatch(&mut self.snatchable_lock.write())
else {
continue;
};

resource_log!("Destroy raw {}", bind_group.error_ident());

unsafe {
self.raw().destroy_bind_group(raw_bind_group);
DeferredDestroy::BindGroups(bind_groups) => {
for bind_group in bind_groups {
let Some(bind_group) = bind_group.upgrade() else {
continue;
};
let Some(raw_bind_group) =
bind_group.raw.snatch(&mut self.snatchable_lock.write())
else {
continue;
};

resource_log!("Destroy raw {}", bind_group.error_ident());

unsafe {
self.raw().destroy_bind_group(raw_bind_group);
}
}
}
}
Expand Down Expand Up @@ -638,7 +645,7 @@ impl Device {
map_state: Mutex::new(rank::BUFFER_MAP_STATE, resource::BufferMapState::Idle),
label: desc.label.to_string(),
tracking_data: TrackingData::new(self.tracker_indices.buffers.clone()),
bind_groups: Mutex::new(rank::BUFFER_BIND_GROUPS, Vec::new()),
bind_groups: Mutex::new(rank::BUFFER_BIND_GROUPS, WeakVec::new()),
#[cfg(feature = "indirect-validation")]
raw_indirect_validation_bind_group,
};
Expand Down Expand Up @@ -753,7 +760,7 @@ impl Device {
map_state: Mutex::new(rank::BUFFER_MAP_STATE, resource::BufferMapState::Idle),
label: desc.label.to_string(),
tracking_data: TrackingData::new(self.tracker_indices.buffers.clone()),
bind_groups: Mutex::new(rank::BUFFER_BIND_GROUPS, Vec::new()),
bind_groups: Mutex::new(rank::BUFFER_BIND_GROUPS, WeakVec::new()),
#[cfg(feature = "indirect-validation")]
raw_indirect_validation_bind_group,
};
Expand Down Expand Up @@ -1383,10 +1390,6 @@ impl Device {

{
let mut views = texture.views.lock();

// Remove stale weak references
views.retain(|view| view.strong_count() > 0);

views.push(Arc::downgrade(&view));
}

Expand Down Expand Up @@ -2376,18 +2379,10 @@ impl Device {
let weak_ref = Arc::downgrade(&bind_group);
for range in &bind_group.used_texture_ranges {
let mut bind_groups = range.texture.bind_groups.lock();

// Remove stale weak references
bind_groups.retain(|bg| bg.strong_count() > 0);

bind_groups.push(weak_ref.clone());
}
for range in &bind_group.used_buffer_ranges {
let mut bind_groups = range.buffer.bind_groups.lock();

// Remove stale weak references
bind_groups.retain(|bg| bg.strong_count() > 0);

bind_groups.push(weak_ref.clone());
}

Expand Down
1 change: 1 addition & 0 deletions wgpu-core/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -81,6 +81,7 @@ pub mod resource;
mod snatch;
pub mod storage;
mod track;
mod weak_vec;
// This is public for users who pre-compile shaders while still wanting to
// preserve all run-time checks that `wgpu-core` does.
// See <https://github.com/gfx-rs/wgpu/issues/3103>, after which this can be
Expand Down
35 changes: 17 additions & 18 deletions wgpu-core/src/resource.rs
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@ use crate::{
resource_log,
snatch::{SnatchGuard, Snatchable},
track::{SharedTrackerIndexAllocator, TextureSelector, TrackerIndex},
weak_vec::WeakVec,
Label, LabelHelpers,
};

Expand All @@ -26,7 +27,7 @@ use std::{
mem::{self, ManuallyDrop},
ops::Range,
ptr::NonNull,
sync::{Arc, Weak},
sync::Arc,
};

/// Information about the wgpu-core resource.
Expand Down Expand Up @@ -474,7 +475,7 @@ pub struct Buffer {
pub(crate) label: String,
pub(crate) tracking_data: TrackingData,
pub(crate) map_state: Mutex<BufferMapState>,
pub(crate) bind_groups: Mutex<Vec<Weak<BindGroup>>>,
pub(crate) bind_groups: Mutex<WeakVec<BindGroup>>,
#[cfg(feature = "indirect-validation")]
pub(crate) raw_indirect_validation_bind_group: Snatchable<Box<dyn hal::DynBindGroup>>,
}
Expand Down Expand Up @@ -824,7 +825,7 @@ pub struct DestroyedBuffer {
raw: ManuallyDrop<Box<dyn hal::DynBuffer>>,
device: Arc<Device>,
label: String,
bind_groups: Vec<Weak<BindGroup>>,
bind_groups: WeakVec<BindGroup>,
#[cfg(feature = "indirect-validation")]
raw_indirect_validation_bind_group: Option<Box<dyn hal::DynBindGroup>>,
}
Expand All @@ -838,9 +839,9 @@ impl DestroyedBuffer {
impl Drop for DestroyedBuffer {
fn drop(&mut self) {
let mut deferred = self.device.deferred_destroy.lock();
for bind_group in self.bind_groups.drain(..) {
deferred.push(DeferredDestroy::BindGroup(bind_group));
}
deferred.push(DeferredDestroy::BindGroups(mem::take(
&mut self.bind_groups,
)));
drop(deferred);

#[cfg(feature = "indirect-validation")]
Expand Down Expand Up @@ -1060,8 +1061,8 @@ pub struct Texture {
pub(crate) label: String,
pub(crate) tracking_data: TrackingData,
pub(crate) clear_mode: TextureClearMode,
pub(crate) views: Mutex<Vec<Weak<TextureView>>>,
pub(crate) bind_groups: Mutex<Vec<Weak<BindGroup>>>,
pub(crate) views: Mutex<WeakVec<TextureView>>,
pub(crate) bind_groups: Mutex<WeakVec<BindGroup>>,
}

impl Texture {
Expand Down Expand Up @@ -1095,8 +1096,8 @@ impl Texture {
label: desc.label.to_string(),
tracking_data: TrackingData::new(device.tracker_indices.textures.clone()),
clear_mode,
views: Mutex::new(rank::TEXTURE_VIEWS, Vec::new()),
bind_groups: Mutex::new(rank::TEXTURE_BIND_GROUPS, Vec::new()),
views: Mutex::new(rank::TEXTURE_VIEWS, WeakVec::new()),
bind_groups: Mutex::new(rank::TEXTURE_BIND_GROUPS, WeakVec::new()),
}
}
/// Checks that the given texture usage contains the required texture usage,
Expand Down Expand Up @@ -1430,8 +1431,8 @@ impl Global {
#[derive(Debug)]
pub struct DestroyedTexture {
raw: ManuallyDrop<Box<dyn hal::DynTexture>>,
views: Vec<Weak<TextureView>>,
bind_groups: Vec<Weak<BindGroup>>,
views: WeakVec<TextureView>,
bind_groups: WeakVec<BindGroup>,
device: Arc<Device>,
label: String,
}
Expand All @@ -1447,12 +1448,10 @@ impl Drop for DestroyedTexture {
let device = &self.device;

let mut deferred = device.deferred_destroy.lock();
for view in self.views.drain(..) {
deferred.push(DeferredDestroy::TextureView(view));
}
for bind_group in self.bind_groups.drain(..) {
deferred.push(DeferredDestroy::BindGroup(bind_group));
}
deferred.push(DeferredDestroy::TextureViews(mem::take(&mut self.views)));
deferred.push(DeferredDestroy::BindGroups(mem::take(
&mut self.bind_groups,
)));
drop(deferred);

resource_log!("Destroy raw Texture (destroyed) {:?}", self.label());
Expand Down
77 changes: 77 additions & 0 deletions wgpu-core/src/weak_vec.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,77 @@
//! Module containing the [`WeakVec`] API.

use std::sync::Weak;

/// An optimized container for `Weak` references of `T` that minimizes reallocations by
/// dropping older elements that no longer have strong references to them.
#[derive(Debug)]
pub(crate) struct WeakVec<T> {
    // Slot storage; `None` marks a reusable hole whose index is in `empty_slots`.
    inner: Vec<Option<Weak<T>>>,
    // Indices into `inner` that currently hold `None` and can be reused by `push`.
    empty_slots: Vec<usize>,
    // Set when the previous `push` filled `inner` to capacity, so the next
    // `push` sweeps dead references before (potentially) reallocating.
    scan_slots_on_next_push: bool,
}

impl<T> Default for WeakVec<T> {
    fn default() -> Self {
        Self {
            inner: Vec::new(),
            empty_slots: Vec::new(),
            scan_slots_on_next_push: false,
        }
    }
}

impl<T> WeakVec<T> {
    /// Creates a new, empty `WeakVec`.
    pub(crate) fn new() -> Self {
        Self::default()
    }

    /// Pushes a new element to this collection.
    ///
    /// If the inner Vec needs to be reallocated, we will first drop older elements that
    /// no longer have strong references to them.
    pub(crate) fn push(&mut self, value: Weak<T>) {
        if self.scan_slots_on_next_push {
            // Sweep: turn every slot whose referent was dropped into a
            // reusable hole so we can avoid growing `inner`.
            for (i, slot) in self.inner.iter_mut().enumerate() {
                if let Some(w) = slot {
                    if w.strong_count() == 0 {
                        *slot = None;
                        self.empty_slots.push(i);
                    }
                }
            }
        }
        if let Some(i) = self.empty_slots.pop() {
            // Reuse a hole; no growth happened, so no sweep is needed next time.
            self.inner[i] = Some(value);
            self.scan_slots_on_next_push = false;
        } else {
            self.inner.push(Some(value));
            // If this push filled the Vec to capacity, schedule a sweep so the
            // next push can reuse dead slots instead of reallocating.
            self.scan_slots_on_next_push = self.inner.len() == self.inner.capacity();
        }
    }
}

/// Iterator over the (possibly dangling) `Weak` references stored in a [`WeakVec`],
/// skipping empty slots.
pub(crate) struct WeakVecIter<T> {
    inner: std::iter::Flatten<std::vec::IntoIter<Option<Weak<T>>>>,
}

impl<T> Iterator for WeakVecIter<T> {
    type Item = Weak<T>;
    fn next(&mut self) -> Option<Self::Item> {
        self.inner.next()
    }
}

impl<T> IntoIterator for WeakVec<T> {
    type Item = Weak<T>;
    type IntoIter = WeakVecIter<T>;
    fn into_iter(self) -> Self::IntoIter {
        WeakVecIter {
            inner: self.inner.into_iter().flatten(),
        }
    }
}