From 03a517cdb3879182cc53b5d9f289c21a70d4cbae Mon Sep 17 00:00:00 2001 From: Martin Schmidt Date: Tue, 29 Aug 2023 12:45:00 +0100 Subject: [PATCH] refactor(AddressRange): create and use a proper type Previously AddressRange was just a type alias to a usize pair. This caused confusion as sometimes it was (addr, size) or (start, end) and led to a few confusing debugging sessions. The new type hopefully clears up the confusion and makes it easy to iterate over the pages in an AddressRange. --- hal_aarch64/src/mm/mod.rs | 16 +++-- hal_core/Cargo.toml | 1 + hal_core/src/lib.rs | 75 ++++++++++++++++-------- hal_core/src/mm.rs | 70 ++++++++++++++-------- hal_riscv64/src/mm/mod.rs | 16 +++-- kernel/src/device_tree.rs | 6 +- kernel/src/mm/mod.rs | 61 +++++++++++++------ kernel/src/mm/physical_memory_manager.rs | 73 ++++++++--------------- 8 files changed, 187 insertions(+), 131 deletions(-) diff --git a/hal_aarch64/src/mm/mod.rs b/hal_aarch64/src/mm/mod.rs index 693c0e1d..187a6133 100644 --- a/hal_aarch64/src/mm/mod.rs +++ b/hal_aarch64/src/mm/mod.rs @@ -1,6 +1,6 @@ use hal_core::{ - mm::{PageAllocFn, PageMap}, - Error, Range, + mm::{self, PageAllocFn, PageMap}, + AddressRange, Error, }; use cortex_a::asm::barrier; @@ -24,10 +24,10 @@ pub fn current() -> &'static mut PageTable { } pub fn init_paging( - r: impl Iterator, - rw: impl Iterator, - rwx: impl Iterator, - pre_allocated: impl Iterator, + r: impl Iterator, + rw: impl Iterator, + rwx: impl Iterator, + pre_allocated: impl Iterator, alloc: PageAllocFn, ) -> Result<(), Error> { hal_core::mm::init_paging::(r, rw, rwx, pre_allocated, alloc, |pt| { @@ -73,3 +73,7 @@ unsafe fn load_pagetable(pt: &'static mut PageTable) { barrier::isb(barrier::SY); } + +pub fn align_up(addr: usize) -> usize { + mm::align_up(addr, PageTable::PAGE_SIZE) +} diff --git a/hal_core/Cargo.toml b/hal_core/Cargo.toml index 59eebd76..704980af 100644 --- a/hal_core/Cargo.toml +++ b/hal_core/Cargo.toml @@ -7,3 +7,4 @@ edition = "2021" [dependencies] bitflags = "2.3" 
+log = "0.4" diff --git a/hal_core/src/lib.rs b/hal_core/src/lib.rs index d2c370c9..33f873bd 100644 --- a/hal_core/src/lib.rs +++ b/hal_core/src/lib.rs @@ -1,5 +1,8 @@ #![no_std] +use core::convert::Into; +use core::ops::Range; + pub mod mm; #[derive(Debug)] @@ -7,27 +10,51 @@ pub enum Error {} pub type TimerCallbackFn = fn(); -pub type Range = (usize, usize); -// /// A range similar to core::ops::Range but that is copyable. -// /// The range is half-open, inclusive below, exclusive above, ie. [start; end[ -// #[derive(Debug, Copy, Clone, PartialEq)] -// pub struct Range { -// pub start: T, -// pub end: T, -// } -// -// impl> -// Range -// { -// pub fn new(start: T, end: T) -> Self { -// Self { start, end } -// } -// -// pub fn contains(&self, val: T) -> bool { -// self.start <= val && val < self.end -// } -// -// pub fn size(&self) -> T { -// self.end - self.start -// } -// } +/// A range similar to core::ops::Range but that is copyable. +/// The range is half-open, inclusive below, exclusive above, ie. 
[start; end[ +#[derive(Debug, Copy, Clone, PartialEq)] +pub struct AddressRange { + pub start: usize, + pub end: usize, +} + +impl AddressRange { + pub fn new>(range: Range) -> Self { + let (start, end) = (range.start.into(), range.end.into()); + // assert!(range.start % page_size == 0); + // assert_eq!(range.end, mm::align_up(range.end, page_size)); + + assert!(start < end); + + Self { start, end } + } + + pub fn with_size(start: usize, size: usize) -> Self { + Self::new(start..start + size) + } + + pub fn round_up_to_page(self, page_size: usize) -> Self { + Self { + start: self.start, + end: mm::align_up(self.end, page_size), + } + } + + pub fn iter_pages(self, page_size: usize) -> impl Iterator { + assert_eq!(self.end, mm::align_up(self.end, page_size)); + + (self.start..=self.end).step_by(page_size) + } + + pub fn count_pages(&self, page_size: usize) -> usize { + mm::align_up(self.size(), page_size) / page_size + } + + pub fn contains(&self, val: usize) -> bool { + self.start <= val && val < self.end + } + + pub fn size(&self) -> usize { + self.end - self.start + } +} diff --git a/hal_core/src/mm.rs b/hal_core/src/mm.rs index cd71ca96..86de2ffc 100644 --- a/hal_core/src/mm.rs +++ b/hal_core/src/mm.rs @@ -1,4 +1,6 @@ -use super::{Error, Range}; +use log::trace; + +use super::{AddressRange, Error}; #[derive(Debug, Clone, Copy)] pub struct VAddr { @@ -103,6 +105,31 @@ pub trait PageMap { Ok(()) } + + fn add_invalid_entries( + &mut self, + range: AddressRange, + alloc: PageAllocFn, + ) -> Result<(), Error> { + for page in range.iter_pages(Self::PAGE_SIZE) { + self.add_invalid_entry(VAddr::new(page), alloc)?; + } + + Ok(()) + } + + fn identity_map_addressrange( + &mut self, + range: AddressRange, + perms: Permissions, + alloc: PageAllocFn, + ) -> Result<(), Error> { + for page in range.iter_pages(Self::PAGE_SIZE) { + self.identity_map(VAddr::new(page), perms, alloc)?; + } + + Ok(()) + } } pub fn align_up(val: usize, page_sz: usize) -> usize { @@ -110,43 +137,34 @@ 
pub fn align_up(val: usize, page_sz: usize) -> usize { } pub fn init_paging( - r: impl Iterator, - rw: impl Iterator, - rwx: impl Iterator, - pre_allocated: impl Iterator, + r: impl Iterator, + rw: impl Iterator, + rwx: impl Iterator, + pre_allocated: impl Iterator, alloc: PageAllocFn, store_pagetable: impl FnOnce(&'static mut P), ) -> Result<(), Error> { let pt: &'static mut P = P::new(alloc)?; let page_size = P::PAGE_SIZE; - for (addr, len) in pre_allocated { - let len = align_up(len, page_size); - for base in (addr..=addr + len).step_by(page_size) { - pt.add_invalid_entry(VAddr::new(base), alloc)?; - } + for range in pre_allocated { + pt.add_invalid_entries(range, alloc)?; } - for (addr, len) in r { - let page_count = align_up(len, page_size) / page_size; - pt.identity_map_range(VAddr::new(addr), page_count, Permissions::READ, alloc)?; + for range in r { + trace!("mapping as RO: {:X?}", range); + pt.identity_map_addressrange(range, Permissions::READ, alloc)?; } - for (addr, len) in rw { - let page_count = align_up(len, page_size) / page_size; - pt.identity_map_range( - VAddr::new(addr), - page_count, - Permissions::READ | Permissions::WRITE, - alloc, - )?; + for range in rw { + trace!("mapping as RW: {:X?}", range); + pt.identity_map_addressrange(range, Permissions::READ | Permissions::WRITE, alloc)?; } - for (addr, len) in rwx { - let page_count = align_up(len, page_size) / page_size; - pt.identity_map_range( - VAddr::new(addr), - page_count, + for range in rwx { + trace!("mapping as RWX: {:X?}", range); + pt.identity_map_addressrange( + range, Permissions::READ | Permissions::WRITE | Permissions::EXECUTE, alloc, )? 
diff --git a/hal_riscv64/src/mm/mod.rs b/hal_riscv64/src/mm/mod.rs index 449b0a8b..9e570d76 100644 --- a/hal_riscv64/src/mm/mod.rs +++ b/hal_riscv64/src/mm/mod.rs @@ -2,8 +2,8 @@ use core::arch::asm; use core::cell::OnceCell; use hal_core::{ - mm::{PageAllocFn, PageMap}, - Error, Range, + mm::{self, PageAllocFn, PageMap}, + AddressRange, Error, }; mod sv39; @@ -18,10 +18,10 @@ pub fn current() -> &'static mut PageTable { } pub fn init_paging( - r: impl Iterator, - rw: impl Iterator, - rwx: impl Iterator, - pre_allocated: impl Iterator, + r: impl Iterator, + rw: impl Iterator, + rwx: impl Iterator, + pre_allocated: impl Iterator, alloc: PageAllocFn, ) -> Result<(), Error> { hal_core::mm::init_paging::(r, rw, rwx, pre_allocated, alloc, |pt| { @@ -50,3 +50,7 @@ unsafe fn load_pagetable(pt: &'static mut PageTable) { asm!("sfence.vma"); } } + +pub fn align_up(addr: usize) -> usize { + mm::align_up(addr, PageTable::PAGE_SIZE) +} diff --git a/kernel/src/device_tree.rs b/kernel/src/device_tree.rs index 029a80cf..df0515e5 100644 --- a/kernel/src/device_tree.rs +++ b/kernel/src/device_tree.rs @@ -1,5 +1,7 @@ use super::Error; +use hal_core::AddressRange; + use fdt::node::FdtNode; pub struct DeviceTree { @@ -19,8 +21,8 @@ impl DeviceTree { }) } - pub fn memory_region(&self) -> (usize, usize) { - (self.addr, self.addr + self.total_size) + pub fn memory_region(&self) -> AddressRange { + AddressRange::new(self.addr..self.addr + self.total_size) } pub fn for_all_memory_regions)>( diff --git a/kernel/src/mm/mod.rs b/kernel/src/mm/mod.rs index 8e173d9c..aff8ef2a 100644 --- a/kernel/src/mm/mod.rs +++ b/kernel/src/mm/mod.rs @@ -8,7 +8,8 @@ use crate::globals; use crate::hal; use crate::Error; -use hal_core::mm::{PageMap, Permissions, VAddr}; +use hal_core::mm::{align_up, PageMap, Permissions, VAddr}; +use hal_core::AddressRange; use crate::drivers; use drivers::Driver; @@ -34,13 +35,15 @@ pub fn is_kernel_page(base: usize) -> bool { base >= kernel_start && base < kernel_end } -pub fn 
kernel_memory_region() -> (usize, usize) { - unsafe { +pub fn kernel_memory_region() -> AddressRange { + let (start, end) = unsafe { ( crate::utils::external_symbol_value(&KERNEL_START), crate::utils::external_symbol_value(&KERNEL_END), ) - } + }; + + AddressRange::new(start..end) } pub fn is_reserved_page(base: usize, device_tree: &DeviceTree) -> bool { @@ -58,16 +61,16 @@ pub fn is_reserved_page(base: usize, device_tree: &DeviceTree) -> bool { } fn map_kernel_rwx() -> ( - impl Iterator, - impl Iterator, - impl Iterator, + impl Iterator, + impl Iterator, + impl Iterator, ) { let page_size = hal::mm::PAGE_SIZE; let kernel_start = unsafe { crate::utils::external_symbol_value(&KERNEL_START) }; let kernel_end = unsafe { crate::utils::external_symbol_value(&KERNEL_END) }; let kernel_end_align = ((kernel_end + page_size - 1) / page_size) * page_size; - let rwx_entries = iter::once((kernel_start, kernel_end_align - kernel_start)); + let rwx_entries = iter::once(AddressRange::new(kernel_start..kernel_end_align)); (iter::empty(), iter::empty(), rwx_entries) } @@ -108,25 +111,37 @@ pub fn map_address_space<'a, I: Iterator>( device_tree: &DeviceTree, drivers: I, ) -> Result<(), Error> { - let mut r_entries = ArrayVec::<(usize, usize), 128>::new(); - let mut rw_entries = ArrayVec::<(usize, usize), 128>::new(); - let mut rwx_entries = ArrayVec::<(usize, usize), 128>::new(); - let mut pre_allocated_entries = ArrayVec::<(usize, usize), 1024>::new(); + let mut r_entries = ArrayVec::::new(); + let mut rw_entries = ArrayVec::::new(); + let mut rwx_entries = ArrayVec::::new(); + let mut pre_allocated_entries = ArrayVec::::new(); // Add entries/descriptors in the pagetable for all of accessible memory regions. // That way in the future, mapping those entries won't require any memory allocations, // just settings the entry to valid and filling up the bits. 
device_tree.for_all_memory_regions(|regions| { - regions.for_each(|entry| { - pre_allocated_entries.try_push(entry).unwrap(); + regions.for_each(|(base, size)| { + pre_allocated_entries + .try_push(AddressRange::with_size(base, size)) + .unwrap(); // TODO: this needs to be done differently, we're mapping all DRAM as rw... - rw_entries.try_push(entry).unwrap(); + rw_entries + .try_push(AddressRange::with_size(base, size)) + .unwrap(); }); }); - let (dt_start, dt_end) = device_tree.memory_region(); - let dt_size = dt_end - dt_start; - rw_entries.try_push((dt_start, dt_size)).unwrap(); + debug!( + "adding region containing the device tree to rw entries {:X?}", + device_tree.memory_region() + ); + rw_entries + .try_push( + device_tree + .memory_region() + .round_up_to_page(hal::mm::PAGE_SIZE), + ) + .unwrap(); let (kernel_r, kernel_rw, kernel_rwx) = map_kernel_rwx(); kernel_r.for_each(|entry| r_entries.try_push(entry).unwrap()); @@ -135,7 +150,15 @@ pub fn map_address_space<'a, I: Iterator>( for drv in drivers { if let Some((base, len)) = drv.get_address_range() { - rw_entries.try_push((base, len)).unwrap(); + let len = hal::mm::align_up(len); + debug!( + "adding driver memory region to RW entries: [{:X}; {:X}]", + base, + base + len + ); + rw_entries + .try_push(AddressRange::with_size(base, len)) + .unwrap(); } } diff --git a/kernel/src/mm/physical_memory_manager.rs b/kernel/src/mm/physical_memory_manager.rs index 22636f51..4999b4fc 100644 --- a/kernel/src/mm/physical_memory_manager.rs +++ b/kernel/src/mm/physical_memory_manager.rs @@ -4,34 +4,13 @@ use crate::hal; use crate::mm; use crate::Error; use core::mem; -use hal_core::mm::{PageMap, Permissions, VAddr}; +use hal_core::{ + mm::{PageMap, Permissions, VAddr}, + AddressRange, +}; use log::debug; -/// A range similar to core::ops::Range but that is copyable. -/// The range is half-open, inclusive below, exclusive above, ie. 
[start; end[ -#[derive(Debug, Copy, Clone, PartialEq)] -struct AddressRange { - pub start: T, - pub end: T, -} - -impl> - AddressRange -{ - pub fn new(start: T, end: T) -> Self { - Self { start, end } - } - - pub fn contains(&self, val: T) -> bool { - self.start <= val && val < self.end - } - - pub fn size(&self) -> T { - self.end - self.start - } -} - #[derive(Debug, PartialEq, Eq)] pub enum PageKind { Metadata, @@ -84,7 +63,7 @@ pub struct PhysicalMemoryManager { } impl PhysicalMemoryManager { - fn count_pages(regions: &[Option>], page_size: usize) -> usize { + fn count_pages(regions: &[Option], page_size: usize) -> usize { let total_memory_bytes: usize = regions .iter() .filter_map(|maybe_region| maybe_region.map(|region| region.size())) @@ -93,10 +72,7 @@ impl PhysicalMemoryManager { total_memory_bytes / page_size } - fn find_large_region( - regions: &[Option>], - minimum_size: usize, - ) -> Option { + fn find_large_region(regions: &[Option], minimum_size: usize) -> Option { regions .iter() .flatten() @@ -136,10 +112,10 @@ impl PhysicalMemoryManager { } fn exclude_range( - regions: &mut [Option>; MAX_REGIONS], - excluded: (usize, usize), + regions: &mut [Option; MAX_REGIONS], + excluded: AddressRange, ) { - let (excl_start, excl_end) = excluded; + let (excl_start, excl_end) = (excluded.start, excluded.end); assert!(excl_start < excl_end); @@ -160,7 +136,7 @@ impl PhysicalMemoryManager { start: excl_end, end: region.end, }; - regions[i] = Some(AddressRange::new(region.start, excl_start)); + regions[i] = Some(AddressRange::new(region.start..excl_start)); // The exclusion in the middle causes a split of the current region, put the new region (the end part) somewhere there is a none. *regions @@ -169,10 +145,10 @@ impl PhysicalMemoryManager { .expect("regions array is too small, increase MAX_REGIONS") = Some(new_region); } else if region.contains(excl_end) { // Region to be removed is at the left (at the beginning) of the current region. 
- regions[i] = Some(AddressRange::new(excl_end, region.end)); + regions[i] = Some(AddressRange::new(excl_end..region.end)); } else if region.contains(excl_start) { // Region to be removed is at the right (at the end) of the current region. - regions[i] = Some(AddressRange::new(region.start, excl_start)); + regions[i] = Some(AddressRange::new(region.start..excl_start)); } } } @@ -180,7 +156,7 @@ impl PhysicalMemoryManager { fn available_memory_regions( device_tree: &DeviceTree, page_size: usize, - ) -> [Option>; MAX_REGIONS] { + ) -> [Option; MAX_REGIONS] { // First put all regions in the array. let mut all_regions = [None; MAX_REGIONS]; device_tree.for_all_memory_regions(|regions| { @@ -203,8 +179,9 @@ impl PhysicalMemoryManager { Self::exclude_range(&mut all_regions, device_tree.memory_region()); device_tree.for_all_reserved_memory_regions(|reserved_regions| { - reserved_regions - .for_each(|(base, size)| Self::exclude_range(&mut all_regions, (base, base + size))) + reserved_regions.for_each(|(base, size)| { + Self::exclude_range(&mut all_regions, AddressRange::with_size(base, size)) + }) }); // Re-align the regions, for exemple things we exclude are not always aligned to a page boundary. 
@@ -377,34 +354,34 @@ mod tests { #[test_case] fn exclude_range_remove_in_the_middle(_ctx: &mut TestContext) { - let mut ranges = [Some(AddressRange::new(0x0, 0x1000)), None]; + let mut ranges = [Some(AddressRange::new(0x0..0x1000)), None]; PhysicalMemoryManager::exclude_range(&mut ranges, (0x500, 0x600)); - assert_eq!(ranges[0], Some(AddressRange::new(0x0, 0x500))); + assert_eq!(ranges[0], Some(AddressRange::new(0x0..0x500))); assert_eq!(ranges[1], Some(AddressRange::new(0x600, 0x1000))); } #[test_case] fn exclude_range_remove_beginning(_ctx: &mut TestContext) { - let mut ranges = [Some(AddressRange::new(0x100, 0x1000)), None]; + let mut ranges = [Some(AddressRange::new(0x100..0x1000)), None]; PhysicalMemoryManager::exclude_range(&mut ranges, (0x0, 0x200)); - assert_eq!(ranges[0], Some(AddressRange::new(0x200, 0x1000))); + assert_eq!(ranges[0], Some(AddressRange::new(0x200..0x1000))); assert!(ranges[1].is_none()); } #[test_case] fn exclude_range_remove_ending(_ctx: &mut TestContext) { - let mut ranges = [Some(AddressRange::new(0x100, 0x1000)), None]; + let mut ranges = [Some(AddressRange::new(0x100..0x1000)), None]; PhysicalMemoryManager::exclude_range(&mut ranges, (0x800, 0x1000)); - assert_eq!(ranges[0], Some(AddressRange::new(0x100, 0x800))); + assert_eq!(ranges[0], Some(AddressRange::new(0x100..0x800))); assert!(ranges[1].is_none()); } #[test_case] fn exclude_range_overlaps_exactly(_ctx: &mut TestContext) { - let mut ranges = [Some(AddressRange::new(0x400_000, 0x800_000)), None]; + let mut ranges = [Some(AddressRange::new(0x400_000..0x800_000)), None]; PhysicalMemoryManager::exclude_range(&mut ranges, (0x400_000, 0x800_000)); assert!(ranges[0].is_none()); @@ -413,10 +390,10 @@ mod tests { #[test_case] fn exclude_range_overlap_with_exact_beginning(_ctx: &mut TestContext) { - let mut ranges = [Some(AddressRange::new(0x400_000, 0x800_000)), None]; + let mut ranges = [Some(AddressRange::new(0x400_000..0x800_000)), None]; PhysicalMemoryManager::exclude_range(&mut 
ranges, (0x400_000, 0x401_000)); - assert_eq!(ranges[0], Some(AddressRange::new(0x401_000, 0x800_000))); + assert_eq!(ranges[0], Some(AddressRange::new(0x401_000..0x800_000))); assert!(ranges[1].is_none()); } }