Skip to content

Commit

Permalink
refactor(AddressRange): create and use a proper type
Browse files Browse the repository at this point in the history
Previously AddressRange was just a type alias to a usize pair.
This caused confusion as sometimes it was (addr, size) or (start, end)
and led to a few confusing debugging sessions.

The new type hopefully clears up the confusion and makes it easy to iterate
over the pages in an AddressRange.
  • Loading branch information
n1tram1 committed Aug 31, 2023
1 parent 0153992 commit 03a517c
Show file tree
Hide file tree
Showing 8 changed files with 187 additions and 131 deletions.
16 changes: 10 additions & 6 deletions hal_aarch64/src/mm/mod.rs
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
use hal_core::{
mm::{PageAllocFn, PageMap},
Error, Range,
mm::{self, PageAllocFn, PageMap},
AddressRange, Error,
};

use cortex_a::asm::barrier;
Expand All @@ -24,10 +24,10 @@ pub fn current() -> &'static mut PageTable {
}

pub fn init_paging(
r: impl Iterator<Item = Range>,
rw: impl Iterator<Item = Range>,
rwx: impl Iterator<Item = Range>,
pre_allocated: impl Iterator<Item = Range>,
r: impl Iterator<Item = AddressRange>,
rw: impl Iterator<Item = AddressRange>,
rwx: impl Iterator<Item = AddressRange>,
pre_allocated: impl Iterator<Item = AddressRange>,
alloc: PageAllocFn,
) -> Result<(), Error> {
hal_core::mm::init_paging::<PageTable>(r, rw, rwx, pre_allocated, alloc, |pt| {
Expand Down Expand Up @@ -73,3 +73,7 @@ unsafe fn load_pagetable(pt: &'static mut PageTable) {

barrier::isb(barrier::SY);
}

/// Rounds `addr` up to the next multiple of this platform's MMU page size
/// (identity if `addr` is already page-aligned).
pub fn align_up(addr: usize) -> usize {
    // Delegate to the generic helper, fixing the page size to the
    // aarch64 pagetable's.
    mm::align_up(addr, PageTable::PAGE_SIZE)
}
1 change: 1 addition & 0 deletions hal_core/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -7,3 +7,4 @@ edition = "2021"

[dependencies]
bitflags = "2.3"
log = "0.4"
75 changes: 51 additions & 24 deletions hal_core/src/lib.rs
Original file line number Diff line number Diff line change
@@ -1,33 +1,60 @@
#![no_std]

use core::convert::Into;
use core::ops::Range;

pub mod mm;

#[derive(Debug)]
pub enum Error {}

pub type TimerCallbackFn = fn();

pub type Range = (usize, usize);
// /// A range similar to core::ops::Range but that is copyable.
// /// The range is half-open, inclusive below, exclusive above, ie. [start; end[
// #[derive(Debug, Copy, Clone, PartialEq)]
// pub struct Range<T: Copy> {
// pub start: T,
// pub end: T,
// }
//
// impl<T: Copy + core::cmp::PartialOrd + core::cmp::PartialEq + core::ops::Sub<Output = T>>
// Range<T>
// {
// pub fn new(start: T, end: T) -> Self {
// Self { start, end }
// }
//
// pub fn contains(&self, val: T) -> bool {
// self.start <= val && val < self.end
// }
//
// pub fn size(&self) -> T {
// self.end - self.start
// }
// }
/// A range similar to core::ops::Range but that is copyable.
/// The range is half-open, inclusive below, exclusive above, ie. [start; end[
#[derive(Debug, Copy, Clone, PartialEq)]
pub struct AddressRange {
    // First address contained in the range.
    pub start: usize,
    // First address past the end of the range (exclusive bound).
    pub end: usize,
}

impl AddressRange {
    /// Builds an `AddressRange` from any `core::ops::Range` whose bounds
    /// convert into `usize`.
    ///
    /// Panics if the range is reversed (`start > end`). An empty range
    /// (`start == end`) is accepted and contains no address.
    pub fn new<T: Into<usize>>(range: Range<T>) -> Self {
        let (start, end) = (range.start.into(), range.end.into());

        assert!(start <= end, "reversed address range");

        Self { start, end }
    }

    /// Builds the range [start; start + size[.
    pub fn with_size(start: usize, size: usize) -> Self {
        Self::new(start..start + size)
    }

    /// Returns a copy of the range whose end is rounded up to `page_size`,
    /// so the range covers whole pages. The start is left untouched.
    pub fn round_up_to_page(self, page_size: usize) -> Self {
        Self {
            start: self.start,
            end: mm::align_up(self.end, page_size),
        }
    }

    /// Iterates over the base address of every page in the range.
    ///
    /// The range's end must be page-aligned (see [`Self::round_up_to_page`]).
    pub fn iter_pages(self, page_size: usize) -> impl Iterator<Item = usize> {
        assert_eq!(self.end, mm::align_up(self.end, page_size));

        // `end` is an exclusive bound: the page starting at `end` is NOT
        // part of the range (consistent with `contains(end)` being false),
        // so iterate the half-open range. The inclusive `start..=end` form
        // yielded one page past the end of the range.
        (self.start..self.end).step_by(page_size)
    }

    /// Number of pages needed to cover the range; a partially covered last
    /// page is counted as a whole page.
    pub fn count_pages(&self, page_size: usize) -> usize {
        mm::align_up(self.size(), page_size) / page_size
    }

    /// Returns true if `val` lies inside [start; end[.
    pub fn contains(&self, val: usize) -> bool {
        self.start <= val && val < self.end
    }

    /// Size of the range in bytes.
    pub fn size(&self) -> usize {
        self.end - self.start
    }
}
70 changes: 44 additions & 26 deletions hal_core/src/mm.rs
Original file line number Diff line number Diff line change
@@ -1,4 +1,6 @@
use super::{Error, Range};
use log::trace;

use super::{AddressRange, Error};

#[derive(Debug, Clone, Copy)]
pub struct VAddr {
Expand Down Expand Up @@ -103,50 +105,66 @@ pub trait PageMap {

Ok(())
}

fn add_invalid_entries(
    &mut self,
    range: AddressRange,
    alloc: PageAllocFn,
) -> Result<(), Error> {
    // Pre-create a non-present entry for every page of the range,
    // stopping at the first page that cannot be added.
    range
        .iter_pages(Self::PAGE_SIZE)
        .try_for_each(|page| self.add_invalid_entry(VAddr::new(page), alloc))
}

fn identity_map_addressrange(
    &mut self,
    range: AddressRange,
    perms: Permissions,
    alloc: PageAllocFn,
) -> Result<(), Error> {
    // Identity-map each page of the range with the requested permissions,
    // short-circuiting on the first failure.
    range
        .iter_pages(Self::PAGE_SIZE)
        .try_for_each(|page| self.identity_map(VAddr::new(page), perms, alloc))
}
}

/// Rounds `val` up to the next multiple of `page_sz` (identity when `val`
/// is already aligned).
///
/// Panics if `page_sz` is zero (division by zero), like the previous
/// implementation.
pub fn align_up(val: usize, page_sz: usize) -> usize {
    match val % page_sz {
        // Already aligned: nothing to add. This also avoids the spurious
        // overflow of the naive `((val + page_sz - 1) / page_sz) * page_sz`
        // formula when an aligned `val` sits within `page_sz` of usize::MAX.
        0 => val,
        rem => val + (page_sz - rem),
    }
}

pub fn init_paging<P: PageMap + 'static>(
r: impl Iterator<Item = Range>,
rw: impl Iterator<Item = Range>,
rwx: impl Iterator<Item = Range>,
pre_allocated: impl Iterator<Item = Range>,
r: impl Iterator<Item = AddressRange>,
rw: impl Iterator<Item = AddressRange>,
rwx: impl Iterator<Item = AddressRange>,
pre_allocated: impl Iterator<Item = AddressRange>,
alloc: PageAllocFn,
store_pagetable: impl FnOnce(&'static mut P),
) -> Result<(), Error> {
let pt: &'static mut P = P::new(alloc)?;
let page_size = P::PAGE_SIZE;

for (addr, len) in pre_allocated {
let len = align_up(len, page_size);
for base in (addr..=addr + len).step_by(page_size) {
pt.add_invalid_entry(VAddr::new(base), alloc)?;
}
for range in pre_allocated {
pt.add_invalid_entries(range, alloc)?;
}

for (addr, len) in r {
let page_count = align_up(len, page_size) / page_size;
pt.identity_map_range(VAddr::new(addr), page_count, Permissions::READ, alloc)?;
for range in r {
trace!("mapping as RO: {:X?}", range);
pt.identity_map_addressrange(range, Permissions::READ, alloc)?;
}

for (addr, len) in rw {
let page_count = align_up(len, page_size) / page_size;
pt.identity_map_range(
VAddr::new(addr),
page_count,
Permissions::READ | Permissions::WRITE,
alloc,
)?;
for range in rw {
trace!("mapping as RW: {:X?}", range);
pt.identity_map_addressrange(range, Permissions::READ | Permissions::WRITE, alloc)?;
}

for (addr, len) in rwx {
let page_count = align_up(len, page_size) / page_size;
pt.identity_map_range(
VAddr::new(addr),
page_count,
for range in rwx {
trace!("mapping as RWX: {:X?}", range);
pt.identity_map_addressrange(
range,
Permissions::READ | Permissions::WRITE | Permissions::EXECUTE,
alloc,
)?
Expand Down
16 changes: 10 additions & 6 deletions hal_riscv64/src/mm/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -2,8 +2,8 @@ use core::arch::asm;
use core::cell::OnceCell;

use hal_core::{
mm::{PageAllocFn, PageMap},
Error, Range,
mm::{self, PageAllocFn, PageMap},
AddressRange, Error,
};

mod sv39;
Expand All @@ -18,10 +18,10 @@ pub fn current() -> &'static mut PageTable {
}

pub fn init_paging(
r: impl Iterator<Item = Range>,
rw: impl Iterator<Item = Range>,
rwx: impl Iterator<Item = Range>,
pre_allocated: impl Iterator<Item = Range>,
r: impl Iterator<Item = AddressRange>,
rw: impl Iterator<Item = AddressRange>,
rwx: impl Iterator<Item = AddressRange>,
pre_allocated: impl Iterator<Item = AddressRange>,
alloc: PageAllocFn,
) -> Result<(), Error> {
hal_core::mm::init_paging::<PageTable>(r, rw, rwx, pre_allocated, alloc, |pt| {
Expand Down Expand Up @@ -50,3 +50,7 @@ unsafe fn load_pagetable(pt: &'static mut PageTable) {
asm!("sfence.vma");
}
}

/// Rounds `addr` up to the next multiple of this platform's MMU page size
/// (identity if `addr` is already page-aligned).
pub fn align_up(addr: usize) -> usize {
    // Delegate to the generic helper, fixing the page size to the
    // riscv64 pagetable's.
    mm::align_up(addr, PageTable::PAGE_SIZE)
}
6 changes: 4 additions & 2 deletions kernel/src/device_tree.rs
Original file line number Diff line number Diff line change
@@ -1,5 +1,7 @@
use super::Error;

use hal_core::AddressRange;

use fdt::node::FdtNode;

pub struct DeviceTree {
Expand All @@ -19,8 +21,8 @@ impl DeviceTree {
})
}

pub fn memory_region(&self) -> (usize, usize) {
(self.addr, self.addr + self.total_size)
/// Address range occupied by the flattened device tree blob.
pub fn memory_region(&self) -> AddressRange {
    // Same as `AddressRange::new(self.addr..self.addr + self.total_size)`,
    // expressed through the (base, size) constructor.
    AddressRange::with_size(self.addr, self.total_size)
}

pub fn for_all_memory_regions<F: FnMut(&mut dyn Iterator<Item = (usize, usize)>)>(
Expand Down
61 changes: 42 additions & 19 deletions kernel/src/mm/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,8 @@ use crate::globals;

use crate::hal;
use crate::Error;
use hal_core::mm::{PageMap, Permissions, VAddr};
use hal_core::mm::{align_up, PageMap, Permissions, VAddr};
use hal_core::AddressRange;

use crate::drivers;
use drivers::Driver;
Expand All @@ -34,13 +35,15 @@ pub fn is_kernel_page(base: usize) -> bool {
base >= kernel_start && base < kernel_end
}

pub fn kernel_memory_region() -> (usize, usize) {
unsafe {
pub fn kernel_memory_region() -> AddressRange {
let (start, end) = unsafe {
(
crate::utils::external_symbol_value(&KERNEL_START),
crate::utils::external_symbol_value(&KERNEL_END),
)
}
};

AddressRange::new(start..end)
}

pub fn is_reserved_page(base: usize, device_tree: &DeviceTree) -> bool {
Expand All @@ -58,16 +61,16 @@ pub fn is_reserved_page(base: usize, device_tree: &DeviceTree) -> bool {
}

fn map_kernel_rwx() -> (
impl Iterator<Item = (usize, usize)>,
impl Iterator<Item = (usize, usize)>,
impl Iterator<Item = (usize, usize)>,
impl Iterator<Item = AddressRange>,
impl Iterator<Item = AddressRange>,
impl Iterator<Item = AddressRange>,
) {
let page_size = hal::mm::PAGE_SIZE;
let kernel_start = unsafe { crate::utils::external_symbol_value(&KERNEL_START) };
let kernel_end = unsafe { crate::utils::external_symbol_value(&KERNEL_END) };
let kernel_end_align = ((kernel_end + page_size - 1) / page_size) * page_size;

let rwx_entries = iter::once((kernel_start, kernel_end_align - kernel_start));
let rwx_entries = iter::once(AddressRange::new(kernel_start..kernel_end_align));

(iter::empty(), iter::empty(), rwx_entries)
}
Expand Down Expand Up @@ -108,25 +111,37 @@ pub fn map_address_space<'a, I: Iterator<Item = &'a &'a dyn Driver>>(
device_tree: &DeviceTree,
drivers: I,
) -> Result<(), Error> {
let mut r_entries = ArrayVec::<(usize, usize), 128>::new();
let mut rw_entries = ArrayVec::<(usize, usize), 128>::new();
let mut rwx_entries = ArrayVec::<(usize, usize), 128>::new();
let mut pre_allocated_entries = ArrayVec::<(usize, usize), 1024>::new();
let mut r_entries = ArrayVec::<AddressRange, 128>::new();
let mut rw_entries = ArrayVec::<AddressRange, 128>::new();
let mut rwx_entries = ArrayVec::<AddressRange, 128>::new();
let mut pre_allocated_entries = ArrayVec::<AddressRange, 1024>::new();

// Add entries/descriptors in the pagetable for all of accessible memory regions.
// That way in the future, mapping those entries won't require any memory allocations,
// just settings the entry to valid and filling up the bits.
device_tree.for_all_memory_regions(|regions| {
regions.for_each(|entry| {
pre_allocated_entries.try_push(entry).unwrap();
regions.for_each(|(base, size)| {
pre_allocated_entries
.try_push(AddressRange::with_size(base, size))
.unwrap();

// TODO: this needs to be done differently, we're mapping all DRAM as rw...
rw_entries.try_push(entry).unwrap();
rw_entries
.try_push(AddressRange::with_size(base, size))
.unwrap();
});
});
let (dt_start, dt_end) = device_tree.memory_region();
let dt_size = dt_end - dt_start;
rw_entries.try_push((dt_start, dt_size)).unwrap();
debug!(
"adding region containing the device tree to rw entries {:X?}",
device_tree.memory_region()
);
rw_entries
.try_push(
device_tree
.memory_region()
.round_up_to_page(hal::mm::PAGE_SIZE),
)
.unwrap();

let (kernel_r, kernel_rw, kernel_rwx) = map_kernel_rwx();
kernel_r.for_each(|entry| r_entries.try_push(entry).unwrap());
Expand All @@ -135,7 +150,15 @@ pub fn map_address_space<'a, I: Iterator<Item = &'a &'a dyn Driver>>(

for drv in drivers {
if let Some((base, len)) = drv.get_address_range() {
rw_entries.try_push((base, len)).unwrap();
let len = hal::mm::align_up(len);
debug!(
"adding driver memory region to RW entries: [{:X}; {:X}]",
base,
base + len
);
rw_entries
.try_push(AddressRange::with_size(base, len))
.unwrap();
}
}

Expand Down
Loading

0 comments on commit 03a517c

Please sign in to comment.