Commit faa1e924 authored by Eemeli Lehtonen

page allocation

parent cd4fb562
@@ -18,6 +18,7 @@ spin = "0.9.4"
 volatile = "0.4.5"
 x86_64 = "0.14.10"
 uart_16550 = "0.2.18"
+# bitvec = "1.0.1"
 #tracing = { version = "0.1.37", default-features = false }

 [dependencies.limine]
......
@@ -83,8 +83,8 @@ build: ${KERNEL}
 # bootable iso alias
 iso: ${HYPERION}

-reset-cargo-deps:
-	rm ${CARGO_DIR}/hyperion.d
+clippy:
+	${CARGO} clippy ${CARGO_FLAGS} -- -D warnings

 # connect gdb to qemu
 gdb:
......
@@ -4,4 +4,4 @@ TIMEOUT=0
 :Hyperion
 PROTOCOL=limine
 KERNEL_PATH=boot:///hyperion
-KERNEL_CMDLINE=log=trace
+KERNEL_CMDLINE=log=debug
#!/usr/bin/env bash
#
# Hyperion x86_64 is runnable
set -xe
echo $@
LIMINE_GIT_URL="https://github.com/limine-bootloader/limine.git"
ISO_DIR=target/hyperion/x86_64/iso
KERNEL=$1
# Clone the `limine` repository if we don't have it yet.
if [ ! -d target/limine ]; then
git clone $LIMINE_GIT_URL --depth=1 --branch v3.0-branch-binary target/limine
fi
# Make sure we have an up-to-date version of the bootloader.
cd target/limine
git fetch
make
cd -
# Copy the needed files into an ISO image.
mkdir -p $ISO_DIR
cp cfg/limine.cfg target/limine/limine{.sys,-cd.bin,-cd-efi.bin} $ISO_DIR
cp $KERNEL $ISO_DIR/hyperion
xorriso -as mkisofs \
-b limine-cd.bin \
-no-emul-boot -boot-load-size 4 -boot-info-table \
--efi-boot limine-cd-efi.bin \
-efi-boot-part --efi-boot-image --protective-msdos-label \
$ISO_DIR -o $KERNEL.iso
# For the image to be bootable on BIOS systems, we must run `limine-deploy` on it.
target/limine/limine-deploy $KERNEL.iso
# A hack to detect if the kernel is a testing kernel
# Cargo test binary generates a 'random id' for testing binaries
if [ "$(basename $KERNEL)" = "hyperion" ]; then
# Run the created image with QEMU.
qemu-system-x86_64 \
-enable-kvm \
-machine q35 \
-cpu qemu64,+rdrand,+rdseed \
-smp 8 \
-m 512m \
-M smm=off \
-d int,guest_errors,cpu_reset \
-no-reboot \
-serial stdio \
$KERNEL.iso
#-s -S \
#-no-shutdown \
#-D target/log.txt \
else
set +e
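# The isa-debug-exit device below makes QEMU exit with status (value << 1) | 1 when the
# kernel writes `value` to port 0xf4, so the expected success code 33 corresponds to the
# kernel writing 0x10 (checked right after the QEMU command).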
# Run the created image with QEMU.
qemu-system-x86_64 \
-enable-kvm \
-machine q35 \
-cpu qemu64,+rdrand,+rdseed \
-smp 8 \
-m 512m \
-M smm=off \
-d int,guest_errors,cpu_reset \
-device isa-debug-exit,iobase=0xf4,iosize=0x04 \
-no-reboot \
-serial stdio \
-display none \
$KERNEL.iso
#-no-shutdown \
#-D target/log.txt \
[ $? -ne 33 ] && exit 1
exit 0
fi
-use limine::{LimineKernelAddressRequest, LimineKernelAddressResponse};
+use limine::{LimineHhdmRequest, LimineKernelAddressRequest, LimineKernelAddressResponse};
 use spin::Lazy;
 use x86_64::{PhysAddr, VirtAddr};
@@ -12,9 +12,23 @@ pub fn virt_addr() -> VirtAddr {
     VirtAddr::new(KERNEL_ADDR.virtual_base)
 }

+pub fn hhdm_offset() -> u64 {
+    static HHDM_OFFSET: Lazy<u64> = Lazy::new(|| {
+        static REQ: LimineHhdmRequest = LimineHhdmRequest::new(0);
+        REQ.get_response()
+            .get()
+            .expect("Cannot get Limine HHDM response")
+            .offset
+    });
+
+    *HHDM_OFFSET
+}
+
 //

 static KERNEL_ADDR: Lazy<&'static LimineKernelAddressResponse> = Lazy::new(|| {
     static REQ: LimineKernelAddressRequest = LimineKernelAddressRequest::new(0);
-    REQ.get_response().get().unwrap()
+    REQ.get_response()
+        .get()
+        .expect("Cannot get Limine kernel address response")
 });
-use crate::mem::map::Memmap;
+use crate::{
+    mem::map::{Memmap, Memtype},
+    trace,
+};
+use core::sync::atomic::{AtomicBool, Ordering};
 use limine::{LimineMemmapEntry, LimineMemmapRequest, LimineMemoryMapEntryType, NonNullPtr};
-use spin::Lazy;
+use x86_64::PhysAddr;

 //

 pub fn memmap() -> impl Iterator<Item = Memmap> {
-    const DEFAULT_MEMMAP: Memmap = Memmap {
-        base: u64::MAX,
-        len: 0u64,
-    };
-
-    memiter()
-        .scan(DEFAULT_MEMMAP, |acc, memmap| {
-            // TODO: zero init reclaimable regions
-            if let LimineMemoryMapEntryType::Usable
-            // | LimineMemoryMapEntryType::AcpiReclaimable
-            // | LimineMemoryMapEntryType::BootloaderReclaimable
-            = memmap.typ
-            {
-                acc.base = memmap.base.min(acc.base);
-                acc.len += memmap.len;
-                Some(None)
-            } else if acc.len == 0 {
-                acc.base = u64::MAX;
-                Some(None)
-            } else {
-                Some(Some(core::mem::replace(acc, DEFAULT_MEMMAP)))
-            }
-        })
-        .flatten()
-}
-
-pub fn memtotal() -> u64 {
-    static TOTAL: Lazy<u64> = Lazy::new(|| {
-        memiter()
-            .filter(|memmap| {
-                memmap.typ != LimineMemoryMapEntryType::Reserved
-                    && memmap.typ != LimineMemoryMapEntryType::Framebuffer
-            })
-            .map(|memmap| memmap.len)
-            .sum::<u64>()
-    });
-
-    *TOTAL
-}
+    static FIRST_TIME: AtomicBool = AtomicBool::new(true);
+    let first_time = FIRST_TIME.swap(false, Ordering::SeqCst);
+
+    memiter().filter_map(move |memmap| {
+        // TODO: zero init reclaimable regions
+        if first_time && memmap.typ != LimineMemoryMapEntryType::Reserved {
+            trace!("{memmap:?}");
+        }
+
+        let ty = match memmap.typ {
+            LimineMemoryMapEntryType::Usable => Memtype::Usable,
+            LimineMemoryMapEntryType::BootloaderReclaimable => Memtype::BootloaderReclaimable,
+            LimineMemoryMapEntryType::KernelAndModules => Memtype::KernelAndModules,
+            _ => return None,
+        };
+
+        Some(Memmap {
+            base: PhysAddr::new(memmap.base),
+            len: memmap.len,
+            ty,
+        })
+    })
+}

 fn memiter() -> impl Iterator<Item = &'static NonNullPtr<LimineMemmapEntry>> {
......
@@ -3,10 +3,10 @@ use crate::{arch, kernel_main};

 //

+pub use addr::hhdm_offset;
 pub use addr::phys_addr;
 pub use addr::virt_addr;
 pub use mem::memmap;
-pub use mem::memtotal;
 pub use term::_print;

 //
......
@@ -57,45 +57,14 @@ fn kernel_main() -> ! {
     debug!("Cmdline: {:?}", env::Arguments::get());

     debug!(
-        "Kernel addr: {:?} {:?}",
+        "Kernel addr: {:?}, {:?}, HHDM Offset: {:#0X?}",
         boot::virt_addr(),
-        boot::phys_addr()
+        boot::phys_addr(),
+        boot::hhdm_offset()
     );

     mem::init();

-    /* let (l4, _) = Cr3::read();
-    let read_pt = |frame: PhysFrame| -> &mut PageTable {
-        let addr = VirtAddr::new(frame.start_address().as_u64());
-        let table: *mut PageTable = addr.as_mut_ptr();
-        unsafe { &mut *table }
-    };
-
-    for (i, e) in read_pt(l4).iter().enumerate() {
-        if !e.is_unused() {
-            println!("L4 entry {i}: {e:?}");
-            for (i, e) in read_pt(e.frame().unwrap()).iter().enumerate() {
-                if !e.is_unused() {
-                    println!(" L3 entry {i}: {e:?}");
-                    for (i, e) in read_pt(e.frame().unwrap()).iter().enumerate() {
-                        if !e.is_unused() {
-                            println!(" L2 entry {i}: {e:?}");
-                            for (i, e) in read_pt(e.frame().unwrap()).iter().enumerate() {
-                                if !e.is_unused() {
-                                    println!(" L1 entry {i}: {e:?}");
-                                }
-                            }
-                        }
-                    }
-                }
-            }
-        }
-    } */
-
     // ofc. every kernel has to have this cringy ascii name splash
     info!("\n{}\n", include_str!("./splash"));
......
-use super::map::Memmap;
+use super::{map::Memmap, to_higher_half};
 use crate::{boot, error};
 use core::{
     alloc::{GlobalAlloc, Layout},
@@ -57,11 +57,11 @@ unsafe impl GlobalAlloc for BumpAllocator {
         let memory = inner.map.base;
         let mut remaining = inner.remaining.lock();

-        let top = memory + *remaining;
+        let top = (memory + *remaining).as_u64();
         let Some(tmp) = top.checked_sub(layout.size() as u64) else {
             error!("OUT OF MEMORY");
             error!(
-                "ALLOC: size: {} align: {} top: {top} memory: {memory} remaining: {remaining}",
+                "ALLOC: size: {} align: {} top: {top} memory: {memory:?} remaining: {remaining}",
                 layout.size(),
                 layout.align()
             );
@@ -72,11 +72,11 @@ unsafe impl GlobalAlloc for BumpAllocator {
         if let Some(left) = remaining.checked_sub(reservation) {
             *remaining = left;
-            (memory + left) as _
+            to_higher_half(memory + left).as_mut_ptr()
         } else {
             error!("OUT OF MEMORY");
             error!(
-                "ALLOC: size: {} align: {} top: {top} new: {new_top} memory: {memory} remaining: {remaining}",
+                "ALLOC: size: {} align: {} top: {top} new: {new_top} memory: {memory:?} remaining: {remaining}",
                 layout.size(),
                 layout.align()
             );
......
+use x86_64::PhysAddr;
+
+//
+
-#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
 pub struct Memmap {
-    pub base: u64,
+    pub base: PhysAddr,
     pub len: u64,
+    pub ty: Memtype,
+}
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub enum Memtype {
+    Usable,
+    BootloaderReclaimable,
+    KernelAndModules,
+}
+
+//
+
+impl Memmap {
+    /// Returns `true` if the memtype is [`Usable`].
+    ///
+    /// [`Usable`]: Memtype::Usable
+    #[must_use]
+    pub fn is_usable(&self) -> bool {
+        self.ty.is_usable()
+    }
+
+    /// Returns `true` if the memtype is [`BootloaderReclaimable`].
+    ///
+    /// [`BootloaderReclaimable`]: Memtype::BootloaderReclaimable
+    #[must_use]
+    pub fn is_bootloader_reclaimable(&self) -> bool {
+        self.ty.is_bootloader_reclaimable()
+    }
+
+    /// Returns `true` if the memtype is [`KernelAndModules`].
+    ///
+    /// [`KernelAndModules`]: Memtype::KernelAndModules
+    #[must_use]
+    pub fn is_kernel_and_modules(&self) -> bool {
+        self.ty.is_kernel_and_modules()
+    }
+}
+
+impl Memtype {
+    /// Returns `true` if the memtype is [`Usable`].
+    ///
+    /// [`Usable`]: Memtype::Usable
+    #[must_use]
+    pub fn is_usable(&self) -> bool {
+        matches!(self, Self::Usable)
+    }
+
+    /// Returns `true` if the memtype is [`BootloaderReclaimable`].
+    ///
+    /// [`BootloaderReclaimable`]: Memtype::BootloaderReclaimable
+    #[must_use]
+    pub fn is_bootloader_reclaimable(&self) -> bool {
+        matches!(self, Self::BootloaderReclaimable)
+    }
+
+    /// Returns `true` if the memtype is [`KernelAndModules`].
+    ///
+    /// [`KernelAndModules`]: Memtype::KernelAndModules
+    #[must_use]
+    pub fn is_kernel_and_modules(&self) -> bool {
+        matches!(self, Self::KernelAndModules)
+    }
 }

 //
......
 use crate::{boot, debug, mem::map::Memmap, util::fmt::NumberPostfix};
+use x86_64::{
+    registers::control::Cr3,
+    structures::paging::{page_table::FrameError, PageTable, PhysFrame, Size2MiB, Size4KiB},
+    PhysAddr, VirtAddr,
+};

 //
@@ -6,16 +11,87 @@ pub mod map;

 // allocator

 pub mod bump;
-pub mod pfa;
+pub mod pmm;

 //

 pub fn init() {
-    let usable = boot::memmap().map(|Memmap { len, .. }| len).sum::<u64>();
-    let total = boot::memtotal();
-
-    debug!("Usable system memory: {}B", usable.postfix_binary());
-    debug!("Total system memory: {}B", total.postfix_binary());
-
     bump::init();
-    pfa::init();
+    pmm::init();
+}
+
+//
+
+fn is_higher_half(addr: u64) -> bool {
+    addr >= boot::hhdm_offset()
+}
+
+fn to_higher_half(addr: PhysAddr) -> VirtAddr {
+    let addr = addr.as_u64();
+    if is_higher_half(addr) {
+        VirtAddr::new(addr)
+    } else {
+        VirtAddr::new(addr + boot::hhdm_offset())
+    }
+}
+
+fn from_higher_half(addr: VirtAddr) -> PhysAddr {
+    let addr = addr.as_u64();
+    if is_higher_half(addr) {
+        PhysAddr::new(addr - boot::hhdm_offset())
+    } else {
+        PhysAddr::new(addr)
+    }
+}
+
+fn walk_page_tables(addr: VirtAddr) -> Option<PhysAddr> {
+    enum AnyPhysFrame {
+        Size4KiB(PhysFrame<Size4KiB>),
+        Size2MiB(PhysFrame<Size2MiB>),
+    }
+
+    impl AnyPhysFrame {
+        fn start_address(&self) -> PhysAddr {
+            match self {
+                AnyPhysFrame::Size4KiB(v) => v.start_address(),
+                AnyPhysFrame::Size2MiB(v) => v.start_address(),
+            }
+        }
+    }
+
+    let (l4, _) = Cr3::read();
+
+    let page_table_indices = [
+        addr.p4_index(),
+        addr.p3_index(),
+        addr.p2_index(),
+        addr.p1_index(),
+    ];
+
+    let mut frame = AnyPhysFrame::Size4KiB(l4);
+
+    for index in page_table_indices {
+        let virt = to_higher_half(frame.start_address());
+        let table: *const PageTable = virt.as_ptr();
+        let table = unsafe { &*table };
+
+        let entry = &table[index];
+        frame = match entry.frame() {
+            Ok(frame) => AnyPhysFrame::Size4KiB(frame),
+            Err(FrameError::FrameNotPresent) => return None,
+            Err(FrameError::HugeFrame) => {
+                AnyPhysFrame::Size2MiB(PhysFrame::<Size2MiB>::containing_address(entry.addr()))
+            }
+        }
+    }
+
+    Some(frame.start_address() + u64::from(addr.page_offset()))
+}
+
+fn debug_phys_addr(addr: PhysAddr) {
+    debug!(
+        "{:?} {:?} {:?}",
+        addr,
+        walk_page_tables(VirtAddr::new(addr.as_u64())),
+        walk_page_tables(to_higher_half(addr))
+    );
 }
use super::map::Memmap;
use crate::{
boot, debug,
mem::bump,
util::{bitmap::Bitmap, fmt::NumberPostfix},
};
use core::slice;
use spin::{Mutex, Once};
use x86_64::{align_down, align_up};
//
const PAGE_SIZE: u64 = 2u64.pow(12); // 4KiB pages
// const PAGE_SIZE: u64 = 2u64.pow(21); // 2MiB pages
static PFA: Once<Mutex<PageFrameAllocator>> = Once::new();
//
pub fn init() {
let mem_bottom = boot::memmap()
.map(|Memmap { base, .. }| base)
.min()
.expect("No memory");
let mem_top = boot::memmap()
.map(|Memmap { base, len }| base + len)
.max()
.expect("No memory");
// size in bytes
let bitmap_size = (mem_top - mem_bottom) / PAGE_SIZE / 8 + 1;
let bitmap_data = boot::memmap()
.find(|Memmap { len, .. }| *len >= bitmap_size)
.expect("No place to store PageFrameAllocator bitmap")
.base;
// SAFETY: this bitmap is going to be initialized before it is read from
let bitmap = unsafe { slice::from_raw_parts_mut(bitmap_data as _, bitmap_size as _) };
let mut bitmap = Bitmap::new(bitmap);
bitmap.fill(true); // initialized here
let bottom_page = align_up(mem_bottom, PAGE_SIZE) / PAGE_SIZE;
// free up some pages
for Memmap { mut base, mut len } in boot::memmap() {
if let Some(map) = bump::map() && map.base == base {
// skip the BumpAllocator spot
base += map.base;
len -= map.len;
}
if base == bitmap_data {
// skip the bitmap allocation spot
base += bitmap_data;
len -= bitmap_size;
}
let mut bottom = align_up(base, PAGE_SIZE);
let mut top = align_down(base + len, PAGE_SIZE);
if bottom >= top {
continue;
}
debug!(
"Free pages: {:#0X?} ({}B)",
bottom..top,
(top - bottom).postfix_binary()
);
bottom /= PAGE_SIZE;
top /= PAGE_SIZE;
bottom -= bottom_page;
top -= bottom_page;
for page in bottom..top {
#[cfg(debug_assertions)]
bitmap.set(page as _, false).unwrap();
#[cfg(not(debug_assertions))]
let res = bitmap.set(page as _, false);
}
}
let free = bitmap.iter_false().count() as u64 * PAGE_SIZE;
let used = 0;
debug!("Free pages: ({}B)", free.postfix_binary());
PFA.call_once(|| {
Mutex::new(PageFrameAllocator {
bitmap,
free,
used,
bottom_page,
})
});
}
//
pub struct PageFrameAllocator {
bitmap: Bitmap<'static>,
free: u64,
used: u64,
bottom_page: u64,
}
//
impl PageFrameAllocator {
pub fn free_page(&mut self, _addr: u64) {}
}
use super::{map::Memmap, to_higher_half};
use crate::{
boot, debug,
log::{test_log_level, LogLevel},
mem::{bump, map::Memtype},
trace,
util::{bitmap::Bitmap, fmt::NumberPostfix},
};
use core::{
any::type_name,
convert::identity,
fmt, slice,
sync::atomic::{AtomicU64, AtomicUsize, Ordering},
};
use spin::{Mutex, Once};
use x86_64::{align_down, align_up, PhysAddr, VirtAddr};
//
const PAGE_SIZE: u64 = 2u64.pow(12); // 4KiB pages
// const PAGE_SIZE: u64 = 2u64.pow(21); // 2MiB pages
static PFA: Once<PageFrameAllocator> = Once::new();
//
pub fn init() {
// usable system memory
let usable: u64 = boot::memmap()
.filter(Memmap::is_usable)
.map(|Memmap { len, .. }| len)
.sum();
// total system memory
let total: u64 = boot::memmap().map(|Memmap { len, .. }| len).sum();
// the end of the usable physical memory address space
let top = boot::memmap()
.filter(Memmap::is_usable)
.map(|Memmap { base, len, ty }| base + len)
.max()
.expect("No memory");
// size in bytes
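// (one bit per PAGE_SIZE page from physical address 0 up to `top`,
// rounded up so the bitmap itself occupies whole pages)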
let bitmap_size = align_up(top.as_u64() / PAGE_SIZE / 8, PAGE_SIZE);
let bitmap_data = boot::memmap()
.filter(Memmap::is_usable)
.find(|Memmap { len, .. }| *len >= bitmap_size)
.expect("No place to store PageFrameAllocator bitmap")
.base;
// SAFETY: this bitmap is going to be initialized before it is read from
let bitmap = unsafe {
slice::from_raw_parts_mut(to_higher_half(bitmap_data).as_mut_ptr(), bitmap_size as _)
};
let mut bitmap = Bitmap::new(bitmap);
bitmap.fill(true); // initialized here
// free up some pages
for Memmap {
mut base,
mut len,
ty,
} in boot::memmap().filter(Memmap::is_usable)
{
if let Some(map) = bump::map() && map.base == base {
// skip the BumpAllocator spot
base += map.base.as_u64();
len -= map.len;
}
if base == bitmap_data {
// skip the bitmap allocation spot
base += bitmap_data.as_u64();
len -= bitmap_size;
}
let mut bottom = base.as_u64();
let mut top = base.as_u64() + len;
debug!(
"Free pages: {:#0X?} ({}B)",
bottom..top,
(top - bottom).postfix_binary()
);
bottom /= PAGE_SIZE;
top /= PAGE_SIZE;
for page in bottom..top {
bitmap.set(page as _, false).unwrap();
}
}
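// the bump allocator arena and the bitmap itself are already in use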
let used = bump::map().map(|Memmap { len, .. }| len).unwrap_or(0) + bitmap_size;
let pfa = PageFrameAllocator {
bitmap: bitmap.into(),
usable: usable.into(),
used: used.into(),
total: total.into(),
last_alloc_index: 0.into(),
};
debug!("PFA:\n{pfa}");
PFA.call_once(|| pfa);
}
//
pub struct PageFrameAllocator {
// 1 bits are used pages
bitmap: Mutex<Bitmap<'static>>,
usable: AtomicU64,
used: AtomicU64,
total: AtomicU64,
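// next-fit hint: the page index right after the most recent allocation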
last_alloc_index: AtomicUsize,
}
#[derive(Debug)]
pub struct PageFrame {
first: PhysAddr,
count: usize,
}
//
impl PageFrameAllocator {
/// System total memory in bytes
pub fn total_mem(&self) -> u64 {
self.total.load(Ordering::SeqCst)
}
/// System usable memory in bytes
pub fn usable_mem(&self) -> u64 {
self.usable.load(Ordering::SeqCst)
}
/// Currently used usable memory in bytes
pub fn used_mem(&self) -> u64 {
self.used.load(Ordering::SeqCst)
}
/// Currently free usable memory in bytes
pub fn free_mem(&self) -> u64 {
self.usable_mem() - self.used_mem()
}
/// Reserved memory in bytes
pub fn reserved_mem(&self) -> u64 {
self.total_mem() - self.usable_mem()
}
/// Free up pages
///
/// Double frees are not possible due to [`PageFrame`] missing [`Clone`] and it cannot be
/// constructed manually
pub fn free(&self, frame: PageFrame) {
if frame.first.as_u64() == 0 || frame.count == 0 {
return;
}
let mut bitmap = self.bitmap.lock();
let page = (frame.first.as_u64() / PAGE_SIZE) as usize;
for page in page..page + frame.count {
bitmap.set(page, false).unwrap();
}
self.used
.fetch_sub(frame.count as u64 * PAGE_SIZE, Ordering::SeqCst);
}
/// Alloc pages
///
/// Use [`Self::free`] to not leak pages (-> memory)
pub fn alloc(&self, count: usize) -> PageFrame {
if count == 0 {
return PageFrame {
first: PhysAddr::new(0),
count: 0,
};
}
let mut bitmap = self.bitmap.lock();
let first_page = self.alloc_at(&mut bitmap, count).unwrap_or_else(|| {
// TODO: handle OOM a bit better
self.alloc_from(0);
self.alloc_at(&mut bitmap, count).expect("OOM")
});
self.alloc_from(first_page + count);
let addr = PhysAddr::new(first_page as u64 * PAGE_SIZE);
// SAFETY: TODO:
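// (the frames were just reserved in the bitmap above and the Limine HHDM maps all of
// physical memory, so this higher-half slice should be exclusive and writable)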
let page_data: &mut [u8] = unsafe {
slice::from_raw_parts_mut(
to_higher_half(addr).as_mut_ptr(),
count * PAGE_SIZE as usize,
)
};
// fill the page with zeros
trace!("Memzeroing {:?}", page_data.as_ptr_range());
page_data.fill(0);
self.used
.fetch_add(count as u64 * PAGE_SIZE, Ordering::SeqCst);
PageFrame { first: addr, count }
}
fn alloc_from(&self, index: usize) {
self.last_alloc_index.store(index, Ordering::SeqCst)
}
// returns the page index, not the page address
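// next-fit: scan forward from `last_alloc_index` for a run of `count` free (0) bits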
fn alloc_at(&self, bitmap: &mut Bitmap, count: usize) -> Option<usize> {
let mut first_page = self.last_alloc_index.load(Ordering::SeqCst);
'main: loop {
if first_page + count > bitmap.len() {
return None;
}
/* if test_log_level(LogLevel::Trace) {
trace!(
"Trying to allocate {count} pages from {:?}",
to_higher_half(PhysAddr::new(first_page as u64 * PAGE_SIZE))
);
} */
// go reversed so that skips would be more efficient
for offs in (0..count).rev() {
// SAFETY: `first_page + offs` < `first_page + count` <= `bitmap.len()`
// => bitmap has to contain `first_page + offs`
if unsafe { bitmap.get(first_page + offs).unwrap_unchecked() } {
// skip all page windows which have this locked page
first_page = first_page + offs + 1;
continue 'main;
}
}
// found a window of free pages
for offs in 0..count {
// lock them
_ = bitmap.set(first_page + offs, true);
}
return Some(first_page);
}
}
}
impl fmt::Display for PageFrameAllocator {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
writeln!(
f,
"Total system memory: {}B",
self.total_mem().postfix_binary()
)?;
writeln!(
f,
"Usable system memory: {}B",
self.usable_mem().postfix_binary()
)?;
writeln!(
f,
"Used system memory: {}B",
self.used_mem().postfix_binary()
)?;
writeln!(
f,
"Free system memory: {}B",
self.free_mem().postfix_binary()
)?;
write!(
f,
"Reserved system memory: {}B",
self.reserved_mem().postfix_binary()
)?;
Ok(())
}
}
impl PageFrame {
pub fn addr(&self) -> PhysAddr {
self.first
}
}
//
#[cfg(test)]
mod tests {
use crate::mem::pmm::PFA;
#[test_case]
fn pfa_simple() {
let pfa = PFA.get().unwrap();
let a = pfa.alloc(1);
assert_ne!(a.addr().as_u64(), 0);
let b = pfa.alloc(1);
assert_ne!(b.addr().as_u64(), 0);
assert_ne!(a.addr().as_u64(), b.addr().as_u64());
pfa.free(a);
pfa.alloc_from(0);
let c = pfa.alloc(1);
assert_ne!(c.addr().as_u64(), 0);
assert_ne!(b.addr().as_u64(), c.addr().as_u64());
let d = pfa.alloc(1);
assert_ne!(d.addr().as_u64(), 0);
assert_ne!(c.addr().as_u64(), d.addr().as_u64());
// pfa.free(a); // <- compile error as expected
pfa.free(b);
pfa.free(c);
pfa.free(d);
}
}
@@ -40,7 +40,18 @@ impl Writer {
         match self.escapes.next(byte) {
             DecodedPart::Byte(b'\n') => {
                 if let Some(mut fbo) = get_fbo() {
-                    self.new_line(1, &mut fbo)
+                    #[cfg(debug_assertions)]
+                    let lines = if self.cursor[1] + 1 >= Self::size(&mut fbo)[1] {
+                        // scroll more if the cursor is near the bottom
+                        //
+                        // because scrolling is slow in debug mode
+                        8
+                    } else {
+                        1
+                    };
+                    #[cfg(not(debug_assertions))]
+                    let lines = 1;
+
+                    self.new_line(lines, &mut fbo)
                 }
             }
             DecodedPart::Byte(b'\t') => {
......