commit bd2b796c500d1eac836e27da43389e2b973cbcea Author: g1n Date: Sun Jul 25 21:39:02 2021 +0300 Initial commit diff --git a/.cargo/config.toml b/.cargo/config.toml new file mode 100644 index 0000000..5cdf1e9 --- /dev/null +++ b/.cargo/config.toml @@ -0,0 +1,18 @@ +[target.'cfg(target_os = "linux")'] +rustflags = ["-C", "link-arg=-nostartfiles"] + +[target.'cfg(target_os = "windows")'] +rustflags = ["-C", "link-args=/ENTRY:_start /SUBSYSTEM:console"] + +[target.'cfg(target_os = "macos")'] +rustflags = ["-C", "link-args=-e __start -static -nostartfiles"] + +[unstable] +build-std-features = ["compiler-builtins-mem"] +build-std = ["core", "compiler_builtins", "alloc"] + +[build] +target = "x86_64-gros.json" + +[target.'cfg(target_os = "none")'] +runner = "bootimage runner" \ No newline at end of file diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..a990e6b --- /dev/null +++ b/.gitignore @@ -0,0 +1,3 @@ +/target +*~ +*# diff --git a/Cargo.lock b/Cargo.lock new file mode 100644 index 0000000..d57cb8d --- /dev/null +++ b/Cargo.lock @@ -0,0 +1,226 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. +version = 3 + +[[package]] +name = "autocfg" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" + +[[package]] +name = "bit_field" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed8765909f9009617974ab6b7d332625b320b33c326b1e9321382ef1999b5d56" + +[[package]] +name = "bitflags" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" + +[[package]] +name = "bootloader" +version = "0.9.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a3c1ceed1cd9e61c7998100cc18c13d413aa40d018992b871ab8e7435ce6372" + +[[package]] +name = "cfg-if" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" + +[[package]] +name = "conquer-once" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96eb12fb69466716fbae9009d389e6a30830ae8975e170eff2d2cff579f9efa3" +dependencies = [ + "conquer-util", +] + +[[package]] +name = "conquer-util" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "654fb2472cc369d311c547103a1fa81d467bef370ae7a0680f65939895b1182a" + +[[package]] +name = "crossbeam-queue" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "774ba60a54c213d409d5353bda12d49cd68d14e45036a285234c8d6f91f92570" +dependencies = [ + "cfg-if", + "crossbeam-utils", + "maybe-uninit", +] + +[[package]] +name = "crossbeam-utils" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3c7c73a2d1e9fc0886a08b93e98eb643461230d5f1925e4036204d5f2e261a8" +dependencies = [ + "autocfg", + "cfg-if", +] + +[[package]] +name = "futures-core" +version = "0.3.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af51b1b4a7fdff033703db39de8802c673eb91855f2e0d47dcf3bf2c0ef01f99" + +[[package]] +name = "futures-task" +version = "0.3.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"bbe54a98670017f3be909561f6ad13e810d9a51f3f061b902062ca3da80799f2" + +[[package]] +name = "futures-util" +version = "0.3.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67eb846bfd58e44a8481a00049e82c43e0ccb5d61f8dc071057cb19249dd4d78" +dependencies = [ + "autocfg", + "futures-core", + "futures-task", + "pin-project-lite", + "pin-utils", +] + +[[package]] +name = "gros" +version = "0.1.0" +dependencies = [ + "bootloader", + "conquer-once", + "crossbeam-queue", + "futures-util", + "lazy_static", + "linked_list_allocator", + "pc-keyboard", + "pic8259", + "spin", + "uart_16550", + "volatile 0.2.7", + "x86_64", +] + +[[package]] +name = "lazy_static" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" +dependencies = [ + "spin", +] + +[[package]] +name = "linked_list_allocator" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0b725207570aa16096962d0b20c79f8a543df2280bd3c903022b9b0b4d7ea68" +dependencies = [ + "spinning_top", +] + +[[package]] +name = "lock_api" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0382880606dff6d15c9476c416d18690b72742aa7b605bb6dd6ec9030fbf07eb" +dependencies = [ + "scopeguard", +] + +[[package]] +name = "maybe-uninit" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" + +[[package]] +name = "pc-keyboard" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c6f2d937e3b8d63449b01401e2bae4041bc9dd1129c2e3e0d239407cf6635ac" + +[[package]] +name = "pic8259" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08cc920d83ee33c0f9b73aa441e75468bf2d10c959a3eb6260cf720b05ac91a1" +dependencies = [ + "x86_64", +] + +[[package]] +name = "pin-project-lite" +version = "0.2.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d31d11c69a6b52a174b42bdc0c30e5e11670f90788b2c471c31c1d17d449443" + +[[package]] +name = "pin-utils" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" + +[[package]] +name = "scopeguard" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" + +[[package]] +name = "spin" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" + +[[package]] +name = "spinning_top" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75adad84ee84b521fb2cca2d4fd0f1dab1d8d026bda3c5bea4ca63b5f9f9293c" +dependencies = [ + "lock_api", +] + +[[package]] +name = "uart_16550" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "65ad019480ef5ff8ffe66d6f6a259cd87cf317649481394981db1739d844f374" +dependencies = [ + "bitflags", + "x86_64", +] + +[[package]] +name = "volatile" +version = "0.2.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6b06ad3ed06fef1713569d547cdbdb439eafed76341820fb0e0344f29a41945" + +[[package]] +name = "volatile" +version = "0.4.4" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "e4c2dbd44eb8b53973357e6e207e370f0c1059990df850aca1eca8947cf464f0" + +[[package]] +name = "x86_64" +version = "0.14.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d95947de37ad0d2d9a4a4dd22e0d042e034e5cbd7ab53edbca0d8035e0a6a64d" +dependencies = [ + "bit_field", + "bitflags", + "volatile 0.4.4", +] diff --git a/Cargo.toml b/Cargo.toml new file mode 100644 index 0000000..4c3f0d9 --- /dev/null +++ b/Cargo.toml @@ -0,0 +1,51 @@ +[package] +name = "gros" +version = "0.1.0" +authors = ["g1n "] +edition = "2018" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +bootloader = { version = "0.9.8", features = ["map_physical_memory"]} +volatile = "0.2.6" +spin = "0.5.2" +x86_64 = "0.14.2" +uart_16550 = "0.2.0" +pic8259 = "0.10.1" +pc-keyboard = "0.5.0" +linked_list_allocator = "0.9.0" + +[dependencies.lazy_static] +version = "1.0" +features = ["spin_no_std"] + +[dependencies.crossbeam-queue] +version = "0.2.1" +default-features = false +features = ["alloc"] + +[dependencies.conquer-once] +version = "0.2.0" +default-features = false + +[dependencies.futures-util] +version = "0.3.4" +default-features = false +features = ["alloc"] + +[package.metadata.bootimage] +test-args = [ + "-device", "isa-debug-exit,iobase=0xf4,iosize=0x04", "-serial", "stdio", + "-display", "none" +] +test-success-exit-code = 33 # (0x10 << 1) | 1 +test-timeout = 300 + +[[test]] +name = "should_panic" +harness = false + +[[test]] +name = "stack_overflow" +harness = false \ No newline at end of file diff --git a/src/allocator.rs b/src/allocator.rs new file mode 100644 index 0000000..731e6d0 --- /dev/null +++ b/src/allocator.rs @@ -0,0 +1,93 @@ +use alloc::alloc::{GlobalAlloc, Layout}; +use core::ptr::null_mut; +use linked_list_allocator::LockedHeap; + +pub struct Dummy; + +pub mod bump; +pub mod linked_list; +pub mod fixed_size_block; +use linked_list::LinkedListAllocator; + +unsafe impl GlobalAlloc for Dummy { + unsafe fn alloc(&self, _layout: Layout) -> *mut u8 { + null_mut() + } + + unsafe fn dealloc(&self, _ptr: *mut u8, _layout: Layout) { + panic!("dealloc should be never called") + } +} + +use bump::BumpAllocator; +use fixed_size_block::FixedSizeBlockAllocator; + +#[global_allocator] +static ALLOCATOR: Locked = Locked::new( + FixedSizeBlockAllocator::new()); + +pub const HEAP_START: usize = 0x_4444_4444_0000; +pub const HEAP_SIZE: usize = 100 * 1024; + +use x86_64::{ + structures::paging::{ + mapper::MapToError, FrameAllocator, Mapper, Page, PageTableFlags, Size4KiB, + }, + VirtAddr, +}; + +pub fn init_heap( + mapper: &mut impl Mapper, + frame_allocator: &mut impl FrameAllocator, +) -> Result<(), MapToError> { + let page_range = { + let heap_start = VirtAddr::new(HEAP_START as u64); + let heap_end = heap_start + HEAP_SIZE - 1u64; + let heap_start_page = Page::containing_address(heap_start); + let heap_end_page = Page::containing_address(heap_end); + Page::range_inclusive(heap_start_page, heap_end_page) + }; + + for page in page_range { + let frame = frame_allocator + .allocate_frame() + .ok_or(MapToError::FrameAllocationFailed)?; + let flags = PageTableFlags::PRESENT | PageTableFlags::WRITABLE; + unsafe { + mapper.map_to(page, frame, flags, frame_allocator)?.flush() + }; + } + + unsafe { + ALLOCATOR.lock().init(HEAP_START, HEAP_SIZE); + } + + Ok(()) +} + +/// A wrapper around spin::Mutex to permit trait implementations. 
+pub struct Locked { + inner: spin::Mutex, +} + +impl Locked { + pub const fn new(inner: A) -> Self { + Locked { + inner: spin::Mutex::new(inner), + } + } + + pub fn lock(&self) -> spin::MutexGuard { + self.inner.lock() + } +} + +/// Align the given address `addr` upwards to alignment `align`. +fn align_up(addr: usize, align: usize) -> usize { + let remainder = addr % align; + if remainder == 0 { + addr // addr already aligned + } else { + addr - remainder + align + } +} \ No newline at end of file diff --git a/src/allocator/bump.rs b/src/allocator/bump.rs new file mode 100644 index 0000000..16300c4 --- /dev/null +++ b/src/allocator/bump.rs @@ -0,0 +1,61 @@ +pub struct BumpAllocator { + heap_start: usize, + heap_end: usize, + next: usize, + allocations: usize, +} + +impl BumpAllocator { + /// Creates a new empty bump allocator. + pub const fn new() -> Self { + BumpAllocator { + heap_start: 0, + heap_end: 0, + next: 0, + allocations: 0, + } + } + + /// Initializes the bump allocator with the given heap bounds. + /// + /// This method is unsafe because the caller must ensure that the given + /// memory range is unused. Also, this method must be called only once. + pub unsafe fn init(&mut self, heap_start: usize, heap_size: usize) { + self.heap_start = heap_start; + self.heap_end = heap_start + heap_size; + self.next = heap_start; + } +} + +use alloc::alloc::{GlobalAlloc, Layout}; +use super::{align_up, Locked}; +use core::ptr; + +unsafe impl GlobalAlloc for Locked { + unsafe fn alloc(&self, layout: Layout) -> *mut u8 { + let mut bump = self.lock(); // get a mutable reference + + let alloc_start = align_up(bump.next, layout.align()); + let alloc_end = match alloc_start.checked_add(layout.size()) { + Some(end) => end, + None => return ptr::null_mut(), + }; + + if alloc_end > bump.heap_end { + ptr::null_mut() // out of memory + } else { + bump.next = alloc_end; + bump.allocations += 1; + alloc_start as *mut u8 + } + } + + unsafe fn dealloc(&self, _ptr: *mut u8, _layout: Layout) { + let mut bump = self.lock(); // get a mutable reference + + bump.allocations -= 1; + if bump.allocations == 0 { + bump.next = bump.heap_start; + } + } +} \ No newline at end of file diff --git a/src/allocator/fixed_size_block.rs b/src/allocator/fixed_size_block.rs new file mode 100644 index 0000000..316adf5 --- /dev/null +++ b/src/allocator/fixed_size_block.rs @@ -0,0 +1,103 @@ +use super::Locked; +use alloc::alloc::{GlobalAlloc, Layout}; +use core::{ + mem, + ptr::{self, NonNull}, +}; + +/// The block sizes to use. +/// +/// The sizes must each be power of 2 because they are also used as +/// the block alignment (alignments must be always powers of 2). +const BLOCK_SIZES: &[usize] = &[8, 16, 32, 64, 128, 256, 512, 1024, 2048]; + +/// Choose an appropriate block size for the given layout. +/// +/// Returns an index into the `BLOCK_SIZES` array. +fn list_index(layout: &Layout) -> Option { + let required_block_size = layout.size().max(layout.align()); + BLOCK_SIZES.iter().position(|&s| s >= required_block_size) +} + +struct ListNode { + next: Option<&'static mut ListNode>, +} + +pub struct FixedSizeBlockAllocator { + list_heads: [Option<&'static mut ListNode>; BLOCK_SIZES.len()], + fallback_allocator: linked_list_allocator::Heap, +} + +impl FixedSizeBlockAllocator { + /// Creates an empty FixedSizeBlockAllocator. 
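+    ///
+    /// (The `EMPTY` constant below lets `[EMPTY; BLOCK_SIZES.len()]` compile,
+    /// since array repeat expressions accept a `const` item even though
+    /// `Option<&'static mut ListNode>` is not `Copy`.)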
+ pub const fn new() -> Self { + const EMPTY: Option<&'static mut ListNode> = None; + FixedSizeBlockAllocator { + list_heads: [EMPTY; BLOCK_SIZES.len()], + fallback_allocator: linked_list_allocator::Heap::empty(), + } + } + + /// Initialize the allocator with the given heap bounds. + /// + /// This function is unsafe because the caller must guarantee that the given + /// heap bounds are valid and that the heap is unused. This method must be + /// called only once. + pub unsafe fn init(&mut self, heap_start: usize, heap_size: usize) { + self.fallback_allocator.init(heap_start, heap_size); + } + + /// Allocates using the fallback allocator. + fn fallback_alloc(&mut self, layout: Layout) -> *mut u8 { + match self.fallback_allocator.allocate_first_fit(layout) { + Ok(ptr) => ptr.as_ptr(), + Err(_) => ptr::null_mut(), + } + } +} + +unsafe impl GlobalAlloc for Locked { + unsafe fn alloc(&self, layout: Layout) -> *mut u8 { + let mut allocator = self.lock(); + match list_index(&layout) { + Some(index) => { + match allocator.list_heads[index].take() { + Some(node) => { + allocator.list_heads[index] = node.next.take(); + node as *mut ListNode as *mut u8 + } + None => { + // no block exists in list => allocate new block + let block_size = BLOCK_SIZES[index]; + // only works if all block sizes are a power of 2 + let block_align = block_size; + let layout = Layout::from_size_align(block_size, block_align).unwrap(); + allocator.fallback_alloc(layout) + } + } + } + None => allocator.fallback_alloc(layout), + } + } + + unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) { + let mut allocator = self.lock(); + match list_index(&layout) { + Some(index) => { + let new_node = ListNode { + next: allocator.list_heads[index].take(), + }; + // verify that block has size and alignment required for storing node + assert!(mem::size_of::() <= BLOCK_SIZES[index]); + assert!(mem::align_of::() <= BLOCK_SIZES[index]); + let new_node_ptr = ptr as *mut ListNode; + new_node_ptr.write(new_node); + allocator.list_heads[index] = Some(&mut *new_node_ptr); + } + None => { + let ptr = NonNull::new(ptr).unwrap(); + allocator.fallback_allocator.deallocate(ptr, layout); + } + } + } +} \ No newline at end of file diff --git a/src/allocator/linked_list.rs b/src/allocator/linked_list.rs new file mode 100644 index 0000000..e3ba31e --- /dev/null +++ b/src/allocator/linked_list.rs @@ -0,0 +1,153 @@ +use super::align_up; +use core::mem; + +struct ListNode { + size: usize, + next: Option<&'static mut ListNode>, +} + +impl ListNode { + const fn new(size: usize) -> Self { + ListNode { size, next: None } + } + + fn start_addr(&self) -> usize { + self as *const Self as usize + } + + fn end_addr(&self) -> usize { + self.start_addr() + self.size + } +} + +pub struct LinkedListAllocator { + head: ListNode, +} + +impl LinkedListAllocator { + /// Creates an empty LinkedListAllocator. + pub const fn new() -> Self { + Self { + head: ListNode::new(0), + } + } + + /// Initialize the allocator with the given heap bounds. + /// + /// This function is unsafe because the caller must guarantee that the given + /// heap bounds are valid and that the heap is unused. This method must be + /// called only once. + pub unsafe fn init(&mut self, heap_start: usize, heap_size: usize) { + self.add_free_region(heap_start, heap_size); + } + + /// Adds the given memory region to the front of the list. 
+ unsafe fn add_free_region(&mut self, addr: usize, size: usize) { + // ensure that the freed region is capable of holding ListNode + assert_eq!(align_up(addr, mem::align_of::()), addr); + assert!(size >= mem::size_of::()); + + // create a new list node and append it at the start of the list + let mut node = ListNode::new(size); + node.next = self.head.next.take(); + let node_ptr = addr as *mut ListNode; + node_ptr.write(node); + self.head.next = Some(&mut *node_ptr) + } + /// Looks for a free region with the given size and alignment and removes + /// it from the list. + /// + /// Returns a tuple of the list node and the start address of the allocation. + fn find_region(&mut self, size: usize, align: usize) + -> Option<(&'static mut ListNode, usize)> + { + // reference to current list node, updated for each iteration + let mut current = &mut self.head; + // look for a large enough memory region in linked list + while let Some(ref mut region) = current.next { + if let Ok(alloc_start) = Self::alloc_from_region(®ion, size, align) { + // region suitable for allocation -> remove node from list + let next = region.next.take(); + let ret = Some((current.next.take().unwrap(), alloc_start)); + current.next = next; + return ret; + } else { + // region not suitable -> continue with next region + current = current.next.as_mut().unwrap(); + } + } + + // no suitable region found + None + } + /// Try to use the given region for an allocation with given size and + /// alignment. + /// + /// Returns the allocation start address on success. + fn alloc_from_region(region: &ListNode, size: usize, align: usize) + -> Result + { + let alloc_start = align_up(region.start_addr(), align); + let alloc_end = alloc_start.checked_add(size).ok_or(())?; + + if alloc_end > region.end_addr() { + // region too small + return Err(()); + } + + let excess_size = region.end_addr() - alloc_end; + if excess_size > 0 && excess_size < mem::size_of::() { + // rest of region too small to hold a ListNode (required because the + // allocation splits the region in a used and a free part) + return Err(()); + } + + // region suitable for allocation + Ok(alloc_start) + } +} + +use super::Locked; +use alloc::alloc::{GlobalAlloc, Layout}; +use core::ptr; + +unsafe impl GlobalAlloc for Locked { + unsafe fn alloc(&self, layout: Layout) -> *mut u8 { + // perform layout adjustments + let (size, align) = LinkedListAllocator::size_align(layout); + let mut allocator = self.lock(); + + if let Some((region, alloc_start)) = allocator.find_region(size, align) { + let alloc_end = alloc_start.checked_add(size).expect("overflow"); + let excess_size = region.end_addr() - alloc_end; + if excess_size > 0 { + allocator.add_free_region(alloc_end, excess_size); + } + alloc_start as *mut u8 + } else { + ptr::null_mut() + } + } + + unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) { + // perform layout adjustments + let (size, _) = LinkedListAllocator::size_align(layout); + + self.lock().add_free_region(ptr as usize, size) + } +} + +impl LinkedListAllocator { + /// Adjust the given layout so that the resulting allocated memory + /// region is also capable of storing a `ListNode`. + /// + /// Returns the adjusted size and alignment as a (size, align) tuple. 
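+    ///
+    /// For example, assuming 64-bit pointers (so `ListNode` is 16 bytes with
+    /// alignment 8): a request of size 1 and alignment 1 is padded up to
+    /// `(size, align) == (16, 8)`, so every freed block can later be reused as a list node.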
+ fn size_align(layout: Layout) -> (usize, usize) { + let layout = layout + .align_to(mem::align_of::()) + .expect("adjusting alignment failed") + .pad_to_align(); + let size = layout.size().max(mem::size_of::()); + (size, layout.align()) + } +} \ No newline at end of file diff --git a/src/gdt.rs b/src/gdt.rs new file mode 100644 index 0000000..ea33629 --- /dev/null +++ b/src/gdt.rs @@ -0,0 +1,47 @@ +use x86_64::VirtAddr; +use x86_64::structures::tss::TaskStateSegment; +use x86_64::structures::gdt::{GlobalDescriptorTable, Descriptor}; +use x86_64::structures::gdt::SegmentSelector; +use lazy_static::lazy_static; + +pub const DOUBLE_FAULT_IST_INDEX: u16 = 0; + +lazy_static! { + static ref TSS: TaskStateSegment = { + let mut tss = TaskStateSegment::new(); + tss.interrupt_stack_table[DOUBLE_FAULT_IST_INDEX as usize] = { + const STACK_SIZE: usize = 4096 * 5; + static mut STACK: [u8; STACK_SIZE] = [0; STACK_SIZE]; + + let stack_start = VirtAddr::from_ptr(unsafe { &STACK }); + let stack_end = stack_start + STACK_SIZE; + stack_end + }; + tss + }; +} + +lazy_static! { + static ref GDT: (GlobalDescriptorTable, Selectors) = { + let mut gdt = GlobalDescriptorTable::new(); + let code_selector = gdt.add_entry(Descriptor::kernel_code_segment()); + let tss_selector = gdt.add_entry(Descriptor::tss_segment(&TSS)); + (gdt, Selectors { code_selector, tss_selector }) + }; +} + +struct Selectors { + code_selector: SegmentSelector, + tss_selector: SegmentSelector, +} + +pub fn init() { + use x86_64::instructions::segmentation::set_cs; + use x86_64::instructions::tables::load_tss; + + GDT.0.load(); + unsafe { + set_cs(GDT.1.code_selector); + load_tss(GDT.1.tss_selector); + } +} diff --git a/src/interrupts.rs b/src/interrupts.rs new file mode 100644 index 0000000..4e3ccf0 --- /dev/null +++ b/src/interrupts.rs @@ -0,0 +1,126 @@ +use x86_64::structures::idt::{InterruptDescriptorTable, InterruptStackFrame, PageFaultErrorCode}; +use crate::println; +use crate::print; +use crate::gdt; +use crate::hlt_loop; + +use lazy_static::lazy_static; + +use pic8259::ChainedPics; +use spin; + +lazy_static! { + static ref IDT: InterruptDescriptorTable = { + let mut idt = InterruptDescriptorTable::new(); + idt.breakpoint.set_handler_fn(breakpoint_handler); + unsafe { + idt.double_fault.set_handler_fn(double_fault_handler) + .set_stack_index(gdt::DOUBLE_FAULT_IST_INDEX); + } + + idt.page_fault.set_handler_fn(page_fault_handler); + + idt[InterruptIndex::Timer.as_usize()] + .set_handler_fn(timer_interrupt_handler); + + idt[InterruptIndex::Keyboard.as_usize()] + .set_handler_fn(keyboard_interrupt_handler); + + idt + }; +} + +extern "x86-interrupt" fn breakpoint_handler( + stack_frame: InterruptStackFrame) +{ + println!("EXCEPTION: BREAKPOINT\n{:#?}", stack_frame); +} + +extern "x86-interrupt" fn double_fault_handler( + stack_frame: InterruptStackFrame, _error_code: u64) -> ! 
+{ + panic!("EXCEPTION: DOUBLE FAULT\n{:#?}", stack_frame); +} + +extern "x86-interrupt" fn timer_interrupt_handler( + _stack_frame: InterruptStackFrame) +{ + print!("."); + + unsafe { + PICS.lock() + .notify_end_of_interrupt(InterruptIndex::Timer.as_u8()); + } +} + +pub fn init_idt() { + IDT.load(); +} + +#[test_case] +fn test_breakpoint_exception() { + // invoke a breakpoint exception + x86_64::instructions::interrupts::int3(); +} + +pub const PIC_1_OFFSET: u8 = 32; +pub const PIC_2_OFFSET: u8 = PIC_1_OFFSET + 8; + +pub static PICS: spin::Mutex = + spin::Mutex::new(unsafe { ChainedPics::new(PIC_1_OFFSET, PIC_2_OFFSET) }); + +#[derive(Debug, Clone, Copy)] +#[repr(u8)] +pub enum InterruptIndex { + Timer = PIC_1_OFFSET, + Keyboard, +} + +impl InterruptIndex { + fn as_u8(self) -> u8 { + self as u8 + } + + fn as_usize(self) -> usize { + usize::from(self.as_u8()) + } +} + +extern "x86-interrupt" fn keyboard_interrupt_handler( + _stack_frame: InterruptStackFrame) +{ + use pc_keyboard::{layouts, DecodedKey, HandleControl, Keyboard, ScancodeSet1}; + use spin::Mutex; + use x86_64::instructions::port::Port; + + lazy_static! { + static ref KEYBOARD: Mutex> = + Mutex::new(Keyboard::new(layouts::Us104Key, ScancodeSet1, + HandleControl::Ignore) + ); + } + + let mut keyboard = KEYBOARD.lock(); + let mut port = Port::new(0x60); + + let scancode: u8 = unsafe { port.read() }; + crate::task::keyboard::add_scancode(scancode); + + unsafe { + PICS.lock() + .notify_end_of_interrupt(InterruptIndex::Keyboard.as_u8()); + } +} + +extern "x86-interrupt" fn page_fault_handler( + stack_frame: InterruptStackFrame, + error_code: PageFaultErrorCode, +) { + use x86_64::registers::control::Cr2; + + println!("EXCEPTION: PAGE FAULT"); + println!("Accessed Address: {:?}", Cr2::read()); + println!("Error Code: {:?}", error_code); + println!("{:#?}", stack_frame); + hlt_loop(); +} \ No newline at end of file diff --git a/src/lib.rs b/src/lib.rs new file mode 100644 index 0000000..2452b22 --- /dev/null +++ b/src/lib.rs @@ -0,0 +1,104 @@ +#![no_std] + +#![cfg_attr(test, no_main)] +#![feature(custom_test_frameworks)] +#![test_runner(crate::test_runner)] +#![reexport_test_harness_main = "test_main"] +#![feature(abi_x86_interrupt)] +#![feature(alloc_error_handler)] +#![feature(const_mut_refs)] + +use core::panic::PanicInfo; +extern crate alloc; + +pub mod serial; +pub mod vga_buffer; +pub mod interrupts; +pub mod gdt; +pub mod memory; +pub mod task; +pub mod allocator; + +#[cfg(test)] +use bootloader::{entry_point, BootInfo}; + +#[cfg(test)] +entry_point!(test_kernel_main); + +pub trait Testable { + fn run(&self) -> (); +} + +impl Testable for T +where + T: Fn(), +{ + fn run(&self) { + serial_print!("{}...\t", core::any::type_name::()); + self(); + serial_println!("[ok]"); + } +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +#[repr(u32)] +pub enum QemuExitCode { + Success = 0x10, + Failed = 0x11, +} + +pub fn exit_qemu(exit_code: QemuExitCode) { + use x86_64::instructions::port::Port; + + unsafe { + let mut port = Port::new(0xf4); + port.write(exit_code as u32); + } +} + +pub fn test_runner(tests: &[&dyn Testable]) { + serial_println!("Running {} tests", tests.len()); + for test in tests { + test.run(); + } + exit_qemu(QemuExitCode::Success); +} + +pub fn test_panic_handler(info: &PanicInfo) -> ! { + serial_println!("[failed]\n"); + serial_println!("Error: {}\n", info); + exit_qemu(QemuExitCode::Failed); + hlt_loop(); +} + +/// Entry point for `cargo test` +#[cfg(test)] +fn test_kernel_main(_boot_info: &'static BootInfo) -> ! 
{ + init(); + test_main(); + hlt_loop(); +} + +#[cfg(test)] +#[panic_handler] +fn panic(info: &PanicInfo) -> ! { + test_panic_handler(info) +} + +pub fn init() { + gdt::init(); + interrupts::init_idt(); + unsafe { interrupts::PICS.lock().initialize() }; + x86_64::instructions::interrupts::enable(); +} + +pub fn hlt_loop() -> ! { + loop { + x86_64::instructions::hlt(); + } +} + +#[alloc_error_handler] +fn alloc_error_handler(layout: alloc::alloc::Layout) -> ! { + panic!("allocation error: {:?}", layout) +} \ No newline at end of file diff --git a/src/main.rs b/src/main.rs new file mode 100644 index 0000000..b162a41 --- /dev/null +++ b/src/main.rs @@ -0,0 +1,79 @@ +#![no_std] +#![no_main] +#![feature(custom_test_frameworks)] +#![test_runner(gros::test_runner)] +#![reexport_test_harness_main = "test_main"] + + +use core::panic::PanicInfo; +use gros::println; +use bootloader::{BootInfo, entry_point}; +use gros::memory::BootInfoFrameAllocator; +use gros::task::{Task, simple_executor::SimpleExecutor}; +use gros::task::keyboard; +use gros::task::executor::Executor; + +extern crate alloc; + +use alloc::{boxed::Box, vec, vec::Vec, rc::Rc}; + +entry_point!(kernel_main); + +fn kernel_main(boot_info: &'static BootInfo) -> ! { + use gros::allocator; + use gros::memory; + use x86_64::{structures::paging::Page, VirtAddr}; + println!("Hi from GROS{}", "!"); + println!("{}", " __ _ _ __ ___ ___"); + println!("{}", " / _` | '__/ _ \\/ __|"); + println!("{}", " | (_| | | | (_) \\__ \\"); + println!("{}", " \\__, |_| \\___/|___/"); + println!("{}", " |___/"); + + + gros::init(); + + let phys_mem_offset = VirtAddr::new(boot_info.physical_memory_offset); + let mut mapper = unsafe { memory::init(phys_mem_offset) }; + let mut frame_allocator = unsafe { + BootInfoFrameAllocator::init(&boot_info.memory_map) + }; + + allocator::init_heap(&mut mapper, &mut frame_allocator) + .expect("heap initialization failed"); + + let mut executor = Executor::new(); + executor.spawn(Task::new(example_task())); + executor.spawn(Task::new(keyboard::print_keypresses())); + executor.run(); + + #[cfg(test)] + test_main(); + + println!("It did not crash!"); + gros::hlt_loop(); +} + +async fn async_number() -> u32 { + 42 +} + +async fn example_task() { + let number = async_number().await; + println!("async number: {}", number); +} + + +/// This function is called on panic. +#[cfg(not(test))] +#[panic_handler] +fn panic(info: &PanicInfo) -> ! { + println!("{}", info); + gros::hlt_loop(); +} + +#[cfg(test)] +#[panic_handler] +fn panic(info: &PanicInfo) -> ! { + gros::test_panic_handler(info) +} \ No newline at end of file diff --git a/src/memory.rs b/src/memory.rs new file mode 100644 index 0000000..69bba4f --- /dev/null +++ b/src/memory.rs @@ -0,0 +1,171 @@ +use x86_64::{ + structures::paging::PageTable, + VirtAddr, +}; + +use x86_64::structures::paging::OffsetPageTable; + +/// Initialize a new OffsetPageTable. +/// +/// This function is unsafe because the caller must guarantee that the +/// complete physical memory is mapped to virtual memory at the passed +/// `physical_memory_offset`. Also, this function must be only called once +/// to avoid aliasing `&mut` references (which is undefined behavior). +pub unsafe fn init(physical_memory_offset: VirtAddr) -> OffsetPageTable<'static> { + let level_4_table = active_level_4_table(physical_memory_offset); + OffsetPageTable::new(level_4_table, physical_memory_offset) +} + +/// Returns a mutable reference to the active level 4 table. 
+/// +/// This function is unsafe because the caller must guarantee that the +/// complete physical memory is mapped to virtual memory at the passed +/// `physical_memory_offset`. Also, this function must be only called once +/// to avoid aliasing `&mut` references (which is undefined behavior). +unsafe fn active_level_4_table(physical_memory_offset: VirtAddr) + -> &'static mut PageTable +{ + use x86_64::registers::control::Cr3; + + let (level_4_table_frame, _) = Cr3::read(); + + let phys = level_4_table_frame.start_address(); + let virt = physical_memory_offset + phys.as_u64(); + let page_table_ptr: *mut PageTable = virt.as_mut_ptr(); + + &mut *page_table_ptr // unsafe +} + + +use x86_64::PhysAddr; + +/// Translates the given virtual address to the mapped physical address, or +/// `None` if the address is not mapped. +/// +/// This function is unsafe because the caller must guarantee that the +/// complete physical memory is mapped to virtual memory at the passed +/// `physical_memory_offset`. +pub unsafe fn translate_addr(addr: VirtAddr, physical_memory_offset: VirtAddr) + -> Option +{ + translate_addr_inner(addr, physical_memory_offset) +} + +/// Private function that is called by `translate_addr`. +/// +/// This function is safe to limit the scope of `unsafe` because Rust treats +/// the whole body of unsafe functions as an unsafe block. This function must +/// only be reachable through `unsafe fn` from outside of this module. +fn translate_addr_inner(addr: VirtAddr, physical_memory_offset: VirtAddr) + -> Option +{ + use x86_64::structures::paging::page_table::FrameError; + use x86_64::registers::control::Cr3; + + // read the active level 4 frame from the CR3 register + let (level_4_table_frame, _) = Cr3::read(); + + let table_indexes = [ + addr.p4_index(), addr.p3_index(), addr.p2_index(), addr.p1_index() + ]; + let mut frame = level_4_table_frame; + + // traverse the multi-level page table + for &index in &table_indexes { + // convert the frame into a page table reference + let virt = physical_memory_offset + frame.start_address().as_u64(); + let table_ptr: *const PageTable = virt.as_ptr(); + let table = unsafe {&*table_ptr}; + + // read the page table entry and update `frame` + let entry = &table[index]; + frame = match entry.frame() { + Ok(frame) => frame, + Err(FrameError::FrameNotPresent) => return None, + Err(FrameError::HugeFrame) => panic!("huge pages not supported"), + }; + } + + // calculate the physical address by adding the page offset + Some(frame.start_address() + u64::from(addr.page_offset())) +} + +use x86_64::{ + structures::paging::{Page, PhysFrame, Mapper, Size4KiB, FrameAllocator} +}; + +/// Creates an example mapping for the given page to frame `0xb8000`. +pub fn create_example_mapping( + page: Page, + mapper: &mut OffsetPageTable, + frame_allocator: &mut impl FrameAllocator, +) { + use x86_64::structures::paging::PageTableFlags as Flags; + + let frame = PhysFrame::containing_address(PhysAddr::new(0xb8000)); + let flags = Flags::PRESENT | Flags::WRITABLE; + + let map_to_result = unsafe { + // FIXME: this is not safe, we do it only for testing + mapper.map_to(page, frame, flags, frame_allocator) + }; + map_to_result.expect("map_to failed").flush(); +} + +/// A FrameAllocator that always returns `None`. 
+pub struct EmptyFrameAllocator; + +unsafe impl FrameAllocator for EmptyFrameAllocator { + fn allocate_frame(&mut self) -> Option { + None + } +} + +use bootloader::bootinfo::MemoryMap; + +/// A FrameAllocator that returns usable frames from the bootloader's memory map. +pub struct BootInfoFrameAllocator { + memory_map: &'static MemoryMap, + next: usize, +} + +impl BootInfoFrameAllocator { + /// Create a FrameAllocator from the passed memory map. + /// + /// This function is unsafe because the caller must guarantee that the passed + /// memory map is valid. The main requirement is that all frames that are marked + /// as `USABLE` in it are really unused. + pub unsafe fn init(memory_map: &'static MemoryMap) -> Self { + BootInfoFrameAllocator { + memory_map, + next: 0, + } + } +} + +use bootloader::bootinfo::MemoryRegionType; + +impl BootInfoFrameAllocator { + /// Returns an iterator over the usable frames specified in the memory map. + fn usable_frames(&self) -> impl Iterator { + // get usable regions from memory map + let regions = self.memory_map.iter(); + let usable_regions = regions + .filter(|r| r.region_type == MemoryRegionType::Usable); + // map each region to its address range + let addr_ranges = usable_regions + .map(|r| r.range.start_addr()..r.range.end_addr()); + // transform to an iterator of frame start addresses + let frame_addresses = addr_ranges.flat_map(|r| r.step_by(4096)); + // create `PhysFrame` types from the start addresses + frame_addresses.map(|addr| PhysFrame::containing_address(PhysAddr::new(addr))) + } +} + +unsafe impl FrameAllocator for BootInfoFrameAllocator { + fn allocate_frame(&mut self) -> Option { + let frame = self.usable_frames().nth(self.next); + self.next += 1; + frame + } +} \ No newline at end of file diff --git a/src/serial.rs b/src/serial.rs new file mode 100644 index 0000000..0605e55 --- /dev/null +++ b/src/serial.rs @@ -0,0 +1,41 @@ +use uart_16550::SerialPort; +use spin::Mutex; +use lazy_static::lazy_static; + +lazy_static! { + pub static ref SERIAL1: Mutex = { + let mut serial_port = unsafe { SerialPort::new(0x3F8) }; + serial_port.init(); + Mutex::new(serial_port) + }; +} + +#[doc(hidden)] +pub fn _print(args: ::core::fmt::Arguments) { + use core::fmt::Write; + use x86_64::instructions::interrupts; // new + + interrupts::without_interrupts(|| { // new + SERIAL1 + .lock() + .write_fmt(args) + .expect("Printing to serial failed"); + }); +} + +/// Prints to the host through the serial interface. +#[macro_export] +macro_rules! serial_print { + ($($arg:tt)*) => { + $crate::serial::_print(format_args!($($arg)*)); + }; +} + +/// Prints to the host through the serial interface, appending a newline. +#[macro_export] +macro_rules! serial_println { + () => ($crate::serial_print!("\n")); + ($fmt:expr) => ($crate::serial_print!(concat!($fmt, "\n"))); + ($fmt:expr, $($arg:tt)*) => ($crate::serial_print!( + concat!($fmt, "\n"), $($arg)*)); +} \ No newline at end of file diff --git a/src/task/executor.rs b/src/task/executor.rs new file mode 100644 index 0000000..5f12272 --- /dev/null +++ b/src/task/executor.rs @@ -0,0 +1,99 @@ +use super::{Task, TaskId}; +use alloc::{collections::BTreeMap, sync::Arc}; +use core::task::{Context, Poll, Waker}; +use crossbeam_queue::ArrayQueue; + +pub struct Executor { + tasks: BTreeMap, + task_queue: Arc>, + waker_cache: BTreeMap, +} + +impl Executor { + pub fn run(&mut self) -> ! 
{ + loop { + self.run_ready_tasks(); + self.sleep_if_idle(); + } + } + fn sleep_if_idle(&self) { + use x86_64::instructions::interrupts::{self, enable_and_hlt}; + + interrupts::disable(); + if self.task_queue.is_empty() { + enable_and_hlt(); + } else { + interrupts::enable(); + } + } + pub fn new() -> Self { + Executor { + tasks: BTreeMap::new(), + task_queue: Arc::new(ArrayQueue::new(100)), + waker_cache: BTreeMap::new(), + } + } + pub fn spawn(&mut self, task: Task) { + let task_id = task.id; + if self.tasks.insert(task.id, task).is_some() { + panic!("task with same ID already in tasks"); + } + self.task_queue.push(task_id).expect("queue full"); + } + fn run_ready_tasks(&mut self) { + // destructure `self` to avoid borrow checker errors + let Self { + tasks, + task_queue, + waker_cache, + } = self; + + while let Ok(task_id) = task_queue.pop() { + let task = match tasks.get_mut(&task_id) { + Some(task) => task, + None => continue, // task no longer exists + }; + let waker = waker_cache + .entry(task_id) + .or_insert_with(|| TaskWaker::new(task_id, task_queue.clone())); + let mut context = Context::from_waker(waker); + match task.poll(&mut context) { + Poll::Ready(()) => { + // task done -> remove it and its cached waker + tasks.remove(&task_id); + waker_cache.remove(&task_id); + } + Poll::Pending => {} + } + } + } +} + +struct TaskWaker { + task_id: TaskId, + task_queue: Arc>, +} + +impl TaskWaker { + fn new(task_id: TaskId, task_queue: Arc>) -> Waker { + Waker::from(Arc::new(TaskWaker { + task_id, + task_queue, + })) + } + fn wake_task(&self) { + self.task_queue.push(self.task_id).expect("task_queue full"); + } +} + +use alloc::task::Wake; + +impl Wake for TaskWaker { + fn wake(self: Arc) { + self.wake_task(); + } + + fn wake_by_ref(self: &Arc) { + self.wake_task(); + } +} \ No newline at end of file diff --git a/src/task/keyboard.rs b/src/task/keyboard.rs new file mode 100644 index 0000000..cc79053 --- /dev/null +++ b/src/task/keyboard.rs @@ -0,0 +1,81 @@ +use conquer_once::spin::OnceCell; +use crossbeam_queue::ArrayQueue; +use crate::println; + +static SCANCODE_QUEUE: OnceCell> = OnceCell::uninit(); + +pub struct ScancodeStream { + _private: (), +} + +impl ScancodeStream { + pub fn new() -> Self { + SCANCODE_QUEUE.try_init_once(|| ArrayQueue::new(100)) + .expect("ScancodeStream::new should only be called once"); + ScancodeStream { _private: () } + } +} + +use core::{pin::Pin, task::{Poll, Context}}; +use futures_util::stream::Stream; + +impl Stream for ScancodeStream { + type Item = u8; + + fn poll_next(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + let queue = SCANCODE_QUEUE + .try_get() + .expect("scancode queue not initialized"); + + // fast path + if let Ok(scancode) = queue.pop() { + return Poll::Ready(Some(scancode)); + } + + WAKER.register(&cx.waker()); + match queue.pop() { + Ok(scancode) => { + WAKER.take(); + Poll::Ready(Some(scancode)) + } + Err(crossbeam_queue::PopError) => Poll::Pending, + } + } +} + +use futures_util::task::AtomicWaker; + +static WAKER: AtomicWaker = AtomicWaker::new(); + +pub(crate) fn add_scancode(scancode: u8) { + if let Ok(queue) = SCANCODE_QUEUE.try_get() { + if let Err(_) = queue.push(scancode) { + println!("WARNING: scancode queue full; dropping keyboard input"); + } else { + WAKER.wake(); // new + } + } else { + println!("WARNING: scancode queue uninitialized"); + } +} + +use futures_util::stream::StreamExt; +use pc_keyboard::{layouts, DecodedKey, HandleControl, Keyboard, ScancodeSet1}; +use crate::print; + +pub async fn print_keypresses() { 
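+    // ScancodeStream::poll_next pops from SCANCODE_QUEUE, which the keyboard
+    // interrupt handler fills via add_scancode; when the queue is empty it registers
+    // WAKER, so this task is re-polled only after the next scancode arrives.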
+ let mut scancodes = ScancodeStream::new(); + let mut keyboard = Keyboard::new(layouts::Us104Key, ScancodeSet1, + HandleControl::Ignore); + + while let Some(scancode) = scancodes.next().await { + if let Ok(Some(key_event)) = keyboard.add_byte(scancode) { + if let Some(key) = keyboard.process_keyevent(key_event) { + match key { + DecodedKey::Unicode(character) => print!("{}", character), + DecodedKey::RawKey(key) => print!("{:?}", key), + } + } + } + } +} \ No newline at end of file diff --git a/src/task/mod.rs b/src/task/mod.rs new file mode 100644 index 0000000..4a1480e --- /dev/null +++ b/src/task/mod.rs @@ -0,0 +1,39 @@ +use alloc::boxed::Box; +use core::{ + future::Future, + pin::Pin, + sync::atomic::{AtomicU64, Ordering}, + task::{Context, Poll}, +}; + +pub mod executor; +pub mod keyboard; +pub mod simple_executor; + +pub struct Task { + id: TaskId, + future: Pin>>, +} + +impl Task { + pub fn new(future: impl Future + 'static) -> Task { + Task { + id: TaskId::new(), + future: Box::pin(future), + } + } + + fn poll(&mut self, context: &mut Context) -> Poll<()> { + self.future.as_mut().poll(context) + } +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] +struct TaskId(u64); + +impl TaskId { + fn new() -> Self { + static NEXT_ID: AtomicU64 = AtomicU64::new(0); + TaskId(NEXT_ID.fetch_add(1, Ordering::Relaxed)) + } +} diff --git a/src/task/simple_executor.rs b/src/task/simple_executor.rs new file mode 100644 index 0000000..acb6517 --- /dev/null +++ b/src/task/simple_executor.rs @@ -0,0 +1,44 @@ +use super::Task; +use alloc::collections::VecDeque; +use core::task::{Context, Poll, RawWaker, RawWakerVTable, Waker}; + +pub struct SimpleExecutor { + task_queue: VecDeque, +} + +impl SimpleExecutor { + pub fn new() -> SimpleExecutor { + SimpleExecutor { + task_queue: VecDeque::new(), + } + } + + pub fn spawn(&mut self, task: Task) { + self.task_queue.push_back(task) + } + + pub fn run(&mut self) { + while let Some(mut task) = self.task_queue.pop_front() { + let waker = dummy_waker(); + let mut context = Context::from_waker(&waker); + match task.poll(&mut context) { + Poll::Ready(()) => {} // task done + Poll::Pending => self.task_queue.push_back(task), + } + } + } +} + +fn dummy_raw_waker() -> RawWaker { + fn no_op(_: *const ()) {} + fn clone(_: *const ()) -> RawWaker { + dummy_raw_waker() + } + + let vtable = &RawWakerVTable::new(clone, no_op, no_op, no_op); + RawWaker::new(0 as *const (), vtable) +} + +fn dummy_waker() -> Waker { + unsafe { Waker::from_raw(dummy_raw_waker()) } +} \ No newline at end of file diff --git a/src/vga_buffer.rs b/src/vga_buffer.rs new file mode 100644 index 0000000..221645c --- /dev/null +++ b/src/vga_buffer.rs @@ -0,0 +1,177 @@ +use volatile::Volatile; +use core::fmt; +use lazy_static::lazy_static; +use spin::Mutex; + +#[allow(dead_code)] +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +#[repr(u8)] +pub enum Color { + Black = 0, + Blue = 1, + Green = 2, + Cyan = 3, + Red = 4, + Magenta = 5, + Brown = 6, + LightGray = 7, + DarkGray = 8, + LightBlue = 9, + LightGreen = 10, + LightCyan = 11, + LightRed = 12, + Pink = 13, + Yellow = 14, + White = 15, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +#[repr(transparent)] +struct ColorCode(u8); + +impl ColorCode { + fn new(foreground: Color, background: Color) -> ColorCode { + ColorCode((background as u8) << 4 | (foreground as u8)) + } +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +#[repr(C)] +struct ScreenChar { + ascii_character: u8, + color_code: ColorCode, +} + +const BUFFER_HEIGHT: usize = 25; 
+const BUFFER_WIDTH: usize = 80; + +#[repr(transparent)] +struct Buffer { + chars: [[Volatile; BUFFER_WIDTH]; BUFFER_HEIGHT], +} + +pub struct Writer { + column_position: usize, + color_code: ColorCode, + buffer: &'static mut Buffer, +} + +impl Writer { + pub fn write_byte(&mut self, byte: u8) { + match byte { + b'\n' => self.new_line(), + byte => { + if self.column_position >= BUFFER_WIDTH { + self.new_line(); + } + let row = BUFFER_HEIGHT - 1; + let col = self.column_position; + + let color_code = self.color_code; + self.buffer.chars[row][col].write(ScreenChar { + ascii_character: byte, + color_code, + }); + + self.column_position += 1; + } + } + } + + fn new_line(&mut self) { + for row in 1..BUFFER_HEIGHT { + for col in 0..BUFFER_WIDTH { + let character = self.buffer.chars[row][col].read(); + self.buffer.chars[row - 1][col].write(character); + } + } + self.clear_row(BUFFER_HEIGHT - 1); + self.column_position = 0; + } + + fn clear_row(&mut self, row: usize) { + let blank = ScreenChar { + ascii_character: b' ', + color_code: self.color_code, + }; + for col in 0..BUFFER_WIDTH { + self.buffer.chars[row][col].write(blank); + } + } + + pub fn write_string(&mut self, s: &str) { + for byte in s.bytes() { + match byte { + // printable ASCII byte or newline + 0x20..=0x7e | b'\n' => self.write_byte(byte), + // not part of printable ASCII range + _ => self.write_byte(0xfe), + } + + } + } +} + +impl fmt::Write for Writer { + fn write_str(&mut self, s: &str) -> fmt::Result { + self.write_string(s); + Ok(()) + } +} + +lazy_static! { + pub static ref WRITER: Mutex = Mutex::new(Writer { + column_position: 0, + color_code: ColorCode::new(Color::Yellow, Color::Black), + buffer: unsafe { &mut *(0xb8000 as *mut Buffer) }, + }); +} + +#[macro_export] +macro_rules! print { + ($($arg:tt)*) => ($crate::vga_buffer::_print(format_args!($($arg)*))); +} + +#[macro_export] +macro_rules! println { + () => ($crate::print!("\n")); + ($($arg:tt)*) => ($crate::print!("{}\n", format_args!($($arg)*))); +} + +#[doc(hidden)] +pub fn _print(args: fmt::Arguments) { + use core::fmt::Write; + use x86_64::instructions::interrupts; + + interrupts::without_interrupts(|| { + WRITER.lock().write_fmt(args).unwrap(); + }); +} + +#[test_case] +fn test_println_simple() { + println!("test_println_simple output"); +} + +#[test_case] +fn test_println_many() { + for _ in 0..200 { + println!("test_println_many output"); + } +} + +#[test_case] +fn test_println_output() { + use core::fmt::Write; + use x86_64::instructions::interrupts; + + let s = "Some test string that fits on a single line"; + interrupts::without_interrupts(|| { + let mut writer = WRITER.lock(); + writeln!(writer, "\n{}", s).expect("writeln failed"); + for (i, c) in s.chars().enumerate() { + let screen_char = writer.buffer.chars[BUFFER_HEIGHT - 2][i].read(); + assert_eq!(char::from(screen_char.ascii_character), c); + } + }); +} \ No newline at end of file diff --git a/tests/basic_boot.rs b/tests/basic_boot.rs new file mode 100644 index 0000000..16c343c --- /dev/null +++ b/tests/basic_boot.rs @@ -0,0 +1,27 @@ +#![no_std] +#![no_main] +#![feature(custom_test_frameworks)] +#![reexport_test_harness_main = "test_main"] +#![test_runner(gros::test_runner)] + +use core::panic::PanicInfo; + +#[no_mangle] // don't mangle the name of this function +pub extern "C" fn _start() -> ! { + test_main(); + + loop {} +} + + +#[panic_handler] +fn panic(info: &PanicInfo) -> ! 
{ + gros::test_panic_handler(info) +} + +use gros::println; + +#[test_case] +fn test_println() { + println!("test_println output"); +} \ No newline at end of file diff --git a/tests/heap_allocation.rs b/tests/heap_allocation.rs new file mode 100644 index 0000000..cf6fd20 --- /dev/null +++ b/tests/heap_allocation.rs @@ -0,0 +1,69 @@ +#![no_std] +#![no_main] +#![feature(custom_test_frameworks)] +#![test_runner(gros::test_runner)] +#![reexport_test_harness_main = "test_main"] + +extern crate alloc; + +use bootloader::{entry_point, BootInfo}; +use core::panic::PanicInfo; + +entry_point!(main); + +fn main(boot_info: &'static BootInfo) -> ! { + use gros::allocator; + use gros::memory::{self, BootInfoFrameAllocator}; + use x86_64::VirtAddr; + + gros::init(); + let phys_mem_offset = VirtAddr::new(boot_info.physical_memory_offset); + let mut mapper = unsafe { memory::init(phys_mem_offset) }; + let mut frame_allocator = unsafe { + BootInfoFrameAllocator::init(&boot_info.memory_map) + }; + allocator::init_heap(&mut mapper, &mut frame_allocator) + .expect("heap initialization failed"); + + test_main(); + loop {} +} + +#[panic_handler] +fn panic(info: &PanicInfo) -> ! { + gros::test_panic_handler(info) +} + +use alloc::boxed::Box; + +#[test_case] +fn simple_allocation() { + let heap_value_1 = Box::new(41); + let heap_value_2 = Box::new(13); + assert_eq!(*heap_value_1, 41); + assert_eq!(*heap_value_2, 13); +} + +use alloc::vec::Vec; + +#[test_case] +fn large_vec() { + let n = 1000; + let mut vec = Vec::new(); + for i in 0..n { + vec.push(i); + } + assert_eq!(vec.iter().sum::(), (n - 1) * n / 2); +} + +use gros::allocator::HEAP_SIZE; + +#[test_case] +fn many_boxes() { + let long_lived = Box::new(1); + for i in 0..HEAP_SIZE { + let x = Box::new(i); + assert_eq!(*x, i); + } + assert_eq!(*long_lived, 1); +} \ No newline at end of file diff --git a/tests/should_panic.rs b/tests/should_panic.rs new file mode 100644 index 0000000..d995ee3 --- /dev/null +++ b/tests/should_panic.rs @@ -0,0 +1,28 @@ +#![no_std] +#![no_main] + +use core::panic::PanicInfo; +use gros::{QemuExitCode, exit_qemu, serial_println, serial_print}; + +#[no_mangle] +pub extern "C" fn _start() -> ! { + should_fail(); + serial_println!("[test did not panic]"); + exit_qemu(QemuExitCode::Failed); + + loop {} +} + +fn should_fail() { + serial_print!("should_panic::should_fail...\t"); + assert_eq!(0, 1); +} + +#[panic_handler] +fn panic(_info: &PanicInfo) -> ! { + serial_println!("[ok]"); + exit_qemu(QemuExitCode::Success); + loop {} +} + + diff --git a/tests/stack_overflow.rs b/tests/stack_overflow.rs new file mode 100644 index 0000000..643b74f --- /dev/null +++ b/tests/stack_overflow.rs @@ -0,0 +1,62 @@ +#![no_std] +#![no_main] +#![feature(abi_x86_interrupt)] + +use core::panic::PanicInfo; +use gros::serial_print; + +use lazy_static::lazy_static; +use x86_64::structures::idt::InterruptDescriptorTable; + +use gros::{exit_qemu, QemuExitCode, serial_println}; +use x86_64::structures::idt::InterruptStackFrame; + +#[no_mangle] +pub extern "C" fn _start() -> ! { + serial_print!("stack_overflow::stack_overflow...\t"); + + gros::gdt::init(); + init_test_idt(); + + // trigger a stack overflow + stack_overflow(); + + panic!("Execution continued after stack overflow"); +} + +#[allow(unconditional_recursion)] +fn stack_overflow() { + stack_overflow(); // for each recursion, the return address is pushed + volatile::Volatile::new(0).read(); // prevent tail recursion optimizations +} + +#[panic_handler] +fn panic(info: &PanicInfo) -> ! 
{
+    gros::test_panic_handler(info)
+}
+
+lazy_static! {
+    static ref TEST_IDT: InterruptDescriptorTable = {
+        let mut idt = InterruptDescriptorTable::new();
+        unsafe {
+            idt.double_fault
+                .set_handler_fn(test_double_fault_handler)
+                .set_stack_index(gros::gdt::DOUBLE_FAULT_IST_INDEX);
+        }
+
+        idt
+    };
+}
+
+pub fn init_test_idt() {
+    TEST_IDT.load();
+}
+
+extern "x86-interrupt" fn test_double_fault_handler(
+    _stack_frame: InterruptStackFrame,
+    _error_code: u64,
+) -> ! {
+    serial_println!("[ok]");
+    exit_qemu(QemuExitCode::Success);
+    loop {}
+}
\ No newline at end of file
diff --git a/x86_64-gros.json b/x86_64-gros.json
new file mode 100644
index 0000000..23da62b
--- /dev/null
+++ b/x86_64-gros.json
@@ -0,0 +1,15 @@
+{
+  "llvm-target": "x86_64-unknown-none",
+  "data-layout": "e-m:e-i64:64-f80:128-n8:16:32:64-S128",
+  "arch": "x86_64",
+  "target-endian": "little",
+  "target-pointer-width": "64",
+  "target-c-int-width": "32",
+  "os": "none",
+  "executables": true,
+  "linker-flavor": "ld.lld",
+  "linker": "rust-lld",
+  "panic-strategy": "abort",
+  "disable-redzone": true,
+  "features": "-mmx,-sse,+soft-float"
+}
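
A minimal build-and-run sketch for this initial commit, inferred from .cargo/config.toml (default target x86_64-gros.json, runner "bootimage runner") and the [package.metadata.bootimage] test settings above. It assumes the standard nightly-Rust bootimage workflow and a locally installed QEMU, so the exact invocations may differ:

    rustup override set nightly                       # build-std and the #![feature(...)] gates require nightly
    rustup component add rust-src llvm-tools-preview  # sources for build-std, tools needed by bootimage
    cargo install bootimage                           # provides the `bootimage runner` used as the target runner
    cargo run                                         # build the kernel for x86_64-gros.json and boot it in QEMU
    cargo test                                        # integration tests; QEMU exits via isa-debug-exit (success code 33)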