diff --git a/os/kernel/src/consts.rs b/os/kernel/src/consts.rs
index 97a9b8f..3429e7f 100644
--- a/os/kernel/src/consts.rs
+++ b/os/kernel/src/consts.rs
@@ -1,5 +1,6 @@
-pub const MAIN_USER_STACK_START: usize = 0x400000000000;
-pub const MAX_USER_STACK_SIZE: usize = 0x40000000;
+pub const MAIN_USER_STACK_START: usize = 0x400000000000; // 64 TiB
+pub const MAX_USER_STACK_SIZE: usize = 0x40000000; // 1 GiB
 pub const KERNEL_STACK_PAGES: usize = 64;
+pub const STACK_ENTRY_SIZE: usize = 8;
diff --git a/os/kernel/src/process/thread.rs b/os/kernel/src/process/thread.rs
index c444fc5..d21c752 100644
--- a/os/kernel/src/process/thread.rs
+++ b/os/kernel/src/process/thread.rs
@@ -2,43 +2,55 @@
    ║ Module: thread ║
    ╟─────────────────────────────────────────────────────────────────────────╢
    ║ Descr.: Implementation of threads. ║
+   ║         Kernel threads have a stack of 'KERNEL_STACK_PAGES' pages. ║
+   ║         User threads have an additional stack with a logical size of ║
+   ║         'MAX_USER_STACK_SIZE' and an initial physical size of one page. ║
+   ║         Additional pages are allocated for a user stack as needed until ║
+   ║         'MAX_USER_STACK_SIZE' is reached. The thread is killed if this ║
+   ║         limit is exceeded. The stack of the first user thread within a ║
+   ║         process is logically allocated at 'MAIN_USER_STACK_START'; ║
+   ║         the stack of the next user thread is allocated at ║
+   ║         'MAIN_USER_STACK_START' + 'MAX_USER_STACK_SIZE', and so on. ║
    ╟─────────────────────────────────────────────────────────────────────────╢
    ║ Author: Fabian Ruhland, HHU ║
    ╚═════════════════════════════════════════════════════════════════════════╝
 */
+
+
+// each user thread gets a 1 GiB stack (logical); 4 KiB physical at the beginning
+// extended physically as needed, but at most up to 1 GiB
+// all user-level thread stacks are allocated sequentially in the logical address space (1 GiB per entry)
+
+
+use crate::memory::alloc::StackAllocator;
+use crate::memory::r#virtual::{VirtualMemoryArea, VmaType};
+use crate::memory::{MemorySpace, PAGE_SIZE};
+use crate::process::process::Process;
 use crate::process::scheduler;
+use crate::syscall::syscall_dispatcher::CORE_LOCAL_STORAGE_TSS_RSP0_PTR_INDEX;
+use crate::{memory, process_manager, scheduler, tss};
 use alloc::rc::Rc;
 use alloc::sync::Arc;
 use alloc::vec::Vec;
 use core::arch::asm;
 use core::{mem, ptr};
-use goblin::elf64;
 use goblin::elf::Elf;
+use goblin::elf64;
 use spin::Mutex;
 use x86_64::structures::gdt::SegmentSelector;
-use x86_64::PrivilegeLevel::Ring3;
-use x86_64::structures::paging::{Page, PageTableFlags, Size4KiB};
 use x86_64::structures::paging::page::PageRange;
+use x86_64::structures::paging::{Page, PageTableFlags, Size4KiB};
+use x86_64::PrivilegeLevel::Ring3;
 use x86_64::VirtAddr;
-use crate::memory::{MemorySpace, PAGE_SIZE};
-use crate::{memory, process_manager, scheduler, tss};
-use crate::memory::alloc::StackAllocator;
-use crate::memory::r#virtual::{VirtualMemoryArea, VmaType};
-use crate::process::process::Process;
-use crate::syscall::syscall_dispatcher::CORE_LOCAL_STORAGE_TSS_RSP0_PTR_INDEX;
+use crate::consts::KERNEL_STACK_PAGES;
 use crate::consts::MAIN_USER_STACK_START;
 use crate::consts::MAX_USER_STACK_SIZE;
-use crate::consts::KERNEL_STACK_PAGES;
-
-/**
-    Description: Each thread has a user and kernel stack.
-*/
 struct Stacks {
     kernel_stack: Vec<u64, StackAllocator>,
     user_stack: Vec<u64, StackAllocator>,
-    old_rsp0: VirtAddr // used for thread switching; rsp3 is stored in TSS
+    old_rsp0: VirtAddr, // used for thread switching; rsp3 is stored in TSS
 }
 
 pub struct Thread {
@@ -46,26 +58,44 @@ pub struct Thread {
     stacks: Mutex<Stacks>,
     process: Arc<Process>,
     entry: fn(),
-    user_rip: VirtAddr
+    user_rip: VirtAddr,
 }
 
 impl Stacks {
-    const fn new(kernel_stack: Vec<u64, StackAllocator>, user_stack: Vec<u64, StackAllocator>) -> Self {
-        Self { kernel_stack, user_stack, old_rsp0: VirtAddr::zero() }
+    const fn new(
+        kernel_stack: Vec<u64, StackAllocator>,
+        user_stack: Vec<u64, StackAllocator>,
+    ) -> Self {
+        Self {
+            kernel_stack,
+            user_stack,
+            old_rsp0: VirtAddr::zero(),
+        }
     }
 }
 
 impl Thread {
+    /**
+       Description: Create a kernel thread. Not started yet, nor registered in the scheduler.
+
+       Parameters: `entry` thread entry function.
+    */
     pub fn new_kernel_thread(entry: fn()) -> Rc<Thread> {
-        let kernel_stack = Vec::<u64, StackAllocator>::with_capacity_in((KERNEL_STACK_PAGES * PAGE_SIZE) / 8, StackAllocator::new());
+        let kernel_stack = Vec::<u64, StackAllocator>::with_capacity_in(
+            (KERNEL_STACK_PAGES * PAGE_SIZE) / 8,
+            StackAllocator::new(),
+        );
         let user_stack = Vec::with_capacity_in(0, StackAllocator::new()); // Dummy stack
 
         let thread = Thread {
             id: scheduler::next_thread_id(),
             stacks: Mutex::new(Stacks::new(kernel_stack, user_stack)),
-            process: process_manager().read().kernel_process().expect("Trying to create a kernel thread before process initialization!"),
+            process: process_manager()
+                .read()
+                .kernel_process()
+                .expect("Trying to create a kernel thread before process initialization!"),
             entry,
-            user_rip: VirtAddr::zero()
+            user_rip: VirtAddr::zero(),
         };
 
         thread.prepare_kernel_stack();
@@ -77,30 +107,68 @@ impl Thread {
         let address_space = process.address_space();
         let elf = Elf::parse(elf_buffer).expect("Failed to parse application");
 
-        elf.program_headers.iter()
+        elf.program_headers
+            .iter()
             .filter(|header| header.p_type == elf64::program_header::PT_LOAD)
             .for_each(|header| {
-                let page_count = if header.p_memsz as usize % PAGE_SIZE == 0 { header.p_memsz as usize / PAGE_SIZE } else { (header.p_memsz as usize / PAGE_SIZE) + 1 };
+                let page_count = if header.p_memsz as usize % PAGE_SIZE == 0 {
+                    header.p_memsz as usize / PAGE_SIZE
+                } else {
+                    (header.p_memsz as usize / PAGE_SIZE) + 1
+                };
                 let frames = memory::physical::alloc(page_count);
-                let virt_start = Page::from_start_address(VirtAddr::new(header.p_vaddr)).expect("ELF: Program section not page aligned");
-                let pages = PageRange { start: virt_start, end: virt_start + page_count as u64 };
+                let virt_start = Page::from_start_address(VirtAddr::new(header.p_vaddr))
+                    .expect("ELF: Program section not page aligned");
+                let pages = PageRange {
+                    start: virt_start,
+                    end: virt_start + page_count as u64,
+                };
 
                 unsafe {
                     let code = elf_buffer.as_ptr().offset(header.p_offset as isize);
                     let target = frames.start.start_address().as_u64() as *mut u8;
 
                     target.copy_from(code, header.p_filesz as usize);
-                    target.offset(header.p_filesz as isize).write_bytes(0, (header.p_memsz - header.p_filesz) as usize);
+                    target
+                        .offset(header.p_filesz as isize)
+                        .write_bytes(0, (header.p_memsz - header.p_filesz) as usize);
                 }
 
-                process.address_space().map_physical(frames, pages, MemorySpace::User, PageTableFlags::PRESENT | PageTableFlags::WRITABLE | PageTableFlags::USER_ACCESSIBLE);
+                process.address_space().map_physical(
+                    frames,
+                    pages,
+                    MemorySpace::User,
+                    PageTableFlags::PRESENT
+                        | PageTableFlags::WRITABLE
+                        | PageTableFlags::USER_ACCESSIBLE,
+                );
                 process.add_vma(VirtualMemoryArea::new(pages, VmaType::Code));
             });
 
-        let kernel_stack = Vec::<u64, StackAllocator>::with_capacity_in((KERNEL_STACK_PAGES * PAGE_SIZE) / 8, StackAllocator::new());
-        let user_stack_end = Page::from_start_address(VirtAddr::new((MAIN_USER_STACK_START + MAX_USER_STACK_SIZE) as u64)).unwrap();
-        let user_stack_pages = PageRange { start: user_stack_end - 1, end: user_stack_end };
-        let user_stack = unsafe { Vec::from_raw_parts_in(user_stack_pages.start.start_address().as_u64() as *mut u64, 0, PAGE_SIZE / 8, StackAllocator::new()) };
-        address_space.map(user_stack_pages, MemorySpace::User, PageTableFlags::PRESENT | PageTableFlags::WRITABLE | PageTableFlags::USER_ACCESSIBLE);
+        let kernel_stack = Vec::<u64, StackAllocator>::with_capacity_in(
+            (KERNEL_STACK_PAGES * PAGE_SIZE) / 8,
+            StackAllocator::new(),
+        );
+        let user_stack_end = Page::from_start_address(VirtAddr::new(
+            (MAIN_USER_STACK_START + MAX_USER_STACK_SIZE) as u64,
+        ))
+        .unwrap();
+        let user_stack_pages = PageRange {
+            start: user_stack_end - 1,
+            end: user_stack_end,
+        };
+        let user_stack = unsafe {
+            Vec::from_raw_parts_in(
+                user_stack_pages.start.start_address().as_u64() as *mut u64,
+                0,
+                PAGE_SIZE / 8,
+                StackAllocator::new(),
+            )
+        };
+        address_space.map(
+            user_stack_pages,
+            MemorySpace::User,
+            PageTableFlags::PRESENT | PageTableFlags::WRITABLE | PageTableFlags::USER_ACCESSIBLE,
+        );
         process.add_vma(VirtualMemoryArea::new(user_stack_pages, VmaType::Stack));
 
         let thread = Thread {
@@ -108,22 +176,59 @@ impl Thread {
             stacks: Mutex::new(Stacks::new(kernel_stack, user_stack)),
             process,
             entry: unsafe { mem::transmute(ptr::null::<fn()>()) },
-            user_rip: VirtAddr::new(elf.entry)
+            user_rip: VirtAddr::new(elf.entry),
         };
 
         thread.prepare_kernel_stack();
         return Rc::new(thread);
     }
 
-    pub fn new_user_thread(parent: Arc<Process>, kickoff_addr: VirtAddr, entry: fn()) -> Rc<Thread> {
-        let kernel_stack = Vec::<u64, StackAllocator>::with_capacity_in((KERNEL_STACK_PAGES * PAGE_SIZE) / 8, StackAllocator::new());
-
+    /**
+       Description: Create a user thread. Not started yet, nor registered in the scheduler.
+
+       Parameters: \
+       `parent` process the thread belongs to. \
+       `kickoff_addr` address of the `kickoff` function \
+       `entry` address of the thread entry function
+    */
+    pub fn new_user_thread(
+        parent: Arc<Process>,
+        kickoff_addr: VirtAddr,
+        entry: fn(),
+    ) -> Rc<Thread> {
+        // alloc memory for kernel stack
+        let kernel_stack = Vec::<u64, StackAllocator>::with_capacity_in(
+            (KERNEL_STACK_PAGES * PAGE_SIZE) / 8,
+            StackAllocator::new(),
+        );
+
+        // get all stack VMAs of my process; the new stack goes 1 GiB (MAX_USER_STACK_SIZE) above the highest one
         let stack_vmas = parent.find_vmas(VmaType::Stack);
-        let highest_stack_vma = stack_vmas.last().expect("Trying to create a user thread, before the main thread has been created!");
-        let user_stack_end = Page::<Size4KiB>::from_start_address(highest_stack_vma.end() + MAX_USER_STACK_SIZE as u64).unwrap();
-        let user_stack_pages = PageRange { start: user_stack_end - 1, end: user_stack_end };
-        let user_stack = unsafe { Vec::from_raw_parts_in(user_stack_pages.start.start_address().as_u64() as *mut u64, 0, PAGE_SIZE / 8, StackAllocator::new()) };
-        parent.address_space().map(user_stack_pages, MemorySpace::User, PageTableFlags::PRESENT | PageTableFlags::WRITABLE | PageTableFlags::USER_ACCESSIBLE);
+        let highest_stack_vma = stack_vmas
+            .last()
+            .expect("Trying to create a user thread, before the main thread has been created!");
+        let user_stack_end = Page::<Size4KiB>::from_start_address(
+            highest_stack_vma.end() + MAX_USER_STACK_SIZE as u64,
+        )
+        .unwrap();
+
+        let user_stack_pages = PageRange {
+            start: user_stack_end - 1,
+            end: user_stack_end,
+        };
+        let user_stack = unsafe {
+            Vec::from_raw_parts_in(
+                user_stack_pages.start.start_address().as_u64() as *mut u64,
+                0,
+                PAGE_SIZE / 8,
+                StackAllocator::new(),
+            )
+        };
+        parent.address_space().map(
+            user_stack_pages,
+            MemorySpace::User,
+            PageTableFlags::PRESENT | PageTableFlags::WRITABLE | PageTableFlags::USER_ACCESSIBLE,
+        );
         parent.add_vma(VirtualMemoryArea::new(user_stack_pages, VmaType::Stack));
 
         let thread = Thread {
@@ -131,7 +236,7 @@ impl Thread {
             stacks: Mutex::new(Stacks::new(kernel_stack, user_stack)),
             process: parent,
             entry,
-            user_rip: kickoff_addr
+            user_rip: kickoff_addr,
         };
 
         thread.prepare_kernel_stack();
@@ -140,7 +245,7 @@ impl Thread {
 
     pub fn kickoff_kernel_thread() {
         let scheduler = scheduler();
-        scheduler.set_init();
+        scheduler.set_init(); // scheduler initialized
 
         let thread = scheduler.current_thread();
         tss().lock().privilege_stack_table[0] = thread.kernel_stack_addr();
@@ -148,23 +253,32 @@ impl Thread {
         if thread.is_kernel_thread() {
             (thread.entry)();
             drop(thread); // Manually decrease reference count, because exit() will never return
-            scheduler.exit();
+            scheduler.exit();
         } else {
             let thread_ptr = ptr::from_ref(thread.as_ref());
             drop(thread); // Manually decrease reference count, because switch_to_user_mode() will never return
 
             let thread_ref = unsafe { thread_ptr.as_ref().unwrap() };
             thread_ref.switch_to_user_mode();
+            // exit() is called at the end of the entry function -> see runtime::lib.rs
         }
     }
 
+    /**
+       Description: High-level function for starting a thread in kernel mode
+    */
     pub unsafe fn start_first(thread_ptr: *const Thread) {
         let thread = unsafe { thread_ptr.as_ref().unwrap() };
         let old_rsp0 = thread.stacks.lock().old_rsp0;
 
-        unsafe { thread_kernel_start(old_rsp0.as_u64()); }
+        unsafe {
+            thread_kernel_start(old_rsp0.as_u64());
+        }
     }
 
+    /**
+       Description: High-level thread switching function
+    */
     pub unsafe fn switch(current_ptr: *const Thread, next_ptr: *const Thread) {
         let current = unsafe { current_ptr.as_ref().unwrap() };
         let next = unsafe { next_ptr.as_ref().unwrap() };
@@ -173,64 +287,96 @@ impl Thread {
         let next_rsp0_end = next.kernel_stack_addr().as_u64();
         let next_address_space = next.process.address_space().page_table_address().as_u64();
 
-        unsafe { thread_switch(current_rsp0, next_rsp0, next_rsp0_end, next_address_space); }
-    }
-
-    pub fn is_kernel_thread(&self) -> bool {
-        return self.stacks.lock().user_stack.capacity() == 0;
+        unsafe {
+            thread_switch(current_rsp0, next_rsp0, next_rsp0_end, next_address_space);
+        }
     }
 
+    // check if stacks are locked
     pub fn stacks_locked(&self) -> bool {
         self.stacks.is_locked()
     }
 
+    /**
+       Description: Grow user stack on demand
+    */
     pub fn grow_user_stack(&self) {
         let mut stacks = self.stacks.lock();
 
         // Grow stack area -> Allocate one page right below the stack
-        self.process.find_vmas(VmaType::Stack).iter().find(|vma| {
-            vma.start().as_u64() == stacks.user_stack.as_ptr() as u64
-        }).expect("Failed to find VMA for growing stack").grow_downwards(1);
+        self.process
+            .find_vmas(VmaType::Stack)
+            .iter()
+            .find(|vma| vma.start().as_u64() == stacks.user_stack.as_ptr() as u64)
+            .expect("Failed to find VMA for growing stack")
+            .grow_downwards(1);
 
         // Adapt stack Vec to new start address
         let user_stack_capacity = stacks.user_stack.capacity() + (PAGE_SIZE / 8);
         if user_stack_capacity > MAX_USER_STACK_SIZE / 8 {
             panic!("Stack overflow!");
         }
-        
+
         let user_stack_start = stacks.user_stack.as_ptr() as usize - PAGE_SIZE;
-        stacks.user_stack = unsafe { Vec::from_raw_parts_in(user_stack_start as *mut u64, 0, user_stack_capacity, StackAllocator::new()) };
+        stacks.user_stack = unsafe {
+            Vec::from_raw_parts_in(
+                user_stack_start as *mut u64,
+                0,
+                user_stack_capacity,
+                StackAllocator::new(),
+            )
+        };
+    }
+
+    /**
+       Description: Check whether this is a kernel thread (it has no user stack)
+    */
+    pub fn is_kernel_thread(&self) -> bool {
+        return self.stacks.lock().user_stack.capacity() == 0;
     }
 
+    // lowest currently mapped address of the user stack; used when growing the stack downwards
     pub fn user_stack_start(&self) -> VirtAddr {
         let stacks = self.stacks.lock();
         VirtAddr::new(stacks.user_stack.as_ptr() as u64)
     }
 
+    /**
+       Description: Return reference to my process
+    */
    pub fn process(&self) -> Arc<Process> {
        return Arc::clone(&self.process);
    }
 
+    // wait for this thread to terminate
    #[allow(dead_code)]
    pub fn join(&self) {
        scheduler().join(self.id());
    }
 
+    /**
+       Description: Return my thread id
+    */
    pub fn id(&self) -> usize {
        return self.id;
    }
 
+    // return the end address (top) of the kernel stack; used as rsp0 in the TSS
    pub fn kernel_stack_addr(&self) -> VirtAddr {
        let stacks = self.stacks.lock();
        let kernel_stack_addr = VirtAddr::new(stacks.kernel_stack.as_ptr() as u64);
 
        return kernel_stack_addr + (stacks.kernel_stack.capacity() * 8) as u64;
    }
 
+    /**
+       Description: Prepare a fake stack for starting a thread in kernel mode
+    */
    fn prepare_kernel_stack(&self) {
        let mut stacks = self.stacks.lock();
        let stack_addr = stacks.kernel_stack.as_ptr() as u64;
        let capacity = stacks.kernel_stack.capacity();
 
+        // init stack with 0s
        for _ in 0..stacks.kernel_stack.capacity() {
            stacks.kernel_stack.push(0);
        }
@@ -260,15 +406,20 @@ impl Thread {
        stacks.old_rsp0 = VirtAddr::new(stack_addr + ((capacity - 18) * 8) as u64);
    }
 
+    /**
+       Description: Switch a thread to user mode by preparing a fake stack frame
+    */
    fn switch_to_user_mode(&self) {
        let old_rsp0: u64;
 
-        { // Separate block to make sure that the lock is released, before calling `thread_user_start()`.
+        {
+            // Separate block to make sure that the lock is released before calling `thread_user_start()`.
            let mut stacks = self.stacks.lock();
            let kernel_stack_addr = stacks.kernel_stack.as_ptr() as u64;
            let user_stack_addr = stacks.user_stack.as_ptr() as u64;
            let capacity = stacks.kernel_stack.capacity();
 
+            // init stack with 0s
            for _ in 0..stacks.user_stack.capacity() {
                stacks.user_stack.push(0);
            }
@@ -277,7 +428,8 @@ impl Thread {
            stacks.kernel_stack[capacity - 5] = SegmentSelector::new(4, Ring3).0 as u64; // cs = user code segment
            stacks.kernel_stack[capacity - 4] = 0x202; // rflags (Interrupts enabled)
-            stacks.kernel_stack[capacity - 3] = user_stack_addr + (stacks.user_stack.capacity() - 1) as u64 * 8; // rsp for user stack
+            stacks.kernel_stack[capacity - 3] =
+                user_stack_addr + (stacks.user_stack.capacity() - 1) as u64 * 8; // rsp for user stack
            stacks.kernel_stack[capacity - 2] = SegmentSelector::new(3, Ring3).0 as u64; // ss = user data segment
            stacks.kernel_stack[capacity - 1] = 0x00DEAD00u64; // Dummy return address
@@ -286,53 +438,68 @@ impl Thread {
            old_rsp0 = stacks.old_rsp0.as_u64();
        }
 
-        unsafe { thread_user_start(old_rsp0, self.entry); }
+        unsafe {
+            thread_user_start(old_rsp0, self.entry);
+        }
    }
 }
 
+/**
+   Description: Low-level function for starting a thread in kernel mode
+*/
 #[naked]
 #[allow(unsafe_op_in_unsafe_fn)]
 unsafe extern "C" fn thread_kernel_start(old_rsp0: u64) {
    asm!(
-    "mov rsp, rdi", // First parameter -> load 'old_rsp0'
-    "pop rbp",
-    "pop rdi", // 'old_rsp0' is here
-    "pop rsi",
-    "pop rdx",
-    "pop rcx",
-    "pop rbx",
-    "pop rax",
-    "pop r15",
-    "pop r14",
-    "pop r13",
-    "pop r12",
-    "pop r11",
-    "pop r10",
-    "pop r9",
-    "pop r8",
-    "popf",
-
-    "call unlock_scheduler",
-    "ret",
-    options(noreturn)
+        "mov rsp, rdi", // First parameter -> load 'old_rsp0'
+        "pop rbp",
+        "pop rdi", // 'old_rsp0' is here
+        "pop rsi",
+        "pop rdx",
+        "pop rcx",
+        "pop rbx",
+        "pop rax",
+        "pop r15",
+        "pop r14",
+        "pop r13",
+        "pop r12",
+        "pop r11",
+        "pop r10",
+        "pop r9",
+        "pop r8",
+        "popf",
+        "call unlock_scheduler", // force unlock, the scheduler is still locked from the switch
+        "ret",
+        options(noreturn)
    );
 }
 
+/**
+   Description: Low-level function for starting a thread in user mode
+*/
 #[naked]
 #[allow(unsafe_op_in_unsafe_fn)]
 #[allow(improper_ctypes_definitions)] // 'entry' takes no arguments and has no return value, so we just assume that the "C" and "Rust" ABIs act the same way in this case
 unsafe extern "C" fn thread_user_start(old_rsp0: u64, entry: fn()) {
    asm!(
-    "mov rsp, rdi", // Load 'old_rsp' (first parameter)
-    "mov rdi, rsi", // Second parameter becomes first parameter for 'kickoff_user_thread()'
-    "iretq", // Switch to user-mode
-    options(noreturn)
+        "mov rsp, rdi", // Load 'old_rsp' (first parameter)
+        "mov rdi, rsi", // Second parameter becomes first parameter for 'kickoff_user_thread()'
+        "iretq", // Switch to user-mode
+        options(noreturn)
    )
 }
 
+/**
+   Description: Low-level thread switching function
+*/
 #[naked]
 #[allow(unsafe_op_in_unsafe_fn)]
-unsafe extern "C" fn thread_switch(current_rsp0: *mut u64, next_rsp0: u64, next_rsp0_end: u64, next_cr3: u64) {
+unsafe extern "C" fn thread_switch(
+    current_rsp0: *mut u64,
+    next_rsp0: u64,
+    next_rsp0_end: u64,
+    next_cr3: u64,
+) {
    asm!(
        // Save registers of current thread
        "pushf",
@@ -383,9 +550,9 @@ unsafe extern "C" fn thread_switch(current_rsp0: *mut u64, next_rsp0: u64, next_
        "pop r8",
        "popf",
 
-        "call unlock_scheduler",
+        "call unlock_scheduler", // force unlock, the scheduler is still locked from the switch
        "ret", // Return to next thread
        CORE_LOCAL_STORAGE_TSS_RSP0_PTR_INDEX = const CORE_LOCAL_STORAGE_TSS_RSP0_PTR_INDEX,
        options(noreturn)
    )
-}
\ No newline at end of file
+}
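
The thread.rs header above describes demand-paged user stacks: each stack starts with one physical page and grows page by page through grow_user_stack(). As a hedged illustration only (not part of this patch), the sketch below shows how a page-fault handler could drive that growth. The handler name handle_user_page_fault and the way the faulting address (CR2) is passed in are assumptions; everything else uses only items that appear in the diff (scheduler(), current_thread(), user_stack_start(), grow_user_stack(), PAGE_SIZE, MAX_USER_STACK_SIZE).

// Hypothetical page-fault hook, assuming the same imports and context as thread.rs.
fn handle_user_page_fault(fault_addr: VirtAddr) {
    let thread = scheduler().current_thread();
    let stack_start = thread.user_stack_start(); // lowest currently mapped stack address

    // A fault in the page directly below the mapped stack is normal growth:
    // grow_user_stack() maps one more page and panics ("Stack overflow!")
    // once MAX_USER_STACK_SIZE (1 GiB) would be exceeded.
    if fault_addr < stack_start && fault_addr >= stack_start - PAGE_SIZE as u64 {
        thread.grow_user_stack();
    } else {
        // Any other user-mode fault is a real access violation -> terminate the thread.
        scheduler().exit();
    }
}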