x86_64: shrink kernel heap size and remove 'enlarge_heap'

master
WangRunji 6 years ago
parent bc1bad3060
commit ba74f93ab8

@ -1,6 +1,6 @@
pub const MEMORY_OFFSET: usize = 0; pub const MEMORY_OFFSET: usize = 0;
pub const KERNEL_OFFSET: usize = 0xffffff00_00000000; pub const KERNEL_OFFSET: usize = 0xffffff00_00000000;
pub const KERNEL_HEAP_SIZE: usize = 32 * 1024 * 1024; // 32 MB pub const KERNEL_HEAP_SIZE: usize = 8 * 1024 * 1024; // 8 MB
pub const USER_STACK_OFFSET: usize = 0x00008000_00000000 - USER_STACK_SIZE; pub const USER_STACK_OFFSET: usize = 0x00008000_00000000 - USER_STACK_SIZE;
pub const USER_STACK_SIZE: usize = 8 * 1024 * 1024; // 8 MB, the default config of Linux pub const USER_STACK_SIZE: usize = 8 * 1024 * 1024; // 8 MB, the default config of Linux

@ -2,9 +2,7 @@ use crate::consts::KERNEL_OFFSET;
use bitmap_allocator::BitAlloc; use bitmap_allocator::BitAlloc;
// Depends on kernel // Depends on kernel
use super::{BootInfo, MemoryRegionType}; use super::{BootInfo, MemoryRegionType};
use crate::memory::{active_table, alloc_frame, init_heap, FRAME_ALLOCATOR}; use crate::memory::{active_table, init_heap, FRAME_ALLOCATOR};
use crate::HEAP_ALLOCATOR;
use alloc::vec::Vec;
use log::*; use log::*;
use once::*; use once::*;
use rcore_memory::paging::*; use rcore_memory::paging::*;
@ -15,7 +13,6 @@ pub fn init(boot_info: &BootInfo) {
init_frame_allocator(boot_info); init_frame_allocator(boot_info);
init_device_vm_map(); init_device_vm_map();
init_heap(); init_heap();
enlarge_heap();
info!("memory: init end"); info!("memory: init end");
} }
@ -42,30 +39,3 @@ fn init_device_vm_map() {
.map(KERNEL_OFFSET + 0xfee00000, 0xfee00000) .map(KERNEL_OFFSET + 0xfee00000, 0xfee00000)
.update(); .update();
} }
/// Grow the kernel heap by allocating 16384 physical frames (64 MiB with
/// 4 KiB pages), mapping them at a fixed virtual offset above `KERNEL_OFFSET`,
/// and donating each physically-contiguous run to `HEAP_ALLOCATOR`.
///
/// NOTE(review): `HEAP_ALLOCATOR.lock().init` is invoked once per contiguous
/// run; this assumes the allocator tolerates being (re-)initialized with
/// additional regions — confirm against the allocator's API.
fn enlarge_heap() {
    let mut page_table = active_table();
    // Coalesced (virtual start address, byte length) runs of heap memory.
    let mut addrs = Vec::new();
    // Mapping rule for every heap page: va = pa + va_offset.
    let va_offset = KERNEL_OFFSET + 0xe0000000;
    for _ in 0..16384 {
        let page = alloc_frame().expect("failed to allocate frame for kernel heap");
        let va = va_offset + page;
        // The frame allocator tends to hand frames back in descending order,
        // so a new frame often sits exactly one page below the last run's
        // start: extend that run downward instead of recording a new one.
        if let Some((ref mut addr, ref mut len)) = addrs.last_mut() {
            if *addr - PAGE_SIZE == va {
                *len += PAGE_SIZE;
                *addr -= PAGE_SIZE;
                continue;
            }
        }
        addrs.push((va, PAGE_SIZE));
    }
    // Map every page of every run, then hand the run to the heap allocator.
    for (addr, len) in addrs.into_iter() {
        for va in (addr..(addr + len)).step_by(PAGE_SIZE) {
            page_table.map(va, va - va_offset).update();
        }
        info!("Adding {:#X} {:#X} to heap", addr, len);
        unsafe {
            // SAFETY: [addr, addr + len) was fully mapped just above and is
            // exclusively owned by the heap allocator from this point on.
            HEAP_ALLOCATOR.lock().init(addr, len);
        }
    }
}

Loading…
Cancel
Save