diff --git a/kernel/include/memory.hpp b/kernel/include/memory.hpp
index a01acd66..ba6b8651 100644
--- a/kernel/include/memory.hpp
+++ b/kernel/include/memory.hpp
@@ -15,6 +15,14 @@ void init_memory_manager();
 void* k_malloc(uint64_t bytes);
 void k_free(void* block);
 
+/*!
+ * \brief Allocates bytes of memory at the given virtual address.
+ * \param address The virtual address where memory should be put.
+ * \param bytes The number of bytes to allocate.
+ * \return The address of the physical allocated memory, 0 indicates failure.
+ */
+void* k_malloc(uint64_t address, uint64_t bytes);
+
 template<typename T>
 T* k_malloc(){
     return reinterpret_cast<T*>(k_malloc(sizeof(T)));
diff --git a/kernel/include/paging.hpp b/kernel/include/paging.hpp
index 04f65f9b..f3f76cd4 100644
--- a/kernel/include/paging.hpp
+++ b/kernel/include/paging.hpp
@@ -19,12 +19,24 @@ constexpr bool page_aligned(T* addr){
     return !(reinterpret_cast<uintptr_t>(addr) & (paging::PAGE_SIZE - 1));
 }
 
+/*!
+ * \brief Rounds addr down to the start of its page.
+ */
+template<typename T>
+constexpr T* page_align(T* addr){
+    return reinterpret_cast<T*>((reinterpret_cast<uintptr_t>(addr) / paging::PAGE_SIZE) * paging::PAGE_SIZE);
+}
+
 void* physical_address(void* virt);
 bool page_present(void* virt);
 bool page_free_or_set(void* virt, void* physical);
+
 bool identity_map(void* virt);
 bool identity_map(void* virt, size_t pages);
 
+bool map(void* virt, void* physical);
+bool map(void* virt, void* physical, size_t pages);
+
 } //end of namespace paging
 
 #endif
diff --git a/kernel/src/memory.cpp b/kernel/src/memory.cpp
index 46be12b9..5dd7f610 100644
--- a/kernel/src/memory.cpp
+++ b/kernel/src/memory.cpp
@@ -302,6 +302,42 @@ void* k_malloc(uint64_t bytes){
     return b;
 }
 
+void* k_malloc(uint64_t address, uint64_t bytes){
+    auto page = reinterpret_cast<uintptr_t>(paging::page_align(reinterpret_cast<void*>(address)));
+
+    //1. Verify that all the necessary pages are free
+    while(page < address + bytes){
+        //If the virtual address is already mapped, indicates failure
+        if(paging::page_present(reinterpret_cast<void*>(page))){
+            return nullptr;
+        }
+
+        page += paging::PAGE_SIZE;
+    }
+
+    //2. Allocate enough physical memory
+    auto physical = k_malloc(bytes);
+    if(!physical){
+        return nullptr;
+    }
+
+    //3. Map the physical allocated memory to the necessary virtual memory
+
+    //The first page is only partially used when address is not page-aligned
+    auto left_padding = address % paging::PAGE_SIZE;
+    auto pages = (bytes + left_padding + paging::PAGE_SIZE - 1) / paging::PAGE_SIZE;
+
+    //NOTE(review): physical comes from k_malloc and is not guaranteed to be
+    //page-aligned, while map() only checks the virtual alignment — confirm
+    if(!paging::map(paging::page_align(reinterpret_cast<void*>(address)), physical, pages)){
+        //Avoid leaking the physical block when the mapping fails
+        k_free(physical);
+        return nullptr;
+    }
+
+    return physical;
+}
+
 malloc_header_chunk* left_block(malloc_header_chunk* b){
     auto left_footer = reinterpret_cast<malloc_footer_chunk*>(
         reinterpret_cast<uintptr_t>(b) - sizeof(malloc_footer_chunk));
diff --git a/kernel/src/paging.cpp b/kernel/src/paging.cpp
index fedcd167..a896abfa 100644
--- a/kernel/src/paging.cpp
+++ b/kernel/src/paging.cpp
@@ -98,13 +98,13 @@ bool paging::page_free_or_set(void* virt, void* physical){
     return false;
 }
 
-bool paging::identity_map(void* virt){
+bool paging::map(void* virt, void* physical){
     //The address must be page-aligned
     if(!page_aligned(virt)){
         return false;
     }
 
-    //Find the correct indexes inside the paging table for the physical address
+    //Find the correct indexes inside the paging table for the virtual address
     auto table = (reinterpret_cast<uintptr_t>(virt) >> 12) & 0x1FF;
     auto directory = (reinterpret_cast<uintptr_t>(virt) >> 21) & 0x1FF;
     auto directory_ptr = (reinterpret_cast<uintptr_t>(virt) >> 30) & 0x1FF;
@@ -137,16 +137,16 @@ bool paging::identity_map(void* virt){
     if(reinterpret_cast<uintptr_t>(pt[table]) & PRESENT){
         //If the page is already set to the correct value, return true
         //If the page is set to another value, return false
-        return reinterpret_cast<uintptr_t>(pt[table]) == (reinterpret_cast<uintptr_t>(virt) | (PRESENT | WRITEABLE));
+        return reinterpret_cast<uintptr_t>(pt[table]) == (reinterpret_cast<uintptr_t>(physical) | (PRESENT | WRITEABLE));
     }
 
-    //Identity map the physical address
-    pt[table] = reinterpret_cast<uint64_t*>(reinterpret_cast<uintptr_t>(virt) | (PRESENT | WRITEABLE));
+    //Map to the physical address
+    pt[table] = reinterpret_cast<uint64_t*>(reinterpret_cast<uintptr_t>(physical) | (PRESENT | WRITEABLE));
 
     return true;
 }
 
-bool paging::identity_map(void* virt, size_t pages){
+bool paging::map(void* virt, void* physical, size_t pages){
     //The address must be page-aligned
     if(!page_aligned(virt)){
         return false;
@@ -155,18 +155,32 @@ bool paging::identity_map(void* virt, size_t pages){
     //To avoid mapping only a subset of the pages
     //check if one of the page is already mapped to another value
     for(size_t page = 0; page < pages; ++page){
-        auto addr = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(virt) + page * PAGE_SIZE);
-        if(!page_free_or_set(addr, addr)){
+        auto virt_addr = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(virt) + page * PAGE_SIZE);
+        auto phys_addr = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(physical) + page * PAGE_SIZE);
+
+        if(!page_free_or_set(virt_addr, phys_addr)){
             return false;
         }
     }
 
-    //Identity map each page
+    //Map each page to its corresponding physical page
     for(size_t page = 0; page < pages; ++page){
-        if(!identity_map(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(virt) + page * PAGE_SIZE))){
+        auto virt_addr = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(virt) + page * PAGE_SIZE);
+        auto phys_addr = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(physical) + page * PAGE_SIZE);
+
+        if(!map(virt_addr, phys_addr)){
             return false;
         }
     }
 
     return true;
 }
+
+//Identity mapping is simply mapping a page to itself
+bool paging::identity_map(void* virt){
+    return map(virt, virt);
+}
+
+bool paging::identity_map(void* virt, size_t pages){
+    return map(virt, virt, pages);
+}