mirror of
https://github.com/wichtounet/thor-os.git
Prepare paging and memory support for running programs
This commit is contained in:
parent c62e86b894
commit dfcca04b51
@@ -15,6 +15,14 @@ void init_memory_manager();
 void* k_malloc(uint64_t bytes);
 void k_free(void* block);
 
+/*!
+ * \brief Allocates bytes of memory at the given address.
+ * \param address The virtual address where memory should be put.
+ * \param bytes The number of bytes to allocate.
+ * \return The address of the allocated physical memory; 0 indicates failure.
+ */
+void* k_malloc(uint64_t address, uint64_t bytes);
+
 template<typename T>
 T* k_malloc(){
     return reinterpret_cast<T*>(k_malloc(sizeof(T)));
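This overload is what the commit message points at: to run a program, a loader picks a virtual address and the allocator both reserves physical memory and maps it there. A minimal usage sketch, assuming the declaration above is visible; load_program and program_base are hypothetical names, not part of this commit:

    //Hypothetical loader helper: place a flat binary image at a fixed virtual address
    char* load_program(const char* image, uint64_t size){
        const uint64_t program_base = 0x400000; //assumed load address

        //Reserve physical memory and map it at program_base;
        //returns 0 if any page in the range is already mapped
        if(!k_malloc(program_base, size)){
            return nullptr;
        }

        //The range is now backed by physical memory: copy the image into place
        auto target = reinterpret_cast<char*>(program_base);
        for(uint64_t i = 0; i < size; ++i){
            target[i] = image[i];
        }

        return target;
    }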
@@ -19,12 +19,21 @@ constexpr bool page_aligned(T* addr){
     return !(reinterpret_cast<uintptr_t>(addr) & (paging::PAGE_SIZE - 1));
 }
 
+template<typename T>
+constexpr T* page_align(T* addr){
+    return reinterpret_cast<T*>((reinterpret_cast<uintptr_t>(addr) / paging::PAGE_SIZE) * paging::PAGE_SIZE);
+}
+
 void* physical_address(void* virt);
 bool page_present(void* virt);
 bool page_free_or_set(void* virt, void* physical);
 
 bool identity_map(void* virt);
 bool identity_map(void* virt, size_t pages);
 
+bool map(void* virt, void* physical);
+bool map(void* virt, void* physical, size_t pages);
+
 } //end of namespace paging
 
 #endif
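A note on the new page_align: reinterpret_cast is not allowed in constant expressions, so despite the constexpr the pointer version can only run at run time; constexpr here documents intent. The rounding itself is easy to check statically with an integer variant (a sketch, with PAGE_SIZE assumed to be the usual 4096-byte x86-64 page):

    #include <stdint.h>

    constexpr uintptr_t PAGE_SIZE = 4096; //assumed value

    constexpr uintptr_t page_align(uintptr_t addr){
        return (addr / PAGE_SIZE) * PAGE_SIZE;
    }

    static_assert(page_align(0x401234) == 0x401000, "rounds down to the page boundary");
    static_assert(page_align(0x401000) == 0x401000, "aligned addresses are unchanged");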
@@ -302,6 +302,34 @@ void* k_malloc(uint64_t bytes){
     return b;
 }
 
+void* k_malloc(uint64_t address, uint64_t bytes){
+    auto page = reinterpret_cast<uintptr_t>(paging::page_align(reinterpret_cast<void*>(address)));
+
+    //1. Verify that all the necessary pages are free
+    while(page < address + bytes){
+        //If the virtual address is already mapped, indicate failure
+        if(paging::page_present(reinterpret_cast<void*>(page))){
+            return nullptr;
+        }
+
+        page += paging::PAGE_SIZE;
+    }
+
+    //2. Allocate enough physical memory
+    auto physical = k_malloc(bytes);
+
+    //3. Map the allocated physical memory to the necessary virtual memory
+
+    auto left_padding = address % paging::PAGE_SIZE;
+    auto pages = ((bytes + left_padding) / paging::PAGE_SIZE) + 1;
+
+    if(!paging::map(paging::page_align(reinterpret_cast<void*>(address)), physical, pages)){
+        return nullptr;
+    }
+
+    return physical;
+}
+
 malloc_header_chunk* left_block(malloc_header_chunk* b){
     auto left_footer = reinterpret_cast<malloc_footer_chunk*>(
         reinterpret_cast<uintptr_t>(b) - sizeof(malloc_footer_chunk));
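The step 3 arithmetic is easiest to see with concrete numbers (a worked example, PAGE_SIZE assumed to be 4096, values illustrative):

    //address = 0x10FF0, bytes = 0x2020
    //left_padding = 0x10FF0 % 0x1000 = 0xFF0 (offset of address within its page)
    //pages = ((0x2020 + 0xFF0) / 0x1000) + 1 = (0x3010 / 0x1000) + 1 = 4
    //four pages starting at page_align(0x10FF0) = 0x10000 cover 0x10000..0x13FFF,
    //which contains the whole requested range 0x10FF0..0x1300F

Note that the offset must be taken from address itself: after the scan loop, page has been advanced in PAGE_SIZE steps from a page-aligned start and is therefore always page-aligned.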
@@ -98,13 +98,13 @@ bool paging::page_free_or_set(void* virt, void* physical){
     return false;
 }
 
-bool paging::identity_map(void* virt){
+bool paging::map(void* virt, void* physical){
     //The address must be page-aligned
     if(!page_aligned(virt)){
         return false;
     }
 
-    //Find the correct indexes inside the paging table for the physical address
+    //Find the correct indexes inside the paging table for the virtual address
     auto table = (reinterpret_cast<uintptr_t>(virt) >> 12) & 0x1FF;
     auto directory = (reinterpret_cast<uintptr_t>(virt) >> 21) & 0x1FF;
     auto directory_ptr = (reinterpret_cast<uintptr_t>(virt) >> 30) & 0x1FF;
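For reference, the shifts follow the x86-64 4-level paging layout; each level holds 512 entries, hence the 9-bit 0x1FF mask:

    //Index extraction from a virtual address:
    //  bits 12-20 -> table         (page table entry)
    //  bits 21-29 -> directory     (page directory entry)
    //  bits 30-38 -> directory_ptr (page directory pointer table entry)
    //  bits 39-47 select the PML4 entry, not extracted in this hunk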
@@ -137,16 +137,16 @@ bool paging::identity_map(void* virt){
     if(reinterpret_cast<uintptr_t>(pt[table]) & PRESENT){
         //If the page is already set to the correct value, return true
         //If the page is set to another value, return false
-        return reinterpret_cast<uintptr_t>(pt[table]) == (reinterpret_cast<uintptr_t>(virt) | (PRESENT | WRITEABLE));
+        return reinterpret_cast<uintptr_t>(pt[table]) == (reinterpret_cast<uintptr_t>(physical) | (PRESENT | WRITEABLE));
     }
 
-    //Identity map the physical address
-    pt[table] = reinterpret_cast<page_entry>(reinterpret_cast<uintptr_t>(virt) | (PRESENT | WRITEABLE));
+    //Map to the physical address
+    pt[table] = reinterpret_cast<page_entry>(reinterpret_cast<uintptr_t>(physical) | (PRESENT | WRITEABLE));
 
     return true;
 }
 
-bool paging::identity_map(void* virt, size_t pages){
+bool paging::map(void* virt, void* physical, size_t pages){
     //The address must be page-aligned
     if(!page_aligned(virt)){
         return false;
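Note that the overwrite check compares the whole entry, flags included, so remapping succeeds only when frame and flags both match. With a hypothetical frame at physical 0x200000:

    //pt[table] == 0x200000 | PRESENT | WRITEABLE -> true  (already mapped as requested)
    //pt[table] == 0x300000 | PRESENT | WRITEABLE -> false (mapped to another frame)
    //pt[table] == 0x200000 | PRESENT             -> false (same frame, different flags)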
@@ -155,18 +155,31 @@ bool paging::identity_map(void* virt, size_t pages){
     //To avoid mapping only a subset of the pages,
     //check if one of the pages is already mapped to another value
     for(size_t page = 0; page < pages; ++page){
-        auto addr = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(virt) + page * PAGE_SIZE);
+        auto virt_addr = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(virt) + page * PAGE_SIZE);
+        auto phys_addr = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(physical) + page * PAGE_SIZE);
 
-        if(!page_free_or_set(addr, addr)){
+        if(!page_free_or_set(virt_addr, phys_addr)){
             return false;
         }
     }
 
     //Map each page
     for(size_t page = 0; page < pages; ++page){
-        if(!identity_map(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(virt) + page * PAGE_SIZE))){
+        auto virt_addr = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(virt) + page * PAGE_SIZE);
+        auto phys_addr = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(physical) + page * PAGE_SIZE);
+
+        if(!map(virt_addr, phys_addr)){
             return false;
         }
     }
 
     return true;
 }
+
+bool paging::identity_map(void* virt){
+    return map(virt, virt);
+}
+
+bool paging::identity_map(void* virt, size_t pages){
+    return map(virt, virt, pages);
+}
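After the refactor, identity mapping is just the special case physical == virt. A usage sketch with hypothetical addresses (0xB8000 is the VGA text buffer, a typical identity-mapped region):

    //Identity-map one page: equivalent to map(p, p)
    paging::identity_map(reinterpret_cast<void*>(0xB8000));

    //Map four consecutive pages of virtual 0x400000 onto physical 0x200000
    paging::map(reinterpret_cast<void*>(0x400000), reinterpret_cast<void*>(0x200000), 4);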