diff --git a/kernel/src/virtual_allocator.cpp b/kernel/src/virtual_allocator.cpp
index e8696c07..474b6d2c 100644
--- a/kernel/src/virtual_allocator.cpp
+++ b/kernel/src/virtual_allocator.cpp
@@ -18,7 +18,6 @@ constexpr const size_t last_virtual_address = virtual_allocator::kernel_virtual_
 constexpr const size_t managed_space = last_virtual_address - first_virtual_address;
 constexpr const size_t unit = paging::PAGE_SIZE;
 
-size_t next_virtual_address = first_virtual_address;
 size_t allocated_pages = first_virtual_address / paging::PAGE_SIZE;
 
 struct static_bitmap {
@@ -74,6 +73,17 @@ struct static_bitmap {
         return 0;
     }
 
+    size_t free_word() const {
+        for(size_t w = 0; w < words; ++w){
+            if(data[w] == ~static_cast<size_t>(0)){
+                return w * bits_per_word;
+            }
+        }
+
+        //TODO Use an assert here
+        return 0;
+    }
+
     bool is_set(size_t bit) const {
         return data[word_offset(bit)] & bit_mask(bit);
     }
@@ -135,11 +145,6 @@ size_t level_size(size_t level){
     return size;
 }
 
-size_t get_free_block_index(size_t level){
-    auto& bitmap = bitmaps[level];
-    return bitmap.free_bit();
-}
-
 } //end of anonymous namespace
 
 void virtual_allocator::init(){
@@ -211,7 +216,7 @@ void free_up(size_t level, size_t index){
     }
 }
 
-uintptr_t compute_address(size_t level, size_t index){
+uintptr_t block_start(size_t level, size_t index){
     return first_virtual_address + index * level_size(level) * unit;
 }
 
@@ -219,31 +224,56 @@ size_t get_block_index(size_t address, size_t level){
     return (address - first_virtual_address) / (level_size(level) * unit);
 }
 
+void mark_used(size_t l, size_t index){
+    //The current level block is not free anymore
+    bitmaps[l].unset(index);
+
+    //Mark all sub blocks as taken
+    taken_down(l, index);
+
+    //Mark all up blocks as taken
+    taken_up(l, index);
+}
+
 size_t virtual_allocator::allocate(size_t pages){
-    //TODO Do something if not enough pages
+    //TODO Return 0 if not enough pages
     allocated_pages += pages;
 
     if(pages > max_block){
-        //TODO Special algorithm for big pages
-        return 0;
+        if(pages > max_block * static_bitmap::bits_per_word){
+            //That means we try to allocate more than 33M at the same time
+            //probably not a good idea
+            //TODO Implement it all the same
+            return 0;
+        } else {
+            auto l = bitmaps.size() - 1;
+            auto index = bitmaps[l].free_word();
+            auto address = block_start(l, index);
+
+            //TODO check also address + size
+            if(address >= last_virtual_address){
+                return 0;
+            }
+
+            //Mark all bits of the word as used
+            for(size_t b = 0; b < static_bitmap::bits_per_word; ++b){
+                mark_used(l, index + b);
+            }
+
+            return address;
+        }
     } else {
         auto l = level(pages);
-        auto index = get_free_block_index(l);
-        auto address = compute_address(l, index);
+        auto index = bitmaps[l].free_bit();
+        auto address = block_start(l, index);
 
-        if(address > last_virtual_address){
+        //TODO check also address + size
+        if(address >= last_virtual_address){
             return 0;
         }
 
-        //The current level block is not free anymore
-        bitmaps[l].unset(index);
-
-        //Mark all sub blocks as taken
-        taken_down(l, index);
-
-        //Mark all up blocks as taken
-        taken_up(l, index);
+        mark_used(l, index);
 
         return address;
     }
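
For context, here is a minimal standalone sketch of the word-level scan that the new free_word() performs. It is not the kernel's static_bitmap (whose full definition is outside this diff); the toy_bitmap type, its sizes, and its helpers are assumptions made for the example. The point it illustrates is the convention the diff relies on: a set bit means "free", so a word equal to ~0 marks bits_per_word consecutive free blocks that the new large-allocation branch of allocate() can claim in one pass.

// Illustrative only: a tiny bitmap using the same "set bit == free" convention
// as the allocator's static_bitmap. All names and sizes here are assumptions
// for the example, not the kernel's definitions.
#include <cstddef>
#include <cstdio>

struct toy_bitmap {
    static constexpr size_t bits_per_word = sizeof(size_t) * 8;
    static constexpr size_t words = 4; // 256 bits is enough for the demo
    size_t data[words];

    void set_all() {
        for(auto& w : data){
            w = ~static_cast<size_t>(0);
        }
    }

    void unset(size_t bit) {
        data[bit / bits_per_word] &= ~(static_cast<size_t>(1) << (bit % bits_per_word));
    }

    // Same idea as the new free_word(): find a word whose bits are all set,
    // i.e. bits_per_word consecutive free blocks, and return its first bit.
    size_t free_word() const {
        for(size_t w = 0; w < words; ++w){
            if(data[w] == ~static_cast<size_t>(0)){
                return w * bits_per_word;
            }
        }
        return 0; // the real code carries a TODO to assert here instead
    }
};

int main(){
    toy_bitmap bitmap;
    bitmap.set_all();
    bitmap.unset(3); // word 0 is no longer fully free

    // The scan skips word 0 and lands on word 1 (bit 64 on a 64-bit target).
    size_t index = bitmap.free_word();
    std::printf("first fully free word starts at bit %zu\n", index);

    // A caller would then claim the whole word, as allocate() does through
    // its mark_used() loop.
    for(size_t b = 0; b < toy_bitmap::bits_per_word; ++b){
        bitmap.unset(index + b);
    }
}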