#define _DEFAULT_SOURCE

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>   /* mmap, munmap, PROT_* and MAP_* flags */
#include <unistd.h>

#include "mem_internals.h"
#include "mem.h"
#include "util.h"

void debug_block( struct block_header* b, const char* fmt, ... );
void debug( const char* fmt, ... );

extern inline block_size size_from_capacity( block_capacity cap );
extern inline block_capacity capacity_from_size( block_size sz );

static bool block_is_big_enough( size_t query, struct block_header* block ) {
  return block->capacity.bytes >= query;
}

static size_t pages_count( size_t mem ) {
  return mem / getpagesize() + ((mem % getpagesize()) > 0);
}

static size_t round_pages( size_t mem ) {
  return getpagesize() * pages_count( mem );
}

static void block_init( void* restrict addr, block_size block_sz, void* restrict next ) {
  *((struct block_header*) addr) = (struct block_header) {
    .next = next,
    .capacity = capacity_from_size( block_sz ),
    .is_free = true
  };
}

static size_t region_actual_size( size_t query ) {
  return size_max( round_pages( query ), REGION_MIN_SIZE );
}

extern inline bool region_is_invalid( const struct region* r );

static void* map_pages( void const* addr, size_t length, int additional_flags ) {
  return mmap( (void*) addr, length, PROT_READ | PROT_WRITE,
               MAP_PRIVATE | MAP_ANONYMOUS | additional_flags, -1, 0 );
}

/* allocate a memory region and initialize it with a single free block */
static struct region alloc_region( void const* addr, size_t query ) {
  size_t region_size = region_actual_size( size_from_capacity( (block_capacity) {query} ).bytes );
  /* First try to map at exactly the requested address, then let the kernel
     choose. Note that MAP_FIXED silently replaces any existing mapping at
     `addr`; on Linux 4.17+ MAP_FIXED_NOREPLACE fails instead, which would
     make this fallback safer. */
  struct block_header* region = map_pages( addr, region_size, MAP_FIXED );
  if ( region == MAP_FAILED ) {
    region = map_pages( addr, region_size, 0 );
  }
  if ( region == MAP_FAILED ) {
    return REGION_INVALID;
  }
  block_init( region, (block_size) {region_size}, NULL );
  return (struct region) {
    .addr = region,
    .size = region_size,
    .extends = region == addr
  };
}

static void* block_after( struct block_header const* block );

void* heap_init( size_t initial ) {
  const struct region region = alloc_region( HEAP_START, initial );
  if ( region_is_invalid( &region ) ) return NULL;
  return region.addr;
}

#define BLOCK_MIN_CAPACITY 24

/* --- Splitting blocks (when the free block found is too big) --- */

static bool block_splittable( struct block_header* restrict block, size_t query ) {
  return block != NULL
      && block->is_free
      && query + offsetof( struct block_header, contents ) + BLOCK_MIN_CAPACITY <= block->capacity.bytes;
}

static bool split_if_too_big( struct block_header* block, size_t query ) {
  if ( block == NULL || !block_splittable( block, query ) ) {
    return false;
  }
  query = size_max( query, BLOCK_MIN_CAPACITY );
  size_t capacity = block->capacity.bytes;
  /* shrink this block to `query` bytes and carve a new free block out of the
     remainder */
  block->capacity = (block_capacity) {query};
  block_init( block_after( block ), (block_size) {capacity - query}, block->next );
  block->next = block_after( block );
  return true;
}

/* --- Merging adjacent free blocks --- */

static void* block_after( struct block_header const* block ) {
  return (void*) ( block->contents + block->capacity.bytes );
}

static bool blocks_continuous( struct block_header const* fst, struct block_header const* snd ) {
  return (void*) snd == block_after( fst );
}

static bool mergeable( struct block_header const* restrict fst, struct block_header const* restrict snd ) {
  return fst->is_free && snd->is_free && blocks_continuous( fst, snd );
}

static bool try_merge_with_next( struct block_header* block ) {
  struct block_header* next_block = block->next;
  if ( next_block == NULL || !mergeable( block, next_block ) ) {
    return false;
  }
  /* the merged block absorbs the neighbour's header as well as its contents */
  block->capacity.bytes += size_from_capacity( next_block->capacity ).bytes;
  block->next = next_block->next;
  return true;
}
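/*
 * Block layout sketch (an illustration; it assumes the usual layout from
 * mem_internals.h, where each block is a header immediately followed by
 * `capacity.bytes` bytes of contents):
 *
 *   before split_if_too_big(b, query):
 *     [hdr|................ capacity ................]
 *   after:
 *     [hdr|.... query ....][hdr'|.... remainder ....]
 *
 * try_merge_with_next is the inverse operation: a free block absorbs a
 * physically contiguous free successor, growing by
 * size_from_capacity(next->capacity).bytes, i.e. the successor's header
 * plus its contents.
 */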
/* --- ... if the heap is already big enough --- */

struct block_search_result {
  enum { BSR_FOUND_GOOD_BLOCK, BSR_REACHED_END_NOT_FOUND, BSR_CORRUPTED } type;
  struct block_header* block;
};

static struct block_search_result CORRUPTED = { BSR_CORRUPTED };

static struct block_search_result find_good_or_last( struct block_header* restrict block, size_t sz ) {
  if ( block == NULL ) {
    return CORRUPTED;
  }
  while ( true ) {
    /* coalesce free neighbours before testing this block's capacity */
    while ( try_merge_with_next( block ) ) { }
    if ( block->is_free && block_is_big_enough( size_max( sz, BLOCK_MIN_CAPACITY ), block ) ) {
      return (struct block_search_result) { .type = BSR_FOUND_GOOD_BLOCK, .block = block };
    }
    if ( block->next == NULL ) {
      return (struct block_search_result) { .type = BSR_REACHED_END_NOT_FOUND, .block = block };
    }
    block = block->next;
  }
}

/* Try to allocate memory in the heap starting from block `block`, without
   attempting to grow the heap. Can be reused as soon as the heap has been
   grown. */
static struct block_search_result try_memalloc_existing( size_t query, struct block_header* block ) {
  if ( block == NULL ) {
    return CORRUPTED;
  }
  struct block_search_result result = find_good_or_last( block, query );
  if ( result.type == BSR_FOUND_GOOD_BLOCK ) {
    split_if_too_big( result.block, query );
    result.block->is_free = false;
  }
  return result;
}

static struct block_header* grow_heap( struct block_header* restrict last, size_t query ) {
  if ( last == NULL ) return NULL;
  struct region region = alloc_region( block_after( last ), query );
  if ( region_is_invalid( &region ) ) {
    return NULL;
  }
  last->next = region.addr;
  /* if the new region is contiguous with the old heap, merge it into the last
     block so a single allocation can span the boundary */
  if ( region.extends && try_merge_with_next( last ) ) {
    return last;
  }
  return region.addr;
}

/* Implements the core malloc logic and returns the header of the allocated block. */
static struct block_header* memalloc( size_t query, struct block_header* heap_start ) {
  query = size_max( query, BLOCK_MIN_CAPACITY );
  struct block_search_result result = try_memalloc_existing( query, heap_start );
  switch ( result.type ) {
    case BSR_FOUND_GOOD_BLOCK: {
      return result.block;
    }
    case BSR_REACHED_END_NOT_FOUND: {
      result = try_memalloc_existing( query, grow_heap( result.block, query ) );
      if ( result.type == BSR_FOUND_GOOD_BLOCK ) {
        return result.block;
      }
      return NULL;
    }
    case BSR_CORRUPTED: {
      return NULL;
    }
  }
  return NULL; /* unreachable; keeps -Wreturn-type quiet */
}

void* _malloc( size_t query ) {
  struct block_header* const addr = memalloc( query, (struct block_header*) HEAP_START );
  if ( addr ) return addr->contents;
  else return NULL;
}

static struct block_header* block_get_header( void* contents ) {
  return (struct block_header*) ( ((uint8_t*) contents) - offsetof( struct block_header, contents ) );
}

void _free( void* mem ) {
  if ( !mem ) return;
  struct block_header* header = block_get_header( mem );
  header->is_free = true;
  while ( try_merge_with_next( header ) ) { }
}

/* release all memory that was mapped for the heap */
void heap_term() {
  struct block_header* l = (struct block_header*) HEAP_START;
  while ( l != NULL ) {
    /* each munmap must cover exactly one physically contiguous run of blocks,
       since separate regions need not be adjacent in the address space */
    size_t length = size_from_capacity( l->capacity ).bytes;
    struct block_header* r = l;
    while ( r->next != NULL && blocks_continuous( r, r->next ) ) {
      r = r->next;
      length += size_from_capacity( r->capacity ).bytes;
    }
    r = r->next;
    munmap( l, length );
    l = r;
  }
}
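/* A minimal smoke test of the public API above (heap_init, _malloc, _free,
   heap_term). This is a sketch, guarded by a hypothetical MEM_DEMO flag so
   it is not normally compiled into the library; build with -DMEM_DEMO to
   try it. */
#ifdef MEM_DEMO
int main( void ) {
  if ( heap_init( 4096 ) == NULL ) {
    fprintf( stderr, "heap_init failed\n" );
    return 1;
  }
  char* s = _malloc( 64 );
  if ( s != NULL ) {
    snprintf( s, 64, "hello from the custom heap" );
    puts( s );        /* the contents live inside an mmap'ed region */
    _free( s );       /* marks the block free and merges neighbours */
  }
  heap_term();        /* munmaps every contiguous run of blocks */
  return 0;
}
#endif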