Commit 47f19474 authored by Alekseev Andrey's avatar Alekseev Andrey
Browse files

without test module

parent 6b6db20d
Pipeline #100689 failed with stages
in 8 seconds
......@@ -37,9 +37,17 @@ static void* map_pages(void const* addr, size_t length, int additional_flags) {
return mmap( (void*) addr, length, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | additional_flags , -1, 0 );
}
/* Allocate a fresh memory region and initialize it as a single free block.
   `addr` is a placement hint; `query` is the requested payload capacity.
   Returns REGION_INVALID when the kernel refuses both mappings. */
static struct region alloc_region ( void const * addr, size_t query ) {
  /* Round the payload capacity up to a whole block, then to region granularity. */
  const size_t full_block_size = size_from_capacity((block_capacity) {query}).bytes;
  const size_t reg_size = region_actual_size(full_block_size);
  /* First try to map exactly at the hinted address without clobbering
     existing mappings; fall back to letting the kernel pick an address. */
  void* mapped_addr = map_pages(addr, reg_size, MAP_FIXED_NOREPLACE);
  if (mapped_addr == MAP_FAILED) mapped_addr = map_pages(addr, reg_size, 0);
  if (mapped_addr == MAP_FAILED) return REGION_INVALID;
  /* The whole region becomes one free block terminating the chain. */
  block_init(mapped_addr, (block_size) {reg_size}, NULL);
  /* Last field records whether the region landed at the hinted address
     (i.e. extends the heap contiguously). */
  return (struct region) {mapped_addr, reg_size, addr == mapped_addr};
}
static void* block_after( struct block_header const* block ) ;
......@@ -51,9 +59,17 @@ void* heap_init( size_t initial ) {
return region.addr;
}
/* Release all memory that was mapped for the heap. */

/* Walk the block chain starting at `bh` and unmap it region by region.
   Blocks carved out of one mmap-ed region are physically contiguous, so we
   coalesce runs of adjacent blocks back into whole regions before calling
   munmap: munmap requires the page-aligned region start, and the previous
   per-block recursive version passed misaligned addresses and rounded-up
   sizes that could tear pages out of neighbouring regions (it could also
   overflow the stack on long chains). */
void block_term(struct block_header* bh) {
  while (bh) {
    struct block_header* const region_start = bh;
    size_t region_size = size_from_capacity(bh->capacity).bytes;
    /* Accumulate every block that directly follows this one in memory. */
    while (bh->next &&
           (char*) bh + size_from_capacity(bh->capacity).bytes == (char*) bh->next) {
      bh = bh->next;
      region_size += size_from_capacity(bh->capacity).bytes;
    }
    /* Save the start of the next (non-contiguous) region before unmapping. */
    struct block_header* const next_region = bh->next;
    munmap(region_start, region_actual_size(region_size));
    bh = next_region;
  }
}

void heap_term( ) {
  block_term((struct block_header*) HEAP_START);
}
#define BLOCK_MIN_CAPACITY 24
......@@ -65,7 +81,16 @@ static bool block_splittable( struct block_header* restrict block, size_t query)
}
/* If `block` is large enough to hold `query` bytes plus another viable
   block, carve the tail off into a new free block. Returns true when a
   split actually happened. */
static bool split_if_too_big( struct block_header* block, size_t query ) {
  if (block == NULL) return false;
  if (!block_splittable(block, query)) return false;
  /* The remainder starts right after the first `query` payload bytes. */
  void* const remainder = block->contents + query;
  const block_size remainder_size = (block_size) {block->capacity.bytes - query};
  block_init(remainder, remainder_size, block->next);
  block->capacity.bytes = query;
  block->next = remainder;
  return true;
}
......@@ -74,6 +99,7 @@ static bool split_if_too_big( struct block_header* block, size_t query ) {
/* Address of the first byte just past `block`'s payload. */
static void* block_after( struct block_header const* block ) {
  return (void*) &block->contents[block->capacity.bytes];
}
static bool blocks_continuous (
struct block_header const* fst,
struct block_header const* snd ) {
......@@ -85,7 +111,10 @@ static bool mergeable(struct block_header const* restrict fst, struct block_head
}
/* Absorb `block`'s successor into `block` when the two are mergeable.
   Returns true iff a merge happened. */
static bool try_merge_with_next( struct block_header* block ) {
  struct block_header* const successor = block->next;
  if (successor == NULL) return false;
  if (!mergeable(block, successor)) return false;
  /* The successor's header and payload both become part of this block. */
  block->capacity.bytes += size_from_capacity(successor->capacity).bytes;
  block->next = successor->next;
  return true;
}
......@@ -96,28 +125,53 @@ struct block_search_result {
struct block_header* block;
};
/* Walk the chain from `block`, coalescing adjacent free blocks as we go.
   Returns the first free block big enough for `sz` (BSR_FOUND_GOOD_BLOCK),
   the last block of the chain when nothing fits (BSR_REACHED_END_NOT_FOUND),
   or BSR_CORRUPTED for an empty chain. */
static struct block_search_result find_good_or_last ( struct block_header* restrict block, size_t sz ) {
  if (block == NULL) return (struct block_search_result) {BSR_CORRUPTED};
  while (block) {
    /* Merge every mergeable successor into the current block first. */
    while (try_merge_with_next(block));
    if (block->is_free && block_is_big_enough(sz, block))
      return (struct block_search_result) {BSR_FOUND_GOOD_BLOCK, block};
    /* Stop on the last block so it can be reported to the caller. */
    if (!block->next) break;
    block = block->next;
  }
  return (struct block_search_result) {BSR_REACHED_END_NOT_FOUND, block};
}
/* Попробовать выделить память в куче начиная с блока `block` не пытаясь расширить кучу
Можно переиспользовать как только кучу расширили. */
static struct block_search_result try_memalloc_existing ( size_t query, struct block_header* block ) {
}
struct block_search_result bsr = find_good_or_last(block, query);
if (bsr.type) return bsr;
split_if_too_big(bsr.block, query);
bsr.block->is_free = false;
static struct block_header* grow_heap( struct block_header* restrict last, size_t query ) {
/* ??? */
return bsr;
}
/* Реализует основную логику malloc и возвращает заголовок выделенного блока */
/* Extend the heap with a fresh region, hinted to land right after `last`.
   Returns the block from which allocation should continue, or NULL when the
   chain is empty or the kernel refused to map a new region. */
static struct block_header* grow_heap( struct block_header* restrict last, size_t query ) {
  if (last == NULL) return NULL;
  /* Hint the new region directly after the current last block so it can be
     fused into a contiguous heap. */
  struct region reg = alloc_region(block_after(last), query);
  if (region_is_invalid(&reg)) return NULL;
  last->next = reg.addr;
  /* If the region landed adjacently, merge it into `last`; otherwise the
     fresh region's own first block is the continuation point. */
  return try_merge_with_next(last) ? last : last->next;
}
static struct block_header* alloc_block(struct block_header* block, size_t query) {
struct block_search_result bsr = try_memalloc_existing(query, block);
if (bsr.type == BSR_CORRUPTED) return NULL;
return !bsr.type ? bsr.block : alloc_block(grow_heap(bsr.block, query), query);
}
/* Core malloc logic: returns the header of the allocated block, or NULL. */
static struct block_header* memalloc( size_t query, struct block_header* heap_start) {
  /* Requests smaller than the minimum capacity are rounded up. */
  const size_t wanted = size_max(query, BLOCK_MIN_CAPACITY);
  return alloc_block(heap_start, wanted);
}
void* _malloc( size_t query ) {
......@@ -134,5 +188,5 @@ void _free( void* mem ) {
if (!mem) return ;
struct block_header* header = block_get_header( mem );
header->is_free = true;
/* ??? */
while (try_merge_with_next(header));
}
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment