mem.c 6.5 KB
Newer Older
Igor Zhirkov's avatar
Igor Zhirkov committed
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34
/* Feature-test macros must be defined before the first standard header,
   otherwise glibc ignores them (getpagesize, mmap flags depend on it). */
#define _DEFAULT_SOURCE

#include <assert.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

#include "mem_internals.h"
#include "mem.h"
#include "util.h"

void debug_block(struct block_header* b, const char* fmt, ... );
void debug(const char* fmt, ... );

extern inline block_size size_from_capacity( block_capacity cap );
extern inline block_capacity capacity_from_size( block_size sz );

static bool block_is_big_enough( size_t query, struct block_header* block ) {
  /* A block can serve the request iff its payload capacity covers it. */
  return query <= block->capacity.bytes;
}
static size_t pages_count( size_t mem ) {
  /* Number of whole pages needed to hold `mem` bytes (ceiling division). */
  const size_t page = (size_t) getpagesize();
  return mem / page + (mem % page ? 1 : 0);
}
static size_t round_pages( size_t mem ) {
  /* Round `mem` up to a whole multiple of the page size. */
  return pages_count( mem ) * getpagesize();
}

static void block_init( void* restrict addr, block_size block_sz, void* restrict next ) {
  /* Stamp a fresh, free block header at `addr` covering `block_sz` bytes
     in total (header + payload). */
  struct block_header* const header = addr;
  header->capacity = capacity_from_size( block_sz );
  header->next     = next;
  header->is_free  = true;
}

static size_t region_actual_size( size_t query ) {
  /* Regions are page-aligned and never smaller than REGION_MIN_SIZE. */
  return size_max( round_pages( query ), REGION_MIN_SIZE );
}

extern inline bool region_is_invalid( const struct region* r );

Nikita Kuznetsov's avatar
Nikita Kuznetsov committed
35
void* map_pages(void const* addr, size_t length, int additional_flags) {
Igor Zhirkov's avatar
Igor Zhirkov committed
36 37 38 39
  return mmap( (void*) addr, length, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | additional_flags , 0, 0 );
}

/*  Allocate a memory region and initialize it with a single free block. */
Nikita Kuznetsov's avatar
Nikita Kuznetsov committed
40 41
static struct region alloc_region(void const* addr, size_t query) {
  struct region allocated_region;
Nikita Kuznetsov's avatar
Nikita Kuznetsov committed
42
  allocated_region.addr = map_pages(addr, query, MAP_FIXED_NOREPLACE );
Nikita Kuznetsov's avatar
Nikita Kuznetsov committed
43 44 45 46 47 48 49 50 51 52 53 54

  size_t region_size = region_actual_size(query);

  if (allocated_region.addr == MAP_FAILED || allocated_region.addr == NULL) {
      allocated_region = (struct region) { map_pages(addr, query, 0), region_size, false };
  } else {
      allocated_region = (struct region) { allocated_region.addr, region_size, true };
  }

  block_init(allocated_region.addr, (block_size) { region_size }, NULL);

  return allocated_region;
Igor Zhirkov's avatar
Igor Zhirkov committed
55 56 57 58 59 60 61 62 63 64 65 66 67 68 69
}

static void* block_after( struct block_header const* block )         ;

void* heap_init( size_t initial ) {
  const struct region region = alloc_region( HEAP_START, initial );
  if ( region_is_invalid(&region) ) return NULL;

  return region.addr;
}

#define BLOCK_MIN_CAPACITY 24

/*  --- Splitting blocks (when a found free block is too big) --- */

Nikita Kuznetsov's avatar
Nikita Kuznetsov committed
70
static bool block_splittable(struct block_header* restrict block, size_t query) {
Igor Zhirkov's avatar
Igor Zhirkov committed
71 72 73
  return block-> is_free && query + offsetof( struct block_header, contents ) + BLOCK_MIN_CAPACITY <= block->capacity.bytes;
}

Nikita Kuznetsov's avatar
Nikita Kuznetsov committed
74 75
static bool split_if_too_big(struct block_header* block, size_t query) {
  if (block_splittable(block, query)) {
Nikita Kuznetsov's avatar
Nikita Kuznetsov committed
76
    const block_size new_size = { .bytes = block->capacity.bytes - (query + offsetof(struct block_header, contents)) };
Nikita Kuznetsov's avatar
Nikita Kuznetsov committed
77
    block->capacity.bytes = query;
Nikita Kuznetsov's avatar
Nikita Kuznetsov committed
78
    block_init(block_after(block), new_size, block->next);
Nikita Kuznetsov's avatar
Nikita Kuznetsov committed
79
    block->next = block_after(block);
Igor Zhirkov's avatar
Igor Zhirkov committed
80

Nikita Kuznetsov's avatar
Nikita Kuznetsov committed
81 82 83 84 85
    return  true;
  }

  return false;
}
Igor Zhirkov's avatar
Igor Zhirkov committed
86 87 88

/*  --- Merging adjacent free blocks --- */

Nikita Kuznetsov's avatar
Nikita Kuznetsov committed
89
static void* block_after(struct block_header const* block) {
Igor Zhirkov's avatar
Igor Zhirkov committed
90 91
  return  (void*) (block->contents + block->capacity.bytes);
}
Nikita Kuznetsov's avatar
Nikita Kuznetsov committed
92
static bool blocks_continuous (struct block_header const* fst,
                               struct block_header const* snd ) {
  /* True iff `snd` starts exactly where `fst` ends (no gap between them). */
  return block_after(fst) == (void*) snd;
}

static bool mergeable(struct block_header const* restrict fst, struct block_header const* restrict snd) {
  /* Two blocks may merge only if both are free and physically adjacent. */
  if (!fst->is_free || !snd->is_free) return false;
  return blocks_continuous(fst, snd);
}

Nikita Kuznetsov's avatar
Nikita Kuznetsov committed
101 102 103 104
static bool try_merge_with_next(struct block_header* block) {
  /* Absorb the immediately following block when both are free and
     adjacent. Returns true iff a merge happened. */
  struct block_header* const next = block->next;
  if (next == NULL || !mergeable(block, next)) return false;

  /* Grow by the neighbour's full footprint: its header plus payload. */
  block->capacity.bytes += size_from_capacity(next->capacity).bytes;
  block->next = next->next;
  return true;
}
Igor Zhirkov's avatar
Igor Zhirkov committed
111 112 113 114 115 116 117 118

/*  --- ... if the heap is big enough --- */

/* Outcome of a walk over the block list: either a fitting free block was
   found, the end was reached (block = last block seen), or the list is
   corrupted. */
struct block_search_result {
  enum {BSR_FOUND_GOOD_BLOCK, BSR_REACHED_END_NOT_FOUND, BSR_CORRUPTED} type;
  struct block_header* block;
};

Nikita Kuznetsov's avatar
Nikita Kuznetsov committed
119
static struct block_search_result find_good_or_last (struct block_header* restrict block, size_t sz) {
Nikita Kuznetsov's avatar
fixes  
Nikita Kuznetsov committed
120
  struct block_header* current_block;
Nikita Kuznetsov's avatar
Nikita Kuznetsov committed
121
  
Nikita Kuznetsov's avatar
fixes  
Nikita Kuznetsov committed
122
  while (block != NULL) {
Nikita Kuznetsov's avatar
Nikita Kuznetsov committed
123
    while (try_merge_with_next(block));
Nikita Kuznetsov's avatar
fixes  
Nikita Kuznetsov committed
124 125
    if (block->is_free && block_is_big_enough(sz, block)) {
      return (struct block_search_result){ .type = BSR_FOUND_GOOD_BLOCK, .block = block };
Nikita Kuznetsov's avatar
Nikita Kuznetsov committed
126
    }
Nikita Kuznetsov's avatar
fixes  
Nikita Kuznetsov committed
127 128
    current_block = block;
    block = block->next;
Nikita Kuznetsov's avatar
Nikita Kuznetsov committed
129
  }
Igor Zhirkov's avatar
Igor Zhirkov committed
130

Nikita Kuznetsov's avatar
Nikita Kuznetsov committed
131
  return (struct block_search_result) { .type=BSR_REACHED_END_NOT_FOUND, .block=current_block};
Igor Zhirkov's avatar
Igor Zhirkov committed
132 133 134 135 136
}

/*  Try to allocate memory in the heap, starting from `block`, without
    growing the heap. Reusable as soon as the heap has been extended. */
static struct block_search_result try_memalloc_existing ( size_t query, struct block_header* block ) {
Nikita Kuznetsov's avatar
Nikita Kuznetsov committed
137
  struct block_search_result result = find_good_or_last(block, query);
Igor Zhirkov's avatar
Igor Zhirkov committed
138

Nikita Kuznetsov's avatar
Nikita Kuznetsov committed
139 140 141 142
  if (result.type == BSR_FOUND_GOOD_BLOCK) {
    split_if_too_big(result.block, query);
    result.block->is_free = false;
  }
Igor Zhirkov's avatar
Igor Zhirkov committed
143

Nikita Kuznetsov's avatar
Nikita Kuznetsov committed
144 145 146 147 148 149
  return result;
}

/* Extend the heap with a fresh region placed right after `last`.
   Returns the block from which the caller should retry the allocation,
   or NULL if the OS refused to give us more memory. */
static struct block_header *grow_heap(struct block_header* restrict last, size_t query) {
  const struct region new_region = alloc_region(block_after(last), query);

  /* Without this check a failed mapping would be linked into the block
     list and dereferenced later. */
  if (region_is_invalid(&new_region)) return NULL;

  last->next = new_region.addr;

  /* If the new region landed directly after a free `last`, fuse them so a
     request larger than either part alone can still succeed. */
  if (try_merge_with_next(last)) return last;
  return last->next;
}

/*  Implements the core malloc logic; returns the header of the allocated block. */

Nikita Kuznetsov's avatar
Nikita Kuznetsov committed
160 161
static struct block_header* memalloc(size_t query, struct block_header* heap_start) {
  struct block_search_result result = try_memalloc_existing(query, heap_start);
Igor Zhirkov's avatar
Igor Zhirkov committed
162

Nikita Kuznetsov's avatar
Nikita Kuznetsov committed
163 164 165 166 167
  if (result.type != BSR_FOUND_GOOD_BLOCK) {
    result = try_memalloc_existing(query, grow_heap(result.block, query));
  }

  return result.block;
Igor Zhirkov's avatar
Igor Zhirkov committed
168 169
}

Nikita Kuznetsov's avatar
Nikita Kuznetsov committed
170 171 172 173 174 175 176 177
void* _malloc(size_t query) {
  /* Public entry point: returns a pointer to the payload, or NULL on
     allocation failure. */
  struct block_header* const header = memalloc(query, (struct block_header*) HEAP_START);
  if (header == NULL) return NULL;

  header->is_free = false;
  return header->contents;
}

180 181 182 183
static struct block_header* block_get_header(void* contents) {
  /* Step back from the payload pointer to the header that precedes it. */
  uint8_t* const raw = (uint8_t*) contents;
  return (struct block_header*) (raw - offsetof(struct block_header, contents));
}

Igor Zhirkov's avatar
Igor Zhirkov committed
184
void _free( void* mem ) {
Nikita Kuznetsov's avatar
Nikita Kuznetsov committed
185 186
  if (!mem) return;
  struct block_header* header = block_get_header(mem);
Igor Zhirkov's avatar
Igor Zhirkov committed
187
  header->is_free = true;
Nikita Kuznetsov's avatar
Nikita Kuznetsov committed
188 189
  while(try_merge_with_next(header));
}