[malloc] Tidy up debug output
[ipxe.git] / src / core / malloc.c
1 /*
2 * Copyright (C) 2006 Michael Brown <mbrown@fensystems.co.uk>.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License as
6 * published by the Free Software Foundation; either version 2 of the
7 * License, or any later version.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
17 * 02110-1301, USA.
18 */
19
20 FILE_LICENCE ( GPL2_OR_LATER );
21
22 #include <stddef.h>
23 #include <stdint.h>
24 #include <string.h>
25 #include <strings.h>
26 #include <ipxe/io.h>
27 #include <ipxe/list.h>
28 #include <ipxe/init.h>
29 #include <ipxe/refcnt.h>
30 #include <ipxe/malloc.h>
31 #include <valgrind/memcheck.h>
32
33 /** @file
34 *
35 * Dynamic memory allocation
36 *
37 */
38
/** A free block of memory
 *
 * Free blocks are stored in-place within the free memory itself and
 * linked together via the free_blocks list.
 */
struct memory_block {
	/** Size of this block */
	size_t size;
	/** Padding
	 *
	 * This padding exists to cover the "count" field of a
	 * reference counter, in the common case where a reference
	 * counter is the first element of a dynamically-allocated
	 * object.  It avoids clobbering the "count" field as soon as
	 * the memory is freed, and so allows for the possibility of
	 * detecting reference counting errors.
	 */
	char pad[ offsetof ( struct refcnt, count ) +
		  sizeof ( ( ( struct refcnt * ) NULL )->count ) ];
	/** List of free blocks */
	struct list_head list;
};
57
/** Minimum size (and hence alignment) of a memory block
 *
 * Rounded up to the smallest power of two at least as large as
 * struct memory_block, so that every free block is able to hold the
 * in-place block header.
 */
#define MIN_MEMBLOCK_SIZE \
	( ( size_t ) ( 1 << ( fls ( sizeof ( struct memory_block ) - 1 ) ) ) )
60
/** A block of allocated memory complete with size information */
struct autosized_block {
	/** Size of this block (including this header) */
	size_t size;
	/** Remaining data */
	char data[0];
};
68
/**
 * Address for zero-length memory blocks
 *
 * @c malloc(0) or @c realloc(ptr,0) will return the special value @c
 * NOWHERE.  Calling @c free(NOWHERE) will have no effect.
 *
 * This is consistent with the ANSI C standards, which state that
 * "either NULL or a pointer suitable to be passed to free()" must be
 * returned in these cases.  Using a special non-NULL value means that
 * the caller can take a NULL return value to indicate failure,
 * without first having to check for a requested size of zero.
 *
 * Code outside of malloc.c does not ever need to refer to the actual
 * value of @c NOWHERE; this is an internal definition.
 */
#define NOWHERE ( ( void * ) ~( ( intptr_t ) 0 ) )
85
/** List of free memory blocks */
static LIST_HEAD ( free_blocks );

/** Total amount of free memory */
size_t freemem;

/**
 * Heap size
 *
 * Currently fixed at 512kB.
 */
#define HEAP_SIZE ( 512 * 1024 )

/** The heap itself (aligned suitably for storing pointers) */
static char heap[HEAP_SIZE] __attribute__ (( aligned ( __alignof__(void *) )));
101
/**
 * Mark all blocks in free list as defined
 *
 * The free-list block structures are normally kept inaccessible to
 * Valgrind; this marks each block (and the areas touched by
 * list_check()) as defined so that the list can be safely walked.
 */
static inline void valgrind_make_blocks_defined ( void ) {
	struct memory_block *block;

	/* Skip the (relatively expensive) client requests when not
	 * actually running under Valgrind.
	 */
	if ( RUNNING_ON_VALGRIND <= 0 )
		return;

	/* Traverse free block list, marking each block structure as
	 * defined.  Some contortions are necessary to avoid errors
	 * from list_check().
	 */

	/* Mark block list itself as defined */
	VALGRIND_MAKE_MEM_DEFINED ( &free_blocks, sizeof ( free_blocks ) );

	/* Mark areas accessed by list_check() as defined */
	VALGRIND_MAKE_MEM_DEFINED ( &free_blocks.prev->next,
				    sizeof ( free_blocks.prev->next ) );
	VALGRIND_MAKE_MEM_DEFINED ( free_blocks.next,
				    sizeof ( *free_blocks.next ) );
	VALGRIND_MAKE_MEM_DEFINED ( &free_blocks.next->next->prev,
				    sizeof ( free_blocks.next->next->prev ) );

	/* Mark each block in list as defined */
	list_for_each_entry ( block, &free_blocks, list ) {

		/* Mark block as defined */
		VALGRIND_MAKE_MEM_DEFINED ( block, sizeof ( *block ) );

		/* Mark areas accessed by list_check() as defined */
		VALGRIND_MAKE_MEM_DEFINED ( block->list.next,
					    sizeof ( *block->list.next ) );
		VALGRIND_MAKE_MEM_DEFINED ( &block->list.next->next->prev,
				      sizeof ( block->list.next->next->prev ) );
	}
}
141
/**
 * Mark all blocks in free list as inaccessible
 *
 * Inverse of valgrind_make_blocks_defined(): re-hides the free-list
 * block structures from Valgrind once list manipulation is complete.
 */
static inline void valgrind_make_blocks_noaccess ( void ) {
	struct memory_block *block;
	struct memory_block *prev = NULL;

	/* Skip the client requests when not running under Valgrind */
	if ( RUNNING_ON_VALGRIND <= 0 )
		return;

	/* Traverse free block list, marking each block structure as
	 * inaccessible.  Some contortions are necessary to avoid
	 * errors from list_check().
	 */

	/* Mark each block in list as inaccessible */
	list_for_each_entry ( block, &free_blocks, list ) {

		/* Mark previous block (if any) as inaccessible. (Current
		 * block will be accessed by list_check().)
		 */
		if ( prev )
			VALGRIND_MAKE_MEM_NOACCESS ( prev, sizeof ( *prev ) );
		prev = block;

		/* At the end of the list, list_check() will end up
		 * accessing the first list item.  Temporarily mark
		 * this area as defined.
		 */
		VALGRIND_MAKE_MEM_DEFINED ( &free_blocks.next->prev,
					    sizeof ( free_blocks.next->prev ) );
	}
	/* Mark last block (if any) as inaccessible */
	if ( prev )
		VALGRIND_MAKE_MEM_NOACCESS ( prev, sizeof ( *prev ) );

	/* Mark as inaccessible the area that was temporarily marked
	 * as defined to avoid errors from list_check().
	 */
	VALGRIND_MAKE_MEM_NOACCESS ( &free_blocks.next->prev,
				     sizeof ( free_blocks.next->prev ) );

	/* Mark block list itself as inaccessible */
	VALGRIND_MAKE_MEM_NOACCESS ( &free_blocks, sizeof ( free_blocks ) );
}
188
189 /**
190 * Discard some cached data
191 *
192 * @ret discarded Number of cached items discarded
193 */
194 static unsigned int discard_cache ( void ) {
195 struct cache_discarder *discarder;
196 unsigned int discarded;
197
198 for_each_table_entry ( discarder, CACHE_DISCARDERS ) {
199 discarded = discarder->discard();
200 if ( discarded )
201 return discarded;
202 }
203 return 0;
204 }
205
/**
 * Discard all cached data
 *
 * Repeatedly invokes the cache discarders until no further data can
 * be discarded.
 */
static void discard_all_cache ( void ) {
	while ( discard_cache() != 0 ) {
		/* Keep going until nothing more is released */
	}
}
217
218 /**
219 * Allocate a memory block
220 *
221 * @v size Requested size
222 * @v align Physical alignment
223 * @v offset Offset from physical alignment
224 * @ret ptr Memory block, or NULL
225 *
226 * Allocates a memory block @b physically aligned as requested. No
227 * guarantees are provided for the alignment of the virtual address.
228 *
229 * @c align must be a power of two. @c size may not be zero.
230 */
231 void * alloc_memblock ( size_t size, size_t align, size_t offset ) {
232 struct memory_block *block;
233 size_t align_mask;
234 size_t pre_size;
235 ssize_t post_size;
236 struct memory_block *pre;
237 struct memory_block *post;
238 struct memory_block *ptr;
239
240 valgrind_make_blocks_defined();
241
242 /* Round up size to multiple of MIN_MEMBLOCK_SIZE and
243 * calculate alignment mask.
244 */
245 size = ( size + MIN_MEMBLOCK_SIZE - 1 ) & ~( MIN_MEMBLOCK_SIZE - 1 );
246 align_mask = ( align - 1 ) | ( MIN_MEMBLOCK_SIZE - 1 );
247
248 DBGC2 ( &heap, "Allocating %#zx (aligned %#zx+%zx)\n",
249 size, align, offset );
250 while ( 1 ) {
251 /* Search through blocks for the first one with enough space */
252 list_for_each_entry ( block, &free_blocks, list ) {
253 pre_size = ( ( offset - virt_to_phys ( block ) )
254 & align_mask );
255 post_size = ( block->size - pre_size - size );
256 if ( post_size >= 0 ) {
257 /* Split block into pre-block, block, and
258 * post-block. After this split, the "pre"
259 * block is the one currently linked into the
260 * free list.
261 */
262 pre = block;
263 block = ( ( ( void * ) pre ) + pre_size );
264 post = ( ( ( void * ) block ) + size );
265 DBGC2 ( &heap, "[%p,%p) -> [%p,%p) + [%p,%p)\n",
266 pre, ( ( ( void * ) pre ) + pre->size ),
267 pre, block, post,
268 ( ( ( void * ) pre ) + pre->size ) );
269 /* If there is a "post" block, add it in to
270 * the free list. Leak it if it is too small
271 * (which can happen only at the very end of
272 * the heap).
273 */
274 if ( (size_t) post_size >= MIN_MEMBLOCK_SIZE ) {
275 VALGRIND_MAKE_MEM_DEFINED ( post,
276 sizeof ( *post ) );
277 post->size = post_size;
278 list_add ( &post->list, &pre->list );
279 }
280 /* Shrink "pre" block, leaving the main block
281 * isolated and no longer part of the free
282 * list.
283 */
284 pre->size = pre_size;
285 /* If there is no "pre" block, remove it from
286 * the list. Also remove it (i.e. leak it) if
287 * it is too small, which can happen only at
288 * the very start of the heap.
289 */
290 if ( pre_size < MIN_MEMBLOCK_SIZE )
291 list_del ( &pre->list );
292 /* Update total free memory */
293 freemem -= size;
294 /* Return allocated block */
295 DBGC2 ( &heap, "Allocated [%p,%p)\n", block,
296 ( ( ( void * ) block ) + size ) );
297 ptr = block;
298 goto done;
299 }
300 }
301
302 /* Try discarding some cached data to free up memory */
303 if ( ! discard_cache() ) {
304 /* Nothing available to discard */
305 DBGC ( &heap, "Failed to allocate %#zx (aligned "
306 "%#zx)\n", size, align );
307 ptr = NULL;
308 goto done;
309 }
310 }
311
312 done:
313 valgrind_make_blocks_noaccess();
314 return ptr;
315 }
316
/**
 * Free a memory block
 *
 * @v ptr		Memory allocated by alloc_memblock(), or NULL
 * @v size		Size of the memory
 *
 * If @c ptr is NULL, no action is taken.
 */
void free_memblock ( void *ptr, size_t size ) {
	struct memory_block *freeing;
	struct memory_block *block;
	struct memory_block *tmp;
	ssize_t gap_before;
	/* -1 indicates "no following block found", which also covers
	 * the case of an empty free list (loop body never executes).
	 */
	ssize_t gap_after = -1;

	/* Allow for ptr==NULL */
	if ( ! ptr )
		return;

	valgrind_make_blocks_defined();

	/* Round up size to match actual size that alloc_memblock()
	 * would have used.
	 */
	size = ( size + MIN_MEMBLOCK_SIZE - 1 ) & ~( MIN_MEMBLOCK_SIZE - 1 );
	freeing = ptr;
	/* The block header is written in-place into the freed memory */
	VALGRIND_MAKE_MEM_DEFINED ( freeing, sizeof ( *freeing ) );
	freeing->size = size;
	DBGC2 ( &heap, "Freeing [%p,%p)\n",
		freeing, ( ( ( void * ) freeing ) + size ) );

	/* Insert/merge into free list */
	list_for_each_entry_safe ( block, tmp, &free_blocks, list ) {
		/* Calculate gaps before and after the "freeing" block */
		gap_before = ( ( ( void * ) freeing ) -
			       ( ( ( void * ) block ) + block->size ) );
		gap_after = ( ( ( void * ) block ) -
			      ( ( ( void * ) freeing ) + freeing->size ) );
		/* Merge with immediately preceding block, if possible */
		if ( gap_before == 0 ) {
			DBGC2 ( &heap, "[%p,%p) + [%p,%p) -> [%p,%p)\n", block,
				( ( ( void * ) block ) + block->size ), freeing,
				( ( ( void * ) freeing ) + freeing->size ),
				block,
				( ( ( void * ) freeing ) + freeing->size ) );
			block->size += size;
			list_del ( &block->list );
			freeing = block;
		}
		/* Stop processing as soon as we reach a following block */
		if ( gap_after >= 0 )
			break;
	}

	/* Insert before the immediately following block.  If
	 * possible, merge the following block into the "freeing"
	 * block.
	 */
	DBGC2 ( &heap, "[%p,%p)\n",
		freeing, ( ( ( void * ) freeing ) + freeing->size ) );
	list_add_tail ( &freeing->list, &block->list );
	if ( gap_after == 0 ) {
		DBGC2 ( &heap, "[%p,%p) + [%p,%p) -> [%p,%p)\n", freeing,
			( ( ( void * ) freeing ) + freeing->size ), block,
			( ( ( void * ) block ) + block->size ), freeing,
			( ( ( void * ) block ) + block->size ) );
		freeing->size += block->size;
		list_del ( &block->list );
	}

	/* Update free memory counter */
	freemem += size;

	valgrind_make_blocks_noaccess();
}
392
/**
 * Reallocate memory
 *
 * @v old_ptr		Memory previously allocated by malloc(), or NULL
 * @v new_size		Requested size
 * @ret new_ptr		Allocated memory, or NULL
 *
 * Allocates memory with no particular alignment requirement.  @c
 * new_ptr will be aligned to at least a multiple of sizeof(void*).
 * If @c old_ptr is non-NULL, then the contents of the newly allocated
 * memory will be the same as the contents of the previously allocated
 * memory, up to the minimum of the old and new sizes.  The old memory
 * will be freed.
 *
 * If allocation fails the previously allocated block is left
 * untouched and NULL is returned.
 *
 * Calling realloc() with a new size of zero is a valid way to free a
 * memory block.
 */
void * realloc ( void *old_ptr, size_t new_size ) {
	struct autosized_block *old_block;
	struct autosized_block *new_block;
	size_t old_total_size;
	size_t new_total_size;
	size_t old_size;
	void *new_ptr = NOWHERE;

	/* Allocate new memory if necessary.  If allocation fails,
	 * return without touching the old block.
	 */
	if ( new_size ) {
		/* Allocate space for the requested size plus the
		 * hidden autosized_block size header.
		 */
		new_total_size = ( new_size +
				   offsetof ( struct autosized_block, data ) );
		new_block = alloc_memblock ( new_total_size, 1, 0 );
		if ( ! new_block )
			return NULL;
		/* Record the total size in the header, keeping the
		 * header itself inaccessible to Valgrind outside of
		 * this function.
		 */
		VALGRIND_MAKE_MEM_UNDEFINED ( new_block, offsetof ( struct autosized_block, data ) );
		new_block->size = new_total_size;
		VALGRIND_MAKE_MEM_NOACCESS ( new_block, offsetof ( struct autosized_block, data ) );
		new_ptr = &new_block->data;
		VALGRIND_MALLOCLIKE_BLOCK ( new_ptr, new_size, 0, 0 );
	}

	/* Copy across relevant part of the old data region (if any),
	 * then free it.  Note that at this point either (a) new_ptr
	 * is valid, or (b) new_size is 0; either way, the memcpy() is
	 * valid.
	 */
	if ( old_ptr && ( old_ptr != NOWHERE ) ) {
		old_block = container_of ( old_ptr, struct autosized_block,
					   data );
		/* Temporarily expose the hidden header to read back
		 * the old total size.
		 */
		VALGRIND_MAKE_MEM_DEFINED ( old_block, offsetof ( struct autosized_block, data ) );
		old_total_size = old_block->size;
		old_size = ( old_total_size -
			     offsetof ( struct autosized_block, data ) );
		memcpy ( new_ptr, old_ptr,
			 ( ( old_size < new_size ) ? old_size : new_size ) );
		free_memblock ( old_block, old_total_size );
		VALGRIND_MAKE_MEM_NOACCESS ( old_block, offsetof ( struct autosized_block, data ) );
		VALGRIND_FREELIKE_BLOCK ( old_ptr, 0 );
	}

	return new_ptr;
}
458
459 /**
460 * Allocate memory
461 *
462 * @v size Requested size
463 * @ret ptr Memory, or NULL
464 *
465 * Allocates memory with no particular alignment requirement. @c ptr
466 * will be aligned to at least a multiple of sizeof(void*).
467 */
468 void * malloc ( size_t size ) {
469 return realloc ( NULL, size );
470 }
471
/**
 * Free memory
 *
 * @v ptr		Memory allocated by malloc(), or NULL
 *
 * Memory allocated with malloc_dma() cannot be freed with free(); it
 * must be freed with free_dma() instead.
 *
 * If @c ptr is NULL, no action is taken.
 */
void free ( void *ptr ) {
	/* realloc() with a zero size frees the block; it safely
	 * handles both NULL and the zero-length NOWHERE sentinel.
	 */
	realloc ( ptr, 0 );
}
485
/**
 * Allocate cleared memory
 *
 * @v size		Requested size
 * @ret ptr		Allocated memory
 *
 * Allocate memory as per malloc(), and zero it.
 *
 * This function name is non-standard, but pretty intuitive.
 * zalloc(size) is always equivalent to calloc(1,size)
 */
void * zalloc ( size_t size ) {
	void *data = malloc ( size );

	if ( ! data )
		return NULL;
	memset ( data, 0, size );
	return data;
}
505
506 /**
507 * Add memory to allocation pool
508 *
509 * @v start Start address
510 * @v end End address
511 *
512 * Adds a block of memory [start,end) to the allocation pool. This is
513 * a one-way operation; there is no way to reclaim this memory.
514 *
515 * @c start must be aligned to at least a multiple of sizeof(void*).
516 */
517 void mpopulate ( void *start, size_t len ) {
518 /* Prevent free_memblock() from rounding up len beyond the end
519 * of what we were actually given...
520 */
521 free_memblock ( start, ( len & ~( MIN_MEMBLOCK_SIZE - 1 ) ) );
522 }
523
/**
 * Initialise the heap
 *
 * Marks the entire static heap as inaccessible to Valgrind, then
 * hands it over to the allocation pool via mpopulate().
 */
static void init_heap ( void ) {
	VALGRIND_MAKE_MEM_NOACCESS ( heap, sizeof ( heap ) );
	mpopulate ( heap, sizeof ( heap ) );
}

/** Memory allocator initialisation function */
struct init_fn heap_init_fn __init_fn ( INIT_EARLY ) = {
	.initialise = init_heap,
};
537
/**
 * Discard all cached data on shutdown
 *
 * @v booting		Boot flag (unused)
 */
static void shutdown_cache ( int booting __unused ) {
	discard_all_cache();
}

/** Memory allocator shutdown function */
struct startup_fn heap_startup_fn __startup_fn ( STARTUP_EARLY ) = {
	.shutdown = shutdown_cache,
};
550
#if 0
#include <stdio.h>
/**
 * Dump free block list
 *
 * Debug-only helper (compiled out by default): prints the address
 * range and size of each block on the free list.
 */
void mdumpfree ( void ) {
	struct memory_block *block;

	printf ( "Free block list:\n" );
	list_for_each_entry ( block, &free_blocks, list ) {
		/* Use half-open interval notation [start,end) for
		 * consistency with the debug messages elsewhere in
		 * this file.
		 */
		printf ( "[%p,%p) (size %#zx)\n", block,
			 ( ( ( void * ) block ) + block->size ), block->size );
	}
}
#endif