/*
 * Copyright (C) 2006 Michael Brown <mbrown@fensystems.co.uk>.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 *
 * You can also choose to distribute this program under the terms of
 * the Unmodified Binary Distribution Licence (as given in the file
 * COPYING.UBDL), provided that you have satisfied its requirements.
 */

FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );

#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <strings.h>
#include <ipxe/io.h>
#include <ipxe/list.h>
#include <ipxe/init.h>
#include <ipxe/refcnt.h>
#include <ipxe/malloc.h>
#include <valgrind/memcheck.h>

/** @file
 *
 * Dynamic memory allocation
 *
 */

/** A free block of memory */
struct memory_block {
	/** Size of this block */
	size_t size;
	/** Padding
	 *
	 * This padding exists to cover the "count" field of a
	 * reference counter, in the common case where a reference
	 * counter is the first element of a dynamically-allocated
	 * object. It avoids clobbering the "count" field as soon as
	 * the memory is freed, and so allows for the possibility of
	 * detecting reference counting errors.
	 */
	char pad[ offsetof ( struct refcnt, count ) +
		  sizeof ( ( ( struct refcnt * ) NULL )->count ) ];
	/** List of free blocks */
	struct list_head list;
};

/** Minimum size of a free block: sizeof ( struct memory_block )
 * rounded up to the next power of two
 */
#define MIN_MEMBLOCK_SIZE \
	( ( size_t ) ( 1 << ( fls ( sizeof ( struct memory_block ) - 1 ) ) ) )
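
/* Worked example (illustrative figures only, not from any particular
 * build): if sizeof ( struct memory_block ) were 28 bytes, then
 * fls ( 28 - 1 ) would be 5 and MIN_MEMBLOCK_SIZE would be
 * ( 1 << 5 ) = 32. Rounding up to a power of two is what allows
 * ( MIN_MEMBLOCK_SIZE - 1 ) to be used as a bit mask in the size and
 * alignment calculations below.
 */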

/** A block of allocated memory complete with size information */
struct autosized_block {
	/** Size of this block */
	size_t size;
	/** Remaining data */
	char data[0];
};
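
/* Layout of an allocation made via malloc()/realloc() (see realloc()
 * below): the size field sits immediately before the data handed to
 * the caller, and free() recovers the block via container_of().
 *
 *     |<- size_t size ->|<- data ... ->|
 *     ^                 ^
 *     autosized_block   pointer returned to caller
 */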

/**
 * Address for zero-length memory blocks
 *
 * @c malloc(0) or @c realloc(ptr,0) will return the special value @c
 * NOWHERE. Calling @c free(NOWHERE) will have no effect.
 *
 * This is consistent with the ANSI C standard, which states that
 * "either NULL or a pointer suitable to be passed to free()" must be
 * returned in these cases. Using a special non-NULL value means that
 * the caller can take a NULL return value to indicate failure,
 * without first having to check for a requested size of zero.
 *
 * Code outside of malloc.c never needs to refer to the actual value
 * of @c NOWHERE; this is an internal definition.
 */
#define NOWHERE ( ( void * ) ~( ( intptr_t ) 0 ) )
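
/* Illustrative (non-compiled) consequence of the NOWHERE convention:
 *
 *     void *p = malloc ( 0 );   // returns NOWHERE, which is non-NULL
 *     assert ( p != NULL );     // NULL from malloc() always means failure
 *     free ( p );               // free(NOWHERE) is a no-op
 */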

/** List of free memory blocks */
static LIST_HEAD ( free_blocks );

/** Total amount of free memory */
size_t freemem;

/**
 * Heap size
 *
 * Currently fixed at 512kB.
 */
#define HEAP_SIZE ( 512 * 1024 )

/** The heap itself */
static char heap[HEAP_SIZE] __attribute__ (( aligned ( __alignof__(void *) )));

/**
 * Mark all blocks in free list as defined
 *
 */
static inline void valgrind_make_blocks_defined ( void ) {
	struct memory_block *block;

	/* Do nothing unless running under Valgrind */
	if ( RUNNING_ON_VALGRIND <= 0 )
		return;

	/* Traverse free block list, marking each block structure as
	 * defined. Some contortions are necessary to avoid errors
	 * from list_check().
	 */

	/* Mark block list itself as defined */
	VALGRIND_MAKE_MEM_DEFINED ( &free_blocks, sizeof ( free_blocks ) );

	/* Mark areas accessed by list_check() as defined */
	VALGRIND_MAKE_MEM_DEFINED ( &free_blocks.prev->next,
				    sizeof ( free_blocks.prev->next ) );
	VALGRIND_MAKE_MEM_DEFINED ( free_blocks.next,
				    sizeof ( *free_blocks.next ) );
	VALGRIND_MAKE_MEM_DEFINED ( &free_blocks.next->next->prev,
				    sizeof ( free_blocks.next->next->prev ) );

	/* Mark each block in list as defined */
	list_for_each_entry ( block, &free_blocks, list ) {

		/* Mark block as defined */
		VALGRIND_MAKE_MEM_DEFINED ( block, sizeof ( *block ) );

		/* Mark areas accessed by list_check() as defined */
		VALGRIND_MAKE_MEM_DEFINED ( block->list.next,
					    sizeof ( *block->list.next ) );
		VALGRIND_MAKE_MEM_DEFINED ( &block->list.next->next->prev,
				sizeof ( block->list.next->next->prev ) );
	}
}

/**
 * Mark all blocks in free list as inaccessible
 *
 */
static inline void valgrind_make_blocks_noaccess ( void ) {
	struct memory_block *block;
	struct memory_block *prev = NULL;

	/* Do nothing unless running under Valgrind */
	if ( RUNNING_ON_VALGRIND <= 0 )
		return;

	/* Traverse free block list, marking each block structure as
	 * inaccessible. Some contortions are necessary to avoid
	 * errors from list_check().
	 */

	/* Mark each block in list as inaccessible */
	list_for_each_entry ( block, &free_blocks, list ) {

		/* Mark previous block (if any) as inaccessible.
		 * (Current block will be accessed by list_check().)
		 */
		if ( prev )
			VALGRIND_MAKE_MEM_NOACCESS ( prev, sizeof ( *prev ) );
		prev = block;

		/* At the end of the list, list_check() will end up
		 * accessing the first list item. Temporarily mark
		 * this area as defined.
		 */
		VALGRIND_MAKE_MEM_DEFINED ( &free_blocks.next->prev,
					    sizeof ( free_blocks.next->prev ) );
	}
	/* Mark last block (if any) as inaccessible */
	if ( prev )
		VALGRIND_MAKE_MEM_NOACCESS ( prev, sizeof ( *prev ) );

	/* Mark as inaccessible the area that was temporarily marked
	 * as defined to avoid errors from list_check().
	 */
	VALGRIND_MAKE_MEM_NOACCESS ( &free_blocks.next->prev,
				     sizeof ( free_blocks.next->prev ) );

	/* Mark block list itself as inaccessible */
	VALGRIND_MAKE_MEM_NOACCESS ( &free_blocks, sizeof ( free_blocks ) );
}

/**
 * Check integrity of the blocks in the free list
 *
 */
static inline void check_blocks ( void ) {
	struct memory_block *block;
	struct memory_block *prev = NULL;

	/* Do nothing unless assertions are enabled */
	if ( ! ASSERTING )
		return;

	list_for_each_entry ( block, &free_blocks, list ) {

		/* Check that list structure is intact */
		list_check ( &block->list );

		/* Check that block size is not too small */
		assert ( block->size >= sizeof ( *block ) );
		assert ( block->size >= MIN_MEMBLOCK_SIZE );

		/* Check that block does not wrap beyond end of address space */
		assert ( ( ( void * ) block + block->size ) >
			 ( ( void * ) block ) );

		/* Check that blocks remain in ascending order, and
		 * that adjacent blocks have been merged.
		 */
		if ( prev ) {
			assert ( ( ( void * ) block ) > ( ( void * ) prev ) );
			assert ( ( ( void * ) block ) >
				 ( ( ( void * ) prev ) + prev->size ) );
		}
		prev = block;
	}
}

/**
 * Discard some cached data
 *
 * @ret discarded	Number of cached items discarded
 */
static unsigned int discard_cache ( void ) {
	struct cache_discarder *discarder;
	unsigned int discarded;

	for_each_table_entry ( discarder, CACHE_DISCARDERS ) {
		discarded = discarder->discard();
		if ( discarded )
			return discarded;
	}
	return 0;
}
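
#if 0
/* A minimal sketch of how a subsystem would plug into the
 * CACHE_DISCARDERS table, assuming the __cache_discarder() linker
 * table macro and CACHE_NORMAL cost from ipxe/malloc.h; the discarder
 * shown here is hypothetical.
 */
static unsigned int example_discard ( void ) {
	/* Free one cached item (if any exists) and return the number
	 * of items discarded; returning zero means "nothing left to
	 * discard".
	 */
	return 0;
}

struct cache_discarder example_discarder __cache_discarder ( CACHE_NORMAL ) = {
	.discard = example_discard,
};
#endif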

/**
 * Discard all cached data
 *
 */
static void discard_all_cache ( void ) {
	unsigned int discarded;

	do {
		discarded = discard_cache();
	} while ( discarded );
}

/**
 * Allocate a memory block
 *
 * @v size		Requested size
 * @v align		Physical alignment
 * @v offset		Offset from physical alignment
 * @ret ptr		Memory block, or NULL
 *
 * Allocates a memory block @b physically aligned as requested. No
 * guarantees are provided for the alignment of the virtual address.
 *
 * @c align must be a power of two. @c size may not be zero.
 */
void * alloc_memblock ( size_t size, size_t align, size_t offset ) {
	struct memory_block *block;
	size_t align_mask;
	size_t actual_size;
	size_t pre_size;
	size_t post_size;
	struct memory_block *pre;
	struct memory_block *post;
	void *ptr;

	/* Sanity checks */
	assert ( size != 0 );
	assert ( ( align == 0 ) || ( ( align & ( align - 1 ) ) == 0 ) );
	valgrind_make_blocks_defined();
	check_blocks();

	/* Round up size to multiple of MIN_MEMBLOCK_SIZE and
	 * calculate alignment mask.
	 */
	actual_size = ( ( size + MIN_MEMBLOCK_SIZE - 1 ) &
			~( MIN_MEMBLOCK_SIZE - 1 ) );
	assert ( actual_size >= size );
	align_mask = ( ( align - 1 ) | ( MIN_MEMBLOCK_SIZE - 1 ) );
	assert ( ( actual_size + align_mask ) > actual_size );

	DBGC2 ( &heap, "Allocating %#zx (aligned %#zx+%zx)\n",
		size, align, offset );
	while ( 1 ) {
		/* Search through blocks for the first one with enough space */
		list_for_each_entry ( block, &free_blocks, list ) {
			pre_size = ( ( offset - virt_to_phys ( block ) )
				     & align_mask );
			if ( block->size < ( pre_size + actual_size ) )
				continue;
			post_size = ( block->size - pre_size - actual_size );
			/* Split block into pre-block, block, and
			 * post-block. After this split, the "pre"
			 * block is the one currently linked into the
			 * free list.
			 */
			pre = block;
			block = ( ( ( void * ) pre ) + pre_size );
			post = ( ( ( void * ) block ) + actual_size );
			DBGC2 ( &heap, "[%p,%p) -> [%p,%p) + [%p,%p)\n", pre,
				( ( ( void * ) pre ) + pre->size ), pre, block,
				post, ( ( ( void * ) pre ) + pre->size ) );
			/* If there is a "post" block, add it in to
			 * the free list. Leak it if it is too small
			 * (which can happen only at the very end of
			 * the heap).
			 */
			if ( post_size >= MIN_MEMBLOCK_SIZE ) {
				VALGRIND_MAKE_MEM_UNDEFINED ( post,
							      sizeof ( *post ) );
				post->size = post_size;
				list_add ( &post->list, &pre->list );
			}
			/* Shrink "pre" block, leaving the main block
			 * isolated and no longer part of the free
			 * list.
			 */
			pre->size = pre_size;
			/* If there is no "pre" block, remove it from
			 * the list. Also remove it (i.e. leak it) if
			 * it is too small, which can happen only at
			 * the very start of the heap.
			 */
			if ( pre_size < MIN_MEMBLOCK_SIZE ) {
				list_del ( &pre->list );
				VALGRIND_MAKE_MEM_NOACCESS ( pre,
							     sizeof ( *pre ) );
			}
			/* Update total free memory */
			freemem -= actual_size;
			/* Return allocated block */
			DBGC2 ( &heap, "Allocated [%p,%p)\n", block,
				( ( ( void * ) block ) + size ) );
			ptr = block;
			VALGRIND_MAKE_MEM_UNDEFINED ( ptr, size );
			goto done;
		}

		/* Try discarding some cached data to free up memory */
		if ( ! discard_cache() ) {
			/* Nothing available to discard */
			DBGC ( &heap, "Failed to allocate %#zx (aligned "
			       "%#zx)\n", size, align );
			ptr = NULL;
			goto done;
		}
	}

 done:
	check_blocks();
	valgrind_make_blocks_noaccess();
	return ptr;
}
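
#if 0
/* A minimal (hypothetical) wrapper showing direct use of
 * alloc_memblock() to obtain a physically aligned buffer; iPXE's own
 * wrappers live in ipxe/malloc.h. Note that the caller must record
 * the requested size itself, since this allocation path stores no
 * size header.
 */
static void * example_alloc_aligned ( size_t size, size_t phys_align ) {
	return alloc_memblock ( size, phys_align, 0 );
}
#endif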

/**
 * Free a memory block
 *
 * @v ptr		Memory allocated by alloc_memblock(), or NULL
 * @v size		Size of the memory
 *
 * If @c ptr is NULL, no action is taken.
 */
void free_memblock ( void *ptr, size_t size ) {
	struct memory_block *freeing;
	struct memory_block *block;
	struct memory_block *tmp;
	size_t actual_size;
	ssize_t gap_before;
	ssize_t gap_after = -1;

	/* Allow for ptr==NULL */
	if ( ! ptr )
		return;
	VALGRIND_MAKE_MEM_NOACCESS ( ptr, size );

	/* Sanity checks */
	valgrind_make_blocks_defined();
	check_blocks();

	/* Round up size to match actual size that alloc_memblock()
	 * would have used.
	 */
	assert ( size != 0 );
	actual_size = ( ( size + MIN_MEMBLOCK_SIZE - 1 ) &
			~( MIN_MEMBLOCK_SIZE - 1 ) );
	freeing = ptr;
	VALGRIND_MAKE_MEM_UNDEFINED ( freeing, sizeof ( *freeing ) );
	DBGC2 ( &heap, "Freeing [%p,%p)\n",
		freeing, ( ( ( void * ) freeing ) + size ) );

	/* Check that this block does not overlap the free list */
	if ( ASSERTING ) {
		list_for_each_entry ( block, &free_blocks, list ) {
			if ( ( ( ( void * ) block ) <
			       ( ( void * ) freeing + actual_size ) ) &&
			     ( ( void * ) freeing <
			       ( ( void * ) block + block->size ) ) ) {
				DBGC ( &heap, "Double free of [%p,%p) "
				       "overlapping [%p,%p) detected from %p\n",
				       freeing,
				       ( ( ( void * ) freeing ) + size ), block,
				       ( ( void * ) block + block->size ),
				       __builtin_return_address ( 0 ) );
				assert ( 0 );
			}
		}
	}

	/* Insert/merge into free list */
	freeing->size = actual_size;
	list_for_each_entry_safe ( block, tmp, &free_blocks, list ) {
		/* Calculate gaps before and after the "freeing" block */
		gap_before = ( ( ( void * ) freeing ) -
			       ( ( ( void * ) block ) + block->size ) );
		gap_after = ( ( ( void * ) block ) -
			      ( ( ( void * ) freeing ) + freeing->size ) );
		/* Merge with immediately preceding block, if possible */
		if ( gap_before == 0 ) {
			DBGC2 ( &heap, "[%p,%p) + [%p,%p) -> [%p,%p)\n", block,
				( ( ( void * ) block ) + block->size ), freeing,
				( ( ( void * ) freeing ) + freeing->size ),
				block,
				( ( ( void * ) freeing ) + freeing->size ) );
			block->size += actual_size;
			list_del ( &block->list );
			VALGRIND_MAKE_MEM_NOACCESS ( freeing,
						     sizeof ( *freeing ) );
			freeing = block;
		}
		/* Stop processing as soon as we reach a following block */
		if ( gap_after >= 0 )
			break;
	}

	/* Insert before the immediately following block. If
	 * possible, merge the following block into the "freeing"
	 * block.
	 */
	DBGC2 ( &heap, "[%p,%p)\n",
		freeing, ( ( ( void * ) freeing ) + freeing->size ) );
	list_add_tail ( &freeing->list, &block->list );
	if ( gap_after == 0 ) {
		DBGC2 ( &heap, "[%p,%p) + [%p,%p) -> [%p,%p)\n", freeing,
			( ( ( void * ) freeing ) + freeing->size ), block,
			( ( ( void * ) block ) + block->size ), freeing,
			( ( ( void * ) block ) + block->size ) );
		freeing->size += block->size;
		list_del ( &block->list );
		VALGRIND_MAKE_MEM_NOACCESS ( block, sizeof ( *block ) );
	}

	/* Update free memory counter */
	freemem += actual_size;

	check_blocks();
	valgrind_make_blocks_noaccess();
}
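
#if 0
/* Illustrative (hypothetical) pairing of alloc_memblock() and
 * free_memblock(): the size passed when freeing must match the size
 * originally requested, since both sides round it up to a multiple of
 * MIN_MEMBLOCK_SIZE in the same way.
 */
static void example_pair ( void ) {
	void *buf = alloc_memblock ( 512, 16, 0 );

	if ( buf )
		free_memblock ( buf, 512 );
}
#endif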

/**
 * Reallocate memory
 *
 * @v old_ptr		Memory previously allocated by malloc(), or NULL
 * @v new_size		Requested size
 * @ret new_ptr		Allocated memory, or NULL
 *
 * Allocates memory with no particular alignment requirement. @c
 * new_ptr will be aligned to at least a multiple of sizeof(void*).
 * If @c old_ptr is non-NULL, then the contents of the newly allocated
 * memory will be the same as the contents of the previously allocated
 * memory, up to the minimum of the old and new sizes. The old memory
 * will be freed.
 *
 * If allocation fails, the previously allocated block is left
 * untouched and NULL is returned.
 *
 * Calling realloc() with a new size of zero is a valid way to free a
 * memory block.
 */
void * realloc ( void *old_ptr, size_t new_size ) {
	struct autosized_block *old_block;
	struct autosized_block *new_block;
	size_t old_total_size;
	size_t new_total_size;
	size_t old_size;
	void *new_ptr = NOWHERE;

	/* Allocate new memory if necessary. If allocation fails,
	 * return without touching the old block.
	 */
	if ( new_size ) {
		new_total_size = ( new_size +
				   offsetof ( struct autosized_block, data ) );
		new_block = alloc_memblock ( new_total_size, 1, 0 );
		if ( ! new_block )
			return NULL;
		new_block->size = new_total_size;
		VALGRIND_MAKE_MEM_NOACCESS ( &new_block->size,
					     sizeof ( new_block->size ) );
		new_ptr = &new_block->data;
		VALGRIND_MALLOCLIKE_BLOCK ( new_ptr, new_size, 0, 0 );
	}

	/* Copy across relevant part of the old data region (if any),
	 * then free it. Note that at this point either (a) new_ptr
	 * is valid, or (b) new_size is 0; either way, the memcpy() is
	 * valid.
	 */
	if ( old_ptr && ( old_ptr != NOWHERE ) ) {
		old_block = container_of ( old_ptr, struct autosized_block,
					   data );
		VALGRIND_MAKE_MEM_DEFINED ( &old_block->size,
					    sizeof ( old_block->size ) );
		old_total_size = old_block->size;
		assert ( old_total_size != 0 );
		old_size = ( old_total_size -
			     offsetof ( struct autosized_block, data ) );
		memcpy ( new_ptr, old_ptr,
			 ( ( old_size < new_size ) ? old_size : new_size ) );
		VALGRIND_FREELIKE_BLOCK ( old_ptr, 0 );
		free_memblock ( old_block, old_total_size );
	}

	if ( ASSERTED ) {
		DBGC ( &heap, "Possible memory corruption detected from %p\n",
		       __builtin_return_address ( 0 ) );
	}
	return new_ptr;
}
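
#if 0
/* Illustrative (hypothetical) pattern for growing a buffer with
 * realloc(): assign to a temporary first, so that the original buffer
 * is neither leaked nor lost when allocation fails.
 */
static int example_grow ( void **buf, size_t new_len ) {
	void *tmp = realloc ( *buf, new_len );

	if ( ! tmp )
		return -1;	/* *buf remains valid and owned by caller */
	*buf = tmp;
	return 0;
}
#endif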

/**
 * Allocate memory
 *
 * @v size		Requested size
 * @ret ptr		Memory, or NULL
 *
 * Allocates memory with no particular alignment requirement. @c ptr
 * will be aligned to at least a multiple of sizeof(void*).
 */
void * malloc ( size_t size ) {
	void *ptr;

	ptr = realloc ( NULL, size );
	if ( ASSERTED ) {
		DBGC ( &heap, "Possible memory corruption detected from %p\n",
		       __builtin_return_address ( 0 ) );
	}
	return ptr;
}

/**
 * Free memory
 *
 * @v ptr		Memory allocated by malloc(), or NULL
 *
 * Memory allocated with malloc_dma() cannot be freed with free(); it
 * must be freed with free_dma() instead.
 *
 * If @c ptr is NULL, no action is taken.
 */
void free ( void *ptr ) {

	realloc ( ptr, 0 );
	if ( ASSERTED ) {
		DBGC ( &heap, "Possible memory corruption detected from %p\n",
		       __builtin_return_address ( 0 ) );
	}
}

/**
 * Allocate cleared memory
 *
 * @v size		Requested size
 * @ret ptr		Allocated memory
 *
 * Allocate memory as per malloc(), and zero it.
 *
 * This function name is non-standard, but pretty intuitive.
 * zalloc(size) is always equivalent to calloc(1,size).
 */
void * zalloc ( size_t size ) {
	void *data;

	data = malloc ( size );
	if ( data )
		memset ( data, 0, size );
	if ( ASSERTED ) {
		DBGC ( &heap, "Possible memory corruption detected from %p\n",
		       __builtin_return_address ( 0 ) );
	}
	return data;
}
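
#if 0
/* Since zalloc(size) is equivalent to calloc(1,size), a hypothetical
 * calloc() could be layered on top of it; the only extra work is
 * guarding the nmemb*size multiplication against overflow.
 */
static void * example_calloc ( size_t nmemb, size_t size ) {
	if ( size && ( nmemb > ( ( ( size_t ) -1 ) / size ) ) )
		return NULL;	/* nmemb * size would overflow */
	return zalloc ( nmemb * size );
}
#endif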

/**
 * Add memory to allocation pool
 *
 * @v start		Start address
 * @v len		Length of memory
 *
 * Adds a block of memory [start,start+len) to the allocation pool.
 * This is a one-way operation; there is no way to reclaim this
 * memory.
 *
 * @c start must be aligned to at least a multiple of sizeof(void*).
 */
void mpopulate ( void *start, size_t len ) {
	/* Prevent free_memblock() from rounding up len beyond the end
	 * of what we were actually given...
	 */
	free_memblock ( start, ( len & ~( MIN_MEMBLOCK_SIZE - 1 ) ) );
}

/**
 * Initialise the heap
 *
 */
static void init_heap ( void ) {
	VALGRIND_MAKE_MEM_NOACCESS ( heap, sizeof ( heap ) );
	VALGRIND_MAKE_MEM_NOACCESS ( &free_blocks, sizeof ( free_blocks ) );
	mpopulate ( heap, sizeof ( heap ) );
}

/** Memory allocator initialisation function */
struct init_fn heap_init_fn __init_fn ( INIT_EARLY ) = {
	.initialise = init_heap,
};

/**
 * Discard all cached data on shutdown
 *
 */
static void shutdown_cache ( int booting __unused ) {
	discard_all_cache();
}

/** Memory allocator shutdown function */
struct startup_fn heap_startup_fn __startup_fn ( STARTUP_EARLY ) = {
	.shutdown = shutdown_cache,
};

#if 0
#include <stdio.h>
/**
 * Dump free block list
 *
 */
void mdumpfree ( void ) {
	struct memory_block *block;

	printf ( "Free block list:\n" );
	list_for_each_entry ( block, &free_blocks, list ) {
		printf ( "[%p,%p) (size %#zx)\n", block,
			 ( ( ( void * ) block ) + block->size ), block->size );
	}
}
#endif