/*
 * Copyright (C) 2006 Michael Brown <mbrown@fensystems.co.uk>.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 *
 * You can also choose to distribute this program under the terms of
 * the Unmodified Binary Distribution Licence (as given in the file
 * COPYING.UBDL), provided that you have satisfied its requirements.
 */

FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );

#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <strings.h>
#include <ipxe/io.h>
#include <ipxe/list.h>
#include <ipxe/init.h>
#include <ipxe/refcnt.h>
#include <ipxe/malloc.h>
#include <valgrind/memcheck.h>

/** @file
 *
 * Dynamic memory allocation
 *
 */

/** A free block of memory */
struct memory_block {
        /** Size of this block */
        size_t size;
        /** Padding
         *
         * This padding exists to cover the "count" field of a
         * reference counter, in the common case where a reference
         * counter is the first element of a dynamically-allocated
         * object.  It avoids clobbering the "count" field as soon as
         * the memory is freed, and so allows for the possibility of
         * detecting reference counting errors.
         */
        char pad[ offsetof ( struct refcnt, count ) +
                  sizeof ( ( ( struct refcnt * ) NULL )->count ) ];
        /** List of free blocks */
        struct list_head list;
};
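
/* A minimal sketch (added for illustration; not part of the
 * allocator): the common case described above, in which a reference
 * counter is the first member of a dynamically allocated object.
 * Because pad[] extends past the end of the "count" field, the free
 * list pointers written into a freed block are guaranteed not to
 * overlap such an object's "count" field.
 */
#if 0
struct example_object {
        /** Reference counter (conventionally the first member) */
        struct refcnt refcnt;
        /** Remaining object data */
        int value;
};
#endif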

/** Minimum size of a free memory block
 *
 * This is the smallest power of two large enough to hold a struct
 * memory_block, and is also the minimum alignment granularity of the
 * allocator.  For example, if sizeof ( struct memory_block ) were 24
 * bytes, then fls ( 23 ) == 5 and the minimum block size would be
 * ( 1 << 5 ) == 32 bytes; the exact value depends on the widths of
 * size_t and pointers in a given build.
 */
#define MIN_MEMBLOCK_SIZE \
        ( ( size_t ) ( 1 << ( fls ( sizeof ( struct memory_block ) - 1 ) ) ) )

/** A block of allocated memory complete with size information */
struct autosized_block {
        /** Size of this block */
        size_t size;
        /** Remaining data */
        char data[0];
};

/**
 * Address for zero-length memory blocks
 *
 * @c malloc(0) or @c realloc(ptr,0) will return the special value @c
 * NOWHERE.  Calling @c free(NOWHERE) will have no effect.
 *
 * This is consistent with the ANSI C standard, which states that
 * "either NULL or a pointer suitable to be passed to free()" must be
 * returned in these cases.  Using a special non-NULL value means that
 * the caller can take a NULL return value to indicate failure,
 * without first having to check for a requested size of zero.
 *
 * Code outside of malloc.c never needs to refer to the actual value
 * of @c NOWHERE; this is an internal definition.
 */
#define NOWHERE ( ( void * ) ~( ( intptr_t ) 0 ) )
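
/* A minimal sketch (added for illustration; not part of the
 * allocator): a zero-length allocation succeeds and returns NOWHERE
 * rather than NULL, so callers can treat any NULL return as a
 * genuine failure.
 */
#if 0
static void example_zero_length ( void ) {
        void *ptr;

        ptr = malloc ( 0 );
        assert ( ptr != NULL );  /* returns NOWHERE, not NULL */
        free ( ptr );            /* free(NOWHERE) has no effect */
}
#endif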

/** List of free memory blocks */
static LIST_HEAD ( free_blocks );

/** Total amount of free memory */
size_t freemem;

/** Total amount of used memory */
size_t usedmem;

/** Maximum amount of used memory */
size_t maxusedmem;

/**
 * Heap size
 *
 * Currently fixed at 512kB.
 */
#define HEAP_SIZE ( 512 * 1024 )

/** The heap itself */
static char heap[HEAP_SIZE] __attribute__ (( aligned ( __alignof__(void *) )));

/**
 * Mark all blocks in free list as defined
 *
 */
static inline void valgrind_make_blocks_defined ( void ) {
        struct memory_block *block;

        /* Do nothing unless running under Valgrind */
        if ( RUNNING_ON_VALGRIND <= 0 )
                return;

        /* Traverse free block list, marking each block structure as
         * defined.  Some contortions are necessary to avoid errors
         * from list_check().
         */

        /* Mark block list itself as defined */
        VALGRIND_MAKE_MEM_DEFINED ( &free_blocks, sizeof ( free_blocks ) );

        /* Mark areas accessed by list_check() as defined */
        VALGRIND_MAKE_MEM_DEFINED ( &free_blocks.prev->next,
                                    sizeof ( free_blocks.prev->next ) );
        VALGRIND_MAKE_MEM_DEFINED ( free_blocks.next,
                                    sizeof ( *free_blocks.next ) );
        VALGRIND_MAKE_MEM_DEFINED ( &free_blocks.next->next->prev,
                                    sizeof ( free_blocks.next->next->prev ) );

        /* Mark each block in list as defined */
        list_for_each_entry ( block, &free_blocks, list ) {

                /* Mark block as defined */
                VALGRIND_MAKE_MEM_DEFINED ( block, sizeof ( *block ) );

                /* Mark areas accessed by list_check() as defined */
                VALGRIND_MAKE_MEM_DEFINED ( block->list.next,
                                            sizeof ( *block->list.next ) );
                VALGRIND_MAKE_MEM_DEFINED ( &block->list.next->next->prev,
                                    sizeof ( block->list.next->next->prev ) );
        }
}

/**
 * Mark all blocks in free list as inaccessible
 *
 */
static inline void valgrind_make_blocks_noaccess ( void ) {
        struct memory_block *block;
        struct memory_block *prev = NULL;

        /* Do nothing unless running under Valgrind */
        if ( RUNNING_ON_VALGRIND <= 0 )
                return;

        /* Traverse free block list, marking each block structure as
         * inaccessible.  Some contortions are necessary to avoid
         * errors from list_check().
         */

        /* Mark each block in list as inaccessible */
        list_for_each_entry ( block, &free_blocks, list ) {

                /* Mark previous block (if any) as inaccessible.
                 * (Current block will be accessed by list_check().)
                 */
                if ( prev )
                        VALGRIND_MAKE_MEM_NOACCESS ( prev, sizeof ( *prev ) );
                prev = block;

                /* At the end of the list, list_check() will end up
                 * accessing the first list item.  Temporarily mark
                 * this area as defined.
                 */
                VALGRIND_MAKE_MEM_DEFINED ( &free_blocks.next->prev,
                                            sizeof ( free_blocks.next->prev ) );
        }
        /* Mark last block (if any) as inaccessible */
        if ( prev )
                VALGRIND_MAKE_MEM_NOACCESS ( prev, sizeof ( *prev ) );

        /* Mark as inaccessible the area that was temporarily marked
         * as defined to avoid errors from list_check().
         */
        VALGRIND_MAKE_MEM_NOACCESS ( &free_blocks.next->prev,
                                     sizeof ( free_blocks.next->prev ) );

        /* Mark block list itself as inaccessible */
        VALGRIND_MAKE_MEM_NOACCESS ( &free_blocks, sizeof ( free_blocks ) );
}

/**
 * Check integrity of the blocks in the free list
 *
 */
static inline void check_blocks ( void ) {
        struct memory_block *block;
        struct memory_block *prev = NULL;

        if ( ! ASSERTING )
                return;

        list_for_each_entry ( block, &free_blocks, list ) {

                /* Check that list structure is intact */
                list_check ( &block->list );

                /* Check that block size is not too small */
                assert ( block->size >= sizeof ( *block ) );
                assert ( block->size >= MIN_MEMBLOCK_SIZE );

                /* Check that block does not wrap beyond end of address space */
                assert ( ( ( void * ) block + block->size ) >
                         ( ( void * ) block ) );

                /* Check that blocks remain in ascending order, and
                 * that adjacent blocks have been merged.
                 */
                if ( prev ) {
                        assert ( ( ( void * ) block ) > ( ( void * ) prev ) );
                        assert ( ( ( void * ) block ) >
                                 ( ( ( void * ) prev ) + prev->size ) );
                }
                prev = block;
        }
}

/**
 * Discard some cached data
 *
 * @ret discarded	Number of cached items discarded
 */
static unsigned int discard_cache ( void ) {
        struct cache_discarder *discarder;
        unsigned int discarded;

        for_each_table_entry ( discarder, CACHE_DISCARDERS ) {
                discarded = discarder->discard();
                if ( discarded )
                        return discarded;
        }
        return 0;
}
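
/* A minimal sketch (added for illustration; not part of the
 * allocator): cache discarders register themselves in the
 * CACHE_DISCARDERS linker table.  The discard method frees some
 * cached data and returns the number of items discarded (zero if
 * nothing could be freed).  The names example_* are invented, and
 * __cache_discarder() is assumed to be the declaration macro from
 * ipxe/malloc.h.
 */
#if 0
static unsigned int example_discard ( void ) {
        /* Free one cached item, if any exists */
        if ( ! example_cache_empty() )
                return example_cache_evict_one();
        return 0;
}

/** Example cache discarder */
struct cache_discarder example_discarder __cache_discarder ( CACHE_NORMAL ) = {
        .discard = example_discard,
};
#endif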

/**
 * Discard all cached data
 *
 */
static void discard_all_cache ( void ) {
        unsigned int discarded;

        do {
                discarded = discard_cache();
        } while ( discarded );
}

/**
 * Allocate a memory block
 *
 * @v size		Requested size
 * @v align		Physical alignment
 * @v offset		Offset from physical alignment
 * @ret ptr		Memory block, or NULL
 *
 * Allocates a memory block @b physically aligned as requested.  No
 * guarantees are provided for the alignment of the virtual address.
 *
 * @c align must be a power of two.  @c size may not be zero.
 */
void * alloc_memblock ( size_t size, size_t align, size_t offset ) {
        struct memory_block *block;
        size_t align_mask;
        size_t actual_size;
        size_t pre_size;
        size_t post_size;
        struct memory_block *pre;
        struct memory_block *post;
        unsigned int discarded;
        void *ptr;

        /* Sanity checks */
        assert ( size != 0 );
        assert ( ( align == 0 ) || ( ( align & ( align - 1 ) ) == 0 ) );
        valgrind_make_blocks_defined();
        check_blocks();

        /* Round up size to multiple of MIN_MEMBLOCK_SIZE and
         * calculate alignment mask.
         */
        actual_size = ( ( size + MIN_MEMBLOCK_SIZE - 1 ) &
                        ~( MIN_MEMBLOCK_SIZE - 1 ) );
        if ( ! actual_size ) {
                /* The requested size is not permitted to be zero.  A
                 * zero result at this point indicates that either the
                 * original requested size was zero, or that unsigned
                 * integer overflow has occurred.
                 */
                ptr = NULL;
                goto done;
        }
        assert ( actual_size >= size );
        align_mask = ( ( align - 1 ) | ( MIN_MEMBLOCK_SIZE - 1 ) );

        DBGC2 ( &heap, "Allocating %#zx (aligned %#zx+%zx)\n",
                size, align, offset );
        while ( 1 ) {
                /* Search through blocks for the first one with enough space */
                list_for_each_entry ( block, &free_blocks, list ) {
                        pre_size = ( ( offset - virt_to_phys ( block ) )
                                     & align_mask );
                        if ( ( block->size < pre_size ) ||
                             ( ( block->size - pre_size ) < actual_size ) )
                                continue;
                        post_size = ( block->size - pre_size - actual_size );
                        /* Split block into pre-block, block, and
                         * post-block.  After this split, the "pre"
                         * block is the one currently linked into the
                         * free list.
                         */
                        pre = block;
                        block = ( ( ( void * ) pre ) + pre_size );
                        post = ( ( ( void * ) block ) + actual_size );
                        DBGC2 ( &heap, "[%p,%p) -> [%p,%p) + [%p,%p)\n", pre,
                                ( ( ( void * ) pre ) + pre->size ), pre, block,
                                post, ( ( ( void * ) pre ) + pre->size ) );
                        /* If there is a "post" block, add it in to
                         * the free list.  Leak it if it is too small
                         * (which can happen only at the very end of
                         * the heap).
                         */
                        if ( post_size >= MIN_MEMBLOCK_SIZE ) {
                                VALGRIND_MAKE_MEM_UNDEFINED ( post,
                                                              sizeof ( *post ) );
                                post->size = post_size;
                                list_add ( &post->list, &pre->list );
                        }
                        /* Shrink "pre" block, leaving the main block
                         * isolated and no longer part of the free
                         * list.
                         */
                        pre->size = pre_size;
                        /* If there is no "pre" block, remove it from
                         * the list.  Also remove it (i.e. leak it) if
                         * it is too small, which can happen only at
                         * the very start of the heap.
                         */
                        if ( pre_size < MIN_MEMBLOCK_SIZE ) {
                                list_del ( &pre->list );
                                VALGRIND_MAKE_MEM_NOACCESS ( pre,
                                                             sizeof ( *pre ) );
                        }
                        /* Update memory usage statistics */
                        freemem -= actual_size;
                        usedmem += actual_size;
                        if ( usedmem > maxusedmem )
                                maxusedmem = usedmem;
                        /* Return allocated block */
                        DBGC2 ( &heap, "Allocated [%p,%p)\n", block,
                                ( ( ( void * ) block ) + size ) );
                        ptr = block;
                        VALGRIND_MAKE_MEM_UNDEFINED ( ptr, size );
                        goto done;
                }

                /* Try discarding some cached data to free up memory */
                DBGC ( &heap, "Attempting discard for %#zx (aligned %#zx+%zx), "
                       "used %zdkB\n", size, align, offset, ( usedmem >> 10 ) );
                valgrind_make_blocks_noaccess();
                discarded = discard_cache();
                valgrind_make_blocks_defined();
                check_blocks();
                if ( ! discarded ) {
                        /* Nothing available to discard */
                        DBGC ( &heap, "Failed to allocate %#zx (aligned "
                               "%#zx)\n", size, align );
                        ptr = NULL;
                        goto done;
                }
        }

 done:
        check_blocks();
        valgrind_make_blocks_noaccess();
        return ptr;
}
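
/* A minimal sketch (added for illustration; not part of the
 * allocator): allocate a 512-byte buffer whose physical address lies
 * on a 2kB boundary, as might be needed for a hardware descriptor
 * ring.  The caller must remember the size in order to free the
 * block again.
 */
#if 0
static void example_aligned_alloc ( void ) {
        void *ring;

        ring = alloc_memblock ( 512, 2048, 0 );
        if ( ! ring )
                return;
        /* Physical (not virtual) alignment is what was requested */
        assert ( ( virt_to_phys ( ring ) & ( 2048 - 1 ) ) == 0 );
        /* ... use buffer ... */
        free_memblock ( ring, 512 );
}
#endif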

/**
 * Free a memory block
 *
 * @v ptr		Memory allocated by alloc_memblock(), or NULL
 * @v size		Size of the memory
 *
 * If @c ptr is NULL, no action is taken.
 */
void free_memblock ( void *ptr, size_t size ) {
        struct memory_block *freeing;
        struct memory_block *block;
        struct memory_block *tmp;
        size_t actual_size;
        ssize_t gap_before;
        ssize_t gap_after = -1;

        /* Allow for ptr==NULL */
        if ( ! ptr )
                return;
        VALGRIND_MAKE_MEM_NOACCESS ( ptr, size );

        /* Sanity checks */
        valgrind_make_blocks_defined();
        check_blocks();

        /* Round up size to match actual size that alloc_memblock()
         * would have used.
         */
        assert ( size != 0 );
        actual_size = ( ( size + MIN_MEMBLOCK_SIZE - 1 ) &
                        ~( MIN_MEMBLOCK_SIZE - 1 ) );
        freeing = ptr;
        VALGRIND_MAKE_MEM_UNDEFINED ( freeing, sizeof ( *freeing ) );
        DBGC2 ( &heap, "Freeing [%p,%p)\n",
                freeing, ( ( ( void * ) freeing ) + size ) );

        /* Check that this block does not overlap the free list */
        if ( ASSERTING ) {
                list_for_each_entry ( block, &free_blocks, list ) {
                        if ( ( ( ( void * ) block ) <
                               ( ( void * ) freeing + actual_size ) ) &&
                             ( ( void * ) freeing <
                               ( ( void * ) block + block->size ) ) ) {
                                assert ( 0 );
                                DBGC ( &heap, "Double free of [%p,%p) "
                                       "overlapping [%p,%p) detected from %p\n",
                                       freeing,
                                       ( ( ( void * ) freeing ) + size ), block,
                                       ( ( void * ) block + block->size ),
                                       __builtin_return_address ( 0 ) );
                        }
                }
        }

        /* Insert/merge into free list */
        freeing->size = actual_size;
        list_for_each_entry_safe ( block, tmp, &free_blocks, list ) {
                /* Calculate gaps before and after the "freeing" block */
                gap_before = ( ( ( void * ) freeing ) -
                               ( ( ( void * ) block ) + block->size ) );
                gap_after = ( ( ( void * ) block ) -
                              ( ( ( void * ) freeing ) + freeing->size ) );
                /* Merge with immediately preceding block, if possible */
                if ( gap_before == 0 ) {
                        DBGC2 ( &heap, "[%p,%p) + [%p,%p) -> [%p,%p)\n", block,
                                ( ( ( void * ) block ) + block->size ), freeing,
                                ( ( ( void * ) freeing ) + freeing->size ),
                                block,
                                ( ( ( void * ) freeing ) + freeing->size ) );
                        block->size += actual_size;
                        list_del ( &block->list );
                        VALGRIND_MAKE_MEM_NOACCESS ( freeing,
                                                     sizeof ( *freeing ) );
                        freeing = block;
                }
                /* Stop processing as soon as we reach a following block */
                if ( gap_after >= 0 )
                        break;
        }

        /* Insert before the immediately following block.  If
         * possible, merge the following block into the "freeing"
         * block.
         */
        DBGC2 ( &heap, "[%p,%p)\n",
                freeing, ( ( ( void * ) freeing ) + freeing->size ) );
        list_add_tail ( &freeing->list, &block->list );
        if ( gap_after == 0 ) {
                DBGC2 ( &heap, "[%p,%p) + [%p,%p) -> [%p,%p)\n", freeing,
                        ( ( ( void * ) freeing ) + freeing->size ), block,
                        ( ( ( void * ) block ) + block->size ), freeing,
                        ( ( ( void * ) block ) + block->size ) );
                freeing->size += block->size;
                list_del ( &block->list );
                VALGRIND_MAKE_MEM_NOACCESS ( block, sizeof ( *block ) );
        }

        /* Update memory usage statistics */
        freemem += actual_size;
        usedmem -= actual_size;

        check_blocks();
        valgrind_make_blocks_noaccess();
}

/**
 * Reallocate memory
 *
 * @v old_ptr		Memory previously allocated by malloc(), or NULL
 * @v new_size		Requested size
 * @ret new_ptr		Allocated memory, or NULL
 *
 * Allocates memory with no particular alignment requirement.  @c
 * new_ptr will be aligned to at least a multiple of sizeof(void*).
 * If @c old_ptr is non-NULL, then the contents of the newly allocated
 * memory will be the same as the contents of the previously allocated
 * memory, up to the minimum of the old and new sizes.  The old memory
 * will be freed.
 *
 * If allocation fails, the previously allocated block is left
 * untouched and NULL is returned.
 *
 * Calling realloc() with a new size of zero is a valid way to free a
 * memory block.
 */
void * realloc ( void *old_ptr, size_t new_size ) {
        struct autosized_block *old_block;
        struct autosized_block *new_block;
        size_t old_total_size;
        size_t new_total_size;
        size_t old_size;
        void *new_ptr = NOWHERE;

        /* Allocate new memory if necessary.  If allocation fails,
         * return without touching the old block.
         */
        if ( new_size ) {
                new_total_size = ( new_size +
                                   offsetof ( struct autosized_block, data ) );
                if ( new_total_size < new_size )
                        return NULL;
                new_block = alloc_memblock ( new_total_size, 1, 0 );
                if ( ! new_block )
                        return NULL;
                new_block->size = new_total_size;
                VALGRIND_MAKE_MEM_NOACCESS ( &new_block->size,
                                             sizeof ( new_block->size ) );
                new_ptr = &new_block->data;
                VALGRIND_MALLOCLIKE_BLOCK ( new_ptr, new_size, 0, 0 );
        }

        /* Copy across relevant part of the old data region (if any),
         * then free it.  Note that at this point either (a) new_ptr
         * is valid, or (b) new_size is 0; either way, the memcpy() is
         * valid.
         */
        if ( old_ptr && ( old_ptr != NOWHERE ) ) {
                old_block = container_of ( old_ptr, struct autosized_block,
                                           data );
                VALGRIND_MAKE_MEM_DEFINED ( &old_block->size,
                                            sizeof ( old_block->size ) );
                old_total_size = old_block->size;
                assert ( old_total_size != 0 );
                old_size = ( old_total_size -
                             offsetof ( struct autosized_block, data ) );
                memcpy ( new_ptr, old_ptr,
                         ( ( old_size < new_size ) ? old_size : new_size ) );
                VALGRIND_FREELIKE_BLOCK ( old_ptr, 0 );
                free_memblock ( old_block, old_total_size );
        }

        if ( ASSERTED ) {
                DBGC ( &heap, "Possible memory corruption detected from %p\n",
                       __builtin_return_address ( 0 ) );
        }
        return new_ptr;
}
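
/* A minimal sketch (added for illustration; not part of the
 * allocator): since realloc() leaves the original block untouched on
 * failure, growing a buffer via a temporary pointer avoids leaking
 * the original allocation.  ENOMEM would come from errno.h, which
 * this file does not itself include.
 */
#if 0
static int example_grow ( char **buf, size_t new_len ) {
        char *tmp;

        tmp = realloc ( *buf, new_len );
        if ( ! tmp )
                return -ENOMEM;
        *buf = tmp;
        return 0;
}
#endif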

/**
 * Allocate memory
 *
 * @v size		Requested size
 * @ret ptr		Memory, or NULL
 *
 * Allocates memory with no particular alignment requirement.  @c ptr
 * will be aligned to at least a multiple of sizeof(void*).
 */
void * malloc ( size_t size ) {
        void *ptr;

        ptr = realloc ( NULL, size );
        if ( ASSERTED ) {
                DBGC ( &heap, "Possible memory corruption detected from %p\n",
                       __builtin_return_address ( 0 ) );
        }
        return ptr;
}

/**
 * Free memory
 *
 * @v ptr		Memory allocated by malloc(), or NULL
 *
 * Memory allocated with malloc_dma() cannot be freed with free(); it
 * must be freed with free_dma() instead.
 *
 * If @c ptr is NULL, no action is taken.
 */
void free ( void *ptr ) {

        realloc ( ptr, 0 );
        if ( ASSERTED ) {
                DBGC ( &heap, "Possible memory corruption detected from %p\n",
                       __builtin_return_address ( 0 ) );
        }
}

/**
 * Allocate cleared memory
 *
 * @v size		Requested size
 * @ret ptr		Allocated memory, or NULL
 *
 * Allocate memory as per malloc(), and zero it.
 *
 * This function name is non-standard, but pretty intuitive.
 * zalloc(size) is always equivalent to calloc(1,size).
 */
void * zalloc ( size_t size ) {
        void *data;

        data = malloc ( size );
        if ( data )
                memset ( data, 0, size );
        if ( ASSERTED ) {
                DBGC ( &heap, "Possible memory corruption detected from %p\n",
                       __builtin_return_address ( 0 ) );
        }
        return data;
}

/**
 * Add memory to allocation pool
 *
 * @v start		Start address
 * @v len		Length of memory
 *
 * Adds a block of memory [start,start+len) to the allocation pool.
 * This is a one-way operation; there is no way to reclaim this
 * memory.
 *
 * @c start must be aligned to at least a multiple of sizeof(void*).
 */
void mpopulate ( void *start, size_t len ) {

        /* Prevent free_memblock() from rounding up len beyond the end
         * of what we were actually given...
         */
        len &= ~( MIN_MEMBLOCK_SIZE - 1 );

        /* Add to allocation pool */
        free_memblock ( start, len );

        /* Fix up memory usage statistics */
        usedmem += len;
}
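
/* A minimal sketch (added for illustration; not part of the
 * allocator): a platform could donate an additional memory region to
 * the allocation pool at initialisation time.  The region below is
 * invented for illustration; note that mpopulate() is one-way, so
 * the region must remain valid for the lifetime of the heap.
 */
#if 0
static char extra_heap[ 64 * 1024 ]
        __attribute__ (( aligned ( __alignof__ ( void * ) ) ));

static void example_populate ( void ) {
        mpopulate ( extra_heap, sizeof ( extra_heap ) );
}
#endif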

/**
 * Initialise the heap
 *
 */
static void init_heap ( void ) {
        VALGRIND_MAKE_MEM_NOACCESS ( heap, sizeof ( heap ) );
        VALGRIND_MAKE_MEM_NOACCESS ( &free_blocks, sizeof ( free_blocks ) );
        mpopulate ( heap, sizeof ( heap ) );
}

/** Memory allocator initialisation function */
struct init_fn heap_init_fn __init_fn ( INIT_EARLY ) = {
        .initialise = init_heap,
};

/**
 * Discard all cached data on shutdown
 *
 */
static void shutdown_cache ( int booting __unused ) {
        discard_all_cache();
        DBGC ( &heap, "Maximum heap usage %zdkB\n", ( maxusedmem >> 10 ) );
}

/** Memory allocator shutdown function */
struct startup_fn heap_startup_fn __startup_fn ( STARTUP_EARLY ) = {
        .name = "heap",
        .shutdown = shutdown_cache,
};

#if 0
#include <stdio.h>
/**
 * Dump free block list
 *
 */
void mdumpfree ( void ) {
        struct memory_block *block;

        printf ( "Free block list:\n" );
        list_for_each_entry ( block, &free_blocks, list ) {
                printf ( "[%p,%p) (size %#zx)\n", block,
                         ( ( ( void * ) block ) + block->size ), block->size );
        }
}
#endif