Changeset 80f2885b in rtems
- Timestamp: 05/20/05 19:15:41
- Branches: 4.10, 4.11, 4.8, 4.9, 5, master
- Children: 5c1af4c
- Parents: 207a979
- Files: 3 added, 17 edited
c/src/ada/rtems.adb
r207a979 → r80f2885b

           Size_Base'Unchecked_Access
        );
-       Size := SIZE_Base;
+       Size := Size_Base;

     end Region_Get_Segment_Size;

+    procedure Region_Resize_Segment (
+       ID       : in     RTEMS.ID;
+       Segment  : in     RTEMS.Address;
+       Size     : in     RTEMS.Unsigned32;
+       Old_Size :    out RTEMS.Unsigned32;
+       Result   :    out RTEMS.Status_Codes
+    ) is
+       function Region_Resize_Segment_Base (
+          ID       : RTEMS.ID;
+          Segment  : RTEMS.Address;
+          Size     : RTEMS.Unsigned32;
+          Old_Size : access RTEMS.Unsigned32
+       ) return RTEMS.Status_Codes;
+       pragma Import (C, Region_Resize_Segment_Base,
+          "rtems_region_get_segment_size");
+       Old_Size_Base : aliased RTEMS.Unsigned32;
+    begin
+
+       Result := Region_Resize_Segment_Base (
+          ID,
+          Segment,
+          Size,
+          Size_Base'Unchecked_Access
+       );
+       Old_Size := Old_Size_Base;
+
+    end Region_Resize_Segment;
+
     procedure Region_Return_Segment (
        ID      : in     RTEMS.ID;
c/src/ada/rtems.ads
r207a979 → r80f2885b

     );

+    procedure Region_Resize_Segment (
+       ID       : in     RTEMS.ID;
+       Segment  : in     RTEMS.Address;
+       Old_Size : in     RTEMS.Unsigned32;
+       Size     :    out RTEMS.Unsigned32;
+       Result   :    out RTEMS.Status_Codes
+    );
+
     procedure Region_Return_Segment (
        ID      : in     RTEMS.ID;
cpukit/ChangeLog
r207a979 → r80f2885b

+2005-05-14  Sergei Organov <osv@topconrd.ru>
+
+	PR 746/rtems
+	Optimize realloc(). The problem is that realloc() can neither grow
+	nor shrink efficiently the current memory region without support
+	from underlying heap/region modules. The patch introduces one new
+	routine for each of heap and region modules, _Heap_Resize_block(),
+	and rtems_region_resize_segment(), respectively, and uses the
+	latter to optimize realloc().
+
+	The implementation of _Heap_Resize_block() lead to changing of the
+	heap allocation strategy: now the heap manager, when splits larger
+	free block into used and new free parts, makes the first part of
+	the block used, not the last one as it was before. Due to this new
+	strategy, _Heap_Resize_block() never needs to change the user
+	pointer.
+
+	Caveat: unlike previous heap implementation, first few bytes of
+	the contents of the memory allocated from the heap are now almost
+	never all zero. This can trigger bugs in client code that have not
+	been visible before this patch.
+
+	* libcsupport/src/malloc.c (realloc): try to resize segment in
+	place using new rtems_region_resize_segment() routine before
+	falling back to the malloc()/free() method.
+	* score/src/heap.c:
+	(_Heap_Initialize): change initial heap layout to reflect new
+	allocation strategy of using of the lower part of a previously
+	free block when splitting it for the purpose of allocation.
+	(_Heap_Block_allocate): when split, make the lower part used, and
+	leave the upper part free. Return type changed from Heap_Block* to
+	uint32_t.
+	* score/include/rtems/score/heap.h:
+	(Heap_Statistics): added 'resizes' field.
+	(Heap_Resize_status): new enum.
+	(_Heap_Resize_block): new routine.
+	(_Heap_Block_allocate): return type changed from Heap_Block* to
+	uint32_t.
+	* score/src/heapwalk.c: reflect new heap layout in checks.
+	* score/src/heapsizeofuserarea.c: more assertions added.
+	* score/src/heapresizeblock.c: new file.
+	(_Heap_Resize_block): new routine.
+	* score/src/heapfree.c: reverse the checks _Heap_Is_block_in() and
+	_Heap_Is_prev_used() on entry to be in this order.
+	* score/src/heapallocate.c, score/src/heapallocatealigned.c:
+	ignore return value of _Heap_Block_allocate().
+	* score/Makefile.am (HEAP_C_FILES): added src/heapresizeblock.c.
+	* rtems/include/rtems/rtems/region.h:
+	(rtems_region_resize_segment): new interface routine.
+	(_Region_Process_queue): new internal routine called from
+	rtems_region_resize_segment() and rtems_region_return_segment().
+	* rtems/src/regionreturnsegment.c: move queue management code into
+	the new internal routine _Region_Process_queue() and call it.
+
+	* rtems/src/regionresizesegment.c: new file.
+	(rtems_region_resize_segment): new interface routine.
+	* rtems/src/regionprocessqueue.c: new file.
+	(_Region_Process_queue): new internal routine containing queue
+	management code factored out from 'regionreturnsegment.c'.
+	* rtems/Makefile.am (REGION_C_FILES): Added
+	src/regionresizesegment.c, and src/regionprocessqueue.c.
+	* ada/rtems.adb, ada/rtems.ads: Added Region_Resize_Segment.
+
 2005-05-20  Eric Norum <norume@aps.anl.gov>
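The strategy this entry describes, try an in-place resize first and fall back to allocate-copy-free only when that is refused, can be illustrated at the region level with the new directive. This is a minimal sketch, not code from the changeset: the function name resize_or_reallocate() and its error handling are invented for illustration, and realloc() itself falls back to the malloc()/free() path as stated above rather than to rtems_region_get_segment().

#include <string.h>
#include <rtems.h>

/*
 * Illustration of the "resize first, reallocate only if unsatisfied"
 * strategy at the region level.  resize_or_reallocate() is an invented
 * name; it is not part of RTEMS or of this changeset.
 */
static void *resize_or_reallocate(
  rtems_id  region_id,   /* region the segment was allocated from */
  void     *segment,     /* segment obtained from that region     */
  size_t    new_size     /* requested new size in bytes           */
)
{
  size_t            old_size;
  void             *new_segment;
  rtems_status_code sc;

  sc = rtems_region_resize_segment( region_id, segment, new_size, &old_size );

  if ( sc == RTEMS_SUCCESSFUL )   /* grown or shrunk in place */
    return segment;

  if ( sc != RTEMS_UNSATISFIED )  /* invalid id, bad segment address, ... */
    return NULL;

  /* Cannot resize in place: allocate a new segment and copy the data. */
  sc = rtems_region_get_segment(
    region_id, new_size, RTEMS_NO_WAIT, RTEMS_NO_TIMEOUT, &new_segment
  );
  if ( sc != RTEMS_SUCCESSFUL )
    return NULL;

  memcpy( new_segment, segment, old_size < new_size ? old_size : new_size );
  rtems_region_return_segment( region_id, segment );
  return new_segment;
}

Only RTEMS_UNSATISFIED is treated as "fall back and copy"; any other non-successful status means the id or segment was invalid, which matches the error handling added to realloc() in the malloc.c hunk further down.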
cpukit/ada/rtems.adb
r207a979 → r80f2885b

           Size_Base'Unchecked_Access
        );
-       Size := SIZE_Base;
+       Size := Size_Base;

     end Region_Get_Segment_Size;

+    procedure Region_Resize_Segment (
+       ID       : in     RTEMS.ID;
+       Segment  : in     RTEMS.Address;
+       Size     : in     RTEMS.Unsigned32;
+       Old_Size :    out RTEMS.Unsigned32;
+       Result   :    out RTEMS.Status_Codes
+    ) is
+       function Region_Resize_Segment_Base (
+          ID       : RTEMS.ID;
+          Segment  : RTEMS.Address;
+          Size     : RTEMS.Unsigned32;
+          Old_Size : access RTEMS.Unsigned32
+       ) return RTEMS.Status_Codes;
+       pragma Import (C, Region_Resize_Segment_Base,
+          "rtems_region_get_segment_size");
+       Old_Size_Base : aliased RTEMS.Unsigned32;
+    begin
+
+       Result := Region_Resize_Segment_Base (
+          ID,
+          Segment,
+          Size,
+          Size_Base'Unchecked_Access
+       );
+       Old_Size := Old_Size_Base;
+
+    end Region_Resize_Segment;
+
     procedure Region_Return_Segment (
        ID      : in     RTEMS.ID;
cpukit/ada/rtems.ads
r207a979 → r80f2885b

     );

+    procedure Region_Resize_Segment (
+       ID       : in     RTEMS.ID;
+       Segment  : in     RTEMS.Address;
+       Old_Size : in     RTEMS.Unsigned32;
+       Size     :    out RTEMS.Unsigned32;
+       Result   :    out RTEMS.Status_Codes
+    );
+
     procedure Region_Return_Segment (
        ID      : in     RTEMS.ID;
cpukit/libcsupport/src/malloc.c
r207a979 → r80f2885b

   /*
-   *  Continue with calloc().
+   *  Continue with realloc().
   */
  if ( !ptr )
… …
  if ( !size ) {
    free( ptr );
+    return (void *) 0;
+  }
+
+  status =
+    rtems_region_resize_segment( RTEMS_Malloc_Heap, ptr, size, &old_size );
+
+  if( status == RTEMS_SUCCESSFUL ) {
+    return ptr;
+  }
+  else if ( status != RTEMS_UNSATISFIED ) {
+    errno = EINVAL;
    return (void *) 0;
  }
cpukit/rtems/Makefile.am
r207a979 → r80f2885b

   src/regionextend.c src/regiongetsegment.c src/regiongetsegmentsize.c \
   src/regionident.c src/regionreturnsegment.c src/regiongetinfo.c \
-  src/regiongetfreeinfo.c
+  src/regiongetfreeinfo.c src/regionresizesegment.c \
+  src/regionprocessqueue.c

 PARTITION_C_FILES = src/part.c src/partcreate.c src/partdelete.c \
cpukit/rtems/include/rtems/rtems/region.h
r207a979 → r80f2885b

 );

+/*
+ *  rtems_region_resize_segment
+ *
+ *  DESCRIPTION:
+ *
+ *  This routine implements the rtems_region_resize_segment directive.  It
+ *  tries to resize segment in the region associated with 'id' to the new size
+ *  'size' in place. The first 'size' or old size bytes of the segment
+ *  (whatever is less) are guaranteed to remain unmodified. The segment must
+ *  have been previously allocated from the same region. If resizing the
+ *  segment results in enough memory being available to satisfy the
+ *  rtems_region_get_segment of the first blocked task, then that task and as
+ *  many subsequent tasks as possible will be unblocked with their requests
+ *  satisfied.
+ *  Returns:
+ *    RTEMS_SUCCESSFUL  - operation successful
+ *    RTEMS_UNSATISFIED - the segment can't be resized in place
+ *    any other code    - failure.
+ *  On RTEMS_SUCCESSFUL or RTEMS_UNSATISFIED exit it returns into the
+ *  'old_size' the old size in bytes of the user memory area of the specified
+ *  segment.
+ */
+
+rtems_status_code rtems_region_resize_segment(
+  Objects_Id  id,
+  void       *segment,
+  size_t      size,
+  size_t     *old_size
+);
+
 #ifndef __RTEMS_APPLICATION__
 #include <rtems/rtems/region.inl>
+extern void _Region_Process_queue(Region_Control *the_region);
 #endif
 #if defined(RTEMS_MULTIPROCESSING)
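Assuming the semantics documented in the comment above, a caller can rely on 'old_size' being reported both when the resize succeeds and when it is merely unsatisfied. A small hedged sketch follows; the function shrink_segment(), the region id 'buffer_region' and the segment 'seg' are hypothetical and assumed to have been created elsewhere with rtems_region_create() and rtems_region_get_segment().

#include <stdio.h>
#include <rtems.h>

/*
 * Hedged example of the documented contract: 'old_size' is reported on
 * both RTEMS_SUCCESSFUL and RTEMS_UNSATISFIED exits.
 */
static void shrink_segment( rtems_id buffer_region, void *seg )
{
  size_t            old_size;
  rtems_status_code sc;

  sc = rtems_region_resize_segment( buffer_region, seg, 128, &old_size );

  if ( sc == RTEMS_SUCCESSFUL ) {
    /* The first 128 bytes (or old_size bytes, if smaller) are unchanged. */
    printf( "resized in place, user area was %lu bytes\n",
            (unsigned long) old_size );
  } else if ( sc == RTEMS_UNSATISFIED ) {
    /* Could not be resized in place; old_size is still valid here. */
    printf( "cannot resize in place, user area is %lu bytes\n",
            (unsigned long) old_size );
  } else {
    printf( "invalid region id or segment: %d\n", (int) sc );
  }
}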
cpukit/rtems/src/regionreturnsegment.c
r207a979 → r80f2885b

 {
   register Region_Control *the_region;
-  Thread_Control          *the_thread;
   Objects_Locations        location;
-  void                   **the_segment;
 #ifdef RTEMS_REGION_FREE_SHRED_PATTERN
   uint32_t                 size;
… …
 #ifdef RTEMS_REGION_FREE_SHRED_PATTERN
       if ( _Heap_Size_of_user_area( &the_region->Memory, segment, &size ) ) {
-        memset( segment, (RTEMS_REGION_FREE_SHRED_PATTERN & 0xFF), size);
+        memset( segment, (RTEMS_REGION_FREE_SHRED_PATTERN & 0xFF), size );
       } else {
         _RTEMS_Unlock_allocator();
… …
         the_region->number_of_used_blocks -= 1;

-        /*
-         *  Switch from using the memory allocation mutex to using a
-         *  dispatching disabled critical section.  We have to do this
-         *  because this thread may unblock one or more threads that were
-         *  waiting on memory.
-         *
-         *  NOTE: The following loop is O(n) where n is the number of
-         *        threads whose memory request is satisfied.
-         */
-        _RTEMS_Unlock_allocator();
-        _Thread_Disable_dispatch();
-
-        for ( ; ; ) {
-          the_thread = _Thread_queue_First( &the_region->Wait_queue );
-
-          if ( the_thread == NULL )
-            break;
-
-          the_segment = (void **) _Region_Allocate_segment(
-            the_region,
-            the_thread->Wait.count
-          );
-
-          if ( the_segment == NULL )
-            break;
-
-          *(void **)the_thread->Wait.return_argument = the_segment;
-          the_region->number_of_used_blocks += 1;
-          _Thread_queue_Extract( &the_region->Wait_queue, the_thread );
-          the_thread->Wait.return_code = RTEMS_SUCCESSFUL;
-        }
-        _Thread_Enable_dispatch();
+        _Region_Process_queue(the_region);    /* unlocks allocator internally */

         return RTEMS_SUCCESSFUL;
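According to the ChangeLog, the loop removed above now lives in the new internal routine _Region_Process_queue() (src/regionprocessqueue.c, one of the three added files, which this changeset view does not display). The sketch below is reconstructed from the removed lines and the new call site, so the actual file may differ in details such as its include list.

#include <rtems/system.h>
#include <rtems/rtems/status.h>
#include <rtems/rtems/region.h>
#include <rtems/score/apimutex.h>
#include <rtems/score/thread.h>

/*
 * Sketch of _Region_Process_queue() as implied by the code factored out
 * of rtems_region_return_segment(); not the literal regionprocessqueue.c.
 */
void _Region_Process_queue(
  Region_Control *the_region
)
{
  Thread_Control *the_thread;
  void          **the_segment;

  /*
   *  Switch from the memory allocation mutex to a dispatching disabled
   *  critical section, because this code may unblock one or more threads
   *  that were waiting on memory.  The loop is O(n) in the number of
   *  threads whose memory request is satisfied.
   */
  _RTEMS_Unlock_allocator();
  _Thread_Disable_dispatch();

  for ( ; ; ) {
    the_thread = _Thread_queue_First( &the_region->Wait_queue );

    if ( the_thread == NULL )
      break;

    the_segment = (void **) _Region_Allocate_segment(
      the_region,
      the_thread->Wait.count
    );

    if ( the_segment == NULL )
      break;

    *(void **)the_thread->Wait.return_argument = the_segment;
    the_region->number_of_used_blocks += 1;
    _Thread_queue_Extract( &the_region->Wait_queue, the_thread );
    the_thread->Wait.return_code = RTEMS_SUCCESSFUL;
  }
  _Thread_Enable_dispatch();
}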
cpukit/score/Makefile.am
r207a979 → r80f2885b

 HEAP_C_FILES = src/heap.c src/heapallocate.c src/heapextend.c src/heapfree.c \
   src/heapsizeofuserarea.c src/heapwalk.c src/heapgetinfo.c \
-  src/heapgetfreeinfo.c src/heapallocatealigned.c
+  src/heapgetfreeinfo.c src/heapallocatealigned.c \
+  src/heapresizeblock.c

 OBJECT_C_FILES = src/object.c src/objectallocate.c \
cpukit/score/include/rtems/score/heap.h
r207a979 → r80f2885b

   /** total number of suceessful calls to free */
   uint32_t frees;
+  /** total number of successful resizes */
+  uint32_t resizes;
 } Heap_Statistics;

… …

 /**
- *  Status codes for heap_extend
+ *  Status codes for _Heap_Extend
  */

… …

   HEAP_EXTEND_NOT_IMPLEMENTED
 } Heap_Extend_status;
+
+/**
+ *  Status codes for _Heap_Resize_block
+ */
+
+typedef enum {
+  HEAP_RESIZE_SUCCESSFUL,
+  HEAP_RESIZE_UNSATISFIED,
+  HEAP_RESIZE_FATAL_ERROR
+} Heap_Resize_status;

 /**

… …

 );

+/*
+ * This function tries to resize in place the block that is pointed to by the
+ * @a starting_address to the new @a size.
+ *
+ * @param the_heap (in) is the heap to operate upon
+ * @param starting_address (in) is the starting address of the user block
+ *        to be resized
+ * @param size (in) is the new size
+ * @param old_mem_size (in) points to a user area to return the size of the
+ *        user memory area of the block before resizing.
+ * @param avail_mem_size (in) points to a user area to return the size of
+ *        the user memory area of the free block that has been enlarged or
+ *        created due to resizing, 0 if none.
+ * @return HEAP_RESIZE_SUCCESSFUL if successfully able to resize the block,
+ *         HEAP_RESIZE_UNSATISFIED if the block can't be resized in place,
+ *         HEAP_RESIZE_FATAL_ERROR if failure
+ * @return *old_mem_size filled in with the size of the user memory area of
+ *         the block before resizing.
+ * @return *avail_mem_size filled in with the size of the user memory area
+ *         of the free block that has been enlarged or created due to
+ *         resizing, 0 if none.
+ */
+Heap_Resize_status _Heap_Resize_block(
+  Heap_Control *the_heap,
+  void         *starting_address,
+  uint32_t      size,
+  uint32_t     *old_mem_size,
+  uint32_t     *avail_mem_size
+);
+
 /**
  *  This routine returns the block of memory which begins

… …

   uint32_t min_size);

-extern Heap_Block* _Heap_Block_allocate(
+extern uint32_t _Heap_Block_allocate(
   Heap_Control* the_heap,
   Heap_Block*   the_block,
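The new rtems_region_resize_segment() directive presumably drives this routine on the region's heap (src/regionresizesegment.c is added by this changeset but not shown in this view). Below is a hedged sketch of such a caller, assuming only the prototype documented above; the wrapper name resize_in_heap() and the mapping of HEAP_RESIZE_FATAL_ERROR to RTEMS_INVALID_ADDRESS are guesses, not taken from the changeset.

#include <rtems/system.h>
#include <rtems/rtems/status.h>
#include <rtems/score/heap.h>

/*
 * Hypothetical wrapper showing how _Heap_Resize_block() might be used by
 * a region-level caller.  Not the actual regionresizesegment.c.
 */
static rtems_status_code resize_in_heap(
  Heap_Control *the_heap,
  void         *segment,
  uint32_t      new_size,
  uint32_t     *old_size
)
{
  uint32_t avail_size;   /* user size of the free block left after resizing */

  switch ( _Heap_Resize_block(
             the_heap, segment, new_size, old_size, &avail_size ) ) {
    case HEAP_RESIZE_SUCCESSFUL:
      return RTEMS_SUCCESSFUL;        /* block grown or shrunk in place */
    case HEAP_RESIZE_UNSATISFIED:
      return RTEMS_UNSATISFIED;       /* not enough room to resize in place */
    default:
      return RTEMS_INVALID_ADDRESS;   /* assumed mapping for a fatal error */
  }
}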
cpukit/score/src/heap.c
r207a979 r80f2885b 45 45 * | size < page_size | 46 46 * 0 +--------------------------------+ <- first block 47 * | prev_size = 1 (arbitrary)|47 * | prev_size = page_size | 48 48 * 4 +--------------------------------+ 49 49 * | size = size0 | 1 | … … 60 60 * | prev_size = size0 | 61 61 * +4 +--------------------------------+ 62 * | size = 0 (arbitrary)| 0 | <- prev block is free62 * | size = page_size | 0 | <- prev block is free 63 63 * +8 +--------------------------------+ <- aligned on page_size 64 64 * | unused space due to alignment | … … 75 75 * | unused space due to alignment | 76 76 * | size < page_size | 77 * 0 +--------------------------------+ <- firstblock78 * | prev_size = 1 (arbitrary)|77 * 0 +--------------------------------+ <- used block 78 * | prev_size = page_size | 79 79 * 4 +--------------------------------+ 80 * | size = S = size0 - BSIZE | 1 | 81 * 8 +---------------------+----------+ <- aligned on page_size 82 * | next = HEAP_TAIL | | 83 * 12 +---------------------+ | 84 * | prev = HEAP_HEAD | memory | 85 * +---------------------+ | 86 * | available | 87 * | | 88 * | for allocation | 89 * | | 90 * S +--------------------------------+ <- used block 91 * | prev_size = size0 - BSIZE | 92 * +4 +--------------------------------+ 93 * | size = BSIZE | 0 | <- prev block is free 94 * +8 +--------------------------------+ <- aligned on page_size 80 * | size = BSIZE | 1 | <- prev block is used 81 * 8 +--------------------------------+ <- aligned on page_size 95 82 * | . | Pointer returned to the user 96 * | . | is (S+8)for _Heap_Allocate()83 * | . | is 8 for _Heap_Allocate() 97 84 * | . | and is in range 98 * S + 8 + | user-accessible | [S+8,S+8+page_size) for99 * page_size+- - - - - -+ _Heap_Allocate_aligned()85 * 8 + | user-accessible | [8,8+page_size) for 86 * page_size +- - - - - -+ _Heap_Allocate_aligned() 100 87 * | area | 101 88 * | . | 102 * S + BSIZE +- - - - - . - - - - -+ <- last dummyblock89 * BSIZE +- - - - - . - - - - -+ <- free block 103 90 * | . | 104 * +4 +--------------------------------+ 105 * | size = 0 (arbitrary) | 1 | <- prev block is used 106 * +8 +--------------------------------+ <- aligned on page_size 91 * BSIZE +4 +--------------------------------+ 92 * | size = S = size0 - BSIZE | 1 | <- prev block is used 93 * BSIZE +8 +-------------------+------------+ <- aligned on page_size 94 * | next = HEAP_TAIL | | 95 * BSIZE +12 +-------------------+ | 96 * | prev = HEAP_HEAD | memory | 97 * +-------------------+ | 98 * | . available | 99 * | . | 100 * | . for | 101 * | . 
| 102 * BSIZE +S+0 +-------------------+ allocation + <- last dummy block 103 * | prev_size = S | | 104 * +S+4 +-------------------+------------+ 105 * | size = page_size | 0 | <- prev block is free 106 * +S+8 +--------------------------------+ <- aligned on page_size 107 107 * | unused space due to alignment | 108 108 * | size < page_size | … … 161 161 the_block = (Heap_Block *) aligned_start; 162 162 163 the_block->prev_size = HEAP_PREV_USED;163 the_block->prev_size = page_size; 164 164 the_block->size = the_size | HEAP_PREV_USED; 165 165 the_block->next = _Heap_Tail( the_heap ); … … 176 176 the_heap->final = the_block; /* Permanent final block of the heap */ 177 177 the_block->prev_size = the_size; /* Previous block is free */ 178 the_block->size = 0; /* This is the only block with size=0 */178 the_block->size = page_size; 179 179 180 180 stats->size = size; … … 188 188 stats->searches = 0; 189 189 stats->frees = 0; 190 stats->resizes = 0; 190 191 stats->instance = instance++; 191 192 … … 214 215 _Heap_Align_up(&block_size, page_size); 215 216 if(block_size < min_size) block_size = min_size; 217 /* 'block_size' becomes <= 'size' if and only if overflow occured. */ 216 218 return (block_size > size) ? block_size : 0; 217 219 } … … 219 221 /* 220 222 * Allocate block of size 'alloc_size' from 'the_block' belonging to 221 * 'the_heap'. Either split 'the_block' or allocate it entirely. 222 * Return the block allocated. 223 */ 224 Heap_Block* _Heap_Block_allocate( 223 * 'the_heap'. Split 'the_block' if possible, otherwise allocate it entirely. 224 * When split, make the lower part used, and leave the upper part free. 225 * Return the size of allocated block. 226 */ 227 unsigned32 _Heap_Block_allocate( 225 228 Heap_Control* the_heap, 226 229 Heap_Block* the_block, … … 234 237 _HAssert(_Heap_Is_aligned(alloc_size, the_heap->page_size)); 235 238 _HAssert(alloc_size <= block_size); 239 _HAssert(_Heap_Is_prev_used(the_block)); 236 240 237 241 if(the_rest >= the_heap->min_block_size) { 238 /* Split the block so that lower part is still free, and upper part 239 becomes used. */ 240 the_block->size = the_rest | HEAP_PREV_USED; 241 the_block = _Heap_Block_at(the_block, the_rest); 242 the_block->prev_size = the_rest; 243 the_block->size = alloc_size; 242 /* Split the block so that upper part is still free, and lower part 243 becomes used. This is slightly less optimal than leaving lower part 244 free as it requires replacing block in the free blocks list, but it 245 makes it possible to reuse this code in the _Heap_Resize_block(). */ 246 Heap_Block *next_block = _Heap_Block_at(the_block, alloc_size); 247 _Heap_Block_replace(the_block, next_block); 248 the_block->size = alloc_size | HEAP_PREV_USED; 249 next_block->size = the_rest | HEAP_PREV_USED; 250 _Heap_Block_at(next_block, the_rest)->prev_size = the_rest; 244 251 } 245 252 else { … … 249 256 _Heap_Block_remove(the_block); 250 257 alloc_size = block_size; 258 _Heap_Block_at(the_block, alloc_size)->size |= HEAP_PREV_USED; 251 259 stats->free_blocks -= 1; 252 260 } 253 /* Mark the block as used (in the next block). */254 _Heap_Block_at(the_block, alloc_size)->size |= HEAP_PREV_USED;255 261 /* Update statistics */ 256 262 stats->free_size -= alloc_size; … … 258 264 stats->min_free_size = stats->free_size; 259 265 stats->used_blocks += 1; 260 return the_block;266 return alloc_size; 261 267 } -
cpukit/score/src/heapallocate.c
r207a979 → r80f2885b

        result of the comparison. */
     if(the_block->size >= the_size) {
-      the_block = _Heap_Block_allocate(the_heap, the_block, the_size );
+      (void)_Heap_Block_allocate(the_heap, the_block, the_size );

       ptr = _Heap_User_area(the_block);
cpukit/score/src/heapallocatealigned.c
r207a979 → r80f2885b

   _HAssert(_Heap_Is_aligned_ptr((void*)aligned_user_addr, alignment));

-  the_block =
-    _Heap_Block_allocate(the_heap, the_block, alloc_size);
+  (void)_Heap_Block_allocate(the_heap, the_block, alloc_size);

   stats->searches += search_count + 1;
cpukit/score/src/heapfree.c
r207a979 → r80f2885b

   if ( !_Heap_Is_block_in( the_heap, the_block ) ) {
     _HAssert(starting_address == NULL);
+    _HAssert(FALSE);
     return( FALSE );
   }

… …

   next_block = _Heap_Block_at( the_block, the_size );

-  if ( !_Heap_Is_prev_used( next_block ) ) {
+  if ( !_Heap_Is_block_in( the_heap, next_block ) ) {
     _HAssert(FALSE);
     return( FALSE );
   }

-  if ( !_Heap_Is_block_in( the_heap, next_block ) ) {
+  if ( !_Heap_Is_prev_used( next_block ) ) {
     _HAssert(FALSE);
     return( FALSE );
cpukit/score/src/heapsizeofuserarea.c
r207a979 → r80f2885b

   _Heap_Start_of_block( the_heap, starting_address, &the_block );

+  _HAssert(_Heap_Is_block_in( the_heap, the_block ));
   if ( !_Heap_Is_block_in( the_heap, the_block ) )
     return( FALSE );

… …

   next_block = _Heap_Block_at( the_block, the_size );

+  _HAssert(_Heap_Is_block_in( the_heap, next_block ));
+  _HAssert(_Heap_Is_prev_used( next_block ));
   if (
     !_Heap_Is_block_in( the_heap, next_block ) ||
cpukit/score/src/heapwalk.c
r207a979 → r80f2885b

   }

-  if (the_block->prev_size != HEAP_PREV_USED) {
-    printf("PASS: %d !prev_size of 1st block isn't HEAP_PREV_USED\n", source);
+  if (the_block->prev_size != the_heap->page_size) {
+    printf("PASS: %d !prev_size of 1st block isn't page_size\n", source);
     error = 1;
   }

… …

   }

-  if (_Heap_Block_size(the_block) != 0) {
-    printf("PASS: %d !last block's size isn't 0\n", source);
+  if (_Heap_Block_size(the_block) != the_heap->page_size) {
+    printf("PASS: %d !last block's size isn't page_size\n", source);
     error = 1;
   }