source: rtems/cpukit/include/rtems/score/heapimpl.h @ 878487b0

5
Last change on this file since 878487b0 was 2afb22b, checked in by Chris Johns <chrisj@…>, on 12/23/17 at 07:18:56

Remove make preinstall

A speciality of the RTEMS build system was the make preinstall step. It
copied header files from arbitrary locations into the build tree. The
header files were included via the -Bsome/build/tree/path GCC command
line option.

This has at least seven problems:

  • The make preinstall step itself needs time and disk space.
  • Errors in header files show up in the build tree copy. This makes it hard for editors to open the right file to fix the error.
  • There is no clear relationship between source and build tree header files. This makes an audit of the build process difficult.
  • The visibility of all header files in the build tree makes it difficult to enforce API barriers. For example it is discouraged to use BSP-specifics in the cpukit.
  • An introduction of a new build system is difficult.
  • Include paths specified by the -B option are system headers. This may suppress warnings.
  • The parallel build had sporadic failures on some hosts.

This patch removes the make preinstall step. All installed header
files are moved to dedicated include directories in the source tree.
Let @RTEMS_CPU@ be the target architecture, e.g. arm, powerpc, sparc,
etc. Let @RTEMS_BSP_FAMILY@ be a BSP family base directory, e.g.
erc32, imx, qoriq, etc.

The new cpukit include directories are:

  • cpukit/include
  • cpukit/score/cpu/@RTEMS_CPU@/include
  • cpukit/libnetworking

The new BSP include directories are:

  • bsps/include
  • bsps/@RTEMS_CPU@/include
  • bsps/@RTEMS_CPU@/@RTEMS_BSP_FAMILY@/include

There are build tree include directories for generated files.

The include directory order favours the most general header file, e.g.
it is not possible to override general header files via the include path
order.

The "bootstrap -p" option was removed. The new "bootstrap -H" option
should be used to regenerate the "headers.am" files.

Update #3254.

  • Property mode set to 100644
File size: 14.6 KB
Line 
1/**
2 * @file
3 *
4 * @ingroup ScoreHeap
5 *
6 * @brief Heap Handler Implementation
7 */
8
9/*
10 *  COPYRIGHT (c) 1989-2008.
11 *  On-Line Applications Research Corporation (OAR).
12 *
13 *  The license and distribution terms for this file may be
14 *  found in the file LICENSE in this distribution or at
15 *  http://www.rtems.org/license/LICENSE.
16 */
17
18#ifndef _RTEMS_SCORE_HEAPIMPL_H
19#define _RTEMS_SCORE_HEAPIMPL_H
20
21#include <rtems/score/heap.h>
22
23#ifdef __cplusplus
24extern "C" {
25#endif
26
27/**
28 * @addtogroup ScoreHeap
29 */
30/**@{**/
31
/**
 * @brief Flag in @ref Heap_Block.size_and_flag indicating that the previous
 * block is used.
 *
 * @see Heap_Block.size_and_flag.
 */
#define HEAP_PREV_BLOCK_USED ((uintptr_t) 1)

/**
 * @brief Size of the part at the block begin which may be used for allocation
 * in charge of the previous block.
 *
 * Equal to the size of one uintptr_t, i.e. one block header field.
 */
#define HEAP_ALLOC_BONUS sizeof(uintptr_t)
42
/**
 * @brief Status codes returned by _Heap_Resize_block().
 *
 * @see _Heap_Resize_block().
 */
typedef enum {
  HEAP_RESIZE_SUCCESSFUL,
  HEAP_RESIZE_UNSATISFIED,
  HEAP_RESIZE_FATAL_ERROR
} Heap_Resize_status;
51
/**
 * @brief Gets the first and last block for the heap area with begin
 * @a heap_area_begin and size @a heap_area_size.
 *
 * A page size of @a page_size and minimal block size of @a min_block_size will
 * be used for calculation.
 *
 * Nothing will be written to this area.
 *
 * In case of success the pointers to the first and last block will be returned
 * via @a first_block_ptr and @a last_block_ptr.
 *
 * @retval true The area is big enough to contain at least one block.
 * @retval false The area is too small; the output pointers are not written.
 */
bool _Heap_Get_first_and_last_block(
  uintptr_t heap_area_begin,
  uintptr_t heap_area_size,
  uintptr_t page_size,
  uintptr_t min_block_size,
  Heap_Block **first_block_ptr,
  Heap_Block **last_block_ptr
);
74
/**
 * @brief Initializes the heap control block @a heap to manage the area
 * starting at @a area_begin of size @a area_size bytes.
 *
 * Blocks of memory are allocated from the heap in multiples of @a page_size
 * byte units.  If the @a page_size is equal to zero or is not multiple of
 * @c CPU_ALIGNMENT, it is aligned up to the nearest @c CPU_ALIGNMENT boundary.
 *
 * @return The maximum memory available, or zero in case of failure.
 *
 * @see Heap_Initialization_or_extend_handler.
 */
uintptr_t _Heap_Initialize(
  Heap_Control *heap,
  void *area_begin,
  uintptr_t area_size,
  uintptr_t page_size
);
93
/**
 * @brief Allocates a memory area of size @a size bytes from the heap @a heap.
 *
 * If the alignment parameter @a alignment is not equal to zero, the allocated
 * memory area will begin at an address aligned by this value.
 *
 * If the boundary parameter @a boundary is not equal to zero, the allocated
 * memory area will fulfill a boundary constraint.  The boundary value
 * specifies the set of addresses which are aligned by the boundary value.  The
 * interior of the allocated memory area will not contain an element of this
 * set.  The begin or end address of the area may be a member of the set.
 *
 * A size value of zero will return a unique address which may be freed with
 * _Heap_Free().
 *
 * @return A pointer to the begin of the allocated memory area, or @c NULL if
 * no memory is available or the parameters are inconsistent.
 */
void *_Heap_Allocate_aligned_with_boundary(
  Heap_Control *heap,
  uintptr_t size,
  uintptr_t alignment,
  uintptr_t boundary
);
118
119/**
120 * @brief See _Heap_Allocate_aligned_with_boundary() with boundary equals zero.
121 */
122RTEMS_INLINE_ROUTINE void *_Heap_Allocate_aligned(
123  Heap_Control *heap,
124  uintptr_t size,
125  uintptr_t alignment
126)
127{
128  return _Heap_Allocate_aligned_with_boundary( heap, size, alignment, 0 );
129}
130
131/**
132 * @brief See _Heap_Allocate_aligned_with_boundary() with alignment and
133 * boundary equals zero.
134 */
135RTEMS_INLINE_ROUTINE void *_Heap_Allocate( Heap_Control *heap, uintptr_t size )
136{
137  return _Heap_Allocate_aligned_with_boundary( heap, size, 0, 0 );
138}
139
/**
 * @brief Frees the allocated memory area starting at @a addr in the heap
 * @a heap.
 *
 * Inappropriate values for @a addr may corrupt the heap.
 *
 * @retval true The memory area was successfully freed.
 * @retval false Otherwise.
 */
bool _Heap_Free( Heap_Control *heap, void *addr );
149
/**
 * @brief Walks the heap @a heap to verify its integrity.
 *
 * If @a dump is @c true, then diagnostic messages will be printed to standard
 * output.  In this case @a source is used to mark the output lines.
 *
 * @retval true No errors occurred.
 * @retval false The heap is corrupt.
 */
bool _Heap_Walk(
  Heap_Control *heap,
  int source,
  bool dump
);
163
/**
 * @brief Heap block visitor.
 *
 * Called by _Heap_Iterate() for each block with the block pointer, its size,
 * its usage state, and the caller-supplied argument.
 *
 * @see _Heap_Iterate().
 *
 * @retval true Stop the iteration.
 * @retval false Continue the iteration.
 */
typedef bool (*Heap_Block_visitor)(
  const Heap_Block *block,
  uintptr_t block_size,
  bool block_is_used,
  void *visitor_arg
);
178
/**
 * @brief Iterates over all blocks of the heap.
 *
 * For each block the @a visitor with the argument @a visitor_arg will be
 * called.  The iteration stops early if the visitor returns @c true.
 */
void _Heap_Iterate(
  Heap_Control *heap,
  Heap_Block_visitor visitor,
  void *visitor_arg
);
190
/**
 * @brief Greedy allocate that empties the heap.
 *
 * Afterwards the heap has at most @a block_count allocatable blocks of sizes
 * specified by @a block_sizes.  The @a block_sizes must point to an array with
 * @a block_count members.  All other blocks are used.
 *
 * @return The list of blocks to pass to _Heap_Greedy_free() afterwards.
 *
 * @see _Heap_Greedy_free().
 */
Heap_Block *_Heap_Greedy_allocate(
  Heap_Control *heap,
  const uintptr_t *block_sizes,
  size_t block_count
);
205
/**
 * @brief Greedy allocate all blocks except the largest free block.
 *
 * Afterwards the heap has at most one allocatable block.  This block is the
 * largest free block if it exists.  The allocatable size of this block is
 * stored in @a allocatable_size.  All other blocks are used.
 *
 * @return The list of blocks to pass to _Heap_Greedy_free() afterwards.
 *
 * @see _Heap_Greedy_free().
 */
Heap_Block *_Heap_Greedy_allocate_all_except_largest(
  Heap_Control *heap,
  uintptr_t *allocatable_size
);
219
/**
 * @brief Frees blocks of a greedy allocation.
 *
 * The @a blocks must be the return value of _Heap_Greedy_allocate() or
 * _Heap_Greedy_allocate_all_except_largest().
 */
void _Heap_Greedy_free(
  Heap_Control *heap,
  Heap_Block *blocks
);
229
/**
 * @brief Returns information about used and free blocks for the heap @a heap
 * in @a info.
 */
void _Heap_Get_information(
  Heap_Control *heap,
  Heap_Information_block *info
);
238
/**
 * @brief Returns information about free blocks for the heap @a heap in
 * @a info.
 */
void _Heap_Get_free_information(
  Heap_Control *heap,
  Heap_Information *info
);
247
/**
 * @brief Returns the size of the allocatable memory area starting at @a addr
 * in @a size.
 *
 * The size value may be greater than the initially requested size in
 * _Heap_Allocate_aligned_with_boundary().
 *
 * Inappropriate values for @a addr will not corrupt the heap, but may yield
 * invalid size values.
 *
 * Returns @c true if successful, and @c false otherwise.
 */
bool _Heap_Size_of_alloc_area(
  Heap_Control *heap,
  void *addr,
  uintptr_t *size
);
265
/**
 * @brief Resizes the block of the allocated memory area starting at @a addr.
 *
 * The new memory area will have a size of at least @a size bytes.  A resize
 * may be impossible and depends on the current heap usage.
 *
 * The size available for allocation in the current block before the resize
 * will be returned in @a old_size.  The size available for allocation in
 * the resized block will be returned in @a new_size.  If the resize was not
 * successful, then a value of zero will be returned in @a new_size.
 *
 * Inappropriate values for @a addr may corrupt the heap.
 *
 * @return A @ref Heap_Resize_status code describing the outcome.
 */
Heap_Resize_status _Heap_Resize_block(
  Heap_Control *heap,
  void *addr,
  uintptr_t size,
  uintptr_t *old_size,
  uintptr_t *new_size
);
286
/**
 * @brief Allocates the memory area starting at @a alloc_begin of size
 * @a alloc_size bytes in the block @a block.
 *
 * The block may be split up into multiple blocks.  The previous and next block
 * may be used or free.  Free block parts which form a valid new block will be
 * inserted into the free list or merged with an adjacent free block.  If the
 * block is used, they will be inserted after the free list head.  If the block
 * is free, they will be inserted after the previous block in the free list.
 *
 * Inappropriate values for @a alloc_begin or @a alloc_size may corrupt the
 * heap.
 *
 * @return The block containing the allocated memory area.
 */
Heap_Block *_Heap_Block_allocate(
  Heap_Control *heap,
  Heap_Block *block,
  uintptr_t alloc_begin,
  uintptr_t alloc_size
);
308
#ifndef HEAP_PROTECTION
  /* Heap protection disabled: all hooks compile away to no-ops. */
  #define _Heap_Protection_block_initialize( heap, block ) ((void) 0)
  #define _Heap_Protection_block_check( heap, block ) ((void) 0)
  #define _Heap_Protection_block_error( heap, block ) ((void) 0)
  #define _Heap_Protection_free_all_delayed_blocks( heap ) ((void) 0)
#else
  /* Initializes protection data of @a block via the heap's handler. */
  static inline void _Heap_Protection_block_initialize(
    Heap_Control *heap,
    Heap_Block *block
  )
  {
    (*heap->Protection.block_initialize)( heap, block );
  }

  /* Checks protection data of @a block via the heap's handler. */
  static inline void _Heap_Protection_block_check(
    Heap_Control *heap,
    Heap_Block *block
  )
  {
    (*heap->Protection.block_check)( heap, block );
  }

  /* Reports a protection error for @a block via the heap's handler. */
  static inline void _Heap_Protection_block_error(
    Heap_Control *heap,
    Heap_Block *block
  )
  {
    (*heap->Protection.block_error)( heap, block );
  }

  /*
   * Requests an allocation of the largest representable size (all bits set
   * minus the block overhead), which presumably cannot be satisfied and
   * thereby forces the heap to release its delayed free blocks -- NOTE
   * (review): confirm the allocation failure path performs the release.
   * The resulting pointer (likely NULL) is passed to _Heap_Free().
   */
  static inline void _Heap_Protection_free_all_delayed_blocks( Heap_Control *heap )
  {
    uintptr_t large = 0
      - (uintptr_t) HEAP_BLOCK_HEADER_SIZE
      - (uintptr_t) HEAP_ALLOC_BONUS
      - (uintptr_t) 1;
    void *p = _Heap_Allocate( heap, large );
    _Heap_Free( heap, p );
  }
#endif
349
350/**
351 * @brief Sets the fraction of delayed free blocks that is actually freed
352 * during memory shortage.
353 *
354 * The default is to free half the delayed free blocks.  This is equal to a
355 * fraction value of two.
356 *
357 * @param[in] heap The heap control.
358 * @param[in] fraction The fraction is one divided by this fraction value.
359 */
360RTEMS_INLINE_ROUTINE void _Heap_Protection_set_delayed_free_fraction(
361  Heap_Control *heap,
362  uintptr_t fraction
363)
364{
365#ifdef HEAP_PROTECTION
366  heap->Protection.delayed_free_fraction = fraction;
367#else
368  (void) heap;
369  (void) fraction;
370#endif
371}
372
373RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Free_list_head( Heap_Control *heap )
374{
375  return &heap->free_list;
376}
377
378RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Free_list_tail( Heap_Control *heap )
379{
380  return &heap->free_list;
381}
382
383RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Free_list_first( Heap_Control *heap )
384{
385  return _Heap_Free_list_head(heap)->next;
386}
387
388RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Free_list_last( Heap_Control *heap )
389{
390  return _Heap_Free_list_tail(heap)->prev;
391}
392
393RTEMS_INLINE_ROUTINE void _Heap_Free_list_remove( Heap_Block *block )
394{
395  Heap_Block *next = block->next;
396  Heap_Block *prev = block->prev;
397
398  prev->next = next;
399  next->prev = prev;
400}
401
402RTEMS_INLINE_ROUTINE void _Heap_Free_list_replace(
403  Heap_Block *old_block,
404  Heap_Block *new_block
405)
406{
407  Heap_Block *next = old_block->next;
408  Heap_Block *prev = old_block->prev;
409
410  new_block->next = next;
411  new_block->prev = prev;
412
413  next->prev = new_block;
414  prev->next = new_block;
415}
416
417RTEMS_INLINE_ROUTINE void _Heap_Free_list_insert_after(
418  Heap_Block *block_before,
419  Heap_Block *new_block
420)
421{
422  Heap_Block *next = block_before->next;
423
424  new_block->next = next;
425  new_block->prev = block_before;
426  block_before->next = new_block;
427  next->prev = new_block;
428}
429
430RTEMS_INLINE_ROUTINE void _Heap_Free_list_insert_before(
431  Heap_Block *block_next,
432  Heap_Block *new_block
433)
434{
435  Heap_Block *prev = block_next->prev;
436
437  new_block->next = block_next;
438  new_block->prev = prev;
439  prev->next = new_block;
440  block_next->prev = new_block;
441}
442
443RTEMS_INLINE_ROUTINE bool _Heap_Is_aligned(
444  uintptr_t value,
445  uintptr_t alignment
446)
447{
448  return (value % alignment) == 0;
449}
450
451RTEMS_INLINE_ROUTINE uintptr_t _Heap_Align_down(
452  uintptr_t value,
453  uintptr_t alignment
454)
455{
456  return value - (value % alignment);
457}
458
459/**
460 * @brief Returns the block which is @a offset away from @a block.
461 */
462RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Block_at(
463  const Heap_Block *block,
464  uintptr_t offset
465)
466{
467  return (Heap_Block *) ((uintptr_t) block + offset);
468}
469
470RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Prev_block(
471  const Heap_Block *block
472)
473{
474  return (Heap_Block *) ((uintptr_t) block - block->prev_size);
475}
476
477RTEMS_INLINE_ROUTINE uintptr_t _Heap_Alloc_area_of_block(
478  const Heap_Block *block
479)
480{
481  return (uintptr_t) block + HEAP_BLOCK_HEADER_SIZE;
482}
483
484RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Block_of_alloc_area(
485  uintptr_t alloc_begin,
486  uintptr_t page_size
487)
488{
489  return (Heap_Block *) (_Heap_Align_down( alloc_begin, page_size )
490    - HEAP_BLOCK_HEADER_SIZE);
491}
492
493RTEMS_INLINE_ROUTINE uintptr_t _Heap_Block_size( const Heap_Block *block )
494{
495  return block->size_and_flag & ~HEAP_PREV_BLOCK_USED;
496}
497
498RTEMS_INLINE_ROUTINE void _Heap_Block_set_size(
499  Heap_Block *block,
500  uintptr_t size
501)
502{
503  uintptr_t flag = block->size_and_flag & HEAP_PREV_BLOCK_USED;
504
505  block->size_and_flag = size | flag;
506}
507
508RTEMS_INLINE_ROUTINE bool _Heap_Is_prev_used( const Heap_Block *block )
509{
510  return block->size_and_flag & HEAP_PREV_BLOCK_USED;
511}
512
513RTEMS_INLINE_ROUTINE bool _Heap_Is_used(
514  const Heap_Block *block
515)
516{
517  const Heap_Block *const next_block =
518    _Heap_Block_at( block, _Heap_Block_size( block ) );
519
520  return _Heap_Is_prev_used( next_block );
521}
522
523RTEMS_INLINE_ROUTINE bool _Heap_Is_free(
524  const Heap_Block *block
525)
526{
527  return !_Heap_Is_used( block );
528}
529
530RTEMS_INLINE_ROUTINE bool _Heap_Is_block_in_heap(
531  const Heap_Control *heap,
532  const Heap_Block *block
533)
534{
535  return (uintptr_t) block >= (uintptr_t) heap->first_block
536    && (uintptr_t) block <= (uintptr_t) heap->last_block;
537}
538
539/**
540 * @brief Sets the size of the last block for heap @a heap.
541 *
542 * The next block of the last block will be the first block.  Since the first
543 * block indicates that the previous block is used, this ensures that the last
544 * block appears as used for the _Heap_Is_used() and _Heap_Is_free()
545 * functions.
546 *
547 * This feature will be used to terminate the scattered heap area list.  See
548 * also _Heap_Extend().
549 */
550RTEMS_INLINE_ROUTINE void _Heap_Set_last_block_size( Heap_Control *heap )
551{
552  _Heap_Block_set_size(
553    heap->last_block,
554    (uintptr_t) heap->first_block - (uintptr_t) heap->last_block
555  );
556}
557
558/**
559 * @brief Returns the size of the allocatable area in bytes.
560 *
561 * This value is an integral multiple of the page size.
562 */
563RTEMS_INLINE_ROUTINE uintptr_t _Heap_Get_size( const Heap_Control *heap )
564{
565  return heap->stats.size;
566}
567
568RTEMS_INLINE_ROUTINE uintptr_t _Heap_Max( uintptr_t a, uintptr_t b )
569{
570  return a > b ? a : b;
571}
572
573RTEMS_INLINE_ROUTINE uintptr_t _Heap_Min( uintptr_t a, uintptr_t b )
574{
575  return a < b ? a : b;
576}
577
/* Heap-specific assertions are enabled whenever RTEMS_DEBUG is enabled. */
#ifdef RTEMS_DEBUG
  #define RTEMS_HEAP_DEBUG
#endif

#ifdef RTEMS_HEAP_DEBUG
  #include <assert.h>
  /*
   * Heap assertion macro.  Calls the internal __assert() directly with file,
   * line, and stringized condition -- NOTE(review): __assert is a libc
   * internal (newlib); confirm its signature matches the target libc.
   */
  #define _HAssert( cond ) \
    do { \
      if ( !(cond) ) { \
        __assert( __FILE__, __LINE__, #cond ); \
      } \
    } while (0)
#else
  /* Assertions disabled: expands to a no-op. */
  #define _HAssert( cond ) ((void) 0)
#endif
593
594/** @} */
595
596#ifdef __cplusplus
597}
598#endif
599
600#endif
601/* end of include file */
Note: See TracBrowser for help on using the repository browser.