source: rtems/cpukit/score/src/heapallocatealigned.c @ c6bc9d8

4.104.114.84.95
Last change on this file since c6bc9d8 was c6bc9d8, checked in by Joel Sherrill <joel.sherrill@…>, on Jan 20, 2005 at 7:45:02 PM

2005-01-20 Sergei Organov <osv@topconrd.ru>

PR 536/rtems
Heap manager re-implementation to consume less memory and still satisfy
alignment requirements.

  • score/src/heap.c, score/src/heapallocate.c, score/src/heapextend.c, score/src/heapfree.c, score/src/heapgetinfo.c, score/src/heapgetfreeinfo.c, core/src/heapsizeofuserarea.c, score/src/heapwalk.c, core/macros/rtems/score/heap.inl, score/inline/rtems/score/heap.inl, score/include/rtems/score/heap.h: Reimplemented.
  • score/src/heapallocatealigned.c: new file
  • score/Makefile.am: HEAP_C_FILES: add score/src/heapallocatealigned.c
  • Property mode set to 100644
File size: 6.3 KB
Line 
1/*
2 *  Heap Handler
3 *
4 *  COPYRIGHT (c) 1989-1999.
5 *  On-Line Applications Research Corporation (OAR).
6 *
7 *  The license and distribution terms for this file may be
8 *  found in the file LICENSE in this distribution or at
9 *  http://www.OARcorp.com/rtems/license.html.
10 *
11 *  $Id$
12 */
13
14
15#include <rtems/system.h>
16#include <rtems/score/sysstate.h>
17#include <rtems/score/heap.h>
18
#if defined(RTEMS_HEAP_DEBUG)

/*
 * Debug-build consistency check for an aligned allocation result.
 * Verifies that the returned user pointer and its page-aligned base both
 * lie inside 'the_block' and inside the heap's overall memory range.
 * Compiled away entirely when RTEMS_HEAP_DEBUG is not defined.
 */
static void
check_result(
  Heap_Control* the_heap,
  Heap_Block* the_block,
  _H_uptr_t user_addr,
  _H_uptr_t aligned_user_addr,
  uint32_t size)
{
  /* Boundaries of the block's user area. */
  _H_uptr_t const area_begin = _H_p2u(_Heap_User_area(the_block));
  _H_uptr_t const area_limit =
    _H_p2u(the_block) + _Heap_Block_size(the_block) + HEAP_BLOCK_HEADER_OFFSET;
  /* One past the last byte the caller may use. */
  _H_uptr_t const alloc_end = aligned_user_addr + size;
  /* Boundaries of the heap's managed memory. */
  _H_uptr_t const heap_begin = _H_p2u(the_heap->start) + HEAP_OVERHEAD;
  _H_uptr_t const heap_limit =
    _H_p2u(the_heap->final) + HEAP_BLOCK_HEADER_OFFSET;
  uint32_t const pg_size = the_heap->page_size;

  _HAssert(user_addr == area_begin);
  /* The aligned pointer must stay within one page of the block base so the
     block address can be recovered by aligning the user pointer down. */
  _HAssert(aligned_user_addr - area_begin < pg_size);
  _HAssert(aligned_user_addr >= area_begin);
  _HAssert(aligned_user_addr < area_limit);
  _HAssert(alloc_end > area_begin);
  _HAssert(alloc_end <= area_limit);
  _HAssert(aligned_user_addr >= heap_begin);
  _HAssert(aligned_user_addr < heap_limit);
  _HAssert(alloc_end > heap_begin);
  _HAssert(alloc_end <= heap_limit);
}

#else /* !defined(RTEMS_HEAP_DEBUG) */

#define check_result(a, b, c, d, e) ((void)0)

#endif /* !defined(RTEMS_HEAP_DEBUG) */
55
56
/*PAGE
 *
 *  _Heap_Allocate_aligned
 *
 *  This kernel routine allocates the requested size of memory
 *  from the specified heap so that returned address is aligned according to
 *  the 'alignment'.
 *
 *  Strategy: first-fit scan of the free list; within a suitable free block
 *  the allocation is carved from the block's UPPER end so that the lower
 *  remainder can stay on the free list.
 *
 *  Input parameters:
 *    the_heap  - pointer to the heap control block.
 *    size      - size in bytes of the memory block to allocate.
 *    alignment - required user pointer alignment in bytes
 *                (0 means CPU_ALIGNMENT).
 *
 *  Output parameters:
 *    returns - starting address of memory block allocated, aligned on the
 *              specified boundary, or NULL if no suitable block was found
 *              (or 'size' was invalid per _Heap_Calc_block_size).
 */

void *_Heap_Allocate_aligned(
  Heap_Control *the_heap,
  uint32_t   size,
  uint32_t   alignment
)
{
  uint32_t search_count;
  Heap_Block *the_block;

  void *user_ptr = NULL;
  uint32_t  const page_size = the_heap->page_size;
  Heap_Statistics *const stats = &the_heap->stats;
  Heap_Block *const tail = _Heap_Tail(the_heap);

  /* Distance back from a block's end at which the user pointer must be
     placed for 'size' user bytes to fit.
     NOTE(review): unsigned subtraction — wraps if
     size < HEAP_BLOCK_HEADER_OFFSET; presumably such sizes make
     _Heap_Calc_block_size() return 0 below so the wrapped value is never
     used. Confirm against _Heap_Calc_block_size. */
  uint32_t const end_to_user_offs = size - HEAP_BLOCK_HEADER_OFFSET;

  /* Total block size needed (user size rounded up plus block overhead);
     0 signals an unsatisfiable request. */
  uint32_t const the_size =
    _Heap_Calc_block_size(size, page_size, the_heap->min_block_size);

  if(the_size == 0)
    return NULL;

  /* Default alignment when the caller passes 0. */
  if(alignment == 0)
    alignment = CPU_ALIGNMENT;

  /* Find large enough free block that satisfies the alignment requirements. */

  for(the_block = _Heap_Head(the_heap)->next, search_count = 0;
      the_block != tail;
      the_block = the_block->next, ++search_count)
  {
    uint32_t const block_size = _Heap_Block_size(the_block);

    /* As we always coalesce free blocks, prev block must have been used. */
    _HAssert(_Heap_Is_prev_used(the_block));

    if(block_size >= the_size) { /* the_block is large enough. */

      _H_uptr_t user_addr;
      _H_uptr_t aligned_user_addr;
      _H_uptr_t const user_area = _H_p2u(_Heap_User_area(the_block));

      /* Calculate 'aligned_user_addr' that will become the user pointer we
         return. It should be at least 'end_to_user_offs' bytes less than the
         the 'block_end' and should be aligned on 'alignment' boundary.
         Calculations are from the 'block_end' as we are going to split free
         block so that the upper part of the block becomes used block. */
      _H_uptr_t const block_end = _H_p2u(the_block) + block_size;
      aligned_user_addr = block_end - end_to_user_offs;
      _Heap_Align_down_uptr(&aligned_user_addr, alignment);

      /* 'user_addr' is the 'aligned_user_addr' further aligned down to the
         'page_size' boundary. We need it as blocks' user areas should begin
         only at 'page_size' aligned addresses */
      user_addr = aligned_user_addr;
      _Heap_Align_down_uptr(&user_addr, page_size);

      /* Make sure 'user_addr' calculated didn't run out of 'the_block. */
      if(user_addr >= user_area) {

        /* The block seems to be acceptable. Check if the remainder of
           'the_block' is less than 'min_block_size' so that 'the_block' won't
           actually be split at the address we assume. */
        if(user_addr - user_area < the_heap->min_block_size) {

          /* The block won't be split, so 'user_addr' will be equal to the
             'user_area'. */
          user_addr = user_area;

          /* We can't allow the distance between 'user_addr' and
           'aligned_user_addr' to be outside of [0,page_size) range. If we do,
           we will need to store this distance somewhere to be able to
           resurrect the block address from the user pointer. (Having the
           distance within [0,page_size) range allows resurrection by
           aligning user pointer down to the nearest 'page_size' boundary.) */
          if(aligned_user_addr - user_addr >= page_size) {

            /* The user pointer will be too far from 'user_addr'. See if we
               can make 'aligned_user_addr' to be close enough to the
               'user_addr'. */
            aligned_user_addr = user_addr;
            _Heap_Align_up_uptr(&aligned_user_addr, alignment);
            if(aligned_user_addr - user_addr >= page_size) {
              /* No, we can't use the block; 0 marks it rejected so the
                 'if(aligned_user_addr)' test below skips allocation and the
                 outer loop moves on to the next free block. */
              aligned_user_addr = 0;
            }
          }
        }

        if(aligned_user_addr) {

          /* The block is indeed acceptable: calculate the size of the block
             to be allocated and perform allocation. */
          uint32_t const alloc_size =
            block_end - user_addr + HEAP_BLOCK_USER_OFFSET;

          _HAssert(_Heap_Is_aligned_ptr((void*)aligned_user_addr, alignment));

          the_block =
            _Heap_Block_allocate(the_heap, the_block, alloc_size);

          /* Record how many free blocks were examined for this allocation. */
          stats->searches += search_count + 1;
          stats->allocs += 1;

          /* No-op unless RTEMS_HEAP_DEBUG is defined. */
          check_result(the_heap, the_block, user_addr,
            aligned_user_addr, size);

          user_ptr = (void*)aligned_user_addr;
          break;
        }
      }
    }
  }

  /* Track the longest free-list search ever performed (even on failure). */
  if(stats->max_search < search_count)
    stats->max_search = search_count;

  return user_ptr;
}
Note: See TracBrowser for help on using the repository browser.