source: rtems/cpukit/score/src/heapallocatealigned.c @ 1fb90ec7

Branches: 4.10, 4.11, 4.8, 4.9, 5
Last change on this file since 1fb90ec7 was 1fb90ec7, checked in by Joel Sherrill <joel.sherrill@…>, on Mar 12, 2007 at 11:16:02 AM

2007-03-12 Joel Sherrill <joel@…>

  • score/src/heapallocatealigned.c, score/src/threadqrequeue.c: Correct license URL and/or fix mistake in copyright notice. Both of these mistakes appear to be from code submitted after these changes were made previously.
  • Property mode set to 100644
File size: 6.3 KB
Line 
1/*
2 *  Heap Handler
3 *
4 *  COPYRIGHT (c) 1989-1999.
5 *  On-Line Applications Research Corporation (OAR).
6 *
7 *  The license and distribution terms for this file may be
8 *  found in the file LICENSE in this distribution or at
9 *  http://www.rtems.com/license/LICENSE.
10 *
11 *  $Id$
12 */
13
14#if HAVE_CONFIG_H
15#include "config.h"
16#endif
17
18#include <rtems/system.h>
19#include <rtems/score/sysstate.h>
20#include <rtems/score/heap.h>
21
#if defined(RTEMS_HEAP_DEBUG)

/*
 *  Debug-only consistency check applied after an aligned allocation has
 *  been performed.  Verifies that the raw user address coincides with the
 *  allocated block's user area, that the aligned user pointer lies within
 *  one 'page_size' of it (so the block can later be recovered from the
 *  user pointer by aligning down), and that the whole returned range lies
 *  inside both the block and the heap bounds.  Compiles to nothing unless
 *  RTEMS_HEAP_DEBUG is defined.
 */
static void
check_result(
  Heap_Control* the_heap,
  Heap_Block* the_block,
  _H_uptr_t user_addr,
  _H_uptr_t aligned_user_addr,
  uint32_t size)
{
  uint32_t  const page_size  = the_heap->page_size;
  _H_uptr_t const user_area  = _H_p2u(_Heap_User_area(the_block));
  _H_uptr_t const block_end  =
    _H_p2u(the_block) + _Heap_Block_size(the_block) + HEAP_BLOCK_HEADER_OFFSET;
  _H_uptr_t const user_end   = aligned_user_addr + size;
  _H_uptr_t const heap_start = _H_p2u(the_heap->start) + HEAP_OVERHEAD;
  _H_uptr_t const heap_end   =
    _H_p2u(the_heap->final) + HEAP_BLOCK_HEADER_OFFSET;

  /* Raw user address must be exactly the block's user area. */
  _HAssert(user_addr == user_area);
  /* Aligned pointer must stay within [user_area, user_area + page_size). */
  _HAssert(aligned_user_addr - user_area < page_size);
  _HAssert(aligned_user_addr >= user_area);
  _HAssert(aligned_user_addr < block_end);
  /* The returned user range must fall entirely within the block... */
  _HAssert(user_end > user_area);
  _HAssert(user_end <= block_end);
  /* ...and entirely within the heap's overall bounds. */
  _HAssert(aligned_user_addr >= heap_start);
  _HAssert(aligned_user_addr < heap_end);
  _HAssert(user_end > heap_start);
  _HAssert(user_end <= heap_end);
}

#else /* !defined(RTEMS_HEAP_DEBUG) */

/* Non-debug builds: the check evaluates to nothing. */
#define check_result(a, b, c, d, e) ((void)0)

#endif /* !defined(RTEMS_HEAP_DEBUG) */
58
/*PAGE
 *
 *  _Heap_Allocate_aligned
 *
 *  This kernel routine allocates the requested size of memory
 *  from the specified heap so that returned address is aligned according to
 *  the 'alignment'.
 *
 *  Strategy: walk the free list; for each block large enough, place the
 *  user area as high in the block as possible (aligning down from the
 *  block end) so that the lower part of the block may remain free.  The
 *  distance between the page-aligned user address and the returned aligned
 *  pointer must stay within [0, page_size) so that the owning block can
 *  later be recovered from the user pointer by aligning it down to a
 *  'page_size' boundary.
 *
 *  Input parameters:
 *    the_heap  - pointer to the heap control block.
 *    size      - size in bytes of the memory block to allocate.
 *    alignment - required user pointer alignment in bytes
 *                (0 is treated as CPU_ALIGNMENT)
 *
 *  Output parameters:
 *    returns - starting address of memory block allocated. The address is
 *              aligned on specified boundary. NULL if no suitable block
 *              exists or the size cannot be served.
 */

void *_Heap_Allocate_aligned(
  Heap_Control *the_heap,
  size_t        size,
  uint32_t      alignment
)
{
  uint32_t search_count;      /* number of free blocks inspected */
  Heap_Block *the_block;

  void *user_ptr = NULL;      /* result; stays NULL if no block fits */
  uint32_t  const page_size = the_heap->page_size;
  Heap_Statistics *const stats = &the_heap->stats;
  Heap_Block *const tail = _Heap_Tail(the_heap);

  /* Distance from a block's end back to where the user pointer must be
     placed so 'size' user bytes fit.  NOTE(review): the debug checks below
     treat the usable area as extending HEAP_BLOCK_HEADER_OFFSET bytes past
     the block end (see check_result's 'block_end'), presumably overlapping
     the following block's header field that is unused while this block is
     allocated -- confirm against heap.h. */
  uint32_t const end_to_user_offs = size - HEAP_BLOCK_HEADER_OFFSET;

  uint32_t const the_size =
    _Heap_Calc_block_size(size, page_size, the_heap->min_block_size);

  /* _Heap_Calc_block_size returning 0 means the request cannot be served
     (e.g. rounding the size up overflowed). */
  if(the_size == 0)
    return NULL;

  if(alignment == 0)
    alignment = CPU_ALIGNMENT;

  /* Find large enough free block that satisfies the alignment requirements. */

  for(the_block = _Heap_Head(the_heap)->next, search_count = 0;
      the_block != tail;
      the_block = the_block->next, ++search_count)
  {
    uint32_t const block_size = _Heap_Block_size(the_block);

    /* As we always coalesce free blocks, prev block must have been used. */
    _HAssert(_Heap_Is_prev_used(the_block));

    if(block_size >= the_size) { /* the_block is large enough. */

      _H_uptr_t user_addr;          /* page-aligned start of the user area */
      _H_uptr_t aligned_user_addr;  /* pointer actually handed to the caller */
      _H_uptr_t const user_area = _H_p2u(_Heap_User_area(the_block));

      /* Calculate 'aligned_user_addr' that will become the user pointer we
         return. It should be at least 'end_to_user_offs' bytes less than
         the 'block_end' and should be aligned on 'alignment' boundary.
         Calculations are from the 'block_end' as we are going to split free
         block so that the upper part of the block becomes used block. */
      _H_uptr_t const block_end = _H_p2u(the_block) + block_size;
      aligned_user_addr = block_end - end_to_user_offs;
      _Heap_Align_down_uptr(&aligned_user_addr, alignment);

      /* 'user_addr' is the 'aligned_user_addr' further aligned down to the
         'page_size' boundary. We need it as blocks' user areas should begin
         only at 'page_size' aligned addresses */
      user_addr = aligned_user_addr;
      _Heap_Align_down_uptr(&user_addr, page_size);

      /* Make sure 'user_addr' calculated didn't run out of 'the_block'. */
      if(user_addr >= user_area) {

        /* The block seems to be acceptable. Check if the remainder of
           'the_block' is less than 'min_block_size' so that 'the_block' won't
           actually be split at the address we assume. */
        if(user_addr - user_area < the_heap->min_block_size) {

          /* The block won't be split, so 'user_addr' will be equal to the
             'user_area'. */
          user_addr = user_area;

          /* We can't allow the distance between 'user_addr' and
           'aligned_user_addr' to be outside of [0,page_size) range. If we do,
           we will need to store this distance somewhere to be able to
           resurrect the block address from the user pointer. (Having the
           distance within [0,page_size) range allows resurrection by
           aligning user pointer down to the nearest 'page_size' boundary.) */
          if(aligned_user_addr - user_addr >= page_size) {

            /* The user pointer will be too far from 'user_addr'. See if we
               can make 'aligned_user_addr' to be close enough to the
               'user_addr'. */
            aligned_user_addr = user_addr;
            _Heap_Align_up_uptr(&aligned_user_addr, alignment);
            if(aligned_user_addr - user_addr >= page_size) {
              /* No, we can't use the block.  Zero is used as the "reject"
                 sentinel; the next free block will be tried. */
              aligned_user_addr = 0;
            }
          }
        }

        if(aligned_user_addr) {

          /* The block is indeed acceptable: calculate the size of the block
             to be allocated and perform allocation. */
          uint32_t const alloc_size =
            block_end - user_addr + HEAP_BLOCK_USER_OFFSET;

          _HAssert(_Heap_Is_aligned_ptr((void*)aligned_user_addr, alignment));

          (void)_Heap_Block_allocate(the_heap, the_block, alloc_size);

          /* Update statistics: searches counts blocks inspected, including
             the one just taken. */
          stats->searches += search_count + 1;
          stats->allocs += 1;

          check_result(the_heap, the_block, user_addr,
            aligned_user_addr, size);

          user_ptr = (void*)aligned_user_addr;
          break;
        }
      }
    }
  }

  /* Track the longest free-list walk ever performed (even for failures). */
  if(stats->max_search < search_count)
    stats->max_search = search_count;

  return user_ptr;
}
Note: See TracBrowser for help on using the repository browser.