source: rtems/cpukit/libcsupport/src/malloc.c @ 68b9f58

Last change on this file since 68b9f58 was e746a88, checked in by Joel Sherrill <joel.sherrill@…>, on 05/03/07 at 21:33:39

2007-05-03 Joel Sherrill <joel@…>

  • ChangeLog, libcsupport/src/malloc.c, libcsupport/src/mallocfreespace.c, sapi/include/confdefs.h, score/Makefile.am, score/preinstall.am: malloc never blocks so the Region Manager is quite heavy for implementing this. This patch implements the C Program Heap directly in terms of the new Protected Heap handler. This handler is a direct use of a SuperCore Heap in conjunction with the Allocator Mutex used internally by RTEMS. This saves 3184 bytes on most SPARC test executables.
  • score/include/rtems/score/protectedheap.h, score/src/pheapallocate.c, score/src/pheapallocatealigned.c, score/src/pheapextend.c, score/src/pheapfree.c, score/src/pheapgetblocksize.c, score/src/pheapgetfreeinfo.c, score/src/pheapgetinfo.c, score/src/pheapinit.c, score/src/pheapresizeblock.c, score/src/pheapwalk.c: New files.
  • Property mode set to 100644
File size: 13.9 KB
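
The patch described above replaces the Region-based C heap with one built directly on the new Protected Heap handler: a SuperCore Heap guarded by the RTEMS allocator mutex. As a rough sketch of what that wrapper amounts to (illustrative only, assuming the internal _RTEMS_Lock_allocator()/_RTEMS_Unlock_allocator() helpers and the SuperCore _Heap_Allocate() routine; the authoritative version is score/src/pheapallocate.c listed above):

void *_Protected_heap_Allocate(
  Heap_Control *the_heap,
  size_t        size
)
{
  void *p;

  /* serialize with other users of the allocator, then defer to the SuperCore heap */
  _RTEMS_Lock_allocator();
    p = _Heap_Allocate( the_heap, size );
  _RTEMS_Unlock_allocator();

  return p;
}

Since malloc() never blocks waiting for memory to become available, this mutex-plus-heap combination is sufficient and avoids the overhead of the Region Manager.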
/*
 *  RTEMS Malloc Family Implementation
 *
 *
 *  COPYRIGHT (c) 1989-1999.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.com/license/LICENSE.
 *
 *  $Id$
 */

#if HAVE_CONFIG_H
#include "config.h"
#endif

#define __RTEMS_VIOLATE_KERNEL_VISIBILITY__
#include <rtems.h>
#include <rtems/libcsupport.h>
#include <rtems/score/protectedheap.h>
#ifdef RTEMS_NEWLIB
#include <sys/reent.h>
#endif

#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <assert.h>
#include <errno.h>
#include <string.h>

#include <unistd.h>    /* sbrk(2) */
#include <inttypes.h>

#include <rtems/chain.h>

#ifndef HAVE_UINTMAX_T
/* Fall back to unsigned long if uintmax_t is not available */
#define uintmax_t unsigned long

#ifndef PRIuMAX
#define PRIuMAX         "lu"
#endif
#endif

#ifdef MALLOC_ARENA_CHECK
#define SENTINELSIZE    12
#define SENTINEL       "\xD1\xAC\xB2\xF1" "BITE ME"
#define CALLCHAINSIZE 5
struct mallocNode {
    struct mallocNode *back;
    struct mallocNode *forw;
    int                callChain[CALLCHAINSIZE];
    size_t             size;
    void              *memory;
};
static struct mallocNode mallocNodeHead = { &mallocNodeHead, &mallocNodeHead };
void reportMallocError(const char *msg, struct mallocNode *mp)
{
    unsigned char *sp = (unsigned char *)mp->memory + mp->size;
    int i, ind = 0;
    static char cbuf[500];
    ind += sprintf(cbuf+ind, "Malloc Error: %s\n", msg);
    if ((mp->forw->back != mp) || (mp->back->forw != mp))
        ind += sprintf(cbuf+ind, "mp:0x%x  mp->forw:0x%x  mp->forw->back:0x%x  mp->back:0x%x  mp->back->forw:0x%x\n",
                        mp, mp->forw, mp->forw->back, mp->back, mp->back->forw);
    if (mp->memory != (mp + 1))
        ind += sprintf(cbuf+ind, "mp+1:0x%x  ", mp + 1);
    ind += sprintf(cbuf+ind, "mp->memory:0x%x  mp->size:%d\n", mp->memory, mp->size);
    if (memcmp((char *)mp->memory + mp->size, SENTINEL, SENTINELSIZE) != 0) {
        ind += sprintf(cbuf+ind, "mp->sentinel: ");
        for (i = 0 ; i < SENTINELSIZE ; i++)
            ind += sprintf(cbuf+ind, " 0x%x", sp[i]);
        ind += sprintf(cbuf+ind, "\n");
    }
    ind += sprintf(cbuf+ind, "Call chain:");
    for (i = 0 ; i < CALLCHAINSIZE ; i++) {
        if (mp->callChain[i] == 0)
            break;
        ind += sprintf(cbuf+ind, " 0x%x", mp->callChain[i]);
    }
    printk("\n\n%s\n\n", cbuf);
}
#endif

Heap_Control  RTEMS_Malloc_Heap;
Chain_Control RTEMS_Malloc_GC_list;

/* rtems_id RTEMS_Malloc_Heap; */
size_t RTEMS_Malloc_Sbrk_amount;

#ifdef RTEMS_DEBUG
#define MALLOC_STATS
#define MALLOC_DIRTY
#endif

#ifdef MALLOC_STATS
#define MSBUMP(f,n)    rtems_malloc_stats.f += (n)

struct {
    uint32_t    space_available;             /* current size of malloc area */
    uint32_t    malloc_calls;                /* # calls to malloc */
    uint32_t    free_calls;
    uint32_t    realloc_calls;
    uint32_t    calloc_calls;
    uint32_t    max_depth;                   /* most ever malloc'd at 1 time */
    uintmax_t    lifetime_allocated;
    uintmax_t    lifetime_freed;
} rtems_malloc_stats;

#else                   /* No rtems_malloc_stats */
#define MSBUMP(f,n)
#endif

void RTEMS_Malloc_Initialize(
  void   *start,
  size_t  length,
  size_t  sbrk_amount
)
{
  uint32_t      status;
  void         *starting_address;
  uintptr_t     old_address;
  uintptr_t     uaddress;

  /*
   *  Initialize the garbage collection list to start with nothing on it.
   */
  Chain_Initialize_empty(&RTEMS_Malloc_GC_list);

  /*
   * If the starting address is 0 then we are to attempt to
   * get length worth of memory using sbrk. Make sure we
   * align the address that we get back.
   */

  starting_address = start;
  RTEMS_Malloc_Sbrk_amount = sbrk_amount;

  if (!starting_address) {
    uaddress = (uintptr_t)sbrk(length);

    if (uaddress == (uintptr_t) -1) {
      rtems_fatal_error_occurred( RTEMS_NO_MEMORY );
      /* DOES NOT RETURN!!! */
    }

    if (uaddress & (CPU_HEAP_ALIGNMENT-1)) {
      old_address = uaddress;
      uaddress = (uaddress + CPU_HEAP_ALIGNMENT) & ~(CPU_HEAP_ALIGNMENT-1);

       /*
        * adjust the length by whatever we aligned by
        */

      length -= uaddress - old_address;
    }

    starting_address = (void *)uaddress;
  }

  /*
   *  If the BSP is not clearing out the workspace, then it is most likely
   *  not clearing out the initial memory for the heap.  There is no
   *  standard supporting zeroing out the heap memory.  But much code
   *  with UNIX history seems to assume that memory malloc'ed during
   *  initialization (before any free's) is zero'ed.  This is true most
   *  of the time under UNIX because zero'ing memory when it is first
   *  given to a process eliminates the chance of a process seeing data
   *  left over from another process.  This would be a security violation.
   */

  if ( rtems_cpu_configuration_get_do_zero_of_workspace() )
     memset( starting_address, 0, length );

  /*
   *  Unfortunately we cannot use assert if this fails because if this
   *  has failed we do not have a heap and if we do not have a heap
   *  STDIO cannot work because there will be no buffers.
   */

  status = _Protected_heap_Initialize(
    &RTEMS_Malloc_Heap,
    starting_address,
    length,
    CPU_HEAP_ALIGNMENT
  );
  if ( !status )
    rtems_fatal_error_occurred( status );

#ifdef MALLOC_STATS
  /* zero all the stats */
  (void) memset( &rtems_malloc_stats, 0, sizeof(rtems_malloc_stats) );
#endif

  MSBUMP(space_available, length);
}

#ifdef RTEMS_NEWLIB
void *malloc(
  size_t  size
)
{
  void        *return_this;
  void        *starting_address;
  uint32_t     the_size;
  uint32_t     sbrk_amount;
  Chain_Node  *to_be_freed;

  MSBUMP(malloc_calls, 1);

  if ( !size )
    return (void *) 0;

  /*
   *  Do not attempt to allocate memory if in a critical section or ISR.
   */

  if (_System_state_Is_up(_System_state_Get())) {
    if (_Thread_Dispatch_disable_level > 0)
      return (void *) 0;

    if (_ISR_Nest_level > 0)
      return (void *) 0;
  }

  /*
   *  If some free's have been deferred, then do them now.
   */
  while ((to_be_freed = Chain_Get(&RTEMS_Malloc_GC_list)) != NULL)
    free(to_be_freed);

  /*
   * Try to give a segment from the current heap.  If there is not
   * enough space, then try to grow the heap.
   * If this fails, then return a NULL pointer.
   */

#ifdef MALLOC_ARENA_CHECK
  size += sizeof(struct mallocNode) + SENTINELSIZE;
#endif
  return_this = _Protected_heap_Allocate( &RTEMS_Malloc_Heap, size );

  if ( !return_this ) {
    /*
     *  Round to the "requested sbrk amount" so hopefully we won't have
     *  to grow again for a while.  This effectively does sbrk() calls
     *  in "page" amounts.
     */

    sbrk_amount = RTEMS_Malloc_Sbrk_amount;

    if ( sbrk_amount == 0 )
      return (void *) 0;

    the_size = ((size + sbrk_amount) / sbrk_amount * sbrk_amount);

    if ((starting_address = (void *)sbrk(the_size))
            == (void*) -1)
      return (void *) 0;

    if ( !_Protected_heap_Extend(
            &RTEMS_Malloc_Heap, starting_address, the_size) ) {
      sbrk(-the_size);
      errno = ENOMEM;
      return (void *) 0;
    }

    MSBUMP(space_available, the_size);

    return_this = _Protected_heap_Allocate( &RTEMS_Malloc_Heap, size );
    if ( !return_this ) {
      errno = ENOMEM;
      return (void *) 0;
    }
  }

#ifdef MALLOC_STATS
  if (return_this)
  {
      size_t     actual_size = 0;
      uint32_t   current_depth;
      _Protected_heap_Get_block_size(&RTEMS_Malloc_Heap, return_this, &actual_size);
      MSBUMP(lifetime_allocated, actual_size);
      current_depth = rtems_malloc_stats.lifetime_allocated -
                   rtems_malloc_stats.lifetime_freed;
      if (current_depth > rtems_malloc_stats.max_depth)
          rtems_malloc_stats.max_depth = current_depth;
  }
#endif

#ifdef MALLOC_DIRTY
  (void) memset(return_this, 0xCF, size);
#endif

#ifdef MALLOC_ARENA_CHECK
  {
  struct mallocNode *mp = (struct mallocNode *)return_this;
  int key, *fp, *nfp, i;
  rtems_interrupt_disable(key);
  mp->memory = mp + 1;
  return_this = mp->memory;
  mp->size = size - (sizeof(struct mallocNode) + SENTINELSIZE);
  fp = (int *)&size - 2;
  for (i = 0 ; i < CALLCHAINSIZE ; i++) {
    mp->callChain[i] = fp[1];
    nfp = (int *)(fp[0]);
    if((nfp <= fp) || (nfp > (int *)(1 << 24)))
     break;
    fp = nfp;
  }
  while (i < CALLCHAINSIZE)
    mp->callChain[i++] = 0;
  memcpy((char *)mp->memory + mp->size, SENTINEL, SENTINELSIZE);
  mp->forw = mallocNodeHead.forw;
  mp->back = &mallocNodeHead;
  mallocNodeHead.forw->back = mp;
  mallocNodeHead.forw = mp;
  rtems_interrupt_enable(key);
  }
#endif
  return return_this;
}

void *calloc(
  size_t nelem,
  size_t elsize
)
{
  register char *cptr;
  int length;

  MSBUMP(calloc_calls, 1);

  length = nelem * elsize;
  cptr = malloc( length );
  if ( cptr )
    memset( cptr, '\0', length );

  MSBUMP(malloc_calls, -1);   /* subtract off the malloc */

  return cptr;
}

void *realloc(
  void *ptr,
  size_t size
)
{
  size_t old_size;
  char *new_area;

  MSBUMP(realloc_calls, 1);

  /*
   *  Do not attempt to allocate memory if in a critical section or ISR.
   */

  if (_System_state_Is_up(_System_state_Get())) {
    if (_Thread_Dispatch_disable_level > 0)
      return (void *) 0;

    if (_ISR_Nest_level > 0)
      return (void *) 0;
  }

  /*
   * Continue with realloc().
   */
  if ( !ptr )
    return malloc( size );

  if ( !size ) {
    free( ptr );
    return (void *) 0;
  }

#ifdef MALLOC_ARENA_CHECK
  {
    struct mallocNode *mp = (struct mallocNode *)ptr - 1;
    void *np;
    np = malloc(size);
    if (!np) return np;
    /* copy no more than the old user size recorded in the arena node */
    memcpy(np, ptr, (size < mp->size) ? size : mp->size);
    free(ptr);
    return np;
  }
#endif
  if ( _Protected_heap_Resize_block( &RTEMS_Malloc_Heap, ptr, size ) ) {
    return ptr;
  }

  new_area = malloc( size );

  MSBUMP(malloc_calls, -1);   /* subtract off the malloc */

  /*
   *  There used to be a free on this error case but it is wrong to
   *  free the memory per OpenGroup Single UNIX Specification V2
   *  and the C Standard.
   */

  if ( !new_area ) {
    return (void *) 0;
  }

  if ( !_Protected_heap_Get_block_size(&RTEMS_Malloc_Heap, ptr, &old_size) ) {
    errno = EINVAL;
    return (void *) 0;
  }

  memcpy( new_area, ptr, (size < old_size) ? size : old_size );
  free( ptr );

  return new_area;

}

void free(
  void *ptr
)
{
  MSBUMP(free_calls, 1);

  if ( !ptr )
    return;

  /*
   *  Do not attempt to free memory if in a critical section or ISR.
   */

  if (_System_state_Is_up(_System_state_Get())) {
    if ((_Thread_Dispatch_disable_level > 0) || (_ISR_Nest_level > 0)) {
      Chain_Append(&RTEMS_Malloc_GC_list, (Chain_Node *)ptr);
      return;
    }
  }

#ifdef MALLOC_ARENA_CHECK
  {
  struct mallocNode *mp = (struct mallocNode *)ptr - 1;
  struct mallocNode *mp1;
  int key;
  rtems_interrupt_disable(key);
  if ((mp->memory != (mp + 1))
   || (memcmp((char *)mp->memory + mp->size, SENTINEL, SENTINELSIZE) != 0))
    reportMallocError("Freeing with inconsistent pointer/sentinel", mp);
  mp1 = mallocNodeHead.forw;
  while (mp1 != &mallocNodeHead) {
    if (mp1 == mp)
      break;
    mp1 = mp1->forw;
  }
  if (mp1 != mp)
    reportMallocError("Freeing, but not on allocated list", mp);
  mp->forw->back = mp->back;
  mp->back->forw = mp->forw;
  mp->back = mp->forw = NULL;
  ptr = mp;
  rtems_interrupt_enable(key);
  }
#endif
#ifdef MALLOC_STATS
  {
    size_t size;
    if (_Protected_heap_Get_block_size(&RTEMS_Malloc_Heap, ptr, &size) ) {
      MSBUMP(lifetime_freed, size);
    }
  }
#endif

  if ( !_Protected_heap_Free( &RTEMS_Malloc_Heap, ptr ) ) {
    errno = EINVAL;
    assert( 0 );
  }
}

#ifdef MALLOC_ARENA_CHECK
void checkMallocArena(void)
{
    struct mallocNode *mp = mallocNodeHead.forw;
    int key;
    rtems_interrupt_disable(key);
    while (mp != &mallocNodeHead) {
        if ((mp->forw->back != mp)
         || (mp->back->forw != mp))
            reportMallocError("Pointers mangled", mp);
        if((mp->memory != (mp + 1))
         || (memcmp((char *)mp->memory + mp->size, SENTINEL, SENTINELSIZE) != 0))
            reportMallocError("Inconsistent pointer/sentinel", mp);
        mp = mp->forw;
    }
    rtems_interrupt_enable(key);
}
#endif

/* end if RTEMS_NEWLIB */
#endif

#ifdef MALLOC_STATS
/*
 * Dump the malloc statistics
 * May be called via atexit()  (installable by our bsp) or
 * at any time by user
 */

void malloc_dump(void)
{
    uint32_t   allocated = rtems_malloc_stats.lifetime_allocated -
                     rtems_malloc_stats.lifetime_freed;

    printf("Malloc stats\n");
    printf("  avail:%"PRIu32"k  allocated:%"PRIu32"k (%"PRId32"%%) "
              "max:%"PRIu32"k (%"PRIu32"%%)"
              " lifetime:%"PRIuMAX"k freed:%"PRIuMAX"k\n",
           rtems_malloc_stats.space_available / 1024,
           allocated / 1024,
           /* avoid float! */
           (allocated * 100) / rtems_malloc_stats.space_available,
           rtems_malloc_stats.max_depth / 1024,
           (rtems_malloc_stats.max_depth * 100) / rtems_malloc_stats.space_available,
           rtems_malloc_stats.lifetime_allocated / 1024,
           rtems_malloc_stats.lifetime_freed / 1024
           );
    printf("  Call counts:   malloc:%"PRIu32"   free:%"PRIu32"   realloc:%"PRIu32"   calloc:%"PRIu32"\n",
           rtems_malloc_stats.malloc_calls,
           rtems_malloc_stats.free_calls,
           rtems_malloc_stats.realloc_calls,
           rtems_malloc_stats.calloc_calls);
}


void malloc_walk(size_t source, size_t printf_enabled)
{
  _Protected_heap_Walk( &RTEMS_Malloc_Heap, source, printf_enabled );
}

#else

void malloc_dump(void)
{
   return;
}

void malloc_walk(size_t source, size_t printf_enabled)
{
   return;
}

#endif

/*
 *  "Reentrant" versions of the above routines.
 */

#ifdef RTEMS_NEWLIB
void *_malloc_r(
  struct _reent *ignored,
  size_t  size
)
{
  return malloc( size );
}

void *_calloc_r(
  struct _reent *ignored,
  size_t nelem,
  size_t elsize
)
{
  return calloc( nelem, elsize );
}

void *_realloc_r(
  struct _reent *ignored,
  void *ptr,
  size_t size
)
{
  return realloc( ptr, size );
}

void _free_r(
  struct _reent *ignored,
  void *ptr
)
{
  free( ptr );
}

#endif
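
For reference, when this file is compiled with MALLOC_STATS (enabled above whenever RTEMS_DEBUG is defined), an application or BSP can report the counters at shutdown as sketched below. The helper name is hypothetical and the malloc_walk() argument values are illustrative; only malloc_dump() and malloc_walk() themselves come from the file above.

#include <stdlib.h>

extern void malloc_dump(void);
extern void malloc_walk(size_t source, size_t printf_enabled);

/* hypothetical helper: install the statistics dump and verify the heap once */
void example_install_malloc_reporting(void)
{
  atexit( malloc_dump );   /* print the counters when the program exits */
  malloc_walk( 0, 1 );     /* walk the C program heap with printing enabled */
}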