source: rtems/cpukit/libcsupport/src/malloc.c @ be31de7

4.104.114.84.95
Last change on this file since be31de7 was be31de7, checked in by Eric Norum <WENorum@…>, on 05/25/06 at 17:36:31

Provide customisable mbuf allocation.
Patch from Steven Johnson <sjohnson@…>

  • Property mode set to 100644
File size: 14.7 KB
Line 
1/*
2 *  RTEMS Malloc Family Implementation
3 *
4 *
5 *  COPYRIGHT (c) 1989-1999.
6 *  On-Line Applications Research Corporation (OAR).
7 *
8 *  The license and distribution terms for this file may be
9 *  found in the file LICENSE in this distribution or at
10 *  http://www.rtems.com/license/LICENSE.
11 *
12 *  $Id$
13 */
14
15#if HAVE_CONFIG_H
16#include "config.h"
17#endif
18
19#define __RTEMS_VIOLATE_KERNEL_VISIBILITY__
20#include <rtems.h>
21#include <rtems/libcsupport.h>
22#include <rtems/score/apimutex.h>
23#ifdef RTEMS_NEWLIB
24#include <sys/reent.h>
25#endif
26
27#include <stdio.h>
28#include <stdlib.h>
29#include <sys/types.h>
30#include <assert.h>
31#include <errno.h>
32#include <string.h>
33
34#include <unistd.h>    /* sbrk(2) */
35
36#include <rtems/chain.h>
37
#ifdef MALLOC_ARENA_CHECK
#define SENTINELSIZE    12
#define SENTINEL       "\xD1\xAC\xB2\xF1" "BITE ME"
#define CALLCHAINSIZE 5

/*
 *  Debug bookkeeping node placed in front of every allocation when
 *  MALLOC_ARENA_CHECK is enabled.  The user memory immediately follows
 *  the node and is itself followed by a SENTINELSIZE byte sentinel.
 */
struct mallocNode {
    struct mallocNode *back;                     /* doubly-linked list of live allocations */
    struct mallocNode *forw;
    int                callChain[CALLCHAINSIZE]; /* saved return addresses of the allocator */
    size_t             size;                     /* user-visible size of the allocation */
    void              *memory;                   /* must always equal (node + 1) */
};
static struct mallocNode mallocNodeHead = { &mallocNodeHead, &mallocNodeHead };

/*
 *  Format a description of a corrupted allocation into a static buffer
 *  and emit it via printk.  Not reentrant (static buffer) -- intended
 *  for one-shot fatal diagnostics only.
 *
 *  Note: pointers are printed via an unsigned long cast with %lx (the
 *  original %x with a raw pointer argument is undefined behavior and
 *  truncates on LP64 targets); size_t likewise gets a matching cast.
 */
void reportMallocError(const char *msg, struct mallocNode *mp)
{
    unsigned char *sp = (unsigned char *)mp->memory + mp->size;
    int i, ind = 0;
    static char cbuf[500];

    ind += sprintf(cbuf+ind, "Malloc Error: %s\n", msg);
    if ((mp->forw->back != mp) || (mp->back->forw != mp))
        ind += sprintf(cbuf+ind,
              "mp:0x%lx  mp->forw:0x%lx  mp->forw->back:0x%lx  mp->back:0x%lx  mp->back->forw:0x%lx\n",
              (unsigned long)mp, (unsigned long)mp->forw,
              (unsigned long)mp->forw->back, (unsigned long)mp->back,
              (unsigned long)mp->back->forw);
    if (mp->memory != (mp + 1))
        ind += sprintf(cbuf+ind, "mp+1:0x%lx  ", (unsigned long)(mp + 1));
    ind += sprintf(cbuf+ind, "mp->memory:0x%lx  mp->size:%lu\n",
                   (unsigned long)mp->memory, (unsigned long)mp->size);
    if (memcmp((char *)mp->memory + mp->size, SENTINEL, SENTINELSIZE) != 0) {
        /* Sentinel was overwritten -- dump what is there now */
        ind += sprintf(cbuf+ind, "mp->sentinel: ");
        for (i = 0 ; i < SENTINELSIZE ; i++)
            ind += sprintf(cbuf+ind, " 0x%x", sp[i]);
        ind += sprintf(cbuf+ind, "\n");
    }
    ind += sprintf(cbuf+ind, "Call chain:");
    for (i = 0 ; i < CALLCHAINSIZE ; i++) {
        if (mp->callChain[i] == 0)
            break;
        ind += sprintf(cbuf+ind, " 0x%x", mp->callChain[i]);
    }
    printk("\n\n%s\n\n", cbuf);
}
#endif
77
/* Blocks whose free() was deferred because it ran in an ISR or with
   dispatching disabled; drained at the top of the next malloc(). */
Chain_Control RTEMS_Malloc_GC_list;

rtems_id RTEMS_Malloc_Heap;          /* region id backing the C heap */
size_t RTEMS_Malloc_Sbrk_amount;     /* granularity for sbrk() heap growth; 0 = no growth */

#ifdef RTEMS_DEBUG
#define MALLOC_STATS
#define MALLOC_DIRTY
#endif

#ifdef MALLOC_STATS
/* Bump statistics field f by n (compiles away when stats are disabled). */
#define MSBUMP(f,n)    rtems_malloc_stats.f += (n)

struct {
    uint32_t    space_available;             /* current size of malloc area */
    uint32_t    malloc_calls;                /* # calls to malloc */
    uint32_t    free_calls;
    uint32_t    realloc_calls;
    uint32_t    calloc_calls;
    uint32_t    max_depth;                   /* most ever malloc'd at 1 time */
    uint64_t    lifetime_allocated;
    uint64_t    lifetime_freed;
} rtems_malloc_stats;

#else                   /* No rtems_malloc_stats */
#define MSBUMP(f,n)
#endif
105
/*
 *  RTEMS_Malloc_Initialize
 *
 *  Create the region object that backs the C heap.
 *
 *  start       - beginning of the heap area, or NULL to obtain it via sbrk()
 *  length      - size of the heap area in bytes
 *  sbrk_amount - granularity for later heap growth via sbrk() (0 disables
 *                growth inside malloc())
 *
 *  Does not return on failure: a fatal error is raised instead, because
 *  without a heap STDIO (and therefore assert) cannot work.
 */
void RTEMS_Malloc_Initialize(
  void   *start,
  size_t  length,
  size_t  sbrk_amount
)
{
  rtems_status_code   status;
  void               *starting_address;
  uint32_t      old_address;
  uint32_t      u32_address;

  /*
   *  Initialize the garbage collection list to start with nothing on it.
   */
  Chain_Initialize_empty(&RTEMS_Malloc_GC_list);

  /*
   * If the starting address is 0 then we are to attempt to
   * get length worth of memory using sbrk. Make sure we
   * align the address that we get back.
   */

  starting_address = start;
  RTEMS_Malloc_Sbrk_amount = sbrk_amount;

  if (!starting_address) {
    u32_address = (unsigned int)sbrk(length);

    if (u32_address == (uint32_t  ) -1) {
      rtems_fatal_error_occurred( RTEMS_NO_MEMORY );
      /* DOES NOT RETURN!!! */
    }

    /* Round the sbrk'ed address up to the heap alignment boundary. */
    if (u32_address & (CPU_HEAP_ALIGNMENT-1)) {
      old_address = u32_address;
      u32_address = (u32_address + CPU_HEAP_ALIGNMENT) & ~(CPU_HEAP_ALIGNMENT-1);

       /*
        * adjust the length by whatever we aligned by
        */

      length -= u32_address - old_address;
    }

    starting_address = (void *)u32_address;
  }

  /*
   *  If the BSP is not clearing out the workspace, then it is most likely
   *  not clearing out the initial memory for the heap.  There is no
   *  standard supporting zeroing out the heap memory.  But much code
   *  with UNIX history seems to assume that memory malloc'ed during
   *  initialization (before any free's) is zero'ed.  This is true most
   *  of the time under UNIX because zero'ing memory when it is first
   *  given to a process eliminates the chance of a process seeing data
   *  left over from another process.  This would be a security violation.
   */

  if ( rtems_cpu_configuration_get_do_zero_of_workspace() )
     memset( starting_address, 0, length );

  /*
   *  Unfortunately we cannot use assert if this fails because if this
   *  has failed we do not have a heap and if we do not have a heap
   *  STDIO cannot work because there will be no buffers.
   */

  status = rtems_region_create(
    rtems_build_name( 'H', 'E', 'A', 'P' ),
    starting_address,
    length,
    CPU_HEAP_ALIGNMENT,
    RTEMS_DEFAULT_ATTRIBUTES,
    &RTEMS_Malloc_Heap
  );
  if ( status != RTEMS_SUCCESSFUL )
    rtems_fatal_error_occurred( status );

#ifdef MALLOC_STATS
  /* zero all the stats */
  (void) memset( &rtems_malloc_stats, 0, sizeof(rtems_malloc_stats) );
#endif

  MSBUMP(space_available, length);
}
191
192#ifdef RTEMS_NEWLIB
/*
 *  malloc
 *
 *  Allocate size bytes from the heap region.  Returns NULL on a zero
 *  size request, when called from an ISR or with dispatching disabled,
 *  or when the region cannot satisfy the request even after attempting
 *  to grow it via sbrk()/rtems_region_extend().
 */
void *malloc(
  size_t  size
)
{
  void              *return_this;
  void              *starting_address;
  uint32_t     the_size;
  uint32_t     sbrk_amount;
  rtems_status_code  status;
  Chain_Node        *to_be_freed;

  MSBUMP(malloc_calls, 1);

  if ( !size )
    return (void *) 0;

  /*
   *  Do not attempt to allocate memory if in a critical section or ISR.
   */

  if (_System_state_Is_up(_System_state_Get())) {
    if (_Thread_Dispatch_disable_level > 0)
      return (void *) 0;

    if (_ISR_Nest_level > 0)
      return (void *) 0;
  }

  /*
   *  If some free's have been deferred, then do them now.
   */
  while ((to_be_freed = Chain_Get(&RTEMS_Malloc_GC_list)) != NULL)
    free(to_be_freed);

  /*
   * Try to give a segment in the current region if there is not
   * enough space then try to grow the region using rtems_region_extend().
   * If this fails then return a NULL pointer.
   */

#ifdef MALLOC_ARENA_CHECK
  /* Leave room for the bookkeeping node in front and sentinel behind. */
  size += sizeof(struct mallocNode) + SENTINELSIZE;
#endif
  status = rtems_region_get_segment(
    RTEMS_Malloc_Heap,
    size,
    RTEMS_NO_WAIT,
    RTEMS_NO_TIMEOUT,
    &return_this
  );

  if ( status != RTEMS_SUCCESSFUL ) {
    /*
     *  Round to the "requested sbrk amount" so hopefully we won't have
     *  to grow again for a while.  This effectively does sbrk() calls
     *  in "page" amounts.
     */

    sbrk_amount = RTEMS_Malloc_Sbrk_amount;

    if ( sbrk_amount == 0 )
      return (void *) 0;

    the_size = ((size + sbrk_amount) / sbrk_amount * sbrk_amount);

    if ((starting_address = (void *)sbrk(the_size))
            == (void*) -1)
      return (void *) 0;

    status = rtems_region_extend(
      RTEMS_Malloc_Heap,
      starting_address,
      the_size
    );
    if ( status != RTEMS_SUCCESSFUL ) {
      /* Give the memory back since the region would not accept it. */
      sbrk(-the_size);
      errno = ENOMEM;
      return (void *) 0;
    }

    MSBUMP(space_available, the_size);

    status = rtems_region_get_segment(
      RTEMS_Malloc_Heap,
       size,
       RTEMS_NO_WAIT,
       RTEMS_NO_TIMEOUT,
       &return_this
    );
    if ( status != RTEMS_SUCCESSFUL ) {
      errno = ENOMEM;
      return (void *) 0;
    }
  }

#ifdef MALLOC_STATS
  if (return_this)
  {
      size_t     actual_size;
      uint32_t   current_depth;
      status = rtems_region_get_segment_size(
                   RTEMS_Malloc_Heap, return_this, &actual_size);
      MSBUMP(lifetime_allocated, actual_size);
      current_depth = rtems_malloc_stats.lifetime_allocated -
                   rtems_malloc_stats.lifetime_freed;
      if (current_depth > rtems_malloc_stats.max_depth)
          rtems_malloc_stats.max_depth = current_depth;
  }
#endif

#ifdef MALLOC_DIRTY
  /* Poison the block so reads of uninitialized memory are obvious. */
  (void) memset(return_this, 0xCF, size);
#endif

#ifdef MALLOC_ARENA_CHECK
  {
  /* Fill in the bookkeeping node, capture a best-effort call chain by
     walking saved frame pointers, write the trailing sentinel, and put
     the node on the live-allocation list. */
  struct mallocNode *mp = (struct mallocNode *)return_this;
  int key, *fp, *nfp, i;
  rtems_interrupt_disable(key);
  mp->memory = mp + 1;
  return_this = mp->memory;
  mp->size = size - (sizeof(struct mallocNode) + SENTINELSIZE);
  /* NOTE(review): frame-pointer walk assumes a specific stack layout
     ([fp, ra] pairs) and an upper memory bound of 1<<24 -- target
     specific, verify per port. */
  fp = (int *)&size - 2;
  for (i = 0 ; i < CALLCHAINSIZE ; i++) {
    mp->callChain[i] = fp[1];
    nfp = (int *)(fp[0]);
    if((nfp <= fp) || (nfp > (int *)(1 << 24)))
     break;
    fp = nfp;
  }
  while (i < CALLCHAINSIZE)
    mp->callChain[i++] = 0;
  memcpy((char *)mp->memory + mp->size, SENTINEL, SENTINELSIZE);
  mp->forw = mallocNodeHead.forw;
  mp->back = &mallocNodeHead;
  mallocNodeHead.forw->back = mp;
  mallocNodeHead.forw = mp;
  rtems_interrupt_enable(key);
  }
#endif
  return return_this;
}
335
336void *calloc(
337  size_t nelem,
338  size_t elsize
339)
340{
341  register char *cptr;
342  int length;
343
344  MSBUMP(calloc_calls, 1);
345
346  length = nelem * elsize;
347  cptr = malloc( length );
348  if ( cptr )
349    memset( cptr, '\0', length );
350
351  MSBUMP(malloc_calls, -1);   /* subtract off the malloc */
352
353  return cptr;
354}
355
/*
 *  realloc
 *
 *  Resize the block at ptr to size bytes.  Tries an in-place resize via
 *  rtems_region_resize_segment() first; otherwise allocates a new block,
 *  copies the data, and frees the old one.  Per the C standard the old
 *  block is NOT freed on allocation failure.
 */
void *realloc(
  void *ptr,
  size_t size
)
{
  size_t old_size;
  rtems_status_code status;
  char *new_area;

  MSBUMP(realloc_calls, 1);

  /*
   *  Do not attempt to allocate memory if in a critical section or ISR.
   */

  if (_System_state_Is_up(_System_state_Get())) {
    if (_Thread_Dispatch_disable_level > 0)
      return (void *) 0;

    if (_ISR_Nest_level > 0)
      return (void *) 0;
  }

  /*
   * Continue with realloc().
   */
  if ( !ptr )
    return malloc( size );

  if ( !size ) {
    free( ptr );
    return (void *) 0;
  }

#ifdef MALLOC_ARENA_CHECK
  {
  /* Always move the block so the new node/sentinel get set up.
     NOTE(review): copies `size` bytes from the old block even when
     growing, which can read past the old allocation -- confirm this is
     acceptable for the debug build. */
  void *np;
  np = malloc(size);
  if (!np) return np;
  memcpy(np,ptr,size);
  free(ptr);
  return np;
  }
#endif
  status =
    rtems_region_resize_segment( RTEMS_Malloc_Heap, ptr, size, &old_size );

  if( status == RTEMS_SUCCESSFUL ) {
    return ptr;
  }
  else if ( status != RTEMS_UNSATISFIED ) {
    /* ptr did not name a valid segment */
    errno = EINVAL;
    return (void *) 0;
  }

  new_area = malloc( size );

  MSBUMP(malloc_calls, -1);   /* subtract off the malloc */

  /*
   *  There used to be a free on this error case but it is wrong to
   *  free the memory per OpenGroup Single UNIX Specification V2
   *  and the C Standard.
   */

  if ( !new_area ) {
    return (void *) 0;
  }

  status = rtems_region_get_segment_size( RTEMS_Malloc_Heap, ptr, &old_size );
  if ( status != RTEMS_SUCCESSFUL ) {
    errno = EINVAL;
    return (void *) 0;
  }

  /* Copy the smaller of the old and new sizes, then release the old block. */
  memcpy( new_area, ptr, (size < old_size) ? size : old_size );
  free( ptr );

  return new_area;

}
437
/*
 *  free
 *
 *  Return the block at ptr to the heap region.  If called from an ISR
 *  or with dispatching disabled, the free is deferred by chaining the
 *  block onto RTEMS_Malloc_GC_list; malloc() drains that list later.
 */
void free(
  void *ptr
)
{
  rtems_status_code status;

  MSBUMP(free_calls, 1);

  if ( !ptr )
    return;

  /*
   *  Do not attempt to free memory if in a critical section or ISR.
   */

  if (_System_state_Is_up(_System_state_Get())) {
    if ((_Thread_Dispatch_disable_level > 0) || (_ISR_Nest_level > 0)) {
      /* Defer: the user block itself is reused as the chain node, so it
         must be at least sizeof(Chain_Node) -- NOTE(review): confirm
         minimum segment size guarantees this. */
      Chain_Append(&RTEMS_Malloc_GC_list, (Chain_Node *)ptr);
      return;
    }
  }

#ifdef MALLOC_ARENA_CHECK
  {
  /* Validate the node/sentinel, verify the block is on the live list,
     unlink it, and rewind ptr to the real segment start (the node). */
  struct mallocNode *mp = (struct mallocNode *)ptr - 1;
  struct mallocNode *mp1;
  int key;
  rtems_interrupt_disable(key);
  if ((mp->memory != (mp + 1))
   || (memcmp((char *)mp->memory + mp->size, SENTINEL, SENTINELSIZE) != 0))
    reportMallocError("Freeing with inconsistent pointer/sentinel", mp);
  mp1 = mallocNodeHead.forw;
  while (mp1 != &mallocNodeHead) {
    if (mp1 == mp)
      break;
    mp1 = mp1->forw;
  }
  if (mp1 != mp)
    reportMallocError("Freeing, but not on allocated list", mp);
  mp->forw->back = mp->back;
  mp->back->forw = mp->forw;
  mp->back = mp->forw = NULL;
  ptr = mp;
  rtems_interrupt_enable(key);
  }
#endif
#ifdef MALLOC_STATS
  {
      size_t size;
      status = rtems_region_get_segment_size( RTEMS_Malloc_Heap, ptr, &size );
      if ( status == RTEMS_SUCCESSFUL ) {
          MSBUMP(lifetime_freed, size);
      }
  }
#endif

  status = rtems_region_return_segment( RTEMS_Malloc_Heap, ptr );
  if ( status != RTEMS_SUCCESSFUL ) {
    errno = EINVAL;
    assert( 0 );
  }
}
500
501#ifdef MALLOC_ARENA_CHECK
502void checkMallocArena(void)
503{
504    struct mallocNode *mp = mallocNodeHead.forw;
505    int key;
506    rtems_interrupt_disable(key);
507    while (mp != &mallocNodeHead) {
508        if ((mp->forw->back != mp)
509         || (mp->back->forw != mp))
510            reportMallocError("Pointers mangled", mp);
511        if((mp->memory != (mp + 1))
512         || (memcmp((char *)mp->memory + mp->size, SENTINEL, SENTINELSIZE) != 0))
513            reportMallocError("Inconsistent pointer/sentinel", mp);
514        mp = mp->forw;
515    }
516    rtems_interrupt_enable(key);
517}
518#endif
519
520/* end if RTEMS_NEWLIB */
521#endif
522
523#ifdef MALLOC_STATS
524/*
525 * Dump the malloc statistics
526 * May be called via atexit()  (installable by our bsp) or
527 * at any time by user
528 */
529
530void malloc_dump(void)
531{
532    uint32_t   allocated = rtems_malloc_stats.lifetime_allocated -
533                     rtems_malloc_stats.lifetime_freed;
534
535    printf("Malloc stats\n");
536    printf("  avail:%uk  allocated:%uk (%d%%) "
537              "max:%uk (%d%%) lifetime:%Luk freed:%Luk\n",
538           (unsigned int) rtems_malloc_stats.space_available / 1024,
539           (unsigned int) allocated / 1024,
540           /* avoid float! */
541           (allocated * 100) / rtems_malloc_stats.space_available,
542           (unsigned int) rtems_malloc_stats.max_depth / 1024,
543           (rtems_malloc_stats.max_depth * 100) / rtems_malloc_stats.space_available,
544           (uint64_t  ) rtems_malloc_stats.lifetime_allocated / 1024,
545           (uint64_t  ) rtems_malloc_stats.lifetime_freed / 1024);
546    printf("  Call counts:   malloc:%d   free:%d   realloc:%d   calloc:%d\n",
547           rtems_malloc_stats.malloc_calls,
548           rtems_malloc_stats.free_calls,
549           rtems_malloc_stats.realloc_calls,
550           rtems_malloc_stats.calloc_calls);
551}
552
553
/*
 *  Walk the heap backing the malloc region for consistency checking.
 *  source and printf_enabled are passed straight through to _Heap_Walk.
 */
void malloc_walk(size_t source, size_t printf_enabled)
{
   register Region_Control *the_region;
   Objects_Locations        location;

  _RTEMS_Lock_allocator();                      /* to prevent deletion */
   the_region = _Region_Get( RTEMS_Malloc_Heap, &location );
   if ( location == OBJECTS_LOCAL )
   {
      _Heap_Walk( &the_region->Memory, source, printf_enabled );
   }
  _RTEMS_Unlock_allocator();
}
567
568#else
569
/* Statistics support is compiled out: provide no-op stubs so that
   callers of the diagnostic entry points still link. */
void malloc_dump(void)
{
}

void malloc_walk(size_t source, size_t printf_enabled)
{
   (void) source;
   (void) printf_enabled;
}
579
580#endif
581
582/*
583 *  "Reentrant" versions of the above routines implemented above.
584 */
585
586#ifdef RTEMS_NEWLIB
/*
 *  Newlib reentrancy hook: the reentrancy structure is not needed
 *  because malloc() does its own locking, so simply delegate.
 */
void *_malloc_r(
  struct _reent *ignored,
  size_t  size
)
{
  (void) ignored;
  return malloc( size );
}
594
/*
 *  Newlib reentrancy hook: the reentrancy structure is not needed
 *  because calloc() does its own locking, so simply delegate.
 */
void *_calloc_r(
  struct _reent *ignored,
  size_t nelem,
  size_t elsize
)
{
  (void) ignored;
  return calloc( nelem, elsize );
}
603
/*
 *  Newlib reentrancy hook: the reentrancy structure is not needed
 *  because realloc() does its own locking, so simply delegate.
 */
void *_realloc_r(
  struct _reent *ignored,
  void *ptr,
  size_t size
)
{
  (void) ignored;
  return realloc( ptr, size );
}
612
/*
 *  Newlib reentrancy hook: the reentrancy structure is not needed
 *  because free() does its own locking, so simply delegate.
 */
void _free_r(
  struct _reent *ignored,
  void *ptr
)
{
  (void) ignored;
  free( ptr );
}
620
621#endif
Note: See TracBrowser for help on using the repository browser.