Changeset 11cdbeb in rtems


Ignore:
Timestamp:
May 27, 1999, 6:03:50 PM
Author:
Joel Sherrill <joel.sherrill@…>
Branches:
4.10, 4.11, 4.8, 4.9, 5, master
Children:
af84b968
Parents:
1c841bd2
Message:

Patch from Eric Norum <eric@…> to eliminate a panic when the
network stack runs out of mbufs.

Files:
12 edited

Legend:

Unmodified
Added
Removed
  • c/src/exec/libnetworking/kern/uipc_mbuf.c

    r1c841bd2 r11cdbeb  
    6464int     max_datalen;
    6565
    66 static void     m_reclaim __P((void));
    67 
    6866/* "number of clusters of pages" */
    6967#define NCL_INIT        1
    7068
    7169#define NMB_INIT        16
    72 
    73 /* ARGSUSED*/
    74 static void
    75 mbinit(dummy)
    76         void *dummy;
    77 {
    78         int s;
    79 
    80         mmbfree = NULL; mclfree = NULL;
    81         s = splimp();
    82         if (m_mballoc(NMB_INIT, M_DONTWAIT) == 0)
    83                 goto bad;
    84         if (m_clalloc(NCL_INIT, M_DONTWAIT) == 0)
    85                 goto bad;
    86         splx(s);
    87         return;
    88 bad:
    89         panic("mbinit");
    90 }
    91 
    92 /*
    93  * Allocate at least nmb mbufs and place on mbuf free list.
    94  * Must be called at splimp.
    95  */
    96 /* ARGSUSED */
    97 int
    98 m_mballoc(nmb, nowait)
    99         register int nmb;
    100         int nowait;
    101 {
    102         register caddr_t p;
    103         register int i;
    104         int nbytes;
    105 
    106         /* Once we run out of map space, it will be impossible to get
    107          * any more (nothing is ever freed back to the map) (XXX which
    108          * is dumb). (however you are not dead as m_reclaim might
    109          * still be able to free a substantial amount of space).
    110          */
    111         if (mb_map_full)
    112                 return (0);
    113 
    114         nbytes = round_page(nmb * MSIZE);
    115         p = (caddr_t)kmem_malloc(mb_map, nbytes, nowait ? M_NOWAIT : M_WAITOK);
    116         /*
    117          * Either the map is now full, or this is nowait and there
    118          * are no pages left.
    119          */
    120         if (p == NULL)
    121                 return (0);
    122 
    123         nmb = nbytes / MSIZE;
    124         for (i = 0; i < nmb; i++) {
    125                 ((struct mbuf *)p)->m_next = mmbfree;
    126                 mmbfree = (struct mbuf *)p;
    127                 p += MSIZE;
    128         }
    129         mbstat.m_mbufs += nmb;
    130         return (1);
    131 }
    132 
    133 /*
    134  * Allocate some number of mbuf clusters
    135  * and place on cluster free list.
    136  * Must be called at splimp.
    137  */
    138 /* ARGSUSED */
    139 int
    140 m_clalloc(ncl, nowait)
    141         register int ncl;
    142         int nowait;
    143 {
    144         register caddr_t p;
    145         register int i;
    146 
    147         /*
    148          * Once we run out of map space, it will be impossible
    149          * to get any more (nothing is ever freed back to the
    150          * map).
    151          */
    152         if (mb_map_full)
    153                 return (0);
    154 
    155         p = (caddr_t)kmem_malloc(mb_map, ncl*MCLBYTES,
    156                                  nowait ? M_NOWAIT : M_WAITOK);
    157         /*
    158          * Either the map is now full, or this is nowait and there
    159          * are no pages left.
    160          */
    161         if (p == NULL)
    162                 return (0);
    163 
    164         for (i = 0; i < ncl; i++) {
    165                 ((union mcluster *)p)->mcl_next = mclfree;
    166                 mclfree = (union mcluster *)p;
    167                 p += MCLBYTES;
    168                 mbstat.m_clfree++;
    169         }
    170         mbstat.m_clusters += ncl;
    171         return (1);
    172 }
    17370
    17471/*
     
    213110}
    214111
    215 static void
    216 m_reclaim()
     112void
     113m_reclaim(void)
    217114{
    218115        register struct domain *dp;
  • c/src/exec/libnetworking/rtems/rtems_glue.c

    r1c841bd2 r11cdbeb  
    127127/*
    128128 * Do the initializations required by the BSD code
    129  * FIXME: Maybe we should use a different memory allocation scheme that
    130  * would let us share space between mbufs and mbuf clusters.
    131  * For now, we'll just take the easy way out!
    132129 */
    133130static void
    134131bsd_init ()
    135132{
    136         /*
    137          * Set up mbuf data strutures
    138          * Cluster allocation *must* come first -- see comment on kmem_malloc().
    139          */
    140         m_clalloc (nmbclusters, M_DONTWAIT);
     133        int i;
     134        char *p;
     135
     136        /*
     137         * Set up mbuf cluster data strutures
     138         */
     139        p = malloc ((nmbclusters*MCLBYTES)+MCLBYTES-1);
     140        p = (char *)(((unsigned long)p + (MCLBYTES-1)) & ~(MCLBYTES-1));
     141        if (p == NULL)
     142                rtems_panic ("Can't get network cluster memory.");
     143        mbutl = (struct mbuf *)p;
     144        for (i = 0; i < nmbclusters; i++) {
     145                ((union mcluster *)p)->mcl_next = mclfree;
     146                mclfree = (union mcluster *)p;
     147                p += MCLBYTES;
     148                mbstat.m_clfree++;
     149        }
     150        mbstat.m_clusters = nmbclusters;
    141151        mclrefcnt = malloc (nmbclusters);
    142152        if (mclrefcnt == NULL)
    143                 rtems_panic ("No memory for mbuf cluster reference counts.");
     153                rtems_panic ("Can't get mbuf cluster reference counts memory.");
    144154        memset (mclrefcnt, '\0', nmbclusters);
    145         m_mballoc (nmbuf, M_DONTWAIT);
     155
     156        /*
     157         * Set up mbuf data structures
     158         */
     159
     160        p = malloc(nmbuf * MSIZE);
     161        if (p == NULL)
     162                rtems_panic ("Can't get network memory.");
     163        for (i = 0; i < nmbuf; i++) {
     164                ((struct mbuf *)p)->m_next = mmbfree;
     165                mmbfree = (struct mbuf *)p;
     166                p += MSIZE;
     167        }
     168        mbstat.m_mbufs = nmbuf;
    146169        mbstat.m_mtypes[MT_FREE] = nmbuf;
    147 
    148170
    149171        /*
     
    664686
    665687/*
    666  * Hack alert: kmem_malloc `knows' that its
    667  * first invocation is to get mbuf clusters!
    668  */
    669 int mb_map_full;
    670 vm_map_t mb_map;
    671 vm_offset_t
    672 kmem_malloc (vm_map_t *map, vm_size_t size, boolean_t waitflag)
    673 {
    674         void *p;
    675        
    676         /*
    677          * Can't get memory if we're already running.
    678          */
    679         if (networkDaemonTid) {
    680                 if (waitflag == M_WAITOK)
    681                         rtems_panic (
    682 "Network mbuf space can not be enlarged after rtems_bsdnet_initialize() has\n"
    683 "returned.  Enlarge the initial mbuf/cluster size in rtems_bsdnet_config.");
    684                 return 0;
    685         }
    686 
    687 #define ROUNDSIZE 2048
    688         p = malloc (size+ROUNDSIZE);
    689         p = (void *)((unsigned long)p & ~(ROUNDSIZE-1));
    690         if ((p == NULL) && (waitflag == M_WAITOK))
    691                 rtems_panic ("Can't get initial network memory!");
    692         if (mbutl == NULL)
    693                 mbutl = p;
    694         return (vm_offset_t)p;
    695 }
    696 
    697 /*
    698688 * IP header checksum routine for processors which don't have an inline version
    699689 */
    700 
    701690u_int
    702691in_cksum_hdr (const void *ip)
     
    927916        return -1;
    928917}
     918
     919/*
     920 * Handle requests for more network memory
     921 * XXX: Another possibility would be to use a semaphore here with
     922 *      a release in the mbuf free macro.  I have chosen this `polling'
     923 *      approach because:
     924 *      1) It is simpler.
     925 *      2) It adds no complexity to the free macro.
     926 *      3) Running out of mbufs should be a rare
     927 *         condition -- predeployment testing of
     928 *         an application should indicate the
     929 *         required mbuf pool size.
     930 * XXX: Should there be a panic if a task is stuck in the loop for
     931 *      more than a minute or so?
     932 */
     933int
     934m_mballoc (int nmb, int nowait)
     935{
     936        if (nowait)
     937                return 0;
     938        m_reclaim ();
     939        if (mmbfree == NULL) {
     940                mbstat.m_wait++;
     941                do {
     942                        rtems_bsdnet_semaphore_release ();
     943                        rtems_task_wake_after (1);
     944                        rtems_bsdnet_semaphore_obtain ();
     945                } while (mmbfree == NULL);
     946        }
     947        else {
     948                mbstat.m_drops++;
     949        }
     950        return 1;
     951}
     952
     953int
     954m_clalloc(ncl, nowait)
     955{
     956        if (nowait)
     957                return 0;
     958        m_reclaim ();
     959        if (mclfree == NULL) {
     960                mbstat.m_wait++;
     961                do {
     962                        rtems_bsdnet_semaphore_release ();
     963                        rtems_task_wake_after (1);
     964                        rtems_bsdnet_semaphore_obtain ();
     965                } while (mclfree == NULL);
     966        }
     967        else {
     968                mbstat.m_drops++;
     969        }
     970        return 1;
     971}
  • c/src/exec/libnetworking/sys/mbuf.h

    r1c841bd2 r11cdbeb  
    411411void    m_copydata __P((struct mbuf *,int,int,caddr_t));
    412412void    m_freem __P((struct mbuf *));
     413void    m_reclaim __P((void));
    413414
    414415#ifdef MBTYPES
  • c/src/lib/libnetworking/kern/uipc_mbuf.c

    r1c841bd2 r11cdbeb  
    6464int     max_datalen;
    6565
    66 static void     m_reclaim __P((void));
    67 
    6866/* "number of clusters of pages" */
    6967#define NCL_INIT        1
    7068
    7169#define NMB_INIT        16
    72 
    73 /* ARGSUSED*/
    74 static void
    75 mbinit(dummy)
    76         void *dummy;
    77 {
    78         int s;
    79 
    80         mmbfree = NULL; mclfree = NULL;
    81         s = splimp();
    82         if (m_mballoc(NMB_INIT, M_DONTWAIT) == 0)
    83                 goto bad;
    84         if (m_clalloc(NCL_INIT, M_DONTWAIT) == 0)
    85                 goto bad;
    86         splx(s);
    87         return;
    88 bad:
    89         panic("mbinit");
    90 }
    91 
    92 /*
    93  * Allocate at least nmb mbufs and place on mbuf free list.
    94  * Must be called at splimp.
    95  */
    96 /* ARGSUSED */
    97 int
    98 m_mballoc(nmb, nowait)
    99         register int nmb;
    100         int nowait;
    101 {
    102         register caddr_t p;
    103         register int i;
    104         int nbytes;
    105 
    106         /* Once we run out of map space, it will be impossible to get
    107          * any more (nothing is ever freed back to the map) (XXX which
    108          * is dumb). (however you are not dead as m_reclaim might
    109          * still be able to free a substantial amount of space).
    110          */
    111         if (mb_map_full)
    112                 return (0);
    113 
    114         nbytes = round_page(nmb * MSIZE);
    115         p = (caddr_t)kmem_malloc(mb_map, nbytes, nowait ? M_NOWAIT : M_WAITOK);
    116         /*
    117          * Either the map is now full, or this is nowait and there
    118          * are no pages left.
    119          */
    120         if (p == NULL)
    121                 return (0);
    122 
    123         nmb = nbytes / MSIZE;
    124         for (i = 0; i < nmb; i++) {
    125                 ((struct mbuf *)p)->m_next = mmbfree;
    126                 mmbfree = (struct mbuf *)p;
    127                 p += MSIZE;
    128         }
    129         mbstat.m_mbufs += nmb;
    130         return (1);
    131 }
    132 
    133 /*
    134  * Allocate some number of mbuf clusters
    135  * and place on cluster free list.
    136  * Must be called at splimp.
    137  */
    138 /* ARGSUSED */
    139 int
    140 m_clalloc(ncl, nowait)
    141         register int ncl;
    142         int nowait;
    143 {
    144         register caddr_t p;
    145         register int i;
    146 
    147         /*
    148          * Once we run out of map space, it will be impossible
    149          * to get any more (nothing is ever freed back to the
    150          * map).
    151          */
    152         if (mb_map_full)
    153                 return (0);
    154 
    155         p = (caddr_t)kmem_malloc(mb_map, ncl*MCLBYTES,
    156                                  nowait ? M_NOWAIT : M_WAITOK);
    157         /*
    158          * Either the map is now full, or this is nowait and there
    159          * are no pages left.
    160          */
    161         if (p == NULL)
    162                 return (0);
    163 
    164         for (i = 0; i < ncl; i++) {
    165                 ((union mcluster *)p)->mcl_next = mclfree;
    166                 mclfree = (union mcluster *)p;
    167                 p += MCLBYTES;
    168                 mbstat.m_clfree++;
    169         }
    170         mbstat.m_clusters += ncl;
    171         return (1);
    172 }
    17370
    17471/*
     
    213110}
    214111
    215 static void
    216 m_reclaim()
     112void
     113m_reclaim(void)
    217114{
    218115        register struct domain *dp;
  • c/src/lib/libnetworking/rtems/rtems_glue.c

    r1c841bd2 r11cdbeb  
    127127/*
    128128 * Do the initializations required by the BSD code
    129  * FIXME: Maybe we should use a different memory allocation scheme that
    130  * would let us share space between mbufs and mbuf clusters.
    131  * For now, we'll just take the easy way out!
    132129 */
    133130static void
    134131bsd_init ()
    135132{
    136         /*
    137          * Set up mbuf data strutures
    138          * Cluster allocation *must* come first -- see comment on kmem_malloc().
    139          */
    140         m_clalloc (nmbclusters, M_DONTWAIT);
     133        int i;
     134        char *p;
     135
     136        /*
     137         * Set up mbuf cluster data strutures
     138         */
     139        p = malloc ((nmbclusters*MCLBYTES)+MCLBYTES-1);
     140        p = (char *)(((unsigned long)p + (MCLBYTES-1)) & ~(MCLBYTES-1));
     141        if (p == NULL)
     142                rtems_panic ("Can't get network cluster memory.");
     143        mbutl = (struct mbuf *)p;
     144        for (i = 0; i < nmbclusters; i++) {
     145                ((union mcluster *)p)->mcl_next = mclfree;
     146                mclfree = (union mcluster *)p;
     147                p += MCLBYTES;
     148                mbstat.m_clfree++;
     149        }
     150        mbstat.m_clusters = nmbclusters;
    141151        mclrefcnt = malloc (nmbclusters);
    142152        if (mclrefcnt == NULL)
    143                 rtems_panic ("No memory for mbuf cluster reference counts.");
     153                rtems_panic ("Can't get mbuf cluster reference counts memory.");
    144154        memset (mclrefcnt, '\0', nmbclusters);
    145         m_mballoc (nmbuf, M_DONTWAIT);
     155
     156        /*
     157         * Set up mbuf data structures
     158         */
     159
     160        p = malloc(nmbuf * MSIZE);
     161        if (p == NULL)
     162                rtems_panic ("Can't get network memory.");
     163        for (i = 0; i < nmbuf; i++) {
     164                ((struct mbuf *)p)->m_next = mmbfree;
     165                mmbfree = (struct mbuf *)p;
     166                p += MSIZE;
     167        }
     168        mbstat.m_mbufs = nmbuf;
    146169        mbstat.m_mtypes[MT_FREE] = nmbuf;
    147 
    148170
    149171        /*
     
    664686
    665687/*
    666  * Hack alert: kmem_malloc `knows' that its
    667  * first invocation is to get mbuf clusters!
    668  */
    669 int mb_map_full;
    670 vm_map_t mb_map;
    671 vm_offset_t
    672 kmem_malloc (vm_map_t *map, vm_size_t size, boolean_t waitflag)
    673 {
    674         void *p;
    675        
    676         /*
    677          * Can't get memory if we're already running.
    678          */
    679         if (networkDaemonTid) {
    680                 if (waitflag == M_WAITOK)
    681                         rtems_panic (
    682 "Network mbuf space can not be enlarged after rtems_bsdnet_initialize() has\n"
    683 "returned.  Enlarge the initial mbuf/cluster size in rtems_bsdnet_config.");
    684                 return 0;
    685         }
    686 
    687 #define ROUNDSIZE 2048
    688         p = malloc (size+ROUNDSIZE);
    689         p = (void *)((unsigned long)p & ~(ROUNDSIZE-1));
    690         if ((p == NULL) && (waitflag == M_WAITOK))
    691                 rtems_panic ("Can't get initial network memory!");
    692         if (mbutl == NULL)
    693                 mbutl = p;
    694         return (vm_offset_t)p;
    695 }
    696 
    697 /*
    698688 * IP header checksum routine for processors which don't have an inline version
    699689 */
    700 
    701690u_int
    702691in_cksum_hdr (const void *ip)
     
    927916        return -1;
    928917}
     918
     919/*
     920 * Handle requests for more network memory
     921 * XXX: Another possibility would be to use a semaphore here with
     922 *      a release in the mbuf free macro.  I have chosen this `polling'
     923 *      approach because:
     924 *      1) It is simpler.
     925 *      2) It adds no complexity to the free macro.
     926 *      3) Running out of mbufs should be a rare
     927 *         condition -- predeployment testing of
     928 *         an application should indicate the
     929 *         required mbuf pool size.
     930 * XXX: Should there be a panic if a task is stuck in the loop for
     931 *      more than a minute or so?
     932 */
     933int
     934m_mballoc (int nmb, int nowait)
     935{
     936        if (nowait)
     937                return 0;
     938        m_reclaim ();
     939        if (mmbfree == NULL) {
     940                mbstat.m_wait++;
     941                do {
     942                        rtems_bsdnet_semaphore_release ();
     943                        rtems_task_wake_after (1);
     944                        rtems_bsdnet_semaphore_obtain ();
     945                } while (mmbfree == NULL);
     946        }
     947        else {
     948                mbstat.m_drops++;
     949        }
     950        return 1;
     951}
     952
     953int
     954m_clalloc(ncl, nowait)
     955{
     956        if (nowait)
     957                return 0;
     958        m_reclaim ();
     959        if (mclfree == NULL) {
     960                mbstat.m_wait++;
     961                do {
     962                        rtems_bsdnet_semaphore_release ();
     963                        rtems_task_wake_after (1);
     964                        rtems_bsdnet_semaphore_obtain ();
     965                } while (mclfree == NULL);
     966        }
     967        else {
     968                mbstat.m_drops++;
     969        }
     970        return 1;
     971}
  • c/src/lib/libnetworking/sys/mbuf.h

    r1c841bd2 r11cdbeb  
    411411void    m_copydata __P((struct mbuf *,int,int,caddr_t));
    412412void    m_freem __P((struct mbuf *));
     413void    m_reclaim __P((void));
    413414
    414415#ifdef MBTYPES
  • c/src/libnetworking/kern/uipc_mbuf.c

    r1c841bd2 r11cdbeb  
    6464int     max_datalen;
    6565
    66 static void     m_reclaim __P((void));
    67 
    6866/* "number of clusters of pages" */
    6967#define NCL_INIT        1
    7068
    7169#define NMB_INIT        16
    72 
    73 /* ARGSUSED*/
    74 static void
    75 mbinit(dummy)
    76         void *dummy;
    77 {
    78         int s;
    79 
    80         mmbfree = NULL; mclfree = NULL;
    81         s = splimp();
    82         if (m_mballoc(NMB_INIT, M_DONTWAIT) == 0)
    83                 goto bad;
    84         if (m_clalloc(NCL_INIT, M_DONTWAIT) == 0)
    85                 goto bad;
    86         splx(s);
    87         return;
    88 bad:
    89         panic("mbinit");
    90 }
    91 
    92 /*
    93  * Allocate at least nmb mbufs and place on mbuf free list.
    94  * Must be called at splimp.
    95  */
    96 /* ARGSUSED */
    97 int
    98 m_mballoc(nmb, nowait)
    99         register int nmb;
    100         int nowait;
    101 {
    102         register caddr_t p;
    103         register int i;
    104         int nbytes;
    105 
    106         /* Once we run out of map space, it will be impossible to get
    107          * any more (nothing is ever freed back to the map) (XXX which
    108          * is dumb). (however you are not dead as m_reclaim might
    109          * still be able to free a substantial amount of space).
    110          */
    111         if (mb_map_full)
    112                 return (0);
    113 
    114         nbytes = round_page(nmb * MSIZE);
    115         p = (caddr_t)kmem_malloc(mb_map, nbytes, nowait ? M_NOWAIT : M_WAITOK);
    116         /*
    117          * Either the map is now full, or this is nowait and there
    118          * are no pages left.
    119          */
    120         if (p == NULL)
    121                 return (0);
    122 
    123         nmb = nbytes / MSIZE;
    124         for (i = 0; i < nmb; i++) {
    125                 ((struct mbuf *)p)->m_next = mmbfree;
    126                 mmbfree = (struct mbuf *)p;
    127                 p += MSIZE;
    128         }
    129         mbstat.m_mbufs += nmb;
    130         return (1);
    131 }
    132 
    133 /*
    134  * Allocate some number of mbuf clusters
    135  * and place on cluster free list.
    136  * Must be called at splimp.
    137  */
    138 /* ARGSUSED */
    139 int
    140 m_clalloc(ncl, nowait)
    141         register int ncl;
    142         int nowait;
    143 {
    144         register caddr_t p;
    145         register int i;
    146 
    147         /*
    148          * Once we run out of map space, it will be impossible
    149          * to get any more (nothing is ever freed back to the
    150          * map).
    151          */
    152         if (mb_map_full)
    153                 return (0);
    154 
    155         p = (caddr_t)kmem_malloc(mb_map, ncl*MCLBYTES,
    156                                  nowait ? M_NOWAIT : M_WAITOK);
    157         /*
    158          * Either the map is now full, or this is nowait and there
    159          * are no pages left.
    160          */
    161         if (p == NULL)
    162                 return (0);
    163 
    164         for (i = 0; i < ncl; i++) {
    165                 ((union mcluster *)p)->mcl_next = mclfree;
    166                 mclfree = (union mcluster *)p;
    167                 p += MCLBYTES;
    168                 mbstat.m_clfree++;
    169         }
    170         mbstat.m_clusters += ncl;
    171         return (1);
    172 }
    17370
    17471/*
     
    213110}
    214111
    215 static void
    216 m_reclaim()
     112void
     113m_reclaim(void)
    217114{
    218115        register struct domain *dp;
  • c/src/libnetworking/rtems/rtems_glue.c

    r1c841bd2 r11cdbeb  
    127127/*
    128128 * Do the initializations required by the BSD code
    129  * FIXME: Maybe we should use a different memory allocation scheme that
    130  * would let us share space between mbufs and mbuf clusters.
    131  * For now, we'll just take the easy way out!
    132129 */
    133130static void
    134131bsd_init ()
    135132{
    136         /*
    137          * Set up mbuf data strutures
    138          * Cluster allocation *must* come first -- see comment on kmem_malloc().
    139          */
    140         m_clalloc (nmbclusters, M_DONTWAIT);
     133        int i;
     134        char *p;
     135
     136        /*
     137         * Set up mbuf cluster data strutures
     138         */
     139        p = malloc ((nmbclusters*MCLBYTES)+MCLBYTES-1);
     140        p = (char *)(((unsigned long)p + (MCLBYTES-1)) & ~(MCLBYTES-1));
     141        if (p == NULL)
     142                rtems_panic ("Can't get network cluster memory.");
     143        mbutl = (struct mbuf *)p;
     144        for (i = 0; i < nmbclusters; i++) {
     145                ((union mcluster *)p)->mcl_next = mclfree;
     146                mclfree = (union mcluster *)p;
     147                p += MCLBYTES;
     148                mbstat.m_clfree++;
     149        }
     150        mbstat.m_clusters = nmbclusters;
    141151        mclrefcnt = malloc (nmbclusters);
    142152        if (mclrefcnt == NULL)
    143                 rtems_panic ("No memory for mbuf cluster reference counts.");
     153                rtems_panic ("Can't get mbuf cluster reference counts memory.");
    144154        memset (mclrefcnt, '\0', nmbclusters);
    145         m_mballoc (nmbuf, M_DONTWAIT);
     155
     156        /*
     157         * Set up mbuf data structures
     158         */
     159
     160        p = malloc(nmbuf * MSIZE);
     161        if (p == NULL)
     162                rtems_panic ("Can't get network memory.");
     163        for (i = 0; i < nmbuf; i++) {
     164                ((struct mbuf *)p)->m_next = mmbfree;
     165                mmbfree = (struct mbuf *)p;
     166                p += MSIZE;
     167        }
     168        mbstat.m_mbufs = nmbuf;
    146169        mbstat.m_mtypes[MT_FREE] = nmbuf;
    147 
    148170
    149171        /*
     
    664686
    665687/*
    666  * Hack alert: kmem_malloc `knows' that its
    667  * first invocation is to get mbuf clusters!
    668  */
    669 int mb_map_full;
    670 vm_map_t mb_map;
    671 vm_offset_t
    672 kmem_malloc (vm_map_t *map, vm_size_t size, boolean_t waitflag)
    673 {
    674         void *p;
    675        
    676         /*
    677          * Can't get memory if we're already running.
    678          */
    679         if (networkDaemonTid) {
    680                 if (waitflag == M_WAITOK)
    681                         rtems_panic (
    682 "Network mbuf space can not be enlarged after rtems_bsdnet_initialize() has\n"
    683 "returned.  Enlarge the initial mbuf/cluster size in rtems_bsdnet_config.");
    684                 return 0;
    685         }
    686 
    687 #define ROUNDSIZE 2048
    688         p = malloc (size+ROUNDSIZE);
    689         p = (void *)((unsigned long)p & ~(ROUNDSIZE-1));
    690         if ((p == NULL) && (waitflag == M_WAITOK))
    691                 rtems_panic ("Can't get initial network memory!");
    692         if (mbutl == NULL)
    693                 mbutl = p;
    694         return (vm_offset_t)p;
    695 }
    696 
    697 /*
    698688 * IP header checksum routine for processors which don't have an inline version
    699689 */
    700 
    701690u_int
    702691in_cksum_hdr (const void *ip)
     
    927916        return -1;
    928917}
     918
     919/*
     920 * Handle requests for more network memory
     921 * XXX: Another possibility would be to use a semaphore here with
     922 *      a release in the mbuf free macro.  I have chosen this `polling'
     923 *      approach because:
     924 *      1) It is simpler.
     925 *      2) It adds no complexity to the free macro.
     926 *      3) Running out of mbufs should be a rare
     927 *         condition -- predeployment testing of
     928 *         an application should indicate the
     929 *         required mbuf pool size.
     930 * XXX: Should there be a panic if a task is stuck in the loop for
     931 *      more than a minute or so?
     932 */
     933int
     934m_mballoc (int nmb, int nowait)
     935{
     936        if (nowait)
     937                return 0;
     938        m_reclaim ();
     939        if (mmbfree == NULL) {
     940                mbstat.m_wait++;
     941                do {
     942                        rtems_bsdnet_semaphore_release ();
     943                        rtems_task_wake_after (1);
     944                        rtems_bsdnet_semaphore_obtain ();
     945                } while (mmbfree == NULL);
     946        }
     947        else {
     948                mbstat.m_drops++;
     949        }
     950        return 1;
     951}
     952
     953int
     954m_clalloc(ncl, nowait)
     955{
     956        if (nowait)
     957                return 0;
     958        m_reclaim ();
     959        if (mclfree == NULL) {
     960                mbstat.m_wait++;
     961                do {
     962                        rtems_bsdnet_semaphore_release ();
     963                        rtems_task_wake_after (1);
     964                        rtems_bsdnet_semaphore_obtain ();
     965                } while (mclfree == NULL);
     966        }
     967        else {
     968                mbstat.m_drops++;
     969        }
     970        return 1;
     971}
  • c/src/libnetworking/sys/mbuf.h

    r1c841bd2 r11cdbeb  
    411411void    m_copydata __P((struct mbuf *,int,int,caddr_t));
    412412void    m_freem __P((struct mbuf *));
     413void    m_reclaim __P((void));
    413414
    414415#ifdef MBTYPES
  • cpukit/libnetworking/kern/uipc_mbuf.c

    r1c841bd2 r11cdbeb  
    6464int     max_datalen;
    6565
    66 static void     m_reclaim __P((void));
    67 
    6866/* "number of clusters of pages" */
    6967#define NCL_INIT        1
    7068
    7169#define NMB_INIT        16
    72 
    73 /* ARGSUSED*/
    74 static void
    75 mbinit(dummy)
    76         void *dummy;
    77 {
    78         int s;
    79 
    80         mmbfree = NULL; mclfree = NULL;
    81         s = splimp();
    82         if (m_mballoc(NMB_INIT, M_DONTWAIT) == 0)
    83                 goto bad;
    84         if (m_clalloc(NCL_INIT, M_DONTWAIT) == 0)
    85                 goto bad;
    86         splx(s);
    87         return;
    88 bad:
    89         panic("mbinit");
    90 }
    91 
    92 /*
    93  * Allocate at least nmb mbufs and place on mbuf free list.
    94  * Must be called at splimp.
    95  */
    96 /* ARGSUSED */
    97 int
    98 m_mballoc(nmb, nowait)
    99         register int nmb;
    100         int nowait;
    101 {
    102         register caddr_t p;
    103         register int i;
    104         int nbytes;
    105 
    106         /* Once we run out of map space, it will be impossible to get
    107          * any more (nothing is ever freed back to the map) (XXX which
    108          * is dumb). (however you are not dead as m_reclaim might
    109          * still be able to free a substantial amount of space).
    110          */
    111         if (mb_map_full)
    112                 return (0);
    113 
    114         nbytes = round_page(nmb * MSIZE);
    115         p = (caddr_t)kmem_malloc(mb_map, nbytes, nowait ? M_NOWAIT : M_WAITOK);
    116         /*
    117          * Either the map is now full, or this is nowait and there
    118          * are no pages left.
    119          */
    120         if (p == NULL)
    121                 return (0);
    122 
    123         nmb = nbytes / MSIZE;
    124         for (i = 0; i < nmb; i++) {
    125                 ((struct mbuf *)p)->m_next = mmbfree;
    126                 mmbfree = (struct mbuf *)p;
    127                 p += MSIZE;
    128         }
    129         mbstat.m_mbufs += nmb;
    130         return (1);
    131 }
    132 
    133 /*
    134  * Allocate some number of mbuf clusters
    135  * and place on cluster free list.
    136  * Must be called at splimp.
    137  */
    138 /* ARGSUSED */
    139 int
    140 m_clalloc(ncl, nowait)
    141         register int ncl;
    142         int nowait;
    143 {
    144         register caddr_t p;
    145         register int i;
    146 
    147         /*
    148          * Once we run out of map space, it will be impossible
    149          * to get any more (nothing is ever freed back to the
    150          * map).
    151          */
    152         if (mb_map_full)
    153                 return (0);
    154 
    155         p = (caddr_t)kmem_malloc(mb_map, ncl*MCLBYTES,
    156                                  nowait ? M_NOWAIT : M_WAITOK);
    157         /*
    158          * Either the map is now full, or this is nowait and there
    159          * are no pages left.
    160          */
    161         if (p == NULL)
    162                 return (0);
    163 
    164         for (i = 0; i < ncl; i++) {
    165                 ((union mcluster *)p)->mcl_next = mclfree;
    166                 mclfree = (union mcluster *)p;
    167                 p += MCLBYTES;
    168                 mbstat.m_clfree++;
    169         }
    170         mbstat.m_clusters += ncl;
    171         return (1);
    172 }
    17370
    17471/*
     
    213110}
    214111
    215 static void
    216 m_reclaim()
     112void
     113m_reclaim(void)
    217114{
    218115        register struct domain *dp;
  • cpukit/libnetworking/rtems/rtems_glue.c

    r1c841bd2 r11cdbeb  
    127127/*
    128128 * Do the initializations required by the BSD code
    129  * FIXME: Maybe we should use a different memory allocation scheme that
    130  * would let us share space between mbufs and mbuf clusters.
    131  * For now, we'll just take the easy way out!
    132129 */
    133130static void
    134131bsd_init ()
    135132{
    136         /*
    137          * Set up mbuf data structures
    138          * Cluster allocation *must* come first -- see comment on kmem_malloc().
    139          */
    140         m_clalloc (nmbclusters, M_DONTWAIT);
     133        int i;
     134        char *p;
     135
     136        /*
     137         * Set up mbuf cluster data strutures
     138         */
     139        p = malloc ((nmbclusters*MCLBYTES)+MCLBYTES-1);
     140        p = (char *)(((unsigned long)p + (MCLBYTES-1)) & ~(MCLBYTES-1));
     141        if (p == NULL)
     142                rtems_panic ("Can't get network cluster memory.");
     143        mbutl = (struct mbuf *)p;
     144        for (i = 0; i < nmbclusters; i++) {
     145                ((union mcluster *)p)->mcl_next = mclfree;
     146                mclfree = (union mcluster *)p;
     147                p += MCLBYTES;
     148                mbstat.m_clfree++;
     149        }
     150        mbstat.m_clusters = nmbclusters;
    141151        mclrefcnt = malloc (nmbclusters);
    142152        if (mclrefcnt == NULL)
    143                 rtems_panic ("No memory for mbuf cluster reference counts.");
     153                rtems_panic ("Can't get mbuf cluster reference counts memory.");
    144154        memset (mclrefcnt, '\0', nmbclusters);
    145         m_mballoc (nmbuf, M_DONTWAIT);
     155
     156        /*
     157         * Set up mbuf data structures
     158         */
     159
     160        p = malloc(nmbuf * MSIZE);
     161        if (p == NULL)
     162                rtems_panic ("Can't get network memory.");
     163        for (i = 0; i < nmbuf; i++) {
     164                ((struct mbuf *)p)->m_next = mmbfree;
     165                mmbfree = (struct mbuf *)p;
     166                p += MSIZE;
     167        }
     168        mbstat.m_mbufs = nmbuf;
    146169        mbstat.m_mtypes[MT_FREE] = nmbuf;
    147 
    148170
    149171        /*
     
    664686
    665687/*
    666  * Hack alert: kmem_malloc `knows' that its
    667  * first invocation is to get mbuf clusters!
    668  */
    669 int mb_map_full;
    670 vm_map_t mb_map;
    671 vm_offset_t
    672 kmem_malloc (vm_map_t *map, vm_size_t size, boolean_t waitflag)
    673 {
    674         void *p;
    675        
    676         /*
    677          * Can't get memory if we're already running.
    678          */
    679         if (networkDaemonTid) {
    680                 if (waitflag == M_WAITOK)
    681                         rtems_panic (
    682 "Network mbuf space can not be enlarged after rtems_bsdnet_initialize() has\n"
    683 "returned.  Enlarge the initial mbuf/cluster size in rtems_bsdnet_config.");
    684                 return 0;
    685         }
    686 
    687 #define ROUNDSIZE 2048
    688         p = malloc (size+ROUNDSIZE);
    689         p = (void *)((unsigned long)p & ~(ROUNDSIZE-1));
    690         if ((p == NULL) && (waitflag == M_WAITOK))
    691                 rtems_panic ("Can't get initial network memory!");
    692         if (mbutl == NULL)
    693                 mbutl = p;
    694         return (vm_offset_t)p;
    695 }
    696 
    697 /*
    698688 * IP header checksum routine for processors which don't have an inline version
    699689 */
    700 
    701690u_int
    702691in_cksum_hdr (const void *ip)
     
    927916        return -1;
    928917}
     918
     919/*
     920 * Handle requests for more network memory
     921 * XXX: Another possibility would be to use a semaphore here with
     922 *      a release in the mbuf free macro.  I have chosen this `polling'
     923 *      approach because:
     924 *      1) It is simpler.
     925 *      2) It adds no complexity to the free macro.
     926 *      3) Running out of mbufs should be a rare
     927 *         condition -- predeployment testing of
     928 *         an application should indicate the
     929 *         required mbuf pool size.
     930 * XXX: Should there be a panic if a task is stuck in the loop for
     931 *      more than a minute or so?
     932 */
     933int
     934m_mballoc (int nmb, int nowait)
     935{
     936        if (nowait)
     937                return 0;
     938        m_reclaim ();
     939        if (mmbfree == NULL) {
     940                mbstat.m_wait++;
     941                do {
     942                        rtems_bsdnet_semaphore_release ();
     943                        rtems_task_wake_after (1);
     944                        rtems_bsdnet_semaphore_obtain ();
     945                } while (mmbfree == NULL);
     946        }
     947        else {
     948                mbstat.m_drops++;
     949        }
     950        return 1;
     951}
     952
     953int
     954m_clalloc(ncl, nowait)
     955{
     956        if (nowait)
     957                return 0;
     958        m_reclaim ();
     959        if (mclfree == NULL) {
     960                mbstat.m_wait++;
     961                do {
     962                        rtems_bsdnet_semaphore_release ();
     963                        rtems_task_wake_after (1);
     964                        rtems_bsdnet_semaphore_obtain ();
     965                } while (mclfree == NULL);
     966        }
     967        else {
     968                mbstat.m_drops++;
     969        }
     970        return 1;
     971}
  • cpukit/libnetworking/sys/mbuf.h

    r1c841bd2 r11cdbeb  
    411411void    m_copydata __P((struct mbuf *,int,int,caddr_t));
    412412void    m_freem __P((struct mbuf *));
     413void    m_reclaim __P((void));
    413414
    414415#ifdef MBTYPES
Note: See TracChangeset for help on using the changeset viewer.