source: rtems-libbsd/freebsd/sys/net/if_epair.c @ 2404264

Last change on this file was 2404264, checked in by Sebastian Huber <sebastian.huber@…> on 10/30/13 at 12:23:59

Revert superfluous changes

  • Property mode set to 100644
File size: 25.1 KB
#include <machine/rtems-bsd-config.h>

/*-
 * Copyright (c) 2008 The FreeBSD Foundation
 * Copyright (c) 2009-2010 Bjoern A. Zeeb <bz@FreeBSD.org>
 * All rights reserved.
 *
 * This software was developed by CK Software GmbH under sponsorship
 * from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * A pair of virtual back-to-back connected ethernet like interfaces
 * (``two interfaces with a virtual cross-over cable'').
 *
 * This is mostly intended to be used to provide connectivity between
 * different virtual network stack instances.
 */
/*
 * Things to re-think once we have more experience:
 * - ifp->if_reassign function once we can test with vimage. Depending on
 *   how if_vmove() is going to be improved.
 * - Real random etheraddrs that are checked to be uniquish; we would need
 *   to re-do them in case we move the interface between network stacks
 *   in a private if_reassign function.
 *   In case we bridge to a real interface/network or between independent
 *   epairs on multiple stacks/machines, we may need this.
 *   For now let the user handle that case.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <rtems/bsd/sys/param.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/refcount.h>
#include <sys/queue.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <rtems/bsd/sys/types.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_clone.h>
#include <net/if_var.h>
#include <net/if_types.h>
#include <net/netisr.h>
#include <net/vnet.h>

#define EPAIRNAME       "epair"

SYSCTL_DECL(_net_link);
SYSCTL_NODE(_net_link, OID_AUTO, epair, CTLFLAG_RW, 0, "epair sysctl");

#ifdef EPAIR_DEBUG
static int epair_debug = 0;
SYSCTL_INT(_net_link_epair, OID_AUTO, epair_debug, CTLFLAG_RW,
    &epair_debug, 0, "if_epair(4) debugging.");
#define DPRINTF(fmt, arg...)                                            \
        if (epair_debug)                                                \
                printf("[%s:%d] " fmt, __func__, __LINE__, ##arg)
#else
#define DPRINTF(fmt, arg...)
#endif

static void epair_nh_sintr(struct mbuf *);
static struct mbuf *epair_nh_m2cpuid(struct mbuf *, uintptr_t, u_int *);
static void epair_nh_drainedcpu(u_int);

static void epair_start_locked(struct ifnet *);

static int epair_clone_match(struct if_clone *, const char *);
static int epair_clone_create(struct if_clone *, char *, size_t, caddr_t);
static int epair_clone_destroy(struct if_clone *, struct ifnet *);

/* Netisr related definitions and sysctl. */
static struct netisr_handler epair_nh = {
        .nh_name        = EPAIRNAME,
        .nh_proto       = NETISR_EPAIR,
        .nh_policy      = NETISR_POLICY_CPU,
        .nh_handler     = epair_nh_sintr,
        .nh_m2cpuid     = epair_nh_m2cpuid,
        .nh_drainedcpu  = epair_nh_drainedcpu,
};

static int
sysctl_epair_netisr_maxqlen(SYSCTL_HANDLER_ARGS)
{
        int error, qlimit;

        netisr_getqlimit(&epair_nh, &qlimit);
        error = sysctl_handle_int(oidp, &qlimit, 0, req);
        if (error || !req->newptr)
                return (error);
        if (qlimit < 1)
                return (EINVAL);
        return (netisr_setqlimit(&epair_nh, qlimit));
}
SYSCTL_PROC(_net_link_epair, OID_AUTO, netisr_maxqlen, CTLTYPE_INT|CTLFLAG_RW,
    0, 0, sysctl_epair_netisr_maxqlen, "I",
    "Maximum if_epair(4) netisr \"hw\" queue length");

struct epair_softc {
        struct ifnet    *ifp;           /* This ifp. */
        struct ifnet    *oifp;          /* other ifp of pair. */
        u_int           refcount;       /* # of mbufs in flight. */
        u_int           cpuid;          /* CPU ID assigned upon creation. */
        void            (*if_qflush)(struct ifnet *);
                                        /* Original if_qflush routine. */
};

/*
 * Per-CPU list of ifps with data in the ifq that needs to be flushed
 * to the netisr ``hw'' queue before we allow any further direct queuing
 * to the ``hw'' queue.
 */
struct epair_ifp_drain {
        STAILQ_ENTRY(epair_ifp_drain)   ifp_next;
        struct ifnet                    *ifp;
};
STAILQ_HEAD(eid_list, epair_ifp_drain);

#define EPAIR_LOCK_INIT(dpcpu)          mtx_init(&(dpcpu)->if_epair_mtx, \
                                            "if_epair", NULL, MTX_DEF)
#define EPAIR_LOCK_DESTROY(dpcpu)       mtx_destroy(&(dpcpu)->if_epair_mtx)
#define EPAIR_LOCK_ASSERT(dpcpu)        mtx_assert(&(dpcpu)->if_epair_mtx, \
                                            MA_OWNED)
#define EPAIR_LOCK(dpcpu)               mtx_lock(&(dpcpu)->if_epair_mtx)
#define EPAIR_UNLOCK(dpcpu)             mtx_unlock(&(dpcpu)->if_epair_mtx)

#ifdef INVARIANTS
#define EPAIR_REFCOUNT_INIT(r, v)       refcount_init((r), (v))
#define EPAIR_REFCOUNT_AQUIRE(r)        refcount_acquire((r))
#define EPAIR_REFCOUNT_RELEASE(r)       refcount_release((r))
#define EPAIR_REFCOUNT_ASSERT(a, p)     KASSERT(a, p)
#else
#define EPAIR_REFCOUNT_INIT(r, v)
#define EPAIR_REFCOUNT_AQUIRE(r)
#define EPAIR_REFCOUNT_RELEASE(r)
#define EPAIR_REFCOUNT_ASSERT(a, p)
#endif

static MALLOC_DEFINE(M_EPAIR, EPAIRNAME,
    "Pair of virtual cross-over connected Ethernet-like interfaces");

static struct if_clone epair_cloner = IFC_CLONE_INITIALIZER(
    EPAIRNAME, NULL, IF_MAXUNIT,
    NULL, epair_clone_match, epair_clone_create, epair_clone_destroy);

/*
 * DPCPU area and functions.
 */
struct epair_dpcpu {
        struct mtx      if_epair_mtx;           /* Per-CPU locking. */
        int             epair_drv_flags;        /* Per-CPU ``hw'' drv flags. */
        struct eid_list epair_ifp_drain_list;   /* Per-CPU list of ifps with
                                                 * data in the ifq. */
};
DPCPU_DEFINE(struct epair_dpcpu, epair_dpcpu);

static void
epair_dpcpu_init(void)
{
        struct epair_dpcpu *epair_dpcpu;
        struct eid_list *s;
        u_int cpuid;

        for (cpuid = 0; cpuid <= mp_maxid; cpuid++) {
                if (CPU_ABSENT(cpuid))
                        continue;

                epair_dpcpu = DPCPU_ID_PTR(cpuid, epair_dpcpu);

                /* Initialize per-cpu lock. */
                EPAIR_LOCK_INIT(epair_dpcpu);

                /* Driver flags are per-cpu as are our netisr "hw" queues. */
                epair_dpcpu->epair_drv_flags = 0;

                /*
                 * Initialize per-cpu drain list.
                 * Manually do what STAILQ_HEAD_INITIALIZER would do.
                 */
                s = &epair_dpcpu->epair_ifp_drain_list;
                s->stqh_first = NULL;
                s->stqh_last = &s->stqh_first;
        }
}

static void
epair_dpcpu_detach(void)
{
        struct epair_dpcpu *epair_dpcpu;
        u_int cpuid;

        for (cpuid = 0; cpuid <= mp_maxid; cpuid++) {
                if (CPU_ABSENT(cpuid))
                        continue;

                epair_dpcpu = DPCPU_ID_PTR(cpuid, epair_dpcpu);

                /* Destroy per-cpu lock. */
                EPAIR_LOCK_DESTROY(epair_dpcpu);
        }
}

/*
 * Helper functions.
 */
static u_int
cpuid_from_ifp(struct ifnet *ifp)
{
        struct epair_softc *sc;

        if (ifp == NULL)
                return (0);
        sc = ifp->if_softc;

        return (sc->cpuid);
}

/*
 * Netisr handler functions.
 */
static void
epair_nh_sintr(struct mbuf *m)
{
        struct ifnet *ifp;
        struct epair_softc *sc;

        ifp = m->m_pkthdr.rcvif;
        (*ifp->if_input)(ifp, m);
        sc = ifp->if_softc;
        EPAIR_REFCOUNT_RELEASE(&sc->refcount);
        EPAIR_REFCOUNT_ASSERT((int)sc->refcount >= 1,
            ("%s: ifp=%p sc->refcount not >= 1: %d",
            __func__, ifp, sc->refcount));
        DPRINTF("ifp=%p refcount=%u\n", ifp, sc->refcount);
}

static struct mbuf *
epair_nh_m2cpuid(struct mbuf *m, uintptr_t source, u_int *cpuid)
{

        *cpuid = cpuid_from_ifp(m->m_pkthdr.rcvif);

        return (m);
}

static void
epair_nh_drainedcpu(u_int cpuid)
{
        struct epair_dpcpu *epair_dpcpu;
        struct epair_ifp_drain *elm, *tvar;
        struct ifnet *ifp;

        epair_dpcpu = DPCPU_ID_PTR(cpuid, epair_dpcpu);
        EPAIR_LOCK(epair_dpcpu);
        /*
         * Assume our "hw" queue and possibly ifq will be emptied
         * again. In case we will overflow the "hw" queue while
         * draining, epair_start_locked will set IFF_DRV_OACTIVE
         * again and we will stop and return.
         */
        STAILQ_FOREACH_SAFE(elm, &epair_dpcpu->epair_ifp_drain_list,
            ifp_next, tvar) {
                ifp = elm->ifp;
                epair_dpcpu->epair_drv_flags &= ~IFF_DRV_OACTIVE;
                ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
                epair_start_locked(ifp);

                IFQ_LOCK(&ifp->if_snd);
                if (IFQ_IS_EMPTY(&ifp->if_snd)) {
                        struct epair_softc *sc;

                        STAILQ_REMOVE(&epair_dpcpu->epair_ifp_drain_list,
                            elm, epair_ifp_drain, ifp_next);
                        /* The cached ifp goes off the list. */
                        sc = ifp->if_softc;
                        EPAIR_REFCOUNT_RELEASE(&sc->refcount);
                        EPAIR_REFCOUNT_ASSERT((int)sc->refcount >= 1,
                            ("%s: ifp=%p sc->refcount not >= 1: %d",
                            __func__, ifp, sc->refcount));
                        free(elm, M_EPAIR);
                }
                IFQ_UNLOCK(&ifp->if_snd);

                if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) != 0) {
                        /* Our "hw"q overflowed again. */
                        epair_dpcpu->epair_drv_flags |= IFF_DRV_OACTIVE;
                        DPRINTF("hw queue length overflow at %u\n",
                            epair_nh.nh_qlimit);
                        break;
                }
        }
        EPAIR_UNLOCK(epair_dpcpu);
}

/*
 * Network interface (`if') related functions.
 */
static void
epair_remove_ifp_from_draining(struct ifnet *ifp)
{
        struct epair_dpcpu *epair_dpcpu;
        struct epair_ifp_drain *elm, *tvar;
        u_int cpuid;

        for (cpuid = 0; cpuid <= mp_maxid; cpuid++) {
                if (CPU_ABSENT(cpuid))
                        continue;

                epair_dpcpu = DPCPU_ID_PTR(cpuid, epair_dpcpu);
                EPAIR_LOCK(epair_dpcpu);
                STAILQ_FOREACH_SAFE(elm, &epair_dpcpu->epair_ifp_drain_list,
                    ifp_next, tvar) {
                        if (ifp == elm->ifp) {
                                struct epair_softc *sc;

                                STAILQ_REMOVE(
                                    &epair_dpcpu->epair_ifp_drain_list, elm,
                                    epair_ifp_drain, ifp_next);
                                /* The cached ifp goes off the list. */
                                sc = ifp->if_softc;
                                EPAIR_REFCOUNT_RELEASE(&sc->refcount);
                                EPAIR_REFCOUNT_ASSERT((int)sc->refcount >= 1,
                                    ("%s: ifp=%p sc->refcount not >= 1: %d",
                                    __func__, ifp, sc->refcount));
                                free(elm, M_EPAIR);
                        }
                }
                EPAIR_UNLOCK(epair_dpcpu);
        }
}

static int
epair_add_ifp_for_draining(struct ifnet *ifp)
{
        struct epair_dpcpu *epair_dpcpu;
        struct epair_softc *sc;
        struct epair_ifp_drain *elm = NULL;

        sc = ifp->if_softc;
        epair_dpcpu = DPCPU_ID_PTR(sc->cpuid, epair_dpcpu);
        EPAIR_LOCK_ASSERT(epair_dpcpu);
        STAILQ_FOREACH(elm, &epair_dpcpu->epair_ifp_drain_list, ifp_next)
                if (elm->ifp == ifp)
                        break;
        /* If the ifp is there already, return success. */
        if (elm != NULL)
                return (0);

        elm = malloc(sizeof(struct epair_ifp_drain), M_EPAIR, M_NOWAIT|M_ZERO);
        if (elm == NULL)
                return (ENOMEM);

        elm->ifp = ifp;
        /* Add a reference for the ifp pointer on the list. */
        EPAIR_REFCOUNT_AQUIRE(&sc->refcount);
        STAILQ_INSERT_TAIL(&epair_dpcpu->epair_ifp_drain_list, elm, ifp_next);

        return (0);
}

static void
epair_start_locked(struct ifnet *ifp)
{
        struct epair_dpcpu *epair_dpcpu;
        struct mbuf *m;
        struct epair_softc *sc;
        struct ifnet *oifp;
        int error;

        DPRINTF("ifp=%p\n", ifp);
        sc = ifp->if_softc;
        epair_dpcpu = DPCPU_ID_PTR(sc->cpuid, epair_dpcpu);
        EPAIR_LOCK_ASSERT(epair_dpcpu);

        if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
                return;
        if ((ifp->if_flags & IFF_UP) == 0)
                return;

        /*
         * We get packets here from ether_output via if_handoff()
         * and need to put them into the input queue of the oifp
         * and call oifp->if_input() via netisr/epair_sintr().
         */
        oifp = sc->oifp;
        sc = oifp->if_softc;
        for (;;) {
                IFQ_DEQUEUE(&ifp->if_snd, m);
                if (m == NULL)
                        break;
                BPF_MTAP(ifp, m);

                /*
                 * In case the outgoing interface is not usable,
                 * drop the packet.
                 */
                if ((oifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
                    (oifp->if_flags & IFF_UP) == 0) {
                        ifp->if_oerrors++;
                        m_freem(m);
                        continue;
                }
                DPRINTF("packet %s -> %s\n", ifp->if_xname, oifp->if_xname);

                /*
                 * Add a reference so the interface cannot go away while the
                 * packet is in transit as we rely on rcvif to stay valid.
                 */
                EPAIR_REFCOUNT_AQUIRE(&sc->refcount);
                m->m_pkthdr.rcvif = oifp;
                CURVNET_SET_QUIET(oifp->if_vnet);
                error = netisr_queue(NETISR_EPAIR, m);
                CURVNET_RESTORE();
                if (!error) {
                        ifp->if_opackets++;
                        /* Someone else received the packet. */
                        oifp->if_ipackets++;
                } else {
                        /* The packet was freed already. */
                        epair_dpcpu->epair_drv_flags |= IFF_DRV_OACTIVE;
                        ifp->if_drv_flags |= IFF_DRV_OACTIVE;
                        (void) epair_add_ifp_for_draining(ifp);
                        ifp->if_oerrors++;
                        EPAIR_REFCOUNT_RELEASE(&sc->refcount);
                        EPAIR_REFCOUNT_ASSERT((int)sc->refcount >= 1,
                            ("%s: ifp=%p sc->refcount not >= 1: %d",
                            __func__, oifp, sc->refcount));
                }
        }
}

static void
epair_start(struct ifnet *ifp)
{
        struct epair_dpcpu *epair_dpcpu;

        epair_dpcpu = DPCPU_ID_PTR(cpuid_from_ifp(ifp), epair_dpcpu);
        EPAIR_LOCK(epair_dpcpu);
        epair_start_locked(ifp);
        EPAIR_UNLOCK(epair_dpcpu);
}

static int
epair_transmit_locked(struct ifnet *ifp, struct mbuf *m)
{
        struct epair_dpcpu *epair_dpcpu;
        struct epair_softc *sc;
        struct ifnet *oifp;
        int error, len;
        short mflags;

        DPRINTF("ifp=%p m=%p\n", ifp, m);
        sc = ifp->if_softc;
        epair_dpcpu = DPCPU_ID_PTR(sc->cpuid, epair_dpcpu);
        EPAIR_LOCK_ASSERT(epair_dpcpu);

        if (m == NULL)
                return (0);

        /*
         * We are not going to use the interface en/dequeue mechanism
         * on the TX side. We are called from ether_output_frame()
         * and will put the packet into the incoming queue of the
         * other interface of our pair via the netisr.
         */
        if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
                m_freem(m);
                return (ENXIO);
        }
        if ((ifp->if_flags & IFF_UP) == 0) {
                m_freem(m);
                return (ENETDOWN);
        }

        BPF_MTAP(ifp, m);

        /*
         * In case the outgoing interface is not usable,
         * drop the packet.
         */
        oifp = sc->oifp;
        if ((oifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
            (oifp->if_flags & IFF_UP) == 0) {
                ifp->if_oerrors++;
                m_freem(m);
                return (0);
        }
        len = m->m_pkthdr.len;
        mflags = m->m_flags;
        DPRINTF("packet %s -> %s\n", ifp->if_xname, oifp->if_xname);

#ifdef ALTQ
        /* Support ALTQ via the classic if_start() path. */
        IF_LOCK(&ifp->if_snd);
        if (ALTQ_IS_ENABLED(&ifp->if_snd)) {
                ALTQ_ENQUEUE(&ifp->if_snd, m, NULL, error);
                if (error)
                        ifp->if_snd.ifq_drops++;
                IF_UNLOCK(&ifp->if_snd);
                if (!error) {
                        ifp->if_obytes += len;
                        if (mflags & (M_BCAST|M_MCAST))
                                ifp->if_omcasts++;

                        if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0)
                                epair_start_locked(ifp);
                        else
                                (void)epair_add_ifp_for_draining(ifp);
                }
                return (error);
        }
        IF_UNLOCK(&ifp->if_snd);
#endif

        if ((epair_dpcpu->epair_drv_flags & IFF_DRV_OACTIVE) != 0) {
                /*
                 * Our hardware queue is full; try to fall back
                 * queuing to the ifq but do not call ifp->if_start.
                 * Either we are lucky or the packet is gone.
                 */
                IFQ_ENQUEUE(&ifp->if_snd, m, error);
                if (!error)
                        (void)epair_add_ifp_for_draining(ifp);
                return (error);
        }
        sc = oifp->if_softc;
        /*
         * Add a reference so the interface cannot go away while the
         * packet is in transit as we rely on rcvif to stay valid.
         */
        EPAIR_REFCOUNT_AQUIRE(&sc->refcount);
        m->m_pkthdr.rcvif = oifp;
        CURVNET_SET_QUIET(oifp->if_vnet);
        error = netisr_queue(NETISR_EPAIR, m);
        CURVNET_RESTORE();
        if (!error) {
                ifp->if_opackets++;
                /*
                 * IFQ_HANDOFF_ADJ/if_handoff() update statistics,
                 * but as we bypass all this we have to duplicate
                 * the logic another time.
                 */
                ifp->if_obytes += len;
                if (mflags & (M_BCAST|M_MCAST))
                        ifp->if_omcasts++;
                /* Someone else received the packet. */
                oifp->if_ipackets++;
        } else {
                /* The packet was freed already. */
                epair_dpcpu->epair_drv_flags |= IFF_DRV_OACTIVE;
                ifp->if_drv_flags |= IFF_DRV_OACTIVE;
                ifp->if_oerrors++;
                EPAIR_REFCOUNT_RELEASE(&sc->refcount);
                EPAIR_REFCOUNT_ASSERT((int)sc->refcount >= 1,
                    ("%s: ifp=%p sc->refcount not >= 1: %d",
                    __func__, oifp, sc->refcount));
        }

        return (error);
}

static int
epair_transmit(struct ifnet *ifp, struct mbuf *m)
{
        struct epair_dpcpu *epair_dpcpu;
        int error;

        epair_dpcpu = DPCPU_ID_PTR(cpuid_from_ifp(ifp), epair_dpcpu);
        EPAIR_LOCK(epair_dpcpu);
        error = epair_transmit_locked(ifp, m);
        EPAIR_UNLOCK(epair_dpcpu);
        return (error);
}

static void
epair_qflush(struct ifnet *ifp)
{
        struct epair_softc *sc;

        sc = ifp->if_softc;
        KASSERT(sc != NULL, ("%s: ifp=%p, epair_softc gone? sc=%p\n",
            __func__, ifp, sc));
        /*
         * Remove this ifp from all backpointer lists. The interface will not
         * be usable for flushing anyway, nor should it have anything to flush
         * after if_qflush().
         */
        epair_remove_ifp_from_draining(ifp);

        if (sc->if_qflush)
                sc->if_qflush(ifp);
}

static int
epair_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
        struct ifreq *ifr;
        int error;

        ifr = (struct ifreq *)data;
        switch (cmd) {
        case SIOCSIFFLAGS:
        case SIOCADDMULTI:
        case SIOCDELMULTI:
                error = 0;
                break;

        case SIOCSIFMTU:
                /* We basically allow all kinds of MTUs. */
                ifp->if_mtu = ifr->ifr_mtu;
                error = 0;
                break;

        default:
                /* Let the common ethernet handler process this. */
                error = ether_ioctl(ifp, cmd, data);
                break;
        }

        return (error);
}

static void
epair_init(void *dummy __unused)
{
}


/*
 * Interface cloning functions.
 * We use our private ones so that we can create/destroy our secondary
 * device along with the primary one.
 */
static int
epair_clone_match(struct if_clone *ifc, const char *name)
{
        const char *cp;

        DPRINTF("name='%s'\n", name);

        /*
         * Our base name is epair.
         * Our interfaces will be named epair<n>[ab].
         * So accept anything of the following list:
         * - epair
         * - epair<n>
         * but not the epair<n>[ab] versions.
         */
        if (strncmp(EPAIRNAME, name, sizeof(EPAIRNAME)-1) != 0)
                return (0);

        for (cp = name + sizeof(EPAIRNAME) - 1; *cp != '\0'; cp++) {
                if (*cp < '0' || *cp > '9')
                        return (0);
        }

        return (1);
}

static int
epair_clone_create(struct if_clone *ifc, char *name, size_t len, caddr_t params)
{
        struct epair_softc *sca, *scb;
        struct ifnet *ifp;
        char *dp;
        int error, unit, wildcard;
        uint8_t eaddr[ETHER_ADDR_LEN];  /* 00:00:00:00:00:00 */

        /*
         * We are abusing params to create our second interface.
         * Actually we already created it and called if_clone_createif()
         * for it to do the official insertion procedure the moment we knew
         * it cannot fail anymore. So just attach it here.
         */
        if (params) {
                scb = (struct epair_softc *)params;
                ifp = scb->ifp;
                /* Assign a hopefully unique, locally administered etheraddr. */
                eaddr[0] = 0x02;
                eaddr[3] = (ifp->if_index >> 8) & 0xff;
                eaddr[4] = ifp->if_index & 0xff;
                eaddr[5] = 0x0b;
                ether_ifattach(ifp, eaddr);
                /* Correctly set the name for the cloner list. */
                strlcpy(name, scb->ifp->if_xname, len);
                return (0);
        }

        /* Try to see if a special unit was requested. */
        error = ifc_name2unit(name, &unit);
        if (error != 0)
                return (error);
        wildcard = (unit < 0);

        error = ifc_alloc_unit(ifc, &unit);
        if (error != 0)
                return (error);

        /*
         * If no unit had been given, we need to adjust the ifName.
         * Also make sure there is space for our extra [ab] suffix.
         */
        for (dp = name; *dp != '\0'; dp++);
        if (wildcard) {
                error = snprintf(dp, len - (dp - name), "%d", unit);
                if (error > len - (dp - name) - 1) {
                        /* ifName too long. */
                        ifc_free_unit(ifc, unit);
                        return (ENOSPC);
                }
                dp += error;
        }
        if (len - (dp - name) - 1 < 1) {
                /* No space left for our [ab] suffix. */
                ifc_free_unit(ifc, unit);
                return (ENOSPC);
        }
        *dp = 'a';
        /* Must not change dp so we can replace 'a' by 'b' later. */
        *(dp+1) = '\0';

        /* Allocate memory for both [ab] interfaces */
        sca = malloc(sizeof(struct epair_softc), M_EPAIR, M_WAITOK | M_ZERO);
        EPAIR_REFCOUNT_INIT(&sca->refcount, 1);
        sca->ifp = if_alloc(IFT_ETHER);
        if (sca->ifp == NULL) {
                free(sca, M_EPAIR);
                ifc_free_unit(ifc, unit);
                return (ENOSPC);
        }

        scb = malloc(sizeof(struct epair_softc), M_EPAIR, M_WAITOK | M_ZERO);
        EPAIR_REFCOUNT_INIT(&scb->refcount, 1);
        scb->ifp = if_alloc(IFT_ETHER);
        if (scb->ifp == NULL) {
                free(scb, M_EPAIR);
                if_free(sca->ifp);
                free(sca, M_EPAIR);
                ifc_free_unit(ifc, unit);
                return (ENOSPC);
        }

        /*
         * Cross-reference the interfaces so we will be able to free both.
         */
        sca->oifp = scb->ifp;
        scb->oifp = sca->ifp;

        /*
         * Calculate the cpuid for netisr queueing based on the
         * ifIndex of the interfaces. As long as we cannot configure
         * this or use cpuset information easily we cannot guarantee
         * cache locality but we can at least allow parallelism.
         */
        sca->cpuid =
            netisr_get_cpuid(sca->ifp->if_index % netisr_get_cpucount());
        scb->cpuid =
            netisr_get_cpuid(scb->ifp->if_index % netisr_get_cpucount());

        /* Finish initialization of interface <n>a. */
        ifp = sca->ifp;
        ifp->if_softc = sca;
        strlcpy(ifp->if_xname, name, IFNAMSIZ);
        ifp->if_dname = ifc->ifc_name;
        ifp->if_dunit = unit;
        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
        ifp->if_start = epair_start;
        ifp->if_ioctl = epair_ioctl;
        ifp->if_init  = epair_init;
        ifp->if_snd.ifq_maxlen = ifqmaxlen;
        /* Assign a hopefully unique, locally administered etheraddr. */
        eaddr[0] = 0x02;
        eaddr[3] = (ifp->if_index >> 8) & 0xff;
        eaddr[4] = ifp->if_index & 0xff;
        eaddr[5] = 0x0a;
        ether_ifattach(ifp, eaddr);
        sca->if_qflush = ifp->if_qflush;
        ifp->if_qflush = epair_qflush;
        ifp->if_transmit = epair_transmit;
        ifp->if_baudrate = IF_Gbps(10UL);       /* arbitrary maximum */

        /* Swap the name and finish initialization of interface <n>b. */
        *dp = 'b';

        ifp = scb->ifp;
        ifp->if_softc = scb;
        strlcpy(ifp->if_xname, name, IFNAMSIZ);
        ifp->if_dname = ifc->ifc_name;
        ifp->if_dunit = unit;
        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
        ifp->if_start = epair_start;
        ifp->if_ioctl = epair_ioctl;
        ifp->if_init  = epair_init;
        ifp->if_snd.ifq_maxlen = ifqmaxlen;
        /* We need to play some tricks here for the second interface. */
        strlcpy(name, EPAIRNAME, len);
        error = if_clone_create(name, len, (caddr_t)scb);
        if (error)
                panic("%s: if_clone_createif() for our 2nd iface failed: %d",
                    __func__, error);
        scb->if_qflush = ifp->if_qflush;
        ifp->if_qflush = epair_qflush;
        ifp->if_transmit = epair_transmit;
        ifp->if_baudrate = IF_Gbps(10UL);       /* arbitrary maximum */

        /*
         * Restore name to <n>a as the ifp for this will go into the
         * cloner list for the initial call.
         */
        strlcpy(name, sca->ifp->if_xname, len);
        DPRINTF("name='%s/%db' created sca=%p scb=%p\n", name, unit, sca, scb);

        /* Tell the world that we are ready to rock. */
        sca->ifp->if_drv_flags |= IFF_DRV_RUNNING;
        scb->ifp->if_drv_flags |= IFF_DRV_RUNNING;
        if_link_state_change(sca->ifp, LINK_STATE_UP);
        if_link_state_change(scb->ifp, LINK_STATE_UP);

        return (0);
}

static int
epair_clone_destroy(struct if_clone *ifc, struct ifnet *ifp)
{
        struct ifnet *oifp;
        struct epair_softc *sca, *scb;
        int unit, error;

        DPRINTF("ifp=%p\n", ifp);

        /*
         * In case we called into if_clone_destroyif() ourselves
         * again to remove the second interface, the softc will be
         * NULL. In that case do not do anything but return success.
         */
        if (ifp->if_softc == NULL)
                return (0);

        unit = ifp->if_dunit;
        sca = ifp->if_softc;
        oifp = sca->oifp;
        scb = oifp->if_softc;

        DPRINTF("ifp=%p oifp=%p\n", ifp, oifp);
        if_link_state_change(ifp, LINK_STATE_DOWN);
        if_link_state_change(oifp, LINK_STATE_DOWN);
        ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
        oifp->if_drv_flags &= ~IFF_DRV_RUNNING;
        ether_ifdetach(oifp);
        ether_ifdetach(ifp);
        /*
         * Wait for all packets to be dispatched to if_input.
         * The numbers can only go down as the interfaces are
         * detached so there is no need to use atomics.
         */
        DPRINTF("sca refcnt=%u scb refcnt=%u\n", sca->refcount, scb->refcount);
        EPAIR_REFCOUNT_ASSERT(sca->refcount == 1 && scb->refcount == 1,
            ("%s: ifp=%p sca->refcount!=1: %d || ifp=%p scb->refcount!=1: %d",
            __func__, ifp, sca->refcount, oifp, scb->refcount));

        /*
         * Get rid of our second half.
         */
        oifp->if_softc = NULL;
        error = if_clone_destroyif(ifc, oifp);
        if (error)
                panic("%s: if_clone_destroyif() for our 2nd iface failed: %d",
                    __func__, error);

        /*
         * Finish cleaning up. Free them and release the unit.
         * As the other of the two interfaces may reside in a different vnet,
         * we need to switch before freeing them.
         */
        CURVNET_SET_QUIET(oifp->if_vnet);
        if_free(oifp);
        CURVNET_RESTORE();
        if_free(ifp);
        free(scb, M_EPAIR);
        free(sca, M_EPAIR);
        ifc_free_unit(ifc, unit);

        return (0);
}

static int
epair_modevent(module_t mod, int type, void *data)
{
        int qlimit;

        switch (type) {
        case MOD_LOAD:
                /* For now limit us to one global mutex and one inq. */
                epair_dpcpu_init();
                epair_nh.nh_qlimit = 42 * ifqmaxlen; /* 42 shall be the number. */
#ifndef __rtems__
                if (TUNABLE_INT_FETCH("net.link.epair.netisr_maxqlen", &qlimit))
                    epair_nh.nh_qlimit = qlimit;
#endif /* __rtems__ */
                netisr_register(&epair_nh);
                if_clone_attach(&epair_cloner);
                if (bootverbose)
                        printf("%s initialized.\n", EPAIRNAME);
                break;
        case MOD_UNLOAD:
                if_clone_detach(&epair_cloner);
                netisr_unregister(&epair_nh);
                epair_dpcpu_detach();
                if (bootverbose)
                        printf("%s unloaded.\n", EPAIRNAME);
                break;
        default:
                return (EOPNOTSUPP);
        }
        return (0);
}

static moduledata_t epair_mod = {
        "if_epair",
        epair_modevent,
        0
};

DECLARE_MODULE(if_epair, epair_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
MODULE_VERSION(if_epair, 1);