1 | #include <machine/rtems-bsd-kernel-space.h> |
---|
2 | |
---|
3 | /*- |
---|
4 | * Copyright (c) 2007-2009 Robert N. M. Watson |
---|
5 | * Copyright (c) 2010-2011 Juniper Networks, Inc. |
---|
6 | * All rights reserved. |
---|
7 | * |
---|
8 | * This software was developed by Robert N. M. Watson under contract |
---|
9 | * to Juniper Networks, Inc. |
---|
10 | * |
---|
11 | * Redistribution and use in source and binary forms, with or without |
---|
12 | * modification, are permitted provided that the following conditions |
---|
13 | * are met: |
---|
14 | * 1. Redistributions of source code must retain the above copyright |
---|
15 | * notice, this list of conditions and the following disclaimer. |
---|
16 | * 2. Redistributions in binary form must reproduce the above copyright |
---|
17 | * notice, this list of conditions and the following disclaimer in the |
---|
18 | * documentation and/or other materials provided with the distribution. |
---|
19 | * |
---|
20 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND |
---|
21 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
---|
22 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
---|
23 | * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE |
---|
24 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
---|
25 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
---|
26 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
---|
27 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
---|
28 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
---|
29 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
---|
30 | * SUCH DAMAGE. |
---|
31 | */ |
---|
32 | |
---|
33 | #include <sys/cdefs.h> |
---|
34 | __FBSDID("$FreeBSD$"); |
---|
35 | |
---|
36 | /* |
---|
37 | * netisr is a packet dispatch service, allowing synchronous (directly |
---|
38 | * dispatched) and asynchronous (deferred dispatch) processing of packets by |
---|
39 | * registered protocol handlers. Callers pass a protocol identifier and |
---|
40 | * packet to netisr, along with a direct dispatch hint, and work will either |
---|
41 | * be immediately processed by the registered handler, or passed to a |
---|
42 | * software interrupt (SWI) thread for deferred dispatch. Callers will |
---|
43 | * generally select one or the other based on: |
---|
44 | * |
---|
45 | * - Whether directly dispatching a netisr handler leads to code reentrance or |
---|
46 | * lock recursion, such as entering the socket code from the socket code. |
---|
47 | * - Whether directly dispatching a netisr handler leads to recursive |
---|
48 | * processing, such as when decapsulating several wrapped layers of tunnel |
---|
49 | * information (IPSEC within IPSEC within ...). |
---|
50 | * |
---|
51 | * Maintaining ordering for protocol streams is a critical design concern. |
---|
52 | * Enforcing ordering limits the opportunity for concurrency, but maintains |
---|
53 | * the strong ordering requirements found in some protocols, such as TCP. Of |
---|
54 | * related concern is CPU affinity--it is desirable to process all data |
---|
55 | * associated with a particular stream on the same CPU over time in order to |
---|
56 | * avoid acquiring locks associated with the connection on different CPUs, |
---|
57 | * keep connection data in one cache, and to generally encourage associated |
---|
58 | * user threads to live on the same CPU as the stream. It's also desirable |
---|
59 | * to avoid lock migration and contention where locks are associated with |
---|
60 | * more than one flow. |
---|
61 | * |
---|
62 | * netisr supports several policy variations, represented by the |
---|
63 | * NETISR_POLICY_* constants, allowing protocols to play various roles in |
---|
64 | * identifying flows, assigning work to CPUs, etc. These are described in |
---|
65 | * netisr.h. |
---|
66 | */ |
---|
67 | |
---|
68 | #include <rtems/bsd/local/opt_ddb.h> |
---|
69 | #include <rtems/bsd/local/opt_device_polling.h> |
---|
70 | |
---|
71 | #include <rtems/bsd/sys/param.h> |
---|
72 | #include <sys/bus.h> |
---|
73 | #include <sys/kernel.h> |
---|
74 | #include <sys/kthread.h> |
---|
75 | #include <sys/interrupt.h> |
---|
76 | #include <rtems/bsd/sys/lock.h> |
---|
77 | #include <sys/mbuf.h> |
---|
78 | #include <sys/mutex.h> |
---|
79 | #include <sys/pcpu.h> |
---|
80 | #include <sys/proc.h> |
---|
81 | #include <sys/rmlock.h> |
---|
82 | #include <sys/sched.h> |
---|
83 | #include <sys/smp.h> |
---|
84 | #include <sys/socket.h> |
---|
85 | #include <sys/sysctl.h> |
---|
86 | #include <sys/systm.h> |
---|
87 | |
---|
88 | #ifdef DDB |
---|
89 | #include <ddb/ddb.h> |
---|
90 | #endif |
---|
91 | |
---|
92 | #define _WANT_NETISR_INTERNAL /* Enable definitions from netisr_internal.h */ |
---|
93 | #include <net/if.h> |
---|
94 | #include <net/if_var.h> |
---|
95 | #include <net/netisr.h> |
---|
96 | #include <net/netisr_internal.h> |
---|
97 | #include <net/vnet.h> |
---|
98 | |
---|
99 | /*- |
---|
100 | * Synchronize use and modification of the registered netisr data structures; |
---|
101 | * acquire a write lock while modifying the set of registered protocols to |
---|
102 | * prevent partially registered or unregistered protocols from being run. |
---|
103 | * |
---|
104 | * The following data structures and fields are protected by this lock: |
---|
105 | * |
---|
106 | * - The netisr_proto array, including all fields of struct netisr_proto. |
---|
107 | * - The nws array, including all fields of struct netisr_worker. |
---|
108 | * - The nws_array array. |
---|
109 | * |
---|
110 | * Note: the NETISR_LOCKING define controls whether read locks are acquired |
---|
111 | * in packet processing paths requiring netisr registration stability. This |
---|
112 | * is disabled by default as it can lead to measurable performance |
---|
113 | * degradation even with rmlocks (3%-6% for loopback ping-pong traffic), and |
---|
114 | * because netisr registration and unregistration is extremely rare at |
---|
115 | * runtime. If it becomes more common, this decision should be revisited. |
---|
116 | * |
---|
117 | * XXXRW: rmlocks don't support assertions. |
---|
118 | */ |
---|
119 | static struct rmlock netisr_rmlock; |
---|
120 | #define NETISR_LOCK_INIT() rm_init_flags(&netisr_rmlock, "netisr", \ |
---|
121 | RM_NOWITNESS) |
---|
122 | #define NETISR_LOCK_ASSERT() |
---|
123 | #define NETISR_RLOCK(tracker) rm_rlock(&netisr_rmlock, (tracker)) |
---|
124 | #define NETISR_RUNLOCK(tracker) rm_runlock(&netisr_rmlock, (tracker)) |
---|
125 | #define NETISR_WLOCK() rm_wlock(&netisr_rmlock) |
---|
126 | #define NETISR_WUNLOCK() rm_wunlock(&netisr_rmlock) |
---|
127 | /* #define NETISR_LOCKING */ |
---|
128 | |
---|
129 | static SYSCTL_NODE(_net, OID_AUTO, isr, CTLFLAG_RW, 0, "netisr"); |
---|
130 | |
---|
131 | /*- |
---|
132 | * Three global direct dispatch policies are supported: |
---|
133 | * |
---|
134 | * NETISR_DISPATCH_QUEUED: All work is deferred for a netisr, regardless of |
---|
135 | * context (may be overridden by protocols). |
---|
136 | * |
---|
137 | * NETISR_DISPATCH_HYBRID: If the executing context allows direct dispatch, |
---|
138 | * and we're running on the CPU the work would be performed on, then direct |
---|
139 | * dispatch it if it wouldn't violate ordering constraints on the workstream. |
---|
140 | * |
---|
141 | * NETISR_DISPATCH_DIRECT: If the executing context allows direct dispatch, |
---|
142 | * always direct dispatch. (The default.) |
---|
143 | * |
---|
144 | * Notice that changing the global policy could lead to short periods of |
---|
145 | * misordered processing, but this is considered acceptable as compared to |
---|
146 | * the complexity of enforcing ordering during policy changes. Protocols can |
---|
147 | * override the global policy (when they're not doing that, they select |
---|
148 | * NETISR_DISPATCH_DEFAULT). |
---|
149 | */ |
---|
150 | #define NETISR_DISPATCH_POLICY_DEFAULT NETISR_DISPATCH_DIRECT |
---|
151 | #define NETISR_DISPATCH_POLICY_MAXSTR 20 /* Used for temporary buffers. */ |
---|
152 | static u_int netisr_dispatch_policy = NETISR_DISPATCH_POLICY_DEFAULT; |
---|
153 | static int sysctl_netisr_dispatch_policy(SYSCTL_HANDLER_ARGS); |
---|
154 | SYSCTL_PROC(_net_isr, OID_AUTO, dispatch, CTLTYPE_STRING | CTLFLAG_RW | |
---|
155 | CTLFLAG_TUN, 0, 0, sysctl_netisr_dispatch_policy, "A", |
---|
156 | "netisr dispatch policy"); |
---|
157 | |
---|
158 | /* |
---|
159 | * These sysctls were used in previous versions to control and export |
---|
160 | * dispatch policy state. Now, we provide read-only export via them so that |
---|
161 | * older netstat binaries work. At some point they can be garbage collected. |
---|
162 | */ |
---|
163 | static int netisr_direct_force; |
---|
164 | SYSCTL_INT(_net_isr, OID_AUTO, direct_force, CTLFLAG_RD, |
---|
165 | &netisr_direct_force, 0, "compat: force direct dispatch"); |
---|
166 | |
---|
167 | static int netisr_direct; |
---|
168 | SYSCTL_INT(_net_isr, OID_AUTO, direct, CTLFLAG_RD, &netisr_direct, 0, |
---|
169 | "compat: enable direct dispatch"); |
---|
170 | |
---|
171 | /* |
---|
172 | * Allow the administrator to limit the number of threads (CPUs) to use for |
---|
173 | * netisr. We don't check netisr_maxthreads before creating the thread for |
---|
174 | * CPU 0, so in practice we ignore values <= 1. This must be set at boot. |
---|
175 | * We will create at most one thread per CPU. |
---|
176 | */ |
---|
177 | static int netisr_maxthreads = -1; /* Max number of threads. */ |
---|
178 | TUNABLE_INT("net.isr.maxthreads", &netisr_maxthreads); |
---|
179 | SYSCTL_INT(_net_isr, OID_AUTO, maxthreads, CTLFLAG_RDTUN, |
---|
180 | &netisr_maxthreads, 0, |
---|
181 | "Use at most this many CPUs for netisr processing"); |
---|
182 | |
---|
183 | static int netisr_bindthreads = 0; /* Bind threads to CPUs. */ |
---|
184 | TUNABLE_INT("net.isr.bindthreads", &netisr_bindthreads); |
---|
185 | SYSCTL_INT(_net_isr, OID_AUTO, bindthreads, CTLFLAG_RDTUN, |
---|
186 | &netisr_bindthreads, 0, "Bind netisr threads to CPUs."); |
---|
187 | |
---|
188 | /* |
---|
189 | * Limit per-workstream mbuf queue limits s to at most net.isr.maxqlimit, |
---|
190 | * both for initial configuration and later modification using |
---|
191 | * netisr_setqlimit(). |
---|
192 | */ |
---|
193 | #define NETISR_DEFAULT_MAXQLIMIT 10240 |
---|
194 | static u_int netisr_maxqlimit = NETISR_DEFAULT_MAXQLIMIT; |
---|
195 | TUNABLE_INT("net.isr.maxqlimit", &netisr_maxqlimit); |
---|
196 | SYSCTL_UINT(_net_isr, OID_AUTO, maxqlimit, CTLFLAG_RDTUN, |
---|
197 | &netisr_maxqlimit, 0, |
---|
198 | "Maximum netisr per-protocol, per-CPU queue depth."); |
---|
199 | |
---|
200 | /* |
---|
201 | * The default per-workstream mbuf queue limit for protocols that don't |
---|
202 | * initialize the nh_qlimit field of their struct netisr_handler. If this is |
---|
203 | * set above netisr_maxqlimit, we truncate it to the maximum during boot. |
---|
204 | */ |
---|
205 | #define NETISR_DEFAULT_DEFAULTQLIMIT 256 |
---|
206 | static u_int netisr_defaultqlimit = NETISR_DEFAULT_DEFAULTQLIMIT; |
---|
207 | TUNABLE_INT("net.isr.defaultqlimit", &netisr_defaultqlimit); |
---|
208 | SYSCTL_UINT(_net_isr, OID_AUTO, defaultqlimit, CTLFLAG_RDTUN, |
---|
209 | &netisr_defaultqlimit, 0, |
---|
210 | "Default netisr per-protocol, per-CPU queue limit if not set by protocol"); |
---|
211 | |
---|
212 | /* |
---|
213 | * Store and export the compile-time constant NETISR_MAXPROT limit on the |
---|
214 | * number of protocols that can register with netisr at a time. This is |
---|
215 | * required for crashdump analysis, as it sizes netisr_proto[]. |
---|
216 | */ |
---|
217 | static u_int netisr_maxprot = NETISR_MAXPROT; |
---|
218 | SYSCTL_UINT(_net_isr, OID_AUTO, maxprot, CTLFLAG_RD, |
---|
219 | &netisr_maxprot, 0, |
---|
220 | "Compile-time limit on the number of protocols supported by netisr."); |
---|
221 | |
---|
222 | /* |
---|
223 | * The netisr_proto array describes all registered protocols, indexed by |
---|
224 | * protocol number. See netisr_internal.h for more details. |
---|
225 | */ |
---|
226 | static struct netisr_proto netisr_proto[NETISR_MAXPROT]; |
---|
227 | |
---|
228 | #ifndef __rtems__ |
---|
229 | /* |
---|
230 | * Per-CPU workstream data. See netisr_internal.h for more details. |
---|
231 | */ |
---|
232 | DPCPU_DEFINE(struct netisr_workstream, nws); |
---|
233 | |
---|
234 | /* |
---|
235 | * Map contiguous values between 0 and nws_count into CPU IDs appropriate for |
---|
236 | * accessing workstreams. This allows constructions of the form |
---|
237 | * DPCPU_ID_GET(nws_array[arbitraryvalue % nws_count], nws). |
---|
238 | */ |
---|
239 | static u_int nws_array[MAXCPU]; |
---|
240 | |
---|
241 | /* |
---|
242 | * Number of registered workstreams. Will be at most the number of running |
---|
243 | * CPUs once fully started. |
---|
244 | */ |
---|
245 | static u_int nws_count; |
---|
246 | SYSCTL_UINT(_net_isr, OID_AUTO, numthreads, CTLFLAG_RD, |
---|
247 | &nws_count, 0, "Number of extant netisr threads."); |
---|
248 | #else /* __rtems__ */ |
---|
249 | static struct netisr_workstream rtems_bsd_nws; |
---|
250 | #endif /* __rtems__ */ |
---|
251 | |
---|
252 | /* |
---|
253 | * Synchronization for each workstream: a mutex protects all mutable fields |
---|
254 | * in each stream, including per-protocol state (mbuf queues). The SWI is |
---|
255 | * woken up if asynchronous dispatch is required. |
---|
256 | */ |
---|
257 | #define NWS_LOCK(s) mtx_lock(&(s)->nws_mtx) |
---|
258 | #define NWS_LOCK_ASSERT(s) mtx_assert(&(s)->nws_mtx, MA_OWNED) |
---|
259 | #define NWS_UNLOCK(s) mtx_unlock(&(s)->nws_mtx) |
---|
260 | #define NWS_SIGNAL(s) swi_sched((s)->nws_swi_cookie, 0) |
---|
261 | |
---|
262 | #ifndef __rtems__ |
---|
263 | /* |
---|
264 | * Utility routines for protocols that implement their own mapping of flows |
---|
265 | * to CPUs. |
---|
266 | */ |
---|
267 | u_int |
---|
268 | netisr_get_cpucount(void) |
---|
269 | { |
---|
270 | |
---|
271 | return (nws_count); |
---|
272 | } |
---|
273 | |
---|
274 | u_int |
---|
275 | netisr_get_cpuid(u_int cpunumber) |
---|
276 | { |
---|
277 | |
---|
278 | KASSERT(cpunumber < nws_count, ("%s: %u > %u", __func__, cpunumber, |
---|
279 | nws_count)); |
---|
280 | |
---|
281 | return (nws_array[cpunumber]); |
---|
282 | } |
---|
283 | |
---|
284 | /* |
---|
285 | * The default implementation of flow -> CPU ID mapping. |
---|
286 | * |
---|
287 | * Non-static so that protocols can use it to map their own work to specific |
---|
288 | * CPUs in a manner consistent to netisr for affinity purposes. |
---|
289 | */ |
---|
290 | u_int |
---|
291 | netisr_default_flow2cpu(u_int flowid) |
---|
292 | { |
---|
293 | |
---|
294 | return (nws_array[flowid % nws_count]); |
---|
295 | } |
---|
296 | #endif /* __rtems__ */ |
---|
297 | |
---|
298 | /* |
---|
299 | * Dispatch tunable and sysctl configuration. |
---|
300 | */ |
---|
301 | struct netisr_dispatch_table_entry { |
---|
302 | u_int ndte_policy; |
---|
303 | const char *ndte_policy_str; |
---|
304 | }; |
---|
305 | static const struct netisr_dispatch_table_entry netisr_dispatch_table[] = { |
---|
306 | { NETISR_DISPATCH_DEFAULT, "default" }, |
---|
307 | { NETISR_DISPATCH_DEFERRED, "deferred" }, |
---|
308 | { NETISR_DISPATCH_HYBRID, "hybrid" }, |
---|
309 | { NETISR_DISPATCH_DIRECT, "direct" }, |
---|
310 | }; |
---|
311 | static const u_int netisr_dispatch_table_len = |
---|
312 | (sizeof(netisr_dispatch_table) / sizeof(netisr_dispatch_table[0])); |
---|
313 | |
---|
314 | static void |
---|
315 | netisr_dispatch_policy_to_str(u_int dispatch_policy, char *buffer, |
---|
316 | u_int buflen) |
---|
317 | { |
---|
318 | const struct netisr_dispatch_table_entry *ndtep; |
---|
319 | const char *str; |
---|
320 | u_int i; |
---|
321 | |
---|
322 | str = "unknown"; |
---|
323 | for (i = 0; i < netisr_dispatch_table_len; i++) { |
---|
324 | ndtep = &netisr_dispatch_table[i]; |
---|
325 | if (ndtep->ndte_policy == dispatch_policy) { |
---|
326 | str = ndtep->ndte_policy_str; |
---|
327 | break; |
---|
328 | } |
---|
329 | } |
---|
330 | snprintf(buffer, buflen, "%s", str); |
---|
331 | } |
---|
332 | |
---|
333 | static int |
---|
334 | netisr_dispatch_policy_from_str(const char *str, u_int *dispatch_policyp) |
---|
335 | { |
---|
336 | const struct netisr_dispatch_table_entry *ndtep; |
---|
337 | u_int i; |
---|
338 | |
---|
339 | for (i = 0; i < netisr_dispatch_table_len; i++) { |
---|
340 | ndtep = &netisr_dispatch_table[i]; |
---|
341 | if (strcmp(ndtep->ndte_policy_str, str) == 0) { |
---|
342 | *dispatch_policyp = ndtep->ndte_policy; |
---|
343 | return (0); |
---|
344 | } |
---|
345 | } |
---|
346 | return (EINVAL); |
---|
347 | } |
---|
348 | |
---|
349 | static void |
---|
350 | netisr_dispatch_policy_compat(void) |
---|
351 | { |
---|
352 | |
---|
353 | switch (netisr_dispatch_policy) { |
---|
354 | case NETISR_DISPATCH_DEFERRED: |
---|
355 | netisr_direct_force = 0; |
---|
356 | netisr_direct = 0; |
---|
357 | break; |
---|
358 | |
---|
359 | case NETISR_DISPATCH_HYBRID: |
---|
360 | netisr_direct_force = 0; |
---|
361 | netisr_direct = 1; |
---|
362 | break; |
---|
363 | |
---|
364 | case NETISR_DISPATCH_DIRECT: |
---|
365 | netisr_direct_force = 1; |
---|
366 | netisr_direct = 1; |
---|
367 | break; |
---|
368 | |
---|
369 | default: |
---|
370 | panic("%s: unknown policy %u", __func__, |
---|
371 | netisr_dispatch_policy); |
---|
372 | } |
---|
373 | } |
---|
374 | |
---|
375 | static int |
---|
376 | sysctl_netisr_dispatch_policy(SYSCTL_HANDLER_ARGS) |
---|
377 | { |
---|
378 | char tmp[NETISR_DISPATCH_POLICY_MAXSTR]; |
---|
379 | u_int dispatch_policy; |
---|
380 | int error; |
---|
381 | |
---|
382 | netisr_dispatch_policy_to_str(netisr_dispatch_policy, tmp, |
---|
383 | sizeof(tmp)); |
---|
384 | error = sysctl_handle_string(oidp, tmp, sizeof(tmp), req); |
---|
385 | if (error == 0 && req->newptr != NULL) { |
---|
386 | error = netisr_dispatch_policy_from_str(tmp, |
---|
387 | &dispatch_policy); |
---|
388 | if (error == 0 && dispatch_policy == NETISR_DISPATCH_DEFAULT) |
---|
389 | error = EINVAL; |
---|
390 | if (error == 0) { |
---|
391 | netisr_dispatch_policy = dispatch_policy; |
---|
392 | netisr_dispatch_policy_compat(); |
---|
393 | } |
---|
394 | } |
---|
395 | return (error); |
---|
396 | } |
---|
397 | |
---|
398 | /* |
---|
399 | * Register a new netisr handler, which requires initializing per-protocol |
---|
400 | * fields for each workstream. All netisr work is briefly suspended while |
---|
401 | * the protocol is installed. |
---|
402 | */ |
---|
403 | void |
---|
404 | netisr_register(const struct netisr_handler *nhp) |
---|
405 | { |
---|
406 | struct netisr_work *npwp; |
---|
407 | const char *name; |
---|
408 | u_int i, proto; |
---|
409 | |
---|
410 | proto = nhp->nh_proto; |
---|
411 | name = nhp->nh_name; |
---|
412 | |
---|
413 | /* |
---|
414 | * Test that the requested registration is valid. |
---|
415 | */ |
---|
416 | KASSERT(nhp->nh_name != NULL, |
---|
417 | ("%s: nh_name NULL for %u", __func__, proto)); |
---|
418 | KASSERT(nhp->nh_handler != NULL, |
---|
419 | ("%s: nh_handler NULL for %s", __func__, name)); |
---|
420 | KASSERT(nhp->nh_policy == NETISR_POLICY_SOURCE || |
---|
421 | nhp->nh_policy == NETISR_POLICY_FLOW || |
---|
422 | nhp->nh_policy == NETISR_POLICY_CPU, |
---|
423 | ("%s: unsupported nh_policy %u for %s", __func__, |
---|
424 | nhp->nh_policy, name)); |
---|
425 | KASSERT(nhp->nh_policy == NETISR_POLICY_FLOW || |
---|
426 | nhp->nh_m2flow == NULL, |
---|
427 | ("%s: nh_policy != FLOW but m2flow defined for %s", __func__, |
---|
428 | name)); |
---|
429 | KASSERT(nhp->nh_policy == NETISR_POLICY_CPU || nhp->nh_m2cpuid == NULL, |
---|
430 | ("%s: nh_policy != CPU but m2cpuid defined for %s", __func__, |
---|
431 | name)); |
---|
432 | KASSERT(nhp->nh_policy != NETISR_POLICY_CPU || nhp->nh_m2cpuid != NULL, |
---|
433 | ("%s: nh_policy == CPU but m2cpuid not defined for %s", __func__, |
---|
434 | name)); |
---|
435 | KASSERT(nhp->nh_dispatch == NETISR_DISPATCH_DEFAULT || |
---|
436 | nhp->nh_dispatch == NETISR_DISPATCH_DEFERRED || |
---|
437 | nhp->nh_dispatch == NETISR_DISPATCH_HYBRID || |
---|
438 | nhp->nh_dispatch == NETISR_DISPATCH_DIRECT, |
---|
439 | ("%s: invalid nh_dispatch (%u)", __func__, nhp->nh_dispatch)); |
---|
440 | |
---|
441 | KASSERT(proto < NETISR_MAXPROT, |
---|
442 | ("%s(%u, %s): protocol too big", __func__, proto, name)); |
---|
443 | |
---|
444 | /* |
---|
445 | * Test that no existing registration exists for this protocol. |
---|
446 | */ |
---|
447 | NETISR_WLOCK(); |
---|
448 | KASSERT(netisr_proto[proto].np_name == NULL, |
---|
449 | ("%s(%u, %s): name present", __func__, proto, name)); |
---|
450 | KASSERT(netisr_proto[proto].np_handler == NULL, |
---|
451 | ("%s(%u, %s): handler present", __func__, proto, name)); |
---|
452 | |
---|
453 | netisr_proto[proto].np_name = name; |
---|
454 | netisr_proto[proto].np_handler = nhp->nh_handler; |
---|
455 | netisr_proto[proto].np_m2flow = nhp->nh_m2flow; |
---|
456 | netisr_proto[proto].np_m2cpuid = nhp->nh_m2cpuid; |
---|
457 | netisr_proto[proto].np_drainedcpu = nhp->nh_drainedcpu; |
---|
458 | if (nhp->nh_qlimit == 0) |
---|
459 | netisr_proto[proto].np_qlimit = netisr_defaultqlimit; |
---|
460 | else if (nhp->nh_qlimit > netisr_maxqlimit) { |
---|
461 | printf("%s: %s requested queue limit %u capped to " |
---|
462 | "net.isr.maxqlimit %u\n", __func__, name, nhp->nh_qlimit, |
---|
463 | netisr_maxqlimit); |
---|
464 | netisr_proto[proto].np_qlimit = netisr_maxqlimit; |
---|
465 | } else |
---|
466 | netisr_proto[proto].np_qlimit = nhp->nh_qlimit; |
---|
467 | netisr_proto[proto].np_policy = nhp->nh_policy; |
---|
468 | netisr_proto[proto].np_dispatch = nhp->nh_dispatch; |
---|
469 | CPU_FOREACH(i) { |
---|
470 | #ifndef __rtems__ |
---|
471 | npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto]; |
---|
472 | #else /* __rtems__ */ |
---|
473 | npwp = &rtems_bsd_nws.nws_work[proto]; |
---|
474 | #endif /* __rtems__ */ |
---|
475 | bzero(npwp, sizeof(*npwp)); |
---|
476 | npwp->nw_qlimit = netisr_proto[proto].np_qlimit; |
---|
477 | } |
---|
478 | NETISR_WUNLOCK(); |
---|
479 | } |
---|
480 | |
---|
481 | /* |
---|
482 | * Clear drop counters across all workstreams for a protocol. |
---|
483 | */ |
---|
484 | void |
---|
485 | netisr_clearqdrops(const struct netisr_handler *nhp) |
---|
486 | { |
---|
487 | struct netisr_work *npwp; |
---|
488 | #ifdef INVARIANTS |
---|
489 | const char *name; |
---|
490 | #endif |
---|
491 | u_int i, proto; |
---|
492 | |
---|
493 | proto = nhp->nh_proto; |
---|
494 | #ifdef INVARIANTS |
---|
495 | name = nhp->nh_name; |
---|
496 | #endif |
---|
497 | KASSERT(proto < NETISR_MAXPROT, |
---|
498 | ("%s(%u): protocol too big for %s", __func__, proto, name)); |
---|
499 | |
---|
500 | NETISR_WLOCK(); |
---|
501 | KASSERT(netisr_proto[proto].np_handler != NULL, |
---|
502 | ("%s(%u): protocol not registered for %s", __func__, proto, |
---|
503 | name)); |
---|
504 | |
---|
505 | CPU_FOREACH(i) { |
---|
506 | #ifndef __rtems__ |
---|
507 | npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto]; |
---|
508 | #else /* __rtems__ */ |
---|
509 | npwp = &rtems_bsd_nws.nws_work[proto]; |
---|
510 | #endif /* __rtems__ */ |
---|
511 | npwp->nw_qdrops = 0; |
---|
512 | } |
---|
513 | NETISR_WUNLOCK(); |
---|
514 | } |
---|
515 | |
---|
516 | /* |
---|
517 | * Query current drop counters across all workstreams for a protocol. |
---|
518 | */ |
---|
519 | void |
---|
520 | netisr_getqdrops(const struct netisr_handler *nhp, u_int64_t *qdropp) |
---|
521 | { |
---|
522 | struct netisr_work *npwp; |
---|
523 | struct rm_priotracker tracker; |
---|
524 | #ifdef INVARIANTS |
---|
525 | const char *name; |
---|
526 | #endif |
---|
527 | u_int i, proto; |
---|
528 | |
---|
529 | *qdropp = 0; |
---|
530 | proto = nhp->nh_proto; |
---|
531 | #ifdef INVARIANTS |
---|
532 | name = nhp->nh_name; |
---|
533 | #endif |
---|
534 | KASSERT(proto < NETISR_MAXPROT, |
---|
535 | ("%s(%u): protocol too big for %s", __func__, proto, name)); |
---|
536 | |
---|
537 | NETISR_RLOCK(&tracker); |
---|
538 | KASSERT(netisr_proto[proto].np_handler != NULL, |
---|
539 | ("%s(%u): protocol not registered for %s", __func__, proto, |
---|
540 | name)); |
---|
541 | |
---|
542 | CPU_FOREACH(i) { |
---|
543 | #ifndef __rtems__ |
---|
544 | npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto]; |
---|
545 | #else /* __rtems__ */ |
---|
546 | npwp = &rtems_bsd_nws.nws_work[proto]; |
---|
547 | #endif /* __rtems__ */ |
---|
548 | *qdropp += npwp->nw_qdrops; |
---|
549 | } |
---|
550 | NETISR_RUNLOCK(&tracker); |
---|
551 | } |
---|
552 | |
---|
553 | /* |
---|
554 | * Query current per-workstream queue limit for a protocol. |
---|
555 | */ |
---|
556 | void |
---|
557 | netisr_getqlimit(const struct netisr_handler *nhp, u_int *qlimitp) |
---|
558 | { |
---|
559 | struct rm_priotracker tracker; |
---|
560 | #ifdef INVARIANTS |
---|
561 | const char *name; |
---|
562 | #endif |
---|
563 | u_int proto; |
---|
564 | |
---|
565 | proto = nhp->nh_proto; |
---|
566 | #ifdef INVARIANTS |
---|
567 | name = nhp->nh_name; |
---|
568 | #endif |
---|
569 | KASSERT(proto < NETISR_MAXPROT, |
---|
570 | ("%s(%u): protocol too big for %s", __func__, proto, name)); |
---|
571 | |
---|
572 | NETISR_RLOCK(&tracker); |
---|
573 | KASSERT(netisr_proto[proto].np_handler != NULL, |
---|
574 | ("%s(%u): protocol not registered for %s", __func__, proto, |
---|
575 | name)); |
---|
576 | *qlimitp = netisr_proto[proto].np_qlimit; |
---|
577 | NETISR_RUNLOCK(&tracker); |
---|
578 | } |
---|
579 | |
---|
580 | /* |
---|
581 | * Update the queue limit across per-workstream queues for a protocol. We |
---|
582 | * simply change the limits, and don't drain overflowed packets as they will |
---|
583 | * (hopefully) take care of themselves shortly. |
---|
584 | */ |
---|
585 | int |
---|
586 | netisr_setqlimit(const struct netisr_handler *nhp, u_int qlimit) |
---|
587 | { |
---|
588 | struct netisr_work *npwp; |
---|
589 | #ifdef INVARIANTS |
---|
590 | const char *name; |
---|
591 | #endif |
---|
592 | u_int i, proto; |
---|
593 | |
---|
594 | if (qlimit > netisr_maxqlimit) |
---|
595 | return (EINVAL); |
---|
596 | |
---|
597 | proto = nhp->nh_proto; |
---|
598 | #ifdef INVARIANTS |
---|
599 | name = nhp->nh_name; |
---|
600 | #endif |
---|
601 | KASSERT(proto < NETISR_MAXPROT, |
---|
602 | ("%s(%u): protocol too big for %s", __func__, proto, name)); |
---|
603 | |
---|
604 | NETISR_WLOCK(); |
---|
605 | KASSERT(netisr_proto[proto].np_handler != NULL, |
---|
606 | ("%s(%u): protocol not registered for %s", __func__, proto, |
---|
607 | name)); |
---|
608 | |
---|
609 | netisr_proto[proto].np_qlimit = qlimit; |
---|
610 | CPU_FOREACH(i) { |
---|
611 | #ifndef __rtems__ |
---|
612 | npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto]; |
---|
613 | #else /* __rtems__ */ |
---|
614 | npwp = &rtems_bsd_nws.nws_work[proto]; |
---|
615 | #endif /* __rtems__ */ |
---|
616 | npwp->nw_qlimit = qlimit; |
---|
617 | } |
---|
618 | NETISR_WUNLOCK(); |
---|
619 | return (0); |
---|
620 | } |
---|
621 | |
---|
622 | /* |
---|
623 | * Drain all packets currently held in a particular protocol work queue. |
---|
624 | */ |
---|
625 | static void |
---|
626 | netisr_drain_proto(struct netisr_work *npwp) |
---|
627 | { |
---|
628 | struct mbuf *m; |
---|
629 | |
---|
630 | /* |
---|
631 | * We would assert the lock on the workstream but it's not passed in. |
---|
632 | */ |
---|
633 | while ((m = npwp->nw_head) != NULL) { |
---|
634 | npwp->nw_head = m->m_nextpkt; |
---|
635 | m->m_nextpkt = NULL; |
---|
636 | if (npwp->nw_head == NULL) |
---|
637 | npwp->nw_tail = NULL; |
---|
638 | npwp->nw_len--; |
---|
639 | m_freem(m); |
---|
640 | } |
---|
641 | KASSERT(npwp->nw_tail == NULL, ("%s: tail", __func__)); |
---|
642 | KASSERT(npwp->nw_len == 0, ("%s: len", __func__)); |
---|
643 | } |
---|
644 | |
---|
645 | /* |
---|
646 | * Remove the registration of a network protocol, which requires clearing |
---|
647 | * per-protocol fields across all workstreams, including freeing all mbufs in |
---|
648 | * the queues at time of unregister. All work in netisr is briefly suspended |
---|
649 | * while this takes place. |
---|
650 | */ |
---|
651 | void |
---|
652 | netisr_unregister(const struct netisr_handler *nhp) |
---|
653 | { |
---|
654 | struct netisr_work *npwp; |
---|
655 | #ifdef INVARIANTS |
---|
656 | const char *name; |
---|
657 | #endif |
---|
658 | u_int i, proto; |
---|
659 | |
---|
660 | proto = nhp->nh_proto; |
---|
661 | #ifdef INVARIANTS |
---|
662 | name = nhp->nh_name; |
---|
663 | #endif |
---|
664 | KASSERT(proto < NETISR_MAXPROT, |
---|
665 | ("%s(%u): protocol too big for %s", __func__, proto, name)); |
---|
666 | |
---|
667 | NETISR_WLOCK(); |
---|
668 | KASSERT(netisr_proto[proto].np_handler != NULL, |
---|
669 | ("%s(%u): protocol not registered for %s", __func__, proto, |
---|
670 | name)); |
---|
671 | |
---|
672 | netisr_proto[proto].np_name = NULL; |
---|
673 | netisr_proto[proto].np_handler = NULL; |
---|
674 | netisr_proto[proto].np_m2flow = NULL; |
---|
675 | netisr_proto[proto].np_m2cpuid = NULL; |
---|
676 | netisr_proto[proto].np_qlimit = 0; |
---|
677 | netisr_proto[proto].np_policy = 0; |
---|
678 | CPU_FOREACH(i) { |
---|
679 | #ifndef __rtems__ |
---|
680 | npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto]; |
---|
681 | #else /* __rtems__ */ |
---|
682 | npwp = &rtems_bsd_nws.nws_work[proto]; |
---|
683 | #endif /* __rtems__ */ |
---|
684 | netisr_drain_proto(npwp); |
---|
685 | bzero(npwp, sizeof(*npwp)); |
---|
686 | } |
---|
687 | NETISR_WUNLOCK(); |
---|
688 | } |
---|
689 | |
---|
690 | /* |
---|
691 | * Compose the global and per-protocol policies on dispatch, and return the |
---|
692 | * dispatch policy to use. |
---|
693 | */ |
---|
694 | static u_int |
---|
695 | netisr_get_dispatch(struct netisr_proto *npp) |
---|
696 | { |
---|
697 | |
---|
698 | /* |
---|
699 | * Protocol-specific configuration overrides the global default. |
---|
700 | */ |
---|
701 | if (npp->np_dispatch != NETISR_DISPATCH_DEFAULT) |
---|
702 | return (npp->np_dispatch); |
---|
703 | return (netisr_dispatch_policy); |
---|
704 | } |
---|
705 | |
---|
/*
 * Look up the workstream given a packet and source identifier.  Do this by
 * checking the protocol's policy, and optionally call out to the protocol
 * for assistance if required.
 *
 * Returns the (possibly replaced) mbuf with *cpuidp set on success, or
 * NULL if the protocol callback consumed the packet.
 */
static struct mbuf *
netisr_select_cpuid(struct netisr_proto *npp, u_int dispatch_policy,
    uintptr_t source, struct mbuf *m, u_int *cpuidp)
{
	struct ifnet *ifp;
	u_int policy;

	NETISR_LOCK_ASSERT();

#ifndef __rtems__
	/*
	 * In the event we have only one worker, shortcut and deliver to it
	 * without further ado.
	 */
	if (nws_count == 1) {
		*cpuidp = nws_array[0];
		return (m);
	}

	/*
	 * What happens next depends on the policy selected by the protocol.
	 * If we want to support per-interface policies, we should do that
	 * here first.
	 */
	policy = npp->np_policy;
	if (policy == NETISR_POLICY_CPU) {
		/* Protocol picks the CPU itself; it may consume the mbuf. */
		m = npp->np_m2cpuid(m, source, cpuidp);
		if (m == NULL)
			return (NULL);

		/*
		 * It's possible for a protocol not to have a good idea about
		 * where to process a packet, in which case we fall back on
		 * the netisr code to decide. In the hybrid case, return the
		 * current CPU ID, which will force an immediate direct
		 * dispatch. In the queued case, fall back on the SOURCE
		 * policy.
		 */
		if (*cpuidp != NETISR_CPUID_NONE)
			return (m);
		if (dispatch_policy == NETISR_DISPATCH_HYBRID) {
			*cpuidp = curcpu;
			return (m);
		}
		policy = NETISR_POLICY_SOURCE;
	}

	if (policy == NETISR_POLICY_FLOW) {
		/* Ask the protocol to compute a flow ID if one isn't set. */
		if (!(m->m_flags & M_FLOWID) && npp->np_m2flow != NULL) {
			m = npp->np_m2flow(m, source);
			if (m == NULL)
				return (NULL);
		}
		if (m->m_flags & M_FLOWID) {
			*cpuidp =
			    netisr_default_flow2cpu(m->m_pkthdr.flowid);
			return (m);
		}
		/* No flow ID available: fall back on the SOURCE policy. */
		policy = NETISR_POLICY_SOURCE;
	}

	KASSERT(policy == NETISR_POLICY_SOURCE,
	    ("%s: invalid policy %u for %s", __func__, npp->np_policy,
	    npp->np_name));

	/* SOURCE policy: hash interface index plus source over workers. */
	ifp = m->m_pkthdr.rcvif;
	if (ifp != NULL)
		*cpuidp = nws_array[(ifp->if_index + source) % nws_count];
	else
		*cpuidp = nws_array[source % nws_count];
#else /* __rtems__ */
	/* RTEMS uses a single workstream, so always select CPU 0. */
	*cpuidp = 0;
#endif /* __rtems__ */
	return (m);
}
---|
786 | |
---|
787 | /* |
---|
788 | * Process packets associated with a workstream and protocol. For reasons of |
---|
789 | * fairness, we process up to one complete netisr queue at a time, moving the |
---|
790 | * queue to a stack-local queue for processing, but do not loop refreshing |
---|
791 | * from the global queue. The caller is responsible for deciding whether to |
---|
792 | * loop, and for setting the NWS_RUNNING flag. The passed workstream will be |
---|
793 | * locked on entry and relocked before return, but will be released while |
---|
794 | * processing. The number of packets processed is returned. |
---|
795 | */ |
---|
796 | static u_int |
---|
797 | netisr_process_workstream_proto(struct netisr_workstream *nwsp, u_int proto) |
---|
798 | { |
---|
799 | struct netisr_work local_npw, *npwp; |
---|
800 | u_int handled; |
---|
801 | struct mbuf *m; |
---|
802 | |
---|
803 | NETISR_LOCK_ASSERT(); |
---|
804 | NWS_LOCK_ASSERT(nwsp); |
---|
805 | |
---|
806 | KASSERT(nwsp->nws_flags & NWS_RUNNING, |
---|
807 | ("%s(%u): not running", __func__, proto)); |
---|
808 | KASSERT(proto >= 0 && proto < NETISR_MAXPROT, |
---|
809 | ("%s(%u): invalid proto\n", __func__, proto)); |
---|
810 | |
---|
811 | npwp = &nwsp->nws_work[proto]; |
---|
812 | if (npwp->nw_len == 0) |
---|
813 | return (0); |
---|
814 | |
---|
815 | /* |
---|
816 | * Move the global work queue to a thread-local work queue. |
---|
817 | * |
---|
818 | * Notice that this means the effective maximum length of the queue |
---|
819 | * is actually twice that of the maximum queue length specified in |
---|
820 | * the protocol registration call. |
---|
821 | */ |
---|
822 | handled = npwp->nw_len; |
---|
823 | local_npw = *npwp; |
---|
824 | npwp->nw_head = NULL; |
---|
825 | npwp->nw_tail = NULL; |
---|
826 | npwp->nw_len = 0; |
---|
827 | nwsp->nws_pendingbits &= ~(1 << proto); |
---|
828 | NWS_UNLOCK(nwsp); |
---|
829 | while ((m = local_npw.nw_head) != NULL) { |
---|
830 | local_npw.nw_head = m->m_nextpkt; |
---|
831 | m->m_nextpkt = NULL; |
---|
832 | if (local_npw.nw_head == NULL) |
---|
833 | local_npw.nw_tail = NULL; |
---|
834 | local_npw.nw_len--; |
---|
835 | VNET_ASSERT(m->m_pkthdr.rcvif != NULL, |
---|
836 | ("%s:%d rcvif == NULL: m=%p", __func__, __LINE__, m)); |
---|
837 | CURVNET_SET(m->m_pkthdr.rcvif->if_vnet); |
---|
838 | netisr_proto[proto].np_handler(m); |
---|
839 | CURVNET_RESTORE(); |
---|
840 | } |
---|
841 | KASSERT(local_npw.nw_len == 0, |
---|
842 | ("%s(%u): len %u", __func__, proto, local_npw.nw_len)); |
---|
843 | if (netisr_proto[proto].np_drainedcpu) |
---|
844 | netisr_proto[proto].np_drainedcpu(nwsp->nws_cpu); |
---|
845 | NWS_LOCK(nwsp); |
---|
846 | npwp->nw_handled += handled; |
---|
847 | return (handled); |
---|
848 | } |
---|
849 | |
---|
/*
 * SWI handler for netisr -- processes packets in a set of workstreams that
 * it owns, woken up by calls to NWS_SIGNAL().  If this workstream is already
 * being direct dispatched, go back to sleep and wait for the dispatching
 * thread to wake us up again.
 */
static void
swi_net(void *arg)
{
#ifdef NETISR_LOCKING
	struct rm_priotracker tracker;
#endif
	struct netisr_workstream *nwsp;
	u_int bits, prot;

	/* The SWI argument is the workstream this handler owns. */
	nwsp = arg;

#ifdef DEVICE_POLLING
	KASSERT(nws_count == 1,
	    ("%s: device_polling but nws_count != 1", __func__));
	netisr_poll();
#endif
#ifdef NETISR_LOCKING
	NETISR_RLOCK(&tracker);
#endif
	NWS_LOCK(nwsp);
	KASSERT(!(nwsp->nws_flags & NWS_RUNNING), ("swi_net: running"));
	/* A direct dispatcher owns the stream; it will re-signal us. */
	if (nwsp->nws_flags & NWS_DISPATCHING)
		goto out;
	nwsp->nws_flags |= NWS_RUNNING;
	nwsp->nws_flags &= ~NWS_SCHEDULED;
	/*
	 * Drain every protocol with pending work.  nws_pendingbits is
	 * re-read each outer iteration because producers may queue more
	 * work while the lock is dropped inside
	 * netisr_process_workstream_proto().
	 */
	while ((bits = nwsp->nws_pendingbits) != 0) {
		/* ffs() is 1-based; 0 means no bits remain set. */
		while ((prot = ffs(bits)) != 0) {
			prot--;
			bits &= ~(1 << prot);
			(void)netisr_process_workstream_proto(nwsp, prot);
		}
	}
	nwsp->nws_flags &= ~NWS_RUNNING;
out:
	NWS_UNLOCK(nwsp);
#ifdef NETISR_LOCKING
	NETISR_RUNLOCK(&tracker);
#endif
#ifdef DEVICE_POLLING
	netisr_pollmore();
#endif
}
---|
898 | |
---|
899 | static int |
---|
900 | netisr_queue_workstream(struct netisr_workstream *nwsp, u_int proto, |
---|
901 | struct netisr_work *npwp, struct mbuf *m, int *dosignalp) |
---|
902 | { |
---|
903 | |
---|
904 | NWS_LOCK_ASSERT(nwsp); |
---|
905 | |
---|
906 | *dosignalp = 0; |
---|
907 | if (npwp->nw_len < npwp->nw_qlimit) { |
---|
908 | m->m_nextpkt = NULL; |
---|
909 | if (npwp->nw_head == NULL) { |
---|
910 | npwp->nw_head = m; |
---|
911 | npwp->nw_tail = m; |
---|
912 | } else { |
---|
913 | npwp->nw_tail->m_nextpkt = m; |
---|
914 | npwp->nw_tail = m; |
---|
915 | } |
---|
916 | npwp->nw_len++; |
---|
917 | if (npwp->nw_len > npwp->nw_watermark) |
---|
918 | npwp->nw_watermark = npwp->nw_len; |
---|
919 | |
---|
920 | /* |
---|
921 | * We must set the bit regardless of NWS_RUNNING, so that |
---|
922 | * swi_net() keeps calling netisr_process_workstream_proto(). |
---|
923 | */ |
---|
924 | nwsp->nws_pendingbits |= (1 << proto); |
---|
925 | if (!(nwsp->nws_flags & |
---|
926 | (NWS_RUNNING | NWS_DISPATCHING | NWS_SCHEDULED))) { |
---|
927 | nwsp->nws_flags |= NWS_SCHEDULED; |
---|
928 | *dosignalp = 1; /* Defer until unlocked. */ |
---|
929 | } |
---|
930 | npwp->nw_queued++; |
---|
931 | return (0); |
---|
932 | } else { |
---|
933 | m_freem(m); |
---|
934 | npwp->nw_qdrops++; |
---|
935 | return (ENOBUFS); |
---|
936 | } |
---|
937 | } |
---|
938 | |
---|
/*
 * Queue a packet onto the workstream associated with the given CPU,
 * signalling the worker thread if required.  Returns 0 or an errno value
 * from netisr_queue_workstream().
 */
static int
netisr_queue_internal(u_int proto, struct mbuf *m, u_int cpuid)
{
	struct netisr_workstream *nwsp;
	struct netisr_work *npwp;
	int dosignal, error;

#ifdef NETISR_LOCKING
	NETISR_LOCK_ASSERT();
#endif
	KASSERT(cpuid <= mp_maxid, ("%s: cpuid too big (%u, %u)", __func__,
	    cpuid, mp_maxid));
	KASSERT(!CPU_ABSENT(cpuid), ("%s: CPU %u absent", __func__, cpuid));

	dosignal = 0;
	error = 0;
#ifndef __rtems__
	nwsp = DPCPU_ID_PTR(cpuid, nws);
#else /* __rtems__ */
	/* RTEMS uses a single, statically allocated workstream. */
	nwsp = &rtems_bsd_nws;
#endif /* __rtems__ */
	npwp = &nwsp->nws_work[proto];
	NWS_LOCK(nwsp);
	error = netisr_queue_workstream(nwsp, proto, npwp, m, &dosignal);
	NWS_UNLOCK(nwsp);
	/* Deferred signal, as requested by netisr_queue_workstream(). */
	if (dosignal)
		NWS_SIGNAL(nwsp);
	return (error);
}
---|
968 | |
---|
/*
 * Queue a packet for deferred processing by the protocol 'proto', selecting
 * the target CPU from the protocol's policy and the opaque 'source'
 * identifier.  Returns 0 on success or ENOBUFS on drop; a NULL return from
 * netisr_select_cpuid() is treated as the protocol having consumed the
 * mbuf.
 */
int
netisr_queue_src(u_int proto, uintptr_t source, struct mbuf *m)
{
#ifdef NETISR_LOCKING
	struct rm_priotracker tracker;
#endif
	u_int cpuid;
	int error;

	KASSERT(proto < NETISR_MAXPROT,
	    ("%s: invalid proto %u", __func__, proto));

#ifdef NETISR_LOCKING
	NETISR_RLOCK(&tracker);
#endif
	KASSERT(netisr_proto[proto].np_handler != NULL,
	    ("%s: invalid proto %u", __func__, proto));

	/* Force the DEFERRED policy: never direct dispatch from here. */
	m = netisr_select_cpuid(&netisr_proto[proto], NETISR_DISPATCH_DEFERRED,
	    source, m, &cpuid);
	if (m != NULL) {
		KASSERT(!CPU_ABSENT(cpuid), ("%s: CPU %u absent", __func__,
		    cpuid));
		error = netisr_queue_internal(proto, m, cpuid);
	} else
		error = ENOBUFS;
#ifdef NETISR_LOCKING
	NETISR_RUNLOCK(&tracker);
#endif
	return (error);
}
---|
1000 | |
---|
/*
 * Queue a packet for deferred processing using a default (zero) source
 * identifier.
 */
int
netisr_queue(u_int proto, struct mbuf *m)
{

	return (netisr_queue_src(proto, 0, m));
}
---|
1007 | |
---|
/*
 * Dispatch a packet for netisr processing; direct dispatch is permitted by
 * calling context.
 *
 * Depending on the effective dispatch policy this either queues the packet
 * (deferred), runs the protocol handler immediately in this thread
 * (direct), or attempts direct dispatch on the selected CPU with a queued
 * fallback (hybrid).
 */
int
netisr_dispatch_src(u_int proto, uintptr_t source, struct mbuf *m)
{
#ifdef NETISR_LOCKING
	struct rm_priotracker tracker;
#endif
	struct netisr_workstream *nwsp;
	struct netisr_proto *npp;
	struct netisr_work *npwp;
	int dosignal, error;
	u_int cpuid, dispatch_policy;

	KASSERT(proto < NETISR_MAXPROT,
	    ("%s: invalid proto %u", __func__, proto));
#ifdef NETISR_LOCKING
	NETISR_RLOCK(&tracker);
#endif
	npp = &netisr_proto[proto];
	KASSERT(npp->np_handler != NULL, ("%s: invalid proto %u", __func__,
	    proto));

	dispatch_policy = netisr_get_dispatch(npp);
	/*
	 * NOTE(review): with NETISR_LOCKING defined, this early return
	 * skips NETISR_RUNLOCK(&tracker) taken above -- looks like a read
	 * lock leak in that (debug-only) configuration; confirm whether
	 * NETISR_LOCKING is ever enabled.
	 */
	if (dispatch_policy == NETISR_DISPATCH_DEFERRED)
		return (netisr_queue_src(proto, source, m));

	/*
	 * If direct dispatch is forced, then unconditionally dispatch
	 * without a formal CPU selection.  Borrow the current CPU's stats,
	 * even if there's no worker on it.  In this case we don't update
	 * nws_flags because all netisr processing will be source ordered due
	 * to always being forced to directly dispatch.
	 */
	if (dispatch_policy == NETISR_DISPATCH_DIRECT) {
#ifndef __rtems__
		nwsp = DPCPU_PTR(nws);
#else /* __rtems__ */
		nwsp = &rtems_bsd_nws;
#endif /* __rtems__ */
		npwp = &nwsp->nws_work[proto];
		npwp->nw_dispatched++;
		npwp->nw_handled++;
		netisr_proto[proto].np_handler(m);
		error = 0;
		goto out_unlock;
	}

	KASSERT(dispatch_policy == NETISR_DISPATCH_HYBRID,
	    ("%s: unknown dispatch policy (%u)", __func__, dispatch_policy));

	/*
	 * Otherwise, we execute in a hybrid mode where we will try to direct
	 * dispatch if we're on the right CPU and the netisr worker isn't
	 * already running.
	 */
#ifndef __rtems__
	sched_pin();
#endif /* __rtems__ */
	m = netisr_select_cpuid(&netisr_proto[proto], NETISR_DISPATCH_HYBRID,
	    source, m, &cpuid);
	if (m == NULL) {
		/* Protocol callback consumed the packet. */
		error = ENOBUFS;
		goto out_unpin;
	}
	KASSERT(!CPU_ABSENT(cpuid), ("%s: CPU %u absent", __func__, cpuid));
	/* Wrong CPU for direct dispatch: queue to the selected worker. */
	if (cpuid != curcpu)
		goto queue_fallback;
#ifndef __rtems__
	nwsp = DPCPU_PTR(nws);
#else /* __rtems__ */
	nwsp = &rtems_bsd_nws;
#endif /* __rtems__ */
	npwp = &nwsp->nws_work[proto];

	/*-
	 * We are willing to direct dispatch only if three conditions hold:
	 *
	 * (1) The netisr worker isn't already running,
	 * (2) Another thread isn't already directly dispatching, and
	 * (3) The netisr hasn't already been woken up.
	 */
	NWS_LOCK(nwsp);
	if (nwsp->nws_flags & (NWS_RUNNING | NWS_DISPATCHING | NWS_SCHEDULED)) {
		error = netisr_queue_workstream(nwsp, proto, npwp, m,
		    &dosignal);
		NWS_UNLOCK(nwsp);
		if (dosignal)
			NWS_SIGNAL(nwsp);
		goto out_unpin;
	}

	/*
	 * The current thread is now effectively the netisr worker, so set
	 * the dispatching flag to prevent concurrent processing of the
	 * stream from another thread (even the netisr worker), which could
	 * otherwise lead to effective misordering of the stream.
	 */
	nwsp->nws_flags |= NWS_DISPATCHING;
	NWS_UNLOCK(nwsp);
	netisr_proto[proto].np_handler(m);
	NWS_LOCK(nwsp);
	nwsp->nws_flags &= ~NWS_DISPATCHING;
	npwp->nw_handled++;
	npwp->nw_hybrid_dispatched++;

	/*
	 * If other work was enqueued by another thread while we were direct
	 * dispatching, we need to signal the netisr worker to do that work.
	 * In the future, we might want to do some of that work in the
	 * current thread, rather than trigger further context switches.  If
	 * so, we'll want to establish a reasonable bound on the work done in
	 * the "borrowed" context.
	 */
	if (nwsp->nws_pendingbits != 0) {
		nwsp->nws_flags |= NWS_SCHEDULED;
		dosignal = 1;
	} else
		dosignal = 0;
	NWS_UNLOCK(nwsp);
	if (dosignal)
		NWS_SIGNAL(nwsp);
	error = 0;
	goto out_unpin;

queue_fallback:
	error = netisr_queue_internal(proto, m, cpuid);
out_unpin:
#ifndef __rtems__
	sched_unpin();
#endif /* __rtems__ */
out_unlock:
#ifdef NETISR_LOCKING
	NETISR_RUNLOCK(&tracker);
#endif
	return (error);
}
---|
1147 | |
---|
/*
 * Dispatch a packet using a default (zero) source identifier; direct
 * dispatch may occur depending on policy.
 */
int
netisr_dispatch(u_int proto, struct mbuf *m)
{

	return (netisr_dispatch_src(proto, 0, m));
}
---|
1154 | |
---|
#ifdef DEVICE_POLLING
/*
 * Kernel polling borrows a netisr thread to run interface polling in; this
 * function allows kernel polling to request that the netisr thread be
 * scheduled even if no packets are pending for protocols.
 */
void
netisr_sched_poll(void)
{
	struct netisr_workstream *nwsp;

	/* With DEVICE_POLLING only one workstream exists (see netisr_init). */
	nwsp = DPCPU_ID_PTR(nws_array[0], nws);
	NWS_SIGNAL(nwsp);
}
#endif
---|
1170 | |
---|
/*
 * Create the mutex and software-interrupt handler for one netisr
 * workstream on the given CPU, and (on FreeBSD) publish it in the global
 * workstream array used for CPU selection.  Panics if the SWI cannot be
 * created.
 */
static void
netisr_start_swi(u_int cpuid, struct pcpu *pc)
{
	char swiname[12];
	struct netisr_workstream *nwsp;
	int error;

	KASSERT(!CPU_ABSENT(cpuid), ("%s: CPU %u absent", __func__, cpuid));

#ifndef __rtems__
	nwsp = DPCPU_ID_PTR(cpuid, nws);
#else /* __rtems__ */
	/* RTEMS: single static workstream; pc is unused (passed as NULL). */
	nwsp = &rtems_bsd_nws;
#endif /* __rtems__ */
	mtx_init(&nwsp->nws_mtx, "netisr_mtx", NULL, MTX_DEF);
	nwsp->nws_cpu = cpuid;
	snprintf(swiname, sizeof(swiname), "netisr %u", cpuid);
	error = swi_add(&nwsp->nws_intr_event, swiname, swi_net, nwsp,
	    SWI_NET, INTR_MPSAFE, &nwsp->nws_swi_cookie);
	if (error)
		panic("%s: swi_add %d", __func__, error);
#ifndef __rtems__
	pc->pc_netisr = nwsp->nws_intr_event;
	if (netisr_bindthreads) {
		error = intr_event_bind(nwsp->nws_intr_event, cpuid);
		if (error != 0)
			printf("%s: cpu %u: intr_event_bind: %d", __func__,
			    cpuid, error);
	}
	/* Publish the new workstream for CPU selection. */
	NETISR_WLOCK();
	nws_array[nws_count] = nwsp->nws_cpu;
	nws_count++;
	NETISR_WUNLOCK();
#endif /* __rtems__ */
}
---|
1206 | |
---|
/*
 * Initialize the netisr subsystem.  We rely on BSS and static initialization
 * of most fields in global data structures.
 *
 * Start a worker thread for the boot CPU so that we can support network
 * traffic immediately in case the network stack is used before additional
 * CPUs are started (for example, diskless boot).
 */
static void
netisr_init(void *arg)
{
	char tmp[NETISR_DISPATCH_POLICY_MAXSTR];
	u_int dispatch_policy;
	int error;

	/*
	 * NOTE(review): on __rtems__ builds tmp, dispatch_policy and error
	 * are unused because the tunable block below is compiled out --
	 * may provoke unused-variable warnings; confirm against build
	 * flags.
	 */
	KASSERT(curcpu == 0, ("%s: not on CPU 0", __func__));

	NETISR_LOCK_INIT();
	/* Clamp the configured thread count to [1, mp_ncpus]. */
	if (netisr_maxthreads < 1)
		netisr_maxthreads = 1;
	if (netisr_maxthreads > mp_ncpus) {
		printf("netisr_init: forcing maxthreads from %d to %d\n",
		    netisr_maxthreads, mp_ncpus);
		netisr_maxthreads = mp_ncpus;
	}
	if (netisr_defaultqlimit > netisr_maxqlimit) {
		printf("netisr_init: forcing defaultqlimit from %d to %d\n",
		    netisr_defaultqlimit, netisr_maxqlimit);
		netisr_defaultqlimit = netisr_maxqlimit;
	}
#ifdef DEVICE_POLLING
	/*
	 * The device polling code is not yet aware of how to deal with
	 * multiple netisr threads, so for the time being compiling in device
	 * polling disables parallel netisr workers.
	 */
	if (netisr_maxthreads != 1 || netisr_bindthreads != 0) {
		printf("netisr_init: forcing maxthreads to 1 and "
		    "bindthreads to 0 for device polling\n");
		netisr_maxthreads = 1;
		netisr_bindthreads = 0;
	}
#endif

#ifndef __rtems__
	/*
	 * Allow the boot-time tunable to override the compiled-in dispatch
	 * policy; NETISR_DISPATCH_DEFAULT is rejected as a setting.
	 */
	if (TUNABLE_STR_FETCH("net.isr.dispatch", tmp, sizeof(tmp))) {
		error = netisr_dispatch_policy_from_str(tmp,
		    &dispatch_policy);
		if (error == 0 && dispatch_policy == NETISR_DISPATCH_DEFAULT)
			error = EINVAL;
		if (error == 0) {
			netisr_dispatch_policy = dispatch_policy;
			netisr_dispatch_policy_compat();
		} else
			printf(
			    "%s: invalid dispatch policy %s, using default\n",
			    __func__, tmp);
	}
#endif /* __rtems__ */

	/* Start the boot CPU's worker so the stack is usable immediately. */
#ifndef __rtems__
	netisr_start_swi(curcpu, pcpu_find(curcpu));
#else /* __rtems__ */
	netisr_start_swi(0, NULL);
#endif /* __rtems__ */
}
SYSINIT(netisr_init, SI_SUB_SOFTINTR, SI_ORDER_FIRST, netisr_init, NULL);
---|
1274 | |
---|
#ifndef __rtems__
/*
 * Start worker threads for additional CPUs.  No attempt to gracefully handle
 * work reassignment, we don't yet support dynamic reconfiguration.
 */
static void
netisr_start(void *arg)
{
	struct pcpu *pc;

	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
		/* Respect the configured cap on worker threads. */
		if (nws_count >= netisr_maxthreads)
			break;
		/* XXXRW: Is skipping absent CPUs still required here? */
		if (CPU_ABSENT(pc->pc_cpuid))
			continue;
		/* Worker will already be present for boot CPU. */
		if (pc->pc_netisr != NULL)
			continue;
		netisr_start_swi(pc->pc_cpuid, pc);
	}
}
SYSINIT(netisr_start, SI_SUB_SMP, SI_ORDER_MIDDLE, netisr_start, NULL);
#endif /* __rtems__ */
---|
1299 | |
---|
/*
 * Sysctl monitoring for netisr: query a list of registered protocols.
 */
static int
sysctl_netisr_proto(SYSCTL_HANDLER_ARGS)
{
	struct rm_priotracker tracker;
	struct sysctl_netisr_proto *snpp, *snp_array;
	struct netisr_proto *npp;
	u_int counter, proto;
	int error;

	/* Read-only sysctl: reject any attempt to write. */
	if (req->newptr != NULL)
		return (EINVAL);
	/* Allocate for the worst case before taking the read lock. */
	snp_array = malloc(sizeof(*snp_array) * NETISR_MAXPROT, M_TEMP,
	    M_ZERO | M_WAITOK);
	counter = 0;
	NETISR_RLOCK(&tracker);
	for (proto = 0; proto < NETISR_MAXPROT; proto++) {
		npp = &netisr_proto[proto];
		/* Unregistered slots have a NULL name. */
		if (npp->np_name == NULL)
			continue;
		snpp = &snp_array[counter];
		snpp->snp_version = sizeof(*snpp);
		strlcpy(snpp->snp_name, npp->np_name, NETISR_NAMEMAXLEN);
		snpp->snp_proto = proto;
		snpp->snp_qlimit = npp->np_qlimit;
		snpp->snp_policy = npp->np_policy;
		snpp->snp_dispatch = npp->np_dispatch;
		/* Report which optional protocol callbacks are present. */
		if (npp->np_m2flow != NULL)
			snpp->snp_flags |= NETISR_SNP_FLAGS_M2FLOW;
		if (npp->np_m2cpuid != NULL)
			snpp->snp_flags |= NETISR_SNP_FLAGS_M2CPUID;
		if (npp->np_drainedcpu != NULL)
			snpp->snp_flags |= NETISR_SNP_FLAGS_DRAINEDCPU;
		counter++;
	}
	NETISR_RUNLOCK(&tracker);
	KASSERT(counter <= NETISR_MAXPROT,
	    ("sysctl_netisr_proto: counter too big (%d)", counter));
	/* Copy out only the populated prefix of the array. */
	error = SYSCTL_OUT(req, snp_array, sizeof(*snp_array) * counter);
	free(snp_array, M_TEMP);
	return (error);
}

SYSCTL_PROC(_net_isr, OID_AUTO, proto,
    CTLFLAG_RD|CTLTYPE_STRUCT|CTLFLAG_MPSAFE, 0, 0, sysctl_netisr_proto,
    "S,sysctl_netisr_proto",
    "Return list of protocols registered with netisr");
---|
1349 | |
---|
1350 | /* |
---|
1351 | * Sysctl monitoring for netisr: query a list of workstreams. |
---|
1352 | */ |
---|
1353 | static int |
---|
1354 | sysctl_netisr_workstream(SYSCTL_HANDLER_ARGS) |
---|
1355 | { |
---|
1356 | struct rm_priotracker tracker; |
---|
1357 | struct sysctl_netisr_workstream *snwsp, *snws_array; |
---|
1358 | struct netisr_workstream *nwsp; |
---|
1359 | u_int counter, cpuid; |
---|
1360 | int error; |
---|
1361 | |
---|
1362 | if (req->newptr != NULL) |
---|
1363 | return (EINVAL); |
---|
1364 | snws_array = malloc(sizeof(*snws_array) * MAXCPU, M_TEMP, |
---|
1365 | M_ZERO | M_WAITOK); |
---|
1366 | counter = 0; |
---|
1367 | NETISR_RLOCK(&tracker); |
---|
1368 | CPU_FOREACH(cpuid) { |
---|
1369 | #ifndef __rtems__ |
---|
1370 | nwsp = DPCPU_ID_PTR(cpuid, nws); |
---|
1371 | #else /* __rtems__ */ |
---|
1372 | nwsp = &rtems_bsd_nws; |
---|
1373 | #endif /* __rtems__ */ |
---|
1374 | if (nwsp->nws_intr_event == NULL) |
---|
1375 | continue; |
---|
1376 | NWS_LOCK(nwsp); |
---|
1377 | snwsp = &snws_array[counter]; |
---|
1378 | snwsp->snws_version = sizeof(*snwsp); |
---|
1379 | |
---|
1380 | /* |
---|
1381 | * For now, we equate workstream IDs and CPU IDs in the |
---|
1382 | * kernel, but expose them independently to userspace in case |
---|
1383 | * that assumption changes in the future. |
---|
1384 | */ |
---|
1385 | snwsp->snws_wsid = cpuid; |
---|
1386 | snwsp->snws_cpu = cpuid; |
---|
1387 | if (nwsp->nws_intr_event != NULL) |
---|
1388 | snwsp->snws_flags |= NETISR_SNWS_FLAGS_INTR; |
---|
1389 | NWS_UNLOCK(nwsp); |
---|
1390 | counter++; |
---|
1391 | } |
---|
1392 | NETISR_RUNLOCK(&tracker); |
---|
1393 | KASSERT(counter <= MAXCPU, |
---|
1394 | ("sysctl_netisr_workstream: counter too big (%d)", counter)); |
---|
1395 | error = SYSCTL_OUT(req, snws_array, sizeof(*snws_array) * counter); |
---|
1396 | free(snws_array, M_TEMP); |
---|
1397 | return (error); |
---|
1398 | } |
---|
1399 | |
---|
1400 | SYSCTL_PROC(_net_isr, OID_AUTO, workstream, |
---|
1401 | CTLFLAG_RD|CTLTYPE_STRUCT|CTLFLAG_MPSAFE, 0, 0, sysctl_netisr_workstream, |
---|
1402 | "S,sysctl_netisr_workstream", |
---|
1403 | "Return list of workstreams implemented by netisr"); |
---|
1404 | |
---|
/*
 * Sysctl monitoring for netisr: query per-protocol data across all
 * workstreams.
 */
static int
sysctl_netisr_work(SYSCTL_HANDLER_ARGS)
{
	struct rm_priotracker tracker;
	struct sysctl_netisr_work *snwp, *snw_array;
	struct netisr_workstream *nwsp;
	struct netisr_proto *npp;
	struct netisr_work *nwp;
	u_int counter, cpuid, proto;
	int error;

	/* Read-only sysctl: reject any attempt to write. */
	if (req->newptr != NULL)
		return (EINVAL);
	/* Worst case: every protocol registered on every CPU. */
	snw_array = malloc(sizeof(*snw_array) * MAXCPU * NETISR_MAXPROT,
	    M_TEMP, M_ZERO | M_WAITOK);
	counter = 0;
	NETISR_RLOCK(&tracker);
	CPU_FOREACH(cpuid) {
#ifndef __rtems__
		nwsp = DPCPU_ID_PTR(cpuid, nws);
#else /* __rtems__ */
		nwsp = &rtems_bsd_nws;
#endif /* __rtems__ */
		/* Skip CPUs without a started netisr worker. */
		if (nwsp->nws_intr_event == NULL)
			continue;
		NWS_LOCK(nwsp);
		for (proto = 0; proto < NETISR_MAXPROT; proto++) {
			npp = &netisr_proto[proto];
			/* Unregistered slots have a NULL name. */
			if (npp->np_name == NULL)
				continue;
			nwp = &nwsp->nws_work[proto];
			snwp = &snw_array[counter];
			snwp->snw_version = sizeof(*snwp);
			snwp->snw_wsid = cpuid;		/* See comment above. */
			snwp->snw_proto = proto;
			snwp->snw_len = nwp->nw_len;
			snwp->snw_watermark = nwp->nw_watermark;
			snwp->snw_dispatched = nwp->nw_dispatched;
			snwp->snw_hybrid_dispatched =
			    nwp->nw_hybrid_dispatched;
			snwp->snw_qdrops = nwp->nw_qdrops;
			snwp->snw_queued = nwp->nw_queued;
			snwp->snw_handled = nwp->nw_handled;
			counter++;
		}
		NWS_UNLOCK(nwsp);
	}
	KASSERT(counter <= MAXCPU * NETISR_MAXPROT,
	    ("sysctl_netisr_work: counter too big (%d)", counter));
	NETISR_RUNLOCK(&tracker);
	/* Copy out only the populated prefix of the array. */
	error = SYSCTL_OUT(req, snw_array, sizeof(*snw_array) * counter);
	free(snw_array, M_TEMP);
	return (error);
}

SYSCTL_PROC(_net_isr, OID_AUTO, work,
    CTLFLAG_RD|CTLTYPE_STRUCT|CTLFLAG_MPSAFE, 0, 0, sysctl_netisr_work,
    "S,sysctl_netisr_work",
    "Return list of per-workstream, per-protocol work in netisr");
---|
1468 | |
---|
#ifdef DDB
/*
 * DDB command: print per-CPU, per-protocol netisr queue state and counters
 * as a human-readable table.
 */
DB_SHOW_COMMAND(netisr, db_show_netisr)
{
	struct netisr_workstream *nwsp;
	struct netisr_work *nwp;
	int first, proto;
	u_int cpuid;

	db_printf("%3s %6s %5s %5s %5s %8s %8s %8s %8s\n", "CPU", "Proto",
	    "Len", "WMark", "Max", "Disp", "HDisp", "Drop", "Queue");
	CPU_FOREACH(cpuid) {
#ifndef __rtems__
		nwsp = DPCPU_ID_PTR(cpuid, nws);
#else /* __rtems__ */
		nwsp = &rtems_bsd_nws;
#endif /* __rtems__ */
		/* Skip CPUs that never started a netisr worker. */
		if (nwsp->nws_intr_event == NULL)
			continue;
		first = 1;
		for (proto = 0; proto < NETISR_MAXPROT; proto++) {
			if (netisr_proto[proto].np_handler == NULL)
				continue;
			nwp = &nwsp->nws_work[proto];
			/* Print the CPU ID only on the first row per CPU. */
			if (first) {
				db_printf("%3d ", cpuid);
				first = 0;
			} else
				db_printf("%3s ", "");
			db_printf(
			    "%6s %5d %5d %5d %8ju %8ju %8ju %8ju\n",
			    netisr_proto[proto].np_name, nwp->nw_len,
			    nwp->nw_watermark, nwp->nw_qlimit,
			    nwp->nw_dispatched, nwp->nw_hybrid_dispatched,
			    nwp->nw_qdrops, nwp->nw_queued);
		}
	}
}
#endif
---|