1 | #include <machine/rtems-bsd-kernel-space.h> |
---|
2 | |
---|
3 | /* $FreeBSD$ */ |
---|
4 | /* $KAME: altq_subr.c,v 1.21 2003/11/06 06:32:53 kjc Exp $ */ |
---|
5 | |
---|
6 | /* |
---|
7 | * Copyright (C) 1997-2003 |
---|
8 | * Sony Computer Science Laboratories Inc. All rights reserved. |
---|
9 | * |
---|
10 | * Redistribution and use in source and binary forms, with or without |
---|
11 | * modification, are permitted provided that the following conditions |
---|
12 | * are met: |
---|
13 | * 1. Redistributions of source code must retain the above copyright |
---|
14 | * notice, this list of conditions and the following disclaimer. |
---|
15 | * 2. Redistributions in binary form must reproduce the above copyright |
---|
16 | * notice, this list of conditions and the following disclaimer in the |
---|
17 | * documentation and/or other materials provided with the distribution. |
---|
18 | * |
---|
19 | * THIS SOFTWARE IS PROVIDED BY SONY CSL AND CONTRIBUTORS ``AS IS'' AND |
---|
20 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
---|
21 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
---|
22 | * ARE DISCLAIMED. IN NO EVENT SHALL SONY CSL OR CONTRIBUTORS BE LIABLE |
---|
23 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
---|
24 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
---|
25 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
---|
26 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
---|
27 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
---|
28 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
---|
29 | * SUCH DAMAGE. |
---|
30 | */ |
---|
31 | |
---|
32 | #if defined(__FreeBSD__) || defined(__NetBSD__) |
---|
33 | #include <rtems/bsd/local/opt_altq.h> |
---|
34 | #include <rtems/bsd/local/opt_inet.h> |
---|
35 | #ifdef __FreeBSD__ |
---|
36 | #include <rtems/bsd/local/opt_inet6.h> |
---|
37 | #endif |
---|
38 | #endif /* __FreeBSD__ || __NetBSD__ */ |
---|
39 | |
---|
40 | #include <rtems/bsd/sys/param.h> |
---|
41 | #include <sys/malloc.h> |
---|
42 | #include <sys/mbuf.h> |
---|
43 | #include <sys/systm.h> |
---|
44 | #include <sys/proc.h> |
---|
45 | #include <sys/socket.h> |
---|
46 | #include <sys/socketvar.h> |
---|
47 | #include <sys/kernel.h> |
---|
48 | #include <rtems/bsd/sys/errno.h> |
---|
49 | #include <sys/syslog.h> |
---|
50 | #include <sys/sysctl.h> |
---|
51 | #include <sys/queue.h> |
---|
52 | |
---|
53 | #include <net/if.h> |
---|
54 | #include <net/if_dl.h> |
---|
55 | #include <net/if_types.h> |
---|
56 | #ifdef __FreeBSD__ |
---|
57 | #include <net/vnet.h> |
---|
58 | #endif |
---|
59 | |
---|
60 | #include <netinet/in.h> |
---|
61 | #include <netinet/in_systm.h> |
---|
62 | #include <netinet/ip.h> |
---|
63 | #ifdef INET6 |
---|
64 | #include <netinet/ip6.h> |
---|
65 | #endif |
---|
66 | #include <netinet/tcp.h> |
---|
67 | #include <netinet/udp.h> |
---|
68 | |
---|
69 | #include <net/pfvar.h> |
---|
70 | #include <altq/altq.h> |
---|
71 | #ifdef ALTQ3_COMPAT |
---|
72 | #include <altq/altq_conf.h> |
---|
73 | #endif |
---|
74 | |
---|
75 | /* machine dependent clock related includes */ |
---|
76 | #ifdef __FreeBSD__ |
---|
77 | #include <sys/bus.h> |
---|
78 | #include <sys/cpu.h> |
---|
79 | #include <sys/eventhandler.h> |
---|
80 | #include <machine/clock.h> |
---|
81 | #endif |
---|
82 | #if defined(__amd64__) || defined(__i386__) |
---|
83 | #include <machine/cpufunc.h> /* for pentium tsc */ |
---|
84 | #include <machine/specialreg.h> /* for CPUID_TSC */ |
---|
85 | #ifdef __FreeBSD__ |
---|
86 | #include <machine/md_var.h> /* for cpu_feature */ |
---|
87 | #elif defined(__NetBSD__) || defined(__OpenBSD__) |
---|
88 | #include <machine/cpu.h> /* for cpu_feature */ |
---|
89 | #endif |
---|
90 | #endif /* __amd64 || __i386__ */ |
---|
91 | |
---|
92 | /* |
---|
93 | * internal function prototypes |
---|
94 | */ |
---|
95 | static void tbr_timeout(void *); |
---|
96 | int (*altq_input)(struct mbuf *, int) = NULL; |
---|
97 | static struct mbuf *tbr_dequeue(struct ifaltq *, int); |
---|
98 | static int tbr_timer = 0; /* token bucket regulator timer */ |
---|
99 | #if !defined(__FreeBSD__) || (__FreeBSD_version < 600000) |
---|
100 | static struct callout tbr_callout = CALLOUT_INITIALIZER; |
---|
101 | #else |
---|
102 | static struct callout tbr_callout; |
---|
103 | #endif |
---|
104 | |
---|
105 | #ifdef ALTQ3_CLFIER_COMPAT |
---|
106 | static int extract_ports4(struct mbuf *, struct ip *, struct flowinfo_in *); |
---|
107 | #ifdef INET6 |
---|
108 | static int extract_ports6(struct mbuf *, struct ip6_hdr *, |
---|
109 | struct flowinfo_in6 *); |
---|
110 | #endif |
---|
111 | static int apply_filter4(u_int32_t, struct flow_filter *, |
---|
112 | struct flowinfo_in *); |
---|
113 | static int apply_ppfilter4(u_int32_t, struct flow_filter *, |
---|
114 | struct flowinfo_in *); |
---|
115 | #ifdef INET6 |
---|
116 | static int apply_filter6(u_int32_t, struct flow_filter6 *, |
---|
117 | struct flowinfo_in6 *); |
---|
118 | #endif |
---|
119 | static int apply_tosfilter4(u_int32_t, struct flow_filter *, |
---|
120 | struct flowinfo_in *); |
---|
121 | static u_long get_filt_handle(struct acc_classifier *, int); |
---|
122 | static struct acc_filter *filth_to_filtp(struct acc_classifier *, u_long); |
---|
123 | static u_int32_t filt2fibmask(struct flow_filter *); |
---|
124 | |
---|
125 | static void ip4f_cache(struct ip *, struct flowinfo_in *); |
---|
126 | static int ip4f_lookup(struct ip *, struct flowinfo_in *); |
---|
127 | static int ip4f_init(void); |
---|
128 | static struct ip4_frag *ip4f_alloc(void); |
---|
129 | static void ip4f_free(struct ip4_frag *); |
---|
130 | #endif /* ALTQ3_CLFIER_COMPAT */ |
---|
131 | |
---|
132 | /* |
---|
133 | * alternate queueing support routines |
---|
134 | */ |
---|
135 | |
---|
136 | /* look up the queue state by the interface name and the queueing type. */ |
---|
137 | void * |
---|
138 | altq_lookup(name, type) |
---|
139 | char *name; |
---|
140 | int type; |
---|
141 | { |
---|
142 | struct ifnet *ifp; |
---|
143 | |
---|
144 | if ((ifp = ifunit(name)) != NULL) { |
---|
145 | /* read if_snd unlocked */ |
---|
146 | if (type != ALTQT_NONE && ifp->if_snd.altq_type == type) |
---|
147 | return (ifp->if_snd.altq_disc); |
---|
148 | } |
---|
149 | |
---|
150 | return NULL; |
---|
151 | } |
---|
152 | |
---|
153 | int |
---|
154 | altq_attach(ifq, type, discipline, enqueue, dequeue, request, clfier, classify) |
---|
155 | struct ifaltq *ifq; |
---|
156 | int type; |
---|
157 | void *discipline; |
---|
158 | int (*enqueue)(struct ifaltq *, struct mbuf *, struct altq_pktattr *); |
---|
159 | struct mbuf *(*dequeue)(struct ifaltq *, int); |
---|
160 | int (*request)(struct ifaltq *, int, void *); |
---|
161 | void *clfier; |
---|
162 | void *(*classify)(void *, struct mbuf *, int); |
---|
163 | { |
---|
164 | IFQ_LOCK(ifq); |
---|
165 | if (!ALTQ_IS_READY(ifq)) { |
---|
166 | IFQ_UNLOCK(ifq); |
---|
167 | return ENXIO; |
---|
168 | } |
---|
169 | |
---|
170 | #ifdef ALTQ3_COMPAT |
---|
171 | /* |
---|
172 | * pfaltq can override the existing discipline, but altq3 cannot. |
---|
173 | * check these if clfier is not NULL (which implies altq3). |
---|
174 | */ |
---|
175 | if (clfier != NULL) { |
---|
176 | if (ALTQ_IS_ENABLED(ifq)) { |
---|
177 | IFQ_UNLOCK(ifq); |
---|
178 | return EBUSY; |
---|
179 | } |
---|
180 | if (ALTQ_IS_ATTACHED(ifq)) { |
---|
181 | IFQ_UNLOCK(ifq); |
---|
182 | return EEXIST; |
---|
183 | } |
---|
184 | } |
---|
185 | #endif |
---|
186 | ifq->altq_type = type; |
---|
187 | ifq->altq_disc = discipline; |
---|
188 | ifq->altq_enqueue = enqueue; |
---|
189 | ifq->altq_dequeue = dequeue; |
---|
190 | ifq->altq_request = request; |
---|
191 | ifq->altq_clfier = clfier; |
---|
192 | ifq->altq_classify = classify; |
---|
193 | ifq->altq_flags &= (ALTQF_CANTCHANGE|ALTQF_ENABLED); |
---|
194 | #ifdef ALTQ3_COMPAT |
---|
195 | #ifdef ALTQ_KLD |
---|
196 | altq_module_incref(type); |
---|
197 | #endif |
---|
198 | #endif |
---|
199 | IFQ_UNLOCK(ifq); |
---|
200 | return 0; |
---|
201 | } |
---|
202 | |
---|
203 | int |
---|
204 | altq_detach(ifq) |
---|
205 | struct ifaltq *ifq; |
---|
206 | { |
---|
207 | IFQ_LOCK(ifq); |
---|
208 | |
---|
209 | if (!ALTQ_IS_READY(ifq)) { |
---|
210 | IFQ_UNLOCK(ifq); |
---|
211 | return ENXIO; |
---|
212 | } |
---|
213 | if (ALTQ_IS_ENABLED(ifq)) { |
---|
214 | IFQ_UNLOCK(ifq); |
---|
215 | return EBUSY; |
---|
216 | } |
---|
217 | if (!ALTQ_IS_ATTACHED(ifq)) { |
---|
218 | IFQ_UNLOCK(ifq); |
---|
219 | return (0); |
---|
220 | } |
---|
221 | #ifdef ALTQ3_COMPAT |
---|
222 | #ifdef ALTQ_KLD |
---|
223 | altq_module_declref(ifq->altq_type); |
---|
224 | #endif |
---|
225 | #endif |
---|
226 | |
---|
227 | ifq->altq_type = ALTQT_NONE; |
---|
228 | ifq->altq_disc = NULL; |
---|
229 | ifq->altq_enqueue = NULL; |
---|
230 | ifq->altq_dequeue = NULL; |
---|
231 | ifq->altq_request = NULL; |
---|
232 | ifq->altq_clfier = NULL; |
---|
233 | ifq->altq_classify = NULL; |
---|
234 | ifq->altq_flags &= ALTQF_CANTCHANGE; |
---|
235 | |
---|
236 | IFQ_UNLOCK(ifq); |
---|
237 | return 0; |
---|
238 | } |
---|
239 | |
---|
240 | int |
---|
241 | altq_enable(ifq) |
---|
242 | struct ifaltq *ifq; |
---|
243 | { |
---|
244 | int s; |
---|
245 | |
---|
246 | IFQ_LOCK(ifq); |
---|
247 | |
---|
248 | if (!ALTQ_IS_READY(ifq)) { |
---|
249 | IFQ_UNLOCK(ifq); |
---|
250 | return ENXIO; |
---|
251 | } |
---|
252 | if (ALTQ_IS_ENABLED(ifq)) { |
---|
253 | IFQ_UNLOCK(ifq); |
---|
254 | return 0; |
---|
255 | } |
---|
256 | |
---|
257 | #ifdef __NetBSD__ |
---|
258 | s = splnet(); |
---|
259 | #else |
---|
260 | s = splimp(); |
---|
261 | #endif |
---|
262 | IFQ_PURGE_NOLOCK(ifq); |
---|
263 | ASSERT(ifq->ifq_len == 0); |
---|
264 | ifq->ifq_drv_maxlen = 0; /* disable bulk dequeue */ |
---|
265 | ifq->altq_flags |= ALTQF_ENABLED; |
---|
266 | if (ifq->altq_clfier != NULL) |
---|
267 | ifq->altq_flags |= ALTQF_CLASSIFY; |
---|
268 | splx(s); |
---|
269 | |
---|
270 | IFQ_UNLOCK(ifq); |
---|
271 | return 0; |
---|
272 | } |
---|
273 | |
---|
274 | int |
---|
275 | altq_disable(ifq) |
---|
276 | struct ifaltq *ifq; |
---|
277 | { |
---|
278 | int s; |
---|
279 | |
---|
280 | IFQ_LOCK(ifq); |
---|
281 | if (!ALTQ_IS_ENABLED(ifq)) { |
---|
282 | IFQ_UNLOCK(ifq); |
---|
283 | return 0; |
---|
284 | } |
---|
285 | |
---|
286 | #ifdef __NetBSD__ |
---|
287 | s = splnet(); |
---|
288 | #else |
---|
289 | s = splimp(); |
---|
290 | #endif |
---|
291 | IFQ_PURGE_NOLOCK(ifq); |
---|
292 | ASSERT(ifq->ifq_len == 0); |
---|
293 | ifq->altq_flags &= ~(ALTQF_ENABLED|ALTQF_CLASSIFY); |
---|
294 | splx(s); |
---|
295 | |
---|
296 | IFQ_UNLOCK(ifq); |
---|
297 | return 0; |
---|
298 | } |
---|
299 | |
---|
300 | #ifdef ALTQ_DEBUG |
---|
/*
 * Report a failed ALTQ assertion (expression text plus the source file
 * and line that tripped it) and panic the system.  Never returns.
 */
void
altq_assert(const char *file, int line, const char *failedexpr)
{
	printf("altq assertion \"%s\" failed: file \"%s\", line %d\n",
	    failedexpr, file, line);
	panic("altq assertion");
	/* NOTREACHED */
}
---|
311 | #endif |
---|
312 | |
---|
313 | /* |
---|
314 | * internal representation of token bucket parameters |
---|
315 | * rate: byte_per_unittime << 32 |
---|
316 | * (((bits_per_sec) / 8) << 32) / machclk_freq |
---|
317 | * depth: byte << 32 |
---|
318 | * |
---|
319 | */ |
---|
320 | #define TBR_SHIFT 32 |
---|
321 | #define TBR_SCALE(x) ((int64_t)(x) << TBR_SHIFT) |
---|
322 | #define TBR_UNSCALE(x) ((x) >> TBR_SHIFT) |
---|
323 | |
---|
/*
 * Token-bucket regulated dequeue.  Wraps the discipline's dequeue
 * routine (or the classic ifqueue when ALTQ is not enabled) and only
 * releases a packet while the bucket holds a positive token count.
 * Must be called with the queue lock held.  Returns the dequeued (or
 * polled) mbuf, or NULL when the regulator forbids a dequeue or the
 * queue is empty.
 */
static struct mbuf *
tbr_dequeue(ifq, op)
	struct ifaltq *ifq;
	int op;
{
	struct tb_regulator *tbr;
	struct mbuf *m;
	int64_t interval;	/* machclk ticks since the last refill */
	u_int64_t now;

	IFQ_LOCK_ASSERT(ifq);
	tbr = ifq->altq_tbr;
	if (op == ALTDQ_REMOVE && tbr->tbr_lastop == ALTDQ_POLL) {
		/* if this is a remove after poll, bypass tbr check */
	} else {
		/* update token only when it is negative */
		if (tbr->tbr_token <= 0) {
			now = read_machclk();
			interval = now - tbr->tbr_last;
			if (interval >= tbr->tbr_filluptime)
				tbr->tbr_token = tbr->tbr_depth;
			else {
				/* partial refill: rate is scaled by TBR_SHIFT */
				tbr->tbr_token += interval * tbr->tbr_rate;
				if (tbr->tbr_token > tbr->tbr_depth)
					tbr->tbr_token = tbr->tbr_depth;
			}
			tbr->tbr_last = now;
		}
		/* if token is still negative, don't allow dequeue */
		if (tbr->tbr_token <= 0)
			return (NULL);
	}

	/* fetch the packet from the discipline, or the plain ifqueue */
	if (ALTQ_IS_ENABLED(ifq))
		m = (*ifq->altq_dequeue)(ifq, op);
	else {
		if (op == ALTDQ_POLL)
			_IF_POLL(ifq, m);
		else
			_IF_DEQUEUE(ifq, m);
	}

	/* charge the bucket only for packets actually removed */
	if (m != NULL && op == ALTDQ_REMOVE)
		tbr->tbr_token -= TBR_SCALE(m_pktlen(m));
	tbr->tbr_lastop = op;
	return (m);
}
---|
371 | |
---|
372 | /* |
---|
373 | * set a token bucket regulator. |
---|
374 | * if the specified rate is zero, the token bucket regulator is deleted. |
---|
375 | */ |
---|
376 | int |
---|
377 | tbr_set(ifq, profile) |
---|
378 | struct ifaltq *ifq; |
---|
379 | struct tb_profile *profile; |
---|
380 | { |
---|
381 | struct tb_regulator *tbr, *otbr; |
---|
382 | |
---|
383 | if (tbr_dequeue_ptr == NULL) |
---|
384 | tbr_dequeue_ptr = tbr_dequeue; |
---|
385 | |
---|
386 | if (machclk_freq == 0) |
---|
387 | init_machclk(); |
---|
388 | if (machclk_freq == 0) { |
---|
389 | printf("tbr_set: no cpu clock available!\n"); |
---|
390 | return (ENXIO); |
---|
391 | } |
---|
392 | |
---|
393 | IFQ_LOCK(ifq); |
---|
394 | if (profile->rate == 0) { |
---|
395 | /* delete this tbr */ |
---|
396 | if ((tbr = ifq->altq_tbr) == NULL) { |
---|
397 | IFQ_UNLOCK(ifq); |
---|
398 | return (ENOENT); |
---|
399 | } |
---|
400 | ifq->altq_tbr = NULL; |
---|
401 | free(tbr, M_DEVBUF); |
---|
402 | IFQ_UNLOCK(ifq); |
---|
403 | return (0); |
---|
404 | } |
---|
405 | |
---|
406 | IFQ_UNLOCK(ifq); |
---|
407 | tbr = malloc(sizeof(struct tb_regulator), |
---|
408 | M_DEVBUF, M_WAITOK); |
---|
409 | if (tbr == NULL) { /* can not happen */ |
---|
410 | IFQ_UNLOCK(ifq); |
---|
411 | return (ENOMEM); |
---|
412 | } |
---|
413 | bzero(tbr, sizeof(struct tb_regulator)); |
---|
414 | |
---|
415 | tbr->tbr_rate = TBR_SCALE(profile->rate / 8) / machclk_freq; |
---|
416 | tbr->tbr_depth = TBR_SCALE(profile->depth); |
---|
417 | if (tbr->tbr_rate > 0) |
---|
418 | tbr->tbr_filluptime = tbr->tbr_depth / tbr->tbr_rate; |
---|
419 | else |
---|
420 | tbr->tbr_filluptime = 0xffffffffffffffffLL; |
---|
421 | tbr->tbr_token = tbr->tbr_depth; |
---|
422 | tbr->tbr_last = read_machclk(); |
---|
423 | tbr->tbr_lastop = ALTDQ_REMOVE; |
---|
424 | |
---|
425 | IFQ_LOCK(ifq); |
---|
426 | otbr = ifq->altq_tbr; |
---|
427 | ifq->altq_tbr = tbr; /* set the new tbr */ |
---|
428 | |
---|
429 | if (otbr != NULL) |
---|
430 | free(otbr, M_DEVBUF); |
---|
431 | else { |
---|
432 | if (tbr_timer == 0) { |
---|
433 | CALLOUT_RESET(&tbr_callout, 1, tbr_timeout, (void *)0); |
---|
434 | tbr_timer = 1; |
---|
435 | } |
---|
436 | } |
---|
437 | IFQ_UNLOCK(ifq); |
---|
438 | return (0); |
---|
439 | } |
---|
440 | |
---|
441 | /* |
---|
442 | * tbr_timeout goes through the interface list, and kicks the drivers |
---|
443 | * if necessary. |
---|
444 | * |
---|
445 | * MPSAFE |
---|
446 | */ |
---|
static void
tbr_timeout(arg)
	void *arg;	/* unused callout argument */
{
#ifdef __FreeBSD__
	VNET_ITERATOR_DECL(vnet_iter);
#endif
	struct ifnet *ifp;
	int active, s;	/* active: count of interfaces with a tbr enabled */

	active = 0;
#ifdef __NetBSD__
	s = splnet();
#else
	s = splimp();
#endif
#ifdef __FreeBSD__
	/* NB: the loop braces below are closed inside the matching #ifdef */
	IFNET_RLOCK_NOSLEEP();
	VNET_LIST_RLOCK_NOSLEEP();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
#endif
	for (ifp = TAILQ_FIRST(&V_ifnet); ifp;
	    ifp = TAILQ_NEXT(ifp, if_list)) {
		/* read from if_snd unlocked */
		if (!TBR_IS_ENABLED(&ifp->if_snd))
			continue;
		active++;
		/* kick the driver if it has pending packets to send */
		if (!IFQ_IS_EMPTY(&ifp->if_snd) &&
		    ifp->if_start != NULL)
			(*ifp->if_start)(ifp);
	}
#ifdef __FreeBSD__
	CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK_NOSLEEP();
	IFNET_RUNLOCK_NOSLEEP();
#endif
	splx(s);
	/* re-arm only while at least one regulator is still in use */
	if (active > 0)
		CALLOUT_RESET(&tbr_callout, 1, tbr_timeout, (void *)0);
	else
		tbr_timer = 0;	/* don't need tbr_timer anymore */
}
---|
491 | |
---|
492 | /* |
---|
493 | * get token bucket regulator profile |
---|
494 | */ |
---|
495 | int |
---|
496 | tbr_get(ifq, profile) |
---|
497 | struct ifaltq *ifq; |
---|
498 | struct tb_profile *profile; |
---|
499 | { |
---|
500 | struct tb_regulator *tbr; |
---|
501 | |
---|
502 | IFQ_LOCK(ifq); |
---|
503 | if ((tbr = ifq->altq_tbr) == NULL) { |
---|
504 | profile->rate = 0; |
---|
505 | profile->depth = 0; |
---|
506 | } else { |
---|
507 | profile->rate = |
---|
508 | (u_int)TBR_UNSCALE(tbr->tbr_rate * 8 * machclk_freq); |
---|
509 | profile->depth = (u_int)TBR_UNSCALE(tbr->tbr_depth); |
---|
510 | } |
---|
511 | IFQ_UNLOCK(ifq); |
---|
512 | return (0); |
---|
513 | } |
---|
514 | |
---|
515 | /* |
---|
516 | * attach a discipline to the interface. if one already exists, it is |
---|
517 | * overridden. |
---|
518 | * Locking is done in the discipline specific attach functions. Basically |
---|
519 | * they call back to altq_attach which takes care of the attach and locking. |
---|
520 | */ |
---|
521 | int |
---|
522 | altq_pfattach(struct pf_altq *a) |
---|
523 | { |
---|
524 | int error = 0; |
---|
525 | |
---|
526 | switch (a->scheduler) { |
---|
527 | case ALTQT_NONE: |
---|
528 | break; |
---|
529 | #ifdef ALTQ_CBQ |
---|
530 | case ALTQT_CBQ: |
---|
531 | error = cbq_pfattach(a); |
---|
532 | break; |
---|
533 | #endif |
---|
534 | #ifdef ALTQ_PRIQ |
---|
535 | case ALTQT_PRIQ: |
---|
536 | error = priq_pfattach(a); |
---|
537 | break; |
---|
538 | #endif |
---|
539 | #ifdef ALTQ_HFSC |
---|
540 | case ALTQT_HFSC: |
---|
541 | error = hfsc_pfattach(a); |
---|
542 | break; |
---|
543 | #endif |
---|
544 | default: |
---|
545 | error = ENXIO; |
---|
546 | } |
---|
547 | |
---|
548 | return (error); |
---|
549 | } |
---|
550 | |
---|
551 | /* |
---|
552 | * detach a discipline from the interface. |
---|
553 | * it is possible that the discipline was already overridden by another |
---|
554 | * discipline. |
---|
555 | */ |
---|
int
altq_pfdetach(struct pf_altq *a)
{
	struct ifnet *ifp;
	int s, error = 0;

	/* the interface named in the pf_altq entry must still exist */
	if ((ifp = ifunit(a->ifname)) == NULL)
		return (EINVAL);

	/* if this discipline is no longer referenced, just return */
	/* read unlocked from if_snd */
	if (a->altq_disc == NULL || a->altq_disc != ifp->if_snd.altq_disc)
		return (0);

#ifdef __NetBSD__
	s = splnet();
#else
	s = splimp();
#endif
	/* read unlocked from if_snd, _disable and _detach take care */
	if (ALTQ_IS_ENABLED(&ifp->if_snd))
		error = altq_disable(&ifp->if_snd);
	/* only detach when the disable step (if any) succeeded */
	if (error == 0)
		error = altq_detach(&ifp->if_snd);
	splx(s);

	return (error);
}
---|
584 | |
---|
585 | /* |
---|
586 | * add a discipline or a queue |
---|
587 | * Locking is done in the discipline specific functions with regards to |
---|
588 | * malloc with WAITOK, also it is not yet clear which lock to use. |
---|
589 | */ |
---|
590 | int |
---|
591 | altq_add(struct pf_altq *a) |
---|
592 | { |
---|
593 | int error = 0; |
---|
594 | |
---|
595 | if (a->qname[0] != 0) |
---|
596 | return (altq_add_queue(a)); |
---|
597 | |
---|
598 | if (machclk_freq == 0) |
---|
599 | init_machclk(); |
---|
600 | if (machclk_freq == 0) |
---|
601 | panic("altq_add: no cpu clock"); |
---|
602 | |
---|
603 | switch (a->scheduler) { |
---|
604 | #ifdef ALTQ_CBQ |
---|
605 | case ALTQT_CBQ: |
---|
606 | error = cbq_add_altq(a); |
---|
607 | break; |
---|
608 | #endif |
---|
609 | #ifdef ALTQ_PRIQ |
---|
610 | case ALTQT_PRIQ: |
---|
611 | error = priq_add_altq(a); |
---|
612 | break; |
---|
613 | #endif |
---|
614 | #ifdef ALTQ_HFSC |
---|
615 | case ALTQT_HFSC: |
---|
616 | error = hfsc_add_altq(a); |
---|
617 | break; |
---|
618 | #endif |
---|
619 | default: |
---|
620 | error = ENXIO; |
---|
621 | } |
---|
622 | |
---|
623 | return (error); |
---|
624 | } |
---|
625 | |
---|
626 | /* |
---|
627 | * remove a discipline or a queue |
---|
628 | * It is yet unclear what lock to use to protect this operation, the |
---|
629 | * discipline specific functions will determine and grab it |
---|
630 | */ |
---|
631 | int |
---|
632 | altq_remove(struct pf_altq *a) |
---|
633 | { |
---|
634 | int error = 0; |
---|
635 | |
---|
636 | if (a->qname[0] != 0) |
---|
637 | return (altq_remove_queue(a)); |
---|
638 | |
---|
639 | switch (a->scheduler) { |
---|
640 | #ifdef ALTQ_CBQ |
---|
641 | case ALTQT_CBQ: |
---|
642 | error = cbq_remove_altq(a); |
---|
643 | break; |
---|
644 | #endif |
---|
645 | #ifdef ALTQ_PRIQ |
---|
646 | case ALTQT_PRIQ: |
---|
647 | error = priq_remove_altq(a); |
---|
648 | break; |
---|
649 | #endif |
---|
650 | #ifdef ALTQ_HFSC |
---|
651 | case ALTQT_HFSC: |
---|
652 | error = hfsc_remove_altq(a); |
---|
653 | break; |
---|
654 | #endif |
---|
655 | default: |
---|
656 | error = ENXIO; |
---|
657 | } |
---|
658 | |
---|
659 | return (error); |
---|
660 | } |
---|
661 | |
---|
662 | /* |
---|
663 | * add a queue to the discipline |
---|
664 | * It is yet unclear what lock to use to protect this operation, the |
---|
665 | * discipline specific functions will determine and grab it |
---|
666 | */ |
---|
667 | int |
---|
668 | altq_add_queue(struct pf_altq *a) |
---|
669 | { |
---|
670 | int error = 0; |
---|
671 | |
---|
672 | switch (a->scheduler) { |
---|
673 | #ifdef ALTQ_CBQ |
---|
674 | case ALTQT_CBQ: |
---|
675 | error = cbq_add_queue(a); |
---|
676 | break; |
---|
677 | #endif |
---|
678 | #ifdef ALTQ_PRIQ |
---|
679 | case ALTQT_PRIQ: |
---|
680 | error = priq_add_queue(a); |
---|
681 | break; |
---|
682 | #endif |
---|
683 | #ifdef ALTQ_HFSC |
---|
684 | case ALTQT_HFSC: |
---|
685 | error = hfsc_add_queue(a); |
---|
686 | break; |
---|
687 | #endif |
---|
688 | default: |
---|
689 | error = ENXIO; |
---|
690 | } |
---|
691 | |
---|
692 | return (error); |
---|
693 | } |
---|
694 | |
---|
695 | /* |
---|
696 | * remove a queue from the discipline |
---|
697 | * It is yet unclear what lock to use to protect this operation, the |
---|
698 | * discipline specific functions will determine and grab it |
---|
699 | */ |
---|
700 | int |
---|
701 | altq_remove_queue(struct pf_altq *a) |
---|
702 | { |
---|
703 | int error = 0; |
---|
704 | |
---|
705 | switch (a->scheduler) { |
---|
706 | #ifdef ALTQ_CBQ |
---|
707 | case ALTQT_CBQ: |
---|
708 | error = cbq_remove_queue(a); |
---|
709 | break; |
---|
710 | #endif |
---|
711 | #ifdef ALTQ_PRIQ |
---|
712 | case ALTQT_PRIQ: |
---|
713 | error = priq_remove_queue(a); |
---|
714 | break; |
---|
715 | #endif |
---|
716 | #ifdef ALTQ_HFSC |
---|
717 | case ALTQT_HFSC: |
---|
718 | error = hfsc_remove_queue(a); |
---|
719 | break; |
---|
720 | #endif |
---|
721 | default: |
---|
722 | error = ENXIO; |
---|
723 | } |
---|
724 | |
---|
725 | return (error); |
---|
726 | } |
---|
727 | |
---|
728 | /* |
---|
729 | * get queue statistics |
---|
730 | * Locking is done in the discipline specific functions with regards to |
---|
731 | * copyout operations, also it is not yet clear which lock to use. |
---|
732 | */ |
---|
733 | int |
---|
734 | altq_getqstats(struct pf_altq *a, void *ubuf, int *nbytes) |
---|
735 | { |
---|
736 | int error = 0; |
---|
737 | |
---|
738 | switch (a->scheduler) { |
---|
739 | #ifdef ALTQ_CBQ |
---|
740 | case ALTQT_CBQ: |
---|
741 | error = cbq_getqstats(a, ubuf, nbytes); |
---|
742 | break; |
---|
743 | #endif |
---|
744 | #ifdef ALTQ_PRIQ |
---|
745 | case ALTQT_PRIQ: |
---|
746 | error = priq_getqstats(a, ubuf, nbytes); |
---|
747 | break; |
---|
748 | #endif |
---|
749 | #ifdef ALTQ_HFSC |
---|
750 | case ALTQT_HFSC: |
---|
751 | error = hfsc_getqstats(a, ubuf, nbytes); |
---|
752 | break; |
---|
753 | #endif |
---|
754 | default: |
---|
755 | error = ENXIO; |
---|
756 | } |
---|
757 | |
---|
758 | return (error); |
---|
759 | } |
---|
760 | |
---|
761 | /* |
---|
762 | * read and write diffserv field in IPv4 or IPv6 header |
---|
763 | */ |
---|
764 | u_int8_t |
---|
765 | read_dsfield(m, pktattr) |
---|
766 | struct mbuf *m; |
---|
767 | struct altq_pktattr *pktattr; |
---|
768 | { |
---|
769 | struct mbuf *m0; |
---|
770 | u_int8_t ds_field = 0; |
---|
771 | |
---|
772 | if (pktattr == NULL || |
---|
773 | (pktattr->pattr_af != AF_INET && pktattr->pattr_af != AF_INET6)) |
---|
774 | return ((u_int8_t)0); |
---|
775 | |
---|
776 | /* verify that pattr_hdr is within the mbuf data */ |
---|
777 | for (m0 = m; m0 != NULL; m0 = m0->m_next) |
---|
778 | if ((pktattr->pattr_hdr >= m0->m_data) && |
---|
779 | (pktattr->pattr_hdr < m0->m_data + m0->m_len)) |
---|
780 | break; |
---|
781 | if (m0 == NULL) { |
---|
782 | /* ick, pattr_hdr is stale */ |
---|
783 | pktattr->pattr_af = AF_UNSPEC; |
---|
784 | #ifdef ALTQ_DEBUG |
---|
785 | printf("read_dsfield: can't locate header!\n"); |
---|
786 | #endif |
---|
787 | return ((u_int8_t)0); |
---|
788 | } |
---|
789 | |
---|
790 | if (pktattr->pattr_af == AF_INET) { |
---|
791 | struct ip *ip = (struct ip *)pktattr->pattr_hdr; |
---|
792 | |
---|
793 | if (ip->ip_v != 4) |
---|
794 | return ((u_int8_t)0); /* version mismatch! */ |
---|
795 | ds_field = ip->ip_tos; |
---|
796 | } |
---|
797 | #ifdef INET6 |
---|
798 | else if (pktattr->pattr_af == AF_INET6) { |
---|
799 | struct ip6_hdr *ip6 = (struct ip6_hdr *)pktattr->pattr_hdr; |
---|
800 | u_int32_t flowlabel; |
---|
801 | |
---|
802 | flowlabel = ntohl(ip6->ip6_flow); |
---|
803 | if ((flowlabel >> 28) != 6) |
---|
804 | return ((u_int8_t)0); /* version mismatch! */ |
---|
805 | ds_field = (flowlabel >> 20) & 0xff; |
---|
806 | } |
---|
807 | #endif |
---|
808 | return (ds_field); |
---|
809 | } |
---|
810 | |
---|
/*
 * Store a diffserv value into the IPv4 or IPv6 header cached in the
 * packet attributes, incrementally updating the IPv4 checksum per
 * RFC 1624.  Silently returns when the attributes are missing, the
 * cached header pointer is stale, or the header version mismatches.
 */
void
write_dsfield(struct mbuf *m, struct altq_pktattr *pktattr, u_int8_t dsfield)
{
	struct mbuf *m0;

	if (pktattr == NULL ||
	    (pktattr->pattr_af != AF_INET && pktattr->pattr_af != AF_INET6))
		return;

	/* verify that pattr_hdr is within the mbuf data */
	for (m0 = m; m0 != NULL; m0 = m0->m_next)
		if ((pktattr->pattr_hdr >= m0->m_data) &&
		    (pktattr->pattr_hdr < m0->m_data + m0->m_len))
			break;
	if (m0 == NULL) {
		/* ick, pattr_hdr is stale */
		pktattr->pattr_af = AF_UNSPEC;
#ifdef ALTQ_DEBUG
		printf("write_dsfield: can't locate header!\n");
#endif
		return;
	}

	if (pktattr->pattr_af == AF_INET) {
		struct ip *ip = (struct ip *)pktattr->pattr_hdr;
		u_int8_t old;
		int32_t sum;

		if (ip->ip_v != 4)
			return;		/* version mismatch! */
		old = ip->ip_tos;
		dsfield |= old & 3;	/* leave CU bits */
		if (old == dsfield)
			return;		/* unchanged; skip checksum work */
		ip->ip_tos = dsfield;
		/*
		 * update checksum (from RFC1624)
		 *	   HC' = ~(~HC + ~m + m')
		 */
		sum = ~ntohs(ip->ip_sum) & 0xffff;
		/* 0xff00 == ~m for the high byte; low byte is the TOS */
		sum += 0xff00 + (~old & 0xff) + dsfield;
		sum = (sum >> 16) + (sum & 0xffff);
		sum += (sum >> 16);	/* add carry */

		ip->ip_sum = htons(~sum & 0xffff);
	}
#ifdef INET6
	else if (pktattr->pattr_af == AF_INET6) {
		struct ip6_hdr *ip6 = (struct ip6_hdr *)pktattr->pattr_hdr;
		u_int32_t flowlabel;

		flowlabel = ntohl(ip6->ip6_flow);
		if ((flowlabel >> 28) != 6)
			return;		/* version mismatch! */
		/* replace the traffic-class bits (20-27) with dsfield */
		flowlabel = (flowlabel & 0xf03fffff) | (dsfield << 20);
		ip6->ip6_flow = htonl(flowlabel);
	}
#endif
	return;
}
---|
871 | |
---|
872 | |
---|
873 | /* |
---|
874 | * high resolution clock support taking advantage of a machine dependent |
---|
875 | * high resolution time counter (e.g., timestamp counter of intel pentium). |
---|
876 | * we assume |
---|
877 | * - 64-bit-long monotonically-increasing counter |
---|
878 | * - frequency range is 100M-4GHz (CPU speed) |
---|
879 | */ |
---|
880 | /* if pcc is not available or disabled, emulate 256MHz using microtime() */ |
---|
881 | #define MACHCLK_SHIFT 8 |
---|
882 | |
---|
883 | int machclk_usepcc; |
---|
884 | u_int32_t machclk_freq; |
---|
885 | u_int32_t machclk_per_tick; |
---|
886 | |
---|
887 | #if defined(__i386__) && defined(__NetBSD__) |
---|
888 | extern u_int64_t cpu_tsc_freq; |
---|
889 | #endif |
---|
890 | |
---|
891 | #ifndef __rtems__ |
---|
892 | #if (__FreeBSD_version >= 700035) |
---|
893 | /* Update TSC freq with the value indicated by the caller. */ |
---|
static void
tsc_freq_changed(void *arg, const struct cf_level *level, int status)
{
	/* arg and level are unused; only the transition status matters here */
	/* If there was an error during the transition, don't do anything. */
	if (status != 0)
		return;

#if (__FreeBSD_version >= 701102) && (defined(__amd64__) || defined(__i386__))
	/* If TSC is P-state invariant, don't do anything. */
	if (tsc_is_invariant)
		return;
#endif

	/* Total setting for this level gives the new frequency in MHz. */
	init_machclk();
}
---|
910 | EVENTHANDLER_DEFINE(cpufreq_post_change, tsc_freq_changed, NULL, |
---|
911 | EVENTHANDLER_PRI_LAST); |
---|
912 | #endif /* __FreeBSD_version >= 700035 */ |
---|
913 | #endif /* __rtems__ */ |
---|
914 | |
---|
/*
 * One-time machine-clock setup: initialize the tbr callout and decide
 * whether the CPU's cycle counter (TSC) can be used, falling back to
 * microtime() emulation on platforms/configs where it cannot.
 */
static void
init_machclk_setup(void)
{
#if (__FreeBSD_version >= 600000)
	callout_init(&tbr_callout, 0);
#endif

	/* optimistically assume the cycle counter is usable... */
	machclk_usepcc = 1;

	/* ...then rule it out on non-x86, when explicitly disabled, */
#if (!defined(__amd64__) && !defined(__i386__)) || defined(ALTQ_NOPCC)
	machclk_usepcc = 0;
#endif
	/* or on SMP configs where per-CPU TSCs may not be synchronized */
#if defined(__FreeBSD__) && defined(SMP)
	machclk_usepcc = 0;
#endif
#if defined(__NetBSD__) && defined(MULTIPROCESSOR)
	machclk_usepcc = 0;
#endif
#if defined(__amd64__) || defined(__i386__)
	/* check if TSC is available */
#ifdef __FreeBSD__
	if ((cpu_feature & CPUID_TSC) == 0 ||
	    atomic_load_acq_64(&tsc_freq) == 0)
#else
	if ((cpu_feature & CPUID_TSC) == 0)
#endif
		machclk_usepcc = 0;
#endif
}
---|
944 | |
---|
/*
 * Initialize (or re-initialize, e.g. after a cpufreq change) the
 * machine clock: set machclk_freq, the frequency of the counter
 * returned by read_machclk(), and machclk_per_tick, the number of
 * machine-clock counts per kernel tick.
 */
void
init_machclk(void)
{
	static int called;

	/* Call one-time initialization function. */
	if (!called) {
		init_machclk_setup();
		called = 1;
	}

	if (machclk_usepcc == 0) {
		/*
		 * no usable cycle counter: emulate a clock from
		 * microtime().  1MHz scaled by MACHCLK_SHIFT
		 * (256MHz when MACHCLK_SHIFT is 8).
		 */
		machclk_freq = 1000000 << MACHCLK_SHIFT;
		machclk_per_tick = machclk_freq / hz;
#ifdef ALTQ_DEBUG
		printf("altq: emulate %uHz cpu clock\n", machclk_freq);
#endif
		return;
	}

	/*
	 * if the clock frequency (of Pentium TSC or Alpha PCC) is
	 * accessible, just use it.
	 */
#if defined(__amd64__) || defined(__i386__)
#ifdef __FreeBSD__
	machclk_freq = atomic_load_acq_64(&tsc_freq);
#elif defined(__NetBSD__)
	machclk_freq = (u_int32_t)cpu_tsc_freq;
#elif defined(__OpenBSD__) && (defined(I586_CPU) || defined(I686_CPU))
	machclk_freq = pentium_mhz * 1000000;
#endif
#endif

	/*
	 * if we don't know the clock frequency, measure it: sleep for
	 * about one second and divide the cycle-counter delta by the
	 * microtime() delta.
	 */
	if (machclk_freq == 0) {
		static int wait;
		struct timeval tv_start, tv_end;
		u_int64_t start, end, diff;
		int timo;

		microtime(&tv_start);
		start = read_machclk();
		timo = hz;	/* 1 sec */
		(void)tsleep(&wait, PWAIT | PCATCH, "init_machclk", timo);
		microtime(&tv_end);
		end = read_machclk();
		/* elapsed wall time in microseconds */
		diff = (u_int64_t)(tv_end.tv_sec - tv_start.tv_sec) * 1000000
		    + tv_end.tv_usec - tv_start.tv_usec;
		if (diff != 0)
			machclk_freq = (u_int)((end - start) * 1000000 / diff);
	}

	machclk_per_tick = machclk_freq / hz;

#ifdef ALTQ_DEBUG
	printf("altq: CPU clock: %uHz\n", machclk_freq);
#endif
}
---|
1007 | |
---|
#if defined(__OpenBSD__) && defined(__i386__)
/* read the time-stamp counter (RDTSC encoded as raw opcode bytes) */
static __inline u_int64_t
rdtsc(void)
{
	u_int64_t rv;
	__asm __volatile(".byte 0x0f, 0x31" : "=A" (rv));
	return (rv);
}
#endif /* __OpenBSD__ && __i386__ */
---|
1017 | |
---|
/*
 * Return the current machine clock value: the raw TSC when
 * machclk_usepcc is set, otherwise microseconds since boot shifted
 * left by MACHCLK_SHIFT to match the emulated frequency chosen in
 * init_machclk().
 */
u_int64_t
read_machclk(void)
{
	u_int64_t val;

	if (machclk_usepcc) {
#if defined(__amd64__) || defined(__i386__)
		val = rdtsc();
#else
		/* machclk_usepcc should never be set on other archs */
		panic("read_machclk");
#endif
	} else {
		struct timeval tv;

		microtime(&tv);
		val = (((u_int64_t)(tv.tv_sec - boottime.tv_sec) * 1000000
		    + tv.tv_usec) << MACHCLK_SHIFT);
	}
	return (val);
}
---|
1038 | |
---|
1039 | #ifdef ALTQ3_CLFIER_COMPAT |
---|
1040 | |
---|
1041 | #ifndef IPPROTO_ESP |
---|
1042 | #define IPPROTO_ESP 50 /* encapsulating security payload */ |
---|
1043 | #endif |
---|
1044 | #ifndef IPPROTO_AH |
---|
1045 | #define IPPROTO_AH 51 /* authentication header */ |
---|
1046 | #endif |
---|
1047 | |
---|
1048 | /* |
---|
1049 | * extract flow information from a given packet. |
---|
1050 | * filt_mask shows flowinfo fields required. |
---|
1051 | * we assume the ip header is in one mbuf, and addresses and ports are |
---|
1052 | * in network byte order. |
---|
1053 | */ |
---|
int
altq_extractflow(m, af, flow, filt_bmask)
	struct mbuf *m;
	int af;
	struct flowinfo *flow;
	u_int32_t filt_bmask;
{
	/*
	 * On success, fill *flow with the packet's flow information and
	 * return 1.  On failure (unknown af or non-IPv4 header for
	 * PF_INET), fill in a minimal AF_UNSPEC flowinfo and return 0.
	 */

	switch (af) {
	case PF_INET: {
		struct flowinfo_in *fin;
		struct ip *ip;

		ip = mtod(m, struct ip *);

		if (ip->ip_v != 4)
			break;		/* not IPv4: fall through to failure */

		fin = (struct flowinfo_in *)flow;
		fin->fi_len = sizeof(struct flowinfo_in);
		fin->fi_family = AF_INET;

		fin->fi_proto = ip->ip_p;
		fin->fi_tos = ip->ip_tos;

		/* addresses are kept in network byte order */
		fin->fi_src.s_addr = ip->ip_src.s_addr;
		fin->fi_dst.s_addr = ip->ip_dst.s_addr;

		if (filt_bmask & FIMB4_PORTS)
			/* if port info is required, extract port numbers */
			extract_ports4(m, ip, fin);
		else {
			fin->fi_sport = 0;
			fin->fi_dport = 0;
			fin->fi_gpi = 0;
		}
		return (1);
	}

#ifdef INET6
	case PF_INET6: {
		struct flowinfo_in6 *fin6;
		struct ip6_hdr *ip6;

		ip6 = mtod(m, struct ip6_hdr *);
		/* should we check the ip version? */

		fin6 = (struct flowinfo_in6 *)flow;
		fin6->fi6_len = sizeof(struct flowinfo_in6);
		fin6->fi6_family = AF_INET6;

		fin6->fi6_proto = ip6->ip6_nxt;
		/* traffic class: bits 20-27 of the host-order flow word */
		fin6->fi6_tclass = (ntohl(ip6->ip6_flow) >> 20) & 0xff;

		/* flow label: low 20 bits, kept in network byte order */
		fin6->fi6_flowlabel = ip6->ip6_flow & htonl(0x000fffff);
		fin6->fi6_src = ip6->ip6_src;
		fin6->fi6_dst = ip6->ip6_dst;

		if ((filt_bmask & FIMB6_PORTS) ||
		    ((filt_bmask & FIMB6_PROTO)
		     && ip6->ip6_nxt > IPPROTO_IPV6))
			/*
			 * if port info is required, or proto is required
			 * but there are option headers, extract port
			 * and protocol numbers.
			 */
			extract_ports6(m, ip6, fin6);
		else {
			fin6->fi6_sport = 0;
			fin6->fi6_dport = 0;
			fin6->fi6_gpi = 0;
		}
		return (1);
	}
#endif /* INET6 */

	default:
		break;
	}

	/* failed */
	flow->fi_len = sizeof(struct flowinfo);
	flow->fi_family = AF_UNSPEC;
	return (0);
}
---|
1139 | |
---|
1140 | /* |
---|
1141 | * helper routine to extract port numbers |
---|
1142 | */ |
---|
/*
 * Template overlaying IPsec and IPv6 option headers: the first two
 * bytes of each are always next-header and length; ah_spi lines up
 * with the SPI field of an authentication header.
 */
struct _opt6 {
	u_int8_t	opt6_nxt;	/* next header */
	u_int8_t	opt6_hlen;	/* header extension length */
	u_int16_t	_pad;
	u_int32_t	ah_spi;		/* security parameter index
					   for authentication header */
};
---|
1151 | |
---|
1152 | /* |
---|
1153 | * extract port numbers from a ipv4 packet. |
---|
1154 | */ |
---|
static int
extract_ports4(m, ip, fin)
	struct mbuf *m;
	struct ip *ip;
	struct flowinfo_in *fin;
{
	/*
	 * Walk past the IPv4 header (and, with ALTQ_IPSEC, any AH
	 * headers) to extract port numbers and/or the IPsec SPI into
	 * *fin.  Returns 1 on success, 0 if the header chain cannot be
	 * followed.  Non-first fragments carry no ports, so they are
	 * resolved through the fragment cache instead.
	 */
	struct mbuf *m0;
	u_short ip_off;
	u_int8_t proto;
	int 	off;

	fin->fi_sport = 0;
	fin->fi_dport = 0;
	fin->fi_gpi = 0;

	ip_off = ntohs(ip->ip_off);
	/* if it is a fragment, try cached fragment info */
	if (ip_off & IP_OFFMASK) {
		ip4f_lookup(ip, fin);
		return (1);
	}

	/* locate the mbuf containing the protocol header */
	for (m0 = m; m0 != NULL; m0 = m0->m_next)
		if (((caddr_t)ip >= m0->m_data) &&
		    ((caddr_t)ip < m0->m_data + m0->m_len))
			break;
	if (m0 == NULL) {
#ifdef ALTQ_DEBUG
		printf("extract_ports4: can't locate header! ip=%p\n", ip);
#endif
		return (0);
	}
	/* offset of the transport header within m0 (ip_hl is in 32-bit words) */
	off = ((caddr_t)ip - m0->m_data) + (ip->ip_hl << 2);
	proto = ip->ip_p;

#ifdef ALTQ_IPSEC
 again:
#endif
	/* advance to the mbuf that actually holds offset 'off' */
	while (off >= m0->m_len) {
		off -= m0->m_len;
		m0 = m0->m_next;
		if (m0 == NULL)
			return (0);  /* bogus ip_hl! */
	}
	/* need at least 4 contiguous bytes (ports or SPI) */
	if (m0->m_len < off + 4)
		return (0);

	switch (proto) {
	case IPPROTO_TCP:
	case IPPROTO_UDP: {
		struct udphdr *udp;

		/* TCP and UDP both start with source/dest port */
		udp = (struct udphdr *)(mtod(m0, caddr_t) + off);
		fin->fi_sport = udp->uh_sport;
		fin->fi_dport = udp->uh_dport;
		fin->fi_proto = proto;
		}
		break;

#ifdef ALTQ_IPSEC
	case IPPROTO_ESP:
		if (fin->fi_gpi == 0){
			u_int32_t *gpi;

			/* SPI is the first 32-bit word of the ESP header */
			gpi = (u_int32_t *)(mtod(m0, caddr_t) + off);
			fin->fi_gpi   = *gpi;
		}
		fin->fi_proto = proto;
		break;

	case IPPROTO_AH: {
			/* get next header and header length */
			struct _opt6 *opt6;

			opt6 = (struct _opt6 *)(mtod(m0, caddr_t) + off);
			proto = opt6->opt6_nxt;
			/* AH length field counts 32-bit words beyond the fixed 8 bytes */
			off += 8 + (opt6->opt6_hlen * 4);
			if (fin->fi_gpi == 0 && m0->m_len >= off + 8)
				fin->fi_gpi = opt6->ah_spi;
		}
		/* goto the next header */
		goto again;
#endif  /* ALTQ_IPSEC */

	default:
		fin->fi_proto = proto;
		return (0);
	}

	/* if this is a first fragment, cache it. */
	if (ip_off & IP_MF)
		ip4f_cache(ip, fin);

	return (1);
}
---|
1251 | |
---|
1252 | #ifdef INET6 |
---|
static int
extract_ports6(m, ip6, fin6)
	struct mbuf *m;
	struct ip6_hdr *ip6;
	struct flowinfo_in6 *fin6;
{
	/*
	 * Walk the IPv6 extension header chain to extract the upper
	 * layer protocol, port numbers, and/or IPsec SPI into *fin6.
	 * Returns 1 on success, 0 if the chain cannot be followed
	 * (truncated mbufs, fragments, or an unknown protocol).
	 */
	struct mbuf *m0;
	int	off;
	u_int8_t proto;

	fin6->fi6_gpi   = 0;
	fin6->fi6_sport = 0;
	fin6->fi6_dport = 0;

	/* locate the mbuf containing the protocol header */
	for (m0 = m; m0 != NULL; m0 = m0->m_next)
		if (((caddr_t)ip6 >= m0->m_data) &&
		    ((caddr_t)ip6 < m0->m_data + m0->m_len))
			break;
	if (m0 == NULL) {
#ifdef ALTQ_DEBUG
		printf("extract_ports6: can't locate header! ip6=%p\n", ip6);
#endif
		return (0);
	}
	off = ((caddr_t)ip6 - m0->m_data) + sizeof(struct ip6_hdr);

	proto = ip6->ip6_nxt;
	do {
		/* advance to the mbuf that actually holds offset 'off' */
		while (off >= m0->m_len) {
			off -= m0->m_len;
			m0 = m0->m_next;
			if (m0 == NULL)
				return (0);
		}
		/* need at least 4 contiguous bytes (ports, SPI, or ext header) */
		if (m0->m_len < off + 4)
			return (0);

		switch (proto) {
		case IPPROTO_TCP:
		case IPPROTO_UDP: {
			struct udphdr *udp;

			/* TCP and UDP both start with source/dest port */
			udp = (struct udphdr *)(mtod(m0, caddr_t) + off);
			fin6->fi6_sport = udp->uh_sport;
			fin6->fi6_dport = udp->uh_dport;
			fin6->fi6_proto = proto;
			}
			return (1);

		case IPPROTO_ESP:
			if (fin6->fi6_gpi == 0) {
				u_int32_t *gpi;

				/* SPI is the first 32-bit word of the ESP header */
				gpi = (u_int32_t *)(mtod(m0, caddr_t) + off);
				fin6->fi6_gpi   = *gpi;
			}
			fin6->fi6_proto = proto;
			return (1);

		case IPPROTO_AH: {
			/* get next header and header length */
			struct _opt6 *opt6;

			opt6 = (struct _opt6 *)(mtod(m0, caddr_t) + off);
			if (fin6->fi6_gpi == 0 && m0->m_len >= off + 8)
				fin6->fi6_gpi = opt6->ah_spi;
			proto = opt6->opt6_nxt;
			/* AH length field counts 32-bit words beyond the fixed 8 bytes */
			off   += 8 + (opt6->opt6_hlen * 4);
			/* goto the next header */
			break;
			}

		case IPPROTO_HOPOPTS:
		case IPPROTO_ROUTING:
		case IPPROTO_DSTOPTS: {
			/* get next header and header length */
			struct _opt6 *opt6;

			opt6 = (struct _opt6 *)(mtod(m0, caddr_t) + off);
			proto = opt6->opt6_nxt;
			/* generic ext header length is in 8-byte units, excluding the first */
			off  += (opt6->opt6_hlen + 1) * 8;
			/* goto the next header */
			break;
			}

		case IPPROTO_FRAGMENT:
			/* ipv6 fragmentations are not supported yet */
		default:
			fin6->fi6_proto = proto;
			return (0);
		}
	} while (1);
	/*NOTREACHED*/
}
---|
1348 | #endif /* INET6 */ |
---|
1349 | |
---|
1350 | /* |
---|
1351 | * altq common classifier |
---|
1352 | */ |
---|
/*
 * Add a flow filter to the classifier and return its handle via
 * *phandle.  The filter is copied; address masks are normalized
 * (wildcard / full-mask defaults) and the filter is inserted into the
 * hash bucket chosen from the dst address (IPv4) or flow label
 * (IPv6), ordered by descending rule number.  Returns 0 on success,
 * EINVAL for an unsupported address family, ENOMEM on allocation
 * failure.
 */
int
acc_add_filter(classifier, filter, class, phandle)
	struct acc_classifier *classifier;
	struct flow_filter *filter;
	void	*class;
	u_long	*phandle;
{
	struct acc_filter *afp, *prev, *tmp;
	int	i, s;

#ifdef INET6
	if (filter->ff_flow.fi_family != AF_INET &&
	    filter->ff_flow.fi_family != AF_INET6)
		return (EINVAL);
#else
	if (filter->ff_flow.fi_family != AF_INET)
		return (EINVAL);
#endif

	/*
	 * NOTE(review): M_WAITOK malloc should not return NULL on
	 * FreeBSD, so the ENOMEM path looks unreachable -- confirm.
	 */
	afp = malloc(sizeof(struct acc_filter),
	       M_DEVBUF, M_WAITOK);
	if (afp == NULL)
		return (ENOMEM);
	bzero(afp, sizeof(struct acc_filter));

	afp->f_filter = *filter;
	afp->f_class = class;

	i = ACC_WILDCARD_INDEX;
	if (filter->ff_flow.fi_family == AF_INET) {
		struct flow_filter *filter4 = &afp->f_filter;

		/*
		 * if address is 0, it's a wildcard.  if address mask
		 * isn't set, use full mask.
		 */
		if (filter4->ff_flow.fi_dst.s_addr == 0)
			filter4->ff_mask.mask_dst.s_addr = 0;
		else if (filter4->ff_mask.mask_dst.s_addr == 0)
			filter4->ff_mask.mask_dst.s_addr = 0xffffffff;
		if (filter4->ff_flow.fi_src.s_addr == 0)
			filter4->ff_mask.mask_src.s_addr = 0;
		else if (filter4->ff_mask.mask_src.s_addr == 0)
			filter4->ff_mask.mask_src.s_addr = 0xffffffff;

		/* clear extra bits in addresses  */
		filter4->ff_flow.fi_dst.s_addr &=
		    filter4->ff_mask.mask_dst.s_addr;
		filter4->ff_flow.fi_src.s_addr &=
		    filter4->ff_mask.mask_src.s_addr;

		/*
		 * if dst address is a wildcard, use hash-entry
		 * ACC_WILDCARD_INDEX.
		 */
		if (filter4->ff_mask.mask_dst.s_addr != 0xffffffff)
			i = ACC_WILDCARD_INDEX;
		else
			i = ACC_GET_HASH_INDEX(filter4->ff_flow.fi_dst.s_addr);
	}
#ifdef INET6
	else if (filter->ff_flow.fi_family == AF_INET6) {
		struct flow_filter6 *filter6 =
			(struct flow_filter6 *)&afp->f_filter;
#ifndef IN6MASK0 /* taken from kame ipv6 */
#define	IN6MASK0	{{{ 0, 0, 0, 0 }}}
#define	IN6MASK128	{{{ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff }}}
		const struct in6_addr in6mask0 = IN6MASK0;
		const struct in6_addr in6mask128 = IN6MASK128;
#endif

		/* same wildcard / full-mask normalization as IPv4 above */
		if (IN6_IS_ADDR_UNSPECIFIED(&filter6->ff_flow6.fi6_dst))
			filter6->ff_mask6.mask6_dst = in6mask0;
		else if (IN6_IS_ADDR_UNSPECIFIED(&filter6->ff_mask6.mask6_dst))
			filter6->ff_mask6.mask6_dst = in6mask128;
		if (IN6_IS_ADDR_UNSPECIFIED(&filter6->ff_flow6.fi6_src))
			filter6->ff_mask6.mask6_src = in6mask0;
		else if (IN6_IS_ADDR_UNSPECIFIED(&filter6->ff_mask6.mask6_src))
			filter6->ff_mask6.mask6_src = in6mask128;

		/* clear extra bits in addresses  */
		for (i = 0; i < 16; i++)
			filter6->ff_flow6.fi6_dst.s6_addr[i] &=
			    filter6->ff_mask6.mask6_dst.s6_addr[i];
		for (i = 0; i < 16; i++)
			filter6->ff_flow6.fi6_src.s6_addr[i] &=
			    filter6->ff_mask6.mask6_src.s6_addr[i];

		if (filter6->ff_flow6.fi6_flowlabel == 0)
			i = ACC_WILDCARD_INDEX;
		else
			i = ACC_GET_HASH_INDEX(filter6->ff_flow6.fi6_flowlabel);
	}
#endif /* INET6 */

	afp->f_handle = get_filt_handle(classifier, i);

	/* update filter bitmask */
	afp->f_fbmask = filt2fibmask(filter);
	classifier->acc_fbmask |= afp->f_fbmask;

	/*
	 * add this filter to the filter list.
	 * filters are ordered from the highest rule number.
	 */
#ifdef __NetBSD__
	s = splnet();
#else
	s = splimp();
#endif
	prev = NULL;
	LIST_FOREACH(tmp, &classifier->acc_filters[i], f_chain) {
		if (tmp->f_filter.ff_ruleno > afp->f_filter.ff_ruleno)
			prev = tmp;
		else
			break;
	}
	if (prev == NULL)
		LIST_INSERT_HEAD(&classifier->acc_filters[i], afp, f_chain);
	else
		LIST_INSERT_AFTER(prev, afp, f_chain);
	splx(s);

	*phandle = afp->f_handle;
	return (0);
}
---|
1479 | |
---|
1480 | int |
---|
1481 | acc_delete_filter(classifier, handle) |
---|
1482 | struct acc_classifier *classifier; |
---|
1483 | u_long handle; |
---|
1484 | { |
---|
1485 | struct acc_filter *afp; |
---|
1486 | int s; |
---|
1487 | |
---|
1488 | if ((afp = filth_to_filtp(classifier, handle)) == NULL) |
---|
1489 | return (EINVAL); |
---|
1490 | |
---|
1491 | #ifdef __NetBSD__ |
---|
1492 | s = splnet(); |
---|
1493 | #else |
---|
1494 | s = splimp(); |
---|
1495 | #endif |
---|
1496 | LIST_REMOVE(afp, f_chain); |
---|
1497 | splx(s); |
---|
1498 | |
---|
1499 | free(afp, M_DEVBUF); |
---|
1500 | |
---|
1501 | /* todo: update filt_bmask */ |
---|
1502 | |
---|
1503 | return (0); |
---|
1504 | } |
---|
1505 | |
---|
1506 | /* |
---|
1507 | * delete filters referencing to the specified class. |
---|
1508 | * if the all flag is not 0, delete all the filters. |
---|
1509 | */ |
---|
/*
 * delete filters referencing to the specified class.
 * if the all flag is not 0, delete all the filters.
 * Always returns 0.
 */
int
acc_discard_filters(classifier, class, all)
	struct acc_classifier *classifier;
	void	*class;
	int	all;
{
	struct acc_filter *afp;
	int	i, s;

#ifdef __NetBSD__
	s = splnet();
#else
	s = splimp();
#endif
	for (i = 0; i < ACC_FILTER_TABLESIZE; i++) {
		/*
		 * removing an entry invalidates the LIST_FOREACH
		 * cursor, so restart the scan from the bucket head
		 * after every removal; the loop ends when a full
		 * pass finds nothing to delete (afp == NULL).
		 */
		do {
			LIST_FOREACH(afp, &classifier->acc_filters[i], f_chain)
				if (all || afp->f_class == class) {
					LIST_REMOVE(afp, f_chain);
					free(afp, M_DEVBUF);
					/* start again from the head */
					break;
				}
		} while (afp != NULL);
	}
	splx(s);

	if (all)
		classifier->acc_fbmask = 0;

	return (0);
}
---|
1542 | |
---|
/*
 * Classify a packet: extract its flow information and scan the
 * filter hash chains for the first matching filter (filters within a
 * chain are ordered by descending rule number).  Returns the class
 * of the matching filter, or NULL if no filter matches.
 */
void *
acc_classify(clfier, m, af)
	void *clfier;
	struct mbuf *m;
	int af;
{
	struct acc_classifier *classifier;
	struct flowinfo flow;
	struct acc_filter *afp;
	int	i;

	classifier = (struct acc_classifier *)clfier;
	altq_extractflow(m, af, &flow, classifier->acc_fbmask);

	if (flow.fi_family == AF_INET) {
		struct flowinfo_in *fp = (struct flowinfo_in *)&flow;

		if ((classifier->acc_fbmask & FIMB4_ALL) == FIMB4_TOS) {
			/* only tos is used */
			LIST_FOREACH(afp,
				 &classifier->acc_filters[ACC_WILDCARD_INDEX],
				 f_chain)
				if (apply_tosfilter4(afp->f_fbmask,
						     &afp->f_filter, fp))
					/* filter matched */
					return (afp->f_class);
		} else if ((classifier->acc_fbmask &
			(~(FIMB4_PROTO|FIMB4_SPORT|FIMB4_DPORT) & FIMB4_ALL))
		    == 0) {
			/* only proto and ports are used */
			LIST_FOREACH(afp,
				 &classifier->acc_filters[ACC_WILDCARD_INDEX],
				 f_chain)
				if (apply_ppfilter4(afp->f_fbmask,
						    &afp->f_filter, fp))
					/* filter matched */
					return (afp->f_class);
		} else {
			/* get the filter hash entry from its dest address */
			i = ACC_GET_HASH_INDEX(fp->fi_dst.s_addr);
			do {
				/*
				 * go through this loop twice.  first for dst
				 * hash, second for wildcards.
				 */
				LIST_FOREACH(afp, &classifier->acc_filters[i],
					     f_chain)
					if (apply_filter4(afp->f_fbmask,
							  &afp->f_filter, fp))
						/* filter matched */
						return (afp->f_class);

				/*
				 * check again for filters with a dst addr
				 * wildcard.
				 * (daddr == 0 || dmask != 0xffffffff).
				 */
				if (i != ACC_WILDCARD_INDEX)
					i = ACC_WILDCARD_INDEX;
				else
					break;
			} while (1);
		}
	}
#ifdef INET6
	else if (flow.fi_family == AF_INET6) {
		struct flowinfo_in6 *fp6 = (struct flowinfo_in6 *)&flow;

		/* get the filter hash entry from its flow ID */
		if (fp6->fi6_flowlabel != 0)
			i = ACC_GET_HASH_INDEX(fp6->fi6_flowlabel);
		else
			/* flowlabel can be zero */
			i = ACC_WILDCARD_INDEX;

		/* go through this loop twice.  first for flow hash, second
		   for wildcards. */
		do {
			LIST_FOREACH(afp, &classifier->acc_filters[i], f_chain)
				if (apply_filter6(afp->f_fbmask,
					(struct flow_filter6 *)&afp->f_filter,
					fp6))
					/* filter matched */
					return (afp->f_class);

			/*
			 * check again for filters with a wildcard.
			 */
			if (i != ACC_WILDCARD_INDEX)
				i = ACC_WILDCARD_INDEX;
			else
				break;
		} while (1);
	}
#endif /* INET6 */

	/* no filter matched */
	return (NULL);
}
---|
1642 | |
---|
1643 | static int |
---|
1644 | apply_filter4(fbmask, filt, pkt) |
---|
1645 | u_int32_t fbmask; |
---|
1646 | struct flow_filter *filt; |
---|
1647 | struct flowinfo_in *pkt; |
---|
1648 | { |
---|
1649 | if (filt->ff_flow.fi_family != AF_INET) |
---|
1650 | return (0); |
---|
1651 | if ((fbmask & FIMB4_SPORT) && filt->ff_flow.fi_sport != pkt->fi_sport) |
---|
1652 | return (0); |
---|
1653 | if ((fbmask & FIMB4_DPORT) && filt->ff_flow.fi_dport != pkt->fi_dport) |
---|
1654 | return (0); |
---|
1655 | if ((fbmask & FIMB4_DADDR) && |
---|
1656 | filt->ff_flow.fi_dst.s_addr != |
---|
1657 | (pkt->fi_dst.s_addr & filt->ff_mask.mask_dst.s_addr)) |
---|
1658 | return (0); |
---|
1659 | if ((fbmask & FIMB4_SADDR) && |
---|
1660 | filt->ff_flow.fi_src.s_addr != |
---|
1661 | (pkt->fi_src.s_addr & filt->ff_mask.mask_src.s_addr)) |
---|
1662 | return (0); |
---|
1663 | if ((fbmask & FIMB4_PROTO) && filt->ff_flow.fi_proto != pkt->fi_proto) |
---|
1664 | return (0); |
---|
1665 | if ((fbmask & FIMB4_TOS) && filt->ff_flow.fi_tos != |
---|
1666 | (pkt->fi_tos & filt->ff_mask.mask_tos)) |
---|
1667 | return (0); |
---|
1668 | if ((fbmask & FIMB4_GPI) && filt->ff_flow.fi_gpi != (pkt->fi_gpi)) |
---|
1669 | return (0); |
---|
1670 | /* match */ |
---|
1671 | return (1); |
---|
1672 | } |
---|
1673 | |
---|
1674 | /* |
---|
1675 | * filter matching function optimized for a common case that checks |
---|
1676 | * only protocol and port numbers |
---|
1677 | */ |
---|
1678 | static int |
---|
1679 | apply_ppfilter4(fbmask, filt, pkt) |
---|
1680 | u_int32_t fbmask; |
---|
1681 | struct flow_filter *filt; |
---|
1682 | struct flowinfo_in *pkt; |
---|
1683 | { |
---|
1684 | if (filt->ff_flow.fi_family != AF_INET) |
---|
1685 | return (0); |
---|
1686 | if ((fbmask & FIMB4_SPORT) && filt->ff_flow.fi_sport != pkt->fi_sport) |
---|
1687 | return (0); |
---|
1688 | if ((fbmask & FIMB4_DPORT) && filt->ff_flow.fi_dport != pkt->fi_dport) |
---|
1689 | return (0); |
---|
1690 | if ((fbmask & FIMB4_PROTO) && filt->ff_flow.fi_proto != pkt->fi_proto) |
---|
1691 | return (0); |
---|
1692 | /* match */ |
---|
1693 | return (1); |
---|
1694 | } |
---|
1695 | |
---|
1696 | /* |
---|
1697 | * filter matching function only for tos field. |
---|
1698 | */ |
---|
1699 | static int |
---|
1700 | apply_tosfilter4(fbmask, filt, pkt) |
---|
1701 | u_int32_t fbmask; |
---|
1702 | struct flow_filter *filt; |
---|
1703 | struct flowinfo_in *pkt; |
---|
1704 | { |
---|
1705 | if (filt->ff_flow.fi_family != AF_INET) |
---|
1706 | return (0); |
---|
1707 | if ((fbmask & FIMB4_TOS) && filt->ff_flow.fi_tos != |
---|
1708 | (pkt->fi_tos & filt->ff_mask.mask_tos)) |
---|
1709 | return (0); |
---|
1710 | /* match */ |
---|
1711 | return (1); |
---|
1712 | } |
---|
1713 | |
---|
1714 | #ifdef INET6 |
---|
1715 | static int |
---|
1716 | apply_filter6(fbmask, filt, pkt) |
---|
1717 | u_int32_t fbmask; |
---|
1718 | struct flow_filter6 *filt; |
---|
1719 | struct flowinfo_in6 *pkt; |
---|
1720 | { |
---|
1721 | int i; |
---|
1722 | |
---|
1723 | if (filt->ff_flow6.fi6_family != AF_INET6) |
---|
1724 | return (0); |
---|
1725 | if ((fbmask & FIMB6_FLABEL) && |
---|
1726 | filt->ff_flow6.fi6_flowlabel != pkt->fi6_flowlabel) |
---|
1727 | return (0); |
---|
1728 | if ((fbmask & FIMB6_PROTO) && |
---|
1729 | filt->ff_flow6.fi6_proto != pkt->fi6_proto) |
---|
1730 | return (0); |
---|
1731 | if ((fbmask & FIMB6_SPORT) && |
---|
1732 | filt->ff_flow6.fi6_sport != pkt->fi6_sport) |
---|
1733 | return (0); |
---|
1734 | if ((fbmask & FIMB6_DPORT) && |
---|
1735 | filt->ff_flow6.fi6_dport != pkt->fi6_dport) |
---|
1736 | return (0); |
---|
1737 | if (fbmask & FIMB6_SADDR) { |
---|
1738 | for (i = 0; i < 4; i++) |
---|
1739 | if (filt->ff_flow6.fi6_src.s6_addr32[i] != |
---|
1740 | (pkt->fi6_src.s6_addr32[i] & |
---|
1741 | filt->ff_mask6.mask6_src.s6_addr32[i])) |
---|
1742 | return (0); |
---|
1743 | } |
---|
1744 | if (fbmask & FIMB6_DADDR) { |
---|
1745 | for (i = 0; i < 4; i++) |
---|
1746 | if (filt->ff_flow6.fi6_dst.s6_addr32[i] != |
---|
1747 | (pkt->fi6_dst.s6_addr32[i] & |
---|
1748 | filt->ff_mask6.mask6_dst.s6_addr32[i])) |
---|
1749 | return (0); |
---|
1750 | } |
---|
1751 | if ((fbmask & FIMB6_TCLASS) && |
---|
1752 | filt->ff_flow6.fi6_tclass != |
---|
1753 | (pkt->fi6_tclass & filt->ff_mask6.mask6_tclass)) |
---|
1754 | return (0); |
---|
1755 | if ((fbmask & FIMB6_GPI) && |
---|
1756 | filt->ff_flow6.fi6_gpi != pkt->fi6_gpi) |
---|
1757 | return (0); |
---|
1758 | /* match */ |
---|
1759 | return (1); |
---|
1760 | } |
---|
1761 | #endif /* INET6 */ |
---|
1762 | |
---|
1763 | /* |
---|
1764 | * filter handle: |
---|
1765 | * bit 20-28: index to the filter hash table |
---|
1766 | * bit 0-19: unique id in the hash bucket. |
---|
1767 | */ |
---|
1768 | static u_long |
---|
1769 | get_filt_handle(classifier, i) |
---|
1770 | struct acc_classifier *classifier; |
---|
1771 | int i; |
---|
1772 | { |
---|
1773 | static u_long handle_number = 1; |
---|
1774 | u_long handle; |
---|
1775 | struct acc_filter *afp; |
---|
1776 | |
---|
1777 | while (1) { |
---|
1778 | handle = handle_number++ & 0x000fffff; |
---|
1779 | |
---|
1780 | if (LIST_EMPTY(&classifier->acc_filters[i])) |
---|
1781 | break; |
---|
1782 | |
---|
1783 | LIST_FOREACH(afp, &classifier->acc_filters[i], f_chain) |
---|
1784 | if ((afp->f_handle & 0x000fffff) == handle) |
---|
1785 | break; |
---|
1786 | if (afp == NULL) |
---|
1787 | break; |
---|
1788 | /* this handle is already used, try again */ |
---|
1789 | } |
---|
1790 | |
---|
1791 | return ((i << 20) | handle); |
---|
1792 | } |
---|
1793 | |
---|
1794 | /* convert filter handle to filter pointer */ |
---|
1795 | static struct acc_filter * |
---|
1796 | filth_to_filtp(classifier, handle) |
---|
1797 | struct acc_classifier *classifier; |
---|
1798 | u_long handle; |
---|
1799 | { |
---|
1800 | struct acc_filter *afp; |
---|
1801 | int i; |
---|
1802 | |
---|
1803 | i = ACC_GET_HINDEX(handle); |
---|
1804 | |
---|
1805 | LIST_FOREACH(afp, &classifier->acc_filters[i], f_chain) |
---|
1806 | if (afp->f_handle == handle) |
---|
1807 | return (afp); |
---|
1808 | |
---|
1809 | return (NULL); |
---|
1810 | } |
---|
1811 | |
---|
1812 | /* create flowinfo bitmask */ |
---|
1813 | static u_int32_t |
---|
1814 | filt2fibmask(filt) |
---|
1815 | struct flow_filter *filt; |
---|
1816 | { |
---|
1817 | u_int32_t mask = 0; |
---|
1818 | #ifdef INET6 |
---|
1819 | struct flow_filter6 *filt6; |
---|
1820 | #endif |
---|
1821 | |
---|
1822 | switch (filt->ff_flow.fi_family) { |
---|
1823 | case AF_INET: |
---|
1824 | if (filt->ff_flow.fi_proto != 0) |
---|
1825 | mask |= FIMB4_PROTO; |
---|
1826 | if (filt->ff_flow.fi_tos != 0) |
---|
1827 | mask |= FIMB4_TOS; |
---|
1828 | if (filt->ff_flow.fi_dst.s_addr != 0) |
---|
1829 | mask |= FIMB4_DADDR; |
---|
1830 | if (filt->ff_flow.fi_src.s_addr != 0) |
---|
1831 | mask |= FIMB4_SADDR; |
---|
1832 | if (filt->ff_flow.fi_sport != 0) |
---|
1833 | mask |= FIMB4_SPORT; |
---|
1834 | if (filt->ff_flow.fi_dport != 0) |
---|
1835 | mask |= FIMB4_DPORT; |
---|
1836 | if (filt->ff_flow.fi_gpi != 0) |
---|
1837 | mask |= FIMB4_GPI; |
---|
1838 | break; |
---|
1839 | #ifdef INET6 |
---|
1840 | case AF_INET6: |
---|
1841 | filt6 = (struct flow_filter6 *)filt; |
---|
1842 | |
---|
1843 | if (filt6->ff_flow6.fi6_proto != 0) |
---|
1844 | mask |= FIMB6_PROTO; |
---|
1845 | if (filt6->ff_flow6.fi6_tclass != 0) |
---|
1846 | mask |= FIMB6_TCLASS; |
---|
1847 | if (!IN6_IS_ADDR_UNSPECIFIED(&filt6->ff_flow6.fi6_dst)) |
---|
1848 | mask |= FIMB6_DADDR; |
---|
1849 | if (!IN6_IS_ADDR_UNSPECIFIED(&filt6->ff_flow6.fi6_src)) |
---|
1850 | mask |= FIMB6_SADDR; |
---|
1851 | if (filt6->ff_flow6.fi6_sport != 0) |
---|
1852 | mask |= FIMB6_SPORT; |
---|
1853 | if (filt6->ff_flow6.fi6_dport != 0) |
---|
1854 | mask |= FIMB6_DPORT; |
---|
1855 | if (filt6->ff_flow6.fi6_gpi != 0) |
---|
1856 | mask |= FIMB6_GPI; |
---|
1857 | if (filt6->ff_flow6.fi6_flowlabel != 0) |
---|
1858 | mask |= FIMB6_FLABEL; |
---|
1859 | break; |
---|
1860 | #endif /* INET6 */ |
---|
1861 | } |
---|
1862 | return (mask); |
---|
1863 | } |
---|
1864 | |
---|
1865 | |
---|
1866 | /* |
---|
1867 | * helper functions to handle IPv4 fragments. |
---|
1868 | * currently only in-sequence fragments are handled. |
---|
1869 | * - fragment info is cached in a LRU list. |
---|
1870 | * - when a first fragment is found, cache its flow info. |
---|
1871 | * - when a non-first fragment is found, lookup the cache. |
---|
1872 | */ |
---|
1873 | |
---|
/* one cached IPv4 fragment flow, kept on the LRU list ip4f_list */
struct ip4_frag {
	TAILQ_ENTRY(ip4_frag) ip4f_chain;	/* LRU list linkage */
	char	ip4f_valid;			/* nonzero if this entry is in use */
	u_short ip4f_id;			/* IP identification field of the flow */
	struct flowinfo_in ip4f_info;		/* cached flow info (proto/addrs/ports/gpi) */
};

static TAILQ_HEAD(ip4f_list, ip4_frag) ip4f_list; /* IPv4 fragment cache */

#define	IP4F_TABSIZE		16	/* IPv4 fragment cache size */
---|
1884 | |
---|
1885 | |
---|
1886 | static void |
---|
1887 | ip4f_cache(ip, fin) |
---|
1888 | struct ip *ip; |
---|
1889 | struct flowinfo_in *fin; |
---|
1890 | { |
---|
1891 | struct ip4_frag *fp; |
---|
1892 | |
---|
1893 | if (TAILQ_EMPTY(&ip4f_list)) { |
---|
1894 | /* first time call, allocate fragment cache entries. */ |
---|
1895 | if (ip4f_init() < 0) |
---|
1896 | /* allocation failed! */ |
---|
1897 | return; |
---|
1898 | } |
---|
1899 | |
---|
1900 | fp = ip4f_alloc(); |
---|
1901 | fp->ip4f_id = ip->ip_id; |
---|
1902 | fp->ip4f_info.fi_proto = ip->ip_p; |
---|
1903 | fp->ip4f_info.fi_src.s_addr = ip->ip_src.s_addr; |
---|
1904 | fp->ip4f_info.fi_dst.s_addr = ip->ip_dst.s_addr; |
---|
1905 | |
---|
1906 | /* save port numbers */ |
---|
1907 | fp->ip4f_info.fi_sport = fin->fi_sport; |
---|
1908 | fp->ip4f_info.fi_dport = fin->fi_dport; |
---|
1909 | fp->ip4f_info.fi_gpi = fin->fi_gpi; |
---|
1910 | } |
---|
1911 | |
---|
1912 | static int |
---|
1913 | ip4f_lookup(ip, fin) |
---|
1914 | struct ip *ip; |
---|
1915 | struct flowinfo_in *fin; |
---|
1916 | { |
---|
1917 | struct ip4_frag *fp; |
---|
1918 | |
---|
1919 | for (fp = TAILQ_FIRST(&ip4f_list); fp != NULL && fp->ip4f_valid; |
---|
1920 | fp = TAILQ_NEXT(fp, ip4f_chain)) |
---|
1921 | if (ip->ip_id == fp->ip4f_id && |
---|
1922 | ip->ip_src.s_addr == fp->ip4f_info.fi_src.s_addr && |
---|
1923 | ip->ip_dst.s_addr == fp->ip4f_info.fi_dst.s_addr && |
---|
1924 | ip->ip_p == fp->ip4f_info.fi_proto) { |
---|
1925 | |
---|
1926 | /* found the matching entry */ |
---|
1927 | fin->fi_sport = fp->ip4f_info.fi_sport; |
---|
1928 | fin->fi_dport = fp->ip4f_info.fi_dport; |
---|
1929 | fin->fi_gpi = fp->ip4f_info.fi_gpi; |
---|
1930 | |
---|
1931 | if ((ntohs(ip->ip_off) & IP_MF) == 0) |
---|
1932 | /* this is the last fragment, |
---|
1933 | release the entry. */ |
---|
1934 | ip4f_free(fp); |
---|
1935 | |
---|
1936 | return (1); |
---|
1937 | } |
---|
1938 | |
---|
1939 | /* no matching entry found */ |
---|
1940 | return (0); |
---|
1941 | } |
---|
1942 | |
---|
1943 | static int |
---|
1944 | ip4f_init(void) |
---|
1945 | { |
---|
1946 | struct ip4_frag *fp; |
---|
1947 | int i; |
---|
1948 | |
---|
1949 | TAILQ_INIT(&ip4f_list); |
---|
1950 | for (i=0; i<IP4F_TABSIZE; i++) { |
---|
1951 | fp = malloc(sizeof(struct ip4_frag), |
---|
1952 | M_DEVBUF, M_NOWAIT); |
---|
1953 | if (fp == NULL) { |
---|
1954 | printf("ip4f_init: can't alloc %dth entry!\n", i); |
---|
1955 | if (i == 0) |
---|
1956 | return (-1); |
---|
1957 | return (0); |
---|
1958 | } |
---|
1959 | fp->ip4f_valid = 0; |
---|
1960 | TAILQ_INSERT_TAIL(&ip4f_list, fp, ip4f_chain); |
---|
1961 | } |
---|
1962 | return (0); |
---|
1963 | } |
---|
1964 | |
---|
1965 | static struct ip4_frag * |
---|
1966 | ip4f_alloc(void) |
---|
1967 | { |
---|
1968 | struct ip4_frag *fp; |
---|
1969 | |
---|
1970 | /* reclaim an entry at the tail, put it at the head */ |
---|
1971 | fp = TAILQ_LAST(&ip4f_list, ip4f_list); |
---|
1972 | TAILQ_REMOVE(&ip4f_list, fp, ip4f_chain); |
---|
1973 | fp->ip4f_valid = 1; |
---|
1974 | TAILQ_INSERT_HEAD(&ip4f_list, fp, ip4f_chain); |
---|
1975 | return (fp); |
---|
1976 | } |
---|
1977 | |
---|
1978 | static void |
---|
1979 | ip4f_free(fp) |
---|
1980 | struct ip4_frag *fp; |
---|
1981 | { |
---|
1982 | TAILQ_REMOVE(&ip4f_list, fp, ip4f_chain); |
---|
1983 | fp->ip4f_valid = 0; |
---|
1984 | TAILQ_INSERT_TAIL(&ip4f_list, fp, ip4f_chain); |
---|
1985 | } |
---|
1986 | |
---|
1987 | #endif /* ALTQ3_CLFIER_COMPAT */ |
---|