1 | #include <rtems/freebsd/machine/rtems-bsd-config.h> |
---|
2 | |
---|
3 | /* $FreeBSD$ */ |
---|
4 | /* $KAME: altq_subr.c,v 1.21 2003/11/06 06:32:53 kjc Exp $ */ |
---|
5 | |
---|
6 | /* |
---|
7 | * Copyright (C) 1997-2003 |
---|
8 | * Sony Computer Science Laboratories Inc. All rights reserved. |
---|
9 | * |
---|
10 | * Redistribution and use in source and binary forms, with or without |
---|
11 | * modification, are permitted provided that the following conditions |
---|
12 | * are met: |
---|
13 | * 1. Redistributions of source code must retain the above copyright |
---|
14 | * notice, this list of conditions and the following disclaimer. |
---|
15 | * 2. Redistributions in binary form must reproduce the above copyright |
---|
16 | * notice, this list of conditions and the following disclaimer in the |
---|
17 | * documentation and/or other materials provided with the distribution. |
---|
18 | * |
---|
19 | * THIS SOFTWARE IS PROVIDED BY SONY CSL AND CONTRIBUTORS ``AS IS'' AND |
---|
20 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
---|
21 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
---|
22 | * ARE DISCLAIMED. IN NO EVENT SHALL SONY CSL OR CONTRIBUTORS BE LIABLE |
---|
23 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
---|
24 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
---|
25 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
---|
26 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
---|
27 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
---|
28 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
---|
29 | * SUCH DAMAGE. |
---|
30 | */ |
---|
31 | |
---|
32 | #if defined(__FreeBSD__) || defined(__NetBSD__) |
---|
33 | #include <rtems/freebsd/local/opt_altq.h> |
---|
34 | #include <rtems/freebsd/local/opt_inet.h> |
---|
35 | #ifdef __FreeBSD__ |
---|
36 | #include <rtems/freebsd/local/opt_inet6.h> |
---|
37 | #endif |
---|
38 | #endif /* __FreeBSD__ || __NetBSD__ */ |
---|
39 | |
---|
40 | #include <rtems/freebsd/sys/param.h> |
---|
41 | #include <rtems/freebsd/sys/malloc.h> |
---|
42 | #include <rtems/freebsd/sys/mbuf.h> |
---|
43 | #include <rtems/freebsd/sys/systm.h> |
---|
44 | #include <rtems/freebsd/sys/proc.h> |
---|
45 | #include <rtems/freebsd/sys/socket.h> |
---|
46 | #include <rtems/freebsd/sys/socketvar.h> |
---|
47 | #include <rtems/freebsd/sys/kernel.h> |
---|
48 | #include <rtems/freebsd/sys/errno.h> |
---|
49 | #include <rtems/freebsd/sys/syslog.h> |
---|
50 | #include <rtems/freebsd/sys/sysctl.h> |
---|
51 | #include <rtems/freebsd/sys/queue.h> |
---|
52 | |
---|
53 | #include <rtems/freebsd/net/if.h> |
---|
54 | #include <rtems/freebsd/net/if_dl.h> |
---|
55 | #include <rtems/freebsd/net/if_types.h> |
---|
56 | #ifdef __FreeBSD__ |
---|
57 | #include <rtems/freebsd/net/vnet.h> |
---|
58 | #endif |
---|
59 | |
---|
60 | #include <rtems/freebsd/netinet/in.h> |
---|
61 | #include <rtems/freebsd/netinet/in_systm.h> |
---|
62 | #include <rtems/freebsd/netinet/ip.h> |
---|
63 | #ifdef INET6 |
---|
64 | #include <rtems/freebsd/netinet/ip6.h> |
---|
65 | #endif |
---|
66 | #include <rtems/freebsd/netinet/tcp.h> |
---|
67 | #include <rtems/freebsd/netinet/udp.h> |
---|
68 | |
---|
69 | #include <rtems/freebsd/net/pfvar.h> |
---|
70 | #include <rtems/freebsd/altq/altq.h> |
---|
71 | #ifdef ALTQ3_COMPAT |
---|
72 | #include <rtems/freebsd/altq/altq_conf.h> |
---|
73 | #endif |
---|
74 | |
---|
75 | /* machine dependent clock related includes */ |
---|
76 | #ifdef __FreeBSD__ |
---|
77 | #if __FreeBSD__ < 3 |
---|
78 | #include <rtems/freebsd/local/opt_cpu.h> /* for FreeBSD-2.2.8 to get i586_ctr_freq */ |
---|
79 | #endif |
---|
80 | #include <rtems/freebsd/sys/bus.h> |
---|
81 | #include <rtems/freebsd/sys/cpu.h> |
---|
82 | #include <rtems/freebsd/sys/eventhandler.h> |
---|
83 | #include <rtems/freebsd/machine/clock.h> |
---|
84 | #endif |
---|
85 | #if defined(__i386__) |
---|
86 | #include <rtems/freebsd/machine/cpufunc.h> /* for pentium tsc */ |
---|
87 | #include <rtems/freebsd/machine/specialreg.h> /* for CPUID_TSC */ |
---|
88 | #ifdef __FreeBSD__ |
---|
89 | #include <rtems/freebsd/machine/md_var.h> /* for cpu_feature */ |
---|
90 | #elif defined(__NetBSD__) || defined(__OpenBSD__) |
---|
91 | #include <rtems/freebsd/machine/cpu.h> /* for cpu_feature */ |
---|
92 | #endif |
---|
93 | #endif /* __i386__ */ |
---|
94 | |
---|
95 | /* |
---|
96 | * internal function prototypes |
---|
97 | */ |
---|
98 | static void tbr_timeout(void *); |
---|
99 | int (*altq_input)(struct mbuf *, int) = NULL; |
---|
100 | static struct mbuf *tbr_dequeue(struct ifaltq *, int); |
---|
101 | static int tbr_timer = 0; /* token bucket regulator timer */ |
---|
102 | #if !defined(__FreeBSD__) || (__FreeBSD_version < 600000) |
---|
103 | static struct callout tbr_callout = CALLOUT_INITIALIZER; |
---|
104 | #else |
---|
105 | static struct callout tbr_callout; |
---|
106 | #endif |
---|
107 | |
---|
108 | #ifdef ALTQ3_CLFIER_COMPAT |
---|
109 | static int extract_ports4(struct mbuf *, struct ip *, struct flowinfo_in *); |
---|
110 | #ifdef INET6 |
---|
111 | static int extract_ports6(struct mbuf *, struct ip6_hdr *, |
---|
112 | struct flowinfo_in6 *); |
---|
113 | #endif |
---|
114 | static int apply_filter4(u_int32_t, struct flow_filter *, |
---|
115 | struct flowinfo_in *); |
---|
116 | static int apply_ppfilter4(u_int32_t, struct flow_filter *, |
---|
117 | struct flowinfo_in *); |
---|
118 | #ifdef INET6 |
---|
119 | static int apply_filter6(u_int32_t, struct flow_filter6 *, |
---|
120 | struct flowinfo_in6 *); |
---|
121 | #endif |
---|
122 | static int apply_tosfilter4(u_int32_t, struct flow_filter *, |
---|
123 | struct flowinfo_in *); |
---|
124 | static u_long get_filt_handle(struct acc_classifier *, int); |
---|
125 | static struct acc_filter *filth_to_filtp(struct acc_classifier *, u_long); |
---|
126 | static u_int32_t filt2fibmask(struct flow_filter *); |
---|
127 | |
---|
128 | static void ip4f_cache(struct ip *, struct flowinfo_in *); |
---|
129 | static int ip4f_lookup(struct ip *, struct flowinfo_in *); |
---|
130 | static int ip4f_init(void); |
---|
131 | static struct ip4_frag *ip4f_alloc(void); |
---|
132 | static void ip4f_free(struct ip4_frag *); |
---|
133 | #endif /* ALTQ3_CLFIER_COMPAT */ |
---|
134 | |
---|
135 | /* |
---|
136 | * alternate queueing support routines |
---|
137 | */ |
---|
138 | |
---|
139 | /* look up the queue state by the interface name and the queueing type. */ |
---|
140 | void * |
---|
141 | altq_lookup(name, type) |
---|
142 | char *name; |
---|
143 | int type; |
---|
144 | { |
---|
145 | struct ifnet *ifp; |
---|
146 | |
---|
147 | if ((ifp = ifunit(name)) != NULL) { |
---|
148 | /* read if_snd unlocked */ |
---|
149 | if (type != ALTQT_NONE && ifp->if_snd.altq_type == type) |
---|
150 | return (ifp->if_snd.altq_disc); |
---|
151 | } |
---|
152 | |
---|
153 | return NULL; |
---|
154 | } |
---|
155 | |
---|
156 | int |
---|
157 | altq_attach(ifq, type, discipline, enqueue, dequeue, request, clfier, classify) |
---|
158 | struct ifaltq *ifq; |
---|
159 | int type; |
---|
160 | void *discipline; |
---|
161 | int (*enqueue)(struct ifaltq *, struct mbuf *, struct altq_pktattr *); |
---|
162 | struct mbuf *(*dequeue)(struct ifaltq *, int); |
---|
163 | int (*request)(struct ifaltq *, int, void *); |
---|
164 | void *clfier; |
---|
165 | void *(*classify)(void *, struct mbuf *, int); |
---|
166 | { |
---|
167 | IFQ_LOCK(ifq); |
---|
168 | if (!ALTQ_IS_READY(ifq)) { |
---|
169 | IFQ_UNLOCK(ifq); |
---|
170 | return ENXIO; |
---|
171 | } |
---|
172 | |
---|
173 | #ifdef ALTQ3_COMPAT |
---|
174 | /* |
---|
175 | * pfaltq can override the existing discipline, but altq3 cannot. |
---|
176 | * check these if clfier is not NULL (which implies altq3). |
---|
177 | */ |
---|
178 | if (clfier != NULL) { |
---|
179 | if (ALTQ_IS_ENABLED(ifq)) { |
---|
180 | IFQ_UNLOCK(ifq); |
---|
181 | return EBUSY; |
---|
182 | } |
---|
183 | if (ALTQ_IS_ATTACHED(ifq)) { |
---|
184 | IFQ_UNLOCK(ifq); |
---|
185 | return EEXIST; |
---|
186 | } |
---|
187 | } |
---|
188 | #endif |
---|
189 | ifq->altq_type = type; |
---|
190 | ifq->altq_disc = discipline; |
---|
191 | ifq->altq_enqueue = enqueue; |
---|
192 | ifq->altq_dequeue = dequeue; |
---|
193 | ifq->altq_request = request; |
---|
194 | ifq->altq_clfier = clfier; |
---|
195 | ifq->altq_classify = classify; |
---|
196 | ifq->altq_flags &= (ALTQF_CANTCHANGE|ALTQF_ENABLED); |
---|
197 | #ifdef ALTQ3_COMPAT |
---|
198 | #ifdef ALTQ_KLD |
---|
199 | altq_module_incref(type); |
---|
200 | #endif |
---|
201 | #endif |
---|
202 | IFQ_UNLOCK(ifq); |
---|
203 | return 0; |
---|
204 | } |
---|
205 | |
---|
206 | int |
---|
207 | altq_detach(ifq) |
---|
208 | struct ifaltq *ifq; |
---|
209 | { |
---|
210 | IFQ_LOCK(ifq); |
---|
211 | |
---|
212 | if (!ALTQ_IS_READY(ifq)) { |
---|
213 | IFQ_UNLOCK(ifq); |
---|
214 | return ENXIO; |
---|
215 | } |
---|
216 | if (ALTQ_IS_ENABLED(ifq)) { |
---|
217 | IFQ_UNLOCK(ifq); |
---|
218 | return EBUSY; |
---|
219 | } |
---|
220 | if (!ALTQ_IS_ATTACHED(ifq)) { |
---|
221 | IFQ_UNLOCK(ifq); |
---|
222 | return (0); |
---|
223 | } |
---|
224 | #ifdef ALTQ3_COMPAT |
---|
225 | #ifdef ALTQ_KLD |
---|
226 | altq_module_declref(ifq->altq_type); |
---|
227 | #endif |
---|
228 | #endif |
---|
229 | |
---|
230 | ifq->altq_type = ALTQT_NONE; |
---|
231 | ifq->altq_disc = NULL; |
---|
232 | ifq->altq_enqueue = NULL; |
---|
233 | ifq->altq_dequeue = NULL; |
---|
234 | ifq->altq_request = NULL; |
---|
235 | ifq->altq_clfier = NULL; |
---|
236 | ifq->altq_classify = NULL; |
---|
237 | ifq->altq_flags &= ALTQF_CANTCHANGE; |
---|
238 | |
---|
239 | IFQ_UNLOCK(ifq); |
---|
240 | return 0; |
---|
241 | } |
---|
242 | |
---|
243 | int |
---|
244 | altq_enable(ifq) |
---|
245 | struct ifaltq *ifq; |
---|
246 | { |
---|
247 | int s; |
---|
248 | |
---|
249 | IFQ_LOCK(ifq); |
---|
250 | |
---|
251 | if (!ALTQ_IS_READY(ifq)) { |
---|
252 | IFQ_UNLOCK(ifq); |
---|
253 | return ENXIO; |
---|
254 | } |
---|
255 | if (ALTQ_IS_ENABLED(ifq)) { |
---|
256 | IFQ_UNLOCK(ifq); |
---|
257 | return 0; |
---|
258 | } |
---|
259 | |
---|
260 | #ifdef __NetBSD__ |
---|
261 | s = splnet(); |
---|
262 | #else |
---|
263 | s = splimp(); |
---|
264 | #endif |
---|
265 | IFQ_PURGE_NOLOCK(ifq); |
---|
266 | ASSERT(ifq->ifq_len == 0); |
---|
267 | ifq->ifq_drv_maxlen = 0; /* disable bulk dequeue */ |
---|
268 | ifq->altq_flags |= ALTQF_ENABLED; |
---|
269 | if (ifq->altq_clfier != NULL) |
---|
270 | ifq->altq_flags |= ALTQF_CLASSIFY; |
---|
271 | splx(s); |
---|
272 | |
---|
273 | IFQ_UNLOCK(ifq); |
---|
274 | return 0; |
---|
275 | } |
---|
276 | |
---|
277 | int |
---|
278 | altq_disable(ifq) |
---|
279 | struct ifaltq *ifq; |
---|
280 | { |
---|
281 | int s; |
---|
282 | |
---|
283 | IFQ_LOCK(ifq); |
---|
284 | if (!ALTQ_IS_ENABLED(ifq)) { |
---|
285 | IFQ_UNLOCK(ifq); |
---|
286 | return 0; |
---|
287 | } |
---|
288 | |
---|
289 | #ifdef __NetBSD__ |
---|
290 | s = splnet(); |
---|
291 | #else |
---|
292 | s = splimp(); |
---|
293 | #endif |
---|
294 | IFQ_PURGE_NOLOCK(ifq); |
---|
295 | ASSERT(ifq->ifq_len == 0); |
---|
296 | ifq->altq_flags &= ~(ALTQF_ENABLED|ALTQF_CLASSIFY); |
---|
297 | splx(s); |
---|
298 | |
---|
299 | IFQ_UNLOCK(ifq); |
---|
300 | return 0; |
---|
301 | } |
---|
302 | |
---|
303 | #ifdef ALTQ_DEBUG |
---|
/*
 * Assertion failure handler used by the ALTQ ASSERT() macro when
 * ALTQ_DEBUG is set: log the failed expression and its location,
 * then panic.  Never returns.
 */
void
altq_assert(const char *file, int line, const char *failedexpr)
{
    (void)printf("altq assertion \"%s\" failed: file \"%s\", line %d\n",
        failedexpr, file, line);
    panic("altq assertion");
    /* NOTREACHED */
}
---|
314 | #endif |
---|
315 | |
---|
316 | /* |
---|
317 | * internal representation of token bucket parameters |
---|
318 | * rate: byte_per_unittime << 32 |
---|
319 | * (((bits_per_sec) / 8) << 32) / machclk_freq |
---|
320 | * depth: byte << 32 |
---|
321 | * |
---|
322 | */ |
---|
323 | #define TBR_SHIFT 32 |
---|
324 | #define TBR_SCALE(x) ((int64_t)(x) << TBR_SHIFT) |
---|
325 | #define TBR_UNSCALE(x) ((x) >> TBR_SHIFT) |
---|
326 | |
---|
/*
 * Token bucket regulator dequeue hook, interposed in front of the
 * discipline's own dequeue routine (or the plain ifq when ALTQ is not
 * enabled).  A packet may leave only while the bucket's token balance
 * is positive; tokens refill at tbr_rate per machine-clock tick,
 * capped at tbr_depth (all <<32 fixed point, see TBR_SCALE).
 *
 * op is ALTDQ_POLL (peek) or ALTDQ_REMOVE (actual dequeue).
 * Returns the mbuf, or NULL when the regulator forbids dequeue.
 * Caller must hold the ifq lock (asserted below).
 */
static struct mbuf *
tbr_dequeue(ifq, op)
    struct ifaltq *ifq;
    int op;
{
    struct tb_regulator *tbr;
    struct mbuf *m;
    int64_t interval;
    u_int64_t now;

    IFQ_LOCK_ASSERT(ifq);
    tbr = ifq->altq_tbr;
    if (op == ALTDQ_REMOVE && tbr->tbr_lastop == ALTDQ_POLL) {
        /* if this is a remove after poll, bypass tbr check */
    } else {
        /* update token only when it is negative */
        if (tbr->tbr_token <= 0) {
            now = read_machclk();
            interval = now - tbr->tbr_last;
            if (interval >= tbr->tbr_filluptime)
                /* idle long enough to refill completely */
                tbr->tbr_token = tbr->tbr_depth;
            else {
                tbr->tbr_token += interval * tbr->tbr_rate;
                if (tbr->tbr_token > tbr->tbr_depth)
                    tbr->tbr_token = tbr->tbr_depth;
            }
            tbr->tbr_last = now;
        }
        /* if token is still negative, don't allow dequeue */
        if (tbr->tbr_token <= 0)
            return (NULL);
    }

    if (ALTQ_IS_ENABLED(ifq))
        m = (*ifq->altq_dequeue)(ifq, op);
    else {
        /* no discipline enabled: fall back to the raw ifq */
        if (op == ALTDQ_POLL)
            _IF_POLL(ifq, m);
        else
            _IF_DEQUEUE(ifq, m);
    }

    /* charge the bucket for the packet only on an actual removal */
    if (m != NULL && op == ALTDQ_REMOVE)
        tbr->tbr_token -= TBR_SCALE(m_pktlen(m));
    tbr->tbr_lastop = op;
    return (m);
}
---|
374 | |
---|
/*
 * set a token bucket regulator.
 * if the specified rate is zero, the token bucket regulator is deleted.
 *
 * profile->rate is in bits/sec, profile->depth in bytes; both are
 * converted to the <<32 fixed-point internal representation.
 * Returns 0 on success, ENXIO if no CPU clock is available, ENOENT
 * when asked to delete a regulator that does not exist.
 */
int
tbr_set(ifq, profile)
    struct ifaltq *ifq;
    struct tb_profile *profile;
{
    struct tb_regulator *tbr, *otbr;

    /* install the global tbr dequeue hook on first use */
    if (tbr_dequeue_ptr == NULL)
        tbr_dequeue_ptr = tbr_dequeue;

    if (machclk_freq == 0)
        init_machclk();
    if (machclk_freq == 0) {
        printf("tbr_set: no cpu clock available!\n");
        return (ENXIO);
    }

    IFQ_LOCK(ifq);
    if (profile->rate == 0) {
        /* delete this tbr */
        if ((tbr = ifq->altq_tbr) == NULL) {
            IFQ_UNLOCK(ifq);
            return (ENOENT);
        }
        ifq->altq_tbr = NULL;
        free(tbr, M_DEVBUF);
        IFQ_UNLOCK(ifq);
        return (0);
    }

    /* drop the lock across the M_WAITOK allocation, which may sleep */
    IFQ_UNLOCK(ifq);
    tbr = malloc(sizeof(struct tb_regulator),
           M_DEVBUF, M_WAITOK);
    if (tbr == NULL) {      /* can not happen */
        /*
         * NOTE(review): unreachable — M_WAITOK never returns NULL.
         * If it could be reached, this IFQ_UNLOCK would be a double
         * unlock, since the lock was already released just above.
         */
        IFQ_UNLOCK(ifq);
        return (ENOMEM);
    }
    bzero(tbr, sizeof(struct tb_regulator));

    /* bits/sec -> bytes per machine-clock tick, <<32 fixed point */
    tbr->tbr_rate = TBR_SCALE(profile->rate / 8) / machclk_freq;
    tbr->tbr_depth = TBR_SCALE(profile->depth);
    if (tbr->tbr_rate > 0)
        tbr->tbr_filluptime = tbr->tbr_depth / tbr->tbr_rate;
    else
        /* zero rate after scaling: never refills on its own */
        tbr->tbr_filluptime = 0xffffffffffffffffLL;
    tbr->tbr_token = tbr->tbr_depth;    /* start with a full bucket */
    tbr->tbr_last = read_machclk();
    tbr->tbr_lastop = ALTDQ_REMOVE;

    IFQ_LOCK(ifq);
    otbr = ifq->altq_tbr;
    ifq->altq_tbr = tbr;    /* set the new tbr */

    if (otbr != NULL)
        free(otbr, M_DEVBUF);
    else {
        /* first regulator anywhere: start the periodic kick timer */
        if (tbr_timer == 0) {
            CALLOUT_RESET(&tbr_callout, 1, tbr_timeout, (void *)0);
            tbr_timer = 1;
        }
    }
    IFQ_UNLOCK(ifq);
    return (0);
}
---|
443 | |
---|
/*
 * tbr_timeout goes through the interface list, and kicks the drivers
 * if necessary.
 *
 * Runs once per tick while any interface has a token bucket regulator
 * enabled; it restarts output on interfaces whose queue is non-empty,
 * and stops rearming itself once no regulator remains active.
 *
 * MPSAFE
 */
static void
tbr_timeout(arg)
    void *arg;
{
#if defined(__FreeBSD__)
    VNET_ITERATOR_DECL(vnet_iter);
#endif
    struct ifnet *ifp;
    int active, s;

    active = 0;
#ifdef __NetBSD__
    s = splnet();
#else
    s = splimp();
#endif
#if defined(__FreeBSD__) && (__FreeBSD_version >= 500000)
    IFNET_RLOCK_NOSLEEP();
    VNET_LIST_RLOCK_NOSLEEP();
    VNET_FOREACH(vnet_iter) {
        CURVNET_SET(vnet_iter);
#endif
        for (ifp = TAILQ_FIRST(&V_ifnet); ifp;
            ifp = TAILQ_NEXT(ifp, if_list)) {
            /* read from if_snd unlocked */
            if (!TBR_IS_ENABLED(&ifp->if_snd))
                continue;
            active++;
            /* restart output if the driver has work queued */
            if (!IFQ_IS_EMPTY(&ifp->if_snd) &&
                ifp->if_start != NULL)
                (*ifp->if_start)(ifp);
        }
#if defined(__FreeBSD__) && (__FreeBSD_version >= 500000)
        CURVNET_RESTORE();
    }
    VNET_LIST_RUNLOCK_NOSLEEP();
    IFNET_RUNLOCK_NOSLEEP();
#endif
    splx(s);
    /* rearm only while at least one interface still runs a regulator */
    if (active > 0)
        CALLOUT_RESET(&tbr_callout, 1, tbr_timeout, (void *)0);
    else
        tbr_timer = 0;  /* don't need tbr_timer anymore */
#if defined(__alpha__) && !defined(ALTQ_NOPCC)
    {
        /*
         * XXX read out the machine dependent clock once a second
         * to detect counter wrap-around.
         */
        static u_int cnt;

        if (++cnt >= hz) {
            (void)read_machclk();
            cnt = 0;
        }
    }
#endif /* __alpha__ && !ALTQ_NOPCC */
}
---|
508 | |
---|
509 | /* |
---|
510 | * get token bucket regulator profile |
---|
511 | */ |
---|
512 | int |
---|
513 | tbr_get(ifq, profile) |
---|
514 | struct ifaltq *ifq; |
---|
515 | struct tb_profile *profile; |
---|
516 | { |
---|
517 | struct tb_regulator *tbr; |
---|
518 | |
---|
519 | IFQ_LOCK(ifq); |
---|
520 | if ((tbr = ifq->altq_tbr) == NULL) { |
---|
521 | profile->rate = 0; |
---|
522 | profile->depth = 0; |
---|
523 | } else { |
---|
524 | profile->rate = |
---|
525 | (u_int)TBR_UNSCALE(tbr->tbr_rate * 8 * machclk_freq); |
---|
526 | profile->depth = (u_int)TBR_UNSCALE(tbr->tbr_depth); |
---|
527 | } |
---|
528 | IFQ_UNLOCK(ifq); |
---|
529 | return (0); |
---|
530 | } |
---|
531 | |
---|
/*
 * attach a discipline to the interface.  if one already exists, it is
 * overridden.
 * Locking is done in the discipline specific attach functions.  Basically
 * they call back to altq_attach which takes care of the attach and locking.
 *
 * Dispatches on a->scheduler; each discipline is compiled in only when
 * its ALTQ_* kernel option is set.  Returns the discipline's error, or
 * ENXIO for an unknown/unconfigured scheduler.
 */
int
altq_pfattach(struct pf_altq *a)
{
    int error = 0;

    switch (a->scheduler) {
    case ALTQT_NONE:
        /* explicitly no discipline: nothing to attach */
        break;
#ifdef ALTQ_CBQ
    case ALTQT_CBQ:
        error = cbq_pfattach(a);
        break;
#endif
#ifdef ALTQ_PRIQ
    case ALTQT_PRIQ:
        error = priq_pfattach(a);
        break;
#endif
#ifdef ALTQ_HFSC
    case ALTQT_HFSC:
        error = hfsc_pfattach(a);
        break;
#endif
    default:
        error = ENXIO;  /* unknown or not compiled in */
    }

    return (error);
}
---|
567 | |
---|
/*
 * detach a discipline from the interface.
 * it is possible that the discipline was already overridden by another
 * discipline.
 *
 * Disables the discipline first if needed, then detaches it.
 * Returns EINVAL if the interface no longer exists, 0 if the
 * discipline is no longer the active one, otherwise the result of
 * altq_disable()/altq_detach().
 */
int
altq_pfdetach(struct pf_altq *a)
{
    struct ifnet *ifp;
    int s, error = 0;

    if ((ifp = ifunit(a->ifname)) == NULL)
        return (EINVAL);

    /* if this discipline is no longer referenced, just return */
    /* read unlocked from if_snd */
    if (a->altq_disc == NULL || a->altq_disc != ifp->if_snd.altq_disc)
        return (0);

#ifdef __NetBSD__
    s = splnet();
#else
    s = splimp();
#endif
    /* read unlocked from if_snd, _disable and _detach take care */
    if (ALTQ_IS_ENABLED(&ifp->if_snd))
        error = altq_disable(&ifp->if_snd);
    if (error == 0)
        error = altq_detach(&ifp->if_snd);
    splx(s);

    return (error);
}
---|
601 | |
---|
/*
 * add a discipline or a queue
 * Locking is done in the discipline specific functions with regards to
 * malloc with WAITOK, also it is not yet clear which lock to use.
 *
 * A non-empty a->qname means "add a queue to an existing discipline"
 * and is delegated to altq_add_queue(); otherwise a new discipline is
 * created.  Panics if no CPU clock source can be initialized, since
 * the token bucket arithmetic depends on machclk_freq.
 */
int
altq_add(struct pf_altq *a)
{
    int error = 0;

    if (a->qname[0] != 0)
        return (altq_add_queue(a));

    if (machclk_freq == 0)
        init_machclk();
    if (machclk_freq == 0)
        panic("altq_add: no cpu clock");

    switch (a->scheduler) {
#ifdef ALTQ_CBQ
    case ALTQT_CBQ:
        error = cbq_add_altq(a);
        break;
#endif
#ifdef ALTQ_PRIQ
    case ALTQT_PRIQ:
        error = priq_add_altq(a);
        break;
#endif
#ifdef ALTQ_HFSC
    case ALTQT_HFSC:
        error = hfsc_add_altq(a);
        break;
#endif
    default:
        error = ENXIO;  /* unknown or not compiled in */
    }

    return (error);
}
---|
642 | |
---|
/*
 * remove a discipline or a queue
 * It is yet unclear what lock to use to protect this operation, the
 * discipline specific functions will determine and grab it
 *
 * A non-empty a->qname removes a single queue (altq_remove_queue());
 * otherwise the whole discipline is removed.  Returns the discipline's
 * error, or ENXIO for an unknown/unconfigured scheduler.
 */
int
altq_remove(struct pf_altq *a)
{
    int error = 0;

    if (a->qname[0] != 0)
        return (altq_remove_queue(a));

    switch (a->scheduler) {
#ifdef ALTQ_CBQ
    case ALTQT_CBQ:
        error = cbq_remove_altq(a);
        break;
#endif
#ifdef ALTQ_PRIQ
    case ALTQT_PRIQ:
        error = priq_remove_altq(a);
        break;
#endif
#ifdef ALTQ_HFSC
    case ALTQT_HFSC:
        error = hfsc_remove_altq(a);
        break;
#endif
    default:
        error = ENXIO;  /* unknown or not compiled in */
    }

    return (error);
}
---|
678 | |
---|
/*
 * add a queue to the discipline
 * It is yet unclear what lock to use to protect this operation, the
 * discipline specific functions will determine and grab it
 *
 * Dispatches to the per-discipline add-queue handler based on
 * a->scheduler; returns its error, or ENXIO for an unknown scheduler.
 */
int
altq_add_queue(struct pf_altq *a)
{
    int error = 0;

    switch (a->scheduler) {
#ifdef ALTQ_CBQ
    case ALTQT_CBQ:
        error = cbq_add_queue(a);
        break;
#endif
#ifdef ALTQ_PRIQ
    case ALTQT_PRIQ:
        error = priq_add_queue(a);
        break;
#endif
#ifdef ALTQ_HFSC
    case ALTQT_HFSC:
        error = hfsc_add_queue(a);
        break;
#endif
    default:
        error = ENXIO;  /* unknown or not compiled in */
    }

    return (error);
}
---|
711 | |
---|
/*
 * remove a queue from the discipline
 * It is yet unclear what lock to use to protect this operation, the
 * discipline specific functions will determine and grab it
 *
 * Dispatches to the per-discipline remove-queue handler based on
 * a->scheduler; returns its error, or ENXIO for an unknown scheduler.
 */
int
altq_remove_queue(struct pf_altq *a)
{
    int error = 0;

    switch (a->scheduler) {
#ifdef ALTQ_CBQ
    case ALTQT_CBQ:
        error = cbq_remove_queue(a);
        break;
#endif
#ifdef ALTQ_PRIQ
    case ALTQT_PRIQ:
        error = priq_remove_queue(a);
        break;
#endif
#ifdef ALTQ_HFSC
    case ALTQT_HFSC:
        error = hfsc_remove_queue(a);
        break;
#endif
    default:
        error = ENXIO;  /* unknown or not compiled in */
    }

    return (error);
}
---|
744 | |
---|
/*
 * get queue statistics
 * Locking is done in the discipline specific functions with regards to
 * copyout operations, also it is not yet clear which lock to use.
 *
 * ubuf/nbytes describe a user buffer; the discipline copies out its
 * stats and updates *nbytes.  Returns the discipline's error, or
 * ENXIO for an unknown/unconfigured scheduler.
 */
int
altq_getqstats(struct pf_altq *a, void *ubuf, int *nbytes)
{
    int error = 0;

    switch (a->scheduler) {
#ifdef ALTQ_CBQ
    case ALTQT_CBQ:
        error = cbq_getqstats(a, ubuf, nbytes);
        break;
#endif
#ifdef ALTQ_PRIQ
    case ALTQT_PRIQ:
        error = priq_getqstats(a, ubuf, nbytes);
        break;
#endif
#ifdef ALTQ_HFSC
    case ALTQT_HFSC:
        error = hfsc_getqstats(a, ubuf, nbytes);
        break;
#endif
    default:
        error = ENXIO;  /* unknown or not compiled in */
    }

    return (error);
}
---|
777 | |
---|
778 | /* |
---|
779 | * read and write diffserv field in IPv4 or IPv6 header |
---|
780 | */ |
---|
781 | u_int8_t |
---|
782 | read_dsfield(m, pktattr) |
---|
783 | struct mbuf *m; |
---|
784 | struct altq_pktattr *pktattr; |
---|
785 | { |
---|
786 | struct mbuf *m0; |
---|
787 | u_int8_t ds_field = 0; |
---|
788 | |
---|
789 | if (pktattr == NULL || |
---|
790 | (pktattr->pattr_af != AF_INET && pktattr->pattr_af != AF_INET6)) |
---|
791 | return ((u_int8_t)0); |
---|
792 | |
---|
793 | /* verify that pattr_hdr is within the mbuf data */ |
---|
794 | for (m0 = m; m0 != NULL; m0 = m0->m_next) |
---|
795 | if ((pktattr->pattr_hdr >= m0->m_data) && |
---|
796 | (pktattr->pattr_hdr < m0->m_data + m0->m_len)) |
---|
797 | break; |
---|
798 | if (m0 == NULL) { |
---|
799 | /* ick, pattr_hdr is stale */ |
---|
800 | pktattr->pattr_af = AF_UNSPEC; |
---|
801 | #ifdef ALTQ_DEBUG |
---|
802 | printf("read_dsfield: can't locate header!\n"); |
---|
803 | #endif |
---|
804 | return ((u_int8_t)0); |
---|
805 | } |
---|
806 | |
---|
807 | if (pktattr->pattr_af == AF_INET) { |
---|
808 | struct ip *ip = (struct ip *)pktattr->pattr_hdr; |
---|
809 | |
---|
810 | if (ip->ip_v != 4) |
---|
811 | return ((u_int8_t)0); /* version mismatch! */ |
---|
812 | ds_field = ip->ip_tos; |
---|
813 | } |
---|
814 | #ifdef INET6 |
---|
815 | else if (pktattr->pattr_af == AF_INET6) { |
---|
816 | struct ip6_hdr *ip6 = (struct ip6_hdr *)pktattr->pattr_hdr; |
---|
817 | u_int32_t flowlabel; |
---|
818 | |
---|
819 | flowlabel = ntohl(ip6->ip6_flow); |
---|
820 | if ((flowlabel >> 28) != 6) |
---|
821 | return ((u_int8_t)0); /* version mismatch! */ |
---|
822 | ds_field = (flowlabel >> 20) & 0xff; |
---|
823 | } |
---|
824 | #endif |
---|
825 | return (ds_field); |
---|
826 | } |
---|
827 | |
---|
/*
 * Write 'dsfield' into the DS (TOS / traffic class) field of the
 * packet described by pktattr, preserving the two low CU/ECN bits for
 * IPv4 and updating the IPv4 header checksum incrementally (RFC 1624).
 * Silently returns on: no/unknown address family, stale pattr_hdr
 * (no longer inside the mbuf chain — pattr_af is then reset to
 * AF_UNSPEC), or an IP version mismatch.
 */
void
write_dsfield(struct mbuf *m, struct altq_pktattr *pktattr, u_int8_t dsfield)
{
    struct mbuf *m0;

    if (pktattr == NULL ||
        (pktattr->pattr_af != AF_INET && pktattr->pattr_af != AF_INET6))
        return;

    /* verify that pattr_hdr is within the mbuf data */
    for (m0 = m; m0 != NULL; m0 = m0->m_next)
        if ((pktattr->pattr_hdr >= m0->m_data) &&
            (pktattr->pattr_hdr < m0->m_data + m0->m_len))
            break;
    if (m0 == NULL) {
        /* ick, pattr_hdr is stale */
        pktattr->pattr_af = AF_UNSPEC;
#ifdef ALTQ_DEBUG
        printf("write_dsfield: can't locate header!\n");
#endif
        return;
    }

    if (pktattr->pattr_af == AF_INET) {
        struct ip *ip = (struct ip *)pktattr->pattr_hdr;
        u_int8_t old;
        int32_t sum;

        if (ip->ip_v != 4)
            return;     /* version mismatch! */
        old = ip->ip_tos;
        dsfield |= old & 3; /* leave CU bits */
        if (old == dsfield)
            return;     /* unchanged: skip the checksum update */
        ip->ip_tos = dsfield;
        /*
         * update checksum (from RFC1624)
         *         HC' = ~(~HC + ~m + m')
         */
        sum = ~ntohs(ip->ip_sum) & 0xffff;
        sum += 0xff00 + (~old & 0xff) + dsfield;
        sum = (sum >> 16) + (sum & 0xffff);
        sum += (sum >> 16);  /* add carry */

        ip->ip_sum = htons(~sum & 0xffff);
    }
#ifdef INET6
    else if (pktattr->pattr_af == AF_INET6) {
        struct ip6_hdr *ip6 = (struct ip6_hdr *)pktattr->pattr_hdr;
        u_int32_t flowlabel;

        /* splice the new class into bits 20-27 of the flow word */
        flowlabel = ntohl(ip6->ip6_flow);
        if ((flowlabel >> 28) != 6)
            return;     /* version mismatch! */
        flowlabel = (flowlabel & 0xf03fffff) | (dsfield << 20);
        ip6->ip6_flow = htonl(flowlabel);
    }
#endif
    return;
}
---|
888 | |
---|
889 | |
---|
890 | /* |
---|
891 | * high resolution clock support taking advantage of a machine dependent |
---|
892 | * high resolution time counter (e.g., timestamp counter of intel pentium). |
---|
893 | * we assume |
---|
894 | * - 64-bit-long monotonically-increasing counter |
---|
895 | * - frequency range is 100M-4GHz (CPU speed) |
---|
896 | */ |
---|
897 | /* if pcc is not available or disabled, emulate 256MHz using microtime() */ |
---|
898 | #define MACHCLK_SHIFT 8 |
---|
899 | |
---|
900 | int machclk_usepcc; |
---|
901 | u_int32_t machclk_freq; |
---|
902 | u_int32_t machclk_per_tick; |
---|
903 | |
---|
904 | #ifdef __alpha__ |
---|
905 | #ifdef __FreeBSD__ |
---|
906 | extern u_int32_t cycles_per_sec; /* alpha cpu clock frequency */ |
---|
907 | #elif defined(__NetBSD__) || defined(__OpenBSD__) |
---|
908 | extern u_int64_t cycles_per_usec; /* alpha cpu clock frequency */ |
---|
909 | #endif |
---|
910 | #endif /* __alpha__ */ |
---|
911 | #if defined(__i386__) && defined(__NetBSD__) |
---|
912 | extern u_int64_t cpu_tsc_freq; |
---|
#endif /* __i386__ && __NetBSD__ */
---|
914 | |
---|
915 | #if (__FreeBSD_version >= 700035) |
---|
/* Update TSC freq with the value indicated by the caller. */
/*
 * cpufreq post-change event handler: re-derive machclk_freq after a
 * CPU frequency transition, since the TSC rate may have changed.
 * No-op on a failed transition or (on newer FreeBSD/x86) when the
 * TSC is P-state invariant.
 */
static void
tsc_freq_changed(void *arg, const struct cf_level *level, int status)
{
    /* If there was an error during the transition, don't do anything. */
    if (status != 0)
        return;

#if (__FreeBSD_version >= 701102) && (defined(__amd64__) || defined(__i386__))
    /* If TSC is P-state invariant, don't do anything. */
    if (tsc_is_invariant)
        return;
#endif

    /* Total setting for this level gives the new frequency in MHz. */
    init_machclk();
}
/* run after all other cpufreq_post_change handlers */
EVENTHANDLER_DEFINE(cpufreq_post_change, tsc_freq_changed, NULL,
    EVENTHANDLER_PRI_LAST);
---|
935 | #endif /* __FreeBSD_version >= 700035 */ |
---|
936 | |
---|
/*
 * One-time machine clock setup: initialize the tbr callout and decide
 * whether the CPU cycle counter (Pentium TSC / Alpha PCC) may be used
 * as the clock source (machclk_usepcc).
 */
static void
init_machclk_setup(void)
{
#if (__FreeBSD_version >= 600000)
	callout_init(&tbr_callout, 0);
#endif

	/* start optimistic: assume the cycle counter is usable */
	machclk_usepcc = 1;

#if (!defined(__i386__) && !defined(__alpha__)) || defined(ALTQ_NOPCC)
	/* no cycle counter on this arch, or explicitly disabled by config */
	machclk_usepcc = 0;
#endif
#if defined(__FreeBSD__) && defined(SMP)
	/* per-CPU counters are not usable with multiple processors */
	machclk_usepcc = 0;
#endif
#if defined(__NetBSD__) && defined(MULTIPROCESSOR)
	machclk_usepcc = 0;
#endif
#ifdef __i386__
	/* check if TSC is available */
	if (machclk_usepcc == 1 && ((cpu_feature & CPUID_TSC) == 0 ||
	    tsc_is_broken))
		machclk_usepcc = 0;
#endif
}
---|
962 | |
---|
/*
 * Initialize the machine clock: choose the clock source, then set
 * machclk_freq (Hz) and machclk_per_tick (machine clock cycles per
 * kernel tick).  Safe to call again later (e.g. from the cpufreq
 * event handler) to refresh the values.
 */
void
init_machclk(void)
{
	static int called;

	/* Call one-time initialization function. */
	if (!called) {
		init_machclk_setup();
		called = 1;
	}

	if (machclk_usepcc == 0) {
		/* emulate 256MHz using microtime() */
		machclk_freq = 1000000 << MACHCLK_SHIFT;
		machclk_per_tick = machclk_freq / hz;
#ifdef ALTQ_DEBUG
		printf("altq: emulate %uHz cpu clock\n", machclk_freq);
#endif
		return;
	}

	/*
	 * if the clock frequency (of Pentium TSC or Alpha PCC) is
	 * accessible, just use it.
	 */
#ifdef __i386__
#ifdef __FreeBSD__
#if (__FreeBSD_version > 300000)
	machclk_freq = tsc_freq;
#else
	machclk_freq = i586_ctr_freq;
#endif
#elif defined(__NetBSD__)
	machclk_freq = (u_int32_t)cpu_tsc_freq;
#elif defined(__OpenBSD__) && (defined(I586_CPU) || defined(I686_CPU))
	machclk_freq = pentium_mhz * 1000000;
#endif
#elif defined(__alpha__)
#ifdef __FreeBSD__
	machclk_freq = cycles_per_sec;
#elif defined(__NetBSD__) || defined(__OpenBSD__)
	machclk_freq = (u_int32_t)(cycles_per_usec * 1000000);
#endif
#endif /* __alpha__ */

	/*
	 * if we don't know the clock frequency, measure it:
	 * sleep for about one second and divide the cycle counter
	 * delta by the microtime() delta.
	 */
	if (machclk_freq == 0) {
		static int wait;
		struct timeval tv_start, tv_end;
		u_int64_t start, end, diff;
		int timo;

		microtime(&tv_start);
		start = read_machclk();
		timo = hz;	/* 1 sec */
		(void)tsleep(&wait, PWAIT | PCATCH, "init_machclk", timo);
		microtime(&tv_end);
		end = read_machclk();
		/* elapsed wall time in microseconds */
		diff = (u_int64_t)(tv_end.tv_sec - tv_start.tv_sec) * 1000000
		    + tv_end.tv_usec - tv_start.tv_usec;
		if (diff != 0)
			machclk_freq = (u_int)((end - start) * 1000000 / diff);
	}

	machclk_per_tick = machclk_freq / hz;

#ifdef ALTQ_DEBUG
	printf("altq: CPU clock: %uHz\n", machclk_freq);
#endif
}
---|
1035 | |
---|
#if defined(__OpenBSD__) && defined(__i386__)
/*
 * Read the Pentium time stamp counter.  The instruction is emitted as
 * raw opcode bytes (0x0f 0x31 is RDTSC); the "=A" constraint returns
 * the 64-bit result from the EDX:EAX register pair.
 */
static __inline u_int64_t
rdtsc(void)
{
	u_int64_t rv;
	__asm __volatile(".byte 0x0f, 0x31" : "=A" (rv));
	return (rv);
}
#endif /* __OpenBSD__ && __i386__ */
---|
1045 | |
---|
/*
 * Read the current machine clock value as a 64-bit monotonically
 * increasing cycle count.  Uses the CPU cycle counter when
 * machclk_usepcc is set; otherwise returns microseconds-since-boot
 * shifted left by MACHCLK_SHIFT (the emulated 256MHz clock).
 */
u_int64_t
read_machclk(void)
{
	u_int64_t val;

	if (machclk_usepcc) {
#if defined(__i386__)
		val = rdtsc();
#elif defined(__alpha__)
		static u_int32_t last_pcc, upper;
		u_int32_t pcc;

		/*
		 * for alpha, make a 64bit counter value out of the 32bit
		 * alpha processor cycle counter.
		 * read_machclk must be called within a half of its
		 * wrap-around cycle (about 5 sec for 400MHz cpu) to properly
		 * detect a counter wrap-around.
		 * tbr_timeout calls read_machclk once a second.
		 */
		pcc = (u_int32_t)alpha_rpcc();
		if (pcc <= last_pcc)
			upper++;	/* counter wrapped: bump the high word */
		last_pcc = pcc;
		val = ((u_int64_t)upper << 32) + pcc;
#else
		panic("read_machclk");
#endif
	} else {
		struct timeval tv;

		/* time since boot in microseconds, scaled by MACHCLK_SHIFT */
		microtime(&tv);
		val = (((u_int64_t)(tv.tv_sec - boottime.tv_sec) * 1000000
		    + tv.tv_usec) << MACHCLK_SHIFT);
	}
	return (val);
}
---|
1083 | |
---|
1084 | #ifdef ALTQ3_CLFIER_COMPAT |
---|
1085 | |
---|
1086 | #ifndef IPPROTO_ESP |
---|
1087 | #define IPPROTO_ESP 50 /* encapsulating security payload */ |
---|
1088 | #endif |
---|
1089 | #ifndef IPPROTO_AH |
---|
1090 | #define IPPROTO_AH 51 /* authentication header */ |
---|
1091 | #endif |
---|
1092 | |
---|
1093 | /* |
---|
1094 | * extract flow information from a given packet. |
---|
1095 | * filt_mask shows flowinfo fields required. |
---|
1096 | * we assume the ip header is in one mbuf, and addresses and ports are |
---|
1097 | * in network byte order. |
---|
1098 | */ |
---|
int
altq_extractflow(m, af, flow, filt_bmask)
	struct mbuf *m;
	int af;
	struct flowinfo *flow;
	u_int32_t filt_bmask;
{

	switch (af) {
	case PF_INET: {
		struct flowinfo_in *fin;
		struct ip *ip;

		ip = mtod(m, struct ip *);

		if (ip->ip_v != 4)
			break;	/* not IPv4: report failure below */

		fin = (struct flowinfo_in *)flow;
		fin->fi_len = sizeof(struct flowinfo_in);
		fin->fi_family = AF_INET;

		fin->fi_proto = ip->ip_p;
		fin->fi_tos = ip->ip_tos;

		/* addresses are kept in network byte order */
		fin->fi_src.s_addr = ip->ip_src.s_addr;
		fin->fi_dst.s_addr = ip->ip_dst.s_addr;

		if (filt_bmask & FIMB4_PORTS)
			/* if port info is required, extract port numbers */
			extract_ports4(m, ip, fin);
		else {
			fin->fi_sport = 0;
			fin->fi_dport = 0;
			fin->fi_gpi = 0;
		}
		return (1);
	}

#ifdef INET6
	case PF_INET6: {
		struct flowinfo_in6 *fin6;
		struct ip6_hdr *ip6;

		ip6 = mtod(m, struct ip6_hdr *);
		/* should we check the ip version? */

		fin6 = (struct flowinfo_in6 *)flow;
		fin6->fi6_len = sizeof(struct flowinfo_in6);
		fin6->fi6_family = AF_INET6;

		fin6->fi6_proto = ip6->ip6_nxt;
		/* traffic class: bits 20-27 of the host-order vtc/flow word */
		fin6->fi6_tclass = (ntohl(ip6->ip6_flow) >> 20) & 0xff;

		/* flow label: low 20 bits, kept in network byte order */
		fin6->fi6_flowlabel = ip6->ip6_flow & htonl(0x000fffff);
		fin6->fi6_src = ip6->ip6_src;
		fin6->fi6_dst = ip6->ip6_dst;

		if ((filt_bmask & FIMB6_PORTS) ||
		    ((filt_bmask & FIMB6_PROTO)
		     && ip6->ip6_nxt > IPPROTO_IPV6))
			/*
			 * if port info is required, or proto is required
			 * but there are option headers, extract port
			 * and protocol numbers.
			 */
			extract_ports6(m, ip6, fin6);
		else {
			fin6->fi6_sport = 0;
			fin6->fi6_dport = 0;
			fin6->fi6_gpi = 0;
		}
		return (1);
	}
#endif /* INET6 */

	default:
		break;
	}

	/* failed: report an unclassifiable flow */
	flow->fi_len = sizeof(struct flowinfo);
	flow->fi_family = AF_UNSPEC;
	return (0);
}
---|
1184 | |
---|
1185 | /* |
---|
1186 | * helper routine to extract port numbers |
---|
1187 | */ |
---|
/*
 * Template overlaying the fixed leading bytes shared by the IPsec AH
 * header and the IPv6 option headers (hop-by-hop, routing, dst opts):
 * all start with a next-header byte followed by a length byte.  The
 * length units differ per header type; the callers apply the right
 * scaling.
 */
struct _opt6 {
	u_int8_t	opt6_nxt;	/* next header */
	u_int8_t	opt6_hlen;	/* header extension length */
	u_int16_t	_pad;
	u_int32_t	ah_spi;		/* security parameter index
					   for authentication header */
};
---|
1196 | |
---|
1197 | /* |
---|
1198 | * extract port numbers from a ipv4 packet. |
---|
1199 | */ |
---|
static int
extract_ports4(m, ip, fin)
	struct mbuf *m;
	struct ip *ip;
	struct flowinfo_in *fin;
{
	struct mbuf *m0;
	u_short ip_off;
	u_int8_t proto;
	int 	off;

	fin->fi_sport = 0;
	fin->fi_dport = 0;
	fin->fi_gpi = 0;

	ip_off = ntohs(ip->ip_off);
	/* if it is a fragment, try cached fragment info */
	if (ip_off & IP_OFFMASK) {
		ip4f_lookup(ip, fin);
		return (1);
	}

	/* locate the mbuf containing the protocol header */
	for (m0 = m; m0 != NULL; m0 = m0->m_next)
		if (((caddr_t)ip >= m0->m_data) &&
		    ((caddr_t)ip < m0->m_data + m0->m_len))
			break;
	if (m0 == NULL) {
#ifdef ALTQ_DEBUG
		printf("extract_ports4: can't locate header! ip=%p\n", ip);
#endif
		return (0);
	}
	/* transport header offset within m0 (ip_hl is in 32-bit words) */
	off = ((caddr_t)ip - m0->m_data) + (ip->ip_hl << 2);
	proto = ip->ip_p;

#ifdef ALTQ_IPSEC
 again:
#endif
	/* advance to the mbuf that holds offset 'off' */
	while (off >= m0->m_len) {
		off -= m0->m_len;
		m0 = m0->m_next;
		if (m0 == NULL)
			return (0);  /* bogus ip_hl! */
	}
	/* need the first 4 bytes (port pair, or SPI) to be contiguous */
	if (m0->m_len < off + 4)
		return (0);

	switch (proto) {
	case IPPROTO_TCP:
	case IPPROTO_UDP: {
		struct udphdr *udp;

		/* TCP and UDP both begin with src/dst ports: udphdr suffices */
		udp = (struct udphdr *)(mtod(m0, caddr_t) + off);
		fin->fi_sport = udp->uh_sport;
		fin->fi_dport = udp->uh_dport;
		fin->fi_proto = proto;
	}
		break;

#ifdef ALTQ_IPSEC
	case IPPROTO_ESP:
		if (fin->fi_gpi == 0){
			u_int32_t *gpi;

			/* ESP: the SPI is the first 32-bit word */
			gpi = (u_int32_t *)(mtod(m0, caddr_t) + off);
			fin->fi_gpi = *gpi;
		}
		fin->fi_proto = proto;
		break;

	case IPPROTO_AH: {
		/* get next header and header length */
		struct _opt6 *opt6;

		opt6 = (struct _opt6 *)(mtod(m0, caddr_t) + off);
		proto = opt6->opt6_nxt;
		/* skip the AH header: 8 fixed bytes + opt6_hlen 32-bit words */
		off += 8 + (opt6->opt6_hlen * 4);
		if (fin->fi_gpi == 0 && m0->m_len >= off + 8)
			fin->fi_gpi = opt6->ah_spi;
	}
		/* goto the next header */
		goto again;
#endif /* ALTQ_IPSEC */

	default:
		fin->fi_proto = proto;
		return (0);
	}

	/* if this is a first fragment, cache it. */
	if (ip_off & IP_MF)
		ip4f_cache(ip, fin);

	return (1);
}
---|
1296 | |
---|
1297 | #ifdef INET6 |
---|
static int
extract_ports6(m, ip6, fin6)
	struct mbuf *m;
	struct ip6_hdr *ip6;
	struct flowinfo_in6 *fin6;
{
	struct mbuf *m0;
	int	off;
	u_int8_t proto;

	fin6->fi6_gpi = 0;
	fin6->fi6_sport = 0;
	fin6->fi6_dport = 0;

	/* locate the mbuf containing the protocol header */
	for (m0 = m; m0 != NULL; m0 = m0->m_next)
		if (((caddr_t)ip6 >= m0->m_data) &&
		    ((caddr_t)ip6 < m0->m_data + m0->m_len))
			break;
	if (m0 == NULL) {
#ifdef ALTQ_DEBUG
		printf("extract_ports6: can't locate header! ip6=%p\n", ip6);
#endif
		return (0);
	}
	off = ((caddr_t)ip6 - m0->m_data) + sizeof(struct ip6_hdr);

	/* walk the extension header chain until a transport header is found */
	proto = ip6->ip6_nxt;
	do {
		/* advance to the mbuf that holds offset 'off' */
		while (off >= m0->m_len) {
			off -= m0->m_len;
			m0 = m0->m_next;
			if (m0 == NULL)
				return (0);
		}
		/* need the first 4 bytes (port pair, or SPI) contiguous */
		if (m0->m_len < off + 4)
			return (0);

		switch (proto) {
		case IPPROTO_TCP:
		case IPPROTO_UDP: {
			struct udphdr *udp;

			/* TCP and UDP both begin with the port pair */
			udp = (struct udphdr *)(mtod(m0, caddr_t) + off);
			fin6->fi6_sport = udp->uh_sport;
			fin6->fi6_dport = udp->uh_dport;
			fin6->fi6_proto = proto;
		}
			return (1);

		case IPPROTO_ESP:
			if (fin6->fi6_gpi == 0) {
				u_int32_t *gpi;

				/* ESP: the SPI is the first 32-bit word */
				gpi = (u_int32_t *)(mtod(m0, caddr_t) + off);
				fin6->fi6_gpi = *gpi;
			}
			fin6->fi6_proto = proto;
			return (1);

		case IPPROTO_AH: {
			/* get next header and header length */
			struct _opt6 *opt6;

			opt6 = (struct _opt6 *)(mtod(m0, caddr_t) + off);
			if (fin6->fi6_gpi == 0 && m0->m_len >= off + 8)
				fin6->fi6_gpi = opt6->ah_spi;
			proto = opt6->opt6_nxt;
			/* AH: 8 fixed bytes + opt6_hlen 32-bit words */
			off += 8 + (opt6->opt6_hlen * 4);
			/* goto the next header */
			break;
		}

		case IPPROTO_HOPOPTS:
		case IPPROTO_ROUTING:
		case IPPROTO_DSTOPTS: {
			/* get next header and header length */
			struct _opt6 *opt6;

			opt6 = (struct _opt6 *)(mtod(m0, caddr_t) + off);
			proto = opt6->opt6_nxt;
			/* option headers: length counted in 8-byte units,
			   excluding the first unit */
			off += (opt6->opt6_hlen + 1) * 8;
			/* goto the next header */
			break;
		}

		case IPPROTO_FRAGMENT:
			/* ipv6 fragmentations are not supported yet */
		default:
			fin6->fi6_proto = proto;
			return (0);
		}
	} while (1);
	/*NOTREACHED*/
}
---|
1393 | #endif /* INET6 */ |
---|
1394 | |
---|
1395 | /* |
---|
1396 | * altq common classifier |
---|
1397 | */ |
---|
/*
 * Add a flow filter to the classifier and associate it with 'class'.
 * The filter is placed in the hash bucket derived from its IPv4 dst
 * address (or IPv6 flow label); filters with a dst wildcard go into
 * ACC_WILDCARD_INDEX.  Within a bucket the list is kept ordered by
 * descending rule number.  On success returns 0 and stores the new
 * filter's handle in *phandle; otherwise EINVAL or ENOMEM.
 */
int
acc_add_filter(classifier, filter, class, phandle)
	struct acc_classifier *classifier;
	struct flow_filter *filter;
	void	*class;
	u_long	*phandle;
{
	struct acc_filter *afp, *prev, *tmp;
	int	i, s;

#ifdef INET6
	if (filter->ff_flow.fi_family != AF_INET &&
	    filter->ff_flow.fi_family != AF_INET6)
		return (EINVAL);
#else
	if (filter->ff_flow.fi_family != AF_INET)
		return (EINVAL);
#endif

	afp = malloc(sizeof(struct acc_filter),
	       M_DEVBUF, M_WAITOK);
	if (afp == NULL)
		return (ENOMEM);
	bzero(afp, sizeof(struct acc_filter));

	/* copy the filter; normalization below works on the copy */
	afp->f_filter = *filter;
	afp->f_class = class;

	i = ACC_WILDCARD_INDEX;
	if (filter->ff_flow.fi_family == AF_INET) {
		struct flow_filter *filter4 = &afp->f_filter;

		/*
		 * if address is 0, it's a wildcard.  if address mask
		 * isn't set, use full mask.
		 */
		if (filter4->ff_flow.fi_dst.s_addr == 0)
			filter4->ff_mask.mask_dst.s_addr = 0;
		else if (filter4->ff_mask.mask_dst.s_addr == 0)
			filter4->ff_mask.mask_dst.s_addr = 0xffffffff;
		if (filter4->ff_flow.fi_src.s_addr == 0)
			filter4->ff_mask.mask_src.s_addr = 0;
		else if (filter4->ff_mask.mask_src.s_addr == 0)
			filter4->ff_mask.mask_src.s_addr = 0xffffffff;

		/* clear extra bits in addresses  */
		filter4->ff_flow.fi_dst.s_addr &=
		    filter4->ff_mask.mask_dst.s_addr;
		filter4->ff_flow.fi_src.s_addr &=
		    filter4->ff_mask.mask_src.s_addr;

		/*
		 * if dst address is a wildcard, use hash-entry
		 * ACC_WILDCARD_INDEX.
		 */
		if (filter4->ff_mask.mask_dst.s_addr != 0xffffffff)
			i = ACC_WILDCARD_INDEX;
		else
			i = ACC_GET_HASH_INDEX(filter4->ff_flow.fi_dst.s_addr);
	}
#ifdef INET6
	else if (filter->ff_flow.fi_family == AF_INET6) {
		struct flow_filter6 *filter6 =
			(struct flow_filter6 *)&afp->f_filter;
#ifndef IN6MASK0 /* taken from kame ipv6 */
#define	IN6MASK0	{{{ 0, 0, 0, 0 }}}
#define	IN6MASK128	{{{ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff }}}
		const struct in6_addr in6mask0 = IN6MASK0;
		const struct in6_addr in6mask128 = IN6MASK128;
#endif

		/* unspecified address is a wildcard; unset mask means exact */
		if (IN6_IS_ADDR_UNSPECIFIED(&filter6->ff_flow6.fi6_dst))
			filter6->ff_mask6.mask6_dst = in6mask0;
		else if (IN6_IS_ADDR_UNSPECIFIED(&filter6->ff_mask6.mask6_dst))
			filter6->ff_mask6.mask6_dst = in6mask128;
		if (IN6_IS_ADDR_UNSPECIFIED(&filter6->ff_flow6.fi6_src))
			filter6->ff_mask6.mask6_src = in6mask0;
		else if (IN6_IS_ADDR_UNSPECIFIED(&filter6->ff_mask6.mask6_src))
			filter6->ff_mask6.mask6_src = in6mask128;

		/* clear extra bits in addresses  */
		for (i = 0; i < 16; i++)
			filter6->ff_flow6.fi6_dst.s6_addr[i] &=
			    filter6->ff_mask6.mask6_dst.s6_addr[i];
		for (i = 0; i < 16; i++)
			filter6->ff_flow6.fi6_src.s6_addr[i] &=
			    filter6->ff_mask6.mask6_src.s6_addr[i];

		/* bucket is chosen by flow label for IPv6 */
		if (filter6->ff_flow6.fi6_flowlabel == 0)
			i = ACC_WILDCARD_INDEX;
		else
			i = ACC_GET_HASH_INDEX(filter6->ff_flow6.fi6_flowlabel);
	}
#endif /* INET6 */

	afp->f_handle = get_filt_handle(classifier, i);

	/* update filter bitmask */
	afp->f_fbmask = filt2fibmask(filter);
	classifier->acc_fbmask |= afp->f_fbmask;

	/*
	 * add this filter to the filter list.
	 * filters are ordered from the highest rule number.
	 */
#ifdef __NetBSD__
	s = splnet();
#else
	s = splimp();
#endif
	prev = NULL;
	LIST_FOREACH(tmp, &classifier->acc_filters[i], f_chain) {
		if (tmp->f_filter.ff_ruleno > afp->f_filter.ff_ruleno)
			prev = tmp;
		else
			break;
	}
	if (prev == NULL)
		LIST_INSERT_HEAD(&classifier->acc_filters[i], afp, f_chain);
	else
		LIST_INSERT_AFTER(prev, afp, f_chain);
	splx(s);

	*phandle = afp->f_handle;
	return (0);
}
---|
1524 | |
---|
1525 | int |
---|
1526 | acc_delete_filter(classifier, handle) |
---|
1527 | struct acc_classifier *classifier; |
---|
1528 | u_long handle; |
---|
1529 | { |
---|
1530 | struct acc_filter *afp; |
---|
1531 | int s; |
---|
1532 | |
---|
1533 | if ((afp = filth_to_filtp(classifier, handle)) == NULL) |
---|
1534 | return (EINVAL); |
---|
1535 | |
---|
1536 | #ifdef __NetBSD__ |
---|
1537 | s = splnet(); |
---|
1538 | #else |
---|
1539 | s = splimp(); |
---|
1540 | #endif |
---|
1541 | LIST_REMOVE(afp, f_chain); |
---|
1542 | splx(s); |
---|
1543 | |
---|
1544 | free(afp, M_DEVBUF); |
---|
1545 | |
---|
1546 | /* todo: update filt_bmask */ |
---|
1547 | |
---|
1548 | return (0); |
---|
1549 | } |
---|
1550 | |
---|
1551 | /* |
---|
1552 | * delete filters referencing to the specified class. |
---|
1553 | * if the all flag is not 0, delete all the filters. |
---|
1554 | */ |
---|
int
acc_discard_filters(classifier, class, all)
	struct acc_classifier *classifier;
	void	*class;
	int	all;
{
	struct acc_filter *afp;
	int	i, s;

#ifdef __NetBSD__
	s = splnet();
#else
	s = splimp();
#endif
	for (i = 0; i < ACC_FILTER_TABLESIZE; i++) {
		do {
			/*
			 * restart the scan from the bucket head after every
			 * removal: LIST_FOREACH cannot safely continue once
			 * its current entry has been freed.
			 */
			LIST_FOREACH(afp, &classifier->acc_filters[i], f_chain)
				if (all || afp->f_class == class) {
					LIST_REMOVE(afp, f_chain);
					free(afp, M_DEVBUF);
					/* start again from the head */
					break;
				}
		} while (afp != NULL);	/* afp == NULL: bucket fully scanned */
	}
	splx(s);

	if (all)
		classifier->acc_fbmask = 0;

	return (0);
}
---|
1587 | |
---|
/*
 * Classify a packet: extract its flow info and search the filter
 * lists for a match.  Fast paths exist for classifiers that use only
 * the TOS byte, or only protocol and ports; otherwise the hash bucket
 * for the dst address (or IPv6 flow label) is searched first, then
 * the wildcard bucket.  Returns the matched filter's class, or NULL
 * when no filter matches.
 */
void *
acc_classify(clfier, m, af)
	void *clfier;
	struct mbuf *m;
	int af;
{
	struct acc_classifier *classifier;
	struct flowinfo flow;
	struct acc_filter *afp;
	int	i;

	classifier = (struct acc_classifier *)clfier;
	altq_extractflow(m, af, &flow, classifier->acc_fbmask);

	if (flow.fi_family == AF_INET) {
		struct flowinfo_in *fp = (struct flowinfo_in *)&flow;

		if ((classifier->acc_fbmask & FIMB4_ALL) == FIMB4_TOS) {
			/* only tos is used */
			LIST_FOREACH(afp,
				 &classifier->acc_filters[ACC_WILDCARD_INDEX],
				 f_chain)
				if (apply_tosfilter4(afp->f_fbmask,
						     &afp->f_filter, fp))
					/* filter matched */
					return (afp->f_class);
		} else if ((classifier->acc_fbmask &
			(~(FIMB4_PROTO|FIMB4_SPORT|FIMB4_DPORT) & FIMB4_ALL))
		    == 0) {
			/* only proto and ports are used */
			LIST_FOREACH(afp,
				 &classifier->acc_filters[ACC_WILDCARD_INDEX],
				 f_chain)
				if (apply_ppfilter4(afp->f_fbmask,
						    &afp->f_filter, fp))
					/* filter matched */
					return (afp->f_class);
		} else {
			/* get the filter hash entry from its dest address */
			i = ACC_GET_HASH_INDEX(fp->fi_dst.s_addr);
			do {
				/*
				 * go through this loop twice.  first for dst
				 * hash, second for wildcards.
				 */
				LIST_FOREACH(afp, &classifier->acc_filters[i],
					     f_chain)
					if (apply_filter4(afp->f_fbmask,
							  &afp->f_filter, fp))
						/* filter matched */
						return (afp->f_class);

				/*
				 * check again for filters with a dst addr
				 * wildcard.
				 * (daddr == 0 || dmask != 0xffffffff).
				 */
				if (i != ACC_WILDCARD_INDEX)
					i = ACC_WILDCARD_INDEX;
				else
					break;
			} while (1);
		}
	}
#ifdef INET6
	else if (flow.fi_family == AF_INET6) {
		struct flowinfo_in6 *fp6 = (struct flowinfo_in6 *)&flow;

		/* get the filter hash entry from its flow ID */
		if (fp6->fi6_flowlabel != 0)
			i = ACC_GET_HASH_INDEX(fp6->fi6_flowlabel);
		else
			/* flow label can be zero */
			i = ACC_WILDCARD_INDEX;

		/* go through this loop twice.  first for flow hash, second
		   for wildcards. */
		do {
			LIST_FOREACH(afp, &classifier->acc_filters[i], f_chain)
				if (apply_filter6(afp->f_fbmask,
					(struct flow_filter6 *)&afp->f_filter,
					fp6))
					/* filter matched */
					return (afp->f_class);

			/*
			 * check again for filters with a wildcard.
			 */
			if (i != ACC_WILDCARD_INDEX)
				i = ACC_WILDCARD_INDEX;
			else
				break;
		} while (1);
	}
#endif /* INET6 */

	/* no filter matched */
	return (NULL);
}
---|
1687 | |
---|
1688 | static int |
---|
1689 | apply_filter4(fbmask, filt, pkt) |
---|
1690 | u_int32_t fbmask; |
---|
1691 | struct flow_filter *filt; |
---|
1692 | struct flowinfo_in *pkt; |
---|
1693 | { |
---|
1694 | if (filt->ff_flow.fi_family != AF_INET) |
---|
1695 | return (0); |
---|
1696 | if ((fbmask & FIMB4_SPORT) && filt->ff_flow.fi_sport != pkt->fi_sport) |
---|
1697 | return (0); |
---|
1698 | if ((fbmask & FIMB4_DPORT) && filt->ff_flow.fi_dport != pkt->fi_dport) |
---|
1699 | return (0); |
---|
1700 | if ((fbmask & FIMB4_DADDR) && |
---|
1701 | filt->ff_flow.fi_dst.s_addr != |
---|
1702 | (pkt->fi_dst.s_addr & filt->ff_mask.mask_dst.s_addr)) |
---|
1703 | return (0); |
---|
1704 | if ((fbmask & FIMB4_SADDR) && |
---|
1705 | filt->ff_flow.fi_src.s_addr != |
---|
1706 | (pkt->fi_src.s_addr & filt->ff_mask.mask_src.s_addr)) |
---|
1707 | return (0); |
---|
1708 | if ((fbmask & FIMB4_PROTO) && filt->ff_flow.fi_proto != pkt->fi_proto) |
---|
1709 | return (0); |
---|
1710 | if ((fbmask & FIMB4_TOS) && filt->ff_flow.fi_tos != |
---|
1711 | (pkt->fi_tos & filt->ff_mask.mask_tos)) |
---|
1712 | return (0); |
---|
1713 | if ((fbmask & FIMB4_GPI) && filt->ff_flow.fi_gpi != (pkt->fi_gpi)) |
---|
1714 | return (0); |
---|
1715 | /* match */ |
---|
1716 | return (1); |
---|
1717 | } |
---|
1718 | |
---|
1719 | /* |
---|
1720 | * filter matching function optimized for a common case that checks |
---|
1721 | * only protocol and port numbers |
---|
1722 | */ |
---|
1723 | static int |
---|
1724 | apply_ppfilter4(fbmask, filt, pkt) |
---|
1725 | u_int32_t fbmask; |
---|
1726 | struct flow_filter *filt; |
---|
1727 | struct flowinfo_in *pkt; |
---|
1728 | { |
---|
1729 | if (filt->ff_flow.fi_family != AF_INET) |
---|
1730 | return (0); |
---|
1731 | if ((fbmask & FIMB4_SPORT) && filt->ff_flow.fi_sport != pkt->fi_sport) |
---|
1732 | return (0); |
---|
1733 | if ((fbmask & FIMB4_DPORT) && filt->ff_flow.fi_dport != pkt->fi_dport) |
---|
1734 | return (0); |
---|
1735 | if ((fbmask & FIMB4_PROTO) && filt->ff_flow.fi_proto != pkt->fi_proto) |
---|
1736 | return (0); |
---|
1737 | /* match */ |
---|
1738 | return (1); |
---|
1739 | } |
---|
1740 | |
---|
1741 | /* |
---|
1742 | * filter matching function only for tos field. |
---|
1743 | */ |
---|
1744 | static int |
---|
1745 | apply_tosfilter4(fbmask, filt, pkt) |
---|
1746 | u_int32_t fbmask; |
---|
1747 | struct flow_filter *filt; |
---|
1748 | struct flowinfo_in *pkt; |
---|
1749 | { |
---|
1750 | if (filt->ff_flow.fi_family != AF_INET) |
---|
1751 | return (0); |
---|
1752 | if ((fbmask & FIMB4_TOS) && filt->ff_flow.fi_tos != |
---|
1753 | (pkt->fi_tos & filt->ff_mask.mask_tos)) |
---|
1754 | return (0); |
---|
1755 | /* match */ |
---|
1756 | return (1); |
---|
1757 | } |
---|
1758 | |
---|
1759 | #ifdef INET6 |
---|
1760 | static int |
---|
1761 | apply_filter6(fbmask, filt, pkt) |
---|
1762 | u_int32_t fbmask; |
---|
1763 | struct flow_filter6 *filt; |
---|
1764 | struct flowinfo_in6 *pkt; |
---|
1765 | { |
---|
1766 | int i; |
---|
1767 | |
---|
1768 | if (filt->ff_flow6.fi6_family != AF_INET6) |
---|
1769 | return (0); |
---|
1770 | if ((fbmask & FIMB6_FLABEL) && |
---|
1771 | filt->ff_flow6.fi6_flowlabel != pkt->fi6_flowlabel) |
---|
1772 | return (0); |
---|
1773 | if ((fbmask & FIMB6_PROTO) && |
---|
1774 | filt->ff_flow6.fi6_proto != pkt->fi6_proto) |
---|
1775 | return (0); |
---|
1776 | if ((fbmask & FIMB6_SPORT) && |
---|
1777 | filt->ff_flow6.fi6_sport != pkt->fi6_sport) |
---|
1778 | return (0); |
---|
1779 | if ((fbmask & FIMB6_DPORT) && |
---|
1780 | filt->ff_flow6.fi6_dport != pkt->fi6_dport) |
---|
1781 | return (0); |
---|
1782 | if (fbmask & FIMB6_SADDR) { |
---|
1783 | for (i = 0; i < 4; i++) |
---|
1784 | if (filt->ff_flow6.fi6_src.s6_addr32[i] != |
---|
1785 | (pkt->fi6_src.s6_addr32[i] & |
---|
1786 | filt->ff_mask6.mask6_src.s6_addr32[i])) |
---|
1787 | return (0); |
---|
1788 | } |
---|
1789 | if (fbmask & FIMB6_DADDR) { |
---|
1790 | for (i = 0; i < 4; i++) |
---|
1791 | if (filt->ff_flow6.fi6_dst.s6_addr32[i] != |
---|
1792 | (pkt->fi6_dst.s6_addr32[i] & |
---|
1793 | filt->ff_mask6.mask6_dst.s6_addr32[i])) |
---|
1794 | return (0); |
---|
1795 | } |
---|
1796 | if ((fbmask & FIMB6_TCLASS) && |
---|
1797 | filt->ff_flow6.fi6_tclass != |
---|
1798 | (pkt->fi6_tclass & filt->ff_mask6.mask6_tclass)) |
---|
1799 | return (0); |
---|
1800 | if ((fbmask & FIMB6_GPI) && |
---|
1801 | filt->ff_flow6.fi6_gpi != pkt->fi6_gpi) |
---|
1802 | return (0); |
---|
1803 | /* match */ |
---|
1804 | return (1); |
---|
1805 | } |
---|
1806 | #endif /* INET6 */ |
---|
1807 | |
---|
1808 | /* |
---|
1809 | * filter handle: |
---|
1810 | * bit 20-28: index to the filter hash table |
---|
1811 | * bit 0-19: unique id in the hash bucket. |
---|
1812 | */ |
---|
1813 | static u_long |
---|
1814 | get_filt_handle(classifier, i) |
---|
1815 | struct acc_classifier *classifier; |
---|
1816 | int i; |
---|
1817 | { |
---|
1818 | static u_long handle_number = 1; |
---|
1819 | u_long handle; |
---|
1820 | struct acc_filter *afp; |
---|
1821 | |
---|
1822 | while (1) { |
---|
1823 | handle = handle_number++ & 0x000fffff; |
---|
1824 | |
---|
1825 | if (LIST_EMPTY(&classifier->acc_filters[i])) |
---|
1826 | break; |
---|
1827 | |
---|
1828 | LIST_FOREACH(afp, &classifier->acc_filters[i], f_chain) |
---|
1829 | if ((afp->f_handle & 0x000fffff) == handle) |
---|
1830 | break; |
---|
1831 | if (afp == NULL) |
---|
1832 | break; |
---|
1833 | /* this handle is already used, try again */ |
---|
1834 | } |
---|
1835 | |
---|
1836 | return ((i << 20) | handle); |
---|
1837 | } |
---|
1838 | |
---|
1839 | /* convert filter handle to filter pointer */ |
---|
1840 | static struct acc_filter * |
---|
1841 | filth_to_filtp(classifier, handle) |
---|
1842 | struct acc_classifier *classifier; |
---|
1843 | u_long handle; |
---|
1844 | { |
---|
1845 | struct acc_filter *afp; |
---|
1846 | int i; |
---|
1847 | |
---|
1848 | i = ACC_GET_HINDEX(handle); |
---|
1849 | |
---|
1850 | LIST_FOREACH(afp, &classifier->acc_filters[i], f_chain) |
---|
1851 | if (afp->f_handle == handle) |
---|
1852 | return (afp); |
---|
1853 | |
---|
1854 | return (NULL); |
---|
1855 | } |
---|
1856 | |
---|
1857 | /* create flowinfo bitmask */ |
---|
1858 | static u_int32_t |
---|
1859 | filt2fibmask(filt) |
---|
1860 | struct flow_filter *filt; |
---|
1861 | { |
---|
1862 | u_int32_t mask = 0; |
---|
1863 | #ifdef INET6 |
---|
1864 | struct flow_filter6 *filt6; |
---|
1865 | #endif |
---|
1866 | |
---|
1867 | switch (filt->ff_flow.fi_family) { |
---|
1868 | case AF_INET: |
---|
1869 | if (filt->ff_flow.fi_proto != 0) |
---|
1870 | mask |= FIMB4_PROTO; |
---|
1871 | if (filt->ff_flow.fi_tos != 0) |
---|
1872 | mask |= FIMB4_TOS; |
---|
1873 | if (filt->ff_flow.fi_dst.s_addr != 0) |
---|
1874 | mask |= FIMB4_DADDR; |
---|
1875 | if (filt->ff_flow.fi_src.s_addr != 0) |
---|
1876 | mask |= FIMB4_SADDR; |
---|
1877 | if (filt->ff_flow.fi_sport != 0) |
---|
1878 | mask |= FIMB4_SPORT; |
---|
1879 | if (filt->ff_flow.fi_dport != 0) |
---|
1880 | mask |= FIMB4_DPORT; |
---|
1881 | if (filt->ff_flow.fi_gpi != 0) |
---|
1882 | mask |= FIMB4_GPI; |
---|
1883 | break; |
---|
1884 | #ifdef INET6 |
---|
1885 | case AF_INET6: |
---|
1886 | filt6 = (struct flow_filter6 *)filt; |
---|
1887 | |
---|
1888 | if (filt6->ff_flow6.fi6_proto != 0) |
---|
1889 | mask |= FIMB6_PROTO; |
---|
1890 | if (filt6->ff_flow6.fi6_tclass != 0) |
---|
1891 | mask |= FIMB6_TCLASS; |
---|
1892 | if (!IN6_IS_ADDR_UNSPECIFIED(&filt6->ff_flow6.fi6_dst)) |
---|
1893 | mask |= FIMB6_DADDR; |
---|
1894 | if (!IN6_IS_ADDR_UNSPECIFIED(&filt6->ff_flow6.fi6_src)) |
---|
1895 | mask |= FIMB6_SADDR; |
---|
1896 | if (filt6->ff_flow6.fi6_sport != 0) |
---|
1897 | mask |= FIMB6_SPORT; |
---|
1898 | if (filt6->ff_flow6.fi6_dport != 0) |
---|
1899 | mask |= FIMB6_DPORT; |
---|
1900 | if (filt6->ff_flow6.fi6_gpi != 0) |
---|
1901 | mask |= FIMB6_GPI; |
---|
1902 | if (filt6->ff_flow6.fi6_flowlabel != 0) |
---|
1903 | mask |= FIMB6_FLABEL; |
---|
1904 | break; |
---|
1905 | #endif /* INET6 */ |
---|
1906 | } |
---|
1907 | return (mask); |
---|
1908 | } |
---|
1909 | |
---|
1910 | |
---|
/*
 * helper functions to handle IPv4 fragments.
 * currently only in-sequence fragments are handled.
 *	- fragment info is cached in a LRU list.
 *	- when a first fragment is found, cache its flow info.
 *	- when a non-first fragment is found, lookup the cache.
 */

/* one cached fragment flow; entries live on ip4f_list in LRU order */
struct ip4_frag {
	TAILQ_ENTRY(ip4_frag) ip4f_chain;	/* LRU list linkage */
	char    ip4f_valid;	/* non-zero while this entry holds live data */
	u_short ip4f_id;	/* IP identification field of the datagram */
	struct flowinfo_in ip4f_info;	/* flow info taken from 1st fragment */
};

static TAILQ_HEAD(ip4f_list, ip4_frag) ip4f_list; /* IPv4 fragment cache */

/* fixed number of cache entries, allocated lazily by ip4f_init() */
#define	IP4F_TABSIZE		16	/* IPv4 fragment cache size */
---|
1931 | static void |
---|
1932 | ip4f_cache(ip, fin) |
---|
1933 | struct ip *ip; |
---|
1934 | struct flowinfo_in *fin; |
---|
1935 | { |
---|
1936 | struct ip4_frag *fp; |
---|
1937 | |
---|
1938 | if (TAILQ_EMPTY(&ip4f_list)) { |
---|
1939 | /* first time call, allocate fragment cache entries. */ |
---|
1940 | if (ip4f_init() < 0) |
---|
1941 | /* allocation failed! */ |
---|
1942 | return; |
---|
1943 | } |
---|
1944 | |
---|
1945 | fp = ip4f_alloc(); |
---|
1946 | fp->ip4f_id = ip->ip_id; |
---|
1947 | fp->ip4f_info.fi_proto = ip->ip_p; |
---|
1948 | fp->ip4f_info.fi_src.s_addr = ip->ip_src.s_addr; |
---|
1949 | fp->ip4f_info.fi_dst.s_addr = ip->ip_dst.s_addr; |
---|
1950 | |
---|
1951 | /* save port numbers */ |
---|
1952 | fp->ip4f_info.fi_sport = fin->fi_sport; |
---|
1953 | fp->ip4f_info.fi_dport = fin->fi_dport; |
---|
1954 | fp->ip4f_info.fi_gpi = fin->fi_gpi; |
---|
1955 | } |
---|
1956 | |
---|
1957 | static int |
---|
1958 | ip4f_lookup(ip, fin) |
---|
1959 | struct ip *ip; |
---|
1960 | struct flowinfo_in *fin; |
---|
1961 | { |
---|
1962 | struct ip4_frag *fp; |
---|
1963 | |
---|
1964 | for (fp = TAILQ_FIRST(&ip4f_list); fp != NULL && fp->ip4f_valid; |
---|
1965 | fp = TAILQ_NEXT(fp, ip4f_chain)) |
---|
1966 | if (ip->ip_id == fp->ip4f_id && |
---|
1967 | ip->ip_src.s_addr == fp->ip4f_info.fi_src.s_addr && |
---|
1968 | ip->ip_dst.s_addr == fp->ip4f_info.fi_dst.s_addr && |
---|
1969 | ip->ip_p == fp->ip4f_info.fi_proto) { |
---|
1970 | |
---|
1971 | /* found the matching entry */ |
---|
1972 | fin->fi_sport = fp->ip4f_info.fi_sport; |
---|
1973 | fin->fi_dport = fp->ip4f_info.fi_dport; |
---|
1974 | fin->fi_gpi = fp->ip4f_info.fi_gpi; |
---|
1975 | |
---|
1976 | if ((ntohs(ip->ip_off) & IP_MF) == 0) |
---|
1977 | /* this is the last fragment, |
---|
1978 | release the entry. */ |
---|
1979 | ip4f_free(fp); |
---|
1980 | |
---|
1981 | return (1); |
---|
1982 | } |
---|
1983 | |
---|
1984 | /* no matching entry found */ |
---|
1985 | return (0); |
---|
1986 | } |
---|
1987 | |
---|
1988 | static int |
---|
1989 | ip4f_init(void) |
---|
1990 | { |
---|
1991 | struct ip4_frag *fp; |
---|
1992 | int i; |
---|
1993 | |
---|
1994 | TAILQ_INIT(&ip4f_list); |
---|
1995 | for (i=0; i<IP4F_TABSIZE; i++) { |
---|
1996 | fp = malloc(sizeof(struct ip4_frag), |
---|
1997 | M_DEVBUF, M_NOWAIT); |
---|
1998 | if (fp == NULL) { |
---|
1999 | printf("ip4f_init: can't alloc %dth entry!\n", i); |
---|
2000 | if (i == 0) |
---|
2001 | return (-1); |
---|
2002 | return (0); |
---|
2003 | } |
---|
2004 | fp->ip4f_valid = 0; |
---|
2005 | TAILQ_INSERT_TAIL(&ip4f_list, fp, ip4f_chain); |
---|
2006 | } |
---|
2007 | return (0); |
---|
2008 | } |
---|
2009 | |
---|
2010 | static struct ip4_frag * |
---|
2011 | ip4f_alloc(void) |
---|
2012 | { |
---|
2013 | struct ip4_frag *fp; |
---|
2014 | |
---|
2015 | /* reclaim an entry at the tail, put it at the head */ |
---|
2016 | fp = TAILQ_LAST(&ip4f_list, ip4f_list); |
---|
2017 | TAILQ_REMOVE(&ip4f_list, fp, ip4f_chain); |
---|
2018 | fp->ip4f_valid = 1; |
---|
2019 | TAILQ_INSERT_HEAD(&ip4f_list, fp, ip4f_chain); |
---|
2020 | return (fp); |
---|
2021 | } |
---|
2022 | |
---|
2023 | static void |
---|
2024 | ip4f_free(fp) |
---|
2025 | struct ip4_frag *fp; |
---|
2026 | { |
---|
2027 | TAILQ_REMOVE(&ip4f_list, fp, ip4f_chain); |
---|
2028 | fp->ip4f_valid = 0; |
---|
2029 | TAILQ_INSERT_TAIL(&ip4f_list, fp, ip4f_chain); |
---|
2030 | } |
---|
2031 | |
---|
2032 | #endif /* ALTQ3_CLFIER_COMPAT */ |
---|