[f244de9] | 1 | #include <machine/rtems-bsd-kernel-space.h> |
---|
[3c05977] | 2 | |
---|
| 3 | /*- |
---|
[bb80d9d] | 4 | * SPDX-License-Identifier: BSD-2-Clause-FreeBSD |
---|
| 5 | * |
---|
[3c05977] | 6 | * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org> |
---|
| 7 | * Copyright 2004 John-Mark Gurney <jmg@FreeBSD.org> |
---|
| 8 | * Copyright (c) 2009 Apple, Inc. |
---|
| 9 | * All rights reserved. |
---|
| 10 | * |
---|
| 11 | * Redistribution and use in source and binary forms, with or without |
---|
| 12 | * modification, are permitted provided that the following conditions |
---|
| 13 | * are met: |
---|
| 14 | * 1. Redistributions of source code must retain the above copyright |
---|
| 15 | * notice, this list of conditions and the following disclaimer. |
---|
| 16 | * 2. Redistributions in binary form must reproduce the above copyright |
---|
| 17 | * notice, this list of conditions and the following disclaimer in the |
---|
| 18 | * documentation and/or other materials provided with the distribution. |
---|
| 19 | * |
---|
| 20 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND |
---|
| 21 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
---|
| 22 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
---|
| 23 | * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE |
---|
| 24 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
---|
| 25 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
---|
| 26 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
---|
| 27 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
---|
| 28 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
---|
| 29 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
---|
| 30 | * SUCH DAMAGE. |
---|
| 31 | */ |
---|
| 32 | |
---|
[e599318] | 33 | #include <sys/cdefs.h> |
---|
[3c05977] | 34 | __FBSDID("$FreeBSD$"); |
---|
| 35 | |
---|
[e599318] | 36 | #include <rtems/bsd/local/opt_ktrace.h> |
---|
[c40e45b] | 37 | #include <rtems/bsd/local/opt_kqueue.h> |
---|
[e599318] | 38 | |
---|
[bb80d9d] | 39 | #ifdef COMPAT_FREEBSD11 |
---|
| 40 | #define _WANT_FREEBSD11_KEVENT |
---|
| 41 | #endif |
---|
| 42 | |
---|
[0237319] | 43 | #include <sys/param.h> |
---|
[e599318] | 44 | #include <sys/systm.h> |
---|
[c40e45b] | 45 | #include <sys/capsicum.h> |
---|
[e599318] | 46 | #include <sys/kernel.h> |
---|
[3c967ca] | 47 | #include <sys/lock.h> |
---|
[e599318] | 48 | #include <sys/mutex.h> |
---|
[c40e45b] | 49 | #include <sys/rwlock.h> |
---|
[e599318] | 50 | #include <sys/proc.h> |
---|
| 51 | #include <sys/malloc.h> |
---|
| 52 | #include <rtems/bsd/sys/unistd.h> |
---|
| 53 | #include <sys/file.h> |
---|
| 54 | #include <sys/filedesc.h> |
---|
| 55 | #include <sys/filio.h> |
---|
| 56 | #include <sys/fcntl.h> |
---|
| 57 | #include <sys/kthread.h> |
---|
| 58 | #include <sys/selinfo.h> |
---|
| 59 | #include <sys/queue.h> |
---|
| 60 | #include <sys/event.h> |
---|
| 61 | #include <sys/eventvar.h> |
---|
| 62 | #include <sys/poll.h> |
---|
| 63 | #include <sys/protosw.h> |
---|
[c40e45b] | 64 | #include <sys/resourcevar.h> |
---|
[e599318] | 65 | #include <sys/sigio.h> |
---|
| 66 | #include <sys/signalvar.h> |
---|
| 67 | #include <sys/socket.h> |
---|
| 68 | #include <sys/socketvar.h> |
---|
| 69 | #include <sys/stat.h> |
---|
| 70 | #include <sys/sysctl.h> |
---|
| 71 | #include <sys/sysproto.h> |
---|
| 72 | #include <sys/syscallsubr.h> |
---|
| 73 | #include <sys/taskqueue.h> |
---|
| 74 | #include <sys/uio.h> |
---|
[c40e45b] | 75 | #include <sys/user.h> |
---|
[3c05977] | 76 | #ifdef KTRACE |
---|
[e599318] | 77 | #include <sys/ktrace.h> |
---|
[3c05977] | 78 | #endif |
---|
[0577772] | 79 | #include <machine/atomic.h> |
---|
[3c05977] | 80 | |
---|
[0c9f27b] | 81 | #include <vm/uma.h> |
---|
| 82 | #ifdef __rtems__ |
---|
| 83 | #include <machine/rtems-bsd-syscall-api.h> |
---|
| 84 | |
---|
| 85 | /* Maintain a global kqueue list on RTEMS */ |
---|
| 86 | static struct kqlist fd_kqlist; |
---|
[3c05977] | 87 | #endif /* __rtems__ */ |
---|
| 88 | |
---|
| 89 | static MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system"); |
---|
| 90 | |
---|
/*
 * This lock is used if multiple kq locks are required. This possibly
 * should be made into a per proc lock.
 */
static struct mtx	kq_global;
MTX_SYSINIT(kq_global, &kq_global, "kqueue order", MTX_DEF);
/*
 * Acquire/release kq_global at most once.  "haslck" records whether the
 * caller already owns the lock, so these can be invoked repeatedly on
 * paths that may or may not have taken it yet.
 */
#define KQ_GLOBAL_LOCK(lck, haslck)	do {	\
	if (!haslck)				\
		mtx_lock(lck);			\
	haslck = 1;				\
} while (0)
#define KQ_GLOBAL_UNLOCK(lck, haslck)	do {	\
	if (haslck)				\
		mtx_unlock(lck);		\
	haslck = 0;				\
} while (0)
---|
| 107 | |
---|
[c40e45b] | 108 | TASKQUEUE_DEFINE_THREAD(kqueue_ctx); |
---|
[3c05977] | 109 | |
---|
| 110 | static int kevent_copyout(void *arg, struct kevent *kevp, int count); |
---|
| 111 | static int kevent_copyin(void *arg, struct kevent *kevp, int count); |
---|
| 112 | static int kqueue_register(struct kqueue *kq, struct kevent *kev, |
---|
| 113 | struct thread *td, int waitok); |
---|
| 114 | static int kqueue_acquire(struct file *fp, struct kqueue **kqp); |
---|
| 115 | static void kqueue_release(struct kqueue *kq, int locked); |
---|
[c40e45b] | 116 | static void kqueue_destroy(struct kqueue *kq); |
---|
| 117 | static void kqueue_drain(struct kqueue *kq, struct thread *td); |
---|
[3c05977] | 118 | static int kqueue_expand(struct kqueue *kq, struct filterops *fops, |
---|
| 119 | uintptr_t ident, int waitok); |
---|
| 120 | static void kqueue_task(void *arg, int pending); |
---|
| 121 | static int kqueue_scan(struct kqueue *kq, int maxevents, |
---|
| 122 | struct kevent_copyops *k_ops, |
---|
| 123 | const struct timespec *timeout, |
---|
| 124 | struct kevent *keva, struct thread *td); |
---|
| 125 | static void kqueue_wakeup(struct kqueue *kq); |
---|
| 126 | static struct filterops *kqueue_fo_find(int filt); |
---|
| 127 | static void kqueue_fo_release(int filt); |
---|
[c37f9fb] | 128 | struct g_kevent_args; |
---|
| 129 | static int kern_kevent_generic(struct thread *td, |
---|
| 130 | struct g_kevent_args *uap, |
---|
[bb80d9d] | 131 | struct kevent_copyops *k_ops, const char *struct_name); |
---|
[3c05977] | 132 | |
---|
#ifndef __rtems__
static fo_rdwr_t	kqueue_read;
static fo_rdwr_t	kqueue_write;
static fo_truncate_t	kqueue_truncate;
static fo_ioctl_t	kqueue_ioctl;
static fo_poll_t	kqueue_poll;
static fo_kqfilter_t	kqueue_kqfilter;
static fo_stat_t	kqueue_stat;
static fo_close_t	kqueue_close;
static fo_fill_kinfo_t	kqueue_fill_kinfo;

/*
 * File operations vector for kqueue file descriptors.  read/write/
 * truncate/chmod/chown/sendfile are rejected through the invfo_* stubs;
 * everything else dispatches to the kqueue_* handlers in this file.
 */
static struct fileops kqueueops = {
	.fo_read = invfo_rdwr,
	.fo_write = invfo_rdwr,
	.fo_truncate = invfo_truncate,
	.fo_ioctl = kqueue_ioctl,
	.fo_poll = kqueue_poll,
	.fo_kqfilter = kqueue_kqfilter,
	.fo_stat = kqueue_stat,
	.fo_close = kqueue_close,
	.fo_chmod = invfo_chmod,
	.fo_chown = invfo_chown,
	.fo_sendfile = invfo_sendfile,
	.fo_fill_kinfo = kqueue_fill_kinfo,
};
#else /* __rtems__ */
/* On RTEMS, kqueues plug into libio; the handler table is defined below. */
static const rtems_filesystem_file_handlers_r kqueueops;
#endif /* __rtems__ */
---|
[3c05977] | 161 | |
---|
| 162 | static int knote_attach(struct knote *kn, struct kqueue *kq); |
---|
| 163 | static void knote_drop(struct knote *kn, struct thread *td); |
---|
[0577772] | 164 | static void knote_drop_detached(struct knote *kn, struct thread *td); |
---|
[3c05977] | 165 | static void knote_enqueue(struct knote *kn); |
---|
| 166 | static void knote_dequeue(struct knote *kn); |
---|
| 167 | static void knote_init(void); |
---|
| 168 | static struct knote *knote_alloc(int waitok); |
---|
| 169 | static void knote_free(struct knote *kn); |
---|
| 170 | |
---|
| 171 | static void filt_kqdetach(struct knote *kn); |
---|
| 172 | static int filt_kqueue(struct knote *kn, long hint); |
---|
[0c9f27b] | 173 | #ifndef __rtems__ |
---|
[3c05977] | 174 | static int filt_procattach(struct knote *kn); |
---|
| 175 | static void filt_procdetach(struct knote *kn); |
---|
| 176 | static int filt_proc(struct knote *kn, long hint); |
---|
[0c9f27b] | 177 | #endif /* __rtems__ */ |
---|
[3c05977] | 178 | static int filt_fileattach(struct knote *kn); |
---|
| 179 | static void filt_timerexpire(void *knx); |
---|
| 180 | static int filt_timerattach(struct knote *kn); |
---|
| 181 | static void filt_timerdetach(struct knote *kn); |
---|
[3489e3b] | 182 | static void filt_timerstart(struct knote *kn, sbintime_t to); |
---|
| 183 | static void filt_timertouch(struct knote *kn, struct kevent *kev, |
---|
| 184 | u_long type); |
---|
| 185 | static int filt_timervalidate(struct knote *kn, sbintime_t *to); |
---|
[3c05977] | 186 | static int filt_timer(struct knote *kn, long hint); |
---|
| 187 | static int filt_userattach(struct knote *kn); |
---|
| 188 | static void filt_userdetach(struct knote *kn); |
---|
| 189 | static int filt_user(struct knote *kn, long hint); |
---|
| 190 | static void filt_usertouch(struct knote *kn, struct kevent *kev, |
---|
| 191 | u_long type); |
---|
| 192 | |
---|
/*
 * Filter ops for fd-backed filters (READ/WRITE/VNODE/EMPTY): the attach
 * routine forwards to the file's own fo_kqfilter method.
 */
static struct filterops file_filtops = {
	.f_isfd = 1,
	.f_attach = filt_fileattach,
};
/* Filter ops used when a kqueue itself is monitored via EVFILT_READ. */
static struct filterops kqread_filtops = {
	.f_isfd = 1,
	.f_detach = filt_kqdetach,
	.f_event = filt_kqueue,
};
/* XXX - move to kern_proc.c? */
#ifndef __rtems__
/* Filter ops for EVFILT_PROC; compiled out on RTEMS (no processes). */
static struct filterops proc_filtops = {
	.f_isfd = 0,
	.f_attach = filt_procattach,
	.f_detach = filt_procdetach,
	.f_event = filt_proc,
};
#endif /* __rtems__ */
/* Filter ops for EVFILT_TIMER, backed by a callout per knote. */
static struct filterops timer_filtops = {
	.f_isfd = 0,
	.f_attach = filt_timerattach,
	.f_detach = filt_timerdetach,
	.f_event = filt_timer,
	.f_touch = filt_timertouch,
};
/* Filter ops for EVFILT_USER events, triggered explicitly by userland. */
static struct filterops user_filtops = {
	.f_attach = filt_userattach,
	.f_detach = filt_userdetach,
	.f_event = filt_user,
	.f_touch = filt_usertouch,
};
---|
| 224 | |
---|
| 225 | static uma_zone_t knote_zone; |
---|
[0577772] | 226 | static unsigned int kq_ncallouts = 0; |
---|
[c40e45b] | 227 | static unsigned int kq_calloutmax = 4 * 1024; |
---|
| 228 | SYSCTL_UINT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW, |
---|
[3c05977] | 229 | &kq_calloutmax, 0, "Maximum number of callouts allocated for kqueue"); |
---|
| 230 | |
---|
/* XXX - ensure not influx ? */
/*
 * Mark a knote active and enqueue it on its kqueue unless it is already
 * queued or disabled.  "islock" indicates whether the caller already
 * holds the kqueue lock; if not, the lock is taken and dropped here.
 */
#define KNOTE_ACTIVATE(kn, islock) do { 				\
	if ((islock))							\
		mtx_assert(&(kn)->kn_kq->kq_lock, MA_OWNED);		\
	else								\
		KQ_LOCK((kn)->kn_kq);					\
	(kn)->kn_status |= KN_ACTIVE;					\
	if (((kn)->kn_status & (KN_QUEUED | KN_DISABLED)) == 0)	\
		knote_enqueue((kn));					\
	if (!(islock))							\
		KQ_UNLOCK((kn)->kn_kq);					\
} while(0)
/* Acquire the per-kqueue mutex. */
#define KQ_LOCK(kq) do {						\
	mtx_lock(&(kq)->kq_lock);					\
} while (0)
/* Wake threads sleeping in KQ_FLUXWAIT for in-flux knotes to settle. */
#define KQ_FLUX_WAKEUP(kq) do {					\
	if (((kq)->kq_state & KQ_FLUXWAIT) == KQ_FLUXWAIT) {	\
		(kq)->kq_state &= ~KQ_FLUXWAIT;			\
		wakeup((kq));						\
	}								\
} while (0)
/* Drop the kqueue lock, first waking any flux waiters. */
#define KQ_UNLOCK_FLUX(kq) do {					\
	KQ_FLUX_WAKEUP(kq);						\
	mtx_unlock(&(kq)->kq_lock);					\
} while (0)
#define KQ_UNLOCK(kq) do {						\
	mtx_unlock(&(kq)->kq_lock);					\
} while (0)
/* Lock-ownership assertions; no-ops unless INVARIANTS is enabled. */
#define KQ_OWNED(kq) do {						\
	mtx_assert(&(kq)->kq_lock, MA_OWNED);				\
} while (0)
#define KQ_NOTOWNED(kq) do {						\
	mtx_assert(&(kq)->kq_lock, MA_NOTOWNED);			\
} while (0)
---|
[c40e45b] | 265 | |
---|
| 266 | static struct knlist * |
---|
| 267 | kn_list_lock(struct knote *kn) |
---|
| 268 | { |
---|
| 269 | struct knlist *knl; |
---|
| 270 | |
---|
| 271 | knl = kn->kn_knlist; |
---|
| 272 | if (knl != NULL) |
---|
| 273 | knl->kl_lock(knl->kl_lockarg); |
---|
| 274 | return (knl); |
---|
| 275 | } |
---|
| 276 | |
---|
/*
 * Unlock a knlist obtained from kn_list_lock(); NULL is a no-op.  If the
 * list is marked for automatic destruction and has become empty, destroy
 * and free it after the lock is dropped.
 */
static void
kn_list_unlock(struct knlist *knl)
{
	bool do_free;

	if (knl == NULL)
		return;
	/* Decide while still locked; the list must not be touched after. */
	do_free = knl->kl_autodestroy && knlist_empty(knl);
	knl->kl_unlock(knl->kl_lockarg);
	if (do_free) {
		knlist_destroy(knl);
		free(knl, M_KQUEUE);
	}
}
---|
| 291 | |
---|
[0577772] | 292 | static bool |
---|
| 293 | kn_in_flux(struct knote *kn) |
---|
| 294 | { |
---|
| 295 | |
---|
| 296 | return (kn->kn_influx > 0); |
---|
| 297 | } |
---|
| 298 | |
---|
| 299 | static void |
---|
| 300 | kn_enter_flux(struct knote *kn) |
---|
| 301 | { |
---|
| 302 | |
---|
| 303 | KQ_OWNED(kn->kn_kq); |
---|
| 304 | MPASS(kn->kn_influx < INT_MAX); |
---|
| 305 | kn->kn_influx++; |
---|
| 306 | } |
---|
| 307 | |
---|
| 308 | static bool |
---|
| 309 | kn_leave_flux(struct knote *kn) |
---|
| 310 | { |
---|
| 311 | |
---|
| 312 | KQ_OWNED(kn->kn_kq); |
---|
| 313 | MPASS(kn->kn_influx > 0); |
---|
| 314 | kn->kn_influx--; |
---|
| 315 | return (kn->kn_influx == 0); |
---|
| 316 | } |
---|
| 317 | |
---|
[3c05977] | 318 | #define KNL_ASSERT_LOCK(knl, islocked) do { \ |
---|
| 319 | if (islocked) \ |
---|
| 320 | KNL_ASSERT_LOCKED(knl); \ |
---|
| 321 | else \ |
---|
| 322 | KNL_ASSERT_UNLOCKED(knl); \ |
---|
| 323 | } while (0) |
---|
| 324 | #ifdef INVARIANTS |
---|
| 325 | #define KNL_ASSERT_LOCKED(knl) do { \ |
---|
| 326 | knl->kl_assert_locked((knl)->kl_lockarg); \ |
---|
| 327 | } while (0) |
---|
| 328 | #define KNL_ASSERT_UNLOCKED(knl) do { \ |
---|
| 329 | knl->kl_assert_unlocked((knl)->kl_lockarg); \ |
---|
| 330 | } while (0) |
---|
| 331 | #else /* !INVARIANTS */ |
---|
| 332 | #define KNL_ASSERT_LOCKED(knl) do {} while(0) |
---|
| 333 | #define KNL_ASSERT_UNLOCKED(knl) do {} while (0) |
---|
| 334 | #endif /* INVARIANTS */ |
---|
| 335 | |
---|
[c40e45b] | 336 | #ifndef KN_HASHSIZE |
---|
[3c05977] | 337 | #define KN_HASHSIZE 64 /* XXX should be tunable */ |
---|
[c40e45b] | 338 | #endif |
---|
| 339 | |
---|
[3c05977] | 340 | #define KN_HASH(val, mask) (((val) ^ (val >> 8)) & (mask)) |
---|
| 341 | |
---|
| 342 | static int |
---|
| 343 | filt_nullattach(struct knote *kn) |
---|
| 344 | { |
---|
| 345 | |
---|
| 346 | return (ENXIO); |
---|
| 347 | }; |
---|
| 348 | |
---|
/*
 * Placeholder filterops for unsupported filter slots; filt_nullattach
 * rejects every registration with ENXIO.
 */
struct filterops null_filtops = {
	.f_isfd = 0,
	.f_attach = filt_nullattach,
};
---|
[3c05977] | 353 | |
---|
| 354 | /* XXX - make SYSINIT to add these, and move into respective modules. */ |
---|
| 355 | extern struct filterops sig_filtops; |
---|
| 356 | extern struct filterops fs_filtops; |
---|
| 357 | |
---|
/*
 * Table for all system-defined filters, indexed by -filter - 1.
 * Entries with for_nolock set reference statically allocated filterops
 * and are used without taking filterops_lock or reference counting.
 */
static struct mtx	filterops_lock;
MTX_SYSINIT(kqueue_filterops, &filterops_lock, "protect sysfilt_ops",
	MTX_DEF);
static struct {
	struct filterops *for_fop;
	int for_nolock;		/* static entry: skip locking/refcounting */
	int for_refcnt;
} sysfilt_ops[EVFILT_SYSCOUNT] = {
	{ &file_filtops, 1 },			/* EVFILT_READ */
	{ &file_filtops, 1 },			/* EVFILT_WRITE */
	{ &null_filtops },			/* EVFILT_AIO */
	{ &file_filtops, 1 },			/* EVFILT_VNODE */
#ifndef __rtems__
	{ &proc_filtops, 1 },			/* EVFILT_PROC */
	{ &sig_filtops, 1 },			/* EVFILT_SIGNAL */
#else /* __rtems__ */
	/* RTEMS has no processes or UNIX signals; reject these filters. */
	{ &null_filtops },			/* EVFILT_PROC */
	{ &null_filtops },			/* EVFILT_SIGNAL */
#endif /* __rtems__ */
	{ &timer_filtops, 1 },			/* EVFILT_TIMER */
	{ &null_filtops },			/* former EVFILT_NETDEV */
#ifndef __rtems__
	{ &fs_filtops, 1 },			/* EVFILT_FS */
#else /* __rtems__ */
	{ &null_filtops },			/* EVFILT_FS */
#endif /* __rtems__ */
	{ &null_filtops },			/* EVFILT_LIO */
	{ &user_filtops, 1 },			/* EVFILT_USER */
	{ &null_filtops },			/* EVFILT_SENDFILE */
	{ &file_filtops, 1 },			/* EVFILT_EMPTY */
};
---|
| 392 | |
---|
/*
 * Simple redirection for all cdevsw style objects to call their fo_kqfilter
 * method.
 */
static int
filt_fileattach(struct knote *kn)
{

	/* kn_fp was resolved by kqueue_register() before this is called. */
	return (fo_kqfilter(kn->kn_fp, kn));
}
---|
| 403 | |
---|
| 404 | /*ARGSUSED*/ |
---|
| 405 | static int |
---|
| 406 | kqueue_kqfilter(struct file *fp, struct knote *kn) |
---|
| 407 | { |
---|
| 408 | struct kqueue *kq = kn->kn_fp->f_data; |
---|
| 409 | |
---|
| 410 | if (kn->kn_filter != EVFILT_READ) |
---|
| 411 | return (EINVAL); |
---|
| 412 | |
---|
| 413 | kn->kn_status |= KN_KQUEUE; |
---|
| 414 | kn->kn_fop = &kqread_filtops; |
---|
| 415 | knlist_add(&kq->kq_sel.si_note, kn, 0); |
---|
| 416 | |
---|
| 417 | return (0); |
---|
| 418 | } |
---|
[0c9f27b] | 419 | #ifdef __rtems__ |
---|
| 420 | static int |
---|
| 421 | rtems_bsd_kqueue_kqfilter(rtems_libio_t *iop, struct knote *kn) |
---|
| 422 | { |
---|
| 423 | struct file *fp = rtems_bsd_iop_to_fp(iop); |
---|
| 424 | |
---|
| 425 | return kqueue_kqfilter(fp, kn); |
---|
| 426 | } |
---|
| 427 | #endif /* __rtems__ */ |
---|
[3c05977] | 428 | |
---|
/*
 * Detach an EVFILT_READ knote from the watched kqueue's selinfo
 * note list; undoes kqueue_kqfilter().
 */
static void
filt_kqdetach(struct knote *kn)
{
	struct kqueue *kq = kn->kn_fp->f_data;

	knlist_remove(&kq->kq_sel.si_note, kn, 0);
}
---|
| 436 | |
---|
/*ARGSUSED*/
/*
 * Event handler for watching a kqueue: report the number of events
 * pending on the watched kqueue; ready when at least one is queued.
 */
static int
filt_kqueue(struct knote *kn, long hint)
{
	struct kqueue *kq = kn->kn_fp->f_data;

	kn->kn_data = kq->kq_count;
	return (kn->kn_data > 0);
}
---|
| 446 | |
---|
[0c9f27b] | 447 | #ifndef __rtems__ |
---|
/* XXX - move to kern_proc.c? */
/*
 * Attach an EVFILT_PROC knote to the target process and add it to the
 * process's klist.  pfind()/pfind_any() return the process locked
 * (NOTE(review): per their FreeBSD contract — confirm in kern_proc.c);
 * it stays locked until the PROC_UNLOCK() on the way out.  pfind_any()
 * is used for NOTE_EXIT so that an already-zombie target can still
 * deliver its exit status.
 */
static int
filt_procattach(struct knote *kn)
{
	struct proc *p;
	int error;
	bool exiting, immediate;

	exiting = immediate = false;
	if (kn->kn_sfflags & NOTE_EXIT)
		p = pfind_any(kn->kn_id);
	else
		p = pfind(kn->kn_id);
	if (p == NULL)
		return (ESRCH);
	/* Remember whether the target is already exiting (see below). */
	if (p->p_flag & P_WEXIT)
		exiting = true;

	if ((error = p_cansee(curthread, p))) {
		PROC_UNLOCK(p);
		return (error);
	}

	kn->kn_ptr.p_proc = p;
	kn->kn_flags |= EV_CLEAR;		/* automatically set */

	/*
	 * Internal flag indicating registration done by kernel for the
	 * purposes of getting a NOTE_CHILD notification.
	 */
	if (kn->kn_flags & EV_FLAG2) {
		kn->kn_flags &= ~EV_FLAG2;
		kn->kn_data = kn->kn_sdata;		/* ppid */
		kn->kn_fflags = NOTE_CHILD;
		kn->kn_sfflags &= ~(NOTE_EXIT | NOTE_EXEC | NOTE_FORK);
		immediate = true; /* Force immediate activation of child note. */
	}
	/*
	 * Internal flag indicating registration done by kernel (for other than
	 * NOTE_CHILD).
	 */
	if (kn->kn_flags & EV_FLAG1) {
		kn->kn_flags &= ~EV_FLAG1;
	}

	knlist_add(p->p_klist, kn, 1);

	/*
	 * Immediately activate any child notes or, in the case of a zombie
	 * target process, exit notes. The latter is necessary to handle the
	 * case where the target process, e.g. a child, dies before the kevent
	 * is registered.
	 */
	if (immediate || (exiting && filt_proc(kn, NOTE_EXIT)))
		KNOTE_ACTIVATE(kn, 0);

	PROC_UNLOCK(p);

	return (0);
}
---|
| 508 | |
---|
/*
 * The knote may be attached to a different process, which may exit,
 * leaving nothing for the knote to be attached to. So when the process
 * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
 * it will be deleted when read out. However, as part of the knote deletion,
 * this routine is called, so a check is needed to avoid actually performing
 * a detach, because the original process does not exist any more.
 */
/* XXX - move to kern_proc.c? */
static void
filt_procdetach(struct knote *kn)
{

	/* Unhook from the process klist and clear the back pointer. */
	knlist_remove(kn->kn_knlist, kn, 0);
	kn->kn_ptr.p_proc = NULL;
}
---|
| 525 | |
---|
/* XXX - move to kern_proc.c? */
/*
 * Event handler for EVFILT_PROC knotes.  "hint" carries the NOTE_*
 * event (plus extra data in the control bits).  Returns non-zero when
 * the knote should be activated.
 */
static int
filt_proc(struct knote *kn, long hint)
{
	struct proc *p;
	u_int event;

	p = kn->kn_ptr.p_proc;
	if (p == NULL) /* already activated, from attach filter */
		return (0);

	/* Mask off extra data. */
	event = (u_int)hint & NOTE_PCTRLMASK;

	/* If the user is interested in this event, record it. */
	if (kn->kn_sfflags & event)
		kn->kn_fflags |= event;

	/* Process is gone, so flag the event as finished. */
	if (event == NOTE_EXIT) {
		kn->kn_flags |= EV_EOF | EV_ONESHOT;
		kn->kn_ptr.p_proc = NULL;
		if (kn->kn_fflags & NOTE_EXIT)
			kn->kn_data = KW_EXITCODE(p->p_xexit, p->p_xsig);
		/*
		 * None of the events the caller asked for ever fired;
		 * have the knote silently discarded rather than reported.
		 */
		if (kn->kn_fflags == 0)
			kn->kn_flags |= EV_DROP;
		return (1);
	}

	return (kn->kn_fflags != 0);
}
---|
| 557 | |
---|
/*
 * Called when the process forked. It mostly does the same as the
 * knote(), activating all knotes registered to be activated when the
 * process forked. Additionally, for each knote attached to the
 * parent, check whether user wants to track the new process. If so
 * attach a new knote to it, and immediately report an event with the
 * child's pid.
 */
void
knote_fork(struct knlist *list, int pid)
{
	struct kqueue *kq;
	struct knote *kn;
	struct kevent kev;
	int error;

	if (list == NULL)
		return;
	list->kl_lock(list->kl_lockarg);

	SLIST_FOREACH(kn, &list->kl_list, kn_selnext) {
		kq = kn->kn_kq;
		KQ_LOCK(kq);
		/*
		 * Skip knotes another thread has in-flux, unless it is the
		 * scanner itself (KN_SCAN), which tolerates concurrent use.
		 */
		if (kn_in_flux(kn) && (kn->kn_status & KN_SCAN) == 0) {
			KQ_UNLOCK(kq);
			continue;
		}

		/*
		 * The same as knote(), activate the event.
		 */
		if ((kn->kn_sfflags & NOTE_TRACK) == 0) {
			kn->kn_status |= KN_HASKQLOCK;
			if (kn->kn_fop->f_event(kn, NOTE_FORK))
				KNOTE_ACTIVATE(kn, 1);
			kn->kn_status &= ~KN_HASKQLOCK;
			KQ_UNLOCK(kq);
			continue;
		}

		/*
		 * The NOTE_TRACK case. In addition to the activation
		 * of the event, we need to register new events to
		 * track the child. Drop the locks in preparation for
		 * the call to kqueue_register().  The flux mark keeps
		 * the knote alive while the locks are dropped.
		 */
		kn_enter_flux(kn);
		KQ_UNLOCK(kq);
		list->kl_unlock(list->kl_lockarg);

		/*
		 * Activate existing knote and register tracking knotes with
		 * new process.
		 *
		 * First register a knote to get just the child notice. This
		 * must be a separate note from a potential NOTE_EXIT
		 * notification since both NOTE_CHILD and NOTE_EXIT are defined
		 * to use the data field (in conflicting ways).
		 */
		kev.ident = pid;
		kev.filter = kn->kn_filter;
		kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_ONESHOT |
		    EV_FLAG2;
		kev.fflags = kn->kn_sfflags;
		kev.data = kn->kn_id;		/* parent */
		kev.udata = kn->kn_kevent.udata;/* preserve udata */
		error = kqueue_register(kq, &kev, NULL, 0);
		if (error)
			kn->kn_fflags |= NOTE_TRACKERR;

		/*
		 * Then register another knote to track other potential events
		 * from the new process.
		 */
		kev.ident = pid;
		kev.filter = kn->kn_filter;
		kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
		kev.fflags = kn->kn_sfflags;
		kev.data = kn->kn_id;		/* parent */
		kev.udata = kn->kn_kevent.udata;/* preserve udata */
		error = kqueue_register(kq, &kev, NULL, 0);
		if (error)
			kn->kn_fflags |= NOTE_TRACKERR;
		if (kn->kn_fop->f_event(kn, NOTE_FORK))
			KNOTE_ACTIVATE(kn, 0);
		/* Reacquire in the same order they were dropped. */
		KQ_LOCK(kq);
		kn_leave_flux(kn);
		KQ_UNLOCK_FLUX(kq);
		list->kl_lock(list->kl_lockarg);
	}
	list->kl_unlock(list->kl_lockarg);
}
---|
[0c9f27b] | 650 | #endif /* __rtems__ */ |
---|
[3c05977] | 651 | |
---|
[af5333e] | 652 | /* |
---|
| 653 | * XXX: EVFILT_TIMER should perhaps live in kern_time.c beside the |
---|
| 654 | * interval timer support code. |
---|
| 655 | */ |
---|
[3c05977] | 656 | |
---|
[c37f9fb] | 657 | #define NOTE_TIMER_PRECMASK \ |
---|
| 658 | (NOTE_SECONDS | NOTE_MSECONDS | NOTE_USECONDS | NOTE_NSECONDS) |
---|
[c40e45b] | 659 | |
---|
| 660 | static sbintime_t |
---|
| 661 | timer2sbintime(intptr_t data, int flags) |
---|
| 662 | { |
---|
[c37f9fb] | 663 | int64_t secs; |
---|
[c40e45b] | 664 | |
---|
| 665 | /* |
---|
| 666 | * Macros for converting to the fractional second portion of an |
---|
| 667 | * sbintime_t using 64bit multiplication to improve precision. |
---|
| 668 | */ |
---|
| 669 | #define NS_TO_SBT(ns) (((ns) * (((uint64_t)1 << 63) / 500000000)) >> 32) |
---|
| 670 | #define US_TO_SBT(us) (((us) * (((uint64_t)1 << 63) / 500000)) >> 32) |
---|
| 671 | #define MS_TO_SBT(ms) (((ms) * (((uint64_t)1 << 63) / 500)) >> 32) |
---|
| 672 | switch (flags & NOTE_TIMER_PRECMASK) { |
---|
| 673 | case NOTE_SECONDS: |
---|
| 674 | #ifdef __LP64__ |
---|
| 675 | if (data > (SBT_MAX / SBT_1S)) |
---|
[0577772] | 676 | return (SBT_MAX); |
---|
[c40e45b] | 677 | #endif |
---|
| 678 | return ((sbintime_t)data << 32); |
---|
| 679 | case NOTE_MSECONDS: /* FALLTHROUGH */ |
---|
| 680 | case 0: |
---|
| 681 | if (data >= 1000) { |
---|
[c37f9fb] | 682 | secs = data / 1000; |
---|
[c40e45b] | 683 | #ifdef __LP64__ |
---|
| 684 | if (secs > (SBT_MAX / SBT_1S)) |
---|
[0577772] | 685 | return (SBT_MAX); |
---|
[c40e45b] | 686 | #endif |
---|
| 687 | return (secs << 32 | MS_TO_SBT(data % 1000)); |
---|
| 688 | } |
---|
[c37f9fb] | 689 | return (MS_TO_SBT(data)); |
---|
[c40e45b] | 690 | case NOTE_USECONDS: |
---|
| 691 | if (data >= 1000000) { |
---|
[c37f9fb] | 692 | secs = data / 1000000; |
---|
[c40e45b] | 693 | #ifdef __LP64__ |
---|
| 694 | if (secs > (SBT_MAX / SBT_1S)) |
---|
[0577772] | 695 | return (SBT_MAX); |
---|
[c40e45b] | 696 | #endif |
---|
| 697 | return (secs << 32 | US_TO_SBT(data % 1000000)); |
---|
| 698 | } |
---|
[c37f9fb] | 699 | return (US_TO_SBT(data)); |
---|
[c40e45b] | 700 | case NOTE_NSECONDS: |
---|
| 701 | if (data >= 1000000000) { |
---|
[c37f9fb] | 702 | secs = data / 1000000000; |
---|
[c40e45b] | 703 | #ifdef __LP64__ |
---|
| 704 | if (secs > (SBT_MAX / SBT_1S)) |
---|
[0577772] | 705 | return (SBT_MAX); |
---|
[c40e45b] | 706 | #endif |
---|
| 707 | return (secs << 32 | US_TO_SBT(data % 1000000000)); |
---|
| 708 | } |
---|
[0577772] | 709 | return (NS_TO_SBT(data)); |
---|
[c40e45b] | 710 | default: |
---|
| 711 | break; |
---|
| 712 | } |
---|
| 713 | return (-1); |
---|
[3c05977] | 714 | } |
---|
| 715 | |
---|
/*
 * Per-knote callout state for EVFILT_TIMER knotes, hung off
 * kn_ptr.p_v by the timer filter.
 */
struct kq_timer_cb_data {
	struct callout c;	/* callout driving filt_timerexpire() */
	sbintime_t next;	/* next timer event fires at */
	sbintime_t to;		/* precalculated timer period, 0 for abs */
};
---|
| 721 | |
---|
/*
 * Callout handler for EVFILT_TIMER: count the expiration, activate the
 * knote, and re-arm the callout for periodic timers.  The next deadline
 * is computed from the previous one (C_ABSOLUTE) so the period does not
 * drift by the handler's latency.
 */
static void
filt_timerexpire(void *knx)
{
	struct knote *kn;
	struct kq_timer_cb_data *kc;

	kn = knx;
	kn->kn_data++;
	KNOTE_ACTIVATE(kn, 0);	/* XXX - handle locking */

	if ((kn->kn_flags & EV_ONESHOT) != 0)
		return;
	kc = kn->kn_ptr.p_v;
	/* A zero period marks an absolute timer; it fires only once. */
	if (kc->to == 0)
		return;
	kc->next += kc->to;
	callout_reset_sbt_on(&kc->c, kc->next, 0, filt_timerexpire, kn,
	    PCPU_GET(cpuid), C_ABSOLUTE);
}
---|
| 741 | |
---|
/*
 * data contains amount of time to sleep
 */
/*
 * Validate the timer parameters of an EVFILT_TIMER knote and convert
 * them to a relative sbintime_t in *to.  NOTE_ABSTIME deadlines are
 * rebased from the wall clock onto uptime by subtracting the boot time.
 * Returns EINVAL for unsupported flags or a negative/invalid interval.
 */
static int
filt_timervalidate(struct knote *kn, sbintime_t *to)
{
	struct bintime bt;
	sbintime_t sbt;

	if (kn->kn_sdata < 0)
		return (EINVAL);
	/* A zero period would spin; use the smallest period, 1 unit. */
	if (kn->kn_sdata == 0 && (kn->kn_flags & EV_ONESHOT) == 0)
		kn->kn_sdata = 1;
	/*
	 * The only fflags values supported are the timer unit
	 * (precision) and the absolute time indicator.
	 */
	if ((kn->kn_sfflags & ~(NOTE_TIMER_PRECMASK | NOTE_ABSTIME)) != 0)
		return (EINVAL);

	*to = timer2sbintime(kn->kn_sdata, kn->kn_sfflags);
	if ((kn->kn_sfflags & NOTE_ABSTIME) != 0) {
		/* Rebase the wall-clock deadline onto the uptime clock. */
		getboottimebin(&bt);
		sbt = bttosbt(bt);
		*to -= sbt;
	}
	if (*to < 0)
		return (EINVAL);
	return (0);
}
---|
| 772 | |
---|
/*
 * Attach an EVFILT_TIMER knote: validate the parameters, reserve a
 * slot in the global callout budget, allocate the per-knote callout
 * state and start the timer.
 */
static int
filt_timerattach(struct knote *kn)
{
	struct kq_timer_cb_data *kc;
	sbintime_t to;
	unsigned int ncallouts;
	int error;

	error = filt_timervalidate(kn, &to);
	if (error != 0)
		return (error);

	/* Lock-free reservation against kq_calloutmax. */
	do {
		ncallouts = kq_ncallouts;
		if (ncallouts >= kq_calloutmax)
			return (ENOMEM);
	} while (!atomic_cmpset_int(&kq_ncallouts, ncallouts, ncallouts + 1));

	if ((kn->kn_sfflags & NOTE_ABSTIME) == 0)
		kn->kn_flags |= EV_CLEAR;	/* automatically set */
	kn->kn_status &= ~KN_DETACHED;		/* knlist_add clears it */
	kn->kn_ptr.p_v = kc = malloc(sizeof(*kc), M_KQUEUE, M_WAITOK);
	callout_init(&kc->c, 1);
	filt_timerstart(kn, to);

	return (0);
}
---|
| 800 | |
---|
/*
 * Arm the timer callout: an absolute timer fires once at 'to'; a
 * relative timer fires every 'to' starting one period from now.
 */
static void
filt_timerstart(struct knote *kn, sbintime_t to)
{
	struct kq_timer_cb_data *kc;

	kc = kn->kn_ptr.p_v;
	if ((kn->kn_sfflags & NOTE_ABSTIME) != 0) {
		kc->next = to;
		kc->to = 0;	/* 0 marks a non-periodic (absolute) timer */
	} else {
		kc->next = to + sbinuptime();
		kc->to = to;
	}
	callout_reset_sbt_on(&kc->c, kc->next, 0, filt_timerexpire, kn,
	    PCPU_GET(cpuid), C_ABSOLUTE);
}
---|
| 817 | |
---|
/*
 * Detach an EVFILT_TIMER knote: stop the callout, free the per-knote
 * state and return the slot to the global callout budget.
 */
static void
filt_timerdetach(struct knote *kn)
{
	struct kq_timer_cb_data *kc;
	unsigned int old __unused;

	kc = kn->kn_ptr.p_v;
	callout_drain(&kc->c);	/* wait for a running handler to finish */
	free(kc, M_KQUEUE);
	old = atomic_fetchadd_int(&kq_ncallouts, -1);
	KASSERT(old > 0, ("Number of callouts cannot become negative"));
	kn->kn_status |= KN_DETACHED;	/* knlist_remove sets it */
}
---|
| 831 | |
---|
/*
 * Update (EVENT_REGISTER) or deliver (EVENT_PROCESS) an EVFILT_TIMER
 * knote.  Re-adding a timer discards any pending expiration state and
 * restarts the callout with the new data/fflags.
 */
static void
filt_timertouch(struct knote *kn, struct kevent *kev, u_long type)
{
	struct kq_timer_cb_data *kc;
	struct kqueue *kq;
	sbintime_t to;
	int error;

	switch (type) {
	case EVENT_REGISTER:
		/* Handle re-added timers that update data/fflags */
		if (kev->flags & EV_ADD) {
			kc = kn->kn_ptr.p_v;

			/* Drain any existing callout. */
			callout_drain(&kc->c);

			/* Throw away any existing undelivered record
			 * of the timer expiration. This is done under
			 * the presumption that if a process is
			 * re-adding this timer with new parameters,
			 * it is no longer interested in what may have
			 * happened under the old parameters. If it is
			 * interested, it can wait for the expiration,
			 * delete the old timer definition, and then
			 * add the new one.
			 *
			 * This has to be done while the kq is locked:
			 *   - if enqueued, dequeue
			 *   - make it no longer active
			 *   - clear the count of expiration events
			 */
			kq = kn->kn_kq;
			KQ_LOCK(kq);
			if (kn->kn_status & KN_QUEUED)
				knote_dequeue(kn);

			kn->kn_status &= ~KN_ACTIVE;
			kn->kn_data = 0;
			KQ_UNLOCK(kq);

			/* Reschedule timer based on new data/fflags */
			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			error = filt_timervalidate(kn, &to);
			if (error != 0) {
				/* Report the error through the knote. */
				kn->kn_flags |= EV_ERROR;
				kn->kn_data = error;
			} else
				filt_timerstart(kn, to);
		}
		break;

	case EVENT_PROCESS:
		*kev = kn->kn_kevent;
		if (kn->kn_flags & EV_CLEAR) {
			kn->kn_data = 0;
			kn->kn_fflags = 0;
		}
		break;

	default:
		panic("filt_timertouch() - invalid type (%ld)", type);
		break;
	}
}
---|
| 898 | |
---|
[3c05977] | 899 | static int |
---|
| 900 | filt_timer(struct knote *kn, long hint) |
---|
| 901 | { |
---|
| 902 | |
---|
| 903 | return (kn->kn_data != 0); |
---|
| 904 | } |
---|
| 905 | |
---|
| 906 | static int |
---|
| 907 | filt_userattach(struct knote *kn) |
---|
| 908 | { |
---|
| 909 | |
---|
| 910 | /* |
---|
| 911 | * EVFILT_USER knotes are not attached to anything in the kernel. |
---|
| 912 | */ |
---|
| 913 | kn->kn_hook = NULL; |
---|
| 914 | if (kn->kn_fflags & NOTE_TRIGGER) |
---|
| 915 | kn->kn_hookid = 1; |
---|
| 916 | else |
---|
| 917 | kn->kn_hookid = 0; |
---|
| 918 | return (0); |
---|
| 919 | } |
---|
| 920 | |
---|
| 921 | static void |
---|
| 922 | filt_userdetach(__unused struct knote *kn) |
---|
| 923 | { |
---|
| 924 | |
---|
| 925 | /* |
---|
| 926 | * EVFILT_USER knotes are not attached to anything in the kernel. |
---|
| 927 | */ |
---|
| 928 | } |
---|
| 929 | |
---|
| 930 | static int |
---|
| 931 | filt_user(struct knote *kn, __unused long hint) |
---|
| 932 | { |
---|
| 933 | |
---|
| 934 | return (kn->kn_hookid); |
---|
| 935 | } |
---|
| 936 | |
---|
/*
 * Update (EVENT_REGISTER) or deliver (EVENT_PROCESS) an EVFILT_USER
 * knote.  The NOTE_FFCTRLMASK bits select how the saved fflags are
 * combined with the new ones; NOTE_TRIGGER marks the knote ready.
 */
static void
filt_usertouch(struct knote *kn, struct kevent *kev, u_long type)
{
	u_int ffctrl;

	switch (type) {
	case EVENT_REGISTER:
		if (kev->fflags & NOTE_TRIGGER)
			kn->kn_hookid = 1;

		/* Split the control bits from the user flag bits. */
		ffctrl = kev->fflags & NOTE_FFCTRLMASK;
		kev->fflags &= NOTE_FFLAGSMASK;
		switch (ffctrl) {
		case NOTE_FFNOP:
			break;

		case NOTE_FFAND:
			kn->kn_sfflags &= kev->fflags;
			break;

		case NOTE_FFOR:
			kn->kn_sfflags |= kev->fflags;
			break;

		case NOTE_FFCOPY:
			kn->kn_sfflags = kev->fflags;
			break;

		default:
			/* XXX Return error? */
			break;
		}
		kn->kn_sdata = kev->data;
		if (kev->flags & EV_CLEAR) {
			kn->kn_hookid = 0;
			kn->kn_data = 0;
			kn->kn_fflags = 0;
		}
		break;

	case EVENT_PROCESS:
		/* Deliver the saved fflags/data rather than the live ones. */
		*kev = kn->kn_kevent;
		kev->fflags = kn->kn_sfflags;
		kev->data = kn->kn_sdata;
		if (kn->kn_flags & EV_CLEAR) {
			kn->kn_hookid = 0;
			kn->kn_data = 0;
			kn->kn_fflags = 0;
		}
		break;

	default:
		panic("filt_usertouch() - invalid type (%ld)", type);
		break;
	}
}
---|
| 993 | |
---|
#ifdef __rtems__
static int
kern_kqueue(struct thread *td, int flags, struct filecaps *fcaps);

/* On RTEMS the syscall entry point is file-local. */
static
#endif /* __rtems__ */
int
sys_kqueue(struct thread *td, struct kqueue_args *uap)
{

	/* kqueue(2): no flags, no capability restrictions. */
	return (kern_kqueue(td, 0, NULL));
}
---|
| 1006 | |
---|
/*
 * Initialize an embedded struct kqueue: its lock, the pending-event
 * queue, the select/poll knote list and the wakeup task.
 */
static void
kqueue_init(struct kqueue *kq)
{

	/* MTX_DUPOK: kq locks may nest when one kqueue watches another. */
	mtx_init(&kq->kq_lock, "kqueue", NULL, MTX_DEF | MTX_DUPOK);
	TAILQ_INIT(&kq->kq_head);
	knlist_init_mtx(&kq->kq_sel.si_note, &kq->kq_lock);
	TASK_INIT(&kq->kq_task, 0, kqueue_task, kq);
}
---|
| 1016 | |
---|
| 1017 | int |
---|
| 1018 | kern_kqueue(struct thread *td, int flags, struct filecaps *fcaps) |
---|
[3c05977] | 1019 | { |
---|
| 1020 | struct filedesc *fdp; |
---|
| 1021 | struct kqueue *kq; |
---|
| 1022 | struct file *fp; |
---|
[c40e45b] | 1023 | struct ucred *cred; |
---|
[3c05977] | 1024 | int fd, error; |
---|
| 1025 | |
---|
[0c9f27b] | 1026 | #ifndef __rtems__ |
---|
[3c05977] | 1027 | fdp = td->td_proc->p_fd; |
---|
[c40e45b] | 1028 | cred = td->td_ucred; |
---|
| 1029 | if (!chgkqcnt(cred->cr_ruidinfo, 1, lim_cur(td, RLIMIT_KQUEUES))) |
---|
| 1030 | return (ENOMEM); |
---|
[0c9f27b] | 1031 | #else /* __rtems__ */ |
---|
[c40e45b] | 1032 | (void)fdp; |
---|
| 1033 | (void)cred; |
---|
[0c9f27b] | 1034 | #endif /* __rtems__ */ |
---|
[3c05977] | 1035 | |
---|
[c40e45b] | 1036 | error = falloc_caps(td, &fp, &fd, flags, fcaps); |
---|
| 1037 | if (error != 0) { |
---|
| 1038 | chgkqcnt(cred->cr_ruidinfo, -1, 0); |
---|
| 1039 | return (error); |
---|
| 1040 | } |
---|
| 1041 | |
---|
| 1042 | /* An extra reference on `fp' has been held for us by falloc(). */ |
---|
[3c05977] | 1043 | kq = malloc(sizeof *kq, M_KQUEUE, M_WAITOK | M_ZERO); |
---|
[c40e45b] | 1044 | kqueue_init(kq); |
---|
[0c9f27b] | 1045 | #ifndef __rtems__ |
---|
[3c05977] | 1046 | kq->kq_fdp = fdp; |
---|
[c40e45b] | 1047 | kq->kq_cred = crhold(cred); |
---|
[0c9f27b] | 1048 | #endif /* __rtems__ */ |
---|
[3c05977] | 1049 | |
---|
[0c9f27b] | 1050 | #ifndef __rtems__ |
---|
[3c05977] | 1051 | FILEDESC_XLOCK(fdp); |
---|
[7eeb079] | 1052 | TAILQ_INSERT_HEAD(&fdp->fd_kqlist, kq, kq_list); |
---|
[3c05977] | 1053 | FILEDESC_XUNLOCK(fdp); |
---|
[0c9f27b] | 1054 | #else /* __rtems__ */ |
---|
| 1055 | rtems_libio_lock(); |
---|
[7eeb079] | 1056 | TAILQ_INSERT_HEAD(&fd_kqlist, kq, kq_list); |
---|
[0c9f27b] | 1057 | rtems_libio_unlock(); |
---|
| 1058 | #endif /* __rtems__ */ |
---|
[3c05977] | 1059 | |
---|
| 1060 | finit(fp, FREAD | FWRITE, DTYPE_KQUEUE, kq, &kqueueops); |
---|
[72d5fa1] | 1061 | #ifndef __rtems__ |
---|
[3c05977] | 1062 | fdrop(fp, td); |
---|
[72d5fa1] | 1063 | #endif /* __rtems__ */ |
---|
[3c05977] | 1064 | |
---|
| 1065 | td->td_retval[0] = fd; |
---|
[c40e45b] | 1066 | return (0); |
---|
[3c05977] | 1067 | } |
---|
#ifdef __rtems__
/*
 * RTEMS user-level kqueue(): run sys_kqueue() on the current BSD
 * thread context and translate the result into the POSIX
 * return-value/errno convention.
 */
int
kqueue(void)
{
	struct thread *td = rtems_bsd_get_curthread_or_null();
	struct kqueue_args ua;
	int error;

	if (td != NULL) {
		error = sys_kqueue(td, &ua);
	} else {
		/* No thread context could be allocated. */
		error = ENOMEM;
	}

	if (error == 0) {
		return td->td_retval[0];
	} else {
		rtems_set_errno_and_return_minus_one(error);
	}
}
#endif /* __rtems__ */
---|
[3c05977] | 1089 | |
---|
/*
 * Generic kevent(2) argument block shared by the native and
 * COMPAT_FREEBSD11 entry points; the change/event lists are untyped
 * because the element layout differs between the two ABIs.
 */
struct g_kevent_args {
	int fd;
	void *changelist;
	int nchanges;
	void *eventlist;
	int nevents;
	const struct timespec *timeout;
};
---|
[c37f9fb] | 1098 | |
---|
#ifdef __rtems__
static int kern_kevent(struct thread *td, int fd, int nchanges, int nevents,
    struct kevent_copyops *k_ops, const struct timespec *timeout);

static int kern_kevent_fp(struct thread *td, struct file *fp, int nchanges,
    int nevents, struct kevent_copyops *k_ops, const struct timespec *timeout);

/* On RTEMS the syscall entry point is file-local. */
static
#endif /* __rtems__ */
int
sys_kevent(struct thread *td, struct kevent_args *uap)
{
	/* Copy routines for the native struct kevent ABI. */
	struct kevent_copyops k_ops = {
		.arg = uap,
		.k_copyout = kevent_copyout,
		.k_copyin = kevent_copyin,
		.kevent_size = sizeof(struct kevent),
	};
	struct g_kevent_args gk_args = {
		.fd = uap->fd,
		.changelist = uap->changelist,
		.nchanges = uap->nchanges,
		.eventlist = uap->eventlist,
		.nevents = uap->nevents,
		.timeout = uap->timeout,
	};

	return (kern_kevent_generic(td, &gk_args, &k_ops, "kevent"));
}
---|
| 1128 | |
---|
/*
 * Common body of the kevent(2) flavors: copy in the optional timeout,
 * ktrace the change list, perform the operation via kern_kevent() and
 * ktrace the returned events.  struct_name identifies the kevent ABI
 * in the ktrace records.
 */
static int
kern_kevent_generic(struct thread *td, struct g_kevent_args *uap,
    struct kevent_copyops *k_ops, const char *struct_name)
{
	struct timespec ts, *tsp;
#ifdef KTRACE
	/* Saved because kevent_copyout() advances uap->eventlist. */
	struct kevent *eventlist = uap->eventlist;
#endif
	int error;

	if (uap->timeout != NULL) {
		error = copyin(uap->timeout, &ts, sizeof(ts));
		if (error)
			return (error);
		tsp = &ts;
	} else
		tsp = NULL;

#ifdef KTRACE
	if (KTRPOINT(td, KTR_STRUCT_ARRAY))
		ktrstructarray(struct_name, UIO_USERSPACE, uap->changelist,
		    uap->nchanges, k_ops->kevent_size);
#endif

	error = kern_kevent(td, uap->fd, uap->nchanges, uap->nevents,
	    k_ops, tsp);

#ifdef KTRACE
	if (error == 0 && KTRPOINT(td, KTR_STRUCT_ARRAY))
		ktrstructarray(struct_name, UIO_USERSPACE, eventlist,
		    td->td_retval[0], k_ops->kevent_size);
#endif

	return (error);
}
---|
#ifdef __rtems__
__weak_reference(kevent, _kevent);

/*
 * RTEMS user-level kevent(): marshal the arguments, run sys_kevent()
 * on the current BSD thread context and translate the result into the
 * POSIX return-value/errno convention.
 */
int
kevent(int kq, const struct kevent *changelist, int nchanges,
    struct kevent *eventlist, int nevents,
    const struct timespec *timeout)
{
	struct thread *td = rtems_bsd_get_curthread_or_null();
	struct kevent_args ua = {
		.fd = kq,
		.changelist = changelist,
		.nchanges = nchanges,
		.eventlist = eventlist,
		.nevents = nevents,
		.timeout = timeout
	};
	int error;

	if (td != NULL) {
		error = sys_kevent(td, &ua);
	} else {
		/* No thread context could be allocated. */
		error = ENOMEM;
	}

	if (error == 0) {
		/* Number of events delivered. */
		return td->td_retval[0];
	} else {
		rtems_set_errno_and_return_minus_one(error);
	}
}
#endif /* __rtems__ */
---|
[3c05977] | 1196 | |
---|
/*
 * Copy 'count' items into the destination list pointed to by
 * uap->eventlist, advancing the pointer on success so batches can be
 * copied out incrementally.
 */
static int
kevent_copyout(void *arg, struct kevent *kevp, int count)
{
	struct kevent_args *uap;
	int error;

	KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count));
	uap = (struct kevent_args *)arg;

	error = copyout(kevp, uap->eventlist, count * sizeof *kevp);
	if (error == 0)
		uap->eventlist += count;	/* position for the next batch */
	return (error);
}
---|
| 1214 | |
---|
/*
 * Copy 'count' items from the list pointed to by uap->changelist,
 * advancing the pointer on success so batches can be copied in
 * incrementally.
 */
static int
kevent_copyin(void *arg, struct kevent *kevp, int count)
{
	struct kevent_args *uap;
	int error;

	KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count));
	uap = (struct kevent_args *)arg;

	error = copyin(uap->changelist, kevp, count * sizeof *kevp);
	if (error == 0)
		uap->changelist += count;	/* position for the next batch */
	return (error);
}
---|
| 1232 | |
---|
[c37f9fb] | 1233 | #ifdef COMPAT_FREEBSD11 |
---|
| 1234 | static int |
---|
| 1235 | kevent11_copyout(void *arg, struct kevent *kevp, int count) |
---|
| 1236 | { |
---|
| 1237 | struct freebsd11_kevent_args *uap; |
---|
| 1238 | struct kevent_freebsd11 kev11; |
---|
| 1239 | int error, i; |
---|
| 1240 | |
---|
| 1241 | KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count)); |
---|
| 1242 | uap = (struct freebsd11_kevent_args *)arg; |
---|
| 1243 | |
---|
| 1244 | for (i = 0; i < count; i++) { |
---|
| 1245 | kev11.ident = kevp->ident; |
---|
| 1246 | kev11.filter = kevp->filter; |
---|
| 1247 | kev11.flags = kevp->flags; |
---|
| 1248 | kev11.fflags = kevp->fflags; |
---|
| 1249 | kev11.data = kevp->data; |
---|
| 1250 | kev11.udata = kevp->udata; |
---|
| 1251 | error = copyout(&kev11, uap->eventlist, sizeof(kev11)); |
---|
| 1252 | if (error != 0) |
---|
| 1253 | break; |
---|
| 1254 | uap->eventlist++; |
---|
| 1255 | kevp++; |
---|
| 1256 | } |
---|
| 1257 | return (error); |
---|
| 1258 | } |
---|
| 1259 | |
---|
| 1260 | /* |
---|
| 1261 | * Copy 'count' items from the list pointed to by uap->changelist. |
---|
| 1262 | */ |
---|
| 1263 | static int |
---|
| 1264 | kevent11_copyin(void *arg, struct kevent *kevp, int count) |
---|
| 1265 | { |
---|
| 1266 | struct freebsd11_kevent_args *uap; |
---|
| 1267 | struct kevent_freebsd11 kev11; |
---|
| 1268 | int error, i; |
---|
| 1269 | |
---|
| 1270 | KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count)); |
---|
| 1271 | uap = (struct freebsd11_kevent_args *)arg; |
---|
| 1272 | |
---|
| 1273 | for (i = 0; i < count; i++) { |
---|
| 1274 | error = copyin(uap->changelist, &kev11, sizeof(kev11)); |
---|
| 1275 | if (error != 0) |
---|
| 1276 | break; |
---|
| 1277 | kevp->ident = kev11.ident; |
---|
| 1278 | kevp->filter = kev11.filter; |
---|
| 1279 | kevp->flags = kev11.flags; |
---|
| 1280 | kevp->fflags = kev11.fflags; |
---|
| 1281 | kevp->data = (uintptr_t)kev11.data; |
---|
| 1282 | kevp->udata = kev11.udata; |
---|
| 1283 | bzero(&kevp->ext, sizeof(kevp->ext)); |
---|
| 1284 | uap->changelist++; |
---|
| 1285 | kevp++; |
---|
| 1286 | } |
---|
| 1287 | return (error); |
---|
| 1288 | } |
---|
| 1289 | |
---|
/*
 * COMPAT_FREEBSD11 kevent(2): same flow as sys_kevent() but with copy
 * routines that translate the old struct kevent_freebsd11 ABI.
 */
int
freebsd11_kevent(struct thread *td, struct freebsd11_kevent_args *uap)
{
	struct kevent_copyops k_ops = {
		.arg = uap,
		.k_copyout = kevent11_copyout,
		.k_copyin = kevent11_copyin,
		.kevent_size = sizeof(struct kevent_freebsd11),
	};
	struct g_kevent_args gk_args = {
		.fd = uap->fd,
		.changelist = uap->changelist,
		.nchanges = uap->nchanges,
		.eventlist = uap->eventlist,
		.nevents = uap->nevents,
		.timeout = uap->timeout,
	};

	return (kern_kevent_generic(td, &gk_args, &k_ops, "kevent_freebsd11"));
}
---|
| 1310 | #endif |
---|
| 1311 | |
---|
/*
 * Resolve fd to a file, demanding only the capability rights the
 * requested operation actually needs (change and/or event), then
 * perform the kevent on it.
 */
int
kern_kevent(struct thread *td, int fd, int nchanges, int nevents,
    struct kevent_copyops *k_ops, const struct timespec *timeout)
{
	cap_rights_t rights;
	struct file *fp;
	int error;

	cap_rights_init(&rights);
	if (nchanges > 0)
		cap_rights_set(&rights, CAP_KQUEUE_CHANGE);
	if (nevents > 0)
		cap_rights_set(&rights, CAP_KQUEUE_EVENT);
	error = fget(td, fd, &rights, &fp);
	if (error != 0)
		return (error);

	error = kern_kevent_fp(td, fp, nchanges, nevents, k_ops, timeout);
	fdrop(fp, td);	/* release the reference taken by fget() */

	return (error);
}
---|
| 1334 | |
---|
/*
 * Apply the change list to kq in KQ_NEVENTS-sized batches, reporting
 * per-change errors through the event list where EV_RECEIPT (or a
 * registration error) demands it, then scan for pending events.
 */
static int
kqueue_kevent(struct kqueue *kq, struct thread *td, int nchanges, int nevents,
    struct kevent_copyops *k_ops, const struct timespec *timeout)
{
	struct kevent keva[KQ_NEVENTS];	/* on-stack staging buffer */
	struct kevent *kevp, *changes;
	int i, n, nerrors, error;

	nerrors = 0;
	while (nchanges > 0) {
		n = nchanges > KQ_NEVENTS ? KQ_NEVENTS : nchanges;
		error = k_ops->k_copyin(k_ops->arg, keva, n);
		if (error)
			return (error);
		changes = keva;
		for (i = 0; i < n; i++) {
			kevp = &changes[i];
			if (!kevp->filter)
				continue;
			kevp->flags &= ~EV_SYSFLAGS;	/* strip kernel-only flags */
			error = kqueue_register(kq, kevp, td, 1);
			if (error || (kevp->flags & EV_RECEIPT)) {
				/* No room left to report: fail the call. */
				if (nevents == 0)
					return (error);
				kevp->flags = EV_ERROR;
				kevp->data = error;
				(void)k_ops->k_copyout(k_ops->arg, kevp, 1);
				nevents--;
				nerrors++;
			}
		}
		nchanges -= n;
	}
	/* If any change produced an error record, skip the scan. */
	if (nerrors) {
		td->td_retval[0] = nerrors;
		return (0);
	}

	return (kqueue_scan(kq, nevents, k_ops, timeout, keva, td));
}
---|
| 1375 | |
---|
/*
 * Perform a kevent on an already-resolved file: acquire the kqueue
 * behind fp, run the change/scan operation, and drop the reference.
 */
int
kern_kevent_fp(struct thread *td, struct file *fp, int nchanges, int nevents,
    struct kevent_copyops *k_ops, const struct timespec *timeout)
{
	struct kqueue *kq;
	int error;

	error = kqueue_acquire(fp, &kq);
	if (error != 0)
		return (error);
	error = kqueue_kevent(kq, td, nchanges, nevents, k_ops, timeout);
	kqueue_release(kq, 0);
	return (error);
}
---|
| 1390 | |
---|
/*
 * Performs a kevent() call on a temporarily created kqueue. This can be
 * used to perform one-shot polling, similar to poll() and select().
 * The kqueue lives on the caller's stack and is torn down before
 * returning; nchanges and nevents are deliberately the same here.
 */
int
kern_kevent_anonymous(struct thread *td, int nevents,
    struct kevent_copyops *k_ops)
{
	struct kqueue kq = {};
	int error;

	kqueue_init(&kq);
	kq.kq_refcnt = 1;	/* sole reference; never exposed via a fd */
	error = kqueue_kevent(&kq, td, nevents, nevents, k_ops, NULL);
	kqueue_drain(&kq, td);
	kqueue_destroy(&kq);
	return (error);
}
---|
| 1409 | |
---|
| 1410 | int |
---|
| 1411 | kqueue_add_filteropts(int filt, struct filterops *filtops) |
---|
| 1412 | { |
---|
| 1413 | int error; |
---|
| 1414 | |
---|
| 1415 | error = 0; |
---|
| 1416 | if (filt > 0 || filt + EVFILT_SYSCOUNT < 0) { |
---|
| 1417 | printf( |
---|
| 1418 | "trying to add a filterop that is out of range: %d is beyond %d\n", |
---|
| 1419 | ~filt, EVFILT_SYSCOUNT); |
---|
| 1420 | return EINVAL; |
---|
| 1421 | } |
---|
| 1422 | mtx_lock(&filterops_lock); |
---|
| 1423 | if (sysfilt_ops[~filt].for_fop != &null_filtops && |
---|
| 1424 | sysfilt_ops[~filt].for_fop != NULL) |
---|
| 1425 | error = EEXIST; |
---|
| 1426 | else { |
---|
| 1427 | sysfilt_ops[~filt].for_fop = filtops; |
---|
| 1428 | sysfilt_ops[~filt].for_refcnt = 0; |
---|
| 1429 | } |
---|
| 1430 | mtx_unlock(&filterops_lock); |
---|
| 1431 | |
---|
| 1432 | return (error); |
---|
| 1433 | } |
---|
| 1434 | |
---|
| 1435 | int |
---|
| 1436 | kqueue_del_filteropts(int filt) |
---|
| 1437 | { |
---|
| 1438 | int error; |
---|
| 1439 | |
---|
| 1440 | error = 0; |
---|
| 1441 | if (filt > 0 || filt + EVFILT_SYSCOUNT < 0) |
---|
| 1442 | return EINVAL; |
---|
| 1443 | |
---|
| 1444 | mtx_lock(&filterops_lock); |
---|
| 1445 | if (sysfilt_ops[~filt].for_fop == &null_filtops || |
---|
| 1446 | sysfilt_ops[~filt].for_fop == NULL) |
---|
| 1447 | error = EINVAL; |
---|
| 1448 | else if (sysfilt_ops[~filt].for_refcnt != 0) |
---|
| 1449 | error = EBUSY; |
---|
| 1450 | else { |
---|
| 1451 | sysfilt_ops[~filt].for_fop = &null_filtops; |
---|
| 1452 | sysfilt_ops[~filt].for_refcnt = 0; |
---|
| 1453 | } |
---|
| 1454 | mtx_unlock(&filterops_lock); |
---|
| 1455 | |
---|
| 1456 | return error; |
---|
| 1457 | } |
---|
| 1458 | |
---|
/*
 * Look up the filterops for system filter 'filt' and take a reference
 * on the slot; pair with kqueue_fo_release().  Lock-free filters are
 * returned without refcounting.  Returns NULL for out-of-range
 * filters; unregistered slots are mapped to null_filtops.
 */
static struct filterops *
kqueue_fo_find(int filt)
{

	if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
		return NULL;

	if (sysfilt_ops[~filt].for_nolock)
		return sysfilt_ops[~filt].for_fop;

	mtx_lock(&filterops_lock);
	sysfilt_ops[~filt].for_refcnt++;
	if (sysfilt_ops[~filt].for_fop == NULL)
		sysfilt_ops[~filt].for_fop = &null_filtops;
	mtx_unlock(&filterops_lock);

	return sysfilt_ops[~filt].for_fop;
}
---|
| 1477 | |
---|
/*
 * Drop the reference taken by kqueue_fo_find() on the slot for system
 * filter 'filt'; lock-free filters are not refcounted, so nothing is
 * done for them.
 */
static void
kqueue_fo_release(int filt)
{

	if (filt > 0 || filt + EVFILT_SYSCOUNT < 0)
		return;

	if (sysfilt_ops[~filt].for_nolock)
		return;

	mtx_lock(&filterops_lock);
	KASSERT(sysfilt_ops[~filt].for_refcnt > 0,
	    ("filter object refcount not valid on release"));
	sysfilt_ops[~filt].for_refcnt--;
	mtx_unlock(&filterops_lock);
}
---|
| 1494 | |
---|
| 1495 | /* |
---|
| 1496 | * A ref to kq (obtained via kqueue_acquire) must be held. waitok will |
---|
| 1497 | * influence if memory allocation should wait. Make sure it is 0 if you |
---|
| 1498 | * hold any mutexes. |
---|
| 1499 | */ |
---|
| 1500 | static int |
---|
| 1501 | kqueue_register(struct kqueue *kq, struct kevent *kev, struct thread *td, int waitok) |
---|
| 1502 | { |
---|
| 1503 | struct filterops *fops; |
---|
| 1504 | struct file *fp; |
---|
| 1505 | struct knote *kn, *tkn; |
---|
[c40e45b] | 1506 | struct knlist *knl; |
---|
[3c05977] | 1507 | int error, filt, event; |
---|
[7eeb079] | 1508 | int haskqglobal, filedesc_unlock; |
---|
[3c05977] | 1509 | |
---|
[c40e45b] | 1510 | if ((kev->flags & (EV_ENABLE | EV_DISABLE)) == (EV_ENABLE | EV_DISABLE)) |
---|
| 1511 | return (EINVAL); |
---|
| 1512 | |
---|
[3c05977] | 1513 | fp = NULL; |
---|
| 1514 | kn = NULL; |
---|
[c40e45b] | 1515 | knl = NULL; |
---|
[3c05977] | 1516 | error = 0; |
---|
| 1517 | haskqglobal = 0; |
---|
[7eeb079] | 1518 | filedesc_unlock = 0; |
---|
[3c05977] | 1519 | |
---|
| 1520 | filt = kev->filter; |
---|
| 1521 | fops = kqueue_fo_find(filt); |
---|
| 1522 | if (fops == NULL) |
---|
| 1523 | return EINVAL; |
---|
| 1524 | |
---|
[c40e45b] | 1525 | if (kev->flags & EV_ADD) { |
---|
| 1526 | /* |
---|
| 1527 | * Prevent waiting with locks. Non-sleepable |
---|
| 1528 | * allocation failures are handled in the loop, only |
---|
| 1529 | * if the spare knote appears to be actually required. |
---|
| 1530 | */ |
---|
| 1531 | tkn = knote_alloc(waitok); |
---|
| 1532 | } else { |
---|
| 1533 | tkn = NULL; |
---|
| 1534 | } |
---|
[3c05977] | 1535 | |
---|
| 1536 | findkn: |
---|
| 1537 | if (fops->f_isfd) { |
---|
| 1538 | KASSERT(td != NULL, ("td is NULL")); |
---|
[c40e45b] | 1539 | if (kev->ident > INT_MAX) |
---|
| 1540 | error = EBADF; |
---|
| 1541 | else |
---|
[bcdce02] | 1542 | error = fget(td, kev->ident, &cap_event_rights, &fp); |
---|
[3c05977] | 1543 | if (error) |
---|
| 1544 | goto done; |
---|
| 1545 | |
---|
| 1546 | if ((kev->flags & EV_ADD) == EV_ADD && kqueue_expand(kq, fops, |
---|
| 1547 | kev->ident, 0) != 0) { |
---|
| 1548 | /* try again */ |
---|
| 1549 | fdrop(fp, td); |
---|
| 1550 | fp = NULL; |
---|
| 1551 | error = kqueue_expand(kq, fops, kev->ident, waitok); |
---|
| 1552 | if (error) |
---|
| 1553 | goto done; |
---|
| 1554 | goto findkn; |
---|
| 1555 | } |
---|
| 1556 | |
---|
[0c9f27b] | 1557 | #ifndef __rtems__ |
---|
[3c05977] | 1558 | if (fp->f_type == DTYPE_KQUEUE) { |
---|
[0c9f27b] | 1559 | #else /* __rtems__ */ |
---|
| 1560 | if (fp->f_io.pathinfo.handlers == &kqueueops) { |
---|
| 1561 | #endif /* __rtems__ */ |
---|
[3c05977] | 1562 | /* |
---|
[c40e45b] | 1563 | * If we add some intelligence about what we are doing, |
---|
[3c05977] | 1564 | * we should be able to support events on ourselves. |
---|
| 1565 | * We need to know when we are doing this to prevent |
---|
| 1566 | * getting both the knlist lock and the kq lock since |
---|
| 1567 | * they are the same thing. |
---|
| 1568 | */ |
---|
| 1569 | if (fp->f_data == kq) { |
---|
| 1570 | error = EINVAL; |
---|
| 1571 | goto done; |
---|
| 1572 | } |
---|
| 1573 | |
---|
[7eeb079] | 1574 | /* |
---|
| 1575 | * Pre-lock the filedesc before the global |
---|
| 1576 | * lock mutex, see the comment in |
---|
| 1577 | * kqueue_close(). |
---|
| 1578 | */ |
---|
| 1579 | FILEDESC_XLOCK(td->td_proc->p_fd); |
---|
| 1580 | filedesc_unlock = 1; |
---|
[3c05977] | 1581 | KQ_GLOBAL_LOCK(&kq_global, haskqglobal); |
---|
| 1582 | } |
---|
| 1583 | |
---|
| 1584 | KQ_LOCK(kq); |
---|
| 1585 | if (kev->ident < kq->kq_knlistsize) { |
---|
| 1586 | SLIST_FOREACH(kn, &kq->kq_knlist[kev->ident], kn_link) |
---|
| 1587 | if (kev->filter == kn->kn_filter) |
---|
| 1588 | break; |
---|
| 1589 | } |
---|
| 1590 | } else { |
---|
| 1591 | if ((kev->flags & EV_ADD) == EV_ADD) |
---|
| 1592 | kqueue_expand(kq, fops, kev->ident, waitok); |
---|
| 1593 | |
---|
| 1594 | KQ_LOCK(kq); |
---|
[c40e45b] | 1595 | |
---|
| 1596 | /* |
---|
| 1597 | * If possible, find an existing knote to use for this kevent. |
---|
| 1598 | */ |
---|
| 1599 | if (kev->filter == EVFILT_PROC && |
---|
| 1600 | (kev->flags & (EV_FLAG1 | EV_FLAG2)) != 0) { |
---|
| 1601 | /* This is an internal creation of a process tracking |
---|
| 1602 | * note. Don't attempt to coalesce this with an |
---|
| 1603 | * existing note. |
---|
| 1604 | */ |
---|
| 1605 | ; |
---|
| 1606 | } else if (kq->kq_knhashmask != 0) { |
---|
[3c05977] | 1607 | struct klist *list; |
---|
| 1608 | |
---|
| 1609 | list = &kq->kq_knhash[ |
---|
| 1610 | KN_HASH((u_long)kev->ident, kq->kq_knhashmask)]; |
---|
| 1611 | SLIST_FOREACH(kn, list, kn_link) |
---|
| 1612 | if (kev->ident == kn->kn_id && |
---|
| 1613 | kev->filter == kn->kn_filter) |
---|
| 1614 | break; |
---|
| 1615 | } |
---|
| 1616 | } |
---|
| 1617 | |
---|
[c40e45b] | 1618 | /* knote is in the process of changing, wait for it to stabilize. */ |
---|
[0577772] | 1619 | if (kn != NULL && kn_in_flux(kn)) { |
---|
[3c05977] | 1620 | KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal); |
---|
[7eeb079] | 1621 | if (filedesc_unlock) { |
---|
| 1622 | FILEDESC_XUNLOCK(td->td_proc->p_fd); |
---|
| 1623 | filedesc_unlock = 0; |
---|
| 1624 | } |
---|
[3c05977] | 1625 | kq->kq_state |= KQ_FLUXWAIT; |
---|
| 1626 | msleep(kq, &kq->kq_lock, PSOCK | PDROP, "kqflxwt", 0); |
---|
| 1627 | if (fp != NULL) { |
---|
| 1628 | fdrop(fp, td); |
---|
| 1629 | fp = NULL; |
---|
| 1630 | } |
---|
| 1631 | goto findkn; |
---|
| 1632 | } |
---|
| 1633 | |
---|
| 1634 | /* |
---|
| 1635 | * kn now contains the matching knote, or NULL if no match |
---|
| 1636 | */ |
---|
| 1637 | if (kn == NULL) { |
---|
| 1638 | if (kev->flags & EV_ADD) { |
---|
| 1639 | kn = tkn; |
---|
| 1640 | tkn = NULL; |
---|
| 1641 | if (kn == NULL) { |
---|
| 1642 | KQ_UNLOCK(kq); |
---|
| 1643 | error = ENOMEM; |
---|
| 1644 | goto done; |
---|
| 1645 | } |
---|
| 1646 | kn->kn_fp = fp; |
---|
| 1647 | kn->kn_kq = kq; |
---|
| 1648 | kn->kn_fop = fops; |
---|
| 1649 | /* |
---|
| 1650 | * apply reference counts to knote structure, and |
---|
| 1651 | * do not release it at the end of this routine. |
---|
| 1652 | */ |
---|
| 1653 | fops = NULL; |
---|
| 1654 | fp = NULL; |
---|
| 1655 | |
---|
| 1656 | kn->kn_sfflags = kev->fflags; |
---|
| 1657 | kn->kn_sdata = kev->data; |
---|
| 1658 | kev->fflags = 0; |
---|
| 1659 | kev->data = 0; |
---|
| 1660 | kn->kn_kevent = *kev; |
---|
| 1661 | kn->kn_kevent.flags &= ~(EV_ADD | EV_DELETE | |
---|
[c40e45b] | 1662 | EV_ENABLE | EV_DISABLE | EV_FORCEONESHOT); |
---|
[0577772] | 1663 | kn->kn_status = KN_DETACHED; |
---|
| 1664 | kn_enter_flux(kn); |
---|
[3c05977] | 1665 | |
---|
| 1666 | error = knote_attach(kn, kq); |
---|
| 1667 | KQ_UNLOCK(kq); |
---|
| 1668 | if (error != 0) { |
---|
| 1669 | tkn = kn; |
---|
| 1670 | goto done; |
---|
| 1671 | } |
---|
| 1672 | |
---|
| 1673 | if ((error = kn->kn_fop->f_attach(kn)) != 0) { |
---|
[0577772] | 1674 | knote_drop_detached(kn, td); |
---|
[3c05977] | 1675 | goto done; |
---|
| 1676 | } |
---|
[c40e45b] | 1677 | knl = kn_list_lock(kn); |
---|
[3c05977] | 1678 | goto done_ev_add; |
---|
| 1679 | } else { |
---|
| 1680 | /* No matching knote and the EV_ADD flag is not set. */ |
---|
| 1681 | KQ_UNLOCK(kq); |
---|
| 1682 | error = ENOENT; |
---|
| 1683 | goto done; |
---|
| 1684 | } |
---|
| 1685 | } |
---|
| 1686 | |
---|
| 1687 | if (kev->flags & EV_DELETE) { |
---|
[0577772] | 1688 | kn_enter_flux(kn); |
---|
[3c05977] | 1689 | KQ_UNLOCK(kq); |
---|
| 1690 | knote_drop(kn, td); |
---|
| 1691 | goto done; |
---|
| 1692 | } |
---|
| 1693 | |
---|
[c40e45b] | 1694 | if (kev->flags & EV_FORCEONESHOT) { |
---|
| 1695 | kn->kn_flags |= EV_ONESHOT; |
---|
| 1696 | KNOTE_ACTIVATE(kn, 1); |
---|
| 1697 | } |
---|
| 1698 | |
---|
[3c05977] | 1699 | /* |
---|
| 1700 | * The user may change some filter values after the initial EV_ADD, |
---|
| 1701 | * but doing so will not reset any filter which has already been |
---|
| 1702 | * triggered. |
---|
| 1703 | */ |
---|
[0577772] | 1704 | kn->kn_status |= KN_SCAN; |
---|
| 1705 | kn_enter_flux(kn); |
---|
[3c05977] | 1706 | KQ_UNLOCK(kq); |
---|
[c40e45b] | 1707 | knl = kn_list_lock(kn); |
---|
[3c05977] | 1708 | kn->kn_kevent.udata = kev->udata; |
---|
| 1709 | if (!fops->f_isfd && fops->f_touch != NULL) { |
---|
| 1710 | fops->f_touch(kn, kev, EVENT_REGISTER); |
---|
| 1711 | } else { |
---|
| 1712 | kn->kn_sfflags = kev->fflags; |
---|
| 1713 | kn->kn_sdata = kev->data; |
---|
| 1714 | } |
---|
| 1715 | |
---|
| 1716 | /* |
---|
| 1717 | * We can get here with kn->kn_knlist == NULL. This can happen when |
---|
| 1718 | * the initial attach event decides that the event is "completed" |
---|
| 1719 | * already. i.e. filt_procattach is called on a zombie process. It |
---|
| 1720 | * will call filt_proc which will remove it from the list, and NULL |
---|
| 1721 | * kn_knlist. |
---|
| 1722 | */ |
---|
| 1723 | done_ev_add: |
---|
[c40e45b] | 1724 | if ((kev->flags & EV_ENABLE) != 0) |
---|
| 1725 | kn->kn_status &= ~KN_DISABLED; |
---|
| 1726 | else if ((kev->flags & EV_DISABLE) != 0) |
---|
| 1727 | kn->kn_status |= KN_DISABLED; |
---|
| 1728 | |
---|
| 1729 | if ((kn->kn_status & KN_DISABLED) == 0) |
---|
| 1730 | event = kn->kn_fop->f_event(kn, 0); |
---|
| 1731 | else |
---|
| 1732 | event = 0; |
---|
| 1733 | |
---|
[3c05977] | 1734 | KQ_LOCK(kq); |
---|
| 1735 | if (event) |
---|
[c40e45b] | 1736 | kn->kn_status |= KN_ACTIVE; |
---|
| 1737 | if ((kn->kn_status & (KN_ACTIVE | KN_DISABLED | KN_QUEUED)) == |
---|
| 1738 | KN_ACTIVE) |
---|
| 1739 | knote_enqueue(kn); |
---|
[0577772] | 1740 | kn->kn_status &= ~KN_SCAN; |
---|
| 1741 | kn_leave_flux(kn); |
---|
[c40e45b] | 1742 | kn_list_unlock(knl); |
---|
[3c05977] | 1743 | KQ_UNLOCK_FLUX(kq); |
---|
| 1744 | |
---|
| 1745 | done: |
---|
| 1746 | KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal); |
---|
[7eeb079] | 1747 | if (filedesc_unlock) |
---|
| 1748 | FILEDESC_XUNLOCK(td->td_proc->p_fd); |
---|
[3c05977] | 1749 | if (fp != NULL) |
---|
| 1750 | fdrop(fp, td); |
---|
[c40e45b] | 1751 | knote_free(tkn); |
---|
[3c05977] | 1752 | if (fops != NULL) |
---|
| 1753 | kqueue_fo_release(filt); |
---|
| 1754 | return (error); |
---|
| 1755 | } |
---|
| 1756 | |
---|
| 1757 | static int |
---|
| 1758 | kqueue_acquire(struct file *fp, struct kqueue **kqp) |
---|
| 1759 | { |
---|
| 1760 | int error; |
---|
| 1761 | struct kqueue *kq; |
---|
| 1762 | |
---|
| 1763 | error = 0; |
---|
| 1764 | |
---|
| 1765 | kq = fp->f_data; |
---|
[0c9f27b] | 1766 | #ifndef __rtems__ |
---|
[3c05977] | 1767 | if (fp->f_type != DTYPE_KQUEUE || kq == NULL) |
---|
[0c9f27b] | 1768 | #else /* __rtems__ */ |
---|
| 1769 | if (fp->f_io.pathinfo.handlers != &kqueueops || kq == NULL) |
---|
| 1770 | #endif /* __rtems__ */ |
---|
[3c05977] | 1771 | return (EBADF); |
---|
| 1772 | *kqp = kq; |
---|
| 1773 | KQ_LOCK(kq); |
---|
| 1774 | if ((kq->kq_state & KQ_CLOSING) == KQ_CLOSING) { |
---|
| 1775 | KQ_UNLOCK(kq); |
---|
| 1776 | return (EBADF); |
---|
| 1777 | } |
---|
| 1778 | kq->kq_refcnt++; |
---|
| 1779 | KQ_UNLOCK(kq); |
---|
| 1780 | |
---|
| 1781 | return error; |
---|
| 1782 | } |
---|
| 1783 | |
---|
| 1784 | static void |
---|
| 1785 | kqueue_release(struct kqueue *kq, int locked) |
---|
| 1786 | { |
---|
| 1787 | if (locked) |
---|
| 1788 | KQ_OWNED(kq); |
---|
| 1789 | else |
---|
| 1790 | KQ_LOCK(kq); |
---|
| 1791 | kq->kq_refcnt--; |
---|
| 1792 | if (kq->kq_refcnt == 1) |
---|
| 1793 | wakeup(&kq->kq_refcnt); |
---|
| 1794 | if (!locked) |
---|
| 1795 | KQ_UNLOCK(kq); |
---|
| 1796 | } |
---|
| 1797 | |
---|
| 1798 | static void |
---|
| 1799 | kqueue_schedtask(struct kqueue *kq) |
---|
| 1800 | { |
---|
| 1801 | |
---|
| 1802 | KQ_OWNED(kq); |
---|
| 1803 | KASSERT(((kq->kq_state & KQ_TASKDRAIN) != KQ_TASKDRAIN), |
---|
| 1804 | ("scheduling kqueue task while draining")); |
---|
| 1805 | |
---|
| 1806 | if ((kq->kq_state & KQ_TASKSCHED) != KQ_TASKSCHED) { |
---|
[c40e45b] | 1807 | taskqueue_enqueue(taskqueue_kqueue_ctx, &kq->kq_task); |
---|
[3c05977] | 1808 | kq->kq_state |= KQ_TASKSCHED; |
---|
| 1809 | } |
---|
| 1810 | } |
---|
| 1811 | |
---|
/*
 * Expand the kq to make sure we have storage for fops/ident pair.
 *
 * Return 0 on success (or no work necessary), return errno on failure.
 *
 * Not calling hashinit w/ waitok (proper malloc flag) should be safe.
 * If kqueue_register is called from a non-fd context, there usually/should
 * be no locks held.
 */
static int
kqueue_expand(struct kqueue *kq, struct filterops *fops, uintptr_t ident,
    int waitok)
{
	struct klist *list, *tmp_knhash, *to_free;
	u_long tmp_knhashmask;
	int size;
	int fd;
	int mflag = waitok ? M_WAITOK : M_NOWAIT;

	KQ_NOTOWNED(kq);

	to_free = NULL;
	if (fops->f_isfd) {
		/* fd-backed filters index kq_knlist directly by descriptor. */
		fd = ident;
		if (kq->kq_knlistsize <= fd) {
			/* Grow in KQEXTENT-sized steps until fd fits. */
			size = kq->kq_knlistsize;
			while (size <= fd)
				size += KQEXTENT;
			/* Allocate unlocked; malloc may sleep when waitok. */
			list = malloc(size * sizeof(*list), M_KQUEUE, mflag);
			if (list == NULL)
				return ENOMEM;
			KQ_LOCK(kq);
			if (kq->kq_knlistsize > fd) {
				/*
				 * Another thread grew the table while we
				 * were allocating; keep theirs and discard
				 * our copy (outside the lock, below).
				 */
				to_free = list;
				list = NULL;
			} else {
				/*
				 * Install the larger table: copy the old
				 * entries, zero the new tail, and queue the
				 * old table for freeing.
				 */
				if (kq->kq_knlist != NULL) {
					bcopy(kq->kq_knlist, list,
					    kq->kq_knlistsize * sizeof(*list));
					to_free = kq->kq_knlist;
					kq->kq_knlist = NULL;
				}
				bzero((caddr_t)list +
				    kq->kq_knlistsize * sizeof(*list),
				    (size - kq->kq_knlistsize) * sizeof(*list));
				kq->kq_knlistsize = size;
				kq->kq_knlist = list;
			}
			KQ_UNLOCK(kq);
		}
	} else {
		/* Non-fd idents live in a hash table, created on demand. */
		if (kq->kq_knhashmask == 0) {
			tmp_knhash = hashinit(KN_HASHSIZE, M_KQUEUE,
			    &tmp_knhashmask);
			if (tmp_knhash == NULL)
				return ENOMEM;
			KQ_LOCK(kq);
			if (kq->kq_knhashmask == 0) {
				kq->kq_knhash = tmp_knhash;
				kq->kq_knhashmask = tmp_knhashmask;
			} else {
				/* Lost the race; free our table below. */
				to_free = tmp_knhash;
			}
			KQ_UNLOCK(kq);
		}
	}
	/* Free any superseded table with no locks held. */
	free(to_free, M_KQUEUE);

	KQ_NOTOWNED(kq);
	return 0;
}
---|
| 1883 | |
---|
| 1884 | static void |
---|
| 1885 | kqueue_task(void *arg, int pending) |
---|
| 1886 | { |
---|
| 1887 | struct kqueue *kq; |
---|
| 1888 | int haskqglobal; |
---|
| 1889 | |
---|
| 1890 | haskqglobal = 0; |
---|
| 1891 | kq = arg; |
---|
| 1892 | |
---|
| 1893 | KQ_GLOBAL_LOCK(&kq_global, haskqglobal); |
---|
| 1894 | KQ_LOCK(kq); |
---|
| 1895 | |
---|
| 1896 | KNOTE_LOCKED(&kq->kq_sel.si_note, 0); |
---|
| 1897 | |
---|
| 1898 | kq->kq_state &= ~KQ_TASKSCHED; |
---|
| 1899 | if ((kq->kq_state & KQ_TASKDRAIN) == KQ_TASKDRAIN) { |
---|
| 1900 | wakeup(&kq->kq_state); |
---|
| 1901 | } |
---|
| 1902 | KQ_UNLOCK(kq); |
---|
| 1903 | KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal); |
---|
| 1904 | } |
---|
| 1905 | |
---|
/*
 * Scan, update kn_data (if not ONESHOT), and copyout triggered events.
 * We treat KN_MARKER knotes as if they are in flux.
 *
 * Returns 0 on success (including timeout) or an errno; the number of
 * events delivered is reported through td->td_retval[0].
 */
static int
kqueue_scan(struct kqueue *kq, int maxevents, struct kevent_copyops *k_ops,
    const struct timespec *tsp, struct kevent *keva, struct thread *td)
{
	struct kevent *kevp;
	struct knote *kn, *marker;
	struct knlist *knl;
	sbintime_t asbt, rsbt;
	int count, error, haskqglobal, influx, nkev, touch;

	count = maxevents;
	nkev = 0;
	error = 0;
	haskqglobal = 0;

	if (maxevents == 0)
		goto done_nl;

	/*
	 * Convert the timeout to an absolute sbintime:
	 * asbt == -1 requests a non-blocking poll (zero timespec),
	 * asbt == 0 sleeps without a deadline (NULL tsp or overflow).
	 */
	rsbt = 0;
	if (tsp != NULL) {
		if (tsp->tv_sec < 0 || tsp->tv_nsec < 0 ||
		    tsp->tv_nsec >= 1000000000) {
			error = EINVAL;
			goto done_nl;
		}
		if (timespecisset(tsp)) {
			if (tsp->tv_sec <= INT32_MAX) {
				rsbt = tstosbt(*tsp);
				if (TIMESEL(&asbt, rsbt))
					asbt += tc_tick_sbt;
				if (asbt <= SBT_MAX - rsbt)
					asbt += rsbt;
				else
					asbt = 0;
				rsbt >>= tc_precexp;
			} else
				asbt = 0;
		} else
			asbt = -1;
	} else
		asbt = 0;
	/* The marker bounds one pass over the queue of active knotes. */
	marker = knote_alloc(1);
	marker->kn_status = KN_MARKER;
	KQ_LOCK(kq);

retry:
	kevp = keva;
	if (kq->kq_count == 0) {
		/* Nothing queued: poll fails fast, otherwise sleep. */
		if (asbt == -1) {
			error = EWOULDBLOCK;
		} else {
			kq->kq_state |= KQ_SLEEP;
			error = msleep_sbt(kq, &kq->kq_lock, PSOCK | PCATCH,
			    "kqread", asbt, rsbt, C_ABSOLUTE);
		}
		if (error == 0)
			goto retry;
		/* don't restart after signals... */
		if (error == ERESTART)
			error = EINTR;
		else if (error == EWOULDBLOCK)
			error = 0;
		goto done;
	}

	TAILQ_INSERT_TAIL(&kq->kq_head, marker, kn_tqe);
	influx = 0;
	while (count) {
		KQ_OWNED(kq);
		kn = TAILQ_FIRST(&kq->kq_head);

		/*
		 * A foreign marker or an in-flux knote at the head means
		 * another scanner/registration is working on it: wake any
		 * flux waiters for knotes we touched and sleep until the
		 * queue head stabilizes.
		 */
		if ((kn->kn_status == KN_MARKER && kn != marker) ||
		    kn_in_flux(kn)) {
			if (influx) {
				influx = 0;
				KQ_FLUX_WAKEUP(kq);
			}
			kq->kq_state |= KQ_FLUXWAIT;
			error = msleep(kq, &kq->kq_lock, PSOCK,
			    "kqflxwt", 0);
			continue;
		}

		TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
		if ((kn->kn_status & KN_DISABLED) == KN_DISABLED) {
			/* Disabled after queueing: silently dequeue. */
			kn->kn_status &= ~KN_QUEUED;
			kq->kq_count--;
			continue;
		}
		if (kn == marker) {
			/*
			 * Reached our own marker: the pass is complete.
			 * If nothing was delivered yet, start over.
			 */
			KQ_FLUX_WAKEUP(kq);
			if (count == maxevents)
				goto retry;
			goto done;
		}
		KASSERT(!kn_in_flux(kn),
		    ("knote %p is unexpectedly in flux", kn));

		if ((kn->kn_flags & EV_DROP) == EV_DROP) {
			/* Knote was marked for deletion; drop it now. */
			kn->kn_status &= ~KN_QUEUED;
			kn_enter_flux(kn);
			kq->kq_count--;
			KQ_UNLOCK(kq);
			/*
			 * We don't need to lock the list since we've
			 * marked it as in flux.
			 */
			knote_drop(kn, td);
			KQ_LOCK(kq);
			continue;
		} else if ((kn->kn_flags & EV_ONESHOT) == EV_ONESHOT) {
			/* One-shot: report the event once, then drop. */
			kn->kn_status &= ~KN_QUEUED;
			kn_enter_flux(kn);
			kq->kq_count--;
			KQ_UNLOCK(kq);
			/*
			 * We don't need to lock the list since we've
			 * marked the knote as being in flux.
			 */
			*kevp = kn->kn_kevent;
			knote_drop(kn, td);
			KQ_LOCK(kq);
			kn = NULL;
		} else {
			/* Re-poll the filter with the kq lock dropped. */
			kn->kn_status |= KN_SCAN;
			kn_enter_flux(kn);
			KQ_UNLOCK(kq);
			if ((kn->kn_status & KN_KQUEUE) == KN_KQUEUE)
				KQ_GLOBAL_LOCK(&kq_global, haskqglobal);
			knl = kn_list_lock(kn);
			if (kn->kn_fop->f_event(kn, 0) == 0) {
				/* Event went away; deactivate and skip. */
				KQ_LOCK(kq);
				KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
				kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE |
				    KN_SCAN);
				kn_leave_flux(kn);
				kq->kq_count--;
				kn_list_unlock(knl);
				influx = 1;
				continue;
			}
			touch = (!kn->kn_fop->f_isfd &&
			    kn->kn_fop->f_touch != NULL);
			if (touch)
				kn->kn_fop->f_touch(kn, kevp, EVENT_PROCESS);
			else
				*kevp = kn->kn_kevent;
			KQ_LOCK(kq);
			KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
			if (kn->kn_flags & (EV_CLEAR | EV_DISPATCH)) {
				/*
				 * Manually clear knotes who weren't
				 * 'touch'ed.
				 */
				if (touch == 0 && kn->kn_flags & EV_CLEAR) {
					kn->kn_data = 0;
					kn->kn_fflags = 0;
				}
				if (kn->kn_flags & EV_DISPATCH)
					kn->kn_status |= KN_DISABLED;
				kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
				kq->kq_count--;
			} else
				/* Level-triggered: requeue behind marker. */
				TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);

			kn->kn_status &= ~KN_SCAN;
			kn_leave_flux(kn);
			kn_list_unlock(knl);
			influx = 1;
		}

		/* we are returning a copy to the user */
		kevp++;
		nkev++;
		count--;

		/* Flush a full batch to the caller's buffer. */
		if (nkev == KQ_NEVENTS) {
			influx = 0;
			KQ_UNLOCK_FLUX(kq);
			error = k_ops->k_copyout(k_ops->arg, keva, nkev);
			nkev = 0;
			kevp = keva;
			KQ_LOCK(kq);
			if (error)
				break;
		}
	}
	TAILQ_REMOVE(&kq->kq_head, marker, kn_tqe);
done:
	KQ_OWNED(kq);
	KQ_UNLOCK_FLUX(kq);
	knote_free(marker);
done_nl:
	KQ_NOTOWNED(kq);
	/* Copy out any partial batch still buffered in keva. */
	if (nkev != 0)
		error = k_ops->k_copyout(k_ops->arg, keva, nkev);
	td->td_retval[0] = maxevents - count;
	return (error);
}
---|
| 2109 | |
---|
[0c9f27b] | 2110 | #ifndef __rtems__ |
---|
[3c05977] | 2111 | /*ARGSUSED*/ |
---|
| 2112 | static int |
---|
| 2113 | kqueue_ioctl(struct file *fp, u_long cmd, void *data, |
---|
| 2114 | struct ucred *active_cred, struct thread *td) |
---|
| 2115 | { |
---|
| 2116 | /* |
---|
| 2117 | * Enabling sigio causes two major problems: |
---|
| 2118 | * 1) infinite recursion: |
---|
| 2119 | * Synopsys: kevent is being used to track signals and have FIOASYNC |
---|
| 2120 | * set. On receipt of a signal this will cause a kqueue to recurse |
---|
| 2121 | * into itself over and over. Sending the sigio causes the kqueue |
---|
| 2122 | * to become ready, which in turn posts sigio again, forever. |
---|
| 2123 | * Solution: this can be solved by setting a flag in the kqueue that |
---|
| 2124 | * we have a SIGIO in progress. |
---|
| 2125 | * 2) locking problems: |
---|
| 2126 | * Synopsys: Kqueue is a leaf subsystem, but adding signalling puts |
---|
| 2127 | * us above the proc and pgrp locks. |
---|
| 2128 | * Solution: Post a signal using an async mechanism, being sure to |
---|
| 2129 | * record a generation count in the delivery so that we do not deliver |
---|
| 2130 | * a signal to the wrong process. |
---|
| 2131 | * |
---|
| 2132 | * Note, these two mechanisms are somewhat mutually exclusive! |
---|
| 2133 | */ |
---|
| 2134 | #if 0 |
---|
| 2135 | struct kqueue *kq; |
---|
| 2136 | |
---|
| 2137 | kq = fp->f_data; |
---|
| 2138 | switch (cmd) { |
---|
| 2139 | case FIOASYNC: |
---|
| 2140 | if (*(int *)data) { |
---|
| 2141 | kq->kq_state |= KQ_ASYNC; |
---|
| 2142 | } else { |
---|
| 2143 | kq->kq_state &= ~KQ_ASYNC; |
---|
| 2144 | } |
---|
| 2145 | return (0); |
---|
| 2146 | |
---|
| 2147 | case FIOSETOWN: |
---|
| 2148 | return (fsetown(*(int *)data, &kq->kq_sigio)); |
---|
| 2149 | |
---|
| 2150 | case FIOGETOWN: |
---|
| 2151 | *(int *)data = fgetown(&kq->kq_sigio); |
---|
| 2152 | return (0); |
---|
| 2153 | } |
---|
| 2154 | #endif |
---|
| 2155 | |
---|
| 2156 | return (ENOTTY); |
---|
| 2157 | } |
---|
[0c9f27b] | 2158 | #endif /* __rtems__ */ |
---|
[3c05977] | 2159 | |
---|
| 2160 | /*ARGSUSED*/ |
---|
| 2161 | static int |
---|
| 2162 | kqueue_poll(struct file *fp, int events, struct ucred *active_cred, |
---|
| 2163 | struct thread *td) |
---|
| 2164 | { |
---|
| 2165 | struct kqueue *kq; |
---|
| 2166 | int revents = 0; |
---|
| 2167 | int error; |
---|
| 2168 | |
---|
| 2169 | if ((error = kqueue_acquire(fp, &kq))) |
---|
| 2170 | return POLLERR; |
---|
| 2171 | |
---|
| 2172 | KQ_LOCK(kq); |
---|
| 2173 | if (events & (POLLIN | POLLRDNORM)) { |
---|
| 2174 | if (kq->kq_count) { |
---|
| 2175 | revents |= events & (POLLIN | POLLRDNORM); |
---|
| 2176 | } else { |
---|
| 2177 | selrecord(td, &kq->kq_sel); |
---|
| 2178 | if (SEL_WAITING(&kq->kq_sel)) |
---|
| 2179 | kq->kq_state |= KQ_SEL; |
---|
| 2180 | } |
---|
| 2181 | } |
---|
| 2182 | kqueue_release(kq, 1); |
---|
| 2183 | KQ_UNLOCK(kq); |
---|
| 2184 | return (revents); |
---|
| 2185 | } |
---|
[0c9f27b] | 2186 | #ifdef __rtems__ |
---|
| 2187 | static int |
---|
| 2188 | rtems_bsd_kqueue_poll(rtems_libio_t *iop, int events) |
---|
| 2189 | { |
---|
| 2190 | struct thread *td = rtems_bsd_get_curthread_or_null(); |
---|
| 2191 | struct file *fp = rtems_bsd_iop_to_fp(iop); |
---|
| 2192 | int error; |
---|
| 2193 | |
---|
| 2194 | if (td != NULL) { |
---|
| 2195 | error = kqueue_poll(fp, events, NULL, td); |
---|
| 2196 | } else { |
---|
| 2197 | error = ENOMEM; |
---|
| 2198 | } |
---|
| 2199 | |
---|
| 2200 | return error; |
---|
| 2201 | } |
---|
| 2202 | #endif /* __rtems__ */ |
---|
[3c05977] | 2203 | |
---|
/*ARGSUSED*/
#ifndef __rtems__
/*
 * fstat(2) handler for kqueue descriptors.
 */
static int
kqueue_stat(struct file *fp, struct stat *st, struct ucred *active_cred,
    struct thread *td)
{

	bzero((void *)st, sizeof *st);
#else /* __rtems__ */
/*
 * fstat(2) handler for kqueue descriptors (RTEMS variant).
 * NOTE(review): unlike the FreeBSD path, "st" is not zeroed here —
 * presumably the RTEMS VFS layer prepares it; verify against callers.
 */
static int
rtems_bsd_kqueue_stat(const rtems_filesystem_location_info_t *loc,
    struct stat *st)
{
	(void) loc;
#endif /* __rtems__ */
	/*
	 * We no longer return kq_count because the unlocked value is useless.
	 * If you spent all this time getting the count, why not spend your
	 * syscall better by calling kevent?
	 *
	 * XXX - This is needed for libc_r.
	 */
	st->st_mode = S_IFIFO;
	return (0);
}
---|
| 2229 | |
---|
[c40e45b] | 2230 | static void |
---|
| 2231 | kqueue_drain(struct kqueue *kq, struct thread *td) |
---|
[3c05977] | 2232 | { |
---|
| 2233 | struct knote *kn; |
---|
| 2234 | int i; |
---|
[0c9f27b] | 2235 | |
---|
[3c05977] | 2236 | KQ_LOCK(kq); |
---|
| 2237 | |
---|
| 2238 | KASSERT((kq->kq_state & KQ_CLOSING) != KQ_CLOSING, |
---|
| 2239 | ("kqueue already closing")); |
---|
| 2240 | kq->kq_state |= KQ_CLOSING; |
---|
| 2241 | if (kq->kq_refcnt > 1) |
---|
| 2242 | msleep(&kq->kq_refcnt, &kq->kq_lock, PSOCK, "kqclose", 0); |
---|
| 2243 | |
---|
| 2244 | KASSERT(kq->kq_refcnt == 1, ("other refs are out there!")); |
---|
| 2245 | |
---|
| 2246 | KASSERT(knlist_empty(&kq->kq_sel.si_note), |
---|
| 2247 | ("kqueue's knlist not empty")); |
---|
| 2248 | |
---|
| 2249 | for (i = 0; i < kq->kq_knlistsize; i++) { |
---|
| 2250 | while ((kn = SLIST_FIRST(&kq->kq_knlist[i])) != NULL) { |
---|
[0577772] | 2251 | if (kn_in_flux(kn)) { |
---|
[3c05977] | 2252 | kq->kq_state |= KQ_FLUXWAIT; |
---|
| 2253 | msleep(kq, &kq->kq_lock, PSOCK, "kqclo1", 0); |
---|
| 2254 | continue; |
---|
| 2255 | } |
---|
[0577772] | 2256 | kn_enter_flux(kn); |
---|
[3c05977] | 2257 | KQ_UNLOCK(kq); |
---|
| 2258 | knote_drop(kn, td); |
---|
| 2259 | KQ_LOCK(kq); |
---|
| 2260 | } |
---|
| 2261 | } |
---|
| 2262 | if (kq->kq_knhashmask != 0) { |
---|
| 2263 | for (i = 0; i <= kq->kq_knhashmask; i++) { |
---|
| 2264 | while ((kn = SLIST_FIRST(&kq->kq_knhash[i])) != NULL) { |
---|
[0577772] | 2265 | if (kn_in_flux(kn)) { |
---|
[3c05977] | 2266 | kq->kq_state |= KQ_FLUXWAIT; |
---|
| 2267 | msleep(kq, &kq->kq_lock, PSOCK, |
---|
| 2268 | "kqclo2", 0); |
---|
| 2269 | continue; |
---|
| 2270 | } |
---|
[0577772] | 2271 | kn_enter_flux(kn); |
---|
[3c05977] | 2272 | KQ_UNLOCK(kq); |
---|
| 2273 | knote_drop(kn, td); |
---|
| 2274 | KQ_LOCK(kq); |
---|
| 2275 | } |
---|
| 2276 | } |
---|
| 2277 | } |
---|
| 2278 | |
---|
| 2279 | if ((kq->kq_state & KQ_TASKSCHED) == KQ_TASKSCHED) { |
---|
| 2280 | kq->kq_state |= KQ_TASKDRAIN; |
---|
| 2281 | msleep(&kq->kq_state, &kq->kq_lock, PSOCK, "kqtqdr", 0); |
---|
| 2282 | } |
---|
| 2283 | |
---|
| 2284 | if ((kq->kq_state & KQ_SEL) == KQ_SEL) { |
---|
| 2285 | selwakeuppri(&kq->kq_sel, PSOCK); |
---|
| 2286 | if (!SEL_WAITING(&kq->kq_sel)) |
---|
| 2287 | kq->kq_state &= ~KQ_SEL; |
---|
| 2288 | } |
---|
| 2289 | |
---|
| 2290 | KQ_UNLOCK(kq); |
---|
[c40e45b] | 2291 | } |
---|
| 2292 | |
---|
/*
 * Tear down the internal state of a drained kqueue: select state, the
 * embedded knlist, the mutex, the knote lookup tables, and SIGIO
 * ownership.  The struct kqueue itself is freed by the caller.
 */
static void
kqueue_destroy(struct kqueue *kq)
{

#ifndef __rtems__
	KASSERT(kq->kq_fdp == NULL,
	    ("kqueue still attached to a file descriptor"));
#endif /* __rtems__ */
	seldrain(&kq->kq_sel);
	knlist_destroy(&kq->kq_sel.si_note);
	mtx_destroy(&kq->kq_lock);

	if (kq->kq_knhash != NULL)
		free(kq->kq_knhash, M_KQUEUE);
	if (kq->kq_knlist != NULL)
		free(kq->kq_knlist, M_KQUEUE);

	funsetown(&kq->kq_sigio);
}
---|
| 2312 | |
---|
/*ARGSUSED*/
/*
 * Last-close handler for a kqueue file: drain all knotes, unlink the
 * kqueue from its owner's bookkeeping list, and free all associated
 * resources.
 */
static int
kqueue_close(struct file *fp, struct thread *td)
{
	struct kqueue *kq = fp->f_data;
	struct filedesc *fdp;
	int error;
	int filedesc_unlock;

	if ((error = kqueue_acquire(fp, &kq)))
		return error;
	kqueue_drain(kq, td);

#ifndef __rtems__
	/*
	 * We could be called due to the knote_drop() doing fdrop(),
	 * called from kqueue_register(). In this case the global
	 * lock is owned, and filedesc sx is locked before, to not
	 * take the sleepable lock after non-sleepable.
	 */
	fdp = kq->kq_fdp;
	kq->kq_fdp = NULL;
	if (!sx_xlocked(FILEDESC_LOCK(fdp))) {
		FILEDESC_XLOCK(fdp);
		filedesc_unlock = 1;
	} else
		filedesc_unlock = 0;
	TAILQ_REMOVE(&fdp->fd_kqlist, kq, kq_list);
	if (filedesc_unlock)
		FILEDESC_XUNLOCK(fdp);
#else /* __rtems__ */
	(void)filedesc_unlock;
	/* RTEMS keeps all kqueues on one global list under the libio lock. */
	rtems_libio_lock();
	TAILQ_REMOVE(&fd_kqlist, kq, kq_list);
	rtems_libio_unlock();
#endif /* __rtems__ */

	/* Tear down internals, then release accounting and memory. */
	kqueue_destroy(kq);
	chgkqcnt(kq->kq_cred->cr_ruidinfo, -1, 0);
	crfree(kq->kq_cred);
	free(kq, M_KQUEUE);
	fp->f_data = NULL;

	return (0);
}
---|
[0c9f27b] | 2358 | #ifdef __rtems__ |
---|
| 2359 | static int |
---|
| 2360 | rtems_bsd_kqueue_close(rtems_libio_t *iop) |
---|
| 2361 | { |
---|
| 2362 | struct thread *td = rtems_bsd_get_curthread_or_null(); |
---|
| 2363 | struct file *fp = rtems_bsd_iop_to_fp(iop); |
---|
| 2364 | int error; |
---|
| 2365 | |
---|
| 2366 | if (td != NULL) { |
---|
| 2367 | error = kqueue_close(fp, td); |
---|
| 2368 | } else { |
---|
| 2369 | error = ENOMEM; |
---|
| 2370 | } |
---|
| 2371 | |
---|
| 2372 | return rtems_bsd_error_to_status_and_errno(error); |
---|
| 2373 | } |
---|
[f59bd57] | 2374 | #endif /* __rtems__ */ |
---|
[3c05977] | 2375 | |
---|
[c40e45b] | 2376 | #ifndef __rtems__ |
---|
| 2377 | static int |
---|
| 2378 | kqueue_fill_kinfo(struct file *fp, struct kinfo_file *kif, struct filedesc *fdp) |
---|
| 2379 | { |
---|
| 2380 | |
---|
| 2381 | kif->kf_type = KF_TYPE_KQUEUE; |
---|
| 2382 | return (0); |
---|
| 2383 | } |
---|
| 2384 | #endif /* __rtems__ */ |
---|
| 2385 | |
---|
[3c05977] | 2386 | static void |
---|
| 2387 | kqueue_wakeup(struct kqueue *kq) |
---|
| 2388 | { |
---|
| 2389 | KQ_OWNED(kq); |
---|
| 2390 | |
---|
| 2391 | if ((kq->kq_state & KQ_SLEEP) == KQ_SLEEP) { |
---|
| 2392 | kq->kq_state &= ~KQ_SLEEP; |
---|
| 2393 | wakeup(kq); |
---|
| 2394 | } |
---|
| 2395 | if ((kq->kq_state & KQ_SEL) == KQ_SEL) { |
---|
| 2396 | selwakeuppri(&kq->kq_sel, PSOCK); |
---|
| 2397 | if (!SEL_WAITING(&kq->kq_sel)) |
---|
| 2398 | kq->kq_state &= ~KQ_SEL; |
---|
| 2399 | } |
---|
| 2400 | if (!knlist_empty(&kq->kq_sel.si_note)) |
---|
| 2401 | kqueue_schedtask(kq); |
---|
| 2402 | if ((kq->kq_state & KQ_ASYNC) == KQ_ASYNC) { |
---|
[0c9f27b] | 2403 | #ifndef __rtems__ |
---|
[3c05977] | 2404 | pgsigio(&kq->kq_sigio, SIGIO, 0); |
---|
[0c9f27b] | 2405 | #else /* __rtems__ */ |
---|
| 2406 | BSD_ASSERT(0); |
---|
| 2407 | #endif /* __rtems__ */ |
---|
[3c05977] | 2408 | } |
---|
| 2409 | } |
---|
| 2410 | |
---|
| 2411 | /* |
---|
| 2412 | * Walk down a list of knotes, activating them if their event has triggered. |
---|
| 2413 | * |
---|
| 2414 | * There is a possibility to optimize in the case of one kq watching another. |
---|
| 2415 | * Instead of scheduling a task to wake it up, you could pass enough state |
---|
| 2416 | * down the chain to make up the parent kqueue. Make this code functional |
---|
| 2417 | * first. |
---|
| 2418 | */ |
---|
| 2419 | void |
---|
| 2420 | knote(struct knlist *list, long hint, int lockflags) |
---|
| 2421 | { |
---|
| 2422 | struct kqueue *kq; |
---|
[c40e45b] | 2423 | struct knote *kn, *tkn; |
---|
[3c05977] | 2424 | int error; |
---|
| 2425 | |
---|
| 2426 | if (list == NULL) |
---|
| 2427 | return; |
---|
| 2428 | |
---|
| 2429 | KNL_ASSERT_LOCK(list, lockflags & KNF_LISTLOCKED); |
---|
| 2430 | |
---|
| 2431 | if ((lockflags & KNF_LISTLOCKED) == 0) |
---|
| 2432 | list->kl_lock(list->kl_lockarg); |
---|
| 2433 | |
---|
| 2434 | /* |
---|
[0577772] | 2435 | * If we unlock the list lock (and enter influx), we can |
---|
[c40e45b] | 2436 | * eliminate the kqueue scheduling, but this will introduce |
---|
| 2437 | * four lock/unlock's for each knote to test. Also, marker |
---|
| 2438 | * would be needed to keep iteration position, since filters |
---|
| 2439 | * or other threads could remove events. |
---|
[3c05977] | 2440 | */ |
---|
[c40e45b] | 2441 | SLIST_FOREACH_SAFE(kn, &list->kl_list, kn_selnext, tkn) { |
---|
[3c05977] | 2442 | kq = kn->kn_kq; |
---|
[7eeb079] | 2443 | KQ_LOCK(kq); |
---|
[0577772] | 2444 | if (kn_in_flux(kn) && (kn->kn_status & KN_SCAN) == 0) { |
---|
[7eeb079] | 2445 | /* |
---|
| 2446 | * Do not process the influx notes, except for |
---|
| 2447 | * the influx coming from the kq unlock in the |
---|
| 2448 | * kqueue_scan(). In the later case, we do |
---|
| 2449 | * not interfere with the scan, since the code |
---|
| 2450 | * fragment in kqueue_scan() locks the knlist, |
---|
| 2451 | * and cannot proceed until we finished. |
---|
| 2452 | */ |
---|
| 2453 | KQ_UNLOCK(kq); |
---|
| 2454 | } else if ((lockflags & KNF_NOKQLOCK) != 0) { |
---|
[0577772] | 2455 | kn_enter_flux(kn); |
---|
[7eeb079] | 2456 | KQ_UNLOCK(kq); |
---|
| 2457 | error = kn->kn_fop->f_event(kn, hint); |
---|
[3c05977] | 2458 | KQ_LOCK(kq); |
---|
[0577772] | 2459 | kn_leave_flux(kn); |
---|
[7eeb079] | 2460 | if (error) |
---|
| 2461 | KNOTE_ACTIVATE(kn, 1); |
---|
| 2462 | KQ_UNLOCK_FLUX(kq); |
---|
| 2463 | } else { |
---|
| 2464 | kn->kn_status |= KN_HASKQLOCK; |
---|
| 2465 | if (kn->kn_fop->f_event(kn, hint)) |
---|
| 2466 | KNOTE_ACTIVATE(kn, 1); |
---|
| 2467 | kn->kn_status &= ~KN_HASKQLOCK; |
---|
| 2468 | KQ_UNLOCK(kq); |
---|
[3c05977] | 2469 | } |
---|
| 2470 | } |
---|
| 2471 | if ((lockflags & KNF_LISTLOCKED) == 0) |
---|
| 2472 | list->kl_unlock(list->kl_lockarg); |
---|
| 2473 | } |
---|
| 2474 | |
---|
| 2475 | /* |
---|
| 2476 | * add a knote to a knlist |
---|
| 2477 | */ |
---|
| 2478 | void |
---|
| 2479 | knlist_add(struct knlist *knl, struct knote *kn, int islocked) |
---|
| 2480 | { |
---|
[0577772] | 2481 | |
---|
[3c05977] | 2482 | KNL_ASSERT_LOCK(knl, islocked); |
---|
| 2483 | KQ_NOTOWNED(kn->kn_kq); |
---|
[0577772] | 2484 | KASSERT(kn_in_flux(kn), ("knote %p not in flux", kn)); |
---|
| 2485 | KASSERT((kn->kn_status & KN_DETACHED) != 0, |
---|
| 2486 | ("knote %p was not detached", kn)); |
---|
[3c05977] | 2487 | if (!islocked) |
---|
| 2488 | knl->kl_lock(knl->kl_lockarg); |
---|
| 2489 | SLIST_INSERT_HEAD(&knl->kl_list, kn, kn_selnext); |
---|
| 2490 | if (!islocked) |
---|
| 2491 | knl->kl_unlock(knl->kl_lockarg); |
---|
| 2492 | KQ_LOCK(kn->kn_kq); |
---|
| 2493 | kn->kn_knlist = knl; |
---|
| 2494 | kn->kn_status &= ~KN_DETACHED; |
---|
| 2495 | KQ_UNLOCK(kn->kn_kq); |
---|
| 2496 | } |
---|
| 2497 | |
---|
| 2498 | static void |
---|
[c40e45b] | 2499 | knlist_remove_kq(struct knlist *knl, struct knote *kn, int knlislocked, |
---|
| 2500 | int kqislocked) |
---|
[3c05977] | 2501 | { |
---|
[0577772] | 2502 | |
---|
| 2503 | KASSERT(!kqislocked || knlislocked, ("kq locked w/o knl locked")); |
---|
[3c05977] | 2504 | KNL_ASSERT_LOCK(knl, knlislocked); |
---|
| 2505 | mtx_assert(&kn->kn_kq->kq_lock, kqislocked ? MA_OWNED : MA_NOTOWNED); |
---|
[0577772] | 2506 | KASSERT(kqislocked || kn_in_flux(kn), ("knote %p not in flux", kn)); |
---|
| 2507 | KASSERT((kn->kn_status & KN_DETACHED) == 0, |
---|
| 2508 | ("knote %p was already detached", kn)); |
---|
[3c05977] | 2509 | if (!knlislocked) |
---|
| 2510 | knl->kl_lock(knl->kl_lockarg); |
---|
| 2511 | SLIST_REMOVE(&knl->kl_list, kn, knote, kn_selnext); |
---|
| 2512 | kn->kn_knlist = NULL; |
---|
| 2513 | if (!knlislocked) |
---|
[c40e45b] | 2514 | kn_list_unlock(knl); |
---|
[3c05977] | 2515 | if (!kqislocked) |
---|
| 2516 | KQ_LOCK(kn->kn_kq); |
---|
| 2517 | kn->kn_status |= KN_DETACHED; |
---|
| 2518 | if (!kqislocked) |
---|
| 2519 | KQ_UNLOCK(kn->kn_kq); |
---|
| 2520 | } |
---|
| 2521 | |
---|
| 2522 | /* |
---|
[c40e45b] | 2523 | * remove knote from the specified knlist |
---|
[3c05977] | 2524 | */ |
---|
| 2525 | void |
---|
| 2526 | knlist_remove(struct knlist *knl, struct knote *kn, int islocked) |
---|
| 2527 | { |
---|
| 2528 | |
---|
| 2529 | knlist_remove_kq(knl, kn, islocked, 0); |
---|
| 2530 | } |
---|
| 2531 | |
---|
| 2532 | int |
---|
| 2533 | knlist_empty(struct knlist *knl) |
---|
| 2534 | { |
---|
[c40e45b] | 2535 | |
---|
[3c05977] | 2536 | KNL_ASSERT_LOCKED(knl); |
---|
[0577772] | 2537 | return (SLIST_EMPTY(&knl->kl_list)); |
---|
[3c05977] | 2538 | } |
---|
| 2539 | |
---|
/*
 * Fallback mutex used by knlist_init() for callers that do not supply
 * their own lock; shared by all such "lockless" knlist objects.
 */
static struct mtx knlist_lock;
MTX_SYSINIT(knlist_lock, &knlist_lock, "knlist lock for lockless objects",
    MTX_DEF);
static void knlist_mtx_lock(void *arg);
static void knlist_mtx_unlock(void *arg);
---|
| 2545 | |
---|
| 2546 | static void |
---|
| 2547 | knlist_mtx_lock(void *arg) |
---|
| 2548 | { |
---|
[c40e45b] | 2549 | |
---|
[3c05977] | 2550 | mtx_lock((struct mtx *)arg); |
---|
| 2551 | } |
---|
| 2552 | |
---|
| 2553 | static void |
---|
| 2554 | knlist_mtx_unlock(void *arg) |
---|
| 2555 | { |
---|
[c40e45b] | 2556 | |
---|
[3c05977] | 2557 | mtx_unlock((struct mtx *)arg); |
---|
| 2558 | } |
---|
| 2559 | |
---|
| 2560 | static void |
---|
| 2561 | knlist_mtx_assert_locked(void *arg) |
---|
| 2562 | { |
---|
[c40e45b] | 2563 | |
---|
[3c05977] | 2564 | mtx_assert((struct mtx *)arg, MA_OWNED); |
---|
| 2565 | } |
---|
| 2566 | |
---|
| 2567 | static void |
---|
| 2568 | knlist_mtx_assert_unlocked(void *arg) |
---|
| 2569 | { |
---|
[c40e45b] | 2570 | |
---|
[3c05977] | 2571 | mtx_assert((struct mtx *)arg, MA_NOTOWNED); |
---|
| 2572 | } |
---|
| 2573 | |
---|
[c40e45b] | 2574 | #ifndef __rtems__ |
---|
| 2575 | static void |
---|
| 2576 | knlist_rw_rlock(void *arg) |
---|
| 2577 | { |
---|
| 2578 | |
---|
| 2579 | rw_rlock((struct rwlock *)arg); |
---|
| 2580 | } |
---|
| 2581 | |
---|
| 2582 | static void |
---|
| 2583 | knlist_rw_runlock(void *arg) |
---|
| 2584 | { |
---|
| 2585 | |
---|
| 2586 | rw_runlock((struct rwlock *)arg); |
---|
| 2587 | } |
---|
| 2588 | |
---|
| 2589 | static void |
---|
| 2590 | knlist_rw_assert_locked(void *arg) |
---|
| 2591 | { |
---|
| 2592 | |
---|
| 2593 | rw_assert((struct rwlock *)arg, RA_LOCKED); |
---|
| 2594 | } |
---|
| 2595 | |
---|
| 2596 | static void |
---|
| 2597 | knlist_rw_assert_unlocked(void *arg) |
---|
| 2598 | { |
---|
| 2599 | |
---|
| 2600 | rw_assert((struct rwlock *)arg, RA_UNLOCKED); |
---|
| 2601 | } |
---|
| 2602 | #endif /* __rtems__ */ |
---|
| 2603 | |
---|
[3c05977] | 2604 | void |
---|
| 2605 | knlist_init(struct knlist *knl, void *lock, void (*kl_lock)(void *), |
---|
| 2606 | void (*kl_unlock)(void *), |
---|
| 2607 | void (*kl_assert_locked)(void *), void (*kl_assert_unlocked)(void *)) |
---|
| 2608 | { |
---|
| 2609 | |
---|
| 2610 | if (lock == NULL) |
---|
| 2611 | knl->kl_lockarg = &knlist_lock; |
---|
| 2612 | else |
---|
| 2613 | knl->kl_lockarg = lock; |
---|
| 2614 | |
---|
| 2615 | if (kl_lock == NULL) |
---|
| 2616 | knl->kl_lock = knlist_mtx_lock; |
---|
| 2617 | else |
---|
| 2618 | knl->kl_lock = kl_lock; |
---|
| 2619 | if (kl_unlock == NULL) |
---|
| 2620 | knl->kl_unlock = knlist_mtx_unlock; |
---|
| 2621 | else |
---|
| 2622 | knl->kl_unlock = kl_unlock; |
---|
| 2623 | if (kl_assert_locked == NULL) |
---|
| 2624 | knl->kl_assert_locked = knlist_mtx_assert_locked; |
---|
| 2625 | else |
---|
| 2626 | knl->kl_assert_locked = kl_assert_locked; |
---|
| 2627 | if (kl_assert_unlocked == NULL) |
---|
| 2628 | knl->kl_assert_unlocked = knlist_mtx_assert_unlocked; |
---|
| 2629 | else |
---|
| 2630 | knl->kl_assert_unlocked = kl_assert_unlocked; |
---|
| 2631 | |
---|
[c40e45b] | 2632 | knl->kl_autodestroy = 0; |
---|
[3c05977] | 2633 | SLIST_INIT(&knl->kl_list); |
---|
| 2634 | } |
---|
| 2635 | |
---|
| 2636 | void |
---|
| 2637 | knlist_init_mtx(struct knlist *knl, struct mtx *lock) |
---|
| 2638 | { |
---|
| 2639 | |
---|
| 2640 | knlist_init(knl, lock, NULL, NULL, NULL, NULL); |
---|
| 2641 | } |
---|
| 2642 | |
---|
[c40e45b] | 2643 | struct knlist * |
---|
| 2644 | knlist_alloc(struct mtx *lock) |
---|
| 2645 | { |
---|
| 2646 | struct knlist *knl; |
---|
| 2647 | |
---|
| 2648 | knl = malloc(sizeof(struct knlist), M_KQUEUE, M_WAITOK); |
---|
| 2649 | knlist_init_mtx(knl, lock); |
---|
| 2650 | return (knl); |
---|
| 2651 | } |
---|
| 2652 | |
---|
| 2653 | #ifndef __rtems__ |
---|
| 2654 | void |
---|
| 2655 | knlist_init_rw_reader(struct knlist *knl, struct rwlock *lock) |
---|
| 2656 | { |
---|
| 2657 | |
---|
| 2658 | knlist_init(knl, lock, knlist_rw_rlock, knlist_rw_runlock, |
---|
| 2659 | knlist_rw_assert_locked, knlist_rw_assert_unlocked); |
---|
| 2660 | } |
---|
| 2661 | #endif /* __rtems__ */ |
---|
| 2662 | |
---|
/*
 * Tear down a knlist.  The list must already be empty; there is no
 * per-list state to release beyond that invariant check.
 */
void
knlist_destroy(struct knlist *knl)
{

	KASSERT(KNLIST_EMPTY(knl),
	    ("destroying knlist %p with knotes on it", knl));
}
---|
| 2670 | |
---|
/*
 * Mark a knlist for automatic destruction: if it is already empty it is
 * destroyed and freed now, otherwise the last knote removal (via
 * kn_list_unlock()) performs the destruction.  Caller holds the list lock.
 */
void
knlist_detach(struct knlist *knl)
{

	KNL_ASSERT_LOCKED(knl);
	knl->kl_autodestroy = 1;
	if (knlist_empty(knl)) {
		knlist_destroy(knl);
		free(knl, M_KQUEUE);
	}
}
---|
| 2682 | |
---|
/*
 * Even if we are locked, we may need to drop the lock to allow any influx
 * knotes time to "settle".
 *
 * Clear all knotes from the list.  If killkn is set the knotes are
 * dropped entirely; otherwise they are marked EV_EOF | EV_ONESHOT so a
 * subsequent scan retires them.  islocked says whether the caller holds
 * the knlist lock; when it does not, the lock is taken (and retaken
 * after each flux wait) here.
 */
void
knlist_cleardel(struct knlist *knl, struct thread *td, int islocked, int killkn)
{
	struct knote *kn, *kn2;
	struct kqueue *kq;

	KASSERT(!knl->kl_autodestroy, ("cleardel for autodestroy %p", knl));
	if (islocked)
		KNL_ASSERT_LOCKED(knl);
	else {
		KNL_ASSERT_UNLOCKED(knl);
again:		/* need to reacquire lock since we have dropped it */
		knl->kl_lock(knl->kl_lockarg);
	}

	SLIST_FOREACH_SAFE(kn, &knl->kl_list, kn_selnext, kn2) {
		kq = kn->kn_kq;
		KQ_LOCK(kq);
		if (kn_in_flux(kn)) {
			/* Skip for now; the settle loop below waits for it. */
			KQ_UNLOCK(kq);
			continue;
		}
		knlist_remove_kq(knl, kn, 1, 1);
		if (killkn) {
			kn_enter_flux(kn);
			KQ_UNLOCK(kq);
			knote_drop_detached(kn, td);
		} else {
			/* Make sure cleared knotes disappear soon */
			kn->kn_flags |= EV_EOF | EV_ONESHOT;
			KQ_UNLOCK(kq);
		}
		kq = NULL;
	}

	if (!SLIST_EMPTY(&knl->kl_list)) {
		/* there are still in flux knotes remaining */
		kn = SLIST_FIRST(&knl->kl_list);
		kq = kn->kn_kq;
		KQ_LOCK(kq);
		KASSERT(kn_in_flux(kn), ("knote removed w/o list lock"));
		knl->kl_unlock(knl->kl_lockarg);
		kq->kq_state |= KQ_FLUXWAIT;
		msleep(kq, &kq->kq_lock, PSOCK | PDROP, "kqkclr", 0);
		kq = NULL;
		goto again;
	}

	if (islocked)
		KNL_ASSERT_LOCKED(knl);
	else {
		knl->kl_unlock(knl->kl_lockarg);
		KNL_ASSERT_UNLOCKED(knl);
	}
}
---|
| 2742 | |
---|
| 2743 | /* |
---|
| 2744 | * Remove all knotes referencing a specified fd must be called with FILEDESC |
---|
| 2745 | * lock. This prevents a race where a new fd comes along and occupies the |
---|
| 2746 | * entry and we attach a knote to the fd. |
---|
| 2747 | */ |
---|
| 2748 | void |
---|
| 2749 | knote_fdclose(struct thread *td, int fd) |
---|
| 2750 | { |
---|
[0c9f27b] | 2751 | #ifndef __rtems__ |
---|
[3c05977] | 2752 | struct filedesc *fdp = td->td_proc->p_fd; |
---|
[0c9f27b] | 2753 | #endif /* __rtems__ */ |
---|
[3c05977] | 2754 | struct kqueue *kq; |
---|
| 2755 | struct knote *kn; |
---|
| 2756 | int influx; |
---|
| 2757 | |
---|
[595b333] | 2758 | #ifndef __rtems__ |
---|
[3c05977] | 2759 | FILEDESC_XLOCK_ASSERT(fdp); |
---|
[595b333] | 2760 | #endif /* __rtems__ */ |
---|
[3c05977] | 2761 | |
---|
| 2762 | /* |
---|
| 2763 | * We shouldn't have to worry about new kevents appearing on fd |
---|
| 2764 | * since filedesc is locked. |
---|
| 2765 | */ |
---|
[0c9f27b] | 2766 | #ifndef __rtems__ |
---|
[7eeb079] | 2767 | TAILQ_FOREACH(kq, &fdp->fd_kqlist, kq_list) { |
---|
[0c9f27b] | 2768 | #else /* __rtems__ */ |
---|
| 2769 | /* FIXME: Use separate lock? */ |
---|
| 2770 | rtems_libio_lock(); |
---|
[7eeb079] | 2771 | TAILQ_FOREACH(kq, &fd_kqlist, kq_list) { |
---|
[0c9f27b] | 2772 | #endif /* __rtems__ */ |
---|
[3c05977] | 2773 | KQ_LOCK(kq); |
---|
| 2774 | |
---|
| 2775 | again: |
---|
| 2776 | influx = 0; |
---|
| 2777 | while (kq->kq_knlistsize > fd && |
---|
| 2778 | (kn = SLIST_FIRST(&kq->kq_knlist[fd])) != NULL) { |
---|
[0577772] | 2779 | if (kn_in_flux(kn)) { |
---|
[3c05977] | 2780 | /* someone else might be waiting on our knote */ |
---|
| 2781 | if (influx) |
---|
| 2782 | wakeup(kq); |
---|
| 2783 | kq->kq_state |= KQ_FLUXWAIT; |
---|
| 2784 | msleep(kq, &kq->kq_lock, PSOCK, "kqflxwt", 0); |
---|
| 2785 | goto again; |
---|
| 2786 | } |
---|
[0577772] | 2787 | kn_enter_flux(kn); |
---|
[3c05977] | 2788 | KQ_UNLOCK(kq); |
---|
| 2789 | influx = 1; |
---|
[0577772] | 2790 | knote_drop(kn, td); |
---|
[3c05977] | 2791 | KQ_LOCK(kq); |
---|
| 2792 | } |
---|
| 2793 | KQ_UNLOCK_FLUX(kq); |
---|
| 2794 | } |
---|
[0c9f27b] | 2795 | #ifdef __rtems__ |
---|
| 2796 | rtems_libio_unlock(); |
---|
| 2797 | #endif /* __rtems__ */ |
---|
[3c05977] | 2798 | } |
---|
| 2799 | |
---|
| 2800 | static int |
---|
| 2801 | knote_attach(struct knote *kn, struct kqueue *kq) |
---|
| 2802 | { |
---|
| 2803 | struct klist *list; |
---|
| 2804 | |
---|
[0577772] | 2805 | KASSERT(kn_in_flux(kn), ("knote %p not marked influx", kn)); |
---|
[3c05977] | 2806 | KQ_OWNED(kq); |
---|
| 2807 | |
---|
| 2808 | if (kn->kn_fop->f_isfd) { |
---|
| 2809 | if (kn->kn_id >= kq->kq_knlistsize) |
---|
[0577772] | 2810 | return (ENOMEM); |
---|
[3c05977] | 2811 | list = &kq->kq_knlist[kn->kn_id]; |
---|
| 2812 | } else { |
---|
| 2813 | if (kq->kq_knhash == NULL) |
---|
[0577772] | 2814 | return (ENOMEM); |
---|
[3c05977] | 2815 | list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)]; |
---|
| 2816 | } |
---|
| 2817 | SLIST_INSERT_HEAD(list, kn, kn_link); |
---|
[0577772] | 2818 | return (0); |
---|
[3c05977] | 2819 | } |
---|
| 2820 | |
---|
| 2821 | static void |
---|
| 2822 | knote_drop(struct knote *kn, struct thread *td) |
---|
[0577772] | 2823 | { |
---|
| 2824 | |
---|
| 2825 | if ((kn->kn_status & KN_DETACHED) == 0) |
---|
| 2826 | kn->kn_fop->f_detach(kn); |
---|
| 2827 | knote_drop_detached(kn, td); |
---|
| 2828 | } |
---|
| 2829 | |
---|
| 2830 | static void |
---|
| 2831 | knote_drop_detached(struct knote *kn, struct thread *td) |
---|
[3c05977] | 2832 | { |
---|
| 2833 | struct kqueue *kq; |
---|
| 2834 | struct klist *list; |
---|
| 2835 | |
---|
| 2836 | kq = kn->kn_kq; |
---|
| 2837 | |
---|
[0577772] | 2838 | KASSERT((kn->kn_status & KN_DETACHED) != 0, |
---|
| 2839 | ("knote %p still attached", kn)); |
---|
[3c05977] | 2840 | KQ_NOTOWNED(kq); |
---|
| 2841 | |
---|
| 2842 | KQ_LOCK(kq); |
---|
[0577772] | 2843 | KASSERT(kn->kn_influx == 1, |
---|
| 2844 | ("knote_drop called on %p with influx %d", kn, kn->kn_influx)); |
---|
| 2845 | |
---|
[3c05977] | 2846 | if (kn->kn_fop->f_isfd) |
---|
| 2847 | list = &kq->kq_knlist[kn->kn_id]; |
---|
| 2848 | else |
---|
| 2849 | list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)]; |
---|
| 2850 | |
---|
| 2851 | if (!SLIST_EMPTY(list)) |
---|
| 2852 | SLIST_REMOVE(list, kn, knote, kn_link); |
---|
| 2853 | if (kn->kn_status & KN_QUEUED) |
---|
| 2854 | knote_dequeue(kn); |
---|
| 2855 | KQ_UNLOCK_FLUX(kq); |
---|
| 2856 | |
---|
| 2857 | if (kn->kn_fop->f_isfd) { |
---|
| 2858 | fdrop(kn->kn_fp, td); |
---|
| 2859 | kn->kn_fp = NULL; |
---|
| 2860 | } |
---|
| 2861 | kqueue_fo_release(kn->kn_kevent.filter); |
---|
| 2862 | kn->kn_fop = NULL; |
---|
| 2863 | knote_free(kn); |
---|
| 2864 | } |
---|
| 2865 | |
---|
| 2866 | static void |
---|
| 2867 | knote_enqueue(struct knote *kn) |
---|
| 2868 | { |
---|
| 2869 | struct kqueue *kq = kn->kn_kq; |
---|
| 2870 | |
---|
| 2871 | KQ_OWNED(kn->kn_kq); |
---|
| 2872 | KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued")); |
---|
| 2873 | |
---|
| 2874 | TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe); |
---|
| 2875 | kn->kn_status |= KN_QUEUED; |
---|
| 2876 | kq->kq_count++; |
---|
| 2877 | kqueue_wakeup(kq); |
---|
| 2878 | } |
---|
| 2879 | |
---|
| 2880 | static void |
---|
| 2881 | knote_dequeue(struct knote *kn) |
---|
| 2882 | { |
---|
| 2883 | struct kqueue *kq = kn->kn_kq; |
---|
| 2884 | |
---|
| 2885 | KQ_OWNED(kn->kn_kq); |
---|
| 2886 | KASSERT(kn->kn_status & KN_QUEUED, ("knote not queued")); |
---|
| 2887 | |
---|
| 2888 | TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe); |
---|
| 2889 | kn->kn_status &= ~KN_QUEUED; |
---|
| 2890 | kq->kq_count--; |
---|
| 2891 | } |
---|
| 2892 | |
---|
| 2893 | static void |
---|
| 2894 | knote_init(void) |
---|
| 2895 | { |
---|
| 2896 | |
---|
| 2897 | knote_zone = uma_zcreate("KNOTE", sizeof(struct knote), NULL, NULL, |
---|
| 2898 | NULL, NULL, UMA_ALIGN_PTR, 0); |
---|
| 2899 | } |
---|
| 2900 | SYSINIT(knote, SI_SUB_PSEUDO, SI_ORDER_ANY, knote_init, NULL); |
---|
| 2901 | |
---|
| 2902 | static struct knote * |
---|
| 2903 | knote_alloc(int waitok) |
---|
| 2904 | { |
---|
[c40e45b] | 2905 | |
---|
| 2906 | return (uma_zalloc(knote_zone, (waitok ? M_WAITOK : M_NOWAIT) | |
---|
| 2907 | M_ZERO)); |
---|
[3c05977] | 2908 | } |
---|
| 2909 | |
---|
| 2910 | static void |
---|
| 2911 | knote_free(struct knote *kn) |
---|
| 2912 | { |
---|
[c40e45b] | 2913 | |
---|
| 2914 | uma_zfree(knote_zone, kn); |
---|
[3c05977] | 2915 | } |
---|
| 2916 | |
---|
| 2917 | /* |
---|
| 2918 | * Register the kev w/ the kq specified by fd. |
---|
| 2919 | */ |
---|
| 2920 | int |
---|
| 2921 | kqfd_register(int fd, struct kevent *kev, struct thread *td, int waitok) |
---|
| 2922 | { |
---|
| 2923 | struct kqueue *kq; |
---|
| 2924 | struct file *fp; |
---|
[c40e45b] | 2925 | cap_rights_t rights; |
---|
[3c05977] | 2926 | int error; |
---|
| 2927 | |
---|
[c40e45b] | 2928 | error = fget(td, fd, cap_rights_init(&rights, CAP_KQUEUE_CHANGE), &fp); |
---|
| 2929 | if (error != 0) |
---|
[3c05977] | 2930 | return (error); |
---|
| 2931 | if ((error = kqueue_acquire(fp, &kq)) != 0) |
---|
| 2932 | goto noacquire; |
---|
| 2933 | |
---|
| 2934 | error = kqueue_register(kq, kev, td, waitok); |
---|
| 2935 | kqueue_release(kq, 0); |
---|
| 2936 | |
---|
| 2937 | noacquire: |
---|
| 2938 | fdrop(fp, td); |
---|
[0577772] | 2939 | return (error); |
---|
[3c05977] | 2940 | } |
---|
#ifdef __rtems__
/*
 * RTEMS filesystem handler table for kqueue descriptors: only close,
 * fstat, poll and kqfilter have kqueue-specific behavior; everything
 * else uses the filesystem defaults.
 */
static const rtems_filesystem_file_handlers_r kqueueops = {
	.open_h = rtems_filesystem_default_open,
	.close_h = rtems_bsd_kqueue_close,
	.read_h = rtems_filesystem_default_read,
	.write_h = rtems_filesystem_default_write,
	.ioctl_h = rtems_filesystem_default_ioctl,
	.lseek_h = rtems_filesystem_default_lseek,
	.fstat_h = rtems_bsd_kqueue_stat,
	.ftruncate_h = rtems_filesystem_default_ftruncate,
	.fsync_h = rtems_filesystem_default_fsync_or_fdatasync,
	.fdatasync_h = rtems_filesystem_default_fsync_or_fdatasync,
	.fcntl_h = rtems_filesystem_default_fcntl,
	.poll_h = rtems_bsd_kqueue_poll,
	.kqfilter_h = rtems_bsd_kqueue_kqfilter,
	.readv_h = rtems_filesystem_default_readv,
	.writev_h = rtems_filesystem_default_writev,
	.mmap_h = rtems_filesystem_default_mmap
};
#endif /* __rtems__ */
---|