source: rtems-libbsd/freebsd/sys/kern/sys_pipe.c @ baf1ca7

Last change on this file since baf1ca7 was bcdce02, checked in by Sebastian Huber <sebastian.huber@…>, on 08/21/18 at 11:47:02

Update to FreeBSD head 2018-06-01

Git mirror commit fb63610a69b0eb7f69a201ba05c4c1a7a2739cf9.

Update #3472.

#include <machine/rtems-bsd-kernel-space.h>

/*-
 * SPDX-License-Identifier: BSD-4-Clause
 *
 * Copyright (c) 1996 John S. Dyson
 * Copyright (c) 2012 Giovanni Trematerra
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 * 4. Modifications may be freely made to this file if the above conditions
 *    are met.
 */

/*
 * This file contains a high-performance replacement for the socket-based
 * pipes scheme originally used in FreeBSD/4.4Lite.  It does not support
 * all features of sockets, but does do everything that pipes normally
 * do.
 */

/*
 * This code has two modes of operation, a small write mode and a large
 * write mode.  The small write mode acts like conventional pipes with
 * a kernel buffer.  If the buffer is less than PIPE_MINDIRECT, then the
 * "normal" pipe buffering is done.  If the buffer is between PIPE_MINDIRECT
 * and PIPE_SIZE in size, the sending process pins the underlying pages in
 * memory, and the receiving process copies directly from these pinned pages
 * in the sending process.
 *
 * If the sending process receives a signal, it is possible that it will
 * go away, and certainly its address space can change, because control
 * is returned back to the user-mode side.  In that case, the pipe code
 * arranges to copy the buffer supplied by the user process, to a pageable
 * kernel buffer, and the receiving process will grab the data from the
 * pageable kernel buffer.  Since signals don't happen all that often,
 * the copy operation is normally eliminated.
 *
 * The constant PIPE_MINDIRECT is chosen to make sure that buffering will
 * happen for small transfers so that the system will not spend all of
 * its time context switching.
 *
 * In order to limit the resource use of pipes, two sysctls exist:
 *
 * kern.ipc.maxpipekva - This is a hard limit on the amount of pageable
 * address space available to us in pipe_map. This value is normally
 * autotuned, but may also be loader tuned.
 *
 * kern.ipc.pipekva - This read-only sysctl tracks the current amount of
 * memory in use by pipes.
 *
 * Based on how large pipekva is relative to maxpipekva, the following
 * will happen:
 *
 * 0% - 50%:
 *     New pipes are given 16K of memory backing, pipes may dynamically
 *     grow to as large as 64K where needed.
 * 50% - 75%:
 *     New pipes are given 4K (or PAGE_SIZE) of memory backing,
 *     existing pipes may NOT grow.
 * 75% - 100%:
 *     New pipes are given 4K (or PAGE_SIZE) of memory backing,
 *     existing pipes will be shrunk down to 4K whenever possible.
 *
 * Resizing may be disabled by setting kern.ipc.piperesizeallowed=0.  If
 * that is set, the only resize that will occur is the 0 -> SMALL_PIPE_SIZE
 * resize which MUST occur for reverse-direction pipes when they are
 * first used.
 *
 * Additional information about the current state of pipes may be obtained
 * from kern.ipc.pipes, kern.ipc.pipefragretry, kern.ipc.pipeallocfail,
 * and kern.ipc.piperesizefail.
 *
 * Locking rules:  There are two locks present here:  A mutex, used via
 * PIPE_LOCK, and a flag, used via pipelock().  All locking is done via
 * the flag, as mutexes can not persist over uiomove.  The mutex
 * exists only to guard access to the flag, and is not in itself a
 * locking mechanism.  Also note that there is only a single mutex for
 * both directions of a pipe.
 *
 * As pipelock() may have to sleep before it can acquire the flag, it
 * is important to reread all data after a call to pipelock(); everything
 * in the structure may have changed.
 */
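
/*
 * Illustrative sketch of the locking protocol described above (kept in
 * an #if 0 block, so it is not compiled): the same pattern pipe_read()
 * and pipe_write() follow below.  The mutex is dropped around uiomove(),
 * which may sleep, so only the PIPE_LOCKFL flag protects the pipe across
 * that window and all pipe state must be reread after relocking.
 */
#if 0
        PIPE_LOCK(cpipe);                       /* pair mutex, guards the flag */
        error = pipelock(cpipe, 1);             /* set PIPE_LOCKFL; may sleep */
        if (error == 0) {
                PIPE_UNLOCK(cpipe);             /* mutex cannot persist over uiomove */
                error = uiomove(buf, size, uio);
                PIPE_LOCK(cpipe);               /* reread all pipe state after this */
                pipeunlock(cpipe);              /* clear PIPE_LOCKFL, wake PIPE_LWANT */
        }
        PIPE_UNLOCK(cpipe);
#endif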

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/ttycom.h>
#include <sys/stat.h>
#include <sys/malloc.h>
#include <sys/poll.h>
#include <sys/selinfo.h>
#include <sys/signalvar.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/pipe.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/uio.h>
#include <sys/user.h>
#include <sys/event.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/uma.h>

/*
 * Use this define if you want to disable *fancy* VM things.  Expect an
 * approx 30% decrease in transfer rate.  This could be useful for
 * NetBSD or OpenBSD.
 */
/* #define PIPE_NODIRECT */

#define PIPE_PEER(pipe) \
        (((pipe)->pipe_state & PIPE_NAMED) ? (pipe) : ((pipe)->pipe_peer))

/*
 * interfaces to the outside world
 */
#ifndef __rtems__
static fo_rdwr_t        pipe_read;
static fo_rdwr_t        pipe_write;
static fo_truncate_t    pipe_truncate;
static fo_ioctl_t       pipe_ioctl;
static fo_poll_t        pipe_poll;
static fo_kqfilter_t    pipe_kqfilter;
static fo_stat_t        pipe_stat;
static fo_close_t       pipe_close;
static fo_chmod_t       pipe_chmod;
static fo_chown_t       pipe_chown;
static fo_fill_kinfo_t  pipe_fill_kinfo;

struct fileops pipeops = {
        .fo_read = pipe_read,
        .fo_write = pipe_write,
        .fo_truncate = pipe_truncate,
        .fo_ioctl = pipe_ioctl,
        .fo_poll = pipe_poll,
        .fo_kqfilter = pipe_kqfilter,
        .fo_stat = pipe_stat,
        .fo_close = pipe_close,
        .fo_chmod = pipe_chmod,
        .fo_chown = pipe_chown,
        .fo_sendfile = invfo_sendfile,
        .fo_fill_kinfo = pipe_fill_kinfo,
        .fo_flags = DFLAG_PASSABLE
};
#else /* __rtems__ */
#define PIPE_NODIRECT
#define PRIBIO                  (0)

static int rtems_bsd_pipe_open(rtems_libio_t *iop, const char *path,
    int oflag, mode_t mode);
static int rtems_bsd_pipe_close(rtems_libio_t *iop);
static ssize_t rtems_bsd_pipe_read(rtems_libio_t *iop, void *buffer,
    size_t count);
static ssize_t rtems_bsd_pipe_readv(rtems_libio_t *iop,
    const struct iovec *iov, int iovcnt, ssize_t total);
static ssize_t rtems_bsd_pipe_write(rtems_libio_t *iop, const void *buffer,
    size_t count);
static ssize_t rtems_bsd_pipe_writev(rtems_libio_t *iop,
    const struct iovec *iov, int iovcnt, ssize_t total);
static int rtems_bsd_pipe_ioctl(rtems_libio_t *iop, ioctl_command_t request,
    void *buffer);
static int rtems_bsd_pipe_stat(const rtems_filesystem_location_info_t *loc,
    struct stat *buf);
static int rtems_bsd_pipe_fcntl(rtems_libio_t *iop, int cmd);
static int rtems_bsd_pipe_poll(rtems_libio_t *iop, int events);
int rtems_bsd_pipe_kqfilter(rtems_libio_t *iop, struct knote *kn);

static const rtems_filesystem_file_handlers_r pipeops = {
        .open_h = rtems_bsd_pipe_open,
        .close_h = rtems_bsd_pipe_close,
        .read_h = rtems_bsd_pipe_read,
        .write_h = rtems_bsd_pipe_write,
        .ioctl_h = rtems_bsd_pipe_ioctl,
        .lseek_h = rtems_filesystem_default_lseek,
        .fstat_h = rtems_bsd_pipe_stat,
        .ftruncate_h = rtems_filesystem_default_ftruncate,
        .fsync_h = rtems_filesystem_default_fsync_or_fdatasync,
        .fdatasync_h = rtems_filesystem_default_fsync_or_fdatasync,
        .fcntl_h = rtems_bsd_pipe_fcntl,
        .poll_h = rtems_bsd_pipe_poll,
        .kqfilter_h = rtems_bsd_pipe_kqfilter,
        .readv_h = rtems_bsd_pipe_readv,
        .writev_h = rtems_bsd_pipe_writev,
        .mmap_h = rtems_filesystem_default_mmap
};

long    maxpipekva;                     /* Limit on pipe KVA */

#endif /* __rtems__ */

static void     filt_pipedetach(struct knote *kn);
static void     filt_pipedetach_notsup(struct knote *kn);
static int      filt_pipenotsup(struct knote *kn, long hint);
static int      filt_piperead(struct knote *kn, long hint);
static int      filt_pipewrite(struct knote *kn, long hint);

static struct filterops pipe_nfiltops = {
        .f_isfd = 1,
        .f_detach = filt_pipedetach_notsup,
        .f_event = filt_pipenotsup
};
static struct filterops pipe_rfiltops = {
        .f_isfd = 1,
        .f_detach = filt_pipedetach,
        .f_event = filt_piperead
};
static struct filterops pipe_wfiltops = {
        .f_isfd = 1,
        .f_detach = filt_pipedetach,
        .f_event = filt_pipewrite
};
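
/*
 * User-level sketch of exercising the read filter registered above (kept
 * in an #if 0 block, so it is not compiled): an EVFILT_READ kevent on a
 * pipe file descriptor is dispatched through the pipe kqfilter handler
 * and filt_piperead().  Error handling is abbreviated.
 */
#if 0
#include <sys/event.h>

static void
wait_for_pipe_data(int rfd)
{
        struct kevent kev;
        int kq;

        kq = kqueue();
        EV_SET(&kev, rfd, EVFILT_READ, EV_ADD, 0, 0, NULL);
        (void)kevent(kq, &kev, 1, NULL, 0, NULL);       /* register */
        (void)kevent(kq, NULL, 0, &kev, 1, NULL);       /* block until readable */
}
#endif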

/*
 * Default pipe buffer size(s), this can be kind-of large now because pipe
 * space is pageable.  The pipe code will try to maintain locality of
 * reference for performance reasons, so small amounts of outstanding I/O
 * will not wipe the cache.
 */
#define MINPIPESIZE (PIPE_SIZE/3)
#define MAXPIPESIZE (2*PIPE_SIZE/3)
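
/*
 * For example, assuming the usual PIPE_SIZE of 16384, these work out
 * (by integer division) to MINPIPESIZE = 5461 and MAXPIPESIZE = 10922
 * bytes.
 */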

static long amountpipekva;
static int pipefragretry;
static int pipeallocfail;
static int piperesizefail;
static int piperesizeallowed = 1;

SYSCTL_LONG(_kern_ipc, OID_AUTO, maxpipekva, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
           &maxpipekva, 0, "Pipe KVA limit");
SYSCTL_LONG(_kern_ipc, OID_AUTO, pipekva, CTLFLAG_RD,
           &amountpipekva, 0, "Pipe KVA usage");
SYSCTL_INT(_kern_ipc, OID_AUTO, pipefragretry, CTLFLAG_RD,
          &pipefragretry, 0, "Pipe allocation retries due to fragmentation");
SYSCTL_INT(_kern_ipc, OID_AUTO, pipeallocfail, CTLFLAG_RD,
          &pipeallocfail, 0, "Pipe allocation failures");
SYSCTL_INT(_kern_ipc, OID_AUTO, piperesizefail, CTLFLAG_RD,
          &piperesizefail, 0, "Pipe resize failures");
SYSCTL_INT(_kern_ipc, OID_AUTO, piperesizeallowed, CTLFLAG_RW,
          &piperesizeallowed, 0, "Pipe resizing allowed");

static void pipeinit(void *dummy __unused);
static void pipeclose(struct pipe *cpipe);
static void pipe_free_kmem(struct pipe *cpipe);
static void pipe_create(struct pipe *pipe, int backing);
static void pipe_paircreate(struct thread *td, struct pipepair **p_pp);
static __inline int pipelock(struct pipe *cpipe, int catch);
static __inline void pipeunlock(struct pipe *cpipe);
#ifndef PIPE_NODIRECT
static int pipe_build_write_buffer(struct pipe *wpipe, struct uio *uio);
static void pipe_destroy_write_buffer(struct pipe *wpipe);
static int pipe_direct_write(struct pipe *wpipe, struct uio *uio);
static void pipe_clone_write_buffer(struct pipe *wpipe);
#endif
static int pipespace(struct pipe *cpipe, int size);
static int pipespace_new(struct pipe *cpipe, int size);

static int      pipe_zone_ctor(void *mem, int size, void *arg, int flags);
static int      pipe_zone_init(void *mem, int size, int flags);
static void     pipe_zone_fini(void *mem, int size);

static uma_zone_t pipe_zone;
static struct unrhdr *pipeino_unr;
static dev_t pipedev_ino;

SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_ANY, pipeinit, NULL);

static void
pipeinit(void *dummy __unused)
{

        pipe_zone = uma_zcreate("pipe", sizeof(struct pipepair),
            pipe_zone_ctor, NULL, pipe_zone_init, pipe_zone_fini,
            UMA_ALIGN_PTR, 0);
        KASSERT(pipe_zone != NULL, ("pipe_zone not initialized"));
        pipeino_unr = new_unrhdr(1, INT32_MAX, NULL);
        KASSERT(pipeino_unr != NULL, ("pipe fake inodes not initialized"));
        pipedev_ino = devfs_alloc_cdp_inode();
        KASSERT(pipedev_ino > 0, ("pipe dev inode not initialized"));
}

static int
pipe_zone_ctor(void *mem, int size, void *arg, int flags)
{
        struct pipepair *pp;
        struct pipe *rpipe, *wpipe;

        KASSERT(size == sizeof(*pp), ("pipe_zone_ctor: wrong size"));

        pp = (struct pipepair *)mem;

        /*
         * We zero both pipe endpoints to make sure all the kmem pointers
         * are NULL, flag fields are zero'd, etc.  We timestamp both
         * endpoints with the same time.
         */
        rpipe = &pp->pp_rpipe;
        bzero(rpipe, sizeof(*rpipe));
#ifndef __rtems__
        vfs_timestamp(&rpipe->pipe_ctime);
#else /* __rtems__ */
        rpipe->pipe_ctime.tv_sec = time(NULL);
#endif /* __rtems__ */
        rpipe->pipe_atime = rpipe->pipe_mtime = rpipe->pipe_ctime;

        wpipe = &pp->pp_wpipe;
        bzero(wpipe, sizeof(*wpipe));
        wpipe->pipe_ctime = rpipe->pipe_ctime;
        wpipe->pipe_atime = wpipe->pipe_mtime = rpipe->pipe_ctime;

        rpipe->pipe_peer = wpipe;
        rpipe->pipe_pair = pp;
        wpipe->pipe_peer = rpipe;
        wpipe->pipe_pair = pp;

        /*
         * Mark both endpoints as present; they will later get free'd
         * one at a time.  When both are free'd, then the whole pair
         * is released.
         */
        rpipe->pipe_present = PIPE_ACTIVE;
        wpipe->pipe_present = PIPE_ACTIVE;

        /*
         * Eventually, the MAC Framework may initialize the label
         * in ctor or init, but for now we do it elsewhere to avoid
         * blocking in ctor or init.
         */
        pp->pp_label = NULL;

        return (0);
}

static int
pipe_zone_init(void *mem, int size, int flags)
{
        struct pipepair *pp;

        KASSERT(size == sizeof(*pp), ("pipe_zone_init: wrong size"));

        pp = (struct pipepair *)mem;

        mtx_init(&pp->pp_mtx, "pipe mutex", NULL, MTX_DEF | MTX_NEW);
        return (0);
}

static void
pipe_zone_fini(void *mem, int size)
{
        struct pipepair *pp;

        KASSERT(size == sizeof(*pp), ("pipe_zone_fini: wrong size"));

        pp = (struct pipepair *)mem;

        mtx_destroy(&pp->pp_mtx);
}

static void
pipe_paircreate(struct thread *td, struct pipepair **p_pp)
{
        struct pipepair *pp;
        struct pipe *rpipe, *wpipe;

        *p_pp = pp = uma_zalloc(pipe_zone, M_WAITOK);
#ifdef MAC
        /*
         * The MAC label is shared between the connected endpoints.  As a
         * result mac_pipe_init() and mac_pipe_create() are called once
         * for the pair, and not on the endpoints.
         */
        mac_pipe_init(pp);
        mac_pipe_create(td->td_ucred, pp);
#endif
        rpipe = &pp->pp_rpipe;
        wpipe = &pp->pp_wpipe;

        knlist_init_mtx(&rpipe->pipe_sel.si_note, PIPE_MTX(rpipe));
        knlist_init_mtx(&wpipe->pipe_sel.si_note, PIPE_MTX(wpipe));

        /* Only the forward direction pipe is backed by default */
        pipe_create(rpipe, 1);
        pipe_create(wpipe, 0);

        rpipe->pipe_state |= PIPE_DIRECTOK;
        wpipe->pipe_state |= PIPE_DIRECTOK;
}

void
pipe_named_ctor(struct pipe **ppipe, struct thread *td)
{
        struct pipepair *pp;

        pipe_paircreate(td, &pp);
        pp->pp_rpipe.pipe_state |= PIPE_NAMED;
        *ppipe = &pp->pp_rpipe;
}

void
pipe_dtor(struct pipe *dpipe)
{
        struct pipe *peer;
        ino_t ino;

        ino = dpipe->pipe_ino;
        peer = (dpipe->pipe_state & PIPE_NAMED) != 0 ? dpipe->pipe_peer : NULL;
        funsetown(&dpipe->pipe_sigio);
        pipeclose(dpipe);
        if (peer != NULL) {
                funsetown(&peer->pipe_sigio);
                pipeclose(peer);
        }
        if (ino != 0 && ino != (ino_t)-1)
                free_unr(pipeino_unr, ino);
}

/*
 * The pipe system call for the DTYPE_PIPE type of pipes.  If we fail, let
 * the zone pick up the pieces via pipeclose().
 */
int
kern_pipe(struct thread *td, int fildes[2], int flags, struct filecaps *fcaps1,
    struct filecaps *fcaps2)
{
        struct file *rf, *wf;
        struct pipe *rpipe, *wpipe;
        struct pipepair *pp;
        int fd, fflags, error;

        pipe_paircreate(td, &pp);
        rpipe = &pp->pp_rpipe;
        wpipe = &pp->pp_wpipe;
        error = falloc_caps(td, &rf, &fd, flags, fcaps1);
        if (error) {
                pipeclose(rpipe);
                pipeclose(wpipe);
                return (error);
        }
        /* An extra reference on `rf' has been held for us by falloc_caps(). */
        fildes[0] = fd;

        fflags = FREAD | FWRITE;
        if ((flags & O_NONBLOCK) != 0)
                fflags |= FNONBLOCK;

        /*
         * Warning: once we've gotten past allocation of the fd for the
         * read-side, we can only drop the read side via fdrop() in order
         * to avoid races against processes which manage to dup() the read
         * side while we are blocked trying to allocate the write side.
         */
        finit(rf, fflags, DTYPE_PIPE, rpipe, &pipeops);
        error = falloc_caps(td, &wf, &fd, flags, fcaps2);
        if (error) {
                fdclose(td, rf, fildes[0]);
#ifndef __rtems__
                fdrop(rf, td);
#endif /* __rtems__ */
                /* rpipe has been closed by fdrop(). */
                pipeclose(wpipe);
                return (error);
        }
        /* An extra reference on `wf' has been held for us by falloc_caps(). */
        finit(wf, fflags, DTYPE_PIPE, wpipe, &pipeops);
#ifndef __rtems__
        fdrop(wf, td);
#endif /* __rtems__ */
        fildes[1] = fd;
#ifndef __rtems__
        fdrop(rf, td);
#endif /* __rtems__ */

        return (0);
}

#ifdef COMPAT_FREEBSD10
/* ARGSUSED */
int
freebsd10_pipe(struct thread *td, struct freebsd10_pipe_args *uap __unused)
{
        int error;
        int fildes[2];

        error = kern_pipe(td, fildes, 0, NULL, NULL);
        if (error)
                return (error);

        td->td_retval[0] = fildes[0];
        td->td_retval[1] = fildes[1];

        return (0);
}
#endif

#ifndef __rtems__
int
sys_pipe2(struct thread *td, struct pipe2_args *uap)
{
        int error, fildes[2];

        if (uap->flags & ~(O_CLOEXEC | O_NONBLOCK))
                return (EINVAL);
        error = kern_pipe(td, fildes, uap->flags, NULL, NULL);
        if (error)
                return (error);
        error = copyout(fildes, uap->fildes, 2 * sizeof(int));
        if (error) {
                (void)kern_close(td, fildes[0]);
                (void)kern_close(td, fildes[1]);
        }
        return (error);
}
#endif /* __rtems__ */

#ifdef __rtems__
int
pipe(int fildes[2])
{
        struct thread *td = rtems_bsd_get_curthread_or_null();
        int error;

        if (td != NULL) {
                error = kern_pipe(td, fildes, 0, NULL, NULL);
        } else {
                error = ENOMEM;
        }

        if (error == 0) {
                return error;
        } else {
                rtems_set_errno_and_return_minus_one(error);
        }
}
#endif /* __rtems__ */
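
/*
 * Minimal user-level usage sketch for the pipe() entry point above (kept
 * in an #if 0 block, so it is not compiled): pass one short message from
 * the write end to the read end.  Error handling is abbreviated.
 */
#if 0
#include <unistd.h>

static void
pipe_example(void)
{
        int fildes[2];
        char buf[5];

        if (pipe(fildes) != 0)
                return;
        (void)write(fildes[1], "hello", 5);     /* fildes[1] is the write side */
        (void)read(fildes[0], buf, 5);          /* fildes[0] is the read side */
        (void)close(fildes[0]);
        (void)close(fildes[1]);
}
#endif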

/*
 * Allocate kva for pipe circular buffer, the space is pageable
 * This routine will 'realloc' the size of a pipe safely, if it fails
 * it will retain the old buffer.
 * If it fails it will return ENOMEM.
 */
static int
pipespace_new(cpipe, size)
        struct pipe *cpipe;
        int size;
{
        caddr_t buffer;
        int error, cnt, firstseg;
        static int curfail = 0;
        static struct timeval lastfail;

        KASSERT(!mtx_owned(PIPE_MTX(cpipe)), ("pipespace: pipe mutex locked"));
        KASSERT(!(cpipe->pipe_state & PIPE_DIRECTW),
                ("pipespace: resize of direct writes not allowed"));
retry:
        cnt = cpipe->pipe_buffer.cnt;
        if (cnt > size)
                size = cnt;

        size = round_page(size);
#ifndef __rtems__
        buffer = (caddr_t) vm_map_min(pipe_map);

        error = vm_map_find(pipe_map, NULL, 0,
                (vm_offset_t *) &buffer, size, 0, VMFS_ANY_SPACE,
                VM_PROT_ALL, VM_PROT_ALL, 0);
        if (error != KERN_SUCCESS) {
#else /* __rtems__ */
        (void)error;
        buffer = malloc(size, M_TEMP, M_WAITOK | M_ZERO);
        if (buffer == NULL) {
#endif /* __rtems__ */
                if ((cpipe->pipe_buffer.buffer == NULL) &&
                        (size > SMALL_PIPE_SIZE)) {
                        size = SMALL_PIPE_SIZE;
                        pipefragretry++;
                        goto retry;
                }
                if (cpipe->pipe_buffer.buffer == NULL) {
                        pipeallocfail++;
                        if (ppsratecheck(&lastfail, &curfail, 1))
                                printf("kern.ipc.maxpipekva exceeded; see tuning(7)\n");
                } else {
                        piperesizefail++;
                }
                return (ENOMEM);
        }

        /* copy data, then free old resources if we're resizing */
        if (cnt > 0) {
                if (cpipe->pipe_buffer.in <= cpipe->pipe_buffer.out) {
                        firstseg = cpipe->pipe_buffer.size - cpipe->pipe_buffer.out;
                        bcopy(&cpipe->pipe_buffer.buffer[cpipe->pipe_buffer.out],
                                buffer, firstseg);
                        if ((cnt - firstseg) > 0)
                                bcopy(cpipe->pipe_buffer.buffer, &buffer[firstseg],
                                        cpipe->pipe_buffer.in);
                } else {
                        bcopy(&cpipe->pipe_buffer.buffer[cpipe->pipe_buffer.out],
                                buffer, cnt);
                }
        }
        pipe_free_kmem(cpipe);
        cpipe->pipe_buffer.buffer = buffer;
        cpipe->pipe_buffer.size = size;
        cpipe->pipe_buffer.in = cnt;
        cpipe->pipe_buffer.out = 0;
        cpipe->pipe_buffer.cnt = cnt;
        atomic_add_long(&amountpipekva, cpipe->pipe_buffer.size);
        return (0);
}
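
/*
 * Standalone sketch of the wraparound copy performed above (kept in an
 * #if 0 block, so it is not compiled): if the live region of the old
 * ring wraps past its end, it is straightened out into the start of the
 * new buffer, which is why pipe_buffer.out is reset to 0 afterwards.
 */
#if 0
static void
ring_copy(char *dst, const char *src, int srcsize, int out, int cnt)
{
        int firstseg = srcsize - out;           /* bytes up to the wrap point */

        if (cnt <= firstseg) {
                bcopy(src + out, dst, cnt);     /* contiguous, no wrap */
        } else {
                bcopy(src + out, dst, firstseg);
                bcopy(src, dst + firstseg, cnt - firstseg);
        }
}
#endif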

/*
 * Wrapper for pipespace_new() that performs locking assertions.
 */
static int
pipespace(cpipe, size)
        struct pipe *cpipe;
        int size;
{

        KASSERT(cpipe->pipe_state & PIPE_LOCKFL,
                ("Unlocked pipe passed to pipespace"));
        return (pipespace_new(cpipe, size));
}

/*
 * lock a pipe for I/O, blocking other access
 */
static __inline int
pipelock(cpipe, catch)
        struct pipe *cpipe;
        int catch;
{
        int error;

        PIPE_LOCK_ASSERT(cpipe, MA_OWNED);
        while (cpipe->pipe_state & PIPE_LOCKFL) {
                cpipe->pipe_state |= PIPE_LWANT;
                error = msleep(cpipe, PIPE_MTX(cpipe),
                    catch ? (PRIBIO | PCATCH) : PRIBIO,
                    "pipelk", 0);
                if (error != 0)
                        return (error);
        }
        cpipe->pipe_state |= PIPE_LOCKFL;
        return (0);
}

/*
 * unlock a pipe I/O lock
 */
static __inline void
pipeunlock(cpipe)
        struct pipe *cpipe;
{

        PIPE_LOCK_ASSERT(cpipe, MA_OWNED);
        KASSERT(cpipe->pipe_state & PIPE_LOCKFL,
                ("Unlocked pipe passed to pipeunlock"));
        cpipe->pipe_state &= ~PIPE_LOCKFL;
        if (cpipe->pipe_state & PIPE_LWANT) {
                cpipe->pipe_state &= ~PIPE_LWANT;
                wakeup(cpipe);
        }
}

void
pipeselwakeup(cpipe)
        struct pipe *cpipe;
{

        PIPE_LOCK_ASSERT(cpipe, MA_OWNED);
        if (cpipe->pipe_state & PIPE_SEL) {
                selwakeuppri(&cpipe->pipe_sel, PSOCK);
                if (!SEL_WAITING(&cpipe->pipe_sel))
                        cpipe->pipe_state &= ~PIPE_SEL;
        }
#ifndef __rtems__
        if ((cpipe->pipe_state & PIPE_ASYNC) && cpipe->pipe_sigio)
                pgsigio(&cpipe->pipe_sigio, SIGIO, 0);
#endif /* __rtems__ */
        KNOTE_LOCKED(&cpipe->pipe_sel.si_note, 0);
}

/*
 * Initialize and allocate VM and memory for pipe.  The structure
 * will start out zero'd from the ctor, so we just manage the kmem.
 */
static void
pipe_create(pipe, backing)
        struct pipe *pipe;
        int backing;
{

        if (backing) {
                /*
                 * Note that these functions can fail if pipe map is exhausted
                 * (as a result of too many pipes created), but we ignore the
                 * error as it is not fatal and could be provoked by
                 * unprivileged users. The only consequence is worse performance
                 * with given pipe.
                 */
                if (amountpipekva > maxpipekva / 2)
                        (void)pipespace_new(pipe, SMALL_PIPE_SIZE);
                else
                        (void)pipespace_new(pipe, PIPE_SIZE);
        }

        pipe->pipe_ino = -1;
}

/* ARGSUSED */
static int
pipe_read(fp, uio, active_cred, flags, td)
        struct file *fp;
        struct uio *uio;
        struct ucred *active_cred;
        struct thread *td;
        int flags;
{
        struct pipe *rpipe;
        int error;
        int nread = 0;
        int size;

        rpipe = fp->f_data;
        PIPE_LOCK(rpipe);
        ++rpipe->pipe_busy;
        error = pipelock(rpipe, 1);
        if (error)
                goto unlocked_error;

#ifdef MAC
        error = mac_pipe_check_read(active_cred, rpipe->pipe_pair);
        if (error)
                goto locked_error;
#endif
        if (amountpipekva > (3 * maxpipekva) / 4) {
                if (!(rpipe->pipe_state & PIPE_DIRECTW) &&
                        (rpipe->pipe_buffer.size > SMALL_PIPE_SIZE) &&
                        (rpipe->pipe_buffer.cnt <= SMALL_PIPE_SIZE) &&
                        (piperesizeallowed == 1)) {
                        PIPE_UNLOCK(rpipe);
                        pipespace(rpipe, SMALL_PIPE_SIZE);
                        PIPE_LOCK(rpipe);
                }
        }

        while (uio->uio_resid) {
                /*
                 * normal pipe buffer receive
                 */
                if (rpipe->pipe_buffer.cnt > 0) {
                        size = rpipe->pipe_buffer.size - rpipe->pipe_buffer.out;
                        if (size > rpipe->pipe_buffer.cnt)
                                size = rpipe->pipe_buffer.cnt;
                        if (size > uio->uio_resid)
                                size = uio->uio_resid;

                        PIPE_UNLOCK(rpipe);
                        error = uiomove(
                            &rpipe->pipe_buffer.buffer[rpipe->pipe_buffer.out],
                            size, uio);
                        PIPE_LOCK(rpipe);
                        if (error)
                                break;

                        rpipe->pipe_buffer.out += size;
                        if (rpipe->pipe_buffer.out >= rpipe->pipe_buffer.size)
                                rpipe->pipe_buffer.out = 0;

                        rpipe->pipe_buffer.cnt -= size;

                        /*
                         * If there is no more to read in the pipe, reset
                         * its pointers to the beginning.  This improves
                         * cache hit stats.
                         */
                        if (rpipe->pipe_buffer.cnt == 0) {
                                rpipe->pipe_buffer.in = 0;
                                rpipe->pipe_buffer.out = 0;
                        }
                        nread += size;
#ifndef PIPE_NODIRECT
                /*
                 * Direct copy, bypassing a kernel buffer.
                 */
                } else if ((size = rpipe->pipe_map.cnt) &&
                           (rpipe->pipe_state & PIPE_DIRECTW)) {
                        if (size > uio->uio_resid)
                                size = (u_int) uio->uio_resid;

                        PIPE_UNLOCK(rpipe);
                        error = uiomove_fromphys(rpipe->pipe_map.ms,
                            rpipe->pipe_map.pos, size, uio);
                        PIPE_LOCK(rpipe);
                        if (error)
                                break;
                        nread += size;
                        rpipe->pipe_map.pos += size;
                        rpipe->pipe_map.cnt -= size;
                        if (rpipe->pipe_map.cnt == 0) {
                                rpipe->pipe_state &= ~(PIPE_DIRECTW|PIPE_WANTW);
                                wakeup(rpipe);
                        }
#endif
                } else {
                        /*
                         * detect EOF condition
                         * read returns 0 on EOF, no need to set error
                         */
                        if (rpipe->pipe_state & PIPE_EOF)
                                break;

                        /*
                         * If the "write-side" has been blocked, wake it up now.
                         */
                        if (rpipe->pipe_state & PIPE_WANTW) {
                                rpipe->pipe_state &= ~PIPE_WANTW;
                                wakeup(rpipe);
                        }

                        /*
                         * Break if some data was read.
                         */
                        if (nread > 0)
                                break;

                        /*
                         * Unlock the pipe buffer for our remaining processing.
                         * We will either break out with an error or we will
                         * sleep and relock to loop.
                         */
                        pipeunlock(rpipe);

                        /*
                         * Handle non-blocking mode operation or
                         * wait for more data.
                         */
#ifndef __rtems__
                        if (fp->f_flag & FNONBLOCK) {
#else /* __rtems__ */
                        if (rtems_bsd_libio_flags_to_fflag(fp->f_io.flags) & FNONBLOCK) {
#endif /* __rtems__ */
                                error = EAGAIN;
                        } else {
                                rpipe->pipe_state |= PIPE_WANTR;
                                if ((error = msleep(rpipe, PIPE_MTX(rpipe),
                                    PRIBIO | PCATCH,
                                    "piperd", 0)) == 0)
                                        error = pipelock(rpipe, 1);
                        }
                        if (error)
                                goto unlocked_error;
                }
        }
#ifdef MAC
locked_error:
#endif
        pipeunlock(rpipe);

        /* XXX: should probably do this before getting any locks. */
        if (error == 0)
#ifndef __rtems__
                vfs_timestamp(&rpipe->pipe_atime);
#else /* __rtems__ */
                rpipe->pipe_atime.tv_sec = time(NULL);
#endif /* __rtems__ */
unlocked_error:
        --rpipe->pipe_busy;

        /*
         * PIPE_WANT processing only makes sense if pipe_busy is 0.
         */
        if ((rpipe->pipe_busy == 0) && (rpipe->pipe_state & PIPE_WANT)) {
                rpipe->pipe_state &= ~(PIPE_WANT|PIPE_WANTW);
                wakeup(rpipe);
        } else if (rpipe->pipe_buffer.cnt < MINPIPESIZE) {
                /*
                 * Handle write blocking hysteresis.
                 */
                if (rpipe->pipe_state & PIPE_WANTW) {
                        rpipe->pipe_state &= ~PIPE_WANTW;
                        wakeup(rpipe);
                }
        }

        if ((rpipe->pipe_buffer.size - rpipe->pipe_buffer.cnt) >= PIPE_BUF)
                pipeselwakeup(rpipe);

        PIPE_UNLOCK(rpipe);
        return (error);
}
#ifdef __rtems__
static ssize_t
rtems_bsd_pipe_read(rtems_libio_t *iop, void *buffer, size_t count)
{
        struct thread *td = rtems_bsd_get_curthread_or_null();
        struct file *fp = rtems_bsd_iop_to_fp(iop);
        struct iovec iov = {
                .iov_base = buffer,
                .iov_len = count
        };
        struct uio auio = {
                .uio_iov = &iov,
                .uio_iovcnt = 1,
                .uio_offset = 0,
                .uio_resid = count,
                .uio_segflg = UIO_USERSPACE,
                .uio_rw = UIO_READ,
                .uio_td = td
        };
        int error;

        if (td != NULL) {
                error = pipe_read(fp, &auio, NULL, 0, NULL);
        } else {
                error = ENOMEM;
        }

        if (error == 0) {
                return (count - auio.uio_resid);
        } else {
                rtems_set_errno_and_return_minus_one(error);
        }
}

static ssize_t
rtems_bsd_pipe_readv(rtems_libio_t *iop, const struct iovec *iov,
    int iovcnt, ssize_t total)
{
        struct thread *td = rtems_bsd_get_curthread_or_null();
        struct file *fp = rtems_bsd_iop_to_fp(iop);
        struct uio auio = {
                .uio_iov = __DECONST(struct iovec *, iov),
                .uio_iovcnt = iovcnt,
                .uio_offset = 0,
                .uio_resid = total,
                .uio_segflg = UIO_USERSPACE,
                .uio_rw = UIO_READ,
                .uio_td = td
        };
        int error;

        if (td != NULL) {
                error = pipe_read(fp, &auio, NULL, 0, NULL);
        } else {
                error = ENOMEM;
        }

        if (error == 0) {
                return (total - auio.uio_resid);
        } else {
                rtems_set_errno_and_return_minus_one(error);
        }
}
#endif /* __rtems__ */

#ifndef PIPE_NODIRECT
/*
 * Map the sending processes' buffer into kernel space and wire it.
 * This is similar to a physical write operation.
 */
static int
pipe_build_write_buffer(wpipe, uio)
        struct pipe *wpipe;
        struct uio *uio;
{
        u_int size;
        int i;

        PIPE_LOCK_ASSERT(wpipe, MA_NOTOWNED);
        KASSERT(wpipe->pipe_state & PIPE_DIRECTW,
                ("Clone attempt on non-direct write pipe!"));

        if (uio->uio_iov->iov_len > wpipe->pipe_buffer.size)
                size = wpipe->pipe_buffer.size;
        else
                size = uio->uio_iov->iov_len;

        if ((i = vm_fault_quick_hold_pages(&curproc->p_vmspace->vm_map,
            (vm_offset_t)uio->uio_iov->iov_base, size, VM_PROT_READ,
            wpipe->pipe_map.ms, PIPENPAGES)) < 0)
                return (EFAULT);

/*
 * set up the control block
 */
        wpipe->pipe_map.npages = i;
        wpipe->pipe_map.pos =
            ((vm_offset_t) uio->uio_iov->iov_base) & PAGE_MASK;
        wpipe->pipe_map.cnt = size;

/*
 * and update the uio data
 */

        uio->uio_iov->iov_len -= size;
        uio->uio_iov->iov_base = (char *)uio->uio_iov->iov_base + size;
        if (uio->uio_iov->iov_len == 0)
                uio->uio_iov++;
        uio->uio_resid -= size;
        uio->uio_offset += size;
        return (0);
}

/*
 * unmap and unwire the process buffer
 */
static void
pipe_destroy_write_buffer(wpipe)
        struct pipe *wpipe;
{

        PIPE_LOCK_ASSERT(wpipe, MA_OWNED);
        vm_page_unhold_pages(wpipe->pipe_map.ms, wpipe->pipe_map.npages);
        wpipe->pipe_map.npages = 0;
}

/*
 * In the case of a signal, the writing process might go away.  This
 * code copies the data into the circular buffer so that the source
 * pages can be freed without loss of data.
 */
static void
pipe_clone_write_buffer(wpipe)
        struct pipe *wpipe;
{
        struct uio uio;
        struct iovec iov;
        int size;
        int pos;

        PIPE_LOCK_ASSERT(wpipe, MA_OWNED);
        size = wpipe->pipe_map.cnt;
        pos = wpipe->pipe_map.pos;

        wpipe->pipe_buffer.in = size;
        wpipe->pipe_buffer.out = 0;
        wpipe->pipe_buffer.cnt = size;
        wpipe->pipe_state &= ~PIPE_DIRECTW;

        PIPE_UNLOCK(wpipe);
        iov.iov_base = wpipe->pipe_buffer.buffer;
        iov.iov_len = size;
        uio.uio_iov = &iov;
        uio.uio_iovcnt = 1;
        uio.uio_offset = 0;
        uio.uio_resid = size;
        uio.uio_segflg = UIO_SYSSPACE;
        uio.uio_rw = UIO_READ;
        uio.uio_td = curthread;
        uiomove_fromphys(wpipe->pipe_map.ms, pos, size, &uio);
        PIPE_LOCK(wpipe);
        pipe_destroy_write_buffer(wpipe);
}

/*
 * This implements the pipe buffer write mechanism.  Note that only
 * a direct write OR a normal pipe write can be pending at any given time.
 * If there are any characters in the pipe buffer, the direct write will
 * be deferred until the receiving process grabs all of the bytes from
 * the pipe buffer.  Then the direct mapping write is set-up.
 */
static int
pipe_direct_write(wpipe, uio)
        struct pipe *wpipe;
        struct uio *uio;
{
        int error;

retry:
        PIPE_LOCK_ASSERT(wpipe, MA_OWNED);
        error = pipelock(wpipe, 1);
        if (error != 0)
                goto error1;
        if ((wpipe->pipe_state & PIPE_EOF) != 0) {
                error = EPIPE;
                pipeunlock(wpipe);
                goto error1;
        }
        while (wpipe->pipe_state & PIPE_DIRECTW) {
                if (wpipe->pipe_state & PIPE_WANTR) {
                        wpipe->pipe_state &= ~PIPE_WANTR;
                        wakeup(wpipe);
                }
                pipeselwakeup(wpipe);
                wpipe->pipe_state |= PIPE_WANTW;
                pipeunlock(wpipe);
                error = msleep(wpipe, PIPE_MTX(wpipe),
                    PRIBIO | PCATCH, "pipdww", 0);
                if (error)
                        goto error1;
                else
                        goto retry;
        }
        wpipe->pipe_map.cnt = 0;        /* transfer not ready yet */
        if (wpipe->pipe_buffer.cnt > 0) {
                if (wpipe->pipe_state & PIPE_WANTR) {
                        wpipe->pipe_state &= ~PIPE_WANTR;
                        wakeup(wpipe);
                }
                pipeselwakeup(wpipe);
                wpipe->pipe_state |= PIPE_WANTW;
                pipeunlock(wpipe);
                error = msleep(wpipe, PIPE_MTX(wpipe),
                    PRIBIO | PCATCH, "pipdwc", 0);
                if (error)
                        goto error1;
                else
                        goto retry;
        }

        wpipe->pipe_state |= PIPE_DIRECTW;

        PIPE_UNLOCK(wpipe);
        error = pipe_build_write_buffer(wpipe, uio);
        PIPE_LOCK(wpipe);
        if (error) {
                wpipe->pipe_state &= ~PIPE_DIRECTW;
                pipeunlock(wpipe);
                goto error1;
        }

        error = 0;
        while (!error && (wpipe->pipe_state & PIPE_DIRECTW)) {
                if (wpipe->pipe_state & PIPE_EOF) {
                        pipe_destroy_write_buffer(wpipe);
                        pipeselwakeup(wpipe);
                        pipeunlock(wpipe);
                        error = EPIPE;
                        goto error1;
                }
                if (wpipe->pipe_state & PIPE_WANTR) {
                        wpipe->pipe_state &= ~PIPE_WANTR;
                        wakeup(wpipe);
                }
                pipeselwakeup(wpipe);
                wpipe->pipe_state |= PIPE_WANTW;
                pipeunlock(wpipe);
                error = msleep(wpipe, PIPE_MTX(wpipe), PRIBIO | PCATCH,
                    "pipdwt", 0);
                pipelock(wpipe, 0);
        }

        if (wpipe->pipe_state & PIPE_EOF)
                error = EPIPE;
        if (wpipe->pipe_state & PIPE_DIRECTW) {
                /*
                 * this bit of trickery substitutes a kernel buffer for
                 * the process that might be going away.
                 */
                pipe_clone_write_buffer(wpipe);
        } else {
                pipe_destroy_write_buffer(wpipe);
        }
        pipeunlock(wpipe);
        return (error);

error1:
        wakeup(wpipe);
        return (error);
}
#endif

static int
pipe_write(fp, uio, active_cred, flags, td)
        struct file *fp;
        struct uio *uio;
        struct ucred *active_cred;
        struct thread *td;
        int flags;
{
        int error = 0;
        int desiredsize;
        ssize_t orig_resid;
        struct pipe *wpipe, *rpipe;

        rpipe = fp->f_data;
        wpipe = PIPE_PEER(rpipe);
        PIPE_LOCK(rpipe);
        error = pipelock(wpipe, 1);
        if (error) {
                PIPE_UNLOCK(rpipe);
                return (error);
        }
        /*
         * detect loss of pipe read side, issue SIGPIPE if lost.
         */
        if (wpipe->pipe_present != PIPE_ACTIVE ||
            (wpipe->pipe_state & PIPE_EOF)) {
                pipeunlock(wpipe);
                PIPE_UNLOCK(rpipe);
                return (EPIPE);
        }
#ifdef MAC
        error = mac_pipe_check_write(active_cred, wpipe->pipe_pair);
        if (error) {
                pipeunlock(wpipe);
                PIPE_UNLOCK(rpipe);
                return (error);
        }
#endif
        ++wpipe->pipe_busy;

        /* Choose a larger size if it's advantageous */
        desiredsize = max(SMALL_PIPE_SIZE, wpipe->pipe_buffer.size);
        while (desiredsize < wpipe->pipe_buffer.cnt + uio->uio_resid) {
                if (piperesizeallowed != 1)
                        break;
                if (amountpipekva > maxpipekva / 2)
                        break;
                if (desiredsize == BIG_PIPE_SIZE)
                        break;
                desiredsize = desiredsize * 2;
        }

        /* Choose a smaller size if we're in an OOM situation */
1252        if ((amountpipekva > (3 * maxpipekva) / 4) &&
1253                (wpipe->pipe_buffer.size > SMALL_PIPE_SIZE) &&
1254                (wpipe->pipe_buffer.cnt <= SMALL_PIPE_SIZE) &&
1255                (piperesizeallowed == 1))
1256                desiredsize = SMALL_PIPE_SIZE;
1257
1258        /* Resize if the above determined that a new size was necessary */
1259        if ((desiredsize != wpipe->pipe_buffer.size) &&
1260                ((wpipe->pipe_state & PIPE_DIRECTW) == 0)) {
1261                PIPE_UNLOCK(wpipe);
1262                pipespace(wpipe, desiredsize);
1263                PIPE_LOCK(wpipe);
1264        }
1265        if (wpipe->pipe_buffer.size == 0) {
1266                /*
1267                 * This can only happen for reverse direction use of pipes
1268                 * in a complete OOM situation.
1269                 */
1270                error = ENOMEM;
1271                --wpipe->pipe_busy;
1272                pipeunlock(wpipe);
1273                PIPE_UNLOCK(wpipe);
1274                return (error);
1275        }
1276
1277        pipeunlock(wpipe);
1278
1279        orig_resid = uio->uio_resid;
1280
1281        while (uio->uio_resid) {
1282                int space;
1283
1284                pipelock(wpipe, 0);
1285                if (wpipe->pipe_state & PIPE_EOF) {
1286                        pipeunlock(wpipe);
1287                        error = EPIPE;
1288                        break;
1289                }
1290#ifndef PIPE_NODIRECT
1291                /*
1292                 * If the transfer is large, we can gain performance if
1293                 * we do process-to-process copies directly.
1294                 * If the write is non-blocking, we don't use the
1295                 * direct write mechanism.
1296                 *
1297                 * The direct write mechanism will detect the reader going
1298                 * away on us.
1299                 */
1300                if (uio->uio_segflg == UIO_USERSPACE &&
1301                    uio->uio_iov->iov_len >= PIPE_MINDIRECT &&
1302                    wpipe->pipe_buffer.size >= PIPE_MINDIRECT &&
1303                    (fp->f_flag & FNONBLOCK) == 0) {
1304                        pipeunlock(wpipe);
1305                        error = pipe_direct_write(wpipe, uio);
1306                        if (error)
1307                                break;
1308                        continue;
1309                }
1310#endif
1311
1312                /*
1313                 * Pipe buffered writes cannot be coincidental with
1314                 * direct writes.  We wait until the currently executing
1315                 * direct write is completed before we start filling the
1316                 * pipe buffer.  We break out if a signal occurs or the
1317                 * reader goes away.
1318                 */
1319                if (wpipe->pipe_state & PIPE_DIRECTW) {
1320                        if (wpipe->pipe_state & PIPE_WANTR) {
1321                                wpipe->pipe_state &= ~PIPE_WANTR;
1322                                wakeup(wpipe);
1323                        }
1324                        pipeselwakeup(wpipe);
1325                        wpipe->pipe_state |= PIPE_WANTW;
1326                        pipeunlock(wpipe);
1327                        error = msleep(wpipe, PIPE_MTX(rpipe), PRIBIO | PCATCH,
1328                            "pipbww", 0);
1329                        if (error)
1330                                break;
1331                        else
1332                                continue;
1333                }
1334
1335                space = wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt;
1336
1337                /* Writes of size <= PIPE_BUF must be atomic. */
1338                if ((space < uio->uio_resid) && (orig_resid <= PIPE_BUF))
1339                        space = 0;
1340
1341                if (space > 0) {
1342                        int size;       /* Transfer size */
1343                        int segsize;    /* first segment to transfer */
1344
1345                        /*
1346                         * Transfer size is minimum of uio transfer
1347                         * and free space in pipe buffer.
1348                         */
1349                        if (space > uio->uio_resid)
1350                                size = uio->uio_resid;
1351                        else
1352                                size = space;
1353                        /*
1354                         * First segment to transfer is minimum of
1355                         * transfer size and contiguous space in
1356                         * pipe buffer.  If first segment to transfer
1357                         * is less than the transfer size, we've got
1358                         * a wraparound in the buffer.
1359                         */
1360                        segsize = wpipe->pipe_buffer.size -
1361                                wpipe->pipe_buffer.in;
1362                        if (segsize > size)
1363                                segsize = size;
1364
1365                        /* Transfer first segment */
1366
1367                        PIPE_UNLOCK(rpipe);
1368                        error = uiomove(&wpipe->pipe_buffer.buffer[wpipe->pipe_buffer.in],
1369                                        segsize, uio);
1370                        PIPE_LOCK(rpipe);
1371
1372                        if (error == 0 && segsize < size) {
1373                                KASSERT(wpipe->pipe_buffer.in + segsize ==
1374                                        wpipe->pipe_buffer.size,
1375                                        ("Pipe buffer wraparound disappeared"));
1376                                /*
1377                                 * Transfer remaining part now, to
1378                                 * support atomic writes.  Wraparound
1379                                 * happened.
1380                                 */
1381
1382                                PIPE_UNLOCK(rpipe);
1383                                error = uiomove(
1384                                    &wpipe->pipe_buffer.buffer[0],
1385                                    size - segsize, uio);
1386                                PIPE_LOCK(rpipe);
1387                        }
1388                        if (error == 0) {
1389                                wpipe->pipe_buffer.in += size;
1390                                if (wpipe->pipe_buffer.in >=
1391                                    wpipe->pipe_buffer.size) {
1392                                        KASSERT(wpipe->pipe_buffer.in ==
1393                                                size - segsize +
1394                                                wpipe->pipe_buffer.size,
1395                                                ("Expected wraparound bad"));
1396                                        wpipe->pipe_buffer.in = size - segsize;
1397                                }
1398
1399                                wpipe->pipe_buffer.cnt += size;
1400                                KASSERT(wpipe->pipe_buffer.cnt <=
1401                                        wpipe->pipe_buffer.size,
1402                                        ("Pipe buffer overflow"));
1403                        }
1404                        pipeunlock(wpipe);
1405                        if (error != 0)
1406                                break;
1407                } else {
1408                        /*
1409                         * If the "read-side" has been blocked, wake it up now.
1410                         */
1411                        if (wpipe->pipe_state & PIPE_WANTR) {
1412                                wpipe->pipe_state &= ~PIPE_WANTR;
1413                                wakeup(wpipe);
1414                        }
1415
1416                        /*
1417                         * don't block on non-blocking I/O
1418                         */
1419#ifndef __rtems__
1420                        if (fp->f_flag & FNONBLOCK) {
1421#else /* __rtems__ */
1422                        if (rtems_bsd_libio_flags_to_fflag(fp->f_io.flags) & FNONBLOCK) {
1423#endif /* __rtems__ */
1424                                error = EAGAIN;
1425                                pipeunlock(wpipe);
1426                                break;
1427                        }
1428
1429                        /*
1430                         * We have no more space and have something to offer,
1431                         * wake up select/poll.
1432                         */
1433                        pipeselwakeup(wpipe);
1434
1435                        wpipe->pipe_state |= PIPE_WANTW;
1436                        pipeunlock(wpipe);
1437                        error = msleep(wpipe, PIPE_MTX(rpipe),
1438                            PRIBIO | PCATCH, "pipewr", 0);
1439                        if (error != 0)
1440                                break;
1441                }
1442        }
1443
1444        pipelock(wpipe, 0);
1445        --wpipe->pipe_busy;
1446
1447        if ((wpipe->pipe_busy == 0) && (wpipe->pipe_state & PIPE_WANT)) {
1448                wpipe->pipe_state &= ~(PIPE_WANT | PIPE_WANTR);
1449                wakeup(wpipe);
1450        } else if (wpipe->pipe_buffer.cnt > 0) {
1451                /*
1452                 * If we have put any characters in the buffer, we wake up
1453                 * the reader.
1454                 */
1455                if (wpipe->pipe_state & PIPE_WANTR) {
1456                        wpipe->pipe_state &= ~PIPE_WANTR;
1457                        wakeup(wpipe);
1458                }
1459        }
1460
1461        /*
1462         * Don't return EPIPE if any byte was written.
1463         * EINTR and other interrupts are handled by generic I/O layer.
1464         * Do not pretend that I/O succeeded for obvious user error
1465         * like EFAULT.
1466         */
1467        if (uio->uio_resid != orig_resid && error == EPIPE)
1468                error = 0;
1469
1470        if (error == 0)
1471#ifndef __rtems__
1472                vfs_timestamp(&wpipe->pipe_mtime);
1473#else /* __rtems__ */
1474                wpipe->pipe_mtime.tv_sec = time(NULL);
1475#endif /* __rtems__ */
1476
1477        /*
1478         * We have something to offer,
1479         * wake up select/poll.
1480         */
1481        if (wpipe->pipe_buffer.cnt)
1482                pipeselwakeup(wpipe);
1483
1484        pipeunlock(wpipe);
1485        PIPE_UNLOCK(rpipe);
1486        return (error);
1487}
1488#ifdef __rtems__
1489static ssize_t
1490rtems_bsd_pipe_write(rtems_libio_t *iop, const void *buffer, size_t count)
1491{
1492        struct thread *td = rtems_bsd_get_curthread_or_null();
1493        struct file *fp = rtems_bsd_iop_to_fp(iop);
1494        struct iovec iov = {
1495                .iov_base = __DECONST(void *, buffer),
1496                .iov_len = count
1497        };
1498        struct uio auio = {
1499                .uio_iov = &iov,
1500                .uio_iovcnt = 1,
1501                .uio_offset = 0,
1502                .uio_resid = count,
1503                .uio_segflg = UIO_USERSPACE,
1504                .uio_rw = UIO_WRITE,
1505                .uio_td = td
1506        };
1507        int error;
1508
1509        if (td != NULL) {
1510                error = pipe_write(fp, &auio, NULL, 0, NULL);
1511        } else {
1512                error = ENOMEM;
1513        }
1514
1515        if (error == 0) {
1516                return (count - auio.uio_resid);
1517        } else {
1518                rtems_set_errno_and_return_minus_one(error);
1519        }
1520}
1521
static ssize_t
rtems_bsd_pipe_writev(rtems_libio_t *iop, const struct iovec *iov,
    int iovcnt, ssize_t total)
{
        struct thread *td = rtems_bsd_get_curthread_or_null();
        struct file *fp = rtems_bsd_iop_to_fp(iop);
        struct uio auio = {
                .uio_iov = __DECONST(struct iovec *, iov),
                .uio_iovcnt = iovcnt,
                .uio_offset = 0,
                .uio_resid = total,
                .uio_segflg = UIO_USERSPACE,
                .uio_rw = UIO_WRITE,
                .uio_td = td
        };
        int error;

        if (td != NULL) {
                error = pipe_write(fp, &auio, NULL, 0, NULL);
        } else {
                error = ENOMEM;
        }

        if (error == 0) {
                return (total - auio.uio_resid);
        } else {
                rtems_set_errno_and_return_minus_one(error);
        }
}
#endif /* __rtems__ */

/* ARGSUSED */
#ifndef __rtems__
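/*
 * Truncate is only meaningful for named pipes, where it is delegated to
 * the vnode operations; anonymous pipes reject it via invfo_truncate().
 */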
static int
pipe_truncate(struct file *fp, off_t length, struct ucred *active_cred,
    struct thread *td)
{
        struct pipe *cpipe;
        int error;

        cpipe = fp->f_data;
        if (cpipe->pipe_state & PIPE_NAMED)
                error = vnops.fo_truncate(fp, length, active_cred, td);
        else
                error = invfo_truncate(fp, length, active_cred, td);
        return (error);
}
#endif /* __rtems__ */

/*
 * We implement a very minimal set of ioctls for compatibility with
 * sockets.
 */
static int
pipe_ioctl(struct file *fp, u_long cmd, void *data, struct ucred *active_cred,
    struct thread *td)
{
        struct pipe *mpipe = fp->f_data;
        int error;

        PIPE_LOCK(mpipe);

#ifdef MAC
        error = mac_pipe_check_ioctl(active_cred, mpipe->pipe_pair, cmd, data);
        if (error) {
                PIPE_UNLOCK(mpipe);
                return (error);
        }
#endif

        error = 0;
        switch (cmd) {

        case FIONBIO:
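                /*
                 * Non-blocking mode is tracked by the file layer
                 * (FNONBLOCK), so there is nothing to do here; the
                 * command is simply accepted.
                 */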
                break;

        case FIOASYNC:
                if (*(int *)data) {
                        mpipe->pipe_state |= PIPE_ASYNC;
                } else {
                        mpipe->pipe_state &= ~PIPE_ASYNC;
                }
                break;

        case FIONREAD:
#ifndef __rtems__
                if (!(fp->f_flag & FREAD)) {
#else /* __rtems__ */
                if (!(rtems_bsd_libio_flags_to_fflag(fp->f_io.flags) &
                    FREAD)) {
#endif /* __rtems__ */
                        *(int *)data = 0;
                        PIPE_UNLOCK(mpipe);
                        return (0);
                }
                if (mpipe->pipe_state & PIPE_DIRECTW)
                        *(int *)data = mpipe->pipe_map.cnt;
                else
                        *(int *)data = mpipe->pipe_buffer.cnt;
                break;

        case FIOSETOWN:
                PIPE_UNLOCK(mpipe);
                error = fsetown(*(int *)data, &mpipe->pipe_sigio);
                goto out_unlocked;

        case FIOGETOWN:
                *(int *)data = fgetown(&mpipe->pipe_sigio);
                break;

        /* This is deprecated, FIOSETOWN should be used instead. */
        case TIOCSPGRP:
                PIPE_UNLOCK(mpipe);
                error = fsetown(-(*(int *)data), &mpipe->pipe_sigio);
                goto out_unlocked;

        /* This is deprecated, FIOGETOWN should be used instead. */
        case TIOCGPGRP:
                *(int *)data = -fgetown(&mpipe->pipe_sigio);
                break;

        default:
                error = ENOTTY;
                break;
        }
        PIPE_UNLOCK(mpipe);
out_unlocked:
        return (error);
}
#ifdef __rtems__
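/*
 * RTEMS libio adapter for ioctl(2) on a pipe.  A minimal usage sketch
 * from application code, assuming pfd[] was filled in by pipe(2):
 *
 *      int avail;
 *
 *      if (ioctl(pfd[0], FIONREAD, &avail) == 0)
 *              printf("%d bytes buffered\n", avail);
 */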
static int
rtems_bsd_pipe_ioctl(rtems_libio_t *iop, ioctl_command_t request, void *buffer)
{
        struct thread *td = rtems_bsd_get_curthread_or_null();
        struct file *fp = rtems_bsd_iop_to_fp(iop);
        int error;

        if (td != NULL) {
                error = pipe_ioctl(fp, request, buffer, NULL, td);
        } else {
                error = ENOMEM;
        }

        return rtems_bsd_error_to_status_and_errno(error);
}
#endif /* __rtems__ */

static int
pipe_poll(struct file *fp, int events, struct ucred *active_cred,
    struct thread *td)
{
        struct pipe *rpipe;
        struct pipe *wpipe;
        int levents, revents;
#ifdef MAC
        int error;
#endif

        revents = 0;
        rpipe = fp->f_data;
        wpipe = PIPE_PEER(rpipe);
        PIPE_LOCK(rpipe);
#ifdef MAC
        error = mac_pipe_check_poll(active_cred, rpipe->pipe_pair);
        if (error)
                goto locked_error;
#endif
#ifndef __rtems__
        if (fp->f_flag & FREAD && events & (POLLIN | POLLRDNORM))
#else /* __rtems__ */
        if (rtems_bsd_libio_flags_to_fflag(fp->f_io.flags) & FREAD &&
            events & (POLLIN | POLLRDNORM))
#endif /* __rtems__ */
                if ((rpipe->pipe_state & PIPE_DIRECTW) ||
                    (rpipe->pipe_buffer.cnt > 0))
                        revents |= events & (POLLIN | POLLRDNORM);

#ifndef __rtems__
        if (fp->f_flag & FWRITE && events & (POLLOUT | POLLWRNORM))
#else /* __rtems__ */
        if (rtems_bsd_libio_flags_to_fflag(fp->f_io.flags) & FWRITE &&
            events & (POLLOUT | POLLWRNORM))
#endif /* __rtems__ */
                if (wpipe->pipe_present != PIPE_ACTIVE ||
                    (wpipe->pipe_state & PIPE_EOF) ||
                    (((wpipe->pipe_state & PIPE_DIRECTW) == 0) &&
                     ((wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt) >=
                      PIPE_BUF || wpipe->pipe_buffer.size == 0)))
                        revents |= events & (POLLOUT | POLLWRNORM);

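        /*
         * For named pipes, read events ignore EOF while no writer has
         * connected since this reader opened the FIFO, so select(2) and
         * poll(2) block instead of reporting end-of-file.  (The RTEMS
         * build omits the FreeBSD f_seqcount/pipe_wgen generation check.)
         */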
        levents = events &
            (POLLIN | POLLINIGNEOF | POLLPRI | POLLRDNORM | POLLRDBAND);
#ifndef __rtems__
        if (rpipe->pipe_state & PIPE_NAMED && fp->f_flag & FREAD && levents &&
            fp->f_seqcount == rpipe->pipe_wgen)
#else /* __rtems__ */
        if (rpipe->pipe_state & PIPE_NAMED &&
            rtems_bsd_libio_flags_to_fflag(fp->f_io.flags) & FREAD && levents)
#endif /* __rtems__ */
                events |= POLLINIGNEOF;

        if ((events & POLLINIGNEOF) == 0) {
                if (rpipe->pipe_state & PIPE_EOF) {
                        revents |= (events & (POLLIN | POLLRDNORM));
                        if (wpipe->pipe_present != PIPE_ACTIVE ||
                            (wpipe->pipe_state & PIPE_EOF))
                                revents |= POLLHUP;
                }
        }

        if (revents == 0) {
#ifndef __rtems__
                if (fp->f_flag & FREAD && events & (POLLIN | POLLRDNORM)) {
#else /* __rtems__ */
                if (rtems_bsd_libio_flags_to_fflag(fp->f_io.flags) & FREAD &&
                    events & (POLLIN | POLLRDNORM)) {
#endif /* __rtems__ */
                        selrecord(td, &rpipe->pipe_sel);
                        if (SEL_WAITING(&rpipe->pipe_sel))
                                rpipe->pipe_state |= PIPE_SEL;
                }

#ifndef __rtems__
                if (fp->f_flag & FWRITE && events & (POLLOUT | POLLWRNORM)) {
#else /* __rtems__ */
                if (rtems_bsd_libio_flags_to_fflag(fp->f_io.flags) & FWRITE &&
                    events & (POLLOUT | POLLWRNORM)) {
#endif /* __rtems__ */
                        selrecord(td, &wpipe->pipe_sel);
                        if (SEL_WAITING(&wpipe->pipe_sel))
                                wpipe->pipe_state |= PIPE_SEL;
                }
        }
#ifdef MAC
locked_error:
#endif
        PIPE_UNLOCK(rpipe);

        return (revents);
}
#ifdef __rtems__
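/*
 * RTEMS libio adapter for poll(2)/select(2).  Note that pipe_poll()
 * returns a revents mask rather than an errno value; ENOMEM here is the
 * fallback when no BSD thread context is available.
 */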
static int
rtems_bsd_pipe_poll(rtems_libio_t *iop, int events)
{
        struct thread *td = rtems_bsd_get_curthread_or_null();
        struct file *fp = rtems_bsd_iop_to_fp(iop);
        int error;

        if (td != NULL) {
                error = pipe_poll(fp, events, NULL, td);
        } else {
                error = ENOMEM;
        }

        return error;
}
#endif /* __rtems__ */

/*
 * We shouldn't need locks here as we're doing a read and this should
 * be a natural race.
 */
#ifndef __rtems__
static int
pipe_stat(struct file *fp, struct stat *ub, struct ucred *active_cred,
    struct thread *td)
{
        struct pipe *pipe;
#else /* __rtems__ */
static int
pipe_stat(struct pipe *pipe, struct stat *ub)
{
#endif /* __rtems__ */
        int new_unr;
#ifdef MAC
        int error;
#endif

#ifndef __rtems__
        pipe = fp->f_data;
#endif /* __rtems__ */
        PIPE_LOCK(pipe);
#ifdef MAC
        error = mac_pipe_check_stat(active_cred, pipe->pipe_pair);
        if (error) {
                PIPE_UNLOCK(pipe);
                return (error);
        }
#endif

        /* For named pipes ask the underlying filesystem. */
        if (pipe->pipe_state & PIPE_NAMED) {
                PIPE_UNLOCK(pipe);
#ifndef __rtems__
                return (vnops.fo_stat(fp, ub, active_cred, td));
#else /* __rtems__ */
                return (ENXIO);
#endif /* __rtems__ */
        }

        /*
         * Lazily allocate an inode number for the pipe.  Most pipe
         * users do not call fstat(2) on the pipe, which means that
         * postponing the inode allocation until it must be returned
         * to userland is useful.  If alloc_unr failed, assign st_ino
         * zero instead of returning an error.
         * Special pipe_ino values:
         *  -1 - not yet initialized;
         *  0  - alloc_unr failed, return 0 as st_ino forever.
         */
        if (pipe->pipe_ino == (ino_t)-1) {
                new_unr = alloc_unr(pipeino_unr);
                if (new_unr != -1)
                        pipe->pipe_ino = new_unr;
                else
                        pipe->pipe_ino = 0;
        }
        PIPE_UNLOCK(pipe);

#ifndef __rtems__
        bzero(ub, sizeof(*ub));
#endif /* __rtems__ */
        ub->st_mode = S_IFIFO;
        ub->st_blksize = PAGE_SIZE;
        if (pipe->pipe_state & PIPE_DIRECTW)
                ub->st_size = pipe->pipe_map.cnt;
        else
                ub->st_size = pipe->pipe_buffer.cnt;
        ub->st_blocks = howmany(ub->st_size, ub->st_blksize);
        ub->st_atim = pipe->pipe_atime;
        ub->st_mtim = pipe->pipe_mtime;
        ub->st_ctim = pipe->pipe_ctime;
#ifndef __rtems__
        ub->st_uid = fp->f_cred->cr_uid;
        ub->st_gid = fp->f_cred->cr_gid;
        ub->st_dev = pipedev_ino;
        ub->st_ino = pipe->pipe_ino;
#else /* __rtems__ */
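        /*
         * RTEMS has no per-file credentials; report the configured
         * default IDs and a fixed, arbitrarily chosen device number
         * reserved for pipes.
         */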
        ub->st_uid = BSD_DEFAULT_UID;
        ub->st_gid = BSD_DEFAULT_GID;
        ub->st_dev = rtems_filesystem_make_dev_t(0xcc494cd6U, 0x1d970b4dU);
        ub->st_ino = pipe->pipe_ino;
#endif /* __rtems__ */
        /*
         * Left as 0: st_nlink, st_rdev, st_flags, st_gen.
         */
        return (0);
}
#ifdef __rtems__
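/*
 * RTEMS fstat(2) handler: the pipe is recovered from the filesystem
 * location and handed to pipe_stat() directly, since no struct file is
 * involved on this path.
 */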
static int
rtems_bsd_pipe_stat(
        const rtems_filesystem_location_info_t *loc,
        struct stat *buf
)
{
        struct pipe *pipe = rtems_bsd_loc_to_f_data(loc);
        int error = pipe_stat(pipe, buf);

        return rtems_bsd_error_to_status_and_errno(error);
}
#endif /* __rtems__ */

/* ARGSUSED */
static int
pipe_close(struct file *fp, struct thread *td)
{

#ifndef __rtems__
        if (fp->f_vnode != NULL)
                return vnops.fo_close(fp, td);
        fp->f_ops = &badfileops;
#else /* __rtems__ */
        fp->f_io.pathinfo.handlers = &rtems_filesystem_handlers_default;
#endif /* __rtems__ */
        pipe_dtor(fp->f_data);
        fp->f_data = NULL;
        return (0);
}

#ifndef __rtems__
static int
pipe_chmod(struct file *fp, mode_t mode, struct ucred *active_cred,
    struct thread *td)
{
        struct pipe *cpipe;
        int error;

        cpipe = fp->f_data;
        if (cpipe->pipe_state & PIPE_NAMED)
                error = vn_chmod(fp, mode, active_cred, td);
        else
                error = invfo_chmod(fp, mode, active_cred, td);
        return (error);
}

static int
pipe_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred,
    struct thread *td)
{
        struct pipe *cpipe;
        int error;

        cpipe = fp->f_data;
        if (cpipe->pipe_state & PIPE_NAMED)
                error = vn_chown(fp, uid, gid, active_cred, td);
        else
                error = invfo_chown(fp, uid, gid, active_cred, td);
        return (error);
}

static int
pipe_fill_kinfo(struct file *fp, struct kinfo_file *kif, struct filedesc *fdp)
{
        struct pipe *pi;

        if (fp->f_type == DTYPE_FIFO)
                return (vn_fill_kinfo(fp, kif, fdp));
        kif->kf_type = KF_TYPE_PIPE;
        pi = fp->f_data;
        kif->kf_un.kf_pipe.kf_pipe_addr = (uintptr_t)pi;
        kif->kf_un.kf_pipe.kf_pipe_peer = (uintptr_t)pi->pipe_peer;
        kif->kf_un.kf_pipe.kf_pipe_buffer_cnt = pi->pipe_buffer.cnt;
        return (0);
}
#endif /* __rtems__ */

static void
pipe_free_kmem(struct pipe *cpipe)
{

        KASSERT(!mtx_owned(PIPE_MTX(cpipe)),
            ("pipe_free_kmem: pipe mutex locked"));

        if (cpipe->pipe_buffer.buffer != NULL) {
                atomic_subtract_long(&amountpipekva, cpipe->pipe_buffer.size);
#ifndef __rtems__
                vm_map_remove(pipe_map,
                    (vm_offset_t)cpipe->pipe_buffer.buffer,
                    (vm_offset_t)cpipe->pipe_buffer.buffer +
                    cpipe->pipe_buffer.size);
#else /* __rtems__ */
                free(cpipe->pipe_buffer.buffer, M_TEMP);
#endif /* __rtems__ */
                cpipe->pipe_buffer.buffer = NULL;
        }
#ifndef PIPE_NODIRECT
        cpipe->pipe_map.cnt = 0;
        cpipe->pipe_map.pos = 0;
        cpipe->pipe_map.npages = 0;
#endif
}

/*
 * Shut down the pipe.
 */
static void
pipeclose(struct pipe *cpipe)
{
        struct pipepair *pp;
        struct pipe *ppipe;

        KASSERT(cpipe != NULL, ("pipeclose: cpipe == NULL"));

        PIPE_LOCK(cpipe);
        pipelock(cpipe, 0);
        pp = cpipe->pipe_pair;

        pipeselwakeup(cpipe);

        /*
         * If the other side is blocked, wake it up saying that
         * we want to close it down.
         */
        cpipe->pipe_state |= PIPE_EOF;
        while (cpipe->pipe_busy) {
                wakeup(cpipe);
                cpipe->pipe_state |= PIPE_WANT;
                pipeunlock(cpipe);
                msleep(cpipe, PIPE_MTX(cpipe), PRIBIO, "pipecl", 0);
                pipelock(cpipe, 0);
        }

        /*
         * Disconnect from peer, if any.
         */
        ppipe = cpipe->pipe_peer;
        if (ppipe->pipe_present == PIPE_ACTIVE) {
                pipeselwakeup(ppipe);

                ppipe->pipe_state |= PIPE_EOF;
                wakeup(ppipe);
                KNOTE_LOCKED(&ppipe->pipe_sel.si_note, 0);
        }

        /*
         * Mark this endpoint as free.  Release kmem resources.  We
         * don't mark this endpoint as unused until we've finished
         * doing that, or the pipe might disappear out from under
         * us.
         */
        PIPE_UNLOCK(cpipe);
        pipe_free_kmem(cpipe);
        PIPE_LOCK(cpipe);
        cpipe->pipe_present = PIPE_CLOSING;
        pipeunlock(cpipe);

        /*
         * knlist_clear() may sleep dropping the PIPE_MTX.  Set
         * PIPE_FINALIZED, which allows the other end to free the
         * pipe_pair, only after the knotes are completely dismantled.
         */
        knlist_clear(&cpipe->pipe_sel.si_note, 1);
        cpipe->pipe_present = PIPE_FINALIZED;
        seldrain(&cpipe->pipe_sel);
        knlist_destroy(&cpipe->pipe_sel.si_note);

        /*
         * If both endpoints are now closed, release the memory for the
         * pipe pair.  If not, unlock.
         */
        if (ppipe->pipe_present == PIPE_FINALIZED) {
                PIPE_UNLOCK(cpipe);
#ifdef MAC
                mac_pipe_destroy(pp);
#endif
                uma_zfree(pipe_zone, cpipe->pipe_pair);
        } else
                PIPE_UNLOCK(cpipe);
}

/*ARGSUSED*/
static int
pipe_kqfilter(struct file *fp, struct knote *kn)
{
        struct pipe *cpipe;

        /*
         * If a filter is requested that is not supported by this file
         * descriptor, don't return an error, but also don't ever generate an
         * event.
         */
#ifndef __rtems__
        if ((kn->kn_filter == EVFILT_READ) && !(fp->f_flag & FREAD)) {
#else /* __rtems__ */
        if ((kn->kn_filter == EVFILT_READ) &&
            !(rtems_bsd_libio_flags_to_fflag(fp->f_io.flags) & FREAD)) {
#endif /* __rtems__ */
                kn->kn_fop = &pipe_nfiltops;
                return (0);
        }
#ifndef __rtems__
        if ((kn->kn_filter == EVFILT_WRITE) && !(fp->f_flag & FWRITE)) {
#else /* __rtems__ */
        if ((kn->kn_filter == EVFILT_WRITE) &&
            !(rtems_bsd_libio_flags_to_fflag(fp->f_io.flags) & FWRITE)) {
#endif /* __rtems__ */
                kn->kn_fop = &pipe_nfiltops;
                return (0);
        }
        cpipe = fp->f_data;
        PIPE_LOCK(cpipe);
        switch (kn->kn_filter) {
        case EVFILT_READ:
                kn->kn_fop = &pipe_rfiltops;
                break;
        case EVFILT_WRITE:
                kn->kn_fop = &pipe_wfiltops;
                if (cpipe->pipe_peer->pipe_present != PIPE_ACTIVE) {
                        /* other end of pipe has been closed */
                        PIPE_UNLOCK(cpipe);
                        return (EPIPE);
                }
                cpipe = PIPE_PEER(cpipe);
                break;
        default:
                PIPE_UNLOCK(cpipe);
                return (EINVAL);
        }

        kn->kn_hook = cpipe;
        knlist_add(&cpipe->pipe_sel.si_note, kn, 1);
        PIPE_UNLOCK(cpipe);
        return (0);
}
#ifdef __rtems__
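/*
 * RTEMS kqueue attach hook; no thread context is needed, so the request
 * is forwarded to pipe_kqfilter() unchanged.
 */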
int
rtems_bsd_pipe_kqfilter(rtems_libio_t *iop, struct knote *kn)
{
        struct file *fp = rtems_bsd_iop_to_fp(iop);

        return pipe_kqfilter(fp, kn);
}
#endif /* __rtems__ */

static void
filt_pipedetach(struct knote *kn)
{
        struct pipe *cpipe = kn->kn_hook;

        PIPE_LOCK(cpipe);
        knlist_remove(&cpipe->pipe_sel.si_note, kn, 1);
        PIPE_UNLOCK(cpipe);
}

/*ARGSUSED*/
static int
filt_piperead(struct knote *kn, long hint)
{
        struct pipe *rpipe = kn->kn_hook;
        struct pipe *wpipe = rpipe->pipe_peer;
        int ret;

        PIPE_LOCK_ASSERT(rpipe, MA_OWNED);
        kn->kn_data = rpipe->pipe_buffer.cnt;
        if ((kn->kn_data == 0) && (rpipe->pipe_state & PIPE_DIRECTW))
                kn->kn_data = rpipe->pipe_map.cnt;

        if ((rpipe->pipe_state & PIPE_EOF) ||
            wpipe->pipe_present != PIPE_ACTIVE ||
            (wpipe->pipe_state & PIPE_EOF)) {
                kn->kn_flags |= EV_EOF;
                return (1);
        }
        ret = kn->kn_data > 0;
        return ret;
}

/*ARGSUSED*/
static int
filt_pipewrite(struct knote *kn, long hint)
{
        struct pipe *wpipe;

        wpipe = kn->kn_hook;
        PIPE_LOCK_ASSERT(wpipe, MA_OWNED);
        if (wpipe->pipe_present != PIPE_ACTIVE ||
            (wpipe->pipe_state & PIPE_EOF)) {
                kn->kn_data = 0;
                kn->kn_flags |= EV_EOF;
                return (1);
        }
        kn->kn_data = (wpipe->pipe_buffer.size > 0) ?
            (wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt) : PIPE_BUF;
        if (wpipe->pipe_state & PIPE_DIRECTW)
                kn->kn_data = 0;

        return (kn->kn_data >= PIPE_BUF);
}

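/*
 * Filter ops installed when the requested filter does not apply to this
 * descriptor (see pipe_kqfilter() above): detach is a no-op and the
 * event predicate never fires.
 */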
static void
filt_pipedetach_notsup(struct knote *kn)
{

}

static int
filt_pipenotsup(struct knote *kn, long hint)
{

        return (0);
}
#ifdef __rtems__
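/*
 * Anonymous pipes have no path in the file system namespace, so an
 * open(2) through this handler cannot succeed; report ENXIO.
 */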
static int
rtems_bsd_pipe_open(rtems_libio_t *iop, const char *path, int oflag,
    mode_t mode)
{
        return rtems_bsd_error_to_status_and_errno(ENXIO);
}

static int
rtems_bsd_pipe_close(rtems_libio_t *iop)
{
        struct file *fp = rtems_bsd_iop_to_fp(iop);
        int error = pipe_close(fp, NULL);

        return rtems_bsd_error_to_status_and_errno(error);
}

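/*
 * Only F_SETFL needs work here: the libio non-blocking flag is mapped
 * onto FIONBIO so that the BSD file state stays in sync.  Every other
 * command is accepted without further action.
 */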
static int
rtems_bsd_pipe_fcntl(rtems_libio_t *iop, int cmd)
{
        int error = 0;

        if (cmd == F_SETFL) {
                struct file *fp = rtems_bsd_iop_to_fp(iop);
                int nbio = iop->flags & LIBIO_FLAGS_NO_DELAY;

                error = pipe_ioctl(fp, FIONBIO, &nbio, NULL, NULL);
        }

        return rtems_bsd_error_to_status_and_errno(error);
}
#endif /* __rtems__ */