source: rtems-libbsd/freebsd/sys/kern/sys_pipe.c @ 3489e3b

Last change on this file: 3489e3b, checked in by Sebastian Huber <sebastian.huber@…> on 08/22/18 at 12:59:50

Update to FreeBSD head 2018-09-17

Git mirror commit 6c2192b1ef8c50788c751f878552526800b1e319.

Update #3472.

#include <machine/rtems-bsd-kernel-space.h>

/*-
 * SPDX-License-Identifier: BSD-4-Clause
 *
 * Copyright (c) 1996 John S. Dyson
 * Copyright (c) 2012 Giovanni Trematerra
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 * 4. Modifications may be freely made to this file if the above conditions
 *    are met.
 */

/*
 * This file contains a high-performance replacement for the socket-based
 * pipes scheme originally used in FreeBSD/4.4Lite.  It does not support
 * all features of sockets, but does do everything that pipes normally
 * do.
 */

/*
 * This code has two modes of operation, a small write mode and a large
 * write mode.  The small write mode acts like conventional pipes with
 * a kernel buffer.  If the buffer is less than PIPE_MINDIRECT, then the
 * "normal" pipe buffering is done.  If the buffer is between PIPE_MINDIRECT
 * and PIPE_SIZE in size, the sending process pins the underlying pages in
 * memory, and the receiving process copies directly from these pinned pages
 * in the sending process.
 *
 * If the sending process receives a signal, it is possible that it will
 * go away, and certainly its address space can change, because control
 * is returned back to the user-mode side.  In that case, the pipe code
 * arranges to copy the buffer supplied by the user process, to a pageable
 * kernel buffer, and the receiving process will grab the data from the
 * pageable kernel buffer.  Since signals don't happen all that often,
 * the copy operation is normally eliminated.
 *
 * The constant PIPE_MINDIRECT is chosen to make sure that buffering will
 * happen for small transfers so that the system will not spend all of
 * its time context switching.
 *
 * In order to limit the resource use of pipes, two sysctls exist:
 *
 * kern.ipc.maxpipekva - This is a hard limit on the amount of pageable
 * address space available to us in pipe_map. This value is normally
 * autotuned, but may also be loader tuned.
 *
 * kern.ipc.pipekva - This read-only sysctl tracks the current amount of
 * memory in use by pipes.
 *
 * Based on how large pipekva is relative to maxpipekva, the following
 * will happen:
 *
 * 0% - 50%:
 *     New pipes are given 16K of memory backing, pipes may dynamically
 *     grow to as large as 64K where needed.
 * 50% - 75%:
 *     New pipes are given 4K (or PAGE_SIZE) of memory backing,
 *     existing pipes may NOT grow.
 * 75% - 100%:
 *     New pipes are given 4K (or PAGE_SIZE) of memory backing,
 *     existing pipes will be shrunk down to 4K whenever possible.
 *
 * Resizing may be disabled by setting kern.ipc.piperesizeallowed=0.  If
 * that is set, the only resize that will occur is the 0 -> SMALL_PIPE_SIZE
 * resize which MUST occur for reverse-direction pipes when they are
 * first used.
 *
 * Additional information about the current state of pipes may be obtained
 * from kern.ipc.pipes, kern.ipc.pipefragretry, kern.ipc.pipeallocfail,
 * and kern.ipc.piperesizefail.
 *
 * Locking rules:  There are two locks present here:  A mutex, used via
 * PIPE_LOCK, and a flag, used via pipelock().  All locking is done via
 * the flag, as mutexes can not persist over uiomove.  The mutex
 * exists only to guard access to the flag, and is not in itself a
 * locking mechanism.  Also note that there is only a single mutex for
 * both directions of a pipe.
 *
 * As pipelock() may have to sleep before it can acquire the flag, it
 * is important to reread all data after a call to pipelock(); everything
 * in the structure may have changed.
 */
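
/*
 * Illustrative userspace sketch (an assumption for exposition, not part
 * of this file and excluded from the build): reading the two sysctls
 * described above with sysctlbyname(3).  The sysctl names are the real
 * ones; the program itself is only an example.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
        long pipekva, maxpipekva;
        size_t len;

        len = sizeof(maxpipekva);
        if (sysctlbyname("kern.ipc.maxpipekva", &maxpipekva, &len,
            NULL, 0) != 0)
                return (1);
        len = sizeof(pipekva);
        if (sysctlbyname("kern.ipc.pipekva", &pipekva, &len, NULL, 0) != 0)
                return (1);
        /* Above 50% usage new pipes start small; above 75% pipes shrink. */
        printf("pipe KVA: %ld of %ld (%ld%%)\n", pipekva, maxpipekva,
            maxpipekva != 0 ? pipekva * 100 / maxpipekva : 0);
        return (0);
}
#endif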

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/ttycom.h>
#include <sys/stat.h>
#include <sys/malloc.h>
#include <sys/poll.h>
#include <sys/selinfo.h>
#include <sys/signalvar.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/pipe.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/uio.h>
#include <sys/user.h>
#include <sys/event.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/uma.h>

/*
 * Use this define if you want to disable *fancy* VM things.  Expect an
 * approx 30% decrease in transfer rate.  This could be useful for
 * NetBSD or OpenBSD.
 */
/* #define PIPE_NODIRECT */

#define PIPE_PEER(pipe) \
        (((pipe)->pipe_state & PIPE_NAMED) ? (pipe) : ((pipe)->pipe_peer))

/*
 * interfaces to the outside world
 */
#ifndef __rtems__
static fo_rdwr_t        pipe_read;
static fo_rdwr_t        pipe_write;
static fo_truncate_t    pipe_truncate;
static fo_ioctl_t       pipe_ioctl;
static fo_poll_t        pipe_poll;
static fo_kqfilter_t    pipe_kqfilter;
static fo_stat_t        pipe_stat;
static fo_close_t       pipe_close;
static fo_chmod_t       pipe_chmod;
static fo_chown_t       pipe_chown;
static fo_fill_kinfo_t  pipe_fill_kinfo;

struct fileops pipeops = {
        .fo_read = pipe_read,
        .fo_write = pipe_write,
        .fo_truncate = pipe_truncate,
        .fo_ioctl = pipe_ioctl,
        .fo_poll = pipe_poll,
        .fo_kqfilter = pipe_kqfilter,
        .fo_stat = pipe_stat,
        .fo_close = pipe_close,
        .fo_chmod = pipe_chmod,
        .fo_chown = pipe_chown,
        .fo_sendfile = invfo_sendfile,
        .fo_fill_kinfo = pipe_fill_kinfo,
        .fo_flags = DFLAG_PASSABLE
};
#else /* __rtems__ */
#define PIPE_NODIRECT
#define PRIBIO                  (0)

static int rtems_bsd_pipe_open(rtems_libio_t *iop, const char *path,
    int oflag, mode_t mode);
static int rtems_bsd_pipe_close(rtems_libio_t *iop);
static ssize_t rtems_bsd_pipe_read(rtems_libio_t *iop, void *buffer,
    size_t count);
static ssize_t rtems_bsd_pipe_readv(rtems_libio_t *iop,
    const struct iovec *iov, int iovcnt, ssize_t total);
static ssize_t rtems_bsd_pipe_write(rtems_libio_t *iop, const void *buffer,
    size_t count);
static ssize_t rtems_bsd_pipe_writev(rtems_libio_t *iop,
    const struct iovec *iov, int iovcnt, ssize_t total);
static int rtems_bsd_pipe_ioctl(rtems_libio_t *iop, ioctl_command_t request,
    void *buffer);
static int rtems_bsd_pipe_stat(const rtems_filesystem_location_info_t *loc,
    struct stat *buf);
static int rtems_bsd_pipe_fcntl(rtems_libio_t *iop, int cmd);
static int rtems_bsd_pipe_poll(rtems_libio_t *iop, int events);
int rtems_bsd_pipe_kqfilter(rtems_libio_t *iop, struct knote *kn);

static const rtems_filesystem_file_handlers_r pipeops = {
        .open_h = rtems_bsd_pipe_open,
        .close_h = rtems_bsd_pipe_close,
        .read_h = rtems_bsd_pipe_read,
        .write_h = rtems_bsd_pipe_write,
        .ioctl_h = rtems_bsd_pipe_ioctl,
        .lseek_h = rtems_filesystem_default_lseek,
        .fstat_h = rtems_bsd_pipe_stat,
        .ftruncate_h = rtems_filesystem_default_ftruncate,
        .fsync_h = rtems_filesystem_default_fsync_or_fdatasync,
        .fdatasync_h = rtems_filesystem_default_fsync_or_fdatasync,
        .fcntl_h = rtems_bsd_pipe_fcntl,
        .poll_h = rtems_bsd_pipe_poll,
        .kqfilter_h = rtems_bsd_pipe_kqfilter,
        .readv_h = rtems_bsd_pipe_readv,
        .writev_h = rtems_bsd_pipe_writev,
        .mmap_h = rtems_filesystem_default_mmap
};

long    maxpipekva;                     /* Limit on pipe KVA */

#endif /* __rtems__ */

static void     filt_pipedetach(struct knote *kn);
static void     filt_pipedetach_notsup(struct knote *kn);
static int      filt_pipenotsup(struct knote *kn, long hint);
static int      filt_piperead(struct knote *kn, long hint);
static int      filt_pipewrite(struct knote *kn, long hint);

static struct filterops pipe_nfiltops = {
        .f_isfd = 1,
        .f_detach = filt_pipedetach_notsup,
        .f_event = filt_pipenotsup
};
static struct filterops pipe_rfiltops = {
        .f_isfd = 1,
        .f_detach = filt_pipedetach,
        .f_event = filt_piperead
};
static struct filterops pipe_wfiltops = {
        .f_isfd = 1,
        .f_detach = filt_pipedetach,
        .f_event = filt_pipewrite
};

/*
 * Default pipe buffer size(s), this can be kind-of large now because pipe
 * space is pageable.  The pipe code will try to maintain locality of
 * reference for performance reasons, so small amounts of outstanding I/O
 * will not wipe the cache.
 */
#define MINPIPESIZE (PIPE_SIZE/3)
#define MAXPIPESIZE (2*PIPE_SIZE/3)

static long amountpipekva;
static int pipefragretry;
static int pipeallocfail;
static int piperesizefail;
static int piperesizeallowed = 1;

SYSCTL_LONG(_kern_ipc, OID_AUTO, maxpipekva, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
           &maxpipekva, 0, "Pipe KVA limit");
SYSCTL_LONG(_kern_ipc, OID_AUTO, pipekva, CTLFLAG_RD,
           &amountpipekva, 0, "Pipe KVA usage");
SYSCTL_INT(_kern_ipc, OID_AUTO, pipefragretry, CTLFLAG_RD,
          &pipefragretry, 0, "Pipe allocation retries due to fragmentation");
SYSCTL_INT(_kern_ipc, OID_AUTO, pipeallocfail, CTLFLAG_RD,
          &pipeallocfail, 0, "Pipe allocation failures");
SYSCTL_INT(_kern_ipc, OID_AUTO, piperesizefail, CTLFLAG_RD,
          &piperesizefail, 0, "Pipe resize failures");
SYSCTL_INT(_kern_ipc, OID_AUTO, piperesizeallowed, CTLFLAG_RW,
          &piperesizeallowed, 0, "Pipe resizing allowed");

static void pipeinit(void *dummy __unused);
static void pipeclose(struct pipe *cpipe);
static void pipe_free_kmem(struct pipe *cpipe);
static void pipe_create(struct pipe *pipe, int backing);
static void pipe_paircreate(struct thread *td, struct pipepair **p_pp);
static __inline int pipelock(struct pipe *cpipe, int catch);
static __inline void pipeunlock(struct pipe *cpipe);
#ifndef PIPE_NODIRECT
static int pipe_build_write_buffer(struct pipe *wpipe, struct uio *uio);
static void pipe_destroy_write_buffer(struct pipe *wpipe);
static int pipe_direct_write(struct pipe *wpipe, struct uio *uio);
static void pipe_clone_write_buffer(struct pipe *wpipe);
#endif
static int pipespace(struct pipe *cpipe, int size);
static int pipespace_new(struct pipe *cpipe, int size);

static int      pipe_zone_ctor(void *mem, int size, void *arg, int flags);
static int      pipe_zone_init(void *mem, int size, int flags);
static void     pipe_zone_fini(void *mem, int size);

static uma_zone_t pipe_zone;
static struct unrhdr *pipeino_unr;
static dev_t pipedev_ino;

SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_ANY, pipeinit, NULL);

static void
pipeinit(void *dummy __unused)
{

        pipe_zone = uma_zcreate("pipe", sizeof(struct pipepair),
            pipe_zone_ctor, NULL, pipe_zone_init, pipe_zone_fini,
            UMA_ALIGN_PTR, 0);
        KASSERT(pipe_zone != NULL, ("pipe_zone not initialized"));
        pipeino_unr = new_unrhdr(1, INT32_MAX, NULL);
        KASSERT(pipeino_unr != NULL, ("pipe fake inodes not initialized"));
        pipedev_ino = devfs_alloc_cdp_inode();
        KASSERT(pipedev_ino > 0, ("pipe dev inode not initialized"));
}

static int
pipe_zone_ctor(void *mem, int size, void *arg, int flags)
{
        struct pipepair *pp;
        struct pipe *rpipe, *wpipe;

        KASSERT(size == sizeof(*pp), ("pipe_zone_ctor: wrong size"));

        pp = (struct pipepair *)mem;

        /*
         * We zero both pipe endpoints to make sure all the kmem pointers
         * are NULL, flag fields are zero'd, etc.  We timestamp both
         * endpoints with the same time.
         */
        rpipe = &pp->pp_rpipe;
        bzero(rpipe, sizeof(*rpipe));
#ifndef __rtems__
        vfs_timestamp(&rpipe->pipe_ctime);
#else /* __rtems__ */
        rpipe->pipe_ctime.tv_sec = time(NULL);
#endif /* __rtems__ */
        rpipe->pipe_atime = rpipe->pipe_mtime = rpipe->pipe_ctime;

        wpipe = &pp->pp_wpipe;
        bzero(wpipe, sizeof(*wpipe));
        wpipe->pipe_ctime = rpipe->pipe_ctime;
        wpipe->pipe_atime = wpipe->pipe_mtime = rpipe->pipe_ctime;

        rpipe->pipe_peer = wpipe;
        rpipe->pipe_pair = pp;
        wpipe->pipe_peer = rpipe;
        wpipe->pipe_pair = pp;

        /*
         * Mark both endpoints as present; they will later get free'd
         * one at a time.  When both are free'd, then the whole pair
         * is released.
         */
        rpipe->pipe_present = PIPE_ACTIVE;
        wpipe->pipe_present = PIPE_ACTIVE;

        /*
         * Eventually, the MAC Framework may initialize the label
         * in ctor or init, but for now we do it elsewhere to avoid
         * blocking in ctor or init.
         */
        pp->pp_label = NULL;

        return (0);
}

static int
pipe_zone_init(void *mem, int size, int flags)
{
        struct pipepair *pp;

        KASSERT(size == sizeof(*pp), ("pipe_zone_init: wrong size"));

        pp = (struct pipepair *)mem;

        mtx_init(&pp->pp_mtx, "pipe mutex", NULL, MTX_DEF | MTX_NEW);
        return (0);
}

static void
pipe_zone_fini(void *mem, int size)
{
        struct pipepair *pp;

        KASSERT(size == sizeof(*pp), ("pipe_zone_fini: wrong size"));

        pp = (struct pipepair *)mem;

        mtx_destroy(&pp->pp_mtx);
}

static void
pipe_paircreate(struct thread *td, struct pipepair **p_pp)
{
        struct pipepair *pp;
        struct pipe *rpipe, *wpipe;

        *p_pp = pp = uma_zalloc(pipe_zone, M_WAITOK);
#ifdef MAC
        /*
         * The MAC label is shared between the connected endpoints.  As a
         * result mac_pipe_init() and mac_pipe_create() are called once
         * for the pair, and not on the endpoints.
         */
        mac_pipe_init(pp);
        mac_pipe_create(td->td_ucred, pp);
#endif
        rpipe = &pp->pp_rpipe;
        wpipe = &pp->pp_wpipe;

        knlist_init_mtx(&rpipe->pipe_sel.si_note, PIPE_MTX(rpipe));
        knlist_init_mtx(&wpipe->pipe_sel.si_note, PIPE_MTX(wpipe));

        /* Only the forward direction pipe is backed by default */
        pipe_create(rpipe, 1);
        pipe_create(wpipe, 0);

        rpipe->pipe_state |= PIPE_DIRECTOK;
        wpipe->pipe_state |= PIPE_DIRECTOK;
}

void
pipe_named_ctor(struct pipe **ppipe, struct thread *td)
{
        struct pipepair *pp;

        pipe_paircreate(td, &pp);
        pp->pp_rpipe.pipe_state |= PIPE_NAMED;
        *ppipe = &pp->pp_rpipe;
}

void
pipe_dtor(struct pipe *dpipe)
{
        struct pipe *peer;
        ino_t ino;

        ino = dpipe->pipe_ino;
        peer = (dpipe->pipe_state & PIPE_NAMED) != 0 ? dpipe->pipe_peer : NULL;
        funsetown(&dpipe->pipe_sigio);
        pipeclose(dpipe);
        if (peer != NULL) {
                funsetown(&peer->pipe_sigio);
                pipeclose(peer);
        }
        if (ino != 0 && ino != (ino_t)-1)
                free_unr(pipeino_unr, ino);
}

/*
 * The pipe system call for the DTYPE_PIPE type of pipes.  If we fail, let
 * the zone pick up the pieces via pipeclose().
 */
int
kern_pipe(struct thread *td, int fildes[2], int flags, struct filecaps *fcaps1,
    struct filecaps *fcaps2)
{
        struct file *rf, *wf;
        struct pipe *rpipe, *wpipe;
        struct pipepair *pp;
        int fd, fflags, error;

        pipe_paircreate(td, &pp);
        rpipe = &pp->pp_rpipe;
        wpipe = &pp->pp_wpipe;
        error = falloc_caps(td, &rf, &fd, flags, fcaps1);
        if (error) {
                pipeclose(rpipe);
                pipeclose(wpipe);
                return (error);
        }
        /* An extra reference on `rf' has been held for us by falloc_caps(). */
        fildes[0] = fd;

        fflags = FREAD | FWRITE;
        if ((flags & O_NONBLOCK) != 0)
                fflags |= FNONBLOCK;

        /*
         * Warning: once we've gotten past allocation of the fd for the
         * read-side, we can only drop the read side via fdrop() in order
         * to avoid races against processes which manage to dup() the read
         * side while we are blocked trying to allocate the write side.
         */
        finit(rf, fflags, DTYPE_PIPE, rpipe, &pipeops);
        error = falloc_caps(td, &wf, &fd, flags, fcaps2);
        if (error) {
                fdclose(td, rf, fildes[0]);
#ifndef __rtems__
                fdrop(rf, td);
#endif /* __rtems__ */
                /* rpipe has been closed by fdrop(). */
                pipeclose(wpipe);
                return (error);
        }
        /* An extra reference on `wf' has been held for us by falloc_caps(). */
        finit(wf, fflags, DTYPE_PIPE, wpipe, &pipeops);
#ifndef __rtems__
        fdrop(wf, td);
#endif /* __rtems__ */
        fildes[1] = fd;
#ifndef __rtems__
        fdrop(rf, td);
#endif /* __rtems__ */

        return (0);
}

#ifdef COMPAT_FREEBSD10
/* ARGSUSED */
int
freebsd10_pipe(struct thread *td, struct freebsd10_pipe_args *uap __unused)
{
        int error;
        int fildes[2];

        error = kern_pipe(td, fildes, 0, NULL, NULL);
        if (error)
                return (error);

        td->td_retval[0] = fildes[0];
        td->td_retval[1] = fildes[1];

        return (0);
}
#endif

#ifndef __rtems__
int
sys_pipe2(struct thread *td, struct pipe2_args *uap)
{
        int error, fildes[2];

        if (uap->flags & ~(O_CLOEXEC | O_NONBLOCK))
                return (EINVAL);
        error = kern_pipe(td, fildes, uap->flags, NULL, NULL);
        if (error)
                return (error);
        error = copyout(fildes, uap->fildes, 2 * sizeof(int));
        if (error) {
                (void)kern_close(td, fildes[0]);
                (void)kern_close(td, fildes[1]);
        }
        return (error);
}
#endif /* __rtems__ */

#ifdef __rtems__
int
pipe(int fildes[2])
{
        struct thread *td = rtems_bsd_get_curthread_or_null();
        int error;

        if (td != NULL) {
                error = kern_pipe(td, fildes, 0, NULL, NULL);
        } else {
                error = ENOMEM;
        }

        if (error == 0) {
                return error;
        } else {
                rtems_set_errno_and_return_minus_one(error);
        }
}
#endif /* __rtems__ */
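
/*
 * Illustrative userspace sketch (an assumption for exposition, not part
 * of this file and excluded from the build): basic use of the pipe()
 * wrapper defined above.
 */
#if 0
#include <stdio.h>
#include <unistd.h>

static void
pipe_example(void)
{
        int fildes[2];
        char buf[16];
        ssize_t n;

        if (pipe(fildes) != 0)
                return;
        /* fildes[0] is the read end, fildes[1] is the write end. */
        (void)write(fildes[1], "hello", 5);
        n = read(fildes[0], buf, sizeof(buf));
        if (n > 0)
                printf("read %zd bytes\n", n);
        (void)close(fildes[0]);
        (void)close(fildes[1]);
}
#endif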

/*
 * Allocate kva for pipe circular buffer, the space is pageable.
 * This routine will 'realloc' the size of a pipe safely; if it fails
 * it will retain the old buffer and return ENOMEM.
 */
static int
pipespace_new(struct pipe *cpipe, int size)
{
        caddr_t buffer;
        int error, cnt, firstseg;
        static int curfail = 0;
        static struct timeval lastfail;

        KASSERT(!mtx_owned(PIPE_MTX(cpipe)), ("pipespace: pipe mutex locked"));
        KASSERT(!(cpipe->pipe_state & PIPE_DIRECTW),
                ("pipespace: resize of direct writes not allowed"));
retry:
        cnt = cpipe->pipe_buffer.cnt;
        if (cnt > size)
                size = cnt;

        size = round_page(size);
#ifndef __rtems__
        buffer = (caddr_t) vm_map_min(pipe_map);

        error = vm_map_find(pipe_map, NULL, 0,
                (vm_offset_t *) &buffer, size, 0, VMFS_ANY_SPACE,
                VM_PROT_ALL, VM_PROT_ALL, 0);
        if (error != KERN_SUCCESS) {
#else /* __rtems__ */
        (void)error;
        buffer = malloc(size, M_TEMP, M_WAITOK | M_ZERO);
        if (buffer == NULL) {
#endif /* __rtems__ */
                if ((cpipe->pipe_buffer.buffer == NULL) &&
                        (size > SMALL_PIPE_SIZE)) {
                        size = SMALL_PIPE_SIZE;
                        pipefragretry++;
                        goto retry;
                }
                if (cpipe->pipe_buffer.buffer == NULL) {
                        pipeallocfail++;
                        if (ppsratecheck(&lastfail, &curfail, 1))
                                printf("kern.ipc.maxpipekva exceeded; see tuning(7)\n");
                } else {
                        piperesizefail++;
                }
                return (ENOMEM);
        }

        /* copy data, then free old resources if we're resizing */
        if (cnt > 0) {
                if (cpipe->pipe_buffer.in <= cpipe->pipe_buffer.out) {
                        firstseg = cpipe->pipe_buffer.size - cpipe->pipe_buffer.out;
                        bcopy(&cpipe->pipe_buffer.buffer[cpipe->pipe_buffer.out],
                                buffer, firstseg);
                        if ((cnt - firstseg) > 0)
                                bcopy(cpipe->pipe_buffer.buffer, &buffer[firstseg],
                                        cpipe->pipe_buffer.in);
                } else {
                        bcopy(&cpipe->pipe_buffer.buffer[cpipe->pipe_buffer.out],
                                buffer, cnt);
                }
        }
        pipe_free_kmem(cpipe);
        cpipe->pipe_buffer.buffer = buffer;
        cpipe->pipe_buffer.size = size;
        cpipe->pipe_buffer.in = cnt;
        cpipe->pipe_buffer.out = 0;
        cpipe->pipe_buffer.cnt = cnt;
        atomic_add_long(&amountpipekva, cpipe->pipe_buffer.size);
        return (0);
}

/*
 * Wrapper for pipespace_new() that performs locking assertions.
 */
static int
pipespace(struct pipe *cpipe, int size)
{

        KASSERT(cpipe->pipe_state & PIPE_LOCKFL,
                ("Unlocked pipe passed to pipespace"));
        return (pipespace_new(cpipe, size));
}

/*
 * lock a pipe for I/O, blocking other access
 */
static __inline int
pipelock(struct pipe *cpipe, int catch)
{
        int error;

        PIPE_LOCK_ASSERT(cpipe, MA_OWNED);
        while (cpipe->pipe_state & PIPE_LOCKFL) {
                cpipe->pipe_state |= PIPE_LWANT;
                error = msleep(cpipe, PIPE_MTX(cpipe),
                    catch ? (PRIBIO | PCATCH) : PRIBIO,
                    "pipelk", 0);
                if (error != 0)
                        return (error);
        }
        cpipe->pipe_state |= PIPE_LOCKFL;
        return (0);
}

/*
 * unlock a pipe I/O lock
 */
static __inline void
pipeunlock(struct pipe *cpipe)
{

        PIPE_LOCK_ASSERT(cpipe, MA_OWNED);
        KASSERT(cpipe->pipe_state & PIPE_LOCKFL,
                ("Unlocked pipe passed to pipeunlock"));
        cpipe->pipe_state &= ~PIPE_LOCKFL;
        if (cpipe->pipe_state & PIPE_LWANT) {
                cpipe->pipe_state &= ~PIPE_LWANT;
                wakeup(cpipe);
        }
}
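
/*
 * Illustrative sketch (an assumption for exposition, excluded from the
 * build): the pipelock()/pipeunlock() protocol described in the header
 * comment.  Because pipelock() may sleep, all pipe state must be
 * re-read after it returns.
 */
#if 0
static int
example_locked_op(struct pipe *cpipe)
{
        int error;

        PIPE_LOCK(cpipe);
        error = pipelock(cpipe, 1);
        if (error == 0) {
                /*
                 * Re-read everything here: the pipe may have changed
                 * while pipelock() slept waiting for PIPE_LOCKFL.
                 */
                pipeunlock(cpipe);
        }
        PIPE_UNLOCK(cpipe);
        return (error);
}
#endif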

void
pipeselwakeup(struct pipe *cpipe)
{

        PIPE_LOCK_ASSERT(cpipe, MA_OWNED);
        if (cpipe->pipe_state & PIPE_SEL) {
                selwakeuppri(&cpipe->pipe_sel, PSOCK);
                if (!SEL_WAITING(&cpipe->pipe_sel))
                        cpipe->pipe_state &= ~PIPE_SEL;
        }
#ifndef __rtems__
        if ((cpipe->pipe_state & PIPE_ASYNC) && cpipe->pipe_sigio)
                pgsigio(&cpipe->pipe_sigio, SIGIO, 0);
#endif /* __rtems__ */
        KNOTE_LOCKED(&cpipe->pipe_sel.si_note, 0);
}

/*
 * Initialize and allocate VM and memory for pipe.  The structure
 * will start out zero'd from the ctor, so we just manage the kmem.
 */
static void
pipe_create(struct pipe *pipe, int backing)
{

        if (backing) {
                /*
                 * Note that these functions can fail if pipe map is exhausted
                 * (as a result of too many pipes created), but we ignore the
                 * error as it is not fatal and could be provoked by
                 * unprivileged users. The only consequence is worse performance
                 * with given pipe.
                 */
                if (amountpipekva > maxpipekva / 2)
                        (void)pipespace_new(pipe, SMALL_PIPE_SIZE);
                else
                        (void)pipespace_new(pipe, PIPE_SIZE);
        }

        pipe->pipe_ino = -1;
}

/* ARGSUSED */
static int
pipe_read(struct file *fp, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
        struct pipe *rpipe;
        int error;
        int nread = 0;
        int size;

        rpipe = fp->f_data;
        PIPE_LOCK(rpipe);
        ++rpipe->pipe_busy;
        error = pipelock(rpipe, 1);
        if (error)
                goto unlocked_error;

#ifdef MAC
        error = mac_pipe_check_read(active_cred, rpipe->pipe_pair);
        if (error)
                goto locked_error;
#endif
        if (amountpipekva > (3 * maxpipekva) / 4) {
                if (!(rpipe->pipe_state & PIPE_DIRECTW) &&
                        (rpipe->pipe_buffer.size > SMALL_PIPE_SIZE) &&
                        (rpipe->pipe_buffer.cnt <= SMALL_PIPE_SIZE) &&
                        (piperesizeallowed == 1)) {
                        PIPE_UNLOCK(rpipe);
                        pipespace(rpipe, SMALL_PIPE_SIZE);
                        PIPE_LOCK(rpipe);
                }
        }

        while (uio->uio_resid) {
                /*
                 * normal pipe buffer receive
                 */
                if (rpipe->pipe_buffer.cnt > 0) {
                        size = rpipe->pipe_buffer.size - rpipe->pipe_buffer.out;
                        if (size > rpipe->pipe_buffer.cnt)
                                size = rpipe->pipe_buffer.cnt;
                        if (size > uio->uio_resid)
                                size = uio->uio_resid;

                        PIPE_UNLOCK(rpipe);
                        error = uiomove(
                            &rpipe->pipe_buffer.buffer[rpipe->pipe_buffer.out],
                            size, uio);
                        PIPE_LOCK(rpipe);
                        if (error)
                                break;

                        rpipe->pipe_buffer.out += size;
                        if (rpipe->pipe_buffer.out >= rpipe->pipe_buffer.size)
                                rpipe->pipe_buffer.out = 0;

                        rpipe->pipe_buffer.cnt -= size;

                        /*
                         * If there is no more to read in the pipe, reset
                         * its pointers to the beginning.  This improves
                         * cache hit stats.
                         */
                        if (rpipe->pipe_buffer.cnt == 0) {
                                rpipe->pipe_buffer.in = 0;
                                rpipe->pipe_buffer.out = 0;
                        }
                        nread += size;
#ifndef PIPE_NODIRECT
                /*
                 * Direct copy, bypassing a kernel buffer.
                 */
                } else if ((size = rpipe->pipe_map.cnt) &&
                           (rpipe->pipe_state & PIPE_DIRECTW)) {
                        if (size > uio->uio_resid)
                                size = (u_int) uio->uio_resid;

                        PIPE_UNLOCK(rpipe);
                        error = uiomove_fromphys(rpipe->pipe_map.ms,
                            rpipe->pipe_map.pos, size, uio);
                        PIPE_LOCK(rpipe);
                        if (error)
                                break;
                        nread += size;
                        rpipe->pipe_map.pos += size;
                        rpipe->pipe_map.cnt -= size;
                        if (rpipe->pipe_map.cnt == 0) {
                                rpipe->pipe_state &= ~(PIPE_DIRECTW|PIPE_WANTW);
                                wakeup(rpipe);
                        }
#endif
                } else {
                        /*
                         * detect EOF condition
                         * read returns 0 on EOF, no need to set error
                         */
                        if (rpipe->pipe_state & PIPE_EOF)
                                break;

                        /*
                         * If the "write-side" has been blocked, wake it up now.
                         */
                        if (rpipe->pipe_state & PIPE_WANTW) {
                                rpipe->pipe_state &= ~PIPE_WANTW;
                                wakeup(rpipe);
                        }

                        /*
                         * Break if some data was read.
                         */
                        if (nread > 0)
                                break;

                        /*
                         * Unlock the pipe buffer for our remaining processing.
                         * We will either break out with an error or we will
                         * sleep and relock to loop.
                         */
                        pipeunlock(rpipe);

                        /*
                         * Handle non-blocking mode operation or
                         * wait for more data.
                         */
#ifndef __rtems__
                        if (fp->f_flag & FNONBLOCK) {
#else /* __rtems__ */
                        if (rtems_bsd_libio_flags_to_fflag(fp->f_io.flags) & FNONBLOCK) {
#endif /* __rtems__ */
                                error = EAGAIN;
                        } else {
                                rpipe->pipe_state |= PIPE_WANTR;
                                if ((error = msleep(rpipe, PIPE_MTX(rpipe),
                                    PRIBIO | PCATCH,
                                    "piperd", 0)) == 0)
                                        error = pipelock(rpipe, 1);
                        }
                        if (error)
                                goto unlocked_error;
                }
        }
#ifdef MAC
locked_error:
#endif
        pipeunlock(rpipe);

        /* XXX: should probably do this before getting any locks. */
        if (error == 0)
#ifndef __rtems__
                vfs_timestamp(&rpipe->pipe_atime);
#else /* __rtems__ */
                rpipe->pipe_atime.tv_sec = time(NULL);
#endif /* __rtems__ */
unlocked_error:
        --rpipe->pipe_busy;

        /*
         * PIPE_WANT processing only makes sense if pipe_busy is 0.
         */
        if ((rpipe->pipe_busy == 0) && (rpipe->pipe_state & PIPE_WANT)) {
                rpipe->pipe_state &= ~(PIPE_WANT|PIPE_WANTW);
                wakeup(rpipe);
        } else if (rpipe->pipe_buffer.cnt < MINPIPESIZE) {
                /*
                 * Handle write blocking hysteresis.
                 */
                if (rpipe->pipe_state & PIPE_WANTW) {
                        rpipe->pipe_state &= ~PIPE_WANTW;
                        wakeup(rpipe);
                }
        }

        if ((rpipe->pipe_buffer.size - rpipe->pipe_buffer.cnt) >= PIPE_BUF)
                pipeselwakeup(rpipe);

        PIPE_UNLOCK(rpipe);
        return (error);
}
#ifdef __rtems__
static ssize_t
rtems_bsd_pipe_read(rtems_libio_t *iop, void *buffer, size_t count)
{
        struct thread *td = rtems_bsd_get_curthread_or_null();
        struct file *fp = rtems_bsd_iop_to_fp(iop);
        struct iovec iov = {
                .iov_base = buffer,
                .iov_len = count
        };
        struct uio auio = {
                .uio_iov = &iov,
                .uio_iovcnt = 1,
                .uio_offset = 0,
                .uio_resid = count,
                .uio_segflg = UIO_USERSPACE,
                .uio_rw = UIO_READ,
                .uio_td = td
        };
        int error;

        if (td != NULL) {
                error = pipe_read(fp, &auio, NULL, 0, NULL);
        } else {
                error = ENOMEM;
        }

        if (error == 0) {
                return (count - auio.uio_resid);
        } else {
                rtems_set_errno_and_return_minus_one(error);
        }
}

static ssize_t
rtems_bsd_pipe_readv(rtems_libio_t *iop, const struct iovec *iov,
    int iovcnt, ssize_t total)
{
        struct thread *td = rtems_bsd_get_curthread_or_null();
        struct file *fp = rtems_bsd_iop_to_fp(iop);
        struct uio auio = {
                .uio_iov = __DECONST(struct iovec *, iov),
                .uio_iovcnt = iovcnt,
                .uio_offset = 0,
                .uio_resid = total,
                .uio_segflg = UIO_USERSPACE,
                .uio_rw = UIO_READ,
                .uio_td = td
        };
        int error;

        if (td != NULL) {
                error = pipe_read(fp, &auio, NULL, 0, NULL);
        } else {
                error = ENOMEM;
        }

        if (error == 0) {
                return (total - auio.uio_resid);
        } else {
                rtems_set_errno_and_return_minus_one(error);
        }
}
#endif /* __rtems__ */

#ifndef PIPE_NODIRECT
/*
 * Map the sending processes' buffer into kernel space and wire it.
 * This is similar to a physical write operation.
 */
static int
pipe_build_write_buffer(struct pipe *wpipe, struct uio *uio)
{
        u_int size;
        int i;

        PIPE_LOCK_ASSERT(wpipe, MA_NOTOWNED);
        KASSERT(wpipe->pipe_state & PIPE_DIRECTW,
                ("Clone attempt on non-direct write pipe!"));

        if (uio->uio_iov->iov_len > wpipe->pipe_buffer.size)
                size = wpipe->pipe_buffer.size;
        else
                size = uio->uio_iov->iov_len;

        if ((i = vm_fault_quick_hold_pages(&curproc->p_vmspace->vm_map,
            (vm_offset_t)uio->uio_iov->iov_base, size, VM_PROT_READ,
            wpipe->pipe_map.ms, PIPENPAGES)) < 0)
                return (EFAULT);

/*
 * set up the control block
 */
        wpipe->pipe_map.npages = i;
        wpipe->pipe_map.pos =
            ((vm_offset_t) uio->uio_iov->iov_base) & PAGE_MASK;
        wpipe->pipe_map.cnt = size;

/*
 * and update the uio data
 */

        uio->uio_iov->iov_len -= size;
        uio->uio_iov->iov_base = (char *)uio->uio_iov->iov_base + size;
        if (uio->uio_iov->iov_len == 0)
                uio->uio_iov++;
        uio->uio_resid -= size;
        uio->uio_offset += size;
        return (0);
}

/*
 * unmap and unwire the process buffer
 */
static void
pipe_destroy_write_buffer(struct pipe *wpipe)
{

        PIPE_LOCK_ASSERT(wpipe, MA_OWNED);
        vm_page_unhold_pages(wpipe->pipe_map.ms, wpipe->pipe_map.npages);
        wpipe->pipe_map.npages = 0;
}

/*
 * In the case of a signal, the writing process might go away.  This
 * code copies the data into the circular buffer so that the source
 * pages can be freed without loss of data.
 */
static void
pipe_clone_write_buffer(struct pipe *wpipe)
{
        struct uio uio;
        struct iovec iov;
        int size;
        int pos;

        PIPE_LOCK_ASSERT(wpipe, MA_OWNED);
        size = wpipe->pipe_map.cnt;
        pos = wpipe->pipe_map.pos;

        wpipe->pipe_buffer.in = size;
        wpipe->pipe_buffer.out = 0;
        wpipe->pipe_buffer.cnt = size;
        wpipe->pipe_state &= ~PIPE_DIRECTW;

        PIPE_UNLOCK(wpipe);
        iov.iov_base = wpipe->pipe_buffer.buffer;
        iov.iov_len = size;
        uio.uio_iov = &iov;
        uio.uio_iovcnt = 1;
        uio.uio_offset = 0;
        uio.uio_resid = size;
        uio.uio_segflg = UIO_SYSSPACE;
        uio.uio_rw = UIO_READ;
        uio.uio_td = curthread;
        uiomove_fromphys(wpipe->pipe_map.ms, pos, size, &uio);
        PIPE_LOCK(wpipe);
        pipe_destroy_write_buffer(wpipe);
}

/*
 * This implements the pipe buffer write mechanism.  Note that only
 * a direct write OR a normal pipe write can be pending at any given time.
 * If there are any characters in the pipe buffer, the direct write will
 * be deferred until the receiving process grabs all of the bytes from
 * the pipe buffer.  Then the direct mapping write is set-up.
 */
static int
pipe_direct_write(struct pipe *wpipe, struct uio *uio)
{
        int error;

retry:
        PIPE_LOCK_ASSERT(wpipe, MA_OWNED);
        error = pipelock(wpipe, 1);
        if (error != 0)
                goto error1;
        if ((wpipe->pipe_state & PIPE_EOF) != 0) {
                error = EPIPE;
                pipeunlock(wpipe);
                goto error1;
        }
        while (wpipe->pipe_state & PIPE_DIRECTW) {
                if (wpipe->pipe_state & PIPE_WANTR) {
                        wpipe->pipe_state &= ~PIPE_WANTR;
                        wakeup(wpipe);
                }
                pipeselwakeup(wpipe);
                wpipe->pipe_state |= PIPE_WANTW;
                pipeunlock(wpipe);
                error = msleep(wpipe, PIPE_MTX(wpipe),
                    PRIBIO | PCATCH, "pipdww", 0);
                if (error)
                        goto error1;
                else
                        goto retry;
        }
        wpipe->pipe_map.cnt = 0;        /* transfer not ready yet */
        if (wpipe->pipe_buffer.cnt > 0) {
                if (wpipe->pipe_state & PIPE_WANTR) {
                        wpipe->pipe_state &= ~PIPE_WANTR;
                        wakeup(wpipe);
                }
                pipeselwakeup(wpipe);
                wpipe->pipe_state |= PIPE_WANTW;
                pipeunlock(wpipe);
                error = msleep(wpipe, PIPE_MTX(wpipe),
                    PRIBIO | PCATCH, "pipdwc", 0);
                if (error)
                        goto error1;
                else
                        goto retry;
        }

        wpipe->pipe_state |= PIPE_DIRECTW;

        PIPE_UNLOCK(wpipe);
        error = pipe_build_write_buffer(wpipe, uio);
        PIPE_LOCK(wpipe);
        if (error) {
                wpipe->pipe_state &= ~PIPE_DIRECTW;
                pipeunlock(wpipe);
                goto error1;
        }

        error = 0;
        while (!error && (wpipe->pipe_state & PIPE_DIRECTW)) {
                if (wpipe->pipe_state & PIPE_EOF) {
                        pipe_destroy_write_buffer(wpipe);
                        pipeselwakeup(wpipe);
                        pipeunlock(wpipe);
                        error = EPIPE;
                        goto error1;
                }
                if (wpipe->pipe_state & PIPE_WANTR) {
                        wpipe->pipe_state &= ~PIPE_WANTR;
                        wakeup(wpipe);
                }
                pipeselwakeup(wpipe);
                wpipe->pipe_state |= PIPE_WANTW;
                pipeunlock(wpipe);
                error = msleep(wpipe, PIPE_MTX(wpipe), PRIBIO | PCATCH,
                    "pipdwt", 0);
                pipelock(wpipe, 0);
        }

        if (wpipe->pipe_state & PIPE_EOF)
                error = EPIPE;
        if (wpipe->pipe_state & PIPE_DIRECTW) {
                /*
                 * this bit of trickery substitutes a kernel buffer for
                 * the process that might be going away.
                 */
                pipe_clone_write_buffer(wpipe);
        } else {
                pipe_destroy_write_buffer(wpipe);
        }
        pipeunlock(wpipe);
        return (error);

error1:
        wakeup(wpipe);
        return (error);
}
#endif
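
/*
 * Illustrative userspace sketch (an assumption for exposition, excluded
 * from the build): a blocking write of at least PIPE_MINDIRECT bytes
 * from a single buffer is eligible for the direct (page-pinning) path
 * above; smaller writes, and writes on O_NONBLOCK descriptors, go
 * through the kernel buffer instead.  The 128 KiB size is arbitrary.
 */
#if 0
#include <stdlib.h>
#include <unistd.h>

static void
direct_write_example(int wfd)
{
        size_t len = 128 * 1024;
        char *big;

        big = malloc(len);
        if (big == NULL)
                return;
        /* Large blocking write: a candidate for pipe_direct_write(). */
        (void)write(wfd, big, len);
        free(big);
}
#endif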

static int
pipe_write(struct file *fp, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
        int error = 0;
        int desiredsize;
        ssize_t orig_resid;
        struct pipe *wpipe, *rpipe;

        rpipe = fp->f_data;
        wpipe = PIPE_PEER(rpipe);
        PIPE_LOCK(rpipe);
        error = pipelock(wpipe, 1);
        if (error) {
                PIPE_UNLOCK(rpipe);
                return (error);
        }
        /*
         * detect loss of pipe read side, issue SIGPIPE if lost.
         */
        if (wpipe->pipe_present != PIPE_ACTIVE ||
            (wpipe->pipe_state & PIPE_EOF)) {
                pipeunlock(wpipe);
                PIPE_UNLOCK(rpipe);
                return (EPIPE);
        }
#ifdef MAC
        error = mac_pipe_check_write(active_cred, wpipe->pipe_pair);
        if (error) {
                pipeunlock(wpipe);
                PIPE_UNLOCK(rpipe);
                return (error);
        }
#endif
        ++wpipe->pipe_busy;

        /* Choose a larger size if it's advantageous */
        desiredsize = max(SMALL_PIPE_SIZE, wpipe->pipe_buffer.size);
        while (desiredsize < wpipe->pipe_buffer.cnt + uio->uio_resid) {
                if (piperesizeallowed != 1)
                        break;
                if (amountpipekva > maxpipekva / 2)
                        break;
                if (desiredsize == BIG_PIPE_SIZE)
                        break;
                desiredsize = desiredsize * 2;
        }

        /* Choose a smaller size if we're in an OOM situation */
        if ((amountpipekva > (3 * maxpipekva) / 4) &&
                (wpipe->pipe_buffer.size > SMALL_PIPE_SIZE) &&
                (wpipe->pipe_buffer.cnt <= SMALL_PIPE_SIZE) &&
                (piperesizeallowed == 1))
                desiredsize = SMALL_PIPE_SIZE;

        /* Resize if the above determined that a new size was necessary */
        if ((desiredsize != wpipe->pipe_buffer.size) &&
                ((wpipe->pipe_state & PIPE_DIRECTW) == 0)) {
                PIPE_UNLOCK(wpipe);
                pipespace(wpipe, desiredsize);
                PIPE_LOCK(wpipe);
        }
        if (wpipe->pipe_buffer.size == 0) {
                /*
                 * This can only happen for reverse direction use of pipes
                 * in a complete OOM situation.
                 */
                error = ENOMEM;
                --wpipe->pipe_busy;
                pipeunlock(wpipe);
                PIPE_UNLOCK(wpipe);
                return (error);
        }

        pipeunlock(wpipe);

        orig_resid = uio->uio_resid;

        while (uio->uio_resid) {
                int space;

                pipelock(wpipe, 0);
                if (wpipe->pipe_state & PIPE_EOF) {
                        pipeunlock(wpipe);
                        error = EPIPE;
                        break;
                }
#ifndef PIPE_NODIRECT
                /*
                 * If the transfer is large, we can gain performance if
                 * we do process-to-process copies directly.
                 * If the write is non-blocking, we don't use the
                 * direct write mechanism.
                 *
                 * The direct write mechanism will detect the reader going
                 * away on us.
                 */
                if (uio->uio_segflg == UIO_USERSPACE &&
                    uio->uio_iov->iov_len >= PIPE_MINDIRECT &&
                    wpipe->pipe_buffer.size >= PIPE_MINDIRECT &&
                    (fp->f_flag & FNONBLOCK) == 0) {
                        pipeunlock(wpipe);
                        error = pipe_direct_write(wpipe, uio);
                        if (error)
                                break;
                        continue;
                }
#endif

                /*
                 * Pipe buffered writes cannot be coincidental with
                 * direct writes.  We wait until the currently executing
                 * direct write is completed before we start filling the
                 * pipe buffer.  We break out if a signal occurs or the
                 * reader goes away.
                 */
                if (wpipe->pipe_state & PIPE_DIRECTW) {
                        if (wpipe->pipe_state & PIPE_WANTR) {
                                wpipe->pipe_state &= ~PIPE_WANTR;
                                wakeup(wpipe);
                        }
                        pipeselwakeup(wpipe);
                        wpipe->pipe_state |= PIPE_WANTW;
                        pipeunlock(wpipe);
                        error = msleep(wpipe, PIPE_MTX(rpipe), PRIBIO | PCATCH,
                            "pipbww", 0);
                        if (error)
                                break;
                        else
                                continue;
                }

                space = wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt;

                /* Writes of size <= PIPE_BUF must be atomic. */
                if ((space < uio->uio_resid) && (orig_resid <= PIPE_BUF))
                        space = 0;

                if (space > 0) {
                        int size;       /* Transfer size */
                        int segsize;    /* first segment to transfer */

                        /*
                         * Transfer size is minimum of uio transfer
                         * and free space in pipe buffer.
                         */
                        if (space > uio->uio_resid)
                                size = uio->uio_resid;
                        else
                                size = space;
                        /*
                         * First segment to transfer is minimum of
                         * transfer size and contiguous space in
                         * pipe buffer.  If first segment to transfer
                         * is less than the transfer size, we've got
                         * a wraparound in the buffer.
                         */
                        segsize = wpipe->pipe_buffer.size -
                                wpipe->pipe_buffer.in;
                        if (segsize > size)
                                segsize = size;

                        /* Transfer first segment */

                        PIPE_UNLOCK(rpipe);
                        error = uiomove(&wpipe->pipe_buffer.buffer[wpipe->pipe_buffer.in],
                                        segsize, uio);
                        PIPE_LOCK(rpipe);

                        if (error == 0 && segsize < size) {
                                KASSERT(wpipe->pipe_buffer.in + segsize ==
                                        wpipe->pipe_buffer.size,
                                        ("Pipe buffer wraparound disappeared"));
                                /*
                                 * Transfer remaining part now, to
                                 * support atomic writes.  Wraparound
                                 * happened.
                                 */

                                PIPE_UNLOCK(rpipe);
                                error = uiomove(
                                    &wpipe->pipe_buffer.buffer[0],
                                    size - segsize, uio);
                                PIPE_LOCK(rpipe);
                        }
                        if (error == 0) {
                                wpipe->pipe_buffer.in += size;
                                if (wpipe->pipe_buffer.in >=
                                    wpipe->pipe_buffer.size) {
                                        KASSERT(wpipe->pipe_buffer.in ==
                                                size - segsize +
                                                wpipe->pipe_buffer.size,
                                                ("Expected wraparound bad"));
                                        wpipe->pipe_buffer.in = size - segsize;
                                }

                                wpipe->pipe_buffer.cnt += size;
                                KASSERT(wpipe->pipe_buffer.cnt <=
                                        wpipe->pipe_buffer.size,
                                        ("Pipe buffer overflow"));
                        }
                        pipeunlock(wpipe);
                        if (error != 0)
                                break;
                } else {
                        /*
                         * If the "read-side" has been blocked, wake it up now.
                         */
                        if (wpipe->pipe_state & PIPE_WANTR) {
                                wpipe->pipe_state &= ~PIPE_WANTR;
                                wakeup(wpipe);
                        }

                        /*
                         * don't block on non-blocking I/O
                         */
#ifndef __rtems__
                        if (fp->f_flag & FNONBLOCK) {
#else /* __rtems__ */
                        if (rtems_bsd_libio_flags_to_fflag(fp->f_io.flags) & FNONBLOCK) {
#endif /* __rtems__ */
                                error = EAGAIN;
                                pipeunlock(wpipe);
                                break;
                        }

                        /*
                         * We have no more space and have something to offer,
                         * wake up select/poll.
                         */
                        pipeselwakeup(wpipe);

                        wpipe->pipe_state |= PIPE_WANTW;
                        pipeunlock(wpipe);
                        error = msleep(wpipe, PIPE_MTX(rpipe),
                            PRIBIO | PCATCH, "pipewr", 0);
                        if (error != 0)
                                break;
                }
        }

        pipelock(wpipe, 0);
        --wpipe->pipe_busy;

        if ((wpipe->pipe_busy == 0) && (wpipe->pipe_state & PIPE_WANT)) {
                wpipe->pipe_state &= ~(PIPE_WANT | PIPE_WANTR);
                wakeup(wpipe);
        } else if (wpipe->pipe_buffer.cnt > 0) {
                /*
                 * If we have put any characters in the buffer, we wake up
                 * the reader.
                 */
                if (wpipe->pipe_state & PIPE_WANTR) {
                        wpipe->pipe_state &= ~PIPE_WANTR;
                        wakeup(wpipe);
                }
        }

        /*
         * Don't return EPIPE if any byte was written.
         * EINTR and other interrupts are handled by generic I/O layer.
         * Do not pretend that I/O succeeded for obvious user error
         * like EFAULT.
         */
        if (uio->uio_resid != orig_resid && error == EPIPE)
                error = 0;

        if (error == 0)
#ifndef __rtems__
                vfs_timestamp(&wpipe->pipe_mtime);
#else /* __rtems__ */
                wpipe->pipe_mtime.tv_sec = time(NULL);
#endif /* __rtems__ */

        /*
         * We have something to offer,
         * wake up select/poll.
         */
        if (wpipe->pipe_buffer.cnt)
                pipeselwakeup(wpipe);

        pipeunlock(wpipe);
        PIPE_UNLOCK(rpipe);
        return (error);
}
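
/*
 * Illustrative userspace sketch (an assumption for exposition, excluded
 * from the build): the PIPE_BUF guarantee enforced above.  Writes of at
 * most PIPE_BUF bytes are never interleaved with writes from other
 * threads or processes sharing the write end.
 */
#if 0
#include <limits.h>
#include <unistd.h>

static void
atomic_record_write(int wfd, const char *record, size_t len)
{
        /* Records no larger than PIPE_BUF arrive whole at the reader. */
        if (len <= PIPE_BUF)
                (void)write(wfd, record, len);
}
#endif
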
[b1580fb]1464#ifdef __rtems__
static ssize_t
rtems_bsd_pipe_write(rtems_libio_t *iop, const void *buffer, size_t count)
{
        struct thread *td = rtems_bsd_get_curthread_or_null();
        struct file *fp = rtems_bsd_iop_to_fp(iop);
        struct iovec iov = {
                .iov_base = __DECONST(void *, buffer),
                .iov_len = count
        };
        struct uio auio = {
                .uio_iov = &iov,
                .uio_iovcnt = 1,
                .uio_offset = 0,
                .uio_resid = count,
                .uio_segflg = UIO_USERSPACE,
                .uio_rw = UIO_WRITE,
                .uio_td = td
        };
        int error;

        if (td != NULL) {
                error = pipe_write(fp, &auio, NULL, 0, NULL);
        } else {
                error = ENOMEM;
        }

        if (error == 0) {
                return (count - auio.uio_resid);
        } else {
                rtems_set_errno_and_return_minus_one(error);
        }
}

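/*
 * The writev() adapter builds the uio directly from the caller's iovec
 * array; the total argument is expected to be the precomputed sum of
 * the iov_len values, so the short-count calculation below works the
 * same way as in the single-buffer case above.
 */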
static ssize_t
rtems_bsd_pipe_writev(rtems_libio_t *iop, const struct iovec *iov,
    int iovcnt, ssize_t total)
{
        struct thread *td = rtems_bsd_get_curthread_or_null();
        struct file *fp = rtems_bsd_iop_to_fp(iop);
        struct uio auio = {
                .uio_iov = __DECONST(struct iovec *, iov),
                .uio_iovcnt = iovcnt,
                .uio_offset = 0,
                .uio_resid = total,
                .uio_segflg = UIO_USERSPACE,
                .uio_rw = UIO_WRITE,
                .uio_td = td
        };
        int error;

        if (td != NULL) {
                error = pipe_write(fp, &auio, NULL, 0, NULL);
        } else {
                error = ENOMEM;
        }

        if (error == 0) {
                return (total - auio.uio_resid);
        } else {
                rtems_set_errno_and_return_minus_one(error);
        }
}
#endif /* __rtems__ */

/* ARGSUSED */
#ifndef __rtems__
static int
pipe_truncate(struct file *fp, off_t length, struct ucred *active_cred,
    struct thread *td)
{
        struct pipe *cpipe;
        int error;

        cpipe = fp->f_data;
        if (cpipe->pipe_state & PIPE_NAMED)
                error = vnops.fo_truncate(fp, length, active_cred, td);
        else
                error = invfo_truncate(fp, length, active_cred, td);
        return (error);
}
#endif /* __rtems__ */

/*
 * We implement a very minimal set of ioctls for compatibility with sockets.
 */
static int
pipe_ioctl(struct file *fp, u_long cmd, void *data, struct ucred *active_cred,
    struct thread *td)
{
        struct pipe *mpipe = fp->f_data;
        int error;

        PIPE_LOCK(mpipe);

#ifdef MAC
        error = mac_pipe_check_ioctl(active_cred, mpipe->pipe_pair, cmd, data);
        if (error) {
                PIPE_UNLOCK(mpipe);
                return (error);
        }
#endif

        error = 0;
        switch (cmd) {

        case FIONBIO:
                break;

        case FIOASYNC:
                if (*(int *)data) {
                        mpipe->pipe_state |= PIPE_ASYNC;
                } else {
                        mpipe->pipe_state &= ~PIPE_ASYNC;
                }
                break;

        case FIONREAD:
#ifndef __rtems__
                if (!(fp->f_flag & FREAD)) {
#else /* __rtems__ */
                if (!(rtems_bsd_libio_flags_to_fflag(fp->f_io.flags) & FREAD)) {
#endif /* __rtems__ */
                        *(int *)data = 0;
                        PIPE_UNLOCK(mpipe);
                        return (0);
                }
                if (mpipe->pipe_state & PIPE_DIRECTW)
                        *(int *)data = mpipe->pipe_map.cnt;
                else
                        *(int *)data = mpipe->pipe_buffer.cnt;
                break;

        case FIOSETOWN:
                PIPE_UNLOCK(mpipe);
                error = fsetown(*(int *)data, &mpipe->pipe_sigio);
                goto out_unlocked;

        case FIOGETOWN:
                *(int *)data = fgetown(&mpipe->pipe_sigio);
                break;

        /* This is deprecated; FIOSETOWN should be used instead. */
        case TIOCSPGRP:
                PIPE_UNLOCK(mpipe);
                error = fsetown(-(*(int *)data), &mpipe->pipe_sigio);
                goto out_unlocked;

        /* This is deprecated; FIOGETOWN should be used instead. */
        case TIOCGPGRP:
                *(int *)data = -fgetown(&mpipe->pipe_sigio);
                break;

        default:
                error = ENOTTY;
                break;
        }
        PIPE_UNLOCK(mpipe);
out_unlocked:
        return (error);
}
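/*
 * A minimal, purely illustrative application-side sketch of the
 * FIONREAD support above (pipefd, buf and n are hypothetical names,
 * not part of this file):
 *
 *        int pending;
 *        ssize_t n;
 *
 *        if (ioctl(pipefd[0], FIONREAD, &pending) == 0 && pending > 0)
 *                n = read(pipefd[0], buf, (size_t)pending);
 *
 * On a write-only descriptor FIONREAD reports 0, because FREAD is not
 * set in the file flags.
 */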
#ifdef __rtems__
static int
rtems_bsd_pipe_ioctl(rtems_libio_t *iop, ioctl_command_t request, void *buffer)
{
        struct thread *td = rtems_bsd_get_curthread_or_null();
        struct file *fp = rtems_bsd_iop_to_fp(iop);
        int error;

        if (td != NULL) {
                error = pipe_ioctl(fp, request, buffer, NULL, td);
        } else {
                error = ENOMEM;
        }

        return rtems_bsd_error_to_status_and_errno(error);
}
#endif /* __rtems__ */

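/*
 * Poll support.  A pipe is readable when a direct write is pending or
 * buffered data is available, and writable when at least PIPE_BUF
 * bytes of buffer space are free (or the buffer has not been
 * allocated yet), so that a small write cannot block.  POLLOUT is
 * also reported once the write side has seen EOF or is gone, so a
 * subsequent write fails fast instead of blocking.  The RTEMS variant
 * omits the FreeBSD f_seqcount/pipe_wgen test for named-pipe readers,
 * presumably because libio file handles carry no sequence count.
 */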
static int
pipe_poll(struct file *fp, int events, struct ucred *active_cred,
    struct thread *td)
{
        struct pipe *rpipe;
        struct pipe *wpipe;
        int levents, revents;
#ifdef MAC
        int error;
#endif

        revents = 0;
        rpipe = fp->f_data;
        wpipe = PIPE_PEER(rpipe);
        PIPE_LOCK(rpipe);
#ifdef MAC
        error = mac_pipe_check_poll(active_cred, rpipe->pipe_pair);
        if (error)
                goto locked_error;
#endif
#ifndef __rtems__
        if (fp->f_flag & FREAD && events & (POLLIN | POLLRDNORM))
#else /* __rtems__ */
        if (rtems_bsd_libio_flags_to_fflag(fp->f_io.flags) & FREAD &&
            events & (POLLIN | POLLRDNORM))
#endif /* __rtems__ */
                if ((rpipe->pipe_state & PIPE_DIRECTW) ||
                    (rpipe->pipe_buffer.cnt > 0))
                        revents |= events & (POLLIN | POLLRDNORM);

#ifndef __rtems__
        if (fp->f_flag & FWRITE && events & (POLLOUT | POLLWRNORM))
#else /* __rtems__ */
        if (rtems_bsd_libio_flags_to_fflag(fp->f_io.flags) & FWRITE &&
            events & (POLLOUT | POLLWRNORM))
#endif /* __rtems__ */
                if (wpipe->pipe_present != PIPE_ACTIVE ||
                    (wpipe->pipe_state & PIPE_EOF) ||
                    (((wpipe->pipe_state & PIPE_DIRECTW) == 0) &&
                     ((wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt) >= PIPE_BUF ||
                         wpipe->pipe_buffer.size == 0)))
                        revents |= events & (POLLOUT | POLLWRNORM);

        levents = events &
            (POLLIN | POLLINIGNEOF | POLLPRI | POLLRDNORM | POLLRDBAND);
#ifndef __rtems__
        if (rpipe->pipe_state & PIPE_NAMED && fp->f_flag & FREAD && levents &&
            fp->f_seqcount == rpipe->pipe_wgen)
#else /* __rtems__ */
        if (rpipe->pipe_state & PIPE_NAMED &&
            rtems_bsd_libio_flags_to_fflag(fp->f_io.flags) & FREAD && levents)
#endif /* __rtems__ */
                events |= POLLINIGNEOF;

        if ((events & POLLINIGNEOF) == 0) {
                if (rpipe->pipe_state & PIPE_EOF) {
                        revents |= (events & (POLLIN | POLLRDNORM));
                        if (wpipe->pipe_present != PIPE_ACTIVE ||
                            (wpipe->pipe_state & PIPE_EOF))
                                revents |= POLLHUP;
                }
        }

        if (revents == 0) {
#ifndef __rtems__
                if (fp->f_flag & FREAD && events & (POLLIN | POLLRDNORM)) {
#else /* __rtems__ */
                if (rtems_bsd_libio_flags_to_fflag(fp->f_io.flags) & FREAD &&
                    events & (POLLIN | POLLRDNORM)) {
#endif /* __rtems__ */
                        selrecord(td, &rpipe->pipe_sel);
                        if (SEL_WAITING(&rpipe->pipe_sel))
                                rpipe->pipe_state |= PIPE_SEL;
                }

#ifndef __rtems__
                if (fp->f_flag & FWRITE && events & (POLLOUT | POLLWRNORM)) {
#else /* __rtems__ */
                if (rtems_bsd_libio_flags_to_fflag(fp->f_io.flags) & FWRITE &&
                    events & (POLLOUT | POLLWRNORM)) {
#endif /* __rtems__ */
                        selrecord(td, &wpipe->pipe_sel);
                        if (SEL_WAITING(&wpipe->pipe_sel))
                                wpipe->pipe_state |= PIPE_SEL;
                }
        }
#ifdef MAC
locked_error:
#endif
        PIPE_UNLOCK(rpipe);

        return (revents);
}
#ifdef __rtems__
static int
rtems_bsd_pipe_poll(rtems_libio_t *iop, int events)
{
        struct thread *td = rtems_bsd_get_curthread_or_null();
        struct file *fp = rtems_bsd_iop_to_fp(iop);
        int error;

        if (td != NULL) {
                error = pipe_poll(fp, events, NULL, td);
        } else {
                error = ENOMEM;
        }

        return error;
}
#endif /* __rtems__ */

/*
 * We shouldn't need locks here as we're doing a read and this should
 * be a natural race.
 */
#ifndef __rtems__
static int
pipe_stat(struct file *fp, struct stat *ub, struct ucred *active_cred,
    struct thread *td)
{
        struct pipe *pipe;
#else /* __rtems__ */
static int
pipe_stat(struct pipe *pipe, struct stat *ub)
{
#endif /* __rtems__ */
        int new_unr;
#ifdef MAC
        int error;
#endif

#ifndef __rtems__
        pipe = fp->f_data;
#endif /* __rtems__ */
        PIPE_LOCK(pipe);
#ifdef MAC
        error = mac_pipe_check_stat(active_cred, pipe->pipe_pair);
        if (error) {
                PIPE_UNLOCK(pipe);
                return (error);
        }
#endif

        /* For named pipes ask the underlying filesystem. */
        if (pipe->pipe_state & PIPE_NAMED) {
                PIPE_UNLOCK(pipe);
#ifndef __rtems__
                return (vnops.fo_stat(fp, ub, active_cred, td));
#else /* __rtems__ */
                return (ENXIO);
#endif /* __rtems__ */
        }

        /*
         * Lazily allocate an inode number for the pipe.  Most pipe
         * users do not call fstat(2) on the pipe, which means that
         * postponing the inode allocation until it must be returned
         * to userland is useful.  If alloc_unr failed, assign st_ino
         * zero instead of returning an error.
         * Special pipe_ino values:
         *  -1 - not yet initialized;
         *  0  - alloc_unr failed, return 0 as st_ino forever.
         */
        if (pipe->pipe_ino == (ino_t)-1) {
                new_unr = alloc_unr(pipeino_unr);
                if (new_unr != -1)
                        pipe->pipe_ino = new_unr;
                else
                        pipe->pipe_ino = 0;
        }
        PIPE_UNLOCK(pipe);

#ifndef __rtems__
        bzero(ub, sizeof(*ub));
#endif /* __rtems__ */
        ub->st_mode = S_IFIFO;
        ub->st_blksize = PAGE_SIZE;
        if (pipe->pipe_state & PIPE_DIRECTW)
                ub->st_size = pipe->pipe_map.cnt;
        else
                ub->st_size = pipe->pipe_buffer.cnt;
        ub->st_blocks = howmany(ub->st_size, ub->st_blksize);
        ub->st_atim = pipe->pipe_atime;
        ub->st_mtim = pipe->pipe_mtime;
        ub->st_ctim = pipe->pipe_ctime;
#ifndef __rtems__
        ub->st_uid = fp->f_cred->cr_uid;
        ub->st_gid = fp->f_cred->cr_gid;
        ub->st_dev = pipedev_ino;
        ub->st_ino = pipe->pipe_ino;
#else /* __rtems__ */
        ub->st_uid = BSD_DEFAULT_UID;
        ub->st_gid = BSD_DEFAULT_GID;
        ub->st_dev = rtems_filesystem_make_dev_t(0xcc494cd6U, 0x1d970b4dU);
        ub->st_ino = pipe->pipe_ino;
#endif /* __rtems__ */
        /*
         * Left as 0: st_nlink, st_rdev, st_flags, st_gen.
         */
        return (0);
}
#ifdef __rtems__
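/*
 * RTEMS fstat() handler.  Named pipes take the ENXIO branch in
 * pipe_stat() above, since the vnode path is not available here;
 * anonymous pipes report the BSD default credentials and a fixed
 * synthetic st_dev instead of the FreeBSD pipedev_ino value.
 */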
static int
rtems_bsd_pipe_stat(
        const rtems_filesystem_location_info_t *loc,
        struct stat *buf
)
{
        struct pipe *pipe = rtems_bsd_loc_to_f_data(loc);
        int error = pipe_stat(pipe, buf);

        return rtems_bsd_error_to_status_and_errno(error);
}
#endif /* __rtems__ */

/* ARGSUSED */
static int
pipe_close(struct file *fp, struct thread *td)
{

#ifndef __rtems__
        if (fp->f_vnode != NULL)
                return vnops.fo_close(fp, td);
        fp->f_ops = &badfileops;
#else /* __rtems__ */
        fp->f_io.pathinfo.handlers = &rtems_filesystem_handlers_default;
#endif /* __rtems__ */
        pipe_dtor(fp->f_data);
        fp->f_data = NULL;
        return (0);
}

#ifndef __rtems__
static int
pipe_chmod(struct file *fp, mode_t mode, struct ucred *active_cred,
    struct thread *td)
{
        struct pipe *cpipe;
        int error;

        cpipe = fp->f_data;
        if (cpipe->pipe_state & PIPE_NAMED)
                error = vn_chmod(fp, mode, active_cred, td);
        else
                error = invfo_chmod(fp, mode, active_cred, td);
        return (error);
}

static int
pipe_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred,
    struct thread *td)
{
        struct pipe *cpipe;
        int error;

        cpipe = fp->f_data;
        if (cpipe->pipe_state & PIPE_NAMED)
                error = vn_chown(fp, uid, gid, active_cred, td);
        else
                error = invfo_chown(fp, uid, gid, active_cred, td);
        return (error);
}

static int
pipe_fill_kinfo(struct file *fp, struct kinfo_file *kif, struct filedesc *fdp)
{
        struct pipe *pi;

        if (fp->f_type == DTYPE_FIFO)
                return (vn_fill_kinfo(fp, kif, fdp));
        kif->kf_type = KF_TYPE_PIPE;
        pi = fp->f_data;
        kif->kf_un.kf_pipe.kf_pipe_addr = (uintptr_t)pi;
        kif->kf_un.kf_pipe.kf_pipe_peer = (uintptr_t)pi->pipe_peer;
        kif->kf_un.kf_pipe.kf_pipe_buffer_cnt = pi->pipe_buffer.cnt;
        return (0);
}
#endif /* __rtems__ */

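/*
 * Release the buffer backing one pipe endpoint.  The pipe mutex must
 * not be held (see the KASSERT below), presumably because releasing
 * the backing store, vm_map_remove() on FreeBSD, may sleep.
 */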
static void
pipe_free_kmem(struct pipe *cpipe)
{

        KASSERT(!mtx_owned(PIPE_MTX(cpipe)),
            ("pipe_free_kmem: pipe mutex locked"));

        if (cpipe->pipe_buffer.buffer != NULL) {
                atomic_subtract_long(&amountpipekva, cpipe->pipe_buffer.size);
#ifndef __rtems__
                vm_map_remove(pipe_map,
                    (vm_offset_t)cpipe->pipe_buffer.buffer,
                    (vm_offset_t)cpipe->pipe_buffer.buffer + cpipe->pipe_buffer.size);
#else /* __rtems__ */
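                /* RTEMS backs the pipe buffer with heap memory, not pipe_map KVA. */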
                free(cpipe->pipe_buffer.buffer, M_TEMP);
#endif /* __rtems__ */
                cpipe->pipe_buffer.buffer = NULL;
        }
#ifndef PIPE_NODIRECT
        {
                cpipe->pipe_map.cnt = 0;
                cpipe->pipe_map.pos = 0;
                cpipe->pipe_map.npages = 0;
        }
#endif
}

/*
 * Shut down the pipe.
 */
static void
pipeclose(struct pipe *cpipe)
{
        struct pipepair *pp;
        struct pipe *ppipe;

        KASSERT(cpipe != NULL, ("pipeclose: cpipe == NULL"));

        PIPE_LOCK(cpipe);
        pipelock(cpipe, 0);
        pp = cpipe->pipe_pair;

        pipeselwakeup(cpipe);

        /*
         * If the other side is blocked, wake it up saying that
         * we want to close it down.
         */
        cpipe->pipe_state |= PIPE_EOF;
        while (cpipe->pipe_busy) {
                wakeup(cpipe);
                cpipe->pipe_state |= PIPE_WANT;
                pipeunlock(cpipe);
                msleep(cpipe, PIPE_MTX(cpipe), PRIBIO, "pipecl", 0);
                pipelock(cpipe, 0);
        }

        /*
         * Disconnect from peer, if any.
         */
        ppipe = cpipe->pipe_peer;
        if (ppipe->pipe_present == PIPE_ACTIVE) {
                pipeselwakeup(ppipe);

                ppipe->pipe_state |= PIPE_EOF;
                wakeup(ppipe);
                KNOTE_LOCKED(&ppipe->pipe_sel.si_note, 0);
        }

        /*
         * Mark this endpoint as free.  Release kmem resources.  We
         * don't mark this endpoint as unused until we've finished
         * doing that, or the pipe might disappear out from under
         * us.
         */
        PIPE_UNLOCK(cpipe);
        pipe_free_kmem(cpipe);
        PIPE_LOCK(cpipe);
        cpipe->pipe_present = PIPE_CLOSING;
        pipeunlock(cpipe);

        /*
         * knlist_clear() may sleep dropping the PIPE_MTX.  Set
         * PIPE_FINALIZED, which allows the other end to free the
         * pipe_pair, only after the knotes are completely dismantled.
         */
        knlist_clear(&cpipe->pipe_sel.si_note, 1);
        cpipe->pipe_present = PIPE_FINALIZED;
        seldrain(&cpipe->pipe_sel);
        knlist_destroy(&cpipe->pipe_sel.si_note);

        /*
         * If both endpoints are now closed, release the memory for the
         * pipe pair.  If not, unlock.
         */
        if (ppipe->pipe_present == PIPE_FINALIZED) {
                PIPE_UNLOCK(cpipe);
#ifdef MAC
                mac_pipe_destroy(pp);
#endif
                uma_zfree(pipe_zone, cpipe->pipe_pair);
        } else
                PIPE_UNLOCK(cpipe);
}

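/*
 * Attach a knote to a pipe.  EVFILT_WRITE filters are hooked to the
 * peer endpoint (PIPE_PEER), i.e. the pipe that pipe_write() actually
 * fills, so the writer is notified when the reader drains it.
 */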
/*ARGSUSED*/
static int
pipe_kqfilter(struct file *fp, struct knote *kn)
{
        struct pipe *cpipe;

        /*
         * If a filter is requested that is not supported by this file
         * descriptor, don't return an error, but also don't ever generate an
         * event.
         */
#ifndef __rtems__
        if ((kn->kn_filter == EVFILT_READ) && !(fp->f_flag & FREAD)) {
#else /* __rtems__ */
        if ((kn->kn_filter == EVFILT_READ) &&
            !(rtems_bsd_libio_flags_to_fflag(fp->f_io.flags) & FREAD)) {
#endif /* __rtems__ */
                kn->kn_fop = &pipe_nfiltops;
                return (0);
        }
#ifndef __rtems__
        if ((kn->kn_filter == EVFILT_WRITE) && !(fp->f_flag & FWRITE)) {
#else /* __rtems__ */
        if ((kn->kn_filter == EVFILT_WRITE) &&
            !(rtems_bsd_libio_flags_to_fflag(fp->f_io.flags) & FWRITE)) {
#endif /* __rtems__ */
                kn->kn_fop = &pipe_nfiltops;
                return (0);
        }
        cpipe = fp->f_data;
        PIPE_LOCK(cpipe);
        switch (kn->kn_filter) {
        case EVFILT_READ:
                kn->kn_fop = &pipe_rfiltops;
                break;
        case EVFILT_WRITE:
                kn->kn_fop = &pipe_wfiltops;
                if (cpipe->pipe_peer->pipe_present != PIPE_ACTIVE) {
                        /* other end of pipe has been closed */
                        PIPE_UNLOCK(cpipe);
                        return (EPIPE);
                }
                cpipe = PIPE_PEER(cpipe);
                break;
        default:
                PIPE_UNLOCK(cpipe);
                return (EINVAL);
        }

        kn->kn_hook = cpipe;
        knlist_add(&cpipe->pipe_sel.si_note, kn, 1);
        PIPE_UNLOCK(cpipe);
        return (0);
}
#ifdef __rtems__
int
rtems_bsd_pipe_kqfilter(rtems_libio_t *iop, struct knote *kn)
{
        struct file *fp = rtems_bsd_iop_to_fp(iop);

        return pipe_kqfilter(fp, kn);
}
#endif /* __rtems__ */

static void
filt_pipedetach(struct knote *kn)
{
        struct pipe *cpipe = kn->kn_hook;

        PIPE_LOCK(cpipe);
        knlist_remove(&cpipe->pipe_sel.si_note, kn, 1);
        PIPE_UNLOCK(cpipe);
}

/*ARGSUSED*/
static int
filt_piperead(struct knote *kn, long hint)
{
        struct pipe *rpipe = kn->kn_hook;
        struct pipe *wpipe = rpipe->pipe_peer;
        int ret;

        PIPE_LOCK_ASSERT(rpipe, MA_OWNED);
        kn->kn_data = rpipe->pipe_buffer.cnt;
        if ((kn->kn_data == 0) && (rpipe->pipe_state & PIPE_DIRECTW))
                kn->kn_data = rpipe->pipe_map.cnt;

        if ((rpipe->pipe_state & PIPE_EOF) ||
            wpipe->pipe_present != PIPE_ACTIVE ||
            (wpipe->pipe_state & PIPE_EOF)) {
                kn->kn_flags |= EV_EOF;
                return (1);
        }
        ret = kn->kn_data > 0;
        return ret;
}

/*ARGSUSED*/
static int
filt_pipewrite(struct knote *kn, long hint)
{
        struct pipe *wpipe;

        wpipe = kn->kn_hook;
        PIPE_LOCK_ASSERT(wpipe, MA_OWNED);
        if (wpipe->pipe_present != PIPE_ACTIVE ||
            (wpipe->pipe_state & PIPE_EOF)) {
                kn->kn_data = 0;
                kn->kn_flags |= EV_EOF;
                return (1);
        }
        kn->kn_data = (wpipe->pipe_buffer.size > 0) ?
            (wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt) : PIPE_BUF;
        if (wpipe->pipe_state & PIPE_DIRECTW)
                kn->kn_data = 0;

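        /* Fire only when a full PIPE_BUF-sized write would not block. */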
        return (kn->kn_data >= PIPE_BUF);
}

static void
filt_pipedetach_notsup(struct knote *kn)
{

}

static int
filt_pipenotsup(struct knote *kn, long hint)
{

        return (0);
}
#ifdef __rtems__
static int
rtems_bsd_pipe_open(rtems_libio_t *iop, const char *path, int oflag,
    mode_t mode)
{
        return rtems_bsd_error_to_status_and_errno(ENXIO);
}

static int
rtems_bsd_pipe_close(rtems_libio_t *iop)
{
        struct file *fp = rtems_bsd_iop_to_fp(iop);
        int error = pipe_close(fp, NULL);

        return rtems_bsd_error_to_status_and_errno(error);
}

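/*
 * F_SETFL is reduced to a FIONBIO ioctl, which is itself a no-op for
 * pipes (see pipe_ioctl() above): the non-blocking state lives in the
 * libio flags, which the read and write paths translate on the fly
 * via rtems_bsd_libio_flags_to_fflag().
 */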
static int
rtems_bsd_pipe_fcntl(rtems_libio_t *iop, int cmd)
{
        int error = 0;

        if (cmd == F_SETFL) {
                struct file *fp = rtems_bsd_iop_to_fp(iop);
                int nbio = iop->flags & LIBIO_FLAGS_NO_DELAY;

                error = pipe_ioctl(fp, FIONBIO, &nbio, NULL, NULL);
        }

        return rtems_bsd_error_to_status_and_errno(error);
}
#endif /* __rtems__ */