source: rtems-libbsd/freebsd/sys/kern/sys_pipe.c @ d4bf70e

Last change: d4bf70e, checked in by Sebastian Huber <sebastian.huber@…> on 11/15/18 at 09:06:00

Disable or make static kern_* functions

[6959fac]1#include <machine/rtems-bsd-kernel-space.h>
2
3/*-
[bb80d9d]4 * SPDX-License-Identifier: BSD-4-Clause
5 *
[6959fac]6 * Copyright (c) 1996 John S. Dyson
[c40e45b]7 * Copyright (c) 2012 Giovanni Trematerra
[6959fac]8 * All rights reserved.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 *    notice immediately at the beginning of the file, without modification,
15 *    this list of conditions, and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 *    notice, this list of conditions and the following disclaimer in the
18 *    documentation and/or other materials provided with the distribution.
19 * 3. Absolutely no warranty of function or purpose is made by the author
20 *    John S. Dyson.
21 * 4. Modifications may be freely made to this file if the above conditions
22 *    are met.
23 */
24
25/*
26 * This file contains a high-performance replacement for the socket-based
27 * pipes scheme originally used in FreeBSD/4.4Lite.  It does not support
28 * all features of sockets, but does do everything that pipes normally
29 * do.
30 */
31
32/*
33 * This code has two modes of operation, a small write mode and a large
34 * write mode.  The small write mode acts like conventional pipes with
35 * a kernel buffer.  If the buffer is less than PIPE_MINDIRECT, then the
36 * "normal" pipe buffering is done.  If the buffer is between PIPE_MINDIRECT
37 * and PIPE_SIZE in size, the sending process pins the underlying pages in
38 * memory, and the receiving process copies directly from these pinned pages
39 * in the sending process.
40 *
41 * If the sending process receives a signal, it is possible that it will
42 * go away, and certainly its address space can change, because control
43 * is returned to the user-mode side.  In that case, the pipe code
44 * arranges to copy the buffer supplied by the user process, to a pageable
45 * kernel buffer, and the receiving process will grab the data from the
46 * pageable kernel buffer.  Since signals don't happen all that often,
47 * the copy operation is normally eliminated.
48 *
49 * The constant PIPE_MINDIRECT is chosen to make sure that buffering will
50 * happen for small transfers so that the system will not spend all of
51 * its time context switching.
52 *
53 * In order to limit the resource use of pipes, two sysctls exist:
54 *
55 * kern.ipc.maxpipekva - This is a hard limit on the amount of pageable
56 * address space available to us in pipe_map. This value is normally
57 * autotuned, but may also be loader tuned.
58 *
59 * kern.ipc.pipekva - This read-only sysctl tracks the current amount of
60 * memory in use by pipes.
61 *
62 * Based on how large pipekva is relative to maxpipekva, the following
63 * will happen:
64 *
65 * 0% - 50%:
66 *     New pipes are given 16K of memory backing, pipes may dynamically
67 *     grow to as large as 64K where needed.
68 * 50% - 75%:
69 *     New pipes are given 4K (or PAGE_SIZE) of memory backing,
70 *     existing pipes may NOT grow.
71 * 75% - 100%:
72 *     New pipes are given 4K (or PAGE_SIZE) of memory backing,
73 *     existing pipes will be shrunk down to 4K whenever possible.
74 *
75 * Resizing may be disabled by setting kern.ipc.piperesizeallowed=0.  If
76 * that is set, the only resize that will occur is the 0 -> SMALL_PIPE_SIZE
77 * resize which MUST occur for reverse-direction pipes when they are
78 * first used.
79 *
80 * Additional information about the current state of pipes may be obtained
81 * from kern.ipc.pipes, kern.ipc.pipefragretry, kern.ipc.pipeallocfail,
82 * and kern.ipc.piperesizefail.
83 *
84 * Locking rules:  There are two locks present here:  A mutex, used via
85 * PIPE_LOCK, and a flag, used via pipelock().  All locking is done via
86 * the flag, as mutexes cannot persist over uiomove.  The mutex
87 * exists only to guard access to the flag, and is not in itself a
88 * locking mechanism.  Also note that there is only a single mutex for
89 * both directions of a pipe.
90 *
91 * As pipelock() may have to sleep before it can acquire the flag, it
92 * is important to reread all data after a call to pipelock(); everything
93 * in the structure may have changed.
94 */
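/*
 * Illustrative sketch (added annotation, not upstream code): the locking
 * pattern described above, as used by the I/O paths in this file.
 * PIPE_LOCK takes the pair's mutex, pipelock() takes the long-term flag
 * lock, and all pipe state must be reread once pipelock() returns,
 * since it may have slept:
 *
 *	PIPE_LOCK(cpipe);
 *	error = pipelock(cpipe, 1);
 *	if (error == 0) {
 *		// Reread cpipe->pipe_state etc.; it may have changed.
 *		// ... do buffered I/O, dropping PIPE_LOCK around uiomove ...
 *		pipeunlock(cpipe);
 *	}
 *	PIPE_UNLOCK(cpipe);
 */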
95
96#include <sys/cdefs.h>
97__FBSDID("$FreeBSD$");
98
[0237319]99#include <sys/param.h>
[6959fac]100#include <sys/systm.h>
101#include <sys/conf.h>
102#include <sys/fcntl.h>
103#include <sys/file.h>
104#include <sys/filedesc.h>
105#include <sys/filio.h>
106#include <sys/kernel.h>
[3c967ca]107#include <sys/lock.h>
[6959fac]108#include <sys/mutex.h>
109#include <sys/ttycom.h>
110#include <sys/stat.h>
111#include <sys/malloc.h>
112#include <sys/poll.h>
113#include <sys/selinfo.h>
114#include <sys/signalvar.h>
115#include <sys/syscallsubr.h>
116#include <sys/sysctl.h>
117#include <sys/sysproto.h>
118#include <sys/pipe.h>
119#include <sys/proc.h>
120#include <sys/vnode.h>
121#include <sys/uio.h>
[c40e45b]122#include <sys/user.h>
[6959fac]123#include <sys/event.h>
124
125#include <security/mac/mac_framework.h>
126
127#include <vm/vm.h>
128#include <vm/vm_param.h>
129#include <vm/vm_object.h>
130#include <vm/vm_kern.h>
131#include <vm/vm_extern.h>
132#include <vm/pmap.h>
133#include <vm/vm_map.h>
134#include <vm/vm_page.h>
135#include <vm/uma.h>
136
137/*
138 * Use this define if you want to disable *fancy* VM things.  Expect an
139 * approx 30% decrease in transfer rate.  This could be useful for
140 * NetBSD or OpenBSD.
141 */
142/* #define PIPE_NODIRECT */
143
[c40e45b]144#define PIPE_PEER(pipe) \
145        (((pipe)->pipe_state & PIPE_NAMED) ? (pipe) : ((pipe)->pipe_peer))
146
[6959fac]147/*
148 * interfaces to the outside world
149 */
[b1580fb]150#ifndef __rtems__
[6959fac]151static fo_rdwr_t        pipe_read;
152static fo_rdwr_t        pipe_write;
153static fo_truncate_t    pipe_truncate;
154static fo_ioctl_t       pipe_ioctl;
155static fo_poll_t        pipe_poll;
156static fo_kqfilter_t    pipe_kqfilter;
157static fo_stat_t        pipe_stat;
158static fo_close_t       pipe_close;
[c40e45b]159static fo_chmod_t       pipe_chmod;
160static fo_chown_t       pipe_chown;
161static fo_fill_kinfo_t  pipe_fill_kinfo;
[6959fac]162
[c40e45b]163struct fileops pipeops = {
[6959fac]164        .fo_read = pipe_read,
165        .fo_write = pipe_write,
166        .fo_truncate = pipe_truncate,
167        .fo_ioctl = pipe_ioctl,
168        .fo_poll = pipe_poll,
169        .fo_kqfilter = pipe_kqfilter,
170        .fo_stat = pipe_stat,
171        .fo_close = pipe_close,
[c40e45b]172        .fo_chmod = pipe_chmod,
173        .fo_chown = pipe_chown,
174        .fo_sendfile = invfo_sendfile,
175        .fo_fill_kinfo = pipe_fill_kinfo,
[6959fac]176        .fo_flags = DFLAG_PASSABLE
177};
[b1580fb]178#else /* __rtems__ */
179#define PIPE_NODIRECT
180#define PRIBIO                  (0)
181
[17ab62c]182static int rtems_bsd_pipe_open(rtems_libio_t *iop, const char *path,
183    int oflag, mode_t mode);
[b1580fb]184static int rtems_bsd_pipe_close(rtems_libio_t *iop);
[17ab62c]185static ssize_t rtems_bsd_pipe_read(rtems_libio_t *iop, void *buffer,
186    size_t count);
187static ssize_t rtems_bsd_pipe_readv(rtems_libio_t *iop,
188    const struct iovec *iov, int iovcnt, ssize_t total);
189static ssize_t rtems_bsd_pipe_write(rtems_libio_t *iop, const void *buffer,
190    size_t count);
191static ssize_t rtems_bsd_pipe_writev(rtems_libio_t *iop,
192    const struct iovec *iov, int iovcnt, ssize_t total);
193static int rtems_bsd_pipe_ioctl(rtems_libio_t *iop, ioctl_command_t request,
194    void *buffer);
195static int rtems_bsd_pipe_stat(const rtems_filesystem_location_info_t *loc,
196    struct stat *buf);
[b1580fb]197static int rtems_bsd_pipe_fcntl(rtems_libio_t *iop, int cmd);
198static int rtems_bsd_pipe_poll(rtems_libio_t *iop, int events);
199int rtems_bsd_pipe_kqfilter(rtems_libio_t *iop, struct knote *kn);
200
201static const rtems_filesystem_file_handlers_r pipeops = {
202        .open_h = rtems_bsd_pipe_open,
203        .close_h = rtems_bsd_pipe_close,
204        .read_h = rtems_bsd_pipe_read,
205        .write_h = rtems_bsd_pipe_write,
206        .ioctl_h = rtems_bsd_pipe_ioctl,
207        .lseek_h = rtems_filesystem_default_lseek,
208        .fstat_h = rtems_bsd_pipe_stat,
209        .ftruncate_h = rtems_filesystem_default_ftruncate,
210        .fsync_h = rtems_filesystem_default_fsync_or_fdatasync,
211        .fdatasync_h = rtems_filesystem_default_fsync_or_fdatasync,
212        .fcntl_h = rtems_bsd_pipe_fcntl,
213        .poll_h = rtems_bsd_pipe_poll,
[17ab62c]214        .kqfilter_h = rtems_bsd_pipe_kqfilter,
215        .readv_h = rtems_bsd_pipe_readv,
216        .writev_h = rtems_bsd_pipe_writev,
217        .mmap_h = rtems_filesystem_default_mmap
[b1580fb]218};
219
220long    maxpipekva;                     /* Limit on pipe KVA */
221
[d4bf70e]222static int kern_pipe(struct thread *, int [2], int, struct filecaps *,
223    struct filecaps *);
[b1580fb]224#endif /* __rtems__ */
[6959fac]225
226static void     filt_pipedetach(struct knote *kn);
[c40e45b]227static void     filt_pipedetach_notsup(struct knote *kn);
228static int      filt_pipenotsup(struct knote *kn, long hint);
[6959fac]229static int      filt_piperead(struct knote *kn, long hint);
230static int      filt_pipewrite(struct knote *kn, long hint);
231
[c40e45b]232static struct filterops pipe_nfiltops = {
233        .f_isfd = 1,
234        .f_detach = filt_pipedetach_notsup,
235        .f_event = filt_pipenotsup
236};
[6959fac]237static struct filterops pipe_rfiltops = {
238        .f_isfd = 1,
239        .f_detach = filt_pipedetach,
240        .f_event = filt_piperead
241};
242static struct filterops pipe_wfiltops = {
243        .f_isfd = 1,
244        .f_detach = filt_pipedetach,
245        .f_event = filt_pipewrite
246};
247
248/*
249 * Default pipe buffer size(s); this can be kind-of large now because pipe
250 * space is pageable.  The pipe code will try to maintain locality of
251 * reference for performance reasons, so small amounts of outstanding I/O
252 * will not wipe the cache.
253 */
254#define MINPIPESIZE (PIPE_SIZE/3)
255#define MAXPIPESIZE (2*PIPE_SIZE/3)
256
257static long amountpipekva;
258static int pipefragretry;
259static int pipeallocfail;
260static int piperesizefail;
261static int piperesizeallowed = 1;
262
[c40e45b]263SYSCTL_LONG(_kern_ipc, OID_AUTO, maxpipekva, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
[6959fac]264           &maxpipekva, 0, "Pipe KVA limit");
265SYSCTL_LONG(_kern_ipc, OID_AUTO, pipekva, CTLFLAG_RD,
266           &amountpipekva, 0, "Pipe KVA usage");
267SYSCTL_INT(_kern_ipc, OID_AUTO, pipefragretry, CTLFLAG_RD,
268          &pipefragretry, 0, "Pipe allocation retries due to fragmentation");
269SYSCTL_INT(_kern_ipc, OID_AUTO, pipeallocfail, CTLFLAG_RD,
270          &pipeallocfail, 0, "Pipe allocation failures");
271SYSCTL_INT(_kern_ipc, OID_AUTO, piperesizefail, CTLFLAG_RD,
272          &piperesizefail, 0, "Pipe resize failures");
273SYSCTL_INT(_kern_ipc, OID_AUTO, piperesizeallowed, CTLFLAG_RW,
274          &piperesizeallowed, 0, "Pipe resizing allowed");
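#if 0
/*
 * Illustrative userspace sketch (not part of the original file): reading
 * the accounting sysctls declared above via sysctlbyname(3).  The program
 * layout is hypothetical; only the sysctl names come from this file.
 */
#include <sys/types.h>
#include <sys/sysctl.h>

#include <stdio.h>

int
main(void)
{
	long kva, maxkva;
	size_t len;

	len = sizeof(kva);
	if (sysctlbyname("kern.ipc.pipekva", &kva, &len, NULL, 0) != 0)
		return (1);
	len = sizeof(maxkva);
	if (sysctlbyname("kern.ipc.maxpipekva", &maxkva, &len, NULL, 0) != 0)
		return (1);
	printf("pipe KVA in use: %ld of %ld bytes\n", kva, maxkva);
	return (0);
}
#endif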
275
276static void pipeinit(void *dummy __unused);
277static void pipeclose(struct pipe *cpipe);
278static void pipe_free_kmem(struct pipe *cpipe);
[c40e45b]279static void pipe_create(struct pipe *pipe, int backing);
280static void pipe_paircreate(struct thread *td, struct pipepair **p_pp);
[6959fac]281static __inline int pipelock(struct pipe *cpipe, int catch);
282static __inline void pipeunlock(struct pipe *cpipe);
283#ifndef PIPE_NODIRECT
284static int pipe_build_write_buffer(struct pipe *wpipe, struct uio *uio);
285static void pipe_destroy_write_buffer(struct pipe *wpipe);
286static int pipe_direct_write(struct pipe *wpipe, struct uio *uio);
287static void pipe_clone_write_buffer(struct pipe *wpipe);
288#endif
289static int pipespace(struct pipe *cpipe, int size);
290static int pipespace_new(struct pipe *cpipe, int size);
291
292static int      pipe_zone_ctor(void *mem, int size, void *arg, int flags);
293static int      pipe_zone_init(void *mem, int size, int flags);
294static void     pipe_zone_fini(void *mem, int size);
295
296static uma_zone_t pipe_zone;
297static struct unrhdr *pipeino_unr;
298static dev_t pipedev_ino;
299
300SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_ANY, pipeinit, NULL);
301
302static void
303pipeinit(void *dummy __unused)
304{
305
306        pipe_zone = uma_zcreate("pipe", sizeof(struct pipepair),
307            pipe_zone_ctor, NULL, pipe_zone_init, pipe_zone_fini,
308            UMA_ALIGN_PTR, 0);
309        KASSERT(pipe_zone != NULL, ("pipe_zone not initialized"));
310        pipeino_unr = new_unrhdr(1, INT32_MAX, NULL);
311        KASSERT(pipeino_unr != NULL, ("pipe fake inodes not initialized"));
312        pipedev_ino = devfs_alloc_cdp_inode();
313        KASSERT(pipedev_ino > 0, ("pipe dev inode not initialized"));
314}
315
316static int
317pipe_zone_ctor(void *mem, int size, void *arg, int flags)
318{
319        struct pipepair *pp;
320        struct pipe *rpipe, *wpipe;
321
322        KASSERT(size == sizeof(*pp), ("pipe_zone_ctor: wrong size"));
323
324        pp = (struct pipepair *)mem;
325
326        /*
327         * We zero both pipe endpoints to make sure all the kmem pointers
328         * are NULL, flag fields are zero'd, etc.  We timestamp both
329         * endpoints with the same time.
330         */
331        rpipe = &pp->pp_rpipe;
332        bzero(rpipe, sizeof(*rpipe));
[b1580fb]333#ifndef __rtems__
[6959fac]334        vfs_timestamp(&rpipe->pipe_ctime);
[b1580fb]335#else /* __rtems__ */
336        rpipe->pipe_ctime.tv_sec = time(NULL);
337#endif /* __rtems__ */
[6959fac]338        rpipe->pipe_atime = rpipe->pipe_mtime = rpipe->pipe_ctime;
339
340        wpipe = &pp->pp_wpipe;
341        bzero(wpipe, sizeof(*wpipe));
342        wpipe->pipe_ctime = rpipe->pipe_ctime;
343        wpipe->pipe_atime = wpipe->pipe_mtime = rpipe->pipe_ctime;
344
345        rpipe->pipe_peer = wpipe;
346        rpipe->pipe_pair = pp;
347        wpipe->pipe_peer = rpipe;
348        wpipe->pipe_pair = pp;
349
350        /*
351         * Mark both endpoints as present; they will later get free'd
352         * one at a time.  When both are free'd, then the whole pair
353         * is released.
354         */
355        rpipe->pipe_present = PIPE_ACTIVE;
356        wpipe->pipe_present = PIPE_ACTIVE;
357
358        /*
359         * Eventually, the MAC Framework may initialize the label
360 * in ctor or init, but for now we do it elsewhere to avoid
361         * blocking in ctor or init.
362         */
363        pp->pp_label = NULL;
364
365        return (0);
366}
367
368static int
369pipe_zone_init(void *mem, int size, int flags)
370{
371        struct pipepair *pp;
372
373        KASSERT(size == sizeof(*pp), ("pipe_zone_init: wrong size"));
374
375        pp = (struct pipepair *)mem;
376
[c40e45b]377        mtx_init(&pp->pp_mtx, "pipe mutex", NULL, MTX_DEF | MTX_NEW);
[6959fac]378        return (0);
379}
380
381static void
382pipe_zone_fini(void *mem, int size)
383{
384        struct pipepair *pp;
385
386        KASSERT(size == sizeof(*pp), ("pipe_zone_fini: wrong size"));
387
388        pp = (struct pipepair *)mem;
389
390        mtx_destroy(&pp->pp_mtx);
391}
392
[c40e45b]393static void
394pipe_paircreate(struct thread *td, struct pipepair **p_pp)
[6959fac]395{
396        struct pipepair *pp;
397        struct pipe *rpipe, *wpipe;
398
[c40e45b]399        *p_pp = pp = uma_zalloc(pipe_zone, M_WAITOK);
[6959fac]400#ifdef MAC
401        /*
402         * The MAC label is shared between the connected endpoints.  As a
403         * result mac_pipe_init() and mac_pipe_create() are called once
404         * for the pair, and not on the endpoints.
405         */
406        mac_pipe_init(pp);
407        mac_pipe_create(td->td_ucred, pp);
408#endif
409        rpipe = &pp->pp_rpipe;
410        wpipe = &pp->pp_wpipe;
411
412        knlist_init_mtx(&rpipe->pipe_sel.si_note, PIPE_MTX(rpipe));
413        knlist_init_mtx(&wpipe->pipe_sel.si_note, PIPE_MTX(wpipe));
414
415        /* Only the forward direction pipe is backed by default */
[c40e45b]416        pipe_create(rpipe, 1);
417        pipe_create(wpipe, 0);
[6959fac]418
419        rpipe->pipe_state |= PIPE_DIRECTOK;
420        wpipe->pipe_state |= PIPE_DIRECTOK;
[c40e45b]421}
422
423void
424pipe_named_ctor(struct pipe **ppipe, struct thread *td)
425{
426        struct pipepair *pp;
427
428        pipe_paircreate(td, &pp);
429        pp->pp_rpipe.pipe_state |= PIPE_NAMED;
430        *ppipe = &pp->pp_rpipe;
431}
432
433void
434pipe_dtor(struct pipe *dpipe)
435{
436        struct pipe *peer;
437        ino_t ino;
[6959fac]438
[c40e45b]439        ino = dpipe->pipe_ino;
440        peer = (dpipe->pipe_state & PIPE_NAMED) != 0 ? dpipe->pipe_peer : NULL;
441        funsetown(&dpipe->pipe_sigio);
442        pipeclose(dpipe);
443        if (peer != NULL) {
444                funsetown(&peer->pipe_sigio);
445                pipeclose(peer);
446        }
447        if (ino != 0 && ino != (ino_t)-1)
448                free_unr(pipeino_unr, ino);
449}
450
451/*
452 * The pipe system call for the DTYPE_PIPE type of pipes.  If we fail, let
453 * the zone pick up the pieces via pipeclose().
454 */
455int
456kern_pipe(struct thread *td, int fildes[2], int flags, struct filecaps *fcaps1,
457    struct filecaps *fcaps2)
458{
459        struct file *rf, *wf;
460        struct pipe *rpipe, *wpipe;
461        struct pipepair *pp;
462        int fd, fflags, error;
463
464        pipe_paircreate(td, &pp);
465        rpipe = &pp->pp_rpipe;
466        wpipe = &pp->pp_wpipe;
467        error = falloc_caps(td, &rf, &fd, flags, fcaps1);
[6959fac]468        if (error) {
469                pipeclose(rpipe);
470                pipeclose(wpipe);
471                return (error);
472        }
[c40e45b]473        /* An extra reference on `rf' has been held for us by falloc_caps(). */
[6959fac]474        fildes[0] = fd;
475
476        fflags = FREAD | FWRITE;
477        if ((flags & O_NONBLOCK) != 0)
478                fflags |= FNONBLOCK;
479
480        /*
481         * Warning: once we've gotten past allocation of the fd for the
482         * read-side, we can only drop the read side via fdrop() in order
483         * to avoid races against processes which manage to dup() the read
484         * side while we are blocked trying to allocate the write side.
485         */
486        finit(rf, fflags, DTYPE_PIPE, rpipe, &pipeops);
[c40e45b]487        error = falloc_caps(td, &wf, &fd, flags, fcaps2);
[6959fac]488        if (error) {
[c40e45b]489                fdclose(td, rf, fildes[0]);
[72d5fa1]490#ifndef __rtems__
[6959fac]491                fdrop(rf, td);
[72d5fa1]492#endif /* __rtems__ */
[6959fac]493                /* rpipe has been closed by fdrop(). */
494                pipeclose(wpipe);
495                return (error);
496        }
[c40e45b]497        /* An extra reference on `wf' has been held for us by falloc_caps(). */
[6959fac]498        finit(wf, fflags, DTYPE_PIPE, wpipe, &pipeops);
[72d5fa1]499#ifndef __rtems__
[6959fac]500        fdrop(wf, td);
[72d5fa1]501#endif /* __rtems__ */
[6959fac]502        fildes[1] = fd;
[72d5fa1]503#ifndef __rtems__
[6959fac]504        fdrop(rf, td);
[72d5fa1]505#endif /* __rtems__ */
[6959fac]506
507        return (0);
508}
509
[c40e45b]510#ifdef COMPAT_FREEBSD10
[6959fac]511/* ARGSUSED */
512int
[c40e45b]513freebsd10_pipe(struct thread *td, struct freebsd10_pipe_args *uap __unused)
[6959fac]514{
515        int error;
516        int fildes[2];
517
[c40e45b]518        error = kern_pipe(td, fildes, 0, NULL, NULL);
[6959fac]519        if (error)
520                return (error);
521
522        td->td_retval[0] = fildes[0];
523        td->td_retval[1] = fildes[1];
524
525        return (0);
526}
[c40e45b]527#endif
528
529#ifndef __rtems__
530int
531sys_pipe2(struct thread *td, struct pipe2_args *uap)
532{
533        int error, fildes[2];
534
535        if (uap->flags & ~(O_CLOEXEC | O_NONBLOCK))
536                return (EINVAL);
537        error = kern_pipe(td, fildes, uap->flags, NULL, NULL);
538        if (error)
539                return (error);
540        error = copyout(fildes, uap->fildes, 2 * sizeof(int));
541        if (error) {
542                (void)kern_close(td, fildes[0]);
543                (void)kern_close(td, fildes[1]);
544        }
545        return (error);
546}
547#endif /* __rtems__ */
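#if 0
/*
 * Illustrative userspace sketch (not part of the original file):
 * sys_pipe2() above backs pipe2(2), which accepts only O_CLOEXEC and
 * O_NONBLOCK; any other flag fails with EINVAL.  The function name is
 * hypothetical.
 */
#include <err.h>
#include <fcntl.h>
#include <unistd.h>

static void
pipe2_example(void)
{
	int fds[2];

	if (pipe2(fds, O_CLOEXEC | O_NONBLOCK) == -1)
		err(1, "pipe2");
	(void)close(fds[0]);
	(void)close(fds[1]);
}
#endif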
548
[b1580fb]549#ifdef __rtems__
550int
551pipe(int fildes[2])
552{
553        struct thread *td = rtems_bsd_get_curthread_or_null();
554        int error;
555
556        if (td != NULL) {
[c40e45b]557                error = kern_pipe(td, fildes, 0, NULL, NULL);
[b1580fb]558        } else {
559                error = ENOMEM;
560        }
561
562        if (error == 0) {
563                return error;
564        } else {
565                rtems_set_errno_and_return_minus_one(error);
566        }
567}
568#endif /* __rtems__ */
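#if 0
/*
 * Illustrative sketch (not part of the original file): basic use of the
 * pipe() wrapper above from an RTEMS (or other POSIX) application.  The
 * function name is hypothetical.
 */
#include <err.h>
#include <unistd.h>

static void
pipe_roundtrip_example(void)
{
	int fds[2];
	char buf[5];

	if (pipe(fds) == -1)
		err(1, "pipe");
	(void)write(fds[1], "hello", 5);	/* fds[1] is the write side */
	(void)read(fds[0], buf, sizeof(buf));	/* fds[0] is the read side */
	(void)close(fds[0]);
	(void)close(fds[1]);
}
#endif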
[6959fac]569
570/*
571 * Allocate kva for the pipe circular buffer; the space is pageable.
572 * This routine will 'realloc' the size of a pipe safely; if the
573 * allocation fails, it will retain the old buffer and return
574 * ENOMEM to the caller.
575 */
576static int
[3489e3b]577pipespace_new(struct pipe *cpipe, int size)
[6959fac]578{
579        caddr_t buffer;
580        int error, cnt, firstseg;
581        static int curfail = 0;
582        static struct timeval lastfail;
583
584        KASSERT(!mtx_owned(PIPE_MTX(cpipe)), ("pipespace: pipe mutex locked"));
585        KASSERT(!(cpipe->pipe_state & PIPE_DIRECTW),
586                ("pipespace: resize of direct writes not allowed"));
587retry:
588        cnt = cpipe->pipe_buffer.cnt;
589        if (cnt > size)
590                size = cnt;
591
592        size = round_page(size);
[b1580fb]593#ifndef __rtems__
[6959fac]594        buffer = (caddr_t) vm_map_min(pipe_map);
595
596        error = vm_map_find(pipe_map, NULL, 0,
[c40e45b]597                (vm_offset_t *) &buffer, size, 0, VMFS_ANY_SPACE,
[6959fac]598                VM_PROT_ALL, VM_PROT_ALL, 0);
599        if (error != KERN_SUCCESS) {
[b1580fb]600#else /* __rtems__ */
[c40e45b]601        (void)error;
[b1580fb]602        buffer = malloc(size, M_TEMP, M_WAITOK | M_ZERO);
603        if (buffer == NULL) {
604#endif /* __rtems__ */
[6959fac]605                if ((cpipe->pipe_buffer.buffer == NULL) &&
606                        (size > SMALL_PIPE_SIZE)) {
607                        size = SMALL_PIPE_SIZE;
608                        pipefragretry++;
609                        goto retry;
610                }
611                if (cpipe->pipe_buffer.buffer == NULL) {
612                        pipeallocfail++;
613                        if (ppsratecheck(&lastfail, &curfail, 1))
614                                printf("kern.ipc.maxpipekva exceeded; see tuning(7)\n");
615                } else {
616                        piperesizefail++;
617                }
618                return (ENOMEM);
619        }
620
621        /* copy data, then free old resources if we're resizing */
622        if (cnt > 0) {
623                if (cpipe->pipe_buffer.in <= cpipe->pipe_buffer.out) {
624                        firstseg = cpipe->pipe_buffer.size - cpipe->pipe_buffer.out;
625                        bcopy(&cpipe->pipe_buffer.buffer[cpipe->pipe_buffer.out],
626                                buffer, firstseg);
627                        if ((cnt - firstseg) > 0)
628                                bcopy(cpipe->pipe_buffer.buffer, &buffer[firstseg],
629                                        cpipe->pipe_buffer.in);
630                } else {
631                        bcopy(&cpipe->pipe_buffer.buffer[cpipe->pipe_buffer.out],
632                                buffer, cnt);
633                }
634        }
635        pipe_free_kmem(cpipe);
636        cpipe->pipe_buffer.buffer = buffer;
637        cpipe->pipe_buffer.size = size;
638        cpipe->pipe_buffer.in = cnt;
639        cpipe->pipe_buffer.out = 0;
640        cpipe->pipe_buffer.cnt = cnt;
641        atomic_add_long(&amountpipekva, cpipe->pipe_buffer.size);
642        return (0);
643}
644
645/*
646 * Wrapper for pipespace_new() that performs locking assertions.
647 */
648static int
[3489e3b]649pipespace(struct pipe *cpipe, int size)
[6959fac]650{
651
652        KASSERT(cpipe->pipe_state & PIPE_LOCKFL,
653                ("Unlocked pipe passed to pipespace"));
654        return (pipespace_new(cpipe, size));
655}
656
657/*
658 * lock a pipe for I/O, blocking other access
659 */
660static __inline int
[3489e3b]661pipelock(struct pipe *cpipe, int catch)
[6959fac]662{
663        int error;
664
665        PIPE_LOCK_ASSERT(cpipe, MA_OWNED);
666        while (cpipe->pipe_state & PIPE_LOCKFL) {
667                cpipe->pipe_state |= PIPE_LWANT;
668                error = msleep(cpipe, PIPE_MTX(cpipe),
669                    catch ? (PRIBIO | PCATCH) : PRIBIO,
670                    "pipelk", 0);
671                if (error != 0)
672                        return (error);
673        }
674        cpipe->pipe_state |= PIPE_LOCKFL;
675        return (0);
676}
677
678/*
679 * unlock a pipe I/O lock
680 */
681static __inline void
[3489e3b]682pipeunlock(struct pipe *cpipe)
[6959fac]683{
684
685        PIPE_LOCK_ASSERT(cpipe, MA_OWNED);
686        KASSERT(cpipe->pipe_state & PIPE_LOCKFL,
687                ("Unlocked pipe passed to pipeunlock"));
688        cpipe->pipe_state &= ~PIPE_LOCKFL;
689        if (cpipe->pipe_state & PIPE_LWANT) {
690                cpipe->pipe_state &= ~PIPE_LWANT;
691                wakeup(cpipe);
692        }
693}
694
[c40e45b]695void
[3489e3b]696pipeselwakeup(struct pipe *cpipe)
[6959fac]697{
698
699        PIPE_LOCK_ASSERT(cpipe, MA_OWNED);
700        if (cpipe->pipe_state & PIPE_SEL) {
701                selwakeuppri(&cpipe->pipe_sel, PSOCK);
702                if (!SEL_WAITING(&cpipe->pipe_sel))
703                        cpipe->pipe_state &= ~PIPE_SEL;
704        }
[c40e45b]705#ifndef __rtems__
[6959fac]706        if ((cpipe->pipe_state & PIPE_ASYNC) && cpipe->pipe_sigio)
707                pgsigio(&cpipe->pipe_sigio, SIGIO, 0);
[c40e45b]708#endif /* __rtems__ */
[6959fac]709        KNOTE_LOCKED(&cpipe->pipe_sel.si_note, 0);
710}
711
712/*
713 * Initialize and allocate VM and memory for pipe.  The structure
714 * will start out zero'd from the ctor, so we just manage the kmem.
715 */
[c40e45b]716static void
[3489e3b]717pipe_create(struct pipe *pipe, int backing)
[6959fac]718{
719
720        if (backing) {
[c40e45b]721                /*
722                 * Note that these functions can fail if pipe map is exhausted
723                 * (as a result of too many pipes created), but we ignore the
724                 * error as it is not fatal and could be provoked by
725                 * unprivileged users. The only consequence is worse performance
726                 * with the given pipe.
727                 */
[6959fac]728                if (amountpipekva > maxpipekva / 2)
[c40e45b]729                        (void)pipespace_new(pipe, SMALL_PIPE_SIZE);
[6959fac]730                else
[c40e45b]731                        (void)pipespace_new(pipe, PIPE_SIZE);
[6959fac]732        }
[c40e45b]733
[6959fac]734        pipe->pipe_ino = -1;
735}
736
737/* ARGSUSED */
738static int
[3489e3b]739pipe_read(struct file *fp, struct uio *uio, struct ucred *active_cred,
740    int flags, struct thread *td)
[6959fac]741{
[c40e45b]742        struct pipe *rpipe;
[6959fac]743        int error;
744        int nread = 0;
745        int size;
746
[c40e45b]747        rpipe = fp->f_data;
[6959fac]748        PIPE_LOCK(rpipe);
749        ++rpipe->pipe_busy;
750        error = pipelock(rpipe, 1);
751        if (error)
752                goto unlocked_error;
753
754#ifdef MAC
755        error = mac_pipe_check_read(active_cred, rpipe->pipe_pair);
756        if (error)
757                goto locked_error;
758#endif
759        if (amountpipekva > (3 * maxpipekva) / 4) {
760                if (!(rpipe->pipe_state & PIPE_DIRECTW) &&
761                        (rpipe->pipe_buffer.size > SMALL_PIPE_SIZE) &&
762                        (rpipe->pipe_buffer.cnt <= SMALL_PIPE_SIZE) &&
763                        (piperesizeallowed == 1)) {
764                        PIPE_UNLOCK(rpipe);
765                        pipespace(rpipe, SMALL_PIPE_SIZE);
766                        PIPE_LOCK(rpipe);
767                }
768        }
769
770        while (uio->uio_resid) {
771                /*
772                 * normal pipe buffer receive
773                 */
774                if (rpipe->pipe_buffer.cnt > 0) {
775                        size = rpipe->pipe_buffer.size - rpipe->pipe_buffer.out;
776                        if (size > rpipe->pipe_buffer.cnt)
777                                size = rpipe->pipe_buffer.cnt;
778                        if (size > uio->uio_resid)
779                                size = uio->uio_resid;
780
781                        PIPE_UNLOCK(rpipe);
782                        error = uiomove(
783                            &rpipe->pipe_buffer.buffer[rpipe->pipe_buffer.out],
784                            size, uio);
785                        PIPE_LOCK(rpipe);
786                        if (error)
787                                break;
788
789                        rpipe->pipe_buffer.out += size;
790                        if (rpipe->pipe_buffer.out >= rpipe->pipe_buffer.size)
791                                rpipe->pipe_buffer.out = 0;
792
793                        rpipe->pipe_buffer.cnt -= size;
794
795                        /*
796                         * If there is no more to read in the pipe, reset
797                         * its pointers to the beginning.  This improves
798                         * cache hit stats.
799                         */
800                        if (rpipe->pipe_buffer.cnt == 0) {
801                                rpipe->pipe_buffer.in = 0;
802                                rpipe->pipe_buffer.out = 0;
803                        }
804                        nread += size;
805#ifndef PIPE_NODIRECT
806                /*
807                 * Direct copy, bypassing a kernel buffer.
808                 */
809                } else if ((size = rpipe->pipe_map.cnt) &&
810                           (rpipe->pipe_state & PIPE_DIRECTW)) {
811                        if (size > uio->uio_resid)
812                                size = (u_int) uio->uio_resid;
813
814                        PIPE_UNLOCK(rpipe);
815                        error = uiomove_fromphys(rpipe->pipe_map.ms,
816                            rpipe->pipe_map.pos, size, uio);
817                        PIPE_LOCK(rpipe);
818                        if (error)
819                                break;
820                        nread += size;
821                        rpipe->pipe_map.pos += size;
822                        rpipe->pipe_map.cnt -= size;
823                        if (rpipe->pipe_map.cnt == 0) {
[c40e45b]824                                rpipe->pipe_state &= ~(PIPE_DIRECTW|PIPE_WANTW);
[6959fac]825                                wakeup(rpipe);
826                        }
827#endif
828                } else {
829                        /*
830                         * detect EOF condition
831                         * read returns 0 on EOF, no need to set error
832                         */
833                        if (rpipe->pipe_state & PIPE_EOF)
834                                break;
835
836                        /*
837                         * If the "write-side" has been blocked, wake it up now.
838                         */
839                        if (rpipe->pipe_state & PIPE_WANTW) {
840                                rpipe->pipe_state &= ~PIPE_WANTW;
841                                wakeup(rpipe);
842                        }
843
844                        /*
845                         * Break if some data was read.
846                         */
847                        if (nread > 0)
848                                break;
849
850                        /*
851                         * Unlock the pipe buffer for our remaining processing.
852                         * We will either break out with an error or we will
853                         * sleep and relock to loop.
854                         */
855                        pipeunlock(rpipe);
856
857                        /*
858                         * Handle non-blocking mode operation or
859                         * wait for more data.
860                         */
[b1580fb]861#ifndef __rtems__
[6959fac]862                        if (fp->f_flag & FNONBLOCK) {
[b1580fb]863#else /* __rtems__ */
864                        if (rtems_bsd_libio_flags_to_fflag(fp->f_io.flags) & FNONBLOCK) {
865#endif /* __rtems__ */
[6959fac]866                                error = EAGAIN;
867                        } else {
868                                rpipe->pipe_state |= PIPE_WANTR;
869                                if ((error = msleep(rpipe, PIPE_MTX(rpipe),
870                                    PRIBIO | PCATCH,
871                                    "piperd", 0)) == 0)
872                                        error = pipelock(rpipe, 1);
873                        }
874                        if (error)
875                                goto unlocked_error;
876                }
877        }
878#ifdef MAC
879locked_error:
880#endif
881        pipeunlock(rpipe);
882
883        /* XXX: should probably do this before getting any locks. */
884        if (error == 0)
[b1580fb]885#ifndef __rtems__
[6959fac]886                vfs_timestamp(&rpipe->pipe_atime);
[b1580fb]887#else /* __rtems__ */
888                rpipe->pipe_atime.tv_sec = time(NULL);
889#endif /* __rtems__ */
[6959fac]890unlocked_error:
891        --rpipe->pipe_busy;
892
893        /*
894         * PIPE_WANT processing only makes sense if pipe_busy is 0.
895         */
896        if ((rpipe->pipe_busy == 0) && (rpipe->pipe_state & PIPE_WANT)) {
897                rpipe->pipe_state &= ~(PIPE_WANT|PIPE_WANTW);
898                wakeup(rpipe);
899        } else if (rpipe->pipe_buffer.cnt < MINPIPESIZE) {
900                /*
901                 * Handle write blocking hysteresis.
902                 */
903                if (rpipe->pipe_state & PIPE_WANTW) {
904                        rpipe->pipe_state &= ~PIPE_WANTW;
905                        wakeup(rpipe);
906                }
907        }
908
909        if ((rpipe->pipe_buffer.size - rpipe->pipe_buffer.cnt) >= PIPE_BUF)
910                pipeselwakeup(rpipe);
911
912        PIPE_UNLOCK(rpipe);
913        return (error);
914}
[b1580fb]915#ifdef __rtems__
916static ssize_t
917rtems_bsd_pipe_read(rtems_libio_t *iop, void *buffer, size_t count)
918{
919        struct thread *td = rtems_bsd_get_curthread_or_null();
920        struct file *fp = rtems_bsd_iop_to_fp(iop);
921        struct iovec iov = {
922                .iov_base = buffer,
923                .iov_len = count
924        };
925        struct uio auio = {
926                .uio_iov = &iov,
927                .uio_iovcnt = 1,
928                .uio_offset = 0,
929                .uio_resid = count,
930                .uio_segflg = UIO_USERSPACE,
931                .uio_rw = UIO_READ,
932                .uio_td = td
933        };
934        int error;
935
936        if (td != NULL) {
937                error = pipe_read(fp, &auio, NULL, 0, NULL);
938        } else {
939                error = ENOMEM;
940        }
941
942        if (error == 0) {
943                return (count - auio.uio_resid);
944        } else {
945                rtems_set_errno_and_return_minus_one(error);
946        }
947}
[17ab62c]948
949static ssize_t
950rtems_bsd_pipe_readv(rtems_libio_t *iop, const struct iovec *iov,
951    int iovcnt, ssize_t total)
952{
953        struct thread *td = rtems_bsd_get_curthread_or_null();
954        struct file *fp = rtems_bsd_iop_to_fp(iop);
955        struct uio auio = {
956                .uio_iov = __DECONST(struct iovec *, iov),
957                .uio_iovcnt = iovcnt,
958                .uio_offset = 0,
959                .uio_resid = total,
960                .uio_segflg = UIO_USERSPACE,
961                .uio_rw = UIO_READ,
962                .uio_td = td
963        };
964        int error;
965
966        if (td != NULL) {
967                error = pipe_read(fp, &auio, NULL, 0, NULL);
968        } else {
969                error = ENOMEM;
970        }
971
972        if (error == 0) {
973                return (total - auio.uio_resid);
974        } else {
975                rtems_set_errno_and_return_minus_one(error);
976        }
977}
[b1580fb]978#endif /* __rtems__ */
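#if 0
/*
 * Illustrative userspace sketch (not part of the original file): how the
 * pipe_read() results above surface to a caller.  With O_NONBLOCK set, an
 * empty pipe yields EAGAIN; once the write side is gone (PIPE_EOF),
 * read() returns 0.  The helper name and sentinel value are hypothetical.
 */
#include <errno.h>
#include <unistd.h>

static ssize_t
read_nonblocking_example(int rfd, char *buf, size_t len)
{
	ssize_t n;

	n = read(rfd, buf, len);
	if (n > 0)
		return (n);	/* got data */
	if (n == 0)
		return (0);	/* EOF: all writers closed */
	if (errno == EAGAIN)
		return (-2);	/* empty pipe; wait via poll(2) and retry */
	return (-1);		/* real error */
}
#endif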
[6959fac]979
980#ifndef PIPE_NODIRECT
981/*
982 * Map the sending process's buffer into kernel space and wire it.
983 * This is similar to a physical write operation.
984 */
985static int
[3489e3b]986pipe_build_write_buffer(struct pipe *wpipe, struct uio *uio)
[6959fac]987{
988        u_int size;
989        int i;
990
991        PIPE_LOCK_ASSERT(wpipe, MA_NOTOWNED);
992        KASSERT(wpipe->pipe_state & PIPE_DIRECTW,
993                ("Clone attempt on non-direct write pipe!"));
994
995        if (uio->uio_iov->iov_len > wpipe->pipe_buffer.size)
996                size = wpipe->pipe_buffer.size;
997        else
998                size = uio->uio_iov->iov_len;
999
1000        if ((i = vm_fault_quick_hold_pages(&curproc->p_vmspace->vm_map,
1001            (vm_offset_t)uio->uio_iov->iov_base, size, VM_PROT_READ,
1002            wpipe->pipe_map.ms, PIPENPAGES)) < 0)
1003                return (EFAULT);
1004
1005/*
1006 * set up the control block
1007 */
1008        wpipe->pipe_map.npages = i;
1009        wpipe->pipe_map.pos =
1010            ((vm_offset_t) uio->uio_iov->iov_base) & PAGE_MASK;
1011        wpipe->pipe_map.cnt = size;
1012
1013/*
1014 * and update the uio data
1015 */
1016
1017        uio->uio_iov->iov_len -= size;
1018        uio->uio_iov->iov_base = (char *)uio->uio_iov->iov_base + size;
1019        if (uio->uio_iov->iov_len == 0)
1020                uio->uio_iov++;
1021        uio->uio_resid -= size;
1022        uio->uio_offset += size;
1023        return (0);
1024}
1025
1026/*
1027 * unmap and unwire the process buffer
1028 */
1029static void
[3489e3b]1030pipe_destroy_write_buffer(struct pipe *wpipe)
[6959fac]1031{
1032
1033        PIPE_LOCK_ASSERT(wpipe, MA_OWNED);
1034        vm_page_unhold_pages(wpipe->pipe_map.ms, wpipe->pipe_map.npages);
1035        wpipe->pipe_map.npages = 0;
1036}
1037
1038/*
1039 * In the case of a signal, the writing process might go away.  This
1040 * code copies the data into the circular buffer so that the source
1041 * pages can be freed without loss of data.
1042 */
1043static void
[3489e3b]1044pipe_clone_write_buffer(struct pipe *wpipe)
[6959fac]1045{
1046        struct uio uio;
1047        struct iovec iov;
1048        int size;
1049        int pos;
1050
1051        PIPE_LOCK_ASSERT(wpipe, MA_OWNED);
1052        size = wpipe->pipe_map.cnt;
1053        pos = wpipe->pipe_map.pos;
1054
1055        wpipe->pipe_buffer.in = size;
1056        wpipe->pipe_buffer.out = 0;
1057        wpipe->pipe_buffer.cnt = size;
1058        wpipe->pipe_state &= ~PIPE_DIRECTW;
1059
1060        PIPE_UNLOCK(wpipe);
1061        iov.iov_base = wpipe->pipe_buffer.buffer;
1062        iov.iov_len = size;
1063        uio.uio_iov = &iov;
1064        uio.uio_iovcnt = 1;
1065        uio.uio_offset = 0;
1066        uio.uio_resid = size;
1067        uio.uio_segflg = UIO_SYSSPACE;
1068        uio.uio_rw = UIO_READ;
1069        uio.uio_td = curthread;
1070        uiomove_fromphys(wpipe->pipe_map.ms, pos, size, &uio);
1071        PIPE_LOCK(wpipe);
1072        pipe_destroy_write_buffer(wpipe);
1073}
1074
1075/*
1076 * This implements the pipe buffer write mechanism.  Note that only
1077 * a direct write OR a normal pipe write can be pending at any given time.
1078 * If there are any characters in the pipe buffer, the direct write will
1079 * be deferred until the receiving process grabs all of the bytes from
1080 * the pipe buffer.  Then the direct mapping write is set up.
1081 */
1082static int
[3489e3b]1083pipe_direct_write(struct pipe *wpipe, struct uio *uio)
[6959fac]1084{
1085        int error;
1086
1087retry:
1088        PIPE_LOCK_ASSERT(wpipe, MA_OWNED);
1089        error = pipelock(wpipe, 1);
[c40e45b]1090        if (error != 0)
1091                goto error1;
1092        if ((wpipe->pipe_state & PIPE_EOF) != 0) {
[6959fac]1093                error = EPIPE;
1094                pipeunlock(wpipe);
1095                goto error1;
1096        }
1097        while (wpipe->pipe_state & PIPE_DIRECTW) {
1098                if (wpipe->pipe_state & PIPE_WANTR) {
1099                        wpipe->pipe_state &= ~PIPE_WANTR;
1100                        wakeup(wpipe);
1101                }
1102                pipeselwakeup(wpipe);
1103                wpipe->pipe_state |= PIPE_WANTW;
1104                pipeunlock(wpipe);
1105                error = msleep(wpipe, PIPE_MTX(wpipe),
1106                    PRIBIO | PCATCH, "pipdww", 0);
1107                if (error)
1108                        goto error1;
1109                else
1110                        goto retry;
1111        }
1112        wpipe->pipe_map.cnt = 0;        /* transfer not ready yet */
1113        if (wpipe->pipe_buffer.cnt > 0) {
1114                if (wpipe->pipe_state & PIPE_WANTR) {
1115                        wpipe->pipe_state &= ~PIPE_WANTR;
1116                        wakeup(wpipe);
1117                }
1118                pipeselwakeup(wpipe);
1119                wpipe->pipe_state |= PIPE_WANTW;
1120                pipeunlock(wpipe);
1121                error = msleep(wpipe, PIPE_MTX(wpipe),
1122                    PRIBIO | PCATCH, "pipdwc", 0);
1123                if (error)
1124                        goto error1;
1125                else
1126                        goto retry;
1127        }
1128
1129        wpipe->pipe_state |= PIPE_DIRECTW;
1130
1131        PIPE_UNLOCK(wpipe);
1132        error = pipe_build_write_buffer(wpipe, uio);
1133        PIPE_LOCK(wpipe);
1134        if (error) {
1135                wpipe->pipe_state &= ~PIPE_DIRECTW;
1136                pipeunlock(wpipe);
1137                goto error1;
1138        }
1139
1140        error = 0;
1141        while (!error && (wpipe->pipe_state & PIPE_DIRECTW)) {
1142                if (wpipe->pipe_state & PIPE_EOF) {
1143                        pipe_destroy_write_buffer(wpipe);
1144                        pipeselwakeup(wpipe);
1145                        pipeunlock(wpipe);
1146                        error = EPIPE;
1147                        goto error1;
1148                }
1149                if (wpipe->pipe_state & PIPE_WANTR) {
1150                        wpipe->pipe_state &= ~PIPE_WANTR;
1151                        wakeup(wpipe);
1152                }
1153                pipeselwakeup(wpipe);
[c40e45b]1154                wpipe->pipe_state |= PIPE_WANTW;
[6959fac]1155                pipeunlock(wpipe);
1156                error = msleep(wpipe, PIPE_MTX(wpipe), PRIBIO | PCATCH,
1157                    "pipdwt", 0);
1158                pipelock(wpipe, 0);
1159        }
1160
1161        if (wpipe->pipe_state & PIPE_EOF)
1162                error = EPIPE;
1163        if (wpipe->pipe_state & PIPE_DIRECTW) {
1164                /*
1165                 * this bit of trickery substitutes a kernel buffer for
1166                 * the process that might be going away.
1167                 */
1168                pipe_clone_write_buffer(wpipe);
1169        } else {
1170                pipe_destroy_write_buffer(wpipe);
1171        }
1172        pipeunlock(wpipe);
1173        return (error);
1174
1175error1:
1176        wakeup(wpipe);
1177        return (error);
1178}
1179#endif
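#if 0
/*
 * Illustrative sketch (not part of the original file): the direct write
 * path above is transparent to callers.  A blocking write() from user
 * space whose segment is at least PIPE_MINDIRECT bytes (and passes the
 * checks in pipe_write() below) has its pages pinned and copied on the
 * reader side; the caller's code is unchanged.  The function name is
 * hypothetical.
 */
#include <unistd.h>

static void
large_write_example(int wfd, const char *big, size_t len)
{
	/* Assuming len >= PIPE_MINDIRECT and wfd is not O_NONBLOCK. */
	(void)write(wfd, big, len);
}
#endif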
1180
1181static int
[3489e3b]1182pipe_write(struct file *fp, struct uio *uio, struct ucred *active_cred,
1183    int flags, struct thread *td)
[6959fac]1184{
1185        int error = 0;
1186        int desiredsize;
1187        ssize_t orig_resid;
1188        struct pipe *wpipe, *rpipe;
1189
1190        rpipe = fp->f_data;
[c40e45b]1191        wpipe = PIPE_PEER(rpipe);
[6959fac]1192        PIPE_LOCK(rpipe);
1193        error = pipelock(wpipe, 1);
1194        if (error) {
1195                PIPE_UNLOCK(rpipe);
1196                return (error);
1197        }
1198        /*
1199         * detect loss of pipe read side, issue SIGPIPE if lost.
1200         */
1201        if (wpipe->pipe_present != PIPE_ACTIVE ||
1202            (wpipe->pipe_state & PIPE_EOF)) {
1203                pipeunlock(wpipe);
1204                PIPE_UNLOCK(rpipe);
1205                return (EPIPE);
1206        }
1207#ifdef MAC
1208        error = mac_pipe_check_write(active_cred, wpipe->pipe_pair);
1209        if (error) {
1210                pipeunlock(wpipe);
1211                PIPE_UNLOCK(rpipe);
1212                return (error);
1213        }
1214#endif
1215        ++wpipe->pipe_busy;
1216
1217        /* Choose a larger size if it's advantageous */
1218        desiredsize = max(SMALL_PIPE_SIZE, wpipe->pipe_buffer.size);
1219        while (desiredsize < wpipe->pipe_buffer.cnt + uio->uio_resid) {
1220                if (piperesizeallowed != 1)
1221                        break;
1222                if (amountpipekva > maxpipekva / 2)
1223                        break;
1224                if (desiredsize == BIG_PIPE_SIZE)
1225                        break;
1226                desiredsize = desiredsize * 2;
1227        }
1228
1229        /* Choose a smaller size if we're in an OOM situation */
1230        if ((amountpipekva > (3 * maxpipekva) / 4) &&
1231                (wpipe->pipe_buffer.size > SMALL_PIPE_SIZE) &&
1232                (wpipe->pipe_buffer.cnt <= SMALL_PIPE_SIZE) &&
1233                (piperesizeallowed == 1))
1234                desiredsize = SMALL_PIPE_SIZE;
1235
1236        /* Resize if the above determined that a new size was necessary */
1237        if ((desiredsize != wpipe->pipe_buffer.size) &&
1238                ((wpipe->pipe_state & PIPE_DIRECTW) == 0)) {
1239                PIPE_UNLOCK(wpipe);
1240                pipespace(wpipe, desiredsize);
1241                PIPE_LOCK(wpipe);
1242        }
1243        if (wpipe->pipe_buffer.size == 0) {
1244                /*
1245                 * This can only happen for reverse direction use of pipes
1246                 * in a complete OOM situation.
1247                 */
1248                error = ENOMEM;
1249                --wpipe->pipe_busy;
1250                pipeunlock(wpipe);
1251                PIPE_UNLOCK(wpipe);
1252                return (error);
1253        }
1254
1255        pipeunlock(wpipe);
1256
1257        orig_resid = uio->uio_resid;
1258
1259        while (uio->uio_resid) {
1260                int space;
1261
1262                pipelock(wpipe, 0);
1263                if (wpipe->pipe_state & PIPE_EOF) {
1264                        pipeunlock(wpipe);
1265                        error = EPIPE;
1266                        break;
1267                }
1268#ifndef PIPE_NODIRECT
1269                /*
1270                 * If the transfer is large, we can gain performance if
1271                 * we do process-to-process copies directly.
1272                 * If the write is non-blocking, we don't use the
1273                 * direct write mechanism.
1274                 *
1275                 * The direct write mechanism will detect the reader going
1276                 * away on us.
1277                 */
1278                if (uio->uio_segflg == UIO_USERSPACE &&
1279                    uio->uio_iov->iov_len >= PIPE_MINDIRECT &&
1280                    wpipe->pipe_buffer.size >= PIPE_MINDIRECT &&
1281                    (fp->f_flag & FNONBLOCK) == 0) {
1282                        pipeunlock(wpipe);
1283                        error = pipe_direct_write(wpipe, uio);
1284                        if (error)
1285                                break;
1286                        continue;
1287                }
1288#endif
1289
1290                /*
1291                 * Pipe buffered writes cannot be coincidental with
1292                 * direct writes.  We wait until the currently executing
1293                 * direct write is completed before we start filling the
1294                 * pipe buffer.  We break out if a signal occurs or the
1295                 * reader goes away.
1296                 */
1297                if (wpipe->pipe_state & PIPE_DIRECTW) {
1298                        if (wpipe->pipe_state & PIPE_WANTR) {
1299                                wpipe->pipe_state &= ~PIPE_WANTR;
1300                                wakeup(wpipe);
1301                        }
1302                        pipeselwakeup(wpipe);
1303                        wpipe->pipe_state |= PIPE_WANTW;
1304                        pipeunlock(wpipe);
1305                        error = msleep(wpipe, PIPE_MTX(rpipe), PRIBIO | PCATCH,
1306                            "pipbww", 0);
1307                        if (error)
1308                                break;
1309                        else
1310                                continue;
1311                }
1312
1313                space = wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt;
1314
1315                /* Writes of size <= PIPE_BUF must be atomic. */
1316                if ((space < uio->uio_resid) && (orig_resid <= PIPE_BUF))
1317                        space = 0;
1318
1319                if (space > 0) {
1320                        int size;       /* Transfer size */
1321                        int segsize;    /* first segment to transfer */
1322
1323                        /*
1324                         * Transfer size is minimum of uio transfer
1325                         * and free space in pipe buffer.
1326                         */
1327                        if (space > uio->uio_resid)
1328                                size = uio->uio_resid;
1329                        else
1330                                size = space;
1331                        /*
1332                         * First segment to transfer is minimum of
1333                         * transfer size and contiguous space in
1334                         * pipe buffer.  If first segment to transfer
1335                         * is less than the transfer size, we've got
1336                         * a wraparound in the buffer.
1337                         */
1338                        segsize = wpipe->pipe_buffer.size -
1339                                wpipe->pipe_buffer.in;
1340                        if (segsize > size)
1341                                segsize = size;
1342
1343                        /* Transfer first segment */
1344
1345                        PIPE_UNLOCK(rpipe);
1346                        error = uiomove(&wpipe->pipe_buffer.buffer[wpipe->pipe_buffer.in],
1347                                        segsize, uio);
1348                        PIPE_LOCK(rpipe);
1349
1350                        if (error == 0 && segsize < size) {
1351                                KASSERT(wpipe->pipe_buffer.in + segsize ==
1352                                        wpipe->pipe_buffer.size,
1353                                        ("Pipe buffer wraparound disappeared"));
1354                                /*
1355                                 * Transfer remaining part now, to
1356                                 * support atomic writes.  Wraparound
1357                                 * happened.
1358                                 */
1359
1360                                PIPE_UNLOCK(rpipe);
1361                                error = uiomove(
1362                                    &wpipe->pipe_buffer.buffer[0],
1363                                    size - segsize, uio);
1364                                PIPE_LOCK(rpipe);
1365                        }
1366                        if (error == 0) {
1367                                wpipe->pipe_buffer.in += size;
1368                                if (wpipe->pipe_buffer.in >=
1369                                    wpipe->pipe_buffer.size) {
1370                                        KASSERT(wpipe->pipe_buffer.in ==
1371                                                size - segsize +
1372                                                wpipe->pipe_buffer.size,
1373                                                ("Expected wraparound bad"));
1374                                        wpipe->pipe_buffer.in = size - segsize;
1375                                }
1376
1377                                wpipe->pipe_buffer.cnt += size;
1378                                KASSERT(wpipe->pipe_buffer.cnt <=
1379                                        wpipe->pipe_buffer.size,
1380                                        ("Pipe buffer overflow"));
1381                        }
1382                        pipeunlock(wpipe);
1383                        if (error != 0)
1384                                break;
1385                } else {
1386                        /*
1387                         * If the "read-side" has been blocked, wake it up now.
1388                         */
1389                        if (wpipe->pipe_state & PIPE_WANTR) {
1390                                wpipe->pipe_state &= ~PIPE_WANTR;
1391                                wakeup(wpipe);
1392                        }
1393
1394                        /*
1395                         * don't block on non-blocking I/O
1396                         */
[b1580fb]1397#ifndef __rtems__
[6959fac]1398                        if (fp->f_flag & FNONBLOCK) {
[b1580fb]1399#else /* __rtems__ */
1400                        if (rtems_bsd_libio_flags_to_fflag(fp->f_io.flags) & FNONBLOCK) {
1401#endif /* __rtems__ */
[6959fac]1402                                error = EAGAIN;
1403                                pipeunlock(wpipe);
1404                                break;
1405                        }
1406
1407                        /*
1408                         * We have no more space and have something to offer,
1409                         * wake up select/poll.
1410                         */
1411                        pipeselwakeup(wpipe);
1412
1413                        wpipe->pipe_state |= PIPE_WANTW;
1414                        pipeunlock(wpipe);
1415                        error = msleep(wpipe, PIPE_MTX(rpipe),
1416                            PRIBIO | PCATCH, "pipewr", 0);
1417                        if (error != 0)
1418                                break;
1419                }
1420        }
1421
1422        pipelock(wpipe, 0);
1423        --wpipe->pipe_busy;
1424
1425        if ((wpipe->pipe_busy == 0) && (wpipe->pipe_state & PIPE_WANT)) {
1426                wpipe->pipe_state &= ~(PIPE_WANT | PIPE_WANTR);
1427                wakeup(wpipe);
1428        } else if (wpipe->pipe_buffer.cnt > 0) {
1429                /*
1430                 * If we have put any characters in the buffer, we wake up
1431                 * the reader.
1432                 */
1433                if (wpipe->pipe_state & PIPE_WANTR) {
1434                        wpipe->pipe_state &= ~PIPE_WANTR;
1435                        wakeup(wpipe);
1436                }
1437        }
1438
1439        /*
[c40e45b]1440         * Don't return EPIPE if any byte was written.
1441         * EINTR and other interrupts are handled by generic I/O layer.
1442         * Do not pretend that I/O succeeded for obvious user error
1443         * like EFAULT.
[6959fac]1444         */
[c40e45b]1445        if (uio->uio_resid != orig_resid && error == EPIPE)
[6959fac]1446                error = 0;
1447
1448        if (error == 0)
[b1580fb]1449#ifndef __rtems__
[6959fac]1450                vfs_timestamp(&wpipe->pipe_mtime);
[b1580fb]1451#else /* __rtems__ */
1452                wpipe->pipe_mtime.tv_sec = time(NULL);
1453#endif /* __rtems__ */
[6959fac]1454
1455        /*
1456         * We have something to offer,
1457         * wake up select/poll.
1458         */
1459        if (wpipe->pipe_buffer.cnt)
1460                pipeselwakeup(wpipe);
1461
1462        pipeunlock(wpipe);
1463        PIPE_UNLOCK(rpipe);
1464        return (error);
1465}
[b1580fb]1466#ifdef __rtems__
1467static ssize_t
1468rtems_bsd_pipe_write(rtems_libio_t *iop, const void *buffer, size_t count)
1469{
1470        struct thread *td = rtems_bsd_get_curthread_or_null();
1471        struct file *fp = rtems_bsd_iop_to_fp(iop);
1472        struct iovec iov = {
1473                .iov_base = __DECONST(void *, buffer),
1474                .iov_len = count
1475        };
1476        struct uio auio = {
1477                .uio_iov = &iov,
1478                .uio_iovcnt = 1,
1479                .uio_offset = 0,
1480                .uio_resid = count,
1481                .uio_segflg = UIO_USERSPACE,
1482                .uio_rw = UIO_WRITE,
1483                .uio_td = td
1484        };
1485        int error;
1486
1487        if (td != NULL) {
1488                error = pipe_write(fp, &auio, NULL, 0, NULL);
1489        } else {
1490                error = ENOMEM;
1491        }
1492
1493        if (error == 0) {
1494                return (count - auio.uio_resid);
1495        } else {
1496                rtems_set_errno_and_return_minus_one(error);
1497        }
1498}
[17ab62c]1499
1500static ssize_t
1501rtems_bsd_pipe_writev(rtems_libio_t *iop, const struct iovec *iov,
1502    int iovcnt, ssize_t total)
1503{
1504        struct thread *td = rtems_bsd_get_curthread_or_null();
1505        struct file *fp = rtems_bsd_iop_to_fp(iop);
1506        struct uio auio = {
1507                .uio_iov = __DECONST(struct iovec *, iov),
1508                .uio_iovcnt = iovcnt,
1509                .uio_offset = 0,
1510                .uio_resid = total,
1511                .uio_segflg = UIO_USERSPACE,
1512                .uio_rw = UIO_WRITE,
1513                .uio_td = td
1514        };
1515        int error;
1516
1517        if (td != NULL) {
1518                error = pipe_write(fp, &auio, NULL, 0, NULL);
1519        } else {
1520                error = ENOMEM;
1521        }
1522
1523        if (error == 0) {
1524                return (total - auio.uio_resid);
1525        } else {
1526                rtems_set_errno_and_return_minus_one(error);
1527        }
1528}
[b1580fb]1529#endif /* __rtems__ */
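/*
 * Usage sketch (hypothetical): both wrappers above funnel into
 * pipe_write() through a single struct uio, so a scatter-gather
 * writev() behaves like one contiguous write() of the same total
 * length; hdr, body, body_len and pipe_fd are placeholder names:
 *
 *        struct iovec iov[2] = {
 *                { .iov_base = hdr,  .iov_len = sizeof(hdr) },
 *                { .iov_base = body, .iov_len = body_len }
 *        };
 *        ssize_t n = writev(pipe_fd, iov, 2);
 */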
[6959fac]1530
1531/* ARGSUSED */
[b1580fb]1532#ifndef __rtems__
[6959fac]1533static int
[3489e3b]1534pipe_truncate(struct file *fp, off_t length, struct ucred *active_cred,
1535    struct thread *td)
[6959fac]1536{
[c40e45b]1537        struct pipe *cpipe;
1538        int error;
[6959fac]1539
[c40e45b]1540        cpipe = fp->f_data;
1541        if (cpipe->pipe_state & PIPE_NAMED)
1542                error = vnops.fo_truncate(fp, length, active_cred, td);
1543        else
1544                error = invfo_truncate(fp, length, active_cred, td);
1545        return (error);
[6959fac]1546}
[b1580fb]1547#endif /* __rtems__ */
[6959fac]1548
1549/*
1550 * We implement a very minimal set of ioctls for compatibility with sockets.
1551 */
1552static int
[3489e3b]1553pipe_ioctl(struct file *fp, u_long cmd, void *data, struct ucred *active_cred,
1554    struct thread *td)
[6959fac]1555{
1556        struct pipe *mpipe = fp->f_data;
1557        int error;
1558
1559        PIPE_LOCK(mpipe);
1560
1561#ifdef MAC
1562        error = mac_pipe_check_ioctl(active_cred, mpipe->pipe_pair, cmd, data);
1563        if (error) {
1564                PIPE_UNLOCK(mpipe);
1565                return (error);
1566        }
1567#endif
1568
1569        error = 0;
1570        switch (cmd) {
1571
1572        case FIONBIO:
1573                break;
1574
1575        case FIOASYNC:
1576                if (*(int *)data) {
1577                        mpipe->pipe_state |= PIPE_ASYNC;
1578                } else {
1579                        mpipe->pipe_state &= ~PIPE_ASYNC;
1580                }
1581                break;
1582
1583        case FIONREAD:
[c40e45b]1584#ifndef __rtems__
1585                if (!(fp->f_flag & FREAD)) {
1586#else /* __rtems__ */
1587                if (!(rtems_bsd_libio_flags_to_fflag(fp->f_io.flags) & FREAD)) {
1588#endif /* __rtems__ */
1589                        *(int *)data = 0;
1590                        PIPE_UNLOCK(mpipe);
1591                        return (0);
1592                }
[6959fac]1593                if (mpipe->pipe_state & PIPE_DIRECTW)
1594                        *(int *)data = mpipe->pipe_map.cnt;
1595                else
1596                        *(int *)data = mpipe->pipe_buffer.cnt;
1597                break;
1598
1599        case FIOSETOWN:
1600                PIPE_UNLOCK(mpipe);
1601                error = fsetown(*(int *)data, &mpipe->pipe_sigio);
1602                goto out_unlocked;
1603
1604        case FIOGETOWN:
1605                *(int *)data = fgetown(&mpipe->pipe_sigio);
1606                break;
1607
1608        /* This is deprecated; FIOSETOWN should be used instead. */
1609        case TIOCSPGRP:
1610                PIPE_UNLOCK(mpipe);
1611                error = fsetown(-(*(int *)data), &mpipe->pipe_sigio);
1612                goto out_unlocked;
1613
1614        /* This is deprecated; FIOGETOWN should be used instead. */
1615        case TIOCGPGRP:
1616                *(int *)data = -fgetown(&mpipe->pipe_sigio);
1617                break;
1618
1619        default:
1620                error = ENOTTY;
1621                break;
1622        }
1623        PIPE_UNLOCK(mpipe);
1624out_unlocked:
1625        return (error);
1626}
[b1580fb]1627#ifdef __rtems__
1628static int
1629rtems_bsd_pipe_ioctl(rtems_libio_t *iop, ioctl_command_t request, void *buffer)
1630{
1631        struct thread *td = rtems_bsd_get_curthread_or_null();
1632        struct file *fp = rtems_bsd_iop_to_fp(iop);
1633        int error;
1634
1635        if (td != NULL) {
1636                error = pipe_ioctl(fp, request, buffer, NULL, td);
1637        } else {
1638                error = ENOMEM;
1639        }
1640
1641        return rtems_bsd_error_to_status_and_errno(error);
1642}
1643#endif /* __rtems__ */
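/*
 * Usage sketch (hypothetical): the minimal ioctl set above mirrors the
 * socket ioctls, so portable code like the following also works on a
 * pipe descriptor (pipe_fd is a placeholder):
 *
 *        int nread;
 *        ioctl(pipe_fd, FIONREAD, &nread);
 *
 * On success nread holds the number of bytes currently buffered for
 * reading (0 if the descriptor was not opened with FREAD).
 */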
[6959fac]1644
1645static int
[3489e3b]1646pipe_poll(struct file *fp, int events, struct ucred *active_cred,
1647    struct thread *td)
[6959fac]1648{
[c40e45b]1649        struct pipe *rpipe;
[6959fac]1650        struct pipe *wpipe;
[c40e45b]1651        int levents, revents;
[6959fac]1652#ifdef MAC
1653        int error;
1654#endif
1655
[c40e45b]1656        revents = 0;
1657        rpipe = fp->f_data;
1658        wpipe = PIPE_PEER(rpipe);
[6959fac]1659        PIPE_LOCK(rpipe);
1660#ifdef MAC
1661        error = mac_pipe_check_poll(active_cred, rpipe->pipe_pair);
1662        if (error)
1663                goto locked_error;
1664#endif
[c40e45b]1665#ifndef __rtems__
1666        if (fp->f_flag & FREAD && events & (POLLIN | POLLRDNORM))
1667#else /* __rtems__ */
1668        if (rtems_bsd_libio_flags_to_fflag(fp->f_io.flags) & FREAD && events & (POLLIN | POLLRDNORM))
1669#endif /* __rtems__ */
[6959fac]1670                if ((rpipe->pipe_state & PIPE_DIRECTW) ||
1671                    (rpipe->pipe_buffer.cnt > 0))
1672                        revents |= events & (POLLIN | POLLRDNORM);
1673
[c40e45b]1674#ifndef __rtems__
1675        if (fp->f_flag & FWRITE && events & (POLLOUT | POLLWRNORM))
1676#else /* __rtems__ */
1677        if (rtems_bsd_libio_flags_to_fflag(fp->f_io.flags) & FWRITE && events & (POLLOUT | POLLWRNORM))
1678#endif /* __rtems__ */
[6959fac]1679                if (wpipe->pipe_present != PIPE_ACTIVE ||
1680                    (wpipe->pipe_state & PIPE_EOF) ||
1681                    (((wpipe->pipe_state & PIPE_DIRECTW) == 0) &&
1682                     ((wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt) >= PIPE_BUF ||
1683                         wpipe->pipe_buffer.size == 0)))
1684                        revents |= events & (POLLOUT | POLLWRNORM);
1685
[c40e45b]1686        levents = events &
1687            (POLLIN | POLLINIGNEOF | POLLPRI | POLLRDNORM | POLLRDBAND);
1688#ifndef __rtems__
1689        if (rpipe->pipe_state & PIPE_NAMED && fp->f_flag & FREAD && levents &&
1690            fp->f_seqcount == rpipe->pipe_wgen)
1691#else /* __rtems__ */
1692        if (rpipe->pipe_state & PIPE_NAMED && rtems_bsd_libio_flags_to_fflag(fp->f_io.flags) & FREAD && levents)
1693#endif /* __rtems__ */
1694                events |= POLLINIGNEOF;
1695
[6959fac]1696        if ((events & POLLINIGNEOF) == 0) {
1697                if (rpipe->pipe_state & PIPE_EOF) {
1698                        revents |= (events & (POLLIN | POLLRDNORM));
1699                        if (wpipe->pipe_present != PIPE_ACTIVE ||
1700                            (wpipe->pipe_state & PIPE_EOF))
1701                                revents |= POLLHUP;
1702                }
1703        }
1704
1705        if (revents == 0) {
[c40e45b]1706#ifndef __rtems__
1707                if (fp->f_flag & FREAD && events & (POLLIN | POLLRDNORM)) {
1708#else /* __rtems__ */
1709                if (rtems_bsd_libio_flags_to_fflag(fp->f_io.flags) & FREAD && events & (POLLIN | POLLRDNORM)) {
1710#endif /* __rtems__ */
[6959fac]1711                        selrecord(td, &rpipe->pipe_sel);
1712                        if (SEL_WAITING(&rpipe->pipe_sel))
1713                                rpipe->pipe_state |= PIPE_SEL;
1714                }
1715
[c40e45b]1716#ifndef __rtems__
1717                if (fp->f_flag & FWRITE && events & (POLLOUT | POLLWRNORM)) {
1718#else /* __rtems__ */
1719                if (rtems_bsd_libio_flags_to_fflag(fp->f_io.flags) & FWRITE && events & (POLLOUT | POLLWRNORM)) {
1720#endif /* __rtems__ */
[6959fac]1721                        selrecord(td, &wpipe->pipe_sel);
1722                        if (SEL_WAITING(&wpipe->pipe_sel))
1723                                wpipe->pipe_state |= PIPE_SEL;
1724                }
1725        }
1726#ifdef MAC
1727locked_error:
1728#endif
1729        PIPE_UNLOCK(rpipe);
1730
1731        return (revents);
1732}
[b1580fb]1733#ifdef __rtems__
1734static int
1735rtems_bsd_pipe_poll(rtems_libio_t *iop, int events)
1736{
1737        struct thread *td = rtems_bsd_get_curthread_or_null();
1738        struct file *fp = rtems_bsd_iop_to_fp(iop);
1739        int error;
1740
1741        if (td != NULL) {
1742                error = pipe_poll(fp, events, NULL, td);
1743        } else {
1744                error = ENOMEM;
1745        }
1746
1747        return error;
1748}
1749#endif /* __rtems__ */
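/*
 * Illustrative sketch (hypothetical, userland view): pipe_poll() above
 * reports POLLIN once data is buffered, POLLOUT while a PIPE_BUF-sized
 * write could proceed, and POLLHUP once the write side is gone.
 * pipe_fd is a placeholder name:
 *
 *        struct pollfd pfd = { .fd = pipe_fd, .events = POLLIN };
 *        if (poll(&pfd, 1, -1) == 1 && (pfd.revents & POLLHUP) != 0) {
 *                (draining any remaining data, then closing, is up
 *                to the caller)
 *        }
 */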
[6959fac]1750
1751/*
1752 * We shouldn't need locks here, as we're doing a read and this
1753 * should be a naturally benign race.
1754 */
[b1580fb]1755#ifndef __rtems__
[6959fac]1756static int
[3489e3b]1757pipe_stat(struct file *fp, struct stat *ub, struct ucred *active_cred,
1758    struct thread *td)
[6959fac]1759{
1760        struct pipe *pipe;
[b1580fb]1761#else /* __rtems__ */
1762static int
1763pipe_stat(struct pipe *pipe, struct stat *ub)
1764{
1765#endif /* __rtems__ */
[6959fac]1766        int new_unr;
1767#ifdef MAC
1768        int error;
1769#endif
1770
[b1580fb]1771#ifndef __rtems__
[6959fac]1772        pipe = fp->f_data;
[b1580fb]1773#endif /* __rtems__ */
[6959fac]1774        PIPE_LOCK(pipe);
1775#ifdef MAC
1776        error = mac_pipe_check_stat(active_cred, pipe->pipe_pair);
1777        if (error) {
1778                PIPE_UNLOCK(pipe);
1779                return (error);
1780        }
1781#endif
[c40e45b]1782
1783        /* For named pipes, ask the underlying filesystem. */
1784        if (pipe->pipe_state & PIPE_NAMED) {
1785                PIPE_UNLOCK(pipe);
1786#ifndef __rtems__
1787                return (vnops.fo_stat(fp, ub, active_cred, td));
1788#else /* __rtems__ */
1789                return (ENXIO);
1790#endif /* __rtems__ */
1791        }
1792
[6959fac]1793        /*
1794         * Lazily allocate an inode number for the pipe.  Most pipe
1795         * users do not call fstat(2) on the pipe, which means that
1796         * postponing the inode allocation until it must be
1797         * returned to userland is useful.  If alloc_unr failed,
1798         * assign st_ino zero instead of returning an error.
1799         * Special pipe_ino values:
1800         *  -1 - not yet initialized;
1801         *  0  - alloc_unr failed, return 0 as st_ino forever.
1802         */
1803        if (pipe->pipe_ino == (ino_t)-1) {
1804                new_unr = alloc_unr(pipeino_unr);
1805                if (new_unr != -1)
1806                        pipe->pipe_ino = new_unr;
1807                else
1808                        pipe->pipe_ino = 0;
1809        }
1810        PIPE_UNLOCK(pipe);
1811
[b1580fb]1812#ifndef __rtems__
[6959fac]1813        bzero(ub, sizeof(*ub));
[b1580fb]1814#endif /* __rtems__ */
[6959fac]1815        ub->st_mode = S_IFIFO;
1816        ub->st_blksize = PAGE_SIZE;
1817        if (pipe->pipe_state & PIPE_DIRECTW)
1818                ub->st_size = pipe->pipe_map.cnt;
1819        else
1820                ub->st_size = pipe->pipe_buffer.cnt;
[c40e45b]1821        ub->st_blocks = howmany(ub->st_size, ub->st_blksize);
[6959fac]1822        ub->st_atim = pipe->pipe_atime;
1823        ub->st_mtim = pipe->pipe_mtime;
1824        ub->st_ctim = pipe->pipe_ctime;
[b1580fb]1825#ifndef __rtems__
[6959fac]1826        ub->st_uid = fp->f_cred->cr_uid;
1827        ub->st_gid = fp->f_cred->cr_gid;
1828        ub->st_dev = pipedev_ino;
1829        ub->st_ino = pipe->pipe_ino;
[b1580fb]1830#else /* __rtems__ */
1831        ub->st_uid = BSD_DEFAULT_UID;
1832        ub->st_gid = BSD_DEFAULT_GID;
1833        ub->st_dev = rtems_filesystem_make_dev_t(0xcc494cd6U, 0x1d970b4dU);
1834        ub->st_ino = pipe->pipe_ino;
1835#endif /* __rtems__ */
[6959fac]1836        /*
1837         * Left as 0: st_nlink, st_rdev, st_flags, st_gen.
1838         */
1839        return (0);
1840}
[b1580fb]1841#ifdef __rtems__
1842static int
1843rtems_bsd_pipe_stat(
1844        const rtems_filesystem_location_info_t *loc,
1845        struct stat *buf
1846)
1847{
1848        struct pipe *pipe = rtems_bsd_loc_to_f_data(loc);
1849        int error = pipe_stat(pipe, buf);
1850
1851        return rtems_bsd_error_to_status_and_errno(error);
1852}
1853#endif /* __rtems__ */
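/*
 * Usage sketch (hypothetical): the lazy st_ino allocation above only
 * runs once someone actually asks for the attributes, e.g. (pipe_fd is
 * a placeholder):
 *
 *        struct stat sb;
 *        if (fstat(pipe_fd, &sb) == 0 && S_ISFIFO(sb.st_mode)) {
 *                (sb.st_size is the byte count currently buffered;
 *                sb.st_ino was assigned on this first fstat() call)
 *        }
 */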
[6959fac]1854
1855/* ARGSUSED */
1856static int
[3489e3b]1857pipe_close(struct file *fp, struct thread *td)
[6959fac]1858{
1859
[b1580fb]1860#ifndef __rtems__
[c40e45b]1861        if (fp->f_vnode != NULL)
1862                return vnops.fo_close(fp, td);
[6959fac]1863        fp->f_ops = &badfileops;
[b1580fb]1864#else /* __rtems__ */
1865        fp->f_io.pathinfo.handlers = &rtems_filesystem_handlers_default;
1866#endif /* __rtems__ */
[c40e45b]1867        pipe_dtor(fp->f_data);
[6959fac]1868        fp->f_data = NULL;
1869        return (0);
1870}
1871
[c40e45b]1872#ifndef __rtems__
1873static int
1874pipe_chmod(struct file *fp, mode_t mode, struct ucred *active_cred, struct thread *td)
1875{
1876        struct pipe *cpipe;
1877        int error;
1878
1879        cpipe = fp->f_data;
1880        if (cpipe->pipe_state & PIPE_NAMED)
1881                error = vn_chmod(fp, mode, active_cred, td);
1882        else
1883                error = invfo_chmod(fp, mode, active_cred, td);
1884        return (error);
1885}
1886
1887static int
[3489e3b]1888pipe_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred,
1889    struct thread *td)
[c40e45b]1890{
1891        struct pipe *cpipe;
1892        int error;
1893
1894        cpipe = fp->f_data;
1895        if (cpipe->pipe_state & PIPE_NAMED)
1896                error = vn_chown(fp, uid, gid, active_cred, td);
1897        else
1898                error = invfo_chown(fp, uid, gid, active_cred, td);
1899        return (error);
1900}
1901
1902static int
1903pipe_fill_kinfo(struct file *fp, struct kinfo_file *kif, struct filedesc *fdp)
1904{
1905        struct pipe *pi;
1906
1907        if (fp->f_type == DTYPE_FIFO)
1908                return (vn_fill_kinfo(fp, kif, fdp));
1909        kif->kf_type = KF_TYPE_PIPE;
1910        pi = fp->f_data;
1911        kif->kf_un.kf_pipe.kf_pipe_addr = (uintptr_t)pi;
1912        kif->kf_un.kf_pipe.kf_pipe_peer = (uintptr_t)pi->pipe_peer;
1913        kif->kf_un.kf_pipe.kf_pipe_buffer_cnt = pi->pipe_buffer.cnt;
1914        return (0);
1915}
1916#endif /* __rtems__ */
1917
[6959fac]1918static void
[3489e3b]1919pipe_free_kmem(struct pipe *cpipe)
[6959fac]1920{
1921
1922        KASSERT(!mtx_owned(PIPE_MTX(cpipe)),
1923            ("pipe_free_kmem: pipe mutex locked"));
1924
1925        if (cpipe->pipe_buffer.buffer != NULL) {
1926                atomic_subtract_long(&amountpipekva, cpipe->pipe_buffer.size);
[b1580fb]1927#ifndef __rtems__
[6959fac]1928                vm_map_remove(pipe_map,
1929                    (vm_offset_t)cpipe->pipe_buffer.buffer,
1930                    (vm_offset_t)cpipe->pipe_buffer.buffer + cpipe->pipe_buffer.size);
[b1580fb]1931#else /* __rtems__ */
1932                free(cpipe->pipe_buffer.buffer, M_TEMP);
1933#endif /* __rtems__ */
[6959fac]1934                cpipe->pipe_buffer.buffer = NULL;
1935        }
1936#ifndef PIPE_NODIRECT
1937        {
1938                cpipe->pipe_map.cnt = 0;
1939                cpipe->pipe_map.pos = 0;
1940                cpipe->pipe_map.npages = 0;
1941        }
1942#endif
1943}
1944
1945/*
1946 * Shut down the pipe.
1947 */
1948static void
[3489e3b]1949pipeclose(struct pipe *cpipe)
[6959fac]1950{
1951        struct pipepair *pp;
1952        struct pipe *ppipe;
1953
1954        KASSERT(cpipe != NULL, ("pipeclose: cpipe == NULL"));
1955
1956        PIPE_LOCK(cpipe);
1957        pipelock(cpipe, 0);
1958        pp = cpipe->pipe_pair;
1959
1960        pipeselwakeup(cpipe);
1961
1962        /*
1963         * If the other side is blocked, wake it up saying that
1964         * we want to close it down.
1965         */
1966        cpipe->pipe_state |= PIPE_EOF;
1967        while (cpipe->pipe_busy) {
1968                wakeup(cpipe);
1969                cpipe->pipe_state |= PIPE_WANT;
1970                pipeunlock(cpipe);
1971                msleep(cpipe, PIPE_MTX(cpipe), PRIBIO, "pipecl", 0);
1972                pipelock(cpipe, 0);
1973        }
1974
1976        /*
1977         * Disconnect from peer, if any.
1978         */
1979        ppipe = cpipe->pipe_peer;
1980        if (ppipe->pipe_present == PIPE_ACTIVE) {
1981                pipeselwakeup(ppipe);
1982
1983                ppipe->pipe_state |= PIPE_EOF;
1984                wakeup(ppipe);
1985                KNOTE_LOCKED(&ppipe->pipe_sel.si_note, 0);
1986        }
1987
1988        /*
1989         * Mark this endpoint as free.  Release kmem resources.  We
1990         * don't mark this endpoint as unused until we've finished
1991         * doing that, or the pipe might disappear out from under
1992         * us.
1993         */
1994        PIPE_UNLOCK(cpipe);
1995        pipe_free_kmem(cpipe);
1996        PIPE_LOCK(cpipe);
1997        cpipe->pipe_present = PIPE_CLOSING;
1998        pipeunlock(cpipe);
1999
2000        /*
2001         * knlist_clear() may sleep, dropping the PIPE_MTX.  Set
2002         * PIPE_FINALIZED, which allows the other end to free the
2003         * pipe_pair, only after the knotes are completely dismantled.
2004         */
2005        knlist_clear(&cpipe->pipe_sel.si_note, 1);
2006        cpipe->pipe_present = PIPE_FINALIZED;
2007        seldrain(&cpipe->pipe_sel);
2008        knlist_destroy(&cpipe->pipe_sel.si_note);
2009
2010        /*
2011         * If both endpoints are now closed, release the memory for the
2012         * pipe pair.  If not, unlock.
2013         */
2014        if (ppipe->pipe_present == PIPE_FINALIZED) {
2015                PIPE_UNLOCK(cpipe);
2016#ifdef MAC
2017                mac_pipe_destroy(pp);
2018#endif
2019                uma_zfree(pipe_zone, cpipe->pipe_pair);
2020        } else
2021                PIPE_UNLOCK(cpipe);
2022}
2023
2024/*ARGSUSED*/
2025static int
2026pipe_kqfilter(struct file *fp, struct knote *kn)
2027{
2028        struct pipe *cpipe;
2029
[c40e45b]2030        /*
2031         * If a filter is requested that is not supported by this file
2032         * descriptor, don't return an error, but also don't ever generate an
2033         * event.
2034         */
2035#ifndef __rtems__
2036        if ((kn->kn_filter == EVFILT_READ) && !(fp->f_flag & FREAD)) {
2037#else /* __rtems__ */
2038        if ((kn->kn_filter == EVFILT_READ) && !(rtems_bsd_libio_flags_to_fflag(fp->f_io.flags) & FREAD)) {
2039#endif /* __rtems__ */
2040                kn->kn_fop = &pipe_nfiltops;
2041                return (0);
2042        }
2043#ifndef __rtems__
2044        if ((kn->kn_filter == EVFILT_WRITE) && !(fp->f_flag & FWRITE)) {
2045#else /* __rtems__ */
2046        if ((kn->kn_filter == EVFILT_WRITE) && !(rtems_bsd_libio_flags_to_fflag(fp->f_io.flags) & FWRITE)) {
2047#endif /* __rtems__ */
2048                kn->kn_fop = &pipe_nfiltops;
2049                return (0);
2050        }
2051        cpipe = fp->f_data;
[6959fac]2052        PIPE_LOCK(cpipe);
2053        switch (kn->kn_filter) {
2054        case EVFILT_READ:
2055                kn->kn_fop = &pipe_rfiltops;
2056                break;
2057        case EVFILT_WRITE:
2058                kn->kn_fop = &pipe_wfiltops;
2059                if (cpipe->pipe_peer->pipe_present != PIPE_ACTIVE) {
2060                        /* other end of pipe has been closed */
2061                        PIPE_UNLOCK(cpipe);
2062                        return (EPIPE);
2063                }
[c40e45b]2064                cpipe = PIPE_PEER(cpipe);
[6959fac]2065                break;
2066        default:
2067                PIPE_UNLOCK(cpipe);
2068                return (EINVAL);
2069        }
2070
[c40e45b]2071        kn->kn_hook = cpipe;
[6959fac]2072        knlist_add(&cpipe->pipe_sel.si_note, kn, 1);
2073        PIPE_UNLOCK(cpipe);
2074        return (0);
2075}
[b1580fb]2076#ifdef __rtems__
2077int
2078rtems_bsd_pipe_kqfilter(rtems_libio_t *iop, struct knote *kn)
2079{
2080        struct file *fp = rtems_bsd_iop_to_fp(iop);
2081
2082        return pipe_kqfilter(fp, kn);
2083}
2084#endif /* __rtems__ */
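/*
 * Illustrative sketch (hypothetical): an EVFILT_READ registration via
 * kevent() lands in pipe_kqfilter() above; a filter the descriptor's
 * open mode cannot satisfy is silently bound to the "notsup" filterops
 * and simply never fires.  kq and pipe_fd are placeholder names:
 *
 *        struct kevent ev;
 *        EV_SET(&ev, pipe_fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *        kevent(kq, &ev, 1, NULL, 0, NULL);
 */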
[6959fac]2085
2086static void
2087filt_pipedetach(struct knote *kn)
2088{
[c40e45b]2089        struct pipe *cpipe = kn->kn_hook;
[6959fac]2090
2091        PIPE_LOCK(cpipe);
2092        knlist_remove(&cpipe->pipe_sel.si_note, kn, 1);
2093        PIPE_UNLOCK(cpipe);
2094}
2095
2096/*ARGSUSED*/
2097static int
2098filt_piperead(struct knote *kn, long hint)
2099{
[c40e45b]2100        struct pipe *rpipe = kn->kn_hook;
[6959fac]2101        struct pipe *wpipe = rpipe->pipe_peer;
2102        int ret;
2103
[c40e45b]2104        PIPE_LOCK_ASSERT(rpipe, MA_OWNED);
[6959fac]2105        kn->kn_data = rpipe->pipe_buffer.cnt;
2106        if ((kn->kn_data == 0) && (rpipe->pipe_state & PIPE_DIRECTW))
2107                kn->kn_data = rpipe->pipe_map.cnt;
2108
2109        if ((rpipe->pipe_state & PIPE_EOF) ||
2110            wpipe->pipe_present != PIPE_ACTIVE ||
2111            (wpipe->pipe_state & PIPE_EOF)) {
2112                kn->kn_flags |= EV_EOF;
2113                return (1);
2114        }
2115        ret = kn->kn_data > 0;
2116        return (ret);
2117}
2118
2119/*ARGSUSED*/
2120static int
2121filt_pipewrite(struct knote *kn, long hint)
2122{
[c40e45b]2123        struct pipe *wpipe;
2124
2125        wpipe = kn->kn_hook;
2126        PIPE_LOCK_ASSERT(wpipe, MA_OWNED);
[6959fac]2127        if (wpipe->pipe_present != PIPE_ACTIVE ||
2128            (wpipe->pipe_state & PIPE_EOF)) {
2129                kn->kn_data = 0;
2130                kn->kn_flags |= EV_EOF;
2131                return (1);
2132        }
2133        kn->kn_data = (wpipe->pipe_buffer.size > 0) ?
2134            (wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt) : PIPE_BUF;
2135        if (wpipe->pipe_state & PIPE_DIRECTW)
2136                kn->kn_data = 0;
2137
2138        return (kn->kn_data >= PIPE_BUF);
2139}
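/*
 * Note on the PIPE_BUF threshold above: POSIX guarantees that writes
 * of up to PIPE_BUF bytes are atomic, so the write filter only reports
 * the pipe as writable once a full PIPE_BUF-sized write could proceed
 * without blocking; a closed peer reports EV_EOF instead.
 */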
[c40e45b]2140
2141static void
2142filt_pipedetach_notsup(struct knote *kn)
2143{
2144
2145}
2146
2147static int
2148filt_pipenotsup(struct knote *kn, long hint)
2149{
2150
2151        return (0);
2152}
[b1580fb]2153#ifdef __rtems__
2154static int
2155rtems_bsd_pipe_open(rtems_libio_t *iop, const char *path, int oflag,
2156    mode_t mode)
2157{
2158        return rtems_bsd_error_to_status_and_errno(ENXIO);
2159}
2160
2161static int
2162rtems_bsd_pipe_close(rtems_libio_t *iop)
2163{
2164        struct file *fp = rtems_bsd_iop_to_fp(iop);
2165        int error = pipe_close(fp, NULL);
2166
2167        return rtems_bsd_error_to_status_and_errno(error);
2168}
2169
2170static int
2171rtems_bsd_pipe_fcntl(rtems_libio_t *iop, int cmd)
2172{
2173        int error = 0;
2174
2175        if (cmd == F_SETFL) {
2176                struct file *fp = rtems_bsd_iop_to_fp(iop);
2177                int nbio = iop->flags & LIBIO_FLAGS_NO_DELAY;
2178
2179                error = pipe_ioctl(fp, FIONBIO, &nbio, NULL, NULL);
2180        }
2181
2182        return rtems_bsd_error_to_status_and_errno(error);
2183}
2184#endif /* __rtems__ */
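/*
 * Usage sketch (hypothetical): rtems_bsd_pipe_fcntl() translates the
 * iop's non-blocking flag into the FIONBIO ioctl above, so the usual
 * POSIX idiom works unchanged (pipe_fd is a placeholder):
 *
 *        int fl = fcntl(pipe_fd, F_GETFL);
 *        fcntl(pipe_fd, F_SETFL, fl | O_NONBLOCK);
 */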