source: rtems-libbsd/freebsd/sys/sys/pcpu.h @ d652c3b

Last change on this file: d652c3b, checked in by Sebastian Huber <sebastian.huber@…> on 10/09/13 at 12:03:56

Avoid per-CPU NETISR(9)

SMP support should be enabled once the new stack is ready for this.

/*-
 * Copyright (c) 2001 Wind River Systems, Inc.
 * All rights reserved.
 * Written by: John Baldwin <jhb@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _SYS_PCPU_H_
#define _SYS_PCPU_H_

#ifdef LOCORE
#error "no assembler-serviceable parts inside"
#endif

#include <sys/queue.h>
#include <sys/vmmeter.h>
#include <rtems/bsd/sys/resource.h>
#include <machine/pcpu.h>

/*
 * Define a set for pcpu data.
 */
extern uintptr_t *__start_set_pcpu;
extern uintptr_t *__stop_set_pcpu;

/*
 * Array of dynamic pcpu base offsets.  Indexed by id.
 */
extern uintptr_t dpcpu_off[];

/*
 * Convenience defines.
 */
#define DPCPU_START             ((uintptr_t)&__start_set_pcpu)
#define DPCPU_STOP              ((uintptr_t)&__stop_set_pcpu)
#define DPCPU_BYTES             (DPCPU_STOP - DPCPU_START)
#define DPCPU_MODMIN            2048
#define DPCPU_SIZE              roundup2(DPCPU_BYTES, PAGE_SIZE)
#define DPCPU_MODSIZE           (DPCPU_SIZE - (DPCPU_BYTES - DPCPU_MODMIN))

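/*
 * Illustrative arithmetic, not part of the original header: assuming a
 * 4096-byte PAGE_SIZE and a "set_pcpu" linker set holding 3000 bytes,
 * DPCPU_BYTES is 3000, DPCPU_SIZE rounds that up to one 4096-byte page,
 * and DPCPU_MODSIZE = 4096 - (3000 - 2048) = 3144 bytes of that page
 * remain accounted to per-CPU data added later (e.g. by kernel modules).
 */
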
/*
 * Declaration and definition.
 */
#define DPCPU_NAME(n)           pcpu_entry_##n
#define DPCPU_DECLARE(t, n)     extern t DPCPU_NAME(n)
#define DPCPU_DEFINE(t, n)      t DPCPU_NAME(n) __section("set_pcpu") __used

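/*
 * Illustrative sketch, not part of the original header: a hypothetical
 * per-CPU counter named "example_pktcount".  The definition would normally
 * live in a single .c file and the declaration in a shared header; the name
 * is invented here purely to show how the macros are used.
 */
#if 0
DPCPU_DECLARE(long, example_pktcount); /* in a header: extern declaration */
DPCPU_DEFINE(long, example_pktcount);  /* in one .c file: placed in "set_pcpu" */
#endif
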
/*
 * Accessors with a given base.
 */
#ifndef __rtems__
#define _DPCPU_PTR(b, n)                                                \
    (__typeof(DPCPU_NAME(n))*)((b) + (uintptr_t)&DPCPU_NAME(n))
#else /* __rtems__ */
#define _DPCPU_PTR(b, n) NULL
#endif /* __rtems__ */
#define _DPCPU_GET(b, n)        (*_DPCPU_PTR(b, n))
#define _DPCPU_SET(b, n, v)     (*_DPCPU_PTR(b, n) = v)

/*
 * Accessors for the current cpu.
 */
#define DPCPU_PTR(n)            _DPCPU_PTR(PCPU_GET(dynamic), n)
#define DPCPU_GET(n)            (*DPCPU_PTR(n))
#define DPCPU_SET(n, v)         (*DPCPU_PTR(n) = v)

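/*
 * Illustrative sketch, not part of the original header: reading and
 * updating the current CPU's instance of the hypothetical counter defined
 * above.  Note that in this rtems-libbsd copy _DPCPU_PTR() is stubbed to
 * NULL, so these accessors are only meaningful in the non-__rtems__ case.
 */
#if 0
static void
example_count_packet(void)
{

	DPCPU_SET(example_pktcount, DPCPU_GET(example_pktcount) + 1);
}
#endif
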
/*
 * Accessors for remote cpus.
 */
#define DPCPU_ID_PTR(i, n)      _DPCPU_PTR(dpcpu_off[(i)], n)
#define DPCPU_ID_GET(i, n)      (*DPCPU_ID_PTR(i, n))
#define DPCPU_ID_SET(i, n, v)   (*DPCPU_ID_PTR(i, n) = v)

/*
 * Utility macros.
 */
#define DPCPU_SUM(n) __extension__                                      \
({                                                                      \
        u_int _i;                                                       \
        __typeof(*DPCPU_PTR(n)) sum;                                    \
                                                                        \
        sum = 0;                                                        \
        CPU_FOREACH(_i) {                                               \
                sum += *DPCPU_ID_PTR(_i, n);                            \
        }                                                               \
        sum;                                                            \
})

#define DPCPU_VARSUM(n, var) __extension__                              \
({                                                                      \
        u_int _i;                                                       \
        __typeof((DPCPU_PTR(n))->var) sum;                              \
                                                                        \
        sum = 0;                                                        \
        CPU_FOREACH(_i) {                                               \
                sum += (DPCPU_ID_PTR(_i, n))->var;                      \
        }                                                               \
        sum;                                                            \
})

#define DPCPU_ZERO(n) do {                                              \
        u_int _i;                                                       \
                                                                        \
        CPU_FOREACH(_i) {                                               \
                bzero(DPCPU_ID_PTR(_i, n), sizeof(*DPCPU_PTR(n)));      \
        }                                                               \
} while(0)

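/*
 * Illustrative sketch, not part of the original header: aggregating and
 * resetting the hypothetical per-CPU counter across all CPUs.  DPCPU_SUM()
 * expands to a CPU_FOREACH() loop over DPCPU_ID_PTR(), and DPCPU_ZERO()
 * bzero()s every CPU's instance.
 */
#if 0
static long
example_total_packets(void)
{

	return (DPCPU_SUM(example_pktcount));
}

static void
example_reset_packets(void)
{

	DPCPU_ZERO(example_pktcount);
}
#endif
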
/*
 * XXXUPS remove as soon as we have per cpu variable
 * linker sets and can define rm_queue in _rm_lock.h
 */
struct rm_queue {
        struct rm_queue* volatile rmq_next;
        struct rm_queue* volatile rmq_prev;
};

#define PCPU_NAME_LEN (sizeof("CPU ") + sizeof(__XSTRING(MAXCPU) + 1))

/*
 * This structure maps out the global data that needs to be kept on a
 * per-cpu basis.  The members are accessed via the PCPU_GET/SET/PTR
 * macros defined in <machine/pcpu.h>.  Machine dependent fields are
 * defined in the PCPU_MD_FIELDS macro defined in <machine/pcpu.h>.
 */
struct pcpu {
        struct thread   *pc_curthread;          /* Current thread */
        struct thread   *pc_idlethread;         /* Idle thread */
        struct thread   *pc_fpcurthread;        /* Fp state owner */
        struct thread   *pc_deadthread;         /* Zombie thread or NULL */
        struct pcb      *pc_curpcb;             /* Current pcb */
        uint64_t        pc_switchtime;          /* cpu_ticks() at last csw */
        int             pc_switchticks;         /* `ticks' at last csw */
        u_int           pc_cpuid;               /* This cpu number */
        cpumask_t       pc_cpumask;             /* This cpu mask */
        cpumask_t       pc_other_cpus;          /* Mask of all other cpus */
        SLIST_ENTRY(pcpu) pc_allcpu;
        struct lock_list_entry *pc_spinlocks;
#ifdef KTR
        char            pc_name[PCPU_NAME_LEN]; /* String name for KTR */
#endif
#ifndef __rtems__
        struct vmmeter  pc_cnt;                 /* VM stats counters */
#endif
        long            pc_cp_time[CPUSTATES];  /* statclock ticks */
        struct device   *pc_device;
        void            *pc_netisr;             /* netisr SWI cookie */

        /*
         * Stuff for read mostly lock
         *
         * XXXUPS remove as soon as we have per cpu variable
         * linker sets.
         */
        struct rm_queue pc_rm_queue;

        uintptr_t       pc_dynamic;             /* Dynamic per-cpu data area */

        /*
         * Keep MD fields last, so that CPU-specific variations on a
         * single architecture don't result in offset variations of
         * the machine-independent fields of the pcpu.  Even though
         * the pcpu structure is private to the kernel, some ports
         * (e.g., lsof, part of gtop) define _KERNEL and include this
         * header.  While strictly speaking this is wrong, there's no
         * reason not to keep the offsets of the MI fields constant
         * if only to make kernel debugging easier.
         */
        PCPU_MD_FIELDS;
} __aligned(128);

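/*
 * Illustrative sketch, not part of the original header: the machine
 * dependent PCPU_GET()/PCPU_SET() accessors from <machine/pcpu.h> name
 * struct pcpu members without their pc_ prefix, as the curcpu/curthread
 * convenience macros below also show.
 */
#if 0
static u_int
example_this_cpu(void)
{

	return (PCPU_GET(cpuid));       /* reads pc_cpuid of the current CPU */
}
#endif
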
#ifdef _KERNEL

SLIST_HEAD(cpuhead, pcpu);

extern struct cpuhead cpuhead;
extern struct pcpu *cpuid_to_pcpu[MAXCPU];

#ifdef __rtems__
struct thread *rtems_get_curthread(void);
#define curthread       rtems_get_curthread()
#endif

#define curcpu          PCPU_GET(cpuid)
#define curproc         (curthread->td_proc)
#ifndef curthread
#define curthread       PCPU_GET(curthread)
#endif
#define curvidata       PCPU_GET(vidata)

/*
 * Machine dependent callouts.  cpu_pcpu_init() is responsible for
 * initializing machine dependent fields of struct pcpu, and
 * db_show_mdpcpu() is responsible for handling machine dependent
 * fields for the DDB 'show pcpu' command.
 */
void    cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size);
void    db_show_mdpcpu(struct pcpu *pcpu);

void    *dpcpu_alloc(int size);
void    dpcpu_copy(void *s, int size);
void    dpcpu_free(void *s, int size);
void    dpcpu_init(void *dpcpu, int cpuid);
void    pcpu_destroy(struct pcpu *pcpu);
struct  pcpu *pcpu_find(u_int cpuid);
void    pcpu_init(struct pcpu *pcpu, int cpuid, size_t size);

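/*
 * Illustrative sketch, not part of the original header: looking up a
 * remote CPU's struct pcpu by id with pcpu_find() and walking the list of
 * all CPUs through the pc_allcpu linkage rooted in cpuhead.
 */
#if 0
static struct thread *
example_curthread_of(u_int cpu)
{

	return (pcpu_find(cpu)->pc_curthread);
}

static u_int
example_count_cpus(void)
{
	struct pcpu *pc;
	u_int count;

	count = 0;
	SLIST_FOREACH(pc, &cpuhead, pc_allcpu)
		count++;
	return (count);
}
#endif
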
#endif /* _KERNEL */

#endif /* !_SYS_PCPU_H_ */