/*-
 * ----------------------------------------------------------------------------
 * "THE BEER-WARE LICENSE" (Revision 42):
 * <phk@FreeBSD.org> wrote this file.  As long as you retain this notice you
 * can do whatever you want with this stuff. If we meet some day, and you think
 * this stuff is worth it, you can buy me a beer in return.   Poul-Henning Kamp
 * ----------------------------------------------------------------------------
 *
 * $FreeBSD$
 */
---|
11 | |
---|
12 | #ifndef _SYS_SMP_H_ |
---|
13 | #define _SYS_SMP_H_ |
---|
14 | |
---|
15 | #ifdef _KERNEL |
---|
16 | |
---|
17 | #ifndef LOCORE |
---|
18 | |
---|
19 | #ifdef SMP |
---|
20 | |
---|
/*
 * Topology of a NUMA or HTT system.
 *
 * The top-level topology is an array of pointers to groups.  Each group
 * contains a bitmask of the cpus in its group or subgroups.  It may also
 * contain a pointer to an array of child groups.
 *
 * The bitmasks at non-leaf groups may be used by consumers who support
 * a smaller depth than the hardware provides.
 *
 * The topology may be omitted by systems where all CPUs are equal.
 */
---|
33 | |
---|
/*
 * One node of the CPU topology tree.  Nodes are linked into a tree via
 * cg_parent/cg_child; cg_level names the resource shared by the CPUs in
 * cg_mask (see the CG_SHARE_* constants below) and cg_flags carries the
 * CG_FLAG_* traversal modifiers.
 */
struct cpu_group {
	struct cpu_group *cg_parent;	/* Our parent group. */
	struct cpu_group *cg_child;	/* Optional children groups. */
	cpumask_t	cg_mask;	/* Mask of cpus in this group. */
	int8_t		cg_count;	/* Count of cpus in this group. */
	int8_t		cg_children;	/* Number of children groups. */
	int8_t		cg_level;	/* Shared cache level (CG_SHARE_*). */
	int8_t		cg_flags;	/* Traversal modifiers (CG_FLAG_*). */
};
---|
43 | |
---|
44 | /* |
---|
45 | * Defines common resources for CPUs in the group. The highest level |
---|
46 | * resource should be used when multiple are shared. |
---|
47 | */ |
---|
48 | #define CG_SHARE_NONE 0 |
---|
49 | #define CG_SHARE_L1 1 |
---|
50 | #define CG_SHARE_L2 2 |
---|
51 | #define CG_SHARE_L3 3 |
---|
52 | |
---|
53 | /* |
---|
54 | * Behavior modifiers for load balancing and affinity. |
---|
55 | */ |
---|
56 | #define CG_FLAG_HTT 0x01 /* Schedule the alternate core last. */ |
---|
57 | #define CG_FLAG_SMT 0x02 /* New age htt, less crippled. */ |
---|
58 | #define CG_FLAG_THREAD (CG_FLAG_HTT | CG_FLAG_SMT) /* Any threading. */ |
---|
59 | |
---|
60 | /* |
---|
61 | * Convenience routines for building topologies. |
---|
62 | */ |
---|
63 | struct cpu_group *smp_topo(void); |
---|
64 | struct cpu_group *smp_topo_none(void); |
---|
65 | struct cpu_group *smp_topo_1level(int l1share, int l1count, int l1flags); |
---|
66 | struct cpu_group *smp_topo_2level(int l2share, int l2count, int l1share, |
---|
67 | int l1count, int l1flags); |
---|
68 | struct cpu_group *smp_topo_find(struct cpu_group *top, int cpu); |
---|
69 | |
---|
70 | extern void (*cpustop_restartfunc)(void); |
---|
71 | extern int smp_active; |
---|
72 | extern int smp_cpus; |
---|
73 | extern volatile cpumask_t started_cpus; |
---|
74 | extern volatile cpumask_t stopped_cpus; |
---|
75 | extern cpumask_t idle_cpus_mask; |
---|
76 | extern cpumask_t hlt_cpus_mask; |
---|
77 | extern cpumask_t logical_cpus_mask; |
---|
78 | #endif /* SMP */ |
---|
79 | |
---|
80 | #ifndef __rtems__ |
---|
81 | extern u_int mp_maxid; |
---|
82 | extern int mp_maxcpus; |
---|
83 | extern int mp_ncpus; |
---|
84 | extern volatile int smp_started; |
---|
85 | |
---|
86 | extern cpumask_t all_cpus; |
---|
87 | #else /* __rtems__ */ |
---|
88 | #define mp_maxid 1U |
---|
89 | #define mp_maxcpus 1 |
---|
90 | #define mp_ncpus 1 |
---|
91 | #define all_cpus 1U |
---|
92 | #endif /* __rtems__ */ |
---|
93 | |
---|
94 | /* |
---|
95 | * Macro allowing us to determine whether a CPU is absent at any given |
---|
96 | * time, thus permitting us to configure sparse maps of cpuid-dependent |
---|
97 | * (per-CPU) structures. |
---|
98 | */ |
---|
99 | #define CPU_ABSENT(x_cpu) ((all_cpus & (1 << (x_cpu))) == 0) |
---|
100 | |
---|
101 | /* |
---|
102 | * Macros to iterate over non-absent CPUs. CPU_FOREACH() takes an |
---|
103 | * integer iterator and iterates over the available set of CPUs. |
---|
104 | * CPU_FIRST() returns the id of the first non-absent CPU. CPU_NEXT() |
---|
105 | * returns the id of the next non-absent CPU. It will wrap back to |
---|
106 | * CPU_FIRST() once the end of the list is reached. The iterators are |
---|
107 | * currently implemented via inline functions. |
---|
108 | */ |
---|
109 | #define CPU_FOREACH(i) \ |
---|
110 | for ((i) = 0; (i) <= mp_maxid; (i)++) \ |
---|
111 | if (!CPU_ABSENT((i))) |
---|
112 | |
---|
/*
 * Return the id of the lowest-numbered non-absent CPU.
 *
 * NOTE(review): assumes at least one CPU is present in all_cpus
 * (CPU 0 in practice); otherwise the scan would not terminate.
 */
static __inline int
cpu_first(void)
{
	int id;

	id = 0;
	while (CPU_ABSENT(id))
		id++;
	return (id);
}
---|
122 | |
---|
123 | static __inline int |
---|
124 | cpu_next(int i) |
---|
125 | { |
---|
126 | |
---|
127 | for (;;) { |
---|
128 | i++; |
---|
129 | if (i > mp_maxid) |
---|
130 | i = 0; |
---|
131 | if (!CPU_ABSENT(i)) |
---|
132 | return (i); |
---|
133 | } |
---|
134 | } |
---|
135 | |
---|
136 | #define CPU_FIRST() cpu_first() |
---|
137 | #define CPU_NEXT(i) cpu_next((i)) |
---|
138 | |
---|
139 | #ifdef SMP |
---|
/*
 * Machine dependent functions used to initialize MP support.
 *
 * The cpu_mp_probe() should check to see if MP support is present and return
 * zero if it is not or non-zero if it is.  If MP support is present, then
 * cpu_mp_start() will be called so that MP can be enabled.  This function
 * should do things such as startup secondary processors.  It should also
 * setup mp_ncpus, all_cpus, and smp_cpus.  It should also ensure that
 * smp_active and smp_started are initialized at the appropriate time.
 * Once cpu_mp_start() returns, machine independent MP startup code will be
 * executed and a simple message will be output to the console.  Finally,
 * cpu_mp_announce() will be called so that machine dependent messages about
 * the MP support may be output to the console if desired.
 *
 * The cpu_mp_setmaxid() function is called very early during the boot process
 * so that the MD code may set mp_maxid to provide an upper bound on CPU IDs
 * that other subsystems may use.  If a platform is not able to determine
 * the exact maximum ID that early, then it may set mp_maxid to MAXCPU - 1.
 */
---|
159 | struct thread; |
---|
160 | |
---|
161 | struct cpu_group *cpu_topo(void); |
---|
162 | void cpu_mp_announce(void); |
---|
163 | int cpu_mp_probe(void); |
---|
164 | void cpu_mp_setmaxid(void); |
---|
165 | void cpu_mp_start(void); |
---|
166 | |
---|
167 | void forward_signal(struct thread *); |
---|
168 | int restart_cpus(cpumask_t); |
---|
169 | int stop_cpus(cpumask_t); |
---|
170 | int stop_cpus_hard(cpumask_t); |
---|
171 | #if defined(__amd64__) |
---|
172 | int suspend_cpus(cpumask_t); |
---|
173 | #endif |
---|
174 | void smp_rendezvous_action(void); |
---|
175 | extern struct mtx smp_ipi_mtx; |
---|
176 | |
---|
177 | #endif /* SMP */ |
---|
178 | void smp_no_rendevous_barrier(void *); |
---|
179 | void smp_rendezvous(void (*)(void *), |
---|
180 | void (*)(void *), |
---|
181 | void (*)(void *), |
---|
182 | void *arg); |
---|
183 | void smp_rendezvous_cpus(cpumask_t, |
---|
184 | void (*)(void *), |
---|
185 | void (*)(void *), |
---|
186 | void (*)(void *), |
---|
187 | void *arg); |
---|
188 | #endif /* !LOCORE */ |
---|
189 | #endif /* _KERNEL */ |
---|
190 | #endif /* _SYS_SMP_H_ */ |
---|