/* sys/sparc64/include/smp.h, FreeBSD stable/10, SVN revision 292787 */
/*-
 * Copyright (c) 2001 Jake Burkholder.
 * Copyright (c) 2007 - 2011 Marius Strobl <marius@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: stable/10/sys/sparc64/include/smp.h 292787 2015-12-27 17:58:58Z marius $
 */

#ifndef _MACHINE_SMP_H_
#define _MACHINE_SMP_H_

#ifdef SMP

/*
 * States an application processor steps through while being started;
 * passed between BSP and AP via cpu_start_args.csa_state.
 */
#define CPU_TICKSYNC    1
#define CPU_STICKSYNC   2
#define CPU_INIT        3
#define CPU_BOOTSTRAP   4

#ifndef LOCORE

#include <sys/param.h>
#include <sys/cpuset.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>

#include <machine/atomic.h>
#include <machine/intr_machdep.h>
#include <machine/tte.h>

/*
 * Interrupt dispatch status bits and per-CPU-model limits.
 * NOTE(review): presumably these mirror the UltraSPARC interrupt
 * dispatch status register layout (busy/nack bit pairs) -- confirm
 * against the respective CPU implementation manuals.
 */
#define IDR_BUSY        0x0000000000000001ULL
#define IDR_NACK        0x0000000000000002ULL
#define IDR_CHEETAH_ALL_BUSY    0x5555555555555555ULL
#define IDR_CHEETAH_ALL_NACK    (~IDR_CHEETAH_ALL_BUSY)
#define IDR_CHEETAH_MAX_BN_PAIRS        32
#define IDR_JALAPENO_MAX_BN_PAIRS       4

/* Field offsets within the interrupt dispatch command. */
#define IDC_ITID_SHIFT  14
#define IDC_BN_SHIFT    24

/* An IPI number is simply the PIL the receiving CPU is interrupted at. */
#define IPI_AST         PIL_AST
#define IPI_RENDEZVOUS  PIL_RENDEZVOUS
#define IPI_PREEMPT     PIL_PREEMPT
#define IPI_HARDCLOCK   PIL_HARDCLOCK
#define IPI_STOP        PIL_STOP
#define IPI_STOP_HARD   PIL_STOP

#define IPI_RETRIES     5000

/* Arguments handed to an AP while it is being started by the BSP. */
struct cpu_start_args {
        u_int   csa_count;
        u_int   csa_mid;
        u_int   csa_state;      /* one of the CPU_* states above */
        vm_offset_t csa_pcpu;
        u_long  csa_tick;
        u_long  csa_stick;
        u_long  csa_ver;
        struct  tte csa_ttes[PCPU_PAGES];
};

/*
 * Argument blocks for the IPI trap handlers below.  Each is a single
 * global instance protected by ipi_mtx; the mask doubles as the
 * completion cookie checked by ipi_wait().
 */
struct ipi_cache_args {
        cpuset_t ica_mask;
        vm_paddr_t ica_pa;
};

struct ipi_rd_args {
        cpuset_t ira_mask;
        register_t *ira_val;
};

struct ipi_tlb_args {
        cpuset_t ita_mask;
        struct  pmap *ita_pmap;
        u_long  ita_start;
        u_long  ita_end;
};
/* Single-page demaps reuse ita_start as the sole VA. */
#define ita_va  ita_start

struct pcb;
struct pcpu;

extern struct pcb stoppcbs[];

void    cpu_mp_bootstrap(struct pcpu *pc);
void    cpu_mp_shutdown(void);

/* Dispatch hooks, set per CPU model (e.g. Spitfire vs. Cheetah). */
typedef void cpu_ipi_selected_t(cpuset_t, u_long, u_long, u_long);
extern  cpu_ipi_selected_t *cpu_ipi_selected;
typedef void cpu_ipi_single_t(u_int, u_long, u_long, u_long);
extern  cpu_ipi_single_t *cpu_ipi_single;

void    mp_init(void);

extern  struct mtx ipi_mtx;
extern  struct ipi_cache_args ipi_cache_args;
extern  struct ipi_rd_args ipi_rd_args;
extern  struct ipi_tlb_args ipi_tlb_args;

/* AP bootstrap trampoline code and its parameters. */
extern  char *mp_tramp_code;
extern  u_long mp_tramp_code_len;
extern  u_long mp_tramp_tlb_slots;
extern  u_long mp_tramp_func;

extern  void mp_startup(void);

/* Trap-level IPI handlers (assembly); addresses are passed as IPI data. */
extern  char tl_ipi_cheetah_dcache_page_inval[];
extern  char tl_ipi_spitfire_dcache_page_inval[];
extern  char tl_ipi_spitfire_icache_page_inval[];

extern  char tl_ipi_level[];

extern  char tl_ipi_stick_rd[];
extern  char tl_ipi_tick_rd[];

extern  char tl_ipi_tlb_context_demap[];
extern  char tl_ipi_tlb_page_demap[];
extern  char tl_ipi_tlb_range_demap[];

/*
 * Interrupt every CPU except the caller's at PIL 'ipi'.  A no-op until
 * smp_started is set.  Self-contained: the spin lock and the pin are
 * both dropped again before returning.
 */
static __inline void
ipi_all_but_self(u_int ipi)
{
        cpuset_t cpus;

        if (__predict_false(atomic_load_acq_int(&smp_started) == 0))
                return;
        cpus = all_cpus;
        sched_pin();
        CPU_CLR(PCPU_GET(cpuid), &cpus);
        mtx_lock_spin(&ipi_mtx);
        cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_level, ipi);
        mtx_unlock_spin(&ipi_mtx);
        sched_unpin();
}

/* Interrupt the CPUs in 'cpus' at PIL 'ipi'; no-op before smp_started. */
static __inline void
ipi_selected(cpuset_t cpus, u_int ipi)
{

        if (__predict_false(atomic_load_acq_int(&smp_started) == 0 ||
            CPU_EMPTY(&cpus)))
                return;
        mtx_lock_spin(&ipi_mtx);
        cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_level, ipi);
        mtx_unlock_spin(&ipi_mtx);
}

/* Interrupt the single CPU 'cpu' at PIL 'ipi'; no-op before smp_started. */
static __inline void
ipi_cpu(int cpu, u_int ipi)
{

        if (__predict_false(atomic_load_acq_int(&smp_started) == 0))
                return;
        mtx_lock_spin(&ipi_mtx);
        cpu_ipi_single(cpu, 0, (u_long)tl_ipi_level, ipi);
        mtx_unlock_spin(&ipi_mtx);
}

#if defined(_MACHINE_PMAP_H_) && defined(_SYS_MUTEX_H_)

/*
 * The ipi_*() functions below return an ipi_wait() cookie: a pointer to
 * the global argument block's CPU mask, or NULL when no IPI was sent.
 * IMPORTANT: on a non-NULL return the caller is left PINNED and holding
 * ipi_mtx; the matching ipi_wait() call releases both.  The receiving
 * CPUs are presumably expected to clear their bit in the mask when done
 * (NOTE(review): confirm in the tl_ipi_* trap handlers).
 */

/*
 * Have all other CPUs run the D-cache page invalidation handler 'func'
 * on physical address 'pa'.
 */
static __inline void *
ipi_dcache_page_inval(void *func, vm_paddr_t pa)
{
        struct ipi_cache_args *ica;

        if (__predict_false(atomic_load_acq_int(&smp_started) == 0))
                return (NULL);
        sched_pin();
        ica = &ipi_cache_args;
        mtx_lock_spin(&ipi_mtx);
        ica->ica_mask = all_cpus;
        CPU_CLR(PCPU_GET(cpuid), &ica->ica_mask);
        ica->ica_pa = pa;
        cpu_ipi_selected(ica->ica_mask, 0, (u_long)func, (u_long)ica);
        return (&ica->ica_mask);
}

/*
 * Have all other CPUs run the I-cache page invalidation handler 'func'
 * on physical address 'pa'.
 */
static __inline void *
ipi_icache_page_inval(void *func, vm_paddr_t pa)
{
        struct ipi_cache_args *ica;

        if (__predict_false(atomic_load_acq_int(&smp_started) == 0))
                return (NULL);
        sched_pin();
        ica = &ipi_cache_args;
        mtx_lock_spin(&ipi_mtx);
        ica->ica_mask = all_cpus;
        CPU_CLR(PCPU_GET(cpuid), &ica->ica_mask);
        ica->ica_pa = pa;
        cpu_ipi_selected(ica->ica_mask, 0, (u_long)func, (u_long)ica);
        return (&ica->ica_mask);
}

/*
 * Have CPU 'cpu' run the register-read handler 'func' and store the
 * result via 'val'.
 */
static __inline void *
ipi_rd(u_int cpu, void *func, u_long *val)
{
        struct ipi_rd_args *ira;

        if (__predict_false(atomic_load_acq_int(&smp_started) == 0))
                return (NULL);
        sched_pin();
        ira = &ipi_rd_args;
        mtx_lock_spin(&ipi_mtx);
        CPU_SETOF(cpu, &ira->ira_mask);
        ira->ira_val = val;
        cpu_ipi_single(cpu, 0, (u_long)func, (u_long)ira);
        return (&ira->ira_mask);
}

/*
 * Demap the TLB context of pmap 'pm' on every other CPU on which it is
 * active.  Returns NULL (nothing held) when no other CPU needs it.
 */
static __inline void *
ipi_tlb_context_demap(struct pmap *pm)
{
        struct ipi_tlb_args *ita;
        cpuset_t cpus;

        if (__predict_false(atomic_load_acq_int(&smp_started) == 0))
                return (NULL);
        sched_pin();
        cpus = pm->pm_active;
        CPU_AND(&cpus, &all_cpus);
        CPU_CLR(PCPU_GET(cpuid), &cpus);
        if (CPU_EMPTY(&cpus)) {
                sched_unpin();
                return (NULL);
        }
        ita = &ipi_tlb_args;
        mtx_lock_spin(&ipi_mtx);
        ita->ita_mask = cpus;
        ita->ita_pmap = pm;
        cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_tlb_context_demap,
            (u_long)ita);
        return (&ita->ita_mask);
}

/*
 * Demap the TLB entry for virtual address 'va' in pmap 'pm' on every
 * other CPU on which the pmap is active.
 */
static __inline void *
ipi_tlb_page_demap(struct pmap *pm, vm_offset_t va)
{
        struct ipi_tlb_args *ita;
        cpuset_t cpus;

        if (__predict_false(atomic_load_acq_int(&smp_started) == 0))
                return (NULL);
        sched_pin();
        cpus = pm->pm_active;
        CPU_AND(&cpus, &all_cpus);
        CPU_CLR(PCPU_GET(cpuid), &cpus);
        if (CPU_EMPTY(&cpus)) {
                sched_unpin();
                return (NULL);
        }
        ita = &ipi_tlb_args;
        mtx_lock_spin(&ipi_mtx);
        ita->ita_mask = cpus;
        ita->ita_pmap = pm;
        ita->ita_va = va;
        cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_tlb_page_demap, (u_long)ita);
        return (&ita->ita_mask);
}

/*
 * Demap the TLB entries for the VA range [start, end) of pmap 'pm' on
 * every other CPU on which the pmap is active.
 */
static __inline void *
ipi_tlb_range_demap(struct pmap *pm, vm_offset_t start, vm_offset_t end)
{
        struct ipi_tlb_args *ita;
        cpuset_t cpus;

        if (__predict_false(atomic_load_acq_int(&smp_started) == 0))
                return (NULL);
        sched_pin();
        cpus = pm->pm_active;
        CPU_AND(&cpus, &all_cpus);
        CPU_CLR(PCPU_GET(cpuid), &cpus);
        if (CPU_EMPTY(&cpus)) {
                sched_unpin();
                return (NULL);
        }
        ita = &ipi_tlb_args;
        mtx_lock_spin(&ipi_mtx);
        ita->ita_mask = cpus;
        ita->ita_pmap = pm;
        ita->ita_start = start;
        ita->ita_end = end;
        cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_tlb_range_demap,
            (u_long)ita);
        return (&ita->ita_mask);
}

/*
 * Complete an IPI started by one of the cookie-returning functions
 * above: spin until every target CPU has cleared its bit in the mask,
 * then drop ipi_mtx and unpin.  A NULL cookie means no IPI was sent
 * and nothing is held, so this is a no-op.
 */
static __inline void
ipi_wait(void *cookie)
{
        volatile cpuset_t *mask;

        if (__predict_false((mask = cookie) != NULL)) {
                while (!CPU_EMPTY(mask))
                        ;
                mtx_unlock_spin(&ipi_mtx);
                sched_unpin();
        }
}

#endif /* _MACHINE_PMAP_H_ && _SYS_MUTEX_H_ */

#endif /* !LOCORE */

#else

#ifndef LOCORE

/*
 * UP kernels: all IPI operations degenerate to no-ops that return a
 * NULL cookie, which ipi_wait() in turn ignores.
 */

static __inline void *
ipi_dcache_page_inval(void *func __unused, vm_paddr_t pa __unused)
{

        return (NULL);
}

static __inline void *
ipi_icache_page_inval(void *func __unused, vm_paddr_t pa __unused)
{

        return (NULL);
}

static __inline void *
ipi_rd(u_int cpu __unused, void *func __unused, u_long *val __unused)
{

        return (NULL);
}

static __inline void *
ipi_tlb_context_demap(struct pmap *pm __unused)
{

        return (NULL);
}

static __inline void *
ipi_tlb_page_demap(struct pmap *pm __unused, vm_offset_t va __unused)
{

        return (NULL);
}

static __inline void *
ipi_tlb_range_demap(struct pmap *pm __unused, vm_offset_t start __unused,
    __unused vm_offset_t end)
{

        return (NULL);
}

static __inline void
ipi_wait(void *cookie __unused)
{

}

static __inline void
tl_ipi_cheetah_dcache_page_inval(void)
{

}

static __inline void
tl_ipi_spitfire_dcache_page_inval(void)
{

}

static __inline void
tl_ipi_spitfire_icache_page_inval(void)
{

}

#endif /* !LOCORE */

#endif /* SMP */

#endif /* !_MACHINE_SMP_H_ */