4664 CPU->cpu_pri_data hasn't been used for years
--- old/usr/src/uts/i86pc/io/pcplusmp/apic.c
+++ new/usr/src/uts/i86pc/io/pcplusmp/apic.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 1993, 2010, Oracle and/or its affiliates. All rights reserved.
24 24 */
25 25 /*
26 26 * Copyright (c) 2010, Intel Corporation.
27 27 * All rights reserved.
28 28 */
29 29 /*
30 30 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
31 31 */
32 32
33 33 /*
34 34 * To understand how the pcplusmp module interacts with the interrupt subsystem
35 35 * read the theory statement in uts/i86pc/os/intr.c.
36 36 */
37 37
38 38 /*
39 39 * PSMI 1.1 extensions are supported only in 2.6 and later versions.
40 40 * PSMI 1.2 extensions are supported only in 2.7 and later versions.
41 41 * PSMI 1.3 and 1.4 extensions are supported in Solaris 10.
42 42 * PSMI 1.5 extensions are supported in Solaris Nevada.
43 43 * PSMI 1.6 extensions are supported in Solaris Nevada.
44 44 * PSMI 1.7 extensions are supported in Solaris Nevada.
45 45 */
46 46 #define PSMI_1_7
47 47
48 48 #include <sys/processor.h>
49 49 #include <sys/time.h>
50 50 #include <sys/psm.h>
51 51 #include <sys/smp_impldefs.h>
52 52 #include <sys/cram.h>
53 53 #include <sys/acpi/acpi.h>
54 54 #include <sys/acpica.h>
55 55 #include <sys/psm_common.h>
56 56 #include <sys/apic.h>
57 57 #include <sys/pit.h>
58 58 #include <sys/ddi.h>
59 59 #include <sys/sunddi.h>
60 60 #include <sys/ddi_impldefs.h>
61 61 #include <sys/pci.h>
62 62 #include <sys/promif.h>
63 63 #include <sys/x86_archext.h>
64 64 #include <sys/cpc_impl.h>
65 65 #include <sys/uadmin.h>
66 66 #include <sys/panic.h>
67 67 #include <sys/debug.h>
68 68 #include <sys/archsystm.h>
69 69 #include <sys/trap.h>
70 70 #include <sys/machsystm.h>
71 71 #include <sys/sysmacros.h>
72 72 #include <sys/cpuvar.h>
73 73 #include <sys/rm_platter.h>
74 74 #include <sys/privregs.h>
75 75 #include <sys/note.h>
76 76 #include <sys/pci_intr_lib.h>
77 77 #include <sys/spl.h>
78 78 #include <sys/clock.h>
79 79 #include <sys/cyclic.h>
80 80 #include <sys/dditypes.h>
81 81 #include <sys/sunddi.h>
82 82 #include <sys/x_call.h>
83 83 #include <sys/reboot.h>
84 84 #include <sys/hpet.h>
85 85 #include <sys/apic_common.h>
86 86 #include <sys/apic_timer.h>
87 87
88 88 /*
89 89 * Local Function Prototypes
90 90 */
91 91 static void apic_init_intr(void);
92 92
93 93 /*
94 94 * standard MP entries
95 95 */
96 96 static int apic_probe(void);
97 97 static int apic_getclkirq(int ipl);
98 98 static void apic_init(void);
99 99 static void apic_picinit(void);
100 100 static int apic_post_cpu_start(void);
101 101 static int apic_intr_enter(int ipl, int *vect);
102 102 static void apic_setspl(int ipl);
103 103 static void x2apic_setspl(int ipl);
104 104 static int apic_addspl(int ipl, int vector, int min_ipl, int max_ipl);
105 105 static int apic_delspl(int ipl, int vector, int min_ipl, int max_ipl);
106 106 static int apic_disable_intr(processorid_t cpun);
107 107 static void apic_enable_intr(processorid_t cpun);
108 108 static int apic_get_ipivect(int ipl, int type);
109 109 static void apic_post_cyclic_setup(void *arg);
110 110
111 111 /*
112 112 * The following vector assignments influence the value of ipltopri and
113 113 * vectortoipl. Note that vectors 0 - 0x1f are not used. We can program
114 114 * idle to 0 and IPL 0 to 0xf to differentiate idle in case
115 115 * we care to do so in future. Note some IPLs which are rarely used
116 116 * will share the vector ranges and heavily used IPLs (5 and 6) have
117 117 * a wide range.
118 118 *
119 119 * This array is used to initialize apic_ipls[] (in apic_init()).
120 120 *
121 121 * IPL Vector range As passed to intr_enter
122 122 * 0 none.
123 123 * 1,2,3 0x20-0x2f 0x0-0xf
124 124 * 4 0x30-0x3f 0x10-0x1f
125 125 * 5 0x40-0x5f 0x20-0x3f
126 126 * 6 0x60-0x7f 0x40-0x5f
127 127 * 7,8,9 0x80-0x8f 0x60-0x6f
128 128 * 10 0x90-0x9f 0x70-0x7f
129 129 * 11 0xa0-0xaf 0x80-0x8f
130 130 * ... ...
131 131 * 15 0xe0-0xef 0xc0-0xcf
132 132 * 15 0xf0-0xff 0xd0-0xdf
133 133 */
134 134 uchar_t apic_vectortoipl[APIC_AVAIL_VECTOR / APIC_VECTOR_PER_IPL] = {
135 135 3, 4, 5, 5, 6, 6, 9, 10, 11, 12, 13, 14, 15, 15
136 136 };
137 137 /*
138 138 * The ipl of an ISR at vector X is apic_vectortoipl[X>>4]
139 139 * NOTE that this is vector as passed into intr_enter which is
140 140 * programmed vector - 0x20 (APIC_BASE_VECT)
141 141 */
142 142
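A minimal standalone sketch of the lookup described in the comment above, assuming APIC_BASE_VECT is 0x20 and APIC_VECTOR_PER_IPL is 16 as the mapping table implies (the local names and the main() wrapper are illustrative, not part of the module):

    #include <stdio.h>

    /* local copy of apic_vectortoipl[] above */
    static const unsigned char vectortoipl[] = {
            3, 4, 5, 5, 6, 6, 9, 10, 11, 12, 13, 14, 15, 15
    };

    int
    main(void)
    {
            unsigned char hw_vect = 0x45;           /* vector as delivered */
            unsigned char vect = hw_vect - 0x20;    /* as passed to intr_enter */

            /* the ipl of an ISR at vector X is vectortoipl[X >> 4] */
            printf("vector 0x%x -> ipl %d\n", hw_vect, vectortoipl[vect >> 4]);
            return (0);
    }

For hw_vect 0x45 this prints ipl 5, matching the 0x40-0x5f row of the table.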
143 143 uchar_t apic_ipltopri[MAXIPL + 1]; /* unix ipl to apic pri */
144 144 /* The taskpri to be programmed into apic to mask given ipl */
145 145
146 -#if defined(__amd64)
147 -static unsigned char dummy_cpu_pri[MAXIPL + 1];
148 -#endif
149 -
150 146 /*
151 147 * Correlation of the hardware vector to the IPL in use, initialized
152 148 * from apic_vectortoipl[] in apic_init(). The final IPLs may not correlate
153 149 * to the IPLs in apic_vectortoipl on some systems that share interrupt lines
154 150 * connected to errata-stricken IOAPICs
155 151 */
156 152 uchar_t apic_ipls[APIC_AVAIL_VECTOR];
157 153
158 154 /*
159 155 * Patchable global variables.
160 156 */
161 157 int apic_enable_hwsoftint = 0; /* 0 - disable, 1 - enable */
162 158 int apic_enable_bind_log = 1; /* 1 - display interrupt binding log */
163 159
164 160 /*
165 161 * Local static data
166 162 */
167 163 static struct psm_ops apic_ops = {
168 164 apic_probe,
169 165
170 166 apic_init,
171 167 apic_picinit,
172 168 apic_intr_enter,
173 169 apic_intr_exit,
174 170 apic_setspl,
175 171 apic_addspl,
176 172 apic_delspl,
177 173 apic_disable_intr,
178 174 apic_enable_intr,
179 175 (int (*)(int))NULL, /* psm_softlvl_to_irq */
180 176 (void (*)(int))NULL, /* psm_set_softintr */
181 177
182 178 apic_set_idlecpu,
183 179 apic_unset_idlecpu,
184 180
185 181 apic_clkinit,
186 182 apic_getclkirq,
187 183 (void (*)(void))NULL, /* psm_hrtimeinit */
188 184 apic_gethrtime,
189 185
190 186 apic_get_next_processorid,
191 187 apic_cpu_start,
192 188 apic_post_cpu_start,
193 189 apic_shutdown,
194 190 apic_get_ipivect,
195 191 apic_send_ipi,
196 192
197 193 (int (*)(dev_info_t *, int))NULL, /* psm_translate_irq */
198 194 (void (*)(int, char *))NULL, /* psm_notify_error */
199 195 (void (*)(int))NULL, /* psm_notify_func */
200 196 apic_timer_reprogram,
201 197 apic_timer_enable,
202 198 apic_timer_disable,
203 199 apic_post_cyclic_setup,
204 200 apic_preshutdown,
205 201 apic_intr_ops, /* Advanced DDI Interrupt framework */
206 202 apic_state, /* save, restore apic state for S3 */
207 203 apic_cpu_ops, /* CPU control interface. */
208 204 };
209 205
210 206 struct psm_ops *psmops = &apic_ops;
211 207
212 208 static struct psm_info apic_psm_info = {
213 209 PSM_INFO_VER01_7, /* version */
214 210 PSM_OWN_EXCLUSIVE, /* ownership */
215 211 (struct psm_ops *)&apic_ops, /* operation */
216 212 APIC_PCPLUSMP_NAME, /* machine name */
217 213 "pcplusmp v1.4 compatible",
218 214 };
219 215
220 216 static void *apic_hdlp;
221 217
222 218 /*
223 219 * apic_let_idle_redistribute can have the following values:
224 220 * 0 - If clock decremented it from 1 to 0, clock has to call redistribute.
225 221 * apic_redistribute_lock prevents multiple idle cpus from redistributing
226 222 */
227 223 int apic_num_idle_redistributions = 0;
228 224 static int apic_let_idle_redistribute = 0;
229 225
230 226 /* to gather intr data and redistribute */
231 227 static void apic_redistribute_compute(void);
232 228
233 229 /*
234 230 * This is the loadable module wrapper
235 231 */
236 232
237 233 int
238 234 _init(void)
239 235 {
240 236 if (apic_coarse_hrtime)
241 237 apic_ops.psm_gethrtime = &apic_gettime;
242 238 return (psm_mod_init(&apic_hdlp, &apic_psm_info));
243 239 }
244 240
245 241 int
246 242 _fini(void)
247 243 {
248 244 return (psm_mod_fini(&apic_hdlp, &apic_psm_info));
249 245 }
250 246
251 247 int
252 248 _info(struct modinfo *modinfop)
253 249 {
254 250 return (psm_mod_info(&apic_hdlp, &apic_psm_info, modinfop));
255 251 }
256 252
257 253 static int
258 254 apic_probe(void)
259 255 {
260 256 /* check if apix is initialized */
261 257 if (apix_enable && apix_loaded())
262 258 return (PSM_FAILURE);
263 259 else
264 260 apix_enable = 0; /* continue using pcplusmp PSM */
265 261
266 262 return (apic_probe_common(apic_psm_info.p_mach_idstring));
267 263 }
268 264
269 265 static uchar_t
270 266 apic_xlate_vector_by_irq(uchar_t irq)
271 267 {
272 268 if (apic_irq_table[irq] == NULL)
273 269 return (0);
274 270
275 271 return (apic_irq_table[irq]->airq_vector);
276 272 }
277 273
278 274 void
279 275 apic_init(void)
280 276 {
281 277 int i;
282 278 int j = 1;
283 279
284 280 psm_get_ioapicid = apic_get_ioapicid;
285 281 psm_get_localapicid = apic_get_localapicid;
286 282 psm_xlate_vector_by_irq = apic_xlate_vector_by_irq;
287 283
288 284 apic_ipltopri[0] = APIC_VECTOR_PER_IPL; /* leave 0 for idle */
289 285 for (i = 0; i < (APIC_AVAIL_VECTOR / APIC_VECTOR_PER_IPL); i++) {
290 286 if ((i < ((APIC_AVAIL_VECTOR / APIC_VECTOR_PER_IPL) - 1)) &&
291 287 (apic_vectortoipl[i + 1] == apic_vectortoipl[i]))
292 288 /* get to highest vector at the same ipl */
293 289 continue;
294 290 for (; j <= apic_vectortoipl[i]; j++) {
295 291 apic_ipltopri[j] = (i << APIC_IPL_SHIFT) +
296 292 APIC_BASE_VECT;
297 293 }
298 294 }
299 295 for (; j < MAXIPL + 1; j++)
300 296 /* fill up any empty ipltopri slots */
301 297 apic_ipltopri[j] = (i << APIC_IPL_SHIFT) + APIC_BASE_VECT;
302 298 apic_init_common();
303 -#if defined(__amd64)
304 - CPU->cpu_pri_data = dummy_cpu_pri;
305 -#else
299 +
300 +#if !defined(__amd64)
306 301 if (cpuid_have_cr8access(CPU))
307 302 apic_have_32bit_cr8 = 1;
308 -#endif /* __amd64 */
303 +#endif
309 304 }
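To see what the apic_ipltopri fill loop in apic_init() produces, here is a self-contained sketch of the same logic; the constant values (APIC_AVAIL_VECTOR 224, APIC_VECTOR_PER_IPL 16, APIC_IPL_SHIFT 4, APIC_BASE_VECT 0x20, MAXIPL 15) are assumptions consistent with the table above, not taken from the diff:

    #include <stdio.h>

    #define AVAIL_VECTOR    224     /* assumed value of APIC_AVAIL_VECTOR */
    #define VECT_PER_IPL    16      /* assumed value of APIC_VECTOR_PER_IPL */
    #define BASE_VECT       0x20    /* assumed value of APIC_BASE_VECT */
    #define IPL_SHIFT       4       /* assumed value of APIC_IPL_SHIFT */
    #define MAXIPL          15      /* assumed value of MAXIPL */

    static const unsigned char vectortoipl[] = {
            3, 4, 5, 5, 6, 6, 9, 10, 11, 12, 13, 14, 15, 15
    };
    static unsigned char ipltopri[MAXIPL + 1];

    int
    main(void)
    {
            int i, j = 1;

            ipltopri[0] = VECT_PER_IPL;             /* leave 0 for idle */
            for (i = 0; i < AVAIL_VECTOR / VECT_PER_IPL; i++) {
                    if (i < (AVAIL_VECTOR / VECT_PER_IPL) - 1 &&
                        vectortoipl[i + 1] == vectortoipl[i])
                            continue;   /* highest vector at the same ipl */
                    for (; j <= vectortoipl[i]; j++)
                            ipltopri[j] = (i << IPL_SHIFT) + BASE_VECT;
            }
            for (; j < MAXIPL + 1; j++)             /* fill any empty slots */
                    ipltopri[j] = (i << IPL_SHIFT) + BASE_VECT;

            for (j = 0; j <= MAXIPL; j++)
                    printf("ipl %2d -> task pri 0x%02x\n", j, ipltopri[j]);
            return (0);
    }

Under those assumptions it prints 0x10 for ipl 0, 0x20 for ipls 1-3, 0x30 for ipl 4, 0x50 for ipl 5, 0x70 for ipl 6, 0x80 for ipls 7-9, then 0x90 through 0xd0 for ipls 10-14 and 0xf0 for ipl 15; each value is the task priority that masks the given ipl and everything below it.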
310 305
311 306 static void
312 307 apic_init_intr(void)
313 308 {
314 309 processorid_t cpun = psm_get_cpu_id();
315 310 uint_t nlvt;
316 311 uint32_t svr = AV_UNIT_ENABLE | APIC_SPUR_INTR;
317 312
318 313 apic_reg_ops->apic_write_task_reg(APIC_MASK_ALL);
319 314
320 315 if (apic_mode == LOCAL_APIC) {
321 316 /*
322 317 * We are running APIC in MMIO mode.
323 318 */
324 319 if (apic_flat_model) {
325 320 apic_reg_ops->apic_write(APIC_FORMAT_REG,
326 321 APIC_FLAT_MODEL);
327 322 } else {
328 323 apic_reg_ops->apic_write(APIC_FORMAT_REG,
329 324 APIC_CLUSTER_MODEL);
330 325 }
331 326
332 327 apic_reg_ops->apic_write(APIC_DEST_REG,
333 328 AV_HIGH_ORDER >> cpun);
334 329 }
335 330
336 331 if (apic_directed_EOI_supported()) {
337 332 /*
338 333 * Setting the 12th bit in the Spurious Interrupt Vector
339 334 * Register suppresses broadcast EOIs generated by the local
340 335 * APIC. The suppression of broadcast EOIs happens only when
341 336 * interrupts are level-triggered.
342 337 */
343 338 svr |= APIC_SVR_SUPPRESS_BROADCAST_EOI;
344 339 }
345 340
346 341 /* need to enable APIC before unmasking NMI */
347 342 apic_reg_ops->apic_write(APIC_SPUR_INT_REG, svr);
348 343
349 344 /*
350 345 * Presence of an invalid vector with delivery mode AV_FIXED can
351 346 * cause an error interrupt, even if the entry is masked...so
352 347 * write a valid vector to LVT entries along with the mask bit
353 348 */
354 349
355 350 /* All APICs have timer and LINT0/1 */
356 351 apic_reg_ops->apic_write(APIC_LOCAL_TIMER, AV_MASK|APIC_RESV_IRQ);
357 352 apic_reg_ops->apic_write(APIC_INT_VECT0, AV_MASK|APIC_RESV_IRQ);
358 353 apic_reg_ops->apic_write(APIC_INT_VECT1, AV_NMI); /* enable NMI */
359 354
360 355 /*
361 356 * On integrated APICs, the number of LVT entries is
362 357 * 'Max LVT entry' + 1; on 82489DX's (non-integrated
363 358 * APICs), nlvt is "3" (LINT0, LINT1, and timer)
364 359 */
365 360
366 361 if (apic_cpus[cpun].aci_local_ver < APIC_INTEGRATED_VERS) {
367 362 nlvt = 3;
368 363 } else {
369 364 nlvt = ((apic_reg_ops->apic_read(APIC_VERS_REG) >> 16) &
370 365 0xFF) + 1;
371 366 }
372 367
373 368 if (nlvt >= 5) {
374 369 /* Enable performance counter overflow interrupt */
375 370
376 371 if (!is_x86_feature(x86_featureset, X86FSET_MSR))
377 372 apic_enable_cpcovf_intr = 0;
378 373 if (apic_enable_cpcovf_intr) {
379 374 if (apic_cpcovf_vect == 0) {
380 375 int ipl = APIC_PCINT_IPL;
381 376 int irq = apic_get_ipivect(ipl, -1);
382 377
383 378 ASSERT(irq != -1);
384 379 apic_cpcovf_vect =
385 380 apic_irq_table[irq]->airq_vector;
386 381 ASSERT(apic_cpcovf_vect);
387 382 (void) add_avintr(NULL, ipl,
388 383 (avfunc)kcpc_hw_overflow_intr,
389 384 "apic pcint", irq, NULL, NULL, NULL, NULL);
390 385 kcpc_hw_overflow_intr_installed = 1;
391 386 kcpc_hw_enable_cpc_intr =
392 387 apic_cpcovf_mask_clear;
393 388 }
394 389 apic_reg_ops->apic_write(APIC_PCINT_VECT,
395 390 apic_cpcovf_vect);
396 391 }
397 392 }
398 393
399 394 if (nlvt >= 6) {
400 395 /* Only mask TM intr if the BIOS apparently doesn't use it */
401 396
402 397 uint32_t lvtval;
403 398
404 399 lvtval = apic_reg_ops->apic_read(APIC_THERM_VECT);
405 400 if (((lvtval & AV_MASK) == AV_MASK) ||
406 401 ((lvtval & AV_DELIV_MODE) != AV_SMI)) {
407 402 apic_reg_ops->apic_write(APIC_THERM_VECT,
408 403 AV_MASK|APIC_RESV_IRQ);
409 404 }
410 405 }
411 406
412 407 /* Enable error interrupt */
413 408
414 409 if (nlvt >= 4 && apic_enable_error_intr) {
415 410 if (apic_errvect == 0) {
416 411 int ipl = 0xf; /* get highest priority intr */
417 412 int irq = apic_get_ipivect(ipl, -1);
418 413
419 414 ASSERT(irq != -1);
420 415 apic_errvect = apic_irq_table[irq]->airq_vector;
421 416 ASSERT(apic_errvect);
422 417 /*
423 418 * Not PSMI compliant, but we are going to merge
424 419 * with ON anyway
425 420 */
426 421 (void) add_avintr((void *)NULL, ipl,
427 422 (avfunc)apic_error_intr, "apic error intr",
428 423 irq, NULL, NULL, NULL, NULL);
429 424 }
430 425 apic_reg_ops->apic_write(APIC_ERR_VECT, apic_errvect);
431 426 apic_reg_ops->apic_write(APIC_ERROR_STATUS, 0);
432 427 apic_reg_ops->apic_write(APIC_ERROR_STATUS, 0);
433 428 }
434 429
435 430 /* Enable CMCI interrupt */
436 431 if (cmi_enable_cmci) {
437 432
438 433 mutex_enter(&cmci_cpu_setup_lock);
439 434 if (cmci_cpu_setup_registered == 0) {
440 435 mutex_enter(&cpu_lock);
441 436 register_cpu_setup_func(cmci_cpu_setup, NULL);
442 437 mutex_exit(&cpu_lock);
443 438 cmci_cpu_setup_registered = 1;
444 439 }
445 440 mutex_exit(&cmci_cpu_setup_lock);
446 441
447 442 if (apic_cmci_vect == 0) {
448 443 int ipl = 0x2;
449 444 int irq = apic_get_ipivect(ipl, -1);
450 445
451 446 ASSERT(irq != -1);
452 447 apic_cmci_vect = apic_irq_table[irq]->airq_vector;
453 448 ASSERT(apic_cmci_vect);
454 449
455 450 (void) add_avintr(NULL, ipl,
456 451 (avfunc)cmi_cmci_trap,
457 452 "apic cmci intr", irq, NULL, NULL, NULL, NULL);
458 453 }
459 454 apic_reg_ops->apic_write(APIC_CMCI_VECT, apic_cmci_vect);
460 455 }
461 456 }
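A worked instance of the LVT-count computation in apic_init_intr() above (the version-register value is illustrative): if apic_reg_ops->apic_read(APIC_VERS_REG) returned 0x00050014, then nlvt = ((0x00050014 >> 16) & 0xFF) + 1 = 5 + 1 = 6, so both the performance-counter check (nlvt >= 5) and the thermal check (nlvt >= 6) are taken; a non-integrated 82489DX would instead be pinned at nlvt == 3 and skip both.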
462 457
463 458 static void
464 459 apic_picinit(void)
465 460 {
466 461 int i, j;
467 462 uint_t isr;
468 463
469 464 /*
470 465 * Initialize and enable interrupt remapping before apic
471 466 * hardware initialization
472 467 */
473 468 apic_intrmap_init(apic_mode);
474 469
475 470 /*
476 471 * On UniSys Model 6520, the BIOS leaves vector 0x20 isr
477 472 * bit on without clearing it with EOI. Since softint
478 473 * uses vector 0x20 to interrupt itself, softint will
479 474 * not work on this machine. To fix this problem,
480 475 * a check is made to verify that all the isr bits are clear.
481 476 * If not, EOIs are issued to clear the bits.
482 477 */
483 478 for (i = 7; i >= 1; i--) {
484 479 isr = apic_reg_ops->apic_read(APIC_ISR_REG + (i * 4));
485 480 if (isr != 0)
486 481 for (j = 0; ((j < 32) && (isr != 0)); j++)
487 482 if (isr & (1 << j)) {
488 483 apic_reg_ops->apic_write(
489 484 APIC_EOI_REG, 0);
490 485 isr &= ~(1 << j);
491 486 apic_error |= APIC_ERR_BOOT_EOI;
492 487 }
493 488 }
494 489
495 490 /* set a flag so we know we have run apic_picinit() */
496 491 apic_picinit_called = 1;
497 492 LOCK_INIT_CLEAR(&apic_gethrtime_lock);
498 493 LOCK_INIT_CLEAR(&apic_ioapic_lock);
499 494 LOCK_INIT_CLEAR(&apic_error_lock);
500 495 LOCK_INIT_CLEAR(&apic_mode_switch_lock);
501 496
502 497 picsetup(); /* initialise the 8259 */
503 498
504 499 /* add nmi handler - least priority nmi handler */
505 500 LOCK_INIT_CLEAR(&apic_nmi_lock);
506 501
507 502 if (!psm_add_nmintr(0, (avfunc) apic_nmi_intr,
508 503 "pcplusmp NMI handler", (caddr_t)NULL))
509 504 cmn_err(CE_WARN, "pcplusmp: Unable to add nmi handler");
510 505
511 506 /*
512 507 * Check for directed-EOI capability in the local APIC.
513 508 */
514 509 if (apic_directed_EOI_supported() == 1) {
515 510 apic_set_directed_EOI_handler();
516 511 }
517 512
518 513 apic_init_intr();
519 514
520 515 /* enable apic mode if imcr present */
521 516 if (apic_imcrp) {
522 517 outb(APIC_IMCR_P1, (uchar_t)APIC_IMCR_SELECT);
523 518 outb(APIC_IMCR_P2, (uchar_t)APIC_IMCR_APIC);
524 519 }
525 520
526 521 ioapic_init_intr(IOAPIC_MASK);
527 522 }
528 523
529 524 #ifdef DEBUG
530 525 void
531 526 apic_break(void)
532 527 {
533 528 }
534 529 #endif /* DEBUG */
535 530
536 531 /*
537 532 * platform_intr_enter
538 533 *
539 534 * Called at the beginning of the interrupt service routine to
540 535 * mask all level equal to and below the interrupt priority
541 536 * of the interrupting vector. An EOI should be given to
542 537 * the interrupt controller to enable other HW interrupts.
543 538 *
544 539 * Return -1 for spurious interrupts
545 540 *
546 541 */
547 542 /*ARGSUSED*/
548 543 static int
549 544 apic_intr_enter(int ipl, int *vectorp)
550 545 {
551 546 uchar_t vector;
552 547 int nipl;
553 548 int irq;
554 549 ulong_t iflag;
555 550 apic_cpus_info_t *cpu_infop;
556 551
557 552 /*
558 553 * The real vector delivered is (*vectorp + 0x20), but our caller
559 554 * subtracts 0x20 from the vector before passing it to us.
560 555 * (That's why APIC_BASE_VECT is 0x20.)
561 556 */
562 557 vector = (uchar_t)*vectorp;
563 558
564 559 /* if interrupted by the clock, increment apic_nsec_since_boot */
565 560 if (vector == apic_clkvect) {
566 561 if (!apic_oneshot) {
567 562 /* NOTE: this is not MT aware */
568 563 apic_hrtime_stamp++;
569 564 apic_nsec_since_boot += apic_nsec_per_intr;
570 565 apic_hrtime_stamp++;
571 566 last_count_read = apic_hertz_count;
572 567 apic_redistribute_compute();
573 568 }
574 569
575 570 /* We will avoid all the bookkeeping overhead for clock */
576 571 nipl = apic_ipls[vector];
577 572
578 573 *vectorp = apic_vector_to_irq[vector + APIC_BASE_VECT];
579 574 if (apic_mode == LOCAL_APIC) {
580 575 #if defined(__amd64)
581 576 setcr8((ulong_t)(apic_ipltopri[nipl] >>
582 577 APIC_IPL_SHIFT));
583 578 #else
584 579 if (apic_have_32bit_cr8)
585 580 setcr8((ulong_t)(apic_ipltopri[nipl] >>
586 581 APIC_IPL_SHIFT));
587 582 else
588 583 LOCAL_APIC_WRITE_REG(APIC_TASK_REG,
589 584 (uint32_t)apic_ipltopri[nipl]);
590 585 #endif
591 586 LOCAL_APIC_WRITE_REG(APIC_EOI_REG, 0);
592 587 } else {
593 588 X2APIC_WRITE(APIC_TASK_REG, apic_ipltopri[nipl]);
594 589 X2APIC_WRITE(APIC_EOI_REG, 0);
595 590 }
596 591
597 592 return (nipl);
598 593 }
599 594
600 595 cpu_infop = &apic_cpus[psm_get_cpu_id()];
601 596
602 597 if (vector == (APIC_SPUR_INTR - APIC_BASE_VECT)) {
603 598 cpu_infop->aci_spur_cnt++;
604 599 return (APIC_INT_SPURIOUS);
605 600 }
606 601
607 602 /* Check if the vector we got is really what we need */
608 603 if (apic_revector_pending) {
609 604 /*
610 605 * Disable interrupts for the duration of
611 606 * the vector translation to prevent a self-race for
612 607 * the apic_revector_lock. This cannot be done
613 608 * in apic_xlate_vector because it is recursive and
614 609 * we want the vector translation to be atomic with
615 610 * respect to other (higher-priority) interrupts.
616 611 */
617 612 iflag = intr_clear();
618 613 vector = apic_xlate_vector(vector + APIC_BASE_VECT) -
619 614 APIC_BASE_VECT;
620 615 intr_restore(iflag);
621 616 }
622 617
623 618 nipl = apic_ipls[vector];
624 619 *vectorp = irq = apic_vector_to_irq[vector + APIC_BASE_VECT];
625 620
626 621 if (apic_mode == LOCAL_APIC) {
627 622 #if defined(__amd64)
628 623 setcr8((ulong_t)(apic_ipltopri[nipl] >> APIC_IPL_SHIFT));
629 624 #else
630 625 if (apic_have_32bit_cr8)
631 626 setcr8((ulong_t)(apic_ipltopri[nipl] >>
632 627 APIC_IPL_SHIFT));
633 628 else
634 629 LOCAL_APIC_WRITE_REG(APIC_TASK_REG,
635 630 (uint32_t)apic_ipltopri[nipl]);
636 631 #endif
637 632 } else {
638 633 X2APIC_WRITE(APIC_TASK_REG, apic_ipltopri[nipl]);
639 634 }
640 635
641 636 cpu_infop->aci_current[nipl] = (uchar_t)irq;
642 637 cpu_infop->aci_curipl = (uchar_t)nipl;
643 638 cpu_infop->aci_ISR_in_progress |= 1 << nipl;
644 639
645 640 /*
646 641 * apic_level_intr could have been assimilated into the irq struct.
647 642 * but, having it as a character array is more efficient in terms of
648 643 * cache usage. So, we leave it as is.
649 644 */
650 645 if (!apic_level_intr[irq]) {
651 646 if (apic_mode == LOCAL_APIC) {
652 647 LOCAL_APIC_WRITE_REG(APIC_EOI_REG, 0);
653 648 } else {
654 649 X2APIC_WRITE(APIC_EOI_REG, 0);
655 650 }
656 651 }
657 652
658 653 #ifdef DEBUG
659 654 APIC_DEBUG_BUF_PUT(vector);
660 655 APIC_DEBUG_BUF_PUT(irq);
661 656 APIC_DEBUG_BUF_PUT(nipl);
662 657 APIC_DEBUG_BUF_PUT(psm_get_cpu_id());
663 658 if ((apic_stretch_interrupts) && (apic_stretch_ISR & (1 << nipl)))
664 659 drv_usecwait(apic_stretch_interrupts);
665 660
666 661 if (apic_break_on_cpu == psm_get_cpu_id())
667 662 apic_break();
668 663 #endif /* DEBUG */
669 664 return (nipl);
670 665 }
671 666
672 667 /*
673 668 * This macro is common code used by the MMIO local apic and X2APIC
674 669 * local apic.
675 670 */
676 671 #define APIC_INTR_EXIT() \
677 672 { \
678 673 cpu_infop = &apic_cpus[psm_get_cpu_id()]; \
679 674 if (apic_level_intr[irq]) \
680 675 apic_reg_ops->apic_send_eoi(irq); \
681 676 cpu_infop->aci_curipl = (uchar_t)prev_ipl; \
682 677 /* ISR above current pri could not be in progress */ \
683 678 cpu_infop->aci_ISR_in_progress &= (2 << prev_ipl) - 1; \
684 679 }
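The (2 << prev_ipl) - 1 expression in the macro builds a mask that keeps the in-progress bits for IPLs 0 through prev_ipl and clears everything higher; a small sketch of the arithmetic (values illustrative):

    unsigned int in_progress = 0x0860;      /* ISRs marked at ipls 5, 6, 11 */
    int prev_ipl = 6;

    /* (2 << 6) - 1 == 0x7f: bits 7..15 cannot still be in progress */
    in_progress &= (2 << prev_ipl) - 1;     /* 0x0860 -> 0x0060 */

Since an ISR above the priority we are returning to cannot still be running, those bits are cleared wholesale rather than tracked individually.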
685 680
686 681 /*
687 682 * Any changes made to this function must also change X2APIC
688 683 * version of intr_exit.
689 684 */
690 685 void
691 686 apic_intr_exit(int prev_ipl, int irq)
692 687 {
693 688 apic_cpus_info_t *cpu_infop;
694 689
695 690 #if defined(__amd64)
696 691 setcr8((ulong_t)(apic_ipltopri[prev_ipl] >> APIC_IPL_SHIFT));
697 692 #else
698 693 if (apic_have_32bit_cr8)
699 694 setcr8((ulong_t)(apic_ipltopri[prev_ipl] >> APIC_IPL_SHIFT));
700 695 else
701 696 apicadr[APIC_TASK_REG] = apic_ipltopri[prev_ipl];
702 697 #endif
703 698
704 699 APIC_INTR_EXIT();
705 700 }
706 701
707 702 /*
708 703 * Same as apic_intr_exit() except it uses MSR rather than MMIO
709 704 * to access local apic registers.
710 705 */
711 706 void
712 707 x2apic_intr_exit(int prev_ipl, int irq)
713 708 {
714 709 apic_cpus_info_t *cpu_infop;
715 710
716 711 X2APIC_WRITE(APIC_TASK_REG, apic_ipltopri[prev_ipl]);
717 712 APIC_INTR_EXIT();
718 713 }
719 714
720 715 intr_exit_fn_t
721 716 psm_intr_exit_fn(void)
722 717 {
723 718 if (apic_mode == LOCAL_X2APIC)
724 719 return (x2apic_intr_exit);
725 720
726 721 return (apic_intr_exit);
727 722 }
728 723
729 724 /*
730 725 * Mask all interrupts below or equal to the given IPL.
731 726 * Any changes made to this function must also change X2APIC
732 727 * version of setspl.
733 728 */
734 729 static void
735 730 apic_setspl(int ipl)
736 731 {
737 732 #if defined(__amd64)
738 733 setcr8((ulong_t)(apic_ipltopri[ipl] >> APIC_IPL_SHIFT));
739 734 #else
740 735 if (apic_have_32bit_cr8)
741 736 setcr8((ulong_t)(apic_ipltopri[ipl] >> APIC_IPL_SHIFT));
742 737 else
743 738 apicadr[APIC_TASK_REG] = apic_ipltopri[ipl];
744 739 #endif
745 740
746 741 /* interrupts at ipl above this cannot be in progress */
747 742 apic_cpus[psm_get_cpu_id()].aci_ISR_in_progress &= (2 << ipl) - 1;
748 743 /*
749 744 * this is a patch fix for the ALR QSMP P5 machine, so that interrupts
750 745 * have enough time to come in before the priority is raised again
751 746 * during the idle() loop.
752 747 */
753 748 if (apic_setspl_delay)
754 749 (void) apic_reg_ops->apic_get_pri();
755 750 }
756 751
757 752 /*
758 753 * X2APIC version of setspl.
759 754 * Mask all interrupts below or equal to the given IPL
760 755 */
761 756 static void
762 757 x2apic_setspl(int ipl)
763 758 {
764 759 X2APIC_WRITE(APIC_TASK_REG, apic_ipltopri[ipl]);
765 760
766 761 /* interrupts at ipl above this cannot be in progress */
767 762 apic_cpus[psm_get_cpu_id()].aci_ISR_in_progress &= (2 << ipl) - 1;
768 763 }
769 764
770 765 /*ARGSUSED*/
771 766 static int
772 767 apic_addspl(int irqno, int ipl, int min_ipl, int max_ipl)
773 768 {
774 769 return (apic_addspl_common(irqno, ipl, min_ipl, max_ipl));
775 770 }
776 771
777 772 static int
778 773 apic_delspl(int irqno, int ipl, int min_ipl, int max_ipl)
779 774 {
780 775 return (apic_delspl_common(irqno, ipl, min_ipl, max_ipl));
781 776 }
782 777
783 778 static int
784 779 apic_post_cpu_start(void)
785 780 {
786 781 int cpun;
787 782 static int cpus_started = 1;
788 783
789 784 /* We know this CPU + BSP started successfully. */
790 785 cpus_started++;
791 786
792 787 /*
793 788 * On BSP we would have enabled X2APIC, if supported by processor,
794 789 * in acpi_probe(), but on AP we do it here.
795 790 *
796 791 * We enable X2APIC mode only if BSP is running in X2APIC & the
797 792 * local APIC mode of the current CPU is MMIO (xAPIC).
798 793 */
799 794 if (apic_mode == LOCAL_X2APIC && apic_detect_x2apic() &&
800 795 apic_local_mode() == LOCAL_APIC) {
801 796 apic_enable_x2apic();
802 797 }
803 798
804 799 /*
805 800 * Switch back to x2apic IPI sending method for performance when target
806 801 * CPU has entered x2apic mode.
807 802 */
808 803 if (apic_mode == LOCAL_X2APIC) {
809 804 apic_switch_ipi_callback(B_FALSE);
810 805 }
811 806
812 807 splx(ipltospl(LOCK_LEVEL));
813 808 apic_init_intr();
814 809
815 810 /*
816 811 * Some systems don't enable the internal cache on the non-boot
817 812 * CPUs, so we enable it here.
818 813 */
819 814 setcr0(getcr0() & ~(CR0_CD | CR0_NW));
820 815
821 816 #ifdef DEBUG
822 817 APIC_AV_PENDING_SET();
823 818 #else
824 819 if (apic_mode == LOCAL_APIC)
825 820 APIC_AV_PENDING_SET();
826 821 #endif /* DEBUG */
827 822
828 823 /*
829 824 * We may be booting, or resuming from suspend; aci_status will
830 825 * be APIC_CPU_INTR_ENABLE if coming from suspend, so we add the
831 826 * APIC_CPU_ONLINE flag here rather than setting aci_status completely.
832 827 */
833 828 cpun = psm_get_cpu_id();
834 829 apic_cpus[cpun].aci_status |= APIC_CPU_ONLINE;
835 830
836 831 apic_reg_ops->apic_write(APIC_DIVIDE_REG, apic_divide_reg_init);
837 832 return (PSM_SUCCESS);
838 833 }
839 834
840 835 /*
841 836 * type == -1 indicates it is an internal request. Do not change
842 837 * resv_vector for these requests
843 838 */
844 839 static int
845 840 apic_get_ipivect(int ipl, int type)
846 841 {
847 842 uchar_t vector;
848 843 int irq;
849 844
850 845 if ((irq = apic_allocate_irq(APIC_VECTOR(ipl))) != -1) {
851 846 if (vector = apic_allocate_vector(ipl, irq, 1)) {
852 847 apic_irq_table[irq]->airq_mps_intr_index =
853 848 RESERVE_INDEX;
854 849 apic_irq_table[irq]->airq_vector = vector;
855 850 if (type != -1) {
856 851 apic_resv_vector[ipl] = vector;
857 852 }
858 853 return (irq);
859 854 }
860 855 }
861 856 apic_error |= APIC_ERR_GET_IPIVECT_FAIL;
862 857 return (-1); /* shouldn't happen */
863 858 }
864 859
865 860 static int
866 861 apic_getclkirq(int ipl)
867 862 {
868 863 int irq;
869 864
870 865 if ((irq = apic_get_ipivect(ipl, -1)) == -1)
871 866 return (-1);
872 867 /*
873 868 * Note the vector in apic_clkvect for per clock handling.
874 869 */
875 870 apic_clkvect = apic_irq_table[irq]->airq_vector - APIC_BASE_VECT;
876 871 APIC_VERBOSE_IOAPIC((CE_NOTE, "get_clkirq: vector = %x\n",
877 872 apic_clkvect));
878 873 return (irq);
879 874 }
880 875
881 876 /*
882 877 * Try to disable all interrupts. We just assign interrupts to other
883 878 * processors based on policy. If any were bound by user request, we
884 879 * let them continue and return failure. We do not bother to check
885 880 * for cache affinity while rebinding.
886 881 */
887 882
888 883 static int
889 884 apic_disable_intr(processorid_t cpun)
890 885 {
891 886 int bind_cpu = 0, i, hardbound = 0;
892 887 apic_irq_t *irq_ptr;
893 888 ulong_t iflag;
894 889
895 890 iflag = intr_clear();
896 891 lock_set(&apic_ioapic_lock);
897 892
898 893 for (i = 0; i <= APIC_MAX_VECTOR; i++) {
899 894 if (apic_reprogram_info[i].done == B_FALSE) {
900 895 if (apic_reprogram_info[i].bindcpu == cpun) {
901 896 /*
902 897 * CPU is busy -- it's the target of
903 898 * a pending reprogramming attempt
904 899 */
905 900 lock_clear(&apic_ioapic_lock);
906 901 intr_restore(iflag);
907 902 return (PSM_FAILURE);
908 903 }
909 904 }
910 905 }
911 906
912 907 apic_cpus[cpun].aci_status &= ~APIC_CPU_INTR_ENABLE;
913 908
914 909 apic_cpus[cpun].aci_curipl = 0;
915 910
916 911 i = apic_min_device_irq;
917 912 for (; i <= apic_max_device_irq; i++) {
918 913 /*
919 914 * If there are bound interrupts on this cpu, then
920 915 * rebind them to other processors.
921 916 */
922 917 if ((irq_ptr = apic_irq_table[i]) != NULL) {
923 918 ASSERT((irq_ptr->airq_temp_cpu == IRQ_UNBOUND) ||
924 919 (irq_ptr->airq_temp_cpu == IRQ_UNINIT) ||
925 920 (apic_cpu_in_range(irq_ptr->airq_temp_cpu)));
926 921
927 922 if (irq_ptr->airq_temp_cpu == (cpun | IRQ_USER_BOUND)) {
928 923 hardbound = 1;
929 924 continue;
930 925 }
931 926
932 927 if (irq_ptr->airq_temp_cpu == cpun) {
933 928 do {
934 929 bind_cpu =
935 930 apic_find_cpu(APIC_CPU_INTR_ENABLE);
936 931 } while (apic_rebind_all(irq_ptr, bind_cpu));
937 932 }
938 933 }
939 934 }
940 935
941 936 lock_clear(&apic_ioapic_lock);
942 937 intr_restore(iflag);
943 938
944 939 if (hardbound) {
945 940 cmn_err(CE_WARN, "Could not disable interrupts on %d "
946 941 "due to user bound interrupts", cpun);
947 942 return (PSM_FAILURE);
948 943 }
949 944 else
950 945 return (PSM_SUCCESS);
951 946 }
952 947
953 948 /*
954 949 * Bind interrupts to the CPU's local APIC.
955 950 * Interrupts should not be bound to a CPU's local APIC until the CPU
956 951 * is ready to receive interrupts.
957 952 */
958 953 static void
959 954 apic_enable_intr(processorid_t cpun)
960 955 {
961 956 int i;
962 957 apic_irq_t *irq_ptr;
963 958 ulong_t iflag;
964 959
965 960 iflag = intr_clear();
966 961 lock_set(&apic_ioapic_lock);
967 962
968 963 apic_cpus[cpun].aci_status |= APIC_CPU_INTR_ENABLE;
969 964
970 965 i = apic_min_device_irq;
971 966 for (i = apic_min_device_irq; i <= apic_max_device_irq; i++) {
972 967 if ((irq_ptr = apic_irq_table[i]) != NULL) {
973 968 if ((irq_ptr->airq_cpu & ~IRQ_USER_BOUND) == cpun) {
974 969 (void) apic_rebind_all(irq_ptr,
975 970 irq_ptr->airq_cpu);
976 971 }
977 972 }
978 973 }
979 974
980 975 if (apic_cpus[cpun].aci_status & APIC_CPU_SUSPEND)
981 976 apic_cpus[cpun].aci_status &= ~APIC_CPU_SUSPEND;
982 977
983 978 lock_clear(&apic_ioapic_lock);
984 979 intr_restore(iflag);
985 980 }
986 981
987 982 /*
988 983 * If this module needs a periodic handler for the interrupt distribution, it
989 984 * can be added here. The argument to the periodic handler is not currently
990 985 * used, but is reserved for future.
991 986 * used, but is reserved for future use.
992 987 static void
993 988 apic_post_cyclic_setup(void *arg)
994 989 {
995 990 _NOTE(ARGUNUSED(arg))
996 991
997 992 cyc_handler_t cyh;
998 993 cyc_time_t cyt;
999 994
1000 995 /* cpu_lock is held */
1001 996 /* set up a periodic handler for intr redistribution */
1002 997
1003 998 /*
1004 999 * In periodic mode, intr redistribution processing is done in
1005 1000 * apic_intr_enter during clk intr processing
1006 1001 */
1007 1002 if (!apic_oneshot)
1008 1003 return;
1009 1004
1010 1005 /*
1011 1006 * Register a periodic handler for the redistribution processing.
1012 1007 * Though we would generally prefer to use the DDI interface for
1013 1008 * periodic handler invocation, ddi_periodic_add(9F), we are
1014 1009 * unfortunately already holding cpu_lock, which ddi_periodic_add will
1015 1010 * attempt to take for us. Thus, we add our own cyclic directly:
1016 1011 */
1017 1012 cyh.cyh_func = (void (*)(void *))apic_redistribute_compute;
1018 1013 cyh.cyh_arg = NULL;
1019 1014 cyh.cyh_level = CY_LOW_LEVEL;
1020 1015
1021 1016 cyt.cyt_when = 0;
1022 1017 cyt.cyt_interval = apic_redistribute_sample_interval;
1023 1018
1024 1019 apic_cyclic_id = cyclic_add(&cyh, &cyt);
1025 1020 }
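For contrast with the comment above, a sketch of what the registration would look like through ddi_periodic_add(9F) if cpu_lock were not already held at this point; the handle name and the DDI_IPL_1 level are illustrative choices, not taken from the source:

    ddi_periodic_t redist_hdl;              /* hypothetical handle */

    redist_hdl = ddi_periodic_add(
        (void (*)(void *))apic_redistribute_compute, NULL,
        apic_redistribute_sample_interval, DDI_IPL_1);

The cyclic_add() path used here achieves the same periodic invocation while avoiding the recursive cpu_lock acquisition that ddi_periodic_add() would attempt.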
1026 1021
1027 1022 static void
1028 1023 apic_redistribute_compute(void)
1029 1024 {
1030 1025 int i, j, max_busy;
1031 1026
1032 1027 if (apic_enable_dynamic_migration) {
1033 1028 if (++apic_nticks == apic_sample_factor_redistribution) {
1034 1029 /*
1035 1030 * Time to call apic_intr_redistribute().
1036 1031 * Reset apic_nticks. This will cause max_busy
1037 1032 * to be calculated below and if it is more than
1038 1033 * apic_int_busy_mark, we will do the whole thing
1039 1034 */
1040 1035 apic_nticks = 0;
1041 1036 }
1042 1037 max_busy = 0;
1043 1038 for (i = 0; i < apic_nproc; i++) {
1044 1039 if (!apic_cpu_in_range(i))
1045 1040 continue;
1046 1041
1047 1042 /*
1048 1043 * Check if curipl is non zero & if ISR is in
1049 1044 * progress
1050 1045 */
1051 1046 if (((j = apic_cpus[i].aci_curipl) != 0) &&
1052 1047 (apic_cpus[i].aci_ISR_in_progress & (1 << j))) {
1053 1048
1054 1049 int irq;
1055 1050 apic_cpus[i].aci_busy++;
1056 1051 irq = apic_cpus[i].aci_current[j];
1057 1052 apic_irq_table[irq]->airq_busy++;
1058 1053 }
1059 1054
1060 1055 if (!apic_nticks &&
1061 1056 (apic_cpus[i].aci_busy > max_busy))
1062 1057 max_busy = apic_cpus[i].aci_busy;
1063 1058 }
1064 1059 if (!apic_nticks) {
1065 1060 if (max_busy > apic_int_busy_mark) {
1066 1061 /*
1067 1062 * We could make the following check be
1068 1063 * skipped > 1 in which case, we get a
1069 1064 * redistribution at half the busy mark (due to
1070 1065 * double interval). Need to be able to collect
1071 1066 * more empirical data to decide if that is a
1072 1067 * good strategy. Punt for now.
1073 1068 */
1074 1069 if (apic_skipped_redistribute) {
1075 1070 apic_cleanup_busy();
1076 1071 apic_skipped_redistribute = 0;
1077 1072 } else {
1078 1073 apic_intr_redistribute();
1079 1074 }
1080 1075 } else
1081 1076 apic_skipped_redistribute++;
1082 1077 }
1083 1078 }
1084 1079 }
1085 1080
1086 1081
1087 1082 /*
1088 1083 * The following functions are in the platform specific file so that they
1089 1084 * can be different functions depending on whether we are running on
1090 1085 * bare metal or a hypervisor.
1091 1086 */
1092 1087
1093 1088 /*
1094 1089 * Check to make sure there are enough irq slots
1095 1090 */
1096 1091 int
1097 1092 apic_check_free_irqs(int count)
1098 1093 {
1099 1094 int i, avail;
1100 1095
1101 1096 avail = 0;
1102 1097 for (i = APIC_FIRST_FREE_IRQ; i < APIC_RESV_IRQ; i++) {
1103 1098 if ((apic_irq_table[i] == NULL) ||
1104 1099 apic_irq_table[i]->airq_mps_intr_index == FREE_INDEX) {
1105 1100 if (++avail >= count)
1106 1101 return (PSM_SUCCESS);
1107 1102 }
1108 1103 }
1109 1104 return (PSM_FAILURE);
1110 1105 }
1111 1106
1112 1107 /*
1113 1108 * This function allocates "count" MSI vector(s) for the given "dip/pri/type"
1114 1109 */
1115 1110 int
1116 1111 apic_alloc_msi_vectors(dev_info_t *dip, int inum, int count, int pri,
1117 1112 int behavior)
1118 1113 {
1119 1114 int rcount, i;
1120 1115 uchar_t start, irqno;
1121 1116 uint32_t cpu;
1122 1117 major_t major;
1123 1118 apic_irq_t *irqptr;
1124 1119
1125 1120 DDI_INTR_IMPLDBG((CE_CONT, "apic_alloc_msi_vectors: dip=0x%p "
1126 1121 "inum=0x%x pri=0x%x count=0x%x behavior=%d\n",
1127 1122 (void *)dip, inum, pri, count, behavior));
1128 1123
1129 1124 if (count > 1) {
1130 1125 if (behavior == DDI_INTR_ALLOC_STRICT &&
1131 1126 apic_multi_msi_enable == 0)
1132 1127 return (0);
1133 1128 if (apic_multi_msi_enable == 0)
1134 1129 count = 1;
1135 1130 }
1136 1131
1137 1132 if ((rcount = apic_navail_vector(dip, pri)) > count)
1138 1133 rcount = count;
1139 1134 else if (rcount == 0 || (rcount < count &&
1140 1135 behavior == DDI_INTR_ALLOC_STRICT))
1141 1136 return (0);
1142 1137
1143 1138 /* if not ISP2, then round it down */
1144 1139 if (!ISP2(rcount))
1145 1140 rcount = 1 << (highbit(rcount) - 1);
1146 1141
1147 1142 mutex_enter(&airq_mutex);
1148 1143
1149 1144 for (start = 0; rcount > 0; rcount >>= 1) {
1150 1145 if ((start = apic_find_multi_vectors(pri, rcount)) != 0 ||
1151 1146 behavior == DDI_INTR_ALLOC_STRICT)
1152 1147 break;
1153 1148 }
1154 1149
1155 1150 if (start == 0) {
1156 1151 /* no vector available */
1157 1152 mutex_exit(&airq_mutex);
1158 1153 return (0);
1159 1154 }
1160 1155
1161 1156 if (apic_check_free_irqs(rcount) == PSM_FAILURE) {
1162 1157 /* not enough free irq slots available */
1163 1158 mutex_exit(&airq_mutex);
1164 1159 return (0);
1165 1160 }
1166 1161
1167 1162 major = (dip != NULL) ? ddi_driver_major(dip) : 0;
1168 1163 for (i = 0; i < rcount; i++) {
1169 1164 if ((irqno = apic_allocate_irq(apic_first_avail_irq)) ==
1170 1165 (uchar_t)-1) {
1171 1166 /*
1172 1167 * shouldn't happen because of the
1173 1168 * apic_check_free_irqs() check earlier
1174 1169 */
1175 1170 mutex_exit(&airq_mutex);
1176 1171 DDI_INTR_IMPLDBG((CE_CONT, "apic_alloc_msi_vectors: "
1177 1172 "apic_allocate_irq failed\n"));
1178 1173 return (i);
1179 1174 }
1180 1175 apic_max_device_irq = max(irqno, apic_max_device_irq);
1181 1176 apic_min_device_irq = min(irqno, apic_min_device_irq);
1182 1177 irqptr = apic_irq_table[irqno];
1183 1178 #ifdef DEBUG
1184 1179 if (apic_vector_to_irq[start + i] != APIC_RESV_IRQ)
1185 1180 DDI_INTR_IMPLDBG((CE_CONT, "apic_alloc_msi_vectors: "
1186 1181 "apic_vector_to_irq is not APIC_RESV_IRQ\n"));
1187 1182 #endif
1188 1183 apic_vector_to_irq[start + i] = (uchar_t)irqno;
1189 1184
1190 1185 irqptr->airq_vector = (uchar_t)(start + i);
1191 1186 irqptr->airq_ioapicindex = (uchar_t)inum; /* start */
1192 1187 irqptr->airq_intin_no = (uchar_t)rcount;
1193 1188 irqptr->airq_ipl = pri;
1194 1189 irqptr->airq_vector = start + i;
1195 1190 irqptr->airq_origirq = (uchar_t)(inum + i);
1196 1191 irqptr->airq_share_id = 0;
1197 1192 irqptr->airq_mps_intr_index = MSI_INDEX;
1198 1193 irqptr->airq_dip = dip;
1199 1194 irqptr->airq_major = major;
1200 1195 if (i == 0) /* they all bound to the same cpu */
1201 1196 cpu = irqptr->airq_cpu = apic_bind_intr(dip, irqno,
1202 1197 0xff, 0xff);
1203 1198 else
1204 1199 irqptr->airq_cpu = cpu;
1205 1200 DDI_INTR_IMPLDBG((CE_CONT, "apic_alloc_msi_vectors: irq=0x%x "
1206 1201 "dip=0x%p vector=0x%x origirq=0x%x pri=0x%x\n", irqno,
1207 1202 (void *)irqptr->airq_dip, irqptr->airq_vector,
1208 1203 irqptr->airq_origirq, pri));
1209 1204 }
1210 1205 mutex_exit(&airq_mutex);
1211 1206 return (rcount);
1212 1207 }
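A worked instance of the power-of-two rounding in apic_alloc_msi_vectors() above: MSI hardware grants vectors in power-of-two blocks, so a request that survives the availability checks with, say, rcount == 5 is rounded down via 1 << (highbit(5) - 1); highbit(5) is 3 (highbit(9F) numbers bits from 1), giving 1 << 2 == 4 vectors actually allocated.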
1213 1208
1214 1209 /*
1215 1210 * This function allocates "count" MSI-X vector(s) for the given "dip/pri/type"
1216 1211 */
1217 1212 int
1218 1213 apic_alloc_msix_vectors(dev_info_t *dip, int inum, int count, int pri,
1219 1214 int behavior)
1220 1215 {
1221 1216 int rcount, i;
1222 1217 major_t major;
1223 1218
1224 1219 mutex_enter(&airq_mutex);
1225 1220
1226 1221 if ((rcount = apic_navail_vector(dip, pri)) > count)
1227 1222 rcount = count;
1228 1223 else if (rcount == 0 || (rcount < count &&
1229 1224 behavior == DDI_INTR_ALLOC_STRICT)) {
1230 1225 rcount = 0;
1231 1226 goto out;
1232 1227 }
1233 1228
1234 1229 if (apic_check_free_irqs(rcount) == PSM_FAILURE) {
1235 1230 /* not enough free irq slots available */
1236 1231 rcount = 0;
1237 1232 goto out;
1238 1233 }
1239 1234
1240 1235 major = (dip != NULL) ? ddi_driver_major(dip) : 0;
1241 1236 for (i = 0; i < rcount; i++) {
1242 1237 uchar_t vector, irqno;
1243 1238 apic_irq_t *irqptr;
1244 1239
1245 1240 if ((irqno = apic_allocate_irq(apic_first_avail_irq)) ==
1246 1241 (uchar_t)-1) {
1247 1242 /*
1248 1243 * shouldn't happen because of the
1249 1244 * apic_check_free_irqs() check earlier
1250 1245 */
1251 1246 DDI_INTR_IMPLDBG((CE_CONT, "apic_alloc_msix_vectors: "
1252 1247 "apic_allocate_irq failed\n"));
1253 1248 rcount = i;
1254 1249 goto out;
1255 1250 }
1256 1251 if ((vector = apic_allocate_vector(pri, irqno, 1)) == 0) {
1257 1252 /*
1258 1253 * shouldn't happen because of the
1259 1254 * apic_navail_vector() call earlier
1260 1255 */
1261 1256 DDI_INTR_IMPLDBG((CE_CONT, "apic_alloc_msix_vectors: "
1262 1257 "apic_allocate_vector failed\n"));
1263 1258 rcount = i;
1264 1259 goto out;
1265 1260 }
1266 1261 apic_max_device_irq = max(irqno, apic_max_device_irq);
1267 1262 apic_min_device_irq = min(irqno, apic_min_device_irq);
1268 1263 irqptr = apic_irq_table[irqno];
1269 1264 irqptr->airq_vector = (uchar_t)vector;
1270 1265 irqptr->airq_ipl = pri;
1271 1266 irqptr->airq_origirq = (uchar_t)(inum + i);
1272 1267 irqptr->airq_share_id = 0;
1273 1268 irqptr->airq_mps_intr_index = MSIX_INDEX;
1274 1269 irqptr->airq_dip = dip;
1275 1270 irqptr->airq_major = major;
1276 1271 irqptr->airq_cpu = apic_bind_intr(dip, irqno, 0xff, 0xff);
1277 1272 }
1278 1273 out:
1279 1274 mutex_exit(&airq_mutex);
1280 1275 return (rcount);
1281 1276 }
1282 1277
1283 1278 /*
1284 1279 * Allocate a free vector for irq at ipl. Takes care of merging of multiple
1285 1280 * IPLs into a single APIC level as well as stretching some IPLs onto multiple
1286 1281 * levels. APIC_HI_PRI_VECTS interrupts are reserved for high priority
1287 1282 * requests and allocated only when pri is set.
1288 1283 */
1289 1284 uchar_t
1290 1285 apic_allocate_vector(int ipl, int irq, int pri)
1291 1286 {
1292 1287 int lowest, highest, i;
1293 1288
1294 1289 highest = apic_ipltopri[ipl] + APIC_VECTOR_MASK;
1295 1290 lowest = apic_ipltopri[ipl - 1] + APIC_VECTOR_PER_IPL;
1296 1291
1297 1292 if (highest < lowest) /* Both ipl and ipl - 1 map to same pri */
1298 1293 lowest -= APIC_VECTOR_PER_IPL;
1299 1294
1300 1295 #ifdef DEBUG
1301 1296 if (apic_restrict_vector) /* for testing shared interrupt logic */
1302 1297 highest = lowest + apic_restrict_vector + APIC_HI_PRI_VECTS;
1303 1298 #endif /* DEBUG */
1304 1299 if (pri == 0)
1305 1300 highest -= APIC_HI_PRI_VECTS;
1306 1301
1307 1302 for (i = lowest; i <= highest; i++) {
1308 1303 if (APIC_CHECK_RESERVE_VECTORS(i))
1309 1304 continue;
1310 1305 if (apic_vector_to_irq[i] == APIC_RESV_IRQ) {
1311 1306 apic_vector_to_irq[i] = (uchar_t)irq;
1312 1307 return (i);
1313 1308 }
1314 1309 }
1315 1310
1316 1311 return (0);
1317 1312 }
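A worked instance of the range computation in apic_allocate_vector() above, using the apic_ipltopri values the table at the top of this file produces: for ipl == 5, highest = apic_ipltopri[5] + APIC_VECTOR_MASK = 0x50 + 0x0f = 0x5f and lowest = apic_ipltopri[4] + APIC_VECTOR_PER_IPL = 0x30 + 0x10 = 0x40, i.e. exactly the 0x40-0x5f block that IPL 5 owns in the mapping table; with pri == 0 the top APIC_HI_PRI_VECTS vectors of the block are held back for high-priority requests. (APIC_VECTOR_MASK == 0x0f is an assumption consistent with 16 vectors per IPL.)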
1318 1313
1319 1314 /* Mark vector as not being used by any irq */
1320 1315 void
1321 1316 apic_free_vector(uchar_t vector)
1322 1317 {
1323 1318 apic_vector_to_irq[vector] = APIC_RESV_IRQ;
1324 1319 }
1325 1320
1326 1321 /*
1327 1322 * Call rebind to do the actual programming.
1328 1323 * Must be called with interrupts disabled and apic_ioapic_lock held
1329 1324 * 'p' is polymorphic -- if this function is called to process a deferred
1330 1325 * reprogramming, p is of type 'struct ioapic_reprogram_data *', from which
1331 1326 * the irq pointer is retrieved. If not doing deferred reprogramming,
1332 1327 * p is of the type 'apic_irq_t *'.
1333 1328 *
1334 1329 * apic_ioapic_lock must be held across this call, as it protects apic_rebind
1335 1330 * and it protects apic_get_next_bind_cpu() from a race in which a CPU can be
1336 1331 * taken offline after a cpu is selected, but before apic_rebind is called to
1337 1332 * bind interrupts to it.
1338 1333 */
1339 1334 int
1340 1335 apic_setup_io_intr(void *p, int irq, boolean_t deferred)
1341 1336 {
1342 1337 apic_irq_t *irqptr;
1343 1338 struct ioapic_reprogram_data *drep = NULL;
1344 1339 int rv;
1345 1340
1346 1341 if (deferred) {
1347 1342 drep = (struct ioapic_reprogram_data *)p;
1348 1343 ASSERT(drep != NULL);
1349 1344 irqptr = drep->irqp;
1350 1345 } else
1351 1346 irqptr = (apic_irq_t *)p;
1352 1347
1353 1348 ASSERT(irqptr != NULL);
1354 1349
1355 1350 rv = apic_rebind(irqptr, apic_irq_table[irq]->airq_cpu, drep);
1356 1351 if (rv) {
1357 1352 /*
1358 1353 * CPU is not up or interrupts are disabled. Fall back to
1359 1354 * the first available CPU
1360 1355 */
1361 1356 rv = apic_rebind(irqptr, apic_find_cpu(APIC_CPU_INTR_ENABLE),
1362 1357 drep);
1363 1358 }
1364 1359
1365 1360 return (rv);
1366 1361 }
1367 1362
1368 1363
1369 1364 uchar_t
1370 1365 apic_modify_vector(uchar_t vector, int irq)
1371 1366 {
1372 1367 apic_vector_to_irq[vector] = (uchar_t)irq;
1373 1368 return (vector);
1374 1369 }
1375 1370
1376 1371 char *
1377 1372 apic_get_apic_type(void)
1378 1373 {
1379 1374 return (apic_psm_info.p_mach_idstring);
1380 1375 }
1381 1376
1382 1377 void
1383 1378 x2apic_update_psm(void)
1384 1379 {
1385 1380 struct psm_ops *pops = &apic_ops;
1386 1381
1387 1382 ASSERT(pops != NULL);
1388 1383
1389 1384 pops->psm_intr_exit = x2apic_intr_exit;
1390 1385 pops->psm_setspl = x2apic_setspl;
1391 1386
1392 1387 pops->psm_send_ipi = x2apic_send_ipi;
1393 1388 send_dirintf = pops->psm_send_ipi;
1394 1389
1395 1390 apic_mode = LOCAL_X2APIC;
1396 1391 apic_change_ops();
1397 1392 }
(1079 lines elided)