126 * 6 0x60-0x7f 0x40-0x5f
127 * 7,8,9 0x80-0x8f 0x60-0x6f
128 * 10 0x90-0x9f 0x70-0x7f
129 * 11 0xa0-0xaf 0x80-0x8f
130 * ... ...
131 * 15 0xe0-0xef 0xc0-0xcf
132 * 15 0xf0-0xff 0xd0-0xdf
133 */
/*
 * Per-group vector-to-IPL map: entry i gives the unix IPL for the 16
 * vectors in group i.  The ipl of an ISR at vector X is
 * apic_vectortoipl[X>>4], where X is the programmed vector minus
 * APIC_BASE_VECT (see the note following this table).
 */
134 uchar_t apic_vectortoipl[APIC_AVAIL_VECTOR / APIC_VECTOR_PER_IPL] = {
135 3, 4, 5, 5, 6, 6, 9, 10, 11, 12, 13, 14, 15, 15
136 };
137 /*
138 * The ipl of an ISR at vector X is apic_vectortoipl[X>>4]
139 * NOTE that this is vector as passed into intr_enter which is
140 * programmed vector - 0x20 (APIC_BASE_VECT)
141 */
142
/*
 * unix ipl -> APIC task priority: the taskpri value programmed into the
 * apic to mask the given ipl.  Populated from apic_vectortoipl[] by the
 * initialization loop later in this file.
 */
143 uchar_t apic_ipltopri[MAXIPL + 1]; /* unix ipl to apic pri */
144 /* The taskpri to be programmed into apic to mask given ipl */
145
/*
 * amd64 only: scratch per-IPL priority array; installed as
 * CPU->cpu_pri_data at the end of initialization in this file.
 */
146 #if defined(__amd64)
147 static unsigned char dummy_cpu_pri[MAXIPL + 1];
148 #endif
149
150 /*
151 * Correlation of the hardware vector to the IPL in use, initialized
152 * from apic_vectortoipl[] in apic_init(). The final IPLs may not correlate
153 * to the IPLs in apic_vectortoipl on some systems that share interrupt lines
154 * connected to errata-stricken IOAPICs
155 */
/* One entry per available hardware vector; see the block comment above. */
156 uchar_t apic_ipls[APIC_AVAIL_VECTOR];
157
158 /*
159 * Patchable global variables.
160 */
/*
 * NOTE(review): "patchable" presumably means tunable via a kernel
 * debugger or /etc/system -- confirm against platform documentation.
 */
161 int apic_enable_hwsoftint = 0; /* 0 - disable, 1 - enable */
162 int apic_enable_bind_log = 1; /* 1 - display interrupt binding log */
163
164 /*
165 * Local static data
166 */
167 static struct psm_ops apic_ops = {
168 apic_probe,
169
283
284 psm_get_ioapicid = apic_get_ioapicid;
285 psm_get_localapicid = apic_get_localapicid;
286 psm_xlate_vector_by_irq = apic_xlate_vector_by_irq;
287
288 apic_ipltopri[0] = APIC_VECTOR_PER_IPL; /* leave 0 for idle */
289 for (i = 0; i < (APIC_AVAIL_VECTOR / APIC_VECTOR_PER_IPL); i++) {
290 if ((i < ((APIC_AVAIL_VECTOR / APIC_VECTOR_PER_IPL) - 1)) &&
291 (apic_vectortoipl[i + 1] == apic_vectortoipl[i]))
292 /* get to highest vector at the same ipl */
293 continue;
294 for (; j <= apic_vectortoipl[i]; j++) {
295 apic_ipltopri[j] = (i << APIC_IPL_SHIFT) +
296 APIC_BASE_VECT;
297 }
298 }
299 for (; j < MAXIPL + 1; j++)
300 /* fill up any empty ipltopri slots */
301 apic_ipltopri[j] = (i << APIC_IPL_SHIFT) + APIC_BASE_VECT;
302 apic_init_common();
303 #if defined(__amd64)
304 CPU->cpu_pri_data = dummy_cpu_pri;
305 #else
306 if (cpuid_have_cr8access(CPU))
307 apic_have_32bit_cr8 = 1;
308 #endif /* __amd64 */
309 }
310
311 static void
312 apic_init_intr(void)
313 {
314 processorid_t cpun = psm_get_cpu_id();
315 uint_t nlvt;
316 uint32_t svr = AV_UNIT_ENABLE | APIC_SPUR_INTR;
317
318 apic_reg_ops->apic_write_task_reg(APIC_MASK_ALL);
319
320 if (apic_mode == LOCAL_APIC) {
321 /*
322 * We are running APIC in MMIO mode.
323 */
324 if (apic_flat_model) {
325 apic_reg_ops->apic_write(APIC_FORMAT_REG,
326 APIC_FLAT_MODEL);
327 } else {
328 apic_reg_ops->apic_write(APIC_FORMAT_REG,
|
126 * 6 0x60-0x7f 0x40-0x5f
127 * 7,8,9 0x80-0x8f 0x60-0x6f
128 * 10 0x90-0x9f 0x70-0x7f
129 * 11 0xa0-0xaf 0x80-0x8f
130 * ... ...
131 * 15 0xe0-0xef 0xc0-0xcf
132 * 15 0xf0-0xff 0xd0-0xdf
133 */
/*
 * Per-group vector-to-IPL map: entry i gives the unix IPL for the 16
 * vectors in group i.  The ipl of an ISR at vector X is
 * apic_vectortoipl[X>>4], where X is the programmed vector minus
 * APIC_BASE_VECT (see the note following this table).
 */
134 uchar_t apic_vectortoipl[APIC_AVAIL_VECTOR / APIC_VECTOR_PER_IPL] = {
135 3, 4, 5, 5, 6, 6, 9, 10, 11, 12, 13, 14, 15, 15
136 };
137 /*
138 * The ipl of an ISR at vector X is apic_vectortoipl[X>>4]
139 * NOTE that this is vector as passed into intr_enter which is
140 * programmed vector - 0x20 (APIC_BASE_VECT)
141 */
142
/*
 * unix ipl -> APIC task priority: the taskpri value programmed into the
 * apic to mask the given ipl.  Populated from apic_vectortoipl[] by the
 * initialization loop later in this file.
 */
143 uchar_t apic_ipltopri[MAXIPL + 1]; /* unix ipl to apic pri */
144 /* The taskpri to be programmed into apic to mask given ipl */
145
146 /*
147 * Correlation of the hardware vector to the IPL in use, initialized
148 * from apic_vectortoipl[] in apic_init(). The final IPLs may not correlate
149 * to the IPLs in apic_vectortoipl on some systems that share interrupt lines
150 * connected to errata-stricken IOAPICs.
151 */
/* One entry per available hardware vector; see the block comment above. */
152 uchar_t apic_ipls[APIC_AVAIL_VECTOR];
153
154 /*
155 * Patchable global variables.
156 */
/*
 * NOTE(review): "patchable" presumably means tunable via a kernel
 * debugger or /etc/system -- confirm against platform documentation.
 */
157 int apic_enable_hwsoftint = 0; /* 0 - disable, 1 - enable */
158 int apic_enable_bind_log = 1; /* 1 - display interrupt binding log */
159
160 /*
161 * Local static data
162 */
163 static struct psm_ops apic_ops = {
164 apic_probe,
165
279
280 psm_get_ioapicid = apic_get_ioapicid;
281 psm_get_localapicid = apic_get_localapicid;
282 psm_xlate_vector_by_irq = apic_xlate_vector_by_irq;
283
284 apic_ipltopri[0] = APIC_VECTOR_PER_IPL; /* leave 0 for idle */
285 for (i = 0; i < (APIC_AVAIL_VECTOR / APIC_VECTOR_PER_IPL); i++) {
286 if ((i < ((APIC_AVAIL_VECTOR / APIC_VECTOR_PER_IPL) - 1)) &&
287 (apic_vectortoipl[i + 1] == apic_vectortoipl[i]))
288 /* get to highest vector at the same ipl */
289 continue;
290 for (; j <= apic_vectortoipl[i]; j++) {
291 apic_ipltopri[j] = (i << APIC_IPL_SHIFT) +
292 APIC_BASE_VECT;
293 }
294 }
295 for (; j < MAXIPL + 1; j++)
296 /* fill up any empty ipltopri slots */
297 apic_ipltopri[j] = (i << APIC_IPL_SHIFT) + APIC_BASE_VECT;
298 apic_init_common();
299
300 #if !defined(__amd64)
301 if (cpuid_have_cr8access(CPU))
302 apic_have_32bit_cr8 = 1;
303 #endif
304 }
305
306 static void
307 apic_init_intr(void)
308 {
309 processorid_t cpun = psm_get_cpu_id();
310 uint_t nlvt;
311 uint32_t svr = AV_UNIT_ENABLE | APIC_SPUR_INTR;
312
313 apic_reg_ops->apic_write_task_reg(APIC_MASK_ALL);
314
315 if (apic_mode == LOCAL_APIC) {
316 /*
317 * We are running APIC in MMIO mode.
318 */
319 if (apic_flat_model) {
320 apic_reg_ops->apic_write(APIC_FORMAT_REG,
321 APIC_FLAT_MODEL);
322 } else {
323 apic_reg_ops->apic_write(APIC_FORMAT_REG,
|