XXXX introduce drv_sectohz
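This change replaces the delay(drv_usectohz(1000000)) idiom with a delay
expressed directly in seconds. The definition of drv_sectohz itself lives
outside this file and is not shown in this hunk; below is a minimal sketch,
assuming it simply parallels the existing drv_usectohz(9F) microseconds-to-
ticks conversion (the body is an assumption, not the actual implementation):

	#include <sys/time.h>		/* MICROSEC */
	#include <sys/sunddi.h>		/* drv_usectohz() */

	/*
	 * Sketch only: convert whole seconds to clock ticks by scaling
	 * to microseconds and reusing the existing DDI conversion.
	 * Overflow handling is omitted for brevity.
	 */
	static clock_t
	drv_sectohz(clock_t sec)
	{
		return (drv_usectohz(sec * MICROSEC));
	}

With such a routine in place, the one-second poll at line 1069 below can be
written as delay(drv_sectohz(1)) rather than delay(drv_usectohz(1000000)).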
--- old/usr/src/uts/sun4/io/px/px_ib.c
+++ new/usr/src/uts/sun4/io/px/px_ib.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 */
24 24
25 25 /*
26 26 * PX Interrupt Block implementation
27 27 */
28 28
29 29 #include <sys/types.h>
30 30 #include <sys/kmem.h>
31 31 #include <sys/async.h>
32 32 #include <sys/systm.h> /* panicstr */
33 33 #include <sys/spl.h>
34 34 #include <sys/sunddi.h>
35 35 #include <sys/machsystm.h> /* intr_dist_add */
36 36 #include <sys/ddi_impldefs.h>
37 37 #include <sys/cpuvar.h>
38 38 #include <sys/time.h>
39 39 #include "px_obj.h"
40 40
41 41 /*LINTLIBRARY*/
42 42
43 43 static void px_ib_intr_redist(void *arg, int32_t weight_max, int32_t weight);
44 44 static void px_ib_cpu_ticks_to_ih_nsec(px_ib_t *ib_p, px_ih_t *ih_p,
45 45 uint32_t cpu_id);
46 46 static uint_t px_ib_intr_reset(void *arg);
47 47 static void px_fill_in_intr_devs(pcitool_intr_dev_t *dev, char *driver_name,
48 48 char *path_name, int instance);
49 49
50 50 extern uint64_t xc_tick_jump_limit;
51 51
52 52 int
53 53 px_ib_attach(px_t *px_p)
54 54 {
55 55 dev_info_t *dip = px_p->px_dip;
56 56 px_ib_t *ib_p;
57 57 sysino_t sysino;
58 58 px_fault_t *fault_p = &px_p->px_fault;
59 59
60 60 DBG(DBG_IB, dip, "px_ib_attach\n");
61 61
62 62 if (px_lib_intr_devino_to_sysino(px_p->px_dip,
63 63 px_p->px_inos[PX_INTR_PEC], &sysino) != DDI_SUCCESS)
64 64 return (DDI_FAILURE);
65 65
66 66 /*
67 67 * Allocate interrupt block state structure and link it to
68 68 * the px state structure.
69 69 */
70 70 ib_p = kmem_zalloc(sizeof (px_ib_t), KM_SLEEP);
71 71 px_p->px_ib_p = ib_p;
72 72 ib_p->ib_px_p = px_p;
73 73 ib_p->ib_ino_lst = (px_ino_t *)NULL;
74 74
75 75 mutex_init(&ib_p->ib_intr_lock, NULL, MUTEX_DRIVER, NULL);
76 76 mutex_init(&ib_p->ib_ino_lst_mutex, NULL, MUTEX_DRIVER, NULL);
77 77
78 78 bus_func_register(BF_TYPE_RESINTR, px_ib_intr_reset, ib_p);
79 79
80 80 intr_dist_add_weighted(px_ib_intr_redist, ib_p);
81 81
82 82 /*
83 83 * Initialize PEC fault data structure
84 84 */
85 85 fault_p->px_fh_dip = dip;
86 86 fault_p->px_fh_sysino = sysino;
87 87 fault_p->px_err_func = px_err_dmc_pec_intr;
88 88 fault_p->px_intr_ino = px_p->px_inos[PX_INTR_PEC];
89 89
90 90 return (DDI_SUCCESS);
91 91 }
92 92
93 93 void
94 94 px_ib_detach(px_t *px_p)
95 95 {
96 96 px_ib_t *ib_p = px_p->px_ib_p;
97 97 dev_info_t *dip = px_p->px_dip;
98 98
99 99 DBG(DBG_IB, dip, "px_ib_detach\n");
100 100
101 101 bus_func_unregister(BF_TYPE_RESINTR, px_ib_intr_reset, ib_p);
102 102 intr_dist_rem_weighted(px_ib_intr_redist, ib_p);
103 103
104 104 mutex_destroy(&ib_p->ib_ino_lst_mutex);
105 105 mutex_destroy(&ib_p->ib_intr_lock);
106 106
107 107 px_ib_free_ino_all(ib_p);
108 108
109 109 px_p->px_ib_p = NULL;
110 110 kmem_free(ib_p, sizeof (px_ib_t));
111 111 }
112 112
113 113 void
114 114 px_ib_intr_enable(px_t *px_p, cpuid_t cpu_id, devino_t ino)
115 115 {
116 116 px_ib_t *ib_p = px_p->px_ib_p;
117 117 sysino_t sysino;
118 118
119 119 /*
120 120 * Determine the cpu for the interrupt
121 121 */
122 122 mutex_enter(&ib_p->ib_intr_lock);
123 123
124 124 DBG(DBG_IB, px_p->px_dip,
125 125 "px_ib_intr_enable: ino=%x cpu_id=%x\n", ino, cpu_id);
126 126
127 127 if (px_lib_intr_devino_to_sysino(px_p->px_dip, ino,
128 128 &sysino) != DDI_SUCCESS) {
129 129 DBG(DBG_IB, px_p->px_dip,
130 130 "px_ib_intr_enable: px_intr_devino_to_sysino() failed\n");
131 131
132 132 mutex_exit(&ib_p->ib_intr_lock);
133 133 return;
134 134 }
135 135
136 136 PX_INTR_ENABLE(px_p->px_dip, sysino, cpu_id);
137 137 px_lib_intr_setstate(px_p->px_dip, sysino, INTR_IDLE_STATE);
138 138
139 139 mutex_exit(&ib_p->ib_intr_lock);
140 140 }
141 141
142 142 /*ARGSUSED*/
143 143 void
144 144 px_ib_intr_disable(px_ib_t *ib_p, devino_t ino, int wait)
145 145 {
146 146 sysino_t sysino;
147 147
148 148 mutex_enter(&ib_p->ib_intr_lock);
149 149
150 150 DBG(DBG_IB, ib_p->ib_px_p->px_dip, "px_ib_intr_disable: ino=%x\n", ino);
151 151
152 152 /* Disable the interrupt */
153 153 if (px_lib_intr_devino_to_sysino(ib_p->ib_px_p->px_dip, ino,
154 154 &sysino) != DDI_SUCCESS) {
155 155 DBG(DBG_IB, ib_p->ib_px_p->px_dip,
156 156 "px_ib_intr_disable: px_intr_devino_to_sysino() failed\n");
157 157
158 158 mutex_exit(&ib_p->ib_intr_lock);
159 159 return;
160 160 }
161 161
162 162 PX_INTR_DISABLE(ib_p->ib_px_p->px_dip, sysino);
163 163
164 164 mutex_exit(&ib_p->ib_intr_lock);
165 165 }
166 166
167 167 int
168 168 px_ib_intr_pend(dev_info_t *dip, sysino_t sysino)
169 169 {
170 170 int ret = DDI_SUCCESS;
171 171 hrtime_t start_time, prev, curr, interval, jump;
172 172 hrtime_t intr_timeout;
173 173 intr_state_t intr_state;
174 174
175 175 /* Disable the interrupt */
176 176 PX_INTR_DISABLE(dip, sysino);
177 177
178 178 intr_timeout = px_intrpend_timeout;
179 179 jump = TICK_TO_NSEC(xc_tick_jump_limit);
180 180
181 181 /* Busy wait on pending interrupt */
182 182 for (curr = start_time = gethrtime(); !panicstr &&
183 183 ((ret = px_lib_intr_getstate(dip, sysino,
184 184 &intr_state)) == DDI_SUCCESS) &&
185 185 (intr_state == INTR_DELIVERED_STATE); /* */) {
186 186 /*
187 187 * If we have a really large jump in hrtime, it is most
188 188 * probably because we entered the debugger (or OBP,
189 189 * in general). So, we adjust the timeout accordingly
190 190 * to prevent declaring an interrupt timeout. The
191 191 * master-interrupt mechanism in OBP should deliver
192 192 * the interrupts properly.
193 193 */
194 194 prev = curr;
195 195 curr = gethrtime();
196 196 interval = curr - prev;
197 197 if (interval > jump)
198 198 intr_timeout += interval;
199 199 if (curr - start_time > intr_timeout) {
200 200 ret = DDI_FAILURE;
201 201 break;
202 202 }
203 203 }
204 204 return (ret);
205 205 }
206 206
207 207 void
208 208 px_ib_intr_dist_en(dev_info_t *dip, cpuid_t cpu_id, devino_t ino,
209 209 boolean_t wait_flag)
210 210 {
211 211 uint32_t old_cpu_id;
212 212 sysino_t sysino;
213 213 intr_valid_state_t enabled = 0;
214 214
215 215 DBG(DBG_IB, dip, "px_ib_intr_dist_en: ino=0x%x\n", ino);
216 216
217 217 if (px_lib_intr_devino_to_sysino(dip, ino, &sysino) != DDI_SUCCESS) {
218 218 DBG(DBG_IB, dip, "px_ib_intr_dist_en: "
219 219 "px_intr_devino_to_sysino() failed, ino 0x%x\n", ino);
220 220 return;
221 221 }
222 222
223 223 /* Skip enabling disabled interrupts */
224 224 if (px_lib_intr_getvalid(dip, sysino, &enabled) != DDI_SUCCESS) {
225 225 DBG(DBG_IB, dip, "px_ib_intr_dist_en: px_intr_getvalid() "
226 226 "failed, sysino 0x%x\n", sysino);
227 227 return;
228 228 }
229 229 if (!enabled)
230 230 return;
231 231
232 232 /* Done if redistributed onto the same cpuid */
233 233 if (px_lib_intr_gettarget(dip, sysino, &old_cpu_id) != DDI_SUCCESS) {
234 234 DBG(DBG_IB, dip, "px_ib_intr_dist_en: "
235 235 "px_intr_gettarget() failed\n");
236 236 return;
237 237 }
238 238 if (cpu_id == old_cpu_id)
239 239 return;
240 240
241 241 /* Wait on pending interrupts */
242 242 if (wait_flag != 0 && px_ib_intr_pend(dip, sysino) != DDI_SUCCESS) {
243 243 cmn_err(CE_WARN,
244 244 "%s%d: px_ib_intr_dist_en: sysino 0x%lx(ino 0x%x) "
245 245 "from cpu id 0x%x to 0x%x timeout",
246 246 ddi_driver_name(dip), ddi_get_instance(dip),
247 247 sysino, ino, old_cpu_id, cpu_id);
248 248
249 249 DBG(DBG_IB, dip, "px_ib_intr_dist_en: failed, "
250 250 "ino 0x%x sysino 0x%x\n", ino, sysino);
251 251 }
252 252
253 253 PX_INTR_ENABLE(dip, sysino, cpu_id);
254 254 }
255 255
256 256 static void
257 257 px_ib_cpu_ticks_to_ih_nsec(px_ib_t *ib_p, px_ih_t *ih_p, uint32_t cpu_id)
258 258 {
259 259 extern kmutex_t pxintr_ks_template_lock;
260 260 hrtime_t ticks;
261 261
262 262 /*
263 263 * Because we are updating two fields in ih_t we must lock
264 264 * pxintr_ks_template_lock to prevent someone from reading the
265 265 * kstats after we set ih_ticks to 0 and before we increment
266 266 * ih_nsec to compensate.
267 267 *
268 268 * We must also protect against the interrupt arriving and incrementing
269 269 * ih_ticks between the time we read it and when we reset it to 0.
270 270 * To do this we use atomic_swap.
271 271 */
272 272
273 273 ASSERT(MUTEX_HELD(&ib_p->ib_ino_lst_mutex));
274 274
275 275 mutex_enter(&pxintr_ks_template_lock);
276 276 ticks = atomic_swap_64(&ih_p->ih_ticks, 0);
277 277 ih_p->ih_nsec += (uint64_t)tick2ns(ticks, cpu_id);
278 278 mutex_exit(&pxintr_ks_template_lock);
279 279 }
280 280
281 281
282 282 /*
283 283 * Redistribute interrupts of the specified weight. The first call has a weight
284 284 * of weight_max, which can be used to trigger initialization for
285 285 * redistribution. The inos with weight [weight_max, inf.) should be processed
286 286 * on the "weight == weight_max" call. This first call is followed by calls
287 287  * of decreasing weight, on which inos of that weight should be processed.
288 288  * The final call specifies a weight of zero; this can be used to trigger
289 289  * processing of stragglers.
290 290 */
291 291 static void
292 292 px_ib_intr_redist(void *arg, int32_t weight_max, int32_t weight)
293 293 {
294 294 px_ib_t *ib_p = (px_ib_t *)arg;
295 295 px_t *px_p = ib_p->ib_px_p;
296 296 dev_info_t *dip = px_p->px_dip;
297 297 px_ino_t *ino_p;
298 298 px_ino_pil_t *ipil_p;
299 299 px_ih_t *ih_lst;
300 300 int32_t dweight = 0;
301 301 int i;
302 302
303 303 /* Redistribute internal interrupts */
304 304 if (weight == 0) {
305 305 mutex_enter(&ib_p->ib_intr_lock);
306 306 px_ib_intr_dist_en(dip, intr_dist_cpuid(),
307 307 px_p->px_inos[PX_INTR_PEC], B_FALSE);
308 308 mutex_exit(&ib_p->ib_intr_lock);
309 309
310 310 px_hp_intr_redist(px_p);
311 311 }
312 312
313 313 /* Redistribute device interrupts */
314 314 mutex_enter(&ib_p->ib_ino_lst_mutex);
315 315 px_msiq_redist(px_p);
316 316
317 317 for (ino_p = ib_p->ib_ino_lst; ino_p; ino_p = ino_p->ino_next_p) {
318 318 /*
319 319 * Recomputes the sum of interrupt weights of devices that
320 320 * share the same ino upon first call marked by
321 321 * (weight == weight_max).
322 322 */
323 323 if (weight == weight_max) {
324 324 ino_p->ino_intr_weight = 0;
325 325
326 326 for (ipil_p = ino_p->ino_ipil_p; ipil_p;
327 327 ipil_p = ipil_p->ipil_next_p) {
328 328 for (i = 0, ih_lst = ipil_p->ipil_ih_head;
329 329 i < ipil_p->ipil_ih_size; i++,
330 330 ih_lst = ih_lst->ih_next) {
331 331 dweight = i_ddi_get_intr_weight(
332 332 ih_lst->ih_dip);
333 333 if (dweight > 0)
334 334 ino_p->ino_intr_weight +=
335 335 dweight;
336 336 }
337 337 }
338 338 }
339 339
340 340 /*
341 341 * As part of redistributing weighted interrupts over cpus,
342 342  * the nexus redistributes device interrupts and updates the
343 343  * cpu weight. The goal is for the most lightly weighted cpu
344 344  * to take the next interrupt and gain weight; thus an
345 345  * attention-demanding device gains more cpu attention by
346 346  * making itself heavy.
347 347 */
348 348 if ((weight == ino_p->ino_intr_weight) ||
349 349 ((weight >= weight_max) &&
350 350 (ino_p->ino_intr_weight >= weight_max))) {
351 351 uint32_t orig_cpuid = ino_p->ino_cpuid;
352 352
353 353 if (cpu[orig_cpuid] == NULL)
354 354 orig_cpuid = CPU->cpu_id;
355 355
356 356 DBG(DBG_IB, dip, "px_ib_intr_redist: sysino 0x%llx "
357 357 "current cpuid 0x%x current default cpuid 0x%x\n",
358 358 ino_p->ino_sysino, ino_p->ino_cpuid,
359 359 ino_p->ino_default_cpuid);
360 360
361 361 /* select target cpuid and mark ino established */
362 362 if (ino_p->ino_default_cpuid == -1)
363 363 ino_p->ino_cpuid = ino_p->ino_default_cpuid =
364 364 intr_dist_cpuid();
365 365 else if ((ino_p->ino_cpuid !=
366 366 ino_p->ino_default_cpuid) &&
367 367 cpu[ino_p->ino_default_cpuid] &&
368 368 cpu_intr_on(cpu[ino_p->ino_default_cpuid]))
369 369 ino_p->ino_cpuid = ino_p->ino_default_cpuid;
370 370 else if (!cpu_intr_on(cpu[ino_p->ino_cpuid]))
371 371 ino_p->ino_cpuid = intr_dist_cpuid();
372 372
373 373 DBG(DBG_IB, dip, "px_ib_intr_redist: sysino 0x%llx "
374 374 "new cpuid 0x%x new default cpuid 0x%x\n",
375 375 ino_p->ino_sysino, ino_p->ino_cpuid,
376 376 ino_p->ino_default_cpuid);
377 377
378 378 /* Add device weight to targeted cpu. */
379 379 for (ipil_p = ino_p->ino_ipil_p; ipil_p;
380 380 ipil_p = ipil_p->ipil_next_p) {
381 381 for (i = 0, ih_lst = ipil_p->ipil_ih_head;
382 382 i < ipil_p->ipil_ih_size; i++,
383 383 ih_lst = ih_lst->ih_next) {
384 384
385 385 dweight = i_ddi_get_intr_weight(
386 386 ih_lst->ih_dip);
387 387 intr_dist_cpuid_add_device_weight(
388 388 ino_p->ino_cpuid, ih_lst->ih_dip,
389 389 dweight);
390 390
391 391 /*
392 392 * Different cpus may have different
393 393  * clock speeds. To account for this,
394 394 * whenever an interrupt is moved to a
395 395 * new CPU, we convert the accumulated
396 396 * ticks into nsec, based upon the clock
397 397 * rate of the prior CPU.
398 398 *
399 399 * It is possible that the prior CPU no
400 400 * longer exists. In this case, fall
401 401 * back to using this CPU's clock rate.
402 402 *
403 403 * Note that the value in ih_ticks has
404 404 * already been corrected for any power
405 405 * savings mode which might have been
406 406 * in effect.
407 407 */
408 408 px_ib_cpu_ticks_to_ih_nsec(ib_p, ih_lst,
409 409 orig_cpuid);
410 410 }
411 411 }
412 412
413 413 /* enable interrupt on new targeted cpu */
414 414 px_ib_intr_dist_en(dip, ino_p->ino_cpuid,
415 415 ino_p->ino_ino, B_TRUE);
416 416 }
417 417 }
418 418 mutex_exit(&ib_p->ib_ino_lst_mutex);
419 419 }
420 420
421 421 /*
422 422 * Reset interrupts to IDLE. This function is called during
423 423 * panic handling after redistributing interrupts; it's needed to
424 424 * support dumping to network devices after 'sync' from OBP.
425 425 *
426 426 * N.B. This routine runs in a context where all other threads
427 427 * are permanently suspended.
428 428 */
429 429 static uint_t
430 430 px_ib_intr_reset(void *arg)
431 431 {
432 432 px_ib_t *ib_p = (px_ib_t *)arg;
433 433
434 434 DBG(DBG_IB, ib_p->ib_px_p->px_dip, "px_ib_intr_reset\n");
435 435
436 436 if (px_lib_intr_reset(ib_p->ib_px_p->px_dip) != DDI_SUCCESS)
437 437 return (BF_FATAL);
438 438
439 439 return (BF_NONE);
440 440 }
441 441
442 442 /*
443 443  * Locate the px_ino_t structure on ib_p->ib_ino_lst according to ino#;
444 444  * returns NULL if not found.
445 445 */
446 446 px_ino_t *
447 447 px_ib_locate_ino(px_ib_t *ib_p, devino_t ino_num)
448 448 {
449 449 px_ino_t *ino_p = ib_p->ib_ino_lst;
450 450
451 451 ASSERT(MUTEX_HELD(&ib_p->ib_ino_lst_mutex));
452 452
453 453 for (; ino_p && ino_p->ino_ino != ino_num; ino_p = ino_p->ino_next_p)
454 454 ;
455 455
456 456 return (ino_p);
457 457 }
458 458
459 459 px_ino_t *
460 460 px_ib_alloc_ino(px_ib_t *ib_p, devino_t ino_num)
461 461 {
462 462 sysino_t sysino;
463 463 px_ino_t *ino_p;
464 464
465 465 if (px_lib_intr_devino_to_sysino(ib_p->ib_px_p->px_dip,
466 466 ino_num, &sysino) != DDI_SUCCESS)
467 467 return (NULL);
468 468
469 469 ino_p = kmem_zalloc(sizeof (px_ino_t), KM_SLEEP);
470 470
471 471 ino_p->ino_next_p = ib_p->ib_ino_lst;
472 472 ib_p->ib_ino_lst = ino_p;
473 473
474 474 ino_p->ino_ino = ino_num;
475 475 ino_p->ino_sysino = sysino;
476 476 ino_p->ino_ib_p = ib_p;
477 477 ino_p->ino_unclaimed_intrs = 0;
478 478 ino_p->ino_lopil = 0;
479 479 ino_p->ino_cpuid = ino_p->ino_default_cpuid = (cpuid_t)-1;
480 480
481 481 return (ino_p);
482 482 }
483 483
484 484 px_ino_pil_t *
485 485 px_ib_new_ino_pil(px_ib_t *ib_p, devino_t ino_num, uint_t pil, px_ih_t *ih_p)
486 486 {
487 487 px_ino_pil_t *ipil_p = kmem_zalloc(sizeof (px_ino_pil_t), KM_SLEEP);
488 488 px_ino_t *ino_p;
489 489
490 490 if ((ino_p = px_ib_locate_ino(ib_p, ino_num)) == NULL)
491 491 ino_p = px_ib_alloc_ino(ib_p, ino_num);
492 492
493 493 ASSERT(ino_p != NULL);
494 494
495 495 ih_p->ih_next = ih_p;
496 496 ipil_p->ipil_pil = pil;
497 497 ipil_p->ipil_ih_head = ih_p;
498 498 ipil_p->ipil_ih_tail = ih_p;
499 499 ipil_p->ipil_ih_start = ih_p;
500 500 ipil_p->ipil_ih_size = 1;
501 501 ipil_p->ipil_ino_p = ino_p;
502 502
503 503 ipil_p->ipil_next_p = ino_p->ino_ipil_p;
504 504 ino_p->ino_ipil_p = ipil_p;
505 505 ino_p->ino_ipil_size++;
506 506
507 507 if ((ino_p->ino_lopil == 0) || (ino_p->ino_lopil > pil))
508 508 ino_p->ino_lopil = pil;
509 509
510 510 return (ipil_p);
511 511 }
512 512
513 513 void
514 514 px_ib_delete_ino_pil(px_ib_t *ib_p, px_ino_pil_t *ipil_p)
515 515 {
516 516 px_ino_t *ino_p = ipil_p->ipil_ino_p;
517 517 ushort_t pil = ipil_p->ipil_pil;
518 518 px_ino_pil_t *prev, *next;
519 519
520 520 ASSERT(MUTEX_HELD(&ib_p->ib_ino_lst_mutex));
521 521
522 522 if (ino_p->ino_ipil_p == ipil_p)
523 523 ino_p->ino_ipil_p = ipil_p->ipil_next_p;
524 524 else {
525 525 for (prev = next = ino_p->ino_ipil_p; next != ipil_p;
526 526 prev = next, next = next->ipil_next_p)
527 527 ;
528 528
529 529 if (prev)
530 530 prev->ipil_next_p = ipil_p->ipil_next_p;
531 531 }
532 532
533 533 kmem_free(ipil_p, sizeof (px_ino_pil_t));
534 534
535 535 if ((--ino_p->ino_ipil_size) && (ino_p->ino_lopil == pil)) {
536 536 for (next = ino_p->ino_ipil_p, pil = next->ipil_pil;
537 537 next; next = next->ipil_next_p) {
538 538
539 539 if (pil > next->ipil_pil)
540 540 pil = next->ipil_pil;
541 541 }
542 542
543 543 /*
544 544 * Value stored in pil should be the lowest pil.
545 545 */
546 546 ino_p->ino_lopil = pil;
547 547 }
548 548
549 549 if (ino_p->ino_ipil_size)
550 550 return;
551 551
552 552 ino_p->ino_lopil = 0;
553 553
554 554 if (ino_p->ino_msiq_p)
555 555 return;
556 556
557 557 if (ib_p->ib_ino_lst == ino_p)
558 558 ib_p->ib_ino_lst = ino_p->ino_next_p;
559 559 else {
560 560 px_ino_t *list = ib_p->ib_ino_lst;
561 561
562 562 for (; list->ino_next_p != ino_p; list = list->ino_next_p)
563 563 ;
564 564 list->ino_next_p = ino_p->ino_next_p;
565 565 }
566 566 }
567 567
568 568 /*
569 569  * Free all inos when we are detaching.
570 570 */
571 571 void
572 572 px_ib_free_ino_all(px_ib_t *ib_p)
573 573 {
574 574 px_ino_t *ino_p = ib_p->ib_ino_lst;
575 575 px_ino_t *next = NULL;
576 576
577 577 while (ino_p) {
578 578 next = ino_p->ino_next_p;
579 579 kmem_free(ino_p, sizeof (px_ino_t));
580 580 ino_p = next;
581 581 }
582 582 }
583 583
584 584 /*
585 585  * Locate the px_ino_pil_t structure on ino_p->ino_ipil_p according to pil;
586 586  * returns NULL if not found.
587 587 */
588 588 px_ino_pil_t *
589 589 px_ib_ino_locate_ipil(px_ino_t *ino_p, uint_t pil)
590 590 {
591 591 px_ino_pil_t *ipil_p = ino_p->ino_ipil_p;
592 592
593 593 for (; ipil_p && ipil_p->ipil_pil != pil; ipil_p = ipil_p->ipil_next_p)
594 594 ;
595 595
596 596 return (ipil_p);
597 597 }
598 598
599 599 int
600 600 px_ib_ino_add_intr(px_t *px_p, px_ino_pil_t *ipil_p, px_ih_t *ih_p)
601 601 {
602 602 px_ino_t *ino_p = ipil_p->ipil_ino_p;
603 603 px_ib_t *ib_p = ino_p->ino_ib_p;
604 604 devino_t ino = ino_p->ino_ino;
605 605 sysino_t sysino = ino_p->ino_sysino;
606 606 dev_info_t *dip = px_p->px_dip;
607 607 cpuid_t curr_cpu;
608 608 int ret = DDI_SUCCESS;
609 609
610 610 ASSERT(MUTEX_HELD(&ib_p->ib_ino_lst_mutex));
611 611 ASSERT(ib_p == px_p->px_ib_p);
612 612
613 613 DBG(DBG_IB, dip, "px_ib_ino_add_intr ino=%x\n", ino_p->ino_ino);
614 614
615 615  /* Get the interrupt's current cpu target */
616 616 if ((ret = px_lib_intr_gettarget(dip, sysino,
617 617 &curr_cpu)) != DDI_SUCCESS) {
618 618 DBG(DBG_IB, dip,
619 619 "px_ib_ino_add_intr px_intr_gettarget() failed\n");
620 620
621 621 return (ret);
622 622 }
623 623
624 624 /* Wait on pending interrupt */
625 625 if ((ret = px_ib_intr_pend(dip, sysino)) != DDI_SUCCESS) {
626 626 cmn_err(CE_WARN, "%s%d: px_ib_ino_add_intr: pending "
627 627 "sysino 0x%lx(ino 0x%x) timeout",
628 628 ddi_driver_name(dip), ddi_get_instance(dip),
629 629 sysino, ino);
630 630 }
631 631
632 632 /*
633 633 * If the interrupt was previously blocked (left in pending state)
634 634 * because of jabber we need to clear the pending state in case the
635 635 * jabber has gone away.
636 636 */
637 637 if (ino_p->ino_unclaimed_intrs > px_unclaimed_intr_max) {
638 638 cmn_err(CE_WARN,
639 639 "%s%d: px_ib_ino_add_intr: ino 0x%x has been unblocked",
640 640 ddi_driver_name(dip), ddi_get_instance(dip), ino);
641 641
642 642 ino_p->ino_unclaimed_intrs = 0;
643 643 ret = px_lib_intr_setstate(dip, sysino, INTR_IDLE_STATE);
644 644 }
645 645
646 646 if (ret != DDI_SUCCESS) {
647 647 DBG(DBG_IB, dip, "px_ib_ino_add_intr: failed, "
648 648 "ino 0x%x sysino 0x%x\n", ino, sysino);
649 649
650 650 return (ret);
651 651 }
652 652
653 653 /* Link up px_ih_t */
654 654 ih_p->ih_next = ipil_p->ipil_ih_head;
655 655 ipil_p->ipil_ih_tail->ih_next = ih_p;
656 656 ipil_p->ipil_ih_tail = ih_p;
657 657
658 658 ipil_p->ipil_ih_start = ipil_p->ipil_ih_head;
659 659 ipil_p->ipil_ih_size++;
660 660
661 661 /* Re-enable interrupt */
662 662 PX_INTR_ENABLE(dip, sysino, curr_cpu);
663 663
664 664 return (ret);
665 665 }
666 666
667 667 /*
668 668 * Removes px_ih_t from the ino's link list.
669 669  * Uses the hardware mutex to lock out interrupt threads.
670 670  * Side effect: the interrupt belonging to that ino is turned off on return;
671 671  * if we are sharing the PX slot with other inos, the caller needs
672 672  * to turn it back on.
673 673 */
674 674 int
675 675 px_ib_ino_rem_intr(px_t *px_p, px_ino_pil_t *ipil_p, px_ih_t *ih_p)
676 676 {
677 677 px_ino_t *ino_p = ipil_p->ipil_ino_p;
678 678 devino_t ino = ino_p->ino_ino;
679 679 sysino_t sysino = ino_p->ino_sysino;
680 680 dev_info_t *dip = px_p->px_dip;
681 681 px_ih_t *ih_lst = ipil_p->ipil_ih_head;
682 682 int i, ret = DDI_SUCCESS;
683 683
684 684 ASSERT(MUTEX_HELD(&ino_p->ino_ib_p->ib_ino_lst_mutex));
685 685
686 686 DBG(DBG_IB, px_p->px_dip, "px_ib_ino_rem_intr ino=%x\n",
687 687 ino_p->ino_ino);
688 688
689 689 /* Wait on pending interrupt */
690 690 if ((ret = px_ib_intr_pend(dip, sysino)) != DDI_SUCCESS) {
691 691 cmn_err(CE_WARN, "%s%d: px_ib_ino_rem_intr: pending "
692 692 "sysino 0x%lx(ino 0x%x) timeout",
693 693 ddi_driver_name(dip), ddi_get_instance(dip),
694 694 sysino, ino);
695 695 }
696 696
697 697 /*
698 698 * If the interrupt was previously blocked (left in pending state)
699 699 * because of jabber we need to clear the pending state in case the
700 700 * jabber has gone away.
701 701 */
702 702 if (ret == DDI_SUCCESS &&
703 703 ino_p->ino_unclaimed_intrs > px_unclaimed_intr_max) {
704 704 cmn_err(CE_WARN, "%s%d: px_ib_ino_rem_intr: "
705 705 "ino 0x%x has been unblocked",
706 706 ddi_driver_name(dip), ddi_get_instance(dip), ino);
707 707
708 708 ino_p->ino_unclaimed_intrs = 0;
709 709 ret = px_lib_intr_setstate(dip, sysino, INTR_IDLE_STATE);
710 710 }
711 711
712 712 if (ret != DDI_SUCCESS) {
713 713 DBG(DBG_IB, dip, "px_ib_ino_rem_intr: failed, "
714 714 "ino 0x%x sysino 0x%x\n", ino, sysino);
715 715
716 716 return (ret);
717 717 }
718 718
719 719 if (ipil_p->ipil_ih_size == 1) {
720 720 if (ih_lst != ih_p)
721 721 goto not_found;
722 722
723 723 /* No need to set head/tail as ino_p will be freed */
724 724 goto reset;
725 725 }
726 726
727 727 /* Search the link list for ih_p */
728 728 for (i = 0; (i < ipil_p->ipil_ih_size) &&
729 729 (ih_lst->ih_next != ih_p); i++, ih_lst = ih_lst->ih_next)
730 730 ;
731 731
732 732 if (ih_lst->ih_next != ih_p)
733 733 goto not_found;
734 734
735 735 /* Remove ih_p from the link list and maintain the head/tail */
736 736 ih_lst->ih_next = ih_p->ih_next;
737 737
738 738 if (ipil_p->ipil_ih_head == ih_p)
739 739 ipil_p->ipil_ih_head = ih_p->ih_next;
740 740 if (ipil_p->ipil_ih_tail == ih_p)
741 741 ipil_p->ipil_ih_tail = ih_lst;
742 742
743 743 ipil_p->ipil_ih_start = ipil_p->ipil_ih_head;
744 744
745 745 reset:
746 746 if (ih_p->ih_config_handle)
747 747 pci_config_teardown(&ih_p->ih_config_handle);
748 748 if (ih_p->ih_ksp != NULL)
749 749 kstat_delete(ih_p->ih_ksp);
750 750
751 751 kmem_free(ih_p, sizeof (px_ih_t));
752 752 ipil_p->ipil_ih_size--;
753 753
754 754 return (ret);
755 755
756 756 not_found:
757 757 DBG(DBG_R_INTX, ino_p->ino_ib_p->ib_px_p->px_dip,
758 758 "ino_p=%x does not have ih_p=%x\n", ino_p, ih_p);
759 759
760 760 return (DDI_FAILURE);
761 761 }
762 762
763 763 px_ih_t *
764 764 px_ib_intr_locate_ih(px_ino_pil_t *ipil_p, dev_info_t *rdip,
765 765 uint32_t inum, msiq_rec_type_t rec_type, msgcode_t msg_code)
766 766 {
767 767 px_ih_t *ih_p = ipil_p->ipil_ih_head;
768 768 int i;
769 769
770 770 for (i = 0; i < ipil_p->ipil_ih_size; i++, ih_p = ih_p->ih_next) {
771 771 if ((ih_p->ih_dip == rdip) && (ih_p->ih_inum == inum) &&
772 772 (ih_p->ih_rec_type == rec_type) &&
773 773 (ih_p->ih_msg_code == msg_code))
774 774 return (ih_p);
775 775 }
776 776
777 777 return ((px_ih_t *)NULL);
778 778 }
779 779
780 780 px_ih_t *
781 781 px_ib_alloc_ih(dev_info_t *rdip, uint32_t inum,
782 782 uint_t (*int_handler)(caddr_t int_handler_arg1, caddr_t int_handler_arg2),
783 783 caddr_t int_handler_arg1, caddr_t int_handler_arg2,
784 784 msiq_rec_type_t rec_type, msgcode_t msg_code)
785 785 {
786 786 px_ih_t *ih_p;
787 787
788 788 ih_p = kmem_alloc(sizeof (px_ih_t), KM_SLEEP);
789 789 ih_p->ih_dip = rdip;
790 790 ih_p->ih_inum = inum;
791 791 ih_p->ih_intr_state = PX_INTR_STATE_DISABLE;
792 792 ih_p->ih_intr_flags = PX_INTR_IDLE;
793 793 ih_p->ih_handler = int_handler;
794 794 ih_p->ih_handler_arg1 = int_handler_arg1;
795 795 ih_p->ih_handler_arg2 = int_handler_arg2;
796 796 ih_p->ih_config_handle = NULL;
797 797 ih_p->ih_rec_type = rec_type;
798 798 ih_p->ih_msg_code = msg_code;
799 799 ih_p->ih_nsec = 0;
800 800 ih_p->ih_ticks = 0;
801 801 ih_p->ih_ksp = NULL;
802 802
803 803 return (ih_p);
804 804 }
805 805
806 806 int
807 807 px_ib_update_intr_state(px_t *px_p, dev_info_t *rdip,
808 808 uint_t inum, devino_t ino, uint_t pil,
809 809 uint_t new_intr_state, msiq_rec_type_t rec_type,
810 810 msgcode_t msg_code)
811 811 {
812 812 px_ib_t *ib_p = px_p->px_ib_p;
813 813 px_ino_t *ino_p;
814 814 px_ino_pil_t *ipil_p;
815 815 px_ih_t *ih_p;
816 816 int ret = DDI_FAILURE;
817 817
818 818 DBG(DBG_IB, px_p->px_dip, "px_ib_update_intr_state: %s%d "
819 819 "inum %x devino %x pil %x state %x\n", ddi_driver_name(rdip),
820 820 ddi_get_instance(rdip), inum, ino, pil, new_intr_state);
821 821
822 822 mutex_enter(&ib_p->ib_ino_lst_mutex);
823 823
824 824 ino_p = px_ib_locate_ino(ib_p, ino);
825 825 if (ino_p && (ipil_p = px_ib_ino_locate_ipil(ino_p, pil))) {
826 826 if (ih_p = px_ib_intr_locate_ih(ipil_p, rdip, inum, rec_type,
827 827 msg_code)) {
828 828 ih_p->ih_intr_state = new_intr_state;
829 829 ret = DDI_SUCCESS;
830 830 }
831 831 }
832 832
833 833 mutex_exit(&ib_p->ib_ino_lst_mutex);
834 834 return (ret);
835 835 }
836 836
837 837
838 838 /*
839 839 * Get interrupt CPU for a given ino.
840 840 * Return info only for inos which are already mapped to devices.
841 841 */
842 842 /*ARGSUSED*/
843 843 int
844 844 px_ib_get_intr_target(px_t *px_p, devino_t ino, cpuid_t *cpu_id_p)
845 845 {
846 846 dev_info_t *dip = px_p->px_dip;
847 847 sysino_t sysino;
848 848 int ret;
849 849
850 850 DBG(DBG_IB, px_p->px_dip, "px_ib_get_intr_target: devino %x\n", ino);
851 851
852 852 /* Convert leaf-wide intr to system-wide intr */
853 853 if (px_lib_intr_devino_to_sysino(dip, ino, &sysino) != DDI_SUCCESS)
854 854 return (DDI_FAILURE);
855 855
856 856 ret = px_lib_intr_gettarget(dip, sysino, cpu_id_p);
857 857
858 858 DBG(DBG_IB, px_p->px_dip, "px_ib_get_intr_target: cpu_id %x\n",
859 859 *cpu_id_p);
860 860
861 861 return (ret);
862 862 }
863 863
864 864
865 865 /*
866 866 * Associate a new CPU with a given ino.
867 867 * Operate only on INOs which are already mapped to devices.
868 868 */
869 869 int
870 870 px_ib_set_intr_target(px_t *px_p, devino_t ino, cpuid_t cpu_id)
871 871 {
872 872 dev_info_t *dip = px_p->px_dip;
873 873 cpuid_t old_cpu_id;
874 874 sysino_t sysino;
875 875 int ret = DDI_SUCCESS;
876 876 extern const int _ncpu;
877 877 extern cpu_t *cpu[];
878 878
879 879 DBG(DBG_IB, px_p->px_dip, "px_ib_set_intr_target: devino %x "
880 880 "cpu_id %x\n", ino, cpu_id);
881 881
882 882 mutex_enter(&cpu_lock);
883 883
884 884 /* Convert leaf-wide intr to system-wide intr */
885 885 if (px_lib_intr_devino_to_sysino(dip, ino, &sysino) != DDI_SUCCESS) {
886 886 ret = DDI_FAILURE;
887 887 goto done;
888 888 }
889 889
890 890 if (px_lib_intr_gettarget(dip, sysino, &old_cpu_id) != DDI_SUCCESS) {
891 891 ret = DDI_FAILURE;
892 892 goto done;
893 893 }
894 894
895 895 /*
896 896 * Get lock, validate cpu and write it.
897 897 */
898 898 if ((cpu_id < _ncpu) && (cpu[cpu_id] && cpu_is_online(cpu[cpu_id]))) {
899 899 DBG(DBG_IB, dip, "px_ib_set_intr_target: Enabling CPU %d\n",
900 900 cpu_id);
901 901 px_ib_intr_dist_en(dip, cpu_id, ino, B_TRUE);
902 902 px_ib_log_new_cpu(px_p->px_ib_p, old_cpu_id, cpu_id, ino);
903 903 } else { /* Invalid cpu */
904 904 DBG(DBG_IB, dip, "px_ib_set_intr_target: Invalid cpuid %x\n",
905 905 cpu_id);
906 906 ret = DDI_EINVAL;
907 907 }
908 908
909 909 done:
910 910 mutex_exit(&cpu_lock);
911 911 return (ret);
912 912 }
913 913
914 914 hrtime_t px_ib_msix_retarget_timeout = 120ll * NANOSEC; /* 120 seconds */
915 915
916 916 /*
917 917 * Associate a new CPU with a given MSI/X.
918 918 * Operate only on MSI/Xs which are already mapped to devices.
919 919 */
920 920 int
921 921 px_ib_set_msix_target(px_t *px_p, ddi_intr_handle_impl_t *hdlp,
922 922 msinum_t msi_num, cpuid_t cpu_id)
923 923 {
924 924 px_ib_t *ib_p = px_p->px_ib_p;
925 925 px_msi_state_t *msi_state_p = &px_p->px_ib_p->ib_msi_state;
926 926 dev_info_t *dip = px_p->px_dip;
927 927 dev_info_t *rdip = hdlp->ih_dip;
928 928 msiqid_t msiq_id, old_msiq_id;
929 929 pci_msi_state_t msi_state;
930 930 msiq_rec_type_t msiq_rec_type;
931 931 msi_type_t msi_type;
932 932 px_ino_t *ino_p;
933 933 px_ih_t *ih_p, *old_ih_p;
934 934 cpuid_t old_cpu_id;
935 935 hrtime_t start_time, end_time;
936 936 int ret = DDI_SUCCESS;
937 937 extern const int _ncpu;
938 938 extern cpu_t *cpu[];
939 939
940 940 DBG(DBG_IB, dip, "px_ib_set_msix_target: msi_num %x new cpu_id %x\n",
941 941 msi_num, cpu_id);
942 942
943 943 mutex_enter(&cpu_lock);
944 944
945 945 /* Check for MSI64 support */
946 946 if ((hdlp->ih_cap & DDI_INTR_FLAG_MSI64) && msi_state_p->msi_addr64) {
947 947 msiq_rec_type = MSI64_REC;
948 948 msi_type = MSI64_TYPE;
949 949 } else {
950 950 msiq_rec_type = MSI32_REC;
951 951 msi_type = MSI32_TYPE;
952 952 }
953 953
954 954 if ((ret = px_lib_msi_getmsiq(dip, msi_num,
955 955 &old_msiq_id)) != DDI_SUCCESS) {
956 956
957 957 mutex_exit(&cpu_lock);
958 958 return (ret);
959 959 }
960 960
961 961 DBG(DBG_IB, dip, "px_ib_set_msix_target: current msiq 0x%x\n",
962 962 old_msiq_id);
963 963
964 964 if ((ret = px_ib_get_intr_target(px_p,
965 965 px_msiqid_to_devino(px_p, old_msiq_id),
966 966 &old_cpu_id)) != DDI_SUCCESS) {
967 967
968 968 mutex_exit(&cpu_lock);
969 969 return (ret);
970 970 }
971 971
972 972 DBG(DBG_IB, dip, "px_ib_set_msix_target: current cpuid 0x%x\n",
973 973 old_cpu_id);
974 974
975 975 if (cpu_id == old_cpu_id) {
976 976
977 977 mutex_exit(&cpu_lock);
978 978 return (DDI_SUCCESS);
979 979 }
980 980
981 981 /*
982 982 * Get lock, validate cpu and write it.
983 983 */
984 984 if (!((cpu_id < _ncpu) && (cpu[cpu_id] &&
985 985 cpu_is_online(cpu[cpu_id])))) {
986 986 /* Invalid cpu */
987 987 DBG(DBG_IB, dip, "px_ib_set_msix_target: Invalid cpuid %x\n",
988 988 cpu_id);
989 989
990 990 mutex_exit(&cpu_lock);
991 991 return (DDI_EINVAL);
992 992 }
993 993
994 994 DBG(DBG_IB, dip, "px_ib_set_msix_target: Enabling CPU %d\n", cpu_id);
995 995
996 996 if ((ret = px_add_msiq_intr(dip, rdip, hdlp,
997 997 msiq_rec_type, msi_num, cpu_id, &msiq_id)) != DDI_SUCCESS) {
998 998 DBG(DBG_IB, dip, "px_ib_set_msix_target: Add MSI handler "
999 999 "failed, rdip 0x%p msi 0x%x\n", rdip, msi_num);
1000 1000
1001 1001 mutex_exit(&cpu_lock);
1002 1002 return (ret);
1003 1003 }
1004 1004
1005 1005 if ((ret = px_lib_msi_setmsiq(dip, msi_num,
1006 1006 msiq_id, msi_type)) != DDI_SUCCESS) {
1007 1007 mutex_exit(&cpu_lock);
1008 1008
1009 1009 (void) px_rem_msiq_intr(dip, rdip,
1010 1010 hdlp, msiq_rec_type, msi_num, msiq_id);
1011 1011
1012 1012 return (ret);
1013 1013 }
1014 1014
1015 1015 if ((ret = px_ib_update_intr_state(px_p, rdip, hdlp->ih_inum,
1016 1016 px_msiqid_to_devino(px_p, msiq_id), hdlp->ih_pri,
1017 1017 PX_INTR_STATE_ENABLE, msiq_rec_type, msi_num)) != DDI_SUCCESS) {
1018 1018 mutex_exit(&cpu_lock);
1019 1019
1020 1020 (void) px_rem_msiq_intr(dip, rdip,
1021 1021 hdlp, msiq_rec_type, msi_num, msiq_id);
1022 1022
1023 1023 return (ret);
1024 1024 }
1025 1025
1026 1026 mutex_exit(&cpu_lock);
1027 1027
1028 1028 /*
1029 1029 * Remove the old handler, but first ensure it is finished.
1030 1030 *
1031 1031 * Each handler sets its PENDING flag before it clears the MSI state.
1032 1032 * Then it clears that flag when finished. If a re-target occurs while
1033 1033 * the MSI state is DELIVERED, then it is not yet known which of the
1034 1034 * two handlers will take the interrupt. So the re-target operation
1035 1035 * sets a RETARGET flag on both handlers in that case. Monitoring both
1036 1036 * flags on both handlers then determines when the old handler can be
1037 1037  * safely removed.
1038 1038 */
1039 1039 mutex_enter(&ib_p->ib_ino_lst_mutex);
1040 1040
1041 1041 ino_p = px_ib_locate_ino(ib_p, px_msiqid_to_devino(px_p, old_msiq_id));
1042 1042 old_ih_p = px_ib_intr_locate_ih(px_ib_ino_locate_ipil(ino_p,
1043 1043 hdlp->ih_pri), rdip, hdlp->ih_inum, msiq_rec_type, msi_num);
1044 1044
1045 1045 ino_p = px_ib_locate_ino(ib_p, px_msiqid_to_devino(px_p, msiq_id));
1046 1046 ih_p = px_ib_intr_locate_ih(px_ib_ino_locate_ipil(ino_p, hdlp->ih_pri),
1047 1047 rdip, hdlp->ih_inum, msiq_rec_type, msi_num);
1048 1048
1049 1049 if ((ret = px_lib_msi_getstate(dip, msi_num,
1050 1050 &msi_state)) != DDI_SUCCESS) {
1051 1051 (void) px_rem_msiq_intr(dip, rdip,
1052 1052 hdlp, msiq_rec_type, msi_num, msiq_id);
1053 1053
1054 1054 mutex_exit(&ib_p->ib_ino_lst_mutex);
1055 1055 return (ret);
1056 1056 }
1057 1057
1058 1058 if (msi_state == PCI_MSI_STATE_DELIVERED) {
1059 1059 ih_p->ih_intr_flags |= PX_INTR_RETARGET;
1060 1060 old_ih_p->ih_intr_flags |= PX_INTR_RETARGET;
1061 1061 }
1062 1062
1063 1063 start_time = gethrtime();
1064 1064 while (((ih_p->ih_intr_flags & PX_INTR_RETARGET) &&
1065 1065 (old_ih_p->ih_intr_flags & PX_INTR_RETARGET)) ||
1066 1066 (old_ih_p->ih_intr_flags & PX_INTR_PENDING)) {
1067 1067
1068 1068 /* Wait for one second */
1069 - delay(drv_usectohz(1000000));
1069 + delay(drv_sectohz(1));
1070 1070
1071 1071 end_time = gethrtime() - start_time;
1072 1072 if (end_time > px_ib_msix_retarget_timeout) {
1073 1073 cmn_err(CE_WARN, "MSIX retarget %x has not completed, "
1074 1074 "even after waiting %llx nsec\n",
1075 1075 msi_num, end_time);
1076 1076 break;
1077 1077 }
1078 1078 }
1079 1079
1080 1080 ih_p->ih_intr_flags &= ~(PX_INTR_RETARGET);
1081 1081
1082 1082 mutex_exit(&ib_p->ib_ino_lst_mutex);
1083 1083
1084 1084 ret = px_rem_msiq_intr(dip, rdip,
1085 1085 hdlp, msiq_rec_type, msi_num, old_msiq_id);
1086 1086
1087 1087 return (ret);
1088 1088 }
1089 1089
1090 1090
1091 1091 static void
1092 1092 px_fill_in_intr_devs(pcitool_intr_dev_t *dev, char *driver_name,
1093 1093 char *path_name, int instance)
1094 1094 {
1095 1095 (void) strlcpy(dev->driver_name, driver_name, MAXMODCONFNAME);
1096 1096 (void) strlcpy(dev->path, path_name, MAXPATHLEN);
1097 1097 dev->dev_inst = instance;
1098 1098 }
1099 1099
1100 1100
1101 1101 /*
1102 1102 * Return the dips or number of dips associated with a given interrupt block.
1103 1103  * The size of the devs array arg is passed in via the devs_ret arg.
1104 1104  * The number of dips returned is passed back in the devs_ret arg.
1105 1105  * The array of dips gets returned in the devs argument.
1106 1106 * Function returns number of dips existing for the given interrupt block.
1107 1107 *
1108 1108 * Note: this function assumes an enabled/valid INO, which is why it returns
1109 1109 * the px node and (Internal) when it finds no other devices (and *devs_ret > 0)
1110 1110 */
1111 1111 uint8_t
1112 1112 pxtool_ib_get_ino_devs(px_t *px_p, uint32_t ino, uint32_t msi_num,
1113 1113 uint8_t *devs_ret, pcitool_intr_dev_t *devs)
1114 1114 {
1115 1115 px_ib_t *ib_p = px_p->px_ib_p;
1116 1116 px_ino_t *ino_p;
1117 1117 px_ino_pil_t *ipil_p;
1118 1118 px_ih_t *ih_p;
1119 1119 uint32_t num_devs = 0;
1120 1120 char pathname[MAXPATHLEN];
1121 1121 int i, j;
1122 1122
1123 1123 mutex_enter(&ib_p->ib_ino_lst_mutex);
1124 1124 ino_p = px_ib_locate_ino(ib_p, ino);
1125 1125 if (ino_p != NULL) {
1126 1126 for (j = 0, ipil_p = ino_p->ino_ipil_p; ipil_p;
1127 1127 ipil_p = ipil_p->ipil_next_p) {
1128 1128 num_devs += ipil_p->ipil_ih_size;
1129 1129
1130 1130 for (i = 0, ih_p = ipil_p->ipil_ih_head;
1131 1131 ((i < ipil_p->ipil_ih_size) && (i < *devs_ret));
1132 1132 i++, j++, ih_p = ih_p->ih_next) {
1133 1133 (void) ddi_pathname(ih_p->ih_dip, pathname);
1134 1134
1135 1135 if (ih_p->ih_msg_code == msi_num) {
1136 1136 num_devs = *devs_ret = 1;
1137 1137 px_fill_in_intr_devs(&devs[0],
1138 1138 (char *)ddi_driver_name(
1139 1139 ih_p->ih_dip), pathname,
1140 1140 ddi_get_instance(ih_p->ih_dip));
1141 1141 goto done;
1142 1142 }
1143 1143
1144 1144 px_fill_in_intr_devs(&devs[j],
1145 1145 (char *)ddi_driver_name(ih_p->ih_dip),
1146 1146 pathname, ddi_get_instance(ih_p->ih_dip));
1147 1147 }
1148 1148 }
1149 1149
1150 1150 *devs_ret = j;
1151 1151 } else if (*devs_ret > 0) {
1152 1152 (void) ddi_pathname(px_p->px_dip, pathname);
1153 1153 (void) strcat(pathname, " (Internal)");
1154 1154 px_fill_in_intr_devs(&devs[0],
1155 1155 (char *)ddi_driver_name(px_p->px_dip), pathname,
1156 1156 ddi_get_instance(px_p->px_dip));
1157 1157 num_devs = *devs_ret = 1;
1158 1158 }
1159 1159
1160 1160 done:
1161 1161 mutex_exit(&ib_p->ib_ino_lst_mutex);
1162 1162
1163 1163 return (num_devs);
1164 1164 }
1165 1165
1166 1166
1167 1167 int
1168 1168 pxtool_ib_get_msi_info(px_t *px_p, devino_t ino, msinum_t msi_num,
1169 1169 ddi_intr_handle_impl_t *hdlp)
1170 1170 {
1171 1171 px_ib_t *ib_p = px_p->px_ib_p;
1172 1172 px_ino_t *ino_p;
1173 1173 px_ino_pil_t *ipil_p;
1174 1174 px_ih_t *ih_p;
1175 1175 int i;
1176 1176
1177 1177 mutex_enter(&ib_p->ib_ino_lst_mutex);
1178 1178
1179 1179 if ((ino_p = px_ib_locate_ino(ib_p, ino)) == NULL) {
1180 1180 mutex_exit(&ib_p->ib_ino_lst_mutex);
1181 1181 return (DDI_FAILURE);
1182 1182 }
1183 1183
1184 1184 for (ipil_p = ino_p->ino_ipil_p; ipil_p;
1185 1185 ipil_p = ipil_p->ipil_next_p) {
1186 1186 for (i = 0, ih_p = ipil_p->ipil_ih_head;
1187 1187 ((i < ipil_p->ipil_ih_size) && ih_p);
1188 1188 i++, ih_p = ih_p->ih_next) {
1189 1189
1190 1190 if (ih_p->ih_msg_code != msi_num)
1191 1191 continue;
1192 1192
1193 1193 hdlp->ih_dip = ih_p->ih_dip;
1194 1194 hdlp->ih_inum = ih_p->ih_inum;
1195 1195 hdlp->ih_cb_func = ih_p->ih_handler;
1196 1196 hdlp->ih_cb_arg1 = ih_p->ih_handler_arg1;
1197 1197 hdlp->ih_cb_arg2 = ih_p->ih_handler_arg2;
1198 1198 if (ih_p->ih_rec_type == MSI64_REC)
1199 1199 hdlp->ih_cap = DDI_INTR_FLAG_MSI64;
1200 1200 hdlp->ih_pri = ipil_p->ipil_pil;
1201 1201 hdlp->ih_ver = DDI_INTR_VERSION;
1202 1202
1203 1203 mutex_exit(&ib_p->ib_ino_lst_mutex);
1204 1204 return (DDI_SUCCESS);
1205 1205 }
1206 1206 }
1207 1207
1208 1208 mutex_exit(&ib_p->ib_ino_lst_mutex);
1209 1209 return (DDI_FAILURE);
1210 1210 }
1211 1211
1212 1212 void
1213 1213 px_ib_log_new_cpu(px_ib_t *ib_p, cpuid_t old_cpu_id, cpuid_t new_cpu_id,
1214 1214 uint32_t ino)
1215 1215 {
1216 1216 px_ino_t *ino_p;
1217 1217 px_ino_pil_t *ipil_p;
1218 1218 px_ih_t *ih_p;
1219 1219 int i;
1220 1220
1221 1221 mutex_enter(&ib_p->ib_ino_lst_mutex);
1222 1222
1223 1223 /* Log in OS data structures the new CPU. */
1224 1224 if (ino_p = px_ib_locate_ino(ib_p, ino)) {
1225 1225
1226 1226 /* Record the new cpu for this ino. */
1227 1227 ino_p->ino_cpuid = new_cpu_id;
1228 1228
1229 1229 for (ipil_p = ino_p->ino_ipil_p; ipil_p;
1230 1230 ipil_p = ipil_p->ipil_next_p) {
1231 1231 for (i = 0, ih_p = ipil_p->ipil_ih_head;
1232 1232 (i < ipil_p->ipil_ih_size);
1233 1233 i++, ih_p = ih_p->ih_next) {
1234 1234 /*
1235 1235 * Account for any residual time
1236 1236 * to be logged for old cpu.
1237 1237 */
1238 1238 px_ib_cpu_ticks_to_ih_nsec(ib_p,
1239 1239 ih_p, old_cpu_id);
1240 1240 }
1241 1241 }
1242 1242 }
1243 1243
1244 1244 mutex_exit(&ib_p->ib_ino_lst_mutex);
1245 1245 }
(166 unchanged lines elided)
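Two patterns in this file may help reviewers evaluate the change in context;
the sketches below are illustrative stand-ins, not code from the driver.

First, px_ib_cpu_ticks_to_ih_nsec() folds a per-handler tick counter into a
nanosecond total. The counter may be incremented concurrently by the
interrupt handler, so it is stolen with an atomic swap, and the two-field
update happens under the kstat template lock so kstat readers never observe
ih_ticks zeroed before ih_nsec is credited. A minimal sketch of the same
pattern, with counter_lock, ticks, nsec and fold_ticks_into_nsec() as
hypothetical names (tick2ns() is the conversion the driver itself uses):

	#include <sys/atomic.h>		/* atomic_swap_64() */
	#include <sys/ksynch.h>		/* kmutex_t */
	#include <sys/machsystm.h>	/* tick2ns() */

	static kmutex_t counter_lock;	/* role of pxintr_ks_template_lock */
	static volatile uint64_t ticks;	/* bumped by the interrupt handler */
	static uint64_t nsec;		/* accumulated total */

	static void
	fold_ticks_into_nsec(uint32_t cpu_id)
	{
		uint64_t t;

		mutex_enter(&counter_lock);
		t = atomic_swap_64(&ticks, 0);	/* read and zero in one step */
		nsec += tick2ns((hrtime_t)t, cpu_id); /* old cpu's clock rate */
		mutex_exit(&counter_lock);
	}

Second, px_ib_intr_pend() busy-waits for a pending interrupt but must not
declare a timeout merely because hrtime jumped while the system sat in the
debugger or OBP. Whenever one polling iteration spans more than the jump
limit, the timeout budget is extended by that interval, so only time
genuinely spent polling counts. Sketched with poll_done(), TIMEOUT_NSEC and
JUMP_LIMIT_NSEC as hypothetical names:

	hrtime_t start, prev, curr, budget = TIMEOUT_NSEC;

	for (curr = start = gethrtime(); !poll_done(); ) {
		prev = curr;
		curr = gethrtime();
		if (curr - prev > JUMP_LIMIT_NSEC)	/* debugger visit */
			budget += curr - prev;		/* don't count it */
		if (curr - start > budget)
			break;				/* genuine timeout */
	}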