patch apic-simplify
patch remove-apic_cr8pri
patch spacing-fix
patch apic-task-reg-write-dup


 126  *      6               0x60-0x7f               0x40-0x5f
 127  *      7,8,9           0x80-0x8f               0x60-0x6f
 128  *      10              0x90-0x9f               0x70-0x7f
 129  *      11              0xa0-0xaf               0x80-0x8f
 130  *      ...             ...
 131  *      15              0xe0-0xef               0xc0-0xcf
 132  *      15              0xf0-0xff               0xd0-0xdf
 133  */
 134 uchar_t apic_vectortoipl[APIC_AVAIL_VECTOR / APIC_VECTOR_PER_IPL] = {
 135         3, 4, 5, 5, 6, 6, 9, 10, 11, 12, 13, 14, 15, 15
 136 };
 137         /*
 138          * The ipl of an ISR at vector X is apic_vectortoipl[X>>4].
 139          * NOTE that X is the vector as passed into intr_enter, i.e. the
 140          * programmed vector - 0x20 (APIC_BASE_VECT).
 141          */
 142 
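A concrete reading of the table and comment above: a source programmed at
vector 0x92 reaches intr_enter as 0x92 - 0x20 = 0x72, and 0x72 >> 4 indexes
slot 7 of apic_vectortoipl, giving IPL 10. A standalone userland sketch
(constants copied from the table above):

	/* Sketch: the vector-to-IPL lookup, outside the kernel. */
	#include <stdio.h>

	static unsigned char vectortoipl[] = {
		3, 4, 5, 5, 6, 6, 9, 10, 11, 12, 13, 14, 15, 15
	};

	int
	main(void)
	{
		unsigned char programmed = 0x92;	/* as programmed into the APIC */
		unsigned char vector = programmed - 0x20;	/* APIC_BASE_VECT */

		printf("ipl = %u\n", vectortoipl[vector >> 4]);	/* prints 10 */
		return (0);
	}
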
 143 uchar_t apic_ipltopri[MAXIPL + 1];      /* unix ipl to apic pri */
 144         /* The taskpri to be programmed into apic to mask given ipl */
 145 
 146 #if defined(__amd64)
 147 uchar_t apic_cr8pri[MAXIPL + 1];        /* unix ipl to cr8 pri  */
 148 #endif
 149 
 150 /*
 151  * Correlation of the hardware vector to the IPL in use, initialized
 152  * from apic_vectortoipl[] in apic_init().  The final IPLs may not correlate
 153  * to the IPLs in apic_vectortoipl on some systems that share interrupt lines
 154  * connected to errata-stricken IOAPICs.
 155  */
 156 uchar_t apic_ipls[APIC_AVAIL_VECTOR];
 157 
 158 /*
 159  * Patchable global variables.
 160  */
 161 int     apic_enable_hwsoftint = 0;      /* 0 - disable, 1 - enable      */
 162 int     apic_enable_bind_log = 1;       /* 1 - display interrupt binding log */
 163 
 164 /*
 165  *      Local static data
 166  */
 167 static struct   psm_ops apic_ops = {
 168         apic_probe,
 169 


 283 
 284         psm_get_ioapicid = apic_get_ioapicid;
 285         psm_get_localapicid = apic_get_localapicid;
 286         psm_xlate_vector_by_irq = apic_xlate_vector_by_irq;
 287 
 288         apic_ipltopri[0] = APIC_VECTOR_PER_IPL; /* leave 0 for idle */
 289         for (i = 0; i < (APIC_AVAIL_VECTOR / APIC_VECTOR_PER_IPL); i++) {
 290                 if ((i < ((APIC_AVAIL_VECTOR / APIC_VECTOR_PER_IPL) - 1)) &&
 291                     (apic_vectortoipl[i + 1] == apic_vectortoipl[i]))
 292                         /* get to highest vector at the same ipl */
 293                         continue;
 294                 for (; j <= apic_vectortoipl[i]; j++) {
 295                         apic_ipltopri[j] = (i << APIC_IPL_SHIFT) +
 296                             APIC_BASE_VECT;
 297                 }
 298         }
 299         for (; j < MAXIPL + 1; j++)
 300                 /* fill up any empty ipltopri slots */
 301                 apic_ipltopri[j] = (i << APIC_IPL_SHIFT) + APIC_BASE_VECT;
 302         apic_init_common();
 303 #if defined(__amd64)
 304         /*
 305          * Make cpu-specific interrupt info point to the cr8pri table
 306          */
 307         for (i = 0; i <= MAXIPL; i++)
 308                 apic_cr8pri[i] = apic_ipltopri[i] >> APIC_IPL_SHIFT;
 309         CPU->cpu_pri_data = apic_cr8pri;
 310 #else
 311         if (cpuid_have_cr8access(CPU))
 312                 apic_have_32bit_cr8 = 1;
 313 #endif  /* __amd64 */
 314 }
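
To sanity-check the fill loops above, the following standalone sketch rebuilds
apic_ipltopri with the same algorithm, assuming (as the surrounding code
implies) that j starts at 1 and that APIC_VECTOR_PER_IPL == 16,
APIC_IPL_SHIFT == 4, APIC_BASE_VECT == 0x20, MAXIPL == 15, with 14 vector
slots. It yields, e.g., ipltopri[6] == 0x70, so masking at IPL 6 masks all
vectors 0x20-0x7f:

	#include <stdio.h>

	#define	MAXIPL			15
	#define	APIC_BASE_VECT		0x20
	#define	APIC_IPL_SHIFT		4
	#define	APIC_VECTOR_PER_IPL	16
	#define	NSLOTS			14	/* APIC_AVAIL_VECTOR / APIC_VECTOR_PER_IPL */

	static unsigned char vectortoipl[NSLOTS] = {
		3, 4, 5, 5, 6, 6, 9, 10, 11, 12, 13, 14, 15, 15
	};
	static unsigned char ipltopri[MAXIPL + 1];

	int
	main(void)
	{
		int i, j = 1;

		ipltopri[0] = APIC_VECTOR_PER_IPL;	/* leave 0 for idle */
		for (i = 0; i < NSLOTS; i++) {
			if (i < NSLOTS - 1 && vectortoipl[i + 1] == vectortoipl[i])
				continue;	/* highest vector at this ipl */
			for (; j <= vectortoipl[i]; j++)
				ipltopri[j] = (i << APIC_IPL_SHIFT) + APIC_BASE_VECT;
		}
		for (; j < MAXIPL + 1; j++)	/* fill any empty slots */
			ipltopri[j] = (i << APIC_IPL_SHIFT) + APIC_BASE_VECT;

		for (j = 0; j <= MAXIPL; j++)
			printf("ipl %2d -> TPR 0x%02x\n", j, ipltopri[j]);
		return (0);
	}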
 315 
 316 static void
 317 apic_init_intr(void)
 318 {
 319         processorid_t   cpun = psm_get_cpu_id();
 320         uint_t nlvt;
 321         uint32_t svr = AV_UNIT_ENABLE | APIC_SPUR_INTR;
 322 
 323         apic_reg_ops->apic_write_task_reg(APIC_MASK_ALL);
 324 
 325         if (apic_mode == LOCAL_APIC) {
 326                 /*
 327                  * We are running APIC in MMIO mode.
 328                  */
 329                 if (apic_flat_model) {
 330                         apic_reg_ops->apic_write(APIC_FORMAT_REG,
 331                             APIC_FLAT_MODEL);
 332                 } else {
 333                         apic_reg_ops->apic_write(APIC_FORMAT_REG,


 564          * subtracts 0x20 from the vector before passing it to us.
 565          * (That's why APIC_BASE_VECT is 0x20.)
 566          */
 567         vector = (uchar_t)*vectorp;
 568 
 569         /* if interrupted by the clock, increment apic_nsec_since_boot */
 570         if (vector == apic_clkvect) {
 571                 if (!apic_oneshot) {
 572                         /* NOTE: this is not MT aware */
 573                         apic_hrtime_stamp++;
 574                         apic_nsec_since_boot += apic_nsec_per_intr;
 575                         apic_hrtime_stamp++;
 576                         last_count_read = apic_hertz_count;
 577                         apic_redistribute_compute();
 578                 }
 579 
 580         /* We avoid all the bookkeeping overhead for the clock interrupt */
 581                 nipl = apic_ipls[vector];
 582 
 583                 *vectorp = apic_vector_to_irq[vector + APIC_BASE_VECT];
 584                 if (apic_mode == LOCAL_APIC) {
 585 #if defined(__amd64)
 586                         setcr8((ulong_t)(apic_ipltopri[nipl] >>
 587                             APIC_IPL_SHIFT));
 588 #else
 589                         if (apic_have_32bit_cr8)
 590                                 setcr8((ulong_t)(apic_ipltopri[nipl] >>
 591                                     APIC_IPL_SHIFT));
 592                         else
 593                                 LOCAL_APIC_WRITE_REG(APIC_TASK_REG,
 594                                     (uint32_t)apic_ipltopri[nipl]);
 595 #endif
 596                         LOCAL_APIC_WRITE_REG(APIC_EOI_REG, 0);
 597                 } else {
 598                         X2APIC_WRITE(APIC_TASK_REG, apic_ipltopri[nipl]);
 599                         X2APIC_WRITE(APIC_EOI_REG, 0);
 600                 }
 601 
 602                 return (nipl);
 603         }
 604 
 605         cpu_infop = &apic_cpus[psm_get_cpu_id()];
 606 
 607         if (vector == (APIC_SPUR_INTR - APIC_BASE_VECT)) {
 608                 cpu_infop->aci_spur_cnt++;
 609                 return (APIC_INT_SPURIOUS);
 610         }
 611 
 612         /* Check if the vector we got is really what we need */
 613         if (apic_revector_pending) {
 614                 /*
 615                  * Disable interrupts for the duration of
 616                  * the vector translation to prevent a self-race for
 617                  * the apic_revector_lock.  This cannot be done
 618                  * in apic_xlate_vector because it is recursive and
 619                  * we want the vector translation to be atomic with
 620                  * respect to other (higher-priority) interrupts.
 621                  */
 622                 iflag = intr_clear();
 623                 vector = apic_xlate_vector(vector + APIC_BASE_VECT) -
 624                     APIC_BASE_VECT;
 625                 intr_restore(iflag);
 626         }
 627 
 628         nipl = apic_ipls[vector];
 629         *vectorp = irq = apic_vector_to_irq[vector + APIC_BASE_VECT];
 630 
 631         if (apic_mode == LOCAL_APIC) {
 632 #if defined(__amd64)
 633                 setcr8((ulong_t)(apic_ipltopri[nipl] >> APIC_IPL_SHIFT));
 634 #else
 635                 if (apic_have_32bit_cr8)
 636                         setcr8((ulong_t)(apic_ipltopri[nipl] >>
 637                             APIC_IPL_SHIFT));
 638                 else
 639                         LOCAL_APIC_WRITE_REG(APIC_TASK_REG,
 640                             (uint32_t)apic_ipltopri[nipl]);
 641 #endif
 642         } else {
 643                 X2APIC_WRITE(APIC_TASK_REG, apic_ipltopri[nipl]);
 644         }
 645 
 646         cpu_infop->aci_current[nipl] = (uchar_t)irq;
 647         cpu_infop->aci_curipl = (uchar_t)nipl;
 648         cpu_infop->aci_ISR_in_progress |= 1 << nipl;
 649 
 650         /*
 651          * apic_level_intr could have been assimilated into the irq struct,
 652          * but having it as a separate character array is more efficient in
 653          * terms of cache usage, so we leave it as is.
 654          */
 655         if (!apic_level_intr[irq]) {
 656                 if (apic_mode == LOCAL_APIC) {
 657                         LOCAL_APIC_WRITE_REG(APIC_EOI_REG, 0);
 658                 } else {
 659                         X2APIC_WRITE(APIC_EOI_REG, 0);
 660                 }
 661         }
 662 
 663 #ifdef  DEBUG
 664         APIC_DEBUG_BUF_PUT(vector);
 665         APIC_DEBUG_BUF_PUT(irq);
 666         APIC_DEBUG_BUF_PUT(nipl);
 667         APIC_DEBUG_BUF_PUT(psm_get_cpu_id());
 668         if ((apic_stretch_interrupts) && (apic_stretch_ISR & (1 << nipl)))
 669                 drv_usecwait(apic_stretch_interrupts);
 670 
 671         if (apic_break_on_cpu == psm_get_cpu_id())
 672                 apic_break();
 673 #endif /* DEBUG */
 674         return (nipl);
 675 }
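
In the #if blocks above, the amd64 path writes apic_ipltopri[nipl] >>
APIC_IPL_SHIFT to %cr8 while the 32-bit paths write the full byte to the
TPR; this works because %cr8 architecturally holds only the priority class,
i.e. TPR bits 7:4. A minimal sketch of the equivalence:

	#include <stdio.h>

	#define	APIC_IPL_SHIFT	4

	int
	main(void)
	{
		unsigned int tpr = 0x70;	/* e.g. apic_ipltopri[6] */
		unsigned long cr8 = tpr >> APIC_IPL_SHIFT;

		/* Writing 0x7 to %cr8 has the same effect as TPR = 0x70. */
		printf("tpr = 0x%02x, cr8 = 0x%lx\n", tpr, cr8);
		return (0);
	}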
 676 
 677 /*
 678  * This macro is common code shared by the MMIO local APIC and the
 679  * X2APIC code paths.
 680  */
 681 #define APIC_INTR_EXIT() \
 682 { \
 683         cpu_infop = &apic_cpus[psm_get_cpu_id()]; \
 684         if (apic_level_intr[irq]) \
 685                 apic_reg_ops->apic_send_eoi(irq); \
 686         cpu_infop->aci_curipl = (uchar_t)prev_ipl; \
 687         /* An ISR above the current pri cannot be in progress */ \
 688         cpu_infop->aci_ISR_in_progress &= (2 << prev_ipl) - 1; \
 689 }
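
The last line of the macro relies on (2 << prev_ipl) - 1 being a run of
prev_ipl + 1 low one-bits, clearing the in-progress bits of every IPL above
prev_ipl in a single AND. A quick standalone sketch:

	#include <stdio.h>

	int
	main(void)
	{
		unsigned int in_progress = (1 << 9) | (1 << 4);	/* IPLs 9 and 4 */
		int prev_ipl = 4;

		in_progress &= (2 << prev_ipl) - 1;	/* mask is 0x1f */
		printf("0x%x\n", in_progress);	/* 0x10: only IPL 4 remains */
		return (0);
	}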
 690 
 691 /*
 692  * Any changes made to this function must also be made to the
 693  * X2APIC version of intr_exit.
 694  */
 695 void
 696 apic_intr_exit(int prev_ipl, int irq)
 697 {
 698         apic_cpus_info_t *cpu_infop;
 699 
 700 #if defined(__amd64)
 701         setcr8((ulong_t)apic_cr8pri[prev_ipl]);
 702 #else
 703         if (apic_have_32bit_cr8)
 704                 setcr8((ulong_t)(apic_ipltopri[prev_ipl] >> APIC_IPL_SHIFT));
 705         else
 706                 apicadr[APIC_TASK_REG] = apic_ipltopri[prev_ipl];
 707 #endif
 708 
 709         APIC_INTR_EXIT();
 710 }
 711 
 712 /*
 713  * Same as apic_intr_exit() except it uses MSR rather than MMIO
 714  * to access local apic registers.
 715  */
 716 void
 717 x2apic_intr_exit(int prev_ipl, int irq)
 718 {
 719         apic_cpus_info_t *cpu_infop;
 720 
 721         X2APIC_WRITE(APIC_TASK_REG, apic_ipltopri[prev_ipl]);
 722         APIC_INTR_EXIT();
 723 }
 724 
 725 intr_exit_fn_t
 726 psm_intr_exit_fn(void)
 727 {
 728         if (apic_mode == LOCAL_X2APIC)
 729                 return (x2apic_intr_exit);
 730 
 731         return (apic_intr_exit);
 732 }
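
A hypothetical caller sketch (stubbed types and routines, not the actual PSM
glue): resolve the routine once after the APIC mode is known, then call it on
every interrupt exit without re-testing apic_mode in the hot path:

	typedef void (*intr_exit_fn_t)(int, int);

	/* Stubs standing in for the two real exit routines above. */
	static void apic_intr_exit(int prev_ipl, int irq) { (void)prev_ipl; (void)irq; }
	static void x2apic_intr_exit(int prev_ipl, int irq) { (void)prev_ipl; (void)irq; }

	static int apic_mode = 1;	/* stub: nonzero means LOCAL_X2APIC */

	static intr_exit_fn_t
	psm_intr_exit_fn(void)
	{
		return (apic_mode ? x2apic_intr_exit : apic_intr_exit);
	}

	int
	main(void)
	{
		intr_exit_fn_t exitfn = psm_intr_exit_fn();	/* resolve once */

		exitfn(0, 9);	/* per-interrupt: plain indirect call */
		return (0);
	}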
 733 
 734 /*
 735  * Mask all interrupts below or equal to the given IPL.
 736  * Any changes made to this function must also be made to the
 737  * X2APIC version of setspl.
 738  */
 739 static void
 740 apic_setspl(int ipl)
 741 {
 742 #if defined(__amd64)
 743         setcr8((ulong_t)apic_cr8pri[ipl]);
 744 #else
 745         if (apic_have_32bit_cr8)
 746                 setcr8((ulong_t)(apic_ipltopri[ipl] >> APIC_IPL_SHIFT));
 747         else
 748                 apicadr[APIC_TASK_REG] = apic_ipltopri[ipl];
 749 #endif
 750 
 751         /* interrupts at ipl above this cannot be in progress */
 752         apic_cpus[psm_get_cpu_id()].aci_ISR_in_progress &= (2 << ipl) - 1;
 753         /*
 754          * This is a workaround for the ALR QSMP P5 machine: it gives
 755          * interrupts enough time to come in before the priority is
 756          * raised again during the idle() loop.
 757          */
 758         if (apic_setspl_delay)
 759                 (void) apic_reg_ops->apic_get_pri();
 760 }
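
Concretely, after apic_setspl(6) the TPR holds apic_ipltopri[6] (0x70 with
the tables above), so only vectors 0x80-0xff (IPLs 7 and up) can still be
delivered. A sketch of that acceptance test (hypothetical helper, using the
architectural rule that a fixed interrupt is delivered only when its priority
class exceeds TPR bits 7:4):

	#include <stdio.h>

	/* Deliverable iff the vector's class (bits 7:4) > the TPR's class. */
	static int
	deliverable(unsigned char vector, unsigned char tpr)
	{
		return ((vector >> 4) > (tpr >> 4));
	}

	int
	main(void)
	{
		unsigned char tpr = 0x70;	/* apic_ipltopri[6] */

		printf("0x72: %d\n", deliverable(0x72, tpr));	/* 0: masked */
		printf("0x82: %d\n", deliverable(0x82, tpr));	/* 1: IPL 7 */
		return (0);
	}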
 761 
 762 /*
 763  * X2APIC version of setspl.
 764  * Mask all interrupts below or equal to the given IPL.
 765  */
 766 static void
 767 x2apic_setspl(int ipl)
 768 {
 769         X2APIC_WRITE(APIC_TASK_REG, apic_ipltopri[ipl]);




 126  *      6               0x60-0x7f               0x40-0x5f
 127  *      7,8,9           0x80-0x8f               0x60-0x6f
 128  *      10              0x90-0x9f               0x70-0x7f
 129  *      11              0xa0-0xaf               0x80-0x8f
 130  *      ...             ...
 131  *      15              0xe0-0xef               0xc0-0xcf
 132  *      15              0xf0-0xff               0xd0-0xdf
 133  */
 134 uchar_t apic_vectortoipl[APIC_AVAIL_VECTOR / APIC_VECTOR_PER_IPL] = {
 135         3, 4, 5, 5, 6, 6, 9, 10, 11, 12, 13, 14, 15, 15
 136 };
 137         /*
 138          * The ipl of an ISR at vector X is apic_vectortoipl[X>>4].
 139          * NOTE that X is the vector as passed into intr_enter, i.e. the
 140          * programmed vector - 0x20 (APIC_BASE_VECT).
 141          */
 142 
 143 uchar_t apic_ipltopri[MAXIPL + 1];      /* unix ipl to apic pri */
 144         /* The taskpri to be programmed into apic to mask given ipl */
 145 




 146 /*
 147  * Correlation of the hardware vector to the IPL in use, initialized
 148  * from apic_vectortoipl[] in apic_init().  The final IPLs may not correlate
 149  * to the IPLs in apic_vectortoipl on some systems that share interrupt lines
 150  * connected to errata-stricken IOAPICs.
 151  */
 152 uchar_t apic_ipls[APIC_AVAIL_VECTOR];
 153 
 154 /*
 155  * Patchable global variables.
 156  */
 157 int     apic_enable_hwsoftint = 0;      /* 0 - disable, 1 - enable      */
 158 int     apic_enable_bind_log = 1;       /* 1 - display interrupt binding log */
 159 
 160 /*
 161  *      Local static data
 162  */
 163 static struct   psm_ops apic_ops = {
 164         apic_probe,
 165 


 279 
 280         psm_get_ioapicid = apic_get_ioapicid;
 281         psm_get_localapicid = apic_get_localapicid;
 282         psm_xlate_vector_by_irq = apic_xlate_vector_by_irq;
 283 
 284         apic_ipltopri[0] = APIC_VECTOR_PER_IPL; /* leave 0 for idle */
 285         for (i = 0; i < (APIC_AVAIL_VECTOR / APIC_VECTOR_PER_IPL); i++) {
 286                 if ((i < ((APIC_AVAIL_VECTOR / APIC_VECTOR_PER_IPL) - 1)) &&
 287                     (apic_vectortoipl[i + 1] == apic_vectortoipl[i]))
 288                         /* get to highest vector at the same ipl */
 289                         continue;
 290                 for (; j <= apic_vectortoipl[i]; j++) {
 291                         apic_ipltopri[j] = (i << APIC_IPL_SHIFT) +
 292                             APIC_BASE_VECT;
 293                 }
 294         }
 295         for (; j < MAXIPL + 1; j++)
 296                 /* fill up any empty ipltopri slots */
 297                 apic_ipltopri[j] = (i << APIC_IPL_SHIFT) + APIC_BASE_VECT;
 298         apic_init_common();
 299 #ifndef __amd64

 300         if (cpuid_have_cr8access(CPU))
 301                 apic_have_32bit_cr8 = 1;
 302 #endif  /* !__amd64 */
 303 }
 304 
 305 static void
 306 apic_init_intr(void)
 307 {
 308         processorid_t   cpun = psm_get_cpu_id();
 309         uint_t nlvt;
 310         uint32_t svr = AV_UNIT_ENABLE | APIC_SPUR_INTR;
 311 
 312         apic_reg_ops->apic_write_task_reg(APIC_MASK_ALL);
 313 
 314         if (apic_mode == LOCAL_APIC) {
 315                 /*
 316                  * We are running APIC in MMIO mode.
 317                  */
 318                 if (apic_flat_model) {
 319                         apic_reg_ops->apic_write(APIC_FORMAT_REG,
 320                             APIC_FLAT_MODEL);
 321                 } else {
 322                         apic_reg_ops->apic_write(APIC_FORMAT_REG,


 553          * subtracts 0x20 from the vector before passing it to us.
 554          * (That's why APIC_BASE_VECT is 0x20.)
 555          */
 556         vector = (uchar_t)*vectorp;
 557 
 558         /* if interrupted by the clock, increment apic_nsec_since_boot */
 559         if (vector == apic_clkvect) {
 560                 if (!apic_oneshot) {
 561                         /* NOTE: this is not MT aware */
 562                         apic_hrtime_stamp++;
 563                         apic_nsec_since_boot += apic_nsec_per_intr;
 564                         apic_hrtime_stamp++;
 565                         last_count_read = apic_hertz_count;
 566                         apic_redistribute_compute();
 567                 }
 568 
 569         /* We avoid all the bookkeeping overhead for the clock interrupt */
 570                 nipl = apic_ipls[vector];
 571 
 572                 *vectorp = apic_vector_to_irq[vector + APIC_BASE_VECT];
 573 
 574                 apic_reg_ops->apic_write_task_reg(apic_ipltopri[nipl]);
 575                 apic_reg_ops->apic_send_eoi(0);

 576 
 577                 return (nipl);
 578         }
 579 
 580         cpu_infop = &apic_cpus[psm_get_cpu_id()];
 581 
 582         if (vector == (APIC_SPUR_INTR - APIC_BASE_VECT)) {
 583                 cpu_infop->aci_spur_cnt++;
 584                 return (APIC_INT_SPURIOUS);
 585         }
 586 
 587         /* Check if the vector we got is really what we need */
 588         if (apic_revector_pending) {
 589                 /*
 590                  * Disable interrupts for the duration of
 591                  * the vector translation to prevent a self-race for
 592                  * the apic_revector_lock.  This cannot be done
 593                  * in apic_xlate_vector because it is recursive and
 594                  * we want the vector translation to be atomic with
 595                  * respect to other (higher-priority) interrupts.
 596                  */
 597                 iflag = intr_clear();
 598                 vector = apic_xlate_vector(vector + APIC_BASE_VECT) -
 599                     APIC_BASE_VECT;
 600                 intr_restore(iflag);
 601         }
 602 
 603         nipl = apic_ipls[vector];
 604         *vectorp = irq = apic_vector_to_irq[vector + APIC_BASE_VECT];
 605 
 606         apic_reg_ops->apic_write_task_reg(apic_ipltopri[nipl]);

 607 
 608         cpu_infop->aci_current[nipl] = (uchar_t)irq;
 609         cpu_infop->aci_curipl = (uchar_t)nipl;
 610         cpu_infop->aci_ISR_in_progress |= 1 << nipl;
 611 
 612         /*
 613          * apic_level_intr could have been assimilated into the irq struct,
 614          * but having it as a separate character array is more efficient in
 615          * terms of cache usage, so we leave it as is.
 616          */
 617         if (!apic_level_intr[irq]) {
 618                 apic_reg_ops->apic_send_eoi(0);




 619         }
 620 
 621 #ifdef  DEBUG
 622         APIC_DEBUG_BUF_PUT(vector);
 623         APIC_DEBUG_BUF_PUT(irq);
 624         APIC_DEBUG_BUF_PUT(nipl);
 625         APIC_DEBUG_BUF_PUT(psm_get_cpu_id());
 626         if ((apic_stretch_interrupts) && (apic_stretch_ISR & (1 << nipl)))
 627                 drv_usecwait(apic_stretch_interrupts);
 628 
 629         if (apic_break_on_cpu == psm_get_cpu_id())
 630                 apic_break();
 631 #endif /* DEBUG */
 632         return (nipl);
 633 }
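
The rewritten paths above funnel every TPR update and EOI through
apic_reg_ops, so the MMIO-versus-MSR (and cr8) decision is made once when the
ops vector is chosen at init instead of at every interrupt. A standalone
sketch of that dispatch pattern (stub functions; the real ops vector lives in
apic_regops.c and has more fields):

	#include <stdio.h>
	#include <stdint.h>

	typedef struct apic_regs_ops {
		void (*apic_write_task_reg)(uint64_t);
		void (*apic_send_eoi)(uint32_t);
	} apic_regs_ops_t;

	static void
	mmio_write_task_reg(uint64_t v)
	{
		printf("MMIO TPR <- 0x%llx\n", (unsigned long long)v);
	}

	static void
	mmio_send_eoi(uint32_t v)
	{
		(void)v;
		printf("MMIO EOI\n");
	}

	static void
	msr_write_task_reg(uint64_t v)
	{
		printf("MSR TPR <- 0x%llx\n", (unsigned long long)v);
	}

	static void
	msr_send_eoi(uint32_t v)
	{
		(void)v;
		printf("MSR EOI\n");
	}

	static apic_regs_ops_t mmio_ops = { mmio_write_task_reg, mmio_send_eoi };
	static apic_regs_ops_t msr_ops = { msr_write_task_reg, msr_send_eoi };
	static apic_regs_ops_t *apic_reg_ops = &mmio_ops;	/* chosen once */

	int
	main(void)
	{
		apic_reg_ops->apic_write_task_reg(0x70);	/* mask up to IPL 6 */
		apic_reg_ops->apic_send_eoi(0);

		apic_reg_ops = &msr_ops;	/* as if x2APIC had been selected */
		apic_reg_ops->apic_write_task_reg(0x70);
		return (0);
	}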
 634 
 635 /*
 636  * This macro is common code shared by the MMIO local APIC and the
 637  * X2APIC code paths.
 638  */
 639 #define APIC_INTR_EXIT() \
 640 { \
 641         cpu_infop = &apic_cpus[psm_get_cpu_id()]; \
 642         if (apic_level_intr[irq]) \
 643                 apic_reg_ops->apic_send_eoi(irq); \
 644         cpu_infop->aci_curipl = (uchar_t)prev_ipl; \
 645         /* An ISR above the current pri cannot be in progress */ \
 646         cpu_infop->aci_ISR_in_progress &= (2 << prev_ipl) - 1; \
 647 }
 648 
 649 /*
 650  * Any changes made to this function must also be made to the
 651  * X2APIC version of intr_exit.
 652  */
 653 void
 654 apic_intr_exit(int prev_ipl, int irq)
 655 {
 656         apic_cpus_info_t *cpu_infop;
 657 
 658         apic_reg_ops->apic_write_task_reg(apic_ipltopri[prev_ipl]);

 659 
 660         APIC_INTR_EXIT();
 661 }
 662 
 663 /*
 664  * Same as apic_intr_exit() except it uses MSR rather than MMIO
 665  * to access local apic registers.
 666  */
 667 void
 668 x2apic_intr_exit(int prev_ipl, int irq)
 669 {
 670         apic_cpus_info_t *cpu_infop;
 671 
 672         X2APIC_WRITE(APIC_TASK_REG, apic_ipltopri[prev_ipl]);
 673         APIC_INTR_EXIT();
 674 }
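
For reference, in x2APIC mode the task priority register is reached as MSR
0x808 (the x2APIC MSR range starts at 0x800 and the MMIO offset 0x80 is
shifted right by 4). A stubbed sketch of what an X2APIC_WRITE of
APIC_TASK_REG ultimately performs (a real kernel would execute wrmsr here):

	#include <stdio.h>
	#include <stdint.h>

	#define	IA32_X2APIC_TPR	0x808	/* 0x800 + (0x80 >> 4) */

	/* Stub for the privileged wrmsr instruction. */
	static void
	wrmsr_stub(uint32_t msr, uint64_t value)
	{
		printf("wrmsr(0x%x, 0x%llx)\n", msr, (unsigned long long)value);
	}

	int
	main(void)
	{
		wrmsr_stub(IA32_X2APIC_TPR, 0x70);	/* mask up to IPL 6 */
		return (0);
	}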
 675 
 676 intr_exit_fn_t
 677 psm_intr_exit_fn(void)
 678 {
 679         if (apic_mode == LOCAL_X2APIC)
 680                 return (x2apic_intr_exit);
 681 
 682         return (apic_intr_exit);
 683 }
 684 
 685 /*
 686  * Mask all interrupts below or equal to the given IPL.
 687  * Any changes made to this function must also be made to the
 688  * X2APIC version of setspl.
 689  */
 690 static void
 691 apic_setspl(int ipl)
 692 {
 693         apic_reg_ops->apic_write_task_reg(apic_ipltopri[ipl]);

 694 
 695         /* interrupts at ipl above this cannot be in progress */
 696         apic_cpus[psm_get_cpu_id()].aci_ISR_in_progress &= (2 << ipl) - 1;
 697         /*
 698          * This is a workaround for the ALR QSMP P5 machine: it gives
 699          * interrupts enough time to come in before the priority is
 700          * raised again during the idle() loop.
 701          */
 702         if (apic_setspl_delay)
 703                 (void) apic_reg_ops->apic_get_pri();
 704 }
 705 
 706 /*
 707  * X2APIC version of setspl.
 708  * Mask all interrupts below or equal to the given IPL.
 709  */
 710 static void
 711 x2apic_setspl(int ipl)
 712 {
 713         X2APIC_WRITE(APIC_TASK_REG, apic_ipltopri[ipl]);