4665 pcplusmp open-codes register operations
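
This webrev excerpt shows the interrupt entry/exit and setspl paths of the
pcplusmp PSM twice: first as they stood before the change (old file lines
554-759), then as modified (new file lines 554-714).  The old code open-codes
the task-priority (TPR) and EOI register accesses, picking between setcr8(),
MMIO writes (LOCAL_APIC_WRITE_REG / apicadr[APIC_TASK_REG]) and MSR writes
(X2APIC_WRITE) under #if defined(__amd64), apic_have_32bit_cr8 and apic_mode
checks at every call site.  The new code routes the same operations through
the apic_reg_ops operations vector (apic_write_task_reg, apic_send_eoi), which
the file already used for other register operations (for example
apic_reg_ops->apic_get_pri() in apic_setspl and apic_reg_ops->apic_send_eoi()
in APIC_INTR_EXIT), so the mode-specific logic lives in one place.

The following is a minimal, self-contained sketch of that pattern.  The struct
layout and all demo_* names are invented for this illustration; they are not
the kernel's actual apic_reg_ops definition.

#include <stdio.h>

/*
 * Hypothetical ops vector: one slot per register operation that differs
 * between MMIO (xAPIC) and MSR (x2APIC) access.
 */
typedef struct demo_reg_ops {
	void (*write_task_reg)(unsigned int pri);
	void (*send_eoi)(unsigned int irq);
} demo_reg_ops_t;

static void
demo_mmio_write_task_reg(unsigned int pri)
{
	printf("MMIO: store 0x%x to the memory-mapped TPR\n", pri);
}

static void
demo_mmio_send_eoi(unsigned int irq)
{
	(void)irq;
	printf("MMIO: store 0 to the memory-mapped EOI register\n");
}

static void
demo_x2apic_write_task_reg(unsigned int pri)
{
	printf("x2APIC: write the TPR MSR with 0x%x\n", pri);
}

static void
demo_x2apic_send_eoi(unsigned int irq)
{
	(void)irq;
	printf("x2APIC: write the EOI MSR with 0\n");
}

static const demo_reg_ops_t demo_mmio_ops = {
	demo_mmio_write_task_reg, demo_mmio_send_eoi
};
static const demo_reg_ops_t demo_x2apic_ops = {
	demo_x2apic_write_task_reg, demo_x2apic_send_eoi
};

/* Chosen once at APIC initialization; the hot paths just call through it. */
static const demo_reg_ops_t *demo_reg_ops = &demo_mmio_ops;

int
main(void)
{
	/* What the rewritten fast path boils down to. */
	demo_reg_ops->write_task_reg(0x60);
	demo_reg_ops->send_eoi(0);

	/* Switching APIC modes swaps one pointer, not every call site. */
	demo_reg_ops = &demo_x2apic_ops;
	demo_reg_ops->write_task_reg(0x60);
	demo_reg_ops->send_eoi(0);
	return (0);
}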


 554          * subtracts 0x20 from the vector before passing it to us.
 555          * (That's why APIC_BASE_VECT is 0x20.)
 556          */
 557         vector = (uchar_t)*vectorp;
 558 
 559         /* if interrupted by the clock, increment apic_nsec_since_boot */
 560         if (vector == apic_clkvect) {
 561                 if (!apic_oneshot) {
 562                         /* NOTE: this is not MT aware */
 563                         apic_hrtime_stamp++;
 564                         apic_nsec_since_boot += apic_nsec_per_intr;
 565                         apic_hrtime_stamp++;
 566                         last_count_read = apic_hertz_count;
 567                         apic_redistribute_compute();
 568                 }
 569 
 570                 /* We will avoid all the bookkeeping overhead for the clock */
 571                 nipl = apic_ipls[vector];
 572 
 573                 *vectorp = apic_vector_to_irq[vector + APIC_BASE_VECT];
 574                 if (apic_mode == LOCAL_APIC) {
 575 #if defined(__amd64)
 576                         setcr8((ulong_t)(apic_ipltopri[nipl] >>
 577                             APIC_IPL_SHIFT));
 578 #else
 579                         if (apic_have_32bit_cr8)
 580                                 setcr8((ulong_t)(apic_ipltopri[nipl] >>
 581                                     APIC_IPL_SHIFT));
 582                         else
 583                                 LOCAL_APIC_WRITE_REG(APIC_TASK_REG,
 584                                     (uint32_t)apic_ipltopri[nipl]);
 585 #endif
 586                         LOCAL_APIC_WRITE_REG(APIC_EOI_REG, 0);
 587                 } else {
 588                         X2APIC_WRITE(APIC_TASK_REG, apic_ipltopri[nipl]);
 589                         X2APIC_WRITE(APIC_EOI_REG, 0);
 590                 }
 591 
 592                 return (nipl);
 593         }
 594 
 595         cpu_infop = &apic_cpus[psm_get_cpu_id()];
 596 
 597         if (vector == (APIC_SPUR_INTR - APIC_BASE_VECT)) {
 598                 cpu_infop->aci_spur_cnt++;
 599                 return (APIC_INT_SPURIOUS);
 600         }
 601 
 602         /* Check if the vector we got is really what we need */
 603         if (apic_revector_pending) {
 604                 /*
 605                  * Disable interrupts for the duration of
 606                  * the vector translation to prevent a self-race for
 607                  * the apic_revector_lock.  This cannot be done
 608                  * in apic_xlate_vector because it is recursive and
 609                  * we want the vector translation to be atomic with
 610                  * respect to other (higher-priority) interrupts.
 611                  */
 612                 iflag = intr_clear();
 613                 vector = apic_xlate_vector(vector + APIC_BASE_VECT) -
 614                     APIC_BASE_VECT;
 615                 intr_restore(iflag);
 616         }
 617 
 618         nipl = apic_ipls[vector];
 619         *vectorp = irq = apic_vector_to_irq[vector + APIC_BASE_VECT];
 620 
 621         if (apic_mode == LOCAL_APIC) {
 622 #if defined(__amd64)
 623                 setcr8((ulong_t)(apic_ipltopri[nipl] >> APIC_IPL_SHIFT));
 624 #else
 625                 if (apic_have_32bit_cr8)
 626                         setcr8((ulong_t)(apic_ipltopri[nipl] >>
 627                             APIC_IPL_SHIFT));
 628                 else
 629                         LOCAL_APIC_WRITE_REG(APIC_TASK_REG,
 630                             (uint32_t)apic_ipltopri[nipl]);
 631 #endif
 632         } else {
 633                 X2APIC_WRITE(APIC_TASK_REG, apic_ipltopri[nipl]);
 634         }
 635 
 636         cpu_infop->aci_current[nipl] = (uchar_t)irq;
 637         cpu_infop->aci_curipl = (uchar_t)nipl;
 638         cpu_infop->aci_ISR_in_progress |= 1 << nipl;
 639 
 640         /*
 641          * apic_level_intr could have been assimilated into the irq struct.
 642          * but, having it as a character array is more efficient in terms of
 643          * cache usage. So, we leave it as is.
 644          */
 645         if (!apic_level_intr[irq]) {
 646                 if (apic_mode == LOCAL_APIC) {
 647                         LOCAL_APIC_WRITE_REG(APIC_EOI_REG, 0);
 648                 } else {
 649                         X2APIC_WRITE(APIC_EOI_REG, 0);
 650                 }
 651         }
 652 
 653 #ifdef  DEBUG
 654         APIC_DEBUG_BUF_PUT(vector);
 655         APIC_DEBUG_BUF_PUT(irq);
 656         APIC_DEBUG_BUF_PUT(nipl);
 657         APIC_DEBUG_BUF_PUT(psm_get_cpu_id());
 658         if ((apic_stretch_interrupts) && (apic_stretch_ISR & (1 << nipl)))
 659                 drv_usecwait(apic_stretch_interrupts);
 660 
 661         if (apic_break_on_cpu == psm_get_cpu_id())
 662                 apic_break();
 663 #endif /* DEBUG */
 664         return (nipl);
 665 }
 666 
 667 /*
 668  * This macro is common code used by both the MMIO local APIC and the
 669  * X2APIC local APIC paths.
 670  */
 671 #define APIC_INTR_EXIT() \
 672 { \
 673         cpu_infop = &apic_cpus[psm_get_cpu_id()]; \
 674         if (apic_level_intr[irq]) \
 675                 apic_reg_ops->apic_send_eoi(irq); \
 676         cpu_infop->aci_curipl = (uchar_t)prev_ipl; \
 677         /* ISR above current pri could not be in progress */ \
 678         cpu_infop->aci_ISR_in_progress &= (2 << prev_ipl) - 1; \
 679 }
 680 
 681 /*
 682  * Any changes made to this function must also change X2APIC
 683  * version of intr_exit.
 684  */
 685 void
 686 apic_intr_exit(int prev_ipl, int irq)
 687 {
 688         apic_cpus_info_t *cpu_infop;
 689 
 690 #if defined(__amd64)
 691         setcr8((ulong_t)(apic_ipltopri[prev_ipl] >> APIC_IPL_SHIFT));
 692 #else
 693         if (apic_have_32bit_cr8)
 694                 setcr8((ulong_t)(apic_ipltopri[prev_ipl] >> APIC_IPL_SHIFT));
 695         else
 696                 apicadr[APIC_TASK_REG] = apic_ipltopri[prev_ipl];
 697 #endif
 698 
 699         APIC_INTR_EXIT();
 700 }
 701 
 702 /*
 703  * Same as apic_intr_exit() except it uses MSR rather than MMIO
 704  * to access local apic registers.
 705  */
 706 void
 707 x2apic_intr_exit(int prev_ipl, int irq)
 708 {
 709         apic_cpus_info_t *cpu_infop;
 710 
 711         X2APIC_WRITE(APIC_TASK_REG, apic_ipltopri[prev_ipl]);
 712         APIC_INTR_EXIT();
 713 }
 714 
 715 intr_exit_fn_t
 716 psm_intr_exit_fn(void)
 717 {
 718         if (apic_mode == LOCAL_X2APIC)
 719                 return (x2apic_intr_exit);
 720 
 721         return (apic_intr_exit);
 722 }
 723 
 724 /*
 725  * Mask all interrupts below or equal to the given IPL.
 726  * Any changes made to this function must also change X2APIC
 727  * version of setspl.
 728  */
 729 static void
 730 apic_setspl(int ipl)
 731 {
 732 #if defined(__amd64)
 733         setcr8((ulong_t)(apic_ipltopri[ipl] >> APIC_IPL_SHIFT));
 734 #else
 735         if (apic_have_32bit_cr8)
 736                 setcr8((ulong_t)(apic_ipltopri[ipl] >> APIC_IPL_SHIFT));
 737         else
 738                 apicadr[APIC_TASK_REG] = apic_ipltopri[ipl];
 739 #endif
 740 
 741         /* interrupts at ipl above this cannot be in progress */
 742         apic_cpus[psm_get_cpu_id()].aci_ISR_in_progress &= (2 << ipl) - 1;
 743         /*
 744          * this is a patch fix for the ALR QSMP P5 machine, so that interrupts
 745          * have enough time to come in before the priority is raised again
 746          * during the idle() loop.
 747          */
 748         if (apic_setspl_delay)
 749                 (void) apic_reg_ops->apic_get_pri();
 750 }
 751 
 752 /*
 753  * X2APIC version of setspl.
 754  * Mask all interrupts below or equal to the given IPL
 755  */
 756 static void
 757 x2apic_setspl(int ipl)
 758 {
 759         X2APIC_WRITE(APIC_TASK_REG, apic_ipltopri[ipl]);

The same region of the file after the change, with the open-coded register
accesses replaced by calls through apic_reg_ops:

 554          * subtracts 0x20 from the vector before passing it to us.
 555          * (That's why APIC_BASE_VECT is 0x20.)
 556          */
 557         vector = (uchar_t)*vectorp;
 558 
 559         /* if interrupted by the clock, increment apic_nsec_since_boot */
 560         if (vector == apic_clkvect) {
 561                 if (!apic_oneshot) {
 562                         /* NOTE: this is not MT aware */
 563                         apic_hrtime_stamp++;
 564                         apic_nsec_since_boot += apic_nsec_per_intr;
 565                         apic_hrtime_stamp++;
 566                         last_count_read = apic_hertz_count;
 567                         apic_redistribute_compute();
 568                 }
 569 
 570                 /* We will avoid all the bookkeeping overhead for the clock */
 571                 nipl = apic_ipls[vector];
 572 
 573                 *vectorp = apic_vector_to_irq[vector + APIC_BASE_VECT];
 574 
 575                 apic_reg_ops->apic_write_task_reg(apic_ipltopri[nipl]);
 576                 apic_reg_ops->apic_send_eoi(0);
 577 
 578                 return (nipl);
 579         }
 580 
 581         cpu_infop = &apic_cpus[psm_get_cpu_id()];
 582 
 583         if (vector == (APIC_SPUR_INTR - APIC_BASE_VECT)) {
 584                 cpu_infop->aci_spur_cnt++;
 585                 return (APIC_INT_SPURIOUS);
 586         }
 587 
 588         /* Check if the vector we got is really what we need */
 589         if (apic_revector_pending) {
 590                 /*
 591                  * Disable interrupts for the duration of
 592                  * the vector translation to prevent a self-race for
 593                  * the apic_revector_lock.  This cannot be done
 594                  * in apic_xlate_vector because it is recursive and
 595                  * we want the vector translation to be atomic with
 596                  * respect to other (higher-priority) interrupts.
 597                  */
 598                 iflag = intr_clear();
 599                 vector = apic_xlate_vector(vector + APIC_BASE_VECT) -
 600                     APIC_BASE_VECT;
 601                 intr_restore(iflag);
 602         }
 603 
 604         nipl = apic_ipls[vector];
 605         *vectorp = irq = apic_vector_to_irq[vector + APIC_BASE_VECT];
 606 
 607         apic_reg_ops->apic_write_task_reg(apic_ipltopri[nipl]);
 608 
 609         cpu_infop->aci_current[nipl] = (uchar_t)irq;
 610         cpu_infop->aci_curipl = (uchar_t)nipl;
 611         cpu_infop->aci_ISR_in_progress |= 1 << nipl;
 612 
 613         /*
 614          * apic_level_intr could have been assimilated into the irq struct.
 615          * but, having it as a character array is more efficient in terms of
 616          * cache usage. So, we leave it as is.
 617          */
 618         if (!apic_level_intr[irq]) {
 619                 apic_reg_ops->apic_send_eoi(0);
 620         }
 621 
 622 #ifdef  DEBUG
 623         APIC_DEBUG_BUF_PUT(vector);
 624         APIC_DEBUG_BUF_PUT(irq);
 625         APIC_DEBUG_BUF_PUT(nipl);
 626         APIC_DEBUG_BUF_PUT(psm_get_cpu_id());
 627         if ((apic_stretch_interrupts) && (apic_stretch_ISR & (1 << nipl)))
 628                 drv_usecwait(apic_stretch_interrupts);
 629 
 630         if (apic_break_on_cpu == psm_get_cpu_id())
 631                 apic_break();
 632 #endif /* DEBUG */
 633         return (nipl);
 634 }
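
The clock fast path above updates apic_nsec_since_boot between two
apic_hrtime_stamp increments.  That is a sequence-counter style handshake: the
writer leaves the stamp odd while the update is in flight, and a reader
(presumably the gethrtime path, which is not part of this excerpt) samples the
stamp before and after reading the value and retries if it changed or is odd,
so it never consumes a torn update.  A minimal single-writer sketch, with
demo_* names invented for the illustration (memory barriers omitted):

#include <stdio.h>
#include <stdint.h>

static volatile uint32_t demo_stamp;		/* plays the role of apic_hrtime_stamp */
static volatile uint64_t demo_nsec_since_boot;	/* plays the role of apic_nsec_since_boot */

/* Writer (the clock interrupt): bracket the update with stamp increments. */
static void
demo_clock_tick(uint64_t nsec_per_intr)
{
	demo_stamp++;				/* odd: update in progress */
	demo_nsec_since_boot += nsec_per_intr;
	demo_stamp++;				/* even: update complete */
}

/* Reader: retry until the stamp is even and unchanged across the read. */
static uint64_t
demo_read_nsec(void)
{
	uint32_t before;
	uint64_t val;

	do {
		before = demo_stamp;
		val = demo_nsec_since_boot;
	} while ((before & 1) != 0 || demo_stamp != before);

	return (val);
}

int
main(void)
{
	demo_clock_tick(10000000);	/* one 10ms tick */
	printf("%llu ns since boot\n", (unsigned long long)demo_read_nsec());
	return (0);
}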
 635 
 636 /*
 637  * This macro is common code used by both the MMIO local APIC and the
 638  * X2APIC local APIC paths.
 639  */
 640 #define APIC_INTR_EXIT() \
 641 { \
 642         cpu_infop = &apic_cpus[psm_get_cpu_id()]; \
 643         if (apic_level_intr[irq]) \
 644                 apic_reg_ops->apic_send_eoi(irq); \
 645         cpu_infop->aci_curipl = (uchar_t)prev_ipl; \
 646         /* ISR above current pri could not be in progress */ \
 647         cpu_infop->aci_ISR_in_progress &= (2 << prev_ipl) - 1; \
 648 }
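
The aci_ISR_in_progress bookkeeping pairs the |= (1 << nipl) in the entry path
with the &= (2 << prev_ipl) - 1 in this macro.  (2 << prev_ipl) - 1 is a mask
with bits 0 through prev_ipl set, so the AND clears exactly the bits for IPLs
above the priority being restored, matching the comment that no ISR above the
current priority can still be in progress.  A small worked example:

#include <stdio.h>

int
main(void)
{
	unsigned int in_progress = 0;
	int prev_ipl = 4;

	in_progress |= 1U << 4;		/* ISR running at IPL 4 (entry path) */
	in_progress |= 1U << 9;		/* higher-priority ISR nested at IPL 9 */

	/* Leaving the IPL 9 handler with prev_ipl == 4 clears bits above 4. */
	in_progress &= (2U << prev_ipl) - 1;

	printf("mask=0x%x in_progress=0x%x\n",	/* prints mask=0x1f in_progress=0x10 */
	    (2U << prev_ipl) - 1, in_progress);
	return (0);
}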
 649 
 650 /*
 651  * Any changes made to this function must also change X2APIC
 652  * version of intr_exit.
 653  */
 654 void
 655 apic_intr_exit(int prev_ipl, int irq)
 656 {
 657         apic_cpus_info_t *cpu_infop;
 658 
 659         apic_reg_ops->apic_write_task_reg(apic_ipltopri[prev_ipl]);
 660 
 661         APIC_INTR_EXIT();
 662 }
 663 
 664 /*
 665  * Same as apic_intr_exit() except it uses MSR rather than MMIO
 666  * to access local apic registers.
 667  */
 668 void
 669 x2apic_intr_exit(int prev_ipl, int irq)
 670 {
 671         apic_cpus_info_t *cpu_infop;
 672 
 673         X2APIC_WRITE(APIC_TASK_REG, apic_ipltopri[prev_ipl]);
 674         APIC_INTR_EXIT();
 675 }
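
As the comment above says, x2apic_intr_exit reaches the local APIC through an
MSR rather than MMIO.  The X2APIC_WRITE macro itself is not part of this
excerpt; architecturally, each x2APIC register is addressed by an MSR at 0x800
plus the register's xAPIC MMIO offset divided by 16, so the task-priority
register at offset 0x80 becomes MSR 0x808 and the EOI register at offset 0xB0
becomes MSR 0x80B.  The sketch below illustrates only that mapping; the demo_*
names and the stubbed wrmsr are invented for the example and are not the
kernel's definitions.

#include <stdio.h>
#include <stdint.h>

#define	DEMO_X2APIC_MSR_BASE	0x800u
#define	DEMO_TPR_MMIO_OFFSET	0x80u	/* xAPIC task-priority register */
#define	DEMO_EOI_MMIO_OFFSET	0xb0u	/* xAPIC end-of-interrupt register */

/* Stub: wrmsr is a privileged instruction, so just report what would happen. */
static void
demo_wrmsr(uint32_t msr, uint64_t value)
{
	printf("wrmsr(0x%x, 0x%llx)\n", (unsigned)msr, (unsigned long long)value);
}

/* Architectural xAPIC-offset to x2APIC-MSR mapping: base + (offset >> 4). */
static void
demo_x2apic_write(uint32_t mmio_offset, uint64_t value)
{
	demo_wrmsr(DEMO_X2APIC_MSR_BASE + (mmio_offset >> 4), value);
}

int
main(void)
{
	demo_x2apic_write(DEMO_TPR_MMIO_OFFSET, 0x60);	/* TPR -> MSR 0x808 */
	demo_x2apic_write(DEMO_EOI_MMIO_OFFSET, 0);	/* EOI -> MSR 0x80b */
	return (0);
}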
 676 
 677 intr_exit_fn_t
 678 psm_intr_exit_fn(void)
 679 {
 680         if (apic_mode == LOCAL_X2APIC)
 681                 return (x2apic_intr_exit);
 682 
 683         return (apic_intr_exit);
 684 }
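
psm_intr_exit_fn() hands back the exit routine that matches the current
apic_mode, so the interrupt dispatcher can resolve it once and then call it
directly on every interrupt return instead of re-checking the mode each time;
the mode-specific routine can then also skip the apic_reg_ops indirection, as
x2apic_intr_exit does with its direct X2APIC_WRITE.  The fragment below is a
hypothetical, standalone illustration of that usage with invented demo_*
names; it is not the actual dispatcher code.

#include <stdio.h>

typedef void (*demo_intr_exit_fn_t)(int prev_ipl, int irq);

static int demo_x2apic_mode;	/* stands in for apic_mode == LOCAL_X2APIC */

static void
demo_apic_intr_exit(int prev_ipl, int irq)
{
	printf("xAPIC exit: ipl=%d irq=%d\n", prev_ipl, irq);
}

static void
demo_x2apic_intr_exit(int prev_ipl, int irq)
{
	printf("x2APIC exit: ipl=%d irq=%d\n", prev_ipl, irq);
}

static demo_intr_exit_fn_t
demo_psm_intr_exit_fn(void)
{
	return (demo_x2apic_mode ?
	    demo_x2apic_intr_exit : demo_apic_intr_exit);
}

int
main(void)
{
	/* Resolved once, for example when the dispatcher is set up ... */
	demo_intr_exit_fn_t intr_exit = demo_psm_intr_exit_fn();

	/* ... then called directly on every interrupt return. */
	intr_exit(4, 9);
	return (0);
}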
 685 
 686 /*
 687  * Mask all interrupts below or equal to the given IPL.
 688  * Any changes made to this function must also change X2APIC
 689  * version of setspl.
 690  */
 691 static void
 692 apic_setspl(int ipl)
 693 {
 694         apic_reg_ops->apic_write_task_reg(apic_ipltopri[ipl]);
 695 
 696         /* interrupts at ipl above this cannot be in progress */
 697         apic_cpus[psm_get_cpu_id()].aci_ISR_in_progress &= (2 << ipl) - 1;
 698         /*
 699          * this is a patch fix for the ALR QSMP P5 machine, so that interrupts
 700          * have enough time to come in before the priority is raised again
 701          * during the idle() loop.
 702          */
 703         if (apic_setspl_delay)
 704                 (void) apic_reg_ops->apic_get_pri();
 705 }
 706 
 707 /*
 708  * X2APIC version of setspl.
 709  * Mask all interrupts below or equal to the given IPL
 710  */
 711 static void
 712 x2apic_setspl(int ipl)
 713 {
 714         X2APIC_WRITE(APIC_TASK_REG, apic_ipltopri[ipl]);