XXXX introduce drv_sectohz
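The routines below convert a polling interval held in seconds into clock ticks for timeout(9F). The old code does this by multiplying by MICROSEC and calling drv_usectohz(9F); the new code passes the seconds value straight to a new drv_sectohz() routine, dropping the repeated unit conversion at each call site. The definition of drv_sectohz() is not shown in this excerpt; a minimal sketch, assuming it is nothing more than a seconds-to-ticks convenience over the existing drv_usectohz(9F):

	/*
	 * Sketch only -- not necessarily the definition introduced by this
	 * change.  Assumes drv_sectohz() simply wraps drv_usectohz(), with
	 * MICROSEC (microseconds per second) from <sys/time.h>.
	 */
	#define	drv_sectohz(x)	(drv_usectohz((x) * MICROSEC))

Old (drv_usectohz):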


 106 
 107         mutex_exit(&gcpu_xpv_polldata_lock);
 108 
 109         return (DDI_INTR_CLAIMED);
 110 }
 111 
 112 static void
 113 gcpu_xpv_mch_poll(void *arg)
 114 {
 115         cmi_hdl_t hdl = cmi_hdl_any();
 116 
 117         if (hdl != NULL) {
 118                 cmi_mc_logout(hdl, 0, 0);
 119                 cmi_hdl_rele(hdl);
 120         }
 121 
 122         if (arg == GCPU_XPV_MCH_POLL_REARM &&
 123             gcpu_xpv_mch_poll_interval_secs != 0) {
 124                 gcpu_xpv_mch_poll_timeoutid = timeout(gcpu_xpv_mch_poll,
 125                     GCPU_XPV_MCH_POLL_REARM,
 126                     drv_usectohz(gcpu_xpv_mch_poll_interval_secs * MICROSEC));
 127         }
 128 }
 129 
 130 /*
 131  * gcpu_mca_poll_init is called from gcpu_mca_init for each cpu handle
 132  * that we initialize for.  It should prepare for polling by allocating
 133  * control structures and the like, but must not kick polling off yet.
 134  *
 135  * Since we initialize all cpus in a serialized loop there is no race
 136  * on allocating the bankregs structure, nor in free'ing and enlarging
 137  * it if we find the number of MCA banks is not uniform in the system
 138  * (unlikely) since polling is only started post mp startup.
 139  */
 140 
 141 void
 142 gcpu_mca_poll_init(cmi_hdl_t hdl)
 143 {
 144         gcpu_data_t *gcpu = cmi_hdl_getcmidata(hdl);
 145         int nbanks = gcpu->gcpu_mca.gcpu_mca_nbanks;
 146         size_t sz = nbanks * GCPU_XPV_ARCH_NREGS * sizeof (cmi_mca_regs_t);


 166 gcpu_mca_poll_fini(cmi_hdl_t hdl)
 167 {
 168 }
 169 
 170 void
 171 gcpu_mca_poll_start(cmi_hdl_t hdl)
 172 {
 173         ASSERT(cmi_hdl_class(hdl) == CMI_HDL_SOLARIS_xVM_MCA);
 174         /*
 175          * We are on the boot cpu (cpu 0), called at the end of its
 176          * multiprocessor startup.
 177          */
 178         if (gcpu_xpv_poll_bankregs_sz != 0 && gcpu_xpv_virq_vect == -1) {
 179                 /*
 180                  * The hypervisor will poll MCA state for us, but it cannot
 181                  * poll MCH state so we do that via a timeout.
 182                  */
 183                 if (gcpu_xpv_mch_poll_interval_secs != 0) {
 184                         gcpu_xpv_mch_poll_timeoutid =
 185                             timeout(gcpu_xpv_mch_poll, GCPU_XPV_MCH_POLL_REARM,
 186                             drv_usectohz(gcpu_xpv_mch_poll_interval_secs *
 187                             MICROSEC));
 188                 }
 189 
 190                 /*
 191                  * Register handler for VIRQ_MCA; once this is in place
 192                  * the hypervisor will begin to forward polled MCA observations
 193                  * to us.
 194                  */
 195                 gcpu_xpv_virq_vect = ec_bind_virq_to_irq(VIRQ_MCA, 0);
 196                 (void) add_avintr(NULL, gcpu_xpv_virq_level,
 197                     (avfunc)gcpu_xpv_virq_intr, "MCA", gcpu_xpv_virq_vect,
 198                     NULL, NULL, NULL, NULL);
 199         }
 200 }

New (drv_sectohz):

 106 
 107         mutex_exit(&gcpu_xpv_polldata_lock);
 108 
 109         return (DDI_INTR_CLAIMED);
 110 }
 111 
 112 static void
 113 gcpu_xpv_mch_poll(void *arg)
 114 {
 115         cmi_hdl_t hdl = cmi_hdl_any();
 116 
 117         if (hdl != NULL) {
 118                 cmi_mc_logout(hdl, 0, 0);
 119                 cmi_hdl_rele(hdl);
 120         }
 121 
 122         if (arg == GCPU_XPV_MCH_POLL_REARM &&
 123             gcpu_xpv_mch_poll_interval_secs != 0) {
 124                 gcpu_xpv_mch_poll_timeoutid = timeout(gcpu_xpv_mch_poll,
 125                     GCPU_XPV_MCH_POLL_REARM,
 126                     drv_sectohz(gcpu_xpv_mch_poll_interval_secs));
 127         }
 128 }
 129 
 130 /*
 131  * gcpu_mca_poll_init is called from gcpu_mca_init for each cpu handle
 132  * that we initialize for.  It should prepare for polling by allocating
 133  * control structures and the like, but must not kick polling off yet.
 134  *
 135  * Since we initialize all cpus in a serialized loop there is no race
 136  * on allocating the bankregs structure, nor in free'ing and enlarging
 137  * it if we find the number of MCA banks is not uniform in the system
 138  * (unlikely) since polling is only started post mp startup.
 139  */
 140 
 141 void
 142 gcpu_mca_poll_init(cmi_hdl_t hdl)
 143 {
 144         gcpu_data_t *gcpu = cmi_hdl_getcmidata(hdl);
 145         int nbanks = gcpu->gcpu_mca.gcpu_mca_nbanks;
 146         size_t sz = nbanks * GCPU_XPV_ARCH_NREGS * sizeof (cmi_mca_regs_t);


 166 gcpu_mca_poll_fini(cmi_hdl_t hdl)
 167 {
 168 }
 169 
 170 void
 171 gcpu_mca_poll_start(cmi_hdl_t hdl)
 172 {
 173         ASSERT(cmi_hdl_class(hdl) == CMI_HDL_SOLARIS_xVM_MCA);
 174         /*
 175          * We are on the boot cpu (cpu 0), called at the end of its
 176          * multiprocessor startup.
 177          */
 178         if (gcpu_xpv_poll_bankregs_sz != 0 && gcpu_xpv_virq_vect == -1) {
 179                 /*
 180                  * The hypervisor will poll MCA state for us, but it cannot
 181                  * poll MCH state so we do that via a timeout.
 182                  */
 183                 if (gcpu_xpv_mch_poll_interval_secs != 0) {
 184                         gcpu_xpv_mch_poll_timeoutid =
 185                             timeout(gcpu_xpv_mch_poll, GCPU_XPV_MCH_POLL_REARM,
 186                             drv_sectohz(gcpu_xpv_mch_poll_interval_secs));
 187                 }
 188 
 189                 /*
 190                  * Register handler for VIRQ_MCA; once this is in place
 191                  * the hypervisor will begin to forward polled MCA observations
 192                  * to us.
 193                  */
 194                 gcpu_xpv_virq_vect = ec_bind_virq_to_irq(VIRQ_MCA, 0);
 195                 (void) add_avintr(NULL, gcpu_xpv_virq_level,
 196                     (avfunc)gcpu_xpv_virq_intr, "MCA", gcpu_xpv_virq_vect,
 197                     NULL, NULL, NULL, NULL);
 198         }
 199 }
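
At the call sites the change is mechanical: gcpu_xpv_mch_poll_interval_secs already holds seconds, so the explicit multiplication by MICROSEC simply disappears. Condensed from the listings above:

	/* old */
	gcpu_xpv_mch_poll_timeoutid = timeout(gcpu_xpv_mch_poll,
	    GCPU_XPV_MCH_POLL_REARM,
	    drv_usectohz(gcpu_xpv_mch_poll_interval_secs * MICROSEC));

	/* new */
	gcpu_xpv_mch_poll_timeoutid = timeout(gcpu_xpv_mch_poll,
	    GCPU_XPV_MCH_POLL_REARM,
	    drv_sectohz(gcpu_xpv_mch_poll_interval_secs));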