/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/sysmacros.h>
#include <sys/machsystm.h>
#include <sys/cpu.h>
#include <sys/intreg.h>
#include <sys/machcpuvar.h>
#include <vm/hat_sfmmu.h>
#include <sys/error.h>
#include <sys/hypervisor_api.h>

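/*
 * Register this CPU's four sun4v queues (cpu_mondo, dev_mondo,
 * resumable error, and non-resumable error) with the hypervisor.
 * The queue memory must already have been allocated by
 * cpu_intrq_setup() and its real addresses recorded in the machcpu.
 * Failure to configure any queue is fatal, since the CPU cannot
 * field interrupts or error reports without it.
 */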
void
cpu_intrq_register(struct cpu *cpu)
{
	struct machcpu *mcpup = &cpu->cpu_m;
	uint64_t ret;

	ret = hv_cpu_qconf(INTR_CPU_Q, mcpup->cpu_q_base_pa, cpu_q_entries);
	if (ret != H_EOK)
		cmn_err(CE_PANIC, "cpu%d: cpu_mondo queue configuration "
		    "failed, error %lu", cpu->cpu_id, ret);

	ret = hv_cpu_qconf(INTR_DEV_Q, mcpup->dev_q_base_pa, dev_q_entries);
	if (ret != H_EOK)
		cmn_err(CE_PANIC, "cpu%d: dev_mondo queue configuration "
		    "failed, error %lu", cpu->cpu_id, ret);

	ret = hv_cpu_qconf(CPU_RQ, mcpup->cpu_rq_base_pa, cpu_rq_entries);
	if (ret != H_EOK)
		cmn_err(CE_PANIC, "cpu%d: resumable error queue configuration "
		    "failed, error %lu", cpu->cpu_id, ret);

	ret = hv_cpu_qconf(CPU_NRQ, mcpup->cpu_nrq_base_pa, cpu_nrq_entries);
	if (ret != H_EOK)
		cmn_err(CE_PANIC, "cpu%d: non-resumable error queue "
		    "configuration failed, error %lu", cpu->cpu_id, ret);
}

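/*
 * Allocate this CPU's mondo data, cross-call CPU list, and the four
 * sun4v interrupt/error queues from physically contiguous memory,
 * recording both the kernel virtual address and the real address of
 * each block in the machcpu.  Returns 0 on success or ENOMEM if any
 * allocation fails.
 */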
int
cpu_intrq_setup(struct cpu *cpu)
{
	struct machcpu *mcpup = &cpu->cpu_m;
	size_t size;

	/*
	 * This routine returns an error as soon as any
	 * contig_mem_alloc() fails.  The caller is expected to call
	 * cpu_intrq_cleanup() (or cleanup_cpu_common(), which will),
	 * which cleanly frees only those blocks that were allocated.
	 */

	/*
	 * Allocate mondo data for xcalls.
	 */
	mcpup->mondo_data = contig_mem_alloc(INTR_REPORT_SIZE);

	if (mcpup->mondo_data == NULL) {
		cmn_err(CE_NOTE, "cpu%d: cpu mondo_data allocation failed",
		    cpu->cpu_id);
		return (ENOMEM);
	}
	/*
	 * va_to_pa() is too expensive to call for every cross call,
	 * so we do it once here at init time and save the result in
	 * the machcpu.
	 */
	mcpup->mondo_data_ra = va_to_pa(mcpup->mondo_data);

	/*
	 * Allocate a per-CPU list of CPU IDs (ncpu_guest_max entries)
	 * for xcalls.
	 */
	size = ncpu_guest_max * sizeof (uint16_t);
	if (size < INTR_REPORT_SIZE)
		size = INTR_REPORT_SIZE;

	/*
	 * contig_mem_alloc() requires size to be a power of 2.
	 * Increase size to a power of 2 if necessary.
	 */
	if (!ISP2(size)) {
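		/*
		 * highbit() returns the 1-based position of the most
		 * significant bit set, so for a non-power-of-2 size
		 * this rounds up to the next power of 2.
		 */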
		size = 1 << highbit(size);
	}

	mcpup->cpu_list = contig_mem_alloc(size);

	if (mcpup->cpu_list == NULL) {
		cmn_err(CE_NOTE, "cpu%d: cpu cpu_list allocation failed",
		    cpu->cpu_id);
		return (ENOMEM);
	}
	mcpup->cpu_list_ra = va_to_pa(mcpup->cpu_list);

	/*
	 * Allocate sun4v interrupt and error queues.
	 */
	size = cpu_q_entries * INTR_REPORT_SIZE;

	mcpup->cpu_q_va = contig_mem_alloc(size);

	if (mcpup->cpu_q_va == NULL) {
		cmn_err(CE_NOTE, "cpu%d: cpu intrq allocation failed",
		    cpu->cpu_id);
		return (ENOMEM);
	}
	mcpup->cpu_q_base_pa = va_to_pa(mcpup->cpu_q_va);
	mcpup->cpu_q_size = size;

	/*
	 * Allocate the dev_mondo queue.
	 */
	size = dev_q_entries * INTR_REPORT_SIZE;

	mcpup->dev_q_va = contig_mem_alloc(size);

	if (mcpup->dev_q_va == NULL) {
		cmn_err(CE_NOTE, "cpu%d: dev intrq allocation failed",
		    cpu->cpu_id);
		return (ENOMEM);
	}
	mcpup->dev_q_base_pa = va_to_pa(mcpup->dev_q_va);
	mcpup->dev_q_size = size;

	/*
	 * Allocate the resumable error queue and its kernel buffer:
	 * the first half of the doubled allocation backs the
	 * hypervisor queue, the second half is the kernel buffer.
	 */
	size = cpu_rq_entries * Q_ENTRY_SIZE;

	mcpup->cpu_rq_va = contig_mem_alloc(2 * size);

	if (mcpup->cpu_rq_va == NULL) {
		cmn_err(CE_NOTE, "cpu%d: resumable queue allocation failed",
		    cpu->cpu_id);
		return (ENOMEM);
	}
	mcpup->cpu_rq_base_pa = va_to_pa(mcpup->cpu_rq_va);
	mcpup->cpu_rq_size = size;
	/* zero out the memory */
	bzero(mcpup->cpu_rq_va, 2 * size);

	/*
	 * Allocate the non-resumable error queue and its kernel
	 * buffer, doubled for the same reason as the resumable queue
	 * above.
	 */
	size = cpu_nrq_entries * Q_ENTRY_SIZE;

	mcpup->cpu_nrq_va = contig_mem_alloc(2 * size);

	if (mcpup->cpu_nrq_va == NULL) {
		cmn_err(CE_NOTE, "cpu%d: nonresumable queue allocation failed",
		    cpu->cpu_id);
		return (ENOMEM);
	}
	mcpup->cpu_nrq_base_pa = va_to_pa(mcpup->cpu_nrq_va);
	mcpup->cpu_nrq_size = size;
	/* zero out the memory */
	bzero(mcpup->cpu_nrq_va, 2 * size);

	return (0);
}

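/*
 * Free any queue memory allocated by cpu_intrq_setup() and clear the
 * corresponding machcpu fields.  Each block is freed only if its
 * pointer is still set, so this is safe to call after a partial
 * setup failure.
 */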
void
cpu_intrq_cleanup(struct cpu *cpu)
{
	struct machcpu *mcpup = &cpu->cpu_m;
	size_t cpu_list_size;
	uint64_t cpu_q_size;
	uint64_t dev_q_size;
	uint64_t cpu_rq_size;
	uint64_t cpu_nrq_size;

	/*
	 * Free mondo data for xcalls.
	 */
	if (mcpup->mondo_data) {
		contig_mem_free(mcpup->mondo_data, INTR_REPORT_SIZE);
		mcpup->mondo_data = NULL;
		mcpup->mondo_data_ra = 0;
	}

	/*
	 * Free the per-CPU cross-call list, recomputing the same size
	 * that was passed to contig_mem_alloc() in cpu_intrq_setup().
	 */
	cpu_list_size = ncpu_guest_max * sizeof (uint16_t);
	if (cpu_list_size < INTR_REPORT_SIZE)
		cpu_list_size = INTR_REPORT_SIZE;

	/*
	 * contig_mem_alloc() required a power-of-2 size, so round
	 * cpu_list_size up the same way before freeing.
	 */
	if (!ISP2(cpu_list_size)) {
		cpu_list_size = 1 << highbit(cpu_list_size);
	}

	if (mcpup->cpu_list) {
		contig_mem_free(mcpup->cpu_list, cpu_list_size);
		mcpup->cpu_list = NULL;
		mcpup->cpu_list_ra = 0;
	}

	/*
	 * Free sun4v interrupt and error queues.
	 */
	if (mcpup->cpu_q_va) {
		cpu_q_size = cpu_q_entries * INTR_REPORT_SIZE;
		contig_mem_free(mcpup->cpu_q_va, cpu_q_size);
		mcpup->cpu_q_va = NULL;
		mcpup->cpu_q_base_pa = 0;
		mcpup->cpu_q_size = 0;
	}

	if (mcpup->dev_q_va) {
		dev_q_size = dev_q_entries * INTR_REPORT_SIZE;
		contig_mem_free(mcpup->dev_q_va, dev_q_size);
		mcpup->dev_q_va = NULL;
		mcpup->dev_q_base_pa = 0;
		mcpup->dev_q_size = 0;
	}

	if (mcpup->cpu_rq_va) {
		cpu_rq_size = cpu_rq_entries * Q_ENTRY_SIZE;
		contig_mem_free(mcpup->cpu_rq_va, 2 * cpu_rq_size);
		mcpup->cpu_rq_va = NULL;
		mcpup->cpu_rq_base_pa = 0;
		mcpup->cpu_rq_size = 0;
	}

	if (mcpup->cpu_nrq_va) {
		cpu_nrq_size = cpu_nrq_entries * Q_ENTRY_SIZE;
		contig_mem_free(mcpup->cpu_nrq_va, 2 * cpu_nrq_size);
		mcpup->cpu_nrq_va = NULL;
		mcpup->cpu_nrq_base_pa = 0;
		mcpup->cpu_nrq_size = 0;
	}
}