/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2012 Garrett D'Amore <garrett@damore.org>.  All rights reserved.
 */

/*
 * PX mmu initialization and configuration
 */
#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/async.h>
#include <sys/sysmacros.h>
#include <sys/sunddi.h>
#include <sys/ddi_impldefs.h>
#include <sys/vmem.h>
#include <sys/machsystm.h>	/* lddphys() */
#include <sys/iommutsb.h>
#include "px_obj.h"

int
px_mmu_attach(px_t *px_p)
{
	dev_info_t		*dip = px_p->px_dip;
	px_mmu_t		*mmu_p;
	uint32_t		tsb_i = 0;
	char			map_name[32];
	px_dvma_range_prop_t	*dvma_prop;
	int			dvma_prop_len;
	uint32_t		cache_size, tsb_entries;

	/*
	 * Allocate mmu state structure and link it to the
	 * px state structure.
	 */
	mmu_p = kmem_zalloc(sizeof (px_mmu_t), KM_SLEEP);
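	/*
	 * kmem_zalloc(KM_SLEEP) sleeps until memory is available and
	 * never returns NULL, so this check is purely defensive.
	 */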
	if (mmu_p == NULL)
		return (DDI_FAILURE);

	px_p->px_mmu_p = mmu_p;
	mmu_p->mmu_px_p = px_p;
	mmu_p->mmu_inst = ddi_get_instance(dip);

	/*
	 * Check for "virtual-dma" property that specifies
	 * the DVMA range.
	 */
	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "virtual-dma", (caddr_t)&dvma_prop, &dvma_prop_len) !=
	    DDI_PROP_SUCCESS) {

		DBG(DBG_ATTACH, dip, "Getting virtual-dma failed\n");

		kmem_free(mmu_p, sizeof (px_mmu_t));
		px_p->px_mmu_p = NULL;

		return (DDI_FAILURE);
	}

	mmu_p->mmu_dvma_base = dvma_prop->dvma_base;
	mmu_p->mmu_dvma_end = dvma_prop->dvma_base +
	    (dvma_prop->dvma_len - 1);
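	/* One TSB entry maps one MMU page, so the TSB covers the whole range. */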
	tsb_entries = MMU_BTOP(dvma_prop->dvma_len);

	kmem_free(dvma_prop, dvma_prop_len);

	/*
	 * Setup base and bounds for DVMA and bypass mappings.
	 */
	mmu_p->mmu_dvma_cache_locks =
	    kmem_zalloc(px_dvma_page_cache_entries, KM_SLEEP);

	mmu_p->dvma_base_pg = MMU_BTOP(mmu_p->mmu_dvma_base);
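	/*
	 * Presumably a cap on pages claimable through DDI_DMA_RESERVE:
	 * half of the TSB is held back for ordinary DVMA allocation.
	 */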
	mmu_p->mmu_dvma_reserve = tsb_entries >> 1;
	mmu_p->dvma_end_pg = MMU_BTOP(mmu_p->mmu_dvma_end);

	/*
	 * Create a virtual memory map for the DVMA address space.
	 * Reserve cache_size bytes of low DVMA space for the fast
	 * track cache.
	 */
	(void) snprintf(map_name, sizeof (map_name), "%s%d_dvma",
	    ddi_driver_name(dip), ddi_get_instance(dip));

	cache_size = MMU_PTOB(px_dvma_page_cache_entries *
	    px_dvma_page_cache_clustsz);
	mmu_p->mmu_dvma_fast_end = mmu_p->mmu_dvma_base +
	    cache_size - 1;
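	/*
	 * The vmem arena manages only the DVMA space above the fast
	 * track cache; fast track pages are handed out via the
	 * mmu_dvma_cache_locks array instead.
	 */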
	mmu_p->mmu_dvma_map = vmem_create(map_name,
	    (void *)(mmu_p->mmu_dvma_fast_end + 1),
	    MMU_PTOB(tsb_entries) - cache_size, MMU_PAGE_SIZE,
	    NULL, NULL, NULL, MMU_PAGE_SIZE, VM_SLEEP);

	mutex_init(&mmu_p->dvma_debug_lock, NULL, MUTEX_DRIVER, NULL);

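	/*
	 * Walk the TSB and reserve every DVMA page that already carries
	 * a valid mapping (e.g. one installed by firmware) so it is
	 * never handed out again.
	 */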
	for (tsb_i = 0; tsb_i < tsb_entries; tsb_i++) {
		r_addr_t ra = 0;
		io_attributes_t attr;
		caddr_t va;

		if (px_lib_iommu_getmap(px_p->px_dip, PCI_TSBID(0, tsb_i),
		    &attr, &ra) != DDI_SUCCESS)
			continue;

		va = (caddr_t)(MMU_PTOB(mmu_p->dvma_base_pg + tsb_i));

		if (va <= (caddr_t)mmu_p->mmu_dvma_fast_end) {
			uint32_t cache_i;

			/*
			 * The va is within the *fast* dvma range; therefore,
			 * lock its fast dvma page cache cluster in order to
			 * both preserve the TTE and prevent the use of this
			 * fast dvma page cache cluster by px_dvma_map_fast().
			 * The lock value 0xFF comes from ldstub().
			 */
			cache_i = tsb_i / px_dvma_page_cache_clustsz;
			ASSERT(cache_i < px_dvma_page_cache_entries);
			mmu_p->mmu_dvma_cache_locks[cache_i] = 0xFF;
		} else {
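			/*
			 * Claim this exact page from the vmem arena so
			 * later allocations cannot reuse it.
			 */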
			(void) vmem_xalloc(mmu_p->mmu_dvma_map, MMU_PAGE_SIZE,
			    MMU_PAGE_SIZE, 0, 0, va, va + MMU_PAGE_SIZE,
			    VM_NOSLEEP | VM_BESTFIT | VM_PANIC);
		}
	}

	return (DDI_SUCCESS);
}

void
px_mmu_detach(px_t *px_p)
{
	px_mmu_t *mmu_p = px_p->px_mmu_p;

	(void) px_lib_iommu_detach(px_p);

	/*
	 * Free the dvma resource map.
	 */
	vmem_destroy(mmu_p->mmu_dvma_map);

	kmem_free(mmu_p->mmu_dvma_cache_locks,
	    px_dvma_page_cache_entries);

	if (PX_DVMA_DBG_ON(mmu_p))
		px_dvma_debug_fini(mmu_p);

	mutex_destroy(&mmu_p->dvma_debug_lock);

	/*
	 * Free the mmu state structure.
	 */
	kmem_free(mmu_p, sizeof (px_mmu_t));
	px_p->px_mmu_p = NULL;
}

int
px_mmu_map_pages(px_mmu_t *mmu_p, ddi_dma_impl_t *mp, px_dvma_addr_t dvma_pg,
    size_t npages, size_t pfn_index)
{
	dev_info_t	*dip = mmu_p->mmu_px_p->px_dip;
	px_dvma_addr_t	pg_index = MMU_PAGE_INDEX(mmu_p, dvma_pg);
	io_attributes_t	attr = PX_GET_MP_TTE(mp->dmai_tte);

	ASSERT(npages <= mp->dmai_ndvmapages);
	DBG(DBG_MAP_WIN, dip, "px_mmu_map_pages:%x+%x=%x "
	    "npages=0x%x pfn_index=0x%x\n", (uint_t)mmu_p->dvma_base_pg,
	    (uint_t)pg_index, dvma_pg, (uint_t)npages, (uint_t)pfn_index);

	if (px_lib_iommu_map(dip, PCI_TSBID(0, pg_index), npages,
	    PX_ADD_ATTR_EXTNS(attr, mp->dmai_bdf), (void *)mp, pfn_index,
	    MMU_MAP_PFN) != DDI_SUCCESS) {
		DBG(DBG_MAP_WIN, dip, "px_mmu_map_pages: "
		    "px_lib_iommu_map failed\n");

		return (DDI_FAILURE);
	}

	if (!PX_MAP_BUFZONE(mp))
		goto done;

	DBG(DBG_MAP_WIN, dip, "px_mmu_map_pages: redzone pg=%x\n",
	    pg_index + npages);

	ASSERT(PX_HAS_REDZONE(mp));

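	/*
	 * Map one extra page past the window, backed by the buffer's
	 * last PFN, presumably so a device prefetching past the end of
	 * the buffer still hits a valid translation.
	 */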
	if (px_lib_iommu_map(dip, PCI_TSBID(0, pg_index + npages), 1,
	    PX_ADD_ATTR_EXTNS(attr, mp->dmai_bdf), (void *)mp,
	    pfn_index + npages - 1, MMU_MAP_PFN) != DDI_SUCCESS) {
		DBG(DBG_MAP_WIN, dip, "px_mmu_map_pages: mapping "
		    "REDZONE page failed\n");

		if (px_lib_iommu_demap(dip, PCI_TSBID(0, pg_index), npages)
		    != DDI_SUCCESS) {
			DBG(DBG_MAP_WIN, dip, "px_lib_iommu_demap: failed\n");
		}
		return (DDI_FAILURE);
	}

done:
	if (PX_DVMA_DBG_ON(mmu_p))
		px_dvma_alloc_debug(mmu_p, (char *)mp->dmai_mapping,
		    mp->dmai_size, mp);

	return (DDI_SUCCESS);
}

void
px_mmu_unmap_pages(px_mmu_t *mmu_p, ddi_dma_impl_t *mp, px_dvma_addr_t dvma_pg,
    uint_t npages)
{
	px_dvma_addr_t	pg_index = MMU_PAGE_INDEX(mmu_p, dvma_pg);

	DBG(DBG_UNMAP_WIN, mmu_p->mmu_px_p->px_dip,
	    "px_mmu_unmap_pages:%x+%x=%x npages=0x%x\n",
	    (uint_t)mmu_p->dvma_base_pg, (uint_t)pg_index, dvma_pg,
	    (uint_t)npages);

	if (px_lib_iommu_demap(mmu_p->mmu_px_p->px_dip,
	    PCI_TSBID(0, pg_index), npages) != DDI_SUCCESS) {
		DBG(DBG_UNMAP_WIN, mmu_p->mmu_px_p->px_dip,
		    "px_lib_iommu_demap: failed\n");
	}

	if (!PX_MAP_BUFZONE(mp))
		return;

	DBG(DBG_UNMAP_WIN, mmu_p->mmu_px_p->px_dip, "px_mmu_unmap_pages: "
	    "redzone pg=%x\n", pg_index + npages);

	ASSERT(PX_HAS_REDZONE(mp));

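	/*
	 * Tear down the extra redzone page that px_mmu_map_pages()
	 * mapped past the end of the window.
	 */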
	if (px_lib_iommu_demap(mmu_p->mmu_px_p->px_dip,
	    PCI_TSBID(0, pg_index + npages), 1) != DDI_SUCCESS) {
		DBG(DBG_UNMAP_WIN, mmu_p->mmu_px_p->px_dip,
		    "px_lib_iommu_demap: failed\n");
	}
}

/*
 * px_mmu_map_window - map a dvma window into the mmu
 * used by: px_dma_win()
 * return value: DDI_SUCCESS, or the failure code from px_mmu_map_pages()
 */
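/*
 * Window layout, as reconstructed from the arithmetic below: window 0
 * retains the object's initial page offset (dmai_roffset) while every
 * later window starts page aligned, so obj_off subtracts that offset
 * back out of the running byte count.
 */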
/*ARGSUSED*/
int
px_mmu_map_window(px_mmu_t *mmu_p, ddi_dma_impl_t *mp, px_window_t win_no)
{
	uint32_t obj_pg0_off = mp->dmai_roffset;
	uint32_t win_pg0_off = win_no ? 0 : obj_pg0_off;
	size_t win_size = mp->dmai_winsize;
	size_t pfn_index = win_size * win_no;			/* temp value */
	size_t obj_off = win_no ? pfn_index - obj_pg0_off : 0;	/* xferred sz */
	px_dvma_addr_t dvma_pg = MMU_BTOP(mp->dmai_mapping);
	size_t res_size = mp->dmai_object.dmao_size - obj_off + win_pg0_off;
	int ret = DDI_SUCCESS;

	ASSERT(!(win_size & MMU_PAGE_OFFSET));
	if (win_no >= mp->dmai_nwin)
		return (ret);
	if (res_size < win_size)	/* last window */
		win_size = res_size;	/* mp->dmai_winsize unchanged */

	mp->dmai_mapping = MMU_PTOB(dvma_pg) | win_pg0_off;
	mp->dmai_size = win_size - win_pg0_off;	/* cur win xferrable size */
	mp->dmai_offset = obj_off;		/* win offset into object */
	pfn_index = MMU_BTOP(pfn_index);	/* index into pfnlist */
	ret = px_mmu_map_pages(mmu_p, mp, dvma_pg, MMU_BTOPR(win_size),
	    pfn_index);

	return (ret);
}

/*
 * px_mmu_unmap_window
 * This routine is called to break down the mmu mappings for a dvma window.
 * Non-partial mappings are treated as a single-window mapping.
 * used by: px_dma_unbindhdl(), px_dma_window(),
 *	and px_dma_ctlops() - DDI_DMA_FREE, DDI_DMA_MOVWIN, DDI_DMA_NEXTWIN
 * return value: none
 */
/*ARGSUSED*/
void
px_mmu_unmap_window(px_mmu_t *mmu_p, ddi_dma_impl_t *mp)
{
	px_dvma_addr_t dvma_pg = MMU_BTOP(mp->dmai_mapping);
	uint_t npages = MMU_BTOP(mp->dmai_winsize);

	px_mmu_unmap_pages(mmu_p, mp, dvma_pg, npages);

	if (PX_DVMA_DBG_ON(mmu_p))
		px_dvma_free_debug(mmu_p, (char *)mp->dmai_mapping,
		    mp->dmai_size, mp);
}


#if 0
/*
 * The following table is for reference only. It denotes the
 * TSB table size, measured in 8-byte entries, as encoded by
 * bits 3:0 of the MMU TSB CTRL REG.
 */
static int px_mmu_tsb_sizes[] = {
	0x0,		/* 1K */
	0x1,		/* 2K */
	0x2,		/* 4K */
	0x3,		/* 8K */
	0x4,		/* 16K */
	0x5,		/* 32K */
	0x6,		/* 64K */
	0x7,		/* 128K */
	0x8		/* 256K */
};
#endif

static char *px_mmu_errsts[] = {
	"Protection Error", "Invalid Error", "Timeout", "ECC Error(UE)"
};

/*ARGSUSED*/
static int
px_log_mmu_err(px_t *px_p)
{
	/*
	 * Placeholder; the correct error bits still need to be logged.
	 */
	return (0);
}