/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Machine frame segment driver.  This segment driver allows dom0 processes
 * to map pages of other domains or of Xen itself (e.g. during save/restore).
 * ioctl()s on the privcmd driver supply the MFN values backing each mapping,
 * and we map them into the process's address space at that time.  This
 * driver does not support demand-faulting, because some of the ioctl()s
 * require the mappings to be established (or to fail) immediately.
 */
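
/*
 * Illustrative flow (a sketch, not part of this file): a dom0 process
 * mmap()s a range backed by the privcmd driver, then issues an ioctl()
 * such as IOCTL_PRIVCMD_MMAPBATCH naming the foreign MFNs; the driver in
 * turn calls segmf_add_mfns() below to establish those mappings eagerly.
 */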

#include <sys/types.h>
#include <sys/systm.h>
#include <sys/vmsystm.h>
#include <sys/mman.h>
#include <sys/errno.h>
#include <sys/kmem.h>
#include <sys/cmn_err.h>
#include <sys/vnode.h>
#include <sys/conf.h>
#include <sys/debug.h>
#include <sys/lgrp.h>
#include <sys/hypervisor.h>

#include <vm/page.h>
#include <vm/hat.h>
#include <vm/as.h>
#include <vm/seg.h>

#include <vm/hat_pte.h>
#include <vm/hat_i86.h>
#include <vm/seg_mf.h>

#include <sys/fs/snode.h>

#define VTOCVP(vp)      (VTOS(vp)->s_commonvp)

typedef struct segmf_mfn_s {
        mfn_t           m_mfn;          /* foreign machine frame number */
} segmf_mfn_t;

/* g_flags */
#define SEGMF_GFLAGS_WR         0x1     /* mapping is writable */
#define SEGMF_GFLAGS_MAPPED     0x2     /* gref is currently mapped */
typedef struct segmf_gref_s {
        uint64_t        g_ptep;         /* machine address of the PTE */
        grant_ref_t     g_gref;         /* grant reference */
        uint32_t        g_flags;
        grant_handle_t  g_handle;       /* hypervisor mapping handle */
} segmf_gref_t;

typedef union segmf_mu_u {
        segmf_mfn_t     m;
        segmf_gref_t    g;
} segmf_mu_t;

typedef enum {
        SEGMF_MAP_EMPTY = 0,
        SEGMF_MAP_MFN,
        SEGMF_MAP_GREF
} segmf_map_type_t;

typedef struct segmf_map_s {
        segmf_map_type_t        t_type;
        segmf_mu_t              u;
} segmf_map_t;

struct segmf_data {
        kmutex_t        lock;
        struct vnode    *vp;
        uchar_t         prot;
        uchar_t         maxprot;
        size_t          softlockcnt;
        domid_t         domid;          /* the one foreign domain we map */
        segmf_map_t     *map;           /* per-page mapping state */
};

static struct seg_ops segmf_ops;

static int segmf_fault_gref_range(struct seg *seg, caddr_t addr, size_t len);

static struct segmf_data *
segmf_data_zalloc(struct seg *seg)
{
        struct segmf_data *data = kmem_zalloc(sizeof (*data), KM_SLEEP);

        mutex_init(&data->lock, "segmf.lock", MUTEX_DEFAULT, NULL);
        seg->s_ops = &segmf_ops;
        seg->s_data = data;
        return (data);
}
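
/*
 * Create a segmf segment: allocate the per-page map, take a hold on the
 * backing device vnode, and register the mapping via VOP_ADDMAP().  The
 * map starts out empty; MFNs or grant references are added later.
 */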
int
segmf_create(struct seg *seg, void *args)
{
        struct segmf_crargs *a = args;
        struct segmf_data *data;
        struct as *as = seg->s_as;
        pgcnt_t i, npages = seg_pages(seg);
        int error;

        hat_map(as->a_hat, seg->s_base, seg->s_size, HAT_MAP);

        data = segmf_data_zalloc(seg);
        data->vp = specfind(a->dev, VCHR);
        data->prot = a->prot;
        data->maxprot = a->maxprot;

        data->map = kmem_alloc(npages * sizeof (segmf_map_t), KM_SLEEP);
        for (i = 0; i < npages; i++) {
                data->map[i].t_type = SEGMF_MAP_EMPTY;
        }

        error = VOP_ADDMAP(VTOCVP(data->vp), 0, as, seg->s_base, seg->s_size,
            data->prot, data->maxprot, MAP_SHARED, CRED(), NULL);

        if (error != 0)
                hat_unload(as->a_hat,
                    seg->s_base, seg->s_size, HAT_UNLOAD_UNMAP);
        return (error);
}

/*
 * Duplicate a seg and return the new segment in newseg.
 */
static int
segmf_dup(struct seg *seg, struct seg *newseg)
{
        struct segmf_data *data = seg->s_data;
        struct segmf_data *ndata;
        pgcnt_t npages = seg_pages(newseg);
        size_t sz;

        ndata = segmf_data_zalloc(newseg);

        VN_HOLD(data->vp);
        ndata->vp = data->vp;
        ndata->prot = data->prot;
        ndata->maxprot = data->maxprot;
        ndata->domid = data->domid;

        sz = npages * sizeof (segmf_map_t);
        ndata->map = kmem_alloc(sz, KM_SLEEP);
        bcopy(data->map, ndata->map, sz);

        return (VOP_ADDMAP(VTOCVP(ndata->vp), 0, newseg->s_as,
            newseg->s_base, newseg->s_size, ndata->prot, ndata->maxprot,
            MAP_SHARED, CRED(), NULL));
}

/*
 * We only support unmapping the whole segment, and we automatically unlock
 * what we previously soft-locked.
 */
static int
segmf_unmap(struct seg *seg, caddr_t addr, size_t len)
{
        struct segmf_data *data = seg->s_data;
        offset_t off;

        if (addr < seg->s_base || addr + len > seg->s_base + seg->s_size ||
            (len & PAGEOFFSET) || ((uintptr_t)addr & PAGEOFFSET))
                panic("segmf_unmap");

        if (addr != seg->s_base || len != seg->s_size)
                return (ENOTSUP);

        hat_unload(seg->s_as->a_hat, addr, len,
            HAT_UNLOAD_UNMAP | HAT_UNLOAD_UNLOCK);

        off = (offset_t)seg_page(seg, addr);

        ASSERT(data->vp != NULL);

        (void) VOP_DELMAP(VTOCVP(data->vp), off, seg->s_as, addr, len,
            data->prot, data->maxprot, MAP_SHARED, CRED(), NULL);

        seg_free(seg);
        return (0);
}

static void
segmf_free(struct seg *seg)
{
        struct segmf_data *data = seg->s_data;
        pgcnt_t npages = seg_pages(seg);

        kmem_free(data->map, npages * sizeof (segmf_map_t));
        VN_RELE(data->vp);
        mutex_destroy(&data->lock);
        kmem_free(data, sizeof (*data));
}
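
/*
 * Install a single foreign mapping at addr: look up the MFN recorded for
 * this page, load a placeholder mapping via the HAT, then rewrite it with
 * a foreign PTE through the hypervisor.  For F_SOFTLOCK the mapping is
 * also locked and softlockcnt is bumped.
 */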
static int segmf_faultpage_debug = 0;
/*ARGSUSED*/
static int
segmf_faultpage(struct hat *hat, struct seg *seg, caddr_t addr,
    enum fault_type type, uint_t prot)
{
        struct segmf_data *data = seg->s_data;
        uint_t hat_flags = HAT_LOAD_NOCONSIST;
        mfn_t mfn;
        x86pte_t pte;
        segmf_map_t *map;
        uint_t idx;

        idx = seg_page(seg, addr);
        map = &data->map[idx];
        ASSERT(map->t_type == SEGMF_MAP_MFN);

        mfn = map->u.m.m_mfn;

        if (type == F_SOFTLOCK) {
                mutex_enter(&freemem_lock);
                data->softlockcnt++;
                mutex_exit(&freemem_lock);
                hat_flags |= HAT_LOAD_LOCK;
        } else
                hat_flags |= HAT_LOAD;

        if (segmf_faultpage_debug > 0) {
                uprintf("segmf_faultpage: addr %p domid %x mfn %lx prot %x\n",
                    (void *)addr, data->domid, mfn, prot);
                segmf_faultpage_debug--;
        }

        /*
         * Ask the HAT to load a throwaway mapping to page zero, then
         * overwrite it with our foreign domain mapping.  It gets removed
         * later via hat_unload().
         */
        hat_devload(hat, addr, MMU_PAGESIZE, (pfn_t)0,
            PROT_READ | HAT_UNORDERED_OK, hat_flags);
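
        /*
         * Construct the new PTE by hand: the foreign MFN goes in the frame
         * field, PT_FOREIGN marks the mapping as not backed by a local
         * page_t, and PT_WRITABLE is set only when write access was
         * requested.
         */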
        pte = mmu_ptob((x86pte_t)mfn) | PT_VALID | PT_USER | PT_FOREIGN;
        if (prot & PROT_WRITE)
                pte |= PT_WRITABLE;

        if (HYPERVISOR_update_va_mapping_otherdomain((uintptr_t)addr, pte,
            UVMF_INVLPG | UVMF_ALL, data->domid) != 0) {
                hat_flags = HAT_UNLOAD_UNMAP;

                if (type == F_SOFTLOCK) {
                        hat_flags |= HAT_UNLOAD_UNLOCK;
                        mutex_enter(&freemem_lock);
                        data->softlockcnt--;
                        mutex_exit(&freemem_lock);
                }

                hat_unload(hat, addr, MMU_PAGESIZE, hat_flags);
                return (FC_MAKE_ERR(EFAULT));
        }

        return (0);
}

static int
seg_rw_to_prot(enum seg_rw rw)
{
        switch (rw) {
        case S_READ:
                return (PROT_READ);
        case S_WRITE:
                return (PROT_WRITE);
        case S_EXEC:
                return (PROT_EXEC);
        case S_OTHER:
        default:
                break;
        }
        return (PROT_READ | PROT_WRITE | PROT_EXEC);
}
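
/*
 * Drop the soft locks on [addr, addr + len), decrement softlockcnt, and
 * wake up anyone blocked in as_unmap() waiting for the count to drain.
 */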
static void
segmf_softunlock(struct hat *hat, struct seg *seg, caddr_t addr, size_t len)
{
        struct segmf_data *data = seg->s_data;

        hat_unlock(hat, addr, len);

        mutex_enter(&freemem_lock);
        ASSERT(data->softlockcnt >= btopr(len));
        data->softlockcnt -= btopr(len);
        mutex_exit(&freemem_lock);

        if (data->softlockcnt == 0) {
                struct as *as = seg->s_as;

                if (AS_ISUNMAPWAIT(as)) {
                        mutex_enter(&as->a_contents);
                        if (AS_ISUNMAPWAIT(as)) {
                                AS_CLRUNMAPWAIT(as);
                                cv_broadcast(&as->a_cv);
                        }
                        mutex_exit(&as->a_contents);
                }
        }
}
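
/*
 * Fault in (and for F_SOFTLOCK, lock) each page of [addr, addr + len).
 * On failure, any pages already soft-locked are unlocked again so the
 * segment is left in a consistent state.
 */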
static int
segmf_fault_range(struct hat *hat, struct seg *seg, caddr_t addr, size_t len,
    enum fault_type type, enum seg_rw rw)
{
        struct segmf_data *data = seg->s_data;
        int error = 0;
        caddr_t a;

        if ((data->prot & seg_rw_to_prot(rw)) == 0)
                return (FC_PROT);

        /* loop over the address range handling each fault */

        for (a = addr; a < addr + len; a += PAGESIZE) {
                error = segmf_faultpage(hat, seg, a, type, data->prot);
                if (error != 0)
                        break;
        }

        if (error != 0 && type == F_SOFTLOCK) {
                size_t done = (size_t)(a - addr);

                /*
                 * Undo what's been done so far.
                 */
                if (done > 0)
                        segmf_softunlock(hat, seg, addr, done);
        }

        return (error);
}

/*
 * We never demand-fault for seg_mf: all mappings are established eagerly
 * at ioctl() time, so any fault that reaches here is an error.
 */
/*ARGSUSED*/
static int
segmf_fault(struct hat *hat, struct seg *seg, caddr_t addr, size_t len,
    enum fault_type type, enum seg_rw rw)
{
        return (FC_MAKE_ERR(EFAULT));
}

/*ARGSUSED*/
static int
segmf_faulta(struct seg *seg, caddr_t addr)
{
        return (0);
}

/*ARGSUSED*/
static int
segmf_setprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
{
        return (EINVAL);
}

/*ARGSUSED*/
static int
segmf_checkprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
{
        return (EINVAL);
}

/*ARGSUSED*/
static int
segmf_kluster(struct seg *seg, caddr_t addr, ssize_t delta)
{
        return (-1);
}

/*ARGSUSED*/
static int
segmf_sync(struct seg *seg, caddr_t addr, size_t len, int attr, uint_t flags)
{
        return (0);
}

/*
 * XXPV Hmm.  Should we say that mf mappings are "in core?"
 */

/*ARGSUSED*/
static size_t
segmf_incore(struct seg *seg, caddr_t addr, size_t len, char *vec)
{
        size_t v;

        for (v = 0, len = (len + PAGEOFFSET) & PAGEMASK; len;
            len -= PAGESIZE, v += PAGESIZE)
                *vec++ = 1;
        return (v);
}

/*ARGSUSED*/
static int
segmf_lockop(struct seg *seg, caddr_t addr,
    size_t len, int attr, int op, ulong_t *lockmap, size_t pos)
{
        return (0);
}

static int
segmf_getprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv)
{
        struct segmf_data *data = seg->s_data;
        pgcnt_t pgno = seg_page(seg, addr + len) - seg_page(seg, addr) + 1;

        if (pgno != 0) {
                do {
                        protv[--pgno] = data->prot;
                } while (pgno != 0);
        }
        return (0);
}

static u_offset_t
segmf_getoffset(struct seg *seg, caddr_t addr)
{
        return (addr - seg->s_base);
}

/*ARGSUSED*/
static int
segmf_gettype(struct seg *seg, caddr_t addr)
{
        return (MAP_SHARED);
}

/*ARGSUSED1*/
static int
segmf_getvp(struct seg *seg, caddr_t addr, struct vnode **vpp)
{
        struct segmf_data *data = seg->s_data;

        *vpp = VTOCVP(data->vp);
        return (0);
}

/*ARGSUSED*/
static int
segmf_advise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
{
        return (0);
}

/*ARGSUSED*/
static void
segmf_dump(struct seg *seg)
{}

/*ARGSUSED*/
static int
segmf_pagelock(struct seg *seg, caddr_t addr, size_t len,
    struct page ***ppp, enum lock_type type, enum seg_rw rw)
{
        return (ENOTSUP);
}

/*ARGSUSED*/
static int
segmf_setpagesize(struct seg *seg, caddr_t addr, size_t len, uint_t szc)
{
        return (ENOTSUP);
}

static int
segmf_getmemid(struct seg *seg, caddr_t addr, memid_t *memid)
{
        struct segmf_data *data = seg->s_data;

        memid->val[0] = (uintptr_t)VTOCVP(data->vp);
        memid->val[1] = (uintptr_t)seg_page(seg, addr);
        return (0);
}

/*ARGSUSED*/
static int
segmf_capable(struct seg *seg, segcapability_t capability)
{
        return (0);
}

/*
 * Add a set of contiguous foreign MFNs to the segment, soft-locking them.
 * The pre-faulting is necessary due to live migration; in particular we
 * must return an error in response to IOCTL_PRIVCMD_MMAPBATCH rather than
 * faulting later on a bad MFN.  Whilst this isn't necessary for the other
 * MMAP ioctl()s, we lock them too, as they should be transitory.
 */
int
segmf_add_mfns(struct seg *seg, caddr_t addr, mfn_t mfn,
    pgcnt_t pgcnt, domid_t domid)
{
        struct segmf_data *data = seg->s_data;
        pgcnt_t base;
        faultcode_t fc;
        pgcnt_t i;
        int error = 0;

        if (seg->s_ops != &segmf_ops)
                return (EINVAL);

        /*
         * Don't mess with dom0.
         *
         * Only allow the domid to be set once for the segment.  After
         * that, attempts to add mappings to this segment for other
         * domains explicitly fail.
         */

        if (domid == 0 || domid == DOMID_SELF)
                return (EACCES);

        mutex_enter(&data->lock);

        if (data->domid == 0)
                data->domid = domid;

        if (data->domid != domid) {
                error = EINVAL;
                goto out;
        }

        base = seg_page(seg, addr);

        for (i = 0; i < pgcnt; i++) {
                data->map[base + i].t_type = SEGMF_MAP_MFN;
                data->map[base + i].u.m.m_mfn = mfn++;
        }

        fc = segmf_fault_range(seg->s_as->a_hat, seg, addr,
            pgcnt * MMU_PAGESIZE, F_SOFTLOCK, S_OTHER);

        if (fc != 0) {
                error = fc_decode(fc);
                for (i = 0; i < pgcnt; i++) {
                        data->map[base + i].t_type = SEGMF_MAP_EMPTY;
                }
        }

out:
        mutex_exit(&data->lock);
        return (error);
}
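
/*
 * Add a set of grant references to the segment at addr, then map them in
 * immediately via segmf_fault_gref_range().  As with MFNs, the domid may
 * only be set once per segment.
 */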
int
segmf_add_grefs(struct seg *seg, caddr_t addr, uint_t flags,
    grant_ref_t *grefs, uint_t cnt, domid_t domid)
{
        struct segmf_data *data;
        segmf_map_t *map;
        faultcode_t fc;
        uint_t idx;
        uint_t i;
        int e;

        if (seg->s_ops != &segmf_ops)
                return (EINVAL);

        /*
         * Don't mess with dom0.
         *
         * Only allow the domid to be set once for the segment.  After
         * that, attempts to add mappings to this segment for other
         * domains explicitly fail.
         */

        if (domid == 0 || domid == DOMID_SELF)
                return (EACCES);

        data = seg->s_data;
        idx = seg_page(seg, addr);
        map = &data->map[idx];
        e = 0;

        mutex_enter(&data->lock);

        if (data->domid == 0)
                data->domid = domid;

        if (data->domid != domid) {
                e = EINVAL;
                goto out;
        }

        /* store away the grefs passed in, then fault in the pages */
        for (i = 0; i < cnt; i++) {
                map[i].t_type = SEGMF_MAP_GREF;
                map[i].u.g.g_gref = grefs[i];
                map[i].u.g.g_handle = 0;
                map[i].u.g.g_flags = 0;
                if (flags & SEGMF_GREF_WR) {
                        map[i].u.g.g_flags |= SEGMF_GFLAGS_WR;
                }
        }
        fc = segmf_fault_gref_range(seg, addr, cnt);
        if (fc != 0) {
                e = fc_decode(fc);
                /* undo the entries we set above, relative to addr */
                for (i = 0; i < cnt; i++) {
                        map[i].t_type = SEGMF_MAP_EMPTY;
                }
        }

out:
        mutex_exit(&data->lock);
        return (e);
}
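
/*
 * Unmap the cnt grant references mapped at addr (bounded by
 * SEGMF_MAX_GREFS) and mark their map entries empty.  Returns 0 on
 * success and -1 on failure.
 */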
int
segmf_release_grefs(struct seg *seg, caddr_t addr, uint_t cnt)
{
        gnttab_unmap_grant_ref_t mapop[SEGMF_MAX_GREFS];
        struct segmf_data *data;
        segmf_map_t *map;
        uint_t idx;
        long e;
        int i;
        int n;

        if (cnt > SEGMF_MAX_GREFS) {
                return (-1);
        }

        idx = seg_page(seg, addr);
        data = seg->s_data;
        map = &data->map[idx];

        bzero(mapop, sizeof (gnttab_unmap_grant_ref_t) * cnt);

        /*
         * For each entry which isn't empty and is currently mapped, set
         * it up for an unmap; every entry is then marked empty.
         */
        n = 0;
        for (i = 0; i < cnt; i++) {
                ASSERT(map[i].t_type != SEGMF_MAP_MFN);
                if ((map[i].t_type == SEGMF_MAP_GREF) &&
                    (map[i].u.g.g_flags & SEGMF_GFLAGS_MAPPED)) {
                        mapop[n].handle = map[i].u.g.g_handle;
                        mapop[n].host_addr = map[i].u.g.g_ptep;
                        mapop[n].dev_bus_addr = 0;
                        n++;
                }
                map[i].t_type = SEGMF_MAP_EMPTY;
        }

        /* if there's nothing to unmap, just return */
        if (n == 0) {
                return (0);
        }

        e = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, mapop, n);
        if (e != 0) {
                return (-1);
        }

        return (0);
}
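
/*
 * Record the machine address of the PTE backing addr in the map entry.
 * segmf_fault_gref_range() later hands this address to the hypervisor
 * (see GNTMAP_contains_pte below), so it must be set before grant
 * references at this address are faulted in.
 */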
void
segmf_add_gref_pte(struct seg *seg, caddr_t addr, uint64_t pte_ma)
{
        struct segmf_data *data;
        uint_t idx;

        idx = seg_page(seg, addr);
        data = seg->s_data;

        data->map[idx].u.g.g_ptep = pte_ma;
}
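
/*
 * Map a run of previously stored grant references into the address space
 * with a single batched hypercall.  Returns 0 or an FC_MAKE_ERR() fault
 * code.
 */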
static int
segmf_fault_gref_range(struct seg *seg, caddr_t addr, size_t cnt)
{
        gnttab_map_grant_ref_t mapop[SEGMF_MAX_GREFS];
        struct segmf_data *data;
        segmf_map_t *map;
        uint_t idx;
        int e;
        int i;

        if (cnt > SEGMF_MAX_GREFS) {
                return (-1);
        }

        data = seg->s_data;
        idx = seg_page(seg, addr);
        map = &data->map[idx];

        bzero(mapop, sizeof (gnttab_map_grant_ref_t) * cnt);

        ASSERT(map->t_type == SEGMF_MAP_GREF);

        /*
         * Map each page passed in into the user app's AS.  We do this by
         * passing the MA of the actual PTE of the mapping to the hypervisor.
         */
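        /*
         * With GNTMAP_contains_pte, the hypervisor interprets host_addr as
         * the machine address of the PTE to rewrite (recorded earlier by
         * segmf_add_gref_pte()), rather than as a virtual address.
         */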
        for (i = 0; i < cnt; i++) {
                mapop[i].host_addr = map[i].u.g.g_ptep;
                mapop[i].dom = data->domid;
                mapop[i].ref = map[i].u.g.g_gref;
                mapop[i].flags = GNTMAP_host_map | GNTMAP_application_map |
                    GNTMAP_contains_pte;
                if (!(map[i].u.g.g_flags & SEGMF_GFLAGS_WR)) {
                        mapop[i].flags |= GNTMAP_readonly;
                }
        }
        e = xen_map_gref(GNTTABOP_map_grant_ref, mapop, cnt, B_TRUE);
        if ((e != 0) || (mapop[0].status != GNTST_okay)) {
                return (FC_MAKE_ERR(EFAULT));
        }

        /* save the handles for segmf_release_grefs() and mark as mapped */
        for (i = 0; i < cnt; i++) {
                ASSERT(mapop[i].status == GNTST_okay);
                map[i].u.g.g_handle = mapop[i].handle;
                map[i].u.g.g_flags |= SEGMF_GFLAGS_MAPPED;
        }

        return (0);
}

static struct seg_ops segmf_ops = {
        .dup            = segmf_dup,
        .unmap          = segmf_unmap,
        .free           = segmf_free,
        .fault          = segmf_fault,
        .faulta         = segmf_faulta,
        .setprot        = segmf_setprot,
        .checkprot      = segmf_checkprot,
        .kluster        = segmf_kluster,
        .sync           = segmf_sync,
        .incore         = segmf_incore,
        .lockop         = segmf_lockop,
        .getprot        = segmf_getprot,
        .getoffset      = segmf_getoffset,
        .gettype        = segmf_gettype,
        .getvp          = segmf_getvp,
        .advise         = segmf_advise,
        .dump           = segmf_dump,
        .pagelock       = segmf_pagelock,
        .setpagesize    = segmf_setpagesize,
        .getmemid       = segmf_getmemid,
        .capable        = segmf_capable,
};