patch lower-case-segops
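
The listings below show the affected excerpt before and after the change: each
upper-case SEGOP_* dispatch macro is replaced by a call to the corresponding
lower-case segop_*() function. As a rough sketch of what the conversion means
(the macro body follows the classic <vm/seg.h> shape; this segop_getvp() is
illustrative, not necessarily the patch's exact implementation):

	/*
	 * Old form: a macro that expands inline to an indirect call
	 * through the segment's ops vector.
	 */
	#define	SEGOP_GETVP(s, addr, vpp) \
		(*(s)->s_ops->getvp)((s), (addr), (vpp))

	/*
	 * New form: the same indirect call wrapped in a real function,
	 * which gives a stable symbol that debuggers and function-boundary
	 * tracing can see.
	 */
	int
	segop_getvp(struct seg *seg, caddr_t addr, struct vnode **vpp)
	{
		return (seg->s_ops->getvp(seg, addr, vpp));
	}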


  55 #include <vm/seg_vn.h>
  56 #include <vm/seg_spt.h>
  57 #include <vm/seg_kmem.h>
  58 
  59 extern struct seg_ops segdev_ops;       /* needs a header file */
  60 extern struct seg_ops segspt_shmops;    /* needs a header file */
  61 
  62 static int
  63 page_valid(struct seg *seg, caddr_t addr)
  64 {
  65         struct segvn_data *svd;
  66         vnode_t *vp;
  67         vattr_t vattr;
  68 
  69         /*
  70          * Fail if the page doesn't map to a page in the underlying
  71          * mapped file, if an underlying mapped file exists.
  72          */
  73         vattr.va_mask = AT_SIZE;
  74         if (seg->s_ops == &segvn_ops &&
  75             SEGOP_GETVP(seg, addr, &vp) == 0 &&
  76             vp != NULL && vp->v_type == VREG &&
  77             VOP_GETATTR(vp, &vattr, 0, CRED(), NULL) == 0) {
  78                 u_offset_t size = roundup(vattr.va_size, (u_offset_t)PAGESIZE);
  79                 u_offset_t offset = SEGOP_GETOFFSET(seg, addr);
  80 
  81                 if (offset >= size)
  82                         return (0);
  83         }
  84 
  85         /*
  86          * Fail if this is an ISM shared segment and the address is
  87          * not within the real size of the spt segment that backs it.
  88          */
  89         if (seg->s_ops == &segspt_shmops &&
  90             addr >= seg->s_base + spt_realsize(seg))
  91                 return (0);
  92 
  93         /*
  94          * Fail if the segment is mapped from /dev/null.
  95          * The key is that the mapping comes from segdev and the
  96          * type is neither MAP_SHARED nor MAP_PRIVATE.
  97          */
  98         if (seg->s_ops == &segdev_ops &&
  99             ((SEGOP_GETTYPE(seg, addr) & (MAP_SHARED | MAP_PRIVATE)) == 0))
 100                 return (0);
 101 
 102         /*
 103          * Fail if the page is a MAP_NORESERVE page that has
 104          * not actually materialized.
 105          * We cheat by knowing that segvn is the only segment
 106          * driver that supports MAP_NORESERVE.
 107          */
 108         if (seg->s_ops == &segvn_ops &&
 109             (svd = (struct segvn_data *)seg->s_data) != NULL &&
 110             (svd->vp == NULL || svd->vp->v_type != VREG) &&
 111             (svd->flags & MAP_NORESERVE)) {
 112                 /*
 113                  * Guilty knowledge here.  We know that
 114                  * segvn_incore returns more than just the
 115                  * low-order bit that indicates the page is
 116                  * actually in memory.  If any bits are set,
 117                  * then there is backing store for the page.
 118                  */
 119                 char incore = 0;
 120                 (void) SEGOP_INCORE(seg, addr, PAGESIZE, &incore);
 121                 if (incore == 0)
 122                         return (0);
 123         }
 124         return (1);
 125 }
 126 
 127 /*
 128  * Map address "addr" in address space "as" into a kernel virtual address.
 129  * The memory is guaranteed to be resident and locked down.
 130  */
 131 static caddr_t
 132 mapin(struct as *as, caddr_t addr, int writing)
 133 {
 134         page_t *pp;
 135         caddr_t kaddr;
 136         pfn_t pfnum;
 137 
 138         /*
 139          * NB: Because of past mistakes, we have bits being returned
 140          * by getpfnum that are actually the page type bits of the pte.


 192         uint_t prot;
 193         uint_t prot_rw = writing ? PROT_WRITE : PROT_READ;
 194         int protchanged;
 195         on_trap_data_t otd;
 196         int retrycnt;
 197         struct as *as = p->p_as;
 198         enum seg_rw rw;
 199 
 200         /*
 201          * Locate segment containing address of interest.
 202          */
 203         page = (caddr_t)(uintptr_t)((uintptr_t)addr & PAGEMASK);
 204         retrycnt = 0;
 205         AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
 206 retry:
 207         if ((seg = as_segat(as, page)) == NULL ||
 208             !page_valid(seg, page)) {
 209                 AS_LOCK_EXIT(as, &as->a_lock);
 210                 return (ENXIO);
 211         }
 212         SEGOP_GETPROT(seg, page, 0, &prot);
 213 
 214         protchanged = 0;
 215         if ((prot & prot_rw) == 0) {
 216                 protchanged = 1;
 217                 err = SEGOP_SETPROT(seg, page, PAGESIZE, prot | prot_rw);
 218 
 219                 if (err == IE_RETRY) {
 220                         protchanged = 0;
 221                         ASSERT(retrycnt == 0);
 222                         retrycnt++;
 223                         goto retry;
 224                 }
 225 
 226                 if (err != 0) {
 227                         AS_LOCK_EXIT(as, &as->a_lock);
 228                         return (ENXIO);
 229                 }
 230         }
 231 
 232         /*
 233          * segvn may do a copy-on-write for F_SOFTLOCK/S_READ case to break
 234          * sharing to avoid a copy on write of a softlocked page by another
 235          * thread. But since we locked the address space as a writer, no other
 236          * thread can cause a copy on write. S_READ_NOCOW is passed as the
 237          * access type to tell segvn that it's ok not to do a copy-on-write
 238          * for this SOFTLOCK fault.
 239          */
 240         if (writing)
 241                 rw = S_WRITE;
 242         else if (seg->s_ops == &segvn_ops)
 243                 rw = S_READ_NOCOW;
 244         else
 245                 rw = S_READ;
 246 
 247         if (SEGOP_FAULT(as->a_hat, seg, page, PAGESIZE, F_SOFTLOCK, rw)) {
 248                 if (protchanged)
 249                         (void) SEGOP_SETPROT(seg, page, PAGESIZE, prot);
 250                 AS_LOCK_EXIT(as, &as->a_lock);
 251                 return (ENXIO);
 252         }
 253         CPU_STATS_ADD_K(vm, softlock, 1);
 254 
 255         /*
 256          * Make sure we're not trying to read or write off the end of the page.
 257          */
 258         ASSERT(len <= page + PAGESIZE - addr);
 259 
 260         /*
 261          * Map in the locked page, copy to our local buffer,
 262          * then map the page out and unlock it.
 263          */
 264         vaddr = mapin(as, addr, writing);
 265 
 266         /*
 267          * Since we are copying memory on behalf of the user process,
 268          * protect against memory error correction faults.
 269          */


 286                         else
 287                                 bcopy(vaddr, buf, len);
 288                 }
 289         } else {
 290                 error = EIO;
 291         }
 292         no_trap();
 293 
 294         /*
 295          * If we're writing to an executable page, we may need to synchronize
 296          * the I$ with the modifications we made through the D$.
 297          */
 298         if (writing && (prot & PROT_EXEC))
 299                 sync_icache(vaddr, (uint_t)len);
 300 
 301         mapout(as, addr, vaddr, writing);
 302 
 303         if (rw == S_READ_NOCOW)
 304                 rw = S_READ;
 305 
 306         (void) SEGOP_FAULT(as->a_hat, seg, page, PAGESIZE, F_SOFTUNLOCK, rw);
 307 
 308         if (protchanged)
 309                 (void) SEGOP_SETPROT(seg, page, PAGESIZE, prot);
 310 
 311         AS_LOCK_EXIT(as, &as->a_lock);
 312 
 313         return (error);
 314 }
 315 
 316 int
 317 uread(proc_t *p, void *buf, size_t len, uintptr_t a)
 318 {
 319         return (urw(p, 0, buf, len, a));
 320 }
 321 
 322 int
 323 uwrite(proc_t *p, void *buf, size_t len, uintptr_t a)
 324 {
 325         return (urw(p, 1, buf, len, a));
 326 }


  55 #include <vm/seg_vn.h>
  56 #include <vm/seg_spt.h>
  57 #include <vm/seg_kmem.h>
  58 
  59 extern struct seg_ops segdev_ops;       /* needs a header file */
  60 extern struct seg_ops segspt_shmops;    /* needs a header file */
  61 
  62 static int
  63 page_valid(struct seg *seg, caddr_t addr)
  64 {
  65         struct segvn_data *svd;
  66         vnode_t *vp;
  67         vattr_t vattr;
  68 
  69         /*
  70          * Fail if the page doesn't map to a page in the underlying
  71          * mapped file, if an underlying mapped file exists.
  72          */
  73         vattr.va_mask = AT_SIZE;
  74         if (seg->s_ops == &segvn_ops &&
  75             segop_getvp(seg, addr, &vp) == 0 &&
  76             vp != NULL && vp->v_type == VREG &&
  77             VOP_GETATTR(vp, &vattr, 0, CRED(), NULL) == 0) {
  78                 u_offset_t size = roundup(vattr.va_size, (u_offset_t)PAGESIZE);
  79                 u_offset_t offset = segop_getoffset(seg, addr);
  80 
  81                 if (offset >= size)
  82                         return (0);
  83         }
  84 
  85         /*
  86          * Fail if this is an ISM shared segment and the address is
  87          * not within the real size of the spt segment that backs it.
  88          */
  89         if (seg->s_ops == &segspt_shmops &&
  90             addr >= seg->s_base + spt_realsize(seg))
  91                 return (0);
  92 
  93         /*
  94          * Fail if the segment is mapped from /dev/null.
  95          * The key is that the mapping comes from segdev and the
  96          * type is neither MAP_SHARED nor MAP_PRIVATE.
  97          */
  98         if (seg->s_ops == &segdev_ops &&
  99             ((segop_gettype(seg, addr) & (MAP_SHARED | MAP_PRIVATE)) == 0))
 100                 return (0);
 101 
 102         /*
 103          * Fail if the page is a MAP_NORESERVE page that has
 104          * not actually materialized.
 105          * We cheat by knowing that segvn is the only segment
 106          * driver that supports MAP_NORESERVE.
 107          */
 108         if (seg->s_ops == &segvn_ops &&
 109             (svd = (struct segvn_data *)seg->s_data) != NULL &&
 110             (svd->vp == NULL || svd->vp->v_type != VREG) &&
 111             (svd->flags & MAP_NORESERVE)) {
 112                 /*
 113                  * Guilty knowledge here.  We know that
 114                  * segvn_incore returns more than just the
 115                  * low-order bit that indicates the page is
 116                  * actually in memory.  If any bits are set,
 117                  * then there is backing store for the page.
 118                  */
 119                 char incore = 0;
 120                 (void) segop_incore(seg, addr, PAGESIZE, &incore);
 121                 if (incore == 0)
 122                         return (0);
 123         }
 124         return (1);
 125 }
 126 
 127 /*
 128  * Map address "addr" in address space "as" into a kernel virtual address.
 129  * The memory is guaranteed to be resident and locked down.
 130  */
 131 static caddr_t
 132 mapin(struct as *as, caddr_t addr, int writing)
 133 {
 134         page_t *pp;
 135         caddr_t kaddr;
 136         pfn_t pfnum;
 137 
 138         /*
 139          * NB: Because of past mistakes, we have bits being returned
 140          * by getpfnum that are actually the page type bits of the pte.


 192         uint_t prot;
 193         uint_t prot_rw = writing ? PROT_WRITE : PROT_READ;
 194         int protchanged;
 195         on_trap_data_t otd;
 196         int retrycnt;
 197         struct as *as = p->p_as;
 198         enum seg_rw rw;
 199 
 200         /*
 201          * Locate segment containing address of interest.
 202          */
 203         page = (caddr_t)(uintptr_t)((uintptr_t)addr & PAGEMASK);
 204         retrycnt = 0;
 205         AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
 206 retry:
 207         if ((seg = as_segat(as, page)) == NULL ||
 208             !page_valid(seg, page)) {
 209                 AS_LOCK_EXIT(as, &as->a_lock);
 210                 return (ENXIO);
 211         }
 212         segop_getprot(seg, page, 0, &prot);
 213 
 214         protchanged = 0;
 215         if ((prot & prot_rw) == 0) {
 216                 protchanged = 1;
 217                 err = segop_setprot(seg, page, PAGESIZE, prot | prot_rw);
 218 
 219                 if (err == IE_RETRY) {
 220                         protchanged = 0;
 221                         ASSERT(retrycnt == 0);
 222                         retrycnt++;
 223                         goto retry;
 224                 }
 225 
 226                 if (err != 0) {
 227                         AS_LOCK_EXIT(as, &as->a_lock);
 228                         return (ENXIO);
 229                 }
 230         }
 231 
 232         /*
 233          * segvn may do a copy-on-write for F_SOFTLOCK/S_READ case to break
 234          * sharing to avoid a copy on write of a softlocked page by another
 235          * thread. But since we locked the address space as a writer, no other
 236          * thread can cause a copy on write. S_READ_NOCOW is passed as the
 237          * access type to tell segvn that it's ok not to do a copy-on-write
 238          * for this SOFTLOCK fault.
 239          */
 240         if (writing)
 241                 rw = S_WRITE;
 242         else if (seg->s_ops == &segvn_ops)
 243                 rw = S_READ_NOCOW;
 244         else
 245                 rw = S_READ;
 246 
 247         if (segop_fault(as->a_hat, seg, page, PAGESIZE, F_SOFTLOCK, rw)) {
 248                 if (protchanged)
 249                         (void) segop_setprot(seg, page, PAGESIZE, prot);
 250                 AS_LOCK_EXIT(as, &as->a_lock);
 251                 return (ENXIO);
 252         }
 253         CPU_STATS_ADD_K(vm, softlock, 1);
 254 
 255         /*
 256          * Make sure we're not trying to read or write off the end of the page.
 257          */
 258         ASSERT(len <= page + PAGESIZE - addr);
 259 
 260         /*
 261          * Map in the locked page, copy to our local buffer,
 262          * then map the page out and unlock it.
 263          */
 264         vaddr = mapin(as, addr, writing);
 265 
 266         /*
 267          * Since we are copying memory on behalf of the user process,
 268          * protect against memory error correction faults.
 269          */


 286                         else
 287                                 bcopy(vaddr, buf, len);
 288                 }
 289         } else {
 290                 error = EIO;
 291         }
 292         no_trap();
 293 
 294         /*
 295          * If we're writing to an executable page, we may need to synchronize
 296          * the I$ with the modifications we made through the D$.
 297          */
 298         if (writing && (prot & PROT_EXEC))
 299                 sync_icache(vaddr, (uint_t)len);
 300 
 301         mapout(as, addr, vaddr, writing);
 302 
 303         if (rw == S_READ_NOCOW)
 304                 rw = S_READ;
 305 
 306         (void) segop_fault(as->a_hat, seg, page, PAGESIZE, F_SOFTUNLOCK, rw);
 307 
 308         if (protchanged)
 309                 (void) segop_setprot(seg, page, PAGESIZE, prot);
 310 
 311         AS_LOCK_EXIT(as, &as->a_lock);
 312 
 313         return (error);
 314 }
 315 
 316 int
 317 uread(proc_t *p, void *buf, size_t len, uintptr_t a)
 318 {
 319         return (urw(p, 0, buf, len, a));
 320 }
 321 
 322 int
 323 uwrite(proc_t *p, void *buf, size_t len, uintptr_t a)
 324 {
 325         return (urw(p, 1, buf, len, a));
 326 }
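
For reference, a minimal hypothetical consumer of these entry points. As the
code above shows, urw() returns 0 on success, ENXIO when the address is not
backed by a valid mapping, and EIO when the copy trips an uncorrectable memory
error; "p" and "pc" here are assumed inputs, not part of the patch:

	/*
	 * Hypothetical sketch: fetch one instruction word from a traced
	 * process at address "pc".
	 */
	uint32_t instr;
	int err = uread(p, &instr, sizeof (instr), pc);
	if (err != 0)
		return (err);	/* ENXIO: bad address; EIO: copy failed */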