patch as-lock-macro-simplification
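This webrev covers the seg_dev.c portion of the AS lock macro simplification: AS_LOCK_HELD(), AS_READ_HELD(), and AS_WRITE_HELD() (along with the enter/exit wrappers) drop their explicit &as->a_lock argument, since every caller passed the address space's own lock anyway. Each hunk below is the same mechanical substitution inside an ASSERT(); the lock actually being tested does not change. As a rough sketch, the simplified wrappers in the companion vm/as.h change presumably reduce to something like the following (illustrative only; the as.h definitions are not part of this file's diff):

    /*
     * Assumed shape of the simplified AS lock macros (sketch only; the
     * real definitions live in the vm/as.h side of this change, which
     * is not shown in this webrev).  The redundant lock argument is
     * gone -- each macro always operates on the address space's own
     * a_lock.
     */
    #define AS_LOCK_ENTER(as, type)   rw_enter(&(as)->a_lock, (type))
    #define AS_LOCK_EXIT(as)          rw_exit(&(as)->a_lock)
    #define AS_LOCK_HELD(as)          RW_LOCK_HELD(&(as)->a_lock)
    #define AS_READ_HELD(as)          RW_READ_HELD(&(as)->a_lock)
    #define AS_WRITE_HELD(as)         RW_WRITE_HELD(&(as)->a_lock)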

          --- old/usr/src/uts/common/vm/seg_dev.c
          +++ new/usr/src/uts/common/vm/seg_dev.c
[ 360 lines elided ]
 361  361  {
 362  362          struct segdev_data *sdp;
 363  363          struct segdev_crargs *a = (struct segdev_crargs *)argsp;
 364  364          devmap_handle_t *dhp = (devmap_handle_t *)a->devmap_data;
 365  365          int error;
 366  366  
 367  367          /*
 368  368           * Since the address space is "write" locked, we
 369  369           * don't need the segment lock to protect "segdev" data.
 370  370           */
 371      -        ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
      371 +        ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
 372  372  
 373  373          hat_map(seg->s_as->a_hat, seg->s_base, seg->s_size, HAT_MAP);
 374  374  
 375  375          sdp = sdp_alloc();
 376  376  
 377  377          sdp->mapfunc = a->mapfunc;
 378  378          sdp->offset = a->offset;
 379  379          sdp->prot = a->prot;
 380  380          sdp->maxprot = a->maxprot;
 381  381          sdp->type = a->type;
[ 85 lines elided ]
 467  467          TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_DUP,
 468  468              "segdev_dup:start dhp=%p, seg=%p", (void *)dhp, (void *)seg);
 469  469  
 470  470          DEBUGF(3, (CE_CONT, "segdev_dup: dhp %p seg %p\n",
 471  471              (void *)dhp, (void *)seg));
 472  472  
 473  473          /*
 474  474           * Since the address space is "write" locked, we
 475  475           * don't need the segment lock to protect "segdev" data.
 476  476           */
 477      -        ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
      477 +        ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
 478  478  
 479  479          newsdp = sdp_alloc();
 480  480  
 481  481          newseg->s_ops = seg->s_ops;
 482  482          newseg->s_data = (void *)newsdp;
 483  483  
 484  484          VN_HOLD(sdp->vp);
 485  485          newsdp->vp      = sdp->vp;
 486  486          newsdp->mapfunc = sdp->mapfunc;
 487  487          newsdp->offset  = sdp->offset;
[ 151 lines elided ]
 639  639              "segdev_unmap:start dhp=%p, seg=%p addr=%p len=%lx",
 640  640              (void *)dhp, (void *)seg, (void *)addr, len);
 641  641  
 642  642          DEBUGF(3, (CE_CONT, "segdev_unmap: dhp %p seg %p addr %p len %lx\n",
 643  643              (void *)dhp, (void *)seg, (void *)addr, len));
 644  644  
 645  645          /*
 646  646           * Since the address space is "write" locked, we
 647  647           * don't need the segment lock to protect "segdev" data.
 648  648           */
 649      -        ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
      649 +        ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
 650  650  
 651  651          if ((sz = sdp->softlockcnt) > 0) {
 652  652                  /*
 653  653                   * Fail the unmap if pages are SOFTLOCKed through this mapping.
 654  654                   * softlockcnt is protected from change by the as write lock.
 655  655                   */
 656  656                  TRACE_1(TR_FAC_DEVMAP, TR_DEVMAP_UNMAP_CK1,
 657  657                      "segdev_unmap:error softlockcnt = %ld", sz);
 658  658                  DEBUGF(1, (CE_CONT, "segdev_unmap: softlockcnt %ld\n", sz));
 659  659                  return (EAGAIN);
[ 468 lines elided ]
1128 1128  
1129 1129          TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_FREE,
1130 1130              "segdev_free: dhp=%p seg=%p", (void *)dhp, (void *)seg);
1131 1131          DEBUGF(3, (CE_CONT, "segdev_free: dhp %p seg %p\n",
1132 1132              (void *)dhp, (void *)seg));
1133 1133  
1134 1134          /*
1135 1135           * Since the address space is "write" locked, we
1136 1136           * don't need the segment lock to protect "segdev" data.
1137 1137           */
1138      -        ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
     1138 +        ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
1139 1139  
1140 1140          while (dhp != NULL)
1141 1141                  dhp = devmap_handle_unmap(dhp);
1142 1142  
1143 1143          VN_RELE(sdp->vp);
1144 1144          if (sdp->vpage != NULL)
1145 1145                  kmem_free(sdp->vpage, vpgtob(seg_pages(seg)));
1146 1146  
1147 1147          rw_destroy(&sdp->lock);
1148 1148          kmem_free(sdp, sizeof (*sdp));
[ 461 lines elided ]
1610 1610          int err;
1611 1611          int err_is_faultcode = 0;
1612 1612  
1613 1613          TRACE_5(TR_FAC_DEVMAP, TR_DEVMAP_FAULT,
1614 1614              "segdev_fault: dhp_head=%p seg=%p addr=%p len=%lx type=%x",
1615 1615              (void *)dhp_head, (void *)seg, (void *)addr, len, type);
1616 1616          DEBUGF(7, (CE_CONT, "segdev_fault: dhp_head %p seg %p "
1617 1617              "addr %p len %lx type %x\n",
1618 1618              (void *)dhp_head, (void *)seg, (void *)addr, len, type));
1619 1619  
1620      -        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
     1620 +        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
1621 1621  
1622 1622          /* Handle non-devmap case */
1623 1623          if (dhp_head == NULL)
1624 1624                  return (segdev_faultpages(hat, seg, addr, len, type, rw, NULL));
1625 1625  
1626 1626          /* Find devmap handle */
1627 1627          if ((dhp = devmap_find_handle(dhp_head, addr)) == NULL)
1628 1628                  return (FC_NOMAP);
1629 1629  
1630 1630          /*
[ 419 lines elided ]
2050 2050  /*
2051 2051   * Asynchronous page fault.  We simply do nothing since this
2052 2052   * entry point is not supposed to load up the translation.
2053 2053   */
2054 2054  /*ARGSUSED*/
2055 2055  static faultcode_t
2056 2056  segdev_faulta(struct seg *seg, caddr_t addr)
2057 2057  {
2058 2058          TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_FAULTA,
2059 2059              "segdev_faulta: seg=%p addr=%p", (void *)seg, (void *)addr);
2060      -        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
     2060 +        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
2061 2061  
2062 2062          return (0);
2063 2063  }
2064 2064  
2065 2065  static int
2066 2066  segdev_setprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
2067 2067  {
2068 2068          register struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
2069 2069          register devmap_handle_t *dhp;
2070 2070          register struct vpage *vp, *evp;
2071 2071          devmap_handle_t *dhp_head = (devmap_handle_t *)sdp->devmap_data;
2072 2072          ulong_t off;
2073 2073          size_t mlen, sz;
2074 2074  
2075 2075          TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_SETPROT,
2076 2076              "segdev_setprot:start seg=%p addr=%p len=%lx prot=%x",
2077 2077              (void *)seg, (void *)addr, len, prot);
2078      -        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
     2078 +        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
2079 2079  
2080 2080          if ((sz = sdp->softlockcnt) > 0 && dhp_head != NULL) {
2081 2081                  /*
2082 2082                   * Fail the setprot if pages are SOFTLOCKed through this
2083 2083                   * mapping.
2084 2084                   * Softlockcnt is protected from change by the as read lock.
2085 2085                   */
2086 2086                  TRACE_1(TR_FAC_DEVMAP, TR_DEVMAP_SETPROT_CK1,
2087 2087                      "segdev_setprot:error softlockcnt=%lx", sz);
2088 2088                  DEBUGF(1, (CE_CONT, "segdev_setprot: softlockcnt %ld\n", sz));
[ 102 lines elided ]
2191 2191  
2192 2192  static int
2193 2193  segdev_checkprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
2194 2194  {
2195 2195          struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
2196 2196          struct vpage *vp, *evp;
2197 2197  
2198 2198          TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_CHECKPROT,
2199 2199              "segdev_checkprot:start seg=%p addr=%p len=%lx prot=%x",
2200 2200              (void *)seg, (void *)addr, len, prot);
2201      -        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
     2201 +        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
2202 2202  
2203 2203          /*
2204 2204           * If segment protection can be used, simply check against them
2205 2205           */
2206 2206          rw_enter(&sdp->lock, RW_READER);
2207 2207          if (sdp->pageprot == 0) {
2208 2208                  register int err;
2209 2209  
2210 2210                  err = ((sdp->prot & prot) != prot) ? EACCES : 0;
2211 2211                  rw_exit(&sdp->lock);
[ 16 lines elided ]
2228 2228  
2229 2229  static int
2230 2230  segdev_getprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv)
2231 2231  {
2232 2232          struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
2233 2233          size_t pgno;
2234 2234  
2235 2235          TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_GETPROT,
2236 2236              "segdev_getprot:start seg=%p addr=%p len=%lx protv=%p",
2237 2237              (void *)seg, (void *)addr, len, (void *)protv);
2238      -        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
     2238 +        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
2239 2239  
2240 2240          pgno = seg_page(seg, addr + len) - seg_page(seg, addr) + 1;
2241 2241          if (pgno != 0) {
2242 2242                  rw_enter(&sdp->lock, RW_READER);
2243 2243                  if (sdp->pageprot == 0) {
2244 2244                          do {
2245 2245                                  protv[--pgno] = sdp->prot;
2246 2246                          } while (pgno != 0);
2247 2247                  } else {
2248 2248                          size_t pgoff = seg_page(seg, addr);
[ 10 lines elided ]
2259 2259  }
2260 2260  
2261 2261  static u_offset_t
2262 2262  segdev_getoffset(register struct seg *seg, caddr_t addr)
2263 2263  {
2264 2264          register struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
2265 2265  
2266 2266          TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_GETOFFSET,
2267 2267              "segdev_getoffset:start seg=%p addr=%p", (void *)seg, (void *)addr);
2268 2268  
2269      -        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
     2269 +        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
2270 2270  
2271 2271          return ((u_offset_t)sdp->offset + (addr - seg->s_base));
2272 2272  }
2273 2273  
2274 2274  /*ARGSUSED*/
2275 2275  static int
2276 2276  segdev_gettype(register struct seg *seg, caddr_t addr)
2277 2277  {
2278 2278          register struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
2279 2279  
2280 2280          TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_GETTYPE,
2281 2281              "segdev_gettype:start seg=%p addr=%p", (void *)seg, (void *)addr);
2282 2282  
2283      -        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
     2283 +        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
2284 2284  
2285 2285          return (sdp->type);
2286 2286  }
2287 2287  
2288 2288  
2289 2289  /*ARGSUSED*/
2290 2290  static int
2291 2291  segdev_getvp(register struct seg *seg, caddr_t addr, struct vnode **vpp)
2292 2292  {
2293 2293          register struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
2294 2294  
2295 2295          TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_GETVP,
2296 2296              "segdev_getvp:start seg=%p addr=%p", (void *)seg, (void *)addr);
2297 2297  
2298      -        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
     2298 +        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
2299 2299  
2300 2300          /*
2301 2301           * Note that this vp is the common_vp of the device, where the
2302 2302           * pages are hung ..
2303 2303           */
2304 2304          *vpp = VTOCVP(sdp->vp);
2305 2305  
2306 2306          return (0);
2307 2307  }
2308 2308  
[ 9 lines elided ]
2318 2318  /*
2319 2319   * segdev pages are not in the cache, and thus can't really be controlled.
2320 2320   * Hence, syncs are simply always successful.
2321 2321   */
2322 2322  /*ARGSUSED*/
2323 2323  static int
2324 2324  segdev_sync(struct seg *seg, caddr_t addr, size_t len, int attr, uint_t flags)
2325 2325  {
2326 2326          TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_SYNC, "segdev_sync:start");
2327 2327  
2328      -        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
     2328 +        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
2329 2329  
2330 2330          return (0);
2331 2331  }
2332 2332  
2333 2333  /*
2334 2334   * segdev pages are always "in core".
2335 2335   */
2336 2336  /*ARGSUSED*/
2337 2337  static size_t
2338 2338  segdev_incore(struct seg *seg, caddr_t addr, size_t len, char *vec)
2339 2339  {
2340 2340          size_t v = 0;
2341 2341  
2342 2342          TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_INCORE, "segdev_incore:start");
2343 2343  
2344      -        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
     2344 +        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
2345 2345  
2346 2346          for (len = (len + PAGEOFFSET) & PAGEMASK; len; len -= PAGESIZE,
2347 2347              v += PAGESIZE)
2348 2348                  *vec++ = 1;
2349 2349          return (v);
2350 2350  }
2351 2351  
2352 2352  /*
2353 2353   * segdev pages are not in the cache, and thus can't really be controlled.
2354 2354   * Hence, locks are simply always successful.
2355 2355   */
2356 2356  /*ARGSUSED*/
2357 2357  static int
2358 2358  segdev_lockop(struct seg *seg, caddr_t addr,
2359 2359      size_t len, int attr, int op, ulong_t *lockmap, size_t pos)
2360 2360  {
2361 2361          TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_LOCKOP, "segdev_lockop:start");
2362 2362  
2363      -        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
     2363 +        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
2364 2364  
2365 2365          return (0);
2366 2366  }
2367 2367  
2368 2368  /*
2369 2369   * segdev pages are not in the cache, and thus can't really be controlled.
2370 2370   * Hence, advise is simply always successful.
2371 2371   */
2372 2372  /*ARGSUSED*/
2373 2373  static int
2374 2374  segdev_advise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
2375 2375  {
2376 2376          TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_ADVISE, "segdev_advise:start");
2377 2377  
2378      -        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
     2378 +        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
2379 2379  
2380 2380          return (0);
2381 2381  }
2382 2382  
2383 2383  /*
2384 2384   * segdev pages are not dumped, so we just return
2385 2385   */
2386 2386  /*ARGSUSED*/
2387 2387  static void
2388 2388  segdev_dump(struct seg *seg)
[ 684 lines elided ]
3073 3073              (void *)dhp, offset, len);
3074 3074  
3075 3075          DEBUGF(7, (CE_CONT, "devmap_load: dhp %p offset %llx len %lx\n",
3076 3076              (void *)dhp, offset, len));
3077 3077  
3078 3078          /*
3079 3079           *      Hat layer only supports devload to process' context for which
3080 3080           *      the as lock is held. Verify here and return error if drivers
3081 3081           *      inadvertently call devmap_load on a wrong devmap handle.
3082 3082           */
3083      -        if ((asp != &kas) && !AS_LOCK_HELD(asp, &asp->a_lock))
     3083 +        if ((asp != &kas) && !AS_LOCK_HELD(asp))
3084 3084                  return (FC_MAKE_ERR(EINVAL));
3085 3085  
3086 3086          soff = (ssize_t)(offset - dhp->dh_uoff);
3087 3087          soff = round_down_p2(soff, PAGESIZE);
3088 3088          if (soff < 0 || soff >= dhp->dh_len)
3089 3089                  return (FC_MAKE_ERR(EINVAL));
3090 3090  
3091 3091          /*
3092 3092           * Address and size must be page aligned.  Len is set to the
3093 3093           * number of bytes in the number of pages that are required to
[ 1009 lines elided ]