patch lower-case-segops


 699         while (as->a_callbacks && as_do_callbacks(as, AS_ALL_EVENT, 0, 0))
 700                 ;
 701 
 702         /* This will prevent new XHATs from attaching to as */
 703         if (!called)
 704                 AS_SETBUSY(as);
 705         mutex_exit(&as->a_contents);
 706         AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
 707 
 708         if (!called) {
 709                 called = 1;
 710                 hat_free_start(hat);
 711                 if (as->a_xhat != NULL)
 712                         xhat_free_start_all(as);
 713         }
 714         for (seg = AS_SEGFIRST(as); seg != NULL; seg = next) {
 715                 int err;
 716 
 717                 next = AS_SEGNEXT(as, seg);
 718 retry:
 719                 err = SEGOP_UNMAP(seg, seg->s_base, seg->s_size);
 720                 if (err == EAGAIN) {
 721                         mutex_enter(&as->a_contents);
 722                         if (as->a_callbacks) {
 723                                 AS_LOCK_EXIT(as, &as->a_lock);
 724                         } else if (!AS_ISNOUNMAPWAIT(as)) {
 725                                 /*
 726                                  * Memory is currently locked. Wait for a
 727                                  * cv_signal that it has been unlocked, then
 728                                  * try the operation again.
 729                                  */
 730                                 if (AS_ISUNMAPWAIT(as) == 0)
 731                                         cv_broadcast(&as->a_cv);
 732                                 AS_SETUNMAPWAIT(as);
 733                                 AS_LOCK_EXIT(as, &as->a_lock);
 734                                 while (AS_ISUNMAPWAIT(as))
 735                                         cv_wait(&as->a_cv, &as->a_contents);
 736                         } else {
 737                                 /*
 738                                  * We may have raced with
 739                                  * segvn_reclaim()/segspt_reclaim(). In this


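The EAGAIN path above is the classic wait-and-retry idiom: the unmapper flags
that it is waiting, sleeps on the address-space condition variable, and loops
back to the unmap once whoever had the pages locked clears the flag and
broadcasts. A minimal userland sketch of the same idiom using POSIX threads
(as_model, unmapwait, memory_unlocked and the rest are illustrative stand-ins,
not kernel interfaces, and the aio-wakeup cv_broadcast() special case is
elided):

#include <pthread.h>
#include <errno.h>

struct as_model {
        pthread_mutex_t contents;       /* plays the role of a_contents */
        pthread_cond_t  cv;             /* plays the role of a_cv */
        int             unmapwait;      /* AS_ISUNMAPWAIT analogue */
        int             locked;         /* pages currently locked? */
};

static int
try_unmap(struct as_model *as)
{
        return (as->locked ? EAGAIN : 0);       /* one unmap attempt */
}

void
unmap_with_retry(struct as_model *as)
{
        for (;;) {
                if (try_unmap(as) != EAGAIN)
                        return;
                pthread_mutex_lock(&as->contents);
                as->unmapwait = 1;              /* AS_SETUNMAPWAIT */
                while (as->unmapwait)           /* sleep until woken ... */
                        pthread_cond_wait(&as->cv, &as->contents);
                pthread_mutex_unlock(&as->contents);
                /* ... then loop around and retry the unmap */
        }
}

/* The unlocking side clears the flag and wakes every waiter. */
void
memory_unlocked(struct as_model *as)
{
        pthread_mutex_lock(&as->contents);
        as->locked = 0;
        as->unmapwait = 0;                      /* AS_CLRUNMAPWAIT */
        pthread_cond_broadcast(&as->cv);
        pthread_mutex_unlock(&as->contents);
}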
 805         (void) hat_dup(as->a_hat, newas->a_hat, NULL, 0, HAT_DUP_SRD);
 806 
 807         for (seg = AS_SEGFIRST(as); seg != NULL; seg = AS_SEGNEXT(as, seg)) {
 808 
 809                 if (seg->s_flags & S_PURGE) {
 810                         purgesize += seg->s_size;
 811                         continue;
 812                 }
 813 
 814                 newseg = seg_alloc(newas, seg->s_base, seg->s_size);
 815                 if (newseg == NULL) {
 816                         AS_LOCK_EXIT(newas, &newas->a_lock);
 817                         as_setwatch(as);
 818                         mutex_enter(&as->a_contents);
 819                         AS_CLRBUSY(as);
 820                         mutex_exit(&as->a_contents);
 821                         AS_LOCK_EXIT(as, &as->a_lock);
 822                         as_free(newas);
 823                         return (-1);
 824                 }
 825                 if ((error = SEGOP_DUP(seg, newseg)) != 0) {
 826                         /*
 827                          * We call seg_free() on the new seg
 828                          * because the segment is not set up
 829                          * completely; i.e. it has no ops.
 830                          */
 831                         as_setwatch(as);
 832                         mutex_enter(&as->a_contents);
 833                         AS_CLRBUSY(as);
 834                         mutex_exit(&as->a_contents);
 835                         AS_LOCK_EXIT(as, &as->a_lock);
 836                         seg_free(newseg);
 837                         AS_LOCK_EXIT(newas, &newas->a_lock);
 838                         as_free(newas);
 839                         return (error);
 840                 }
 841                 newas->a_size += seg->s_size;
 842         }
 843         newas->a_resvsize = as->a_resvsize - purgesize;
 844 
 845         error = hat_dup(as->a_hat, newas->a_hat, NULL, 0, HAT_DUP_ALL);


1001                 }
1002                 if (raddr + rsize > seg->s_base + seg->s_size)
1003                         ssize = seg->s_base + seg->s_size - raddr;
1004                 else
1005                         ssize = rsize;
1006 
1007                 if (!is_xhat || (seg->s_ops != &segdev_ops)) {
1008 
1009                         if (is_xhat && avl_numnodes(&as->a_wpage) != 0 &&
1010                             pr_is_watchpage_as(raddr, rw, as)) {
1011                                 /*
1012                                  * Handle watch pages.  If we're faulting on a
1013                                  * watched page from an X-hat, we have to
1014                                  * restore the original permissions while we
1015                                  * handle the fault.
1016                                  */
1017                                 as_clearwatch(as);
1018                                 holding_wpage = 1;
1019                         }
1020 
1021                         res = SEGOP_FAULT(hat, seg, raddr, ssize, type, rw);
1022 
1023                         /* Restore watchpoints */
1024                         if (holding_wpage) {
1025                                 as_setwatch(as);
1026                                 holding_wpage = 0;
1027                         }
1028 
1029                         if (res != 0)
1030                                 break;
1031                 } else {
1032                         /* XHAT does not support seg_dev */
1033                         res = FC_NOSUPPORT;
1034                         break;
1035                 }
1036         }
1037 
1038         /*
1039          * If we were SOFTLOCKing and encountered a failure,
1040          * we must SOFTUNLOCK the range we already did. (Maybe we
1041          * should just panic if we are SOFTLOCKing or even SOFTUNLOCKing
1042          * right here...)
1043          */
1044         if (res != 0 && type == F_SOFTLOCK) {
1045                 for (seg = segsav; addrsav < raddr; addrsav += ssize) {
1046                         if (addrsav >= seg->s_base + seg->s_size)
1047                                 seg = AS_SEGNEXT(as, seg);
1048                         ASSERT(seg != NULL);
1049                         /*
1050                          * Now call the fault routine again to perform the
1051                          * unlock using S_OTHER instead of the rw variable
1052                          * since we never got a chance to touch the pages.
1053                          */
1054                         if (raddr > seg->s_base + seg->s_size)
1055                                 ssize = seg->s_base + seg->s_size - addrsav;
1056                         else
1057                                 ssize = raddr - addrsav;
1058                         (void) SEGOP_FAULT(hat, seg, addrsav, ssize,
1059                             F_SOFTUNLOCK, S_OTHER);
1060                 }
1061         }
1062         if (as_lock_held)
1063                 AS_LOCK_EXIT(as, &as->a_lock);
1064         if ((lwp != NULL) && (!is_xhat))
1065                 lwp->lwp_nostop--;
1066 
1067         /*
1068          * If the lower levels returned EDEADLK for a fault,
1069          * it means that we should retry the fault.  Let's also wait
1070          * a bit to let the deadlock-causing condition clear.
1071          * This is part of a gross hack to work around a design flaw
1072          * in the ufs/sds logging code and should go away when the
1073          * logging code is re-designed to fix the problem. See bug
1074          * 4125102 for details of the problem.
1075          */
1076         if (FC_ERRNO(res) == EDEADLK) {
1077                 delay(deadlk_wait);
1078                 res = 0;


1108         rsize = (((size_t)(addr + size) + PAGEOFFSET) & PAGEMASK) -
1109             (size_t)raddr;
1110 
1111         AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
1112         seg = as_segat(as, raddr);
1113         if (seg == NULL) {
1114                 AS_LOCK_EXIT(as, &as->a_lock);
1115                 if (lwp != NULL)
1116                         lwp->lwp_nostop--;
1117                 return (FC_NOMAP);
1118         }
1119 
1120         for (; rsize != 0; rsize -= PAGESIZE, raddr += PAGESIZE) {
1121                 if (raddr >= seg->s_base + seg->s_size) {
1122                         seg = AS_SEGNEXT(as, seg);
1123                         if (seg == NULL || raddr != seg->s_base) {
1124                                 res = FC_NOMAP;
1125                                 break;
1126                         }
1127                 }
1128                 res = SEGOP_FAULTA(seg, raddr);
1129                 if (res != 0)
1130                         break;
1131         }
1132         AS_LOCK_EXIT(as, &as->a_lock);
1133         if (lwp != NULL)
1134                 lwp->lwp_nostop--;
1135         /*
1136          * If the lower levels returned EDEADLK for a fault,
1137          * it means that we should retry the fault.  Let's also wait
1138          * a bit to let the deadlock-causing condition clear.
1139          * This is part of a gross hack to work around a design flaw
1140          * in the ufs/sds logging code and should go away when the
1141          * logging code is re-designed to fix the problem. See bug
1142          * 4125102 for details of the problem.
1143          */
1144         if (FC_ERRNO(res) == EDEADLK) {
1145                 delay(deadlk_wait);
1146                 res = 0;
1147                 goto retry;
1148         }

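The EDEADLK back-off shared by as_fault() and as_faulta(), stripped to its
shape, is just decode, delay, retry. A compilable sketch (the FC_ERRNO()
decoding is elided, the ~1ms tick stands in for the kernel's
delay(deadlk_wait), and do_fault_once() is a stub that succeeds on the third
try):

#include <errno.h>
#include <time.h>

/* Stub for one fault attempt; returns 0 or an errno-style code. */
static int
do_fault_once(void)
{
        static int attempts;
        return (attempts++ < 2 ? EDEADLK : 0);  /* succeeds on 3rd try */
}

int
fault_with_backoff(void)
{
        struct timespec tick = { 0, 1000000 };  /* ~1ms; illustrative */
        int res;

        while ((res = do_fault_once()) == EDEADLK)
                (void) nanosleep(&tick, NULL);  /* let the deadlock clear */
        return (res);
}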

1198         seg = as_segat(as, raddr);
1199         if (seg == NULL) {
1200                 as_setwatch(as);
1201                 AS_LOCK_EXIT(as, &as->a_lock);
1202                 return (ENOMEM);
1203         }
1204 
1205         for (; rsize != 0; rsize -= ssize, raddr += ssize) {
1206                 if (raddr >= seg->s_base + seg->s_size) {
1207                         seg = AS_SEGNEXT(as, seg);
1208                         if (seg == NULL || raddr != seg->s_base) {
1209                                 error = ENOMEM;
1210                                 break;
1211                         }
1212                 }
1213                 if ((raddr + rsize) > (seg->s_base + seg->s_size))
1214                         ssize = seg->s_base + seg->s_size - raddr;
1215                 else
1216                         ssize = rsize;
1217 retry:
1218                 error = SEGOP_SETPROT(seg, raddr, ssize, prot);
1219 
1220                 if (error == IE_NOMEM) {
1221                         error = EAGAIN;
1222                         break;
1223                 }
1224 
1225                 if (error == IE_RETRY) {
1226                         AS_LOCK_EXIT(as, &as->a_lock);
1227                         writer = 1;
1228                         goto setprot_top;
1229                 }
1230 
1231                 if (error == EAGAIN) {
1232                         /*
1233                          * Make sure we have a_lock as writer.
1234                          */
1235                         if (writer == 0) {
1236                                 AS_LOCK_EXIT(as, &as->a_lock);
1237                                 writer = 1;
1238                                 goto setprot_top;

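IE_RETRY and EAGAIN here signal that the segment layer needs the caller to
hold a_lock as writer; since a reader/writer lock cannot be upgraded in
place, the code drops the read lock and restarts the whole walk from
setprot_top as writer. A compilable userland sketch of that restart-as-writer
idiom (the IE_RETRY value is assumed, pthread rwlocks stand in for
AS_LOCK_ENTER/AS_LOCK_EXIT, and apply_op() is a stub that demands the write
lock once):

#include <pthread.h>

#define IE_RETRY        (-2)    /* assumed sentinel, like the kernel's */

static int
apply_op(int have_writer)
{
        return (have_writer ? 0 : IE_RETRY);
}

int
setprot_like(pthread_rwlock_t *l)
{
        int writer = 0;
        int err;

top:
        if (writer)
                (void) pthread_rwlock_wrlock(l);
        else
                (void) pthread_rwlock_rdlock(l);

        err = apply_op(writer);
        (void) pthread_rwlock_unlock(l);

        if (err == IE_RETRY && !writer) {
                writer = 1;     /* upgrade by dropping and restarting */
                goto top;
        }
        return (err);
}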

1349         seg = as_segat(as, raddr);
1350         if (seg == NULL) {
1351                 as_setwatch(as);
1352                 AS_LOCK_EXIT(as, &as->a_lock);
1353                 return (ENOMEM);
1354         }
1355 
1356         for (; rsize != 0; rsize -= ssize, raddr += ssize) {
1357                 if (raddr >= seg->s_base + seg->s_size) {
1358                         seg = AS_SEGNEXT(as, seg);
1359                         if (seg == NULL || raddr != seg->s_base) {
1360                                 error = ENOMEM;
1361                                 break;
1362                         }
1363                 }
1364                 if ((raddr + rsize) > (seg->s_base + seg->s_size))
1365                         ssize = seg->s_base + seg->s_size - raddr;
1366                 else
1367                         ssize = rsize;
1368 
1369                 error = SEGOP_CHECKPROT(seg, raddr, ssize, prot);
1370                 if (error != 0)
1371                         break;
1372         }
1373         as_setwatch(as);
1374         AS_LOCK_EXIT(as, &as->a_lock);
1375         return (error);
1376 }
1377 
1378 int
1379 as_unmap(struct as *as, caddr_t addr, size_t size)
1380 {
1381         struct seg *seg, *seg_next;
1382         struct as_callback *cb;
1383         caddr_t raddr, eaddr;
1384         size_t ssize, rsize = 0;
1385         int err;
1386 
1387 top:
1388         raddr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
1389         eaddr = (caddr_t)(((uintptr_t)(addr + size) + PAGEOFFSET) &


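For concreteness, here is the rounding above worked through with 4K pages
(so PAGEOFFSET == 0xFFF and PAGEMASK == ~0xFFF; the addresses are made up):

/*
 *      addr  = 0x12345, size = 0x2100
 *      raddr = 0x12345 & ~0xFFF                     = 0x12000  (round down)
 *      eaddr = (0x12345 + 0x2100 + 0xFFF) & ~0xFFF
 *            = 0x15444 & ~0xFFF                     = 0x15000  (round up)
 *
 * The page-aligned range [0x12000, 0x15000) fully covers [addr, addr + size).
 */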
1415                 else
1416                         ssize = eaddr - raddr;
1417 
1418                 /*
1419                  * Save next segment pointer since seg can be
1420                  * destroyed during the segment unmap operation.
1421                  */
1422                 seg_next = AS_SEGNEXT(as, seg);
1423 
1424                 /*
1425                  * We didn't count /dev/null mappings, so ignore them here.
1426                  * We'll handle MAP_NORESERVE cases in segvn_unmap(). (Again,
1427                  * we have to do this check here while we have seg.)
1428                  */
1429                 rsize = 0;
1430                 if (!SEG_IS_DEVNULL_MAPPING(seg) &&
1431                     !SEG_IS_PARTIAL_RESV(seg))
1432                         rsize = ssize;
1433 
1434 retry:
1435                 err = SEGOP_UNMAP(seg, raddr, ssize);
1436                 if (err == EAGAIN) {
1437                         /*
1438                          * Memory is currently locked.  It must be unlocked
1439                          * before this operation can succeed through a retry.
1440                          * The possible reasons for locked memory and
1441                          * corresponding strategies for unlocking are:
1442                          * (1) Normal I/O
1443                          *      wait for a signal that the I/O operation
1444                          *      has completed and the memory is unlocked.
1445                          * (2) Asynchronous I/O
1446                          *      The aio subsystem does not unlock pages when
1447                          *      the I/O is completed. Those pages are unlocked
1448                          *      when the application calls aiowait/aioerror.
1449                          *      So, to prevent blocking forever, cv_broadcast()
1450                          *      is done to wake up aio_cleanup_thread.
1451                          *      Subsequently, segvn_reclaim will be called, and
1452                          *      that will do AS_CLRUNMAPWAIT() and wake us up.
1453                          * (3) Long term page locking:
1454                          *      Drivers intending to have pages locked for a
1455                          *      period considerably longer than for normal I/O


1853  */
1854 void
1855 as_purge(struct as *as)
1856 {
1857         struct seg *seg;
1858         struct seg *next_seg;
1859 
1860         /*
1861          * the setting of NEEDSPURGE is protected by as_rangelock(), so
1862          * no need to grab a_contents mutex for this check
1863          */
1864         if ((as->a_flags & AS_NEEDSPURGE) == 0)
1865                 return;
1866 
1867         AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
1868         next_seg = NULL;
1869         seg = AS_SEGFIRST(as);
1870         while (seg != NULL) {
1871                 next_seg = AS_SEGNEXT(as, seg);
1872                 if (seg->s_flags & S_PURGE)
1873                         SEGOP_UNMAP(seg, seg->s_base, seg->s_size);
1874                 seg = next_seg;
1875         }
1876         AS_LOCK_EXIT(as, &as->a_lock);
1877 
1878         mutex_enter(&as->a_contents);
1879         as->a_flags &= ~AS_NEEDSPURGE;
1880         mutex_exit(&as->a_contents);
1881 }
1882 
1883 /*
1884  * Find a hole within [*basep, *basep + *lenp), which contains a mappable
1885  * range of addresses at least "minlen" long, where the base of the range is
1886  * at "off" phase from an "align" boundary and there is space for a
1887  * "redzone"-sized redzone on either side of the range.  Thus,
1888  * if align was 4M and off was 16k, the user wants a hole which will start
1889  * 16k into a 4M page.
1890  *
1891  * If flags specifies AH_HI, the hole will have the highest possible address
1892  * in the range.  We use the as->a_lastgap field to figure out where to
1893  * start looking for a gap.

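A worked instance of the phase requirement just described, using the comment's
own numbers (the P2ROUNDUP formulation below is one way to express it, not
necessarily the routine's literal code):

/*
 * align = 4M (0x400000), off = 16K (0x4000).  For a candidate base b,
 * the wanted address is the lowest a >= b with (a % align) == off:
 *
 *      a = P2ROUNDUP(b - off, align) + off
 *
 * e.g. b = 0x10123000:
 *      b - off                  = 0x1011F000
 *      rounded up to 4M         = 0x10400000
 *      + off                    = 0x10404000    (16K into a 4M boundary)
 *
 * and a redzone-sized guard must still fit on each side of
 * [a, a + minlen).
 */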

2184 
2185         mutex_enter(&as->a_contents);
2186         AS_CLRBUSY(as);
2187         mutex_exit(&as->a_contents);
2188 
2189         /*
2190          * Call the swapout routines of all segments in the address
2191          * space to do the actual work, accumulating the amount of
2192          * space reclaimed.
2193          */
2194         for (seg = AS_SEGFIRST(as); seg != NULL; seg = AS_SEGNEXT(as, seg)) {
2195                 struct seg_ops *ov = seg->s_ops;
2196 
2197                 /*
2198                  * We have to check to see if the seg has
2199                  * an ops vector because the seg may have
2200                  * been in the middle of being set up when
2201                  * the process was picked for swapout.
2202                  */
2203                 if ((ov != NULL) && (ov->swapout != NULL))
2204                         swpcnt += SEGOP_SWAPOUT(seg);
2205         }
2206         AS_LOCK_EXIT(as, &as->a_lock);
2207         return (swpcnt);
2208 }
2209 
2210 /*
2211  * Determine whether data from the mappings in interval [addr, addr + size)
2212  * are in the primary memory (core) cache.
2213  */
2214 int
2215 as_incore(struct as *as, caddr_t addr,
2216     size_t size, char *vec, size_t *sizep)
2217 {
2218         struct seg *seg;
2219         size_t ssize;
2220         caddr_t raddr;          /* rounded down addr */
2221         size_t rsize;           /* rounded up size */
2222         size_t isize;                   /* iteration size */
2223         int error = 0;          /* result, assume success */
2224 


2232 
2233         AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
2234         seg = as_segat(as, raddr);
2235         if (seg == NULL) {
2236                 AS_LOCK_EXIT(as, &as->a_lock);
2237                 return (-1);
2238         }
2239 
2240         for (; rsize != 0; rsize -= ssize, raddr += ssize) {
2241                 if (raddr >= seg->s_base + seg->s_size) {
2242                         seg = AS_SEGNEXT(as, seg);
2243                         if (seg == NULL || raddr != seg->s_base) {
2244                                 error = -1;
2245                                 break;
2246                         }
2247                 }
2248                 if ((raddr + rsize) > (seg->s_base + seg->s_size))
2249                         ssize = seg->s_base + seg->s_size - raddr;
2250                 else
2251                         ssize = rsize;
2252                 *sizep += isize = SEGOP_INCORE(seg, raddr, ssize, vec);
2253                 if (isize != ssize) {
2254                         error = -1;
2255                         break;
2256                 }
2257                 vec += btopr(ssize);
2258         }
2259         AS_LOCK_EXIT(as, &as->a_lock);
2260         return (error);
2261 }
2262 
2263 static void
2264 as_segunlock(struct seg *seg, caddr_t addr, int attr,
2265         ulong_t *bitmap, size_t position, size_t npages)
2266 {
2267         caddr_t range_start;
2268         size_t  pos1 = position;
2269         size_t  pos2;
2270         size_t  size;
2271         size_t  end_pos = npages + position;
2272 
2273         while (bt_range(bitmap, &pos1, &pos2, end_pos)) {
2274                 size = ptob((pos2 - pos1));
2275                 range_start = (caddr_t)((uintptr_t)addr +
2276                     ptob(pos1 - position));
2277 
2278                 (void) SEGOP_LOCKOP(seg, range_start, size, attr, MC_UNLOCK,
2279                     (ulong_t *)NULL, (size_t)NULL);
2280                 pos1 = pos2;
2281         }
2282 }
2283 
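as_segunlock() above turns a lock bitmap back into page runs via bt_range().
A small standalone model of that set-bit run scan (this mirrors the idiom,
not the kernel's bt_range() itself; BT_TEST here is a local macro):

#include <stdio.h>

#define BT_TEST(map, i) (((map)[(i) / 64] >> ((i) % 64)) & 1)

/*
 * Find the next maximal run of set bits at or after *pos1, below end.
 * On success, *pos1 is the first set bit and *pos2 is one past the last.
 */
static int
bit_range(const unsigned long long *map, size_t *pos1, size_t *pos2,
    size_t end)
{
        size_t i = *pos1;

        while (i < end && !BT_TEST(map, i))     /* skip clear bits */
                i++;
        if (i >= end)
                return (0);
        *pos1 = i;
        while (i < end && BT_TEST(map, i))      /* extend over set bits */
                i++;
        *pos2 = i;
        return (1);
}

int
main(void)
{
        unsigned long long map[1] = { 0x0F0F }; /* bits 0-3 and 8-11 set */
        size_t p1 = 0, p2;

        while (bit_range(map, &p1, &p2, 16)) {
                printf("run: [%zu, %zu)\n", p1, p2);    /* [0,4) then [8,12) */
                p1 = p2;
        }
        return (0);
}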
2284 static void
2285 as_unlockerr(struct as *as, int attr, ulong_t *mlock_map,
2286         caddr_t raddr, size_t rsize)
2287 {
2288         struct seg *seg = as_segat(as, raddr);
2289         size_t ssize;
2290 
2291         while (rsize != 0) {
2292                 if (raddr >= seg->s_base + seg->s_size)
2293                         seg = AS_SEGNEXT(as, seg);
2294 
2295                 if ((raddr + rsize) > (seg->s_base + seg->s_size))
2296                         ssize = seg->s_base + seg->s_size - raddr;
2297                 else
2298                         ssize = rsize;


2354                 if (seg == NULL) {
2355                         AS_LOCK_EXIT(as, &as->a_lock);
2356                         return (0);
2357                 }
2358 
2359                 do {
2360                         raddr = (caddr_t)((uintptr_t)seg->s_base &
2361                             (uintptr_t)PAGEMASK);
2362                         rlen += (((uintptr_t)(seg->s_base + seg->s_size) +
2363                             PAGEOFFSET) & PAGEMASK) - (uintptr_t)raddr;
2364                 } while ((seg = AS_SEGNEXT(as, seg)) != NULL);
2365 
2366                 mlock_size = BT_BITOUL(btopr(rlen));
2367                 if ((mlock_map = (ulong_t *)kmem_zalloc(mlock_size *
2368                     sizeof (ulong_t), KM_NOSLEEP)) == NULL) {
2369                                 AS_LOCK_EXIT(as, &as->a_lock);
2370                                 return (EAGAIN);
2371                 }
2372 
2373                 for (seg = AS_SEGFIRST(as); seg; seg = AS_SEGNEXT(as, seg)) {
2374                         error = SEGOP_LOCKOP(seg, seg->s_base,
2375                             seg->s_size, attr, MC_LOCK, mlock_map, pos);
2376                         if (error != 0)
2377                                 break;
2378                         pos += seg_pages(seg);
2379                 }
2380 
2381                 if (error) {
2382                         for (seg = AS_SEGFIRST(as); seg != NULL;
2383                             seg = AS_SEGNEXT(as, seg)) {
2384 
2385                                 raddr = (caddr_t)((uintptr_t)seg->s_base &
2386                                     (uintptr_t)PAGEMASK);
2387                                 npages = seg_pages(seg);
2388                                 as_segunlock(seg, raddr, attr, mlock_map,
2389                                     idx, npages);
2390                                 idx += npages;
2391                         }
2392                 }
2393 
2394                 kmem_free(mlock_map, mlock_size * sizeof (ulong_t));
2395                 AS_LOCK_EXIT(as, &as->a_lock);
2396                 goto lockerr;
2397         } else if (func == MC_UNLOCKAS) {
2398                 mutex_enter(&as->a_contents);
2399                 AS_CLRPGLCK(as);
2400                 mutex_exit(&as->a_contents);
2401 
2402                 for (seg = AS_SEGFIRST(as); seg; seg = AS_SEGNEXT(as, seg)) {
2403                         error = SEGOP_LOCKOP(seg, seg->s_base,
2404                             seg->s_size, attr, MC_UNLOCK, NULL, 0);
2405                         if (error != 0)
2406                                 break;
2407                 }
2408 
2409                 AS_LOCK_EXIT(as, &as->a_lock);
2410                 goto lockerr;
2411         }
2412 
2413         /*
2414          * Normalize addresses and sizes.
2415          */
2416         initraddr = raddr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
2417         initrsize = rsize = (((size_t)(addr + size) + PAGEOFFSET) & PAGEMASK) -
2418             (size_t)raddr;
2419 
2420         if (raddr + rsize < raddr) {         /* check for wraparound */
2421                 AS_LOCK_EXIT(as, &as->a_lock);
2422                 return (ENOMEM);
2423         }
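A quick worked case of the wraparound check, with 32-bit pointers for
readability:

/*
 * raddr = 0xFFFFF000, rsize = 0x2000:
 *      raddr + rsize = 0x1000   (wrapped past the top of memory)
 * 0x1000 < 0xFFFFF000, so the request fails with ENOMEM instead of
 * silently operating on a range that wraps the address space.
 */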


2461                                 }
2462                                 AS_LOCK_EXIT(as, &as->a_lock);
2463                                 return (ENOMEM);
2464                         }
2465                 }
2466                 if ((raddr + rsize) > (seg->s_base + seg->s_size))
2467                         ssize = seg->s_base + seg->s_size - raddr;
2468                 else
2469                         ssize = rsize;
2470 
2471                 /*
2472                  * Dispatch on specific function.
2473                  */
2474                 switch (func) {
2475 
2476                 /*
2477                  * Synchronize cached data from mappings with backing
2478                  * objects.
2479                  */
2480                 case MC_SYNC:
2481                         if (error = SEGOP_SYNC(seg, raddr, ssize,
2482                             attr, (uint_t)arg)) {
2483                                 AS_LOCK_EXIT(as, &as->a_lock);
2484                                 return (error);
2485                         }
2486                         break;
2487 
2488                 /*
2489                  * Lock pages in memory.
2490                  */
2491                 case MC_LOCK:
2492                         if (error = SEGOP_LOCKOP(seg, raddr, ssize,
2493                             attr, func, mlock_map, pos)) {
2494                                 as_unlockerr(as, attr, mlock_map, initraddr,
2495                                     initrsize - rsize + ssize);
2496                                 kmem_free(mlock_map, mlock_size *
2497                                     sizeof (ulong_t));
2498                                 AS_LOCK_EXIT(as, &as->a_lock);
2499                                 goto lockerr;
2500                         }
2501                         break;
2502 
2503                 /*
2504                  * Unlock mapped pages.
2505                  */
2506                 case MC_UNLOCK:
2507                         (void) SEGOP_LOCKOP(seg, raddr, ssize, attr, func,
2508                             (ulong_t *)NULL, (size_t)NULL);
2509                         break;
2510 
2511                 /*
2512                  * Store VM advise for mapped pages in segment layer.
2513                  */
2514                 case MC_ADVISE:
2515                         error = SEGOP_ADVISE(seg, raddr, ssize, (uint_t)arg);
2516 
2517                         /*
2518                          * Check for regular errors and special retry error
2519                          */
2520                         if (error) {
2521                                 if (error == IE_RETRY) {
2522                                         /*
2523                                          * Need to acquire writers lock, so
2524                                          * have to drop readers lock and start
2525                                          * all over again
2526                                          */
2527                                         AS_LOCK_EXIT(as, &as->a_lock);
2528                                         goto retry;
2529                                 } else if (error == IE_REATTACH) {
2530                                         /*
2531                                          * Find segment for current address
2532                                          * because current segment just got
2533                                          * split or concatenated
2534                                          */
2535                                         seg = as_segat(as, raddr);
2536                                         if (seg == NULL) {
2537                                                 AS_LOCK_EXIT(as, &as->a_lock);
2538                                                 return (ENOMEM);
2539                                         }
2540                                 } else {
2541                                         /*
2542                                          * Regular error
2543                                          */
2544                                         AS_LOCK_EXIT(as, &as->a_lock);
2545                                         return (error);
2546                                 }
2547                         }
2548                         break;
2549 
2550                 case MC_INHERIT_ZERO:
2551                         if (seg->s_ops->inherit == NULL) {
2552                                 error = ENOTSUP;
2553                         } else {
2554                                 error = SEGOP_INHERIT(seg, raddr, ssize,
2555                                     SEGP_INH_ZERO);
2556                         }
2557                         if (error != 0) {
2558                                 AS_LOCK_EXIT(as, &as->a_lock);
2559                                 return (error);
2560                         }
2561                         break;
2562 
2563                 /*
2564                  * Can't happen.
2565                  */
2566                 default:
2567                         panic("as_ctl: bad operation %d", func);
2568                         /*NOTREACHED*/
2569                 }
2570 
2571                 rsize -= ssize;
2572                 raddr += ssize;
2573         }
2574 


2649         /*
2650          * Count the number of segments covered by the range we are about to
2651          * lock. The segment count is used to size the shadow list we return
2652          * back to the caller.
2653          */
2654         for (; size != 0; size -= ssize, addr += ssize) {
2655                 if (addr >= seg->s_base + seg->s_size) {
2656 
2657                         seg = AS_SEGNEXT(as, seg);
2658                         if (seg == NULL || addr != seg->s_base) {
2659                                 AS_LOCK_EXIT(as, &as->a_lock);
2660                                 return (EFAULT);
2661                         }
2662                         /*
2663                          * Do a quick check if subsequent segments
2664                          * will most likely support pagelock.
2665                          */
2666                         if (seg->s_ops == &segvn_ops) {
2667                                 vnode_t *vp;
2668 
2669                                 if (SEGOP_GETVP(seg, addr, &vp) != 0 ||
2670                                     vp != NULL) {
2671                                         AS_LOCK_EXIT(as, &as->a_lock);
2672                                         goto slow;
2673                                 }
2674                         } else if (seg->s_ops != &segspt_shmops) {
2675                                 AS_LOCK_EXIT(as, &as->a_lock);
2676                                 goto slow;
2677                         }
2678                         segcnt++;
2679                 }
2680                 if (addr + size > seg->s_base + seg->s_size) {
2681                         ssize = seg->s_base + seg->s_size - addr;
2682                 } else {
2683                         ssize = size;
2684                 }
2685         }
2686         ASSERT(segcnt > 1);
2687 
2688         plist = kmem_zalloc((npages + segcnt) * sizeof (page_t *), KM_SLEEP);
2689 
2690         addr = sv_addr;
2691         size = sv_size;
2692         seg = sv_seg;
2693 
2694         for (cnt = 0, pl_off = 0; size != 0; size -= ssize, addr += ssize) {
2695                 if (addr >= seg->s_base + seg->s_size) {
2696                         seg = AS_SEGNEXT(as, seg);
2697                         ASSERT(seg != NULL && addr == seg->s_base);
2698                         cnt++;
2699                         ASSERT(cnt < segcnt);
2700                 }
2701                 if (addr + size > seg->s_base + seg->s_size) {
2702                         ssize = seg->s_base + seg->s_size - addr;
2703                 } else {
2704                         ssize = size;
2705                 }
2706                 pl = &plist[npages + cnt];
2707                 error = SEGOP_PAGELOCK(seg, addr, ssize, (page_t ***)pl,
2708                     L_PAGELOCK, rw);
2709                 if (error) {
2710                         break;
2711                 }
2712                 ASSERT(plist[npages + cnt] != NULL);
2713                 ASSERT(pl_off + btop(ssize) <= npages);
2714                 bcopy(plist[npages + cnt], &plist[pl_off],
2715                     btop(ssize) * sizeof (page_t *));
2716                 pl_off += btop(ssize);
2717         }
2718 
2719         if (size == 0) {
2720                 AS_LOCK_EXIT(as, &as->a_lock);
2721                 ASSERT(cnt == segcnt - 1);
2722                 *ppp = plist;
2723                 return (0);
2724         }
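The shadow list built in this routine has a two-part layout worth spelling
out (an annotation of the code above, not new kernel text):

/*
 *      plist[0 .. npages - 1]                 flattened page_t pointers
 *                                             for the whole range, in
 *                                             address order (filled by
 *                                             the bcopy above)
 *      plist[npages .. npages + segcnt - 1]   one shadow-list handle per
 *                                             segment, exactly as each
 *                                             SEGOP_PAGELOCK returned it
 *
 * The flattened front half is what the caller consumes; the per-segment
 * handles in the tail let each segment be handed its own list back with
 * L_PAGEUNLOCK, as the failure path below does.
 */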
2725 
2726         /*
2727          * one of the pagelock calls failed.  The error type is in the error variable.


2730          * back to the caller.
2731          */
2732 
2733         eaddr = addr;
2734         seg = sv_seg;
2735 
2736         for (cnt = 0, addr = sv_addr; addr < eaddr; addr += ssize) {
2737                 if (addr >= seg->s_base + seg->s_size) {
2738                         seg = AS_SEGNEXT(as, seg);
2739                         ASSERT(seg != NULL && addr == seg->s_base);
2740                         cnt++;
2741                         ASSERT(cnt < segcnt);
2742                 }
2743                 if (eaddr > seg->s_base + seg->s_size) {
2744                         ssize = seg->s_base + seg->s_size - addr;
2745                 } else {
2746                         ssize = eaddr - addr;
2747                 }
2748                 pl = &plist[npages + cnt];
2749                 ASSERT(*pl != NULL);
2750                 (void) SEGOP_PAGELOCK(seg, addr, ssize, (page_t ***)pl,
2751                     L_PAGEUNLOCK, rw);
2752         }
2753 
2754         AS_LOCK_EXIT(as, &as->a_lock);
2755 
2756         kmem_free(plist, (npages + segcnt) * sizeof (page_t *));
2757 
2758         if (error != ENOTSUP && error != EFAULT) {
2759                 return (error);
2760         }
2761 
2762 slow:
2763         /*
2764          * If we are here because pagelock failed due to the need to cow-fault
2765          * in the pages we want to lock, F_SOFTLOCK will do this job, and the
2766          * next as_pagelock() call for this address range will hopefully
2767          * succeed.
2768          */
2769         fault_err = as_fault(as->a_hat, as, sv_addr, sv_size, F_SOFTLOCK, rw);
2770         if (fault_err != 0) {


2805         seg = as_segat(as, raddr);
2806         if (seg == NULL) {
2807                 AS_LOCK_EXIT(as, &as->a_lock);
2808                 return (EFAULT);
2809         }
2810         ASSERT(raddr >= seg->s_base && raddr < seg->s_base + seg->s_size);
2811         if (raddr + rsize > seg->s_base + seg->s_size) {
2812                 return (as_pagelock_segs(as, seg, ppp, raddr, rsize, rw));
2813         }
2814         if (raddr + rsize <= raddr) {
2815                 AS_LOCK_EXIT(as, &as->a_lock);
2816                 return (EFAULT);
2817         }
2818 
2819         TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEG_LOCK_START,
2820             "seg_lock_1_start: raddr %p rsize %ld", raddr, rsize);
2821 
2822         /*
2823          * try to lock pages and pass back shadow list
2824          */
2825         err = SEGOP_PAGELOCK(seg, raddr, rsize, ppp, L_PAGELOCK, rw);
2826 
2827         TRACE_0(TR_FAC_PHYSIO, TR_PHYSIO_SEG_LOCK_END, "seg_lock_1_end");
2828 
2829         AS_LOCK_EXIT(as, &as->a_lock);
2830 
2831         if (err == 0 || (err != ENOTSUP && err != EFAULT)) {
2832                 return (err);
2833         }
2834 
2835         /*
2836          * Use F_SOFTLOCK to lock the pages because pagelock failed either due
2837          * to no pagelock support for this segment or because pages need to
2838          * be cow-faulted in.  If a fault is needed, F_SOFTLOCK will do this
2839          * job for this as_pagelock() call, and in the next as_pagelock() call
2840          * for the same address range the pagelock call will hopefully succeed.
2841          */
2842         fault_err = as_fault(as->a_hat, as, addr, size, F_SOFTLOCK, rw);
2843         if (fault_err != 0) {
2844                 return (fc_decode(fault_err));
2845         }


2868         ASSERT(seg != NULL);
2869         ASSERT(addr >= seg->s_base && addr < seg->s_base + seg->s_size);
2870         ASSERT(addr + size > seg->s_base + seg->s_size);
2871         ASSERT(IS_P2ALIGNED(size, PAGESIZE));
2872         ASSERT(IS_P2ALIGNED(addr, PAGESIZE));
2873         ASSERT(plist != NULL);
2874 
2875         for (cnt = 0; addr < eaddr; addr += ssize) {
2876                 if (addr >= seg->s_base + seg->s_size) {
2877                         seg = AS_SEGNEXT(as, seg);
2878                         ASSERT(seg != NULL && addr == seg->s_base);
2879                         cnt++;
2880                 }
2881                 if (eaddr > seg->s_base + seg->s_size) {
2882                         ssize = seg->s_base + seg->s_size - addr;
2883                 } else {
2884                         ssize = eaddr - addr;
2885                 }
2886                 pl = &plist[npages + cnt];
2887                 ASSERT(*pl != NULL);
2888                 (void) SEGOP_PAGELOCK(seg, addr, ssize, (page_t ***)pl,
2889                     L_PAGEUNLOCK, rw);
2890         }
2891         ASSERT(cnt > 0);
2892         AS_LOCK_EXIT(as, &as->a_lock);
2893 
2894         cnt++;
2895         kmem_free(plist, (npages + cnt) * sizeof (page_t *));
2896 }
2897 
2898 /*
2899  * unlock pages in a given address range
2900  */
2901 void
2902 as_pageunlock(struct as *as, struct page **pp, caddr_t addr, size_t size,
2903     enum seg_rw rw)
2904 {
2905         struct seg *seg;
2906         size_t rsize;
2907         caddr_t raddr;
2908 


2914          * falling back to as_fault
2915          */
2916         if (pp == NULL) {
2917                 (void) as_fault(as->a_hat, as, addr, size, F_SOFTUNLOCK, rw);
2918                 return;
2919         }
2920 
2921         raddr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
2922         rsize = (((size_t)(addr + size) + PAGEOFFSET) & PAGEMASK) -
2923             (size_t)raddr;
2924 
2925         AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
2926         seg = as_segat(as, raddr);
2927         ASSERT(seg != NULL);
2928 
2929         TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEG_UNLOCK_START,
2930             "seg_unlock_start: raddr %p rsize %ld", raddr, rsize);
2931 
2932         ASSERT(raddr >= seg->s_base && raddr < seg->s_base + seg->s_size);
2933         if (raddr + rsize <= seg->s_base + seg->s_size) {
2934                 SEGOP_PAGELOCK(seg, raddr, rsize, &pp, L_PAGEUNLOCK, rw);
2935         } else {
2936                 as_pageunlock_segs(as, seg, raddr, rsize, pp, rw);
2937                 return;
2938         }
2939         AS_LOCK_EXIT(as, &as->a_lock);
2940         TRACE_0(TR_FAC_PHYSIO, TR_PHYSIO_AS_UNLOCK_END, "as_pageunlock_end");
2941 }
2942 
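Taken together, as_pagelock() and as_pageunlock() bracket a driver's direct
access to user pages. A hedged usage sketch in kernel context (error handling
trimmed; the wrapper name, uaddr, and len are illustrative, and the shadow
list may legitimately come back NULL when the segment driver took the
F_SOFTLOCK fallback):

static int
lock_do_io_unlock(struct as *as, caddr_t uaddr, size_t len)
{
        struct page **pplist;
        int err;

        /* Pin the user buffer and get the shadow page list back. */
        err = as_pagelock(as, &pplist, uaddr, len, S_WRITE);
        if (err != 0)
                return (err);

        /* ... perform the I/O against the locked pages ... */

        /* Drop the locks; a NULL list takes the F_SOFTUNLOCK path inside. */
        as_pageunlock(as, pplist, uaddr, len, S_WRITE);
        return (0);
}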
2943 int
2944 as_setpagesize(struct as *as, caddr_t addr, size_t size, uint_t szc,
2945     boolean_t wait)
2946 {
2947         struct seg *seg;
2948         size_t ssize;
2949         caddr_t raddr;                  /* rounded down addr */
2950         size_t rsize;                   /* rounded up size */
2951         int error = 0;
2952         size_t pgsz = page_get_pagesize(szc);
2953 
2954 setpgsz_top:


2969                 as_setwatch(as);
2970                 AS_LOCK_EXIT(as, &as->a_lock);
2971                 return (ENOMEM);
2972         }
2973 
2974         for (; rsize != 0; rsize -= ssize, raddr += ssize) {
2975                 if (raddr >= seg->s_base + seg->s_size) {
2976                         seg = AS_SEGNEXT(as, seg);
2977                         if (seg == NULL || raddr != seg->s_base) {
2978                                 error = ENOMEM;
2979                                 break;
2980                         }
2981                 }
2982                 if ((raddr + rsize) > (seg->s_base + seg->s_size)) {
2983                         ssize = seg->s_base + seg->s_size - raddr;
2984                 } else {
2985                         ssize = rsize;
2986                 }
2987 
2988 retry:
2989                 error = SEGOP_SETPAGESIZE(seg, raddr, ssize, szc);
2990 
2991                 if (error == IE_NOMEM) {
2992                         error = EAGAIN;
2993                         break;
2994                 }
2995 
2996                 if (error == IE_RETRY) {
2997                         AS_LOCK_EXIT(as, &as->a_lock);
2998                         goto setpgsz_top;
2999                 }
3000 
3001                 if (error == ENOTSUP) {
3002                         error = EINVAL;
3003                         break;
3004                 }
3005 
3006                 if (wait && (error == EAGAIN)) {
3007                         /*
3008                          * Memory is currently locked.  It must be unlocked
3009                          * before this operation can succeed through a retry.


3048                                  * number of retries without sleeping should
3049                                  * be very small. See segvn_reclaim() for
3050                                  * more comments.
3051                                  */
3052                                 AS_CLRNOUNMAPWAIT(as);
3053                                 mutex_exit(&as->a_contents);
3054                                 goto retry;
3055                         }
3056                         mutex_exit(&as->a_contents);
3057                         goto setpgsz_top;
3058                 } else if (error != 0) {
3059                         break;
3060                 }
3061         }
3062         as_setwatch(as);
3063         AS_LOCK_EXIT(as, &as->a_lock);
3064         return (error);
3065 }
3066 
3067 /*
3068  * as_iset3_default_lpsize() just calls SEGOP_SETPAGESIZE() on all segments
3069  * in its chunk where s_szc is less than the szc we want to set.
3070  */
3071 static int
3072 as_iset3_default_lpsize(struct as *as, caddr_t raddr, size_t rsize, uint_t szc,
3073     int *retry)
3074 {
3075         struct seg *seg;
3076         size_t ssize;
3077         int error;
3078 
3079         ASSERT(AS_WRITE_HELD(as, &as->a_lock));
3080 
3081         seg = as_segat(as, raddr);
3082         if (seg == NULL) {
3083                 panic("as_iset3_default_lpsize: no seg");
3084         }
3085 
3086         for (; rsize != 0; rsize -= ssize, raddr += ssize) {
3087                 if (raddr >= seg->s_base + seg->s_size) {
3088                         seg = AS_SEGNEXT(as, seg);
3089                         if (seg == NULL || raddr != seg->s_base) {
3090                                 panic("as_iset3_default_lpsize: as changed");
3091                         }
3092                 }
3093                 if ((raddr + rsize) > (seg->s_base + seg->s_size)) {
3094                         ssize = seg->s_base + seg->s_size - raddr;
3095                 } else {
3096                         ssize = rsize;
3097                 }
3098 
3099                 if (szc > seg->s_szc) {
3100                         error = SEGOP_SETPAGESIZE(seg, raddr, ssize, szc);
3101                         /* Only retry on EINVAL segments that have no vnode. */
3102                         if (error == EINVAL) {
3103                                 vnode_t *vp = NULL;
3104                                 if ((SEGOP_GETTYPE(seg, raddr) & MAP_SHARED) &&
3105                                     (SEGOP_GETVP(seg, raddr, &vp) != 0 ||
3106                                     vp == NULL)) {
3107                                         *retry = 1;
3108                                 } else {
3109                                         *retry = 0;
3110                                 }
3111                         }
3112                         if (error) {
3113                                 return (error);
3114                         }
3115                 }
3116         }
3117         return (0);
3118 }
3119 
3120 /*
3121  * as_iset2_default_lpsize() calls as_iset3_default_lpsize() to set the
3122  * pagesize on each segment in its range, but if any fails with EINVAL,
3123  * then it reduces the pagesizes to the next size in the bitmap and
3124  * retries as_iset3_default_lpsize(). The reason why the code retries
3125  * smaller allowed sizes on EINVAL is because (a) the anon offset may not

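The "reduces the pagesizes to the next size in the bitmap" step described
above can be modeled as a downward scan over a bitmap of permitted size
codes; a small illustrative helper (szcvec and the function name are
assumptions, not the kernel's own identifiers):

/*
 * Return the largest permitted size code strictly below szc, where each
 * set bit in szcvec marks a supported code; 0 (the base page size) is
 * always permitted.
 */
static int
next_smaller_szc(unsigned int szcvec, int szc)
{
        while (--szc > 0) {
                if (szcvec & (1U << szc))
                        return (szc);
        }
        return (0);
}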

3328         AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
3329 again:
3330         error = 0;
3331 
3332         raddr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
3333         rsize = (((size_t)(addr + size) + PAGEOFFSET) & PAGEMASK) -
3334             (size_t)raddr;
3335 
3336         if (raddr + rsize < raddr) {         /* check for wraparound */
3337                 AS_LOCK_EXIT(as, &as->a_lock);
3338                 return (ENOMEM);
3339         }
3340         as_clearwatchprot(as, raddr, rsize);
3341         seg = as_segat(as, raddr);
3342         if (seg == NULL) {
3343                 as_setwatch(as);
3344                 AS_LOCK_EXIT(as, &as->a_lock);
3345                 return (ENOMEM);
3346         }
3347         if (seg->s_ops == &segvn_ops) {
3348                 rtype = SEGOP_GETTYPE(seg, addr);
3349                 rflags = rtype & (MAP_TEXT | MAP_INITDATA);
3350                 rtype = rtype & (MAP_SHARED | MAP_PRIVATE);
3351                 segvn = 1;
3352         } else {
3353                 segvn = 0;
3354         }
3355         setaddr = raddr;
3356         setsize = 0;
3357 
3358         for (; rsize != 0; rsize -= ssize, raddr += ssize, setsize += ssize) {
3359                 if (raddr >= (seg->s_base + seg->s_size)) {
3360                         seg = AS_SEGNEXT(as, seg);
3361                         if (seg == NULL || raddr != seg->s_base) {
3362                                 error = ENOMEM;
3363                                 break;
3364                         }
3365                         if (seg->s_ops == &segvn_ops) {
3366                                 stype = SEGOP_GETTYPE(seg, raddr);
3367                                 sflags = stype & (MAP_TEXT | MAP_INITDATA);
3368                                 stype &= (MAP_SHARED | MAP_PRIVATE);
3369                                 if (segvn && (rflags != sflags ||
3370                                     rtype != stype)) {
3371                                         /*
3372                                          * The next segment is also segvn but
3373                                          * has different flags and/or type.
3374                                          */
3375                                         ASSERT(setsize != 0);
3376                                         error = as_iset_default_lpsize(as,
3377                                             setaddr, setsize, rflags, rtype);
3378                                         if (error) {
3379                                                 break;
3380                                         }
3381                                         rflags = sflags;
3382                                         rtype = stype;
3383                                         setaddr = raddr;
3384                                         setsize = 0;
3385                                 } else if (!segvn) {
3386                                         rflags = sflags;


3460 as_setwatch(struct as *as)
3461 {
3462         struct watched_page *pwp;
3463         struct seg *seg;
3464         caddr_t vaddr;
3465         uint_t prot;
3466         int  err, retrycnt;
3467 
3468         if (avl_numnodes(&as->a_wpage) == 0)
3469                 return;
3470 
3471         ASSERT(AS_WRITE_HELD(as, &as->a_lock));
3472 
3473         for (pwp = avl_first(&as->a_wpage); pwp != NULL;
3474             pwp = AVL_NEXT(&as->a_wpage, pwp)) {
3475                 retrycnt = 0;
3476         retry:
3477                 vaddr = pwp->wp_vaddr;
3478                 if (pwp->wp_oprot != 0 ||    /* already set up */
3479                     (seg = as_segat(as, vaddr)) == NULL ||
3480                     SEGOP_GETPROT(seg, vaddr, 0, &prot) != 0)
3481                         continue;
3482 
3483                 pwp->wp_oprot = prot;
3484                 if (pwp->wp_read)
3485                         prot &= ~(PROT_READ|PROT_WRITE|PROT_EXEC);
3486                 if (pwp->wp_write)
3487                         prot &= ~PROT_WRITE;
3488                 if (pwp->wp_exec)
3489                         prot &= ~(PROT_READ|PROT_WRITE|PROT_EXEC);
3490                 if (!(pwp->wp_flags & WP_NOWATCH) && prot != pwp->wp_oprot) {
3491                         err = SEGOP_SETPROT(seg, vaddr, PAGESIZE, prot);
3492                         if (err == IE_RETRY) {
3493                                 pwp->wp_oprot = 0;
3494                                 ASSERT(retrycnt == 0);
3495                                 retrycnt++;
3496                                 goto retry;
3497                         }
3498                 }
3499                 pwp->wp_prot = prot;
3500         }
3501 }
3502 
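The protection stripping in as_setwatch() is easiest to see with a concrete
case (illustrative values):

/*
 * Suppose a page's original protections are PROT_READ|PROT_WRITE:
 *
 *      wp_write only:  prot loses PROT_WRITE, keeps PROT_READ -- reads
 *                      proceed normally, the first write faults and the
 *                      watchpoint fires.
 *      wp_read set:    prot loses PROT_READ|PROT_WRITE|PROT_EXEC -- any
 *                      access faults, since a read watchpoint must see
 *                      every touch of the page.
 *
 * wp_oprot remembers the original protections so as_clearwatch() can
 * restore them.
 */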
3503 /*
3504  * Clear all of the watched pages in the address space.
3505  */
3506 void
3507 as_clearwatch(struct as *as)
3508 {
3509         struct watched_page *pwp;
3510         struct seg *seg;
3511         caddr_t vaddr;
3512         uint_t prot;
3513         int err, retrycnt;
3514 
3515         if (avl_numnodes(&as->a_wpage) == 0)
3516                 return;
3517 
3518         ASSERT(AS_WRITE_HELD(as, &as->a_lock));
3519 
3520         for (pwp = avl_first(&as->a_wpage); pwp != NULL;
3521             pwp = AVL_NEXT(&as->a_wpage, pwp)) {
3522                 retrycnt = 0;
3523         retry:
3524                 vaddr = pwp->wp_vaddr;
3525                 if (pwp->wp_oprot == 0 ||    /* not set up */
3526                     (seg = as_segat(as, vaddr)) == NULL)
3527                         continue;
3528 
3529                 if ((prot = pwp->wp_oprot) != pwp->wp_prot) {
3530                         err = SEGOP_SETPROT(seg, vaddr, PAGESIZE, prot);
3531                         if (err == IE_RETRY) {
3532                                 ASSERT(retrycnt == 0);
3533                                 retrycnt++;
3534                                 goto retry;
3535                         }
3536                 }
3537                 pwp->wp_oprot = 0;
3538                 pwp->wp_prot = 0;
3539         }
3540 }
3541 
3542 /*
3543  * Force a new setup for all the watched pages in the range.
3544  */
3545 static void
3546 as_setwatchprot(struct as *as, caddr_t addr, size_t size, uint_t prot)
3547 {
3548         struct watched_page *pwp;
3549         struct watched_page tpw;
3550         caddr_t eaddr = addr + size;


3564                 pwp = avl_nearest(&as->a_wpage, where, AVL_AFTER);
3565 
3566         while (pwp != NULL && pwp->wp_vaddr < eaddr) {
3567                 retrycnt = 0;
3568                 vaddr = pwp->wp_vaddr;
3569 
3570                 wprot = prot;
3571                 if (pwp->wp_read)
3572                         wprot &= ~(PROT_READ|PROT_WRITE|PROT_EXEC);
3573                 if (pwp->wp_write)
3574                         wprot &= ~PROT_WRITE;
3575                 if (pwp->wp_exec)
3576                         wprot &= ~(PROT_READ|PROT_WRITE|PROT_EXEC);
3577                 if (!(pwp->wp_flags & WP_NOWATCH) && wprot != pwp->wp_oprot) {
3578                 retry:
3579                         seg = as_segat(as, vaddr);
3580                         if (seg == NULL) {
3581                                 panic("as_setwatchprot: no seg");
3582                                 /*NOTREACHED*/
3583                         }
3584                         err = SEGOP_SETPROT(seg, vaddr, PAGESIZE, wprot);
3585                         if (err == IE_RETRY) {
3586                                 ASSERT(retrycnt == 0);
3587                                 retrycnt++;
3588                                 goto retry;
3589                         }
3590                 }
3591                 pwp->wp_oprot = prot;
3592                 pwp->wp_prot = wprot;
3593 
3594                 pwp = AVL_NEXT(&as->a_wpage, pwp);
3595         }
3596 }
3597 
3598 /*
3599  * Clear all of the watched pages in the range.
3600  */
3601 static void
3602 as_clearwatchprot(struct as *as, caddr_t addr, size_t size)
3603 {
3604         caddr_t eaddr = addr + size;


3611 
3612         if (avl_numnodes(&as->a_wpage) == 0)
3613                 return;
3614 
3615         tpw.wp_vaddr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
3616         if ((pwp = avl_find(&as->a_wpage, &tpw, &where)) == NULL)
3617                 pwp = avl_nearest(&as->a_wpage, where, AVL_AFTER);
3618 
3619         ASSERT(AS_WRITE_HELD(as, &as->a_lock));
3620 
3621         while (pwp != NULL && pwp->wp_vaddr < eaddr) {
3622 
3623                 if ((prot = pwp->wp_oprot) != 0) {
3624                         retrycnt = 0;
3625 
3626                         if (prot != pwp->wp_prot) {
3627                         retry:
3628                                 seg = as_segat(as, pwp->wp_vaddr);
3629                                 if (seg == NULL)
3630                                         continue;
3631                                 err = SEGOP_SETPROT(seg, pwp->wp_vaddr,
3632                                     PAGESIZE, prot);
3633                                 if (err == IE_RETRY) {
3634                                         ASSERT(retrycnt == 0);
3635                                         retrycnt++;
3636                                         goto retry;
3637 
3638                                 }
3639                         }
3640                         pwp->wp_oprot = 0;
3641                         pwp->wp_prot = 0;
3642                 }
3643 
3644                 pwp = AVL_NEXT(&as->a_wpage, pwp);
3645         }
3646 }
3647 
3648 void
3649 as_signal_proc(struct as *as, k_siginfo_t *siginfo)
3650 {
3651         struct proc *p;


3668 int
3669 as_getmemid(struct as *as, caddr_t addr, memid_t *memidp)
3670 {
3671         struct seg      *seg;
3672         int             sts;
3673 
3674         AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
3675         seg = as_segat(as, addr);
3676         if (seg == NULL) {
3677                 AS_LOCK_EXIT(as, &as->a_lock);
3678                 return (EFAULT);
3679         }
3680         /*
3681          * catch old drivers which may not support getmemid
3682          */
3683         if (seg->s_ops->getmemid == NULL) {
3684                 AS_LOCK_EXIT(as, &as->a_lock);
3685                 return (ENODEV);
3686         }
3687 
3688         sts = SEGOP_GETMEMID(seg, addr, memidp);
3689 
3690         AS_LOCK_EXIT(as, &as->a_lock);
3691         return (sts);
3692 }

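The getmemid NULL check above is the standard defensive-dispatch pattern for
ops vectors that grew new entry points over time: probe the slot before
calling through it. A standalone sketch (all types and names are modeled,
not the kernel's):

#include <stddef.h>
#include <errno.h>

struct seg_ops_model {
        int (*getmemid)(void *seg, void *addr, void *idp);
};

struct seg_model {
        struct seg_ops_model *s_ops;
};

int
getmemid_dispatch(struct seg_model *seg, void *addr, void *idp)
{
        if (seg->s_ops->getmemid == NULL)
                return (ENODEV);        /* old driver: entry point absent */
        return (seg->s_ops->getmemid(seg, addr, idp));
}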

 699         while (as->a_callbacks && as_do_callbacks(as, AS_ALL_EVENT, 0, 0))
 700                 ;
 701 
 702         /* This will prevent new XHATs from attaching to as */
 703         if (!called)
 704                 AS_SETBUSY(as);
 705         mutex_exit(&as->a_contents);
 706         AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
 707 
 708         if (!called) {
 709                 called = 1;
 710                 hat_free_start(hat);
 711                 if (as->a_xhat != NULL)
 712                         xhat_free_start_all(as);
 713         }
 714         for (seg = AS_SEGFIRST(as); seg != NULL; seg = next) {
 715                 int err;
 716 
 717                 next = AS_SEGNEXT(as, seg);
 718 retry:
 719                 err = segop_unmap(seg, seg->s_base, seg->s_size);
 720                 if (err == EAGAIN) {
 721                         mutex_enter(&as->a_contents);
 722                         if (as->a_callbacks) {
 723                                 AS_LOCK_EXIT(as, &as->a_lock);
 724                         } else if (!AS_ISNOUNMAPWAIT(as)) {
 725                                 /*
 726                                  * Memory is currently locked. Wait for a
 727                                  * cv_signal that it has been unlocked, then
 728                                  * try the operation again.
 729                                  */
 730                                 if (AS_ISUNMAPWAIT(as) == 0)
 731                                         cv_broadcast(&as->a_cv);
 732                                 AS_SETUNMAPWAIT(as);
 733                                 AS_LOCK_EXIT(as, &as->a_lock);
 734                                 while (AS_ISUNMAPWAIT(as))
 735                                         cv_wait(&as->a_cv, &as->a_contents);
 736                         } else {
 737                                 /*
 738                                  * We may have raced with
 739                                  * segvn_reclaim()/segspt_reclaim(). In this


 805         (void) hat_dup(as->a_hat, newas->a_hat, NULL, 0, HAT_DUP_SRD);
 806 
 807         for (seg = AS_SEGFIRST(as); seg != NULL; seg = AS_SEGNEXT(as, seg)) {
 808 
 809                 if (seg->s_flags & S_PURGE) {
 810                         purgesize += seg->s_size;
 811                         continue;
 812                 }
 813 
 814                 newseg = seg_alloc(newas, seg->s_base, seg->s_size);
 815                 if (newseg == NULL) {
 816                         AS_LOCK_EXIT(newas, &newas->a_lock);
 817                         as_setwatch(as);
 818                         mutex_enter(&as->a_contents);
 819                         AS_CLRBUSY(as);
 820                         mutex_exit(&as->a_contents);
 821                         AS_LOCK_EXIT(as, &as->a_lock);
 822                         as_free(newas);
 823                         return (-1);
 824                 }
 825                 if ((error = segop_dup(seg, newseg)) != 0) {
 826                         /*
 827                          * We call seg_free() on the new seg
 828                          * because the segment is not set up
 829                          * completely; i.e. it has no ops.
 830                          */
 831                         as_setwatch(as);
 832                         mutex_enter(&as->a_contents);
 833                         AS_CLRBUSY(as);
 834                         mutex_exit(&as->a_contents);
 835                         AS_LOCK_EXIT(as, &as->a_lock);
 836                         seg_free(newseg);
 837                         AS_LOCK_EXIT(newas, &newas->a_lock);
 838                         as_free(newas);
 839                         return (error);
 840                 }
 841                 newas->a_size += seg->s_size;
 842         }
 843         newas->a_resvsize = as->a_resvsize - purgesize;
 844 
 845         error = hat_dup(as->a_hat, newas->a_hat, NULL, 0, HAT_DUP_ALL);
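
Worked example for the reservation accounting above (illustrative
figures): S_PURGE segments are skipped rather than duplicated, so if
the parent holds a_resvsize == 96M including one 32M S_PURGE segment,
the child is created with a_resvsize == 64M.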


1001                 }
1002                 if (raddr + rsize > seg->s_base + seg->s_size)
1003                         ssize = seg->s_base + seg->s_size - raddr;
1004                 else
1005                         ssize = rsize;
1006 
1007                 if (!is_xhat || (seg->s_ops != &segdev_ops)) {
1008 
1009                         if (is_xhat && avl_numnodes(&as->a_wpage) != 0 &&
1010                             pr_is_watchpage_as(raddr, rw, as)) {
1011                                 /*
1012                                  * Handle watch pages.  If we're faulting on a
1013                                  * watched page from an X-hat, we have to
1014                                  * restore the original permissions while we
1015                                  * handle the fault.
1016                                  */
1017                                 as_clearwatch(as);
1018                                 holding_wpage = 1;
1019                         }
1020 
1021                         res = segop_fault(hat, seg, raddr, ssize, type, rw);
1022 
1023                         /* Restore watchpoints */
1024                         if (holding_wpage) {
1025                                 as_setwatch(as);
1026                                 holding_wpage = 0;
1027                         }
1028 
1029                         if (res != 0)
1030                                 break;
1031                 } else {
1032                         /* XHAT does not support seg_dev */
1033                         res = FC_NOSUPPORT;
1034                         break;
1035                 }
1036         }
1037 
1038         /*
1039          * If we were SOFTLOCKing and encountered a failure,
1040          * we must SOFTUNLOCK the range we have already locked. (Maybe we
1041          * should just panic if we are SOFTLOCKing or even SOFTUNLOCKing
1042          * right here...)
1043          */
1044         if (res != 0 && type == F_SOFTLOCK) {
1045                 for (seg = segsav; addrsav < raddr; addrsav += ssize) {
1046                         if (addrsav >= seg->s_base + seg->s_size)
1047                                 seg = AS_SEGNEXT(as, seg);
1048                         ASSERT(seg != NULL);
1049                         /*
1050                          * Now call the fault routine again to perform the
1051                          * unlock using S_OTHER instead of the rw variable
1052                          * since we never got a chance to touch the pages.
1053                          */
1054                         if (raddr > seg->s_base + seg->s_size)
1055                                 ssize = seg->s_base + seg->s_size - addrsav;
1056                         else
1057                                 ssize = raddr - addrsav;
1058                         (void) segop_fault(hat, seg, addrsav, ssize,
1059                             F_SOFTUNLOCK, S_OTHER);
1060                 }
1061         }
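
        /*
         * Caller-side pairing that this unwind protects (hedged
         * sketch; the slow path of as_pagelock() below uses exactly
         * this idiom):
         *
         *      if (as_fault(as->a_hat, as, addr, len,
         *          F_SOFTLOCK, rw) == 0) {
         *              ... access the now-locked pages ...
         *              (void) as_fault(as->a_hat, as, addr, len,
         *                  F_SOFTUNLOCK, rw);
         *      }
         */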
1062         if (as_lock_held)
1063                 AS_LOCK_EXIT(as, &as->a_lock);
1064         if ((lwp != NULL) && (!is_xhat))
1065                 lwp->lwp_nostop--;
1066 
1067         /*
1068          * If the lower levels returned EDEADLK for a fault,
1069          * it means that we should retry the fault.  Let's also wait
1070          * a bit to let the deadlock-causing condition clear.
1071          * This is part of a gross hack to work around a design flaw
1072          * in the ufs/sds logging code and should go away when the
1073          * logging code is re-designed to fix the problem. See bug
1074          * 4125102 for details of the problem.
1075          */
1076         if (FC_ERRNO(res) == EDEADLK) {
1077                 delay(deadlk_wait);
1078                 res = 0;


1108         rsize = (((size_t)(addr + size) + PAGEOFFSET) & PAGEMASK) -
1109             (size_t)raddr;
1110 
1111         AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
1112         seg = as_segat(as, raddr);
1113         if (seg == NULL) {
1114                 AS_LOCK_EXIT(as, &as->a_lock);
1115                 if (lwp != NULL)
1116                         lwp->lwp_nostop--;
1117                 return (FC_NOMAP);
1118         }
1119 
1120         for (; rsize != 0; rsize -= PAGESIZE, raddr += PAGESIZE) {
1121                 if (raddr >= seg->s_base + seg->s_size) {
1122                         seg = AS_SEGNEXT(as, seg);
1123                         if (seg == NULL || raddr != seg->s_base) {
1124                                 res = FC_NOMAP;
1125                                 break;
1126                         }
1127                 }
1128                 res = segop_faulta(seg, raddr);
1129                 if (res != 0)
1130                         break;
1131         }
1132         AS_LOCK_EXIT(as, &as->a_lock);
1133         if (lwp != NULL)
1134                 lwp->lwp_nostop--;
1135         /*
1136          * If the lower levels returned EDEADLK for a fault,
1137          * it means that we should retry the fault.  Let's also wait
1138          * a bit to let the deadlock-causing condition clear.
1139          * This is part of a gross hack to work around a design flaw
1140          * in the ufs/sds logging code and should go away when the
1141          * logging code is re-designed to fix the problem. See bug
1142          * 4125102 for details of the problem.
1143          */
1144         if (FC_ERRNO(res) == EDEADLK) {
1145                 delay(deadlk_wait);
1146                 res = 0;
1147                 goto retry;
1148         }


1198         seg = as_segat(as, raddr);
1199         if (seg == NULL) {
1200                 as_setwatch(as);
1201                 AS_LOCK_EXIT(as, &as->a_lock);
1202                 return (ENOMEM);
1203         }
1204 
1205         for (; rsize != 0; rsize -= ssize, raddr += ssize) {
1206                 if (raddr >= seg->s_base + seg->s_size) {
1207                         seg = AS_SEGNEXT(as, seg);
1208                         if (seg == NULL || raddr != seg->s_base) {
1209                                 error = ENOMEM;
1210                                 break;
1211                         }
1212                 }
1213                 if ((raddr + rsize) > (seg->s_base + seg->s_size))
1214                         ssize = seg->s_base + seg->s_size - raddr;
1215                 else
1216                         ssize = rsize;
1217 retry:
1218                 error = segop_setprot(seg, raddr, ssize, prot);
1219 
1220                 if (error == IE_NOMEM) {
1221                         error = EAGAIN;
1222                         break;
1223                 }
1224 
1225                 if (error == IE_RETRY) {
1226                         AS_LOCK_EXIT(as, &as->a_lock);
1227                         writer = 1;
1228                         goto setprot_top;
1229                 }
1230 
1231                 if (error == EAGAIN) {
1232                         /*
1233                          * Make sure we have a_lock as writer.
1234                          */
1235                         if (writer == 0) {
1236                                 AS_LOCK_EXIT(as, &as->a_lock);
1237                                 writer = 1;
1238                                 goto setprot_top;

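Reading note for the IE_* handling above (an assumption drawn from
this function and from as_setpagesize() below): the IE_* values are
internal "intermediate error" codes that never escape to callers --
IE_NOMEM is translated to EAGAIN, and IE_RETRY means "drop the readers
lock and restart the operation with a_lock held as writer".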

1349         seg = as_segat(as, raddr);
1350         if (seg == NULL) {
1351                 as_setwatch(as);
1352                 AS_LOCK_EXIT(as, &as->a_lock);
1353                 return (ENOMEM);
1354         }
1355 
1356         for (; rsize != 0; rsize -= ssize, raddr += ssize) {
1357                 if (raddr >= seg->s_base + seg->s_size) {
1358                         seg = AS_SEGNEXT(as, seg);
1359                         if (seg == NULL || raddr != seg->s_base) {
1360                                 error = ENOMEM;
1361                                 break;
1362                         }
1363                 }
1364                 if ((raddr + rsize) > (seg->s_base + seg->s_size))
1365                         ssize = seg->s_base + seg->s_size - raddr;
1366                 else
1367                         ssize = rsize;
1368 
1369                 error = segop_checkprot(seg, raddr, ssize, prot);
1370                 if (error != 0)
1371                         break;
1372         }
1373         as_setwatch(as);
1374         AS_LOCK_EXIT(as, &as->a_lock);
1375         return (error);
1376 }
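
A hedged usage sketch for as_checkprot() (callers are outside this
patch; names illustrative):

        /*
         *      if (as_checkprot(as, addr, len, PROT_READ) == 0)
         *              ... every page in [addr, addr + len) permits
         *                  the requested access ...
         */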
1377 
1378 int
1379 as_unmap(struct as *as, caddr_t addr, size_t size)
1380 {
1381         struct seg *seg, *seg_next;
1382         struct as_callback *cb;
1383         caddr_t raddr, eaddr;
1384         size_t ssize, rsize = 0;
1385         int err;
1386 
1387 top:
1388         raddr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
1389         eaddr = (caddr_t)(((uintptr_t)(addr + size) + PAGEOFFSET) &


1415                 else
1416                         ssize = eaddr - raddr;
1417 
1418                 /*
1419                  * Save next segment pointer since seg can be
1420                  * destroyed during the segment unmap operation.
1421                  */
1422                 seg_next = AS_SEGNEXT(as, seg);
1423 
1424                 /*
1425                  * We didn't count /dev/null mappings, so ignore them here.
1426                  * We'll handle MAP_NORESERVE cases in segvn_unmap(). (Again,
1427                  * we have to do this check here while we have seg.)
1428                  */
1429                 rsize = 0;
1430                 if (!SEG_IS_DEVNULL_MAPPING(seg) &&
1431                     !SEG_IS_PARTIAL_RESV(seg))
1432                         rsize = ssize;
1433 
1434 retry:
1435                 err = segop_unmap(seg, raddr, ssize);
1436                 if (err == EAGAIN) {
1437                         /*
1438                          * Memory is currently locked.  It must be unlocked
1439                          * before this operation can succeed through a retry.
1440                          * The possible reasons for locked memory and
1441                          * corresponding strategies for unlocking are:
1442                          * (1) Normal I/O
1443                          *      wait for a signal that the I/O operation
1444                          *      has completed and the memory is unlocked.
1445                          * (2) Asynchronous I/O
1446                          *      The aio subsystem does not unlock pages when
1447                          *      the I/O is completed. Those pages are unlocked
1448                          *      when the application calls aiowait/aioerror.
1449                          *      So, to prevent blocking forever, cv_broadcast()
1450                          *      is done to wake up aio_cleanup_thread.
1451                          *      Subsequently, segvn_reclaim will be called, and
1452                          *      that will do AS_CLRUNMAPWAIT() and wake us up.
1453                          * (3) Long term page locking:
1454                          *      Drivers intending to have pages locked for a
1455                          *      period considerably longer than for normal I/O


1853  */
1854 void
1855 as_purge(struct as *as)
1856 {
1857         struct seg *seg;
1858         struct seg *next_seg;
1859 
1860         /*
1861          * The setting of AS_NEEDSPURGE is protected by as_rangelock(), so
1862          * there is no need to grab the a_contents mutex for this check.
1863          */
1864         if ((as->a_flags & AS_NEEDSPURGE) == 0)
1865                 return;
1866 
1867         AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
1868         next_seg = NULL;
1869         seg = AS_SEGFIRST(as);
1870         while (seg != NULL) {
1871                 next_seg = AS_SEGNEXT(as, seg);
1872                 if (seg->s_flags & S_PURGE)
1873                         (void) segop_unmap(seg, seg->s_base, seg->s_size);
1874                 seg = next_seg;
1875         }
1876         AS_LOCK_EXIT(as, &as->a_lock);
1877 
1878         mutex_enter(&as->a_contents);
1879         as->a_flags &= ~AS_NEEDSPURGE;
1880         mutex_exit(&as->a_contents);
1881 }
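
        /*
         * Note (illustrative): unmapping a whole segment frees "seg"
         * itself, which is why the loop above caches AS_SEGNEXT() in
         * next_seg first -- the same idiom as_free() uses:
         *
         *      next = AS_SEGNEXT(as, seg);
         *      err = segop_unmap(seg, seg->s_base, seg->s_size);
         */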
1882 
1883 /*
1884  * Find a hole within [*basep, *basep + *lenp), which contains a mappable
1885  * range of addresses at least "minlen" long, where the base of the range is
1886  * at "off" phase from an "align" boundary and there is space for a
1887  * "redzone"-sized redzone on either side of the range.  Thus,
1888  * if align was 4M and off was 16k, the user wants a hole which will start
1889  * 16k into a 4M page.
1890  *
1891  * If flags specifies AH_HI, the hole will have the highest possible address
1892  * in the range.  We use the as->a_lastgap field to figure out where to
1893  * start looking for a gap.

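A worked sketch of the align/off arithmetic (phase_up() is a
hypothetical helper shown only for illustration; the kernel's
P2PHASEUP() macro covers the same math):

        /*
         * Smallest address >= lo with (result % align) == off,
         * assuming align is a power of two and lo >= off.
         */
        static uintptr_t
        phase_up(uintptr_t lo, size_t align, size_t off)
        {
                return (((lo - off + align - 1) & ~(align - 1)) + off);
        }

With align = 4M and off = 16K this yields the next address sitting 16K
past a 4M boundary; the hole must additionally leave "redzone" bytes
free on each side of [base, base + minlen).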

2184 
2185         mutex_enter(&as->a_contents);
2186         AS_CLRBUSY(as);
2187         mutex_exit(&as->a_contents);
2188 
2189         /*
2190          * Call the swapout routines of all segments in the address
2191          * space to do the actual work, accumulating the amount of
2192          * space reclaimed.
2193          */
2194         for (seg = AS_SEGFIRST(as); seg != NULL; seg = AS_SEGNEXT(as, seg)) {
2195                 struct seg_ops *ov = seg->s_ops;
2196 
2197                 /*
2198                  * We have to check to see if the seg has
2199                  * an ops vector because the seg may have
2200                  * been in the middle of being set up when
2201                  * the process was picked for swapout.
2202                  */
2203                 if ((ov != NULL) && (ov->swapout != NULL))
2204                         swpcnt += segop_swapout(seg);
2205         }
2206         AS_LOCK_EXIT(as, &as->a_lock);
2207         return (swpcnt);
2208 }
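
        /*
         * Note (illustrative): swapout is an optional seg_ops entry,
         * hence the explicit ov->swapout != NULL probe before the
         * wrapper is invoked; as_getmemid() at the bottom of this
         * file performs the same NULL check for s_ops->getmemid.
         */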
2209 
2210 /*
2211  * Determine whether data from the mappings in interval [addr, addr + size)
2212  * are in the primary memory (core) cache.
2213  */
2214 int
2215 as_incore(struct as *as, caddr_t addr,
2216     size_t size, char *vec, size_t *sizep)
2217 {
2218         struct seg *seg;
2219         size_t ssize;
2220         caddr_t raddr;          /* rounded down addr */
2221         size_t rsize;           /* rounded up size */
2222         size_t isize;                   /* iteration size */
2223         int error = 0;          /* result, assume success */
2224 


2232 
2233         AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
2234         seg = as_segat(as, raddr);
2235         if (seg == NULL) {
2236                 AS_LOCK_EXIT(as, &as->a_lock);
2237                 return (-1);
2238         }
2239 
2240         for (; rsize != 0; rsize -= ssize, raddr += ssize) {
2241                 if (raddr >= seg->s_base + seg->s_size) {
2242                         seg = AS_SEGNEXT(as, seg);
2243                         if (seg == NULL || raddr != seg->s_base) {
2244                                 error = -1;
2245                                 break;
2246                         }
2247                 }
2248                 if ((raddr + rsize) > (seg->s_base + seg->s_size))
2249                         ssize = seg->s_base + seg->s_size - raddr;
2250                 else
2251                         ssize = rsize;
2252                 *sizep += isize = segop_incore(seg, raddr, ssize, vec);
2253                 if (isize != ssize) {
2254                         error = -1;
2255                         break;
2256                 }
2257                 vec += btopr(ssize);
2258         }
2259         AS_LOCK_EXIT(as, &as->a_lock);
2260         return (error);
2261 }
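
A hedged usage sketch (a mincore(2)-style caller; names are
illustrative):

        /*
         *      char vec[btopr(len)];   one status byte per page
         *      size_t incore = 0;
         *
         *      if (as_incore(as, addr, len, vec, &incore) == 0)
         *              ... the whole range is resident ...
         *      else
         *              ... "incore" covers only the leading resident
         *                  portion of the range ...
         */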
2262 
2263 static void
2264 as_segunlock(struct seg *seg, caddr_t addr, int attr,
2265         ulong_t *bitmap, size_t position, size_t npages)
2266 {
2267         caddr_t range_start;
2268         size_t  pos1 = position;
2269         size_t  pos2;
2270         size_t  size;
2271         size_t  end_pos = npages + position;
2272 
2273         while (bt_range(bitmap, &pos1, &pos2, end_pos)) {
2274                 size = ptob((pos2 - pos1));
2275                 range_start = (caddr_t)((uintptr_t)addr +
2276                     ptob(pos1 - position));
2277 
2278                 (void) segop_lockop(seg, range_start, size, attr, MC_UNLOCK,
2279                     (ulong_t *)NULL, (size_t)NULL);
2280                 pos1 = pos2;
2281         }
2282 }
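
        /*
         * Worked example (illustrative; assumes bt_range() reports a
         * run of set bits as [pos1, pos2) with pos2 exclusive): with
         * bits {3,4,5,9} set and position == 0, the loop above sees
         * pos1=3/pos2=6, then pos1=9/pos2=10 -- one 3-page unlock and
         * one 1-page unlock at the matching offsets from "addr".
         */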
2283 
2284 static void
2285 as_unlockerr(struct as *as, int attr, ulong_t *mlock_map,
2286         caddr_t raddr, size_t rsize)
2287 {
2288         struct seg *seg = as_segat(as, raddr);
2289         size_t ssize;
2290 
2291         while (rsize != 0) {
2292                 if (raddr >= seg->s_base + seg->s_size)
2293                         seg = AS_SEGNEXT(as, seg);
2294 
2295                 if ((raddr + rsize) > (seg->s_base + seg->s_size))
2296                         ssize = seg->s_base + seg->s_size - raddr;
2297                 else
2298                         ssize = rsize;


2354                 if (seg == NULL) {
2355                         AS_LOCK_EXIT(as, &as->a_lock);
2356                         return (0);
2357                 }
2358 
2359                 do {
2360                         raddr = (caddr_t)((uintptr_t)seg->s_base &
2361                             (uintptr_t)PAGEMASK);
2362                         rlen += (((uintptr_t)(seg->s_base + seg->s_size) +
2363                             PAGEOFFSET) & PAGEMASK) - (uintptr_t)raddr;
2364                 } while ((seg = AS_SEGNEXT(as, seg)) != NULL);
2365 
2366                 mlock_size = BT_BITOUL(btopr(rlen));
2367                 if ((mlock_map = (ulong_t *)kmem_zalloc(mlock_size *
2368                     sizeof (ulong_t), KM_NOSLEEP)) == NULL) {
2369                                 AS_LOCK_EXIT(as, &as->a_lock);
2370                                 return (EAGAIN);
2371                 }
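
        /*
         * Sizing arithmetic (illustrative): BT_BITOUL(n) is the
         * number of ulong_t words needed to hold n bits, so locking a
         * 1 GB address space of 4 KB pages takes
         * BT_BITOUL(262144) == 4096 words -- a 32 KB mlock_map on a
         * 64-bit kernel.
         */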
2372 
2373                 for (seg = AS_SEGFIRST(as); seg; seg = AS_SEGNEXT(as, seg)) {
2374                         error = segop_lockop(seg, seg->s_base,
2375                             seg->s_size, attr, MC_LOCK, mlock_map, pos);
2376                         if (error != 0)
2377                                 break;
2378                         pos += seg_pages(seg);
2379                 }
2380 
2381                 if (error) {
2382                         for (seg = AS_SEGFIRST(as); seg != NULL;
2383                             seg = AS_SEGNEXT(as, seg)) {
2384 
2385                                 raddr = (caddr_t)((uintptr_t)seg->s_base &
2386                                     (uintptr_t)PAGEMASK);
2387                                 npages = seg_pages(seg);
2388                                 as_segunlock(seg, raddr, attr, mlock_map,
2389                                     idx, npages);
2390                                 idx += npages;
2391                         }
2392                 }
2393 
2394                 kmem_free(mlock_map, mlock_size * sizeof (ulong_t));
2395                 AS_LOCK_EXIT(as, &as->a_lock);
2396                 goto lockerr;
2397         } else if (func == MC_UNLOCKAS) {
2398                 mutex_enter(&as->a_contents);
2399                 AS_CLRPGLCK(as);
2400                 mutex_exit(&as->a_contents);
2401 
2402                 for (seg = AS_SEGFIRST(as); seg; seg = AS_SEGNEXT(as, seg)) {
2403                         error = segop_lockop(seg, seg->s_base,
2404                             seg->s_size, attr, MC_UNLOCK, NULL, 0);
2405                         if (error != 0)
2406                                 break;
2407                 }
2408 
2409                 AS_LOCK_EXIT(as, &as->a_lock);
2410                 goto lockerr;
2411         }
2412 
2413         /*
2414          * Normalize addresses and sizes.
2415          */
2416         initraddr = raddr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
2417         initrsize = rsize = (((size_t)(addr + size) + PAGEOFFSET) & PAGEMASK) -
2418             (size_t)raddr;
2419 
2420         if (raddr + rsize < raddr) {         /* check for wraparound */
2421                 AS_LOCK_EXIT(as, &as->a_lock);
2422                 return (ENOMEM);
2423         }


2461                                 }
2462                                 AS_LOCK_EXIT(as, &as->a_lock);
2463                                 return (ENOMEM);
2464                         }
2465                 }
2466                 if ((raddr + rsize) > (seg->s_base + seg->s_size))
2467                         ssize = seg->s_base + seg->s_size - raddr;
2468                 else
2469                         ssize = rsize;
2470 
2471                 /*
2472                  * Dispatch on specific function.
2473                  */
2474                 switch (func) {
2475 
2476                 /*
2477                  * Synchronize cached data from mappings with backing
2478                  * objects.
2479                  */
2480                 case MC_SYNC:
2481                         if (error = segop_sync(seg, raddr, ssize,
2482                             attr, (uint_t)arg)) {
2483                                 AS_LOCK_EXIT(as, &as->a_lock);
2484                                 return (error);
2485                         }
2486                         break;
2487 
2488                 /*
2489                  * Lock pages in memory.
2490                  */
2491                 case MC_LOCK:
2492                         if (error = segop_lockop(seg, raddr, ssize,
2493                             attr, func, mlock_map, pos)) {
2494                                 as_unlockerr(as, attr, mlock_map, initraddr,
2495                                     initrsize - rsize + ssize);
2496                                 kmem_free(mlock_map, mlock_size *
2497                                     sizeof (ulong_t));
2498                                 AS_LOCK_EXIT(as, &as->a_lock);
2499                                 goto lockerr;
2500                         }
2501                         break;
2502 
2503                 /*
2504                  * Unlock mapped pages.
2505                  */
2506                 case MC_UNLOCK:
2507                         (void) segop_lockop(seg, raddr, ssize, attr, func,
2508                             (ulong_t *)NULL, (size_t)NULL);
2509                         break;
2510 
2511                 /*
2512                  * Store VM advise for mapped pages in segment layer.
2513                  */
2514                 case MC_ADVISE:
2515                         error = segop_advise(seg, raddr, ssize, (uint_t)arg);
2516 
2517                         /*
2518                          * Check for regular errors and special retry error
2519                          */
2520                         if (error) {
2521                                 if (error == IE_RETRY) {
2522                                         /*
2523                                          * We need to acquire the writer's
2524                                          * lock, so we have to drop the
2525                                          * reader's lock and start all over.
2526                                          */
2527                                         AS_LOCK_EXIT(as, &as->a_lock);
2528                                         goto retry;
2529                                 } else if (error == IE_REATTACH) {
2530                                         /*
2531                                          * Find segment for current address
2532                                          * because current segment just got
2533                                          * split or concatenated
2534                                          */
2535                                         seg = as_segat(as, raddr);
2536                                         if (seg == NULL) {
2537                                                 AS_LOCK_EXIT(as, &as->a_lock);
2538                                                 return (ENOMEM);
2539                                         }
2540                                 } else {
2541                                         /*
2542                                          * Regular error
2543                                          */
2544                                         AS_LOCK_EXIT(as, &as->a_lock);
2545                                         return (error);
2546                                 }
2547                         }
2548                         break;
2549 
2550                 case MC_INHERIT_ZERO:
2551                         if (seg->s_ops->inherit == NULL) {
2552                                 error = ENOTSUP;
2553                         } else {
2554                                 error = segop_inherit(seg, raddr, ssize,
2555                                     SEGP_INH_ZERO);
2556                         }
2557                         if (error != 0) {
2558                                 AS_LOCK_EXIT(as, &as->a_lock);
2559                                 return (error);
2560                         }
2561                         break;
2562 
2563                 /*
2564                  * Can't happen.
2565                  */
2566                 default:
2567                         panic("as_ctl: bad operation %d", func);
2568                         /*NOTREACHED*/
2569                 }
2570 
2571                 rsize -= ssize;
2572                 raddr += ssize;
2573         }
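
        /*
         * Context sketch (an assumption about callers outside this
         * patch): memcntl(2) reaches this switch with, e.g.,
         * func == MC_SYNC and arg == MS_ASYNC, or func == MC_ADVISE
         * and arg == MADV_WILLNEED; each loop iteration forwards at
         * most one segment's worth of the range to the matching
         * segop.
         */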
2574 


2649         /*
2650          * Count the number of segments covered by the range we are about to
2651          * lock. The segment count is used to size the shadow list we return
2652          * to the caller.
2653          */
2654         for (; size != 0; size -= ssize, addr += ssize) {
2655                 if (addr >= seg->s_base + seg->s_size) {
2656 
2657                         seg = AS_SEGNEXT(as, seg);
2658                         if (seg == NULL || addr != seg->s_base) {
2659                                 AS_LOCK_EXIT(as, &as->a_lock);
2660                                 return (EFAULT);
2661                         }
2662                         /*
2663                          * Do a quick check whether subsequent segments
2664                          * will most likely support pagelock.
2665                          */
2666                         if (seg->s_ops == &segvn_ops) {
2667                                 vnode_t *vp;
2668 
2669                                 if (segop_getvp(seg, addr, &vp) != 0 ||
2670                                     vp != NULL) {
2671                                         AS_LOCK_EXIT(as, &as->a_lock);
2672                                         goto slow;
2673                                 }
2674                         } else if (seg->s_ops != &segspt_shmops) {
2675                                 AS_LOCK_EXIT(as, &as->a_lock);
2676                                 goto slow;
2677                         }
2678                         segcnt++;
2679                 }
2680                 if (addr + size > seg->s_base + seg->s_size) {
2681                         ssize = seg->s_base + seg->s_size - addr;
2682                 } else {
2683                         ssize = size;
2684                 }
2685         }
2686         ASSERT(segcnt > 1);
2687 
2688         plist = kmem_zalloc((npages + segcnt) * sizeof (page_t *), KM_SLEEP);
2689 
2690         addr = sv_addr;
2691         size = sv_size;
2692         seg = sv_seg;
2693 
2694         for (cnt = 0, pl_off = 0; size != 0; size -= ssize, addr += ssize) {
2695                 if (addr >= seg->s_base + seg->s_size) {
2696                         seg = AS_SEGNEXT(as, seg);
2697                         ASSERT(seg != NULL && addr == seg->s_base);
2698                         cnt++;
2699                         ASSERT(cnt < segcnt);
2700                 }
2701                 if (addr + size > seg->s_base + seg->s_size) {
2702                         ssize = seg->s_base + seg->s_size - addr;
2703                 } else {
2704                         ssize = size;
2705                 }
2706                 pl = &plist[npages + cnt];
2707                 error = segop_pagelock(seg, addr, ssize, (page_t ***)pl,
2708                     L_PAGELOCK, rw);
2709                 if (error) {
2710                         break;
2711                 }
2712                 ASSERT(plist[npages + cnt] != NULL);
2713                 ASSERT(pl_off + btop(ssize) <= npages);
2714                 bcopy(plist[npages + cnt], &plist[pl_off],
2715                     btop(ssize) * sizeof (page_t *));
2716                 pl_off += btop(ssize);
2717         }
2718 
2719         if (size == 0) {
2720                 AS_LOCK_EXIT(as, &as->a_lock);
2721                 ASSERT(cnt == segcnt - 1);
2722                 *ppp = plist;
2723                 return (0);
2724         }
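
        /*
         * Shadow-list layout as built above (illustrative diagram):
         *
         *      plist[0 .. npages-1]             flattened page_t
         *                                       pointers for the caller
         *      plist[npages .. npages+segcnt-1] per-segment sublist
         *                                       handles, kept for the
         *                                       L_PAGEUNLOCK pass
         */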
2725 
2726         /*
2727          * One of the pagelock calls failed; the error type is in the error variable.


2730          * back to the caller.
2731          */
2732 
2733         eaddr = addr;
2734         seg = sv_seg;
2735 
2736         for (cnt = 0, addr = sv_addr; addr < eaddr; addr += ssize) {
2737                 if (addr >= seg->s_base + seg->s_size) {
2738                         seg = AS_SEGNEXT(as, seg);
2739                         ASSERT(seg != NULL && addr == seg->s_base);
2740                         cnt++;
2741                         ASSERT(cnt < segcnt);
2742                 }
2743                 if (eaddr > seg->s_base + seg->s_size) {
2744                         ssize = seg->s_base + seg->s_size - addr;
2745                 } else {
2746                         ssize = eaddr - addr;
2747                 }
2748                 pl = &plist[npages + cnt];
2749                 ASSERT(*pl != NULL);
2750                 (void) segop_pagelock(seg, addr, ssize, (page_t ***)pl,
2751                     L_PAGEUNLOCK, rw);
2752         }
2753 
2754         AS_LOCK_EXIT(as, &as->a_lock);
2755 
2756         kmem_free(plist, (npages + segcnt) * sizeof (page_t *));
2757 
2758         if (error != ENOTSUP && error != EFAULT) {
2759                 return (error);
2760         }
2761 
2762 slow:
2763         /*
2764          * If we are here because pagelock failed due to the need to cow-fault
2765          * in the pages we want to lock, F_SOFTLOCK will do this job, and in
2766          * the next as_pagelock() call for this address range pagelock will
2767          * hopefully succeed.
2768          */
2769         fault_err = as_fault(as->a_hat, as, sv_addr, sv_size, F_SOFTLOCK, rw);
2770         if (fault_err != 0) {


2805         seg = as_segat(as, raddr);
2806         if (seg == NULL) {
2807                 AS_LOCK_EXIT(as, &as->a_lock);
2808                 return (EFAULT);
2809         }
2810         ASSERT(raddr >= seg->s_base && raddr < seg->s_base + seg->s_size);
2811         if (raddr + rsize > seg->s_base + seg->s_size) {
2812                 return (as_pagelock_segs(as, seg, ppp, raddr, rsize, rw));
2813         }
2814         if (raddr + rsize <= raddr) {
2815                 AS_LOCK_EXIT(as, &as->a_lock);
2816                 return (EFAULT);
2817         }
2818 
2819         TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEG_LOCK_START,
2820             "seg_lock_1_start: raddr %p rsize %ld", raddr, rsize);
2821 
2822         /*
2823          * try to lock pages and pass back shadow list
2824          */
2825         err = segop_pagelock(seg, raddr, rsize, ppp, L_PAGELOCK, rw);
2826 
2827         TRACE_0(TR_FAC_PHYSIO, TR_PHYSIO_SEG_LOCK_END, "seg_lock_1_end");
2828 
2829         AS_LOCK_EXIT(as, &as->a_lock);
2830 
2831         if (err == 0 || (err != ENOTSUP && err != EFAULT)) {
2832                 return (err);
2833         }
2834 
2835         /*
2836          * Use F_SOFTLOCK to lock the pages because pagelock failed either
2837          * because this segment has no pagelock support or because pages need
2838          * to be cow-faulted in. If a fault is needed, F_SOFTLOCK will do this
2839          * job for this as_pagelock() call, and in the next as_pagelock() call
2840          * for the same address range the pagelock call will hopefully succeed.
2841          */
2842         fault_err = as_fault(as->a_hat, as, addr, size, F_SOFTLOCK, rw);
2843         if (fault_err != 0) {
2844                 return (fc_decode(fault_err));
2845         }


2868         ASSERT(seg != NULL);
2869         ASSERT(addr >= seg->s_base && addr < seg->s_base + seg->s_size);
2870         ASSERT(addr + size > seg->s_base + seg->s_size);
2871         ASSERT(IS_P2ALIGNED(size, PAGESIZE));
2872         ASSERT(IS_P2ALIGNED(addr, PAGESIZE));
2873         ASSERT(plist != NULL);
2874 
2875         for (cnt = 0; addr < eaddr; addr += ssize) {
2876                 if (addr >= seg->s_base + seg->s_size) {
2877                         seg = AS_SEGNEXT(as, seg);
2878                         ASSERT(seg != NULL && addr == seg->s_base);
2879                         cnt++;
2880                 }
2881                 if (eaddr > seg->s_base + seg->s_size) {
2882                         ssize = seg->s_base + seg->s_size - addr;
2883                 } else {
2884                         ssize = eaddr - addr;
2885                 }
2886                 pl = &plist[npages + cnt];
2887                 ASSERT(*pl != NULL);
2888                 (void) segop_pagelock(seg, addr, ssize, (page_t ***)pl,
2889                     L_PAGEUNLOCK, rw);
2890         }
2891         ASSERT(cnt > 0);
2892         AS_LOCK_EXIT(as, &as->a_lock);
2893 
2894         cnt++;
2895         kmem_free(plist, (npages + cnt) * sizeof (page_t *));
2896 }
2897 
2898 /*
2899  * unlock pages in a given address range
2900  */
2901 void
2902 as_pageunlock(struct as *as, struct page **pp, caddr_t addr, size_t size,
2903     enum seg_rw rw)
2904 {
2905         struct seg *seg;
2906         size_t rsize;
2907         caddr_t raddr;
2908 


2914          * falling back to as_fault
2915          */
2916         if (pp == NULL) {
2917                 (void) as_fault(as->a_hat, as, addr, size, F_SOFTUNLOCK, rw);
2918                 return;
2919         }
2920 
2921         raddr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
2922         rsize = (((size_t)(addr + size) + PAGEOFFSET) & PAGEMASK) -
2923             (size_t)raddr;
2924 
2925         AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
2926         seg = as_segat(as, raddr);
2927         ASSERT(seg != NULL);
2928 
2929         TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEG_UNLOCK_START,
2930             "seg_unlock_start: raddr %p rsize %ld", raddr, rsize);
2931 
2932         ASSERT(raddr >= seg->s_base && raddr < seg->s_base + seg->s_size);
2933         if (raddr + rsize <= seg->s_base + seg->s_size) {
2934                 (void) segop_pagelock(seg, raddr, rsize, &pp, L_PAGEUNLOCK, rw);
2935         } else {
2936                 as_pageunlock_segs(as, seg, raddr, rsize, pp, rw);
2937                 return;
2938         }
2939         AS_LOCK_EXIT(as, &as->a_lock);
2940         TRACE_0(TR_FAC_PHYSIO, TR_PHYSIO_AS_UNLOCK_END, "as_pageunlock_end");
2941 }
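
A hedged sketch of the lock/unlock pairing from a physio-style
consumer (illustrative names):

        /*
         *      struct page **pplist;
         *
         *      if (as_pagelock(as, &pplist, uaddr, len, S_WRITE) == 0) {
         *              ... DMA into the locked pages ...
         *              as_pageunlock(as, pplist, uaddr, len, S_WRITE);
         *      }
         */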
2942 
2943 int
2944 as_setpagesize(struct as *as, caddr_t addr, size_t size, uint_t szc,
2945     boolean_t wait)
2946 {
2947         struct seg *seg;
2948         size_t ssize;
2949         caddr_t raddr;                  /* rounded down addr */
2950         size_t rsize;                   /* rounded up size */
2951         int error = 0;
2952         size_t pgsz = page_get_pagesize(szc);
2953 
2954 setpgsz_top:


2969                 as_setwatch(as);
2970                 AS_LOCK_EXIT(as, &as->a_lock);
2971                 return (ENOMEM);
2972         }
2973 
2974         for (; rsize != 0; rsize -= ssize, raddr += ssize) {
2975                 if (raddr >= seg->s_base + seg->s_size) {
2976                         seg = AS_SEGNEXT(as, seg);
2977                         if (seg == NULL || raddr != seg->s_base) {
2978                                 error = ENOMEM;
2979                                 break;
2980                         }
2981                 }
2982                 if ((raddr + rsize) > (seg->s_base + seg->s_size)) {
2983                         ssize = seg->s_base + seg->s_size - raddr;
2984                 } else {
2985                         ssize = rsize;
2986                 }
2987 
2988 retry:
2989                 error = segop_setpagesize(seg, raddr, ssize, szc);
2990 
2991                 if (error == IE_NOMEM) {
2992                         error = EAGAIN;
2993                         break;
2994                 }
2995 
2996                 if (error == IE_RETRY) {
2997                         AS_LOCK_EXIT(as, &as->a_lock);
2998                         goto setpgsz_top;
2999                 }
3000 
3001                 if (error == ENOTSUP) {
3002                         error = EINVAL;
3003                         break;
3004                 }
3005 
3006                 if (wait && (error == EAGAIN)) {
3007                         /*
3008                          * Memory is currently locked.  It must be unlocked
3009                          * before this operation can succeed through a retry.


3048                                  * number of retries without sleeping should
3049                                  * be very small. See segvn_reclaim() for
3050                                  * more comments.
3051                                  */
3052                                 AS_CLRNOUNMAPWAIT(as);
3053                                 mutex_exit(&as->a_contents);
3054                                 goto retry;
3055                         }
3056                         mutex_exit(&as->a_contents);
3057                         goto setpgsz_top;
3058                 } else if (error != 0) {
3059                         break;
3060                 }
3061         }
3062         as_setwatch(as);
3063         AS_LOCK_EXIT(as, &as->a_lock);
3064         return (error);
3065 }
3066 
3067 /*
3068  * as_iset3_default_lpsize() just calls segop_setpagesize() on all segments
3069  * in its chunk where s_szc is less than the szc we want to set.
3070  */
3071 static int
3072 as_iset3_default_lpsize(struct as *as, caddr_t raddr, size_t rsize, uint_t szc,
3073     int *retry)
3074 {
3075         struct seg *seg;
3076         size_t ssize;
3077         int error;
3078 
3079         ASSERT(AS_WRITE_HELD(as, &as->a_lock));
3080 
3081         seg = as_segat(as, raddr);
3082         if (seg == NULL) {
3083                 panic("as_iset3_default_lpsize: no seg");
3084         }
3085 
3086         for (; rsize != 0; rsize -= ssize, raddr += ssize) {
3087                 if (raddr >= seg->s_base + seg->s_size) {
3088                         seg = AS_SEGNEXT(as, seg);
3089                         if (seg == NULL || raddr != seg->s_base) {
3090                                 panic("as_iset3_default_lpsize: as changed");
3091                         }
3092                 }
3093                 if ((raddr + rsize) > (seg->s_base + seg->s_size)) {
3094                         ssize = seg->s_base + seg->s_size - raddr;
3095                 } else {
3096                         ssize = rsize;
3097                 }
3098 
3099                 if (szc > seg->s_szc) {
3100                         error = segop_setpagesize(seg, raddr, ssize, szc);
3101                         /* Only retry on EINVAL segments that have no vnode. */
3102                         if (error == EINVAL) {
3103                                 vnode_t *vp = NULL;
3104                                 if ((segop_gettype(seg, raddr) & MAP_SHARED) &&
3105                                     (segop_getvp(seg, raddr, &vp) != 0 ||
3106                                     vp == NULL)) {
3107                                         *retry = 1;
3108                                 } else {
3109                                         *retry = 0;
3110                                 }
3111                         }
3112                         if (error) {
3113                                 return (error);
3114                         }
3115                 }
3116         }
3117         return (0);
3118 }
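
        /*
         * Worked example (illustrative; the szc-to-size mapping is
         * platform-dependent): if szc 3 denotes 4M pages, then
         * page_get_pagesize(3) == 4M and the loop above upgrades only
         * segments whose current s_szc is below 3, leaving segments
         * already at a large page size untouched.
         */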
3119 
3120 /*
3121  * as_iset2_default_lpsize() calls as_iset3_default_lpsize() to set the
3122  * pagesize on each segment in its range, but if any fails with EINVAL,
3123  * then it reduces the pagesizes to the next size in the bitmap and
3124  * retries as_iset3_default_lpsize(). The reason why the code retries
3125  * smaller allowed sizes on EINVAL is because (a) the anon offset may not


3328         AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
3329 again:
3330         error = 0;
3331 
3332         raddr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
3333         rsize = (((size_t)(addr + size) + PAGEOFFSET) & PAGEMASK) -
3334             (size_t)raddr;
3335 
3336         if (raddr + rsize < raddr) {         /* check for wraparound */
3337                 AS_LOCK_EXIT(as, &as->a_lock);
3338                 return (ENOMEM);
3339         }
3340         as_clearwatchprot(as, raddr, rsize);
3341         seg = as_segat(as, raddr);
3342         if (seg == NULL) {
3343                 as_setwatch(as);
3344                 AS_LOCK_EXIT(as, &as->a_lock);
3345                 return (ENOMEM);
3346         }
3347         if (seg->s_ops == &segvn_ops) {
3348                 rtype = segop_gettype(seg, addr);
3349                 rflags = rtype & (MAP_TEXT | MAP_INITDATA);
3350                 rtype = rtype & (MAP_SHARED | MAP_PRIVATE);
3351                 segvn = 1;
3352         } else {
3353                 segvn = 0;
3354         }
3355         setaddr = raddr;
3356         setsize = 0;
3357 
3358         for (; rsize != 0; rsize -= ssize, raddr += ssize, setsize += ssize) {
3359                 if (raddr >= (seg->s_base + seg->s_size)) {
3360                         seg = AS_SEGNEXT(as, seg);
3361                         if (seg == NULL || raddr != seg->s_base) {
3362                                 error = ENOMEM;
3363                                 break;
3364                         }
3365                         if (seg->s_ops == &segvn_ops) {
3366                                 stype = segop_gettype(seg, raddr);
3367                                 sflags = stype & (MAP_TEXT | MAP_INITDATA);
3368                                 stype &= (MAP_SHARED | MAP_PRIVATE);
3369                                 if (segvn && (rflags != sflags ||
3370                                     rtype != stype)) {
3371                                         /*
3372                                          * The next segment is also segvn but
3373                                          * has different flags and/or type.
3374                                          */
3375                                         ASSERT(setsize != 0);
3376                                         error = as_iset_default_lpsize(as,
3377                                             setaddr, setsize, rflags, rtype);
3378                                         if (error) {
3379                                                 break;
3380                                         }
3381                                         rflags = sflags;
3382                                         rtype = stype;
3383                                         setaddr = raddr;
3384                                         setsize = 0;
3385                                 } else if (!segvn) {
3386                                         rflags = sflags;


3460 as_setwatch(struct as *as)
3461 {
3462         struct watched_page *pwp;
3463         struct seg *seg;
3464         caddr_t vaddr;
3465         uint_t prot;
3466         int  err, retrycnt;
3467 
3468         if (avl_numnodes(&as->a_wpage) == 0)
3469                 return;
3470 
3471         ASSERT(AS_WRITE_HELD(as, &as->a_lock));
3472 
3473         for (pwp = avl_first(&as->a_wpage); pwp != NULL;
3474             pwp = AVL_NEXT(&as->a_wpage, pwp)) {
3475                 retrycnt = 0;
3476         retry:
3477                 vaddr = pwp->wp_vaddr;
3478                 if (pwp->wp_oprot != 0 ||    /* already set up */
3479                     (seg = as_segat(as, vaddr)) == NULL ||
3480                     segop_getprot(seg, vaddr, 0, &prot) != 0)
3481                         continue;
3482 
3483                 pwp->wp_oprot = prot;
3484                 if (pwp->wp_read)
3485                         prot &= ~(PROT_READ|PROT_WRITE|PROT_EXEC);
3486                 if (pwp->wp_write)
3487                         prot &= ~PROT_WRITE;
3488                 if (pwp->wp_exec)
3489                         prot &= ~(PROT_READ|PROT_WRITE|PROT_EXEC);
3490                 if (!(pwp->wp_flags & WP_NOWATCH) && prot != pwp->wp_oprot) {
3491                         err = segop_setprot(seg, vaddr, PAGESIZE, prot);
3492                         if (err == IE_RETRY) {
3493                                 pwp->wp_oprot = 0;
3494                                 ASSERT(retrycnt == 0);
3495                                 retrycnt++;
3496                                 goto retry;
3497                         }
3498                 }
3499                 pwp->wp_prot = prot;
3500         }
3501 }
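
        /*
         * Effect sketch (illustrative): for a write watchpoint
         * (wp_write set) on a page whose original protection is
         * PROT_READ|PROT_WRITE, the loop above records
         * wp_oprot == PROT_READ|PROT_WRITE and installs
         * wp_prot == PROT_READ.  The first store then faults, /proc
         * reports the watchpoint, and as_clearwatch() temporarily
         * restores wp_oprot so the access can complete.
         */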
3502 
3503 /*
3504  * Clear all of the watched pages in the address space.
3505  */
3506 void
3507 as_clearwatch(struct as *as)
3508 {
3509         struct watched_page *pwp;
3510         struct seg *seg;
3511         caddr_t vaddr;
3512         uint_t prot;
3513         int err, retrycnt;
3514 
3515         if (avl_numnodes(&as->a_wpage) == 0)
3516                 return;
3517 
3518         ASSERT(AS_WRITE_HELD(as, &as->a_lock));
3519 
3520         for (pwp = avl_first(&as->a_wpage); pwp != NULL;
3521             pwp = AVL_NEXT(&as->a_wpage, pwp)) {
3522                 retrycnt = 0;
3523         retry:
3524                 vaddr = pwp->wp_vaddr;
3525                 if (pwp->wp_oprot == 0 ||    /* not set up */
3526                     (seg = as_segat(as, vaddr)) == NULL)
3527                         continue;
3528 
3529                 if ((prot = pwp->wp_oprot) != pwp->wp_prot) {
3530                         err = segop_setprot(seg, vaddr, PAGESIZE, prot);
3531                         if (err == IE_RETRY) {
3532                                 ASSERT(retrycnt == 0);
3533                                 retrycnt++;
3534                                 goto retry;
3535                         }
3536                 }
3537                 pwp->wp_oprot = 0;
3538                 pwp->wp_prot = 0;
3539         }
3540 }
3541 
3542 /*
3543  * Force a new setup for all the watched pages in the range.
3544  */
3545 static void
3546 as_setwatchprot(struct as *as, caddr_t addr, size_t size, uint_t prot)
3547 {
3548         struct watched_page *pwp;
3549         struct watched_page tpw;
3550         caddr_t eaddr = addr + size;


3564                 pwp = avl_nearest(&as->a_wpage, where, AVL_AFTER);
3565 
3566         while (pwp != NULL && pwp->wp_vaddr < eaddr) {
3567                 retrycnt = 0;
3568                 vaddr = pwp->wp_vaddr;
3569 
3570                 wprot = prot;
3571                 if (pwp->wp_read)
3572                         wprot &= ~(PROT_READ|PROT_WRITE|PROT_EXEC);
3573                 if (pwp->wp_write)
3574                         wprot &= ~PROT_WRITE;
3575                 if (pwp->wp_exec)
3576                         wprot &= ~(PROT_READ|PROT_WRITE|PROT_EXEC);
3577                 if (!(pwp->wp_flags & WP_NOWATCH) && wprot != pwp->wp_oprot) {
3578                 retry:
3579                         seg = as_segat(as, vaddr);
3580                         if (seg == NULL) {
3581                                 panic("as_setwatchprot: no seg");
3582                                 /*NOTREACHED*/
3583                         }
3584                         err = segop_setprot(seg, vaddr, PAGESIZE, wprot);
3585                         if (err == IE_RETRY) {
3586                                 ASSERT(retrycnt == 0);
3587                                 retrycnt++;
3588                                 goto retry;
3589                         }
3590                 }
3591                 pwp->wp_oprot = prot;
3592                 pwp->wp_prot = wprot;
3593 
3594                 pwp = AVL_NEXT(&as->a_wpage, pwp);
3595         }
3596 }
3597 
3598 /*
3599  * Clear all of the watched pages in the range.
3600  */
3601 static void
3602 as_clearwatchprot(struct as *as, caddr_t addr, size_t size)
3603 {
3604         caddr_t eaddr = addr + size;


3611 
3612         if (avl_numnodes(&as->a_wpage) == 0)
3613                 return;
3614 
3615         tpw.wp_vaddr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
3616         if ((pwp = avl_find(&as->a_wpage, &tpw, &where)) == NULL)
3617                 pwp = avl_nearest(&as->a_wpage, where, AVL_AFTER);
3618 
3619         ASSERT(AS_WRITE_HELD(as, &as->a_lock));
3620 
3621         while (pwp != NULL && pwp->wp_vaddr < eaddr) {
3622 
3623                 if ((prot = pwp->wp_oprot) != 0) {
3624                         retrycnt = 0;
3625 
3626                         if (prot != pwp->wp_prot) {
3627                         retry:
3628                                 seg = as_segat(as, pwp->wp_vaddr);
3629                                 if (seg == NULL)
3630                                         continue;
3631                                 err = segop_setprot(seg, pwp->wp_vaddr,
3632                                     PAGESIZE, prot);
3633                                 if (err == IE_RETRY) {
3634                                         ASSERT(retrycnt == 0);
3635                                         retrycnt++;
3636                                         goto retry;
3637 
3638                                 }
3639                         }
3640                         pwp->wp_oprot = 0;
3641                         pwp->wp_prot = 0;
3642                 }
3643 
3644                 pwp = AVL_NEXT(&as->a_wpage, pwp);
3645         }
3646 }
3647 
3648 void
3649 as_signal_proc(struct as *as, k_siginfo_t *siginfo)
3650 {
3651         struct proc *p;


3668 int
3669 as_getmemid(struct as *as, caddr_t addr, memid_t *memidp)
3670 {
3671         struct seg      *seg;
3672         int             sts;
3673 
3674         AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
3675         seg = as_segat(as, addr);
3676         if (seg == NULL) {
3677                 AS_LOCK_EXIT(as, &as->a_lock);
3678                 return (EFAULT);
3679         }
3680         /*
3681          * catch old drivers that may not support getmemid
3682          */
3683         if (seg->s_ops->getmemid == NULL) {
3684                 AS_LOCK_EXIT(as, &as->a_lock);
3685                 return (ENODEV);
3686         }
3687 
3688         sts = segop_getmemid(seg, addr, memidp);
3689 
3690         AS_LOCK_EXIT(as, &as->a_lock);
3691         return (sts);
3692 }