Print this page
const-ify segment ops structures
There is no reason to keep the segment ops structures writable.
use NULL getmemid segop as a shorthand for ENODEV
Instead of forcing every segment driver to implement a dummy function that
returns (hopefully) ENODEV, treat a NULL getmemid segop function pointer as
shorthand for "return ENODEV".
seg_inherit_notsup is redundant since segop_inherit checks for NULL properly
patch lower-case-segops
remove whole-process swapping
Long before Unix supported paging, it used process swapping to reclaim
memory.  The code is there and in theory it runs when we get *extremely* low
on memory.  In practice, it never runs since the definition of low-on-memory
is antiquated. (XXX: define what antiquated means)
You can check the number of swapout/swapin events with kstats:
$ kstat -p ::vm:swapin ::vm:swapout
remove xhat
The xhat infrastructure was added to support hardware such as the zulu
graphics card - hardware which had on-board MMUs.  The VM used the xhat code
to keep the CPU's and Zulu's page tables in sync.  Since the only xhat user
was zulu (which is gone), we can safely remove it, simplifying the whole VM
subsystem.
Assorted notes:
- AS_BUSY flag was used solely by xhat

Split Close
Expand all
Collapse all
          --- old/usr/src/uts/common/vm/vm_as.c
          +++ new/usr/src/uts/common/vm/vm_as.c
↓ open down ↓ 51 lines elided ↑ open up ↑
  52   52  #include <sys/sysinfo.h>
  53   53  #include <sys/kmem.h>
  54   54  #include <sys/vnode.h>
  55   55  #include <sys/vmsystm.h>
  56   56  #include <sys/cmn_err.h>
  57   57  #include <sys/debug.h>
  58   58  #include <sys/tnf_probe.h>
  59   59  #include <sys/vtrace.h>
  60   60  
  61   61  #include <vm/hat.h>
  62      -#include <vm/xhat.h>
  63   62  #include <vm/as.h>
  64   63  #include <vm/seg.h>
  65   64  #include <vm/seg_vn.h>
  66   65  #include <vm/seg_dev.h>
  67   66  #include <vm/seg_kmem.h>
  68   67  #include <vm/seg_map.h>
  69   68  #include <vm/seg_spt.h>
  70   69  #include <vm/page.h>
  71   70  
  72   71  clock_t deadlk_wait = 1; /* number of ticks to wait before retrying */
↓ open down ↓ 394 lines elided ↑ open up ↑
 467  466  
 468  467                  /*
 469  468                   * If top of seg is below the requested address, then
 470  469                   * the insertion point is at the end of the linked list,
 471  470                   * and seg points to the tail of the list.  Otherwise,
 472  471                   * the insertion point is immediately before seg.
 473  472                   */
 474  473                  if (base + seg->s_size > addr) {
 475  474                          if (addr >= base || eaddr > base) {
 476  475  #ifdef __sparc
 477      -                                extern struct seg_ops segnf_ops;
      476 +                                extern const struct seg_ops segnf_ops;
 478  477  
 479  478                                  /*
 480  479                                   * no-fault segs must disappear if overlaid.
 481  480                                   * XXX need new segment type so
 482  481                                   * we don't have to check s_ops
 483  482                                   */
 484  483                                  if (seg->s_ops == &segnf_ops) {
 485  484                                          seg_unmap(seg);
 486  485                                          goto again;
 487  486                                  }
↓ open down ↓ 176 lines elided ↑ open up ↑
 664  663          as->a_sizedir           = 0;
 665  664          as->a_userlimit         = (caddr_t)USERLIMIT;
 666  665          as->a_lastgap           = NULL;
 667  666          as->a_lastgaphl         = NULL;
 668  667          as->a_callbacks         = NULL;
 669  668  
 670  669          AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
 671  670          as->a_hat = hat_alloc(as);      /* create hat for default system mmu */
 672  671          AS_LOCK_EXIT(as, &as->a_lock);
 673  672  
 674      -        as->a_xhat = NULL;
 675      -
 676  673          return (as);
 677  674  }
 678  675  
 679  676  /*
 680  677   * Free an address space data structure.
 681  678   * Need to free the hat first and then
 682  679   * all the segments on this as and finally
 683  680   * the space for the as struct itself.
 684  681   */
 685  682  void
 686  683  as_free(struct as *as)
 687  684  {
 688  685          struct hat *hat = as->a_hat;
 689  686          struct seg *seg, *next;
 690      -        int called = 0;
      687 +        boolean_t free_started = B_FALSE;
 691  688  
 692  689  top:
 693  690          /*
 694  691           * Invoke ALL callbacks. as_do_callbacks will do one callback
 695  692           * per call, and not return (-1) until the callback has completed.
 696  693           * When as_do_callbacks returns zero, all callbacks have completed.
 697  694           */
 698  695          mutex_enter(&as->a_contents);
 699  696          while (as->a_callbacks && as_do_callbacks(as, AS_ALL_EVENT, 0, 0))
 700  697                  ;
 701  698  
 702      -        /* This will prevent new XHATs from attaching to as */
 703      -        if (!called)
 704      -                AS_SETBUSY(as);
 705  699          mutex_exit(&as->a_contents);
 706  700          AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
 707  701  
 708      -        if (!called) {
 709      -                called = 1;
      702 +        if (!free_started) {
      703 +                free_started = B_TRUE;
 710  704                  hat_free_start(hat);
 711      -                if (as->a_xhat != NULL)
 712      -                        xhat_free_start_all(as);
 713  705          }
 714  706          for (seg = AS_SEGFIRST(as); seg != NULL; seg = next) {
 715  707                  int err;
 716  708  
 717  709                  next = AS_SEGNEXT(as, seg);
 718  710  retry:
 719      -                err = SEGOP_UNMAP(seg, seg->s_base, seg->s_size);
      711 +                err = segop_unmap(seg, seg->s_base, seg->s_size);
 720  712                  if (err == EAGAIN) {
 721  713                          mutex_enter(&as->a_contents);
 722  714                          if (as->a_callbacks) {
 723  715                                  AS_LOCK_EXIT(as, &as->a_lock);
 724  716                          } else if (!AS_ISNOUNMAPWAIT(as)) {
 725  717                                  /*
 726  718                                   * Memory is currently locked. Wait for a
 727  719                                   * cv_signal that it has been unlocked, then
 728  720                                   * try the operation again.
 729  721                                   */
↓ open down ↓ 22 lines elided ↑ open up ↑
 752  744                          goto top;
 753  745                  } else {
 754  746                          /*
 755  747                           * We do not expect any other error return at this
 756  748                           * time. This is similar to an ASSERT in seg_unmap()
 757  749                           */
 758  750                          ASSERT(err == 0);
 759  751                  }
 760  752          }
 761  753          hat_free_end(hat);
 762      -        if (as->a_xhat != NULL)
 763      -                xhat_free_end_all(as);
 764  754          AS_LOCK_EXIT(as, &as->a_lock);
 765  755  
 766  756          /* /proc stuff */
 767  757          ASSERT(avl_numnodes(&as->a_wpage) == 0);
 768  758          if (as->a_objectdir) {
 769  759                  kmem_free(as->a_objectdir, as->a_sizedir * sizeof (vnode_t *));
 770  760                  as->a_objectdir = NULL;
 771  761                  as->a_sizedir = 0;
 772  762          }
 773  763  
↓ open down ↓ 13 lines elided ↑ open up ↑
 787  777          int error;
 788  778  
 789  779          AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
 790  780          as_clearwatch(as);
 791  781          newas = as_alloc();
 792  782          newas->a_userlimit = as->a_userlimit;
 793  783          newas->a_proc = forkedproc;
 794  784  
 795  785          AS_LOCK_ENTER(newas, &newas->a_lock, RW_WRITER);
 796  786  
 797      -        /* This will prevent new XHATs from attaching */
 798      -        mutex_enter(&as->a_contents);
 799      -        AS_SETBUSY(as);
 800      -        mutex_exit(&as->a_contents);
 801      -        mutex_enter(&newas->a_contents);
 802      -        AS_SETBUSY(newas);
 803      -        mutex_exit(&newas->a_contents);
 804      -
 805  787          (void) hat_dup(as->a_hat, newas->a_hat, NULL, 0, HAT_DUP_SRD);
 806  788  
 807  789          for (seg = AS_SEGFIRST(as); seg != NULL; seg = AS_SEGNEXT(as, seg)) {
 808  790  
 809  791                  if (seg->s_flags & S_PURGE) {
 810  792                          purgesize += seg->s_size;
 811  793                          continue;
 812  794                  }
 813  795  
 814  796                  newseg = seg_alloc(newas, seg->s_base, seg->s_size);
 815  797                  if (newseg == NULL) {
 816  798                          AS_LOCK_EXIT(newas, &newas->a_lock);
 817  799                          as_setwatch(as);
 818      -                        mutex_enter(&as->a_contents);
 819      -                        AS_CLRBUSY(as);
 820      -                        mutex_exit(&as->a_contents);
 821  800                          AS_LOCK_EXIT(as, &as->a_lock);
 822  801                          as_free(newas);
 823  802                          return (-1);
 824  803                  }
 825      -                if ((error = SEGOP_DUP(seg, newseg)) != 0) {
      804 +                if ((error = segop_dup(seg, newseg)) != 0) {
 826  805                          /*
 827  806                           * We call seg_free() on the new seg
 828  807                           * because the segment is not set up
 829  808                           * completely; i.e. it has no ops.
 830  809                           */
 831  810                          as_setwatch(as);
 832      -                        mutex_enter(&as->a_contents);
 833      -                        AS_CLRBUSY(as);
 834      -                        mutex_exit(&as->a_contents);
 835  811                          AS_LOCK_EXIT(as, &as->a_lock);
 836  812                          seg_free(newseg);
 837  813                          AS_LOCK_EXIT(newas, &newas->a_lock);
 838  814                          as_free(newas);
 839  815                          return (error);
 840  816                  }
 841  817                  newas->a_size += seg->s_size;
 842  818          }
 843  819          newas->a_resvsize = as->a_resvsize - purgesize;
 844  820  
 845  821          error = hat_dup(as->a_hat, newas->a_hat, NULL, 0, HAT_DUP_ALL);
 846      -        if (as->a_xhat != NULL)
 847      -                error |= xhat_dup_all(as, newas, NULL, 0, HAT_DUP_ALL);
 848  822  
 849      -        mutex_enter(&newas->a_contents);
 850      -        AS_CLRBUSY(newas);
 851      -        mutex_exit(&newas->a_contents);
 852  823          AS_LOCK_EXIT(newas, &newas->a_lock);
 853  824  
 854  825          as_setwatch(as);
 855      -        mutex_enter(&as->a_contents);
 856      -        AS_CLRBUSY(as);
 857      -        mutex_exit(&as->a_contents);
 858  826          AS_LOCK_EXIT(as, &as->a_lock);
 859  827          if (error != 0) {
 860  828                  as_free(newas);
 861  829                  return (error);
 862  830          }
 863  831          forkedproc->p_as = newas;
 864  832          return (0);
 865  833  }
 866  834  
 867  835  /*
↓ open down ↓ 5 lines elided ↑ open up ↑
 873  841  {
 874  842          struct seg *seg;
 875  843          caddr_t raddr;                  /* rounded down addr */
 876  844          size_t rsize;                   /* rounded up size */
 877  845          size_t ssize;
 878  846          faultcode_t res = 0;
 879  847          caddr_t addrsav;
 880  848          struct seg *segsav;
 881  849          int as_lock_held;
 882  850          klwp_t *lwp = ttolwp(curthread);
 883      -        int is_xhat = 0;
 884  851          int holding_wpage = 0;
 885      -        extern struct seg_ops   segdev_ops;
 886      -
 887  852  
 888  853  
 889      -        if (as->a_hat != hat) {
 890      -                /* This must be an XHAT then */
 891      -                is_xhat = 1;
 892      -
 893      -                if ((type != F_INVAL) || (as == &kas))
 894      -                        return (FC_NOSUPPORT);
 895      -        }
 896  854  
 897  855  retry:
 898      -        if (!is_xhat) {
 899      -                /*
 900      -                 * Indicate that the lwp is not to be stopped while waiting
 901      -                 * for a pagefault.  This is to avoid deadlock while debugging
 902      -                 * a process via /proc over NFS (in particular).
 903      -                 */
 904      -                if (lwp != NULL)
 905      -                        lwp->lwp_nostop++;
      856 +        /*
      857 +         * Indicate that the lwp is not to be stopped while waiting for a
      858 +         * pagefault.  This is to avoid deadlock while debugging a process
      859 +         * via /proc over NFS (in particular).
      860 +         */
      861 +        if (lwp != NULL)
      862 +                lwp->lwp_nostop++;
 906  863  
 907      -                /*
 908      -                 * same length must be used when we softlock and softunlock.
 909      -                 * We don't support softunlocking lengths less than
 910      -                 * the original length when there is largepage support.
 911      -                 * See seg_dev.c for more comments.
 912      -                 */
 913      -                switch (type) {
      864 +        /*
      865 +         * same length must be used when we softlock and softunlock.  We
      866 +         * don't support softunlocking lengths less than the original length
      867 +         * when there is largepage support.  See seg_dev.c for more
      868 +         * comments.
      869 +         */
      870 +        switch (type) {
 914  871  
 915      -                case F_SOFTLOCK:
 916      -                        CPU_STATS_ADD_K(vm, softlock, 1);
 917      -                        break;
      872 +        case F_SOFTLOCK:
      873 +                CPU_STATS_ADD_K(vm, softlock, 1);
      874 +                break;
 918  875  
 919      -                case F_SOFTUNLOCK:
 920      -                        break;
      876 +        case F_SOFTUNLOCK:
      877 +                break;
 921  878  
 922      -                case F_PROT:
 923      -                        CPU_STATS_ADD_K(vm, prot_fault, 1);
 924      -                        break;
      879 +        case F_PROT:
      880 +                CPU_STATS_ADD_K(vm, prot_fault, 1);
      881 +                break;
 925  882  
 926      -                case F_INVAL:
 927      -                        CPU_STATS_ENTER_K();
 928      -                        CPU_STATS_ADDQ(CPU, vm, as_fault, 1);
 929      -                        if (as == &kas)
 930      -                                CPU_STATS_ADDQ(CPU, vm, kernel_asflt, 1);
 931      -                        CPU_STATS_EXIT_K();
 932      -                        break;
 933      -                }
      883 +        case F_INVAL:
      884 +                CPU_STATS_ENTER_K();
      885 +                CPU_STATS_ADDQ(CPU, vm, as_fault, 1);
      886 +                if (as == &kas)
      887 +                        CPU_STATS_ADDQ(CPU, vm, kernel_asflt, 1);
      888 +                CPU_STATS_EXIT_K();
      889 +                break;
 934  890          }
 935  891  
 936  892          /* Kernel probe */
 937  893          TNF_PROBE_3(address_fault, "vm pagefault", /* CSTYLED */,
 938  894              tnf_opaque, address,        addr,
 939  895              tnf_fault_type,     fault_type,     type,
 940  896              tnf_seg_access,     access,         rw);
 941  897  
 942  898          raddr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
 943  899          rsize = (((size_t)(addr + size) + PAGEOFFSET) & PAGEMASK) -
↓ open down ↓ 1 lines elided ↑ open up ↑
 945  901  
 946  902          /*
 947  903           * XXX -- Don't grab the as lock for segkmap. We should grab it for
 948  904           * correctness, but then we could be stuck holding this lock for
 949  905           * a LONG time if the fault needs to be resolved on a slow
 950  906           * filesystem, and then no-one will be able to exec new commands,
 951  907           * as exec'ing requires the write lock on the as.
 952  908           */
 953  909          if (as == &kas && segkmap && segkmap->s_base <= raddr &&
 954  910              raddr + size < segkmap->s_base + segkmap->s_size) {
 955      -                /*
 956      -                 * if (as==&kas), this can't be XHAT: we've already returned
 957      -                 * FC_NOSUPPORT.
 958      -                 */
 959  911                  seg = segkmap;
 960  912                  as_lock_held = 0;
 961  913          } else {
 962  914                  AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
 963      -                if (is_xhat && avl_numnodes(&as->a_wpage) != 0) {
 964      -                        /*
 965      -                         * Grab and hold the writers' lock on the as
 966      -                         * if the fault is to a watched page.
 967      -                         * This will keep CPUs from "peeking" at the
 968      -                         * address range while we're temporarily boosting
 969      -                         * the permissions for the XHAT device to
 970      -                         * resolve the fault in the segment layer.
 971      -                         *
 972      -                         * We could check whether faulted address
 973      -                         * is within a watched page and only then grab
 974      -                         * the writer lock, but this is simpler.
 975      -                         */
 976      -                        AS_LOCK_EXIT(as, &as->a_lock);
 977      -                        AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
 978      -                }
 979  915  
 980  916                  seg = as_segat(as, raddr);
 981  917                  if (seg == NULL) {
 982  918                          AS_LOCK_EXIT(as, &as->a_lock);
 983      -                        if ((lwp != NULL) && (!is_xhat))
      919 +                        if (lwp != NULL)
 984  920                                  lwp->lwp_nostop--;
 985  921                          return (FC_NOMAP);
 986  922                  }
 987  923  
 988  924                  as_lock_held = 1;
 989  925          }
 990  926  
 991  927          addrsav = raddr;
 992  928          segsav = seg;
 993  929  
↓ open down ↓ 3 lines elided ↑ open up ↑
 997  933                          if (seg == NULL || raddr != seg->s_base) {
 998  934                                  res = FC_NOMAP;
 999  935                                  break;
1000  936                          }
1001  937                  }
1002  938                  if (raddr + rsize > seg->s_base + seg->s_size)
1003  939                          ssize = seg->s_base + seg->s_size - raddr;
1004  940                  else
1005  941                          ssize = rsize;
1006  942  
1007      -                if (!is_xhat || (seg->s_ops != &segdev_ops)) {
1008      -
1009      -                        if (is_xhat && avl_numnodes(&as->a_wpage) != 0 &&
1010      -                            pr_is_watchpage_as(raddr, rw, as)) {
1011      -                                /*
1012      -                                 * Handle watch pages.  If we're faulting on a
1013      -                                 * watched page from an X-hat, we have to
1014      -                                 * restore the original permissions while we
1015      -                                 * handle the fault.
1016      -                                 */
1017      -                                as_clearwatch(as);
1018      -                                holding_wpage = 1;
1019      -                        }
1020      -
1021      -                        res = SEGOP_FAULT(hat, seg, raddr, ssize, type, rw);
      943 +                res = segop_fault(hat, seg, raddr, ssize, type, rw);
1022  944  
1023      -                        /* Restore watchpoints */
1024      -                        if (holding_wpage) {
1025      -                                as_setwatch(as);
1026      -                                holding_wpage = 0;
1027      -                        }
      945 +                /* Restore watchpoints */
      946 +                if (holding_wpage) {
      947 +                        as_setwatch(as);
      948 +                        holding_wpage = 0;
      949 +                }
1028  950  
1029      -                        if (res != 0)
1030      -                                break;
1031      -                } else {
1032      -                        /* XHAT does not support seg_dev */
1033      -                        res = FC_NOSUPPORT;
      951 +                if (res != 0)
1034  952                          break;
1035      -                }
1036  953          }
1037  954  
1038  955          /*
1039  956           * If we were SOFTLOCKing and encountered a failure,
1040  957           * we must SOFTUNLOCK the range we already did. (Maybe we
1041  958           * should just panic if we are SOFTLOCKing or even SOFTUNLOCKing
1042  959           * right here...)
1043  960           */
1044  961          if (res != 0 && type == F_SOFTLOCK) {
1045  962                  for (seg = segsav; addrsav < raddr; addrsav += ssize) {
↓ open down ↓ 2 lines elided ↑ open up ↑
1048  965                          ASSERT(seg != NULL);
1049  966                          /*
1050  967                           * Now call the fault routine again to perform the
1051  968                           * unlock using S_OTHER instead of the rw variable
1052  969                           * since we never got a chance to touch the pages.
1053  970                           */
1054  971                          if (raddr > seg->s_base + seg->s_size)
1055  972                                  ssize = seg->s_base + seg->s_size - addrsav;
1056  973                          else
1057  974                                  ssize = raddr - addrsav;
1058      -                        (void) SEGOP_FAULT(hat, seg, addrsav, ssize,
      975 +                        (void) segop_fault(hat, seg, addrsav, ssize,
1059  976                              F_SOFTUNLOCK, S_OTHER);
1060  977                  }
1061  978          }
1062  979          if (as_lock_held)
1063  980                  AS_LOCK_EXIT(as, &as->a_lock);
1064      -        if ((lwp != NULL) && (!is_xhat))
      981 +        if (lwp != NULL)
1065  982                  lwp->lwp_nostop--;
1066  983  
1067  984          /*
1068  985           * If the lower levels returned EDEADLK for a fault,
1069  986           * It means that we should retry the fault.  Let's wait
1070  987           * a bit also to let the deadlock causing condition clear.
1071  988           * This is part of a gross hack to work around a design flaw
1072  989           * in the ufs/sds logging code and should go away when the
1073  990           * logging code is re-designed to fix the problem. See bug
1074  991           * 4125102 for details of the problem.
↓ open down ↓ 43 lines elided ↑ open up ↑
1118 1035          }
1119 1036  
1120 1037          for (; rsize != 0; rsize -= PAGESIZE, raddr += PAGESIZE) {
1121 1038                  if (raddr >= seg->s_base + seg->s_size) {
1122 1039                          seg = AS_SEGNEXT(as, seg);
1123 1040                          if (seg == NULL || raddr != seg->s_base) {
1124 1041                                  res = FC_NOMAP;
1125 1042                                  break;
1126 1043                          }
1127 1044                  }
1128      -                res = SEGOP_FAULTA(seg, raddr);
     1045 +                res = segop_faulta(seg, raddr);
1129 1046                  if (res != 0)
1130 1047                          break;
1131 1048          }
1132 1049          AS_LOCK_EXIT(as, &as->a_lock);
1133 1050          if (lwp != NULL)
1134 1051                  lwp->lwp_nostop--;
1135 1052          /*
1136 1053           * If the lower levels returned EDEADLK for a fault,
1137 1054           * It means that we should retry the fault.  Let's wait
1138 1055           * a bit also to let the deadlock causing condition clear.
↓ open down ↓ 69 lines elided ↑ open up ↑
1208 1125                          if (seg == NULL || raddr != seg->s_base) {
1209 1126                                  error = ENOMEM;
1210 1127                                  break;
1211 1128                          }
1212 1129                  }
1213 1130                  if ((raddr + rsize) > (seg->s_base + seg->s_size))
1214 1131                          ssize = seg->s_base + seg->s_size - raddr;
1215 1132                  else
1216 1133                          ssize = rsize;
1217 1134  retry:
1218      -                error = SEGOP_SETPROT(seg, raddr, ssize, prot);
     1135 +                error = segop_setprot(seg, raddr, ssize, prot);
1219 1136  
1220 1137                  if (error == IE_NOMEM) {
1221 1138                          error = EAGAIN;
1222 1139                          break;
1223 1140                  }
1224 1141  
1225 1142                  if (error == IE_RETRY) {
1226 1143                          AS_LOCK_EXIT(as, &as->a_lock);
1227 1144                          writer = 1;
1228 1145                          goto setprot_top;
↓ open down ↓ 130 lines elided ↑ open up ↑
1359 1276                          if (seg == NULL || raddr != seg->s_base) {
1360 1277                                  error = ENOMEM;
1361 1278                                  break;
1362 1279                          }
1363 1280                  }
1364 1281                  if ((raddr + rsize) > (seg->s_base + seg->s_size))
1365 1282                          ssize = seg->s_base + seg->s_size - raddr;
1366 1283                  else
1367 1284                          ssize = rsize;
1368 1285  
1369      -                error = SEGOP_CHECKPROT(seg, raddr, ssize, prot);
     1286 +                error = segop_checkprot(seg, raddr, ssize, prot);
1370 1287                  if (error != 0)
1371 1288                          break;
1372 1289          }
1373 1290          as_setwatch(as);
1374 1291          AS_LOCK_EXIT(as, &as->a_lock);
1375 1292          return (error);
1376 1293  }
1377 1294  
1378 1295  int
1379 1296  as_unmap(struct as *as, caddr_t addr, size_t size)
↓ open down ↓ 45 lines elided ↑ open up ↑
1425 1342                   * We didn't count /dev/null mappings, so ignore them here.
1426 1343                   * We'll handle MAP_NORESERVE cases in segvn_unmap(). (Again,
1427 1344                   * we have to do this check here while we have seg.)
1428 1345                   */
1429 1346                  rsize = 0;
1430 1347                  if (!SEG_IS_DEVNULL_MAPPING(seg) &&
1431 1348                      !SEG_IS_PARTIAL_RESV(seg))
1432 1349                          rsize = ssize;
1433 1350  
1434 1351  retry:
1435      -                err = SEGOP_UNMAP(seg, raddr, ssize);
     1352 +                err = segop_unmap(seg, raddr, ssize);
1436 1353                  if (err == EAGAIN) {
1437 1354                          /*
1438 1355                           * Memory is currently locked.  It must be unlocked
1439 1356                           * before this operation can succeed through a retry.
1440 1357                           * The possible reasons for locked memory and
1441 1358                           * corresponding strategies for unlocking are:
1442 1359                           * (1) Normal I/O
1443 1360                           *      wait for a signal that the I/O operation
1444 1361                           *      has completed and the memory is unlocked.
1445 1362                           * (2) Asynchronous I/O
↓ open down ↓ 417 lines elided ↑ open up ↑
1863 1780           */
1864 1781          if ((as->a_flags & AS_NEEDSPURGE) == 0)
1865 1782                  return;
1866 1783  
1867 1784          AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
1868 1785          next_seg = NULL;
1869 1786          seg = AS_SEGFIRST(as);
1870 1787          while (seg != NULL) {
1871 1788                  next_seg = AS_SEGNEXT(as, seg);
1872 1789                  if (seg->s_flags & S_PURGE)
1873      -                        SEGOP_UNMAP(seg, seg->s_base, seg->s_size);
     1790 +                        segop_unmap(seg, seg->s_base, seg->s_size);
1874 1791                  seg = next_seg;
1875 1792          }
1876 1793          AS_LOCK_EXIT(as, &as->a_lock);
1877 1794  
1878 1795          mutex_enter(&as->a_contents);
1879 1796          as->a_flags &= ~AS_NEEDSPURGE;
1880 1797          mutex_exit(&as->a_contents);
1881 1798  }
1882 1799  
1883 1800  /*
↓ open down ↓ 197 lines elided ↑ open up ↑
2081 1998  }
2082 1999  
2083 2000  /*
2084 2001   * Return the next range within [base, base + len) that is backed
2085 2002   * with "real memory".  Skip holes and non-seg_vn segments.
2086 2003   * We're lazy and only return one segment at a time.
2087 2004   */
2088 2005  int
2089 2006  as_memory(struct as *as, caddr_t *basep, size_t *lenp)
2090 2007  {
2091      -        extern struct seg_ops segspt_shmops;    /* needs a header file */
     2008 +        extern const struct seg_ops segspt_shmops;      /* needs a header file */
2092 2009          struct seg *seg;
2093 2010          caddr_t addr, eaddr;
2094 2011          caddr_t segend;
2095 2012  
2096 2013          AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
2097 2014  
2098 2015          addr = *basep;
2099 2016          eaddr = addr + *lenp;
2100 2017  
2101 2018          seg = as_findseg(as, addr, 0);
↓ open down ↓ 32 lines elided ↑ open up ↑
2134 2051          if (segend > eaddr)
2135 2052                  *lenp = eaddr - addr;
2136 2053          else
2137 2054                  *lenp = segend - addr;
2138 2055  
2139 2056          AS_LOCK_EXIT(as, &as->a_lock);
2140 2057          return (0);
2141 2058  }
2142 2059  
2143 2060  /*
2144      - * Swap the pages associated with the address space as out to
2145      - * secondary storage, returning the number of bytes actually
2146      - * swapped.
2147      - *
2148      - * The value returned is intended to correlate well with the process's
2149      - * memory requirements.  Its usefulness for this purpose depends on
2150      - * how well the segment-level routines do at returning accurate
2151      - * information.
2152      - */
/*
 * NOTE(review): every line of this function carries the '-' gutter marker,
 * i.e. the patch deletes as_swapout() entirely (the "remove whole-process
 * swapping" change).  The annotations below document the old behavior so
 * the removal can be reviewed, not to suggest keeping it.
 */
2153      -size_t
2154      -as_swapout(struct as *as)
2155      -{
2156      -        struct seg *seg;
2157      -        size_t swpcnt = 0;      /* accumulated bytes reclaimed */
2158      -
2159      -        /*
2160      -         * Kernel-only processes have given up their address
2161      -         * spaces.  Of course, we shouldn't be attempting to
2162      -         * swap out such processes in the first place...
2163      -         */
2164      -        if (as == NULL)
2165      -                return (0);
2166      -
2167      -        AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
2168      -
2169      -        /* Prevent XHATs from attaching */
2170      -        mutex_enter(&as->a_contents);
2171      -        AS_SETBUSY(as);
2172      -        mutex_exit(&as->a_contents);
2173      -
2174      -
2175      -        /*
2176      -         * Free all mapping resources associated with the address
2177      -         * space.  The segment-level swapout routines capitalize
2178      -         * on this unmapping by scavenging pages that have become
2179      -         * unmapped here.
2180      -         */
2181      -        hat_swapout(as->a_hat);
2182      -        if (as->a_xhat != NULL)
2183      -                xhat_swapout_all(as);
/*
 * NOTE(review): the AS_SETBUSY/AS_CLRBUSY dance and xhat_swapout_all()
 * above were the sole consumers of the AS_BUSY flag and part of the xhat
 * layer, both of which this patch series also removes — deleting this
 * function removes their last callers here.
 */
2184      -
2185      -        mutex_enter(&as->a_contents);
2186      -        AS_CLRBUSY(as);
2187      -        mutex_exit(&as->a_contents);
2188      -
2189      -        /*
2190      -         * Call the swapout routines of all segments in the address
2191      -         * space to do the actual work, accumulating the amount of
2192      -         * space reclaimed.
2193      -         */
2194      -        for (seg = AS_SEGFIRST(as); seg != NULL; seg = AS_SEGNEXT(as, seg)) {
2195      -                struct seg_ops *ov = seg->s_ops;
2196      -
2197      -                /*
2198      -                 * We have to check to see if the seg has
2199      -                 * an ops vector because the seg may have
2200      -                 * been in the middle of being set up when
2201      -                 * the process was picked for swapout.
2202      -                 */
2203      -                if ((ov != NULL) && (ov->swapout != NULL))
2204      -                        swpcnt += SEGOP_SWAPOUT(seg);
2205      -        }
2206      -        AS_LOCK_EXIT(as, &as->a_lock);
2207      -        return (swpcnt);
2208      -}
2209      -
2210      -/*
2211 2061   * Determine whether data from the mappings in interval [addr, addr + size)
2212 2062   * are in the primary memory (core) cache.
2213 2063   */
2214 2064  int
2215 2065  as_incore(struct as *as, caddr_t addr,
2216 2066      size_t size, char *vec, size_t *sizep)
2217 2067  {
2218 2068          struct seg *seg;
2219 2069          size_t ssize;
2220 2070          caddr_t raddr;          /* rounded down addr */
↓ open down ↓ 21 lines elided ↑ open up ↑
2242 2092                          seg = AS_SEGNEXT(as, seg);
2243 2093                          if (seg == NULL || raddr != seg->s_base) {
2244 2094                                  error = -1;
2245 2095                                  break;
2246 2096                          }
2247 2097                  }
2248 2098                  if ((raddr + rsize) > (seg->s_base + seg->s_size))
2249 2099                          ssize = seg->s_base + seg->s_size - raddr;
2250 2100                  else
2251 2101                          ssize = rsize;
2252      -                *sizep += isize = SEGOP_INCORE(seg, raddr, ssize, vec);
     2102 +                *sizep += isize = segop_incore(seg, raddr, ssize, vec);
2253 2103                  if (isize != ssize) {
2254 2104                          error = -1;
2255 2105                          break;
2256 2106                  }
2257 2107                  vec += btopr(ssize);
2258 2108          }
2259 2109          AS_LOCK_EXIT(as, &as->a_lock);
2260 2110          return (error);
2261 2111  }
2262 2112  
↓ open down ↓ 5 lines elided ↑ open up ↑
2268 2118          size_t  pos1 = position;
2269 2119          size_t  pos2;
2270 2120          size_t  size;
2271 2121          size_t  end_pos = npages + position;
2272 2122  
2273 2123          while (bt_range(bitmap, &pos1, &pos2, end_pos)) {
2274 2124                  size = ptob((pos2 - pos1));
2275 2125                  range_start = (caddr_t)((uintptr_t)addr +
2276 2126                      ptob(pos1 - position));
2277 2127  
2278      -                (void) SEGOP_LOCKOP(seg, range_start, size, attr, MC_UNLOCK,
     2128 +                (void) segop_lockop(seg, range_start, size, attr, MC_UNLOCK,
2279 2129                      (ulong_t *)NULL, (size_t)NULL);
2280 2130                  pos1 = pos2;
2281 2131          }
2282 2132  }
2283 2133  
2284 2134  static void
2285 2135  as_unlockerr(struct as *as, int attr, ulong_t *mlock_map,
2286 2136          caddr_t raddr, size_t rsize)
2287 2137  {
2288 2138          struct seg *seg = as_segat(as, raddr);
↓ open down ↓ 75 lines elided ↑ open up ↑
2364 2214                  } while ((seg = AS_SEGNEXT(as, seg)) != NULL);
2365 2215  
2366 2216                  mlock_size = BT_BITOUL(btopr(rlen));
2367 2217                  if ((mlock_map = (ulong_t *)kmem_zalloc(mlock_size *
2368 2218                      sizeof (ulong_t), KM_NOSLEEP)) == NULL) {
2369 2219                                  AS_LOCK_EXIT(as, &as->a_lock);
2370 2220                                  return (EAGAIN);
2371 2221                  }
2372 2222  
2373 2223                  for (seg = AS_SEGFIRST(as); seg; seg = AS_SEGNEXT(as, seg)) {
2374      -                        error = SEGOP_LOCKOP(seg, seg->s_base,
     2224 +                        error = segop_lockop(seg, seg->s_base,
2375 2225                              seg->s_size, attr, MC_LOCK, mlock_map, pos);
2376 2226                          if (error != 0)
2377 2227                                  break;
2378 2228                          pos += seg_pages(seg);
2379 2229                  }
2380 2230  
2381 2231                  if (error) {
2382 2232                          for (seg = AS_SEGFIRST(as); seg != NULL;
2383 2233                              seg = AS_SEGNEXT(as, seg)) {
2384 2234  
↓ open down ↓ 8 lines elided ↑ open up ↑
2393 2243  
2394 2244                  kmem_free(mlock_map, mlock_size * sizeof (ulong_t));
2395 2245                  AS_LOCK_EXIT(as, &as->a_lock);
2396 2246                  goto lockerr;
2397 2247          } else if (func == MC_UNLOCKAS) {
2398 2248                  mutex_enter(&as->a_contents);
2399 2249                  AS_CLRPGLCK(as);
2400 2250                  mutex_exit(&as->a_contents);
2401 2251  
2402 2252                  for (seg = AS_SEGFIRST(as); seg; seg = AS_SEGNEXT(as, seg)) {
2403      -                        error = SEGOP_LOCKOP(seg, seg->s_base,
     2253 +                        error = segop_lockop(seg, seg->s_base,
2404 2254                              seg->s_size, attr, MC_UNLOCK, NULL, 0);
2405 2255                          if (error != 0)
2406 2256                                  break;
2407 2257                  }
2408 2258  
2409 2259                  AS_LOCK_EXIT(as, &as->a_lock);
2410 2260                  goto lockerr;
2411 2261          }
2412 2262  
2413 2263          /*
↓ open down ↓ 57 lines elided ↑ open up ↑
2471 2321                  /*
2472 2322                   * Dispatch on specific function.
2473 2323                   */
2474 2324                  switch (func) {
2475 2325  
2476 2326                  /*
2477 2327                   * Synchronize cached data from mappings with backing
2478 2328                   * objects.
2479 2329                   */
2480 2330                  case MC_SYNC:
2481      -                        if (error = SEGOP_SYNC(seg, raddr, ssize,
     2331 +                        if (error = segop_sync(seg, raddr, ssize,
2482 2332                              attr, (uint_t)arg)) {
2483 2333                                  AS_LOCK_EXIT(as, &as->a_lock);
2484 2334                                  return (error);
2485 2335                          }
2486 2336                          break;
2487 2337  
2488 2338                  /*
2489 2339                   * Lock pages in memory.
2490 2340                   */
2491 2341                  case MC_LOCK:
2492      -                        if (error = SEGOP_LOCKOP(seg, raddr, ssize,
     2342 +                        if (error = segop_lockop(seg, raddr, ssize,
2493 2343                              attr, func, mlock_map, pos)) {
2494 2344                                  as_unlockerr(as, attr, mlock_map, initraddr,
2495 2345                                      initrsize - rsize + ssize);
2496 2346                                  kmem_free(mlock_map, mlock_size *
2497 2347                                      sizeof (ulong_t));
2498 2348                                  AS_LOCK_EXIT(as, &as->a_lock);
2499 2349                                  goto lockerr;
2500 2350                          }
2501 2351                          break;
2502 2352  
2503 2353                  /*
2504 2354                   * Unlock mapped pages.
2505 2355                   */
2506 2356                  case MC_UNLOCK:
2507      -                        (void) SEGOP_LOCKOP(seg, raddr, ssize, attr, func,
     2357 +                        (void) segop_lockop(seg, raddr, ssize, attr, func,
2508 2358                              (ulong_t *)NULL, (size_t)NULL);
2509 2359                          break;
2510 2360  
2511 2361                  /*
2512 2362                   * Store VM advise for mapped pages in segment layer.
2513 2363                   */
2514 2364                  case MC_ADVISE:
2515      -                        error = SEGOP_ADVISE(seg, raddr, ssize, (uint_t)arg);
     2365 +                        error = segop_advise(seg, raddr, ssize, (uint_t)arg);
2516 2366  
2517 2367                          /*
2518 2368                           * Check for regular errors and special retry error
2519 2369                           */
2520 2370                          if (error) {
2521 2371                                  if (error == IE_RETRY) {
2522 2372                                          /*
2523 2373                                           * Need to acquire writers lock, so
2524 2374                                           * have to drop readers lock and start
2525 2375                                           * all over again
↓ open down ↓ 15 lines elided ↑ open up ↑
2541 2391                                          /*
2542 2392                                           * Regular error
2543 2393                                           */
2544 2394                                          AS_LOCK_EXIT(as, &as->a_lock);
2545 2395                                          return (error);
2546 2396                                  }
2547 2397                          }
2548 2398                          break;
2549 2399  
2550 2400                  case MC_INHERIT_ZERO:
2551      -                        if (seg->s_ops->inherit == NULL) {
2552      -                                error = ENOTSUP;
2553      -                        } else {
2554      -                                error = SEGOP_INHERIT(seg, raddr, ssize,
2555      -                                    SEGP_INH_ZERO);
2556      -                        }
     2401 +                        error = segop_inherit(seg, raddr, ssize, SEGP_INH_ZERO);
2557 2402                          if (error != 0) {
2558 2403                                  AS_LOCK_EXIT(as, &as->a_lock);
2559 2404                                  return (error);
2560 2405                          }
2561 2406                          break;
2562 2407  
2563 2408                  /*
2564 2409                   * Can't happen.
2565 2410                   */
2566 2411                  default:
↓ open down ↓ 63 lines elided ↑ open up ↑
2630 2475          ulong_t segcnt = 1;
2631 2476          ulong_t cnt;
2632 2477          size_t ssize;
2633 2478          pgcnt_t npages = btop(size);
2634 2479          page_t **plist;
2635 2480          page_t **pl;
2636 2481          int error;
2637 2482          caddr_t eaddr;
2638 2483          faultcode_t fault_err = 0;
2639 2484          pgcnt_t pl_off;
2640      -        extern struct seg_ops segspt_shmops;
     2485 +        extern const struct seg_ops segspt_shmops;
2641 2486  
2642 2487          ASSERT(AS_LOCK_HELD(as, &as->a_lock));
2643 2488          ASSERT(seg != NULL);
2644 2489          ASSERT(addr >= seg->s_base && addr < seg->s_base + seg->s_size);
2645 2490          ASSERT(addr + size > seg->s_base + seg->s_size);
2646 2491          ASSERT(IS_P2ALIGNED(size, PAGESIZE));
2647 2492          ASSERT(IS_P2ALIGNED(addr, PAGESIZE));
2648 2493  
2649 2494          /*
2650 2495           * Count the number of segments covered by the range we are about to
↓ open down ↓ 8 lines elided ↑ open up ↑
2659 2504                                  AS_LOCK_EXIT(as, &as->a_lock);
2660 2505                                  return (EFAULT);
2661 2506                          }
2662 2507                          /*
2663 2508                           * Do a quick check if subsequent segments
2664 2509                           * will most likely support pagelock.
2665 2510                           */
2666 2511                          if (seg->s_ops == &segvn_ops) {
2667 2512                                  vnode_t *vp;
2668 2513  
2669      -                                if (SEGOP_GETVP(seg, addr, &vp) != 0 ||
     2514 +                                if (segop_getvp(seg, addr, &vp) != 0 ||
2670 2515                                      vp != NULL) {
2671 2516                                          AS_LOCK_EXIT(as, &as->a_lock);
2672 2517                                          goto slow;
2673 2518                                  }
2674 2519                          } else if (seg->s_ops != &segspt_shmops) {
2675 2520                                  AS_LOCK_EXIT(as, &as->a_lock);
2676 2521                                  goto slow;
2677 2522                          }
2678 2523                          segcnt++;
2679 2524                  }
↓ open down ↓ 17 lines elided ↑ open up ↑
2697 2542                          ASSERT(seg != NULL && addr == seg->s_base);
2698 2543                          cnt++;
2699 2544                          ASSERT(cnt < segcnt);
2700 2545                  }
2701 2546                  if (addr + size > seg->s_base + seg->s_size) {
2702 2547                          ssize = seg->s_base + seg->s_size - addr;
2703 2548                  } else {
2704 2549                          ssize = size;
2705 2550                  }
2706 2551                  pl = &plist[npages + cnt];
2707      -                error = SEGOP_PAGELOCK(seg, addr, ssize, (page_t ***)pl,
     2552 +                error = segop_pagelock(seg, addr, ssize, (page_t ***)pl,
2708 2553                      L_PAGELOCK, rw);
2709 2554                  if (error) {
2710 2555                          break;
2711 2556                  }
2712 2557                  ASSERT(plist[npages + cnt] != NULL);
2713 2558                  ASSERT(pl_off + btop(ssize) <= npages);
2714 2559                  bcopy(plist[npages + cnt], &plist[pl_off],
2715 2560                      btop(ssize) * sizeof (page_t *));
2716 2561                  pl_off += btop(ssize);
2717 2562          }
↓ open down ↓ 22 lines elided ↑ open up ↑
2740 2585                          cnt++;
2741 2586                          ASSERT(cnt < segcnt);
2742 2587                  }
2743 2588                  if (eaddr > seg->s_base + seg->s_size) {
2744 2589                          ssize = seg->s_base + seg->s_size - addr;
2745 2590                  } else {
2746 2591                          ssize = eaddr - addr;
2747 2592                  }
2748 2593                  pl = &plist[npages + cnt];
2749 2594                  ASSERT(*pl != NULL);
2750      -                (void) SEGOP_PAGELOCK(seg, addr, ssize, (page_t ***)pl,
     2595 +                (void) segop_pagelock(seg, addr, ssize, (page_t ***)pl,
2751 2596                      L_PAGEUNLOCK, rw);
2752 2597          }
2753 2598  
2754 2599          AS_LOCK_EXIT(as, &as->a_lock);
2755 2600  
2756 2601          kmem_free(plist, (npages + segcnt) * sizeof (page_t *));
2757 2602  
2758 2603          if (error != ENOTSUP && error != EFAULT) {
2759 2604                  return (error);
2760 2605          }
↓ open down ↓ 54 lines elided ↑ open up ↑
2815 2660                  AS_LOCK_EXIT(as, &as->a_lock);
2816 2661                  return (EFAULT);
2817 2662          }
2818 2663  
2819 2664          TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEG_LOCK_START,
2820 2665              "seg_lock_1_start: raddr %p rsize %ld", raddr, rsize);
2821 2666  
2822 2667          /*
2823 2668           * try to lock pages and pass back shadow list
2824 2669           */
2825      -        err = SEGOP_PAGELOCK(seg, raddr, rsize, ppp, L_PAGELOCK, rw);
     2670 +        err = segop_pagelock(seg, raddr, rsize, ppp, L_PAGELOCK, rw);
2826 2671  
2827 2672          TRACE_0(TR_FAC_PHYSIO, TR_PHYSIO_SEG_LOCK_END, "seg_lock_1_end");
2828 2673  
2829 2674          AS_LOCK_EXIT(as, &as->a_lock);
2830 2675  
2831 2676          if (err == 0 || (err != ENOTSUP && err != EFAULT)) {
2832 2677                  return (err);
2833 2678          }
2834 2679  
2835 2680          /*
↓ open down ↓ 42 lines elided ↑ open up ↑
2878 2723                          ASSERT(seg != NULL && addr == seg->s_base);
2879 2724                          cnt++;
2880 2725                  }
2881 2726                  if (eaddr > seg->s_base + seg->s_size) {
2882 2727                          ssize = seg->s_base + seg->s_size - addr;
2883 2728                  } else {
2884 2729                          ssize = eaddr - addr;
2885 2730                  }
2886 2731                  pl = &plist[npages + cnt];
2887 2732                  ASSERT(*pl != NULL);
2888      -                (void) SEGOP_PAGELOCK(seg, addr, ssize, (page_t ***)pl,
     2733 +                (void) segop_pagelock(seg, addr, ssize, (page_t ***)pl,
2889 2734                      L_PAGEUNLOCK, rw);
2890 2735          }
2891 2736          ASSERT(cnt > 0);
2892 2737          AS_LOCK_EXIT(as, &as->a_lock);
2893 2738  
2894 2739          cnt++;
2895 2740          kmem_free(plist, (npages + cnt) * sizeof (page_t *));
2896 2741  }
2897 2742  
2898 2743  /*
↓ open down ↓ 25 lines elided ↑ open up ↑
2924 2769  
2925 2770          AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
2926 2771          seg = as_segat(as, raddr);
2927 2772          ASSERT(seg != NULL);
2928 2773  
2929 2774          TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEG_UNLOCK_START,
2930 2775              "seg_unlock_start: raddr %p rsize %ld", raddr, rsize);
2931 2776  
2932 2777          ASSERT(raddr >= seg->s_base && raddr < seg->s_base + seg->s_size);
2933 2778          if (raddr + rsize <= seg->s_base + seg->s_size) {
2934      -                SEGOP_PAGELOCK(seg, raddr, rsize, &pp, L_PAGEUNLOCK, rw);
     2779 +                segop_pagelock(seg, raddr, rsize, &pp, L_PAGEUNLOCK, rw);
2935 2780          } else {
2936 2781                  as_pageunlock_segs(as, seg, raddr, rsize, pp, rw);
2937 2782                  return;
2938 2783          }
2939 2784          AS_LOCK_EXIT(as, &as->a_lock);
2940 2785          TRACE_0(TR_FAC_PHYSIO, TR_PHYSIO_AS_UNLOCK_END, "as_pageunlock_end");
2941 2786  }
2942 2787  
2943 2788  int
2944 2789  as_setpagesize(struct as *as, caddr_t addr, size_t size, uint_t szc,
↓ open down ↓ 34 lines elided ↑ open up ↑
2979 2824                                  break;
2980 2825                          }
2981 2826                  }
2982 2827                  if ((raddr + rsize) > (seg->s_base + seg->s_size)) {
2983 2828                          ssize = seg->s_base + seg->s_size - raddr;
2984 2829                  } else {
2985 2830                          ssize = rsize;
2986 2831                  }
2987 2832  
2988 2833  retry:
2989      -                error = SEGOP_SETPAGESIZE(seg, raddr, ssize, szc);
     2834 +                error = segop_setpagesize(seg, raddr, ssize, szc);
2990 2835  
2991 2836                  if (error == IE_NOMEM) {
2992 2837                          error = EAGAIN;
2993 2838                          break;
2994 2839                  }
2995 2840  
2996 2841                  if (error == IE_RETRY) {
2997 2842                          AS_LOCK_EXIT(as, &as->a_lock);
2998 2843                          goto setpgsz_top;
2999 2844                  }
↓ open down ↓ 58 lines elided ↑ open up ↑
3058 2903                  } else if (error != 0) {
3059 2904                          break;
3060 2905                  }
3061 2906          }
3062 2907          as_setwatch(as);
3063 2908          AS_LOCK_EXIT(as, &as->a_lock);
3064 2909          return (error);
3065 2910  }
3066 2911  
3067 2912  /*
3068      - * as_iset3_default_lpsize() just calls SEGOP_SETPAGESIZE() on all segments
     2913 + * as_iset3_default_lpsize() just calls segop_setpagesize() on all segments
3069 2914   * in its chunk where s_szc is less than the szc we want to set.
3070 2915   */
3071 2916  static int
3072 2917  as_iset3_default_lpsize(struct as *as, caddr_t raddr, size_t rsize, uint_t szc,
3073 2918      int *retry)
3074 2919  {
3075 2920          struct seg *seg;
3076 2921          size_t ssize;
3077 2922          int error;
3078 2923  
↓ open down ↓ 11 lines elided ↑ open up ↑
3090 2935                                  panic("as_iset3_default_lpsize: as changed");
3091 2936                          }
3092 2937                  }
3093 2938                  if ((raddr + rsize) > (seg->s_base + seg->s_size)) {
3094 2939                          ssize = seg->s_base + seg->s_size - raddr;
3095 2940                  } else {
3096 2941                          ssize = rsize;
3097 2942                  }
3098 2943  
3099 2944                  if (szc > seg->s_szc) {
3100      -                        error = SEGOP_SETPAGESIZE(seg, raddr, ssize, szc);
     2945 +                        error = segop_setpagesize(seg, raddr, ssize, szc);
3101 2946                          /* Only retry on EINVAL segments that have no vnode. */
3102 2947                          if (error == EINVAL) {
3103 2948                                  vnode_t *vp = NULL;
3104      -                                if ((SEGOP_GETTYPE(seg, raddr) & MAP_SHARED) &&
3105      -                                    (SEGOP_GETVP(seg, raddr, &vp) != 0 ||
     2949 +                                if ((segop_gettype(seg, raddr) & MAP_SHARED) &&
     2950 +                                    (segop_getvp(seg, raddr, &vp) != 0 ||
3106 2951                                      vp == NULL)) {
3107 2952                                          *retry = 1;
3108 2953                                  } else {
3109 2954                                          *retry = 0;
3110 2955                                  }
3111 2956                          }
3112 2957                          if (error) {
3113 2958                                  return (error);
3114 2959                          }
3115 2960                  }
↓ open down ↓ 222 lines elided ↑ open up ↑
3338 3183                  return (ENOMEM);
3339 3184          }
3340 3185          as_clearwatchprot(as, raddr, rsize);
3341 3186          seg = as_segat(as, raddr);
3342 3187          if (seg == NULL) {
3343 3188                  as_setwatch(as);
3344 3189                  AS_LOCK_EXIT(as, &as->a_lock);
3345 3190                  return (ENOMEM);
3346 3191          }
3347 3192          if (seg->s_ops == &segvn_ops) {
3348      -                rtype = SEGOP_GETTYPE(seg, addr);
     3193 +                rtype = segop_gettype(seg, addr);
3349 3194                  rflags = rtype & (MAP_TEXT | MAP_INITDATA);
3350 3195                  rtype = rtype & (MAP_SHARED | MAP_PRIVATE);
3351 3196                  segvn = 1;
3352 3197          } else {
3353 3198                  segvn = 0;
3354 3199          }
3355 3200          setaddr = raddr;
3356 3201          setsize = 0;
3357 3202  
3358 3203          for (; rsize != 0; rsize -= ssize, raddr += ssize, setsize += ssize) {
3359 3204                  if (raddr >= (seg->s_base + seg->s_size)) {
3360 3205                          seg = AS_SEGNEXT(as, seg);
3361 3206                          if (seg == NULL || raddr != seg->s_base) {
3362 3207                                  error = ENOMEM;
3363 3208                                  break;
3364 3209                          }
3365 3210                          if (seg->s_ops == &segvn_ops) {
3366      -                                stype = SEGOP_GETTYPE(seg, raddr);
     3211 +                                stype = segop_gettype(seg, raddr);
3367 3212                                  sflags = stype & (MAP_TEXT | MAP_INITDATA);
3368 3213                                  stype &= (MAP_SHARED | MAP_PRIVATE);
3369 3214                                  if (segvn && (rflags != sflags ||
3370 3215                                      rtype != stype)) {
3371 3216                                          /*
3372 3217                                           * The next segment is also segvn but
3373 3218                                           * has different flags and/or type.
3374 3219                                           */
3375 3220                                          ASSERT(setsize != 0);
3376 3221                                          error = as_iset_default_lpsize(as,
↓ open down ↓ 93 lines elided ↑ open up ↑
3470 3315  
3471 3316          ASSERT(AS_WRITE_HELD(as, &as->a_lock));
3472 3317  
3473 3318          for (pwp = avl_first(&as->a_wpage); pwp != NULL;
3474 3319              pwp = AVL_NEXT(&as->a_wpage, pwp)) {
3475 3320                  retrycnt = 0;
3476 3321          retry:
3477 3322                  vaddr = pwp->wp_vaddr;
3478 3323                  if (pwp->wp_oprot != 0 ||       /* already set up */
3479 3324                      (seg = as_segat(as, vaddr)) == NULL ||
3480      -                    SEGOP_GETPROT(seg, vaddr, 0, &prot) != 0)
     3325 +                    segop_getprot(seg, vaddr, 0, &prot) != 0)
3481 3326                          continue;
3482 3327  
3483 3328                  pwp->wp_oprot = prot;
3484 3329                  if (pwp->wp_read)
3485 3330                          prot &= ~(PROT_READ|PROT_WRITE|PROT_EXEC);
3486 3331                  if (pwp->wp_write)
3487 3332                          prot &= ~PROT_WRITE;
3488 3333                  if (pwp->wp_exec)
3489 3334                          prot &= ~(PROT_READ|PROT_WRITE|PROT_EXEC);
3490 3335                  if (!(pwp->wp_flags & WP_NOWATCH) && prot != pwp->wp_oprot) {
3491      -                        err = SEGOP_SETPROT(seg, vaddr, PAGESIZE, prot);
     3336 +                        err = segop_setprot(seg, vaddr, PAGESIZE, prot);
3492 3337                          if (err == IE_RETRY) {
3493 3338                                  pwp->wp_oprot = 0;
3494 3339                                  ASSERT(retrycnt == 0);
3495 3340                                  retrycnt++;
3496 3341                                  goto retry;
3497 3342                          }
3498 3343                  }
3499 3344                  pwp->wp_prot = prot;
3500 3345          }
3501 3346  }
↓ open down ↓ 18 lines elided ↑ open up ↑
3520 3365          for (pwp = avl_first(&as->a_wpage); pwp != NULL;
3521 3366              pwp = AVL_NEXT(&as->a_wpage, pwp)) {
3522 3367                  retrycnt = 0;
3523 3368          retry:
3524 3369                  vaddr = pwp->wp_vaddr;
3525 3370                  if (pwp->wp_oprot == 0 ||       /* not set up */
3526 3371                      (seg = as_segat(as, vaddr)) == NULL)
3527 3372                          continue;
3528 3373  
3529 3374                  if ((prot = pwp->wp_oprot) != pwp->wp_prot) {
3530      -                        err = SEGOP_SETPROT(seg, vaddr, PAGESIZE, prot);
     3375 +                        err = segop_setprot(seg, vaddr, PAGESIZE, prot);
3531 3376                          if (err == IE_RETRY) {
3532 3377                                  ASSERT(retrycnt == 0);
3533 3378                                  retrycnt++;
3534 3379                                  goto retry;
3535 3380                          }
3536 3381                  }
3537 3382                  pwp->wp_oprot = 0;
3538 3383                  pwp->wp_prot = 0;
3539 3384          }
3540 3385  }
↓ open down ↓ 33 lines elided ↑ open up ↑
3574 3419                          wprot &= ~PROT_WRITE;
3575 3420                  if (pwp->wp_exec)
3576 3421                          wprot &= ~(PROT_READ|PROT_WRITE|PROT_EXEC);
3577 3422                  if (!(pwp->wp_flags & WP_NOWATCH) && wprot != pwp->wp_oprot) {
3578 3423                  retry:
3579 3424                          seg = as_segat(as, vaddr);
3580 3425                          if (seg == NULL) {
3581 3426                                  panic("as_setwatchprot: no seg");
3582 3427                                  /*NOTREACHED*/
3583 3428                          }
3584      -                        err = SEGOP_SETPROT(seg, vaddr, PAGESIZE, wprot);
     3429 +                        err = segop_setprot(seg, vaddr, PAGESIZE, wprot);
3585 3430                          if (err == IE_RETRY) {
3586 3431                                  ASSERT(retrycnt == 0);
3587 3432                                  retrycnt++;
3588 3433                                  goto retry;
3589 3434                          }
3590 3435                  }
3591 3436                  pwp->wp_oprot = prot;
3592 3437                  pwp->wp_prot = wprot;
3593 3438  
3594 3439                  pwp = AVL_NEXT(&as->a_wpage, pwp);
↓ open down ↓ 26 lines elided ↑ open up ↑
3621 3466          while (pwp != NULL && pwp->wp_vaddr < eaddr) {
3622 3467  
3623 3468                  if ((prot = pwp->wp_oprot) != 0) {
3624 3469                          retrycnt = 0;
3625 3470  
3626 3471                          if (prot != pwp->wp_prot) {
3627 3472                          retry:
3628 3473                                  seg = as_segat(as, pwp->wp_vaddr);
3629 3474                                  if (seg == NULL)
3630 3475                                          continue;
3631      -                                err = SEGOP_SETPROT(seg, pwp->wp_vaddr,
     3476 +                                err = segop_setprot(seg, pwp->wp_vaddr,
3632 3477                                      PAGESIZE, prot);
3633 3478                                  if (err == IE_RETRY) {
3634 3479                                          ASSERT(retrycnt == 0);
3635 3480                                          retrycnt++;
3636 3481                                          goto retry;
3637 3482  
3638 3483                                  }
3639 3484                          }
3640 3485                          pwp->wp_oprot = 0;
3641 3486                          pwp->wp_prot = 0;
↓ open down ↓ 28 lines elided ↑ open up ↑
3670 3515  {
3671 3516          struct seg      *seg;
3672 3517          int             sts;
3673 3518  
3674 3519          AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
3675 3520          seg = as_segat(as, addr);
3676 3521          if (seg == NULL) {
3677 3522                  AS_LOCK_EXIT(as, &as->a_lock);
3678 3523                  return (EFAULT);
3679 3524          }
3680      -        /*
3681      -         * catch old drivers which may not support getmemid
3682      -         */
3683      -        if (seg->s_ops->getmemid == NULL) {
3684      -                AS_LOCK_EXIT(as, &as->a_lock);
3685      -                return (ENODEV);
3686      -        }
3687 3525  
3688      -        sts = SEGOP_GETMEMID(seg, addr, memidp);
     3526 +        sts = segop_getmemid(seg, addr, memidp);
3689 3527  
3690 3528          AS_LOCK_EXIT(as, &as->a_lock);
3691 3529          return (sts);
3692 3530  }
    
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX