Print this page
use NULL dump segop as shorthand for a no-op
Instead of forcing every segment driver to implement a dummy dump function that
does nothing, treat a NULL dump segop function pointer as shorthand for a no-op.


  89                 caddr_t addr, size_t len, enum fault_type type, enum seg_rw rw);
  90 static faultcode_t segspt_shmfaulta(struct seg *seg, caddr_t addr);
  91 static int segspt_shmsetprot(register struct seg *seg, register caddr_t addr,
  92                         register size_t len, register uint_t prot);
  93 static int segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size,
  94                         uint_t prot);
  95 static int      segspt_shmkluster(struct seg *seg, caddr_t addr, ssize_t delta);
  96 static size_t segspt_shmincore(struct seg *seg, caddr_t addr, size_t len,
  97                         register char *vec);
  98 static int segspt_shmsync(struct seg *seg, register caddr_t addr, size_t len,
  99                         int attr, uint_t flags);
 100 static int segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len,
 101                         int attr, int op, ulong_t *lockmap, size_t pos);
 102 static int segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len,
 103                         uint_t *protv);
 104 static u_offset_t segspt_shmgetoffset(struct seg *seg, caddr_t addr);
 105 static int segspt_shmgettype(struct seg *seg, caddr_t addr);
 106 static int segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp);
 107 static int segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len,
 108                         uint_t behav);
 109 static void segspt_shmdump(struct seg *seg);
 110 static int segspt_shmpagelock(struct seg *, caddr_t, size_t,
 111                         struct page ***, enum lock_type, enum seg_rw);
 112 static int segspt_shmgetmemid(struct seg *, caddr_t, memid_t *);
 113 static lgrp_mem_policy_info_t *segspt_shmgetpolicy(struct seg *, caddr_t);
 114 
 115 const struct seg_ops segspt_shmops = {
 116         .dup            = segspt_shmdup,
 117         .unmap          = segspt_shmunmap,
 118         .free           = segspt_shmfree,
 119         .fault          = segspt_shmfault,
 120         .faulta         = segspt_shmfaulta,
 121         .setprot        = segspt_shmsetprot,
 122         .checkprot      = segspt_shmcheckprot,
 123         .kluster        = segspt_shmkluster,
 124         .sync           = segspt_shmsync,
 125         .incore         = segspt_shmincore,
 126         .lockop         = segspt_shmlockop,
 127         .getprot        = segspt_shmgetprot,
 128         .getoffset      = segspt_shmgetoffset,
 129         .gettype        = segspt_shmgettype,
 130         .getvp          = segspt_shmgetvp,
 131         .advise         = segspt_shmadvise,
 132         .dump           = segspt_shmdump,
 133         .pagelock       = segspt_shmpagelock,
 134         .getmemid       = segspt_shmgetmemid,
 135         .getpolicy      = segspt_shmgetpolicy,
 136 };
 137 
 138 static void segspt_purge(struct seg *seg);
 139 static int segspt_reclaim(void *, caddr_t, size_t, struct page **,
 140                 enum seg_rw, int);
 141 static int spt_anon_getpages(struct seg *seg, caddr_t addr, size_t len,
 142                 page_t **ppa);
 143 
 144 
 145 
 146 /*ARGSUSED*/
 147 int
 148 sptcreate(size_t size, struct seg **sptseg, struct anon_map *amp,
 149         uint_t prot, uint_t flags, uint_t share_szc)
 150 {
 151         int     err;
 152         struct  as      *newas;


2930                 /*
2931                  * If random memory allocation policy set already,
2932                  * don't bother reapplying it.
2933                  */
2934                 if (already_set && !LGRP_MEM_POLICY_REAPPLICABLE(policy))
2935                         return (0);
2936 
2937                 /*
2938                  * Mark any existing pages in the given range for
2939                  * migration, flushing the I/O page cache, and using
2940                  * underlying segment to calculate anon index and get
2941                  * anonmap and vnode pointer from
2942                  */
2943                 if (shmd->shm_softlockcnt > 0)
2944                         segspt_purge(seg);
2945 
2946                 page_mark_migrate(seg, shm_addr, size, amp, 0, NULL, 0, 0);
2947         }
2948 
2949         return (0);
2950 }
2951 
2952 /*ARGSUSED*/
2953 void
2954 segspt_shmdump(struct seg *seg)
2955 {
2956         /* no-op for ISM segment */
2957 }
2958 
2959 /*
2960  * get a memory ID for an addr in a given segment
2961  */
2962 static int
2963 segspt_shmgetmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
2964 {
2965         struct shm_data *shmd = (struct shm_data *)seg->s_data;
2966         struct anon     *ap;
2967         size_t          anon_index;
2968         struct anon_map *amp = shmd->shm_amp;
2969         struct spt_data *sptd = shmd->shm_sptseg->s_data;
2970         struct seg      *sptseg = shmd->shm_sptseg;
2971         anon_sync_obj_t cookie;
2972 
2973         anon_index = seg_page(seg, addr);
2974 
2975         if (addr > (seg->s_base + sptd->spt_realsize)) {
2976                 return (EFAULT);




  89                 caddr_t addr, size_t len, enum fault_type type, enum seg_rw rw);
  90 static faultcode_t segspt_shmfaulta(struct seg *seg, caddr_t addr);
  91 static int segspt_shmsetprot(register struct seg *seg, register caddr_t addr,
  92                         register size_t len, register uint_t prot);
  93 static int segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size,
  94                         uint_t prot);
  95 static int      segspt_shmkluster(struct seg *seg, caddr_t addr, ssize_t delta);
  96 static size_t segspt_shmincore(struct seg *seg, caddr_t addr, size_t len,
  97                         register char *vec);
  98 static int segspt_shmsync(struct seg *seg, register caddr_t addr, size_t len,
  99                         int attr, uint_t flags);
 100 static int segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len,
 101                         int attr, int op, ulong_t *lockmap, size_t pos);
 102 static int segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len,
 103                         uint_t *protv);
 104 static u_offset_t segspt_shmgetoffset(struct seg *seg, caddr_t addr);
 105 static int segspt_shmgettype(struct seg *seg, caddr_t addr);
 106 static int segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp);
 107 static int segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len,
 108                         uint_t behav);

 109 static int segspt_shmpagelock(struct seg *, caddr_t, size_t,
 110                         struct page ***, enum lock_type, enum seg_rw);
 111 static int segspt_shmgetmemid(struct seg *, caddr_t, memid_t *);
 112 static lgrp_mem_policy_info_t *segspt_shmgetpolicy(struct seg *, caddr_t);
 113 
 114 const struct seg_ops segspt_shmops = {
 115         .dup            = segspt_shmdup,
 116         .unmap          = segspt_shmunmap,
 117         .free           = segspt_shmfree,
 118         .fault          = segspt_shmfault,
 119         .faulta         = segspt_shmfaulta,
 120         .setprot        = segspt_shmsetprot,
 121         .checkprot      = segspt_shmcheckprot,
 122         .kluster        = segspt_shmkluster,
 123         .sync           = segspt_shmsync,
 124         .incore         = segspt_shmincore,
 125         .lockop         = segspt_shmlockop,
 126         .getprot        = segspt_shmgetprot,
 127         .getoffset      = segspt_shmgetoffset,
 128         .gettype        = segspt_shmgettype,
 129         .getvp          = segspt_shmgetvp,
 130         .advise         = segspt_shmadvise,

 131         .pagelock       = segspt_shmpagelock,
 132         .getmemid       = segspt_shmgetmemid,
 133         .getpolicy      = segspt_shmgetpolicy,
 134 };
 135 
 136 static void segspt_purge(struct seg *seg);
 137 static int segspt_reclaim(void *, caddr_t, size_t, struct page **,
 138                 enum seg_rw, int);
 139 static int spt_anon_getpages(struct seg *seg, caddr_t addr, size_t len,
 140                 page_t **ppa);
 141 
 142 
 143 
 144 /*ARGSUSED*/
 145 int
 146 sptcreate(size_t size, struct seg **sptseg, struct anon_map *amp,
 147         uint_t prot, uint_t flags, uint_t share_szc)
 148 {
 149         int     err;
 150         struct  as      *newas;


2928                 /*
2929                  * If random memory allocation policy set already,
2930                  * don't bother reapplying it.
2931                  */
2932                 if (already_set && !LGRP_MEM_POLICY_REAPPLICABLE(policy))
2933                         return (0);
2934 
2935                 /*
2936                  * Mark any existing pages in the given range for
2937                  * migration, flushing the I/O page cache, and using
2938                  * underlying segment to calculate anon index and get
2939                  * anonmap and vnode pointer from
2940                  */
2941                 if (shmd->shm_softlockcnt > 0)
2942                         segspt_purge(seg);
2943 
2944                 page_mark_migrate(seg, shm_addr, size, amp, 0, NULL, 0, 0);
2945         }
2946 
2947         return (0);







2948 }
2949 
2950 /*
2951  * get a memory ID for an addr in a given segment
2952  */
2953 static int
2954 segspt_shmgetmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
2955 {
2956         struct shm_data *shmd = (struct shm_data *)seg->s_data;
2957         struct anon     *ap;
2958         size_t          anon_index;
2959         struct anon_map *amp = shmd->shm_amp;
2960         struct spt_data *sptd = shmd->shm_sptseg->s_data;
2961         struct seg      *sptseg = shmd->shm_sptseg;
2962         anon_sync_obj_t cookie;
2963 
2964         anon_index = seg_page(seg, addr);
2965 
2966         if (addr > (seg->s_base + sptd->spt_realsize)) {
2967                 return (EFAULT);