92 register size_t len, register uint_t prot);
93 static int segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size,
94 uint_t prot);
95 static int segspt_shmkluster(struct seg *seg, caddr_t addr, ssize_t delta);
96 static size_t segspt_shmincore(struct seg *seg, caddr_t addr, size_t len,
97 register char *vec);
98 static int segspt_shmsync(struct seg *seg, register caddr_t addr, size_t len,
99 int attr, uint_t flags);
100 static int segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len,
101 int attr, int op, ulong_t *lockmap, size_t pos);
102 static int segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len,
103 uint_t *protv);
104 static u_offset_t segspt_shmgetoffset(struct seg *seg, caddr_t addr);
105 static int segspt_shmgettype(struct seg *seg, caddr_t addr);
106 static int segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp);
107 static int segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len,
108 uint_t behav);
109 static void segspt_shmdump(struct seg *seg);
110 static int segspt_shmpagelock(struct seg *, caddr_t, size_t,
111 struct page ***, enum lock_type, enum seg_rw);
112 static int segspt_shmsetpgsz(struct seg *, caddr_t, size_t, uint_t);
113 static int segspt_shmgetmemid(struct seg *, caddr_t, memid_t *);
114 static lgrp_mem_policy_info_t *segspt_shmgetpolicy(struct seg *, caddr_t);
115
/*
 * Segment-ops vector for shared page table (SPT) / ISM shared memory
 * segments.  Each entry points at the segspt_shm* implementation of the
 * corresponding seg_ops callback; functions are forward-declared above.
 */
struct seg_ops segspt_shmops = {
	.dup		= segspt_shmdup,
	.unmap		= segspt_shmunmap,
	.free		= segspt_shmfree,
	.fault		= segspt_shmfault,
	.faulta		= segspt_shmfaulta,
	.setprot	= segspt_shmsetprot,
	.checkprot	= segspt_shmcheckprot,
	.kluster	= segspt_shmkluster,
	.sync		= segspt_shmsync,
	.incore		= segspt_shmincore,
	.lockop		= segspt_shmlockop,
	.getprot	= segspt_shmgetprot,
	.getoffset	= segspt_shmgetoffset,
	.gettype	= segspt_shmgettype,
	.getvp		= segspt_shmgetvp,
	.advise		= segspt_shmadvise,
	.dump		= segspt_shmdump,		/* no-op for ISM */
	.pagelock	= segspt_shmpagelock,
	.setpagesize	= segspt_shmsetpgsz,	/* always ENOTSUP */
	.getmemid	= segspt_shmgetmemid,
	.getpolicy	= segspt_shmgetpolicy,
};
139
140 static void segspt_purge(struct seg *seg);
141 static int segspt_reclaim(void *, caddr_t, size_t, struct page **,
142 enum seg_rw, int);
143 static int spt_anon_getpages(struct seg *seg, caddr_t addr, size_t len,
144 page_t **ppa);
145
146
147
148 /*ARGSUSED*/
149 int
150 sptcreate(size_t size, struct seg **sptseg, struct anon_map *amp,
151 uint_t prot, uint_t flags, uint_t share_szc)
152 {
153 int err;
154 struct as *newas;
155 struct segspt_crargs sptcargs;
2939 /*
2940 * Mark any existing pages in the given range for
2941 * migration, flushing the I/O page cache, and using
2942 * underlying segment to calculate anon index and get
2943 * anonmap and vnode pointer from
2944 */
2945 if (shmd->shm_softlockcnt > 0)
2946 segspt_purge(seg);
2947
2948 page_mark_migrate(seg, shm_addr, size, amp, 0, NULL, 0, 0);
2949 }
2950
2951 return (0);
2952 }
2953
/*
 * seg_ops dump entry: ISM segments have nothing to contribute to a crash
 * dump, so this is deliberately a no-op.
 *
 * Fix: the forward declaration spells this `static void`; the definition
 * omitted `static`.  Although a prior visible `static` declaration already
 * forces internal linkage, defining it without the keyword is inconsistent
 * and trips some compilers' -Wmissing-declarations/static-analysis checks.
 */
/*ARGSUSED*/
static void
segspt_shmdump(struct seg *seg)
{
	/* no-op for ISM segment */
}
2960
2961 /*ARGSUSED*/
2962 static faultcode_t
2963 segspt_shmsetpgsz(struct seg *seg, caddr_t addr, size_t len, uint_t szc)
2964 {
2965 return (ENOTSUP);
2966 }
2967
2968 /*
2969 * get a memory ID for an addr in a given segment
2970 */
2971 static int
2972 segspt_shmgetmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
2973 {
2974 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2975 struct anon *ap;
2976 size_t anon_index;
2977 struct anon_map *amp = shmd->shm_amp;
2978 struct spt_data *sptd = shmd->shm_sptseg->s_data;
2979 struct seg *sptseg = shmd->shm_sptseg;
2980 anon_sync_obj_t cookie;
2981
2982 anon_index = seg_page(seg, addr);
2983
2984 if (addr > (seg->s_base + sptd->spt_realsize)) {
2985 return (EFAULT);
|
92 register size_t len, register uint_t prot);
93 static int segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size,
94 uint_t prot);
95 static int segspt_shmkluster(struct seg *seg, caddr_t addr, ssize_t delta);
96 static size_t segspt_shmincore(struct seg *seg, caddr_t addr, size_t len,
97 register char *vec);
98 static int segspt_shmsync(struct seg *seg, register caddr_t addr, size_t len,
99 int attr, uint_t flags);
100 static int segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len,
101 int attr, int op, ulong_t *lockmap, size_t pos);
102 static int segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len,
103 uint_t *protv);
104 static u_offset_t segspt_shmgetoffset(struct seg *seg, caddr_t addr);
105 static int segspt_shmgettype(struct seg *seg, caddr_t addr);
106 static int segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp);
107 static int segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len,
108 uint_t behav);
109 static void segspt_shmdump(struct seg *seg);
110 static int segspt_shmpagelock(struct seg *, caddr_t, size_t,
111 struct page ***, enum lock_type, enum seg_rw);
112 static int segspt_shmgetmemid(struct seg *, caddr_t, memid_t *);
113 static lgrp_mem_policy_info_t *segspt_shmgetpolicy(struct seg *, caddr_t);
114
/*
 * Segment-ops vector for shared page table (SPT) / ISM shared memory
 * segments.  Each entry points at the segspt_shm* implementation of the
 * corresponding seg_ops callback; functions are forward-declared above.
 */
struct seg_ops segspt_shmops = {
	.dup		= segspt_shmdup,
	.unmap		= segspt_shmunmap,
	.free		= segspt_shmfree,
	.fault		= segspt_shmfault,
	.faulta		= segspt_shmfaulta,
	.setprot	= segspt_shmsetprot,
	.checkprot	= segspt_shmcheckprot,
	.kluster	= segspt_shmkluster,
	.sync		= segspt_shmsync,
	.incore		= segspt_shmincore,
	.lockop		= segspt_shmlockop,
	.getprot	= segspt_shmgetprot,
	.getoffset	= segspt_shmgetoffset,
	.gettype	= segspt_shmgettype,
	.getvp		= segspt_shmgetvp,
	.advise		= segspt_shmadvise,
	.dump		= segspt_shmdump,		/* no-op for ISM */
	.pagelock	= segspt_shmpagelock,
	.getmemid	= segspt_shmgetmemid,
	.getpolicy	= segspt_shmgetpolicy,
};
137
138 static void segspt_purge(struct seg *seg);
139 static int segspt_reclaim(void *, caddr_t, size_t, struct page **,
140 enum seg_rw, int);
141 static int spt_anon_getpages(struct seg *seg, caddr_t addr, size_t len,
142 page_t **ppa);
143
144
145
146 /*ARGSUSED*/
147 int
148 sptcreate(size_t size, struct seg **sptseg, struct anon_map *amp,
149 uint_t prot, uint_t flags, uint_t share_szc)
150 {
151 int err;
152 struct as *newas;
153 struct segspt_crargs sptcargs;
2937 /*
2938 * Mark any existing pages in the given range for
2939 * migration, flushing the I/O page cache, and using
2940 * underlying segment to calculate anon index and get
2941 * anonmap and vnode pointer from
2942 */
2943 if (shmd->shm_softlockcnt > 0)
2944 segspt_purge(seg);
2945
2946 page_mark_migrate(seg, shm_addr, size, amp, 0, NULL, 0, 0);
2947 }
2948
2949 return (0);
2950 }
2951
/*
 * seg_ops dump entry: ISM segments have nothing to contribute to a crash
 * dump, so this is deliberately a no-op.
 *
 * Fix: the forward declaration spells this `static void`; the definition
 * omitted `static`.  Although a prior visible `static` declaration already
 * forces internal linkage, defining it without the keyword is inconsistent
 * and trips some compilers' -Wmissing-declarations/static-analysis checks.
 */
/*ARGSUSED*/
static void
segspt_shmdump(struct seg *seg)
{
	/* no-op for ISM segment */
}
2958
2959 /*
2960 * get a memory ID for an addr in a given segment
2961 */
2962 static int
2963 segspt_shmgetmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
2964 {
2965 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2966 struct anon *ap;
2967 size_t anon_index;
2968 struct anon_map *amp = shmd->shm_amp;
2969 struct spt_data *sptd = shmd->shm_sptseg->s_data;
2970 struct seg *sptseg = shmd->shm_sptseg;
2971 anon_sync_obj_t cookie;
2972
2973 anon_index = seg_page(seg, addr);
2974
2975 if (addr > (seg->s_base + sptd->spt_realsize)) {
2976 return (EFAULT);
|