Print this page
patch lower-case-segops
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/os/urw.c
+++ new/usr/src/uts/common/os/urw.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
↓ open down ↓ |
18 lines elided |
↑ open up ↑ |
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
23 23 * Use is subject to license terms.
24 24 */
25 25
26 26 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
27 27 /* All Rights Reserved */
28 28
29 -#pragma ident "%Z%%M% %I% %E% SMI"
30 -
31 29 #include <sys/atomic.h>
32 30 #include <sys/errno.h>
33 31 #include <sys/stat.h>
34 32 #include <sys/modctl.h>
35 33 #include <sys/conf.h>
36 34 #include <sys/systm.h>
37 35 #include <sys/ddi.h>
38 36 #include <sys/sunddi.h>
39 37 #include <sys/cpuvar.h>
40 38 #include <sys/kmem.h>
41 39 #include <sys/strsubr.h>
42 40 #include <sys/sysmacros.h>
43 41 #include <sys/frame.h>
44 42 #include <sys/stack.h>
45 43 #include <sys/proc.h>
46 44 #include <sys/priv.h>
47 45 #include <sys/policy.h>
48 46 #include <sys/ontrap.h>
49 47 #include <sys/vmsystm.h>
50 48 #include <sys/prsystm.h>
51 49
52 50 #include <vm/as.h>
53 51 #include <vm/seg.h>
54 52 #include <vm/seg_dev.h>
55 53 #include <vm/seg_vn.h>
56 54 #include <vm/seg_spt.h>
57 55 #include <vm/seg_kmem.h>
58 56
59 57 extern struct seg_ops segdev_ops; /* needs a header file */
60 58 extern struct seg_ops segspt_shmops; /* needs a header file */
61 59
62 60 static int
63 61 page_valid(struct seg *seg, caddr_t addr)
64 62 {
↓ open down ↓ |
24 lines elided |
↑ open up ↑ |
65 63 struct segvn_data *svd;
66 64 vnode_t *vp;
67 65 vattr_t vattr;
68 66
69 67 /*
70 68 * Fail if the page doesn't map to a page in the underlying
71 69 * mapped file, if an underlying mapped file exists.
72 70 */
73 71 vattr.va_mask = AT_SIZE;
74 72 if (seg->s_ops == &segvn_ops &&
75 - SEGOP_GETVP(seg, addr, &vp) == 0 &&
73 + segop_getvp(seg, addr, &vp) == 0 &&
76 74 vp != NULL && vp->v_type == VREG &&
77 75 VOP_GETATTR(vp, &vattr, 0, CRED(), NULL) == 0) {
78 76 u_offset_t size = roundup(vattr.va_size, (u_offset_t)PAGESIZE);
79 - u_offset_t offset = SEGOP_GETOFFSET(seg, addr);
77 + u_offset_t offset = segop_getoffset(seg, addr);
80 78
81 79 if (offset >= size)
82 80 return (0);
83 81 }
84 82
85 83 /*
86 84 * Fail if this is an ISM shared segment and the address is
87 85 * not within the real size of the spt segment that backs it.
88 86 */
89 87 if (seg->s_ops == &segspt_shmops &&
90 88 addr >= seg->s_base + spt_realsize(seg))
91 89 return (0);
92 90
93 91 /*
94 92 * Fail if the segment is mapped from /dev/null.
95 93 * The key is that the mapping comes from segdev and the
96 94 * type is neither MAP_SHARED nor MAP_PRIVATE.
97 95 */
98 96 if (seg->s_ops == &segdev_ops &&
99 - ((SEGOP_GETTYPE(seg, addr) & (MAP_SHARED | MAP_PRIVATE)) == 0))
97 + ((segop_gettype(seg, addr) & (MAP_SHARED | MAP_PRIVATE)) == 0))
100 98 return (0);
101 99
102 100 /*
103 101 * Fail if the page is a MAP_NORESERVE page that has
104 102 * not actually materialized.
105 103 * We cheat by knowing that segvn is the only segment
106 104 * driver that supports MAP_NORESERVE.
107 105 */
108 106 if (seg->s_ops == &segvn_ops &&
109 107 (svd = (struct segvn_data *)seg->s_data) != NULL &&
110 108 (svd->vp == NULL || svd->vp->v_type != VREG) &&
111 109 (svd->flags & MAP_NORESERVE)) {
112 110 /*
113 111 * Guilty knowledge here. We know that
114 112 * segvn_incore returns more than just the
115 113 * low-order bit that indicates the page is
116 114 * actually in memory. If any bits are set,
117 115 * then there is backing store for the page.
118 116 */
119 117 char incore = 0;
120 - (void) SEGOP_INCORE(seg, addr, PAGESIZE, &incore);
118 + (void) segop_incore(seg, addr, PAGESIZE, &incore);
121 119 if (incore == 0)
122 120 return (0);
123 121 }
124 122 return (1);
125 123 }
126 124
/*
 * Map address "addr" in address space "as" into a kernel virtual address.
 * The memory is guaranteed to be resident and locked down.
 */
static caddr_t
mapin(struct as *as, caddr_t addr, int writing)
{
	page_t *pp;
	caddr_t kaddr;
	pfn_t pfnum;

	/*
	 * NB: Because of past mistakes, we have bits being returned
	 * by getpfnum that are actually the page type bits of the pte.
	 * When the object we are trying to map is a memory page with
	 * a page structure everything is ok and we can use the optimal
	 * method, ppmapin. Otherwise, we have to do something special.
	 */
	pfnum = hat_getpfnum(as->a_hat, addr);
	if (pf_is_memory(pfnum)) {
		pp = page_numtopp_nolock(pfnum);
		if (pp != NULL) {
			/* The caller must already hold the page locked. */
			ASSERT(PAGE_LOCKED(pp));
			kaddr = ppmapin(pp, writing ?
			    (PROT_READ | PROT_WRITE) : PROT_READ,
			    (caddr_t)-1);
			/* Preserve the sub-page offset of "addr". */
			return (kaddr + ((uintptr_t)addr & PAGEOFFSET));
		}
	}

	/*
	 * Oh well, we didn't have a page struct for the object we were
	 * trying to map in; ppmapin doesn't handle devices, but allocating a
	 * heap address allows ppmapout to free virtual space when done.
	 */
	kaddr = vmem_alloc(heap_arena, PAGESIZE, VM_SLEEP);

	hat_devload(kas.a_hat, kaddr, PAGESIZE, pfnum,
	    writing ? (PROT_READ | PROT_WRITE) : PROT_READ, HAT_LOAD_LOCK);

	return (kaddr + ((uintptr_t)addr & PAGEOFFSET));
}
169 167
170 168 /*ARGSUSED*/
171 169 static void
172 170 mapout(struct as *as, caddr_t addr, caddr_t vaddr, int writing)
173 171 {
174 172 vaddr = (caddr_t)(uintptr_t)((uintptr_t)vaddr & PAGEMASK);
175 173 ppmapout(vaddr);
176 174 }
177 175
/*
 * Perform I/O to a given process. This will return EIO if we detect
 * corrupt memory and ENXIO if there is no such mapped address in the
 * user process's address space.
 *
 * "writing" nonzero copies from "buf" into the process; zero copies
 * from the process into "buf".  The transfer must not cross a page
 * boundary (asserted below).
 */
static int
urw(proc_t *p, int writing, void *buf, size_t len, uintptr_t a)
{
	caddr_t addr = (caddr_t)a;
	caddr_t page;
	caddr_t vaddr;
	struct seg *seg;
	int error = 0;
	int err = 0;
	uint_t prot;
	uint_t prot_rw = writing ? PROT_WRITE : PROT_READ;
	int protchanged;
	on_trap_data_t otd;
	int retrycnt;
	struct as *as = p->p_as;
	enum seg_rw rw;

	/*
	 * Locate segment containing address of interest.
	 */
	page = (caddr_t)(uintptr_t)((uintptr_t)addr & PAGEMASK);
	retrycnt = 0;
	AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
retry:
	if ((seg = as_segat(as, page)) == NULL ||
	    !page_valid(seg, page)) {
		AS_LOCK_EXIT(as, &as->a_lock);
		return (ENXIO);
	}
	(void) segop_getprot(seg, page, 0, &prot);

	protchanged = 0;
	if ((prot & prot_rw) == 0) {
		/*
		 * The page lacks the permission we need; grant it
		 * temporarily and restore the original protections
		 * before returning.
		 */
		protchanged = 1;
		err = segop_setprot(seg, page, PAGESIZE, prot | prot_rw);

		if (err == IE_RETRY) {
			/* The segment changed underneath us; retry once. */
			protchanged = 0;
			ASSERT(retrycnt == 0);
			retrycnt++;
			goto retry;
		}

		if (err != 0) {
			AS_LOCK_EXIT(as, &as->a_lock);
			return (ENXIO);
		}
	}

	/*
	 * segvn may do a copy-on-write for F_SOFTLOCK/S_READ case to break
	 * sharing to avoid a copy on write of a softlocked page by another
	 * thread. But since we locked the address space as a writer no other
	 * thread can cause a copy on write. S_READ_NOCOW is passed as the
	 * access type to tell segvn that it's ok not to do a copy-on-write
	 * for this SOFTLOCK fault.
	 */
	if (writing)
		rw = S_WRITE;
	else if (seg->s_ops == &segvn_ops)
		rw = S_READ_NOCOW;
	else
		rw = S_READ;

	/* Softlock the target page in memory for the duration of the copy. */
	if (segop_fault(as->a_hat, seg, page, PAGESIZE, F_SOFTLOCK, rw)) {
		if (protchanged)
			(void) segop_setprot(seg, page, PAGESIZE, prot);
		AS_LOCK_EXIT(as, &as->a_lock);
		return (ENXIO);
	}
	CPU_STATS_ADD_K(vm, softlock, 1);

	/*
	 * Make sure we're not trying to read or write off the end of the page.
	 */
	ASSERT(len <= page + PAGESIZE - addr);

	/*
	 * Map in the locked page, copy to our local buffer,
	 * then map the page out and unlock it.
	 */
	vaddr = mapin(as, addr, writing);

	/*
	 * Since we are copying memory on behalf of the user process,
	 * protect against memory error correction faults.
	 */
	if (!on_trap(&otd, OT_DATA_EC)) {
		if (seg->s_ops == &segdev_ops) {
			/*
			 * Device memory can behave strangely; invoke
			 * a segdev-specific copy operation instead.
			 */
			if (writing) {
				if (segdev_copyto(seg, addr, buf, vaddr, len))
					error = ENXIO;
			} else {
				if (segdev_copyfrom(seg, addr, vaddr, buf, len))
					error = ENXIO;
			}
		} else {
			if (writing)
				bcopy(buf, vaddr, len);
			else
				bcopy(vaddr, buf, len);
		}
	} else {
		/* A memory error was trapped during the copy. */
		error = EIO;
	}
	no_trap();

	/*
	 * If we're writing to an executable page, we may need to synchronize
	 * the I$ with the modifications we made through the D$.
	 */
	if (writing && (prot & PROT_EXEC))
		sync_icache(vaddr, (uint_t)len);

	mapout(as, addr, vaddr, writing);

	/* S_READ_NOCOW was only meaningful for the F_SOFTLOCK fault above. */
	if (rw == S_READ_NOCOW)
		rw = S_READ;

	(void) segop_fault(as->a_hat, seg, page, PAGESIZE, F_SOFTUNLOCK, rw);

	if (protchanged)
		(void) segop_setprot(seg, page, PAGESIZE, prot);

	AS_LOCK_EXIT(as, &as->a_lock);

	return (error);
}
315 313
/*
 * Read "len" bytes at user address "a" in process "p" into "buf".
 * Returns 0 on success, EIO on a detected memory error, or ENXIO if
 * the address is not mapped in the process.
 */
int
uread(proc_t *p, void *buf, size_t len, uintptr_t a)
{
	return (urw(p, 0, buf, len, a));
}
321 319
/*
 * Write "len" bytes from "buf" to user address "a" in process "p".
 * Returns 0 on success, EIO on a detected memory error, or ENXIO if
 * the address is not mapped in the process.
 */
int
uwrite(proc_t *p, void *buf, size_t len, uintptr_t a)
{
	return (urw(p, 1, buf, len, a));
}
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX