Print this page
3882 remove xmod & friends
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/syscall/fcntl.c
+++ new/usr/src/uts/common/syscall/fcntl.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
↓ open down ↓ |
11 lines elided |
↑ open up ↑ |
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 -/* ONC_PLUS EXTRACT START */
23 22 /*
24 23 * Copyright (c) 1994, 2010, Oracle and/or its affiliates. All rights reserved.
25 24 * Copyright (c) 2013, OmniTI Computer Consulting, Inc. All rights reserved.
26 25 */
27 26
28 27 /* Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T */
29 28 /* All Rights Reserved */
30 29
31 30 /*
32 31 * Portions of this source code were derived from Berkeley 4.3 BSD
33 32 * under license from the Regents of the University of California.
34 33 */
35 34
36 35
37 -/* ONC_PLUS EXTRACT END */
38 -
39 36 #include <sys/param.h>
40 37 #include <sys/isa_defs.h>
41 38 #include <sys/types.h>
42 39 #include <sys/sysmacros.h>
43 40 #include <sys/systm.h>
44 41 #include <sys/errno.h>
45 42 #include <sys/fcntl.h>
46 -/* ONC_PLUS EXTRACT START */
47 43 #include <sys/flock.h>
48 -/* ONC_PLUS EXTRACT END */
49 44 #include <sys/vnode.h>
50 45 #include <sys/file.h>
51 46 #include <sys/mode.h>
52 47 #include <sys/proc.h>
53 48 #include <sys/filio.h>
54 49 #include <sys/share.h>
55 50 #include <sys/debug.h>
56 51 #include <sys/rctl.h>
57 52 #include <sys/nbmlock.h>
58 53
59 54 #include <sys/cmn_err.h>
60 55
61 -/* ONC_PLUS EXTRACT START */
62 56 static int flock_check(vnode_t *, flock64_t *, offset_t, offset_t);
63 57 static int flock_get_start(vnode_t *, flock64_t *, offset_t, u_offset_t *);
64 58 static void fd_too_big(proc_t *);
65 59
/*
 * File control.
 *
 * fcntl(2) system call entry point.  fdes is the caller's file
 * descriptor, cmd is one of the F_* commands, and arg is a
 * command-specific value: either a plain integer or a pointer to a
 * user-space structure (struct flock / flock64 / o_flock / fshare).
 * Returns the command's result value, or -1 with errno set via
 * set_errno() on failure.
 */
int
fcntl(int fdes, int cmd, intptr_t arg)
{
	int iarg;
	int error = 0;
	int retval;
	proc_t *p;
	file_t *fp;
	vnode_t *vp;
	u_offset_t offset;
	u_offset_t start;
	struct vattr vattr;
	int in_crit;		/* nonzero once nbl_start_crit() is held */
	int flag;
	struct flock sbf;
	struct flock64 bf;
	struct o_flock obf;	/* SVR3-compatible flock (F_O_GETLK) */
	struct flock64_32 bf64_32;	/* ILP32 layout of flock64 */
	struct fshare fsh;
	struct shrlock shr;
	struct shr_locowner shr_own;
	offset_t maxoffset;
	model_t datamodel;
	int fdres;

	/*
	 * Sanity-check the structure-size assumptions the copyin/copyout
	 * code below relies on for each kernel data model.
	 */
#if defined(_ILP32) && !defined(lint) && defined(_SYSCALL32)
	ASSERT(sizeof (struct flock) == sizeof (struct flock32));
	ASSERT(sizeof (struct flock64) == sizeof (struct flock64_32));
#endif
#if defined(_LP64) && !defined(lint) && defined(_SYSCALL32)
	ASSERT(sizeof (struct flock) == sizeof (struct flock64_64));
	ASSERT(sizeof (struct flock64) == sizeof (struct flock64_64));
#endif

	/*
	 * First, for speed, deal with the subset of cases
	 * that do not require getf() / releasef().
	 */
	switch (cmd) {
	case F_GETFD:
		if ((error = f_getfd_error(fdes, &flag)) == 0)
			retval = flag;
		goto out;

	case F_SETFD:
		error = f_setfd_error(fdes, (int)arg);
		retval = 0;
		goto out;

	case F_GETFL:
		if ((error = f_getfl(fdes, &flag)) == 0) {
			retval = (flag & (FMASK | FASYNC));
			/*
			 * Re-apply the FOPEN bias so userland sees the
			 * open-mode encoding of O_RDONLY/O_WRONLY/O_RDWR;
			 * FSEARCH/FEXEC descriptors carry no read/write
			 * mode, so the bias is skipped for them
			 * (see sys/file.h for the flag encoding).
			 */
			if ((flag & (FSEARCH | FEXEC)) == 0)
				retval += FOPEN;
			else
				retval |= (flag & (FSEARCH | FEXEC));
		}
		goto out;

	case F_GETXFL:
		if ((error = f_getfl(fdes, &flag)) == 0) {
			retval = flag;
			if ((flag & (FSEARCH | FEXEC)) == 0)
				retval += FOPEN;
		}
		goto out;

	case F_BADFD:
		if ((error = f_badfd(fdes, &fdres, (int)arg)) == 0)
			retval = fdres;
		goto out;
	}

	/*
	 * Second, for speed, deal with the subset of cases that
	 * require getf() / releasef() but do not require copyin.
	 */
	if ((fp = getf(fdes)) == NULL) {
		error = EBADF;
		goto out;
	}
	iarg = (int)arg;

	switch (cmd) {
	case F_DUPFD:
	case F_DUPFD_CLOEXEC:
		p = curproc;
		if ((uint_t)iarg >= p->p_fno_ctl) {
			if (iarg >= 0)
				fd_too_big(p);
			error = EINVAL;
			goto done;
		}
		/*
		 * We need to increment the f_count reference counter
		 * before allocating a new file descriptor.
		 * Doing it other way round opens a window for race condition
		 * with closeandsetf() on the target file descriptor which can
		 * close the file still referenced by the original
		 * file descriptor.
		 */
		mutex_enter(&fp->f_tlock);
		fp->f_count++;
		mutex_exit(&fp->f_tlock);
		if ((retval = ufalloc_file(iarg, fp)) == -1) {
			/*
			 * New file descriptor can't be allocated.
			 * Revert the reference count.
			 */
			mutex_enter(&fp->f_tlock);
			fp->f_count--;
			mutex_exit(&fp->f_tlock);
			error = EMFILE;
		} else {
			if (cmd == F_DUPFD_CLOEXEC) {
				f_setfd(retval, FD_CLOEXEC);
			}
		}
		goto done;

	case F_DUP2FD_CLOEXEC:
		/* dup2(fd, fd) with CLOEXEC is ambiguous; reject it. */
		if (fdes == iarg) {
			error = EINVAL;
			goto done;
		}

		/*FALLTHROUGH*/

	case F_DUP2FD:
		p = curproc;
		if (fdes == iarg) {
			retval = iarg;
		} else if ((uint_t)iarg >= p->p_fno_ctl) {
			if (iarg >= 0)
				fd_too_big(p);
			error = EBADF;
		} else {
			/*
			 * We can't hold our getf(fdes) across the call to
			 * closeandsetf() because it creates a window for
			 * deadlock: if one thread is doing dup2(a, b) while
			 * another is doing dup2(b, a), each one will block
			 * waiting for the other to call releasef().  The
			 * solution is to increment the file reference count
			 * (which we have to do anyway), then releasef(fdes),
			 * then closeandsetf().  Incrementing f_count ensures
			 * that fp won't disappear after we call releasef().
			 * When closeandsetf() fails, we try avoid calling
			 * closef() because of all the side effects.
			 */
			mutex_enter(&fp->f_tlock);
			fp->f_count++;
			mutex_exit(&fp->f_tlock);
			releasef(fdes);
			if ((error = closeandsetf(iarg, fp)) == 0) {
				if (cmd == F_DUP2FD_CLOEXEC) {
					f_setfd(iarg, FD_CLOEXEC);
				}
				retval = iarg;
			} else {
				/*
				 * Undo our extra hold; only call closef()
				 * if ours was the last reference.
				 */
				mutex_enter(&fp->f_tlock);
				if (fp->f_count > 1) {
					fp->f_count--;
					mutex_exit(&fp->f_tlock);
				} else {
					mutex_exit(&fp->f_tlock);
					(void) closef(fp);
				}
			}
			/* fdes already released above; skip `done'. */
			goto out;
		}
		goto done;

	case F_SETFL:
		vp = fp->f_vnode;
		flag = fp->f_flag;
		if ((iarg & (FNONBLOCK|FNDELAY)) == (FNONBLOCK|FNDELAY))
			iarg &= ~FNDELAY;
		if ((error = VOP_SETFL(vp, flag, iarg, fp->f_cred, NULL)) ==
		    0) {
			iarg &= FMASK;
			mutex_enter(&fp->f_tlock);
			/* Replace all settable bits but preserve FREAD/FWRITE. */
			fp->f_flag &= ~FMASK | (FREAD|FWRITE);
			fp->f_flag |= (iarg - FOPEN) & ~(FREAD|FWRITE);
			mutex_exit(&fp->f_tlock);
		}
		retval = 0;
		goto done;
	}

	/*
	 * Finally, deal with the expensive cases.
	 */
	retval = 0;
	in_crit = 0;
	maxoffset = MAXOFF_T;
	datamodel = DATAMODEL_NATIVE;
#if defined(_SYSCALL32_IMPL)
	if ((datamodel = get_udatamodel()) == DATAMODEL_ILP32)
		maxoffset = MAXOFF32_T;
#endif

	vp = fp->f_vnode;
	flag = fp->f_flag;
	offset = fp->f_offset;

	switch (cmd) {
	/*
	 * The file system and vnode layers understand and implement
	 * locking with flock64 structures. So here once we pass through
	 * the test for compatibility as defined by LFS API, (for F_SETLK,
	 * F_SETLKW, F_GETLK, F_GETLKW, F_FREESP) we transform
	 * the flock structure to a flock64 structure and send it to the
	 * lower layers. Similarly in case of GETLK the returned flock64
	 * structure is transformed to a flock structure if everything fits
	 * in nicely, otherwise we return EOVERFLOW.
	 */

	case F_GETLK:
	case F_O_GETLK:
	case F_SETLK:
	case F_SETLKW:
	case F_SETLK_NBMAND:

		/*
		 * Copy in input fields only.
		 */

		if (cmd == F_O_GETLK) {
			/* SVR3 compatibility only exists for ILP32 apps. */
			if (datamodel != DATAMODEL_ILP32) {
				error = EINVAL;
				break;
			}

			if (copyin((void *)arg, &obf, sizeof (obf))) {
				error = EFAULT;
				break;
			}
			bf.l_type = obf.l_type;
			bf.l_whence = obf.l_whence;
			bf.l_start = (off64_t)obf.l_start;
			bf.l_len = (off64_t)obf.l_len;
			bf.l_sysid = (int)obf.l_sysid;
			bf.l_pid = obf.l_pid;
		} else if (datamodel == DATAMODEL_NATIVE) {
			if (copyin((void *)arg, &sbf, sizeof (sbf))) {
				error = EFAULT;
				break;
			}
			/*
			 * XXX	In an LP64 kernel with an LP64 application
			 *	there's no need to do a structure copy here
			 *	struct flock == struct flock64. However,
			 *	we did it this way to avoid more conditional
			 *	compilation.
			 */
			bf.l_type = sbf.l_type;
			bf.l_whence = sbf.l_whence;
			bf.l_start = (off64_t)sbf.l_start;
			bf.l_len = (off64_t)sbf.l_len;
			bf.l_sysid = sbf.l_sysid;
			bf.l_pid = sbf.l_pid;
		}
#if defined(_SYSCALL32_IMPL)
		else {
			/* ILP32 app on an LP64 kernel: widen a flock32. */
			struct flock32 sbf32;
			if (copyin((void *)arg, &sbf32, sizeof (sbf32))) {
				error = EFAULT;
				break;
			}
			bf.l_type = sbf32.l_type;
			bf.l_whence = sbf32.l_whence;
			bf.l_start = (off64_t)sbf32.l_start;
			bf.l_len = (off64_t)sbf32.l_len;
			bf.l_sysid = sbf32.l_sysid;
			bf.l_pid = sbf32.l_pid;
		}
#endif /* _SYSCALL32_IMPL */

		/*
		 * 64-bit support: check for overflow for 32-bit lock ops
		 */
		if ((error = flock_check(vp, &bf, offset, maxoffset)) != 0)
			break;

		/*
		 * Not all of the filesystems understand F_O_GETLK, and
		 * there's no need for them to know. Map it to F_GETLK.
		 */
		if ((error = VOP_FRLOCK(vp, (cmd == F_O_GETLK) ? F_GETLK : cmd,
		    &bf, flag, offset, NULL, fp->f_cred, NULL)) != 0)
			break;

		/*
		 * If command is GETLK and no lock is found, only
		 * the type field is changed.
		 */
		if ((cmd == F_O_GETLK || cmd == F_GETLK) &&
		    bf.l_type == F_UNLCK) {
			/* l_type always first entry, always a short */
			if (copyout(&bf.l_type, &((struct flock *)arg)->l_type,
			    sizeof (bf.l_type)))
				error = EFAULT;
			break;
		}

		if (cmd == F_O_GETLK) {
			/*
			 * Return an SVR3 flock structure to the user.
			 */
			obf.l_type = (int16_t)bf.l_type;
			obf.l_whence = (int16_t)bf.l_whence;
			obf.l_start = (int32_t)bf.l_start;
			obf.l_len = (int32_t)bf.l_len;
			if (bf.l_sysid > SHRT_MAX || bf.l_pid > SHRT_MAX) {
				/*
				 * One or both values for the above fields
				 * is too large to store in an SVR3 flock
				 * structure.
				 */
				error = EOVERFLOW;
				break;
			}
			obf.l_sysid = (int16_t)bf.l_sysid;
			obf.l_pid = (int16_t)bf.l_pid;
			if (copyout(&obf, (void *)arg, sizeof (obf)))
				error = EFAULT;
		} else if (cmd == F_GETLK) {
			/*
			 * Copy out SVR4 flock.
			 */
			int i;

			if (bf.l_start > maxoffset || bf.l_len > maxoffset) {
				error = EOVERFLOW;
				break;
			}

			if (datamodel == DATAMODEL_NATIVE) {
				for (i = 0; i < 4; i++)
					sbf.l_pad[i] = 0;
				/*
				 * XXX	In an LP64 kernel with an LP64
				 *	application there's no need to do a
				 *	structure copy here as currently
				 *	struct flock == struct flock64.
				 *	We did it this way to avoid more
				 *	conditional compilation.
				 */
				sbf.l_type = bf.l_type;
				sbf.l_whence = bf.l_whence;
				sbf.l_start = (off_t)bf.l_start;
				sbf.l_len = (off_t)bf.l_len;
				sbf.l_sysid = bf.l_sysid;
				sbf.l_pid = bf.l_pid;
				if (copyout(&sbf, (void *)arg, sizeof (sbf)))
					error = EFAULT;
			}
#if defined(_SYSCALL32_IMPL)
			else {
				struct flock32 sbf32;
				if (bf.l_start > MAXOFF32_T ||
				    bf.l_len > MAXOFF32_T) {
					error = EOVERFLOW;
					break;
				}
				for (i = 0; i < 4; i++)
					sbf32.l_pad[i] = 0;
				sbf32.l_type = (int16_t)bf.l_type;
				sbf32.l_whence = (int16_t)bf.l_whence;
				sbf32.l_start = (off32_t)bf.l_start;
				sbf32.l_len = (off32_t)bf.l_len;
				sbf32.l_sysid = (int32_t)bf.l_sysid;
				sbf32.l_pid = (pid32_t)bf.l_pid;
				if (copyout(&sbf32,
				    (void *)arg, sizeof (sbf32)))
					error = EFAULT;
			}
#endif
		}
		break;

	case F_CHKFL:
		/*
		 * This is for internal use only, to allow the vnode layer
		 * to validate a flags setting before applying it. User
		 * programs can't issue it.
		 */
		error = EINVAL;
		break;

	case F_ALLOCSP:
	case F_FREESP:
	case F_ALLOCSP64:
	case F_FREESP64:
		/*
		 * Test for not-a-regular-file (and returning EINVAL)
		 * before testing for open-for-writing (and returning EBADF).
		 * This is relied upon by posix_fallocate() in libc.
		 */
		if (vp->v_type != VREG) {
			error = EINVAL;
			break;
		}

		if ((flag & FWRITE) == 0) {
			error = EBADF;
			break;
		}

		/* The *64 commands exist only for ILP32 applications. */
		if (datamodel != DATAMODEL_ILP32 &&
		    (cmd == F_ALLOCSP64 || cmd == F_FREESP64)) {
			error = EINVAL;
			break;
		}

#if defined(_ILP32) || defined(_SYSCALL32_IMPL)
		if (datamodel == DATAMODEL_ILP32 &&
		    (cmd == F_ALLOCSP || cmd == F_FREESP)) {
			struct flock32 sbf32;
			/*
			 * For compatibility we overlay an SVR3 flock on an SVR4
			 * flock. This works because the input field offsets
			 * in "struct flock" were preserved.
			 */
			if (copyin((void *)arg, &sbf32, sizeof (sbf32))) {
				error = EFAULT;
				break;
			} else {
				bf.l_type = sbf32.l_type;
				bf.l_whence = sbf32.l_whence;
				bf.l_start = (off64_t)sbf32.l_start;
				bf.l_len = (off64_t)sbf32.l_len;
				bf.l_sysid = sbf32.l_sysid;
				bf.l_pid = sbf32.l_pid;
			}
		}
#endif /* _ILP32 || _SYSCALL32_IMPL */

#if defined(_LP64)
		if (datamodel == DATAMODEL_LP64 &&
		    (cmd == F_ALLOCSP || cmd == F_FREESP)) {
			if (copyin((void *)arg, &bf, sizeof (bf))) {
				error = EFAULT;
				break;
			}
		}
#endif /* defined(_LP64) */

#if !defined(_LP64) || defined(_SYSCALL32_IMPL)
		if (datamodel == DATAMODEL_ILP32 &&
		    (cmd == F_ALLOCSP64 || cmd == F_FREESP64)) {
			if (copyin((void *)arg, &bf64_32, sizeof (bf64_32))) {
				error = EFAULT;
				break;
			} else {
				/*
				 * Note that the size of flock64 is different in
				 * the ILP32 and LP64 models, due to the l_pad
				 * field. We do not want to assume that the
				 * flock64 structure is laid out the same in
				 * ILP32 and LP64 environments, so we will
				 * copy in the ILP32 version of flock64
				 * explicitly and copy it to the native
				 * flock64 structure.
				 */
				bf.l_type = (short)bf64_32.l_type;
				bf.l_whence = (short)bf64_32.l_whence;
				bf.l_start = bf64_32.l_start;
				bf.l_len = bf64_32.l_len;
				bf.l_sysid = (int)bf64_32.l_sysid;
				bf.l_pid = (pid_t)bf64_32.l_pid;
			}
		}
#endif /* !defined(_LP64) || defined(_SYSCALL32_IMPL) */

		if (cmd == F_ALLOCSP || cmd == F_FREESP)
			error = flock_check(vp, &bf, offset, maxoffset);
		else if (cmd == F_ALLOCSP64 || cmd == F_FREESP64)
			error = flock_check(vp, &bf, offset, MAXOFFSET_T);
		if (error)
			break;

		if (vp->v_type == VREG && bf.l_len == 0 &&
		    bf.l_start > OFFSET_MAX(fp)) {
			error = EFBIG;
			break;
		}

		/*
		 * Make sure that there are no conflicting non-blocking
		 * mandatory locks in the region being manipulated. If
		 * there are such locks then return EACCES.
		 */
		if ((error = flock_get_start(vp, &bf, offset, &start)) != 0)
			break;

		if (nbl_need_check(vp)) {
			u_offset_t begin;
			ssize_t length;

			nbl_start_crit(vp, RW_READER);
			in_crit = 1;
			vattr.va_mask = AT_SIZE;
			if ((error = VOP_GETATTR(vp, &vattr, 0, CRED(), NULL))
			    != 0)
				break;
			/* Check the span between `start' and current EOF. */
			begin = start > vattr.va_size ? vattr.va_size : start;
			length = vattr.va_size > start ? vattr.va_size - start :
			    start - vattr.va_size;
			if (nbl_conflict(vp, NBL_WRITE, begin, length, 0,
			    NULL)) {
				error = EACCES;
				break;
			}
		}

		/* The VOP layer only understands the non-64 commands. */
		if (cmd == F_ALLOCSP64)
			cmd = F_ALLOCSP;
		else if (cmd == F_FREESP64)
			cmd = F_FREESP;

		error = VOP_SPACE(vp, cmd, &bf, flag, offset, fp->f_cred, NULL);

		break;

#if !defined(_LP64) || defined(_SYSCALL32_IMPL)
	case F_GETLK64:
	case F_SETLK64:
	case F_SETLKW64:
	case F_SETLK64_NBMAND:
		/*
		 * Large Files: Here we set cmd as *LK and send it to
		 * lower layers. *LK64 is only for the user land.
		 * Most of the comments described above for F_SETLK
		 * applies here too.
		 * Large File support is only needed for ILP32 apps!
		 */
		if (datamodel != DATAMODEL_ILP32) {
			error = EINVAL;
			break;
		}

		if (cmd == F_GETLK64)
			cmd = F_GETLK;
		else if (cmd == F_SETLK64)
			cmd = F_SETLK;
		else if (cmd == F_SETLKW64)
			cmd = F_SETLKW;
		else if (cmd == F_SETLK64_NBMAND)
			cmd = F_SETLK_NBMAND;

		/*
		 * Note that the size of flock64 is different in the ILP32
		 * and LP64 models, due to the sucking l_pad field.
		 * We do not want to assume that the flock64 structure is
		 * laid out in the same in ILP32 and LP64 environments, so
		 * we will copy in the ILP32 version of flock64 explicitly
		 * and copy it to the native flock64 structure.
		 */

		if (copyin((void *)arg, &bf64_32, sizeof (bf64_32))) {
			error = EFAULT;
			break;
		}

		bf.l_type = (short)bf64_32.l_type;
		bf.l_whence = (short)bf64_32.l_whence;
		bf.l_start = bf64_32.l_start;
		bf.l_len = bf64_32.l_len;
		bf.l_sysid = (int)bf64_32.l_sysid;
		bf.l_pid = (pid_t)bf64_32.l_pid;

		if ((error = flock_check(vp, &bf, offset, MAXOFFSET_T)) != 0)
			break;

		if ((error = VOP_FRLOCK(vp, cmd, &bf, flag, offset,
		    NULL, fp->f_cred, NULL)) != 0)
			break;

		/* No lock found: update only the l_type field, as above. */
		if ((cmd == F_GETLK) && bf.l_type == F_UNLCK) {
			if (copyout(&bf.l_type, &((struct flock *)arg)->l_type,
			    sizeof (bf.l_type)))
				error = EFAULT;
			break;
		}

		if (cmd == F_GETLK) {
			int i;

			/*
			 * We do not want to assume that the flock64 structure
			 * is laid out in the same in ILP32 and LP64
			 * environments, so we will copy out the ILP32 version
			 * of flock64 explicitly after copying the native
			 * flock64 structure to it.
			 */
			for (i = 0; i < 4; i++)
				bf64_32.l_pad[i] = 0;
			bf64_32.l_type = (int16_t)bf.l_type;
			bf64_32.l_whence = (int16_t)bf.l_whence;
			bf64_32.l_start = bf.l_start;
			bf64_32.l_len = bf.l_len;
			bf64_32.l_sysid = (int32_t)bf.l_sysid;
			bf64_32.l_pid = (pid32_t)bf.l_pid;
			if (copyout(&bf64_32, (void *)arg, sizeof (bf64_32)))
				error = EFAULT;
		}
		break;
#endif /* !defined(_LP64) || defined(_SYSCALL32_IMPL) */

	case F_SHARE:
	case F_SHARE_NBMAND:
	case F_UNSHARE:

		/*
		 * Copy in input fields only.
		 */
		if (copyin((void *)arg, &fsh, sizeof (fsh))) {
			error = EFAULT;
			break;
		}

		/*
		 * Local share reservations always have this simple form
		 */
		shr.s_access = fsh.f_access;
		shr.s_deny = fsh.f_deny;
		shr.s_sysid = 0;
		shr.s_pid = ttoproc(curthread)->p_pid;
		shr_own.sl_pid = shr.s_pid;
		shr_own.sl_id = fsh.f_id;
		shr.s_own_len = sizeof (shr_own);
		shr.s_owner = (caddr_t)&shr_own;
		error = VOP_SHRLOCK(vp, cmd, &shr, flag, fp->f_cred, NULL);
		break;

	default:
		error = EINVAL;
		break;
	}

	/* Exit the non-blocking-lock critical region if we entered it. */
	if (in_crit)
		nbl_end_crit(vp);

done:
	/* Paths arriving here still hold the getf(fdes) reference. */
	releasef(fdes);
out:
	if (error)
		return (set_errno(error));
	return (retval);
}
729 715
730 -/* ONC_PLUS EXTRACT START */
731 716 int
732 717 flock_check(vnode_t *vp, flock64_t *flp, offset_t offset, offset_t max)
733 718 {
734 719 struct vattr vattr;
735 720 int error;
736 721 u_offset_t start, end;
737 722
738 723 /*
739 724 * Determine the starting point of the request
740 725 */
741 726 switch (flp->l_whence) {
742 727 case 0: /* SEEK_SET */
743 728 start = (u_offset_t)flp->l_start;
744 729 if (start > max)
745 730 return (EINVAL);
746 731 break;
747 732 case 1: /* SEEK_CUR */
748 733 if (flp->l_start > (max - offset))
749 734 return (EOVERFLOW);
750 735 start = (u_offset_t)(flp->l_start + offset);
751 736 if (start > max)
752 737 return (EINVAL);
753 738 break;
754 739 case 2: /* SEEK_END */
755 740 vattr.va_mask = AT_SIZE;
756 741 if (error = VOP_GETATTR(vp, &vattr, 0, CRED(), NULL))
757 742 return (error);
758 743 if (flp->l_start > (max - (offset_t)vattr.va_size))
759 744 return (EOVERFLOW);
760 745 start = (u_offset_t)(flp->l_start + (offset_t)vattr.va_size);
761 746 if (start > max)
762 747 return (EINVAL);
763 748 break;
764 749 default:
765 750 return (EINVAL);
766 751 }
767 752
768 753 /*
769 754 * Determine the range covered by the request.
770 755 */
771 756 if (flp->l_len == 0)
772 757 end = MAXEND;
773 758 else if ((offset_t)flp->l_len > 0) {
774 759 if (flp->l_len > (max - start + 1))
775 760 return (EOVERFLOW);
776 761 end = (u_offset_t)(start + (flp->l_len - 1));
777 762 ASSERT(end <= max);
778 763 } else {
779 764 /*
780 765 * Negative length; why do we even allow this ?
781 766 * Because this allows easy specification of
782 767 * the last n bytes of the file.
783 768 */
784 769 end = start;
785 770 start += (u_offset_t)flp->l_len;
786 771 (start)++;
787 772 if (start > max)
788 773 return (EINVAL);
789 774 ASSERT(end <= max);
790 775 }
791 776 ASSERT(start <= max);
792 777 if (flp->l_type == F_UNLCK && flp->l_len > 0 &&
793 778 end == (offset_t)max) {
794 779 flp->l_len = 0;
795 780 }
796 781 if (start > end)
797 782 return (EINVAL);
798 783 return (0);
799 784 }
800 785
801 786 static int
802 787 flock_get_start(vnode_t *vp, flock64_t *flp, offset_t offset, u_offset_t *start)
803 788 {
804 789 struct vattr vattr;
805 790 int error;
806 791
807 792 /*
808 793 * Determine the starting point of the request. Assume that it is
809 794 * a valid starting point.
810 795 */
811 796 switch (flp->l_whence) {
812 797 case 0: /* SEEK_SET */
813 798 *start = (u_offset_t)flp->l_start;
814 799 break;
815 800 case 1: /* SEEK_CUR */
816 801 *start = (u_offset_t)(flp->l_start + offset);
817 802 break;
818 803 case 2: /* SEEK_END */
819 804 vattr.va_mask = AT_SIZE;
820 805 if (error = VOP_GETATTR(vp, &vattr, 0, CRED(), NULL))
821 806 return (error);
822 807 *start = (u_offset_t)(flp->l_start + (offset_t)vattr.va_size);
823 808 break;
824 809 default:
825 810 return (EINVAL);
826 811 }
827 812
828 813 return (0);
829 814 }
830 815
/*
 * Take rctl action when the requested file descriptor is too big.
 * Fires the process.max-file-descriptor (RLIMIT_NOFILE) resource
 * control for process `p' so the usual rctl signalling/logging occurs;
 * the caller still returns its own error to the application.
 */
static void
fd_too_big(proc_t *p)
{
	/* NOTE(review): p_lock appears to be required across rctl_action()
	 * — taken here and dropped immediately after; confirm against the
	 * rctl framework's locking contract. */
	mutex_enter(&p->p_lock);
	(void) rctl_action(rctlproc_legacy[RLIMIT_NOFILE],
	    p->p_rctls, p, RCA_SAFE);
	mutex_exit(&p->p_lock);
}
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX