5045 use atomic_{inc,dec}_* instead of atomic_add_*
--- old/usr/src/uts/common/os/fio.c
+++ new/usr/src/uts/common/os/fio.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 1989, 2010, Oracle and/or its affiliates. All rights reserved.
24 24 * Copyright (c) 2012, Joyent Inc. All rights reserved.
25 25 */
26 26
27 27 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
28 28 /* All Rights Reserved */
29 29
30 30 #include <sys/types.h>
31 31 #include <sys/sysmacros.h>
32 32 #include <sys/param.h>
33 33 #include <sys/systm.h>
34 34 #include <sys/errno.h>
35 35 #include <sys/signal.h>
36 36 #include <sys/cred.h>
37 37 #include <sys/user.h>
38 38 #include <sys/conf.h>
39 39 #include <sys/vfs.h>
40 40 #include <sys/vnode.h>
41 41 #include <sys/pathname.h>
42 42 #include <sys/file.h>
43 43 #include <sys/proc.h>
44 44 #include <sys/var.h>
45 45 #include <sys/cpuvar.h>
46 46 #include <sys/open.h>
47 47 #include <sys/cmn_err.h>
48 48 #include <sys/priocntl.h>
49 49 #include <sys/procset.h>
50 50 #include <sys/prsystm.h>
51 51 #include <sys/debug.h>
52 52 #include <sys/kmem.h>
53 53 #include <sys/atomic.h>
54 54 #include <sys/fcntl.h>
55 55 #include <sys/poll.h>
56 56 #include <sys/rctl.h>
57 57 #include <sys/port_impl.h>
58 58 #include <sys/dtrace.h>
59 59
60 60 #include <c2/audit.h>
61 61 #include <sys/nbmlock.h>
62 62
63 63 #ifdef DEBUG
64 64
65 65 static uint32_t afd_maxfd; /* # of entries in maximum allocated array */
66 66 static uint32_t afd_alloc; /* count of kmem_alloc()s */
67 67 static uint32_t afd_free; /* count of kmem_free()s */
68 68 static uint32_t afd_wait; /* count of waits on non-zero ref count */
69 69 #define MAXFD(x) (afd_maxfd = ((afd_maxfd >= (x))? afd_maxfd : (x)))
70 -#define COUNT(x) atomic_add_32(&x, 1)
70 +#define COUNT(x) atomic_inc_32(&x)
71 71
72 72 #else /* DEBUG */
73 73
74 74 #define MAXFD(x)
75 75 #define COUNT(x)
76 76
77 77 #endif /* DEBUG */
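
The substitution above is the whole of change 5045: the dedicated increment form replaces the general add-a-delta form. A minimal sketch of the same idiom outside this file, assuming the usual <sys/atomic.h> interfaces; the counter and helper names below are hypothetical, not part of fio.c, and per the synopsis the decrement counterpart atomic_dec_32() is the analogous replacement for atomic_add_32(&x, -1):

	#include <sys/types.h>
	#include <sys/atomic.h>

	static volatile uint32_t demo_count;	/* hypothetical counter */

	static void
	demo_hold(void)
	{
		/* was: atomic_add_32(&demo_count, 1); */
		atomic_inc_32(&demo_count);
	}

	static void
	demo_rele(void)
	{
		/* was: atomic_add_32(&demo_count, -1); */
		atomic_dec_32(&demo_count);
	}
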
78 78
79 79 kmem_cache_t *file_cache;
80 80
81 81 static void port_close_fd(portfd_t *);
82 82
83 83 /*
84 84 * File descriptor allocation.
85 85 *
86 86 * fd_find(fip, minfd) finds the first available descriptor >= minfd.
87 87 * The most common case is open(2), in which minfd = 0, but we must also
88 88 * support fcntl(fd, F_DUPFD, minfd).
89 89 *
90 90 * The algorithm is as follows: we keep all file descriptors in an infix
91 91 * binary tree in which each node records the number of descriptors
92 92 * allocated in its right subtree, including itself. Starting at minfd,
93 93 * we ascend the tree until we find a non-fully allocated right subtree.
94 94 * We then descend that subtree in a binary search for the smallest fd.
95 95 * Finally, we ascend the tree again to increment the allocation count
96 96 * of every subtree containing the newly-allocated fd. Freeing an fd
97 97 * requires only the last step: we ascend the tree to decrement allocation
98 98 * counts. Each of these three steps (ascent to find non-full subtree,
99 99 * descent to find lowest fd, ascent to update allocation counts) is
100 100 * O(log n), thus the algorithm as a whole is O(log n).
101 101 *
102 102 * We don't implement the fd tree using the customary left/right/parent
103 103 * pointers, but instead take advantage of the glorious mathematics of
104 104 * full infix binary trees. For reference, here's an illustration of the
105 105 * logical structure of such a tree, rooted at 4 (binary 100), covering
106 106 * the range 1-7 (binary 001-111). Our canonical trees do not include
107 107 * fd 0; we'll deal with that later.
108 108 *
109 109 * 100
110 110 * / \
111 111 * / \
112 112 * 010 110
113 113 * / \ / \
114 114 * 001 011 101 111
115 115 *
116 116 * We make the following observations, all of which are easily proven by
117 117 * induction on the depth of the tree:
118 118 *
119 119 * (T1) The least-significant bit (LSB) of any node is equal to its level
120 120 * in the tree. In our example, nodes 001, 011, 101 and 111 are at
121 121 * level 0; nodes 010 and 110 are at level 1; and node 100 is at level 2.
122 122 *
123 123 * (T2) The child size (CSIZE) of node N -- that is, the total number of
124 124 * right-branch descendants in a child of node N, including itself -- is
125 125 * given by clearing all but the least significant bit of N. This
126 126 * follows immediately from (T1). Applying this rule to our example, we
127 127 * see that CSIZE(100) = 100, CSIZE(x10) = 10, and CSIZE(xx1) = 1.
128 128 *
129 129 * (T3) The nearest left ancestor (LPARENT) of node N -- that is, the nearest
130 130 * ancestor containing node N in its right child -- is given by clearing
131 131 * the LSB of N. For example, LPARENT(111) = 110 and LPARENT(110) = 100.
132 132 * Clearing the LSB of nodes 001, 010 or 100 yields zero, reflecting
133 133 * the fact that these are leftmost nodes. Note that this algorithm
134 134 * automatically skips generations as necessary. For example, the parent
135 135 * of node 101 is 110, which is a *right* ancestor (not what we want);
136 136 * but its grandparent is 100, which is a left ancestor. Clearing the LSB
137 137 * of 101 gets us to 100 directly, skipping right past the uninteresting
138 138 * generation (110).
139 139 *
140 140 * Note that since LPARENT clears the LSB, whereas CSIZE clears all *but*
141 141 * the LSB, we can express LPARENT() nicely in terms of CSIZE():
142 142 *
143 143 * LPARENT(N) = N - CSIZE(N)
144 144 *
145 145 * (T4) The nearest right ancestor (RPARENT) of node N is given by:
146 146 *
147 147 * RPARENT(N) = N + CSIZE(N)
148 148 *
149 149 * (T5) For every interior node, the children differ from their parent by
150 150 * CSIZE(parent) / 2. In our example, CSIZE(100) / 2 = 2 = 10 binary,
151 151 * and indeed, the children of 100 are 100 +/- 10 = 010 and 110.
152 152 *
153 153 * Next, we'll need a few two's-complement math tricks. Suppose a number,
154 154 * N, has the following form:
155 155 *
156 156 * N = xxxx10...0
157 157 *
158 158 * That is, the binary representation of N consists of some string of bits,
159 159 * then a 1, then all zeroes. This amounts to nothing more than saying that
160 160 * N has a least-significant bit, which is true for any N != 0. If we look
161 161 * at N and N - 1 together, we see that we can combine them in useful ways:
162 162 *
163 163 * N = xxxx10...0
164 164 * N - 1 = xxxx01...1
165 165 * ------------------------
166 166 * N & (N - 1) = xxxx000000
167 167 * N | (N - 1) = xxxx111111
168 168 * N ^ (N - 1) = 111111
169 169 *
170 170 * In particular, this suggests several easy ways to clear all but the LSB,
171 171 * which by (T2) is exactly what we need to determine CSIZE(N) = 10...0.
172 172 * We'll opt for this formulation:
173 173 *
174 174 * (C1) CSIZE(N) = (N - 1) ^ (N | (N - 1))
175 175 *
176 176 * Similarly, we have an easy way to determine LPARENT(N), which requires
177 177 * that we clear the LSB of N:
178 178 *
179 179 * (L1) LPARENT(N) = N & (N - 1)
180 180 *
181 181 * We note in the above relations that (N | (N - 1)) - N = CSIZE(N) - 1.
182 182 * When combined with (T4), this yields an easy way to compute RPARENT(N):
183 183 *
184 184 * (R1) RPARENT(N) = (N | (N - 1)) + 1
185 185 *
186 186 * Finally, to accommodate fd 0 we must adjust all of our results by +/-1 to
187 187 * move the fd range from [1, 2^n) to [0, 2^n - 1). This is straightforward,
188 188 * so there's no need to belabor the algebra; the revised relations become:
189 189 *
190 190 * (C1a) CSIZE(N) = N ^ (N | (N + 1))
191 191 *
192 192 * (L1a) LPARENT(N) = (N & (N + 1)) - 1
193 193 *
194 194 * (R1a) RPARENT(N) = N | (N + 1)
195 195 *
196 196 * This completes the mathematical framework. We now have all the tools
197 197 * we need to implement fd_find() and fd_reserve().
198 198 *
199 199 * fd_find(fip, minfd) finds the smallest available file descriptor >= minfd.
200 200 * It does not actually allocate the descriptor; that's done by fd_reserve().
201 201 * fd_find() proceeds in two steps:
202 202 *
203 203 * (1) Find the leftmost subtree that contains a descriptor >= minfd.
204 204 * We start at the right subtree rooted at minfd. If this subtree is
205 205 * not full -- if fip->fi_list[minfd].uf_alloc != CSIZE(minfd) -- then
206 206 * step 1 is done. Otherwise, we know that all fds in this subtree
207 207 * are taken, so we ascend to RPARENT(minfd) using (R1a). We repeat
208 208 * this process until we either find a candidate subtree or exceed
209 209 * fip->fi_nfiles. We use (C1a) to compute CSIZE().
210 210 *
211 211 * (2) Find the smallest fd in the subtree discovered by step 1.
212 212 * Starting at the root of this subtree, we descend to find the
213 213 * smallest available fd. Since the left children have the smaller
214 214 * fds, we will descend rightward only when the left child is full.
215 215 *
216 216 * We begin by comparing the number of allocated fds in the root
217 217 * to the number of allocated fds in its right child; if they differ
218 218 * by exactly CSIZE(child), we know the left subtree is full, so we
219 219 * descend right; that is, the right child becomes the search root.
220 220 * Otherwise we leave the root alone and start following the right
221 221 * child's left children. As fortune would have it, this is very
222 222 * simple computationally: by (T5), the right child of fd is just
223 223 * fd + size, where size = CSIZE(fd) / 2. Applying (T5) again,
224 224 * we find that the right child's left child is fd + size - (size / 2) =
225 225 * fd + (size / 2); *its* left child is fd + (size / 2) - (size / 4) =
226 226 * fd + (size / 4), and so on. In general, fd's right child's
227 227 * leftmost nth descendant is fd + (size >> n). Thus, to follow
228 228 * the right child's left descendants, we just halve the size in
229 229 * each iteration of the search.
230 230 *
231 231 * When we descend leftward, we must keep track of the number of fds
232 232 * that were allocated in all the right subtrees we rejected, so we
233 233 * know how many of the root fd's allocations are in the remaining
234 234 * (as yet unexplored) leftmost part of its right subtree. When we
235 235 * encounter a fully-allocated left child -- that is, when we find
236 236 * that fip->fi_list[fd].uf_alloc == ralloc + size -- we descend right
237 237 * (as described earlier), resetting ralloc to zero.
238 238 *
239 239 * fd_reserve(fip, fd, incr) either allocates or frees fd, depending
240 240 * on whether incr is 1 or -1. Starting at fd, fd_reserve() ascends
241 241 * the leftmost ancestors (see (T3)) and updates the allocation counts.
242 242 * At each step we use (L1a) to compute LPARENT(), the next left ancestor.
243 243 *
244 244 * flist_minsize() finds the minimal tree that still covers all
245 245 * used fds; as long as the allocation count of a root node is zero, we
246 246 * don't need that node or its right subtree.
247 247 *
248 248 * flist_nalloc() counts the number of allocated fds in the tree, by starting
249 249 * at the top of the tree and summing the right-subtree allocation counts as
250 250 * it descends leftwards.
251 251 *
252 252 * Note: we assume that flist_grow() will keep fip->fi_nfiles of the form
253 253 * 2^n - 1. This ensures that the fd trees are always full, which saves
254 254 * quite a bit of boundary checking.
255 255 */
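
The adjusted relations (C1a), (L1a) and (R1a) are easy to sanity-check against the example tree in the comment above, shifted down by one so that it covers fds 0-6 (root 3, interior nodes 1 and 5, leaves 0, 2, 4 and 6). A small user-land sketch, not part of the kernel source:

	#include <assert.h>
	#include <stdio.h>

	/* (C1a), (L1a), (R1a) from the block comment, 0-based fd range */
	#define	CSIZE(n)	((n) ^ ((n) | ((n) + 1)))
	#define	LPARENT(n)	(((n) & ((n) + 1)) - 1)
	#define	RPARENT(n)	((n) | ((n) + 1))

	int
	main(void)
	{
		int fd;

		/* root 3 covers 4 fds; interior node 1 covers 2; leaf 0 covers 1 */
		assert(CSIZE(3) == 4 && CSIZE(1) == 2 && CSIZE(0) == 1);
		/* left ancestors: 6 -> 5, 5 -> 3, 4 -> 3 */
		assert(LPARENT(6) == 5 && LPARENT(5) == 3 && LPARENT(4) == 3);
		/* right ancestors: 0 -> 1, 2 -> 3, 4 -> 5 */
		assert(RPARENT(0) == 1 && RPARENT(2) == 3 && RPARENT(4) == 5);

		for (fd = 0; fd <= 6; fd++)
			printf("fd %d: CSIZE=%d LPARENT=%d RPARENT=%d\n",
			    fd, CSIZE(fd), LPARENT(fd), RPARENT(fd));
		return (0);
	}
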
256 256 static int
257 257 fd_find(uf_info_t *fip, int minfd)
258 258 {
259 259 int size, ralloc, fd;
260 260
261 261 ASSERT(MUTEX_HELD(&fip->fi_lock));
262 262 ASSERT((fip->fi_nfiles & (fip->fi_nfiles + 1)) == 0);
263 263
264 264 for (fd = minfd; (uint_t)fd < fip->fi_nfiles; fd |= fd + 1) {
265 265 size = fd ^ (fd | (fd + 1));
266 266 if (fip->fi_list[fd].uf_alloc == size)
267 267 continue;
268 268 for (ralloc = 0, size >>= 1; size != 0; size >>= 1) {
269 269 ralloc += fip->fi_list[fd + size].uf_alloc;
270 270 if (fip->fi_list[fd].uf_alloc == ralloc + size) {
271 271 fd += size;
272 272 ralloc = 0;
273 273 }
274 274 }
275 275 return (fd);
276 276 }
277 277 return (-1);
278 278 }
279 279
280 280 static void
281 281 fd_reserve(uf_info_t *fip, int fd, int incr)
282 282 {
283 283 int pfd;
284 284 uf_entry_t *ufp = &fip->fi_list[fd];
285 285
286 286 ASSERT((uint_t)fd < fip->fi_nfiles);
287 287 ASSERT((ufp->uf_busy == 0 && incr == 1) ||
288 288 (ufp->uf_busy == 1 && incr == -1));
289 289 ASSERT(MUTEX_HELD(&ufp->uf_lock));
290 290 ASSERT(MUTEX_HELD(&fip->fi_lock));
291 291
292 292 for (pfd = fd; pfd >= 0; pfd = (pfd & (pfd + 1)) - 1)
293 293 fip->fi_list[pfd].uf_alloc += incr;
294 294
295 295 ufp->uf_busy += incr;
296 296 }
297 297
298 298 static int
299 299 flist_minsize(uf_info_t *fip)
300 300 {
301 301 int fd;
302 302
303 303 /*
304 304 * We'd like to ASSERT(MUTEX_HELD(&fip->fi_lock)), but we're called
305 305 * by flist_fork(), which relies on other mechanisms for mutual
306 306 * exclusion.
307 307 */
308 308 ASSERT((fip->fi_nfiles & (fip->fi_nfiles + 1)) == 0);
309 309
310 310 for (fd = fip->fi_nfiles; fd != 0; fd >>= 1)
311 311 if (fip->fi_list[fd >> 1].uf_alloc != 0)
312 312 break;
313 313
314 314 return (fd);
315 315 }
316 316
317 317 static int
318 318 flist_nalloc(uf_info_t *fip)
319 319 {
320 320 int fd;
321 321 int nalloc = 0;
322 322
323 323 ASSERT(MUTEX_HELD(&fip->fi_lock));
324 324 ASSERT((fip->fi_nfiles & (fip->fi_nfiles + 1)) == 0);
325 325
326 326 for (fd = fip->fi_nfiles; fd != 0; fd >>= 1)
327 327 nalloc += fip->fi_list[fd >> 1].uf_alloc;
328 328
329 329 return (nalloc);
330 330 }
331 331
332 332 /*
333 333 * Increase size of the fi_list array to accommodate at least maxfd.
334 334 * We keep the size of the form 2^n - 1 for benefit of fd_find().
335 335 */
336 336 static void
337 337 flist_grow(int maxfd)
338 338 {
339 339 uf_info_t *fip = P_FINFO(curproc);
340 340 int newcnt, oldcnt;
341 341 uf_entry_t *src, *dst, *newlist, *oldlist, *newend, *oldend;
342 342 uf_rlist_t *urp;
343 343
344 344 for (newcnt = 1; newcnt <= maxfd; newcnt = (newcnt << 1) | 1)
345 345 continue;
346 346
347 347 newlist = kmem_zalloc(newcnt * sizeof (uf_entry_t), KM_SLEEP);
348 348
349 349 mutex_enter(&fip->fi_lock);
350 350 oldcnt = fip->fi_nfiles;
351 351 if (newcnt <= oldcnt) {
352 352 mutex_exit(&fip->fi_lock);
353 353 kmem_free(newlist, newcnt * sizeof (uf_entry_t));
354 354 return;
355 355 }
356 356 ASSERT((newcnt & (newcnt + 1)) == 0);
357 357 oldlist = fip->fi_list;
358 358 oldend = oldlist + oldcnt;
359 359 newend = newlist + oldcnt; /* no need to lock beyond old end */
360 360
361 361 /*
362 362 * fi_list and fi_nfiles cannot change while any uf_lock is held,
363 363 * so we must grab all the old locks *and* the new locks up to oldcnt.
364 364 * (Locks beyond the end of oldcnt aren't visible until we store
365 365 * the new fi_nfiles, which is the last thing we do before dropping
366 366 * all the locks, so there's no need to acquire these locks).
367 367 * Holding the new locks is necessary because when fi_list changes
368 368 * to point to the new list, fi_nfiles won't have been stored yet.
369 369 * If we *didn't* hold the new locks, someone doing a UF_ENTER()
370 370 * could see the new fi_list, grab the new uf_lock, and then see
371 371 * fi_nfiles change while the lock is held -- in violation of
372 372 * UF_ENTER() semantics.
373 373 */
374 374 for (src = oldlist; src < oldend; src++)
375 375 mutex_enter(&src->uf_lock);
376 376
377 377 for (dst = newlist; dst < newend; dst++)
378 378 mutex_enter(&dst->uf_lock);
379 379
380 380 for (src = oldlist, dst = newlist; src < oldend; src++, dst++) {
381 381 dst->uf_file = src->uf_file;
382 382 dst->uf_fpollinfo = src->uf_fpollinfo;
383 383 dst->uf_refcnt = src->uf_refcnt;
384 384 dst->uf_alloc = src->uf_alloc;
385 385 dst->uf_flag = src->uf_flag;
386 386 dst->uf_busy = src->uf_busy;
387 387 dst->uf_portfd = src->uf_portfd;
388 388 }
389 389
390 390 /*
391 391 * As soon as we store the new flist, future locking operations
392 392 * will use it. Therefore, we must ensure that all the state
393 393 * we've just established reaches global visibility before the
394 394 * new flist does.
395 395 */
396 396 membar_producer();
397 397 fip->fi_list = newlist;
398 398
399 399 /*
400 400 * Routines like getf() make an optimistic check on the validity
401 401 * of the supplied file descriptor: if it's less than the current
402 402 * value of fi_nfiles -- examined without any locks -- then it's
403 403 * safe to attempt a UF_ENTER() on that fd (which is a valid
404 404 * assumption because fi_nfiles only increases). Therefore, it
405 405 * is critical that the new value of fi_nfiles not reach global
406 406 * visibility until after the new fi_list: if it happened the
407 407 * other way around, getf() could see the new fi_nfiles and attempt
408 408 * a UF_ENTER() on the old fi_list, which would write beyond its
409 409 * end if the fd exceeded the old fi_nfiles.
410 410 */
411 411 membar_producer();
412 412 fip->fi_nfiles = newcnt;
413 413
414 414 /*
415 415 * The new state is consistent now, so we can drop all the locks.
416 416 */
417 417 for (dst = newlist; dst < newend; dst++)
418 418 mutex_exit(&dst->uf_lock);
419 419
420 420 for (src = oldlist; src < oldend; src++) {
421 421 /*
422 422 * If any threads are blocked on the old cvs, wake them.
423 423 * This will force them to wake up, discover that fi_list
424 424 * has changed, and go back to sleep on the new cvs.
425 425 */
426 426 cv_broadcast(&src->uf_wanted_cv);
427 427 cv_broadcast(&src->uf_closing_cv);
428 428 mutex_exit(&src->uf_lock);
429 429 }
430 430
431 431 mutex_exit(&fip->fi_lock);
432 432
433 433 /*
434 434 * Retire the old flist. We can't actually kmem_free() it now
435 435 * because someone may still have a pointer to it. Instead,
436 436 * we link it onto a list of retired flists. The new flist
437 437 * is at least double the size of the previous flist, so the
438 438 * total size of all retired flists will be less than the size
439 439 * of the current one (to prove, consider the sum of a geometric
440 440 * series in powers of 2). exit() frees the retired flists.
441 441 */
442 442 urp = kmem_zalloc(sizeof (uf_rlist_t), KM_SLEEP);
443 443 urp->ur_list = oldlist;
444 444 urp->ur_nfiles = oldcnt;
445 445
446 446 mutex_enter(&fip->fi_lock);
447 447 urp->ur_next = fip->fi_rlist;
448 448 fip->fi_rlist = urp;
449 449 mutex_exit(&fip->fi_lock);
450 450 }
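
Two points from flist_grow() are worth restating compactly. First, the sizing loop keeps fi_nfiles of the form 2^n - 1: a request to cover maxfd = 100, say, walks 1, 3, 7, 15, 31, 63, 127 and allocates 127 entries. Second, the two membar_producer() calls enforce the publication order that getf()'s lock-free "fd < fi_nfiles" check depends on: the fully initialized array becomes visible before the new fi_list pointer, and the new pointer before the larger count. A stripped-down sketch of that ordering, with illustrative names rather than the real fields:

	#include <sys/types.h>
	#include <sys/atomic.h>

	typedef struct grow_demo {	/* hypothetical stand-in for uf_info_t */
		void	*gd_list;	/* plays the role of fi_list */
		uint_t	gd_nfiles;	/* plays the role of fi_nfiles; only grows */
	} grow_demo_t;

	static void
	grow_demo_publish(grow_demo_t *gd, void *newlist, uint_t newcnt)
	{
		/* newlist has been fully initialized by the caller */
		membar_producer();
		gd->gd_list = newlist;		/* publish the array first ... */
		membar_producer();
		gd->gd_nfiles = newcnt;		/* ... and only then the count */
	}
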
451 451
452 452 /*
453 453 * Utility functions for keeping track of the active file descriptors.
454 454 */
455 455 void
456 456 clear_stale_fd() /* called from post_syscall() */
457 457 {
458 458 afd_t *afd = &curthread->t_activefd;
459 459 int i;
460 460
461 461 /* uninitialized is ok here, a_nfd is then zero */
462 462 for (i = 0; i < afd->a_nfd; i++) {
463 463 /* assert that this should not be necessary */
464 464 ASSERT(afd->a_fd[i] == -1);
465 465 afd->a_fd[i] = -1;
466 466 }
467 467 afd->a_stale = 0;
468 468 }
469 469
470 470 void
471 471 free_afd(afd_t *afd) /* called below and from thread_free() */
472 472 {
473 473 int i;
474 474
475 475 /* free the buffer if it was kmem_alloc()ed */
476 476 if (afd->a_nfd > sizeof (afd->a_buf) / sizeof (afd->a_buf[0])) {
477 477 COUNT(afd_free);
478 478 kmem_free(afd->a_fd, afd->a_nfd * sizeof (afd->a_fd[0]));
479 479 }
480 480
481 481 /* (re)initialize the structure */
482 482 afd->a_fd = &afd->a_buf[0];
483 483 afd->a_nfd = sizeof (afd->a_buf) / sizeof (afd->a_buf[0]);
484 484 afd->a_stale = 0;
485 485 for (i = 0; i < afd->a_nfd; i++)
486 486 afd->a_fd[i] = -1;
487 487 }
488 488
489 489 static void
490 490 set_active_fd(int fd)
491 491 {
492 492 afd_t *afd = &curthread->t_activefd;
493 493 int i;
494 494 int *old_fd;
495 495 int old_nfd;
496 496 int *new_fd;
497 497 int new_nfd;
498 498
499 499 if (afd->a_nfd == 0) { /* first time initialization */
500 500 ASSERT(fd == -1);
501 501 mutex_enter(&afd->a_fdlock);
502 502 free_afd(afd);
503 503 mutex_exit(&afd->a_fdlock);
504 504 }
505 505
506 506 /* insert fd into vacant slot, if any */
507 507 for (i = 0; i < afd->a_nfd; i++) {
508 508 if (afd->a_fd[i] == -1) {
509 509 afd->a_fd[i] = fd;
510 510 return;
511 511 }
512 512 }
513 513
514 514 /*
515 515 * Reallocate the a_fd[] array to add one more slot.
516 516 */
517 517 ASSERT(fd == -1);
518 518 old_nfd = afd->a_nfd;
519 519 old_fd = afd->a_fd;
520 520 new_nfd = old_nfd + 1;
521 521 new_fd = kmem_alloc(new_nfd * sizeof (afd->a_fd[0]), KM_SLEEP);
522 522 MAXFD(new_nfd);
523 523 COUNT(afd_alloc);
524 524
525 525 mutex_enter(&afd->a_fdlock);
526 526 afd->a_fd = new_fd;
527 527 afd->a_nfd = new_nfd;
528 528 for (i = 0; i < old_nfd; i++)
529 529 afd->a_fd[i] = old_fd[i];
530 530 afd->a_fd[i] = fd;
531 531 mutex_exit(&afd->a_fdlock);
532 532
533 533 if (old_nfd > sizeof (afd->a_buf) / sizeof (afd->a_buf[0])) {
534 534 COUNT(afd_free);
535 535 kmem_free(old_fd, old_nfd * sizeof (afd->a_fd[0]));
536 536 }
537 537 }
538 538
539 539 void
540 540 clear_active_fd(int fd) /* called below and from aio.c */
541 541 {
542 542 afd_t *afd = &curthread->t_activefd;
543 543 int i;
544 544
545 545 for (i = 0; i < afd->a_nfd; i++) {
546 546 if (afd->a_fd[i] == fd) {
547 547 afd->a_fd[i] = -1;
548 548 break;
549 549 }
550 550 }
551 551 ASSERT(i < afd->a_nfd); /* not found is not ok */
552 552 }
553 553
554 554 /*
555 555 * Does this thread have this fd active?
556 556 */
557 557 static int
558 558 is_active_fd(kthread_t *t, int fd)
559 559 {
560 560 afd_t *afd = &t->t_activefd;
561 561 int i;
562 562
563 563 ASSERT(t != curthread);
564 564 mutex_enter(&afd->a_fdlock);
565 565 /* uninitialized is ok here, a_nfd is then zero */
566 566 for (i = 0; i < afd->a_nfd; i++) {
567 567 if (afd->a_fd[i] == fd) {
568 568 mutex_exit(&afd->a_fdlock);
569 569 return (1);
570 570 }
571 571 }
572 572 mutex_exit(&afd->a_fdlock);
573 573 return (0);
574 574 }
575 575
576 576 /*
577 577 * Convert a user supplied file descriptor into a pointer to a file
578 578 * structure. Only task is to check range of the descriptor (soft
579 579 * resource limit was enforced at open time and shouldn't be checked
580 580 * here).
581 581 */
582 582 file_t *
583 583 getf(int fd)
584 584 {
585 585 uf_info_t *fip = P_FINFO(curproc);
586 586 uf_entry_t *ufp;
587 587 file_t *fp;
588 588
589 589 if ((uint_t)fd >= fip->fi_nfiles)
590 590 return (NULL);
591 591
592 592 /*
593 593 * Reserve a slot in the active fd array now so we can call
594 594 * set_active_fd(fd) for real below, while still inside UF_ENTER().
595 595 */
596 596 set_active_fd(-1);
597 597
598 598 UF_ENTER(ufp, fip, fd);
599 599
600 600 if ((fp = ufp->uf_file) == NULL) {
601 601 UF_EXIT(ufp);
602 602
603 603 if (fd == fip->fi_badfd && fip->fi_action > 0)
604 604 tsignal(curthread, fip->fi_action);
605 605
606 606 return (NULL);
607 607 }
608 608 ufp->uf_refcnt++;
609 609
610 610 set_active_fd(fd); /* record the active file descriptor */
611 611
612 612 UF_EXIT(ufp);
613 613
614 614 return (fp);
615 615 }
616 616
617 617 /*
618 618 * Close whatever file currently occupies the file descriptor slot
619 619 * and install the new file, usually NULL, in the file descriptor slot.
620 620 * The close must complete before we release the file descriptor slot.
621 621 * If newfp != NULL we only return an error if we can't allocate the
622 622 * slot so the caller knows that it needs to free the filep;
623 623 * in the other cases we return the error number from closef().
624 624 */
625 625 int
626 626 closeandsetf(int fd, file_t *newfp)
627 627 {
628 628 proc_t *p = curproc;
629 629 uf_info_t *fip = P_FINFO(p);
630 630 uf_entry_t *ufp;
631 631 file_t *fp;
632 632 fpollinfo_t *fpip;
633 633 portfd_t *pfd;
634 634 int error;
635 635
636 636 if ((uint_t)fd >= fip->fi_nfiles) {
637 637 if (newfp == NULL)
638 638 return (EBADF);
639 639 flist_grow(fd);
640 640 }
641 641
642 642 if (newfp != NULL) {
643 643 /*
644 644 * If ufp is reserved but has no file pointer, it's in the
645 645 * transition between ufalloc() and setf(). We must wait
646 646 * for this transition to complete before assigning the
647 647 * new non-NULL file pointer.
648 648 */
649 649 mutex_enter(&fip->fi_lock);
650 650 if (fd == fip->fi_badfd) {
651 651 mutex_exit(&fip->fi_lock);
652 652 if (fip->fi_action > 0)
653 653 tsignal(curthread, fip->fi_action);
654 654 return (EBADF);
655 655 }
656 656 UF_ENTER(ufp, fip, fd);
657 657 while (ufp->uf_busy && ufp->uf_file == NULL) {
658 658 mutex_exit(&fip->fi_lock);
659 659 cv_wait_stop(&ufp->uf_wanted_cv, &ufp->uf_lock, 250);
660 660 UF_EXIT(ufp);
661 661 mutex_enter(&fip->fi_lock);
662 662 UF_ENTER(ufp, fip, fd);
663 663 }
664 664 if ((fp = ufp->uf_file) == NULL) {
665 665 ASSERT(ufp->uf_fpollinfo == NULL);
666 666 ASSERT(ufp->uf_flag == 0);
667 667 fd_reserve(fip, fd, 1);
668 668 ufp->uf_file = newfp;
669 669 UF_EXIT(ufp);
670 670 mutex_exit(&fip->fi_lock);
671 671 return (0);
672 672 }
673 673 mutex_exit(&fip->fi_lock);
674 674 } else {
675 675 UF_ENTER(ufp, fip, fd);
676 676 if ((fp = ufp->uf_file) == NULL) {
677 677 UF_EXIT(ufp);
678 678 return (EBADF);
679 679 }
680 680 }
681 681
682 682 ASSERT(ufp->uf_busy);
683 683 ufp->uf_file = NULL;
684 684 ufp->uf_flag = 0;
685 685
686 686 /*
687 687 * If the file descriptor reference count is non-zero, then
688 688 * some other lwp in the process is performing system call
689 689 * activity on the file. To avoid blocking here for a long
690 690 * time (the other lwp might be in a long term sleep in its
691 691 * system call), we scan all other lwps in the process to
692 692 * find the ones with this fd as one of their active fds,
693 693 * set their a_stale flag, and set them running if they
694 694 * are in an interruptible sleep so they will emerge from
695 695 * their system calls immediately. post_syscall() will
696 696 * test the a_stale flag and set errno to EBADF.
697 697 */
698 698 ASSERT(ufp->uf_refcnt == 0 || p->p_lwpcnt > 1);
699 699 if (ufp->uf_refcnt > 0) {
700 700 kthread_t *t;
701 701
702 702 /*
703 703 * We call sprlock_proc(p) to ensure that the thread
704 704 * list will not change while we are scanning it.
705 705 * To do this, we must drop ufp->uf_lock and then
706 706 * reacquire it (so we are not holding both p->p_lock
707 707 * and ufp->uf_lock at the same time). ufp->uf_lock
708 708 * must be held for is_active_fd() to be correct
709 709 * (set_active_fd() is called while holding ufp->uf_lock).
710 710 *
711 711 * This is a convoluted dance, but it is better than
712 712 * the old brute-force method of stopping every thread
713 713 * in the process by calling holdlwps(SHOLDFORK1).
714 714 */
715 715
716 716 UF_EXIT(ufp);
717 717 COUNT(afd_wait);
718 718
719 719 mutex_enter(&p->p_lock);
720 720 sprlock_proc(p);
721 721 mutex_exit(&p->p_lock);
722 722
723 723 UF_ENTER(ufp, fip, fd);
724 724 ASSERT(ufp->uf_file == NULL);
725 725
726 726 if (ufp->uf_refcnt > 0) {
727 727 for (t = curthread->t_forw;
728 728 t != curthread;
729 729 t = t->t_forw) {
730 730 if (is_active_fd(t, fd)) {
731 731 thread_lock(t);
732 732 t->t_activefd.a_stale = 1;
733 733 t->t_post_sys = 1;
734 734 if (ISWAKEABLE(t))
735 735 setrun_locked(t);
736 736 thread_unlock(t);
737 737 }
738 738 }
739 739 }
740 740
741 741 UF_EXIT(ufp);
742 742
743 743 mutex_enter(&p->p_lock);
744 744 sprunlock(p);
745 745
746 746 UF_ENTER(ufp, fip, fd);
747 747 ASSERT(ufp->uf_file == NULL);
748 748 }
749 749
750 750 /*
751 751 * Wait for other lwps to stop using this file descriptor.
752 752 */
753 753 while (ufp->uf_refcnt > 0) {
754 754 cv_wait_stop(&ufp->uf_closing_cv, &ufp->uf_lock, 250);
755 755 /*
756 756 * cv_wait_stop() drops ufp->uf_lock, so the file list
757 757 * can change. Drop the lock on our (possibly) stale
758 758 * ufp and let UF_ENTER() find and lock the current ufp.
759 759 */
760 760 UF_EXIT(ufp);
761 761 UF_ENTER(ufp, fip, fd);
762 762 }
763 763
764 764 #ifdef DEBUG
765 765 /*
766 766 * catch a watchfd on device's pollhead list but not on fpollinfo list
767 767 */
768 768 if (ufp->uf_fpollinfo != NULL)
769 769 checkwfdlist(fp->f_vnode, ufp->uf_fpollinfo);
770 770 #endif /* DEBUG */
771 771
772 772 /*
773 773 * We may need to cleanup some cached poll states in t_pollstate
774 774 * before the fd can be reused. It is important that we don't
775 775 * access a stale thread structure. We will do the cleanup in two
776 776 * phases to avoid deadlock and holding uf_lock for too long.
777 777 * In phase 1, hold the uf_lock and call pollblockexit() to set
778 778 * state in t_pollstate struct so that a thread does not exit on
779 779 * us. In phase 2, we drop the uf_lock and call pollcacheclean().
780 780 */
781 781 pfd = ufp->uf_portfd;
782 782 ufp->uf_portfd = NULL;
783 783 fpip = ufp->uf_fpollinfo;
784 784 ufp->uf_fpollinfo = NULL;
785 785 if (fpip != NULL)
786 786 pollblockexit(fpip);
787 787 UF_EXIT(ufp);
788 788 if (fpip != NULL)
789 789 pollcacheclean(fpip, fd);
790 790 if (pfd)
791 791 port_close_fd(pfd);
792 792
793 793 /*
794 794 * Keep the file descriptor entry reserved across the closef().
795 795 */
796 796 error = closef(fp);
797 797
798 798 setf(fd, newfp);
799 799
800 800 /* Only return closef() error when closing is all we do */
801 801 return (newfp == NULL ? error : 0);
802 802 }
803 803
804 804 /*
805 805 * Decrement uf_refcnt; wakeup anyone waiting to close the file.
806 806 */
807 807 void
808 808 releasef(int fd)
809 809 {
810 810 uf_info_t *fip = P_FINFO(curproc);
811 811 uf_entry_t *ufp;
812 812
813 813 UF_ENTER(ufp, fip, fd);
814 814 ASSERT(ufp->uf_refcnt > 0);
815 815 clear_active_fd(fd); /* clear the active file descriptor */
816 816 if (--ufp->uf_refcnt == 0)
817 817 cv_broadcast(&ufp->uf_closing_cv);
818 818 UF_EXIT(ufp);
819 819 }
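
Taken together, getf() and releasef() bracket every per-fd use of a file_t: getf() bumps uf_refcnt under UF_ENTER() and records the fd in the thread's active-fd list, and releasef() undoes both and wakes any closer waiting in closeandsetf(). A minimal sketch of the calling convention follows; the helper is hypothetical, and fgetstartvp() later in this file is a real caller of the same pattern:

	#include <sys/types.h>
	#include <sys/errno.h>
	#include <sys/file.h>

	/* hypothetical helper: fetch the f_flag word for an open descriptor */
	static int
	demo_get_flag(int fd, int *flagp)
	{
		file_t *fp;

		if ((fp = getf(fd)) == NULL)	/* take a hold; fails on a bad fd */
			return (EBADF);
		*flagp = fp->f_flag;		/* fp is safe to use while held */
		releasef(fd);			/* drop the hold; may wake a closer */
		return (0);
	}
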
820 820
821 821 /*
822 822 * Identical to releasef() but can be called from another process.
823 823 */
824 824 void
825 825 areleasef(int fd, uf_info_t *fip)
826 826 {
827 827 uf_entry_t *ufp;
828 828
829 829 UF_ENTER(ufp, fip, fd);
830 830 ASSERT(ufp->uf_refcnt > 0);
831 831 if (--ufp->uf_refcnt == 0)
832 832 cv_broadcast(&ufp->uf_closing_cv);
833 833 UF_EXIT(ufp);
834 834 }
835 835
836 836 /*
837 837 * Duplicate all file descriptors across a fork.
838 838 */
839 839 void
840 840 flist_fork(uf_info_t *pfip, uf_info_t *cfip)
841 841 {
842 842 int fd, nfiles;
843 843 uf_entry_t *pufp, *cufp;
844 844
845 845 mutex_init(&cfip->fi_lock, NULL, MUTEX_DEFAULT, NULL);
846 846 cfip->fi_rlist = NULL;
847 847
848 848 /*
849 849 * We don't need to hold fi_lock because all other lwp's in the
850 850 * parent have been held.
851 851 */
852 852 cfip->fi_nfiles = nfiles = flist_minsize(pfip);
853 853
854 854 cfip->fi_list = kmem_zalloc(nfiles * sizeof (uf_entry_t), KM_SLEEP);
855 855
856 856 for (fd = 0, pufp = pfip->fi_list, cufp = cfip->fi_list; fd < nfiles;
857 857 fd++, pufp++, cufp++) {
858 858 cufp->uf_file = pufp->uf_file;
859 859 cufp->uf_alloc = pufp->uf_alloc;
860 860 cufp->uf_flag = pufp->uf_flag;
861 861 cufp->uf_busy = pufp->uf_busy;
862 862 if (pufp->uf_file == NULL) {
863 863 ASSERT(pufp->uf_flag == 0);
864 864 if (pufp->uf_busy) {
865 865 /*
866 866 * Grab locks to appease ASSERTs in fd_reserve
867 867 */
868 868 mutex_enter(&cfip->fi_lock);
869 869 mutex_enter(&cufp->uf_lock);
870 870 fd_reserve(cfip, fd, -1);
871 871 mutex_exit(&cufp->uf_lock);
872 872 mutex_exit(&cfip->fi_lock);
873 873 }
874 874 }
875 875 }
876 876 }
877 877
878 878 /*
879 879 * Close all open file descriptors for the current process.
880 880 * This is only called from exit(), which is single-threaded,
881 881 * so we don't need any locking.
882 882 */
883 883 void
884 884 closeall(uf_info_t *fip)
885 885 {
886 886 int fd;
887 887 file_t *fp;
888 888 uf_entry_t *ufp;
889 889
890 890 ufp = fip->fi_list;
891 891 for (fd = 0; fd < fip->fi_nfiles; fd++, ufp++) {
892 892 if ((fp = ufp->uf_file) != NULL) {
893 893 ufp->uf_file = NULL;
894 894 if (ufp->uf_portfd != NULL) {
895 895 portfd_t *pfd;
896 896 /* remove event port association */
897 897 pfd = ufp->uf_portfd;
898 898 ufp->uf_portfd = NULL;
899 899 port_close_fd(pfd);
900 900 }
901 901 ASSERT(ufp->uf_fpollinfo == NULL);
902 902 (void) closef(fp);
903 903 }
904 904 }
905 905
906 906 kmem_free(fip->fi_list, fip->fi_nfiles * sizeof (uf_entry_t));
907 907 fip->fi_list = NULL;
908 908 fip->fi_nfiles = 0;
909 909 while (fip->fi_rlist != NULL) {
910 910 uf_rlist_t *urp = fip->fi_rlist;
911 911 fip->fi_rlist = urp->ur_next;
912 912 kmem_free(urp->ur_list, urp->ur_nfiles * sizeof (uf_entry_t));
913 913 kmem_free(urp, sizeof (uf_rlist_t));
914 914 }
915 915 }
916 916
917 917 /*
918 918 * Internal form of close. Decrement reference count on file
919 919 * structure. Decrement reference count on the vnode following
920 920 * removal of the referencing file structure.
921 921 */
922 922 int
923 923 closef(file_t *fp)
924 924 {
925 925 vnode_t *vp;
926 926 int error;
927 927 int count;
928 928 int flag;
929 929 offset_t offset;
930 930
931 931 /*
932 932 * audit close of file (may be exit)
933 933 */
934 934 if (AU_AUDITING())
935 935 audit_closef(fp);
936 936 ASSERT(MUTEX_NOT_HELD(&P_FINFO(curproc)->fi_lock));
937 937
938 938 mutex_enter(&fp->f_tlock);
939 939
940 940 ASSERT(fp->f_count > 0);
941 941
942 942 count = fp->f_count--;
943 943 flag = fp->f_flag;
944 944 offset = fp->f_offset;
945 945
946 946 vp = fp->f_vnode;
947 947
948 948 error = VOP_CLOSE(vp, flag, count, offset, fp->f_cred, NULL);
949 949
950 950 if (count > 1) {
951 951 mutex_exit(&fp->f_tlock);
952 952 return (error);
953 953 }
954 954 ASSERT(fp->f_count == 0);
955 955 mutex_exit(&fp->f_tlock);
956 956
957 957 /*
958 958 * If DTrace has getf() subroutines active, it will set dtrace_closef
959 959 * to point to code that implements a barrier with respect to probe
960 960 * context. This must be called before the file_t is freed (and the
961 961 * vnode that it refers to is released) -- but it must be after the
962 962 * file_t has been removed from the uf_entry_t. That is, there must
963 963 * be no way for a racing getf() in probe context to yield the fp that
964 964 * we're operating upon.
965 965 */
966 966 if (dtrace_closef != NULL)
967 967 (*dtrace_closef)();
968 968
969 969 VN_RELE(vp);
970 970 /*
971 971 * deallocate resources to audit_data
972 972 */
973 973 if (audit_active)
974 974 audit_unfalloc(fp);
975 975 crfree(fp->f_cred);
976 976 kmem_cache_free(file_cache, fp);
977 977 return (error);
978 978 }
979 979
980 980 /*
981 981 * This is a combination of ufalloc() and setf().
982 982 */
983 983 int
984 984 ufalloc_file(int start, file_t *fp)
985 985 {
986 986 proc_t *p = curproc;
987 987 uf_info_t *fip = P_FINFO(p);
988 988 int filelimit;
989 989 uf_entry_t *ufp;
990 990 int nfiles;
991 991 int fd;
992 992
993 993 /*
994 994 * Assertion is to convince the correctness of the following
995 995 * assignment for filelimit after casting to int.
996 996 */
997 997 ASSERT(p->p_fno_ctl <= INT_MAX);
998 998 filelimit = (int)p->p_fno_ctl;
999 999
1000 1000 for (;;) {
1001 1001 mutex_enter(&fip->fi_lock);
1002 1002 fd = fd_find(fip, start);
1003 1003 if (fd >= 0 && fd == fip->fi_badfd) {
1004 1004 start = fd + 1;
1005 1005 mutex_exit(&fip->fi_lock);
1006 1006 continue;
1007 1007 }
1008 1008 if ((uint_t)fd < filelimit)
1009 1009 break;
1010 1010 if (fd >= filelimit) {
1011 1011 mutex_exit(&fip->fi_lock);
1012 1012 mutex_enter(&p->p_lock);
1013 1013 (void) rctl_action(rctlproc_legacy[RLIMIT_NOFILE],
1014 1014 p->p_rctls, p, RCA_SAFE);
1015 1015 mutex_exit(&p->p_lock);
1016 1016 return (-1);
1017 1017 }
1018 1018 /* fd_find() returned -1 */
1019 1019 nfiles = fip->fi_nfiles;
1020 1020 mutex_exit(&fip->fi_lock);
1021 1021 flist_grow(MAX(start, nfiles));
1022 1022 }
1023 1023
1024 1024 UF_ENTER(ufp, fip, fd);
1025 1025 fd_reserve(fip, fd, 1);
1026 1026 ASSERT(ufp->uf_file == NULL);
1027 1027 ufp->uf_file = fp;
1028 1028 UF_EXIT(ufp);
1029 1029 mutex_exit(&fip->fi_lock);
1030 1030 return (fd);
1031 1031 }
1032 1032
1033 1033 /*
1034 1034 * Allocate a user file descriptor greater than or equal to "start".
1035 1035 */
1036 1036 int
1037 1037 ufalloc(int start)
1038 1038 {
1039 1039 return (ufalloc_file(start, NULL));
1040 1040 }
1041 1041
1042 1042 /*
1043 1043 * Check that a future allocation of count fds on proc p has a good
1044 1044 * chance of succeeding. If not, do rctl processing as if we'd failed
1045 1045 * the allocation.
1046 1046 *
1047 1047 * Our caller must guarantee that p cannot disappear underneath us.
1048 1048 */
1049 1049 int
1050 1050 ufcanalloc(proc_t *p, uint_t count)
1051 1051 {
1052 1052 uf_info_t *fip = P_FINFO(p);
1053 1053 int filelimit;
1054 1054 int current;
1055 1055
1056 1056 if (count == 0)
1057 1057 return (1);
1058 1058
1059 1059 ASSERT(p->p_fno_ctl <= INT_MAX);
1060 1060 filelimit = (int)p->p_fno_ctl;
1061 1061
1062 1062 mutex_enter(&fip->fi_lock);
1063 1063 current = flist_nalloc(fip); /* # of in-use descriptors */
1064 1064 mutex_exit(&fip->fi_lock);
1065 1065
1066 1066 /*
1067 1067 * If count is a positive integer, the worst that can happen is
1068 1068 * an overflow to a negative value, which is caught by the >= 0 check.
1069 1069 */
1070 1070 current += count;
1071 1071 if (count <= INT_MAX && current >= 0 && current <= filelimit)
1072 1072 return (1);
1073 1073
1074 1074 mutex_enter(&p->p_lock);
1075 1075 (void) rctl_action(rctlproc_legacy[RLIMIT_NOFILE],
1076 1076 p->p_rctls, p, RCA_SAFE);
1077 1077 mutex_exit(&p->p_lock);
1078 1078 return (0);
1079 1079 }
1080 1080
1081 1081 /*
1082 1082 * Allocate a user file descriptor and a file structure.
1083 1083 * Initialize the descriptor to point at the file structure.
1084 1084 * If fdp is NULL, the user file descriptor will not be allocated.
1085 1085 */
1086 1086 int
1087 1087 falloc(vnode_t *vp, int flag, file_t **fpp, int *fdp)
1088 1088 {
1089 1089 file_t *fp;
1090 1090 int fd;
1091 1091
1092 1092 if (fdp) {
1093 1093 if ((fd = ufalloc(0)) == -1)
1094 1094 return (EMFILE);
1095 1095 }
1096 1096 fp = kmem_cache_alloc(file_cache, KM_SLEEP);
1097 1097 /*
1098 1098 * Note: falloc returns the fp locked
1099 1099 */
1100 1100 mutex_enter(&fp->f_tlock);
1101 1101 fp->f_count = 1;
1102 1102 fp->f_flag = (ushort_t)flag;
1103 1103 fp->f_flag2 = (flag & (FSEARCH|FEXEC)) >> 16;
1104 1104 fp->f_vnode = vp;
1105 1105 fp->f_offset = 0;
1106 1106 fp->f_audit_data = 0;
1107 1107 crhold(fp->f_cred = CRED());
1108 1108 /*
1109 1109 * allocate resources to audit_data
1110 1110 */
1111 1111 if (audit_active)
1112 1112 audit_falloc(fp);
1113 1113 *fpp = fp;
1114 1114 if (fdp)
1115 1115 *fdp = fd;
1116 1116 return (0);
1117 1117 }
1118 1118
1119 1119 /*ARGSUSED*/
1120 1120 static int
1121 1121 file_cache_constructor(void *buf, void *cdrarg, int kmflags)
1122 1122 {
1123 1123 file_t *fp = buf;
1124 1124
1125 1125 mutex_init(&fp->f_tlock, NULL, MUTEX_DEFAULT, NULL);
1126 1126 return (0);
1127 1127 }
1128 1128
1129 1129 /*ARGSUSED*/
1130 1130 static void
1131 1131 file_cache_destructor(void *buf, void *cdrarg)
1132 1132 {
1133 1133 file_t *fp = buf;
1134 1134
1135 1135 mutex_destroy(&fp->f_tlock);
1136 1136 }
1137 1137
1138 1138 void
1139 1139 finit()
1140 1140 {
1141 1141 file_cache = kmem_cache_create("file_cache", sizeof (file_t), 0,
1142 1142 file_cache_constructor, file_cache_destructor, NULL, NULL, NULL, 0);
1143 1143 }
1144 1144
1145 1145 void
1146 1146 unfalloc(file_t *fp)
1147 1147 {
1148 1148 ASSERT(MUTEX_HELD(&fp->f_tlock));
1149 1149 if (--fp->f_count <= 0) {
1150 1150 /*
1151 1151 * deallocate resources to audit_data
1152 1152 */
1153 1153 if (audit_active)
1154 1154 audit_unfalloc(fp);
1155 1155 crfree(fp->f_cred);
1156 1156 mutex_exit(&fp->f_tlock);
1157 1157 kmem_cache_free(file_cache, fp);
1158 1158 } else
1159 1159 mutex_exit(&fp->f_tlock);
1160 1160 }
1161 1161
1162 1162 /*
1163 1163 * Given a file descriptor, set the user's
1164 1164 * file pointer to the given parameter.
1165 1165 */
1166 1166 void
1167 1167 setf(int fd, file_t *fp)
1168 1168 {
1169 1169 uf_info_t *fip = P_FINFO(curproc);
1170 1170 uf_entry_t *ufp;
1171 1171
1172 1172 if (AU_AUDITING())
1173 1173 audit_setf(fp, fd);
1174 1174
1175 1175 if (fp == NULL) {
1176 1176 mutex_enter(&fip->fi_lock);
1177 1177 UF_ENTER(ufp, fip, fd);
1178 1178 fd_reserve(fip, fd, -1);
1179 1179 mutex_exit(&fip->fi_lock);
1180 1180 } else {
1181 1181 UF_ENTER(ufp, fip, fd);
1182 1182 ASSERT(ufp->uf_busy);
1183 1183 }
1184 1184 ASSERT(ufp->uf_fpollinfo == NULL);
1185 1185 ASSERT(ufp->uf_flag == 0);
1186 1186 ufp->uf_file = fp;
1187 1187 cv_broadcast(&ufp->uf_wanted_cv);
1188 1188 UF_EXIT(ufp);
1189 1189 }
1190 1190
1191 1191 /*
1192 1192 * Given a file descriptor, return the file table flags, plus,
1193 1193 * if this is a socket in asynchronous mode, the FASYNC flag.
1194 1194 * getf() may or may not have been called before calling f_getfl().
1195 1195 */
1196 1196 int
1197 1197 f_getfl(int fd, int *flagp)
1198 1198 {
1199 1199 uf_info_t *fip = P_FINFO(curproc);
1200 1200 uf_entry_t *ufp;
1201 1201 file_t *fp;
1202 1202 int error;
1203 1203
1204 1204 if ((uint_t)fd >= fip->fi_nfiles)
1205 1205 error = EBADF;
1206 1206 else {
1207 1207 UF_ENTER(ufp, fip, fd);
1208 1208 if ((fp = ufp->uf_file) == NULL)
1209 1209 error = EBADF;
1210 1210 else {
1211 1211 vnode_t *vp = fp->f_vnode;
1212 1212 int flag = fp->f_flag | (fp->f_flag2 << 16);
1213 1213
1214 1214 /*
1215 1215 * BSD fcntl() FASYNC compatibility.
1216 1216 */
1217 1217 if (vp->v_type == VSOCK)
1218 1218 flag |= sock_getfasync(vp);
1219 1219 *flagp = flag;
1220 1220 error = 0;
1221 1221 }
1222 1222 UF_EXIT(ufp);
1223 1223 }
1224 1224
1225 1225 return (error);
1226 1226 }
1227 1227
1228 1228 /*
1229 1229 * Given a file descriptor, return the user's file flags.
1230 1230 * Force the FD_CLOEXEC flag for writable self-open /proc files.
1231 1231 * getf() may or may not have been called before calling f_getfd_error().
1232 1232 */
1233 1233 int
1234 1234 f_getfd_error(int fd, int *flagp)
1235 1235 {
1236 1236 uf_info_t *fip = P_FINFO(curproc);
1237 1237 uf_entry_t *ufp;
1238 1238 file_t *fp;
1239 1239 int flag;
1240 1240 int error;
1241 1241
1242 1242 if ((uint_t)fd >= fip->fi_nfiles)
1243 1243 error = EBADF;
1244 1244 else {
1245 1245 UF_ENTER(ufp, fip, fd);
1246 1246 if ((fp = ufp->uf_file) == NULL)
1247 1247 error = EBADF;
1248 1248 else {
1249 1249 flag = ufp->uf_flag;
1250 1250 if ((fp->f_flag & FWRITE) && pr_isself(fp->f_vnode))
1251 1251 flag |= FD_CLOEXEC;
1252 1252 *flagp = flag;
1253 1253 error = 0;
1254 1254 }
1255 1255 UF_EXIT(ufp);
1256 1256 }
1257 1257
1258 1258 return (error);
1259 1259 }
1260 1260
1261 1261 /*
1262 1262 * getf() must have been called before calling f_getfd().
1263 1263 */
1264 1264 char
1265 1265 f_getfd(int fd)
1266 1266 {
1267 1267 int flag = 0;
1268 1268 (void) f_getfd_error(fd, &flag);
1269 1269 return ((char)flag);
1270 1270 }
1271 1271
1272 1272 /*
1273 1273 * Given a file descriptor and file flags, set the user's file flags.
1274 1274 * At present, the only valid flag is FD_CLOEXEC.
1275 1275 * getf() may or may not have been called before calling f_setfd_error().
1276 1276 */
1277 1277 int
1278 1278 f_setfd_error(int fd, int flags)
1279 1279 {
1280 1280 uf_info_t *fip = P_FINFO(curproc);
1281 1281 uf_entry_t *ufp;
1282 1282 int error;
1283 1283
1284 1284 if ((uint_t)fd >= fip->fi_nfiles)
1285 1285 error = EBADF;
1286 1286 else {
1287 1287 UF_ENTER(ufp, fip, fd);
1288 1288 if (ufp->uf_file == NULL)
1289 1289 error = EBADF;
1290 1290 else {
1291 1291 ufp->uf_flag = flags & FD_CLOEXEC;
1292 1292 error = 0;
1293 1293 }
1294 1294 UF_EXIT(ufp);
1295 1295 }
1296 1296 return (error);
1297 1297 }
1298 1298
1299 1299 void
1300 1300 f_setfd(int fd, char flags)
1301 1301 {
1302 1302 (void) f_setfd_error(fd, flags);
1303 1303 }
1304 1304
1305 1305 #define BADFD_MIN 3
1306 1306 #define BADFD_MAX 255
1307 1307
1308 1308 /*
1309 1309 * Attempt to allocate a file descriptor which is bad and which
1310 1310 * is "poison" to the application. It cannot be closed (except
1311 1311 * on exec), allocated for a different use, etc.
1312 1312 */
1313 1313 int
1314 1314 f_badfd(int start, int *fdp, int action)
1315 1315 {
1316 1316 int fdr;
1317 1317 int badfd;
1318 1318 uf_info_t *fip = P_FINFO(curproc);
1319 1319
1320 1320 #ifdef _LP64
1321 1321 /* No restrictions on 64 bit _file */
1322 1322 if (get_udatamodel() != DATAMODEL_ILP32)
1323 1323 return (EINVAL);
1324 1324 #endif
1325 1325
1326 1326 if (start > BADFD_MAX || start < BADFD_MIN)
1327 1327 return (EINVAL);
1328 1328
1329 1329 if (action >= NSIG || action < 0)
1330 1330 return (EINVAL);
1331 1331
1332 1332 mutex_enter(&fip->fi_lock);
1333 1333 badfd = fip->fi_badfd;
1334 1334 mutex_exit(&fip->fi_lock);
1335 1335
1336 1336 if (badfd != -1)
1337 1337 return (EAGAIN);
1338 1338
1339 1339 fdr = ufalloc(start);
1340 1340
1341 1341 if (fdr > BADFD_MAX) {
1342 1342 setf(fdr, NULL);
1343 1343 return (EMFILE);
1344 1344 }
1345 1345 if (fdr < 0)
1346 1346 return (EMFILE);
1347 1347
1348 1348 mutex_enter(&fip->fi_lock);
1349 1349 if (fip->fi_badfd != -1) {
1350 1350 /* Lost race */
1351 1351 mutex_exit(&fip->fi_lock);
1352 1352 setf(fdr, NULL);
1353 1353 return (EAGAIN);
1354 1354 }
1355 1355 fip->fi_action = action;
1356 1356 fip->fi_badfd = fdr;
1357 1357 mutex_exit(&fip->fi_lock);
1358 1358 setf(fdr, NULL);
1359 1359
1360 1360 *fdp = fdr;
1361 1361
1362 1362 return (0);
1363 1363 }
1364 1364
1365 1365 /*
1366 1366 * Allocate a file descriptor and assign it to the vnode "*vpp",
1367 1367 * performing the usual open protocol upon it and returning the
1368 1368 * file descriptor allocated. It is the responsibility of the
1369 1369 * caller to dispose of "*vpp" if any error occurs.
1370 1370 */
1371 1371 int
1372 1372 fassign(vnode_t **vpp, int mode, int *fdp)
1373 1373 {
1374 1374 file_t *fp;
1375 1375 int error;
1376 1376 int fd;
1377 1377
1378 1378 if (error = falloc((vnode_t *)NULL, mode, &fp, &fd))
1379 1379 return (error);
1380 1380 if (error = VOP_OPEN(vpp, mode, fp->f_cred, NULL)) {
1381 1381 setf(fd, NULL);
1382 1382 unfalloc(fp);
1383 1383 return (error);
1384 1384 }
1385 1385 fp->f_vnode = *vpp;
1386 1386 mutex_exit(&fp->f_tlock);
1387 1387 /*
1388 1388 * Fill in the slot falloc reserved.
1389 1389 */
1390 1390 setf(fd, fp);
1391 1391 *fdp = fd;
1392 1392 return (0);
1393 1393 }
1394 1394
1395 1395 /*
1396 1396 * When a process forks it must increment the f_count of all file pointers
1397 1397 * since there is a new process pointing at them. fcnt_add(fip, 1) does this.
1398 1398 * Since we are called when there is only 1 active lwp we don't need to
1399 1399 * hold fi_lock or any uf_lock. If the fork fails, fork_fail() calls
1400 1400 * fcnt_add(fip, -1) to restore the counts.
1401 1401 */
1402 1402 void
1403 1403 fcnt_add(uf_info_t *fip, int incr)
1404 1404 {
1405 1405 int i;
1406 1406 uf_entry_t *ufp;
1407 1407 file_t *fp;
1408 1408
1409 1409 ufp = fip->fi_list;
1410 1410 for (i = 0; i < fip->fi_nfiles; i++, ufp++) {
1411 1411 if ((fp = ufp->uf_file) != NULL) {
1412 1412 mutex_enter(&fp->f_tlock);
1413 1413 ASSERT((incr == 1 && fp->f_count >= 1) ||
1414 1414 (incr == -1 && fp->f_count >= 2));
1415 1415 fp->f_count += incr;
1416 1416 mutex_exit(&fp->f_tlock);
1417 1417 }
1418 1418 }
1419 1419 }
1420 1420
1421 1421 /*
1422 1422 * This is called from exec to close all fd's that have the FD_CLOEXEC flag
1423 1423 * set and also to close all self-open for write /proc file descriptors.
1424 1424 */
1425 1425 void
1426 1426 close_exec(uf_info_t *fip)
1427 1427 {
1428 1428 int fd;
1429 1429 file_t *fp;
1430 1430 fpollinfo_t *fpip;
1431 1431 uf_entry_t *ufp;
1432 1432 portfd_t *pfd;
1433 1433
1434 1434 ufp = fip->fi_list;
1435 1435 for (fd = 0; fd < fip->fi_nfiles; fd++, ufp++) {
1436 1436 if ((fp = ufp->uf_file) != NULL &&
1437 1437 ((ufp->uf_flag & FD_CLOEXEC) ||
1438 1438 ((fp->f_flag & FWRITE) && pr_isself(fp->f_vnode)))) {
1439 1439 fpip = ufp->uf_fpollinfo;
1440 1440 mutex_enter(&fip->fi_lock);
1441 1441 mutex_enter(&ufp->uf_lock);
1442 1442 fd_reserve(fip, fd, -1);
1443 1443 mutex_exit(&fip->fi_lock);
1444 1444 ufp->uf_file = NULL;
1445 1445 ufp->uf_fpollinfo = NULL;
1446 1446 ufp->uf_flag = 0;
1447 1447 /*
1448 1448 * We may need to cleanup some cached poll states
1449 1449 * in t_pollstate before the fd can be reused. It
1450 1450 * is important that we don't access a stale thread
1451 1451 * structure. We will do the cleanup in two
1452 1452 * phases to avoid deadlock and holding uf_lock for
1453 1453 * too long. In phase 1, hold the uf_lock and call
1454 1454 * pollblockexit() to set state in t_pollstate struct
1455 1455 * so that a thread does not exit on us. In phase 2,
1456 1456 * we drop the uf_lock and call pollcacheclean().
1457 1457 */
1458 1458 pfd = ufp->uf_portfd;
1459 1459 ufp->uf_portfd = NULL;
1460 1460 if (fpip != NULL)
1461 1461 pollblockexit(fpip);
1462 1462 mutex_exit(&ufp->uf_lock);
1463 1463 if (fpip != NULL)
1464 1464 pollcacheclean(fpip, fd);
1465 1465 if (pfd)
1466 1466 port_close_fd(pfd);
1467 1467 (void) closef(fp);
1468 1468 }
1469 1469 }
1470 1470
1471 1471 /* Reset bad fd */
1472 1472 fip->fi_badfd = -1;
1473 1473 fip->fi_action = -1;
1474 1474 }
1475 1475
1476 1476 /*
1477 1477 * Utility function called by most of the *at() system call interfaces.
1478 1478 *
1479 1479 * Generate a starting vnode pointer for an (fd, path) pair where 'fd'
1480 1480 * is an open file descriptor for a directory to be used as the starting
1481 1481 * point for the lookup of the relative pathname 'path' (or, if path is
1482 1482 * NULL, generate a vnode pointer for the direct target of the operation).
1483 1483 *
1484 1484 * If we successfully return a non-NULL startvp, it has been the target
1485 1485 * of VN_HOLD() and the caller must call VN_RELE() on it.
1486 1486 */
1487 1487 int
1488 1488 fgetstartvp(int fd, char *path, vnode_t **startvpp)
1489 1489 {
1490 1490 vnode_t *startvp;
1491 1491 file_t *startfp;
1492 1492 char startchar;
1493 1493
1494 1494 if (fd == AT_FDCWD && path == NULL)
1495 1495 return (EFAULT);
1496 1496
1497 1497 if (fd == AT_FDCWD) {
1498 1498 /*
1499 1499 * Start from the current working directory.
1500 1500 */
1501 1501 startvp = NULL;
1502 1502 } else {
1503 1503 if (path == NULL)
1504 1504 startchar = '\0';
1505 1505 else if (copyin(path, &startchar, sizeof (char)))
1506 1506 return (EFAULT);
1507 1507
1508 1508 if (startchar == '/') {
1509 1509 /*
1510 1510 * 'path' is an absolute pathname.
1511 1511 */
1512 1512 startvp = NULL;
1513 1513 } else {
1514 1514 /*
1515 1515 * 'path' is a relative pathname or we will
1516 1516 * be applying the operation to 'fd' itself.
1517 1517 */
1518 1518 if ((startfp = getf(fd)) == NULL)
1519 1519 return (EBADF);
1520 1520 startvp = startfp->f_vnode;
1521 1521 VN_HOLD(startvp);
1522 1522 releasef(fd);
1523 1523 }
1524 1524 }
1525 1525 *startvpp = startvp;
1526 1526 return (0);
1527 1527 }
1528 1528
1529 1529 /*
1530 1530 * Called from fchownat() and fchmodat() to set ownership and mode.
1531 1531 * The contents of *vap must be set before calling here.
1532 1532 */
1533 1533 int
1534 1534 fsetattrat(int fd, char *path, int flags, struct vattr *vap)
1535 1535 {
1536 1536 vnode_t *startvp;
1537 1537 vnode_t *vp;
1538 1538 int error;
1539 1539
1540 1540 /*
1541 1541 * Since we are never called to set the size of a file, we don't
1542 1542 * need to check for non-blocking locks (via nbl_need_check(vp)).
1543 1543 */
1544 1544 ASSERT(!(vap->va_mask & AT_SIZE));
1545 1545
1546 1546 if ((error = fgetstartvp(fd, path, &startvp)) != 0)
1547 1547 return (error);
1548 1548 if (AU_AUDITING() && startvp != NULL)
1549 1549 audit_setfsat_path(1);
1550 1550
1551 1551 /*
1552 1552 * Do lookup for fchownat/fchmodat when path not NULL
1553 1553 */
1554 1554 if (path != NULL) {
1555 1555 if (error = lookupnameat(path, UIO_USERSPACE,
1556 1556 (flags == AT_SYMLINK_NOFOLLOW) ?
1557 1557 NO_FOLLOW : FOLLOW,
1558 1558 NULLVPP, &vp, startvp)) {
1559 1559 if (startvp != NULL)
1560 1560 VN_RELE(startvp);
1561 1561 return (error);
1562 1562 }
1563 1563 } else {
1564 1564 vp = startvp;
1565 1565 ASSERT(vp);
1566 1566 VN_HOLD(vp);
1567 1567 }
1568 1568
1569 1569 if (vn_is_readonly(vp)) {
1570 1570 error = EROFS;
1571 1571 } else {
1572 1572 error = VOP_SETATTR(vp, vap, 0, CRED(), NULL);
1573 1573 }
1574 1574
1575 1575 if (startvp != NULL)
1576 1576 VN_RELE(startvp);
1577 1577 VN_RELE(vp);
1578 1578
1579 1579 return (error);
1580 1580 }
1581 1581
1582 1582 /*
1583 1583 * Return true if the given vnode is referenced by any
1584 1584 * entry in the current process's file descriptor table.
1585 1585 */
1586 1586 int
1587 1587 fisopen(vnode_t *vp)
1588 1588 {
1589 1589 int fd;
1590 1590 file_t *fp;
1591 1591 vnode_t *ovp;
1592 1592 uf_info_t *fip = P_FINFO(curproc);
1593 1593 uf_entry_t *ufp;
1594 1594
1595 1595 mutex_enter(&fip->fi_lock);
1596 1596 for (fd = 0; fd < fip->fi_nfiles; fd++) {
1597 1597 UF_ENTER(ufp, fip, fd);
1598 1598 if ((fp = ufp->uf_file) != NULL &&
1599 1599 (ovp = fp->f_vnode) != NULL && VN_CMP(vp, ovp)) {
1600 1600 UF_EXIT(ufp);
1601 1601 mutex_exit(&fip->fi_lock);
1602 1602 return (1);
1603 1603 }
1604 1604 UF_EXIT(ufp);
1605 1605 }
1606 1606 mutex_exit(&fip->fi_lock);
1607 1607 return (0);
1608 1608 }
1609 1609
1610 1610 /*
1611 1611 * Return zero if at least one file currently open (by curproc) shouldn't be
1612 1612 * allowed to change zones.
1613 1613 */
1614 1614 int
1615 1615 files_can_change_zones(void)
1616 1616 {
1617 1617 int fd;
1618 1618 file_t *fp;
1619 1619 uf_info_t *fip = P_FINFO(curproc);
1620 1620 uf_entry_t *ufp;
1621 1621
1622 1622 mutex_enter(&fip->fi_lock);
1623 1623 for (fd = 0; fd < fip->fi_nfiles; fd++) {
1624 1624 UF_ENTER(ufp, fip, fd);
1625 1625 if ((fp = ufp->uf_file) != NULL &&
1626 1626 !vn_can_change_zones(fp->f_vnode)) {
1627 1627 UF_EXIT(ufp);
1628 1628 mutex_exit(&fip->fi_lock);
1629 1629 return (0);
1630 1630 }
1631 1631 UF_EXIT(ufp);
1632 1632 }
1633 1633 mutex_exit(&fip->fi_lock);
1634 1634 return (1);
1635 1635 }
1636 1636
1637 1637 #ifdef DEBUG
1638 1638
1639 1639 /*
1640 1640 * The following functions are only used in ASSERT()s elsewhere.
1641 1641 * They do not modify the state of the system.
1642 1642 */
1643 1643
1644 1644 /*
1645 1645 * Return true (1) if the current thread is in the fpollinfo
1646 1646 * list for this file descriptor, else false (0).
1647 1647 */
1648 1648 static int
1649 1649 curthread_in_plist(uf_entry_t *ufp)
1650 1650 {
1651 1651 fpollinfo_t *fpip;
1652 1652
1653 1653 ASSERT(MUTEX_HELD(&ufp->uf_lock));
1654 1654 for (fpip = ufp->uf_fpollinfo; fpip; fpip = fpip->fp_next)
1655 1655 if (fpip->fp_thread == curthread)
1656 1656 return (1);
1657 1657 return (0);
1658 1658 }
1659 1659
1660 1660 /*
1661 1661 * Sanity check to make sure that after lwp_exit(),
1662 1662 * curthread does not appear on any fd's fpollinfo list.
1663 1663 */
1664 1664 void
1665 1665 checkfpollinfo(void)
1666 1666 {
1667 1667 int fd;
1668 1668 uf_info_t *fip = P_FINFO(curproc);
1669 1669 uf_entry_t *ufp;
1670 1670
1671 1671 mutex_enter(&fip->fi_lock);
1672 1672 for (fd = 0; fd < fip->fi_nfiles; fd++) {
1673 1673 UF_ENTER(ufp, fip, fd);
1674 1674 ASSERT(!curthread_in_plist(ufp));
1675 1675 UF_EXIT(ufp);
1676 1676 }
1677 1677 mutex_exit(&fip->fi_lock);
1678 1678 }
1679 1679
1680 1680 /*
1681 1681 * Return true (1) if the current thread is in the fpollinfo
1682 1682 * list for this file descriptor, else false (0).
1683 1683 * This is the same as curthread_in_plist(),
1684 1684 * but is called w/o holding uf_lock.
1685 1685 */
1686 1686 int
1687 1687 infpollinfo(int fd)
1688 1688 {
1689 1689 uf_info_t *fip = P_FINFO(curproc);
1690 1690 uf_entry_t *ufp;
1691 1691 int rc;
1692 1692
1693 1693 UF_ENTER(ufp, fip, fd);
1694 1694 rc = curthread_in_plist(ufp);
1695 1695 UF_EXIT(ufp);
1696 1696 return (rc);
1697 1697 }
1698 1698
1699 1699 #endif /* DEBUG */
1700 1700
1701 1701 /*
1702 1702 * Add the curthread to fpollinfo list, meaning this fd is currently in the
1703 1703 * thread's poll cache. Each lwp polling this file descriptor should call
1704 1704 * this routine once.
1705 1705 */
1706 1706 void
1707 1707 addfpollinfo(int fd)
1708 1708 {
1709 1709 struct uf_entry *ufp;
1710 1710 fpollinfo_t *fpip;
1711 1711 uf_info_t *fip = P_FINFO(curproc);
1712 1712
1713 1713 fpip = kmem_zalloc(sizeof (fpollinfo_t), KM_SLEEP);
1714 1714 fpip->fp_thread = curthread;
1715 1715 UF_ENTER(ufp, fip, fd);
1716 1716 /*
1717 1717 * Assert we are not already on the list, that is, that
1718 1718 * this lwp did not call addfpollinfo twice for the same fd.
1719 1719 */
1720 1720 ASSERT(!curthread_in_plist(ufp));
1721 1721 /*
1722 1722 * addfpollinfo is always done inside the getf/releasef pair.
1723 1723 */
1724 1724 ASSERT(ufp->uf_refcnt >= 1);
1725 1725 fpip->fp_next = ufp->uf_fpollinfo;
1726 1726 ufp->uf_fpollinfo = fpip;
1727 1727 UF_EXIT(ufp);
1728 1728 }
1729 1729
1730 1730 /*
1731 1731 * Delete curthread from fpollinfo list if it is there.
1732 1732 */
1733 1733 void
1734 1734 delfpollinfo(int fd)
1735 1735 {
1736 1736 struct uf_entry *ufp;
1737 1737 struct fpollinfo *fpip;
1738 1738 struct fpollinfo **fpipp;
1739 1739 uf_info_t *fip = P_FINFO(curproc);
1740 1740
1741 1741 UF_ENTER(ufp, fip, fd);
1742 1742 for (fpipp = &ufp->uf_fpollinfo;
1743 1743 (fpip = *fpipp) != NULL;
1744 1744 fpipp = &fpip->fp_next) {
1745 1745 if (fpip->fp_thread == curthread) {
1746 1746 *fpipp = fpip->fp_next;
1747 1747 kmem_free(fpip, sizeof (fpollinfo_t));
1748 1748 break;
1749 1749 }
1750 1750 }
1751 1751 /*
1752 1752 * Assert that we are not still on the list, that is, that
1753 1753 * this lwp did not call addfpollinfo twice for the same fd.
1754 1754 */
1755 1755 ASSERT(!curthread_in_plist(ufp));
1756 1756 UF_EXIT(ufp);
1757 1757 }
1758 1758
1759 1759 /*
1760 1760 * fd is associated with a port. pfd is a pointer to the fd entry in the
1761 1761 * cache of the port.
1762 1762 */
1763 1763
1764 1764 void
1765 1765 addfd_port(int fd, portfd_t *pfd)
1766 1766 {
1767 1767 struct uf_entry *ufp;
1768 1768 uf_info_t *fip = P_FINFO(curproc);
1769 1769
1770 1770 UF_ENTER(ufp, fip, fd);
1771 1771 /*
1772 1772 * addfd_port is always done inside the getf/releasef pair.
1773 1773 */
1774 1774 ASSERT(ufp->uf_refcnt >= 1);
1775 1775 if (ufp->uf_portfd == NULL) {
1776 1776 /* first entry */
1777 1777 ufp->uf_portfd = pfd;
1778 1778 pfd->pfd_next = NULL;
1779 1779 } else {
1780 1780 pfd->pfd_next = ufp->uf_portfd;
1781 1781 ufp->uf_portfd = pfd;
1782 1782 pfd->pfd_next->pfd_prev = pfd;
1783 1783 }
1784 1784 UF_EXIT(ufp);
1785 1785 }
1786 1786
1787 1787 void
1788 1788 delfd_port(int fd, portfd_t *pfd)
1789 1789 {
1790 1790 struct uf_entry *ufp;
1791 1791 uf_info_t *fip = P_FINFO(curproc);
1792 1792
1793 1793 UF_ENTER(ufp, fip, fd);
1794 1794 /*
1795 1795 * delfd_port is always done inside the getf/releasef pair.
1796 1796 */
1797 1797 ASSERT(ufp->uf_refcnt >= 1);
1798 1798 if (ufp->uf_portfd == pfd) {
1799 1799 /* remove first entry */
1800 1800 ufp->uf_portfd = pfd->pfd_next;
1801 1801 } else {
1802 1802 pfd->pfd_prev->pfd_next = pfd->pfd_next;
1803 1803 if (pfd->pfd_next != NULL)
1804 1804 pfd->pfd_next->pfd_prev = pfd->pfd_prev;
1805 1805 }
1806 1806 UF_EXIT(ufp);
1807 1807 }
1808 1808
1809 1809 static void
1810 1810 port_close_fd(portfd_t *pfd)
1811 1811 {
1812 1812 portfd_t *pfdn;
1813 1813
1814 1814 /*
1815 1815 * At this point, no other thread should access
1816 1816 * the portfd_t list for this fd. The uf_file, uf_portfd
1817 1817 * pointers in the uf_entry_t struct for this fd would
1818 1818 * be set to NULL.
1819 1819 */
1820 1820 for (; pfd != NULL; pfd = pfdn) {
1821 1821 pfdn = pfd->pfd_next;
1822 1822 port_close_pfd(pfd);
1823 1823 }
1824 1824 }
(1744 lines elided)