5045 use atomic_{inc,dec}_* instead of atomic_add_*
--- old/usr/src/uts/common/os/strsubr.c
+++ new/usr/src/uts/common/os/strsubr.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
22 22 /* All Rights Reserved */
23 23
24 24
25 25 /*
26 26 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
27 27 * Use is subject to license terms.
28 28 */
29 29
30 30 #include <sys/types.h>
31 31 #include <sys/sysmacros.h>
32 32 #include <sys/param.h>
33 33 #include <sys/errno.h>
34 34 #include <sys/signal.h>
35 35 #include <sys/proc.h>
36 36 #include <sys/conf.h>
37 37 #include <sys/cred.h>
38 38 #include <sys/user.h>
39 39 #include <sys/vnode.h>
40 40 #include <sys/file.h>
41 41 #include <sys/session.h>
42 42 #include <sys/stream.h>
43 43 #include <sys/strsubr.h>
44 44 #include <sys/stropts.h>
45 45 #include <sys/poll.h>
46 46 #include <sys/systm.h>
47 47 #include <sys/cpuvar.h>
48 48 #include <sys/uio.h>
49 49 #include <sys/cmn_err.h>
50 50 #include <sys/priocntl.h>
51 51 #include <sys/procset.h>
52 52 #include <sys/vmem.h>
53 53 #include <sys/bitmap.h>
54 54 #include <sys/kmem.h>
55 55 #include <sys/siginfo.h>
56 56 #include <sys/vtrace.h>
57 57 #include <sys/callb.h>
58 58 #include <sys/debug.h>
59 59 #include <sys/modctl.h>
60 60 #include <sys/vmsystm.h>
61 61 #include <vm/page.h>
62 62 #include <sys/atomic.h>
63 63 #include <sys/suntpi.h>
64 64 #include <sys/strlog.h>
65 65 #include <sys/promif.h>
66 66 #include <sys/project.h>
67 67 #include <sys/vm.h>
68 68 #include <sys/taskq.h>
69 69 #include <sys/sunddi.h>
70 70 #include <sys/sunldi_impl.h>
71 71 #include <sys/strsun.h>
72 72 #include <sys/isa_defs.h>
73 73 #include <sys/multidata.h>
74 74 #include <sys/pattr.h>
75 75 #include <sys/strft.h>
76 76 #include <sys/fs/snode.h>
77 77 #include <sys/zone.h>
78 78 #include <sys/open.h>
79 79 #include <sys/sunldi.h>
80 80 #include <sys/sad.h>
81 81 #include <sys/netstack.h>
82 82
83 83 #define O_SAMESTR(q) (((q)->q_next) && \
84 84 (((q)->q_flag & QREADR) == ((q)->q_next->q_flag & QREADR)))
85 85
86 86 /*
87 87 * WARNING:
88 88 * The variables and routines in this file are private, belonging
89 89 * to the STREAMS subsystem. These should not be used by modules
90 90 * or drivers. Compatibility will not be guaranteed.
91 91 */
92 92
93 93 /*
94 94 * Id value used to distinguish between different multiplexor links.
95 95 */
96 96 static int32_t lnk_id = 0;
97 97
98 98 #define STREAMS_LOPRI MINCLSYSPRI
99 99 static pri_t streams_lopri = STREAMS_LOPRI;
100 100
101 101 #define STRSTAT(x) (str_statistics.x.value.ui64++)
102 102 typedef struct str_stat {
103 103 kstat_named_t sqenables;
104 104 kstat_named_t stenables;
105 105 kstat_named_t syncqservice;
106 106 kstat_named_t freebs;
107 107 kstat_named_t qwr_outer;
108 108 kstat_named_t rservice;
109 109 kstat_named_t strwaits;
110 110 kstat_named_t taskqfails;
111 111 kstat_named_t bufcalls;
112 112 kstat_named_t qhelps;
113 113 kstat_named_t qremoved;
114 114 kstat_named_t sqremoved;
115 115 kstat_named_t bcwaits;
116 116 kstat_named_t sqtoomany;
117 117 } str_stat_t;
118 118
119 119 static str_stat_t str_statistics = {
120 120 { "sqenables", KSTAT_DATA_UINT64 },
121 121 { "stenables", KSTAT_DATA_UINT64 },
122 122 { "syncqservice", KSTAT_DATA_UINT64 },
123 123 { "freebs", KSTAT_DATA_UINT64 },
124 124 { "qwr_outer", KSTAT_DATA_UINT64 },
125 125 { "rservice", KSTAT_DATA_UINT64 },
126 126 { "strwaits", KSTAT_DATA_UINT64 },
127 127 { "taskqfails", KSTAT_DATA_UINT64 },
128 128 { "bufcalls", KSTAT_DATA_UINT64 },
129 129 { "qhelps", KSTAT_DATA_UINT64 },
130 130 { "qremoved", KSTAT_DATA_UINT64 },
131 131 { "sqremoved", KSTAT_DATA_UINT64 },
132 132 { "bcwaits", KSTAT_DATA_UINT64 },
133 133 { "sqtoomany", KSTAT_DATA_UINT64 },
134 134 };
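/*
 * Editorial example (not part of this webrev): STRSTAT() above simply
 * bumps the named 64-bit counter, e.g.
 *
 *	STRSTAT(taskqfails);
 *
 * expands to
 *
 *	str_statistics.taskqfails.value.ui64++;
 *
 * The increment is not atomic; these are best-effort statistics.
 */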
135 135
136 136 static kstat_t *str_kstat;
137 137
138 138 /*
139 139 * qrunflag was used previously to control background scheduling of queues. It
140 140 * is not used anymore, but kept here in case some module still wants to access
141 141 * it via the qready() and setqsched macros.
142 142 */
143 143 char qrunflag; /* Unused */
144 144
145 145 /*
146 146 * Most of the streams scheduling is done via task queues. Task queues may fail
147 147 * for non-sleep dispatches, so there are two backup threads servicing failed
148 148 * requests for queues and syncqs. Both of these threads also service
149 149 * freebs requests whose dispatch failed. Queues are put in the list specified by `qhead'
150 150 * and `qtail' pointers, syncqs use `sqhead' and `sqtail' pointers and freebs
151 151 * requests are put into `freebs_list' which has no tail pointer. All three
152 152 * lists are protected by a single `service_queue' lock and use the
153 153 * `services_to_run' condition variable for signaling background threads. Use of
154 154 * a single lock should not be a problem because it is only used under heavy
155 155 * loads when task queues start to fail and at that time it may be a good idea
156 156 * to throttle scheduling requests.
157 157 *
158 158 * NOTE: queues and syncqs should be scheduled by two separate threads because
159 159 * queue servicing may be blocked waiting for a syncq which may be also
160 160 * scheduled for background execution. This may create a deadlock when only one
161 161 * thread is used for both.
162 162 */
163 163
164 164 static taskq_t *streams_taskq; /* Used for most STREAMS scheduling */
165 165
166 166 static kmutex_t service_queue; /* protects all of servicing vars */
167 167 static kcondvar_t services_to_run; /* wake up background service thread */
168 168 static kcondvar_t syncqs_to_run; /* wake up background service thread */
169 169
170 170 /*
171 171 * List of queues scheduled for background processing due to lack of resources
172 172 * in the task queues. Protected by the service_queue lock.
173 173 */
174 174 static struct queue *qhead;
175 175 static struct queue *qtail;
176 176
177 177 /*
178 178 * Same list for syncqs
179 179 */
180 180 static syncq_t *sqhead;
181 181 static syncq_t *sqtail;
182 182
183 183 static mblk_t *freebs_list; /* list of buffers to free */
184 184
185 185 /*
186 186 * Backup threads for servicing queues and syncqs
187 187 */
188 188 kthread_t *streams_qbkgrnd_thread;
189 189 kthread_t *streams_sqbkgrnd_thread;
190 190
191 191 /*
192 192 * Bufcalls related variables.
193 193 */
194 194 struct bclist strbcalls; /* list of waiting bufcalls */
195 195 kmutex_t strbcall_lock; /* protects bufcall list (strbcalls) */
196 196 kcondvar_t strbcall_cv; /* Signaling when a bufcall is added */
197 197 kmutex_t bcall_monitor; /* sleep/wakeup style monitor */
198 198 kcondvar_t bcall_cv; /* wait until an executing bufcall completes */
199 199 kthread_t *bc_bkgrnd_thread; /* Thread to service bufcall requests */
200 200
201 201 kmutex_t strresources; /* protects global resources */
202 202 kmutex_t muxifier; /* single-threads multiplexor creation */
203 203
204 204 static void *str_stack_init(netstackid_t stackid, netstack_t *ns);
205 205 static void str_stack_shutdown(netstackid_t stackid, void *arg);
206 206 static void str_stack_fini(netstackid_t stackid, void *arg);
207 207
208 208 /*
209 209 * run_queues is no longer used, but is kept in case some 3rd party
210 210 * module/driver decides to use it.
211 211 */
212 212 int run_queues = 0;
213 213
214 214 /*
215 215 * sq_max_size is the depth of the syncq (in number of messages) before
216 216 * qfill_syncq() starts QFULL'ing destination queues. Although its primary
217 217 * consumer, IP, is no longer D_MTPERMOD, other modules/drivers may still
218 218 * depend on this syncq flow control, so we prefer a large default
219 219 * value. For potential
220 220 * performance gain, this value is tunable in /etc/system.
221 221 */
222 222 int sq_max_size = 10000;
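/*
 * Editorial example (not part of this webrev): as noted above, the
 * default can be overridden from /etc/system, e.g.
 *
 *	set sq_max_size = 25000
 *
 * (25000 is an arbitrary illustrative value.)
 */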
223 223
224 224 /*
225 225 * The number of ciputctrl structures per syncq and stream we create when
226 226 * needed.
227 227 */
228 228 int n_ciputctrl;
229 229 int max_n_ciputctrl = 16;
230 230 /*
231 231 * If n_ciputctrl is < min_n_ciputctrl don't even create ciputctrl_cache.
232 232 */
233 233 int min_n_ciputctrl = 2;
234 234
235 235 /*
236 236 * Per-driver/module syncqs
237 237 * ========================
238 238 *
239 239 * For drivers/modules that use PERMOD or outer syncqs we keep a list of
240 240 * perdm structures, new entries being added (and new syncqs allocated) when
241 241 * setq() encounters a module/driver with a streamtab that it hasn't seen
242 242 * before.
243 243 * The reason for this mechanism is that some modules and drivers share a
244 244 * common streamtab and it is necessary for those modules and drivers to also
245 245 * share a common PERMOD syncq.
246 246 *
247 247 * perdm_list --> dm_str == streamtab_1
248 248 * dm_sq == syncq_1
249 249 * dm_ref
250 250 * dm_next --> dm_str == streamtab_2
251 251 * dm_sq == syncq_2
252 252 * dm_ref
253 253 * dm_next --> ... NULL
254 254 *
255 255 * The dm_ref field is incremented for each new driver/module that takes
256 256 * a reference to the perdm structure and hence shares the syncq.
257 257 * References are held in the fmodsw_impl_t structure for each STREAMS module
258 258 * or the dev_impl array (indexed by device major number) for each driver.
259 259 *
260 260 * perdm_list -> [dm_ref == 1] -> [dm_ref == 2] -> [dm_ref == 1] -> NULL
261 261 * ^ ^ ^ ^
262 262 * | ______________/ | |
263 263 * | / | |
264 264 * dev_impl: ...|x|y|... module A module B
265 265 *
266 266 * When a module/driver is unloaded the reference count is decremented and,
267 267 * when it falls to zero, the perdm structure is removed from the list and
268 268 * the syncq is freed (see rele_dm()).
269 269 */
270 270 perdm_t *perdm_list = NULL;
271 271 static krwlock_t perdm_rwlock;
272 272 cdevsw_impl_t *devimpl;
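/*
 * Editorial sketch (not part of this webrev): the perdm life cycle
 * described above, condensed. hold_dm() and rele_dm() are the real
 * routines in this file; the surrounding flow is illustrative only.
 *
 *	attach (see qattach() below):
 *		if (NEED_DM(dp->d_dmp, qflag))
 *			dp->d_dmp = hold_dm(str, qflag, sqtype);
 *	detach:
 *		rele_dm(dmp);
 *
 * When the last reference is released, rele_dm() unlinks the perdm
 * from perdm_list and frees its syncq.
 */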
273 273
274 274 extern struct qinit strdata;
275 275 extern struct qinit stwdata;
276 276
277 277 static void runservice(queue_t *);
278 278 static void streams_bufcall_service(void);
279 279 static void streams_qbkgrnd_service(void);
280 280 static void streams_sqbkgrnd_service(void);
281 281 static syncq_t *new_syncq(void);
282 282 static void free_syncq(syncq_t *);
283 283 static void outer_insert(syncq_t *, syncq_t *);
284 284 static void outer_remove(syncq_t *, syncq_t *);
285 285 static void write_now(syncq_t *);
286 286 static void clr_qfull(queue_t *);
287 287 static void runbufcalls(void);
288 288 static void sqenable(syncq_t *);
289 289 static void sqfill_events(syncq_t *, queue_t *, mblk_t *, void (*)());
290 290 static void wait_q_syncq(queue_t *);
291 291 static void backenable_insertedq(queue_t *);
292 292
293 293 static void queue_service(queue_t *);
294 294 static void stream_service(stdata_t *);
295 295 static void syncq_service(syncq_t *);
296 296 static void qwriter_outer_service(syncq_t *);
297 297 static void mblk_free(mblk_t *);
298 298 #ifdef DEBUG
299 299 static int qprocsareon(queue_t *);
300 300 #endif
301 301
302 302 static void set_nfsrv_ptr(queue_t *, queue_t *, queue_t *, queue_t *);
303 303 static void reset_nfsrv_ptr(queue_t *, queue_t *);
304 304 void set_qfull(queue_t *);
305 305
306 306 static void sq_run_events(syncq_t *);
307 307 static int propagate_syncq(queue_t *);
308 308
309 309 static void blocksq(syncq_t *, ushort_t, int);
310 310 static void unblocksq(syncq_t *, ushort_t, int);
311 311 static int dropsq(syncq_t *, uint16_t);
312 312 static void emptysq(syncq_t *);
313 313 static sqlist_t *sqlist_alloc(struct stdata *, int);
314 314 static void sqlist_free(sqlist_t *);
315 315 static sqlist_t *sqlist_build(queue_t *, struct stdata *, boolean_t);
316 316 static void sqlist_insert(sqlist_t *, syncq_t *);
317 317 static void sqlist_insertall(sqlist_t *, queue_t *);
318 318
319 319 static void strsetuio(stdata_t *);
320 320
321 321 struct kmem_cache *stream_head_cache;
322 322 struct kmem_cache *queue_cache;
323 323 struct kmem_cache *syncq_cache;
324 324 struct kmem_cache *qband_cache;
325 325 struct kmem_cache *linkinfo_cache;
326 326 struct kmem_cache *ciputctrl_cache = NULL;
327 327
328 328 static linkinfo_t *linkinfo_list;
329 329
330 330 /* Global esballoc throttling queue */
331 331 static esb_queue_t system_esbq;
332 332
333 333 /* Array of esballoc throttling queues, of length esbq_nelem */
334 334 static esb_queue_t *volatile system_esbq_array;
335 335 static int esbq_nelem;
336 336 static kmutex_t esbq_lock;
337 337 static int esbq_log2_cpus_per_q = 0;
338 338
339 339 /* Scale the system_esbq length by setting number of CPUs per queue. */
340 340 uint_t esbq_cpus_per_q = 1;
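/*
 * Editorial note (not part of this webrev, and only a sketch): with
 * esbq_cpus_per_q rounded to a power of two, a CPU is mapped to its
 * throttling queue roughly as
 *
 *	system_esbq_array[CPU->cpu_seqid >> esbq_log2_cpus_per_q]
 *
 * giving about ncpus / esbq_cpus_per_q queues in total.
 */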
341 341
342 342 /*
343 343 * esballoc tunable parameters.
344 344 */
345 345 int esbq_max_qlen = 0x16; /* throttled queue length */
346 346 clock_t esbq_timeout = 0x8; /* timeout to process esb queue */
347 347
348 348 /*
349 349 * Routines to handle esballoc queueing.
350 350 */
351 351 static void esballoc_process_queue(esb_queue_t *);
352 352 static void esballoc_enqueue_mblk(mblk_t *);
353 353 static void esballoc_timer(void *);
354 354 static void esballoc_set_timer(esb_queue_t *, clock_t);
355 355 static void esballoc_mblk_free(mblk_t *);
356 356
357 357 /*
358 358 * Qinit structure and Module_info structures
359 359 * for passthru read and write queues
360 360 */
361 361
362 362 static void pass_wput(queue_t *, mblk_t *);
363 363 static queue_t *link_addpassthru(stdata_t *);
364 364 static void link_rempassthru(queue_t *);
365 365
366 366 struct module_info passthru_info = {
367 367 0,
368 368 "passthru",
369 369 0,
370 370 INFPSZ,
371 371 STRHIGH,
372 372 STRLOW
373 373 };
374 374
375 375 struct qinit passthru_rinit = {
376 376 (int (*)())putnext,
377 377 NULL,
378 378 NULL,
379 379 NULL,
380 380 NULL,
381 381 &passthru_info,
382 382 NULL
383 383 };
384 384
385 385 struct qinit passthru_winit = {
386 386 (int (*)()) pass_wput,
387 387 NULL,
388 388 NULL,
389 389 NULL,
390 390 NULL,
391 391 &passthru_info,
392 392 NULL
393 393 };
394 394
395 395 /*
396 396 * Verify correctness of list head/tail pointers.
397 397 */
398 398 #define LISTCHECK(head, tail, link) { \
399 399 EQUIV(head, tail); \
400 400 IMPLY(tail != NULL, tail->link == NULL); \
401 401 }
402 402
403 403 /*
404 404 * Enqueue a list element `el' in the end of a list denoted by `head' and `tail'
405 405 * using a `link' field.
406 406 */
407 407 #define ENQUEUE(el, head, tail, link) { \
408 408 ASSERT(el->link == NULL); \
409 409 LISTCHECK(head, tail, link); \
410 410 if (head == NULL) \
411 411 head = el; \
412 412 else \
413 413 tail->link = el; \
414 414 tail = el; \
415 415 }
416 416
417 417 /*
418 418 * Dequeue the first element of the list denoted by `head' and `tail' pointers
419 419 * using a `link' field and put result into `el'.
420 420 */
421 421 #define DQ(el, head, tail, link) { \
422 422 LISTCHECK(head, tail, link); \
423 423 el = head; \
424 424 if (head != NULL) { \
425 425 head = head->link; \
426 426 if (head == NULL) \
427 427 tail = NULL; \
428 428 el->link = NULL; \
429 429 } \
430 430 }
431 431
432 432 /*
433 433 * Remove `el' from the list using `chase' and `curr' pointers and return result
434 434 * in `succeed'.
435 435 */
436 436 #define RMQ(el, head, tail, link, chase, curr, succeed) { \
437 437 LISTCHECK(head, tail, link); \
438 438 chase = NULL; \
439 439 succeed = 0; \
440 440 for (curr = head; (curr != el) && (curr != NULL); curr = curr->link) \
441 441 chase = curr; \
442 442 if (curr != NULL) { \
443 443 succeed = 1; \
444 444 ASSERT(curr == el); \
445 445 if (chase != NULL) \
446 446 chase->link = curr->link; \
447 447 else \
448 448 head = curr->link; \
449 449 curr->link = NULL; \
450 450 if (curr == tail) \
451 451 tail = chase; \
452 452 } \
453 453 LISTCHECK(head, tail, link); \
454 454 }
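/*
 * Editorial example (not part of this webrev): these macros work on any
 * singly-linked list with head and tail pointers. For the service list
 * of queues, for instance:
 *
 *	ENQUEUE(qp, qhead, qtail, q_link);	append qp to the runlist
 *	DQ(qp, qhead, qtail, q_link);		take the first queue off
 *
 * Syncqs use the same macros with sqhead/sqtail and the sq_next field.
 */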
455 455
456 456 /* Handling of delayed messages on the inner syncq. */
457 457
458 458 /*
459 459 * DEBUG kernels should use the function versions (to simplify tracing);
460 460 * non-DEBUG kernels should use the macro versions.
461 461 */
462 462
463 463 /*
464 464 * Put a queue on the syncq list of queues.
465 465 * Assumes SQLOCK held.
466 466 */
467 467 #define SQPUT_Q(sq, qp) \
468 468 { \
469 469 ASSERT(MUTEX_HELD(SQLOCK(sq))); \
470 470 if (!(qp->q_sqflags & Q_SQQUEUED)) { \
471 471 /* The queue should not be linked anywhere */ \
472 472 ASSERT((qp->q_sqprev == NULL) && (qp->q_sqnext == NULL)); \
473 473 /* Head and tail may only be NULL simultaneously */ \
474 474 EQUIV(sq->sq_head, sq->sq_tail); \
475 475 /* Queue may be only enqueued on its syncq */ \
476 476 ASSERT(sq == qp->q_syncq); \
477 477 /* Check the correctness of SQ_MESSAGES flag */ \
478 478 EQUIV(sq->sq_head, (sq->sq_flags & SQ_MESSAGES)); \
479 479 /* Sanity check first/last elements of the list */ \
480 480 IMPLY(sq->sq_head != NULL, sq->sq_head->q_sqprev == NULL);\
481 481 IMPLY(sq->sq_tail != NULL, sq->sq_tail->q_sqnext == NULL);\
482 482 /* \
483 483 * Sanity check of priority field: empty queue should \
484 484 * have zero priority \
485 485 * and nqueues equal to zero. \
486 486 */ \
487 487 IMPLY(sq->sq_head == NULL, sq->sq_pri == 0); \
488 488 /* Sanity check of sq_nqueues field */ \
489 489 EQUIV(sq->sq_head, sq->sq_nqueues); \
490 490 if (sq->sq_head == NULL) { \
491 491 sq->sq_head = sq->sq_tail = qp; \
492 492 sq->sq_flags |= SQ_MESSAGES; \
493 493 } else if (qp->q_spri == 0) { \
494 494 qp->q_sqprev = sq->sq_tail; \
495 495 sq->sq_tail->q_sqnext = qp; \
496 496 sq->sq_tail = qp; \
497 497 } else { \
498 498 /* \
499 499 * Put this queue in priority order: higher \
500 500 * priority gets closer to the head. \
501 501 */ \
502 502 queue_t **qpp = &sq->sq_tail; \
503 503 queue_t *qnext = NULL; \
504 504 \
505 505 while (*qpp != NULL && qp->q_spri > (*qpp)->q_spri) { \
506 506 qnext = *qpp; \
507 507 qpp = &(*qpp)->q_sqprev; \
508 508 } \
509 509 qp->q_sqnext = qnext; \
510 510 qp->q_sqprev = *qpp; \
511 511 if (*qpp != NULL) { \
512 512 (*qpp)->q_sqnext = qp; \
513 513 } else { \
514 514 sq->sq_head = qp; \
515 515 sq->sq_pri = sq->sq_head->q_spri; \
516 516 } \
517 517 *qpp = qp; \
518 518 } \
519 519 qp->q_sqflags |= Q_SQQUEUED; \
520 520 qp->q_sqtstamp = ddi_get_lbolt(); \
521 521 sq->sq_nqueues++; \
522 522 } \
523 523 }
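/*
 * Editorial example (not part of this webrev): SQPUT_Q keeps the list
 * ordered by q_spri, higher priority closer to the head. Enqueueing
 * queues with q_spri 0, 3 and 1, in that order, yields
 *
 *	sq_head -> [spri 3] -> [spri 1] -> [spri 0] <- sq_tail
 *
 * with sq_pri tracking the priority at the head (3 here).
 */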
524 524
525 525 /*
526 526 * Remove a queue from the syncq list
527 527 * Assumes SQLOCK held.
528 528 */
529 529 #define SQRM_Q(sq, qp) \
530 530 { \
531 531 ASSERT(MUTEX_HELD(SQLOCK(sq))); \
532 532 ASSERT(qp->q_sqflags & Q_SQQUEUED); \
533 533 ASSERT(sq->sq_head != NULL && sq->sq_tail != NULL); \
534 534 ASSERT((sq->sq_flags & SQ_MESSAGES) != 0); \
535 535 /* Check that the queue is actually in the list */ \
536 536 ASSERT(qp->q_sqnext != NULL || sq->sq_tail == qp); \
537 537 ASSERT(qp->q_sqprev != NULL || sq->sq_head == qp); \
538 538 ASSERT(sq->sq_nqueues != 0); \
539 539 if (qp->q_sqprev == NULL) { \
540 540 /* First queue on list, make head q_sqnext */ \
541 541 sq->sq_head = qp->q_sqnext; \
542 542 } else { \
543 543 /* Make prev->next == next */ \
544 544 qp->q_sqprev->q_sqnext = qp->q_sqnext; \
545 545 } \
546 546 if (qp->q_sqnext == NULL) { \
547 547 /* Last queue on list, make tail sqprev */ \
548 548 sq->sq_tail = qp->q_sqprev; \
549 549 } else { \
550 550 /* Make next->prev == prev */ \
551 551 qp->q_sqnext->q_sqprev = qp->q_sqprev; \
552 552 } \
553 553 /* clear out references on this queue */ \
554 554 qp->q_sqprev = qp->q_sqnext = NULL; \
555 555 qp->q_sqflags &= ~Q_SQQUEUED; \
556 556 /* If there is nothing queued, clear SQ_MESSAGES */ \
557 557 if (sq->sq_head != NULL) { \
558 558 sq->sq_pri = sq->sq_head->q_spri; \
559 559 } else { \
560 560 sq->sq_flags &= ~SQ_MESSAGES; \
561 561 sq->sq_pri = 0; \
562 562 } \
563 563 sq->sq_nqueues--; \
564 564 ASSERT(sq->sq_head != NULL || sq->sq_evhead != NULL || \
565 565 (sq->sq_flags & SQ_QUEUED) == 0); \
566 566 }
567 567
568 568 /* Hide the definition from the header file. */
569 569 #ifdef SQPUT_MP
570 570 #undef SQPUT_MP
571 571 #endif
572 572
573 573 /*
574 574 * Put a message on the queue syncq.
575 575 * Assumes QLOCK held.
576 576 */
577 577 #define SQPUT_MP(qp, mp) \
578 578 { \
579 579 ASSERT(MUTEX_HELD(QLOCK(qp))); \
580 580 ASSERT(qp->q_sqhead == NULL || \
581 581 (qp->q_sqtail != NULL && \
582 582 qp->q_sqtail->b_next == NULL)); \
583 583 qp->q_syncqmsgs++; \
584 584 ASSERT(qp->q_syncqmsgs != 0); /* Wraparound */ \
585 585 if (qp->q_sqhead == NULL) { \
586 586 qp->q_sqhead = qp->q_sqtail = mp; \
587 587 } else { \
588 588 qp->q_sqtail->b_next = mp; \
589 589 qp->q_sqtail = mp; \
590 590 } \
591 591 ASSERT(qp->q_syncqmsgs > 0); \
592 592 set_qfull(qp); \
593 593 }
594 594
595 595 #define SQ_PUTCOUNT_SETFAST_LOCKED(sq) { \
596 596 ASSERT(MUTEX_HELD(SQLOCK(sq))); \
597 597 if ((sq)->sq_ciputctrl != NULL) { \
598 598 int i; \
599 599 int nlocks = (sq)->sq_nciputctrl; \
600 600 ciputctrl_t *cip = (sq)->sq_ciputctrl; \
601 601 ASSERT((sq)->sq_type & SQ_CIPUT); \
602 602 for (i = 0; i <= nlocks; i++) { \
603 603 ASSERT(MUTEX_HELD(&cip[i].ciputctrl_lock)); \
604 604 cip[i].ciputctrl_count |= SQ_FASTPUT; \
605 605 } \
606 606 } \
607 607 }
608 608
609 609
610 610 #define SQ_PUTCOUNT_CLRFAST_LOCKED(sq) { \
611 611 ASSERT(MUTEX_HELD(SQLOCK(sq))); \
612 612 if ((sq)->sq_ciputctrl != NULL) { \
613 613 int i; \
614 614 int nlocks = (sq)->sq_nciputctrl; \
615 615 ciputctrl_t *cip = (sq)->sq_ciputctrl; \
616 616 ASSERT((sq)->sq_type & SQ_CIPUT); \
617 617 for (i = 0; i <= nlocks; i++) { \
618 618 ASSERT(MUTEX_HELD(&cip[i].ciputctrl_lock)); \
619 619 cip[i].ciputctrl_count &= ~SQ_FASTPUT; \
620 620 } \
621 621 } \
622 622 }
623 623
624 624 /*
625 625 * Run service procedures for all queues in the stream head.
626 626 */
627 627 #define STR_SERVICE(stp, q) { \
628 628 ASSERT(MUTEX_HELD(&stp->sd_qlock)); \
629 629 while (stp->sd_qhead != NULL) { \
630 630 DQ(q, stp->sd_qhead, stp->sd_qtail, q_link); \
631 631 ASSERT(stp->sd_nqueues > 0); \
632 632 stp->sd_nqueues--; \
633 633 ASSERT(!(q->q_flag & QINSERVICE)); \
634 634 mutex_exit(&stp->sd_qlock); \
635 635 queue_service(q); \
636 636 mutex_enter(&stp->sd_qlock); \
637 637 } \
638 638 ASSERT(stp->sd_nqueues == 0); \
639 639 ASSERT((stp->sd_qhead == NULL) && (stp->sd_qtail == NULL)); \
640 640 }
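/*
 * Editorial sketch (not part of this webrev): the expected calling
 * pattern. sd_qlock must be held on entry; the macro drops and
 * re-acquires it around each queue_service() call:
 *
 *	mutex_enter(&stp->sd_qlock);
 *	STR_SERVICE(stp, q);
 *	mutex_exit(&stp->sd_qlock);
 */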
641 641
642 642 /*
643 643 * Constructor/destructor routines for the stream head cache
644 644 */
645 645 /* ARGSUSED */
646 646 static int
647 647 stream_head_constructor(void *buf, void *cdrarg, int kmflags)
648 648 {
649 649 stdata_t *stp = buf;
650 650
651 651 mutex_init(&stp->sd_lock, NULL, MUTEX_DEFAULT, NULL);
652 652 mutex_init(&stp->sd_reflock, NULL, MUTEX_DEFAULT, NULL);
653 653 mutex_init(&stp->sd_qlock, NULL, MUTEX_DEFAULT, NULL);
654 654 cv_init(&stp->sd_monitor, NULL, CV_DEFAULT, NULL);
655 655 cv_init(&stp->sd_iocmonitor, NULL, CV_DEFAULT, NULL);
656 656 cv_init(&stp->sd_refmonitor, NULL, CV_DEFAULT, NULL);
657 657 cv_init(&stp->sd_qcv, NULL, CV_DEFAULT, NULL);
658 658 cv_init(&stp->sd_zcopy_wait, NULL, CV_DEFAULT, NULL);
659 659 stp->sd_wrq = NULL;
660 660
661 661 return (0);
662 662 }
663 663
664 664 /* ARGSUSED */
665 665 static void
666 666 stream_head_destructor(void *buf, void *cdrarg)
667 667 {
668 668 stdata_t *stp = buf;
669 669
670 670 mutex_destroy(&stp->sd_lock);
671 671 mutex_destroy(&stp->sd_reflock);
672 672 mutex_destroy(&stp->sd_qlock);
673 673 cv_destroy(&stp->sd_monitor);
674 674 cv_destroy(&stp->sd_iocmonitor);
675 675 cv_destroy(&stp->sd_refmonitor);
676 676 cv_destroy(&stp->sd_qcv);
677 677 cv_destroy(&stp->sd_zcopy_wait);
678 678 }
679 679
680 680 /*
681 681 * Constructor/destructor routines for the queue cache
682 682 */
683 683 /* ARGSUSED */
684 684 static int
685 685 queue_constructor(void *buf, void *cdrarg, int kmflags)
686 686 {
687 687 queinfo_t *qip = buf;
688 688 queue_t *qp = &qip->qu_rqueue;
689 689 queue_t *wqp = &qip->qu_wqueue;
690 690 syncq_t *sq = &qip->qu_syncq;
691 691
692 692 qp->q_first = NULL;
693 693 qp->q_link = NULL;
694 694 qp->q_count = 0;
695 695 qp->q_mblkcnt = 0;
696 696 qp->q_sqhead = NULL;
697 697 qp->q_sqtail = NULL;
698 698 qp->q_sqnext = NULL;
699 699 qp->q_sqprev = NULL;
700 700 qp->q_sqflags = 0;
701 701 qp->q_rwcnt = 0;
702 702 qp->q_spri = 0;
703 703
704 704 mutex_init(QLOCK(qp), NULL, MUTEX_DEFAULT, NULL);
705 705 cv_init(&qp->q_wait, NULL, CV_DEFAULT, NULL);
706 706
707 707 wqp->q_first = NULL;
708 708 wqp->q_link = NULL;
709 709 wqp->q_count = 0;
710 710 wqp->q_mblkcnt = 0;
711 711 wqp->q_sqhead = NULL;
712 712 wqp->q_sqtail = NULL;
713 713 wqp->q_sqnext = NULL;
714 714 wqp->q_sqprev = NULL;
715 715 wqp->q_sqflags = 0;
716 716 wqp->q_rwcnt = 0;
717 717 wqp->q_spri = 0;
718 718
719 719 mutex_init(QLOCK(wqp), NULL, MUTEX_DEFAULT, NULL);
720 720 cv_init(&wqp->q_wait, NULL, CV_DEFAULT, NULL);
721 721
722 722 sq->sq_head = NULL;
723 723 sq->sq_tail = NULL;
724 724 sq->sq_evhead = NULL;
725 725 sq->sq_evtail = NULL;
726 726 sq->sq_callbpend = NULL;
727 727 sq->sq_outer = NULL;
728 728 sq->sq_onext = NULL;
729 729 sq->sq_oprev = NULL;
730 730 sq->sq_next = NULL;
731 731 sq->sq_svcflags = 0;
732 732 sq->sq_servcount = 0;
733 733 sq->sq_needexcl = 0;
734 734 sq->sq_nqueues = 0;
735 735 sq->sq_pri = 0;
736 736
737 737 mutex_init(&sq->sq_lock, NULL, MUTEX_DEFAULT, NULL);
738 738 cv_init(&sq->sq_wait, NULL, CV_DEFAULT, NULL);
739 739 cv_init(&sq->sq_exitwait, NULL, CV_DEFAULT, NULL);
740 740
741 741 return (0);
742 742 }
743 743
744 744 /* ARGSUSED */
745 745 static void
746 746 queue_destructor(void *buf, void *cdrarg)
747 747 {
748 748 queinfo_t *qip = buf;
749 749 queue_t *qp = &qip->qu_rqueue;
750 750 queue_t *wqp = &qip->qu_wqueue;
751 751 syncq_t *sq = &qip->qu_syncq;
752 752
753 753 ASSERT(qp->q_sqhead == NULL);
754 754 ASSERT(wqp->q_sqhead == NULL);
755 755 ASSERT(qp->q_sqnext == NULL);
756 756 ASSERT(wqp->q_sqnext == NULL);
757 757 ASSERT(qp->q_rwcnt == 0);
758 758 ASSERT(wqp->q_rwcnt == 0);
759 759
760 760 mutex_destroy(&qp->q_lock);
761 761 cv_destroy(&qp->q_wait);
762 762
763 763 mutex_destroy(&wqp->q_lock);
764 764 cv_destroy(&wqp->q_wait);
765 765
766 766 mutex_destroy(&sq->sq_lock);
767 767 cv_destroy(&sq->sq_wait);
768 768 cv_destroy(&sq->sq_exitwait);
769 769 }
770 770
771 771 /*
772 772 * Constructor/destructor routines for the syncq cache
773 773 */
774 774 /* ARGSUSED */
775 775 static int
776 776 syncq_constructor(void *buf, void *cdrarg, int kmflags)
777 777 {
778 778 syncq_t *sq = buf;
779 779
780 780 bzero(buf, sizeof (syncq_t));
781 781
782 782 mutex_init(&sq->sq_lock, NULL, MUTEX_DEFAULT, NULL);
783 783 cv_init(&sq->sq_wait, NULL, CV_DEFAULT, NULL);
784 784 cv_init(&sq->sq_exitwait, NULL, CV_DEFAULT, NULL);
785 785
786 786 return (0);
787 787 }
788 788
789 789 /* ARGSUSED */
790 790 static void
791 791 syncq_destructor(void *buf, void *cdrarg)
792 792 {
793 793 syncq_t *sq = buf;
794 794
795 795 ASSERT(sq->sq_head == NULL);
796 796 ASSERT(sq->sq_tail == NULL);
797 797 ASSERT(sq->sq_evhead == NULL);
798 798 ASSERT(sq->sq_evtail == NULL);
799 799 ASSERT(sq->sq_callbpend == NULL);
800 800 ASSERT(sq->sq_callbflags == 0);
801 801 ASSERT(sq->sq_outer == NULL);
802 802 ASSERT(sq->sq_onext == NULL);
803 803 ASSERT(sq->sq_oprev == NULL);
804 804 ASSERT(sq->sq_next == NULL);
805 805 ASSERT(sq->sq_needexcl == 0);
806 806 ASSERT(sq->sq_svcflags == 0);
807 807 ASSERT(sq->sq_servcount == 0);
808 808 ASSERT(sq->sq_nqueues == 0);
809 809 ASSERT(sq->sq_pri == 0);
810 810 ASSERT(sq->sq_count == 0);
811 811 ASSERT(sq->sq_rmqcount == 0);
812 812 ASSERT(sq->sq_cancelid == 0);
813 813 ASSERT(sq->sq_ciputctrl == NULL);
814 814 ASSERT(sq->sq_nciputctrl == 0);
815 815 ASSERT(sq->sq_type == 0);
816 816 ASSERT(sq->sq_flags == 0);
817 817
818 818 mutex_destroy(&sq->sq_lock);
819 819 cv_destroy(&sq->sq_wait);
820 820 cv_destroy(&sq->sq_exitwait);
821 821 }
822 822
823 823 /* ARGSUSED */
824 824 static int
825 825 ciputctrl_constructor(void *buf, void *cdrarg, int kmflags)
826 826 {
827 827 ciputctrl_t *cip = buf;
828 828 int i;
829 829
830 830 for (i = 0; i < n_ciputctrl; i++) {
831 831 cip[i].ciputctrl_count = SQ_FASTPUT;
832 832 mutex_init(&cip[i].ciputctrl_lock, NULL, MUTEX_DEFAULT, NULL);
833 833 }
834 834
835 835 return (0);
836 836 }
837 837
838 838 /* ARGSUSED */
839 839 static void
840 840 ciputctrl_destructor(void *buf, void *cdrarg)
841 841 {
842 842 ciputctrl_t *cip = buf;
843 843 int i;
844 844
845 845 for (i = 0; i < n_ciputctrl; i++) {
846 846 ASSERT(cip[i].ciputctrl_count & SQ_FASTPUT);
847 847 mutex_destroy(&cip[i].ciputctrl_lock);
848 848 }
849 849 }
850 850
851 851 /*
852 852 * Init routine run from main at boot time.
853 853 */
854 854 void
855 855 strinit(void)
856 856 {
857 857 int ncpus = ((boot_max_ncpus == -1) ? max_ncpus : boot_max_ncpus);
858 858
859 859 stream_head_cache = kmem_cache_create("stream_head_cache",
860 860 sizeof (stdata_t), 0,
861 861 stream_head_constructor, stream_head_destructor, NULL,
862 862 NULL, NULL, 0);
863 863
864 864 queue_cache = kmem_cache_create("queue_cache", sizeof (queinfo_t), 0,
865 865 queue_constructor, queue_destructor, NULL, NULL, NULL, 0);
866 866
867 867 syncq_cache = kmem_cache_create("syncq_cache", sizeof (syncq_t), 0,
868 868 syncq_constructor, syncq_destructor, NULL, NULL, NULL, 0);
869 869
870 870 qband_cache = kmem_cache_create("qband_cache",
871 871 sizeof (qband_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
872 872
873 873 linkinfo_cache = kmem_cache_create("linkinfo_cache",
874 874 sizeof (linkinfo_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
875 875
876 876 n_ciputctrl = ncpus;
877 877 n_ciputctrl = 1 << highbit(n_ciputctrl - 1);
878 878 ASSERT(n_ciputctrl >= 1);
879 879 n_ciputctrl = MIN(n_ciputctrl, max_n_ciputctrl);
880 880 if (n_ciputctrl >= min_n_ciputctrl) {
881 881 ciputctrl_cache = kmem_cache_create("ciputctrl_cache",
882 882 sizeof (ciputctrl_t) * n_ciputctrl,
883 883 sizeof (ciputctrl_t), ciputctrl_constructor,
884 884 ciputctrl_destructor, NULL, NULL, NULL, 0);
885 885 }
886 886
887 887 streams_taskq = system_taskq;
888 888
889 889 if (streams_taskq == NULL)
890 890 panic("strinit: no memory for streams taskq!");
891 891
892 892 bc_bkgrnd_thread = thread_create(NULL, 0,
893 893 streams_bufcall_service, NULL, 0, &p0, TS_RUN, streams_lopri);
894 894
895 895 streams_qbkgrnd_thread = thread_create(NULL, 0,
896 896 streams_qbkgrnd_service, NULL, 0, &p0, TS_RUN, streams_lopri);
897 897
898 898 streams_sqbkgrnd_thread = thread_create(NULL, 0,
899 899 streams_sqbkgrnd_service, NULL, 0, &p0, TS_RUN, streams_lopri);
900 900
901 901 /*
902 902 * Create STREAMS kstats.
903 903 */
904 904 str_kstat = kstat_create("streams", 0, "strstat",
905 905 "net", KSTAT_TYPE_NAMED,
906 906 sizeof (str_statistics) / sizeof (kstat_named_t),
907 907 KSTAT_FLAG_VIRTUAL);
908 908
909 909 if (str_kstat != NULL) {
910 910 str_kstat->ks_data = &str_statistics;
911 911 kstat_install(str_kstat);
912 912 }
913 913
914 914 /*
915 915 * TPI support routine initialisation.
916 916 */
917 917 tpi_init();
918 918
919 919 /*
920 920 * Register a per-zone handle for autopush and persistent link
921 921 * information.
922 922 * Note: uses shutdown hook instead of destroy hook so that the
923 923 * persistent links can be torn down before the destroy hooks
924 924 * in the TCP/IP stack are called.
925 925 */
926 926 netstack_register(NS_STR, str_stack_init, str_stack_shutdown,
927 927 str_stack_fini);
928 928 }
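/*
 * Editorial note (not part of this webrev): once installed, the
 * statistics above are visible from userland, e.g.
 *
 *	$ kstat -n strstat
 *
 * which prints the streams:0:strstat named kstat created here.
 */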
929 929
930 930 void
931 931 str_sendsig(vnode_t *vp, int event, uchar_t band, int error)
932 932 {
933 933 struct stdata *stp;
934 934
935 935 ASSERT(vp->v_stream);
936 936 stp = vp->v_stream;
937 937 /* Have to hold sd_lock to prevent siglist from changing */
938 938 mutex_enter(&stp->sd_lock);
939 939 if (stp->sd_sigflags & event)
940 940 strsendsig(stp->sd_siglist, event, band, error);
941 941 mutex_exit(&stp->sd_lock);
942 942 }
943 943
944 944 /*
945 945 * Send the "sevent" set of signals to a process.
946 946 * This might send more than one signal if the process is registered
947 947 * for multiple events. The caller should pass in an sevent that only
948 948 * includes the events for which the process has registered.
949 949 */
950 950 static void
951 951 dosendsig(proc_t *proc, int events, int sevent, k_siginfo_t *info,
952 952 uchar_t band, int error)
953 953 {
954 954 ASSERT(MUTEX_HELD(&proc->p_lock));
955 955
956 956 info->si_band = 0;
957 957 info->si_errno = 0;
958 958
959 959 if (sevent & S_ERROR) {
960 960 sevent &= ~S_ERROR;
961 961 info->si_code = POLL_ERR;
962 962 info->si_errno = error;
963 963 TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
964 964 "strsendsig:proc %p info %p", proc, info);
965 965 sigaddq(proc, NULL, info, KM_NOSLEEP);
966 966 info->si_errno = 0;
967 967 }
968 968 if (sevent & S_HANGUP) {
969 969 sevent &= ~S_HANGUP;
970 970 info->si_code = POLL_HUP;
971 971 TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
972 972 "strsendsig:proc %p info %p", proc, info);
973 973 sigaddq(proc, NULL, info, KM_NOSLEEP);
974 974 }
975 975 if (sevent & S_HIPRI) {
976 976 sevent &= ~S_HIPRI;
977 977 info->si_code = POLL_PRI;
978 978 TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
979 979 "strsendsig:proc %p info %p", proc, info);
980 980 sigaddq(proc, NULL, info, KM_NOSLEEP);
981 981 }
982 982 if (sevent & S_RDBAND) {
983 983 sevent &= ~S_RDBAND;
984 984 if (events & S_BANDURG)
985 985 sigtoproc(proc, NULL, SIGURG);
986 986 else
987 987 sigtoproc(proc, NULL, SIGPOLL);
988 988 }
989 989 if (sevent & S_WRBAND) {
990 990 sevent &= ~S_WRBAND;
991 991 sigtoproc(proc, NULL, SIGPOLL);
992 992 }
993 993 if (sevent & S_INPUT) {
994 994 sevent &= ~S_INPUT;
995 995 info->si_code = POLL_IN;
996 996 info->si_band = band;
997 997 TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
998 998 "strsendsig:proc %p info %p", proc, info);
999 999 sigaddq(proc, NULL, info, KM_NOSLEEP);
1000 1000 info->si_band = 0;
1001 1001 }
1002 1002 if (sevent & S_OUTPUT) {
1003 1003 sevent &= ~S_OUTPUT;
1004 1004 info->si_code = POLL_OUT;
1005 1005 info->si_band = band;
1006 1006 TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
1007 1007 "strsendsig:proc %p info %p", proc, info);
1008 1008 sigaddq(proc, NULL, info, KM_NOSLEEP);
1009 1009 info->si_band = 0;
1010 1010 }
1011 1011 if (sevent & S_MSG) {
1012 1012 sevent &= ~S_MSG;
1013 1013 info->si_code = POLL_MSG;
1014 1014 info->si_band = band;
1015 1015 TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
1016 1016 "strsendsig:proc %p info %p", proc, info);
1017 1017 sigaddq(proc, NULL, info, KM_NOSLEEP);
1018 1018 info->si_band = 0;
1019 1019 }
1020 1020 if (sevent & S_RDNORM) {
1021 1021 sevent &= ~S_RDNORM;
1022 1022 sigtoproc(proc, NULL, SIGPOLL);
1023 1023 }
1024 1024 if (sevent != 0) {
1025 1025 panic("strsendsig: unknown event(s) %x", sevent);
1026 1026 }
1027 1027 }
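/*
 * Editorial summary (not part of this webrev) of dosendsig() above:
 *
 *	S_ERROR, S_HANGUP, S_HIPRI, S_INPUT, S_OUTPUT, S_MSG
 *		-> SIGPOLL queued via sigaddq() with si_code POLL_ERR,
 *		   POLL_HUP, POLL_PRI, POLL_IN, POLL_OUT or POLL_MSG
 *	S_RDBAND -> SIGURG if S_BANDURG was registered, else SIGPOLL
 *	S_WRBAND, S_RDNORM -> plain SIGPOLL via sigtoproc()
 */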
1028 1028
1029 1029 /*
1030 1030 * Send SIGPOLL/SIGURG signal to all processes and process groups
1031 1031 * registered on the given signal list that want a signal for at
1032 1032 * least one of the specified events.
1033 1033 *
1034 1034 * Must be called with exclusive access to siglist (caller holding sd_lock).
1035 1035 *
1036 1036 * strioctl(I_SETSIG/I_ESETSIG) will only change siglist when holding
1037 1037 * sd_lock and the ioctl code maintains a PID_HOLD on the pid structure
1038 1038 * while it is in the siglist.
1039 1039 *
1040 1040 * For performance reasons (MP scalability) the code drops pidlock
1041 1041 * when sending signals to a single process.
1042 1042 * When sending to a process group the code holds
1043 1043 * pidlock to prevent the membership in the process group from changing
1044 1044 * while walking the p_pglink list.
1045 1045 */
1046 1046 void
1047 1047 strsendsig(strsig_t *siglist, int event, uchar_t band, int error)
1048 1048 {
1049 1049 strsig_t *ssp;
1050 1050 k_siginfo_t info;
1051 1051 struct pid *pidp;
1052 1052 proc_t *proc;
1053 1053
1054 1054 info.si_signo = SIGPOLL;
1055 1055 info.si_errno = 0;
1056 1056 for (ssp = siglist; ssp; ssp = ssp->ss_next) {
1057 1057 int sevent;
1058 1058
1059 1059 sevent = ssp->ss_events & event;
1060 1060 if (sevent == 0)
1061 1061 continue;
1062 1062
1063 1063 if ((pidp = ssp->ss_pidp) == NULL) {
1064 1064 /* pid was released but still on event list */
1065 1065 continue;
1066 1066 }
1067 1067
1068 1068
1069 1069 if (ssp->ss_pid > 0) {
1070 1070 /*
1071 1071 * XXX This unfortunately still generates
1072 1072 * a signal when an fd is closed but
1073 1073 * the proc is active.
1074 1074 */
1075 1075 ASSERT(ssp->ss_pid == pidp->pid_id);
1076 1076
1077 1077 mutex_enter(&pidlock);
1078 1078 proc = prfind_zone(pidp->pid_id, ALL_ZONES);
1079 1079 if (proc == NULL) {
1080 1080 mutex_exit(&pidlock);
1081 1081 continue;
1082 1082 }
1083 1083 mutex_enter(&proc->p_lock);
1084 1084 mutex_exit(&pidlock);
1085 1085 dosendsig(proc, ssp->ss_events, sevent, &info,
1086 1086 band, error);
1087 1087 mutex_exit(&proc->p_lock);
1088 1088 } else {
1089 1089 /*
1090 1090 * Send to process group. Hold pidlock across
1091 1091 * calls to dosendsig().
1092 1092 */
1093 1093 pid_t pgrp = -ssp->ss_pid;
1094 1094
1095 1095 mutex_enter(&pidlock);
1096 1096 proc = pgfind_zone(pgrp, ALL_ZONES);
1097 1097 while (proc != NULL) {
1098 1098 mutex_enter(&proc->p_lock);
1099 1099 dosendsig(proc, ssp->ss_events, sevent,
1100 1100 &info, band, error);
1101 1101 mutex_exit(&proc->p_lock);
1102 1102 proc = proc->p_pglink;
1103 1103 }
1104 1104 mutex_exit(&pidlock);
1105 1105 }
1106 1106 }
1107 1107 }
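/*
 * Editorial example (not part of this webrev): a userland process
 * registers for these signals with the I_SETSIG ioctl, e.g.
 *
 *	#include <stropts.h>
 *	#include <signal.h>
 *
 *	sigset(SIGPOLL, handler);	(handler is the app's function)
 *	if (ioctl(fd, I_SETSIG, S_INPUT | S_ERROR | S_HANGUP) < 0)
 *		perror("I_SETSIG");
 *
 * after which strsendsig() delivers SIGPOLL (or SIGURG for S_BANDURG)
 * when a matching event occurs on the stream.
 */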
1108 1108
1109 1109 /*
1110 1110 * Attach a stream device or module.
1111 1111 * qp is a read queue; the new queue goes in so its next
1112 1112 * read ptr is the argument, and the write queue corresponding
1113 1113 * to the argument points to this queue. Return 0 on success,
1114 1114 * or a non-zero errno on failure.
1115 1115 */
1116 1116 int
1117 1117 qattach(queue_t *qp, dev_t *devp, int oflag, cred_t *crp, fmodsw_impl_t *fp,
1118 1118 boolean_t is_insert)
1119 1119 {
1120 1120 major_t major;
1121 1121 cdevsw_impl_t *dp;
1122 1122 struct streamtab *str;
1123 1123 queue_t *rq;
1124 1124 queue_t *wrq;
1125 1125 uint32_t qflag;
1126 1126 uint32_t sqtype;
1127 1127 perdm_t *dmp;
1128 1128 int error;
1129 1129 int sflag;
1130 1130
1131 1131 rq = allocq();
1132 1132 wrq = _WR(rq);
1133 1133 STREAM(rq) = STREAM(wrq) = STREAM(qp);
1134 1134
1135 1135 if (fp != NULL) {
1136 1136 str = fp->f_str;
1137 1137 qflag = fp->f_qflag;
1138 1138 sqtype = fp->f_sqtype;
1139 1139 dmp = fp->f_dmp;
1140 1140 IMPLY((qflag & (QPERMOD | QMTOUTPERIM)), dmp != NULL);
1141 1141 sflag = MODOPEN;
1142 1142
1143 1143 /*
1144 1144 * stash away a pointer to the module structure so we can
1145 1145 * unref it in qdetach.
1146 1146 */
1147 1147 rq->q_fp = fp;
1148 1148 } else {
1149 1149 ASSERT(!is_insert);
1150 1150
1151 1151 major = getmajor(*devp);
1152 1152 dp = &devimpl[major];
1153 1153
1154 1154 str = dp->d_str;
1155 1155 ASSERT(str == STREAMSTAB(major));
1156 1156
1157 1157 qflag = dp->d_qflag;
1158 1158 ASSERT(qflag & QISDRV);
1159 1159 sqtype = dp->d_sqtype;
1160 1160
1161 1161 /* create perdm_t if needed */
1162 1162 if (NEED_DM(dp->d_dmp, qflag))
1163 1163 dp->d_dmp = hold_dm(str, qflag, sqtype);
1164 1164
1165 1165 dmp = dp->d_dmp;
1166 1166 sflag = 0;
1167 1167 }
1168 1168
1169 1169 TRACE_2(TR_FAC_STREAMS_FR, TR_QATTACH_FLAGS,
1170 1170 "qattach:qflag == %X(%X)", qflag, *devp);
1171 1171
1172 1172 /* setq might sleep in allocator - avoid holding locks. */
1173 1173 setq(rq, str->st_rdinit, str->st_wrinit, dmp, qflag, sqtype, B_FALSE);
1174 1174
1175 1175 /*
1176 1176 * Before calling the module's open routine, set up the q_next
1177 1177 * pointer for inserting a module in the middle of a stream.
1178 1178 *
1179 1179 * Note that we can always set _QINSERTING and set up q_next
1180 1180 * pointer for both inserting and pushing a module. Then there
1181 1181 * is no need for the is_insert parameter. In insertq(), called
1182 1182 * by qprocson(), assume that q_next of the new module always points
1183 1183 * to the correct queue and use it for insertion. Everything should
1184 1184 * work out fine. But in the first release of _I_INSERT, we
1185 1185 * distinguish between inserting and pushing to make sure that
1186 1186 * pushing a module follows the same code path as before.
1187 1187 */
1188 1188 if (is_insert) {
1189 1189 rq->q_flag |= _QINSERTING;
1190 1190 rq->q_next = qp;
1191 1191 }
1192 1192
1193 1193 /*
1194 1194 * If there is an outer perimeter get exclusive access during
1195 1195 * the open procedure. Bump up the reference count on the queue.
1196 1196 */
1197 1197 entersq(rq->q_syncq, SQ_OPENCLOSE);
1198 1198 error = (*rq->q_qinfo->qi_qopen)(rq, devp, oflag, sflag, crp);
1199 1199 if (error != 0)
1200 1200 goto failed;
1201 1201 leavesq(rq->q_syncq, SQ_OPENCLOSE);
1202 1202 ASSERT(qprocsareon(rq));
1203 1203 return (0);
1204 1204
1205 1205 failed:
1206 1206 rq->q_flag &= ~_QINSERTING;
1207 1207 if (backq(wrq) != NULL && backq(wrq)->q_next == wrq)
1208 1208 qprocsoff(rq);
1209 1209 leavesq(rq->q_syncq, SQ_OPENCLOSE);
1210 1210 rq->q_next = wrq->q_next = NULL;
1211 1211 qdetach(rq, 0, 0, crp, B_FALSE);
1212 1212 return (error);
1213 1213 }
1214 1214
1215 1215 /*
1216 1216 * Handle second open of stream. For modules, set the
1217 1217 * last argument to MODOPEN and do not pass any open flags.
1218 1218 * Ignore dummydev since this is not the first open.
1219 1219 */
1220 1220 int
1221 1221 qreopen(queue_t *qp, dev_t *devp, int flag, cred_t *crp)
1222 1222 {
1223 1223 int error;
1224 1224 dev_t dummydev;
1225 1225 queue_t *wqp = _WR(qp);
1226 1226
1227 1227 ASSERT(qp->q_flag & QREADR);
1228 1228 entersq(qp->q_syncq, SQ_OPENCLOSE);
1229 1229
1230 1230 dummydev = *devp;
1231 1231 if (error = ((*qp->q_qinfo->qi_qopen)(qp, &dummydev,
1232 1232 (wqp->q_next ? 0 : flag), (wqp->q_next ? MODOPEN : 0), crp))) {
1233 1233 leavesq(qp->q_syncq, SQ_OPENCLOSE);
1234 1234 mutex_enter(&STREAM(qp)->sd_lock);
1235 1235 qp->q_stream->sd_flag |= STREOPENFAIL;
1236 1236 mutex_exit(&STREAM(qp)->sd_lock);
1237 1237 return (error);
1238 1238 }
1239 1239 leavesq(qp->q_syncq, SQ_OPENCLOSE);
1240 1240
1241 1241 /*
1242 1242 * A successful open should have called qprocson().
1243 1243 */
1244 1244 ASSERT(qprocsareon(_RD(qp)));
1245 1245 return (0);
1246 1246 }
1247 1247
1248 1248 /*
1249 1249 * Detach a stream module or device.
1250 1250 * If clmode == 1 then the module or driver was opened and its
1251 1251 * close routine must be called. If clmode == 0, the module
1252 1252 * or driver was never opened or the open failed, and so its close
1253 1253 * should not be called.
1254 1254 */
1255 1255 void
1256 1256 qdetach(queue_t *qp, int clmode, int flag, cred_t *crp, boolean_t is_remove)
1257 1257 {
1258 1258 queue_t *wqp = _WR(qp);
1259 1259 ASSERT(STREAM(qp)->sd_flag & (STRCLOSE|STWOPEN|STRPLUMB));
1260 1260
1261 1261 if (STREAM_NEEDSERVICE(STREAM(qp)))
1262 1262 stream_runservice(STREAM(qp));
1263 1263
1264 1264 if (clmode) {
1265 1265 /*
1266 1266 * Make sure that all the messages on the write side syncq are
1267 1267 * processed and nothing is left. Since we are closing, no new
1268 1268 * messages may appear there.
1269 1269 */
1270 1270 wait_q_syncq(wqp);
1271 1271
1272 1272 entersq(qp->q_syncq, SQ_OPENCLOSE);
1273 1273 if (is_remove) {
1274 1274 mutex_enter(QLOCK(qp));
1275 1275 qp->q_flag |= _QREMOVING;
1276 1276 mutex_exit(QLOCK(qp));
1277 1277 }
1278 1278 (*qp->q_qinfo->qi_qclose)(qp, flag, crp);
1279 1279 /*
1280 1280 * Check that qprocsoff() was actually called.
1281 1281 */
1282 1282 ASSERT((qp->q_flag & QWCLOSE) && (wqp->q_flag & QWCLOSE));
1283 1283
1284 1284 leavesq(qp->q_syncq, SQ_OPENCLOSE);
1285 1285 } else {
1286 1286 disable_svc(qp);
1287 1287 }
1288 1288
1289 1289 /*
1290 1290 * Allow any threads blocked in entersq to proceed and discover
1291 1291 * the QWCLOSE is set.
1292 1292 * Note: This assumes that all users of entersq check QWCLOSE.
1293 1293 * Currently runservice is the only entersq that can happen
1294 1294 * after removeq has finished.
1295 1295 * Removeq will have discarded all messages destined to the closing
1296 1296 * pair of queues from the syncq.
1297 1297 * NOTE: Calling a function inside an assert is unconventional.
1298 1298 * However, it does not cause any problem since flush_syncq() does
1299 1299 * not change any state except when it returns non-zero i.e.
1300 1300 * when the assert will trigger.
1301 1301 */
1302 1302 ASSERT(flush_syncq(qp->q_syncq, qp) == 0);
1303 1303 ASSERT(flush_syncq(wqp->q_syncq, wqp) == 0);
1304 1304 ASSERT((qp->q_flag & QPERMOD) ||
1305 1305 ((qp->q_syncq->sq_head == NULL) &&
1306 1306 (wqp->q_syncq->sq_head == NULL)));
1307 1307
1308 1308 /* release any fmodsw_impl_t structure held on behalf of the queue */
1309 1309 ASSERT(qp->q_fp != NULL || qp->q_flag & QISDRV);
1310 1310 if (qp->q_fp != NULL)
1311 1311 fmodsw_rele(qp->q_fp);
1312 1312
1313 1313 /* freeq removes us from the outer perimeter if any */
1314 1314 freeq(qp);
1315 1315 }
1316 1316
1317 1317 /* Prevent service procedures from being called */
1318 1318 void
1319 1319 disable_svc(queue_t *qp)
1320 1320 {
1321 1321 queue_t *wqp = _WR(qp);
1322 1322
1323 1323 ASSERT(qp->q_flag & QREADR);
1324 1324 mutex_enter(QLOCK(qp));
1325 1325 qp->q_flag |= QWCLOSE;
1326 1326 mutex_exit(QLOCK(qp));
1327 1327 mutex_enter(QLOCK(wqp));
1328 1328 wqp->q_flag |= QWCLOSE;
1329 1329 mutex_exit(QLOCK(wqp));
1330 1330 }
1331 1331
1332 1332 /* Allow service procedures to be called again */
1333 1333 void
1334 1334 enable_svc(queue_t *qp)
1335 1335 {
1336 1336 queue_t *wqp = _WR(qp);
1337 1337
1338 1338 ASSERT(qp->q_flag & QREADR);
1339 1339 mutex_enter(QLOCK(qp));
1340 1340 qp->q_flag &= ~QWCLOSE;
1341 1341 mutex_exit(QLOCK(qp));
1342 1342 mutex_enter(QLOCK(wqp));
1343 1343 wqp->q_flag &= ~QWCLOSE;
1344 1344 mutex_exit(QLOCK(wqp));
1345 1345 }
1346 1346
1347 1347 /*
1348 1348 * Remove queue from qhead/qtail if it is enabled.
1349 1349 * Only reset QENAB if the queue was removed from the runlist.
1350 1350 * A queue goes through these stages:
1351 1351 * It is on the service list and QENAB is set.
1352 1352 * It is removed from the service list but QENAB is still set.
1353 1353 * QENAB gets changed to QINSERVICE.
1354 1354 * QINSERVICE is reset (when the service procedure is done)
1355 1355 * Thus we can not reset QENAB unless we actually removed it from the service
1356 1356 * queue.
1357 1357 */
1358 1358 void
1359 1359 remove_runlist(queue_t *qp)
1360 1360 {
1361 1361 if (qp->q_flag & QENAB && qhead != NULL) {
1362 1362 queue_t *q_chase;
1363 1363 queue_t *q_curr;
1364 1364 int removed;
1365 1365
1366 1366 mutex_enter(&service_queue);
1367 1367 RMQ(qp, qhead, qtail, q_link, q_chase, q_curr, removed);
1368 1368 mutex_exit(&service_queue);
1369 1369 if (removed) {
1370 1370 STRSTAT(qremoved);
1371 1371 qp->q_flag &= ~QENAB;
1372 1372 }
1373 1373 }
1374 1374 }
1375 1375
1376 1376
1377 1377 /*
1378 1378 * Wait for any pending service processing to complete.
1379 1379 * The removal of queues from the runlist is not atomic with the
1380 1380 * clearing of the QENABLED flag and setting the INSERVICE flag.
1381 1381 * clearing of the QENABLED flag and the setting of the INSERVICE flag;
1382 1382 * consequently it is possible for remove_runlist() in strclose
1383 1383 * and not yet INSERVICE -> hence wait_svc needs to check QENABLED
1384 1384 * as well as INSERVICE.
1385 1385 */
1386 1386 void
1387 1387 wait_svc(queue_t *qp)
1388 1388 {
1389 1389 queue_t *wqp = _WR(qp);
1390 1390
1391 1391 ASSERT(qp->q_flag & QREADR);
1392 1392
1393 1393 /*
1394 1394 * Try to remove queues from qhead/qtail list.
1395 1395 */
1396 1396 if (qhead != NULL) {
1397 1397 remove_runlist(qp);
1398 1398 remove_runlist(wqp);
1399 1399 }
1400 1400 /*
1401 1401 * Wait till the syncqs associated with the queue disappear from the
1402 1402 * background processing list.
1403 1403 * This only needs to be done for non-PERMOD perimeters since
1404 1404 * for PERMOD perimeters the syncq may be shared and will only be freed
1405 1405 * when the last module/driver is unloaded.
1406 1406 * If for PERMOD perimeters queue was on the syncq list, removeq()
1407 1407 * should call propagate_syncq() or drain_syncq() for it. Both of these
1408 1408 * functions remove the queue from its syncq list, so sqthread will not
1409 1409 * try to access the queue.
1410 1410 */
1411 1411 if (!(qp->q_flag & QPERMOD)) {
1412 1412 syncq_t *rsq = qp->q_syncq;
1413 1413 syncq_t *wsq = wqp->q_syncq;
1414 1414
1415 1415 /*
1416 1416 * Disable rsq and wsq and wait for any background processing of
1417 1417 * syncq to complete.
1418 1418 */
1419 1419 wait_sq_svc(rsq);
1420 1420 if (wsq != rsq)
1421 1421 wait_sq_svc(wsq);
1422 1422 }
1423 1423
1424 1424 mutex_enter(QLOCK(qp));
1425 1425 while (qp->q_flag & (QINSERVICE|QENAB))
1426 1426 cv_wait(&qp->q_wait, QLOCK(qp));
1427 1427 mutex_exit(QLOCK(qp));
1428 1428 mutex_enter(QLOCK(wqp));
1429 1429 while (wqp->q_flag & (QINSERVICE|QENAB))
1430 1430 cv_wait(&wqp->q_wait, QLOCK(wqp));
1431 1431 mutex_exit(QLOCK(wqp));
1432 1432 }
1433 1433
1434 1434 /*
1435 1435 * Put ioctl data from userland buffer `arg' into the mblk chain `bp'.
1436 1436 * `flag' must always contain either K_TO_K or U_TO_K; STR_NOSIG may
1437 1437 * also be set, and is passed through to allocb_cred_wait().
1438 1438 *
1439 1439 * Returns errno on failure, zero on success.
1440 1440 */
1441 1441 int
1442 1442 putiocd(mblk_t *bp, char *arg, int flag, cred_t *cr)
1443 1443 {
1444 1444 mblk_t *tmp;
1445 1445 ssize_t count;
1446 1446 int error = 0;
1447 1447
1448 1448 ASSERT((flag & (U_TO_K | K_TO_K)) == U_TO_K ||
1449 1449 (flag & (U_TO_K | K_TO_K)) == K_TO_K);
1450 1450
1451 1451 if (bp->b_datap->db_type == M_IOCTL) {
1452 1452 count = ((struct iocblk *)bp->b_rptr)->ioc_count;
1453 1453 } else {
1454 1454 ASSERT(bp->b_datap->db_type == M_COPYIN);
1455 1455 count = ((struct copyreq *)bp->b_rptr)->cq_size;
1456 1456 }
1457 1457 /*
1458 1458 * strdoioctl validates ioc_count, so if this assert fails it
1459 1459 * cannot be due to user error.
1460 1460 */
1461 1461 ASSERT(count >= 0);
1462 1462
1463 1463 if ((tmp = allocb_cred_wait(count, (flag & STR_NOSIG), &error, cr,
1464 1464 curproc->p_pid)) == NULL) {
1465 1465 return (error);
1466 1466 }
1467 1467 error = strcopyin(arg, tmp->b_wptr, count, flag & (U_TO_K|K_TO_K));
1468 1468 if (error != 0) {
1469 1469 freeb(tmp);
1470 1470 return (error);
1471 1471 }
1472 1472 DB_CPID(tmp) = curproc->p_pid;
1473 1473 tmp->b_wptr += count;
1474 1474 bp->b_cont = tmp;
1475 1475
1476 1476 return (0);
1477 1477 }
1478 1478
1479 1479 /*
1480 1480 * Copy ioctl data to user-land. Return non-zero errno on failure,
1481 1481 * 0 for success.
1482 1482 */
1483 1483 int
1484 1484 getiocd(mblk_t *bp, char *arg, int copymode)
1485 1485 {
1486 1486 ssize_t count;
1487 1487 size_t n;
1488 1488 int error;
1489 1489
1490 1490 if (bp->b_datap->db_type == M_IOCACK)
1491 1491 count = ((struct iocblk *)bp->b_rptr)->ioc_count;
1492 1492 else {
1493 1493 ASSERT(bp->b_datap->db_type == M_COPYOUT);
1494 1494 count = ((struct copyreq *)bp->b_rptr)->cq_size;
1495 1495 }
1496 1496 ASSERT(count >= 0);
1497 1497
1498 1498 for (bp = bp->b_cont; bp && count;
1499 1499 count -= n, bp = bp->b_cont, arg += n) {
1500 1500 n = MIN(count, bp->b_wptr - bp->b_rptr);
1501 1501 error = strcopyout(bp->b_rptr, arg, n, copymode);
1502 1502 if (error)
1503 1503 return (error);
1504 1504 }
1505 1505 ASSERT(count == 0);
1506 1506 return (0);
1507 1507 }
1508 1508
1509 1509 /*
1510 1510 * Allocate a linkinfo entry given the write queue of the
1511 1511 * bottom module of the top stream and the write queue of the
1512 1512 * stream head of the bottom stream.
1513 1513 */
1514 1514 linkinfo_t *
1515 1515 alloclink(queue_t *qup, queue_t *qdown, file_t *fpdown)
1516 1516 {
1517 1517 linkinfo_t *linkp;
1518 1518
1519 1519 linkp = kmem_cache_alloc(linkinfo_cache, KM_SLEEP);
1520 1520
1521 1521 linkp->li_lblk.l_qtop = qup;
1522 1522 linkp->li_lblk.l_qbot = qdown;
1523 1523 linkp->li_fpdown = fpdown;
1524 1524
1525 1525 mutex_enter(&strresources);
1526 1526 linkp->li_next = linkinfo_list;
1527 1527 linkp->li_prev = NULL;
1528 1528 if (linkp->li_next)
1529 1529 linkp->li_next->li_prev = linkp;
1530 1530 linkinfo_list = linkp;
1531 1531 linkp->li_lblk.l_index = ++lnk_id;
1532 1532 ASSERT(lnk_id != 0); /* this should never wrap in practice */
1533 1533 mutex_exit(&strresources);
1534 1534
1535 1535 return (linkp);
1536 1536 }
1537 1537
1538 1538 /*
1539 1539 * Free a linkinfo entry.
1540 1540 */
1541 1541 void
1542 1542 lbfree(linkinfo_t *linkp)
1543 1543 {
1544 1544 mutex_enter(&strresources);
1545 1545 if (linkp->li_next)
1546 1546 linkp->li_next->li_prev = linkp->li_prev;
1547 1547 if (linkp->li_prev)
1548 1548 linkp->li_prev->li_next = linkp->li_next;
1549 1549 else
1550 1550 linkinfo_list = linkp->li_next;
1551 1551 mutex_exit(&strresources);
1552 1552
1553 1553 kmem_cache_free(linkinfo_cache, linkp);
1554 1554 }
1555 1555
1556 1556 /*
1557 1557 * Check for a potential linking cycle.
1558 1558 * Return 1 if a link will result in a cycle,
1559 1559 * and 0 otherwise.
1560 1560 */
1561 1561 int
1562 1562 linkcycle(stdata_t *upstp, stdata_t *lostp, str_stack_t *ss)
1563 1563 {
1564 1564 struct mux_node *np;
1565 1565 struct mux_edge *ep;
1566 1566 int i;
1567 1567 major_t lomaj;
1568 1568 major_t upmaj;
1569 1569 /*
1570 1570 * If the lower stream is a pipe/FIFO, return, since link
1571 1571 * cycles cannot happen on pipes/FIFOs.
1572 1572 */
1573 1573 if (lostp->sd_vnode->v_type == VFIFO)
1574 1574 return (0);
1575 1575
1576 1576 for (i = 0; i < ss->ss_devcnt; i++) {
1577 1577 np = &ss->ss_mux_nodes[i];
1578 1578 MUX_CLEAR(np);
1579 1579 }
1580 1580 lomaj = getmajor(lostp->sd_vnode->v_rdev);
1581 1581 upmaj = getmajor(upstp->sd_vnode->v_rdev);
1582 1582 np = &ss->ss_mux_nodes[lomaj];
1583 1583 for (;;) {
1584 1584 if (!MUX_DIDVISIT(np)) {
1585 1585 if (np->mn_imaj == upmaj)
1586 1586 return (1);
1587 1587 if (np->mn_outp == NULL) {
1588 1588 MUX_VISIT(np);
1589 1589 if (np->mn_originp == NULL)
1590 1590 return (0);
1591 1591 np = np->mn_originp;
1592 1592 continue;
1593 1593 }
1594 1594 MUX_VISIT(np);
1595 1595 np->mn_startp = np->mn_outp;
1596 1596 } else {
1597 1597 if (np->mn_startp == NULL) {
1598 1598 if (np->mn_originp == NULL)
1599 1599 return (0);
1600 1600 else {
1601 1601 np = np->mn_originp;
1602 1602 continue;
1603 1603 }
1604 1604 }
1605 1605 /*
1606 1606 * If ep->me_nodep is a FIFO (me_nodep == NULL),
1607 1607 * ignore the edge and move on. ep->me_nodep gets
1608 1608 * set to NULL in mux_addedge() if it is a FIFO.
1609 1609 *
1610 1610 */
1611 1611 ep = np->mn_startp;
1612 1612 np->mn_startp = ep->me_nextp;
1613 1613 if (ep->me_nodep == NULL)
1614 1614 continue;
1615 1615 ep->me_nodep->mn_originp = np;
1616 1616 np = ep->me_nodep;
1617 1617 }
1618 1618 }
1619 1619 }
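/*
 * Editorial note (not part of this webrev): linkcycle() is an iterative
 * depth-first search over the mux graph, starting at the lower stream's
 * major node. mn_startp tracks the next unexplored edge and mn_originp
 * the DFS parent used for backtracking. The proposed link would add an
 * edge upmaj -> lomaj, so a cycle will exist exactly when upmaj is
 * already reachable from lomaj; in that case 1 is returned.
 */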
1620 1620
1621 1621 /*
1622 1622 * Find linkinfo entry corresponding to the parameters.
1623 1623 */
1624 1624 linkinfo_t *
1625 1625 findlinks(stdata_t *stp, int index, int type, str_stack_t *ss)
1626 1626 {
1627 1627 linkinfo_t *linkp;
1628 1628 struct mux_edge *mep;
1629 1629 struct mux_node *mnp;
1630 1630 queue_t *qup;
1631 1631
1632 1632 mutex_enter(&strresources);
1633 1633 if ((type & LINKTYPEMASK) == LINKNORMAL) {
1634 1634 qup = getendq(stp->sd_wrq);
1635 1635 for (linkp = linkinfo_list; linkp; linkp = linkp->li_next) {
1636 1636 if ((qup == linkp->li_lblk.l_qtop) &&
1637 1637 (!index || (index == linkp->li_lblk.l_index))) {
1638 1638 mutex_exit(&strresources);
1639 1639 return (linkp);
1640 1640 }
1641 1641 }
1642 1642 } else {
1643 1643 ASSERT((type & LINKTYPEMASK) == LINKPERSIST);
1644 1644 mnp = &ss->ss_mux_nodes[getmajor(stp->sd_vnode->v_rdev)];
1645 1645 mep = mnp->mn_outp;
1646 1646 while (mep) {
1647 1647 if ((index == 0) || (index == mep->me_muxid))
1648 1648 break;
1649 1649 mep = mep->me_nextp;
1650 1650 }
1651 1651 if (!mep) {
1652 1652 mutex_exit(&strresources);
1653 1653 return (NULL);
1654 1654 }
1655 1655 for (linkp = linkinfo_list; linkp; linkp = linkp->li_next) {
1656 1656 if ((!linkp->li_lblk.l_qtop) &&
1657 1657 (mep->me_muxid == linkp->li_lblk.l_index)) {
1658 1658 mutex_exit(&strresources);
1659 1659 return (linkp);
1660 1660 }
1661 1661 }
1662 1662 }
1663 1663 mutex_exit(&strresources);
1664 1664 return (NULL);
1665 1665 }
1666 1666
1667 1667 /*
1668 1668 * Given a queue ptr, follow the chain of q_next pointers until you reach the
1669 1669 * last queue on the chain and return it.
1670 1670 */
1671 1671 queue_t *
1672 1672 getendq(queue_t *q)
1673 1673 {
1674 1674 ASSERT(q != NULL);
1675 1675 while (_SAMESTR(q))
1676 1676 q = q->q_next;
1677 1677 return (q);
1678 1678 }
1679 1679
1680 1680 /*
1681 1681 * Wait for the syncq count to drop to zero.
1682 1682 * sq could be either outer or inner.
1683 1683 */
1684 1684
1685 1685 static void
1686 1686 wait_syncq(syncq_t *sq)
1687 1687 {
1688 1688 uint16_t count;
1689 1689
1690 1690 mutex_enter(SQLOCK(sq));
1691 1691 count = sq->sq_count;
1692 1692 SQ_PUTLOCKS_ENTER(sq);
1693 1693 SUM_SQ_PUTCOUNTS(sq, count);
1694 1694 while (count != 0) {
1695 1695 sq->sq_flags |= SQ_WANTWAKEUP;
1696 1696 SQ_PUTLOCKS_EXIT(sq);
1697 1697 cv_wait(&sq->sq_wait, SQLOCK(sq));
1698 1698 count = sq->sq_count;
1699 1699 SQ_PUTLOCKS_ENTER(sq);
1700 1700 SUM_SQ_PUTCOUNTS(sq, count);
1701 1701 }
1702 1702 SQ_PUTLOCKS_EXIT(sq);
1703 1703 mutex_exit(SQLOCK(sq));
1704 1704 }
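/*
 * Note that the count wait_syncq() waits on is distributed: for
 * concurrent-put syncqs a claim may live in a per-CPU put count rather
 * than in sq_count itself, which is why the total is re-summed with
 * SUM_SQ_PUTCOUNTS (with the put locks held) on every iteration of the
 * cv_wait() loop instead of testing sq_count alone.
 */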
1705 1705
1706 1706 /*
1707 1707 * Wait while there are any messages for the queue in its syncq.
1708 1708 */
1709 1709 static void
1710 1710 wait_q_syncq(queue_t *q)
1711 1711 {
1712 1712 if ((q->q_sqflags & Q_SQQUEUED) || (q->q_syncqmsgs > 0)) {
1713 1713 syncq_t *sq = q->q_syncq;
1714 1714
1715 1715 mutex_enter(SQLOCK(sq));
1716 1716 while ((q->q_sqflags & Q_SQQUEUED) || (q->q_syncqmsgs > 0)) {
1717 1717 sq->sq_flags |= SQ_WANTWAKEUP;
1718 1718 cv_wait(&sq->sq_wait, SQLOCK(sq));
1719 1719 }
1720 1720 mutex_exit(SQLOCK(sq));
1721 1721 }
1722 1722 }
1723 1723
1724 1724
1725 1725 int
1726 1726 mlink_file(vnode_t *vp, int cmd, struct file *fpdown, cred_t *crp, int *rvalp,
1727 1727 int lhlink)
1728 1728 {
1729 1729 struct stdata *stp;
1730 1730 struct strioctl strioc;
1731 1731 struct linkinfo *linkp;
1732 1732 struct stdata *stpdown;
1733 1733 struct streamtab *str;
1734 1734 queue_t *passq;
1735 1735 syncq_t *passyncq;
1736 1736 queue_t *rq;
1737 1737 cdevsw_impl_t *dp;
1738 1738 uint32_t qflag;
1739 1739 uint32_t sqtype;
1740 1740 perdm_t *dmp;
1741 1741 int error = 0;
1742 1742 netstack_t *ns;
1743 1743 str_stack_t *ss;
1744 1744
1745 1745 stp = vp->v_stream;
1746 1746 TRACE_1(TR_FAC_STREAMS_FR,
1747 1747 TR_I_LINK, "I_LINK/I_PLINK:stp %p", stp);
1748 1748 /*
1749 1749 * Test for invalid upper stream
1750 1750 */
1751 1751 if (stp->sd_flag & STRHUP) {
1752 1752 return (ENXIO);
1753 1753 }
1754 1754 if (vp->v_type == VFIFO) {
1755 1755 return (EINVAL);
1756 1756 }
1757 1757 if (stp->sd_strtab == NULL) {
1758 1758 return (EINVAL);
1759 1759 }
1760 1760 if (!stp->sd_strtab->st_muxwinit) {
1761 1761 return (EINVAL);
1762 1762 }
1763 1763 if (fpdown == NULL) {
1764 1764 return (EBADF);
1765 1765 }
1766 1766 ns = netstack_find_by_cred(crp);
1767 1767 ASSERT(ns != NULL);
1768 1768 ss = ns->netstack_str;
1769 1769 ASSERT(ss != NULL);
1770 1770
1771 1771 if (getmajor(stp->sd_vnode->v_rdev) >= ss->ss_devcnt) {
1772 1772 netstack_rele(ss->ss_netstack);
1773 1773 return (EINVAL);
1774 1774 }
1775 1775 mutex_enter(&muxifier);
1776 1776 if (stp->sd_flag & STPLEX) {
1777 1777 mutex_exit(&muxifier);
1778 1778 netstack_rele(ss->ss_netstack);
1779 1779 return (ENXIO);
1780 1780 }
1781 1781
1782 1782 /*
1783 1783 * Test for invalid lower stream.
1784 1784 * The check for the v_type != VFIFO and having a major
1785 1785 * number not >= devcnt is done to avoid problems with
1786 1786 	 * adding a mux_node entry past the end of mux_nodes[].
1787 1787 	 * For FIFOs we don't add an entry so this isn't a
1788 1788 * problem.
1789 1789 */
1790 1790 if (((stpdown = fpdown->f_vnode->v_stream) == NULL) ||
1791 1791 (stpdown == stp) || (stpdown->sd_flag &
1792 1792 (STPLEX|STRHUP|STRDERR|STWRERR|IOCWAIT|STRPLUMB)) ||
1793 1793 ((stpdown->sd_vnode->v_type != VFIFO) &&
1794 1794 (getmajor(stpdown->sd_vnode->v_rdev) >= ss->ss_devcnt)) ||
1795 1795 linkcycle(stp, stpdown, ss)) {
1796 1796 mutex_exit(&muxifier);
1797 1797 netstack_rele(ss->ss_netstack);
1798 1798 return (EINVAL);
1799 1799 }
1800 1800 TRACE_1(TR_FAC_STREAMS_FR,
1801 1801 TR_STPDOWN, "stpdown:%p", stpdown);
1802 1802 rq = getendq(stp->sd_wrq);
1803 1803 if (cmd == I_PLINK)
1804 1804 rq = NULL;
1805 1805
1806 1806 linkp = alloclink(rq, stpdown->sd_wrq, fpdown);
1807 1807
1808 1808 strioc.ic_cmd = cmd;
1809 1809 strioc.ic_timout = INFTIM;
1810 1810 strioc.ic_len = sizeof (struct linkblk);
1811 1811 strioc.ic_dp = (char *)&linkp->li_lblk;
1812 1812
1813 1813 /*
1814 1814 * STRPLUMB protects plumbing changes and should be set before
1815 1815 * link_addpassthru()/link_rempassthru() are called, so it is set here
1816 1816 	 * and cleared at the end of mlink when the passthru queue is removed.
1817 1817 	 * Setting of STRPLUMB prevents reopens of the stream while the passthru
1818 1818 	 * queue is in place (it is not a proper module and doesn't have an open
1819 1819 * entry point).
1820 1820 *
1821 1821 * STPLEX prevents any threads from entering the stream from above. It
1822 1822 * can't be set before the call to link_addpassthru() because putnext
1823 1823 * from below may cause stream head I/O routines to be called and these
1824 1824 * routines assert that STPLEX is not set. After link_addpassthru()
1825 1825 * nothing may come from below since the pass queue syncq is blocked.
1826 1826 * Note also that STPLEX should be cleared before the call to
1827 1827 * link_rempassthru() since when messages start flowing to the stream
1828 1828 * head (e.g. because of message propagation from the pass queue) stream
1829 1829 * head I/O routines may be called with STPLEX flag set.
1830 1830 *
1831 1831 * When STPLEX is set, nothing may come into the stream from above and
1832 1832 * it is safe to do a setq which will change stream head. So, the
1833 1833 * correct sequence of actions is:
1834 1834 *
1835 1835 * 1) Set STRPLUMB
1836 1836 * 2) Call link_addpassthru()
1837 1837 * 3) Set STPLEX
1838 1838 * 4) Call setq and update the stream state
1839 1839 * 5) Clear STPLEX
1840 1840 * 6) Call link_rempassthru()
1841 1841 * 7) Clear STRPLUMB
1842 1842 *
1843 1843 * The same sequence applies to munlink() code.
1844 1844 */
1845 1845 mutex_enter(&stpdown->sd_lock);
1846 1846 stpdown->sd_flag |= STRPLUMB;
1847 1847 mutex_exit(&stpdown->sd_lock);
1848 1848 /*
1849 1849 * Add passthru queue below lower mux. This will block
1850 1850 	 * syncqs of the lower mux's read queue during I_LINK/I_UNLINK.
1851 1851 */
1852 1852 passq = link_addpassthru(stpdown);
1853 1853
1854 1854 mutex_enter(&stpdown->sd_lock);
1855 1855 stpdown->sd_flag |= STPLEX;
1856 1856 mutex_exit(&stpdown->sd_lock);
1857 1857
1858 1858 rq = _RD(stpdown->sd_wrq);
1859 1859 /*
1860 1860 * There may be messages in the streamhead's syncq due to messages
1861 1861 * that arrived before link_addpassthru() was done. To avoid
1862 1862 	 * background processing of the syncq happening simultaneously with
1863 1863 	 * setq processing, we disable the streamhead syncq and wait until
1864 1864 	 * any existing background thread finishes working on it.
1865 1865 */
1866 1866 wait_sq_svc(rq->q_syncq);
1867 1867 passyncq = passq->q_syncq;
1868 1868 if (!(passyncq->sq_flags & SQ_BLOCKED))
1869 1869 blocksq(passyncq, SQ_BLOCKED, 0);
1870 1870
1871 1871 ASSERT((rq->q_flag & QMT_TYPEMASK) == QMTSAFE);
1872 1872 ASSERT(rq->q_syncq == SQ(rq) && _WR(rq)->q_syncq == SQ(rq));
1873 1873 rq->q_ptr = _WR(rq)->q_ptr = NULL;
1874 1874
1875 1875 /* setq might sleep in allocator - avoid holding locks. */
1876 1876 /* Note: we are holding muxifier here. */
1877 1877
1878 1878 str = stp->sd_strtab;
1879 1879 dp = &devimpl[getmajor(vp->v_rdev)];
1880 1880 ASSERT(dp->d_str == str);
1881 1881
1882 1882 qflag = dp->d_qflag;
1883 1883 sqtype = dp->d_sqtype;
1884 1884
1885 1885 /* create perdm_t if needed */
1886 1886 if (NEED_DM(dp->d_dmp, qflag))
1887 1887 dp->d_dmp = hold_dm(str, qflag, sqtype);
1888 1888
1889 1889 dmp = dp->d_dmp;
1890 1890
1891 1891 setq(rq, str->st_muxrinit, str->st_muxwinit, dmp, qflag, sqtype,
1892 1892 B_TRUE);
1893 1893
1894 1894 /*
1895 1895 * XXX Remove any "odd" messages from the queue.
1896 1896 * Keep only M_DATA, M_PROTO, M_PCPROTO.
1897 1897 */
1898 1898 error = strdoioctl(stp, &strioc, FNATIVE,
1899 1899 K_TO_K | STR_NOERROR | STR_NOSIG, crp, rvalp);
1900 1900 if (error != 0) {
1901 1901 lbfree(linkp);
1902 1902
1903 1903 if (!(passyncq->sq_flags & SQ_BLOCKED))
1904 1904 blocksq(passyncq, SQ_BLOCKED, 0);
1905 1905 /*
1906 1906 * Restore the stream head queue and then remove
1907 1907 * the passq. Turn off STPLEX before we turn on
1908 1908 * the stream by removing the passq.
1909 1909 */
1910 1910 rq->q_ptr = _WR(rq)->q_ptr = stpdown;
1911 1911 setq(rq, &strdata, &stwdata, NULL, QMTSAFE, SQ_CI|SQ_CO,
1912 1912 B_TRUE);
1913 1913
1914 1914 mutex_enter(&stpdown->sd_lock);
1915 1915 stpdown->sd_flag &= ~STPLEX;
1916 1916 mutex_exit(&stpdown->sd_lock);
1917 1917
1918 1918 link_rempassthru(passq);
1919 1919
1920 1920 mutex_enter(&stpdown->sd_lock);
1921 1921 stpdown->sd_flag &= ~STRPLUMB;
1922 1922 /* Wakeup anyone waiting for STRPLUMB to clear. */
1923 1923 cv_broadcast(&stpdown->sd_monitor);
1924 1924 mutex_exit(&stpdown->sd_lock);
1925 1925
1926 1926 mutex_exit(&muxifier);
1927 1927 netstack_rele(ss->ss_netstack);
1928 1928 return (error);
1929 1929 }
1930 1930 mutex_enter(&fpdown->f_tlock);
1931 1931 fpdown->f_count++;
1932 1932 mutex_exit(&fpdown->f_tlock);
1933 1933
1934 1934 /*
1935 1935 	 * If we've made it here, the linkage is all set up, so we should also
1936 1936 	 * set up the layered driver linkages.
1937 1937 */
1938 1938
1939 1939 ASSERT((cmd == I_LINK) || (cmd == I_PLINK));
1940 1940 if (cmd == I_LINK) {
1941 1941 ldi_mlink_fp(stp, fpdown, lhlink, LINKNORMAL);
1942 1942 } else {
1943 1943 ldi_mlink_fp(stp, fpdown, lhlink, LINKPERSIST);
1944 1944 }
1945 1945
1946 1946 link_rempassthru(passq);
1947 1947
1948 1948 mux_addedge(stp, stpdown, linkp->li_lblk.l_index, ss);
1949 1949
1950 1950 /*
1951 1951 * Mark the upper stream as having dependent links
1952 1952 * so that strclose can clean it up.
1953 1953 */
1954 1954 if (cmd == I_LINK) {
1955 1955 mutex_enter(&stp->sd_lock);
1956 1956 stp->sd_flag |= STRHASLINKS;
1957 1957 mutex_exit(&stp->sd_lock);
1958 1958 }
1959 1959 /*
1960 1960 * Wake up any other processes that may have been
1961 1961 * waiting on the lower stream. These will all
1962 1962 * error out.
1963 1963 */
1964 1964 mutex_enter(&stpdown->sd_lock);
1965 1965 /* The passthru module is removed so we may release STRPLUMB */
1966 1966 stpdown->sd_flag &= ~STRPLUMB;
1967 1967 cv_broadcast(&rq->q_wait);
1968 1968 cv_broadcast(&_WR(rq)->q_wait);
1969 1969 cv_broadcast(&stpdown->sd_monitor);
1970 1970 mutex_exit(&stpdown->sd_lock);
1971 1971 mutex_exit(&muxifier);
1972 1972 *rvalp = linkp->li_lblk.l_index;
1973 1973 netstack_rele(ss->ss_netstack);
1974 1974 return (0);
1975 1975 }
1976 1976
1977 1977 int
1978 1978 mlink(vnode_t *vp, int cmd, int arg, cred_t *crp, int *rvalp, int lhlink)
1979 1979 {
1980 1980 int ret;
1981 1981 struct file *fpdown;
1982 1982
1983 1983 fpdown = getf(arg);
1984 1984 ret = mlink_file(vp, cmd, fpdown, crp, rvalp, lhlink);
1985 1985 if (fpdown != NULL)
1986 1986 releasef(arg);
1987 1987 return (ret);
1988 1988 }
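/*
 * mlink() is a thin wrapper that resolves the caller's file descriptor to
 * a held file_t for mlink_file(); callers that already hold a file_t (such
 * as the LDI paths) use mlink_file() directly. The hold pattern in outline:
 *
 *	fp = getf(fd);			// NULL for a bad fd; else fp is held
 *	if (fp != NULL) {
 *		... operate on fp ...
 *		releasef(fd);		// drop the hold taken by getf()
 *	}
 */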
1989 1989
1990 1990 /*
1991 1991 * Unlink a multiplexor link. Stp is the controlling stream for the
1992 1992 * link, and linkp points to the link's entry in the linkinfo list.
1993 1993 * The muxifier lock must be held on entry and is dropped on exit.
1994 1994 *
1995 1995  * NOTE: Currently it is assumed that the mux will process all the messages
1996 1996  * sitting on its queue before ACKing the UNLINK. It is the responsibility
1997 1997  * of the mux to handle all the messages that arrive before UNLINK.
1998 1998  * If the mux has to send down messages on its lower stream before
1999 1999  * ACKing I_UNLINK, then it *should* know to handle messages even
2000 2000  * after the UNLINK is acked (actually it should be able to handle them until we
2001 2001 * re-block the read side of the pass queue here). If the mux does not
2002 2002 * open up the lower stream, any messages that arrive during UNLINK
2003 2003 * will be put in the stream head. In the case of lower stream opening
2004 2004 * up, some messages might land in the stream head depending on when
2005 2005 * the message arrived and when the read side of the pass queue was
2006 2006 * re-blocked.
2007 2007 */
2008 2008 int
2009 2009 munlink(stdata_t *stp, linkinfo_t *linkp, int flag, cred_t *crp, int *rvalp,
2010 2010 str_stack_t *ss)
2011 2011 {
2012 2012 struct strioctl strioc;
2013 2013 struct stdata *stpdown;
2014 2014 queue_t *rq, *wrq;
2015 2015 queue_t *passq;
2016 2016 syncq_t *passyncq;
2017 2017 int error = 0;
2018 2018 file_t *fpdown;
2019 2019
2020 2020 ASSERT(MUTEX_HELD(&muxifier));
2021 2021
2022 2022 stpdown = linkp->li_fpdown->f_vnode->v_stream;
2023 2023
2024 2024 /*
2025 2025 * See the comment in mlink() concerning STRPLUMB/STPLEX flags.
2026 2026 */
2027 2027 mutex_enter(&stpdown->sd_lock);
2028 2028 stpdown->sd_flag |= STRPLUMB;
2029 2029 mutex_exit(&stpdown->sd_lock);
2030 2030
2031 2031 /*
2032 2032 * Add passthru queue below lower mux. This will block
2033 2033 	 * syncqs of the lower mux's read queue during I_LINK/I_UNLINK.
2034 2034 */
2035 2035 passq = link_addpassthru(stpdown);
2036 2036
2037 2037 if ((flag & LINKTYPEMASK) == LINKNORMAL)
2038 2038 strioc.ic_cmd = I_UNLINK;
2039 2039 else
2040 2040 strioc.ic_cmd = I_PUNLINK;
2041 2041 strioc.ic_timout = INFTIM;
2042 2042 strioc.ic_len = sizeof (struct linkblk);
2043 2043 strioc.ic_dp = (char *)&linkp->li_lblk;
2044 2044
2045 2045 error = strdoioctl(stp, &strioc, FNATIVE,
2046 2046 K_TO_K | STR_NOERROR | STR_NOSIG, crp, rvalp);
2047 2047
2048 2048 /*
2049 2049 * If there was an error and this is not called via strclose,
2050 2050 * return to the user. Otherwise, pretend there was no error
2051 2051 * and close the link.
2052 2052 */
2053 2053 if (error) {
2054 2054 if (flag & LINKCLOSE) {
2055 2055 cmn_err(CE_WARN, "KERNEL: munlink: could not perform "
2056 2056 "unlink ioctl, closing anyway (%d)\n", error);
2057 2057 } else {
2058 2058 link_rempassthru(passq);
2059 2059 mutex_enter(&stpdown->sd_lock);
2060 2060 stpdown->sd_flag &= ~STRPLUMB;
2061 2061 cv_broadcast(&stpdown->sd_monitor);
2062 2062 mutex_exit(&stpdown->sd_lock);
2063 2063 mutex_exit(&muxifier);
2064 2064 return (error);
2065 2065 }
2066 2066 }
2067 2067
2068 2068 mux_rmvedge(stp, linkp->li_lblk.l_index, ss);
2069 2069 fpdown = linkp->li_fpdown;
2070 2070 lbfree(linkp);
2071 2071
2072 2072 /*
2073 2073 * We go ahead and drop muxifier here--it's a nasty global lock that
2074 2074 	 * can slow others down. It's okay to do so since attempts to mlink() this
2075 2075 * stream will be stopped because STPLEX is still set in the stdata
2076 2076 * structure, and munlink() is stopped because mux_rmvedge() and
2077 2077 * lbfree() have removed it from mux_nodes[] and linkinfo_list,
2078 2078 * respectively. Note that we defer the closef() of fpdown until
2079 2079 * after we drop muxifier since strclose() can call munlinkall().
2080 2080 */
2081 2081 mutex_exit(&muxifier);
2082 2082
2083 2083 wrq = stpdown->sd_wrq;
2084 2084 rq = _RD(wrq);
2085 2085
2086 2086 /*
2087 2087 * Get rid of outstanding service procedure runs, before we make
2088 2088 * it a stream head, since a stream head doesn't have any service
2089 2089 * procedure.
2090 2090 */
2091 2091 disable_svc(rq);
2092 2092 wait_svc(rq);
2093 2093
2094 2094 /*
2095 2095 * Since we don't disable the syncq for QPERMOD, we wait for whatever
2096 2096 	 * is queued up to be finished. The mux should take care that nothing is
2097 2097 	 * sent down to this queue. We should do it now as we're going to block
2098 2098 * passyncq if it was unblocked.
2099 2099 */
2100 2100 if (wrq->q_flag & QPERMOD) {
2101 2101 syncq_t *sq = wrq->q_syncq;
2102 2102
2103 2103 mutex_enter(SQLOCK(sq));
2104 2104 while (wrq->q_sqflags & Q_SQQUEUED) {
2105 2105 sq->sq_flags |= SQ_WANTWAKEUP;
2106 2106 cv_wait(&sq->sq_wait, SQLOCK(sq));
2107 2107 }
2108 2108 mutex_exit(SQLOCK(sq));
2109 2109 }
2110 2110 passyncq = passq->q_syncq;
2111 2111 if (!(passyncq->sq_flags & SQ_BLOCKED)) {
2112 2112
2113 2113 syncq_t *sq, *outer;
2114 2114
2115 2115 /*
2116 2116 * Messages could be flowing from underneath. We will
2117 2117 * block the read side of the passq. This would be
2118 2118 * sufficient for QPAIR and QPERQ muxes to ensure
2119 2119 * that no data is flowing up into this queue
2120 2120 * and hence no thread active in this instance of
2121 2121 * lower mux. But for QPERMOD and QMTOUTPERIM there
2122 2122 * could be messages on the inner and outer/inner
2123 2123 * syncqs respectively. We will wait for them to drain.
2124 2124 		 * Because passq is blocked, messages end up in the syncq,
2125 2125 		 * and qfill_syncq could possibly end up setting QFULL,
2126 2126 		 * which will access rq->q_flag. Hence, we have to
2127 2127 * acquire the QLOCK in setq.
2128 2128 *
2129 2129 		 * XXX Messages can also flow from the top into this
2130 2130 		 * queue even though the unlink is over (e.g. some instance
2131 2131 		 * of putnext() called from the top that has still not
2132 2132 		 * accessed this queue, and also putq(lowerq)?).
2133 2133 		 * Solution: how about blocking the l_qtop queue?
2134 2134 		 * Do we really care about such pure D_MP muxes?
2135 2135 */
2136 2136
2137 2137 blocksq(passyncq, SQ_BLOCKED, 0);
2138 2138
2139 2139 sq = rq->q_syncq;
2140 2140 if ((outer = sq->sq_outer) != NULL) {
2141 2141
2142 2142 /*
2143 2143 			 * We just have to wait for the outer sq_count to
2144 2144 			 * drop to zero. As this does not prevent new
2145 2145 			 * messages from entering the outer perimeter, this
2146 2146 			 * is subject to starvation.
2147 2147 			 *
2148 2148 			 * NOTE: Because of the blocksq above, messages could
2149 2149 * be in the inner syncq only because of some
2150 2150 * thread holding the outer perimeter exclusively.
2151 2151 * Hence it would be sufficient to wait for the
2152 2152 * exclusive holder of the outer perimeter to drain
2153 2153 * the inner and outer syncqs. But we will not depend
2154 2154 * on this feature and hence check the inner syncqs
2155 2155 * separately.
2156 2156 */
2157 2157 wait_syncq(outer);
2158 2158 }
2159 2159
2160 2160
2161 2161 /*
2162 2162 * There could be messages destined for
2163 2163 * this queue. Let the exclusive holder
2164 2164 * drain it.
2165 2165 */
2166 2166
2167 2167 wait_syncq(sq);
2168 2168 ASSERT((rq->q_flag & QPERMOD) ||
2169 2169 ((rq->q_syncq->sq_head == NULL) &&
2170 2170 (_WR(rq)->q_syncq->sq_head == NULL)));
2171 2171 }
2172 2172
2173 2173 /*
2174 2174 * We haven't taken care of QPERMOD case yet. QPERMOD is a special
2175 2175 	 * case as we don't disable its syncq or remove it from the syncq
2176 2176 * service list.
2177 2177 */
2178 2178 if (rq->q_flag & QPERMOD) {
2179 2179 syncq_t *sq = rq->q_syncq;
2180 2180
2181 2181 mutex_enter(SQLOCK(sq));
2182 2182 while (rq->q_sqflags & Q_SQQUEUED) {
2183 2183 sq->sq_flags |= SQ_WANTWAKEUP;
2184 2184 cv_wait(&sq->sq_wait, SQLOCK(sq));
2185 2185 }
2186 2186 mutex_exit(SQLOCK(sq));
2187 2187 }
2188 2188
2189 2189 /*
2190 2190 	 * flush_syncq changes states only when there are some messages to
2191 2191 	 * free, i.e. when it returns a non-zero value.
2192 2192 */
2193 2193 ASSERT(flush_syncq(rq->q_syncq, rq) == 0);
2194 2194 ASSERT(flush_syncq(wrq->q_syncq, wrq) == 0);
2195 2195
2196 2196 /*
2197 2197 * Nobody else should know about this queue now.
2198 2198 * If the mux did not process the messages before
2199 2199 * acking the I_UNLINK, free them now.
2200 2200 */
2201 2201
2202 2202 flushq(rq, FLUSHALL);
2203 2203 flushq(_WR(rq), FLUSHALL);
2204 2204
2205 2205 /*
2206 2206 * Convert the mux lower queue into a stream head queue.
2207 2207 * Turn off STPLEX before we turn on the stream by removing the passq.
2208 2208 */
2209 2209 rq->q_ptr = wrq->q_ptr = stpdown;
2210 2210 setq(rq, &strdata, &stwdata, NULL, QMTSAFE, SQ_CI|SQ_CO, B_TRUE);
2211 2211
2212 2212 ASSERT((rq->q_flag & QMT_TYPEMASK) == QMTSAFE);
2213 2213 ASSERT(rq->q_syncq == SQ(rq) && _WR(rq)->q_syncq == SQ(rq));
2214 2214
2215 2215 enable_svc(rq);
2216 2216
2217 2217 /*
2218 2218 * Now it is a proper stream, so STPLEX is cleared. But STRPLUMB still
2219 2219 	 * needs to be set to prevent reopen() of the stream - such a reopen may
2220 2220 	 * try to call the non-existent pass queue open routine and panic.
2221 2221 */
2222 2222 mutex_enter(&stpdown->sd_lock);
2223 2223 stpdown->sd_flag &= ~STPLEX;
2224 2224 mutex_exit(&stpdown->sd_lock);
2225 2225
2226 2226 ASSERT(((flag & LINKTYPEMASK) == LINKNORMAL) ||
2227 2227 ((flag & LINKTYPEMASK) == LINKPERSIST));
2228 2228
2229 2229 /* clean up the layered driver linkages */
2230 2230 if ((flag & LINKTYPEMASK) == LINKNORMAL) {
2231 2231 ldi_munlink_fp(stp, fpdown, LINKNORMAL);
2232 2232 } else {
2233 2233 ldi_munlink_fp(stp, fpdown, LINKPERSIST);
2234 2234 }
2235 2235
2236 2236 link_rempassthru(passq);
2237 2237
2238 2238 /*
2239 2239 * Now all plumbing changes are finished and STRPLUMB is no
2240 2240 * longer needed.
2241 2241 */
2242 2242 mutex_enter(&stpdown->sd_lock);
2243 2243 stpdown->sd_flag &= ~STRPLUMB;
2244 2244 cv_broadcast(&stpdown->sd_monitor);
2245 2245 mutex_exit(&stpdown->sd_lock);
2246 2246
2247 2247 (void) closef(fpdown);
2248 2248 return (0);
2249 2249 }
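/*
 * Taken together, munlink() mirrors the sequence documented above
 * mlink_file(): set STRPLUMB, add the passthru queue, issue the
 * I_UNLINK/I_PUNLINK ioctl, drain service routines and syncqs, setq()
 * the lower queue pair back to the stream head qinit structures, clear
 * STPLEX, remove the passthru queue, and finally clear STRPLUMB.
 */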
2250 2250
2251 2251 /*
2252 2252 * Unlink all multiplexor links for which stp is the controlling stream.
2253 2253 * Return 0, or a non-zero errno on failure.
2254 2254 */
2255 2255 int
2256 2256 munlinkall(stdata_t *stp, int flag, cred_t *crp, int *rvalp, str_stack_t *ss)
2257 2257 {
2258 2258 linkinfo_t *linkp;
2259 2259 int error = 0;
2260 2260
2261 2261 mutex_enter(&muxifier);
2262 2262 while (linkp = findlinks(stp, 0, flag, ss)) {
2263 2263 /*
2264 2264 * munlink() releases the muxifier lock.
2265 2265 */
2266 2266 if (error = munlink(stp, linkp, flag, crp, rvalp, ss))
2267 2267 return (error);
2268 2268 mutex_enter(&muxifier);
2269 2269 }
2270 2270 mutex_exit(&muxifier);
2271 2271 return (0);
2272 2272 }
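/*
 * Since munlink() exits muxifier, munlinkall() must re-acquire the lock
 * and rescan from the head of the list on each pass; a sketch of this
 * drop-and-rescan idiom (names illustrative):
 *
 *	mutex_enter(&lock);
 *	while ((item = find_first(...)) != NULL) {
 *		if ((err = process(item)) != 0)	// process() drops lock
 *			return (err);
 *		mutex_enter(&lock);		// re-acquire and rescan
 *	}
 *	mutex_exit(&lock);
 */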
2273 2273
2274 2274 /*
2275 2275 * A multiplexor link has been made. Add an
2276 2276 * edge to the directed graph.
2277 2277 */
2278 2278 void
2279 2279 mux_addedge(stdata_t *upstp, stdata_t *lostp, int muxid, str_stack_t *ss)
2280 2280 {
2281 2281 struct mux_node *np;
2282 2282 struct mux_edge *ep;
2283 2283 major_t upmaj;
2284 2284 major_t lomaj;
2285 2285
2286 2286 upmaj = getmajor(upstp->sd_vnode->v_rdev);
2287 2287 lomaj = getmajor(lostp->sd_vnode->v_rdev);
2288 2288 np = &ss->ss_mux_nodes[upmaj];
2289 2289 if (np->mn_outp) {
2290 2290 ep = np->mn_outp;
2291 2291 while (ep->me_nextp)
2292 2292 ep = ep->me_nextp;
2293 2293 ep->me_nextp = kmem_alloc(sizeof (struct mux_edge), KM_SLEEP);
2294 2294 ep = ep->me_nextp;
2295 2295 } else {
2296 2296 np->mn_outp = kmem_alloc(sizeof (struct mux_edge), KM_SLEEP);
2297 2297 ep = np->mn_outp;
2298 2298 }
2299 2299 ep->me_nextp = NULL;
2300 2300 ep->me_muxid = muxid;
2301 2301 /*
2302 2302 * Save the dev_t for the purposes of str_stack_shutdown.
2303 2303 * str_stack_shutdown assumes that the device allows reopen, since
2304 2304 * this dev_t is the one after any cloning by xx_open().
2305 2305 * Would prefer finding the dev_t from before any cloning,
2306 2306 * but specfs doesn't retain that.
2307 2307 */
2308 2308 ep->me_dev = upstp->sd_vnode->v_rdev;
2309 2309 if (lostp->sd_vnode->v_type == VFIFO)
2310 2310 ep->me_nodep = NULL;
2311 2311 else
2312 2312 ep->me_nodep = &ss->ss_mux_nodes[lomaj];
2313 2313 }
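/*
 * The per-stack mux configuration is an adjacency-list digraph:
 * ss_mux_nodes[] is indexed by major number, and each node keeps a singly
 * linked list of out-edges that mux_addedge() appends to. After linking
 * muxid 5 and then muxid 7 under the same upper driver:
 *
 *	ss_mux_nodes[upmaj].mn_outp --> { muxid 5 } --> { muxid 7 } --> NULL
 *	    each edge's me_nodep --> &ss_mux_nodes[lower major]
 *	    (me_nodep == NULL for FIFO lower streams, as noted above)
 */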
2314 2314
2315 2315 /*
2316 2316 * A multiplexor link has been removed. Remove the
2317 2317 * edge in the directed graph.
2318 2318 */
2319 2319 void
2320 2320 mux_rmvedge(stdata_t *upstp, int muxid, str_stack_t *ss)
2321 2321 {
2322 2322 struct mux_node *np;
2323 2323 struct mux_edge *ep;
2324 2324 struct mux_edge *pep = NULL;
2325 2325 major_t upmaj;
2326 2326
2327 2327 upmaj = getmajor(upstp->sd_vnode->v_rdev);
2328 2328 np = &ss->ss_mux_nodes[upmaj];
2329 2329 ASSERT(np->mn_outp != NULL);
2330 2330 ep = np->mn_outp;
2331 2331 while (ep) {
2332 2332 if (ep->me_muxid == muxid) {
2333 2333 if (pep)
2334 2334 pep->me_nextp = ep->me_nextp;
2335 2335 else
2336 2336 np->mn_outp = ep->me_nextp;
2337 2337 kmem_free(ep, sizeof (struct mux_edge));
2338 2338 return;
2339 2339 }
2340 2340 pep = ep;
2341 2341 ep = ep->me_nextp;
2342 2342 }
2343 2343 ASSERT(0); /* should not reach here */
2344 2344 }
2345 2345
2346 2346 /*
2347 2347 * Translate the device flags (from conf.h) to the corresponding
2348 2348 * qflag and sq_flag (type) values.
2349 2349 */
2350 2350 int
2351 2351 devflg_to_qflag(struct streamtab *stp, uint32_t devflag, uint32_t *qflagp,
2352 2352 uint32_t *sqtypep)
2353 2353 {
2354 2354 uint32_t qflag = 0;
2355 2355 uint32_t sqtype = 0;
2356 2356
2357 2357 if (devflag & _D_OLD)
2358 2358 goto bad;
2359 2359
2360 2360 /* Inner perimeter presence and scope */
2361 2361 switch (devflag & D_MTINNER_MASK) {
2362 2362 case D_MP:
2363 2363 qflag |= QMTSAFE;
2364 2364 sqtype |= SQ_CI;
2365 2365 break;
2366 2366 case D_MTPERQ|D_MP:
2367 2367 qflag |= QPERQ;
2368 2368 break;
2369 2369 case D_MTQPAIR|D_MP:
2370 2370 qflag |= QPAIR;
2371 2371 break;
2372 2372 case D_MTPERMOD|D_MP:
2373 2373 qflag |= QPERMOD;
2374 2374 break;
2375 2375 default:
2376 2376 goto bad;
2377 2377 }
2378 2378
2379 2379 /* Outer perimeter */
2380 2380 if (devflag & D_MTOUTPERIM) {
2381 2381 switch (devflag & D_MTINNER_MASK) {
2382 2382 case D_MP:
2383 2383 case D_MTPERQ|D_MP:
2384 2384 case D_MTQPAIR|D_MP:
2385 2385 break;
2386 2386 default:
2387 2387 goto bad;
2388 2388 }
2389 2389 qflag |= QMTOUTPERIM;
2390 2390 }
2391 2391
2392 2392 /* Inner perimeter modifiers */
2393 2393 if (devflag & D_MTINNER_MOD) {
2394 2394 switch (devflag & D_MTINNER_MASK) {
2395 2395 case D_MP:
2396 2396 goto bad;
2397 2397 default:
2398 2398 break;
2399 2399 }
2400 2400 if (devflag & D_MTPUTSHARED)
2401 2401 sqtype |= SQ_CIPUT;
2402 2402 if (devflag & _D_MTOCSHARED) {
2403 2403 /*
2404 2404 * The code in putnext assumes that it has the
2405 2405 * highest concurrency by not checking sq_count.
2406 2406 * Thus _D_MTOCSHARED can only be supported when
2407 2407 * D_MTPUTSHARED is set.
2408 2408 */
2409 2409 if (!(devflag & D_MTPUTSHARED))
2410 2410 goto bad;
2411 2411 sqtype |= SQ_CIOC;
2412 2412 }
2413 2413 if (devflag & _D_MTCBSHARED) {
2414 2414 /*
2415 2415 * The code in putnext assumes that it has the
2416 2416 * highest concurrency by not checking sq_count.
2417 2417 * Thus _D_MTCBSHARED can only be supported when
2418 2418 * D_MTPUTSHARED is set.
2419 2419 */
2420 2420 if (!(devflag & D_MTPUTSHARED))
2421 2421 goto bad;
2422 2422 sqtype |= SQ_CICB;
2423 2423 }
2424 2424 if (devflag & _D_MTSVCSHARED) {
2425 2425 /*
2426 2426 * The code in putnext assumes that it has the
2427 2427 * highest concurrency by not checking sq_count.
2428 2428 * Thus _D_MTSVCSHARED can only be supported when
2429 2429 * D_MTPUTSHARED is set. Also _D_MTSVCSHARED is
2430 2430 * supported only for QPERMOD.
2431 2431 */
2432 2432 if (!(devflag & D_MTPUTSHARED) || !(qflag & QPERMOD))
2433 2433 goto bad;
2434 2434 sqtype |= SQ_CISVC;
2435 2435 }
2436 2436 }
2437 2437
2438 2438 /* Default outer perimeter concurrency */
2439 2439 sqtype |= SQ_CO;
2440 2440
2441 2441 /* Outer perimeter modifiers */
2442 2442 if (devflag & D_MTOCEXCL) {
2443 2443 if (!(devflag & D_MTOUTPERIM)) {
2444 2444 /* No outer perimeter */
2445 2445 goto bad;
2446 2446 }
2447 2447 sqtype &= ~SQ_COOC;
2448 2448 }
2449 2449
2450 2450 /* Synchronous Streams extended qinit structure */
2451 2451 if (devflag & D_SYNCSTR)
2452 2452 qflag |= QSYNCSTR;
2453 2453
2454 2454 /*
2455 2455 * Private flag used by a transport module to indicate
2456 2456 * to sockfs that it supports direct-access mode without
2457 2457 * having to go through STREAMS.
2458 2458 */
2459 2459 if (devflag & _D_DIRECT) {
2460 2460 /* Reject unless the module is fully-MT (no perimeter) */
2461 2461 if ((qflag & QMT_TYPEMASK) != QMTSAFE)
2462 2462 goto bad;
2463 2463 qflag |= _QDIRECT;
2464 2464 }
2465 2465
2466 2466 *qflagp = qflag;
2467 2467 *sqtypep = sqtype;
2468 2468 return (0);
2469 2469
2470 2470 bad:
2471 2471 cmn_err(CE_WARN,
2472 2472 "stropen: bad MT flags (0x%x) in driver '%s'",
2473 2473 (int)(qflag & D_MTSAFETY_MASK),
2474 2474 stp->st_rdinit->qi_minfo->mi_idname);
2475 2475
2476 2476 return (EINVAL);
2477 2477 }
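/*
 * For example (following the rules above), a driver declaring D_MP alone
 * maps to qflag QMTSAFE and sqtype SQ_CI | SQ_CO - exactly what setq() is
 * handed for stream head queues - while D_MP | D_MTPERMOD | D_MTPUTSHARED
 * maps to qflag QPERMOD and sqtype SQ_CIPUT | SQ_CO. A hypothetical call:
 *
 *	uint32_t qflag, sqtype;
 *
 *	if (devflg_to_qflag(str, D_MP | D_MTPERMOD | D_MTPUTSHARED,
 *	    &qflag, &sqtype) != 0)
 *		return (EINVAL);	// rejected flag combination
 */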
2478 2478
2479 2479 /*
2480 2480 * Set the interface values for a pair of queues (qinit structure,
2481 2481 * packet sizes, water marks).
2482 2482 * setq assumes that the caller does not have a claim (entersq or claimq)
2483 2483 * on the queue.
2484 2484 */
2485 2485 void
2486 2486 setq(queue_t *rq, struct qinit *rinit, struct qinit *winit,
2487 2487 perdm_t *dmp, uint32_t qflag, uint32_t sqtype, boolean_t lock_needed)
2488 2488 {
2489 2489 queue_t *wq;
2490 2490 syncq_t *sq, *outer;
2491 2491
2492 2492 ASSERT(rq->q_flag & QREADR);
2493 2493 ASSERT((qflag & QMT_TYPEMASK) != 0);
2494 2494 IMPLY((qflag & (QPERMOD | QMTOUTPERIM)), dmp != NULL);
2495 2495
2496 2496 wq = _WR(rq);
2497 2497 rq->q_qinfo = rinit;
2498 2498 rq->q_hiwat = rinit->qi_minfo->mi_hiwat;
2499 2499 rq->q_lowat = rinit->qi_minfo->mi_lowat;
2500 2500 rq->q_minpsz = rinit->qi_minfo->mi_minpsz;
2501 2501 rq->q_maxpsz = rinit->qi_minfo->mi_maxpsz;
2502 2502 wq->q_qinfo = winit;
2503 2503 wq->q_hiwat = winit->qi_minfo->mi_hiwat;
2504 2504 wq->q_lowat = winit->qi_minfo->mi_lowat;
2505 2505 wq->q_minpsz = winit->qi_minfo->mi_minpsz;
2506 2506 wq->q_maxpsz = winit->qi_minfo->mi_maxpsz;
2507 2507
2508 2508 /* Remove old syncqs */
2509 2509 sq = rq->q_syncq;
2510 2510 outer = sq->sq_outer;
2511 2511 if (outer != NULL) {
2512 2512 ASSERT(wq->q_syncq->sq_outer == outer);
2513 2513 outer_remove(outer, rq->q_syncq);
2514 2514 if (wq->q_syncq != rq->q_syncq)
2515 2515 outer_remove(outer, wq->q_syncq);
2516 2516 }
2517 2517 ASSERT(sq->sq_outer == NULL);
2518 2518 ASSERT(sq->sq_onext == NULL && sq->sq_oprev == NULL);
2519 2519
2520 2520 if (sq != SQ(rq)) {
2521 2521 if (!(rq->q_flag & QPERMOD))
2522 2522 free_syncq(sq);
2523 2523 if (wq->q_syncq == rq->q_syncq)
2524 2524 wq->q_syncq = NULL;
2525 2525 rq->q_syncq = NULL;
2526 2526 }
2527 2527 if (wq->q_syncq != NULL && wq->q_syncq != sq &&
2528 2528 wq->q_syncq != SQ(rq)) {
2529 2529 free_syncq(wq->q_syncq);
2530 2530 wq->q_syncq = NULL;
2531 2531 }
2532 2532 ASSERT(rq->q_syncq == NULL || (rq->q_syncq->sq_head == NULL &&
2533 2533 rq->q_syncq->sq_tail == NULL));
2534 2534 ASSERT(wq->q_syncq == NULL || (wq->q_syncq->sq_head == NULL &&
2535 2535 wq->q_syncq->sq_tail == NULL));
2536 2536
2537 2537 if (!(rq->q_flag & QPERMOD) &&
2538 2538 rq->q_syncq != NULL && rq->q_syncq->sq_ciputctrl != NULL) {
2539 2539 ASSERT(rq->q_syncq->sq_nciputctrl == n_ciputctrl - 1);
2540 2540 SUMCHECK_CIPUTCTRL_COUNTS(rq->q_syncq->sq_ciputctrl,
2541 2541 rq->q_syncq->sq_nciputctrl, 0);
2542 2542 ASSERT(ciputctrl_cache != NULL);
2543 2543 kmem_cache_free(ciputctrl_cache, rq->q_syncq->sq_ciputctrl);
2544 2544 rq->q_syncq->sq_ciputctrl = NULL;
2545 2545 rq->q_syncq->sq_nciputctrl = 0;
2546 2546 }
2547 2547
2548 2548 if (!(wq->q_flag & QPERMOD) &&
2549 2549 wq->q_syncq != NULL && wq->q_syncq->sq_ciputctrl != NULL) {
2550 2550 ASSERT(wq->q_syncq->sq_nciputctrl == n_ciputctrl - 1);
2551 2551 SUMCHECK_CIPUTCTRL_COUNTS(wq->q_syncq->sq_ciputctrl,
2552 2552 wq->q_syncq->sq_nciputctrl, 0);
2553 2553 ASSERT(ciputctrl_cache != NULL);
2554 2554 kmem_cache_free(ciputctrl_cache, wq->q_syncq->sq_ciputctrl);
2555 2555 wq->q_syncq->sq_ciputctrl = NULL;
2556 2556 wq->q_syncq->sq_nciputctrl = 0;
2557 2557 }
2558 2558
2559 2559 sq = SQ(rq);
2560 2560 ASSERT(sq->sq_head == NULL && sq->sq_tail == NULL);
2561 2561 ASSERT(sq->sq_outer == NULL);
2562 2562 ASSERT(sq->sq_onext == NULL && sq->sq_oprev == NULL);
2563 2563
2564 2564 /*
2565 2565 * Create syncqs based on qflag and sqtype. Set the SQ_TYPES_IN_FLAGS
2566 2566 * bits in sq_flag based on the sqtype.
2567 2567 */
2568 2568 ASSERT((sq->sq_flags & ~SQ_TYPES_IN_FLAGS) == 0);
2569 2569
2570 2570 rq->q_syncq = wq->q_syncq = sq;
2571 2571 sq->sq_type = sqtype;
2572 2572 sq->sq_flags = (sqtype & SQ_TYPES_IN_FLAGS);
2573 2573
2574 2574 /*
2575 2575 * We are making sq_svcflags zero,
2576 2576 * resetting SQ_DISABLED in case it was set by
2577 2577 * wait_svc() in the munlink path.
2578 2578 *
2579 2579 */
2580 2580 ASSERT((sq->sq_svcflags & SQ_SERVICE) == 0);
2581 2581 sq->sq_svcflags = 0;
2582 2582
2583 2583 /*
2584 2584 * We need to acquire the lock here for the mlink and munlink case,
2585 2585 * where canputnext, backenable, etc can access the q_flag.
2586 2586 */
2587 2587 if (lock_needed) {
2588 2588 mutex_enter(QLOCK(rq));
2589 2589 rq->q_flag = (rq->q_flag & ~QMT_TYPEMASK) | QWANTR | qflag;
2590 2590 mutex_exit(QLOCK(rq));
2591 2591 mutex_enter(QLOCK(wq));
2592 2592 wq->q_flag = (wq->q_flag & ~QMT_TYPEMASK) | QWANTR | qflag;
2593 2593 mutex_exit(QLOCK(wq));
2594 2594 } else {
2595 2595 rq->q_flag = (rq->q_flag & ~QMT_TYPEMASK) | QWANTR | qflag;
2596 2596 wq->q_flag = (wq->q_flag & ~QMT_TYPEMASK) | QWANTR | qflag;
2597 2597 }
2598 2598
2599 2599 if (qflag & QPERQ) {
2600 2600 /* Allocate a separate syncq for the write side */
2601 2601 sq = new_syncq();
2602 2602 sq->sq_type = rq->q_syncq->sq_type;
2603 2603 sq->sq_flags = rq->q_syncq->sq_flags;
2604 2604 ASSERT(sq->sq_outer == NULL && sq->sq_onext == NULL &&
2605 2605 sq->sq_oprev == NULL);
2606 2606 wq->q_syncq = sq;
2607 2607 }
2608 2608 if (qflag & QPERMOD) {
2609 2609 sq = dmp->dm_sq;
2610 2610
2611 2611 /*
2612 2612 * Assert that we do have an inner perimeter syncq and that it
2613 2613 * does not have an outer perimeter associated with it.
2614 2614 */
2615 2615 ASSERT(sq->sq_outer == NULL && sq->sq_onext == NULL &&
2616 2616 sq->sq_oprev == NULL);
2617 2617 rq->q_syncq = wq->q_syncq = sq;
2618 2618 }
2619 2619 if (qflag & QMTOUTPERIM) {
2620 2620 outer = dmp->dm_sq;
2621 2621
2622 2622 ASSERT(outer->sq_outer == NULL);
2623 2623 outer_insert(outer, rq->q_syncq);
2624 2624 if (wq->q_syncq != rq->q_syncq)
2625 2625 outer_insert(outer, wq->q_syncq);
2626 2626 }
2627 2627 ASSERT((rq->q_syncq->sq_flags & SQ_TYPES_IN_FLAGS) ==
2628 2628 (rq->q_syncq->sq_type & SQ_TYPES_IN_FLAGS));
2629 2629 ASSERT((wq->q_syncq->sq_flags & SQ_TYPES_IN_FLAGS) ==
2630 2630 (wq->q_syncq->sq_type & SQ_TYPES_IN_FLAGS));
2631 2631 ASSERT((rq->q_flag & QMT_TYPEMASK) == (qflag & QMT_TYPEMASK));
2632 2632
2633 2633 /*
2634 2634 * Initialize struio() types.
2635 2635 */
2636 2636 rq->q_struiot =
2637 2637 (rq->q_flag & QSYNCSTR) ? rinit->qi_struiot : STRUIOT_NONE;
2638 2638 wq->q_struiot =
2639 2639 (wq->q_flag & QSYNCSTR) ? winit->qi_struiot : STRUIOT_NONE;
2640 2640 }
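/*
 * The syncq wiring setq() leaves behind depends on the perimeter type
 * selected by qflag:
 *
 *	QMTSAFE		rq and wq share the embedded SQ(rq)
 *	QPAIR		rq and wq share the embedded SQ(rq)
 *	QPERQ		rq uses SQ(rq); wq gets its own new_syncq()
 *	QPERMOD		rq and wq share the per-module dmp->dm_sq
 *	QMTOUTPERIM	dmp->dm_sq additionally becomes the outer
 *			perimeter via outer_insert()
 */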
2641 2641
2642 2642 perdm_t *
2643 2643 hold_dm(struct streamtab *str, uint32_t qflag, uint32_t sqtype)
2644 2644 {
2645 2645 syncq_t *sq;
2646 2646 perdm_t **pp;
2647 2647 perdm_t *p;
2648 2648 perdm_t *dmp;
2649 2649
2650 2650 ASSERT(str != NULL);
2651 2651 ASSERT(qflag & (QPERMOD | QMTOUTPERIM));
2652 2652
2653 2653 rw_enter(&perdm_rwlock, RW_READER);
2654 2654 for (p = perdm_list; p != NULL; p = p->dm_next) {
2655 2655 if (p->dm_str == str) { /* found one */
2656 - atomic_add_32(&(p->dm_ref), 1);
2656 + atomic_inc_32(&(p->dm_ref));
2657 2657 rw_exit(&perdm_rwlock);
2658 2658 return (p);
2659 2659 }
2660 2660 }
2661 2661 rw_exit(&perdm_rwlock);
2662 2662
2663 2663 sq = new_syncq();
2664 2664 if (qflag & QPERMOD) {
2665 2665 sq->sq_type = sqtype | SQ_PERMOD;
2666 2666 sq->sq_flags = sqtype & SQ_TYPES_IN_FLAGS;
2667 2667 } else {
2668 2668 ASSERT(qflag & QMTOUTPERIM);
2669 2669 sq->sq_onext = sq->sq_oprev = sq;
2670 2670 }
2671 2671
2672 2672 dmp = kmem_alloc(sizeof (perdm_t), KM_SLEEP);
2673 2673 dmp->dm_sq = sq;
2674 2674 dmp->dm_str = str;
2675 2675 dmp->dm_ref = 1;
2676 2676 dmp->dm_next = NULL;
2677 2677
2678 2678 rw_enter(&perdm_rwlock, RW_WRITER);
2679 2679 for (pp = &perdm_list; (p = *pp) != NULL; pp = &(p->dm_next)) {
2680 2680 if (p->dm_str == str) { /* already present */
2681 2681 p->dm_ref++;
2682 2682 rw_exit(&perdm_rwlock);
2683 2683 free_syncq(sq);
2684 2684 kmem_free(dmp, sizeof (perdm_t));
2685 2685 return (p);
2686 2686 }
2687 2687 }
2688 2688
2689 2689 *pp = dmp;
2690 2690 rw_exit(&perdm_rwlock);
2691 2691 return (dmp);
2692 2692 }
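/*
 * Two details of hold_dm() are worth calling out. The fast path gains a
 * reference with atomic_inc_32() (this change: the increment-by-one
 * special case of atomic_add_32(..., 1)) because only the reader lock is
 * held, so dm_ref updates may race with other holders. The slow path is
 * the usual optimistic-insert shape: allocate with no locks held, then
 * re-check under the writer lock in case another thread inserted the same
 * entry first (a sketch, names illustrative):
 *
 *	new = alloc_entry(key);			// may sleep; no locks held
 *	rw_enter(&lock, RW_WRITER);
 *	if ((p = lookup(key)) != NULL) {	// lost the race
 *		p->refcnt++;			// writer lock protects this
 *		rw_exit(&lock);
 *		free_entry(new);		// discard our copy
 *		return (p);
 *	}
 *	insert(new);
 *	rw_exit(&lock);
 *	return (new);
 */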
2693 2693
2694 2694 void
2695 2695 rele_dm(perdm_t *dmp)
2696 2696 {
2697 2697 perdm_t **pp;
2698 2698 perdm_t *p;
2699 2699
2700 2700 rw_enter(&perdm_rwlock, RW_WRITER);
2701 2701 ASSERT(dmp->dm_ref > 0);
2702 2702
2703 2703 if (--dmp->dm_ref > 0) {
2704 2704 rw_exit(&perdm_rwlock);
2705 2705 return;
2706 2706 }
2707 2707
2708 2708 for (pp = &perdm_list; (p = *pp) != NULL; pp = &(p->dm_next))
2709 2709 if (p == dmp)
2710 2710 break;
2711 2711 ASSERT(p == dmp);
2712 2712 *pp = p->dm_next;
2713 2713 rw_exit(&perdm_rwlock);
2714 2714
2715 2715 /*
2716 2716 * Wait for any background processing that relies on the
2717 2717 * syncq to complete before it is freed.
2718 2718 */
2719 2719 wait_sq_svc(p->dm_sq);
2720 2720 free_syncq(p->dm_sq);
2721 2721 kmem_free(p, sizeof (perdm_t));
2722 2722 }
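/*
 * Note the asymmetry with hold_dm(): dm_ref is bumped atomically under
 * the reader lock, but decremented here with a plain --dmp->dm_ref, which
 * is safe only because the writer lock excludes every holder. Holding the
 * writer lock across both the decrement and the list unlink also closes
 * the race where a new hold_dm() could otherwise find and reference an
 * entry whose count had just dropped to zero.
 */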
2723 2723
2724 2724 /*
2725 2725 * Make a protocol message given control and data buffers.
2726 2726 * n.b., this can block; be careful of what locks you hold when calling it.
2727 2727 *
2728 2728 * If sd_maxblk is less than *iosize this routine can fail part way through
2729 2729 * (due to an allocation failure). In this case on return *iosize will contain
2730 2730  * the amount that was consumed. Otherwise *iosize will not be modified,
2731 2731  * i.e. it still equals the amount that was consumed (everything requested).
2732 2732 */
2733 2733 int
2734 2734 strmakemsg(
2735 2735 struct strbuf *mctl,
2736 2736 ssize_t *iosize,
2737 2737 struct uio *uiop,
2738 2738 stdata_t *stp,
2739 2739 int32_t flag,
2740 2740 mblk_t **mpp)
2741 2741 {
2742 2742 mblk_t *mpctl = NULL;
2743 2743 mblk_t *mpdata = NULL;
2744 2744 int error;
2745 2745
2746 2746 ASSERT(uiop != NULL);
2747 2747
2748 2748 *mpp = NULL;
2749 2749 /* Create control part, if any */
2750 2750 if ((mctl != NULL) && (mctl->len >= 0)) {
2751 2751 error = strmakectl(mctl, flag, uiop->uio_fmode, &mpctl);
2752 2752 if (error)
2753 2753 return (error);
2754 2754 }
2755 2755 /* Create data part, if any */
2756 2756 if (*iosize >= 0) {
2757 2757 error = strmakedata(iosize, uiop, stp, flag, &mpdata);
2758 2758 if (error) {
2759 2759 freemsg(mpctl);
2760 2760 return (error);
2761 2761 }
2762 2762 }
2763 2763 if (mpctl != NULL) {
2764 2764 if (mpdata != NULL)
2765 2765 linkb(mpctl, mpdata);
2766 2766 *mpp = mpctl;
2767 2767 } else {
2768 2768 *mpp = mpdata;
2769 2769 }
2770 2770 return (0);
2771 2771 }
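/*
 * The resulting message is the control mblk with any data linked behind
 * it via linkb(), e.g. for a putmsg() supplying both parts:
 *
 *	M_PROTO (or M_PCPROTO with RS_HIPRI)
 *	    b_cont --> M_DATA --> M_DATA --> ...
 *
 * With only a data part (mctl == NULL or mctl->len < 0), the M_DATA chain
 * is returned on its own.
 */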
2772 2772
2773 2773 /*
2774 2774 * Make the control part of a protocol message given a control buffer.
2775 2775 * n.b., this can block; be careful of what locks you hold when calling it.
2776 2776 */
2777 2777 int
2778 2778 strmakectl(
2779 2779 struct strbuf *mctl,
2780 2780 int32_t flag,
2781 2781 int32_t fflag,
2782 2782 mblk_t **mpp)
2783 2783 {
2784 2784 mblk_t *bp = NULL;
2785 2785 unsigned char msgtype;
2786 2786 int error = 0;
2787 2787 cred_t *cr = CRED();
2788 2788
2789 2789 /* We do not support interrupt threads using the stream head to send */
2790 2790 ASSERT(cr != NULL);
2791 2791
2792 2792 *mpp = NULL;
2793 2793 /*
2794 2794 * Create control part of message, if any.
2795 2795 */
2796 2796 if ((mctl != NULL) && (mctl->len >= 0)) {
2797 2797 caddr_t base;
2798 2798 int ctlcount;
2799 2799 int allocsz;
2800 2800
2801 2801 if (flag & RS_HIPRI)
2802 2802 msgtype = M_PCPROTO;
2803 2803 else
2804 2804 msgtype = M_PROTO;
2805 2805
2806 2806 ctlcount = mctl->len;
2807 2807 base = mctl->buf;
2808 2808
2809 2809 /*
2810 2810 * Give modules a better chance to reuse M_PROTO/M_PCPROTO
2811 2811 * blocks by increasing the size to something more usable.
2812 2812 */
2813 2813 allocsz = MAX(ctlcount, 64);
2814 2814
2815 2815 /*
2816 2816 * Range checking has already been done; simply try
2817 2817 * to allocate a message block for the ctl part.
2818 2818 */
2819 2819 while ((bp = allocb_cred(allocsz, cr,
2820 2820 curproc->p_pid)) == NULL) {
2821 2821 if (fflag & (FNDELAY|FNONBLOCK))
2822 2822 return (EAGAIN);
2823 2823 if (error = strwaitbuf(allocsz, BPRI_MED))
2824 2824 return (error);
2825 2825 }
2826 2826
2827 2827 bp->b_datap->db_type = msgtype;
2828 2828 if (copyin(base, bp->b_wptr, ctlcount)) {
2829 2829 freeb(bp);
2830 2830 return (EFAULT);
2831 2831 }
2832 2832 bp->b_wptr += ctlcount;
2833 2833 }
2834 2834 *mpp = bp;
2835 2835 return (0);
2836 2836 }
2837 2837
2838 2838 /*
2839 2839 * Make a protocol message given data buffers.
2840 2840 * n.b., this can block; be careful of what locks you hold when calling it.
2841 2841 *
2842 2842 * If sd_maxblk is less than *iosize this routine can fail part way through
2843 2843 * (due to an allocation failure). In this case on return *iosize will contain
2844 2844  * the amount that was consumed. Otherwise *iosize will not be modified,
2845 2845  * i.e. it still equals the amount that was consumed (everything requested).
2846 2846 */
2847 2847 int
2848 2848 strmakedata(
2849 2849 ssize_t *iosize,
2850 2850 struct uio *uiop,
2851 2851 stdata_t *stp,
2852 2852 int32_t flag,
2853 2853 mblk_t **mpp)
2854 2854 {
2855 2855 mblk_t *mp = NULL;
2856 2856 mblk_t *bp;
2857 2857 int wroff = (int)stp->sd_wroff;
2858 2858 int tail_len = (int)stp->sd_tail;
2859 2859 int extra = wroff + tail_len;
2860 2860 int error = 0;
2861 2861 ssize_t maxblk;
2862 2862 ssize_t count = *iosize;
2863 2863 cred_t *cr;
2864 2864
2865 2865 *mpp = NULL;
2866 2866 if (count < 0)
2867 2867 return (0);
2868 2868
2869 2869 /* We do not support interrupt threads using the stream head to send */
2870 2870 cr = CRED();
2871 2871 ASSERT(cr != NULL);
2872 2872
2873 2873 maxblk = stp->sd_maxblk;
2874 2874 if (maxblk == INFPSZ)
2875 2875 maxblk = count;
2876 2876
2877 2877 /*
2878 2878 * Create data part of message, if any.
2879 2879 */
2880 2880 do {
2881 2881 ssize_t size;
2882 2882 dblk_t *dp;
2883 2883
2884 2884 ASSERT(uiop);
2885 2885
2886 2886 size = MIN(count, maxblk);
2887 2887
2888 2888 while ((bp = allocb_cred(size + extra, cr,
2889 2889 curproc->p_pid)) == NULL) {
2890 2890 error = EAGAIN;
2891 2891 if ((uiop->uio_fmode & (FNDELAY|FNONBLOCK)) ||
2892 2892 (error = strwaitbuf(size + extra, BPRI_MED)) != 0) {
2893 2893 if (count == *iosize) {
2894 2894 freemsg(mp);
2895 2895 return (error);
2896 2896 } else {
2897 2897 *iosize -= count;
2898 2898 *mpp = mp;
2899 2899 return (0);
2900 2900 }
2901 2901 }
2902 2902 }
2903 2903 dp = bp->b_datap;
2904 2904 dp->db_cpid = curproc->p_pid;
2905 2905 ASSERT(wroff <= dp->db_lim - bp->b_wptr);
2906 2906 bp->b_wptr = bp->b_rptr = bp->b_rptr + wroff;
2907 2907
2908 2908 if (flag & STRUIO_POSTPONE) {
2909 2909 /*
2910 2910 * Setup the stream uio portion of the
2911 2911 * dblk for subsequent use by struioget().
2912 2912 */
2913 2913 dp->db_struioflag = STRUIO_SPEC;
2914 2914 dp->db_cksumstart = 0;
2915 2915 dp->db_cksumstuff = 0;
2916 2916 dp->db_cksumend = size;
2917 2917 *(long long *)dp->db_struioun.data = 0ll;
2918 2918 bp->b_wptr += size;
2919 2919 } else {
2920 2920 if (stp->sd_copyflag & STRCOPYCACHED)
2921 2921 uiop->uio_extflg |= UIO_COPY_CACHED;
2922 2922
2923 2923 if (size != 0) {
2924 2924 error = uiomove(bp->b_wptr, size, UIO_WRITE,
2925 2925 uiop);
2926 2926 if (error != 0) {
2927 2927 freeb(bp);
2928 2928 freemsg(mp);
2929 2929 return (error);
2930 2930 }
2931 2931 }
2932 2932 bp->b_wptr += size;
2933 2933
2934 2934 if (stp->sd_wputdatafunc != NULL) {
2935 2935 mblk_t *newbp;
2936 2936
2937 2937 newbp = (stp->sd_wputdatafunc)(stp->sd_vnode,
2938 2938 bp, NULL, NULL, NULL, NULL);
2939 2939 if (newbp == NULL) {
2940 2940 freeb(bp);
2941 2941 freemsg(mp);
2942 2942 return (ECOMM);
2943 2943 }
2944 2944 bp = newbp;
2945 2945 }
2946 2946 }
2947 2947
2948 2948 count -= size;
2949 2949
2950 2950 if (mp == NULL)
2951 2951 mp = bp;
2952 2952 else
2953 2953 linkb(mp, bp);
2954 2954 } while (count > 0);
2955 2955
2956 2956 *mpp = mp;
2957 2957 return (0);
2958 2958 }
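/*
 * strmakedata() carves the write into mblks of at most sd_maxblk bytes
 * (INFPSZ meaning a single block), each with wroff bytes of headroom and
 * sd_tail of tailroom so downstream modules can prepend or append without
 * reallocating. For instance, with *iosize = 5000, maxblk = 2048 and
 * wroff = 64, the loop allocates three blocks (2048, 2048 and 904 bytes
 * of data, each plus 64 + sd_tail extra) and chains them with linkb().
 */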
2959 2959
2960 2960 /*
2961 2961  * Wait for a buffer to become available. Return a non-zero errno
2962 2962  * if not able to wait, 0 if a buffer is probably there.
2963 2963 */
2964 2964 int
2965 2965 strwaitbuf(size_t size, int pri)
2966 2966 {
2967 2967 bufcall_id_t id;
2968 2968
2969 2969 mutex_enter(&bcall_monitor);
2970 2970 if ((id = bufcall(size, pri, (void (*)(void *))cv_broadcast,
2971 2971 &ttoproc(curthread)->p_flag_cv)) == 0) {
2972 2972 mutex_exit(&bcall_monitor);
2973 2973 return (ENOSR);
2974 2974 }
2975 2975 if (!cv_wait_sig(&(ttoproc(curthread)->p_flag_cv), &bcall_monitor)) {
2976 2976 unbufcall(id);
2977 2977 mutex_exit(&bcall_monitor);
2978 2978 return (EINTR);
2979 2979 }
2980 2980 unbufcall(id);
2981 2981 mutex_exit(&bcall_monitor);
2982 2982 return (0);
2983 2983 }
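/*
 * strwaitbuf() is the standard bufcall() sleep: register a callback that
 * fires when an allocation of "size" bytes is likely to succeed, have it
 * cv_broadcast() the waiter, and sleep interruptibly. bcall_monitor
 * closes the window between registering the callback and going to sleep.
 * In outline:
 *
 *	mutex_enter(&mon);
 *	if ((id = bufcall(size, pri, wakeup, arg)) == 0)
 *		return (ENOSR);		// after dropping mon
 *	if (!cv_wait_sig(&cv, &mon))	// wakeup() broadcasts &cv
 *		error = EINTR;
 *	unbufcall(id);			// always cancel the callback
 *	mutex_exit(&mon);
 */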
2984 2984
2985 2985 /*
2986 2986 * This function waits for a read or write event to happen on a stream.
2987 2987 * fmode can specify FNDELAY and/or FNONBLOCK.
2988 2988 * The timeout is in ms with -1 meaning infinite.
2989 2989 * The flag values work as follows:
2990 2990 * READWAIT Check for read side errors, send M_READ
2991 2991 * GETWAIT Check for read side errors, no M_READ
2992 2992 * WRITEWAIT Check for write side errors.
2993 2993 * NOINTR Do not return error if nonblocking or timeout.
2994 2994 * STR_NOERROR Ignore all errors except STPLEX.
2995 2995 * STR_NOSIG Ignore/hold signals during the duration of the call.
2996 2996 * STR_PEEK Pass through the strgeterr().
2997 2997 */
2998 2998 int
2999 2999 strwaitq(stdata_t *stp, int flag, ssize_t count, int fmode, clock_t timout,
3000 3000 int *done)
3001 3001 {
3002 3002 int slpflg, errs;
3003 3003 int error;
3004 3004 kcondvar_t *sleepon;
3005 3005 mblk_t *mp;
3006 3006 ssize_t *rd_count;
3007 3007 clock_t rval;
3008 3008
3009 3009 ASSERT(MUTEX_HELD(&stp->sd_lock));
3010 3010 if ((flag & READWAIT) || (flag & GETWAIT)) {
3011 3011 slpflg = RSLEEP;
3012 3012 sleepon = &_RD(stp->sd_wrq)->q_wait;
3013 3013 errs = STRDERR|STPLEX;
3014 3014 } else {
3015 3015 slpflg = WSLEEP;
3016 3016 sleepon = &stp->sd_wrq->q_wait;
3017 3017 errs = STWRERR|STRHUP|STPLEX;
3018 3018 }
3019 3019 if (flag & STR_NOERROR)
3020 3020 errs = STPLEX;
3021 3021
3022 3022 if (stp->sd_wakeq & slpflg) {
3023 3023 /*
3024 3024 * A strwakeq() is pending, no need to sleep.
3025 3025 */
3026 3026 stp->sd_wakeq &= ~slpflg;
3027 3027 *done = 0;
3028 3028 return (0);
3029 3029 }
3030 3030
3031 3031 if (stp->sd_flag & errs) {
3032 3032 /*
3033 3033 * Check for errors before going to sleep since the
3034 3034 * caller might not have checked this while holding
3035 3035 * sd_lock.
3036 3036 */
3037 3037 error = strgeterr(stp, errs, (flag & STR_PEEK));
3038 3038 if (error != 0) {
3039 3039 *done = 1;
3040 3040 return (error);
3041 3041 }
3042 3042 }
3043 3043
3044 3044 /*
3045 3045 * If any module downstream has requested read notification
3046 3046 * by setting SNDMREAD flag using M_SETOPTS, send a message
3047 3047 * down stream.
3048 3048 */
3049 3049 if ((flag & READWAIT) && (stp->sd_flag & SNDMREAD)) {
3050 3050 mutex_exit(&stp->sd_lock);
3051 3051 if (!(mp = allocb_wait(sizeof (ssize_t), BPRI_MED,
3052 3052 (flag & STR_NOSIG), &error))) {
3053 3053 mutex_enter(&stp->sd_lock);
3054 3054 *done = 1;
3055 3055 return (error);
3056 3056 }
3057 3057 mp->b_datap->db_type = M_READ;
3058 3058 rd_count = (ssize_t *)mp->b_wptr;
3059 3059 *rd_count = count;
3060 3060 mp->b_wptr += sizeof (ssize_t);
3061 3061 /*
3062 3062 * Send the number of bytes requested by the
3063 3063 * read as the argument to M_READ.
3064 3064 */
3065 3065 stream_willservice(stp);
3066 3066 putnext(stp->sd_wrq, mp);
3067 3067 stream_runservice(stp);
3068 3068 mutex_enter(&stp->sd_lock);
3069 3069
3070 3070 /*
3071 3071 * If any data arrived due to inline processing
3072 3072 * of putnext(), don't sleep.
3073 3073 */
3074 3074 if (_RD(stp->sd_wrq)->q_first != NULL) {
3075 3075 *done = 0;
3076 3076 return (0);
3077 3077 }
3078 3078 }
3079 3079
3080 3080 if (fmode & (FNDELAY|FNONBLOCK)) {
3081 3081 if (!(flag & NOINTR))
3082 3082 error = EAGAIN;
3083 3083 else
3084 3084 error = 0;
3085 3085 *done = 1;
3086 3086 return (error);
3087 3087 }
3088 3088
3089 3089 stp->sd_flag |= slpflg;
3090 3090 TRACE_5(TR_FAC_STREAMS_FR, TR_STRWAITQ_WAIT2,
3091 3091 "strwaitq sleeps (2):%p, %X, %lX, %X, %p",
3092 3092 stp, flag, count, fmode, done);
3093 3093
3094 3094 rval = str_cv_wait(sleepon, &stp->sd_lock, timout, flag & STR_NOSIG);
3095 3095 if (rval > 0) {
3096 3096 /* EMPTY */
3097 3097 TRACE_5(TR_FAC_STREAMS_FR, TR_STRWAITQ_WAKE2,
3098 3098 "strwaitq awakes(2):%X, %X, %X, %X, %X",
3099 3099 stp, flag, count, fmode, done);
3100 3100 } else if (rval == 0) {
3101 3101 TRACE_5(TR_FAC_STREAMS_FR, TR_STRWAITQ_INTR2,
3102 3102 "strwaitq interrupt #2:%p, %X, %lX, %X, %p",
3103 3103 stp, flag, count, fmode, done);
3104 3104 stp->sd_flag &= ~slpflg;
3105 3105 cv_broadcast(sleepon);
3106 3106 if (!(flag & NOINTR))
3107 3107 error = EINTR;
3108 3108 else
3109 3109 error = 0;
3110 3110 *done = 1;
3111 3111 return (error);
3112 3112 } else {
3113 3113 /* timeout */
3114 3114 TRACE_5(TR_FAC_STREAMS_FR, TR_STRWAITQ_TIME,
3115 3115 "strwaitq timeout:%p, %X, %lX, %X, %p",
3116 3116 stp, flag, count, fmode, done);
3117 3117 *done = 1;
3118 3118 if (!(flag & NOINTR))
3119 3119 return (ETIME);
3120 3120 else
3121 3121 return (0);
3122 3122 }
3123 3123 /*
3124 3124  * we cannot check for errors here since data as well as an
3125 3125 * we can not check for errors here since data as well as an
3126 3126 * error might have arrived at the stream head. We return to
3127 3127 * have the caller check the read queue before checking for errors.
3128 3128 */
3129 3129 if ((stp->sd_flag & errs) && !(flag & STR_DELAYERR)) {
3130 3130 error = strgeterr(stp, errs, (flag & STR_PEEK));
3131 3131 if (error != 0) {
3132 3132 *done = 1;
3133 3133 return (error);
3134 3134 }
3135 3135 }
3136 3136 *done = 0;
3137 3137 return (0);
3138 3138 }
3139 3139
3140 3140 /*
3141 3141 * Perform job control discipline access checks.
3142 3142 * Return 0 for success and the errno for failure.
3143 3143 */
3144 3144
3145 3145 #define cantsend(p, t, sig) \
3146 3146 (sigismember(&(p)->p_ignore, sig) || signal_is_blocked((t), sig))
3147 3147
3148 3148 int
3149 3149 straccess(struct stdata *stp, enum jcaccess mode)
3150 3150 {
3151 3151 extern kcondvar_t lbolt_cv; /* XXX: should be in a header file */
3152 3152 kthread_t *t = curthread;
3153 3153 proc_t *p = ttoproc(t);
3154 3154 sess_t *sp;
3155 3155
3156 3156 ASSERT(mutex_owned(&stp->sd_lock));
3157 3157
3158 3158 if (stp->sd_sidp == NULL || stp->sd_vnode->v_type == VFIFO)
3159 3159 return (0);
3160 3160
3161 3161 mutex_enter(&p->p_lock); /* protects p_pgidp */
3162 3162
3163 3163 for (;;) {
3164 3164 mutex_enter(&p->p_splock); /* protects p->p_sessp */
3165 3165 sp = p->p_sessp;
3166 3166 mutex_enter(&sp->s_lock); /* protects sp->* */
3167 3167
3168 3168 /*
3169 3169 * If this is not the calling process's controlling terminal
3170 3170 * or if the calling process is already in the foreground
3171 3171 * then allow access.
3172 3172 */
3173 3173 if (sp->s_dev != stp->sd_vnode->v_rdev ||
3174 3174 p->p_pgidp == stp->sd_pgidp) {
3175 3175 mutex_exit(&sp->s_lock);
3176 3176 mutex_exit(&p->p_splock);
3177 3177 mutex_exit(&p->p_lock);
3178 3178 return (0);
3179 3179 }
3180 3180
3181 3181 /*
3182 3182 * Check to see if controlling terminal has been deallocated.
3183 3183 */
3184 3184 if (sp->s_vp == NULL) {
3185 3185 if (!cantsend(p, t, SIGHUP))
3186 3186 sigtoproc(p, t, SIGHUP);
3187 3187 mutex_exit(&sp->s_lock);
3188 3188 mutex_exit(&p->p_splock);
3189 3189 mutex_exit(&p->p_lock);
3190 3190 return (EIO);
3191 3191 }
3192 3192
3193 3193 mutex_exit(&sp->s_lock);
3194 3194 mutex_exit(&p->p_splock);
3195 3195
3196 3196 if (mode == JCGETP) {
3197 3197 mutex_exit(&p->p_lock);
3198 3198 return (0);
3199 3199 }
3200 3200
3201 3201 if (mode == JCREAD) {
3202 3202 if (p->p_detached || cantsend(p, t, SIGTTIN)) {
3203 3203 mutex_exit(&p->p_lock);
3204 3204 return (EIO);
3205 3205 }
3206 3206 mutex_exit(&p->p_lock);
3207 3207 mutex_exit(&stp->sd_lock);
3208 3208 pgsignal(p->p_pgidp, SIGTTIN);
3209 3209 mutex_enter(&stp->sd_lock);
3210 3210 mutex_enter(&p->p_lock);
3211 3211 } else { /* mode == JCWRITE or JCSETP */
3212 3212 if ((mode == JCWRITE && !(stp->sd_flag & STRTOSTOP)) ||
3213 3213 cantsend(p, t, SIGTTOU)) {
3214 3214 mutex_exit(&p->p_lock);
3215 3215 return (0);
3216 3216 }
3217 3217 if (p->p_detached) {
3218 3218 mutex_exit(&p->p_lock);
3219 3219 return (EIO);
3220 3220 }
3221 3221 mutex_exit(&p->p_lock);
3222 3222 mutex_exit(&stp->sd_lock);
3223 3223 pgsignal(p->p_pgidp, SIGTTOU);
3224 3224 mutex_enter(&stp->sd_lock);
3225 3225 mutex_enter(&p->p_lock);
3226 3226 }
3227 3227
3228 3228 /*
3229 3229 * We call cv_wait_sig_swap() to cause the appropriate
3230 3230 * action for the jobcontrol signal to take place.
3231 3231 * If the signal is being caught, we will take the
3232 3232 * EINTR error return. Otherwise, the default action
3233 3233 * of causing the process to stop will take place.
3234 3234 * In this case, we rely on the periodic cv_broadcast() on
3235 3235 * &lbolt_cv to wake us up to loop around and test again.
3236 3236 * We can't get here if the signal is ignored or
3237 3237 * if the current thread is blocking the signal.
3238 3238 */
3239 3239 mutex_exit(&stp->sd_lock);
3240 3240 if (!cv_wait_sig_swap(&lbolt_cv, &p->p_lock)) {
3241 3241 mutex_exit(&p->p_lock);
3242 3242 mutex_enter(&stp->sd_lock);
3243 3243 return (EINTR);
3244 3244 }
3245 3245 mutex_exit(&p->p_lock);
3246 3246 mutex_enter(&stp->sd_lock);
3247 3247 mutex_enter(&p->p_lock);
3248 3248 }
3249 3249 }
3250 3250
3251 3251 /*
3252 3252 * Return size of message of block type (bp->b_datap->db_type)
3253 3253 */
3254 3254 size_t
3255 3255 xmsgsize(mblk_t *bp)
3256 3256 {
3257 3257 unsigned char type;
3258 3258 size_t count = 0;
3259 3259
3260 3260 type = bp->b_datap->db_type;
3261 3261
3262 3262 for (; bp; bp = bp->b_cont) {
3263 3263 if (type != bp->b_datap->db_type)
3264 3264 break;
3265 3265 ASSERT(bp->b_wptr >= bp->b_rptr);
3266 3266 count += bp->b_wptr - bp->b_rptr;
3267 3267 }
3268 3268 return (count);
3269 3269 }
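/*
 * Note that the summation stops at the first b_cont block whose db_type
 * differs from the first block's, so for a message shaped
 *
 *	M_PROTO --> M_DATA --> M_DATA
 *
 * xmsgsize() counts only the M_PROTO bytes; call it on the first M_DATA
 * block to size the data portion.
 */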
3270 3270
3271 3271 /*
3272 3272 * Allocate a stream head.
3273 3273 */
3274 3274 struct stdata *
3275 3275 shalloc(queue_t *qp)
3276 3276 {
3277 3277 stdata_t *stp;
3278 3278
3279 3279 stp = kmem_cache_alloc(stream_head_cache, KM_SLEEP);
3280 3280
3281 3281 stp->sd_wrq = _WR(qp);
3282 3282 stp->sd_strtab = NULL;
3283 3283 stp->sd_iocid = 0;
3284 3284 stp->sd_mate = NULL;
3285 3285 stp->sd_freezer = NULL;
3286 3286 stp->sd_refcnt = 0;
3287 3287 stp->sd_wakeq = 0;
3288 3288 stp->sd_anchor = 0;
3289 3289 stp->sd_struiowrq = NULL;
3290 3290 stp->sd_struiordq = NULL;
3291 3291 stp->sd_struiodnak = 0;
3292 3292 stp->sd_struionak = NULL;
3293 3293 stp->sd_t_audit_data = NULL;
3294 3294 stp->sd_rput_opt = 0;
3295 3295 stp->sd_wput_opt = 0;
3296 3296 stp->sd_read_opt = 0;
3297 3297 stp->sd_rprotofunc = strrput_proto;
3298 3298 stp->sd_rmiscfunc = strrput_misc;
3299 3299 stp->sd_rderrfunc = stp->sd_wrerrfunc = NULL;
3300 3300 stp->sd_rputdatafunc = stp->sd_wputdatafunc = NULL;
3301 3301 stp->sd_ciputctrl = NULL;
3302 3302 stp->sd_nciputctrl = 0;
3303 3303 stp->sd_qhead = NULL;
3304 3304 stp->sd_qtail = NULL;
3305 3305 stp->sd_servid = NULL;
3306 3306 stp->sd_nqueues = 0;
3307 3307 stp->sd_svcflags = 0;
3308 3308 stp->sd_copyflag = 0;
3309 3309
3310 3310 return (stp);
3311 3311 }
3312 3312
3313 3313 /*
3314 3314 * Free a stream head.
3315 3315 */
3316 3316 void
3317 3317 shfree(stdata_t *stp)
3318 3318 {
3319 3319 ASSERT(MUTEX_NOT_HELD(&stp->sd_lock));
3320 3320
3321 3321 stp->sd_wrq = NULL;
3322 3322
3323 3323 mutex_enter(&stp->sd_qlock);
3324 3324 while (stp->sd_svcflags & STRS_SCHEDULED) {
3325 3325 STRSTAT(strwaits);
3326 3326 cv_wait(&stp->sd_qcv, &stp->sd_qlock);
3327 3327 }
3328 3328 mutex_exit(&stp->sd_qlock);
3329 3329
3330 3330 if (stp->sd_ciputctrl != NULL) {
3331 3331 ASSERT(stp->sd_nciputctrl == n_ciputctrl - 1);
3332 3332 SUMCHECK_CIPUTCTRL_COUNTS(stp->sd_ciputctrl,
3333 3333 stp->sd_nciputctrl, 0);
3334 3334 ASSERT(ciputctrl_cache != NULL);
3335 3335 kmem_cache_free(ciputctrl_cache, stp->sd_ciputctrl);
3336 3336 stp->sd_ciputctrl = NULL;
3337 3337 stp->sd_nciputctrl = 0;
3338 3338 }
3339 3339 ASSERT(stp->sd_qhead == NULL);
3340 3340 ASSERT(stp->sd_qtail == NULL);
3341 3341 ASSERT(stp->sd_nqueues == 0);
3342 3342 kmem_cache_free(stream_head_cache, stp);
3343 3343 }
3344 3344
3345 3345 /*
3346 3346 * Allocate a pair of queues and a syncq for the pair
3347 3347 */
3348 3348 queue_t *
3349 3349 allocq(void)
3350 3350 {
3351 3351 queinfo_t *qip;
3352 3352 queue_t *qp, *wqp;
3353 3353 syncq_t *sq;
3354 3354
3355 3355 qip = kmem_cache_alloc(queue_cache, KM_SLEEP);
3356 3356
3357 3357 qp = &qip->qu_rqueue;
3358 3358 wqp = &qip->qu_wqueue;
3359 3359 sq = &qip->qu_syncq;
3360 3360
3361 3361 qp->q_last = NULL;
3362 3362 qp->q_next = NULL;
3363 3363 qp->q_ptr = NULL;
3364 3364 qp->q_flag = QUSE | QREADR;
3365 3365 qp->q_bandp = NULL;
3366 3366 qp->q_stream = NULL;
3367 3367 qp->q_syncq = sq;
3368 3368 qp->q_nband = 0;
3369 3369 qp->q_nfsrv = NULL;
3370 3370 qp->q_draining = 0;
3371 3371 qp->q_syncqmsgs = 0;
3372 3372 qp->q_spri = 0;
3373 3373 qp->q_qtstamp = 0;
3374 3374 qp->q_sqtstamp = 0;
3375 3375 qp->q_fp = NULL;
3376 3376
3377 3377 wqp->q_last = NULL;
3378 3378 wqp->q_next = NULL;
3379 3379 wqp->q_ptr = NULL;
3380 3380 wqp->q_flag = QUSE;
3381 3381 wqp->q_bandp = NULL;
3382 3382 wqp->q_stream = NULL;
3383 3383 wqp->q_syncq = sq;
3384 3384 wqp->q_nband = 0;
3385 3385 wqp->q_nfsrv = NULL;
3386 3386 wqp->q_draining = 0;
3387 3387 wqp->q_syncqmsgs = 0;
3388 3388 wqp->q_qtstamp = 0;
3389 3389 wqp->q_sqtstamp = 0;
3390 3390 wqp->q_spri = 0;
3391 3391
3392 3392 sq->sq_count = 0;
3393 3393 sq->sq_rmqcount = 0;
3394 3394 sq->sq_flags = 0;
3395 3395 sq->sq_type = 0;
3396 3396 sq->sq_callbflags = 0;
3397 3397 sq->sq_cancelid = 0;
3398 3398 sq->sq_ciputctrl = NULL;
3399 3399 sq->sq_nciputctrl = 0;
3400 3400 sq->sq_needexcl = 0;
3401 3401 sq->sq_svcflags = 0;
3402 3402
3403 3403 return (qp);
3404 3404 }
3405 3405
3406 3406 /*
3407 3407 * Free a pair of queues and the "attached" syncq.
3408 3408 * Discard any messages left on the syncq(s), remove the syncq(s) from the
3409 3409 * outer perimeter, and free the syncq(s) if they are not the "attached" syncq.
3410 3410 */
3411 3411 void
3412 3412 freeq(queue_t *qp)
3413 3413 {
3414 3414 qband_t *qbp, *nqbp;
3415 3415 syncq_t *sq, *outer;
3416 3416 queue_t *wqp = _WR(qp);
3417 3417
3418 3418 ASSERT(qp->q_flag & QREADR);
3419 3419
3420 3420 /*
3421 3421 * If a previously dispatched taskq job is scheduled to run
3422 3422 * sync_service() or a service routine is scheduled for the
3423 3423 * queues about to be freed, wait here until all service is
3424 3424 * done on the queue and all associated queues and syncqs.
3425 3425 */
3426 3426 wait_svc(qp);
3427 3427
3428 3428 (void) flush_syncq(qp->q_syncq, qp);
3429 3429 (void) flush_syncq(wqp->q_syncq, wqp);
3430 3430 ASSERT(qp->q_syncqmsgs == 0 && wqp->q_syncqmsgs == 0);
3431 3431
3432 3432 /*
3433 3433 	 * Flush the queues before q_next is set to NULL. This is needed
3434 3434 * in order to backenable any downstream queue before we go away.
3435 3435 * Note: we are already removed from the stream so that the
3436 3436 * backenabling will not cause any messages to be delivered to our
3437 3437 * put procedures.
3438 3438 */
3439 3439 flushq(qp, FLUSHALL);
3440 3440 flushq(wqp, FLUSHALL);
3441 3441
3442 3442 /* Tidy up - removeq only does a half-remove from stream */
3443 3443 qp->q_next = wqp->q_next = NULL;
3444 3444 ASSERT(!(qp->q_flag & QENAB));
3445 3445 ASSERT(!(wqp->q_flag & QENAB));
3446 3446
3447 3447 outer = qp->q_syncq->sq_outer;
3448 3448 if (outer != NULL) {
3449 3449 outer_remove(outer, qp->q_syncq);
3450 3450 if (wqp->q_syncq != qp->q_syncq)
3451 3451 outer_remove(outer, wqp->q_syncq);
3452 3452 }
3453 3453 /*
3454 3454 * Free any syncqs that are outside what allocq returned.
3455 3455 */
3456 3456 if (qp->q_syncq != SQ(qp) && !(qp->q_flag & QPERMOD))
3457 3457 free_syncq(qp->q_syncq);
3458 3458 if (qp->q_syncq != wqp->q_syncq && wqp->q_syncq != SQ(qp))
3459 3459 free_syncq(wqp->q_syncq);
3460 3460
3461 3461 ASSERT((qp->q_sqflags & (Q_SQQUEUED | Q_SQDRAINING)) == 0);
3462 3462 ASSERT((wqp->q_sqflags & (Q_SQQUEUED | Q_SQDRAINING)) == 0);
3463 3463 ASSERT(MUTEX_NOT_HELD(QLOCK(qp)));
3464 3464 ASSERT(MUTEX_NOT_HELD(QLOCK(wqp)));
3465 3465 sq = SQ(qp);
3466 3466 ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));
3467 3467 ASSERT(sq->sq_head == NULL && sq->sq_tail == NULL);
3468 3468 ASSERT(sq->sq_outer == NULL);
3469 3469 ASSERT(sq->sq_onext == NULL && sq->sq_oprev == NULL);
3470 3470 ASSERT(sq->sq_callbpend == NULL);
3471 3471 ASSERT(sq->sq_needexcl == 0);
3472 3472
3473 3473 if (sq->sq_ciputctrl != NULL) {
3474 3474 ASSERT(sq->sq_nciputctrl == n_ciputctrl - 1);
3475 3475 SUMCHECK_CIPUTCTRL_COUNTS(sq->sq_ciputctrl,
3476 3476 sq->sq_nciputctrl, 0);
3477 3477 ASSERT(ciputctrl_cache != NULL);
3478 3478 kmem_cache_free(ciputctrl_cache, sq->sq_ciputctrl);
3479 3479 sq->sq_ciputctrl = NULL;
3480 3480 sq->sq_nciputctrl = 0;
3481 3481 }
3482 3482
3483 3483 ASSERT(qp->q_first == NULL && wqp->q_first == NULL);
3484 3484 ASSERT(qp->q_count == 0 && wqp->q_count == 0);
3485 3485 ASSERT(qp->q_mblkcnt == 0 && wqp->q_mblkcnt == 0);
3486 3486
3487 3487 qp->q_flag &= ~QUSE;
3488 3488 wqp->q_flag &= ~QUSE;
3489 3489
3490 3490 /* NOTE: Uncomment the assert below once bugid 1159635 is fixed. */
3491 3491 /* ASSERT((qp->q_flag & QWANTW) == 0 && (wqp->q_flag & QWANTW) == 0); */
3492 3492
3493 3493 qbp = qp->q_bandp;
3494 3494 while (qbp) {
3495 3495 nqbp = qbp->qb_next;
3496 3496 freeband(qbp);
3497 3497 qbp = nqbp;
3498 3498 }
3499 3499 qbp = wqp->q_bandp;
3500 3500 while (qbp) {
3501 3501 nqbp = qbp->qb_next;
3502 3502 freeband(qbp);
3503 3503 qbp = nqbp;
3504 3504 }
3505 3505 kmem_cache_free(queue_cache, qp);
3506 3506 }
3507 3507
3508 3508 /*
3509 3509 * Allocate a qband structure.
3510 3510 */
3511 3511 qband_t *
3512 3512 allocband(void)
3513 3513 {
3514 3514 qband_t *qbp;
3515 3515
3516 3516 qbp = kmem_cache_alloc(qband_cache, KM_NOSLEEP);
3517 3517 if (qbp == NULL)
3518 3518 return (NULL);
3519 3519
3520 3520 qbp->qb_next = NULL;
3521 3521 qbp->qb_count = 0;
3522 3522 qbp->qb_mblkcnt = 0;
3523 3523 qbp->qb_first = NULL;
3524 3524 qbp->qb_last = NULL;
3525 3525 qbp->qb_flag = 0;
3526 3526
3527 3527 return (qbp);
3528 3528 }
3529 3529
3530 3530 /*
3531 3531 * Free a qband structure.
3532 3532 */
3533 3533 void
3534 3534 freeband(qband_t *qbp)
3535 3535 {
3536 3536 kmem_cache_free(qband_cache, qbp);
3537 3537 }
3538 3538
3539 3539 /*
3540 3540 * Just like putnextctl(9F), except that allocb_wait() is used.
3541 3541 *
3542 3542 * Consolidation Private, and of course only callable from the stream head or
3543 3543 * routines that may block.
3544 3544 */
3545 3545 int
3546 3546 putnextctl_wait(queue_t *q, int type)
3547 3547 {
3548 3548 mblk_t *bp;
3549 3549 int error;
3550 3550
3551 3551 if ((datamsg(type) && (type != M_DELAY)) ||
3552 3552 (bp = allocb_wait(0, BPRI_HI, 0, &error)) == NULL)
3553 3553 return (0);
3554 3554
3555 3555 bp->b_datap->db_type = (unsigned char)type;
3556 3556 putnext(q, bp);
3557 3557 return (1);
3558 3558 }
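
/*
 * Illustrative sketch (editor's annotation, not part of strsubr.c):
 * a blockable stream-head path can reliably push a zero-length
 * control message downstream; unlike putnextctl(9F), this variant
 * sleeps for the message block instead of failing. M_START is just
 * an example message type.
 */
	if (putnextctl_wait(stp->sd_wrq, M_START) == 0) {
		/* bad (data) type, or allocb_wait() failed */
		return (EINVAL);
	}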
3559 3559
3560 3560 /*
3561 3561 * Run any possible bufcalls.
3562 3562 */
3563 3563 void
3564 3564 runbufcalls(void)
3565 3565 {
3566 3566 strbufcall_t *bcp;
3567 3567
3568 3568 mutex_enter(&bcall_monitor);
3569 3569 mutex_enter(&strbcall_lock);
3570 3570
3571 3571 if (strbcalls.bc_head) {
3572 3572 size_t count;
3573 3573 int nevent;
3574 3574
3575 3575 /*
3576 3576 * count how many events are on the list
3577 3577 * now so we can check to avoid looping
3578 3578 * in low memory situations
3579 3579 */
3580 3580 nevent = 0;
3581 3581 for (bcp = strbcalls.bc_head; bcp; bcp = bcp->bc_next)
3582 3582 nevent++;
3583 3583
3584 3584 /*
3585 3585 		 * Get an estimate of available memory from kmem_avail()
3586 3586 		 * and wake all bufcall functions waiting for memory whose
3587 3587 		 * request could be satisfied by 'count' bytes; let 'em
3588 3588 		 * fight for it.
3589 3589 */
3590 3590 count = kmem_avail();
3591 3591 while ((bcp = strbcalls.bc_head) != NULL && nevent) {
3592 3592 STRSTAT(bufcalls);
3593 3593 --nevent;
3594 3594 if (bcp->bc_size <= count) {
3595 3595 bcp->bc_executor = curthread;
3596 3596 mutex_exit(&strbcall_lock);
3597 3597 (*bcp->bc_func)(bcp->bc_arg);
3598 3598 mutex_enter(&strbcall_lock);
3599 3599 bcp->bc_executor = NULL;
3600 3600 cv_broadcast(&bcall_cv);
3601 3601 strbcalls.bc_head = bcp->bc_next;
3602 3602 kmem_free(bcp, sizeof (strbufcall_t));
3603 3603 } else {
3604 3604 /*
3605 3605 * too big, try again later - note
3606 3606 * that nevent was decremented above
3607 3607 * so we won't retry this one on this
3608 3608 * iteration of the loop
3609 3609 */
3610 3610 if (bcp->bc_next != NULL) {
3611 3611 strbcalls.bc_head = bcp->bc_next;
3612 3612 bcp->bc_next = NULL;
3613 3613 strbcalls.bc_tail->bc_next = bcp;
3614 3614 strbcalls.bc_tail = bcp;
3615 3615 }
3616 3616 }
3617 3617 }
3618 3618 if (strbcalls.bc_head == NULL)
3619 3619 strbcalls.bc_tail = NULL;
3620 3620 }
3621 3621
3622 3622 mutex_exit(&strbcall_lock);
3623 3623 mutex_exit(&bcall_monitor);
3624 3624 }
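
/*
 * Illustrative sketch (editor's annotation, not part of strsubr.c):
 * the classic driver-side pattern that feeds runbufcalls(). When
 * allocb() fails in a service routine, bufcall(9F) registers a
 * callback to run once 'size' bytes may again be allocatable. The
 * names xx_srv, xx_reenable, and XX_SIZE are hypothetical.
 */
static void
xx_reenable(void *arg)
{
	qenable((queue_t *)arg);	/* rerun the service procedure */
}

static int
xx_srv(queue_t *q)
{
	mblk_t *mp;

	if ((mp = allocb(XX_SIZE, BPRI_MED)) == NULL) {
		/* wait for memory; runbufcalls() eventually calls back */
		(void) bufcall(XX_SIZE, BPRI_MED, xx_reenable, q);
		return (0);
	}
	/* ... fill in mp and pass it along ... */
	putnext(q, mp);
	return (0);
}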
3625 3625
3626 3626
3627 3627 /*
3628 3628  * Actually run the queue's service routine.
3629 3629 */
3630 3630 static void
3631 3631 runservice(queue_t *q)
3632 3632 {
3633 3633 qband_t *qbp;
3634 3634
3635 3635 ASSERT(q->q_qinfo->qi_srvp);
3636 3636 again:
3637 3637 entersq(q->q_syncq, SQ_SVC);
3638 3638 TRACE_1(TR_FAC_STREAMS_FR, TR_QRUNSERVICE_START,
3639 3639 "runservice starts:%p", q);
3640 3640
3641 3641 if (!(q->q_flag & QWCLOSE))
3642 3642 (*q->q_qinfo->qi_srvp)(q);
3643 3643
3644 3644 TRACE_1(TR_FAC_STREAMS_FR, TR_QRUNSERVICE_END,
3645 3645 "runservice ends:(%p)", q);
3646 3646
3647 3647 leavesq(q->q_syncq, SQ_SVC);
3648 3648
3649 3649 mutex_enter(QLOCK(q));
3650 3650 if (q->q_flag & QENAB) {
3651 3651 q->q_flag &= ~QENAB;
3652 3652 mutex_exit(QLOCK(q));
3653 3653 goto again;
3654 3654 }
3655 3655 q->q_flag &= ~QINSERVICE;
3656 3656 q->q_flag &= ~QBACK;
3657 3657 for (qbp = q->q_bandp; qbp; qbp = qbp->qb_next)
3658 3658 qbp->qb_flag &= ~QB_BACK;
3659 3659 /*
3660 3660 	 * Wake up threads waiting for the service procedure
3661 3661 	 * to be run (strclose and qdetach).
3662 3662 */
3663 3663 cv_broadcast(&q->q_wait);
3664 3664
3665 3665 mutex_exit(QLOCK(q));
3666 3666 }
3667 3667
3668 3668 /*
3669 3669 * Background processing of bufcalls.
3670 3670 */
3671 3671 void
3672 3672 streams_bufcall_service(void)
3673 3673 {
3674 3674 callb_cpr_t cprinfo;
3675 3675
3676 3676 CALLB_CPR_INIT(&cprinfo, &strbcall_lock, callb_generic_cpr,
3677 3677 "streams_bufcall_service");
3678 3678
3679 3679 mutex_enter(&strbcall_lock);
3680 3680
3681 3681 for (;;) {
3682 3682 if (strbcalls.bc_head != NULL && kmem_avail() > 0) {
3683 3683 mutex_exit(&strbcall_lock);
3684 3684 runbufcalls();
3685 3685 mutex_enter(&strbcall_lock);
3686 3686 }
3687 3687 if (strbcalls.bc_head != NULL) {
3688 3688 STRSTAT(bcwaits);
3689 3689 /* Wait for memory to become available */
3690 3690 CALLB_CPR_SAFE_BEGIN(&cprinfo);
3691 3691 (void) cv_reltimedwait(&memavail_cv, &strbcall_lock,
3692 3692 SEC_TO_TICK(60), TR_CLOCK_TICK);
3693 3693 CALLB_CPR_SAFE_END(&cprinfo, &strbcall_lock);
3694 3694 }
3695 3695
3696 3696 /* Wait for new work to arrive */
3697 3697 if (strbcalls.bc_head == NULL) {
3698 3698 CALLB_CPR_SAFE_BEGIN(&cprinfo);
3699 3699 cv_wait(&strbcall_cv, &strbcall_lock);
3700 3700 CALLB_CPR_SAFE_END(&cprinfo, &strbcall_lock);
3701 3701 }
3702 3702 }
3703 3703 }
3704 3704
3705 3705 /*
3706 3706 * Background processing of streams background tasks which failed
3707 3707 * taskq_dispatch.
3708 3708 */
3709 3709 static void
3710 3710 streams_qbkgrnd_service(void)
3711 3711 {
3712 3712 callb_cpr_t cprinfo;
3713 3713 queue_t *q;
3714 3714
3715 3715 CALLB_CPR_INIT(&cprinfo, &service_queue, callb_generic_cpr,
3716 3716 "streams_bkgrnd_service");
3717 3717
3718 3718 mutex_enter(&service_queue);
3719 3719
3720 3720 for (;;) {
3721 3721 /*
3722 3722 * Wait for work to arrive.
3723 3723 */
3724 3724 while ((freebs_list == NULL) && (qhead == NULL)) {
3725 3725 CALLB_CPR_SAFE_BEGIN(&cprinfo);
3726 3726 cv_wait(&services_to_run, &service_queue);
3727 3727 CALLB_CPR_SAFE_END(&cprinfo, &service_queue);
3728 3728 }
3729 3729 /*
3730 3730 * Handle all pending freebs requests to free memory.
3731 3731 */
3732 3732 while (freebs_list != NULL) {
3733 3733 mblk_t *mp = freebs_list;
3734 3734 freebs_list = mp->b_next;
3735 3735 mutex_exit(&service_queue);
3736 3736 mblk_free(mp);
3737 3737 mutex_enter(&service_queue);
3738 3738 }
3739 3739 /*
3740 3740 * Run pending queues.
3741 3741 */
3742 3742 while (qhead != NULL) {
3743 3743 DQ(q, qhead, qtail, q_link);
3744 3744 ASSERT(q != NULL);
3745 3745 mutex_exit(&service_queue);
3746 3746 queue_service(q);
3747 3747 mutex_enter(&service_queue);
3748 3748 }
3749 3749 ASSERT(qhead == NULL && qtail == NULL);
3750 3750 }
3751 3751 }
3752 3752
3753 3753 /*
3754 3754 * Background processing of streams background tasks which failed
3755 3755 * taskq_dispatch.
3756 3756 */
3757 3757 static void
3758 3758 streams_sqbkgrnd_service(void)
3759 3759 {
3760 3760 callb_cpr_t cprinfo;
3761 3761 syncq_t *sq;
3762 3762
3763 3763 CALLB_CPR_INIT(&cprinfo, &service_queue, callb_generic_cpr,
3764 3764 "streams_sqbkgrnd_service");
3765 3765
3766 3766 mutex_enter(&service_queue);
3767 3767
3768 3768 for (;;) {
3769 3769 /*
3770 3770 * Wait for work to arrive.
3771 3771 */
3772 3772 while (sqhead == NULL) {
3773 3773 CALLB_CPR_SAFE_BEGIN(&cprinfo);
3774 3774 cv_wait(&syncqs_to_run, &service_queue);
3775 3775 CALLB_CPR_SAFE_END(&cprinfo, &service_queue);
3776 3776 }
3777 3777
3778 3778 /*
3779 3779 * Run pending syncqs.
3780 3780 */
3781 3781 while (sqhead != NULL) {
3782 3782 DQ(sq, sqhead, sqtail, sq_next);
3783 3783 ASSERT(sq != NULL);
3784 3784 ASSERT(sq->sq_svcflags & SQ_BGTHREAD);
3785 3785 mutex_exit(&service_queue);
3786 3786 syncq_service(sq);
3787 3787 mutex_enter(&service_queue);
3788 3788 }
3789 3789 }
3790 3790 }
3791 3791
3792 3792 /*
3793 3793 * Disable the syncq and wait for background syncq processing to complete.
3794 3794 * If the syncq is placed on the sqhead/sqtail queue, try to remove it from the
3795 3795 * list.
3796 3796 */
3797 3797 void
3798 3798 wait_sq_svc(syncq_t *sq)
3799 3799 {
3800 3800 mutex_enter(SQLOCK(sq));
3801 3801 sq->sq_svcflags |= SQ_DISABLED;
3802 3802 if (sq->sq_svcflags & SQ_BGTHREAD) {
3803 3803 syncq_t *sq_chase;
3804 3804 syncq_t *sq_curr;
3805 3805 int removed;
3806 3806
3807 3807 ASSERT(sq->sq_servcount == 1);
3808 3808 mutex_enter(&service_queue);
3809 3809 RMQ(sq, sqhead, sqtail, sq_next, sq_chase, sq_curr, removed);
3810 3810 mutex_exit(&service_queue);
3811 3811 if (removed) {
3812 3812 sq->sq_svcflags &= ~SQ_BGTHREAD;
3813 3813 sq->sq_servcount = 0;
3814 3814 STRSTAT(sqremoved);
3815 3815 goto done;
3816 3816 }
3817 3817 }
3818 3818 while (sq->sq_servcount != 0) {
3819 3819 sq->sq_flags |= SQ_WANTWAKEUP;
3820 3820 cv_wait(&sq->sq_wait, SQLOCK(sq));
3821 3821 }
3822 3822 done:
3823 3823 mutex_exit(SQLOCK(sq));
3824 3824 }
3825 3825
3826 3826 /*
3827 3827 * Put a syncq on the list of syncq's to be serviced by the sqthread.
3828 3828 * Add the argument to the end of the sqhead list and set the flag
3829 3829 * indicating this syncq has been enabled. If it has already been
3830 3830 * enabled, don't do anything.
3831 3831 * This routine assumes that SQLOCK is held.
3832 3832 * NOTE that the lock order is to have the SQLOCK first,
3833 3833 * so if the service_syncq lock is held, we need to release it
3834 3834 * before acquiring the SQLOCK (mostly relevant for the background
3835 3835 * thread, and this seems to be common among the STREAMS global locks).
3836 3836 * Note that the sq_svcflags are protected by the SQLOCK.
3837 3837 */
3838 3838 void
3839 3839 sqenable(syncq_t *sq)
3840 3840 {
3841 3841 /*
3842 3842 * This is probably not important except for where I believe it
3843 3843 * is being called. At that point, it should be held (and it
3844 3844 * is a pain to release it just for this routine, so don't do
3845 3845 * it).
3846 3846 */
3847 3847 ASSERT(MUTEX_HELD(SQLOCK(sq)));
3848 3848
3849 3849 IMPLY(sq->sq_servcount == 0, sq->sq_next == NULL);
3850 3850 IMPLY(sq->sq_next != NULL, sq->sq_svcflags & SQ_BGTHREAD);
3851 3851
3852 3852 /*
3853 3853 * Do not put on list if background thread is scheduled or
3854 3854 * syncq is disabled.
3855 3855 */
3856 3856 if (sq->sq_svcflags & (SQ_DISABLED | SQ_BGTHREAD))
3857 3857 return;
3858 3858
3859 3859 /*
3860 3860 * Check whether we should enable sq at all.
3861 3861 * Non PERMOD syncqs may be drained by at most one thread.
3862 3862 * PERMOD syncqs may be drained by several threads but we limit the
3863 3863 * total amount to the lesser of
3864 3864 	 * total number of such threads to the lesser of
3865 3865 	 *	the number of queues on the syncq and
3866 3866 	 *	the number of CPUs.
3867 3867 if (sq->sq_servcount != 0) {
3868 3868 if (((sq->sq_type & SQ_PERMOD) == 0) ||
3869 3869 (sq->sq_servcount >= MIN(sq->sq_nqueues, ncpus_online))) {
3870 3870 STRSTAT(sqtoomany);
3871 3871 return;
3872 3872 }
3873 3873 }
3874 3874
3875 3875 sq->sq_tstamp = ddi_get_lbolt();
3876 3876 STRSTAT(sqenables);
3877 3877
3878 3878 /* Attempt a taskq dispatch */
3879 3879 sq->sq_servid = (void *)taskq_dispatch(streams_taskq,
3880 3880 (task_func_t *)syncq_service, sq, TQ_NOSLEEP | TQ_NOQUEUE);
3881 3881 if (sq->sq_servid != NULL) {
3882 3882 sq->sq_servcount++;
3883 3883 return;
3884 3884 }
3885 3885
3886 3886 /*
3887 3887 * This taskq dispatch failed, but a previous one may have succeeded.
3888 3888 * Don't try to schedule on the background thread whilst there is
3889 3889 * outstanding taskq processing.
3890 3890 */
3891 3891 if (sq->sq_servcount != 0)
3892 3892 return;
3893 3893
3894 3894 /*
3895 3895 * System is low on resources and can't perform a non-sleeping
3896 3896 * dispatch. Schedule the syncq for a background thread and mark the
3897 3897 * syncq to avoid any further taskq dispatch attempts.
3898 3898 */
3899 3899 mutex_enter(&service_queue);
3900 3900 STRSTAT(taskqfails);
3901 3901 ENQUEUE(sq, sqhead, sqtail, sq_next);
3902 3902 sq->sq_svcflags |= SQ_BGTHREAD;
3903 3903 sq->sq_servcount = 1;
3904 3904 cv_signal(&syncqs_to_run);
3905 3905 mutex_exit(&service_queue);
3906 3906 }
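
/*
 * Illustrative sketch (editor's annotation, not part of strsubr.c):
 * the dispatch-or-queue shape used by sqenable() in isolation. Try
 * a non-sleeping, non-queueing taskq dispatch first; only when the
 * system is too low on memory for that, hand the work to the
 * dedicated background thread. work_head, work_tail, and work_cv
 * are hypothetical.
 */
	if (taskq_dispatch(streams_taskq, (task_func_t *)syncq_service,
	    sq, TQ_NOSLEEP | TQ_NOQUEUE) == NULL) {
		mutex_enter(&service_queue);
		ENQUEUE(sq, work_head, work_tail, sq_next);
		cv_signal(&work_cv);
		mutex_exit(&service_queue);
	}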
3907 3907
3908 3908 /*
3909 3909 * Note: fifo_close() depends on the mblk_t on the queue being freed
3910 3910 * asynchronously. The asynchronous freeing of messages breaks the
3911 3911 * recursive call chain of fifo_close() while there are I_SENDFD type of
3912 3912 * messages referring to other file pointers on the queue. Then when
3913 3913 * closing pipes it can avoid stack overflow in case of daisy-chained
3914 3914 * pipes, and also avoid deadlock in case of fifonode_t pairs (which
3915 3915 * share the same fifolock_t).
3916 3916 *
3917 3917 * No need to kpreempt_disable to access cpu_seqid. If we migrate and
3918 3918 * the esb queue does not match the new CPU, that is OK.
3919 3919 */
3920 3920 void
3921 3921 freebs_enqueue(mblk_t *mp, dblk_t *dbp)
3922 3922 {
3923 3923 int qindex = CPU->cpu_seqid >> esbq_log2_cpus_per_q;
3924 3924 esb_queue_t *eqp;
3925 3925
3926 3926 ASSERT(dbp->db_mblk == mp);
3927 3927 ASSERT(qindex < esbq_nelem);
3928 3928
3929 3929 eqp = system_esbq_array;
3930 3930 if (eqp != NULL) {
3931 3931 eqp += qindex;
3932 3932 } else {
3933 3933 mutex_enter(&esbq_lock);
3934 3934 if (kmem_ready && system_esbq_array == NULL)
3935 3935 system_esbq_array = (esb_queue_t *)kmem_zalloc(
3936 3936 esbq_nelem * sizeof (esb_queue_t), KM_NOSLEEP);
3937 3937 mutex_exit(&esbq_lock);
3938 3938 eqp = system_esbq_array;
3939 3939 if (eqp != NULL)
3940 3940 eqp += qindex;
3941 3941 else
3942 3942 eqp = &system_esbq;
3943 3943 }
3944 3944
3945 3945 /*
3946 3946 	 * Check data sanity. The dblock should have a non-NULL free function.
3947 3947 	 * It is better to panic here than later, when the dblock is freed
3948 3948 	 * asynchronously and the context is lost.
3949 3949 */
3950 3950 if (dbp->db_frtnp->free_func == NULL) {
3951 3951 panic("freebs_enqueue: dblock %p has a NULL free callback",
3952 3952 (void *)dbp);
3953 3953 }
3954 3954
3955 3955 mutex_enter(&eqp->eq_lock);
3956 3956 /* queue the new mblk on the esballoc queue */
3957 3957 if (eqp->eq_head == NULL) {
3958 3958 eqp->eq_head = eqp->eq_tail = mp;
3959 3959 } else {
3960 3960 eqp->eq_tail->b_next = mp;
3961 3961 eqp->eq_tail = mp;
3962 3962 }
3963 3963 eqp->eq_len++;
3964 3964
3965 3965 /* If we're the first thread to reach the threshold, process */
3966 3966 if (eqp->eq_len >= esbq_max_qlen &&
3967 3967 !(eqp->eq_flags & ESBQ_PROCESSING))
3968 3968 esballoc_process_queue(eqp);
3969 3969
3970 3970 esballoc_set_timer(eqp, esbq_timeout);
3971 3971 mutex_exit(&eqp->eq_lock);
3972 3972 }
3973 3973
3974 3974 static void
3975 3975 esballoc_process_queue(esb_queue_t *eqp)
3976 3976 {
3977 3977 mblk_t *mp;
3978 3978
3979 3979 ASSERT(MUTEX_HELD(&eqp->eq_lock));
3980 3980
3981 3981 eqp->eq_flags |= ESBQ_PROCESSING;
3982 3982
3983 3983 do {
3984 3984 /*
3985 3985 * Detach the message chain for processing.
3986 3986 */
3987 3987 mp = eqp->eq_head;
3988 3988 eqp->eq_tail->b_next = NULL;
3989 3989 eqp->eq_head = eqp->eq_tail = NULL;
3990 3990 eqp->eq_len = 0;
3991 3991 mutex_exit(&eqp->eq_lock);
3992 3992
3993 3993 /*
3994 3994 * Process the message chain.
3995 3995 */
3996 3996 esballoc_enqueue_mblk(mp);
3997 3997 mutex_enter(&eqp->eq_lock);
3998 3998 } while ((eqp->eq_len >= esbq_max_qlen) && (eqp->eq_len > 0));
3999 3999
4000 4000 eqp->eq_flags &= ~ESBQ_PROCESSING;
4001 4001 }
4002 4002
4003 4003 /*
4004 4004 * taskq callback routine to free esballoced mblk's
4005 4005 */
4006 4006 static void
4007 4007 esballoc_mblk_free(mblk_t *mp)
4008 4008 {
4009 4009 mblk_t *nextmp;
4010 4010
4011 4011 for (; mp != NULL; mp = nextmp) {
4012 4012 nextmp = mp->b_next;
4013 4013 mp->b_next = NULL;
4014 4014 mblk_free(mp);
4015 4015 }
4016 4016 }
4017 4017
4018 4018 static void
4019 4019 esballoc_enqueue_mblk(mblk_t *mp)
4020 4020 {
4021 4021
4022 4022 if (taskq_dispatch(system_taskq, (task_func_t *)esballoc_mblk_free, mp,
4023 4023 TQ_NOSLEEP) == NULL) {
4024 4024 mblk_t *first_mp = mp;
4025 4025 /*
4026 4026 * System is low on resources and can't perform a non-sleeping
4027 4027 * dispatch. Schedule for a background thread.
4028 4028 */
4029 4029 mutex_enter(&service_queue);
4030 4030 STRSTAT(taskqfails);
4031 4031
4032 4032 while (mp->b_next != NULL)
4033 4033 mp = mp->b_next;
4034 4034
4035 4035 mp->b_next = freebs_list;
4036 4036 freebs_list = first_mp;
4037 4037 cv_signal(&services_to_run);
4038 4038 mutex_exit(&service_queue);
4039 4039 }
4040 4040 }
4041 4041
4042 4042 static void
4043 4043 esballoc_timer(void *arg)
4044 4044 {
4045 4045 esb_queue_t *eqp = arg;
4046 4046
4047 4047 mutex_enter(&eqp->eq_lock);
4048 4048 eqp->eq_flags &= ~ESBQ_TIMER;
4049 4049
4050 4050 if (!(eqp->eq_flags & ESBQ_PROCESSING) &&
4051 4051 eqp->eq_len > 0)
4052 4052 esballoc_process_queue(eqp);
4053 4053
4054 4054 esballoc_set_timer(eqp, esbq_timeout);
4055 4055 mutex_exit(&eqp->eq_lock);
4056 4056 }
4057 4057
4058 4058 static void
4059 4059 esballoc_set_timer(esb_queue_t *eqp, clock_t eq_timeout)
4060 4060 {
4061 4061 ASSERT(MUTEX_HELD(&eqp->eq_lock));
4062 4062
4063 4063 if (eqp->eq_len > 0 && !(eqp->eq_flags & ESBQ_TIMER)) {
4064 4064 (void) timeout(esballoc_timer, eqp, eq_timeout);
4065 4065 eqp->eq_flags |= ESBQ_TIMER;
4066 4066 }
4067 4067 }
4068 4068
4069 4069 /*
4070 4070  * Set up the esbq array length based upon NCPU scaled by CPUs per
4071 4071  * queue. Use the static system_esbq until kmem_ready, at which point
4072 4072  * freebs_enqueue() can create the array.
4073 4073 */
4074 4074 void
4075 4075 esballoc_queue_init(void)
4076 4076 {
4077 4077 esbq_log2_cpus_per_q = highbit(esbq_cpus_per_q - 1);
4078 4078 esbq_cpus_per_q = 1 << esbq_log2_cpus_per_q;
4079 4079 esbq_nelem = howmany(NCPU, esbq_cpus_per_q);
4080 4080 system_esbq.eq_len = 0;
4081 4081 system_esbq.eq_head = system_esbq.eq_tail = NULL;
4082 4082 system_esbq.eq_flags = 0;
4083 4083 }
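
/*
 * Worked example of the sizing math above (editor's annotation, not
 * part of strsubr.c): with esbq_cpus_per_q = 4 on a 30-CPU machine,
 *	esbq_log2_cpus_per_q = highbit(4 - 1) = 2	(highbit(3) == 2)
 *	esbq_cpus_per_q	     = 1 << 2 = 4		(power-of-2 rounding)
 *	esbq_nelem	     = howmany(30, 4) = 8
 * and freebs_enqueue() then selects a queue with
 *	qindex = CPU->cpu_seqid >> esbq_log2_cpus_per_q;
 * so CPUs 0-3 share queue 0, CPUs 4-7 share queue 1, and so on.
 */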
4084 4084
4085 4085 /*
4086 4086 * Set the QBACK or QB_BACK flag in the given queue for
4087 4087 * the given priority band.
4088 4088 */
4089 4089 void
4090 4090 setqback(queue_t *q, unsigned char pri)
4091 4091 {
4092 4092 int i;
4093 4093 qband_t *qbp;
4094 4094 qband_t **qbpp;
4095 4095
4096 4096 ASSERT(MUTEX_HELD(QLOCK(q)));
4097 4097 if (pri != 0) {
4098 4098 if (pri > q->q_nband) {
4099 4099 qbpp = &q->q_bandp;
4100 4100 while (*qbpp)
4101 4101 qbpp = &(*qbpp)->qb_next;
4102 4102 while (pri > q->q_nband) {
4103 4103 if ((*qbpp = allocband()) == NULL) {
4104 4104 cmn_err(CE_WARN,
4105 4105 "setqback: can't allocate qband\n");
4106 4106 return;
4107 4107 }
4108 4108 (*qbpp)->qb_hiwat = q->q_hiwat;
4109 4109 (*qbpp)->qb_lowat = q->q_lowat;
4110 4110 q->q_nband++;
4111 4111 qbpp = &(*qbpp)->qb_next;
4112 4112 }
4113 4113 }
4114 4114 qbp = q->q_bandp;
4115 4115 i = pri;
4116 4116 while (--i)
4117 4117 qbp = qbp->qb_next;
4118 4118 qbp->qb_flag |= QB_BACK;
4119 4119 } else {
4120 4120 q->q_flag |= QBACK;
4121 4121 }
4122 4122 }
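
/*
 * Illustrative trace (editor's annotation, not part of strsubr.c):
 * calling setqback(q, 3) on a queue with q_nband == 1 allocates two
 * more qband_t's (inheriting q_hiwat/q_lowat), leaving q_nband == 3,
 * and then sets QB_BACK on the third band:
 *
 *	q_bandp -> band1 -> band2 -> band3	(QB_BACK set on band3)
 *
 * setqback(q, 0) instead sets QBACK on the queue itself.
 */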
4123 4123
4124 4124 int
4125 4125 strcopyin(void *from, void *to, size_t len, int copyflag)
4126 4126 {
4127 4127 if (copyflag & U_TO_K) {
4128 4128 ASSERT((copyflag & K_TO_K) == 0);
4129 4129 if (copyin(from, to, len))
4130 4130 return (EFAULT);
4131 4131 } else {
4132 4132 ASSERT(copyflag & K_TO_K);
4133 4133 bcopy(from, to, len);
4134 4134 }
4135 4135 return (0);
4136 4136 }
4137 4137
4138 4138 int
4139 4139 strcopyout(void *from, void *to, size_t len, int copyflag)
4140 4140 {
4141 4141 if (copyflag & U_TO_K) {
4142 4142 if (copyout(from, to, len))
4143 4143 return (EFAULT);
4144 4144 } else {
4145 4145 ASSERT(copyflag & K_TO_K);
4146 4146 bcopy(from, to, len);
4147 4147 }
4148 4148 return (0);
4149 4149 }
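
/*
 * Illustrative sketch (editor's annotation, not part of strsubr.c):
 * the copyflag argument lets one code path serve both a user ioctl
 * (U_TO_K) and an in-kernel caller (K_TO_K). 'arg' and 'from_user'
 * are hypothetical.
 */
	struct strioctl ic;
	int err;

	err = strcopyin((void *)arg, &ic, sizeof (ic),
	    from_user ? U_TO_K : K_TO_K);
	if (err != 0)
		return (err);	/* EFAULT is only possible for U_TO_K */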
4150 4150
4151 4151 /*
4152 4152 * strsignal_nolock() posts a signal to the process(es) at the stream head.
4153 4153 * It assumes that the stream head lock is already held, whereas strsignal()
4154 4154 * acquires the lock first. This routine was created because a few callers
4155 4155 * release the stream head lock before calling only to re-acquire it after
4156 4156 * it returns.
4157 4157 */
4158 4158 void
4159 4159 strsignal_nolock(stdata_t *stp, int sig, uchar_t band)
4160 4160 {
4161 4161 ASSERT(MUTEX_HELD(&stp->sd_lock));
4162 4162 switch (sig) {
4163 4163 case SIGPOLL:
4164 4164 if (stp->sd_sigflags & S_MSG)
4165 4165 strsendsig(stp->sd_siglist, S_MSG, band, 0);
4166 4166 break;
4167 4167 default:
4168 4168 if (stp->sd_pgidp)
4169 4169 pgsignal(stp->sd_pgidp, sig);
4170 4170 break;
4171 4171 }
4172 4172 }
4173 4173
4174 4174 void
4175 4175 strsignal(stdata_t *stp, int sig, int32_t band)
4176 4176 {
4177 4177 TRACE_3(TR_FAC_STREAMS_FR, TR_SENDSIG,
4178 4178 "strsignal:%p, %X, %X", stp, sig, band);
4179 4179
4180 4180 mutex_enter(&stp->sd_lock);
4181 4181 switch (sig) {
4182 4182 case SIGPOLL:
4183 4183 if (stp->sd_sigflags & S_MSG)
4184 4184 strsendsig(stp->sd_siglist, S_MSG, (uchar_t)band, 0);
4185 4185 break;
4186 4186
4187 4187 default:
4188 4188 if (stp->sd_pgidp) {
4189 4189 pgsignal(stp->sd_pgidp, sig);
4190 4190 }
4191 4191 break;
4192 4192 }
4193 4193 mutex_exit(&stp->sd_lock);
4194 4194 }
4195 4195
4196 4196 void
4197 4197 strhup(stdata_t *stp)
4198 4198 {
4199 4199 ASSERT(mutex_owned(&stp->sd_lock));
4200 4200 pollwakeup(&stp->sd_pollist, POLLHUP);
4201 4201 if (stp->sd_sigflags & S_HANGUP)
4202 4202 strsendsig(stp->sd_siglist, S_HANGUP, 0, 0);
4203 4203 }
4204 4204
4205 4205 /*
4206 4206 * Backenable the first queue upstream from `q' with a service procedure.
4207 4207 */
4208 4208 void
4209 4209 backenable(queue_t *q, uchar_t pri)
4210 4210 {
4211 4211 queue_t *nq;
4212 4212
4213 4213 /*
4214 4214 * Our presence might not prevent other modules in our own
4215 4215 * stream from popping/pushing since the caller of getq might not
4216 4216 * have a claim on the queue (some drivers do a getq on somebody
4217 4217 * else's queue - they know that the queue itself is not going away
4218 4218 * but the framework has to guarantee q_next in that stream).
4219 4219 */
4220 4220 claimstr(q);
4221 4221
4222 4222 /* Find nearest back queue with service proc */
4223 4223 for (nq = backq(q); nq && !nq->q_qinfo->qi_srvp; nq = backq(nq)) {
4224 4224 ASSERT(STRMATED(q->q_stream) || STREAM(q) == STREAM(nq));
4225 4225 }
4226 4226
4227 4227 if (nq) {
4228 4228 kthread_t *freezer;
4229 4229 /*
4230 4230 * backenable can be called either with no locks held
4231 4231 * or with the stream frozen (the latter occurs when a module
4232 4232 * calls rmvq with the stream frozen). If the stream is frozen
4233 4233 * by the caller the caller will hold all qlocks in the stream.
4234 4234 * Note that a frozen stream doesn't freeze a mated stream,
4235 4235 * so we explicitly check for that.
4236 4236 */
4237 4237 freezer = STREAM(q)->sd_freezer;
4238 4238 if (freezer != curthread || STREAM(q) != STREAM(nq)) {
4239 4239 mutex_enter(QLOCK(nq));
4240 4240 }
4241 4241 #ifdef DEBUG
4242 4242 else {
4243 4243 ASSERT(frozenstr(q));
4244 4244 ASSERT(MUTEX_HELD(QLOCK(q)));
4245 4245 ASSERT(MUTEX_HELD(QLOCK(nq)));
4246 4246 }
4247 4247 #endif
4248 4248 setqback(nq, pri);
4249 4249 qenable_locked(nq);
4250 4250 if (freezer != curthread || STREAM(q) != STREAM(nq))
4251 4251 mutex_exit(QLOCK(nq));
4252 4252 }
4253 4253 releasestr(q);
4254 4254 }
4255 4255
4256 4256 /*
4257 4257 * Return the appropriate errno when one of flags_to_check is set
4258 4258 * in sd_flags. Uses the exported error routines if they are set.
4259 4259  * Will return 0 if no error is set (or if the exported error routines
4260 4260 * do not return an error).
4261 4261 *
4262 4262 * If there is both a read and write error to check, we prefer the read error.
4263 4263 * Also, give preference to recorded errno's over the error functions.
4264 4264 * The flags that are handled are:
4265 4265 * STPLEX return EINVAL
4266 4266 * STRDERR return sd_rerror (and clear if STRDERRNONPERSIST)
4267 4267 * STWRERR return sd_werror (and clear if STWRERRNONPERSIST)
4268 4268 * STRHUP return sd_werror
4269 4269 *
4270 4270 * If the caller indicates that the operation is a peek, a nonpersistent error
4271 4271 * is not cleared.
4272 4272 */
4273 4273 int
4274 4274 strgeterr(stdata_t *stp, int32_t flags_to_check, int ispeek)
4275 4275 {
4276 4276 int32_t sd_flag = stp->sd_flag & flags_to_check;
4277 4277 int error = 0;
4278 4278
4279 4279 ASSERT(MUTEX_HELD(&stp->sd_lock));
4280 4280 ASSERT((flags_to_check & ~(STRDERR|STWRERR|STRHUP|STPLEX)) == 0);
4281 4281 if (sd_flag & STPLEX)
4282 4282 error = EINVAL;
4283 4283 else if (sd_flag & STRDERR) {
4284 4284 error = stp->sd_rerror;
4285 4285 if ((stp->sd_flag & STRDERRNONPERSIST) && !ispeek) {
4286 4286 /*
4287 4287 			 * Read errors are non-persistent, i.e. discarded once
4288 4288 			 * returned to a non-peeking caller.
4289 4289 */
4290 4290 stp->sd_rerror = 0;
4291 4291 stp->sd_flag &= ~STRDERR;
4292 4292 }
4293 4293 if (error == 0 && stp->sd_rderrfunc != NULL) {
4294 4294 int clearerr = 0;
4295 4295
4296 4296 error = (*stp->sd_rderrfunc)(stp->sd_vnode, ispeek,
4297 4297 &clearerr);
4298 4298 if (clearerr) {
4299 4299 stp->sd_flag &= ~STRDERR;
4300 4300 stp->sd_rderrfunc = NULL;
4301 4301 }
4302 4302 }
4303 4303 } else if (sd_flag & STWRERR) {
4304 4304 error = stp->sd_werror;
4305 4305 if ((stp->sd_flag & STWRERRNONPERSIST) && !ispeek) {
4306 4306 /*
4307 4307 			 * Write errors are non-persistent, i.e. discarded once
4308 4308 			 * returned to a non-peeking caller.
4309 4309 */
4310 4310 stp->sd_werror = 0;
4311 4311 stp->sd_flag &= ~STWRERR;
4312 4312 }
4313 4313 if (error == 0 && stp->sd_wrerrfunc != NULL) {
4314 4314 int clearerr = 0;
4315 4315
4316 4316 error = (*stp->sd_wrerrfunc)(stp->sd_vnode, ispeek,
4317 4317 &clearerr);
4318 4318 if (clearerr) {
4319 4319 stp->sd_flag &= ~STWRERR;
4320 4320 stp->sd_wrerrfunc = NULL;
4321 4321 }
4322 4322 }
4323 4323 } else if (sd_flag & STRHUP) {
4324 4324 /* sd_werror set when STRHUP */
4325 4325 error = stp->sd_werror;
4326 4326 }
4327 4327 return (error);
4328 4328 }
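
/*
 * Illustrative sketch (editor's annotation, not part of strsubr.c):
 * the typical call site, mirroring the checks in strstartplumb()
 * below. Under sd_lock, bail out if any error/hangup condition is
 * set; passing ispeek == 0 consumes a non-persistent error.
 */
	mutex_enter(&stp->sd_lock);
	if (stp->sd_flag & (STRDERR|STWRERR|STRHUP|STPLEX)) {
		error = strgeterr(stp, STRDERR|STWRERR|STRHUP|STPLEX, 0);
		if (error != 0) {
			mutex_exit(&stp->sd_lock);
			return (error);
		}
	}
	mutex_exit(&stp->sd_lock);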
4329 4329
4330 4330
4331 4331 /*
4332 4332 * Single-thread open/close/push/pop
4333 4333 * for twisted streams also
4334 4334 */
4335 4335 int
4336 4336 strstartplumb(stdata_t *stp, int flag, int cmd)
4337 4337 {
4338 4338 int waited = 1;
4339 4339 int error = 0;
4340 4340
4341 4341 if (STRMATED(stp)) {
4342 4342 struct stdata *stmatep = stp->sd_mate;
4343 4343
4344 4344 STRLOCKMATES(stp);
4345 4345 while (waited) {
4346 4346 waited = 0;
4347 4347 while (stmatep->sd_flag & (STWOPEN|STRCLOSE|STRPLUMB)) {
4348 4348 if ((cmd == I_POP) &&
4349 4349 (flag & (FNDELAY|FNONBLOCK))) {
4350 4350 STRUNLOCKMATES(stp);
4351 4351 return (EAGAIN);
4352 4352 }
4353 4353 waited = 1;
4354 4354 mutex_exit(&stp->sd_lock);
4355 4355 if (!cv_wait_sig(&stmatep->sd_monitor,
4356 4356 &stmatep->sd_lock)) {
4357 4357 mutex_exit(&stmatep->sd_lock);
4358 4358 return (EINTR);
4359 4359 }
4360 4360 mutex_exit(&stmatep->sd_lock);
4361 4361 STRLOCKMATES(stp);
4362 4362 }
4363 4363 while (stp->sd_flag & (STWOPEN|STRCLOSE|STRPLUMB)) {
4364 4364 if ((cmd == I_POP) &&
4365 4365 (flag & (FNDELAY|FNONBLOCK))) {
4366 4366 STRUNLOCKMATES(stp);
4367 4367 return (EAGAIN);
4368 4368 }
4369 4369 waited = 1;
4370 4370 mutex_exit(&stmatep->sd_lock);
4371 4371 if (!cv_wait_sig(&stp->sd_monitor,
4372 4372 &stp->sd_lock)) {
4373 4373 mutex_exit(&stp->sd_lock);
4374 4374 return (EINTR);
4375 4375 }
4376 4376 mutex_exit(&stp->sd_lock);
4377 4377 STRLOCKMATES(stp);
4378 4378 }
4379 4379 if (stp->sd_flag & (STRDERR|STWRERR|STRHUP|STPLEX)) {
4380 4380 error = strgeterr(stp,
4381 4381 STRDERR|STWRERR|STRHUP|STPLEX, 0);
4382 4382 if (error != 0) {
4383 4383 STRUNLOCKMATES(stp);
4384 4384 return (error);
4385 4385 }
4386 4386 }
4387 4387 }
4388 4388 stp->sd_flag |= STRPLUMB;
4389 4389 STRUNLOCKMATES(stp);
4390 4390 } else {
4391 4391 mutex_enter(&stp->sd_lock);
4392 4392 while (stp->sd_flag & (STWOPEN|STRCLOSE|STRPLUMB)) {
4393 4393 if (((cmd == I_POP) || (cmd == _I_REMOVE)) &&
4394 4394 (flag & (FNDELAY|FNONBLOCK))) {
4395 4395 mutex_exit(&stp->sd_lock);
4396 4396 return (EAGAIN);
4397 4397 }
4398 4398 if (!cv_wait_sig(&stp->sd_monitor, &stp->sd_lock)) {
4399 4399 mutex_exit(&stp->sd_lock);
4400 4400 return (EINTR);
4401 4401 }
4402 4402 if (stp->sd_flag & (STRDERR|STWRERR|STRHUP|STPLEX)) {
4403 4403 error = strgeterr(stp,
4404 4404 STRDERR|STWRERR|STRHUP|STPLEX, 0);
4405 4405 if (error != 0) {
4406 4406 mutex_exit(&stp->sd_lock);
4407 4407 return (error);
4408 4408 }
4409 4409 }
4410 4410 }
4411 4411 stp->sd_flag |= STRPLUMB;
4412 4412 mutex_exit(&stp->sd_lock);
4413 4413 }
4414 4414 return (0);
4415 4415 }
4416 4416
4417 4417 /*
4418 4418 * Complete the plumbing operation associated with stream `stp'.
4419 4419 */
4420 4420 void
4421 4421 strendplumb(stdata_t *stp)
4422 4422 {
4423 4423 ASSERT(MUTEX_HELD(&stp->sd_lock));
4424 4424 ASSERT(stp->sd_flag & STRPLUMB);
4425 4425 stp->sd_flag &= ~STRPLUMB;
4426 4426 cv_broadcast(&stp->sd_monitor);
4427 4427 }
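
/*
 * Illustrative sketch (editor's annotation, not part of strsubr.c):
 * how a plumbing operation brackets itself with the two routines
 * above; this is the shape of the I_PUSH/I_POP style paths.
 */
	if ((error = strstartplumb(stp, flag, cmd)) != 0)
		return (error);
	/* ... single-threaded plumbing work (e.g. push or pop) ... */
	mutex_enter(&stp->sd_lock);
	strendplumb(stp);		/* clears STRPLUMB, wakes waiters */
	mutex_exit(&stp->sd_lock);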
4428 4428
4429 4429 /*
4430 4430 * This describes how the STREAMS framework handles synchronization
4431 4431 * during open/push and close/pop.
4432 4432 * The key interfaces for open and close are qprocson and qprocsoff,
4433 4433  * respectively. While the close case in general is harder, open and
4434 4434  * close have significant similarities.
4435 4435 *
4436 4436 * During close the STREAMS framework has to both ensure that there
4437 4437 * are no stale references to the queue pair (and syncq) that
4438 4438 * are being closed and also provide the guarantees that are documented
4439 4439 * in qprocsoff(9F).
4440 4440  * If there are stale references to the queue that is closing, it can
4441 4441 * result in kernel memory corruption or kernel panics.
4442 4442 *
4443 4443  * Note that it is up to the module/driver to ensure that it itself
4444 4444 * does not have any stale references to the closing queues once its close
4445 4445 * routine returns. This includes:
4446 4446 * - Cancelling any timeout/bufcall/qtimeout/qbufcall callback routines
4447 4447 * associated with the queues. For timeout and bufcall callbacks the
4448 4448 * module/driver also has to ensure (or wait for) any callbacks that
4449 4449 * are in progress.
4450 4450 * - If the module/driver is using esballoc it has to ensure that any
4451 4451 * esballoc free functions do not refer to a queue that has closed.
4452 4452 * (Note that in general the close routine can not wait for the esballoc'ed
4453 4453 * messages to be freed since that can cause a deadlock.)
4454 4454 * - Cancelling any interrupts that refer to the closing queues and
4455 4455 * also ensuring that there are no interrupts in progress that will
4456 4456 * refer to the closing queues once the close routine returns.
4457 4457 * - For multiplexors removing any driver global state that refers to
4458 4458 * the closing queue and also ensuring that there are no threads in
4459 4459 * the multiplexor that has picked up a queue pointer but not yet
4460 4460 * finished using it.
4461 4461 *
4462 4462 * In addition, a driver/module can only reference the q_next pointer
4463 4463 * in its open, close, put, or service procedures or in a
4464 4464 * qtimeout/qbufcall callback procedure executing "on" the correct
4465 4465 * stream. Thus it can not reference the q_next pointer in an interrupt
4466 4466 * routine or a timeout, bufcall or esballoc callback routine. Likewise
4467 4467 * it can not reference q_next of a different queue e.g. in a mux that
4468 4468  * passes messages from one queue's put/service procedure to another queue.
4469 4469 * In all the cases when the driver/module can not access the q_next
4470 4470 * field it must use the *next* versions e.g. canputnext instead of
4471 4471 * canput(q->q_next) and putnextctl instead of putctl(q->q_next, ...).
4472 4472 *
4473 4473 *
4474 4474 * Assuming that the driver/module conforms to the above constraints
4475 4475 * the STREAMS framework has to avoid stale references to q_next for all
4476 4476 * the framework internal cases which include (but are not limited to):
4477 4477 * - Threads in canput/canputnext/backenable and elsewhere that are
4478 4478 * walking q_next.
4479 4479 * - Messages on a syncq that have a reference to the queue through b_queue.
4480 4480 * - Messages on an outer perimeter (syncq) that have a reference to the
4481 4481 * queue through b_queue.
4482 4482 * - Threads that use q_nfsrv (e.g. canput) to find a queue.
4483 4483 * Note that only canput and bcanput use q_nfsrv without any locking.
4484 4484 *
4485 4485  * Providing the qprocsoff(9F) guarantees means that, after qprocsoff
4486 4486  * returns, the framework has to ensure that no threads can enter the put
4487 4487  * or service routines for the closing read or write-side queue.
4488 4488 * In addition to preventing "direct" entry into the put procedures
4489 4489 * the framework also has to prevent messages being drained from
4490 4490 * the syncq or the outer perimeter.
4491 4491  * XXX Note that currently qdetach relies on D_MTOCEXCL as the only
4492 4492 * mechanism to prevent qwriter(PERIM_OUTER) from running after
4493 4493 * qprocsoff has returned.
4494 4494 * Note that if a module/driver uses put(9F) on one of its own queues
4495 4495 * it is up to the module/driver to ensure that the put() doesn't
4496 4496 * get called when the queue is closing.
4497 4497 *
4498 4498 *
4499 4499 * The framework aspects of the above "contract" is implemented by
4500 4500 * qprocsoff, removeq, and strlock:
4501 4501 * - qprocsoff (disable_svc) sets QWCLOSE to prevent runservice from
4502 4502 * entering the service procedures.
4503 4503 * - strlock acquires the sd_lock and sd_reflock to prevent putnext,
4504 4504 * canputnext, backenable etc from dereferencing the q_next that will
4505 4505 * soon change.
4506 4506 * - strlock waits for sd_refcnt to be zero to wait for e.g. any canputnext
4507 4507 * or other q_next walker that uses claimstr/releasestr to finish.
4508 4508 * - optionally for every syncq in the stream strlock acquires all the
4509 4509 * sq_lock's and waits for all sq_counts to drop to a value that indicates
4510 4510 * that no thread executes in the put or service procedures and that no
4511 4511 * thread is draining into the module/driver. This ensures that no
4512 4512 * open, close, put, service, or qtimeout/qbufcall callback procedure is
4513 4513 * currently executing hence no such thread can end up with the old stale
4514 4514 * q_next value and no canput/backenable can have the old stale
4515 4515 * q_nfsrv/q_next.
4516 4516 * - qdetach (wait_svc) makes sure that any scheduled or running threads
4517 4517 * have either finished or observed the QWCLOSE flag and gone away.
4518 4518 */
4519 4519
4520 4520
4521 4521 /*
4522 4522 * Get all the locks necessary to change q_next.
4523 4523 *
4524 4524 * Wait for sd_refcnt to reach 0 and, if sqlist is present, wait for the
4525 4525 * sq_count of each syncq in the list to drop to sq_rmqcount, indicating that
4526 4526 * the only threads inside the syncq are threads currently calling removeq().
4527 4527 * Since threads calling removeq() are in the process of removing their queues
4528 4528 * from the stream, we do not need to worry about them accessing a stale q_next
4529 4529 * pointer and thus we do not need to wait for them to exit (in fact, waiting
4530 4530 * for them can cause deadlock).
4531 4531 *
4532 4532 * This routine is subject to starvation since it does not set any flag to
4533 4533 * prevent threads from entering a module in the stream (i.e. sq_count can
4534 4534 * increase on some syncq while it is waiting on some other syncq).
4535 4535 *
4536 4536 * Assumes that only one thread attempts to call strlock for a given
4537 4537 * stream. If this is not the case the two threads would deadlock.
4538 4538 * This assumption is guaranteed since strlock is only called by insertq
4539 4539 * and removeq and streams plumbing changes are single-threaded for
4540 4540 * a given stream using the STWOPEN, STRCLOSE, and STRPLUMB flags.
4541 4541 *
4542 4542 * For pipes, it is not difficult to atomically designate a pair of streams
4543 4543 * to be mated. Once mated atomically by the framework the twisted pair remain
4544 4544 * configured that way until dismantled atomically by the framework.
4545 4545 * When plumbing takes place on a twisted stream it is necessary to ensure that
4546 4546 * this operation is done exclusively on the twisted stream since two such
4547 4547 * operations, each initiated on different ends of the pipe will deadlock
4548 4548 * waiting for each other to complete.
4549 4549 *
4550 4550 * On entry, no locks should be held.
4551 4551 * The locks acquired and held by strlock depends on a few factors.
4552 4552 * - If sqlist is non-NULL all the syncq locks in the sqlist will be acquired
4553 4553 * and held on exit and all sq_count are at an acceptable level.
4554 4554 * - In all cases, sd_lock and sd_reflock are acquired and held on exit with
4555 4555 * sd_refcnt being zero.
4556 4556 */
4557 4557
4558 4558 static void
4559 4559 strlock(struct stdata *stp, sqlist_t *sqlist)
4560 4560 {
4561 4561 syncql_t *sql, *sql2;
4562 4562 retry:
4563 4563 /*
4564 4564 * Wait for any claimstr to go away.
4565 4565 */
4566 4566 if (STRMATED(stp)) {
4567 4567 struct stdata *stp1, *stp2;
4568 4568
4569 4569 STRLOCKMATES(stp);
4570 4570 /*
4571 4571 * Note that the selection of locking order is not
4572 4572 * important, just that they are always acquired in
4573 4573 * the same order. To assure this, we choose this
4574 4574 * order based on the value of the pointer, and since
4575 4575 * the pointer will not change for the life of this
4576 4576 * pair, we will always grab the locks in the same
4577 4577 * order (and hence, prevent deadlocks).
4578 4578 */
4579 4579 if (&(stp->sd_lock) > &((stp->sd_mate)->sd_lock)) {
4580 4580 stp1 = stp;
4581 4581 stp2 = stp->sd_mate;
4582 4582 } else {
4583 4583 stp2 = stp;
4584 4584 stp1 = stp->sd_mate;
4585 4585 }
4586 4586 mutex_enter(&stp1->sd_reflock);
4587 4587 if (stp1->sd_refcnt > 0) {
4588 4588 STRUNLOCKMATES(stp);
4589 4589 cv_wait(&stp1->sd_refmonitor, &stp1->sd_reflock);
4590 4590 mutex_exit(&stp1->sd_reflock);
4591 4591 goto retry;
4592 4592 }
4593 4593 mutex_enter(&stp2->sd_reflock);
4594 4594 if (stp2->sd_refcnt > 0) {
4595 4595 STRUNLOCKMATES(stp);
4596 4596 mutex_exit(&stp1->sd_reflock);
4597 4597 cv_wait(&stp2->sd_refmonitor, &stp2->sd_reflock);
4598 4598 mutex_exit(&stp2->sd_reflock);
4599 4599 goto retry;
4600 4600 }
4601 4601 STREAM_PUTLOCKS_ENTER(stp1);
4602 4602 STREAM_PUTLOCKS_ENTER(stp2);
4603 4603 } else {
4604 4604 mutex_enter(&stp->sd_lock);
4605 4605 mutex_enter(&stp->sd_reflock);
4606 4606 while (stp->sd_refcnt > 0) {
4607 4607 mutex_exit(&stp->sd_lock);
4608 4608 cv_wait(&stp->sd_refmonitor, &stp->sd_reflock);
4609 4609 if (mutex_tryenter(&stp->sd_lock) == 0) {
4610 4610 mutex_exit(&stp->sd_reflock);
4611 4611 mutex_enter(&stp->sd_lock);
4612 4612 mutex_enter(&stp->sd_reflock);
4613 4613 }
4614 4614 }
4615 4615 STREAM_PUTLOCKS_ENTER(stp);
4616 4616 }
4617 4617
4618 4618 if (sqlist == NULL)
4619 4619 return;
4620 4620
4621 4621 for (sql = sqlist->sqlist_head; sql; sql = sql->sql_next) {
4622 4622 syncq_t *sq = sql->sql_sq;
4623 4623 uint16_t count;
4624 4624
4625 4625 mutex_enter(SQLOCK(sq));
4626 4626 count = sq->sq_count;
4627 4627 ASSERT(sq->sq_rmqcount <= count);
4628 4628 SQ_PUTLOCKS_ENTER(sq);
4629 4629 SUM_SQ_PUTCOUNTS(sq, count);
4630 4630 if (count == sq->sq_rmqcount)
4631 4631 continue;
4632 4632
4633 4633 /* Failed - drop all locks that we have acquired so far */
4634 4634 if (STRMATED(stp)) {
4635 4635 STREAM_PUTLOCKS_EXIT(stp);
4636 4636 STREAM_PUTLOCKS_EXIT(stp->sd_mate);
4637 4637 STRUNLOCKMATES(stp);
4638 4638 mutex_exit(&stp->sd_reflock);
4639 4639 mutex_exit(&stp->sd_mate->sd_reflock);
4640 4640 } else {
4641 4641 STREAM_PUTLOCKS_EXIT(stp);
4642 4642 mutex_exit(&stp->sd_lock);
4643 4643 mutex_exit(&stp->sd_reflock);
4644 4644 }
4645 4645 for (sql2 = sqlist->sqlist_head; sql2 != sql;
4646 4646 sql2 = sql2->sql_next) {
4647 4647 SQ_PUTLOCKS_EXIT(sql2->sql_sq);
4648 4648 mutex_exit(SQLOCK(sql2->sql_sq));
4649 4649 }
4650 4650
4651 4651 /*
4652 4652 * The wait loop below may starve when there are many threads
4653 4653 * claiming the syncq. This is especially a problem with permod
4654 4654 * syncqs (IP). To lessen the impact of the problem we increment
4655 4655 * sq_needexcl and clear fastbits so that putnexts will slow
4656 4656 * down and call sqenable instead of draining right away.
4657 4657 */
4658 4658 sq->sq_needexcl++;
4659 4659 SQ_PUTCOUNT_CLRFAST_LOCKED(sq);
4660 4660 while (count > sq->sq_rmqcount) {
4661 4661 sq->sq_flags |= SQ_WANTWAKEUP;
4662 4662 SQ_PUTLOCKS_EXIT(sq);
4663 4663 cv_wait(&sq->sq_wait, SQLOCK(sq));
4664 4664 count = sq->sq_count;
4665 4665 SQ_PUTLOCKS_ENTER(sq);
4666 4666 SUM_SQ_PUTCOUNTS(sq, count);
4667 4667 }
4668 4668 sq->sq_needexcl--;
4669 4669 if (sq->sq_needexcl == 0)
4670 4670 SQ_PUTCOUNT_SETFAST_LOCKED(sq);
4671 4671 SQ_PUTLOCKS_EXIT(sq);
4672 4672 ASSERT(count == sq->sq_rmqcount);
4673 4673 mutex_exit(SQLOCK(sq));
4674 4674 goto retry;
4675 4675 }
4676 4676 }
4677 4677
4678 4678 /*
4679 4679 * Drop all the locks that strlock acquired.
4680 4680 */
4681 4681 static void
4682 4682 strunlock(struct stdata *stp, sqlist_t *sqlist)
4683 4683 {
4684 4684 syncql_t *sql;
4685 4685
4686 4686 if (STRMATED(stp)) {
4687 4687 STREAM_PUTLOCKS_EXIT(stp);
4688 4688 STREAM_PUTLOCKS_EXIT(stp->sd_mate);
4689 4689 STRUNLOCKMATES(stp);
4690 4690 mutex_exit(&stp->sd_reflock);
4691 4691 mutex_exit(&stp->sd_mate->sd_reflock);
4692 4692 } else {
4693 4693 STREAM_PUTLOCKS_EXIT(stp);
4694 4694 mutex_exit(&stp->sd_lock);
4695 4695 mutex_exit(&stp->sd_reflock);
4696 4696 }
4697 4697
4698 4698 if (sqlist == NULL)
4699 4699 return;
4700 4700
4701 4701 for (sql = sqlist->sqlist_head; sql; sql = sql->sql_next) {
4702 4702 SQ_PUTLOCKS_EXIT(sql->sql_sq);
4703 4703 mutex_exit(SQLOCK(sql->sql_sq));
4704 4704 }
4705 4705 }
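
/*
 * Illustrative sketch (editor's annotation, not part of strsubr.c):
 * the address-based lock ordering that strlock() uses for mated
 * streams, in isolation. Every thread takes the higher-addressed
 * sd_lock first, so two threads locking the same pair cannot
 * deadlock against each other. 'lock_mates' is a hypothetical
 * helper.
 */
static void
lock_mates(stdata_t *a, stdata_t *b)
{
	if (&a->sd_lock > &b->sd_lock) {
		mutex_enter(&a->sd_lock);
		mutex_enter(&b->sd_lock);
	} else {
		mutex_enter(&b->sd_lock);
		mutex_enter(&a->sd_lock);
	}
}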
4706 4706
4707 4707 /*
4708 4708  * When the module has a service procedure, we need to check whether the
4709 4709  * next module with a service procedure is in flow control, in order to
4710 4710  * trigger the backenable.
4711 4711 */
4712 4712 static void
4713 4713 backenable_insertedq(queue_t *q)
4714 4714 {
4715 4715 qband_t *qbp;
4716 4716
4717 4717 claimstr(q);
4718 4718 if (q->q_qinfo->qi_srvp != NULL && q->q_next != NULL) {
4719 4719 if (q->q_next->q_nfsrv->q_flag & QWANTW)
4720 4720 backenable(q, 0);
4721 4721
4722 4722 qbp = q->q_next->q_nfsrv->q_bandp;
4723 4723 for (; qbp != NULL; qbp = qbp->qb_next)
4724 4724 if ((qbp->qb_flag & QB_WANTW) && qbp->qb_first != NULL)
4725 4725 backenable(q, qbp->qb_first->b_band);
4726 4726 }
4727 4727 releasestr(q);
4728 4728 }
4729 4729
4730 4730 /*
4731 4731  * Given two read queues, insert the single new one after the existing one.
4732 4732 *
4733 4733 * This routine acquires all the necessary locks in order to change
4734 4734 * q_next and related pointer using strlock().
4735 4735 * It depends on the stream head ensuring that there are no concurrent
4736 4736 * insertq or removeq on the same stream. The stream head ensures this
4737 4737 * using the flags STWOPEN, STRCLOSE, and STRPLUMB.
4738 4738 *
4739 4739 * Note that no syncq locks are held during the q_next change. This is
4740 4740 * applied to all streams since, unlike removeq, there is no problem of stale
4741 4741 * pointers when adding a module to the stream. Thus drivers/modules that do a
4742 4742 * canput(rq->q_next) would never get a closed/freed queue pointer even if we
4743 4743 * applied this optimization to all streams.
4744 4744 */
4745 4745 void
4746 4746 insertq(struct stdata *stp, queue_t *new)
4747 4747 {
4748 4748 queue_t *after;
4749 4749 queue_t *wafter;
4750 4750 queue_t *wnew = _WR(new);
4751 4751 boolean_t have_fifo = B_FALSE;
4752 4752
4753 4753 if (new->q_flag & _QINSERTING) {
4754 4754 ASSERT(stp->sd_vnode->v_type != VFIFO);
4755 4755 after = new->q_next;
4756 4756 wafter = _WR(new->q_next);
4757 4757 } else {
4758 4758 after = _RD(stp->sd_wrq);
4759 4759 wafter = stp->sd_wrq;
4760 4760 }
4761 4761
4762 4762 TRACE_2(TR_FAC_STREAMS_FR, TR_INSERTQ,
4763 4763 "insertq:%p, %p", after, new);
4764 4764 ASSERT(after->q_flag & QREADR);
4765 4765 ASSERT(new->q_flag & QREADR);
4766 4766
4767 4767 strlock(stp, NULL);
4768 4768
4769 4769 /* Do we have a FIFO? */
4770 4770 if (wafter->q_next == after) {
4771 4771 have_fifo = B_TRUE;
4772 4772 wnew->q_next = new;
4773 4773 } else {
4774 4774 wnew->q_next = wafter->q_next;
4775 4775 }
4776 4776 new->q_next = after;
4777 4777
4778 4778 set_nfsrv_ptr(new, wnew, after, wafter);
4779 4779 /*
4780 4780 * set_nfsrv_ptr() needs to know if this is an insertion or not,
4781 4781 * so only reset this flag after calling it.
4782 4782 */
4783 4783 new->q_flag &= ~_QINSERTING;
4784 4784
4785 4785 if (have_fifo) {
4786 4786 wafter->q_next = wnew;
4787 4787 } else {
4788 4788 if (wafter->q_next)
4789 4789 _OTHERQ(wafter->q_next)->q_next = new;
4790 4790 wafter->q_next = wnew;
4791 4791 }
4792 4792
4793 4793 set_qend(new);
4794 4794 /* The QEND flag might have to be updated for the upstream guy */
4795 4795 set_qend(after);
4796 4796
4797 4797 ASSERT(_SAMESTR(new) == O_SAMESTR(new));
4798 4798 ASSERT(_SAMESTR(wnew) == O_SAMESTR(wnew));
4799 4799 ASSERT(_SAMESTR(after) == O_SAMESTR(after));
4800 4800 ASSERT(_SAMESTR(wafter) == O_SAMESTR(wafter));
4801 4801 strsetuio(stp);
4802 4802
4803 4803 /*
4804 4804 * If this was a module insertion, bump the push count.
4805 4805 */
4806 4806 if (!(new->q_flag & QISDRV))
4807 4807 stp->sd_pushcnt++;
4808 4808
4809 4809 strunlock(stp, NULL);
4810 4810
4811 4811 /* check if the write Q needs backenable */
4812 4812 backenable_insertedq(wnew);
4813 4813
4814 4814 /* check if the read Q needs backenable */
4815 4815 backenable_insertedq(new);
4816 4816 }
4817 4817
4818 4818 /*
4819 4819 * Given a read queue, unlink it from any neighbors.
4820 4820 *
4821 4821 * This routine acquires all the necessary locks in order to
4822 4822 * change q_next and related pointers and also guard against
4823 4823 * stale references (e.g. through q_next) to the queue that
4824 4824  * is being removed. It also plays a part in ensuring
4825 4825 * that the module's/driver's put procedure doesn't get called
4826 4826 * after qprocsoff returns.
4827 4827 *
4828 4828 * Removeq depends on the stream head ensuring that there are
4829 4829 * no concurrent insertq or removeq on the same stream. The
4830 4830 * stream head ensures this using the flags STWOPEN, STRCLOSE and
4831 4831 * STRPLUMB.
4832 4832 *
4833 4833 * The set of locks needed to remove the queue is different in
4834 4834 * different cases:
4835 4835 *
4836 4836 * Acquire sd_lock, sd_reflock, and all the syncq locks in the stream after
4837 4837 * waiting for the syncq reference count to drop to 0 indicating that no
4838 4838 * non-close threads are present anywhere in the stream. This ensures that any
4839 4839 * module/driver can reference q_next in its open, close, put, or service
4840 4840 * procedures.
4841 4841 *
4842 4842 * The sq_rmqcount counter tracks the number of threads inside removeq().
4843 4843  * strlock() ensures that either no threads are executing inside the
4844 4844  * perimeter or that the only thread present is one calling qprocsoff().
4845 4845 *
4846 4846 * strlock() compares the value of sq_count with the number of threads inside
4847 4847  * removeq() and waits until sq_count is equal to sq_rmqcount. We need to wake
4848 4848  * up any threads waiting in strlock() when sq_rmqcount increases.
4849 4849 */
4850 4850
4851 4851 void
4852 4852 removeq(queue_t *qp)
4853 4853 {
4854 4854 queue_t *wqp = _WR(qp);
4855 4855 struct stdata *stp = STREAM(qp);
4856 4856 sqlist_t *sqlist = NULL;
4857 4857 boolean_t isdriver;
4858 4858 int moved;
4859 4859 syncq_t *sq = qp->q_syncq;
4860 4860 syncq_t *wsq = wqp->q_syncq;
4861 4861
4862 4862 ASSERT(stp);
4863 4863
4864 4864 TRACE_2(TR_FAC_STREAMS_FR, TR_REMOVEQ,
4865 4865 "removeq:%p %p", qp, wqp);
4866 4866 ASSERT(qp->q_flag&QREADR);
4867 4867
4868 4868 /*
4869 4869 * For queues using Synchronous streams, we must wait for all threads in
4870 4870 * rwnext() to drain out before proceeding.
4871 4871 */
4872 4872 if (qp->q_flag & QSYNCSTR) {
4873 4873 		/* First, we need to wake up any threads blocked in rwnext() */
4874 4874 mutex_enter(SQLOCK(sq));
4875 4875 if (sq->sq_flags & SQ_WANTWAKEUP) {
4876 4876 sq->sq_flags &= ~SQ_WANTWAKEUP;
4877 4877 cv_broadcast(&sq->sq_wait);
4878 4878 }
4879 4879 mutex_exit(SQLOCK(sq));
4880 4880
4881 4881 if (wsq != sq) {
4882 4882 mutex_enter(SQLOCK(wsq));
4883 4883 if (wsq->sq_flags & SQ_WANTWAKEUP) {
4884 4884 wsq->sq_flags &= ~SQ_WANTWAKEUP;
4885 4885 cv_broadcast(&wsq->sq_wait);
4886 4886 }
4887 4887 mutex_exit(SQLOCK(wsq));
4888 4888 }
4889 4889
4890 4890 mutex_enter(QLOCK(qp));
4891 4891 while (qp->q_rwcnt > 0) {
4892 4892 qp->q_flag |= QWANTRMQSYNC;
4893 4893 cv_wait(&qp->q_wait, QLOCK(qp));
4894 4894 }
4895 4895 mutex_exit(QLOCK(qp));
4896 4896
4897 4897 mutex_enter(QLOCK(wqp));
4898 4898 while (wqp->q_rwcnt > 0) {
4899 4899 wqp->q_flag |= QWANTRMQSYNC;
4900 4900 cv_wait(&wqp->q_wait, QLOCK(wqp));
4901 4901 }
4902 4902 mutex_exit(QLOCK(wqp));
4903 4903 }
4904 4904
4905 4905 mutex_enter(SQLOCK(sq));
4906 4906 sq->sq_rmqcount++;
4907 4907 if (sq->sq_flags & SQ_WANTWAKEUP) {
4908 4908 sq->sq_flags &= ~SQ_WANTWAKEUP;
4909 4909 cv_broadcast(&sq->sq_wait);
4910 4910 }
4911 4911 mutex_exit(SQLOCK(sq));
4912 4912
4913 4913 isdriver = (qp->q_flag & QISDRV);
4914 4914
4915 4915 sqlist = sqlist_build(qp, stp, STRMATED(stp));
4916 4916 strlock(stp, sqlist);
4917 4917
4918 4918 reset_nfsrv_ptr(qp, wqp);
4919 4919
4920 4920 ASSERT(wqp->q_next == NULL || backq(qp)->q_next == qp);
4921 4921 ASSERT(qp->q_next == NULL || backq(wqp)->q_next == wqp);
4922 4922 /* Do we have a FIFO? */
4923 4923 if (wqp->q_next == qp) {
4924 4924 stp->sd_wrq->q_next = _RD(stp->sd_wrq);
4925 4925 } else {
4926 4926 if (wqp->q_next)
4927 4927 backq(qp)->q_next = qp->q_next;
4928 4928 if (qp->q_next)
4929 4929 backq(wqp)->q_next = wqp->q_next;
4930 4930 }
4931 4931
4932 4932 /* The QEND flag might have to be updated for the upstream guy */
4933 4933 if (qp->q_next)
4934 4934 set_qend(qp->q_next);
4935 4935
4936 4936 ASSERT(_SAMESTR(stp->sd_wrq) == O_SAMESTR(stp->sd_wrq));
4937 4937 ASSERT(_SAMESTR(_RD(stp->sd_wrq)) == O_SAMESTR(_RD(stp->sd_wrq)));
4938 4938
4939 4939 /*
4940 4940 * Move any messages destined for the put procedures to the next
4941 4941 * syncq in line. Otherwise free them.
4942 4942 */
4943 4943 moved = 0;
4944 4944 /*
4945 4945 * Quick check to see whether there are any messages or events.
4946 4946 */
4947 4947 if (qp->q_syncqmsgs != 0 || (qp->q_syncq->sq_flags & SQ_EVENTS))
4948 4948 moved += propagate_syncq(qp);
4949 4949 if (wqp->q_syncqmsgs != 0 ||
4950 4950 (wqp->q_syncq->sq_flags & SQ_EVENTS))
4951 4951 moved += propagate_syncq(wqp);
4952 4952
4953 4953 strsetuio(stp);
4954 4954
4955 4955 /*
4956 4956 * If this was a module removal, decrement the push count.
4957 4957 */
4958 4958 if (!isdriver)
4959 4959 stp->sd_pushcnt--;
4960 4960
4961 4961 strunlock(stp, sqlist);
4962 4962 sqlist_free(sqlist);
4963 4963
4964 4964 /*
4965 4965 * Make sure any messages that were propagated are drained.
4966 4966 * Also clear any QFULL bit caused by messages that were propagated.
4967 4967 */
4968 4968
4969 4969 if (qp->q_next != NULL) {
4970 4970 clr_qfull(qp);
4971 4971 		/*
4972 4972 		 * For the driver calling qprocsoff, propagate_syncq
4973 4973 		 * frees all the messages instead of putting them on
4974 4974 		 * the stream head.
4975 4975 		 */
4976 4976 if (!isdriver && (moved > 0))
4977 4977 emptysq(qp->q_next->q_syncq);
4978 4978 }
4979 4979 if (wqp->q_next != NULL) {
4980 4980 clr_qfull(wqp);
4981 4981 		/*
4982 4982 		 * We come here for any pop of a module except for the
4983 4983 		 * case of the driver being removed. We don't call emptysq
4984 4984 		 * if we did not move any messages; this avoids holding
4985 4985 		 * PERMOD syncq locks in emptysq.
4986 4986 		 */
4987 4987 if (moved > 0)
4988 4988 emptysq(wqp->q_next->q_syncq);
4989 4989 }
4990 4990
4991 4991 mutex_enter(SQLOCK(sq));
4992 4992 sq->sq_rmqcount--;
4993 4993 mutex_exit(SQLOCK(sq));
4994 4994 }
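
/*
 * Illustrative sketch, not authoritative: removeq() is not called directly
 * by modules or drivers; the usual path is through qprocsoff() from a
 * close routine (xx_close below is hypothetical):
 *
 *	static int
 *	xx_close(queue_t *q, int flag, cred_t *crp)
 *	{
 *		qprocsoff(q);		eventually calls removeq(_RD(q))
 *		...
 *		return (0);
 *	}
 */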
4995 4995
4996 4996 /*
4997 4997 * Prevent further entry by setting a flag (like SQ_FROZEN, SQ_BLOCKED or
4998 4998 * SQ_WRITER) on a syncq.
4999 4999  * If maxcnt is not -1, it is assumed that the caller has "maxcnt" claim(s)
5000 5000  * on the sync queue, and this routine waits until sq_count reaches maxcnt.
5001 5001 *
5002 5002 * If maxcnt is -1 there's no need to grab sq_putlocks since the caller
5003 5003 * does not care about putnext threads that are in the middle of calling put
5004 5004 * entry points.
5005 5005 *
5006 5006 * This routine is used for both inner and outer syncqs.
5007 5007 */
5008 5008 static void
5009 5009 blocksq(syncq_t *sq, ushort_t flag, int maxcnt)
5010 5010 {
5011 5011 uint16_t count = 0;
5012 5012
5013 5013 mutex_enter(SQLOCK(sq));
5014 5014 /*
5015 5015 * Wait for SQ_FROZEN/SQ_BLOCKED to be reset.
5016 5016 * SQ_FROZEN will be set if there is a frozen stream that has a
5017 5017 * queue which also refers to this "shared" syncq.
5018 5018 	 * SQ_BLOCKED will be set if there is an "off" queue which also
5019 5019 * refers to this "shared" syncq.
5020 5020 */
5021 5021 if (maxcnt != -1) {
5022 5022 count = sq->sq_count;
5023 5023 SQ_PUTLOCKS_ENTER(sq);
5024 5024 SQ_PUTCOUNT_CLRFAST_LOCKED(sq);
5025 5025 SUM_SQ_PUTCOUNTS(sq, count);
5026 5026 }
5027 5027 sq->sq_needexcl++;
5028 5028 ASSERT(sq->sq_needexcl != 0); /* wraparound */
5029 5029
5030 5030 while ((sq->sq_flags & flag) ||
5031 5031 (maxcnt != -1 && count > (unsigned)maxcnt)) {
5032 5032 sq->sq_flags |= SQ_WANTWAKEUP;
5033 5033 if (maxcnt != -1) {
5034 5034 SQ_PUTLOCKS_EXIT(sq);
5035 5035 }
5036 5036 cv_wait(&sq->sq_wait, SQLOCK(sq));
5037 5037 if (maxcnt != -1) {
5038 5038 count = sq->sq_count;
5039 5039 SQ_PUTLOCKS_ENTER(sq);
5040 5040 SUM_SQ_PUTCOUNTS(sq, count);
5041 5041 }
5042 5042 }
5043 5043 sq->sq_needexcl--;
5044 5044 sq->sq_flags |= flag;
5045 5045 ASSERT(maxcnt == -1 || count == maxcnt);
5046 5046 if (maxcnt != -1) {
5047 5047 if (sq->sq_needexcl == 0) {
5048 5048 SQ_PUTCOUNT_SETFAST_LOCKED(sq);
5049 5049 }
5050 5050 SQ_PUTLOCKS_EXIT(sq);
5051 5051 } else if (sq->sq_needexcl == 0) {
5052 5052 SQ_PUTCOUNT_SETFAST(sq);
5053 5053 }
5054 5054
5055 5055 mutex_exit(SQLOCK(sq));
5056 5056 }
5057 5057
5058 5058 /*
5059 5059 * Reset a flag that was set with blocksq.
5060 5060 *
5061 5061  * Cannot use this routine to reset SQ_WRITER.
5062 5062 *
5063 5063 * If "isouter" is set then the syncq is assumed to be an outer perimeter
5064 5064 * and drain_syncq is not called. Instead we rely on the qwriter_outer thread
5065 5065 * to handle the queued qwriter operations.
5066 5066 *
5067 5067 * No need to grab sq_putlocks here. See comment in strsubr.h that explains when
5068 5068 * sq_putlocks are used.
5069 5069 */
5070 5070 static void
5071 5071 unblocksq(syncq_t *sq, uint16_t resetflag, int isouter)
5072 5072 {
5073 5073 uint16_t flags;
5074 5074
5075 5075 mutex_enter(SQLOCK(sq));
5076 5076 ASSERT(resetflag != SQ_WRITER);
5077 5077 ASSERT(sq->sq_flags & resetflag);
5078 5078 flags = sq->sq_flags & ~resetflag;
5079 5079 sq->sq_flags = flags;
5080 5080 if (flags & (SQ_QUEUED | SQ_WANTWAKEUP)) {
5081 5081 if (flags & SQ_WANTWAKEUP) {
5082 5082 flags &= ~SQ_WANTWAKEUP;
5083 5083 cv_broadcast(&sq->sq_wait);
5084 5084 }
5085 5085 sq->sq_flags = flags;
5086 5086 if ((flags & SQ_QUEUED) && !(flags & (SQ_STAYAWAY|SQ_EXCL))) {
5087 5087 if (!isouter) {
5088 5088 /* drain_syncq drops SQLOCK */
5089 5089 drain_syncq(sq);
5090 5090 return;
5091 5091 }
5092 5092 }
5093 5093 }
5094 5094 mutex_exit(SQLOCK(sq));
5095 5095 }
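
/*
 * Illustrative sketch: blocksq()/unblocksq() bracket an update that must
 * keep all other threads out of the syncq, exactly as outer_insert() and
 * outer_remove() below do when changing outer perimeter membership:
 *
 *	blocksq(outer, SQ_BLOCKED, 0);
 *	... update the outer perimeter's list of inner syncqs ...
 *	unblocksq(outer, SQ_BLOCKED, 1);	isouter != 0, no drain here
 */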
5096 5096
5097 5097 /*
5098 5098 * Reset a flag that was set with blocksq.
5099 5099 * Does not drain the syncq. Use emptysq() for that.
5100 5100 * Returns 1 if SQ_QUEUED is set. Otherwise 0.
5101 5101 *
5102 5102 * No need to grab sq_putlocks here. See comment in strsubr.h that explains when
5103 5103 * sq_putlocks are used.
5104 5104 */
5105 5105 static int
5106 5106 dropsq(syncq_t *sq, uint16_t resetflag)
5107 5107 {
5108 5108 uint16_t flags;
5109 5109
5110 5110 mutex_enter(SQLOCK(sq));
5111 5111 ASSERT(sq->sq_flags & resetflag);
5112 5112 flags = sq->sq_flags & ~resetflag;
5113 5113 if (flags & SQ_WANTWAKEUP) {
5114 5114 flags &= ~SQ_WANTWAKEUP;
5115 5115 cv_broadcast(&sq->sq_wait);
5116 5116 }
5117 5117 sq->sq_flags = flags;
5118 5118 mutex_exit(SQLOCK(sq));
5119 5119 if (flags & SQ_QUEUED)
5120 5120 return (1);
5121 5121 return (0);
5122 5122 }
5123 5123
5124 5124 /*
5125 5125 * Empty all the messages on a syncq.
5126 5126 *
5127 5127 * No need to grab sq_putlocks here. See comment in strsubr.h that explains when
5128 5128 * sq_putlocks are used.
5129 5129 */
5130 5130 static void
5131 5131 emptysq(syncq_t *sq)
5132 5132 {
5133 5133 uint16_t flags;
5134 5134
5135 5135 mutex_enter(SQLOCK(sq));
5136 5136 flags = sq->sq_flags;
5137 5137 if ((flags & SQ_QUEUED) && !(flags & (SQ_STAYAWAY|SQ_EXCL))) {
5138 5138 /*
5139 5139 * To prevent potential recursive invocation of drain_syncq we
5140 5140 * do not call drain_syncq if count is non-zero.
5141 5141 */
5142 5142 if (sq->sq_count == 0) {
5143 5143 /* drain_syncq() drops SQLOCK */
5144 5144 drain_syncq(sq);
5145 5145 return;
5146 5146 } else
5147 5147 sqenable(sq);
5148 5148 }
5149 5149 mutex_exit(SQLOCK(sq));
5150 5150 }
5151 5151
5152 5152 /*
5153 5153 * Ordered insert while removing duplicates.
5154 5154 */
5155 5155 static void
5156 5156 sqlist_insert(sqlist_t *sqlist, syncq_t *sqp)
5157 5157 {
5158 5158 syncql_t *sqlp, **prev_sqlpp, *new_sqlp;
5159 5159
5160 5160 prev_sqlpp = &sqlist->sqlist_head;
5161 5161 while ((sqlp = *prev_sqlpp) != NULL) {
5162 5162 if (sqlp->sql_sq >= sqp) {
5163 5163 if (sqlp->sql_sq == sqp) /* duplicate */
5164 5164 return;
5165 5165 break;
5166 5166 }
5167 5167 prev_sqlpp = &sqlp->sql_next;
5168 5168 }
5169 5169 new_sqlp = &sqlist->sqlist_array[sqlist->sqlist_index++];
5170 5170 ASSERT((char *)new_sqlp < (char *)sqlist + sqlist->sqlist_size);
5171 5171 new_sqlp->sql_next = sqlp;
5172 5172 new_sqlp->sql_sq = sqp;
5173 5173 *prev_sqlpp = new_sqlp;
5174 5174 }
5175 5175
5176 5176 /*
5177 5177 * Walk the write side queues until we hit either the driver
5178 5178 * or a twist in the stream (_SAMESTR will return false in both
5179 5179 * these cases) then turn around and walk the read side queues
5180 5180 * back up to the stream head.
5181 5181 */
5182 5182 static void
5183 5183 sqlist_insertall(sqlist_t *sqlist, queue_t *q)
5184 5184 {
5185 5185 while (q != NULL) {
5186 5186 sqlist_insert(sqlist, q->q_syncq);
5187 5187
5188 5188 if (_SAMESTR(q))
5189 5189 q = q->q_next;
5190 5190 else if (!(q->q_flag & QREADR))
5191 5191 q = _RD(q);
5192 5192 else
5193 5193 q = NULL;
5194 5194 }
5195 5195 }
5196 5196
5197 5197 /*
5198 5198 * Allocate and build a list of all syncqs in a stream and the syncq(s)
5199 5199 * associated with the "q" parameter. The resulting list is sorted in a
5200 5200 * canonical order and is free of duplicates.
5201 5201 * Assumes the passed queue is a _RD(q).
5202 5202 */
5203 5203 static sqlist_t *
5204 5204 sqlist_build(queue_t *q, struct stdata *stp, boolean_t do_twist)
5205 5205 {
5206 5206 sqlist_t *sqlist = sqlist_alloc(stp, KM_SLEEP);
5207 5207
5208 5208 /*
5209 5209 * start with the current queue/qpair
5210 5210 */
5211 5211 ASSERT(q->q_flag & QREADR);
5212 5212
5213 5213 sqlist_insert(sqlist, q->q_syncq);
5214 5214 sqlist_insert(sqlist, _WR(q)->q_syncq);
5215 5215
5216 5216 sqlist_insertall(sqlist, stp->sd_wrq);
5217 5217 if (do_twist)
5218 5218 sqlist_insertall(sqlist, stp->sd_mate->sd_wrq);
5219 5219
5220 5220 return (sqlist);
5221 5221 }
5222 5222
5223 5223 static sqlist_t *
5224 5224 sqlist_alloc(struct stdata *stp, int kmflag)
5225 5225 {
5226 5226 size_t sqlist_size;
5227 5227 sqlist_t *sqlist;
5228 5228
5229 5229 /*
5230 5230 * Allocate 2 syncql_t's for each pushed module. Note that
5231 5231 * the sqlist_t structure already has 4 syncql_t's built in:
5232 5232 * 2 for the stream head, and 2 for the driver/other stream head.
5233 5233 */
5234 5234 sqlist_size = 2 * sizeof (syncql_t) * stp->sd_pushcnt +
5235 5235 sizeof (sqlist_t);
5236 5236 if (STRMATED(stp))
5237 5237 sqlist_size += 2 * sizeof (syncql_t) * stp->sd_mate->sd_pushcnt;
5238 5238 sqlist = kmem_alloc(sqlist_size, kmflag);
5239 5239
5240 5240 sqlist->sqlist_head = NULL;
5241 5241 sqlist->sqlist_size = sqlist_size;
5242 5242 sqlist->sqlist_index = 0;
5243 5243
5244 5244 return (sqlist);
5245 5245 }
5246 5246
5247 5247 /*
5248 5248 * Free the list created by sqlist_alloc()
5249 5249 */
5250 5250 static void
5251 5251 sqlist_free(sqlist_t *sqlist)
5252 5252 {
5253 5253 kmem_free(sqlist, sqlist->sqlist_size);
5254 5254 }
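
/*
 * Illustrative sketch of the canonical sqlist life cycle, as used by
 * removeq(), strblock() and strunblock() in this file:
 *
 *	sqlist = sqlist_build(_RD(q), stp, STRMATED(stp));
 *	for (sql = sqlist->sqlist_head; sql != NULL; sql = sql->sql_next)
 *		... operate on sql->sql_sq ...
 *	sqlist_free(sqlist);
 */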
5255 5255
5256 5256 /*
5257 5257 * Prevent any new entries into any syncq in this stream.
5258 5258 * Used by freezestr.
5259 5259 */
5260 5260 void
5261 5261 strblock(queue_t *q)
5262 5262 {
5263 5263 struct stdata *stp;
5264 5264 syncql_t *sql;
5265 5265 sqlist_t *sqlist;
5266 5266
5267 5267 q = _RD(q);
5268 5268
5269 5269 stp = STREAM(q);
5270 5270 ASSERT(stp != NULL);
5271 5271
5272 5272 /*
5273 5273 * Get a sorted list with all the duplicates removed containing
5274 5274 * all the syncqs referenced by this stream.
5275 5275 */
5276 5276 sqlist = sqlist_build(q, stp, B_FALSE);
5277 5277 for (sql = sqlist->sqlist_head; sql != NULL; sql = sql->sql_next)
5278 5278 blocksq(sql->sql_sq, SQ_FROZEN, -1);
5279 5279 sqlist_free(sqlist);
5280 5280 }
5281 5281
5282 5282 /*
5283 5283 * Release the block on new entries into this stream
5284 5284 */
5285 5285 void
5286 5286 strunblock(queue_t *q)
5287 5287 {
5288 5288 struct stdata *stp;
5289 5289 syncql_t *sql;
5290 5290 sqlist_t *sqlist;
5291 5291 int drain_needed;
5292 5292
5293 5293 q = _RD(q);
5294 5294
5295 5295 /*
5296 5296 * Get a sorted list with all the duplicates removed containing
5297 5297 * all the syncqs referenced by this stream.
5298 5298 * Have to drop the SQ_FROZEN flag on all the syncqs before
5299 5299 * starting to drain them; otherwise the draining might
5300 5300 * cause a freezestr in some module on the stream (which
5301 5301 * would deadlock).
5302 5302 */
5303 5303 stp = STREAM(q);
5304 5304 ASSERT(stp != NULL);
5305 5305 sqlist = sqlist_build(q, stp, B_FALSE);
5306 5306 drain_needed = 0;
5307 5307 for (sql = sqlist->sqlist_head; sql != NULL; sql = sql->sql_next)
5308 5308 drain_needed += dropsq(sql->sql_sq, SQ_FROZEN);
5309 5309 if (drain_needed) {
5310 5310 for (sql = sqlist->sqlist_head; sql != NULL;
5311 5311 sql = sql->sql_next)
5312 5312 emptysq(sql->sql_sq);
5313 5313 }
5314 5314 sqlist_free(sqlist);
5315 5315 }
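
/*
 * Illustrative sketch: strblock()/strunblock() are consumed by
 * freezestr(9F) and (it is assumed here) unfreezestr(9F), which a module
 * uses to stop all syncq activity on a stream before poking at it:
 *
 *	freezestr(q);		calls strblock(q), among other things
 *	... examine or update state of the frozen stream ...
 *	unfreezestr(q);		calls strunblock(q)
 */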
5316 5316
5317 5317 #ifdef DEBUG
5318 5318 static int
5319 5319 qprocsareon(queue_t *rq)
5320 5320 {
5321 5321 if (rq->q_next == NULL)
5322 5322 return (0);
5323 5323 return (_WR(rq->q_next)->q_next == _WR(rq));
5324 5324 }
5325 5325
5326 5326 int
5327 5327 qclaimed(queue_t *q)
5328 5328 {
5329 5329 uint_t count;
5330 5330
5331 5331 count = q->q_syncq->sq_count;
5332 5332 SUM_SQ_PUTCOUNTS(q->q_syncq, count);
5333 5333 return (count != 0);
5334 5334 }
5335 5335
5336 5336 /*
5337 5337 * Check if anyone has frozen this stream with freezestr
5338 5338 */
5339 5339 int
5340 5340 frozenstr(queue_t *q)
5341 5341 {
5342 5342 return ((q->q_syncq->sq_flags & SQ_FROZEN) != 0);
5343 5343 }
5344 5344 #endif /* DEBUG */
5345 5345
5346 5346 /*
5347 5347 * Enter a queue.
5348 5348 * Obsoleted interface. Should not be used.
5349 5349 */
5350 5350 void
5351 5351 enterq(queue_t *q)
5352 5352 {
5353 5353 entersq(q->q_syncq, SQ_CALLBACK);
5354 5354 }
5355 5355
5356 5356 void
5357 5357 leaveq(queue_t *q)
5358 5358 {
5359 5359 leavesq(q->q_syncq, SQ_CALLBACK);
5360 5360 }
5361 5361
5362 5362 /*
5363 5363 * Enter a perimeter. c_inner and c_outer specifies which concurrency bits
5364 5364 * to check.
5365 5365 * Wait if SQ_QUEUED is set to preserve ordering between messages and qwriter
5366 5366 * calls and the running of open, close and service procedures.
5367 5367 *
5368 5368 * If c_inner bit is set no need to grab sq_putlocks since we don't care
5369 5369 * if other threads have entered or are entering put entry point.
5370 5370 *
5371 5371 * If c_inner bit is set it might have been possible to use
5372 5372 * sq_putlocks/sq_putcounts instead of SQLOCK/sq_count (e.g. to optimize
5373 5373 * open/close path for IP) but since the count may need to be decremented in
5374 5374  * qwait() we wouldn't know which counter to decrement. Currently the counter
5375 5375  * is selected by the current cpu_seqid, and the current CPU can change at any
5376 5376  * moment. XXX in the future we might use curthread id bits to select the
5377 5377  * counter, which would stay constant across routine calls.
5378 5378 */
5379 5379 void
5380 5380 entersq(syncq_t *sq, int entrypoint)
5381 5381 {
5382 5382 uint16_t count = 0;
5383 5383 uint16_t flags;
5384 5384 uint16_t waitflags = SQ_STAYAWAY | SQ_EVENTS | SQ_EXCL;
5385 5385 uint16_t type;
5386 5386 uint_t c_inner = entrypoint & SQ_CI;
5387 5387 uint_t c_outer = entrypoint & SQ_CO;
5388 5388
5389 5389 /*
5390 5390 * Increment ref count to keep closes out of this queue.
5391 5391 */
5392 5392 ASSERT(sq);
5393 5393 ASSERT(c_inner && c_outer);
5394 5394 mutex_enter(SQLOCK(sq));
5395 5395 flags = sq->sq_flags;
5396 5396 type = sq->sq_type;
5397 5397 if (!(type & c_inner)) {
5398 5398 /* Make sure all putcounts now use slowlock. */
5399 5399 count = sq->sq_count;
5400 5400 SQ_PUTLOCKS_ENTER(sq);
5401 5401 SQ_PUTCOUNT_CLRFAST_LOCKED(sq);
5402 5402 SUM_SQ_PUTCOUNTS(sq, count);
5403 5403 sq->sq_needexcl++;
5404 5404 ASSERT(sq->sq_needexcl != 0); /* wraparound */
5405 5405 waitflags |= SQ_MESSAGES;
5406 5406 }
5407 5407 /*
5408 5408 * Wait until we can enter the inner perimeter.
5409 5409 * If we want exclusive access we wait until sq_count is 0.
5410 5410 * We have to do this before entering the outer perimeter in order
5411 5411 * to preserve put/close message ordering.
5412 5412 */
5413 5413 while ((flags & waitflags) || (!(type & c_inner) && count != 0)) {
5414 5414 sq->sq_flags = flags | SQ_WANTWAKEUP;
5415 5415 if (!(type & c_inner)) {
5416 5416 SQ_PUTLOCKS_EXIT(sq);
5417 5417 }
5418 5418 cv_wait(&sq->sq_wait, SQLOCK(sq));
5419 5419 if (!(type & c_inner)) {
5420 5420 count = sq->sq_count;
5421 5421 SQ_PUTLOCKS_ENTER(sq);
5422 5422 SUM_SQ_PUTCOUNTS(sq, count);
5423 5423 }
5424 5424 flags = sq->sq_flags;
5425 5425 }
5426 5426
5427 5427 if (!(type & c_inner)) {
5428 5428 ASSERT(sq->sq_needexcl > 0);
5429 5429 sq->sq_needexcl--;
5430 5430 if (sq->sq_needexcl == 0) {
5431 5431 SQ_PUTCOUNT_SETFAST_LOCKED(sq);
5432 5432 }
5433 5433 }
5434 5434
5435 5435 /* Check if we need to enter the outer perimeter */
5436 5436 if (!(type & c_outer)) {
5437 5437 /*
5438 5438 * We have to enter the outer perimeter exclusively before
5439 5439 * we can increment sq_count to avoid deadlock. This implies
5440 5440 * that we have to re-check sq_flags and sq_count.
5441 5441 *
5442 5442 * is it possible to have c_inner set when c_outer is not set?
5443 5443 */
5444 5444 if (!(type & c_inner)) {
5445 5445 SQ_PUTLOCKS_EXIT(sq);
5446 5446 }
5447 5447 mutex_exit(SQLOCK(sq));
5448 5448 outer_enter(sq->sq_outer, SQ_GOAWAY);
5449 5449 mutex_enter(SQLOCK(sq));
5450 5450 flags = sq->sq_flags;
5451 5451 /*
5452 5452 		 * There should be no need to recheck sq_putcounts
5453 5453 * because outer_enter() has already waited for them to clear
5454 5454 * after setting SQ_WRITER.
5455 5455 */
5456 5456 count = sq->sq_count;
5457 5457 #ifdef DEBUG
5458 5458 /*
5459 5459 * SUMCHECK_SQ_PUTCOUNTS should return the sum instead
5460 5460 * of doing an ASSERT internally. Others should do
5461 5461 * something like
5462 5462 * ASSERT(SUMCHECK_SQ_PUTCOUNTS(sq) == 0);
5463 5463 * without the need to #ifdef DEBUG it.
5464 5464 */
5465 5465 SUMCHECK_SQ_PUTCOUNTS(sq, 0);
5466 5466 #endif
5467 5467 while ((flags & (SQ_EXCL|SQ_BLOCKED|SQ_FROZEN)) ||
5468 5468 (!(type & c_inner) && count != 0)) {
5469 5469 sq->sq_flags = flags | SQ_WANTWAKEUP;
5470 5470 cv_wait(&sq->sq_wait, SQLOCK(sq));
5471 5471 count = sq->sq_count;
5472 5472 flags = sq->sq_flags;
5473 5473 }
5474 5474 }
5475 5475
5476 5476 sq->sq_count++;
5477 5477 ASSERT(sq->sq_count != 0); /* Wraparound */
5478 5478 if (!(type & c_inner)) {
5479 5479 /* Exclusive entry */
5480 5480 ASSERT(sq->sq_count == 1);
5481 5481 sq->sq_flags |= SQ_EXCL;
5482 5482 if (type & c_outer) {
5483 5483 SQ_PUTLOCKS_EXIT(sq);
5484 5484 }
5485 5485 }
5486 5486 mutex_exit(SQLOCK(sq));
5487 5487 }
5488 5488
5489 5489 /*
5490 5490 * Leave a syncq. Announce to framework that closes may proceed.
5491 5491 * c_inner and c_outer specify which concurrency bits to check.
5492 5492 *
5493 5493 * Must never be called from driver or module put entry point.
5494 5494 *
5495 5495 * No need to grab sq_putlocks here. See comment in strsubr.h that explains when
5496 5496 * sq_putlocks are used.
5497 5497 */
5498 5498 void
5499 5499 leavesq(syncq_t *sq, int entrypoint)
5500 5500 {
5501 5501 uint16_t flags;
5502 5502 uint16_t type;
5503 5503 uint_t c_outer = entrypoint & SQ_CO;
5504 5504 #ifdef DEBUG
5505 5505 uint_t c_inner = entrypoint & SQ_CI;
5506 5506 #endif
5507 5507
5508 5508 /*
5509 5509 * Decrement ref count, drain the syncq if possible, and wake up
5510 5510 * any waiting close.
5511 5511 */
5512 5512 ASSERT(sq);
5513 5513 ASSERT(c_inner && c_outer);
5514 5514 mutex_enter(SQLOCK(sq));
5515 5515 flags = sq->sq_flags;
5516 5516 type = sq->sq_type;
5517 5517 if (flags & (SQ_QUEUED|SQ_WANTWAKEUP|SQ_WANTEXWAKEUP)) {
5518 5518
5519 5519 if (flags & SQ_WANTWAKEUP) {
5520 5520 flags &= ~SQ_WANTWAKEUP;
5521 5521 cv_broadcast(&sq->sq_wait);
5522 5522 }
5523 5523 if (flags & SQ_WANTEXWAKEUP) {
5524 5524 flags &= ~SQ_WANTEXWAKEUP;
5525 5525 cv_broadcast(&sq->sq_exitwait);
5526 5526 }
5527 5527
5528 5528 if ((flags & SQ_QUEUED) && !(flags & SQ_STAYAWAY)) {
5529 5529 /*
5530 5530 * The syncq needs to be drained. "Exit" the syncq
5531 5531 * before calling drain_syncq.
5532 5532 */
5533 5533 ASSERT(sq->sq_count != 0);
5534 5534 sq->sq_count--;
5535 5535 ASSERT((flags & SQ_EXCL) || (type & c_inner));
5536 5536 sq->sq_flags = flags & ~SQ_EXCL;
5537 5537 drain_syncq(sq);
5538 5538 ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));
5539 5539 /* Check if we need to exit the outer perimeter */
5540 5540 /* XXX will this ever be true? */
5541 5541 if (!(type & c_outer))
5542 5542 outer_exit(sq->sq_outer);
5543 5543 return;
5544 5544 }
5545 5545 }
5546 5546 ASSERT(sq->sq_count != 0);
5547 5547 sq->sq_count--;
5548 5548 ASSERT((flags & SQ_EXCL) || (type & c_inner));
5549 5549 sq->sq_flags = flags & ~SQ_EXCL;
5550 5550 mutex_exit(SQLOCK(sq));
5551 5551
5552 5552 /* Check if we need to exit the outer perimeter */
5553 5553 if (!(sq->sq_type & c_outer))
5554 5554 outer_exit(sq->sq_outer);
5555 5555 }
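
/*
 * Illustrative sketch: entersq()/leavesq() bracket framework calls into
 * open and close entry points (SQ_OPENCLOSE and qi_qopen are standard
 * STREAMS definitions assumed here; compare enterq()/leaveq() above,
 * which pass SQ_CALLBACK):
 *
 *	entersq(q->q_syncq, SQ_OPENCLOSE);
 *	error = (*q->q_qinfo->qi_qopen)(q, devp, flag, sflag, crp);
 *	leavesq(q->q_syncq, SQ_OPENCLOSE);
 */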
5556 5556
5557 5557 /*
5558 5558 * Prevent q_next from changing in this stream by incrementing sq_count.
5559 5559 *
5560 5560 * No need to grab sq_putlocks here. See comment in strsubr.h that explains when
5561 5561 * sq_putlocks are used.
5562 5562 */
5563 5563 void
5564 5564 claimq(queue_t *qp)
5565 5565 {
5566 5566 syncq_t *sq = qp->q_syncq;
5567 5567
5568 5568 mutex_enter(SQLOCK(sq));
5569 5569 sq->sq_count++;
5570 5570 ASSERT(sq->sq_count != 0); /* Wraparound */
5571 5571 mutex_exit(SQLOCK(sq));
5572 5572 }
5573 5573
5574 5574 /*
5575 5575 * Undo claimq.
5576 5576 *
5577 5577 * No need to grab sq_putlocks here. See comment in strsubr.h that explains when
5578 5578 * sq_putlocks are used.
5579 5579 */
5580 5580 void
5581 5581 releaseq(queue_t *qp)
5582 5582 {
5583 5583 syncq_t *sq = qp->q_syncq;
5584 5584 uint16_t flags;
5585 5585
5586 5586 mutex_enter(SQLOCK(sq));
5587 5587 ASSERT(sq->sq_count > 0);
5588 5588 sq->sq_count--;
5589 5589
5590 5590 flags = sq->sq_flags;
5591 5591 if (flags & (SQ_WANTWAKEUP|SQ_QUEUED)) {
5592 5592 if (flags & SQ_WANTWAKEUP) {
5593 5593 flags &= ~SQ_WANTWAKEUP;
5594 5594 cv_broadcast(&sq->sq_wait);
5595 5595 }
5596 5596 sq->sq_flags = flags;
5597 5597 if ((flags & SQ_QUEUED) && !(flags & (SQ_STAYAWAY|SQ_EXCL))) {
5598 5598 /*
5599 5599 * To prevent potential recursive invocation of
5600 5600 * drain_syncq we do not call drain_syncq if count is
5601 5601 * non-zero.
5602 5602 */
5603 5603 if (sq->sq_count == 0) {
5604 5604 drain_syncq(sq);
5605 5605 return;
5606 5606 } else
5607 5607 sqenable(sq);
5608 5608 }
5609 5609 }
5610 5610 mutex_exit(SQLOCK(sq));
5611 5611 }
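
/*
 * Illustrative sketch: a claim pins q_next while a callback runs, which
 * is exactly how write_now() below protects deferred qwriter(OUTER)
 * callbacks:
 *
 *	claimq(q);
 *	(*func)(q, mp);		q_next cannot change while claimed
 *	releaseq(q);
 */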
5612 5612
5613 5613 /*
5614 5614 * Prevent q_next from changing in this stream by incrementing sd_refcnt.
5615 5615 */
5616 5616 void
5617 5617 claimstr(queue_t *qp)
5618 5618 {
5619 5619 struct stdata *stp = STREAM(qp);
5620 5620
5621 5621 mutex_enter(&stp->sd_reflock);
5622 5622 stp->sd_refcnt++;
5623 5623 ASSERT(stp->sd_refcnt != 0); /* Wraparound */
5624 5624 mutex_exit(&stp->sd_reflock);
5625 5625 }
5626 5626
5627 5627 /*
5628 5628 * Undo claimstr.
5629 5629 */
5630 5630 void
5631 5631 releasestr(queue_t *qp)
5632 5632 {
5633 5633 struct stdata *stp = STREAM(qp);
5634 5634
5635 5635 mutex_enter(&stp->sd_reflock);
5636 5636 ASSERT(stp->sd_refcnt != 0);
5637 5637 if (--stp->sd_refcnt == 0)
5638 5638 cv_broadcast(&stp->sd_refmonitor);
5639 5639 mutex_exit(&stp->sd_reflock);
5640 5640 }
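
/*
 * Illustrative sketch: claimstr()/releasestr() let a thread walk q_next
 * across the whole stream without holding per-queue locks:
 *
 *	claimstr(qp);
 *	for (q = _WR(qp); _SAMESTR(q); q = q->q_next)
 *		... examine q ...
 *	releasestr(qp);
 */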
5641 5641
5642 5642 static syncq_t *
5643 5643 new_syncq(void)
5644 5644 {
5645 5645 return (kmem_cache_alloc(syncq_cache, KM_SLEEP));
5646 5646 }
5647 5647
5648 5648 static void
5649 5649 free_syncq(syncq_t *sq)
5650 5650 {
5651 5651 ASSERT(sq->sq_head == NULL);
5652 5652 ASSERT(sq->sq_outer == NULL);
5653 5653 ASSERT(sq->sq_callbpend == NULL);
5654 5654 ASSERT((sq->sq_onext == NULL && sq->sq_oprev == NULL) ||
5655 5655 (sq->sq_onext == sq && sq->sq_oprev == sq));
5656 5656
5657 5657 if (sq->sq_ciputctrl != NULL) {
5658 5658 ASSERT(sq->sq_nciputctrl == n_ciputctrl - 1);
5659 5659 SUMCHECK_CIPUTCTRL_COUNTS(sq->sq_ciputctrl,
5660 5660 sq->sq_nciputctrl, 0);
5661 5661 ASSERT(ciputctrl_cache != NULL);
5662 5662 kmem_cache_free(ciputctrl_cache, sq->sq_ciputctrl);
5663 5663 }
5664 5664
5665 5665 sq->sq_tail = NULL;
5666 5666 sq->sq_evhead = NULL;
5667 5667 sq->sq_evtail = NULL;
5668 5668 sq->sq_ciputctrl = NULL;
5669 5669 sq->sq_nciputctrl = 0;
5670 5670 sq->sq_count = 0;
5671 5671 sq->sq_rmqcount = 0;
5672 5672 sq->sq_callbflags = 0;
5673 5673 sq->sq_cancelid = 0;
5674 5674 sq->sq_next = NULL;
5675 5675 sq->sq_needexcl = 0;
5676 5676 sq->sq_svcflags = 0;
5677 5677 sq->sq_nqueues = 0;
5678 5678 sq->sq_pri = 0;
5679 5679 sq->sq_onext = NULL;
5680 5680 sq->sq_oprev = NULL;
5681 5681 sq->sq_flags = 0;
5682 5682 sq->sq_type = 0;
5683 5683 sq->sq_servcount = 0;
5684 5684
5685 5685 kmem_cache_free(syncq_cache, sq);
5686 5686 }
5687 5687
5688 5688 /* Outer perimeter code */
5689 5689
5690 5690 /*
5691 5691 * The outer syncq uses the fields and flags in the syncq slightly
5692 5692 * differently from the inner syncqs.
5693 5693 * sq_count Incremented when there are pending or running
5694 5694 * writers at the outer perimeter to prevent the set of
5695 5695 * inner syncqs that belong to the outer perimeter from
5696 5696 * changing.
5697 5697 * sq_head/tail List of deferred qwriter(OUTER) operations.
5698 5698 *
5699 5699 * SQ_BLOCKED Set to prevent traversing of sq_next,sq_prev while
5700 5700 * inner syncqs are added to or removed from the
5701 5701 * outer perimeter.
5702 5702 * SQ_QUEUED sq_head/tail has messages or events queued.
5703 5703 *
5704 5704 * SQ_WRITER A thread is currently traversing all the inner syncqs
5705 5705 * setting the SQ_WRITER flag.
5706 5706 */
5707 5707
5708 5708 /*
5709 5709 * Get write access at the outer perimeter.
5710 5710 * Note that read access is done by entersq, putnext, and put by simply
5711 5711 * incrementing sq_count in the inner syncq.
5712 5712 *
5713 5713 * Waits until "flags" is no longer set in the outer to prevent multiple
5714 5714 * threads from having write access at the same time. SQ_WRITER has to be part
5715 5715 * of "flags".
5716 5716 *
5717 5717 * Increases sq_count on the outer syncq to keep away outer_insert/remove
5718 5718 * until the outer_exit is finished.
5719 5719 *
5720 5720 * outer_enter is vulnerable to starvation since it does not prevent new
5721 5721 * threads from entering the inner syncqs while it is waiting for sq_count to
5722 5722 * go to zero.
5723 5723 */
5724 5724 void
5725 5725 outer_enter(syncq_t *outer, uint16_t flags)
5726 5726 {
5727 5727 syncq_t *sq;
5728 5728 int wait_needed;
5729 5729 uint16_t count;
5730 5730
5731 5731 ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL &&
5732 5732 outer->sq_oprev != NULL);
5733 5733 ASSERT(flags & SQ_WRITER);
5734 5734
5735 5735 retry:
5736 5736 mutex_enter(SQLOCK(outer));
5737 5737 while (outer->sq_flags & flags) {
5738 5738 outer->sq_flags |= SQ_WANTWAKEUP;
5739 5739 cv_wait(&outer->sq_wait, SQLOCK(outer));
5740 5740 }
5741 5741
5742 5742 ASSERT(!(outer->sq_flags & SQ_WRITER));
5743 5743 outer->sq_flags |= SQ_WRITER;
5744 5744 outer->sq_count++;
5745 5745 ASSERT(outer->sq_count != 0); /* wraparound */
5746 5746 wait_needed = 0;
5747 5747 /*
5748 5748 * Set SQ_WRITER on all the inner syncqs while holding
5749 5749 * the SQLOCK on the outer syncq. This ensures that the changing
5750 5750 * of SQ_WRITER is atomic under the outer SQLOCK.
5751 5751 */
5752 5752 for (sq = outer->sq_onext; sq != outer; sq = sq->sq_onext) {
5753 5753 mutex_enter(SQLOCK(sq));
5754 5754 count = sq->sq_count;
5755 5755 SQ_PUTLOCKS_ENTER(sq);
5756 5756 sq->sq_flags |= SQ_WRITER;
5757 5757 SUM_SQ_PUTCOUNTS(sq, count);
5758 5758 if (count != 0)
5759 5759 wait_needed = 1;
5760 5760 SQ_PUTLOCKS_EXIT(sq);
5761 5761 mutex_exit(SQLOCK(sq));
5762 5762 }
5763 5763 mutex_exit(SQLOCK(outer));
5764 5764
5765 5765 /*
5766 5766 * Get everybody out of the syncqs sequentially.
5767 5767 * Note that we don't actually need to acquire the PUTLOCKS, since
5768 5768 	 * we have already cleared the fastbit and set SQ_WRITER. By
5769 5769 	 * definition, the count cannot increase since putnext will
5770 5770 * take the slowlock path (and the purpose of acquiring the
5771 5771 * putlocks was to make sure it didn't increase while we were
5772 5772 * waiting).
5773 5773 *
5774 5774 * Note that we still acquire the PUTLOCKS to be safe.
5775 5775 */
5776 5776 if (wait_needed) {
5777 5777 for (sq = outer->sq_onext; sq != outer; sq = sq->sq_onext) {
5778 5778 mutex_enter(SQLOCK(sq));
5779 5779 count = sq->sq_count;
5780 5780 SQ_PUTLOCKS_ENTER(sq);
5781 5781 SUM_SQ_PUTCOUNTS(sq, count);
5782 5782 while (count != 0) {
5783 5783 sq->sq_flags |= SQ_WANTWAKEUP;
5784 5784 SQ_PUTLOCKS_EXIT(sq);
5785 5785 cv_wait(&sq->sq_wait, SQLOCK(sq));
5786 5786 count = sq->sq_count;
5787 5787 SQ_PUTLOCKS_ENTER(sq);
5788 5788 SUM_SQ_PUTCOUNTS(sq, count);
5789 5789 }
5790 5790 SQ_PUTLOCKS_EXIT(sq);
5791 5791 mutex_exit(SQLOCK(sq));
5792 5792 }
5793 5793 /*
5794 5794 * Verify that none of the flags got set while we
5795 5795 * were waiting for the sq_counts to drop.
5796 5796 * If this happens we exit and retry entering the
5797 5797 * outer perimeter.
5798 5798 */
5799 5799 mutex_enter(SQLOCK(outer));
5800 5800 if (outer->sq_flags & (flags & ~SQ_WRITER)) {
5801 5801 mutex_exit(SQLOCK(outer));
5802 5802 outer_exit(outer);
5803 5803 goto retry;
5804 5804 }
5805 5805 mutex_exit(SQLOCK(outer));
5806 5806 }
5807 5807 }
5808 5808
5809 5809 /*
5810 5810 * Drop the write access at the outer perimeter.
5811 5811 * Read access is dropped implicitly (by putnext, put, and leavesq) by
5812 5812 * decrementing sq_count.
5813 5813 */
5814 5814 void
5815 5815 outer_exit(syncq_t *outer)
5816 5816 {
5817 5817 syncq_t *sq;
5818 5818 int drain_needed;
5819 5819 uint16_t flags;
5820 5820
5821 5821 ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL &&
5822 5822 outer->sq_oprev != NULL);
5823 5823 ASSERT(MUTEX_NOT_HELD(SQLOCK(outer)));
5824 5824
5825 5825 /*
5826 5826 * Atomically (from the perspective of threads calling become_writer)
5827 5827 * drop the write access at the outer perimeter by holding
5828 5828 * SQLOCK(outer) across all the dropsq calls and the resetting of
5829 5829 * SQ_WRITER.
5830 5830 * This defines a locking order between the outer perimeter
5831 5831 * SQLOCK and the inner perimeter SQLOCKs.
5832 5832 */
5833 5833 mutex_enter(SQLOCK(outer));
5834 5834 flags = outer->sq_flags;
5835 5835 ASSERT(outer->sq_flags & SQ_WRITER);
5836 5836 if (flags & SQ_QUEUED) {
5837 5837 write_now(outer);
5838 5838 flags = outer->sq_flags;
5839 5839 }
5840 5840
5841 5841 /*
5842 5842 * sq_onext is stable since sq_count has not yet been decreased.
5843 5843 * Reset the SQ_WRITER flags in all syncqs.
5844 5844 * After dropping SQ_WRITER on the outer syncq we empty all the
5845 5845 * inner syncqs.
5846 5846 */
5847 5847 drain_needed = 0;
5848 5848 for (sq = outer->sq_onext; sq != outer; sq = sq->sq_onext)
5849 5849 drain_needed += dropsq(sq, SQ_WRITER);
5850 5850 ASSERT(!(outer->sq_flags & SQ_QUEUED));
5851 5851 flags &= ~SQ_WRITER;
5852 5852 if (drain_needed) {
5853 5853 outer->sq_flags = flags;
5854 5854 mutex_exit(SQLOCK(outer));
5855 5855 for (sq = outer->sq_onext; sq != outer; sq = sq->sq_onext)
5856 5856 emptysq(sq);
5857 5857 mutex_enter(SQLOCK(outer));
5858 5858 flags = outer->sq_flags;
5859 5859 }
5860 5860 if (flags & SQ_WANTWAKEUP) {
5861 5861 flags &= ~SQ_WANTWAKEUP;
5862 5862 cv_broadcast(&outer->sq_wait);
5863 5863 }
5864 5864 outer->sq_flags = flags;
5865 5865 ASSERT(outer->sq_count > 0);
5866 5866 outer->sq_count--;
5867 5867 mutex_exit(SQLOCK(outer));
5868 5868 }
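
/*
 * Illustrative sketch: write access to the outer perimeter is always
 * bracketed by outer_enter()/outer_exit(). entersq() above takes it
 * with SQ_GOAWAY; the matching leavesq() releases it via outer_exit():
 *
 *	outer_enter(sq->sq_outer, SQ_GOAWAY);
 *	... q_next and perimeter membership are stable here ...
 *	outer_exit(sq->sq_outer);
 */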
5869 5869
5870 5870 /*
5871 5871 * Add another syncq to an outer perimeter.
5872 5872 * Block out all other access to the outer perimeter while it is being
5873 5873 * changed using blocksq.
5874 5874 * Assumes that the caller has *not* done an outer_enter.
5875 5875 *
5876 5876 * Vulnerable to starvation in blocksq.
5877 5877 */
5878 5878 static void
5879 5879 outer_insert(syncq_t *outer, syncq_t *sq)
5880 5880 {
5881 5881 ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL &&
5882 5882 outer->sq_oprev != NULL);
5883 5883 ASSERT(sq->sq_outer == NULL && sq->sq_onext == NULL &&
5884 5884 sq->sq_oprev == NULL); /* Can't be in an outer perimeter */
5885 5885
5886 5886 /* Get exclusive access to the outer perimeter list */
5887 5887 blocksq(outer, SQ_BLOCKED, 0);
5888 5888 ASSERT(outer->sq_flags & SQ_BLOCKED);
5889 5889 ASSERT(!(outer->sq_flags & SQ_WRITER));
5890 5890
5891 5891 mutex_enter(SQLOCK(sq));
5892 5892 sq->sq_outer = outer;
5893 5893 outer->sq_onext->sq_oprev = sq;
5894 5894 sq->sq_onext = outer->sq_onext;
5895 5895 outer->sq_onext = sq;
5896 5896 sq->sq_oprev = outer;
5897 5897 mutex_exit(SQLOCK(sq));
5898 5898 unblocksq(outer, SQ_BLOCKED, 1);
5899 5899 }
5900 5900
5901 5901 /*
5902 5902 * Remove a syncq from an outer perimeter.
5903 5903 * Block out all other access to the outer perimeter while it is being
5904 5904 * changed using blocksq.
5905 5905 * Assumes that the caller has *not* done an outer_enter.
5906 5906 *
5907 5907 * Vulnerable to starvation in blocksq.
5908 5908 */
5909 5909 static void
5910 5910 outer_remove(syncq_t *outer, syncq_t *sq)
5911 5911 {
5912 5912 ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL &&
5913 5913 outer->sq_oprev != NULL);
5914 5914 ASSERT(sq->sq_outer == outer);
5915 5915
5916 5916 /* Get exclusive access to the outer perimeter list */
5917 5917 blocksq(outer, SQ_BLOCKED, 0);
5918 5918 ASSERT(outer->sq_flags & SQ_BLOCKED);
5919 5919 ASSERT(!(outer->sq_flags & SQ_WRITER));
5920 5920
5921 5921 mutex_enter(SQLOCK(sq));
5922 5922 sq->sq_outer = NULL;
5923 5923 sq->sq_onext->sq_oprev = sq->sq_oprev;
5924 5924 sq->sq_oprev->sq_onext = sq->sq_onext;
5925 5925 sq->sq_oprev = sq->sq_onext = NULL;
5926 5926 mutex_exit(SQLOCK(sq));
5927 5927 unblocksq(outer, SQ_BLOCKED, 1);
5928 5928 }
5929 5929
5930 5930 /*
5931 5931 * Queue a deferred qwriter(OUTER) callback for this outer perimeter.
5932 5932 * If this is the first callback for this outer perimeter then add
5933 5933 * this outer perimeter to the list of outer perimeters that
5934 5934 * the qwriter_outer_thread will process.
5935 5935 *
5936 5936 * Increments sq_count in the outer syncq to prevent the membership
5937 5937 * of the outer perimeter (in terms of inner syncqs) to change while
5938 5938 * the callback is pending.
5939 5939 */
5940 5940 static void
5941 5941 queue_writer(syncq_t *outer, void (*func)(), queue_t *q, mblk_t *mp)
5942 5942 {
5943 5943 ASSERT(MUTEX_HELD(SQLOCK(outer)));
5944 5944
5945 5945 mp->b_prev = (mblk_t *)func;
5946 5946 mp->b_queue = q;
5947 5947 mp->b_next = NULL;
5948 5948 outer->sq_count++; /* Decremented when dequeued */
5949 5949 ASSERT(outer->sq_count != 0); /* Wraparound */
5950 5950 if (outer->sq_evhead == NULL) {
5951 5951 /* First message. */
5952 5952 outer->sq_evhead = outer->sq_evtail = mp;
5953 5953 outer->sq_flags |= SQ_EVENTS;
5954 5954 mutex_exit(SQLOCK(outer));
5955 5955 STRSTAT(qwr_outer);
5956 5956 (void) taskq_dispatch(streams_taskq,
5957 5957 (task_func_t *)qwriter_outer_service, outer, TQ_SLEEP);
5958 5958 } else {
5959 5959 ASSERT(outer->sq_flags & SQ_EVENTS);
5960 5960 outer->sq_evtail->b_next = mp;
5961 5961 outer->sq_evtail = mp;
5962 5962 mutex_exit(SQLOCK(outer));
5963 5963 }
5964 5964 }
5965 5965
5966 5966 /*
5967 5967 * Try and upgrade to write access at the outer perimeter. If this can
5968 5968 * not be done without blocking then queue the callback to be done
5969 5969 * by the qwriter_outer_thread.
5970 5970 *
5971 5971 * This routine can only be called from put or service procedures plus
5972 5972 * asynchronous callback routines that have properly entered the queue (with
5973 5973 * entersq). Thus qwriter(OUTER) assumes the caller has one claim on the syncq
5974 5974 * associated with q.
5975 5975 */
5976 5976 void
5977 5977 qwriter_outer(queue_t *q, mblk_t *mp, void (*func)())
5978 5978 {
5979 5979 syncq_t *osq, *sq, *outer;
5980 5980 int failed;
5981 5981 uint16_t flags;
5982 5982
5983 5983 osq = q->q_syncq;
5984 5984 outer = osq->sq_outer;
5985 5985 if (outer == NULL)
5986 5986 panic("qwriter(PERIM_OUTER): no outer perimeter");
5987 5987 ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL &&
5988 5988 outer->sq_oprev != NULL);
5989 5989
5990 5990 mutex_enter(SQLOCK(outer));
5991 5991 flags = outer->sq_flags;
5992 5992 	/*
5993 5993 	 * If some thread is traversing sq_next, or if we are blocked by
5994 5994 	 * outer_insert or outer_remove, or if we already have queued
5995 5995 	 * callbacks, then queue this callback for later processing.
5996 5996 	 *
5997 5997 	 * Also queue the qwriter if called from an interrupt thread, in
5998 5998 	 * order to reduce the time spent running at high IPL.
6000 6000 	 */
6001 6001 if ((flags & SQ_GOAWAY) || (curthread->t_pri >= kpreemptpri)) {
6002 6002 /*
6003 6003 * Queue the become_writer request.
6004 6004 * The queueing is atomic under SQLOCK(outer) in order
6005 6005 * to synchronize with outer_exit.
6006 6006 * queue_writer will drop the outer SQLOCK
6007 6007 */
6008 6008 if (flags & SQ_BLOCKED) {
6009 6009 /* Must set SQ_WRITER on inner perimeter */
6010 6010 mutex_enter(SQLOCK(osq));
6011 6011 osq->sq_flags |= SQ_WRITER;
6012 6012 mutex_exit(SQLOCK(osq));
6013 6013 } else {
6014 6014 if (!(flags & SQ_WRITER)) {
6015 6015 /*
6016 6016 * The outer could have been SQ_BLOCKED thus
6017 6017 * SQ_WRITER might not be set on the inner.
6018 6018 */
6019 6019 mutex_enter(SQLOCK(osq));
6020 6020 osq->sq_flags |= SQ_WRITER;
6021 6021 mutex_exit(SQLOCK(osq));
6022 6022 }
6023 6023 ASSERT(osq->sq_flags & SQ_WRITER);
6024 6024 }
6025 6025 queue_writer(outer, func, q, mp);
6026 6026 return;
6027 6027 }
6028 6028 /*
6029 6029 * We are half-way to exclusive access to the outer perimeter.
6030 6030 * Prevent any outer_enter, qwriter(OUTER), or outer_insert/remove
6031 6031 * while the inner syncqs are traversed.
6032 6032 */
6033 6033 outer->sq_count++;
6034 6034 ASSERT(outer->sq_count != 0); /* wraparound */
6035 6035 flags |= SQ_WRITER;
6036 6036 /*
6037 6037 * Check if we can run the function immediately. Mark all
6038 6038 * syncqs with the writer flag to prevent new entries into
6039 6039 * put and service procedures.
6040 6040 *
6041 6041 * Set SQ_WRITER on all the inner syncqs while holding
6042 6042 * the SQLOCK on the outer syncq. This ensures that the changing
6043 6043 * of SQ_WRITER is atomic under the outer SQLOCK.
6044 6044 */
6045 6045 failed = 0;
6046 6046 for (sq = outer->sq_onext; sq != outer; sq = sq->sq_onext) {
6047 6047 uint16_t count;
6048 6048 uint_t maxcnt = (sq == osq) ? 1 : 0;
6049 6049
6050 6050 mutex_enter(SQLOCK(sq));
6051 6051 count = sq->sq_count;
6052 6052 SQ_PUTLOCKS_ENTER(sq);
6053 6053 SUM_SQ_PUTCOUNTS(sq, count);
6054 6054 if (sq->sq_count > maxcnt)
6055 6055 failed = 1;
6056 6056 sq->sq_flags |= SQ_WRITER;
6057 6057 SQ_PUTLOCKS_EXIT(sq);
6058 6058 mutex_exit(SQLOCK(sq));
6059 6059 }
6060 6060 if (failed) {
6061 6061 /*
6062 6062 * Some other thread has a read claim on the outer perimeter.
6063 6063 * Queue the callback for deferred processing.
6064 6064 *
6065 6065 * queue_writer will set SQ_QUEUED before we drop SQ_WRITER
6066 6066 * so that other qwriter(OUTER) calls will queue their
6067 6067 * callbacks as well. queue_writer increments sq_count so we
6068 6068 	 * decrement to compensate for our increment.
6069 6069 *
6070 6070 * Dropping SQ_WRITER enables the writer thread to work
6071 6071 * on this outer perimeter.
6072 6072 */
6073 6073 outer->sq_flags = flags;
6074 6074 queue_writer(outer, func, q, mp);
6075 6075 		/* queue_writer dropped the lock */
6076 6076 mutex_enter(SQLOCK(outer));
6077 6077 ASSERT(outer->sq_count > 0);
6078 6078 outer->sq_count--;
6079 6079 ASSERT(outer->sq_flags & SQ_WRITER);
6080 6080 flags = outer->sq_flags;
6081 6081 flags &= ~SQ_WRITER;
6082 6082 if (flags & SQ_WANTWAKEUP) {
6083 6083 flags &= ~SQ_WANTWAKEUP;
6084 6084 cv_broadcast(&outer->sq_wait);
6085 6085 }
6086 6086 outer->sq_flags = flags;
6087 6087 mutex_exit(SQLOCK(outer));
6088 6088 return;
6089 6089 } else {
6090 6090 outer->sq_flags = flags;
6091 6091 mutex_exit(SQLOCK(outer));
6092 6092 }
6093 6093
6094 6094 /* Can run it immediately */
6095 6095 (*func)(q, mp);
6096 6096
6097 6097 outer_exit(outer);
6098 6098 }
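
/*
 * Illustrative sketch: modules reach qwriter_outer() through the
 * qwriter(9F) DDI from a put or service procedure; xx_write_side is a
 * hypothetical callback, and ownership of mp passes to the framework:
 *
 *	qwriter(q, mp, xx_write_side, PERIM_OUTER);
 *	return;		do not touch mp after this point
 */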
6099 6099
6100 6100 /*
6101 6101 * Dequeue all writer callbacks from the outer perimeter and run them.
6102 6102 */
6103 6103 static void
6104 6104 write_now(syncq_t *outer)
6105 6105 {
6106 6106 mblk_t *mp;
6107 6107 queue_t *q;
6108 6108 void (*func)();
6109 6109
6110 6110 ASSERT(MUTEX_HELD(SQLOCK(outer)));
6111 6111 ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL &&
6112 6112 outer->sq_oprev != NULL);
6113 6113 while ((mp = outer->sq_evhead) != NULL) {
6114 6114 /*
6115 6115 * queues cannot be placed on the queuelist on the outer
6116 6116 * perimeter.
6117 6117 */
6118 6118 ASSERT(!(outer->sq_flags & SQ_MESSAGES));
6119 6119 ASSERT((outer->sq_flags & SQ_EVENTS));
6120 6120
6121 6121 outer->sq_evhead = mp->b_next;
6122 6122 if (outer->sq_evhead == NULL) {
6123 6123 outer->sq_evtail = NULL;
6124 6124 outer->sq_flags &= ~SQ_EVENTS;
6125 6125 }
6126 6126 ASSERT(outer->sq_count != 0);
6127 6127 outer->sq_count--; /* Incremented when enqueued. */
6128 6128 mutex_exit(SQLOCK(outer));
6129 6129 /*
6130 6130 * Drop the message if the queue is closing.
6131 6131 * Make sure that the queue is "claimed" when the callback
6132 6132 * is run in order to satisfy various ASSERTs.
6133 6133 */
6134 6134 q = mp->b_queue;
6135 6135 func = (void (*)())mp->b_prev;
6136 6136 ASSERT(func != NULL);
6137 6137 mp->b_next = mp->b_prev = NULL;
6138 6138 if (q->q_flag & QWCLOSE) {
6139 6139 freemsg(mp);
6140 6140 } else {
6141 6141 claimq(q);
6142 6142 (*func)(q, mp);
6143 6143 releaseq(q);
6144 6144 }
6145 6145 mutex_enter(SQLOCK(outer));
6146 6146 }
6147 6147 ASSERT(MUTEX_HELD(SQLOCK(outer)));
6148 6148 }
6149 6149
6150 6150 /*
6151 6151 * The list of messages on the inner syncq is effectively hashed
6152 6152 * by destination queue. These destination queues are doubly
6153 6153 * linked lists (hopefully) in priority order. Messages are then
6154 6154 * put on the queue referenced by the q_sqhead/q_sqtail elements.
6155 6155 * Additional messages are linked together by the b_next/b_prev
6156 6156 * elements in the mblk, with (similar to putq()) the first message
6157 6157 * having a NULL b_prev and the last message having a NULL b_next.
6158 6158 *
6159 6159 * Events, such as qwriter callbacks, are put onto a list in FIFO
6160 6160 * order referenced by sq_evhead, and sq_evtail. This is a singly
6161 6161 * linked list, and messages here MUST be processed in the order queued.
6162 6162 */
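
/*
 * Pictorially (an illustrative sketch of the above):
 *
 *	sq_head -> q1 <-> q2 <-> q3 <- sq_tail		(priority order)
 *		    |
 *		q_sqhead -> mp -> mp -> NULL		(b_next links)
 *
 *	sq_evhead -> mp -> mp -> NULL <- sq_evtail	(FIFO event list)
 */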
6163 6163
6164 6164 /*
6165 6165 * Run the events on the syncq event list (sq_evhead).
6166 6166 * Assumes there is only one claim on the syncq, it is
6167 6167 * already exclusive (SQ_EXCL set), and the SQLOCK held.
6168 6168 * Messages here are processed in order, with the SQ_EXCL bit
6169 6169 * held all the way through till the last message is processed.
6170 6170 */
6171 6171 void
6172 6172 sq_run_events(syncq_t *sq)
6173 6173 {
6174 6174 mblk_t *bp;
6175 6175 queue_t *qp;
6176 6176 uint16_t flags = sq->sq_flags;
6177 6177 void (*func)();
6178 6178
6179 6179 ASSERT(MUTEX_HELD(SQLOCK(sq)));
6180 6180 ASSERT((sq->sq_outer == NULL && sq->sq_onext == NULL &&
6181 6181 sq->sq_oprev == NULL) ||
6182 6182 (sq->sq_outer != NULL && sq->sq_onext != NULL &&
6183 6183 sq->sq_oprev != NULL));
6184 6184
6185 6185 ASSERT(flags & SQ_EXCL);
6186 6186 ASSERT(sq->sq_count == 1);
6187 6187
6188 6188 	/*
6189 6189 	 * We need to process all of the events on this list. It is
6190 6190 	 * possible that new events will be added while we are away
6191 6191 	 * processing a callback, so on every iteration we re-read
6192 6192 	 * sq_evhead and start back at the beginning of the list.
6193 6193 	 */
6199 6199 for (bp = sq->sq_evhead; bp != NULL; bp = sq->sq_evhead) {
6200 6200 ASSERT(bp->b_queue->q_syncq == sq);
6201 6201 ASSERT(sq->sq_flags & SQ_EVENTS);
6202 6202
6203 6203 qp = bp->b_queue;
6204 6204 func = (void (*)())bp->b_prev;
6205 6205 ASSERT(func != NULL);
6206 6206
6207 6207 /*
6208 6208 * Messages from the event queue must be taken off in
6209 6209 * FIFO order.
6210 6210 */
6211 6211 ASSERT(sq->sq_evhead == bp);
6212 6212 sq->sq_evhead = bp->b_next;
6213 6213
6214 6214 if (bp->b_next == NULL) {
6215 6215 /* Deleting last */
6216 6216 ASSERT(sq->sq_evtail == bp);
6217 6217 sq->sq_evtail = NULL;
6218 6218 sq->sq_flags &= ~SQ_EVENTS;
6219 6219 }
6220 6220 bp->b_prev = bp->b_next = NULL;
6221 6221 ASSERT(bp->b_datap->db_ref != 0);
6222 6222
6223 6223 mutex_exit(SQLOCK(sq));
6224 6224
6225 6225 (*func)(qp, bp);
6226 6226
6227 6227 mutex_enter(SQLOCK(sq));
6228 6228 /*
6229 6229 * re-read the flags, since they could have changed.
6230 6230 */
6231 6231 flags = sq->sq_flags;
6232 6232 ASSERT(flags & SQ_EXCL);
6233 6233 }
6234 6234 ASSERT(sq->sq_evhead == NULL && sq->sq_evtail == NULL);
6235 6235 ASSERT(!(sq->sq_flags & SQ_EVENTS));
6236 6236
6237 6237 if (flags & SQ_WANTWAKEUP) {
6238 6238 flags &= ~SQ_WANTWAKEUP;
6239 6239 cv_broadcast(&sq->sq_wait);
6240 6240 }
6241 6241 if (flags & SQ_WANTEXWAKEUP) {
6242 6242 flags &= ~SQ_WANTEXWAKEUP;
6243 6243 cv_broadcast(&sq->sq_exitwait);
6244 6244 }
6245 6245 sq->sq_flags = flags;
6246 6246 }
6247 6247
6248 6248 /*
6249 6249 * Put messages on the event list.
6250 6250 * If we can go exclusive now, do so and process the event list, otherwise
6251 6251 * let the last claim service this list (or wake the sqthread).
6252 6252 * This procedure assumes SQLOCK is held. To run the event list, it
6253 6253 * must be called with no claims.
6254 6254 */
6255 6255 static void
6256 6256 sqfill_events(syncq_t *sq, queue_t *q, mblk_t *mp, void (*func)())
6257 6257 {
6258 6258 uint16_t count;
6259 6259
6260 6260 ASSERT(MUTEX_HELD(SQLOCK(sq)));
6261 6261 ASSERT(func != NULL);
6262 6262
6263 6263 /*
6264 6264 * This is a callback. Add it to the list of callbacks
6265 6265 * and see about upgrading.
6266 6266 */
6267 6267 mp->b_prev = (mblk_t *)func;
6268 6268 mp->b_queue = q;
6269 6269 mp->b_next = NULL;
6270 6270 if (sq->sq_evhead == NULL) {
6271 6271 sq->sq_evhead = sq->sq_evtail = mp;
6272 6272 sq->sq_flags |= SQ_EVENTS;
6273 6273 } else {
6274 6274 ASSERT(sq->sq_evtail != NULL);
6275 6275 ASSERT(sq->sq_evtail->b_next == NULL);
6276 6276 ASSERT(sq->sq_flags & SQ_EVENTS);
6277 6277 sq->sq_evtail->b_next = mp;
6278 6278 sq->sq_evtail = mp;
6279 6279 }
6280 6280 /*
6281 6281 * We have set SQ_EVENTS, so threads will have to
6282 6282 * unwind out of the perimeter, and new entries will
6283 6283 * not grab a putlock. But we still need to know
6284 6284 * how many threads have already made a claim to the
6285 6285 * syncq, so grab the putlocks, and sum the counts.
6286 6286 * If there are no claims on the syncq, we can upgrade
6287 6287 * to exclusive, and run the event list.
6288 6288 * NOTE: We hold the SQLOCK, so we can just grab the
6289 6289 * putlocks.
6290 6290 */
6291 6291 count = sq->sq_count;
6292 6292 SQ_PUTLOCKS_ENTER(sq);
6293 6293 SUM_SQ_PUTCOUNTS(sq, count);
6294 6294 	/*
6295 6295 	 * This thread holds no claim on the syncq (at least not on
6296 6296 	 * this entry), so we can only upgrade if no other claims
6297 6297 	 * exist; otherwise the thread holding a claim is expected
6298 6298 	 * to drain the syncq.
6299 6299 	 */
6303 6303 if (count > 0) {
6304 6304 /*
6305 6305 * Can't upgrade - other threads inside.
6306 6306 */
6307 6307 SQ_PUTLOCKS_EXIT(sq);
6308 6308 mutex_exit(SQLOCK(sq));
6309 6309 return;
6310 6310 }
6311 6311 /*
6312 6312 * Need to set SQ_EXCL and make a claim on the syncq.
6313 6313 */
6314 6314 ASSERT((sq->sq_flags & SQ_EXCL) == 0);
6315 6315 sq->sq_flags |= SQ_EXCL;
6316 6316 ASSERT(sq->sq_count == 0);
6317 6317 sq->sq_count++;
6318 6318 SQ_PUTLOCKS_EXIT(sq);
6319 6319
6320 6320 /* Process the events list */
6321 6321 sq_run_events(sq);
6322 6322
6323 6323 /*
6324 6324 * Release our claim...
6325 6325 */
6326 6326 sq->sq_count--;
6327 6327
6328 6328 /*
6329 6329 * And release SQ_EXCL.
6330 6330 * We don't need to acquire the putlocks to release
6331 6331 * SQ_EXCL, since we are exclusive, and hold the SQLOCK.
6332 6332 */
6333 6333 sq->sq_flags &= ~SQ_EXCL;
6334 6334
6335 6335 	/*
6336 6336 	 * SQ_EXCL should now be clear.
6337 6337 	 */
6338 6338 ASSERT(!(sq->sq_flags & SQ_EXCL));
6339 6339
6340 6340 	/*
6341 6341 	 * If anything was queued while we were running the
6342 6342 	 * events (or was there before), we need to process
6343 6343 	 * it now. We shouldn't be exclusive since we
6344 6344 	 * released the perimeter above (plus, we asserted
6345 6345 	 * for it).
6346 6346 	 */
6347 6347 if (!(sq->sq_flags & SQ_STAYAWAY) && (sq->sq_flags & SQ_QUEUED))
6348 6348 drain_syncq(sq);
6349 6349 else
6350 6350 mutex_exit(SQLOCK(sq));
6351 6351 }
6352 6352
6353 6353 /*
6354 6354 * Perform delayed processing. The caller has to make sure that it is safe
6355 6355 * to enter the syncq (e.g. by checking that none of the SQ_STAYAWAY bits are
6356 6356 * set).
6357 6357 *
6358 6358 * Assume that the caller has NO claims on the syncq. However, a claim
6359 6359 * on the syncq does not indicate that a thread is draining the syncq.
6360 6360 * There may be more claims on the syncq than there are threads draining
6361 6361 * (i.e. #_threads_draining <= sq_count)
6362 6362 *
6363 6363 * drain_syncq has to terminate when one of the SQ_STAYAWAY bits gets set
6364 6364 * in order to preserve qwriter(OUTER) ordering constraints.
6365 6365 *
6366 6366 * sq_putcount only needs to be checked when dispatching the queued
6367 6367 * writer call for CIPUT sync queue, but this is handled in sq_run_events.
6368 6368 */
6369 6369 void
6370 6370 drain_syncq(syncq_t *sq)
6371 6371 {
6372 6372 queue_t *qp;
6373 6373 uint16_t count;
6374 6374 uint16_t type = sq->sq_type;
6375 6375 uint16_t flags = sq->sq_flags;
6376 6376 boolean_t bg_service = sq->sq_svcflags & SQ_SERVICE;
6377 6377
6378 6378 TRACE_1(TR_FAC_STREAMS_FR, TR_DRAIN_SYNCQ_START,
6379 6379 "drain_syncq start:%p", sq);
6380 6380 ASSERT(MUTEX_HELD(SQLOCK(sq)));
6381 6381 ASSERT((sq->sq_outer == NULL && sq->sq_onext == NULL &&
6382 6382 sq->sq_oprev == NULL) ||
6383 6383 (sq->sq_outer != NULL && sq->sq_onext != NULL &&
6384 6384 sq->sq_oprev != NULL));
6385 6385
6386 6386 /*
6387 6387 * Drop SQ_SERVICE flag.
6388 6388 */
6389 6389 if (bg_service)
6390 6390 sq->sq_svcflags &= ~SQ_SERVICE;
6391 6391
6392 6392 /*
6393 6393 * If SQ_EXCL is set, someone else is processing this syncq - let him
6394 6394 * finish the job.
6395 6395 */
6396 6396 if (flags & SQ_EXCL) {
6397 6397 if (bg_service) {
6398 6398 ASSERT(sq->sq_servcount != 0);
6399 6399 sq->sq_servcount--;
6400 6400 }
6401 6401 mutex_exit(SQLOCK(sq));
6402 6402 return;
6403 6403 }
6404 6404
6405 6405 	/*
6406 6406 	 * This routine can be called by a background thread if
6407 6407 	 * it was scheduled by a hi-priority thread. So, if there are
6408 6408 	 * no messages queued, return (remember, we have the SQLOCK,
6409 6409 	 * and it cannot change until we release it); wake up any waiters too.
6410 6410 	 */
6411 6411 if (!(flags & SQ_QUEUED)) {
6412 6412 if (flags & SQ_WANTWAKEUP) {
6413 6413 flags &= ~SQ_WANTWAKEUP;
6414 6414 cv_broadcast(&sq->sq_wait);
6415 6415 }
6416 6416 if (flags & SQ_WANTEXWAKEUP) {
6417 6417 flags &= ~SQ_WANTEXWAKEUP;
6418 6418 cv_broadcast(&sq->sq_exitwait);
6419 6419 }
6420 6420 sq->sq_flags = flags;
6421 6421 if (bg_service) {
6422 6422 ASSERT(sq->sq_servcount != 0);
6423 6423 sq->sq_servcount--;
6424 6424 }
6425 6425 mutex_exit(SQLOCK(sq));
6426 6426 return;
6427 6427 }
6428 6428
6429 6429 /*
6430 6430 * If this is not a concurrent put perimeter, we need to
6431 6431 * become exclusive to drain. Also, if not CIPUT, we would
6432 6432 * not have acquired a putlock, so we don't need to check
6433 6433 * the putcounts. If not entering with a claim, we test
6434 6434 * for sq_count == 0.
6435 6435 */
6436 6436 type = sq->sq_type;
6437 6437 if (!(type & SQ_CIPUT)) {
6438 6438 if (sq->sq_count > 1) {
6439 6439 if (bg_service) {
6440 6440 ASSERT(sq->sq_servcount != 0);
6441 6441 sq->sq_servcount--;
6442 6442 }
6443 6443 mutex_exit(SQLOCK(sq));
6444 6444 return;
6445 6445 }
6446 6446 sq->sq_flags |= SQ_EXCL;
6447 6447 }
6448 6448
6449 6449 /*
6450 6450 * This is where we make a claim to the syncq.
6451 6451 * This can either be done by incrementing a putlock, or
6452 6452 * the sq_count. But since we already have the SQLOCK
6453 6453 * here, we just bump the sq_count.
6454 6454 *
6455 6455 * Note that after we make a claim, we need to let the code
6456 6456 * fall through to the end of this routine to clean itself
6457 6457 * up. A return in the while loop will put the syncq in a
6458 6458 * very bad state.
6459 6459 */
6460 6460 sq->sq_count++;
6461 6461 ASSERT(sq->sq_count != 0); /* wraparound */
6462 6462
6463 6463 while ((flags = sq->sq_flags) & SQ_QUEUED) {
6464 6464 /*
6465 6465 * If we are told to stayaway or went exclusive,
6466 6466 * we are done.
6467 6467 */
6468 6468 if (flags & (SQ_STAYAWAY)) {
6469 6469 break;
6470 6470 }
6471 6471
6472 6472 /*
6473 6473 * If there are events to run, do so.
6474 6474 * We have one claim to the syncq, so if there are
6475 6475 * more than one, other threads are running.
6476 6476 */
6477 6477 if (sq->sq_evhead != NULL) {
6478 6478 ASSERT(sq->sq_flags & SQ_EVENTS);
6479 6479
6480 6480 count = sq->sq_count;
6481 6481 SQ_PUTLOCKS_ENTER(sq);
6482 6482 SUM_SQ_PUTCOUNTS(sq, count);
6483 6483 if (count > 1) {
6484 6484 SQ_PUTLOCKS_EXIT(sq);
6485 6485 /* Can't upgrade - other threads inside */
6486 6486 break;
6487 6487 }
6488 6488 ASSERT((flags & SQ_EXCL) == 0);
6489 6489 sq->sq_flags = flags | SQ_EXCL;
6490 6490 SQ_PUTLOCKS_EXIT(sq);
6491 6491 /*
6492 6492 * we have the only claim, run the events,
6493 6493 * sq_run_events will clear the SQ_EXCL flag.
6494 6494 */
6495 6495 sq_run_events(sq);
6496 6496
6497 6497 /*
6498 6498 * If this is a CIPUT perimeter, we need
6499 6499 * to drop the SQ_EXCL flag so we can properly
6500 6500 * continue draining the syncq.
6501 6501 */
6502 6502 if (type & SQ_CIPUT) {
6503 6503 ASSERT(sq->sq_flags & SQ_EXCL);
6504 6504 sq->sq_flags &= ~SQ_EXCL;
6505 6505 }
6506 6506
6507 6507 /*
6508 6508 * And go back to the beginning just in case
6509 6509 * anything changed while we were away.
6510 6510 */
6511 6511 ASSERT((sq->sq_flags & SQ_EXCL) || (type & SQ_CIPUT));
6512 6512 continue;
6513 6513 }
6514 6514
6515 6515 ASSERT(sq->sq_evhead == NULL);
6516 6516 ASSERT(!(sq->sq_flags & SQ_EVENTS));
6517 6517
6518 6518 /*
6519 6519 * Find the queue that is not draining.
6520 6520 *
6521 6521 * q_draining is protected by QLOCK which we do not hold.
6522 6522 * But if it was set, then a thread was draining, and if it gets
6523 6523 * cleared, then it was because the thread has successfully
6524 6524 * drained the syncq, or a GOAWAY state occurred. For the GOAWAY
6525 6525 * state to happen, a thread needs the SQLOCK which we hold, and
6526 6526 * if there was such a flag, we would have already seen it.
6527 6527 */
6528 6528
6529 6529 for (qp = sq->sq_head;
6530 6530 qp != NULL && (qp->q_draining ||
6531 6531 (qp->q_sqflags & Q_SQDRAINING));
6532 6532 qp = qp->q_sqnext)
6533 6533 ;
6534 6534
6535 6535 if (qp == NULL)
6536 6536 break;
6537 6537
6538 6538 /*
6539 6539 * We have a queue to work on, and we hold the
6540 6540 * SQLOCK and one claim, call qdrain_syncq.
6541 6541 * This means we need to release the SQLOCK and
6542 6542 * acquire the QLOCK (OK since we have a claim).
6543 6543 * Note that qdrain_syncq will actually dequeue
6544 6544 * this queue from the sq_head list when it is
6545 6545 * convinced all the work is done and release
6546 6546 * the QLOCK before returning.
6547 6547 */
6548 6548 qp->q_sqflags |= Q_SQDRAINING;
6549 6549 mutex_exit(SQLOCK(sq));
6550 6550 mutex_enter(QLOCK(qp));
6551 6551 qdrain_syncq(sq, qp);
6552 6552 mutex_enter(SQLOCK(sq));
6553 6553
6554 6554 /* The queue is drained */
6555 6555 ASSERT(qp->q_sqflags & Q_SQDRAINING);
6556 6556 qp->q_sqflags &= ~Q_SQDRAINING;
6557 6557 /*
6558 6558 * NOTE: After this point qp should not be used since it may be
6559 6559 * closed.
6560 6560 */
6561 6561 }
6562 6562
6563 6563 ASSERT(MUTEX_HELD(SQLOCK(sq)));
6564 6564 flags = sq->sq_flags;
6565 6565
6566 6566 /*
6567 6567 * sq->sq_head cannot change because we hold the
6568 6568 	 * sqlock. However, a thread CAN decide that it is no longer
6569 6569 	 * going to drain that queue, but that should only be due to
6570 6570 	 * a GOAWAY state, and we should see that here.
6571 6571 *
6572 6572 * This loop is not very efficient. One solution may be adding a second
6573 6573 * pointer to the "draining" queue, but it is difficult to do when
6574 6574 * queues are inserted in the middle due to priority ordering. Another
6575 6575 * possibility is to yank the queue out of the sq list and put it onto
6576 6576 * the "draining list" and then put it back if it can't be drained.
6577 6577 */
6578 6578
6579 6579 ASSERT((sq->sq_head == NULL) || (flags & SQ_GOAWAY) ||
6580 6580 (type & SQ_CI) || sq->sq_head->q_draining);
6581 6581
6582 6582 /* Drop SQ_EXCL for non-CIPUT perimeters */
6583 6583 if (!(type & SQ_CIPUT))
6584 6584 flags &= ~SQ_EXCL;
6585 6585 ASSERT((flags & SQ_EXCL) == 0);
6586 6586
6587 6587 /* Wake up any waiters. */
6588 6588 if (flags & SQ_WANTWAKEUP) {
6589 6589 flags &= ~SQ_WANTWAKEUP;
6590 6590 cv_broadcast(&sq->sq_wait);
6591 6591 }
6592 6592 if (flags & SQ_WANTEXWAKEUP) {
6593 6593 flags &= ~SQ_WANTEXWAKEUP;
6594 6594 cv_broadcast(&sq->sq_exitwait);
6595 6595 }
6596 6596 sq->sq_flags = flags;
6597 6597
6598 6598 ASSERT(sq->sq_count != 0);
6599 6599 /* Release our claim. */
6600 6600 sq->sq_count--;
6601 6601
6602 6602 if (bg_service) {
6603 6603 ASSERT(sq->sq_servcount != 0);
6604 6604 sq->sq_servcount--;
6605 6605 }
6606 6606
6607 6607 mutex_exit(SQLOCK(sq));
6608 6608
6609 6609 TRACE_1(TR_FAC_STREAMS_FR, TR_DRAIN_SYNCQ_END,
6610 6610 "drain_syncq end:%p", sq);
6611 6611 }
6612 6612
6613 6613
6614 6614 /*
6615 6615 *
6616 6616 * qdrain_syncq can be called (currently) from only one of two places:
6617 6617 * drain_syncq
6618 6618 * putnext (or some variation of it).
6619 6619 * and eventually
6620 6620 * qwait(_sig)
6621 6621 *
6622 6622 * If called from drain_syncq, we found it in the list of queues needing
6623 6623 * service, so there is work to be done (or it wouldn't be in the list).
6624 6624 *
6625 6625 * If called from some putnext variation, it was because the
6626 6626 * perimeter is open, but messages are blocking a putnext and
6627 6627 * there is not a thread working on it. Now a thread could start
6628 6628 * working on it while we are getting ready to do so ourselves, but
6629 6629 * the thread would set the q_draining flag, and we can spin out.
6630 6630 *
6631 6631 * As for qwait(_sig), it is left to continue calling
6632 6632 * drain_syncq directly (after all, it will get here eventually).
6633 6633 *
6634 6634 * qdrain_syncq has to terminate when:
6635 6635 * - one of the SQ_STAYAWAY bits gets set to preserve qwriter(OUTER) ordering
6636 6636 * - SQ_EVENTS gets set to preserve qwriter(INNER) ordering
6637 6637 *
6638 6638 * ASSUMES:
6639 6639 * One claim
6640 6640 * QLOCK held
6641 6641 * SQLOCK not held
6642 6642 * Will release QLOCK before returning
6643 6643 */
6644 6644 void
6645 6645 qdrain_syncq(syncq_t *sq, queue_t *q)
6646 6646 {
6647 6647 mblk_t *bp;
6648 6648 #ifdef DEBUG
6649 6649 uint16_t count;
6650 6650 #endif
6651 6651
6652 6652 TRACE_1(TR_FAC_STREAMS_FR, TR_DRAIN_SYNCQ_START,
6653 6653 "drain_syncq start:%p", sq);
6654 6654 ASSERT(q->q_syncq == sq);
6655 6655 ASSERT(MUTEX_HELD(QLOCK(q)));
6656 6656 ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));
6657 6657 /*
6658 6658 * For non-CIPUT perimeters, we should be called with the exclusive bit
6659 6659 * set already. For CIPUT perimeters, we will be doing a concurrent
6660 6660 * drain, so it had better not be set.
6661 6661 */
6662 6662 ASSERT((sq->sq_flags & (SQ_EXCL|SQ_CIPUT)));
6663 6663 ASSERT(!((sq->sq_type & SQ_CIPUT) && (sq->sq_flags & SQ_EXCL)));
6664 6664 ASSERT((sq->sq_type & SQ_CIPUT) || (sq->sq_flags & SQ_EXCL));
6665 6665 /*
6666 6666 * All outer pointers are set, or none of them are
6667 6667 */
6668 6668 ASSERT((sq->sq_outer == NULL && sq->sq_onext == NULL &&
6669 6669 sq->sq_oprev == NULL) ||
6670 6670 (sq->sq_outer != NULL && sq->sq_onext != NULL &&
6671 6671 sq->sq_oprev != NULL));
6672 6672 #ifdef DEBUG
6673 6673 count = sq->sq_count;
6674 6674 /*
6675 6675 * This is OK without the putlocks, because we have one
6676 6676 * claim either from the sq_count, or a putcount. We could
6677 6677 * get an erroneous value from other counts, but ours won't
6678 6678 * change, so one way or another, we will have at least a
6679 6679 * value of one.
6680 6680 */
6681 6681 SUM_SQ_PUTCOUNTS(sq, count);
6682 6682 ASSERT(count >= 1);
6683 6683 #endif /* DEBUG */
6684 6684
6685 6685 /*
6686 6686 * The first thing to do is find out if a thread is already draining
6687 6687 * this queue. If so, we are done, just return.
6688 6688 */
6689 6689 if (q->q_draining) {
6690 6690 mutex_exit(QLOCK(q));
6691 6691 return;
6692 6692 }
6693 6693
6694 6694 /*
6695 6695 * If the perimeter wants to go exclusive, there is nothing we can do
6696 6696 * right now; nothing prevents that from happening at any point, so
6697 6697 * the spin-out test in the drain loop below catches it.
6698 6698 */
6699 6699
6700 6700 /* Tell other threads that we are draining this queue */
6701 6701 q->q_draining = 1; /* Protected by QLOCK */
6702 6702
6703 6703 /*
6704 6704 * If there is nothing to do, clear QFULL as necessary. This caters for
6705 6705 * the case where an empty queue was enqueued onto the syncq.
6706 6706 */
6707 6707 if (q->q_sqhead == NULL) {
6708 6708 ASSERT(q->q_syncqmsgs == 0);
6709 6709 mutex_exit(QLOCK(q));
6710 6710 clr_qfull(q);
6711 6711 mutex_enter(QLOCK(q));
6712 6712 }
6713 6713
6714 6714 /*
6715 6715 * Note that q_sqhead must be re-checked here in case another message
6716 6716 * was enqueued whilst QLOCK was dropped during the call to clr_qfull.
6717 6717 */
6718 6718 for (bp = q->q_sqhead; bp != NULL; bp = q->q_sqhead) {
6719 6719 /*
6720 6720 * Because we can enter this routine just because a putnext is
6721 6721 * blocked, we need to spin out if the perimeter wants to go
6722 6722 * exclusive as well as just blocked. We need to spin out also
6723 6723 * if events are queued on the syncq.
6724 6724 * Don't check for SQ_EXCL, because non-CIPUT perimeters would
6725 6725 * set it, and it can't become exclusive while we hold a claim.
6726 6726 */
6727 6727 if (sq->sq_flags & (SQ_STAYAWAY | SQ_EVENTS)) {
6728 6728 break;
6729 6729 }
6730 6730
6731 6731 #ifdef DEBUG
6732 6732 /*
6733 6733 * Since we are in qdrain_syncq, we already know the queue,
6734 6734 * but for sanity, we want to check it against the qp that
6735 6735 * was recorded in bp->b_queue.
6736 6736 */
6737 6737
6738 6738 ASSERT(bp->b_queue == q);
6739 6739 ASSERT(bp->b_queue->q_syncq == sq);
6740 6740 bp->b_queue = NULL;
6741 6741
6742 6742 /*
6743 6743 * We would have the following check in the DEBUG code:
6744 6744 *
6745 6745 * if (bp->b_prev != NULL) {
6746 6746 * ASSERT(bp->b_prev == (void (*)())q->q_qinfo->qi_putp);
6747 6747 * }
6748 6748 *
6749 6749 * This can't be done, however, since IP modifies qinfo
6750 6750 * structure at run-time (switching between IPv4 qinfo and IPv6
6751 6751 * qinfo), invalidating the check.
6752 6752 * So the assignment to func is left here, but the ASSERT itself
6753 6753 * is removed until the whole issue is resolved.
6754 6754 */
6755 6755 #endif
6756 6756 ASSERT(q->q_sqhead == bp);
6757 6757 q->q_sqhead = bp->b_next;
6758 6758 bp->b_prev = bp->b_next = NULL;
6759 6759 ASSERT(q->q_syncqmsgs > 0);
6760 6760 mutex_exit(QLOCK(q));
6761 6761
6762 6762 ASSERT(bp->b_datap->db_ref != 0);
6763 6763
6764 6764 (void) (*q->q_qinfo->qi_putp)(q, bp);
6765 6765
6766 6766 mutex_enter(QLOCK(q));
6767 6767
6768 6768 /*
6769 6769 * q_syncqmsgs should only be decremented after executing the
6770 6770 * put procedure to avoid message re-ordering. This is due to an
6771 6771 * optimisation in putnext() which can call the put procedure
6772 6772 * directly if it sees q_syncqmsgs == 0 (despite Q_SQQUEUED
6773 6773 * being set).
6774 6774 *
6775 6775 * We also need to clear QFULL in the next service procedure
6776 6776 * queue if this is the last message destined for that queue.
6777 6777 *
6778 6778 * It would make better sense to have some sort of tunable for
6779 6779 * the low water mark, but these semantics are not yet defined.
6780 6780 * So, alas, we use a constant.
6781 6781 */
6782 6782 if (--q->q_syncqmsgs == 0) {
6783 6783 mutex_exit(QLOCK(q));
6784 6784 clr_qfull(q);
6785 6785 mutex_enter(QLOCK(q));
6786 6786 }
6787 6787
6788 6788 /*
6789 6789 * Always clear SQ_EXCL when CIPUT in order to handle
6790 6790 * qwriter(INNER). The putp() can call qwriter and get exclusive
6791 6791 * access IFF this is the only claim. So, we need to test for
6792 6792 * this possibility, acquire the mutex and clear the bit.
6793 6793 */
6794 6794 if ((sq->sq_type & SQ_CIPUT) && (sq->sq_flags & SQ_EXCL)) {
6795 6795 mutex_enter(SQLOCK(sq));
6796 6796 sq->sq_flags &= ~SQ_EXCL;
6797 6797 mutex_exit(SQLOCK(sq));
6798 6798 }
6799 6799 }
6800 6800
6801 6801 /*
6802 6802 * We should either have no messages on this queue, or we were told to
6803 6803 * goaway by a waiter (which we will wake up at the end of this
6804 6804 * function).
6805 6805 */
6806 6806 ASSERT((q->q_sqhead == NULL) ||
6807 6807 (sq->sq_flags & (SQ_STAYAWAY | SQ_EVENTS)));
6808 6808
6809 6809 ASSERT(MUTEX_HELD(QLOCK(q)));
6810 6810 ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));
6811 6811
6812 6812 /* Remove the q from the syncq list if all the messages are drained. */
6813 6813 if (q->q_sqhead == NULL) {
6814 6814 ASSERT(q->q_syncqmsgs == 0);
6815 6815 mutex_enter(SQLOCK(sq));
6816 6816 if (q->q_sqflags & Q_SQQUEUED)
6817 6817 SQRM_Q(sq, q);
6818 6818 mutex_exit(SQLOCK(sq));
6819 6819 /*
6820 6820 * Since the queue is removed from the list, reset its priority.
6821 6821 */
6822 6822 q->q_spri = 0;
6823 6823 }
6824 6824
6825 6825 /*
6826 6826 * Remember, the q_draining flag is used to let another thread know
6827 6827 * that there is a thread currently draining the messages for a queue.
6828 6828 * Since we are now done with this queue (even if there may be messages
6829 6829 * still there), we need to clear this flag so some thread will work on
6830 6830 * it if needed.
6831 6831 */
6832 6832 ASSERT(q->q_draining);
6833 6833 q->q_draining = 0;
6834 6834
6835 6835 /* Called with a claim, so OK to drop all locks. */
6836 6836 mutex_exit(QLOCK(q));
6837 6837
6838 6838 TRACE_1(TR_FAC_STREAMS_FR, TR_DRAIN_SYNCQ_END,
6839 6839 "drain_syncq end:%p", sq);
6840 6840 }
6841 6841 /* END OF QDRAIN_SYNCQ */
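/*
 * Editor's sketch (illustrative, not part of this file): the locking
 * protocol a qdrain_syncq() caller follows, mirroring the drain_syncq()
 * call site above. qp is a queue found on sq's list while holding
 * SQLOCK plus one claim; qdrain_syncq() itself drops QLOCK before
 * returning.
 *
 *	qp->q_sqflags |= Q_SQDRAINING;
 *	mutex_exit(SQLOCK(sq));
 *	mutex_enter(QLOCK(qp));
 *	qdrain_syncq(sq, qp);
 *	mutex_enter(SQLOCK(sq));
 *	qp->q_sqflags &= ~Q_SQDRAINING;
 */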
6842 6842
6843 6843
6844 6844 /*
6845 6845 * This is the mate to qdrain_syncq, except that it is putting the message onto
6846 6846 * the queue instead of draining. Since the message is destined for the
6847 6847 * selected queue, there is no need to record the target function: the
6848 6848 * message always goes to that queue's put routine. Debug kernels record
6849 6849 * it anyway, just in case.
6850 6850 *
6851 6851 * After the message is enqueued on the syncq, it calls putnext_tail()
6852 6852 * which will schedule a background thread to actually process the message.
6853 6853 *
6854 6854 * Assumes that there is a claim on the syncq (sq->sq_count > 0) and
6855 6855 * SQLOCK(sq) and QLOCK(q) are not held.
6856 6856 */
6857 6857 void
6858 6858 qfill_syncq(syncq_t *sq, queue_t *q, mblk_t *mp)
6859 6859 {
6860 6860 ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));
6861 6861 ASSERT(MUTEX_NOT_HELD(QLOCK(q)));
6862 6862 ASSERT(sq->sq_count > 0);
6863 6863 ASSERT(q->q_syncq == sq);
6864 6864 ASSERT((sq->sq_outer == NULL && sq->sq_onext == NULL &&
6865 6865 sq->sq_oprev == NULL) ||
6866 6866 (sq->sq_outer != NULL && sq->sq_onext != NULL &&
6867 6867 sq->sq_oprev != NULL));
6868 6868
6869 6869 mutex_enter(QLOCK(q));
6870 6870
6871 6871 #ifdef DEBUG
6872 6872 /*
6873 6873 * This is used for debug in the qfill_syncq/qdrain_syncq case
6874 6874 * to trace the queue that the message is intended for. Note
6875 6875 * that the original use was to identify the queue and function
6876 6876 * to call on the drain. In the new syncq, we have the context
6877 6877 * of the queue that we are draining, so call its putproc and
6878 6878 * don't rely on the saved values. But for debug this is still
6879 6879 * useful information.
6880 6880 */
6881 6881 mp->b_prev = (mblk_t *)q->q_qinfo->qi_putp;
6882 6882 mp->b_queue = q;
6883 6883 mp->b_next = NULL;
6884 6884 #endif
6885 6885 ASSERT(q->q_syncq == sq);
6886 6886 /*
6887 6887 * Enqueue the message on the list.
6888 6888 * SQPUT_MP() accesses q_syncqmsgs. We are already holding QLOCK to
6889 6889 * protect it. So it's ok to acquire SQLOCK after SQPUT_MP().
6890 6890 */
6891 6891 SQPUT_MP(q, mp);
6892 6892 mutex_enter(SQLOCK(sq));
6893 6893
6894 6894 /*
6895 6895 * And queue on syncq for scheduling, if not already queued.
6896 6896 * Note that we need the SQLOCK for this, and for testing flags
6897 6897 * at the end to see if we will drain. So grab it now, and
6898 6898 * release it before we call qdrain_syncq or return.
6899 6899 */
6900 6900 if (!(q->q_sqflags & Q_SQQUEUED)) {
6901 6901 q->q_spri = curthread->t_pri;
6902 6902 SQPUT_Q(sq, q);
6903 6903 }
6904 6904 #ifdef DEBUG
6905 6905 else {
6906 6906 /*
6907 6907 * All of these conditions MUST be true!
6908 6908 */
6909 6909 ASSERT(sq->sq_tail != NULL);
6910 6910 if (sq->sq_tail == sq->sq_head) {
6911 6911 ASSERT((q->q_sqprev == NULL) &&
6912 6912 (q->q_sqnext == NULL));
6913 6913 } else {
6914 6914 ASSERT((q->q_sqprev != NULL) ||
6915 6915 (q->q_sqnext != NULL));
6916 6916 }
6917 6917 ASSERT(sq->sq_flags & SQ_QUEUED);
6918 6918 ASSERT(q->q_syncqmsgs != 0);
6919 6919 ASSERT(q->q_sqflags & Q_SQQUEUED);
6920 6920 }
6921 6921 #endif
6922 6922 mutex_exit(QLOCK(q));
6923 6923 /*
6924 6924 * SQLOCK is still held, so sq_count can be safely decremented.
6925 6925 */
6926 6926 sq->sq_count--;
6927 6927
6928 6928 putnext_tail(sq, q, 0);
6929 6929 /* Should not reference sq or q after this point. */
6930 6930 }
6931 6931
6932 6932 /* End of qfill_syncq */
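/*
 * Editor's sketch (illustrative): the deferral decision a put-side
 * caller of qfill_syncq() makes. The predicate is hypothetical; the
 * real putnext logic is more involved. The caller already holds a
 * claim (sq_count was incremented), and qfill_syncq() drops that claim
 * itself (the sq_count-- above), so the caller must not release it
 * again.
 *
 *	if (perimeter_busy)
 *		qfill_syncq(sq, q, mp);
 *	else
 *		(void) (*q->q_qinfo->qi_putp)(q, mp);
 */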
6933 6933
6934 6934 /*
6935 6935 * Remove all messages from a syncq (if qp is NULL) or remove all messages
6936 6936 * that would be put into qp by drain_syncq.
6937 6937 * Used when deleting the syncq (qp == NULL) or when detaching
6938 6938 * a queue (qp != NULL).
6939 6939 * Return non-zero if one or more messages were freed.
6940 6940 *
6941 6941 * No need to grab sq_putlocks here. See comment in strsubr.h that explains when
6942 6942 * sq_putlocks are used.
6943 6943 *
6944 6944 * NOTE: This function assumes that it is called from the close() context and
6945 6945 * that all the queues in the syncq are going away. For this reason it doesn't
6946 6946 * acquire QLOCK for modifying q_sqhead/q_sqtail fields. This assumption is
6947 6947 * currently valid, but it would be worth reworking this function to behave
6948 6948 * properly in other cases.
6949 6949 */
6950 6950 int
6951 6951 flush_syncq(syncq_t *sq, queue_t *qp)
6952 6952 {
6953 6953 mblk_t *bp, *mp_head, *mp_next, *mp_prev;
6954 6954 queue_t *q;
6955 6955 int ret = 0;
6956 6956
6957 6957 mutex_enter(SQLOCK(sq));
6958 6958
6959 6959 /*
6960 6960 * First, free any events listed for this queue: when a
6961 6961 * specific queue (qp) is being detached, its pending
6962 6962 * events cannot be delivered and are simply discarded.
6963 6963 */
6964 6964 if (qp != NULL && sq->sq_evhead != NULL) {
6965 6965 ASSERT(sq->sq_flags & SQ_EVENTS);
6966 6966
6967 6967 mp_prev = NULL;
6968 6968 for (bp = sq->sq_evhead; bp != NULL; bp = mp_next) {
6969 6969 mp_next = bp->b_next;
6970 6970 if (bp->b_queue == qp) {
6971 6971 /* Delete this message */
6972 6972 if (mp_prev != NULL) {
6973 6973 mp_prev->b_next = mp_next;
6974 6974 /*
6975 6975 * Update sq_evtail if the last element
6976 6976 * is removed.
6977 6977 */
6978 6978 if (bp == sq->sq_evtail) {
6979 6979 ASSERT(mp_next == NULL);
6980 6980 sq->sq_evtail = mp_prev;
6981 6981 }
6982 6982 } else
6983 6983 sq->sq_evhead = mp_next;
6984 6984 if (sq->sq_evhead == NULL)
6985 6985 sq->sq_flags &= ~SQ_EVENTS;
6986 6986 bp->b_prev = bp->b_next = NULL;
6987 6987 freemsg(bp);
6988 6988 ret++;
6989 6989 } else {
6990 6990 mp_prev = bp;
6991 6991 }
6992 6992 }
6993 6993 }
6994 6994
6995 6995 /*
6996 6996 * Walk sq_head and:
6997 6997 * - if qp is set, match it and remove its messages
6998 6998 * - if qp is not set, remove all messages
6999 6999 */
7000 7000 q = sq->sq_head;
7001 7001 while (q != NULL) {
7002 7002 ASSERT(q->q_syncq == sq);
7003 7003 if ((qp == NULL) || (qp == q)) {
7004 7004 /*
7005 7005 * Yank the messages as a list off the queue
7006 7006 */
7007 7007 mp_head = q->q_sqhead;
7008 7008 /*
7009 7009 * We do not have QLOCK(q) here (which is safe due to
7010 7010 * assumptions mentioned above). To obtain the lock we
7011 7011 * need to release SQLOCK which may allow lots of things
7012 7012 * to change upon us. This place requires more analysis.
7013 7013 */
7014 7014 q->q_sqhead = q->q_sqtail = NULL;
7015 7015 ASSERT(mp_head->b_queue &&
7016 7016 mp_head->b_queue->q_syncq == sq);
7017 7017
7018 7018 /*
7019 7019 * Free each of the messages.
7020 7020 */
7021 7021 for (bp = mp_head; bp != NULL; bp = mp_next) {
7022 7022 mp_next = bp->b_next;
7023 7023 bp->b_prev = bp->b_next = NULL;
7024 7024 freemsg(bp);
7025 7025 ret++;
7026 7026 }
7027 7027 /*
7028 7028 * Now remove the queue from the syncq.
7029 7029 */
7030 7030 ASSERT(q->q_sqflags & Q_SQQUEUED);
7031 7031 SQRM_Q(sq, q);
7032 7032 q->q_spri = 0;
7033 7033 q->q_syncqmsgs = 0;
7034 7034
7035 7035 /*
7036 7036 * If qp was specified, we are done with it and are
7037 7037 * going to drop SQLOCK(sq) and return. We wakeup syncq
7038 7038 * waiters while we still have the SQLOCK.
7039 7039 */
7040 7040 if ((qp != NULL) && (sq->sq_flags & SQ_WANTWAKEUP)) {
7041 7041 sq->sq_flags &= ~SQ_WANTWAKEUP;
7042 7042 cv_broadcast(&sq->sq_wait);
7043 7043 }
7044 7044 /* Drop SQLOCK across clr_qfull */
7045 7045 mutex_exit(SQLOCK(sq));
7046 7046
7047 7047 /*
7048 7048 * We avoid doing the test that drain_syncq does and
7049 7049 * unconditionally clear qfull for every flushed
7050 7050 * message. Since flush_syncq is only called during
7051 7051 * close this should not be a problem.
7052 7052 */
7053 7053 clr_qfull(q);
7054 7054 if (qp != NULL) {
7055 7055 return (ret);
7056 7056 } else {
7057 7057 mutex_enter(SQLOCK(sq));
7058 7058 /*
7059 7059 * The head was removed by SQRM_Q above.
7060 7060 * Re-read the new head and flush it.
7061 7061 */
7062 7062 q = sq->sq_head;
7063 7063 }
7064 7064 } else {
7065 7065 q = q->q_sqnext;
7066 7066 }
7067 7067 ASSERT(MUTEX_HELD(SQLOCK(sq)));
7068 7068 }
7069 7069
7070 7070 if (sq->sq_flags & SQ_WANTWAKEUP) {
7071 7071 sq->sq_flags &= ~SQ_WANTWAKEUP;
7072 7072 cv_broadcast(&sq->sq_wait);
7073 7073 }
7074 7074
7075 7075 mutex_exit(SQLOCK(sq));
7076 7076 return (ret);
7077 7077 }
7078 7078
7079 7079 /*
7080 7080 * Propagate all messages from a syncq to the next syncq that are associated
7081 7081 * with the specified queue. If the queue is attached to a driver or if the
7082 7082 * messages have been added due to a qwriter(PERIM_INNER), free the messages.
7083 7083 *
7084 7084 * Assumes that the stream is strlock()'ed. We don't come here if there
7085 7085 * are no messages to propagate.
7086 7086 *
7087 7087 * NOTE : If the queue is attached to a driver, all the messages are freed
7088 7088 * as there is no point in propagating the messages from the driver syncq
7089 7089 * to the closing stream head which will in turn get freed later.
7090 7090 */
7091 7091 static int
7092 7092 propagate_syncq(queue_t *qp)
7093 7093 {
7094 7094 mblk_t *bp, *head, *tail, *prev, *next;
7095 7095 syncq_t *sq;
7096 7096 queue_t *nqp;
7097 7097 syncq_t *nsq;
7098 7098 boolean_t isdriver;
7099 7099 int moved = 0;
7100 7100 uint16_t flags;
7101 7101 pri_t priority = curthread->t_pri;
7102 7102 #ifdef DEBUG
7103 7103 void (*func)();
7104 7104 #endif
7105 7105
7106 7106 sq = qp->q_syncq;
7107 7107 ASSERT(MUTEX_HELD(SQLOCK(sq)));
7108 7108 /* debug macro */
7109 7109 SQ_PUTLOCKS_HELD(sq);
7110 7110 /*
7111 7111 * As entersq() does not increment the sq_count for
7112 7112 * the write side, check sq_count for non-QPERQ
7113 7113 * perimeters alone.
7114 7114 */
7115 7115 ASSERT((qp->q_flag & QPERQ) || (sq->sq_count >= 1));
7116 7116
7117 7117 /*
7118 7118 * propagate_syncq() can be called because of either messages on the
7119 7119 * queue syncq or because of events on the queue syncq. Do the actual
7120 7120 * message propagation if there are any messages.
7121 7121 */
7122 7122 if (qp->q_syncqmsgs) {
7123 7123 isdriver = (qp->q_flag & QISDRV);
7124 7124
7125 7125 if (!isdriver) {
7126 7126 nqp = qp->q_next;
7127 7127 nsq = nqp->q_syncq;
7128 7128 ASSERT(MUTEX_HELD(SQLOCK(nsq)));
7129 7129 /* debug macro */
7130 7130 SQ_PUTLOCKS_HELD(nsq);
7131 7131 #ifdef DEBUG
7132 7132 func = (void (*)())nqp->q_qinfo->qi_putp;
7133 7133 #endif
7134 7134 }
7135 7135
7136 7136 SQRM_Q(sq, qp);
7137 7137 priority = MAX(qp->q_spri, priority);
7138 7138 qp->q_spri = 0;
7139 7139 head = qp->q_sqhead;
7140 7140 tail = qp->q_sqtail;
7141 7141 qp->q_sqhead = qp->q_sqtail = NULL;
7142 7142 qp->q_syncqmsgs = 0;
7143 7143
7144 7144 /*
7145 7145 * Walk the list of messages, and free them if this is a driver,
7146 7146 * otherwise retarget b_queue (and, on debug kernels, b_prev) at the
7147 7147 * next queue and its put procedure. Afterward, the whole list is
7148 7148 * appended to the next queue's syncq message list.
7149 7149 */
7150 7150
7151 7151 for (bp = head; bp != NULL; bp = next) {
7152 7152 next = bp->b_next;
7153 7153 if (isdriver) {
7154 7154 bp->b_prev = bp->b_next = NULL;
7155 7155 freemsg(bp);
7156 7156 continue;
7157 7157 }
7158 7158 /* Change the q values for this message */
7159 7159 bp->b_queue = nqp;
7160 7160 #ifdef DEBUG
7161 7161 bp->b_prev = (mblk_t *)func;
7162 7162 #endif
7163 7163 moved++;
7164 7164 }
7165 7165 /*
7166 7166 * Attach list of messages to the end of the new queue (if there
7167 7167 * is a list of messages).
7168 7168 */
7169 7169
7170 7170 if (!isdriver && head != NULL) {
7171 7171 ASSERT(tail != NULL);
7172 7172 if (nqp->q_sqhead == NULL) {
7173 7173 nqp->q_sqhead = head;
7174 7174 } else {
7175 7175 ASSERT(nqp->q_sqtail != NULL);
7176 7176 nqp->q_sqtail->b_next = head;
7177 7177 }
7178 7178 nqp->q_sqtail = tail;
7179 7179 /*
7180 7180 * When messages are moved from a high-priority queue to
7181 7181 * another queue, the destination queue priority is
7182 7182 * upgraded.
7183 7183 */
7184 7184
7185 7185 if (priority > nqp->q_spri)
7186 7186 nqp->q_spri = priority;
7187 7187
7188 7188 SQPUT_Q(nsq, nqp);
7189 7189
7190 7190 nqp->q_syncqmsgs += moved;
7191 7191 ASSERT(nqp->q_syncqmsgs != 0);
7192 7192 }
7193 7193 }
7194 7194
7195 7195 /*
7196 7196 * Before we leave, we need to make sure there are no
7197 7197 * events listed for this queue. All events for this queue
7198 7198 * will just be freed.
7199 7199 */
7200 7200 if (sq->sq_evhead != NULL) {
7201 7201 ASSERT(sq->sq_flags & SQ_EVENTS);
7202 7202 prev = NULL;
7203 7203 for (bp = sq->sq_evhead; bp != NULL; bp = next) {
7204 7204 next = bp->b_next;
7205 7205 if (bp->b_queue == qp) {
7206 7206 /* Delete this message */
7207 7207 if (prev != NULL) {
7208 7208 prev->b_next = next;
7209 7209 /*
7210 7210 * Update sq_evtail if the last element
7211 7211 * is removed.
7212 7212 */
7213 7213 if (bp == sq->sq_evtail) {
7214 7214 ASSERT(next == NULL);
7215 7215 sq->sq_evtail = prev;
7216 7216 }
7217 7217 } else
7218 7218 sq->sq_evhead = next;
7219 7219 if (sq->sq_evhead == NULL)
7220 7220 sq->sq_flags &= ~SQ_EVENTS;
7221 7221 bp->b_prev = bp->b_next = NULL;
7222 7222 freemsg(bp);
7223 7223 } else {
7224 7224 prev = bp;
7225 7225 }
7226 7226 }
7227 7227 }
7228 7228
7229 7229 flags = sq->sq_flags;
7230 7230
7231 7231 /* Wake up any waiter before leaving. */
7232 7232 if (flags & SQ_WANTWAKEUP) {
7233 7233 flags &= ~SQ_WANTWAKEUP;
7234 7234 cv_broadcast(&sq->sq_wait);
7235 7235 }
7236 7236 sq->sq_flags = flags;
7237 7237
7238 7238 return (moved);
7239 7239 }
7240 7240
7241 7241 /*
7242 7242 * Try to upgrade to exclusive access at the inner perimeter. If this
7243 7243 * cannot be done without blocking, the request is queued on the syncq
7244 7244 * and drain_syncq will run it later.
7245 7245 *
7246 7246 * This routine can only be called from put or service procedures plus
7247 7247 * asynchronous callback routines that have properly entered the queue (with
7248 7248 * entersq). Thus qwriter_inner assumes the caller has one claim on the syncq
7249 7249 * associated with q.
7250 7250 */
7251 7251 void
7252 7252 qwriter_inner(queue_t *q, mblk_t *mp, void (*func)())
7253 7253 {
7254 7254 syncq_t *sq = q->q_syncq;
7255 7255 uint16_t count;
7256 7256
7257 7257 mutex_enter(SQLOCK(sq));
7258 7258 count = sq->sq_count;
7259 7259 SQ_PUTLOCKS_ENTER(sq);
7260 7260 SUM_SQ_PUTCOUNTS(sq, count);
7261 7261 ASSERT(count >= 1);
7262 7262 ASSERT(sq->sq_type & (SQ_CIPUT|SQ_CISVC));
7263 7263
7264 7264 if (count == 1) {
7265 7265 /*
7266 7266 * Can upgrade. This case also handles nested qwriter calls
7267 7267 * (when the qwriter callback function calls qwriter). In that
7268 7268 * case SQ_EXCL is already set.
7269 7269 */
7270 7270 sq->sq_flags |= SQ_EXCL;
7271 7271 SQ_PUTLOCKS_EXIT(sq);
7272 7272 mutex_exit(SQLOCK(sq));
7273 7273 (*func)(q, mp);
7274 7274 /*
7275 7275 * Assumes that leavesq, putnext, and drain_syncq will reset
7276 7276 * SQ_EXCL for SQ_CIPUT/SQ_CISVC queues. We leave SQ_EXCL on
7277 7277 * until putnext, leavesq, or drain_syncq drops it.
7278 7278 * That way we handle nested qwriter(INNER) without dropping
7279 7279 * SQ_EXCL until the outermost qwriter callback routine is
7280 7280 * done.
7281 7281 */
7282 7282 return;
7283 7283 }
7284 7284 SQ_PUTLOCKS_EXIT(sq);
7285 7285 sqfill_events(sq, q, mp, func);
7286 7286 }
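/*
 * Editor's sketch (hypothetical module code): qwriter_inner() backs the
 * qwriter(9F) entry point for PERIM_INNER. A CIPUT module whose put
 * procedure needs exclusive access at the inner perimeter defers the
 * work like this; xx_set_state, xx_wput and xx_needs_exclusive are
 * made-up names.
 *
 *	static void
 *	xx_set_state(queue_t *q, mblk_t *mp)
 *	{
 *		-- runs single-threaded; SQ_EXCL is set on entry
 *		freemsg(mp);
 *	}
 *
 *	static int
 *	xx_wput(queue_t *q, mblk_t *mp)
 *	{
 *		if (xx_needs_exclusive(mp)) {
 *			qwriter(q, mp, xx_set_state, PERIM_INNER);
 *			return (0);
 *		}
 *		putnext(q, mp);
 *		return (0);
 *	}
 */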
7287 7287
7288 7288 /*
7289 7289 * Synchronous callback support functions
7290 7290 */
7291 7291
7292 7292 /*
7293 7293 * Allocate a callback parameter structure.
7294 7294 * Assumes that caller initializes the flags and the id.
7295 7295 * Acquires SQLOCK(sq) if non-NULL is returned.
7296 7296 */
7297 7297 callbparams_t *
7298 7298 callbparams_alloc(syncq_t *sq, void (*func)(void *), void *arg, int kmflags)
7299 7299 {
7300 7300 callbparams_t *cbp;
7301 7301 size_t size = sizeof (callbparams_t);
7302 7302
7303 7303 cbp = kmem_alloc(size, kmflags & ~KM_PANIC);
7304 7304
7305 7305 /*
7306 7306 * Only try tryhard allocation if the caller is ready to panic.
7307 7307 * Otherwise just fail.
7308 7308 */
7309 7309 if (cbp == NULL) {
7310 7310 if (kmflags & KM_PANIC)
7311 7311 cbp = kmem_alloc_tryhard(sizeof (callbparams_t),
7312 7312 &size, kmflags);
7313 7313 else
7314 7314 return (NULL);
7315 7315 }
7316 7316
7317 7317 ASSERT(size >= sizeof (callbparams_t));
7318 7318 cbp->cbp_size = size;
7319 7319 cbp->cbp_sq = sq;
7320 7320 cbp->cbp_func = func;
7321 7321 cbp->cbp_arg = arg;
7322 7322 mutex_enter(SQLOCK(sq));
7323 7323 cbp->cbp_next = sq->sq_callbpend;
7324 7324 sq->sq_callbpend = cbp;
7325 7325 return (cbp);
7326 7326 }
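/*
 * Editor's sketch (an assumption about the qtimeout(9F) path, not
 * verified against it): the alloc/arm pairing implied by the header
 * comment above. The caller fills in cbp_flags and cbp_id and drops
 * the SQLOCK that callbparams_alloc() returned holding; error handling
 * is elided.
 *
 *	cbp = callbparams_alloc(sq, func, arg, KM_NOSLEEP | KM_PANIC);
 *	cbp->cbp_flags = ...;	-- cancel-type flag chosen by the caller
 *	cbp->cbp_id = (callbparams_id_t)timeout(qcallbwrapper, cbp, tim);
 *	mutex_exit(SQLOCK(sq));
 */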
7327 7327
7328 7328 void
7329 7329 callbparams_free(syncq_t *sq, callbparams_t *cbp)
7330 7330 {
7331 7331 callbparams_t **pp, *p;
7332 7332
7333 7333 ASSERT(MUTEX_HELD(SQLOCK(sq)));
7334 7334
7335 7335 for (pp = &sq->sq_callbpend; (p = *pp) != NULL; pp = &p->cbp_next) {
7336 7336 if (p == cbp) {
7337 7337 *pp = p->cbp_next;
7338 7338 kmem_free(p, p->cbp_size);
7339 7339 return;
7340 7340 }
7341 7341 }
7342 7342 (void) (STRLOG(0, 0, 0, SL_CONSOLE,
7343 7343 "callbparams_free: not found\n"));
7344 7344 }
7345 7345
7346 7346 void
7347 7347 callbparams_free_id(syncq_t *sq, callbparams_id_t id, int32_t flag)
7348 7348 {
7349 7349 callbparams_t **pp, *p;
7350 7350
7351 7351 ASSERT(MUTEX_HELD(SQLOCK(sq)));
7352 7352
7353 7353 for (pp = &sq->sq_callbpend; (p = *pp) != NULL; pp = &p->cbp_next) {
7354 7354 if (p->cbp_id == id && p->cbp_flags == flag) {
7355 7355 *pp = p->cbp_next;
7356 7356 kmem_free(p, p->cbp_size);
7357 7357 return;
7358 7358 }
7359 7359 }
7360 7360 (void) (STRLOG(0, 0, 0, SL_CONSOLE,
7361 7361 "callbparams_free_id: not found\n"));
7362 7362 }
7363 7363
7364 7364 /*
7365 7365 * Callback wrapper function used by once-only callbacks that can be
7366 7366 * cancelled (qtimeout and qbufcall).
7367 7367 * Contains an inline version of entersq(sq, SQ_CALLBACK) that can be
7368 7368 * cancelled by the qun* functions.
7369 7369 */
7370 7370 void
7371 7371 qcallbwrapper(void *arg)
7372 7372 {
7373 7373 callbparams_t *cbp = arg;
7374 7374 syncq_t *sq;
7375 7375 uint16_t count = 0;
7376 7376 uint16_t waitflags = SQ_STAYAWAY | SQ_EVENTS | SQ_EXCL;
7377 7377 uint16_t type;
7378 7378
7379 7379 sq = cbp->cbp_sq;
7380 7380 mutex_enter(SQLOCK(sq));
7381 7381 type = sq->sq_type;
7382 7382 if (!(type & SQ_CICB)) {
7383 7383 count = sq->sq_count;
7384 7384 SQ_PUTLOCKS_ENTER(sq);
7385 7385 SQ_PUTCOUNT_CLRFAST_LOCKED(sq);
7386 7386 SUM_SQ_PUTCOUNTS(sq, count);
7387 7387 sq->sq_needexcl++;
7388 7388 ASSERT(sq->sq_needexcl != 0); /* wraparound */
7389 7389 waitflags |= SQ_MESSAGES;
7390 7390 }
7391 7391 /* Cannot handle exclusive entry at the outer perimeter */
7392 7392 ASSERT(type & SQ_COCB);
7393 7393
7394 7394 while ((sq->sq_flags & waitflags) || (!(type & SQ_CICB) && count != 0)) {
7395 7395 if ((sq->sq_callbflags & cbp->cbp_flags) &&
7396 7396 (sq->sq_cancelid == cbp->cbp_id)) {
7397 7397 /* timeout has been cancelled */
7398 7398 sq->sq_callbflags |= SQ_CALLB_BYPASSED;
7399 7399 callbparams_free(sq, cbp);
7400 7400 if (!(type & SQ_CICB)) {
7401 7401 ASSERT(sq->sq_needexcl > 0);
7402 7402 sq->sq_needexcl--;
7403 7403 if (sq->sq_needexcl == 0) {
7404 7404 SQ_PUTCOUNT_SETFAST_LOCKED(sq);
7405 7405 }
7406 7406 SQ_PUTLOCKS_EXIT(sq);
7407 7407 }
7408 7408 mutex_exit(SQLOCK(sq));
7409 7409 return;
7410 7410 }
7411 7411 sq->sq_flags |= SQ_WANTWAKEUP;
7412 7412 if (!(type & SQ_CICB)) {
7413 7413 SQ_PUTLOCKS_EXIT(sq);
7414 7414 }
7415 7415 cv_wait(&sq->sq_wait, SQLOCK(sq));
7416 7416 if (!(type & SQ_CICB)) {
7417 7417 count = sq->sq_count;
7418 7418 SQ_PUTLOCKS_ENTER(sq);
7419 7419 SUM_SQ_PUTCOUNTS(sq, count);
7420 7420 }
7421 7421 }
7422 7422
7423 7423 sq->sq_count++;
7424 7424 ASSERT(sq->sq_count != 0); /* Wraparound */
7425 7425 if (!(type & SQ_CICB)) {
7426 7426 ASSERT(count == 0);
7427 7427 sq->sq_flags |= SQ_EXCL;
7428 7428 ASSERT(sq->sq_needexcl > 0);
7429 7429 sq->sq_needexcl--;
7430 7430 if (sq->sq_needexcl == 0) {
7431 7431 SQ_PUTCOUNT_SETFAST_LOCKED(sq);
7432 7432 }
7433 7433 SQ_PUTLOCKS_EXIT(sq);
7434 7434 }
7435 7435
7436 7436 mutex_exit(SQLOCK(sq));
7437 7437
7438 7438 cbp->cbp_func(cbp->cbp_arg);
7439 7439
7440 7440 /*
7441 7441 * We drop the lock only for leavesq to re-acquire it.
7442 7442 * A possible optimization is to inline leavesq.
7443 7443 */
7444 7444 mutex_enter(SQLOCK(sq));
7445 7445 callbparams_free(sq, cbp);
7446 7446 mutex_exit(SQLOCK(sq));
7447 7447 leavesq(sq, SQ_CALLBACK);
7448 7448 }
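/*
 * Editor's sketch (illustrative): the module-level pairing that drives
 * qcallbwrapper(). Cancellation records the id in sq_cancelid and the
 * matching cancel flag in sq_callbflags, so a wrapper that has already
 * fired takes the SQ_CALLB_BYPASSED path above, frees the params and
 * bails. xx_tick is a made-up callback.
 *
 *	tid = qtimeout(q, xx_tick, arg, drv_usectohz(1000000));
 *	...
 *	(void) quntimeout(q, tid);
 */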
7449 7449
7450 7450 /*
7451 7451 * No need to grab sq_putlocks here. See comment in strsubr.h that
7452 7452 * explains when sq_putlocks are used.
7453 7453 *
7454 7454 * sq_count (or one of the sq_putcounts) has already been
7455 7455 * decremented by the caller, and if SQ_QUEUED, we need to call
7456 7456 * drain_syncq (the global syncq drain).
7457 7457 * If putnext_tail is called with the SQ_EXCL bit set, we are in
7458 7458 * one of two states: either this is a non-CIPUT perimeter, or we
7459 7459 * went exclusive in the put procedure. In either case, we want
7460 7460 * to clear the bit now, and it is probably easier to do
7461 7461 * this at the beginning of this function (remember, we hold
7462 7462 * the SQLOCK). Lastly, if there are other messages queued
7463 7463 * on the syncq (and not for our destination), enable the syncq
7464 7464 * for background work.
7465 7465 */
7466 7466
7467 7467 /* ARGSUSED */
7468 7468 void
7469 7469 putnext_tail(syncq_t *sq, queue_t *qp, uint32_t passflags)
7470 7470 {
7471 7471 uint16_t flags = sq->sq_flags;
7472 7472
7473 7473 ASSERT(MUTEX_HELD(SQLOCK(sq)));
7474 7474 ASSERT(MUTEX_NOT_HELD(QLOCK(qp)));
7475 7475
7476 7476 /* Clear SQ_EXCL if set in passflags */
7477 7477 if (passflags & SQ_EXCL) {
7478 7478 flags &= ~SQ_EXCL;
7479 7479 }
7480 7480 if (flags & SQ_WANTWAKEUP) {
7481 7481 flags &= ~SQ_WANTWAKEUP;
7482 7482 cv_broadcast(&sq->sq_wait);
7483 7483 }
7484 7484 if (flags & SQ_WANTEXWAKEUP) {
7485 7485 flags &= ~SQ_WANTEXWAKEUP;
7486 7486 cv_broadcast(&sq->sq_exitwait);
7487 7487 }
7488 7488 sq->sq_flags = flags;
7489 7489
7490 7490 /*
7491 7491 * We have cleared SQ_EXCL if we were asked to, and started
7492 7492 * the wakeup process for waiters. If there are no writers
7493 7493 * then we need to drain the syncq if we were told to, or
7494 7494 * enable the background thread to do it.
7495 7495 */
7496 7496 if (!(flags & (SQ_STAYAWAY|SQ_EXCL))) {
7497 7497 if ((passflags & SQ_QUEUED) ||
7498 7498 (sq->sq_svcflags & SQ_DISABLED)) {
7499 7499 /* drain_syncq will take care of events in the list */
7500 7500 drain_syncq(sq);
7501 7501 return;
7502 7502 } else if (flags & SQ_QUEUED) {
7503 7503 sqenable(sq);
7504 7504 }
7505 7505 }
7506 7506 /* Drop the SQLOCK on exit */
7507 7507 mutex_exit(SQLOCK(sq));
7508 7508 TRACE_3(TR_FAC_STREAMS_FR, TR_PUTNEXT_END,
7509 7509 "putnext_end:(%p, %p, %p) done", NULL, qp, sq);
7510 7510 }
7511 7511
7512 7512 void
7513 7513 set_qend(queue_t *q)
7514 7514 {
7515 7515 mutex_enter(QLOCK(q));
7516 7516 if (!O_SAMESTR(q))
7517 7517 q->q_flag |= QEND;
7518 7518 else
7519 7519 q->q_flag &= ~QEND;
7520 7520 mutex_exit(QLOCK(q));
7521 7521 q = _OTHERQ(q);
7522 7522 mutex_enter(QLOCK(q));
7523 7523 if (!O_SAMESTR(q))
7524 7524 q->q_flag |= QEND;
7525 7525 else
7526 7526 q->q_flag &= ~QEND;
7527 7527 mutex_exit(QLOCK(q));
7528 7528 }
7529 7529
7530 7530 /*
7531 7531 * Set QFULL in next service procedure queue (that cares) if not already
7532 7532 * set and if there are already more messages on the syncq than
7533 7533 * sq_max_size. If sq_max_size is 0, no flow control will be asserted on
7534 7534 * any syncq.
7535 7535 *
7536 7536 * The fq here is the next queue with a service procedure. This is where
7537 7537 * we would fail canputnext, so this is where we need to set QFULL.
7538 7538 * In the case when fq != q we need to take QLOCK(fq) to set QFULL flag.
7539 7539 *
7540 7540 * We already have QLOCK at this point. To avoid cross-locks with
7541 7541 * freezestr() which grabs all QLOCKs and with strlock() which grabs both
7542 7542 * SQLOCK and sd_reflock, we need to drop respective locks first.
7543 7543 */
7544 7544 void
7545 7545 set_qfull(queue_t *q)
7546 7546 {
7547 7547 queue_t *fq = NULL;
7548 7548
7549 7549 ASSERT(MUTEX_HELD(QLOCK(q)));
7550 7550 if ((sq_max_size != 0) && (!(q->q_nfsrv->q_flag & QFULL)) &&
7551 7551 (q->q_syncqmsgs > sq_max_size)) {
7552 7552 if ((fq = q->q_nfsrv) == q) {
7553 7553 fq->q_flag |= QFULL;
7554 7554 } else {
7555 7555 mutex_exit(QLOCK(q));
7556 7556 mutex_enter(QLOCK(fq));
7557 7557 fq->q_flag |= QFULL;
7558 7558 mutex_exit(QLOCK(fq));
7559 7559 mutex_enter(QLOCK(q));
7560 7560 }
7561 7561 }
7562 7562 }
7563 7563
7564 7564 void
7565 7565 clr_qfull(queue_t *q)
7566 7566 {
7567 7567 queue_t *oq = q;
7568 7568
7569 7569 q = q->q_nfsrv;
7570 7570 /* Fast check if there is any work to do before getting the lock. */
7571 7571 if ((q->q_flag & (QFULL|QWANTW)) == 0) {
7572 7572 return;
7573 7573 }
7574 7574
7575 7575 /*
7576 7576 * Do not reset QFULL (and backenable) if the q_count is the reason
7577 7577 * for QFULL being set.
7578 7578 */
7579 7579 mutex_enter(QLOCK(q));
7580 7580 /*
7581 7581 * If queue is empty i.e q_mblkcnt is zero, queue can not be full.
7582 7582 * Hence clear the QFULL.
7583 7583 * If both q_count and q_mblkcnt are less than the hiwat mark,
7584 7584 * clear the QFULL.
7585 7585 */
7586 7586 if (q->q_mblkcnt == 0 || ((q->q_count < q->q_hiwat) &&
7587 7587 (q->q_mblkcnt < q->q_hiwat))) {
7588 7588 q->q_flag &= ~QFULL;
7589 7589 /*
7590 7590 * Put another way:
7591 7591 * if someone wants to write,
7592 7592 * AND
7593 7593 * both counts are less than the lowat mark
7594 7594 * OR
7595 7595 * the lowat mark is zero
7596 7596 * THEN
7597 7597 * backenable
7598 7598 */
7599 7599 if ((q->q_flag & QWANTW) &&
7600 7600 (((q->q_count < q->q_lowat) &&
7601 7601 (q->q_mblkcnt < q->q_lowat)) || q->q_lowat == 0)) {
7602 7602 q->q_flag &= ~QWANTW;
7603 7603 mutex_exit(QLOCK(q));
7604 7604 backenable(oq, 0);
7605 7605 } else
7606 7606 mutex_exit(QLOCK(q));
7607 7607 } else
7608 7608 mutex_exit(QLOCK(q));
7609 7609 }
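/*
 * Editor's worked example of the thresholds above (values made up):
 * with q_hiwat = 8192 and q_lowat = 1024, a queue holding q_count =
 * 2000 bytes in q_mblkcnt = 3 blocks has QFULL cleared, since both
 * counts are below the high-water mark; but a writer sleeping with
 * QWANTW set is only backenabled once both counts drop below 1024
 * (or immediately, if q_lowat is 0).
 */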
7610 7610
7611 7611 /*
7612 7612 * Set the forward service procedure pointer.
7613 7613 *
7614 7614 * Called at insert-time to cache a queue's next forward service procedure in
7615 7615 * q_nfsrv; used by canput() and canputnext(). If the queue to be inserted
7616 7616 * has a service procedure then q_nfsrv points to itself. If the queue to be
7617 7617 * inserted does not have a service procedure, then q_nfsrv points to the next
7618 7618 * queue forward that has a service procedure. If the queue is at the logical
7619 7619 * end of the stream (driver for write side, stream head for the read side)
7620 7620 * and does not have a service procedure, then q_nfsrv also points to itself.
7621 7621 */
7622 7622 void
7623 7623 set_nfsrv_ptr(
7624 7624 queue_t *rnew, /* read queue pointer to new module */
7625 7625 queue_t *wnew, /* write queue pointer to new module */
7626 7626 queue_t *prev_rq, /* read queue pointer to the module above */
7627 7627 queue_t *prev_wq) /* write queue pointer to the module above */
7628 7628 {
7629 7629 queue_t *qp;
7630 7630
7631 7631 if (prev_wq->q_next == NULL) {
7632 7632 /*
7633 7633 * Insert the driver, initialize the driver and stream head.
7634 7634 * In this case, prev_rq/prev_wq should be the stream head.
7635 7635 * _I_INSERT does not allow inserting a driver. Make sure
7636 7636 * that it is not an insertion.
7637 7637 */
7638 7638 ASSERT(!(rnew->q_flag & _QINSERTING));
7639 7639 wnew->q_nfsrv = wnew;
7640 7640 if (rnew->q_qinfo->qi_srvp)
7641 7641 rnew->q_nfsrv = rnew;
7642 7642 else
7643 7643 rnew->q_nfsrv = prev_rq;
7644 7644 prev_rq->q_nfsrv = prev_rq;
7645 7645 prev_wq->q_nfsrv = prev_wq;
7646 7646 } else {
7647 7647 /*
7648 7648 * set up read side q_nfsrv pointer. This MUST be done
7649 7649 * before setting the write side, because the setting of
7650 7650 * the write side for a fifo may depend on it.
7651 7651 *
7652 7652 * Suppose we have a fifo that only has pipemod pushed.
7653 7653 * pipemod has no read or write service procedures, so
7654 7654 * nfsrv for both pipemod queues points to prev_rq (the
7655 7655 * stream read head). Now push bufmod (which has only a
7656 7656 * read service procedure). Doing the write side first,
7657 7657 * wnew->q_nfsrv is set to pipemod's writeq nfsrv, which
7658 7658 * is WRONG; the next queue forward from wnew with a
7659 7659 * service procedure will be rnew, not the stream read head.
7660 7660 * Since the downstream queue (which in the case of a fifo
7661 7661 * is the read queue rnew) can affect upstream queues, it
7662 7662 * needs to be done first. Setting up the read side first
7663 7663 * sets nfsrv for both pipemod queues to rnew and then
7664 7664 * when the write side is set up, wnew->q_nfsrv will also
7665 7665 * point to rnew.
7666 7666 */
7667 7667 if (rnew->q_qinfo->qi_srvp) {
7668 7668 /*
7669 7669 * use _OTHERQ() because, if this is a pipe, next
7670 7670 * module may have been pushed from other end and
7671 7671 * q_next could be a read queue.
7672 7672 */
7673 7673 qp = _OTHERQ(prev_wq->q_next);
7674 7674 while (qp && qp->q_nfsrv != qp) {
7675 7675 qp->q_nfsrv = rnew;
7676 7676 qp = backq(qp);
7677 7677 }
7678 7678 rnew->q_nfsrv = rnew;
7679 7679 } else
7680 7680 rnew->q_nfsrv = prev_rq->q_nfsrv;
7681 7681
7682 7682 /* set up write side q_nfsrv pointer */
7683 7683 if (wnew->q_qinfo->qi_srvp) {
7684 7684 wnew->q_nfsrv = wnew;
7685 7685
7686 7686 /*
7687 7687 * For insertion, need to update nfsrv of the modules
7688 7688 * above which do not have a service routine.
7689 7689 */
7690 7690 if (rnew->q_flag & _QINSERTING) {
7691 7691 for (qp = prev_wq;
7692 7692 qp != NULL && qp->q_nfsrv != qp;
7693 7693 qp = backq(qp)) {
7694 7694 qp->q_nfsrv = wnew->q_nfsrv;
7695 7695 }
7696 7696 }
7697 7697 } else {
7698 7698 if (prev_wq->q_next == prev_rq)
7699 7699 /*
7700 7700 * Since prev_wq/prev_rq are the middle of a
7701 7701 * fifo, wnew/rnew will also be the middle of
7702 7702 * a fifo and wnew's nfsrv is the same as rnew's.
7703 7703 */
7704 7704 wnew->q_nfsrv = rnew->q_nfsrv;
7705 7705 else
7706 7706 wnew->q_nfsrv = prev_wq->q_next->q_nfsrv;
7707 7707 }
7708 7708 }
7709 7709 }
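/*
 * Editor's sketch: the q_nfsrv values that the rule above produces for
 * a hypothetical write side where only modB and the driver supply a
 * service procedure (all names illustrative):
 *
 *	queue:     sth_wq -> modA_wq -> modB_wq -> drv_wq
 *	qi_srvp:    none      none       yes        yes
 *	q_nfsrv:   modB_wq   modB_wq    modB_wq    drv_wq
 */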
7710 7710
7711 7711 /*
7712 7712 * Reset the forward service procedure pointer; called at remove-time.
7713 7713 */
7714 7714 void
7715 7715 reset_nfsrv_ptr(queue_t *rqp, queue_t *wqp)
7716 7716 {
7717 7717 queue_t *tmp_qp;
7718 7718
7719 7719 /* Reset the write side q_nfsrv pointer for _I_REMOVE */
7720 7720 if ((rqp->q_flag & _QREMOVING) && (wqp->q_qinfo->qi_srvp != NULL)) {
7721 7721 for (tmp_qp = backq(wqp);
7722 7722 tmp_qp != NULL && tmp_qp->q_nfsrv == wqp;
7723 7723 tmp_qp = backq(tmp_qp)) {
7724 7724 tmp_qp->q_nfsrv = wqp->q_nfsrv;
7725 7725 }
7726 7726 }
7727 7727
7728 7728 /* reset the read side q_nfsrv pointer */
7729 7729 if (rqp->q_qinfo->qi_srvp) {
7730 7730 if (wqp->q_next) { /* non-driver case */
7731 7731 tmp_qp = _OTHERQ(wqp->q_next);
7732 7732 while (tmp_qp && tmp_qp->q_nfsrv == rqp) {
7733 7733 /* Note that rqp->q_next cannot be NULL */
7734 7734 ASSERT(rqp->q_next != NULL);
7735 7735 tmp_qp->q_nfsrv = rqp->q_next->q_nfsrv;
7736 7736 tmp_qp = backq(tmp_qp);
7737 7737 }
7738 7738 }
7739 7739 }
7740 7740 }
7741 7741
7742 7742 /*
7743 7743 * This routine should be called after all stream geometry changes to update
7744 7744 * the stream head cached struio() rd/wr queue pointers. Note: must be called
7745 7745 * with the stream streamlock()'ed.
7746 7746 *
7747 7747 * Note: only enables Synchronous STREAMS for a side of a Stream which has
7748 7748 * an explicit synchronous barrier module queue. That is, a queue that
7749 7749 * has specified a struio() type.
7750 7750 */
7751 7751 static void
7752 7752 strsetuio(stdata_t *stp)
7753 7753 {
7754 7754 queue_t *wrq;
7755 7755
7756 7756 if (stp->sd_flag & STPLEX) {
7757 7757 /*
7758 7758 * Not streamhead, but a mux, so no Synchronous STREAMS.
7759 7759 */
7760 7760 stp->sd_struiowrq = NULL;
7761 7761 stp->sd_struiordq = NULL;
7762 7762 return;
7763 7763 }
7764 7764 /*
7765 7765 * Scan the write queue(s) while synchronous
7766 7766 * until we find a qinfo uio type specified.
7767 7767 */
7768 7768 wrq = stp->sd_wrq->q_next;
7769 7769 while (wrq) {
7770 7770 if (wrq->q_struiot == STRUIOT_NONE) {
7771 7771 wrq = 0;
7772 7772 break;
7773 7773 }
7774 7774 if (wrq->q_struiot != STRUIOT_DONTCARE)
7775 7775 break;
7776 7776 if (! _SAMESTR(wrq)) {
7777 7777 wrq = 0;
7778 7778 break;
7779 7779 }
7780 7780 wrq = wrq->q_next;
7781 7781 }
7782 7782 stp->sd_struiowrq = wrq;
7783 7783 /*
7784 7784 * Scan the read queue(s) while synchronous
7785 7785 * until we find a qinfo uio type specified.
7786 7786 */
7787 7787 wrq = stp->sd_wrq->q_next;
7788 7788 while (wrq) {
7789 7789 if (_RD(wrq)->q_struiot == STRUIOT_NONE) {
7790 7790 wrq = 0;
7791 7791 break;
7792 7792 }
7793 7793 if (_RD(wrq)->q_struiot != STRUIOT_DONTCARE)
7794 7794 break;
7795 7795 if (! _SAMESTR(wrq)) {
7796 7796 wrq = 0;
7797 7797 break;
7798 7798 }
7799 7799 wrq = wrq->q_next;
7800 7800 }
7801 7801 stp->sd_struiordq = wrq ? _RD(wrq) : 0;
7802 7802 }
7803 7803
7804 7804 /*
7805 7805 * pass_wput unblocks the passthru queues so that
7806 7806 * messages can arrive at the mux's lower read queue before
7807 7807 * the I_LINK/I_UNLINK is acked/nacked.
7808 7808 */
7809 7809 static void
7810 7810 pass_wput(queue_t *q, mblk_t *mp)
7811 7811 {
7812 7812 syncq_t *sq;
7813 7813
7814 7814 sq = _RD(q)->q_syncq;
7815 7815 if (sq->sq_flags & SQ_BLOCKED)
7816 7816 unblocksq(sq, SQ_BLOCKED, 0);
7817 7817 putnext(q, mp);
7818 7818 }
7819 7819
7820 7820 /*
7821 7821 * Set up queues for the link/unlink.
7822 7822 * Create a new queue, block it, and then insert it
7823 7823 * below the stream head on the lower stream.
7824 7824 * This prevents any messages from arriving during the setq
7825 7825 * as well as while the mux is processing the I_LINK/I_UNLINK.
7826 7826 * The blocked passq is unblocked once the I_LINK/I_UNLINK has
7827 7827 * been acked or nacked or if a message is generated and sent
7828 7828 * down the mux's write put procedure.
7829 7829 * See pass_wput().
7830 7830 *
7831 7831 * After the new queue is inserted, all messages coming from below are
7832 7832 * blocked. The call to strlock will ensure that all activity in the stream head
7833 7833 * read queue syncq is stopped (sq_count drops to zero).
7834 7834 */
7835 7835 static queue_t *
7836 7836 link_addpassthru(stdata_t *stpdown)
7837 7837 {
7838 7838 queue_t *passq;
7839 7839 sqlist_t sqlist;
7840 7840
7841 7841 passq = allocq();
7842 7842 STREAM(passq) = STREAM(_WR(passq)) = stpdown;
7843 7843 /* setq might sleep in allocator - avoid holding locks. */
7844 7844 setq(passq, &passthru_rinit, &passthru_winit, NULL, QPERQ,
7845 7845 SQ_CI|SQ_CO, B_FALSE);
7846 7846 claimq(passq);
7847 7847 blocksq(passq->q_syncq, SQ_BLOCKED, 1);
7848 7848 insertq(STREAM(passq), passq);
7849 7849
7850 7850 /*
7851 7851 * Use strlock() to wait for the stream head sq_count to drop to zero
7852 7852 * since we are going to change q_ptr in the stream head. Note that
7853 7853 * insertq() doesn't wait for any syncq counts to drop to zero.
7854 7854 */
7855 7855 sqlist.sqlist_head = NULL;
7856 7856 sqlist.sqlist_index = 0;
7857 7857 sqlist.sqlist_size = sizeof (sqlist_t);
7858 7858 sqlist_insert(&sqlist, _RD(stpdown->sd_wrq)->q_syncq);
7859 7859 strlock(stpdown, &sqlist);
7860 7860 strunlock(stpdown, &sqlist);
7861 7861
7862 7862 releaseq(passq);
7863 7863 return (passq);
7864 7864 }
7865 7865
7866 7866 /*
7867 7867 * Let messages flow up into the mux by removing
7868 7868 * the passq.
7869 7869 */
7870 7870 static void
7871 7871 link_rempassthru(queue_t *passq)
7872 7872 {
7873 7873 claimq(passq);
7874 7874 removeq(passq);
7875 7875 releaseq(passq);
7876 7876 freeq(passq);
7877 7877 }
7878 7878
7879 7879 /*
7880 7880 * Wait for the condition variable pointed to by `cvp' to be signaled,
7881 7881 * or for `tim' milliseconds to elapse, whichever comes first. If `tim'
7882 7882 * is negative, then there is no time limit. If `nosigs' is non-zero,
7883 7883 * then the wait will be non-interruptible.
7884 7884 *
7885 7885 * Returns >0 if signaled, 0 if interrupted, or -1 upon timeout.
7886 7886 */
7887 7887 clock_t
7888 7888 str_cv_wait(kcondvar_t *cvp, kmutex_t *mp, clock_t tim, int nosigs)
7889 7889 {
7890 7890 clock_t ret;
7891 7891
7892 7892 if (tim < 0) {
7893 7893 if (nosigs) {
7894 7894 cv_wait(cvp, mp);
7895 7895 ret = 1;
7896 7896 } else {
7897 7897 ret = cv_wait_sig(cvp, mp);
7898 7898 }
7899 7899 } else if (tim > 0) {
7900 7900 /*
7901 7901 * convert milliseconds to clock ticks
7902 7902 */
7903 7903 if (nosigs) {
7904 7904 ret = cv_reltimedwait(cvp, mp,
7905 7905 MSEC_TO_TICK_ROUNDUP(tim), TR_CLOCK_TICK);
7906 7906 } else {
7907 7907 ret = cv_reltimedwait_sig(cvp, mp,
7908 7908 MSEC_TO_TICK_ROUNDUP(tim), TR_CLOCK_TICK);
7909 7909 }
7910 7910 } else {
7911 7911 ret = -1;
7912 7912 }
7913 7913 return (ret);
7914 7914 }
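/*
 * Editor's worked example (assuming hz = 100, i.e. one tick per 10 ms):
 * MSEC_TO_TICK_ROUNDUP(25) yields 3 ticks, so a 25 ms request times out
 * after roughly 30 ms rather than being truncated to 20 ms; rounding up
 * guarantees the wait is never shorter than requested.
 */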
7915 7915
7916 7916 /*
7917 7917 * Wait until the stream head can determine if it is at the mark, but
7918 7918 * don't wait forever to prevent a race condition between the "mark" state
7919 7919 * in the stream head and any mark state in the caller/user of this routine.
7920 7920 *
7921 7921 * This is used by sockets; for a socket it would be incorrect
7922 7922 * to return a failure for SIOCATMARK when there is no data in the receive
7923 7923 * queue and the marked urgent data is traveling up the stream.
7924 7924 *
7925 7925 * This routine waits until the mark is known by waiting for one of these
7926 7926 * three events:
7927 7927 * The stream head read queue becoming non-empty (including an EOF).
7928 7928 * The STRATMARK flag being set (due to a MSGMARKNEXT message).
7929 7929 * The STRNOTATMARK flag being set (which indicates that the transport
7930 7930 * has sent a MSGNOTMARKNEXT message to indicate that it is not at
7931 7931 * the mark).
7932 7932 *
7933 7933 * The routine returns 1 if the stream is at the mark; 0 if it can
7934 7934 * be determined that the stream is not at the mark.
7935 7935 * If the wait times out and it can't determine
7936 7936 * whether or not the stream might be at the mark the routine will return -1.
7937 7937 *
7938 7938 * Note: This routine should only be used when a mark is pending i.e.,
7939 7939 * in the socket case the SIGURG has been posted.
7940 7940 * Note2: This cannot wake up just because synchronous streams indicate
7941 7941 * that data is available since it is not possible to use the synchronous
7942 7942 * streams interfaces to determine the b_flag value for the data queued below
7943 7943 * the stream head.
7944 7944 */
7945 7945 int
7946 7946 strwaitmark(vnode_t *vp)
7947 7947 {
7948 7948 struct stdata *stp = vp->v_stream;
7949 7949 queue_t *rq = _RD(stp->sd_wrq);
7950 7950 int mark;
7951 7951
7952 7952 mutex_enter(&stp->sd_lock);
7953 7953 while (rq->q_first == NULL &&
7954 7954 !(stp->sd_flag & (STRATMARK|STRNOTATMARK|STREOF))) {
7955 7955 stp->sd_flag |= RSLEEP;
7956 7956
7957 7957 /* Wait for 100 milliseconds for any state change. */
7958 7958 if (str_cv_wait(&rq->q_wait, &stp->sd_lock, 100, 1) == -1) {
7959 7959 mutex_exit(&stp->sd_lock);
7960 7960 return (-1);
7961 7961 }
7962 7962 }
7963 7963 if (stp->sd_flag & STRATMARK)
7964 7964 mark = 1;
7965 7965 else if (rq->q_first != NULL && (rq->q_first->b_flag & MSGMARK))
7966 7966 mark = 1;
7967 7967 else
7968 7968 mark = 0;
7969 7969
7970 7970 mutex_exit(&stp->sd_lock);
7971 7971 return (mark);
7972 7972 }
7973 7973
7974 7974 /*
7975 7975 * Set a read side error. If persist is set, change the socket error
7976 7976 * to persistent. If errfunc is set, install the function as the exported
7977 7977 * error handler.
7978 7978 */
7979 7979 void
7980 7980 strsetrerror(vnode_t *vp, int error, int persist, errfunc_t errfunc)
7981 7981 {
7982 7982 struct stdata *stp = vp->v_stream;
7983 7983
7984 7984 mutex_enter(&stp->sd_lock);
7985 7985 stp->sd_rerror = error;
7986 7986 if (error == 0 && errfunc == NULL)
7987 7987 stp->sd_flag &= ~STRDERR;
7988 7988 else
7989 7989 stp->sd_flag |= STRDERR;
7990 7990 if (persist) {
7991 7991 stp->sd_flag &= ~STRDERRNONPERSIST;
7992 7992 } else {
7993 7993 stp->sd_flag |= STRDERRNONPERSIST;
7994 7994 }
7995 7995 stp->sd_rderrfunc = errfunc;
7996 7996 if (error != 0 || errfunc != NULL) {
7997 7997 cv_broadcast(&_RD(stp->sd_wrq)->q_wait); /* readers */
7998 7998 cv_broadcast(&stp->sd_wrq->q_wait); /* writers */
7999 7999 cv_broadcast(&stp->sd_monitor); /* ioctllers */
8000 8000
8001 8001 mutex_exit(&stp->sd_lock);
8002 8002 pollwakeup(&stp->sd_pollist, POLLERR);
8003 8003 mutex_enter(&stp->sd_lock);
8004 8004
8005 8005 if (stp->sd_sigflags & S_ERROR)
8006 8006 strsendsig(stp->sd_siglist, S_ERROR, 0, error);
8007 8007 }
8008 8008 mutex_exit(&stp->sd_lock);
8009 8009 }
8010 8010
8011 8011 /*
8012 8012 * Set a write side error. If persist is set, change the socket error
8013 8013 * to persistent.
8014 8014 */
8015 8015 void
8016 8016 strsetwerror(vnode_t *vp, int error, int persist, errfunc_t errfunc)
8017 8017 {
8018 8018 struct stdata *stp = vp->v_stream;
8019 8019
8020 8020 mutex_enter(&stp->sd_lock);
8021 8021 stp->sd_werror = error;
8022 8022 if (error == 0 && errfunc == NULL)
8023 8023 stp->sd_flag &= ~STWRERR;
8024 8024 else
8025 8025 stp->sd_flag |= STWRERR;
8026 8026 if (persist) {
8027 8027 stp->sd_flag &= ~STWRERRNONPERSIST;
8028 8028 } else {
8029 8029 stp->sd_flag |= STWRERRNONPERSIST;
8030 8030 }
8031 8031 stp->sd_wrerrfunc = errfunc;
8032 8032 if (error != 0 || errfunc != NULL) {
8033 8033 cv_broadcast(&_RD(stp->sd_wrq)->q_wait); /* readers */
8034 8034 cv_broadcast(&stp->sd_wrq->q_wait); /* writers */
8035 8035 cv_broadcast(&stp->sd_monitor); /* ioctllers */
8036 8036
8037 8037 mutex_exit(&stp->sd_lock);
8038 8038 pollwakeup(&stp->sd_pollist, POLLERR);
8039 8039 mutex_enter(&stp->sd_lock);
8040 8040
8041 8041 if (stp->sd_sigflags & S_ERROR)
8042 8042 strsendsig(stp->sd_siglist, S_ERROR, 0, error);
8043 8043 }
8044 8044 mutex_exit(&stp->sd_lock);
8045 8045 }
8046 8046
8047 8047 /*
8048 8048 * Make the stream return 0 (EOF) when all data has been read.
8049 8049 * No effect on write side.
8050 8050 */
8051 8051 void
8052 8052 strseteof(vnode_t *vp, int eof)
8053 8053 {
8054 8054 struct stdata *stp = vp->v_stream;
8055 8055
8056 8056 mutex_enter(&stp->sd_lock);
8057 8057 if (!eof) {
8058 8058 stp->sd_flag &= ~STREOF;
8059 8059 mutex_exit(&stp->sd_lock);
8060 8060 return;
8061 8061 }
8062 8062 stp->sd_flag |= STREOF;
8063 8063 if (stp->sd_flag & RSLEEP) {
8064 8064 stp->sd_flag &= ~RSLEEP;
8065 8065 cv_broadcast(&_RD(stp->sd_wrq)->q_wait);
8066 8066 }
8067 8067
8068 8068 mutex_exit(&stp->sd_lock);
8069 8069 pollwakeup(&stp->sd_pollist, POLLIN|POLLRDNORM);
8070 8070 mutex_enter(&stp->sd_lock);
8071 8071
8072 8072 if (stp->sd_sigflags & (S_INPUT|S_RDNORM))
8073 8073 strsendsig(stp->sd_siglist, S_INPUT|S_RDNORM, 0, 0);
8074 8074 mutex_exit(&stp->sd_lock);
8075 8075 }
8076 8076
8077 8077 void
8078 8078 strflushrq(vnode_t *vp, int flag)
8079 8079 {
8080 8080 struct stdata *stp = vp->v_stream;
8081 8081
8082 8082 mutex_enter(&stp->sd_lock);
8083 8083 flushq(_RD(stp->sd_wrq), flag);
8084 8084 mutex_exit(&stp->sd_lock);
8085 8085 }
8086 8086
8087 8087 void
8088 8088 strsetrputhooks(vnode_t *vp, uint_t flags,
8089 8089 msgfunc_t protofunc, msgfunc_t miscfunc)
8090 8090 {
8091 8091 struct stdata *stp = vp->v_stream;
8092 8092
8093 8093 mutex_enter(&stp->sd_lock);
8094 8094
8095 8095 if (protofunc == NULL)
8096 8096 stp->sd_rprotofunc = strrput_proto;
8097 8097 else
8098 8098 stp->sd_rprotofunc = protofunc;
8099 8099
8100 8100 if (miscfunc == NULL)
8101 8101 stp->sd_rmiscfunc = strrput_misc;
8102 8102 else
8103 8103 stp->sd_rmiscfunc = miscfunc;
8104 8104
8105 8105 if (flags & SH_CONSOL_DATA)
8106 8106 stp->sd_rput_opt |= SR_CONSOL_DATA;
8107 8107 else
8108 8108 stp->sd_rput_opt &= ~SR_CONSOL_DATA;
8109 8109
8110 8110 if (flags & SH_SIGALLDATA)
8111 8111 stp->sd_rput_opt |= SR_SIGALLDATA;
8112 8112 else
8113 8113 stp->sd_rput_opt &= ~SR_SIGALLDATA;
8114 8114
8115 8115 if (flags & SH_IGN_ZEROLEN)
8116 8116 stp->sd_rput_opt |= SR_IGN_ZEROLEN;
8117 8117 else
8118 8118 stp->sd_rput_opt &= ~SR_IGN_ZEROLEN;
8119 8119
8120 8120 mutex_exit(&stp->sd_lock);
8121 8121 }
8122 8122
8123 8123 void
8124 8124 strsetwputhooks(vnode_t *vp, uint_t flags, clock_t closetime)
8125 8125 {
8126 8126 struct stdata *stp = vp->v_stream;
8127 8127
8128 8128 mutex_enter(&stp->sd_lock);
8129 8129 stp->sd_closetime = closetime;
8130 8130
8131 8131 if (flags & SH_SIGPIPE)
8132 8132 stp->sd_wput_opt |= SW_SIGPIPE;
8133 8133 else
8134 8134 stp->sd_wput_opt &= ~SW_SIGPIPE;
8135 8135 if (flags & SH_RECHECK_ERR)
8136 8136 stp->sd_wput_opt |= SW_RECHECK_ERR;
8137 8137 else
8138 8138 stp->sd_wput_opt &= ~SW_RECHECK_ERR;
8139 8139
8140 8140 mutex_exit(&stp->sd_lock);
8141 8141 }
8142 8142
8143 8143 void
8144 8144 strsetrwputdatahooks(vnode_t *vp, msgfunc_t rdatafunc, msgfunc_t wdatafunc)
8145 8145 {
8146 8146 struct stdata *stp = vp->v_stream;
8147 8147
8148 8148 mutex_enter(&stp->sd_lock);
8149 8149
8150 8150 stp->sd_rputdatafunc = rdatafunc;
8151 8151 stp->sd_wputdatafunc = wdatafunc;
8152 8152
8153 8153 mutex_exit(&stp->sd_lock);
8154 8154 }
8155 8155
8156 8156 /* Used within framework when the queue is already locked */
8157 8157 void
8158 8158 qenable_locked(queue_t *q)
8159 8159 {
8160 8160 stdata_t *stp = STREAM(q);
8161 8161
8162 8162 ASSERT(MUTEX_HELD(QLOCK(q)));
8163 8163
8164 8164 if (!q->q_qinfo->qi_srvp)
8165 8165 return;
8166 8166
8167 8167 /*
8168 8168 * Do not place on run queue if already enabled or closing.
8169 8169 */
8170 8170 if (q->q_flag & (QWCLOSE|QENAB))
8171 8171 return;
8172 8172
8173 8173 /*
8174 8174 * Mark the queue enabled and place it on the run list if it is not
8175 8175 * already being serviced. If it is being serviced, runservice() will
8176 8176 * detect that QENAB is set and call the service procedure before
8177 8177 * clearing the QINSERVICE flag.
8178 8178 */
8179 8179 q->q_flag |= QENAB;
8180 8180 if (q->q_flag & QINSERVICE)
8181 8181 return;
8182 8182
8183 8183 /* Record the time of qenable */
8184 8184 q->q_qtstamp = ddi_get_lbolt();
8185 8185
8186 8186 /*
8187 8187 * Put the queue in the stp list and schedule it for background
8188 8188 * processing if it is not already scheduled or if the stream head does
8189 8189 * not intend to process it in the foreground later (indicated by the
8190 8190 * STRS_WILLSERVICE flag).
8191 8191 */
8192 8192 mutex_enter(&stp->sd_qlock);
8193 8193 /*
8194 8194 * If there is already something on the list, the stp flags should show
8195 8195 * the intention to drain it.
8196 8196 */
8197 8197 IMPLY(STREAM_NEEDSERVICE(stp),
8198 8198 (stp->sd_svcflags & (STRS_WILLSERVICE | STRS_SCHEDULED)));
8199 8199
8200 8200 ENQUEUE(q, stp->sd_qhead, stp->sd_qtail, q_link);
8201 8201 stp->sd_nqueues++;
8202 8202
8203 8203 /*
8204 8204 * If no one will drain this stream, we are the first producer and
8205 8205 * need to schedule it for the background thread.
8206 8206 */
8207 8207 if (!(stp->sd_svcflags & (STRS_WILLSERVICE | STRS_SCHEDULED))) {
8208 8208 /*
8209 8209 * No one will service this stream later, so we have to
8210 8210 * schedule it now.
8211 8211 */
8212 8212 STRSTAT(stenables);
8213 8213 stp->sd_svcflags |= STRS_SCHEDULED;
8214 8214 stp->sd_servid = (void *)taskq_dispatch(streams_taskq,
8215 8215 (task_func_t *)stream_service, stp, TQ_NOSLEEP|TQ_NOQUEUE);
8216 8216
8217 8217 if (stp->sd_servid == NULL) {
8218 8218 /*
8219 8219 * The task queue dispatch failed, so fail over to the
8220 8220 * backup servicing thread.
8221 8221 */
8222 8222 STRSTAT(taskqfails);
8223 8223 /*
8224 8224 			 * It is safe to clear the STRS_SCHEDULED flag because it
8225 8225 * was set by this thread above.
8226 8226 */
8227 8227 stp->sd_svcflags &= ~STRS_SCHEDULED;
8228 8228
8229 8229 /*
8230 8230 * Failover scheduling is protected by service_queue
8231 8231 * lock.
8232 8232 */
8233 8233 mutex_enter(&service_queue);
8234 8234 ASSERT((stp->sd_qhead == q) && (stp->sd_qtail == q));
8235 8235 ASSERT(q->q_link == NULL);
8236 8236 /*
8237 8237 * Append the queue to qhead/qtail list.
8238 8238 */
8239 8239 if (qhead == NULL)
8240 8240 qhead = q;
8241 8241 else
8242 8242 qtail->q_link = q;
8243 8243 qtail = q;
8244 8244 /*
8245 8245 * Clear stp queue list.
8246 8246 */
8247 8247 stp->sd_qhead = stp->sd_qtail = NULL;
8248 8248 stp->sd_nqueues = 0;
8249 8249 /*
8250 8250 * Wakeup background queue processing thread.
8251 8251 */
8252 8252 cv_signal(&services_to_run);
8253 8253 mutex_exit(&service_queue);
8254 8254 }
8255 8255 }
8256 8256 mutex_exit(&stp->sd_qlock);
8257 8257 }
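
/*
 * The dispatch-with-failover idiom above, reduced to a skeleton (a
 * sketch; backup_lock, backup_cv and backup_head are hypothetical
 * stand-ins for the service_queue machinery): try a non-blocking,
 * non-queueing taskq dispatch first, and only hand the work to a
 * dedicated backup thread when the taskq cannot take it immediately.
 */
#if 0	/* illustrative only */
static kmutex_t backup_lock;
static kcondvar_t backup_cv;
static void *backup_head;

static void
example_dispatch_or_fallback(taskq_t *tq, task_func_t *func, void *arg)
{
	if (taskq_dispatch(tq, func, arg, TQ_NOSLEEP | TQ_NOQUEUE) == 0) {
		/* Taskqid 0 means the dispatch failed; wake the backup. */
		mutex_enter(&backup_lock);
		backup_head = arg;	/* stand-in for a real list append */
		cv_signal(&backup_cv);
		mutex_exit(&backup_lock);
	}
}
#endif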
8258 8258
8259 8259 static void
8260 8260 queue_service(queue_t *q)
8261 8261 {
8262 8262 /*
8263 8263 * The queue in the list should have
8264 8264 	 * the QENAB flag set and should not
8265 8265 	 * have the QINSERVICE flag set. QINSERVICE is
8266 8266 * set when the queue is dequeued and
8267 8267 * qenable_locked doesn't enqueue a
8268 8268 * queue with QINSERVICE set.
8269 8269 */
8270 8270
8271 8271 ASSERT(!(q->q_flag & QINSERVICE));
8272 8272 ASSERT((q->q_flag & QENAB));
8273 8273 mutex_enter(QLOCK(q));
8274 8274 q->q_flag &= ~QENAB;
8275 8275 q->q_flag |= QINSERVICE;
8276 8276 mutex_exit(QLOCK(q));
8277 8277 runservice(q);
8278 8278 }
8279 8279
8280 8280 static void
8281 8281 syncq_service(syncq_t *sq)
8282 8282 {
8283 8283 STRSTAT(syncqservice);
8284 8284 mutex_enter(SQLOCK(sq));
8285 8285 ASSERT(!(sq->sq_svcflags & SQ_SERVICE));
8286 8286 ASSERT(sq->sq_servcount != 0);
8287 8287 ASSERT(sq->sq_next == NULL);
8288 8288
8289 8289 /* if we came here from the background thread, clear the flag */
8290 8290 if (sq->sq_svcflags & SQ_BGTHREAD)
8291 8291 sq->sq_svcflags &= ~SQ_BGTHREAD;
8292 8292
8293 8293 /* let drain_syncq know that it's being called in the background */
8294 8294 sq->sq_svcflags |= SQ_SERVICE;
8295 8295 drain_syncq(sq);
8296 8296 }
8297 8297
8298 8298 static void
8299 8299 qwriter_outer_service(syncq_t *outer)
8300 8300 {
8301 8301 /*
8302 8302 * Note that SQ_WRITER is used on the outer perimeter
8303 8303 	 * to signal that a qwriter(OUTER) is either preparing to
8304 8304 	 * run a function or is actually running one.
8305 8305 */
8306 8306 outer_enter(outer, SQ_BLOCKED|SQ_WRITER);
8307 8307
8308 8308 /*
8309 8309 	 * All inner syncqs are empty and have SQ_WRITER set
8310 8310 * to block entering the outer perimeter.
8311 8311 *
8312 8312 * We do not need to explicitly call write_now since
8313 8313 * outer_exit does it for us.
8314 8314 */
8315 8315 outer_exit(outer);
8316 8316 }
8317 8317
8318 8318 static void
8319 8319 mblk_free(mblk_t *mp)
8320 8320 {
8321 8321 dblk_t *dbp = mp->b_datap;
8322 8322 frtn_t *frp = dbp->db_frtnp;
8323 8323
8324 8324 mp->b_next = NULL;
8325 8325 if (dbp->db_fthdr != NULL)
8326 8326 str_ftfree(dbp);
8327 8327
8328 8328 ASSERT(dbp->db_fthdr == NULL);
8329 8329 frp->free_func(frp->free_arg);
8330 8330 ASSERT(dbp->db_mblk == mp);
8331 8331
8332 8332 if (dbp->db_credp != NULL) {
8333 8333 crfree(dbp->db_credp);
8334 8334 dbp->db_credp = NULL;
8335 8335 }
8336 8336 dbp->db_cpid = -1;
8337 8337 dbp->db_struioflag = 0;
8338 8338 dbp->db_struioun.cksum.flags = 0;
8339 8339
8340 8340 kmem_cache_free(dbp->db_cache, dbp);
8341 8341 }
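
/*
 * mblk_free() above invokes the message's esballoc() free routine
 * through db_frtnp; a minimal producer-side sketch (struct example_loan
 * and its functions are hypothetical). The frtn_t must stay valid until
 * the free routine runs, so it is embedded next to the loaned buffer.
 */
#if 0	/* illustrative only */
struct example_loan {
	frtn_t	el_frtn;
	uchar_t	el_buf[512];
};

static void
example_free_cb(char *arg)
{
	kmem_free(arg, sizeof (struct example_loan));
}

static mblk_t *
example_loan_buffer(void)
{
	struct example_loan *el;

	if ((el = kmem_alloc(sizeof (*el), KM_NOSLEEP)) == NULL)
		return (NULL);
	el->el_frtn.free_func = example_free_cb;
	el->el_frtn.free_arg = (caddr_t)el;
	return (esballoc(el->el_buf, sizeof (el->el_buf), BPRI_MED,
	    &el->el_frtn));
}
#endif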
8342 8342
8343 8343 /*
8344 8344 * Background processing of the stream queue list.
8345 8345 */
8346 8346 static void
8347 8347 stream_service(stdata_t *stp)
8348 8348 {
8349 8349 queue_t *q;
8350 8350
8351 8351 mutex_enter(&stp->sd_qlock);
8352 8352
8353 8353 STR_SERVICE(stp, q);
8354 8354
8355 8355 stp->sd_svcflags &= ~STRS_SCHEDULED;
8356 8356 stp->sd_servid = NULL;
8357 8357 cv_signal(&stp->sd_qcv);
8358 8358 mutex_exit(&stp->sd_qlock);
8359 8359 }
8360 8360
8361 8361 /*
8362 8362 * Foreground processing of the stream queue list.
8363 8363 */
8364 8364 void
8365 8365 stream_runservice(stdata_t *stp)
8366 8366 {
8367 8367 queue_t *q;
8368 8368
8369 8369 mutex_enter(&stp->sd_qlock);
8370 8370 STRSTAT(rservice);
8371 8371 /*
8372 8372 * We are going to drain this stream queue list, so qenable_locked will
8373 8373 * not schedule it until we finish.
8374 8374 */
8375 8375 stp->sd_svcflags |= STRS_WILLSERVICE;
8376 8376
8377 8377 STR_SERVICE(stp, q);
8378 8378
8379 8379 stp->sd_svcflags &= ~STRS_WILLSERVICE;
8380 8380 mutex_exit(&stp->sd_qlock);
8381 8381 /*
8382 8382 	 * Help the backup background thread drain the qhead/qtail list.
8383 8383 */
8384 8384 while (qhead != NULL) {
8385 8385 STRSTAT(qhelps);
8386 8386 mutex_enter(&service_queue);
8387 8387 DQ(q, qhead, qtail, q_link);
8388 8388 mutex_exit(&service_queue);
8389 8389 if (q != NULL)
8390 8390 queue_service(q);
8391 8391 }
8392 8392 }
8393 8393
8394 8394 void
8395 8395 stream_willservice(stdata_t *stp)
8396 8396 {
8397 8397 mutex_enter(&stp->sd_qlock);
8398 8398 stp->sd_svcflags |= STRS_WILLSERVICE;
8399 8399 mutex_exit(&stp->sd_qlock);
8400 8400 }
8401 8401
8402 8402 /*
8403 8403 * Replace the cred currently in the mblk with a different one.
8404 8404 * Also update db_cpid.
8405 8405 */
8406 8406 void
8407 8407 mblk_setcred(mblk_t *mp, cred_t *cr, pid_t cpid)
8408 8408 {
8409 8409 dblk_t *dbp = mp->b_datap;
8410 8410 cred_t *ocr = dbp->db_credp;
8411 8411
8412 8412 ASSERT(cr != NULL);
8413 8413
8414 8414 if (cr != ocr) {
8415 8415 crhold(dbp->db_credp = cr);
8416 8416 if (ocr != NULL)
8417 8417 crfree(ocr);
8418 8418 }
8419 8419 /* Don't overwrite with NOPID */
8420 8420 if (cpid != NOPID)
8421 8421 dbp->db_cpid = cpid;
8422 8422 }
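
/*
 * The credential swap above in generic form (a sketch; the helper name
 * is hypothetical): hold the new credential, install it, then drop the
 * reference on the old one.
 */
#if 0	/* illustrative only */
static void
example_swap_cred(cred_t **slot, cred_t *newcr)
{
	cred_t *oldcr = *slot;

	if (newcr != oldcr) {
		crhold(newcr);		/* take the new reference */
		*slot = newcr;
		if (oldcr != NULL)
			crfree(oldcr);	/* drop the old reference */
	}
}
#endif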
8423 8423
8424 8424 /*
8425 8425 * If the src message has a cred, then replace the cred currently in the mblk
8426 8426 * with it.
8427 8427 * Also update db_cpid.
8428 8428 */
8429 8429 void
8430 8430 mblk_copycred(mblk_t *mp, const mblk_t *src)
8431 8431 {
8432 8432 dblk_t *dbp = mp->b_datap;
8433 8433 cred_t *cr, *ocr;
8434 8434 pid_t cpid;
8435 8435
8436 8436 cr = msg_getcred(src, &cpid);
8437 8437 if (cr == NULL)
8438 8438 return;
8439 8439
8440 8440 ocr = dbp->db_credp;
8441 8441 if (cr != ocr) {
8442 8442 crhold(dbp->db_credp = cr);
8443 8443 if (ocr != NULL)
8444 8444 crfree(ocr);
8445 8445 }
8446 8446 /* Don't overwrite with NOPID */
8447 8447 if (cpid != NOPID)
8448 8448 dbp->db_cpid = cpid;
8449 8449 }
8450 8450
8451 8451 int
8452 8452 hcksum_assoc(mblk_t *mp, multidata_t *mmd, pdesc_t *pd,
8453 8453 uint32_t start, uint32_t stuff, uint32_t end, uint32_t value,
8454 8454 uint32_t flags, int km_flags)
8455 8455 {
8456 8456 int rc = 0;
8457 8457
8458 8458 ASSERT(DB_TYPE(mp) == M_DATA || DB_TYPE(mp) == M_MULTIDATA);
8459 8459 if (mp->b_datap->db_type == M_DATA) {
8460 8460 /* Associate values for M_DATA type */
8461 8461 DB_CKSUMSTART(mp) = (intptr_t)start;
8462 8462 DB_CKSUMSTUFF(mp) = (intptr_t)stuff;
8463 8463 DB_CKSUMEND(mp) = (intptr_t)end;
8464 8464 DB_CKSUMFLAGS(mp) = flags;
8465 8465 DB_CKSUM16(mp) = (uint16_t)value;
8466 8466
8467 8467 } else {
8468 8468 pattrinfo_t pa_info;
8469 8469
8470 8470 ASSERT(mmd != NULL);
8471 8471
8472 8472 pa_info.type = PATTR_HCKSUM;
8473 8473 pa_info.len = sizeof (pattr_hcksum_t);
8474 8474
8475 8475 if (mmd_addpattr(mmd, pd, &pa_info, B_TRUE, km_flags) != NULL) {
8476 8476 pattr_hcksum_t *hck = (pattr_hcksum_t *)pa_info.buf;
8477 8477
8478 8478 hck->hcksum_start_offset = start;
8479 8479 hck->hcksum_stuff_offset = stuff;
8480 8480 hck->hcksum_end_offset = end;
8481 8481 hck->hcksum_cksum_val.inet_cksum = (uint16_t)value;
8482 8482 hck->hcksum_flags = flags;
8483 8483 } else {
8484 8484 rc = -1;
8485 8485 }
8486 8486 }
8487 8487 return (rc);
8488 8488 }
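
/*
 * A minimal M_DATA usage sketch for hcksum_assoc() (the offsets below
 * are placeholders, not values taken from this file): ask hardware for
 * a partial checksum with the given start/stuff/end offsets.
 */
#if 0	/* illustrative only */
static void
example_request_partial_cksum(mblk_t *mp)
{
	/* mp is M_DATA, so the multidata arguments are unused (NULL). */
	(void) hcksum_assoc(mp, NULL, NULL, 20, 36, 60, 0,
	    HCK_PARTIALCKSUM, KM_NOSLEEP);
}
#endif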
8489 8489
8490 8490 void
8491 8491 hcksum_retrieve(mblk_t *mp, multidata_t *mmd, pdesc_t *pd,
8492 8492 uint32_t *start, uint32_t *stuff, uint32_t *end,
8493 8493 uint32_t *value, uint32_t *flags)
8494 8494 {
8495 8495 ASSERT(DB_TYPE(mp) == M_DATA || DB_TYPE(mp) == M_MULTIDATA);
8496 8496 if (mp->b_datap->db_type == M_DATA) {
8497 8497 if (flags != NULL) {
8498 8498 *flags = DB_CKSUMFLAGS(mp) & HCK_FLAGS;
8499 8499 if ((*flags & (HCK_PARTIALCKSUM |
8500 8500 HCK_FULLCKSUM)) != 0) {
8501 8501 if (value != NULL)
8502 8502 *value = (uint32_t)DB_CKSUM16(mp);
8503 8503 if ((*flags & HCK_PARTIALCKSUM) != 0) {
8504 8504 if (start != NULL)
8505 8505 *start =
8506 8506 (uint32_t)DB_CKSUMSTART(mp);
8507 8507 if (stuff != NULL)
8508 8508 *stuff =
8509 8509 (uint32_t)DB_CKSUMSTUFF(mp);
8510 8510 if (end != NULL)
8511 8511 *end =
8512 8512 (uint32_t)DB_CKSUMEND(mp);
8513 8513 }
8514 8514 }
8515 8515 }
8516 8516 } else {
8517 8517 pattrinfo_t hck_attr = {PATTR_HCKSUM};
8518 8518
8519 8519 ASSERT(mmd != NULL);
8520 8520
8521 8521 /* get hardware checksum attribute */
8522 8522 if (mmd_getpattr(mmd, pd, &hck_attr) != NULL) {
8523 8523 pattr_hcksum_t *hck = (pattr_hcksum_t *)hck_attr.buf;
8524 8524
8525 8525 ASSERT(hck_attr.len >= sizeof (pattr_hcksum_t));
8526 8526 if (flags != NULL)
8527 8527 *flags = hck->hcksum_flags;
8528 8528 if (start != NULL)
8529 8529 *start = hck->hcksum_start_offset;
8530 8530 if (stuff != NULL)
8531 8531 *stuff = hck->hcksum_stuff_offset;
8532 8532 if (end != NULL)
8533 8533 *end = hck->hcksum_end_offset;
8534 8534 if (value != NULL)
8535 8535 *value = (uint32_t)
8536 8536 hck->hcksum_cksum_val.inet_cksum;
8537 8537 }
8538 8538 }
8539 8539 }
8540 8540
8541 8541 void
8542 8542 lso_info_set(mblk_t *mp, uint32_t mss, uint32_t flags)
8543 8543 {
8544 8544 ASSERT(DB_TYPE(mp) == M_DATA);
8545 8545 ASSERT((flags & ~HW_LSO_FLAGS) == 0);
8546 8546
8547 8547 /* Set the flags */
8548 8548 DB_LSOFLAGS(mp) |= flags;
8549 8549 DB_LSOMSS(mp) = mss;
8550 8550 }
8551 8551
8552 8552 void
8553 8553 lso_info_cleanup(mblk_t *mp)
8554 8554 {
8555 8555 ASSERT(DB_TYPE(mp) == M_DATA);
8556 8556
8557 8557 /* Clear the flags */
8558 8558 DB_LSOFLAGS(mp) &= ~HW_LSO_FLAGS;
8559 8559 DB_LSOMSS(mp) = 0;
8560 8560 }
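
/*
 * LSO usage sketch (the MSS value is a placeholder): mark a large
 * M_DATA message for hardware segmentation with lso_info_set(), and
 * undo it with lso_info_cleanup() before the mblk is reused.
 */
#if 0	/* illustrative only */
static void
example_mark_lso(mblk_t *mp)
{
	lso_info_set(mp, 1460, HW_LSO);	/* 1460-byte segments */
}
#endif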
8561 8561
8562 8562 /*
8563 8563  * Checksum the buffer at bp for len bytes, starting from the partial
8564 8564  * checksum psum (or 0 if none), and return the new 16-bit partial checksum.
8565 8565 */
8566 8566 unsigned
8567 8567 bcksum(uchar_t *bp, int len, unsigned int psum)
8568 8568 {
8569 8569 int odd = len & 1;
8570 8570 extern unsigned int ip_ocsum();
8571 8571
8572 8572 if (((intptr_t)bp & 1) == 0 && !odd) {
8573 8573 /*
8574 8574 		 * bp is 16-bit aligned and len is a multiple of the 16-bit word size.
8575 8575 */
8576 8576 return (ip_ocsum((ushort_t *)bp, len >> 1, psum));
8577 8577 }
8578 8578 if (((intptr_t)bp & 1) != 0) {
8579 8579 /*
8580 8580 		 * bp isn't 16-bit aligned.
8581 8581 */
8582 8582 unsigned int tsum;
8583 8583
8584 8584 #ifdef _LITTLE_ENDIAN
8585 8585 psum += *bp;
8586 8586 #else
8587 8587 psum += *bp << 8;
8588 8588 #endif
8589 8589 len--;
8590 8590 bp++;
8591 8591 tsum = ip_ocsum((ushort_t *)bp, len >> 1, 0);
8592 8592 		psum += ((tsum << 8) & 0xffff) | (tsum >> 8);
8593 8593 if (len & 1) {
8594 8594 bp += len - 1;
8595 8595 #ifdef _LITTLE_ENDIAN
8596 8596 psum += *bp << 8;
8597 8597 #else
8598 8598 psum += *bp;
8599 8599 #endif
8600 8600 }
8601 8601 } else {
8602 8602 /*
8603 8603 		 * bp is 16-bit aligned, but len is odd.
8604 8604 */
8605 8605 psum = ip_ocsum((ushort_t *)bp, len >> 1, psum);
8606 8606 if (odd) {
8607 8607 bp += len - 1;
8608 8608 #ifdef _LITTLE_ENDIAN
8609 8609 psum += *bp;
8610 8610 #else
8611 8611 psum += *bp << 8;
8612 8612 #endif
8613 8613 }
8614 8614 }
8615 8615 /*
8616 8616 	 * Fold the carries in psum back into the low 16 bits before returning
8617 8617 	 * the new partial checksum. The max psum value before folding is 0x3FDFE.
8618 8618 */
8619 8619 return ((psum >> 16) + (psum & 0xFFFF));
8620 8620 }
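
/*
 * Worked example of the carry fold above: the stated maximum 0x3FDFE
 * folds as (0x3FDFE >> 16) + (0x3FDFE & 0xFFFF) = 0x3 + 0xFDFE = 0xFE01.
 * One's-complement sums let the carries be added back at any point,
 * which is why deferring the fold to the end is safe.
 */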
8621 8621
8622 8622 boolean_t
8623 8623 is_vmloaned_mblk(mblk_t *mp, multidata_t *mmd, pdesc_t *pd)
8624 8624 {
8625 8625 boolean_t rc;
8626 8626
8627 8627 ASSERT(DB_TYPE(mp) == M_DATA || DB_TYPE(mp) == M_MULTIDATA);
8628 8628 if (DB_TYPE(mp) == M_DATA) {
8629 8629 rc = (((mp)->b_datap->db_struioflag & STRUIO_ZC) != 0);
8630 8630 } else {
8631 8631 pattrinfo_t zcopy_attr = {PATTR_ZCOPY};
8632 8632
8633 8633 ASSERT(mmd != NULL);
8634 8634 rc = (mmd_getpattr(mmd, pd, &zcopy_attr) != NULL);
8635 8635 }
8636 8636 return (rc);
8637 8637 }
8638 8638
8639 8639 void
8640 8640 freemsgchain(mblk_t *mp)
8641 8641 {
8642 8642 mblk_t *next;
8643 8643
8644 8644 while (mp != NULL) {
8645 8645 next = mp->b_next;
8646 8646 mp->b_next = NULL;
8647 8647
8648 8648 freemsg(mp);
8649 8649 mp = next;
8650 8650 }
8651 8651 }
8652 8652
8653 8653 mblk_t *
8654 8654 copymsgchain(mblk_t *mp)
8655 8655 {
8656 8656 mblk_t *nmp = NULL;
8657 8657 mblk_t **nmpp = &nmp;
8658 8658
8659 8659 for (; mp != NULL; mp = mp->b_next) {
8660 8660 if ((*nmpp = copymsg(mp)) == NULL) {
8661 8661 freemsgchain(nmp);
8662 8662 return (NULL);
8663 8663 }
8664 8664
8665 8665 nmpp = &((*nmpp)->b_next);
8666 8666 }
8667 8667
8668 8668 return (nmp);
8669 8669 }
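
/*
 * Caller sketch (hypothetical name): copymsgchain() is all-or-nothing,
 * so on failure the partial copy has already been freed and the caller
 * still owns the original chain.
 */
#if 0	/* illustrative only */
static mblk_t *
example_duplicate_chain(mblk_t *chain)
{
	mblk_t *copy = copymsgchain(chain);

	/* copy == NULL: allocation failed, chain itself is untouched. */
	return (copy);
}
#endif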
8670 8670
8671 8671 /* NOTE: Do not add code after this point. */
8672 8672 #undef QLOCK
8673 8673
8674 8674 /*
8675 8675  * Replacement for the QLOCK macro for those that can't use it.
8676 8676 */
8677 8677 kmutex_t *
8678 8678 QLOCK(queue_t *q)
8679 8679 {
8680 8680 return (&(q)->q_lock);
8681 8681 }
8682 8682
8683 8683 /*
8684 8684  * Dummy runqueues/queuerun functions for backwards compatibility.
8685 8685 */
8686 8686 #undef runqueues
8687 8687 void
8688 8688 runqueues(void)
8689 8689 {
8690 8690 }
8691 8691
8692 8692 #undef queuerun
8693 8693 void
8694 8694 queuerun(void)
8695 8695 {
8696 8696 }
8697 8697
8698 8698 /*
8699 8699 * Initialize the STR stack instance, which tracks autopush and persistent
8700 8700 * links.
8701 8701 */
8702 8702 /* ARGSUSED */
8703 8703 static void *
8704 8704 str_stack_init(netstackid_t stackid, netstack_t *ns)
8705 8705 {
8706 8706 str_stack_t *ss;
8707 8707 int i;
8708 8708
8709 8709 ss = (str_stack_t *)kmem_zalloc(sizeof (*ss), KM_SLEEP);
8710 8710 ss->ss_netstack = ns;
8711 8711
8712 8712 /*
8713 8713 * set up autopush
8714 8714 */
8715 8715 sad_initspace(ss);
8716 8716
8717 8717 /*
8718 8718 * set up mux_node structures.
8719 8719 */
8720 8720 ss->ss_devcnt = devcnt; /* In case it should change before free */
8721 8721 ss->ss_mux_nodes = kmem_zalloc((sizeof (struct mux_node) *
8722 8722 ss->ss_devcnt), KM_SLEEP);
8723 8723 for (i = 0; i < ss->ss_devcnt; i++)
8724 8724 ss->ss_mux_nodes[i].mn_imaj = i;
8725 8725 return (ss);
8726 8726 }
8727 8727
8728 8728 /*
8729 8729  * Note: run at zone shutdown rather than destroy so that the PLINKs are
8730 8730 * gone by the time other cleanup happens from the destroy callbacks.
8731 8731 */
8732 8732 static void
8733 8733 str_stack_shutdown(netstackid_t stackid, void *arg)
8734 8734 {
8735 8735 str_stack_t *ss = (str_stack_t *)arg;
8736 8736 int i;
8737 8737 cred_t *cr;
8738 8738
8739 8739 cr = zone_get_kcred(netstackid_to_zoneid(stackid));
8740 8740 ASSERT(cr != NULL);
8741 8741
8742 8742 /* Undo all the I_PLINKs for this zone */
8743 8743 for (i = 0; i < ss->ss_devcnt; i++) {
8744 8744 struct mux_edge *ep;
8745 8745 ldi_handle_t lh;
8746 8746 ldi_ident_t li;
8747 8747 int ret;
8748 8748 int rval;
8749 8749 dev_t rdev;
8750 8750
8751 8751 ep = ss->ss_mux_nodes[i].mn_outp;
8752 8752 if (ep == NULL)
8753 8753 continue;
8754 8754 ret = ldi_ident_from_major((major_t)i, &li);
8755 8755 if (ret != 0) {
8756 8756 continue;
8757 8757 }
8758 8758 rdev = ep->me_dev;
8759 8759 ret = ldi_open_by_dev(&rdev, OTYP_CHR, FREAD|FWRITE,
8760 8760 cr, &lh, li);
8761 8761 if (ret != 0) {
8762 8762 ldi_ident_release(li);
8763 8763 continue;
8764 8764 }
8765 8765
8766 8766 ret = ldi_ioctl(lh, I_PUNLINK, (intptr_t)MUXID_ALL, FKIOCTL,
8767 8767 cr, &rval);
8768 8768 if (ret) {
8769 8769 (void) ldi_close(lh, FREAD|FWRITE, cr);
8770 8770 ldi_ident_release(li);
8771 8771 continue;
8772 8772 }
8773 8773 (void) ldi_close(lh, FREAD|FWRITE, cr);
8774 8774
8775 8775 		/* Release the layered driver identity */
8776 8776 ldi_ident_release(li);
8777 8777 }
8778 8778 crfree(cr);
8779 8779
8780 8780 sad_freespace(ss);
8781 8781
8782 8782 kmem_free(ss->ss_mux_nodes, sizeof (struct mux_node) * ss->ss_devcnt);
8783 8783 ss->ss_mux_nodes = NULL;
8784 8784 }
8785 8785
8786 8786 /*
8787 8787 * Free the structure; str_stack_shutdown did the other cleanup work.
8788 8788 */
8789 8789 /* ARGSUSED */
8790 8790 static void
8791 8791 str_stack_fini(netstackid_t stackid, void *arg)
8792 8792 {
8793 8793 str_stack_t *ss = (str_stack_t *)arg;
8794 8794
8795 8795 kmem_free(ss, sizeof (*ss));
8796 8796 }
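
/*
 * str_stack_init(), str_stack_shutdown() and str_stack_fini() form the
 * usual netstack callback trio; a registration sketch (the framework
 * performs this once at STREAMS initialization):
 */
#if 0	/* illustrative only */
	netstack_register(NS_STR, str_stack_init, str_stack_shutdown,
	    str_stack_fini);
#endif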