Print this page
5042 stop using deprecated atomic functions
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/io/multidata.c
+++ new/usr/src/uts/common/io/multidata.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License, Version 1.0 only
6 6 * (the "License"). You may not use this file except in compliance
7 7 * with the License.
8 8 *
9 9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10 10 * or http://www.opensolaris.org/os/licensing.
11 11 * See the License for the specific language governing permissions
12 12 * and limitations under the License.
13 13 *
14 14 * When distributing Covered Code, include this CDDL HEADER in each
15 15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16 16 * If applicable, add the following below this CDDL HEADER, with the
↓ open down ↓ |
16 lines elided |
↑ open up ↑ |
17 17 * fields enclosed by brackets "[]" replaced with your own identifying
18 18 * information: Portions Copyright [yyyy] [name of copyright owner]
19 19 *
20 20 * CDDL HEADER END
21 21 */
22 22 /*
23 23 * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
24 24 * Use is subject to license terms.
25 25 */
26 26
27 -#pragma ident "%Z%%M% %I% %E% SMI"
28 -
29 27 /*
30 28 * Multidata, as described in the following papers:
31 29 *
32 30 * Adi Masputra,
33 31 * Multidata V.2: VA-Disjoint Packet Extents Framework Interface
34 32 * Design Specification. August 2004.
35 33 * Available as http://sac.sfbay/PSARC/2004/594/materials/mmd2.pdf.
36 34 *
37 35 * Adi Masputra,
38 36 * Multidata Interface Design Specification. Sep 2002.
39 37 * Available as http://sac.sfbay/PSARC/2002/276/materials/mmd.pdf.
40 38 *
41 39 * Adi Masputra, Frank DiMambro, Kacheong Poon,
42 40 * An Efficient Networking Transmit Mechanism for Solaris:
43 41 * Multidata Transmit (MDT). May 2002.
44 42 * Available as http://sac.sfbay/PSARC/2002/276/materials/mdt.pdf.
45 43 */
46 44
47 45 #include <sys/types.h>
48 46 #include <sys/stream.h>
49 47 #include <sys/dlpi.h>
50 48 #include <sys/stropts.h>
51 49 #include <sys/strsun.h>
52 50 #include <sys/strlog.h>
53 51 #include <sys/strsubr.h>
54 52 #include <sys/sysmacros.h>
55 53 #include <sys/cmn_err.h>
56 54 #include <sys/debug.h>
57 55 #include <sys/kmem.h>
58 56 #include <sys/atomic.h>
59 57
60 58 #include <sys/multidata.h>
61 59 #include <sys/multidata_impl.h>
62 60
/* kmem cache constructors/destructors and internal helpers */
static int mmd_constructor(void *, void *, int);
static void mmd_destructor(void *, void *);
static int pdslab_constructor(void *, void *, int);
static void pdslab_destructor(void *, void *);
static int pattbl_constructor(void *, void *, int);
static void pattbl_destructor(void *, void *);
static void mmd_esballoc_free(caddr_t);
static int mmd_copy_pattbl(patbkt_t *, multidata_t *, pdesc_t *, int);

static boolean_t pbuf_ref_valid(multidata_t *, pdescinfo_t *);
#pragma inline(pbuf_ref_valid)

static boolean_t pdi_in_range(pdescinfo_t *, pdescinfo_t *);
#pragma inline(pdi_in_range)

static pdesc_t *mmd_addpdesc_int(multidata_t *, pdescinfo_t *, int *, int);
#pragma inline(mmd_addpdesc_int)

static void mmd_destroy_pattbl(patbkt_t **);
#pragma inline(mmd_destroy_pattbl)

static pattr_t *mmd_find_pattr(patbkt_t *, uint_t);
#pragma inline(mmd_find_pattr)

static pdesc_t *mmd_destroy_pdesc(multidata_t *, pdesc_t *);
#pragma inline(mmd_destroy_pdesc)

static pdesc_t *mmd_getpdesc(multidata_t *, pdesc_t *, pdescinfo_t *, uint_t,
    boolean_t);
#pragma inline(mmd_getpdesc)

/* caches for Multidata metadata, descriptor slabs and attribute tables */
static struct kmem_cache *mmd_cache;
static struct kmem_cache *pd_slab_cache;
static struct kmem_cache *pattbl_cache;

/* debug-message switch; non-zero enables MMD_DEBUG() warnings */
int mmd_debug = 1;
#define	MMD_DEBUG(s)	if (mmd_debug > 0) cmn_err s

/*
 * Set this to true to bypass pdesc bounds checking.
 */
boolean_t mmd_speed_over_safety = B_FALSE;

/*
 * Patchable kmem_cache flags.
 */
int mmd_kmem_flags = 0;
int pdslab_kmem_flags = 0;
int pattbl_kmem_flags = 0;

/*
 * Alignment (in bytes) of our kmem caches.
 */
#define	MULTIDATA_CACHE_ALIGN	64

/*
 * Default number of packet descriptors per descriptor slab. Making
 * this too small will trigger more descriptor slab allocation; making
 * it too large will create too many unclaimed descriptors.
 */
#define	PDSLAB_SZ	15
uint_t pdslab_sz = PDSLAB_SZ;

/*
 * Default attribute hash table size. It's okay to set this to a small
 * value (even to 1) because there aren't that many attributes currently
 * defined, and because we assume there won't be many attributes associated
 * with a Multidata at a given time. Increasing the size will reduce
 * attribute search time (given a large number of attributes in a Multidata),
 * and decreasing it will reduce the memory footprints and the overhead
 * associated with managing the table.
 */
#define	PATTBL_SZ	1
uint_t pattbl_sz = PATTBL_SZ;

/*
 * Attribute hash key.
 */
#define	PATTBL_HASH(x, sz)	((x) % (sz))

/*
 * Structure that precedes each Multidata metadata.
 */
struct mmd_buf_info {
	frtn_t	frp;		/* free routine */
	uint_t	buf_len;	/* length of kmem buffer */
};

/*
 * The size of each metadata buffer.
 */
#define	MMD_CACHE_SIZE \
	(sizeof (struct mmd_buf_info) + sizeof (multidata_t))
156 154
157 155 /*
158 156 * Called during startup in order to create the Multidata kmem caches.
159 157 */
160 158 void
161 159 mmd_init(void)
162 160 {
163 161 pdslab_sz = MAX(1, pdslab_sz); /* at least 1 descriptor */
164 162 pattbl_sz = MAX(1, pattbl_sz); /* at least 1 bucket */
165 163
166 164 mmd_cache = kmem_cache_create("multidata", MMD_CACHE_SIZE,
167 165 MULTIDATA_CACHE_ALIGN, mmd_constructor, mmd_destructor,
168 166 NULL, NULL, NULL, mmd_kmem_flags);
169 167
170 168 pd_slab_cache = kmem_cache_create("multidata_pdslab",
171 169 PDESC_SLAB_SIZE(pdslab_sz), MULTIDATA_CACHE_ALIGN,
172 170 pdslab_constructor, pdslab_destructor, NULL,
173 171 (void *)(uintptr_t)pdslab_sz, NULL, pdslab_kmem_flags);
174 172
175 173 pattbl_cache = kmem_cache_create("multidata_pattbl",
176 174 sizeof (patbkt_t) * pattbl_sz, MULTIDATA_CACHE_ALIGN,
177 175 pattbl_constructor, pattbl_destructor, NULL,
178 176 (void *)(uintptr_t)pattbl_sz, NULL, pattbl_kmem_flags);
179 177 }
180 178
181 179 /*
182 180 * Create a Multidata message block.
183 181 */
184 182 multidata_t *
185 183 mmd_alloc(mblk_t *hdr_mp, mblk_t **mmd_mp, int kmflags)
186 184 {
187 185 uchar_t *buf;
188 186 multidata_t *mmd;
189 187 uint_t mmd_mplen;
190 188 struct mmd_buf_info *buf_info;
191 189
192 190 ASSERT(hdr_mp != NULL);
193 191 ASSERT(mmd_mp != NULL);
194 192
195 193 /*
196 194 * Caller should never pass in a chain of mblks since we
197 195 * only care about the first one, hence the assertions.
198 196 */
199 197 ASSERT(hdr_mp->b_cont == NULL);
200 198
201 199 if ((buf = kmem_cache_alloc(mmd_cache, kmflags)) == NULL)
202 200 return (NULL);
203 201
204 202 buf_info = (struct mmd_buf_info *)buf;
205 203 buf_info->frp.free_arg = (caddr_t)buf;
206 204
207 205 mmd = (multidata_t *)(buf_info + 1);
208 206 mmd_mplen = sizeof (*mmd);
209 207
210 208 if ((*mmd_mp = desballoc((uchar_t *)mmd, mmd_mplen, BPRI_HI,
211 209 &(buf_info->frp))) == NULL) {
212 210 kmem_cache_free(mmd_cache, buf);
213 211 return (NULL);
214 212 }
215 213
216 214 DB_TYPE(*mmd_mp) = M_MULTIDATA;
217 215 (*mmd_mp)->b_wptr += mmd_mplen;
218 216 mmd->mmd_dp = (*mmd_mp)->b_datap;
219 217 mmd->mmd_hbuf = hdr_mp;
220 218
221 219 return (mmd);
222 220 }
223 221
224 222 /*
225 223 * Associate additional payload buffer to the Multidata.
226 224 */
227 225 int
228 226 mmd_addpldbuf(multidata_t *mmd, mblk_t *pld_mp)
229 227 {
230 228 int i;
231 229
232 230 ASSERT(mmd != NULL);
233 231 ASSERT(mmd->mmd_magic == MULTIDATA_MAGIC);
234 232 ASSERT(pld_mp != NULL);
235 233
236 234 mutex_enter(&mmd->mmd_pd_slab_lock);
237 235 for (i = 0; i < MULTIDATA_MAX_PBUFS &&
238 236 mmd->mmd_pbuf_cnt < MULTIDATA_MAX_PBUFS; i++) {
239 237 if (mmd->mmd_pbuf[i] == pld_mp) {
240 238 /* duplicate entry */
241 239 MMD_DEBUG((CE_WARN, "mmd_addpldbuf: error adding "
242 240 "pld 0x%p to mmd 0x%p since it has been "
243 241 "previously added into slot %d (total %d)\n",
244 242 (void *)pld_mp, (void *)mmd, i, mmd->mmd_pbuf_cnt));
245 243 mutex_exit(&mmd->mmd_pd_slab_lock);
246 244 return (-1);
247 245 } else if (mmd->mmd_pbuf[i] == NULL) {
248 246 mmd->mmd_pbuf[i] = pld_mp;
249 247 mmd->mmd_pbuf_cnt++;
250 248 mutex_exit(&mmd->mmd_pd_slab_lock);
251 249 return (i);
252 250 }
253 251 }
254 252
255 253 /* all slots are taken */
256 254 MMD_DEBUG((CE_WARN, "mmd_addpldbuf: error adding pld 0x%p to mmd 0x%p "
257 255 "since no slot space is left (total %d max %d)\n", (void *)pld_mp,
258 256 (void *)mmd, mmd->mmd_pbuf_cnt, MULTIDATA_MAX_PBUFS));
259 257 mutex_exit(&mmd->mmd_pd_slab_lock);
260 258
261 259 return (-1);
262 260 }
263 261
264 262 /*
265 263 * Multidata metadata kmem cache constructor routine.
266 264 */
267 265 /* ARGSUSED */
268 266 static int
269 267 mmd_constructor(void *buf, void *cdrarg, int kmflags)
270 268 {
271 269 struct mmd_buf_info *buf_info;
272 270 multidata_t *mmd;
273 271
274 272 bzero((void *)buf, MMD_CACHE_SIZE);
275 273
276 274 buf_info = (struct mmd_buf_info *)buf;
277 275 buf_info->frp.free_func = mmd_esballoc_free;
278 276 buf_info->buf_len = MMD_CACHE_SIZE;
279 277
280 278 mmd = (multidata_t *)(buf_info + 1);
281 279 mmd->mmd_magic = MULTIDATA_MAGIC;
282 280
283 281 mutex_init(&(mmd->mmd_pd_slab_lock), NULL, MUTEX_DRIVER, NULL);
284 282 QL_INIT(&(mmd->mmd_pd_slab_q));
285 283 QL_INIT(&(mmd->mmd_pd_q));
286 284
287 285 return (0);
288 286 }
289 287
290 288 /*
291 289 * Multidata metadata kmem cache destructor routine.
292 290 */
293 291 /* ARGSUSED */
294 292 static void
295 293 mmd_destructor(void *buf, void *cdrarg)
296 294 {
297 295 multidata_t *mmd;
298 296 #ifdef DEBUG
299 297 int i;
300 298 #endif
301 299
302 300 mmd = (multidata_t *)((uchar_t *)buf + sizeof (struct mmd_buf_info));
303 301
304 302 ASSERT(mmd->mmd_magic == MULTIDATA_MAGIC);
305 303 ASSERT(mmd->mmd_dp == NULL);
306 304 ASSERT(mmd->mmd_hbuf == NULL);
307 305 ASSERT(mmd->mmd_pbuf_cnt == 0);
308 306 #ifdef DEBUG
309 307 for (i = 0; i < MULTIDATA_MAX_PBUFS; i++)
310 308 ASSERT(mmd->mmd_pbuf[i] == NULL);
311 309 #endif
312 310 ASSERT(mmd->mmd_pattbl == NULL);
313 311
314 312 mutex_destroy(&(mmd->mmd_pd_slab_lock));
315 313 ASSERT(mmd->mmd_pd_slab_q.ql_next == &(mmd->mmd_pd_slab_q));
316 314 ASSERT(mmd->mmd_slab_cnt == 0);
317 315 ASSERT(mmd->mmd_pd_q.ql_next == &(mmd->mmd_pd_q));
318 316 ASSERT(mmd->mmd_pd_cnt == 0);
319 317 ASSERT(mmd->mmd_hbuf_ref == 0);
320 318 ASSERT(mmd->mmd_pbuf_ref == 0);
321 319 }
322 320
323 321 /*
324 322 * Multidata message block free callback routine.
325 323 */
326 324 static void
327 325 mmd_esballoc_free(caddr_t buf)
328 326 {
329 327 multidata_t *mmd;
330 328 pdesc_t *pd;
331 329 pdesc_slab_t *slab;
332 330 int i;
333 331
334 332 ASSERT(buf != NULL);
335 333 ASSERT(((struct mmd_buf_info *)buf)->buf_len == MMD_CACHE_SIZE);
336 334
337 335 mmd = (multidata_t *)(buf + sizeof (struct mmd_buf_info));
338 336 ASSERT(mmd->mmd_magic == MULTIDATA_MAGIC);
339 337
340 338 ASSERT(mmd->mmd_dp != NULL);
341 339 ASSERT(mmd->mmd_dp->db_ref == 1);
342 340
343 341 /* remove all packet descriptors and private attributes */
344 342 pd = Q2PD(mmd->mmd_pd_q.ql_next);
345 343 while (pd != Q2PD(&(mmd->mmd_pd_q)))
346 344 pd = mmd_destroy_pdesc(mmd, pd);
347 345
348 346 ASSERT(mmd->mmd_pd_q.ql_next == &(mmd->mmd_pd_q));
349 347 ASSERT(mmd->mmd_pd_cnt == 0);
350 348 ASSERT(mmd->mmd_hbuf_ref == 0);
351 349 ASSERT(mmd->mmd_pbuf_ref == 0);
352 350
353 351 /* remove all global attributes */
354 352 if (mmd->mmd_pattbl != NULL)
355 353 mmd_destroy_pattbl(&(mmd->mmd_pattbl));
356 354
357 355 /* remove all descriptor slabs */
358 356 slab = Q2PDSLAB(mmd->mmd_pd_slab_q.ql_next);
359 357 while (slab != Q2PDSLAB(&(mmd->mmd_pd_slab_q))) {
360 358 pdesc_slab_t *slab_next = Q2PDSLAB(slab->pds_next);
361 359
362 360 remque(&(slab->pds_next));
363 361 slab->pds_next = NULL;
364 362 slab->pds_prev = NULL;
365 363 slab->pds_mmd = NULL;
366 364 slab->pds_used = 0;
367 365 kmem_cache_free(pd_slab_cache, slab);
368 366
369 367 ASSERT(mmd->mmd_slab_cnt > 0);
370 368 mmd->mmd_slab_cnt--;
371 369 slab = slab_next;
372 370 }
373 371 ASSERT(mmd->mmd_pd_slab_q.ql_next == &(mmd->mmd_pd_slab_q));
374 372 ASSERT(mmd->mmd_slab_cnt == 0);
375 373
376 374 mmd->mmd_dp = NULL;
377 375
378 376 /* finally, free all associated message blocks */
379 377 if (mmd->mmd_hbuf != NULL) {
380 378 freeb(mmd->mmd_hbuf);
381 379 mmd->mmd_hbuf = NULL;
382 380 }
383 381
384 382 for (i = 0; i < MULTIDATA_MAX_PBUFS; i++) {
385 383 if (mmd->mmd_pbuf[i] != NULL) {
386 384 freeb(mmd->mmd_pbuf[i]);
387 385 mmd->mmd_pbuf[i] = NULL;
388 386 ASSERT(mmd->mmd_pbuf_cnt > 0);
389 387 mmd->mmd_pbuf_cnt--;
390 388 }
391 389 }
392 390
393 391 ASSERT(mmd->mmd_pbuf_cnt == 0);
394 392 ASSERT(MUTEX_NOT_HELD(&(mmd->mmd_pd_slab_lock)));
395 393 kmem_cache_free(mmd_cache, buf);
396 394 }
397 395
398 396 /*
399 397 * Multidata message block copy routine, called by copyb() when it
400 398 * encounters a M_MULTIDATA data block type. This routine should
401 399 * not be called by anyone other than copyb(), since it may go away
402 400 * (read: become static to this module) once some sort of copy callback
403 401 * routine is made available.
404 402 */
405 403 mblk_t *
406 404 mmd_copy(mblk_t *bp, int kmflags)
407 405 {
408 406 multidata_t *mmd, *n_mmd;
409 407 mblk_t *n_hbuf = NULL, *n_pbuf[MULTIDATA_MAX_PBUFS];
410 408 mblk_t **pmp_last = &n_pbuf[MULTIDATA_MAX_PBUFS - 1];
411 409 mblk_t **pmp;
412 410 mblk_t *n_bp = NULL;
413 411 pdesc_t *pd;
414 412 uint_t n_pbuf_cnt = 0;
415 413 int idx, i;
416 414
417 415 #define FREE_PBUFS() { \
418 416 for (pmp = &n_pbuf[0]; pmp <= pmp_last; pmp++) \
419 417 if (*pmp != NULL) freeb(*pmp); \
420 418 }
421 419
422 420 #define REL_OFF(p, base, n_base) \
423 421 ((uchar_t *)(n_base) + ((uchar_t *)(p) - (uchar_t *)base))
424 422
425 423 ASSERT(bp != NULL && DB_TYPE(bp) == M_MULTIDATA);
426 424 mmd = mmd_getmultidata(bp);
427 425
428 426 /* copy the header buffer */
429 427 if (mmd->mmd_hbuf != NULL && (n_hbuf = copyb(mmd->mmd_hbuf)) == NULL)
430 428 return (NULL);
431 429
432 430 /* copy the payload buffer(s) */
433 431 mutex_enter(&mmd->mmd_pd_slab_lock);
434 432 bzero((void *)&n_pbuf[0], sizeof (mblk_t *) * MULTIDATA_MAX_PBUFS);
435 433 n_pbuf_cnt = mmd->mmd_pbuf_cnt;
436 434 for (i = 0; i < n_pbuf_cnt; i++) {
437 435 ASSERT(mmd->mmd_pbuf[i] != NULL);
438 436 n_pbuf[i] = copyb(mmd->mmd_pbuf[i]);
439 437 if (n_pbuf[i] == NULL) {
440 438 FREE_PBUFS();
441 439 mutex_exit(&mmd->mmd_pd_slab_lock);
442 440 return (NULL);
443 441 }
444 442 }
445 443
446 444 /* allocate new Multidata */
447 445 n_mmd = mmd_alloc(n_hbuf, &n_bp, kmflags);
448 446 if (n_mmd == NULL) {
449 447 if (n_hbuf != NULL)
450 448 freeb(n_hbuf);
451 449 if (n_pbuf_cnt != 0)
452 450 FREE_PBUFS();
453 451 mutex_exit(&mmd->mmd_pd_slab_lock);
454 452 return (NULL);
455 453 }
456 454
457 455 /*
458 456 * Add payload buffer(s); upon success, leave n_pbuf array
459 457 * alone, as the newly-created Multidata had already contained
460 458 * the mblk pointers stored in the array. These will be freed
461 459 * along with the Multidata itself.
462 460 */
463 461 for (i = 0, pmp = &n_pbuf[0]; i < n_pbuf_cnt; i++, pmp++) {
464 462 idx = mmd_addpldbuf(n_mmd, *pmp);
465 463 if (idx < 0) {
466 464 FREE_PBUFS();
467 465 freeb(n_bp);
468 466 mutex_exit(&mmd->mmd_pd_slab_lock);
469 467 return (NULL);
470 468 }
471 469 }
472 470
473 471 /* copy over global attributes */
474 472 if (mmd->mmd_pattbl != NULL &&
475 473 mmd_copy_pattbl(mmd->mmd_pattbl, n_mmd, NULL, kmflags) < 0) {
476 474 freeb(n_bp);
477 475 mutex_exit(&mmd->mmd_pd_slab_lock);
478 476 return (NULL);
479 477 }
480 478
481 479 /* copy over packet descriptors and their atttributes */
482 480 pd = mmd_getpdesc(mmd, NULL, NULL, 1, B_TRUE); /* first pdesc */
483 481 while (pd != NULL) {
484 482 pdesc_t *n_pd;
485 483 pdescinfo_t *pdi, n_pdi;
486 484 uchar_t *n_base, *base;
487 485 pdesc_t *pd_next;
488 486
489 487 /* next pdesc */
490 488 pd_next = mmd_getpdesc(pd->pd_slab->pds_mmd, pd, NULL,
491 489 1, B_TRUE);
492 490
493 491 /* skip if already removed */
494 492 if (pd->pd_flags & PDESC_REM_DEFER) {
495 493 pd = pd_next;
496 494 continue;
497 495 }
498 496
499 497 pdi = &(pd->pd_pdi);
500 498 bzero(&n_pdi, sizeof (n_pdi));
501 499
502 500 /*
503 501 * Calculate new descriptor values based on the offset of
504 502 * each pointer relative to the associated buffer(s).
505 503 */
506 504 ASSERT(pdi->flags & PDESC_HAS_REF);
507 505 if (pdi->flags & PDESC_HBUF_REF) {
508 506 n_base = n_mmd->mmd_hbuf->b_rptr;
509 507 base = mmd->mmd_hbuf->b_rptr;
510 508
511 509 n_pdi.flags |= PDESC_HBUF_REF;
512 510 n_pdi.hdr_base = REL_OFF(pdi->hdr_base, base, n_base);
513 511 n_pdi.hdr_rptr = REL_OFF(pdi->hdr_rptr, base, n_base);
514 512 n_pdi.hdr_wptr = REL_OFF(pdi->hdr_wptr, base, n_base);
515 513 n_pdi.hdr_lim = REL_OFF(pdi->hdr_lim, base, n_base);
516 514 }
517 515
518 516 if (pdi->flags & PDESC_PBUF_REF) {
519 517 n_pdi.flags |= PDESC_PBUF_REF;
520 518 n_pdi.pld_cnt = pdi->pld_cnt;
521 519
522 520 for (i = 0; i < pdi->pld_cnt; i++) {
523 521 idx = pdi->pld_ary[i].pld_pbuf_idx;
524 522 ASSERT(idx < MULTIDATA_MAX_PBUFS);
525 523 ASSERT(n_mmd->mmd_pbuf[idx] != NULL);
526 524 ASSERT(mmd->mmd_pbuf[idx] != NULL);
527 525
528 526 n_base = n_mmd->mmd_pbuf[idx]->b_rptr;
529 527 base = mmd->mmd_pbuf[idx]->b_rptr;
530 528
531 529 n_pdi.pld_ary[i].pld_pbuf_idx = idx;
532 530
533 531 /*
534 532 * We can't copy the pointers just like that,
535 533 * so calculate the relative offset.
536 534 */
537 535 n_pdi.pld_ary[i].pld_rptr =
538 536 REL_OFF(pdi->pld_ary[i].pld_rptr,
539 537 base, n_base);
540 538 n_pdi.pld_ary[i].pld_wptr =
541 539 REL_OFF(pdi->pld_ary[i].pld_wptr,
542 540 base, n_base);
543 541 }
544 542 }
545 543
546 544 /* add the new descriptor to the new Multidata */
547 545 n_pd = mmd_addpdesc_int(n_mmd, &n_pdi, NULL, kmflags);
548 546
549 547 if (n_pd == NULL || (pd->pd_pattbl != NULL &&
550 548 mmd_copy_pattbl(pd->pd_pattbl, n_mmd, n_pd, kmflags) < 0)) {
551 549 freeb(n_bp);
552 550 mutex_exit(&mmd->mmd_pd_slab_lock);
553 551 return (NULL);
554 552 }
555 553
556 554 pd = pd_next;
557 555 }
558 556 #undef REL_OFF
559 557 #undef FREE_PBUFS
560 558
561 559 mutex_exit(&mmd->mmd_pd_slab_lock);
562 560 return (n_bp);
563 561 }
564 562
565 563 /*
566 564 * Given a Multidata message block, return the Multidata metadata handle.
567 565 */
568 566 multidata_t *
569 567 mmd_getmultidata(mblk_t *mp)
570 568 {
571 569 multidata_t *mmd;
572 570
573 571 ASSERT(mp != NULL);
574 572
575 573 if (DB_TYPE(mp) != M_MULTIDATA)
576 574 return (NULL);
577 575
578 576 mmd = (multidata_t *)mp->b_rptr;
579 577 ASSERT(mmd->mmd_magic == MULTIDATA_MAGIC);
580 578
581 579 return (mmd);
582 580 }
583 581
584 582 /*
585 583 * Return the start and end addresses of the associated buffer(s).
586 584 */
587 585 void
588 586 mmd_getregions(multidata_t *mmd, mbufinfo_t *mbi)
589 587 {
590 588 int i;
591 589
592 590 ASSERT(mmd != NULL);
593 591 ASSERT(mmd->mmd_magic == MULTIDATA_MAGIC);
594 592 ASSERT(mbi != NULL);
595 593
596 594 bzero((void *)mbi, sizeof (mbufinfo_t));
597 595
598 596 if (mmd->mmd_hbuf != NULL) {
599 597 mbi->hbuf_rptr = mmd->mmd_hbuf->b_rptr;
600 598 mbi->hbuf_wptr = mmd->mmd_hbuf->b_wptr;
601 599 }
602 600
603 601 mutex_enter(&mmd->mmd_pd_slab_lock);
604 602 for (i = 0; i < mmd->mmd_pbuf_cnt; i++) {
605 603 ASSERT(mmd->mmd_pbuf[i] != NULL);
606 604 mbi->pbuf_ary[i].pbuf_rptr = mmd->mmd_pbuf[i]->b_rptr;
607 605 mbi->pbuf_ary[i].pbuf_wptr = mmd->mmd_pbuf[i]->b_wptr;
608 606
609 607 }
610 608 mbi->pbuf_cnt = mmd->mmd_pbuf_cnt;
611 609 mutex_exit(&mmd->mmd_pd_slab_lock);
612 610 }
613 611
614 612 /*
615 613 * Return the Multidata statistics.
616 614 */
617 615 uint_t
618 616 mmd_getcnt(multidata_t *mmd, uint_t *hbuf_ref, uint_t *pbuf_ref)
619 617 {
620 618 uint_t pd_cnt;
621 619
622 620 ASSERT(mmd != NULL);
623 621 ASSERT(mmd->mmd_magic == MULTIDATA_MAGIC);
624 622
625 623 mutex_enter(&(mmd->mmd_pd_slab_lock));
626 624 if (hbuf_ref != NULL)
627 625 *hbuf_ref = mmd->mmd_hbuf_ref;
628 626 if (pbuf_ref != NULL)
629 627 *pbuf_ref = mmd->mmd_pbuf_ref;
630 628 pd_cnt = mmd->mmd_pd_cnt;
631 629 mutex_exit(&(mmd->mmd_pd_slab_lock));
632 630
633 631 return (pd_cnt);
634 632 }
635 633
/*
 * Bounds check a header reference: there must be a header buffer, all
 * four header pointers must be non-NULL and consistently ordered
 * (base <= rptr, rptr <= wptr, wptr <= lim), and the whole
 * [hdr_base, hdr_base + PDESC_HDRSIZE(pdi)) span must lie within the
 * Multidata's header mblk.
 */
#define	HBUF_REF_VALID(mmd, pdi) \
	((mmd)->mmd_hbuf != NULL && (pdi)->hdr_rptr != NULL && \
	(pdi)->hdr_wptr != NULL && (pdi)->hdr_base != NULL && \
	(pdi)->hdr_lim != NULL && (pdi)->hdr_lim >= (pdi)->hdr_base && \
	(pdi)->hdr_wptr >= (pdi)->hdr_rptr && \
	(pdi)->hdr_base <= (pdi)->hdr_rptr && \
	(pdi)->hdr_lim >= (pdi)->hdr_wptr && \
	(pdi)->hdr_base >= (mmd)->mmd_hbuf->b_rptr && \
	MBLKIN((mmd)->mmd_hbuf, \
	(pdi->hdr_base - (mmd)->mmd_hbuf->b_rptr), \
	PDESC_HDRSIZE(pdi)))
647 645
648 646 /*
649 647 * Bounds check payload area(s).
650 648 */
651 649 static boolean_t
652 650 pbuf_ref_valid(multidata_t *mmd, pdescinfo_t *pdi)
653 651 {
654 652 int i = 0, idx;
655 653 boolean_t valid = B_TRUE;
656 654 struct pld_ary_s *pa;
657 655
658 656 mutex_enter(&mmd->mmd_pd_slab_lock);
659 657 if (pdi->pld_cnt == 0 || pdi->pld_cnt > mmd->mmd_pbuf_cnt) {
660 658 mutex_exit(&mmd->mmd_pd_slab_lock);
661 659 return (B_FALSE);
662 660 }
663 661
664 662 pa = &pdi->pld_ary[0];
665 663 while (valid && i < pdi->pld_cnt) {
666 664 valid = (((idx = pa->pld_pbuf_idx) < mmd->mmd_pbuf_cnt) &&
667 665 pa->pld_rptr != NULL && pa->pld_wptr != NULL &&
668 666 pa->pld_wptr >= pa->pld_rptr &&
669 667 pa->pld_rptr >= mmd->mmd_pbuf[idx]->b_rptr &&
670 668 MBLKIN(mmd->mmd_pbuf[idx], (pa->pld_rptr -
671 669 mmd->mmd_pbuf[idx]->b_rptr),
672 670 PDESC_PLD_SPAN_SIZE(pdi, i)));
673 671
674 672 if (!valid) {
675 673 MMD_DEBUG((CE_WARN,
676 674 "pbuf_ref_valid: pdi 0x%p pld out of bound; "
677 675 "index %d has pld_cnt %d pbuf_idx %d "
678 676 "(mmd_pbuf_cnt %d), "
679 677 "pld_rptr 0x%p pld_wptr 0x%p len %d "
680 678 "(valid 0x%p-0x%p len %d)\n", (void *)pdi,
681 679 i, pdi->pld_cnt, idx, mmd->mmd_pbuf_cnt,
682 680 (void *)pa->pld_rptr,
683 681 (void *)pa->pld_wptr,
684 682 (int)PDESC_PLD_SPAN_SIZE(pdi, i),
685 683 (void *)mmd->mmd_pbuf[idx]->b_rptr,
686 684 (void *)mmd->mmd_pbuf[idx]->b_wptr,
687 685 (int)MBLKL(mmd->mmd_pbuf[idx])));
688 686 }
689 687
690 688 /* advance to next entry */
691 689 i++;
692 690 pa++;
693 691 }
694 692
695 693 mutex_exit(&mmd->mmd_pd_slab_lock);
696 694 return (valid);
697 695 }
698 696
699 697 /*
700 698 * Add a packet descriptor to the Multidata.
701 699 */
702 700 pdesc_t *
703 701 mmd_addpdesc(multidata_t *mmd, pdescinfo_t *pdi, int *err, int kmflags)
704 702 {
705 703 ASSERT(mmd != NULL);
706 704 ASSERT(mmd->mmd_magic == MULTIDATA_MAGIC);
707 705 ASSERT(pdi != NULL);
708 706 ASSERT(pdi->flags & PDESC_HAS_REF);
709 707
710 708 /* do the references refer to invalid memory regions? */
711 709 if (!mmd_speed_over_safety &&
712 710 (((pdi->flags & PDESC_HBUF_REF) && !HBUF_REF_VALID(mmd, pdi)) ||
713 711 ((pdi->flags & PDESC_PBUF_REF) && !pbuf_ref_valid(mmd, pdi)))) {
714 712 if (err != NULL)
715 713 *err = EINVAL;
716 714 return (NULL);
717 715 }
718 716
719 717 return (mmd_addpdesc_int(mmd, pdi, err, kmflags));
720 718 }
721 719
722 720 /*
723 721 * Internal routine to add a packet descriptor, called when mmd_addpdesc
724 722 * or mmd_copy tries to allocate and add a descriptor to a Multidata.
725 723 */
726 724 static pdesc_t *
727 725 mmd_addpdesc_int(multidata_t *mmd, pdescinfo_t *pdi, int *err, int kmflags)
728 726 {
729 727 pdesc_slab_t *slab, *slab_last;
730 728 pdesc_t *pd;
731 729
732 730 ASSERT(pdi->flags & PDESC_HAS_REF);
733 731 ASSERT(!(pdi->flags & PDESC_HBUF_REF) || HBUF_REF_VALID(mmd, pdi));
734 732 ASSERT(!(pdi->flags & PDESC_PBUF_REF) || pbuf_ref_valid(mmd, pdi));
735 733
736 734 if (err != NULL)
737 735 *err = 0;
738 736
739 737 mutex_enter(&(mmd->mmd_pd_slab_lock));
740 738 /*
741 739 * Is slab list empty or the last-added slab is full? If so,
742 740 * allocate new slab for the descriptor; otherwise, use the
743 741 * last-added slab instead.
744 742 */
745 743 slab_last = Q2PDSLAB(mmd->mmd_pd_slab_q.ql_prev);
746 744 if (mmd->mmd_pd_slab_q.ql_next == &(mmd->mmd_pd_slab_q) ||
747 745 slab_last->pds_used == slab_last->pds_sz) {
748 746 slab = kmem_cache_alloc(pd_slab_cache, kmflags);
749 747 if (slab == NULL) {
750 748 if (err != NULL)
751 749 *err = ENOMEM;
752 750 mutex_exit(&(mmd->mmd_pd_slab_lock));
753 751 return (NULL);
754 752 }
755 753 slab->pds_mmd = mmd;
756 754
757 755 ASSERT(slab->pds_used == 0);
758 756 ASSERT(slab->pds_next == NULL && slab->pds_prev == NULL);
759 757
760 758 /* insert slab at end of list */
761 759 insque(&(slab->pds_next), mmd->mmd_pd_slab_q.ql_prev);
762 760 mmd->mmd_slab_cnt++;
763 761 } else {
764 762 slab = slab_last;
765 763 }
766 764 ASSERT(slab->pds_used < slab->pds_sz);
767 765 pd = &(slab->pds_free_desc[slab->pds_used++]);
768 766 ASSERT(pd->pd_magic == PDESC_MAGIC);
769 767 pd->pd_next = NULL;
770 768 pd->pd_prev = NULL;
771 769 pd->pd_slab = slab;
772 770 pd->pd_pattbl = NULL;
773 771
774 772 /* copy over the descriptor info from caller */
775 773 PDI_COPY(pdi, &(pd->pd_pdi));
776 774
777 775 if (pd->pd_flags & PDESC_HBUF_REF)
778 776 mmd->mmd_hbuf_ref++;
779 777 if (pd->pd_flags & PDESC_PBUF_REF)
780 778 mmd->mmd_pbuf_ref += pd->pd_pdi.pld_cnt;
781 779 mmd->mmd_pd_cnt++;
782 780
783 781 /* insert descriptor at end of list */
784 782 insque(&(pd->pd_next), mmd->mmd_pd_q.ql_prev);
785 783 mutex_exit(&(mmd->mmd_pd_slab_lock));
786 784
787 785 return (pd);
788 786 }
789 787
790 788 /*
791 789 * Packet descriptor slab kmem cache constructor routine.
792 790 */
793 791 /* ARGSUSED */
794 792 static int
795 793 pdslab_constructor(void *buf, void *cdrarg, int kmflags)
796 794 {
797 795 pdesc_slab_t *slab;
798 796 uint_t cnt = (uint_t)(uintptr_t)cdrarg;
799 797 int i;
800 798
801 799 ASSERT(cnt > 0); /* slab size can't be zero */
802 800
803 801 slab = (pdesc_slab_t *)buf;
804 802 slab->pds_next = NULL;
805 803 slab->pds_prev = NULL;
806 804 slab->pds_mmd = NULL;
807 805 slab->pds_used = 0;
808 806 slab->pds_sz = cnt;
809 807
810 808 for (i = 0; i < cnt; i++) {
811 809 pdesc_t *pd = &(slab->pds_free_desc[i]);
812 810 pd->pd_magic = PDESC_MAGIC;
813 811 }
814 812 return (0);
815 813 }
816 814
817 815 /*
818 816 * Packet descriptor slab kmem cache destructor routine.
819 817 */
820 818 /* ARGSUSED */
821 819 static void
822 820 pdslab_destructor(void *buf, void *cdrarg)
823 821 {
824 822 pdesc_slab_t *slab;
825 823
826 824 slab = (pdesc_slab_t *)buf;
827 825 ASSERT(slab->pds_next == NULL);
828 826 ASSERT(slab->pds_prev == NULL);
829 827 ASSERT(slab->pds_mmd == NULL);
830 828 ASSERT(slab->pds_used == 0);
831 829 ASSERT(slab->pds_sz > 0);
832 830 }
833 831
834 832 /*
835 833 * Remove a packet descriptor from the in-use descriptor list,
836 834 * called by mmd_rempdesc or during free.
837 835 */
838 836 static pdesc_t *
839 837 mmd_destroy_pdesc(multidata_t *mmd, pdesc_t *pd)
840 838 {
841 839 pdesc_t *pd_next;
842 840
843 841 pd_next = Q2PD(pd->pd_next);
844 842 remque(&(pd->pd_next));
845 843
846 844 /* remove all local attributes */
847 845 if (pd->pd_pattbl != NULL)
848 846 mmd_destroy_pattbl(&(pd->pd_pattbl));
849 847
850 848 /* don't decrease counts for a removed descriptor */
851 849 if (!(pd->pd_flags & PDESC_REM_DEFER)) {
852 850 if (pd->pd_flags & PDESC_HBUF_REF) {
853 851 ASSERT(mmd->mmd_hbuf_ref > 0);
854 852 mmd->mmd_hbuf_ref--;
855 853 }
856 854 if (pd->pd_flags & PDESC_PBUF_REF) {
857 855 ASSERT(mmd->mmd_pbuf_ref > 0);
858 856 mmd->mmd_pbuf_ref -= pd->pd_pdi.pld_cnt;
859 857 }
860 858 ASSERT(mmd->mmd_pd_cnt > 0);
861 859 mmd->mmd_pd_cnt--;
862 860 }
863 861 return (pd_next);
864 862 }
865 863
866 864 /*
867 865 * Remove a packet descriptor from the Multidata.
868 866 */
869 867 void
870 868 mmd_rempdesc(pdesc_t *pd)
871 869 {
872 870 multidata_t *mmd;
873 871
874 872 ASSERT(pd->pd_magic == PDESC_MAGIC);
875 873 ASSERT(pd->pd_slab != NULL);
876 874
877 875 mmd = pd->pd_slab->pds_mmd;
878 876 ASSERT(mmd->mmd_magic == MULTIDATA_MAGIC);
879 877
880 878 mutex_enter(&(mmd->mmd_pd_slab_lock));
881 879 /*
882 880 * We can't deallocate the associated resources if the Multidata
883 881 * is shared with other threads, because it's possible that the
884 882 * descriptor handle value is held by those threads. That's why
885 883 * we simply mark the entry as "removed" and decrement the counts.
886 884 * If there are no other threads, then we free the descriptor.
887 885 */
888 886 if (mmd->mmd_dp->db_ref > 1) {
889 887 pd->pd_flags |= PDESC_REM_DEFER;
890 888 if (pd->pd_flags & PDESC_HBUF_REF) {
891 889 ASSERT(mmd->mmd_hbuf_ref > 0);
892 890 mmd->mmd_hbuf_ref--;
893 891 }
894 892 if (pd->pd_flags & PDESC_PBUF_REF) {
895 893 ASSERT(mmd->mmd_pbuf_ref > 0);
896 894 mmd->mmd_pbuf_ref -= pd->pd_pdi.pld_cnt;
897 895 }
898 896 ASSERT(mmd->mmd_pd_cnt > 0);
899 897 mmd->mmd_pd_cnt--;
900 898 } else {
901 899 (void) mmd_destroy_pdesc(mmd, pd);
902 900 }
903 901 mutex_exit(&(mmd->mmd_pd_slab_lock));
904 902 }
905 903
/*
 * A generic routine to traverse the packet descriptor in-use list.
 *
 * With pd == NULL, returns the first (forw != 0) or last (forw == 0)
 * live element; with pd non-NULL, the next or previous live element
 * relative to pd.  Elements marked PDESC_REM_DEFER are skipped, and
 * NULL is returned once the list head is reached again.  When pdi is
 * non-NULL, the found descriptor's info is copied out to it.  The
 * caller indicates via mutex_held whether it already owns
 * mmd_pd_slab_lock (e.g. mmd_copy); otherwise it is taken here.
 */
static pdesc_t *
mmd_getpdesc(multidata_t *mmd, pdesc_t *pd, pdescinfo_t *pdi, uint_t forw,
    boolean_t mutex_held)
{
	pdesc_t *pd_head;

	ASSERT(pd == NULL || pd->pd_slab->pds_mmd == mmd);
	ASSERT(mmd->mmd_magic == MULTIDATA_MAGIC);
	ASSERT(!mutex_held || MUTEX_HELD(&(mmd->mmd_pd_slab_lock)));

	if (!mutex_held)
		mutex_enter(&(mmd->mmd_pd_slab_lock));
	pd_head = Q2PD(&(mmd->mmd_pd_q));

	if (pd == NULL) {
		/*
		 * We're called by mmd_get{first,last}pdesc, and so
		 * return either the first or last list element.
		 */
		pd = forw ? Q2PD(mmd->mmd_pd_q.ql_next) :
		    Q2PD(mmd->mmd_pd_q.ql_prev);
	} else {
		/*
		 * We're called by mmd_get{next,prev}pdesc, and so
		 * return either the next or previous list element.
		 */
		pd = forw ? Q2PD(pd->pd_next) : Q2PD(pd->pd_prev);
	}

	while (pd != pd_head) {
		/* skip element if it has been removed */
		if (!(pd->pd_flags & PDESC_REM_DEFER))
			break;
		pd = forw ? Q2PD(pd->pd_next) : Q2PD(pd->pd_prev);
	}
	if (!mutex_held)
		mutex_exit(&(mmd->mmd_pd_slab_lock));

	/* return NULL if we're back at the beginning */
	if (pd == pd_head)
		pd = NULL;

	/* got an entry; copy descriptor info to caller */
	if (pd != NULL && pdi != NULL)
		PDI_COPY(&(pd->pd_pdi), pdi);

	ASSERT(pd == NULL || pd->pd_magic == PDESC_MAGIC);
	return (pd);

}
959 957
960 958 /*
961 959 * Return the first packet descriptor in the in-use list.
962 960 */
963 961 pdesc_t *
964 962 mmd_getfirstpdesc(multidata_t *mmd, pdescinfo_t *pdi)
965 963 {
966 964 return (mmd_getpdesc(mmd, NULL, pdi, 1, B_FALSE));
967 965 }
968 966
969 967 /*
970 968 * Return the last packet descriptor in the in-use list.
971 969 */
972 970 pdesc_t *
973 971 mmd_getlastpdesc(multidata_t *mmd, pdescinfo_t *pdi)
974 972 {
975 973 return (mmd_getpdesc(mmd, NULL, pdi, 0, B_FALSE));
976 974 }
977 975
978 976 /*
979 977 * Return the next packet descriptor in the in-use list.
980 978 */
981 979 pdesc_t *
982 980 mmd_getnextpdesc(pdesc_t *pd, pdescinfo_t *pdi)
983 981 {
984 982 return (mmd_getpdesc(pd->pd_slab->pds_mmd, pd, pdi, 1, B_FALSE));
985 983 }
986 984
987 985 /*
988 986 * Return the previous packet descriptor in the in-use list.
989 987 */
990 988 pdesc_t *
991 989 mmd_getprevpdesc(pdesc_t *pd, pdescinfo_t *pdi)
992 990 {
993 991 return (mmd_getpdesc(pd->pd_slab->pds_mmd, pd, pdi, 0, B_FALSE));
994 992 }
995 993
996 994 /*
997 995 * Check to see if pdi stretches over c_pdi; used to ensure that a packet
998 996 * descriptor's header and payload span may not be extended beyond the
999 997 * current boundaries.
1000 998 */
1001 999 static boolean_t
1002 1000 pdi_in_range(pdescinfo_t *pdi, pdescinfo_t *c_pdi)
1003 1001 {
1004 1002 int i;
1005 1003 struct pld_ary_s *pa = &pdi->pld_ary[0];
1006 1004 struct pld_ary_s *c_pa = &c_pdi->pld_ary[0];
1007 1005
1008 1006 if (pdi->hdr_base < c_pdi->hdr_base || pdi->hdr_lim > c_pdi->hdr_lim)
1009 1007 return (B_FALSE);
1010 1008
1011 1009 /*
1012 1010 * We don't allow the number of span to be reduced, for the sake
1013 1011 * of simplicity. Instead, we provide PDESC_PLD_SPAN_CLEAR() to
1014 1012 * clear a packet descriptor. Note that we allow the span count to
1015 1013 * be increased, and the bounds check for the new one happens
1016 1014 * in pbuf_ref_valid.
1017 1015 */
1018 1016 if (pdi->pld_cnt < c_pdi->pld_cnt)
1019 1017 return (B_FALSE);
1020 1018
1021 1019 /* compare only those which are currently defined */
1022 1020 for (i = 0; i < c_pdi->pld_cnt; i++, pa++, c_pa++) {
1023 1021 if (pa->pld_pbuf_idx != c_pa->pld_pbuf_idx ||
1024 1022 pa->pld_rptr < c_pa->pld_rptr ||
1025 1023 pa->pld_wptr > c_pa->pld_wptr)
1026 1024 return (B_FALSE);
1027 1025 }
1028 1026 return (B_TRUE);
1029 1027 }
1030 1028
1031 1029 /*
1032 1030 * Modify the layout of a packet descriptor.
1033 1031 */
1034 1032 pdesc_t *
1035 1033 mmd_adjpdesc(pdesc_t *pd, pdescinfo_t *pdi)
1036 1034 {
1037 1035 multidata_t *mmd;
1038 1036 pdescinfo_t *c_pdi;
1039 1037
1040 1038 ASSERT(pd != NULL);
1041 1039 ASSERT(pdi != NULL);
1042 1040 ASSERT(pd->pd_magic == PDESC_MAGIC);
1043 1041
1044 1042 mmd = pd->pd_slab->pds_mmd;
1045 1043 ASSERT(mmd->mmd_magic == MULTIDATA_MAGIC);
1046 1044
1047 1045 /* entry has been removed */
1048 1046 if (pd->pd_flags & PDESC_REM_DEFER)
1049 1047 return (NULL);
1050 1048
1051 1049 /* caller doesn't intend to specify any buffer reference? */
1052 1050 if (!(pdi->flags & PDESC_HAS_REF))
1053 1051 return (NULL);
1054 1052
1055 1053 /* do the references refer to invalid memory regions? */
1056 1054 if (!mmd_speed_over_safety &&
1057 1055 (((pdi->flags & PDESC_HBUF_REF) && !HBUF_REF_VALID(mmd, pdi)) ||
1058 1056 ((pdi->flags & PDESC_PBUF_REF) && !pbuf_ref_valid(mmd, pdi))))
1059 1057 return (NULL);
1060 1058
1061 1059 /* they're not subsets of current references? */
1062 1060 c_pdi = &(pd->pd_pdi);
1063 1061 if (!pdi_in_range(pdi, c_pdi))
1064 1062 return (NULL);
1065 1063
1066 1064 /* copy over the descriptor info from caller */
1067 1065 PDI_COPY(pdi, c_pdi);
1068 1066
1069 1067 return (pd);
1070 1068 }
1071 1069
/*
 * Copy the contents of a packet descriptor into a new buffer. If the
 * descriptor points to more than one buffer fragments, the contents
 * of both fragments will be joined, with the header buffer fragment
 * preceding the payload buffer fragment(s).
 *
 * Returns the new mblk on success, or NULL if the descriptor has been
 * marked removed or if allocb() fails.
 */
mblk_t *
mmd_transform(pdesc_t *pd)
{
	multidata_t *mmd;
	pdescinfo_t *pdi;
	mblk_t *mp;
	int h_size = 0, p_size = 0;
	int i, len;

	ASSERT(pd != NULL);
	ASSERT(pd->pd_magic == PDESC_MAGIC);

	mmd = pd->pd_slab->pds_mmd;
	ASSERT(mmd->mmd_magic == MULTIDATA_MAGIC);

	/* entry has been removed */
	if (pd->pd_flags & PDESC_REM_DEFER)
		return (NULL);

	/*
	 * Hold the slab lock across sizing and copying so the descriptor's
	 * spans cannot change between the two passes.
	 */
	mutex_enter(&mmd->mmd_pd_slab_lock);
	pdi = &(pd->pd_pdi);
	if (pdi->flags & PDESC_HBUF_REF)
		h_size = PDESC_HDRL(pdi);
	if (pdi->flags & PDESC_PBUF_REF) {
		/* total payload size is the sum of all span sizes */
		for (i = 0; i < pdi->pld_cnt; i++)
			p_size += PDESC_PLD_SPAN_SIZE(pdi, i);
	}

	/* allocate space large enough to hold the fragment(s) */
	ASSERT(h_size + p_size >= 0);
	if ((mp = allocb(h_size + p_size, BPRI_HI)) == NULL) {
		mutex_exit(&mmd->mmd_pd_slab_lock);
		return (NULL);
	}

	/* copy over the header fragment */
	if ((pdi->flags & PDESC_HBUF_REF) && h_size > 0) {
		bcopy(pdi->hdr_rptr, mp->b_wptr, h_size);
		mp->b_wptr += h_size;
	}

	/* copy over the payload fragment */
	if ((pdi->flags & PDESC_PBUF_REF) && p_size > 0) {
		for (i = 0; i < pdi->pld_cnt; i++) {
			len = PDESC_PLD_SPAN_SIZE(pdi, i);
			/* empty spans contribute nothing */
			if (len > 0) {
				bcopy(pdi->pld_ary[i].pld_rptr,
				    mp->b_wptr, len);
				mp->b_wptr += len;
			}
		}
	}

	mutex_exit(&mmd->mmd_pd_slab_lock);
	return (mp);
}
1134 1132
/*
 * Return a chain of mblks representing the Multidata packet.
 *
 * Unlike mmd_transform(), no data is copied: the header and each
 * non-empty payload span are dupb()'d and linked into a single chain
 * (header first).  Returns NULL if the descriptor has been removed or
 * if any dupb() fails (in which case any partial chain is freed).
 */
mblk_t *
mmd_transform_link(pdesc_t *pd)
{
	multidata_t *mmd;
	pdescinfo_t *pdi;
	mblk_t *nmp = NULL;

	ASSERT(pd != NULL);
	ASSERT(pd->pd_magic == PDESC_MAGIC);

	mmd = pd->pd_slab->pds_mmd;
	ASSERT(mmd->mmd_magic == MULTIDATA_MAGIC);

	/* entry has been removed */
	if (pd->pd_flags & PDESC_REM_DEFER)
		return (NULL);

	pdi = &(pd->pd_pdi);

	/* duplicate header buffer */
	if ((pdi->flags & PDESC_HBUF_REF)) {
		if ((nmp = dupb(mmd->mmd_hbuf)) == NULL)
			return (NULL);
		/* narrow the dup to just this descriptor's header span */
		nmp->b_rptr = pdi->hdr_rptr;
		nmp->b_wptr = pdi->hdr_wptr;
	}

	/* duplicate payload buffer(s) */
	if (pdi->flags & PDESC_PBUF_REF) {
		int i;
		mblk_t *mp;
		struct pld_ary_s *pa = &pdi->pld_ary[0];

		mutex_enter(&mmd->mmd_pd_slab_lock);
		for (i = 0; i < pdi->pld_cnt; i++, pa++) {
			ASSERT(mmd->mmd_pbuf[pa->pld_pbuf_idx] != NULL);

			/* skip empty ones */
			if (PDESC_PLD_SPAN_SIZE(pdi, i) == 0)
				continue;

			mp = dupb(mmd->mmd_pbuf[pa->pld_pbuf_idx]);
			if (mp == NULL) {
				/* free whatever we've chained so far */
				if (nmp != NULL)
					freemsg(nmp);
				mutex_exit(&mmd->mmd_pd_slab_lock);
				return (NULL);
			}
			/* narrow the dup to this payload span */
			mp->b_rptr = pa->pld_rptr;
			mp->b_wptr = pa->pld_wptr;
			if (nmp == NULL)
				nmp = mp;
			else
				linkb(nmp, mp);
		}
		mutex_exit(&mmd->mmd_pd_slab_lock);
	}

	return (nmp);
}
1198 1196
/*
 * Return duplicate message block(s) of the associated buffer(s).
 *
 * If hmp is non-NULL, *hmp gets a dup of the header buffer (or NULL if
 * there is none).  If pmp is non-NULL, *pmp gets a chain of dups of all
 * payload buffers.  Returns 0 on success; on failure returns -1 with any
 * partially-created dups freed.
 */
int
mmd_dupbufs(multidata_t *mmd, mblk_t **hmp, mblk_t **pmp)
{
	ASSERT(mmd != NULL);
	ASSERT(mmd->mmd_magic == MULTIDATA_MAGIC);

	if (hmp != NULL) {
		*hmp = NULL;
		/* only fail if a header buffer exists but can't be dup'd */
		if (mmd->mmd_hbuf != NULL &&
		    (*hmp = dupb(mmd->mmd_hbuf)) == NULL)
			return (-1);
	}

	if (pmp != NULL) {
		int i;
		mblk_t *mp;

		/* hold the slab lock so the pbuf array stays stable */
		mutex_enter(&mmd->mmd_pd_slab_lock);
		*pmp = NULL;
		for (i = 0; i < mmd->mmd_pbuf_cnt; i++) {
			ASSERT(mmd->mmd_pbuf[i] != NULL);
			mp = dupb(mmd->mmd_pbuf[i]);
			if (mp == NULL) {
				/* undo the header dup and partial chain */
				if (hmp != NULL && *hmp != NULL)
					freeb(*hmp);
				if (*pmp != NULL)
					freemsg(*pmp);
				mutex_exit(&mmd->mmd_pd_slab_lock);
				return (-1);
			}
			if (*pmp == NULL)
				*pmp = mp;
			else
				linkb(*pmp, mp);
		}
		mutex_exit(&mmd->mmd_pd_slab_lock);
	}

	return (0);
}
1242 1240
1243 1241 /*
1244 1242 * Return the layout of a packet descriptor.
1245 1243 */
1246 1244 int
1247 1245 mmd_getpdescinfo(pdesc_t *pd, pdescinfo_t *pdi)
1248 1246 {
1249 1247 ASSERT(pd != NULL);
1250 1248 ASSERT(pd->pd_magic == PDESC_MAGIC);
1251 1249 ASSERT(pd->pd_slab != NULL);
1252 1250 ASSERT(pd->pd_slab->pds_mmd->mmd_magic == MULTIDATA_MAGIC);
1253 1251 ASSERT(pdi != NULL);
1254 1252
1255 1253 /* entry has been removed */
1256 1254 if (pd->pd_flags & PDESC_REM_DEFER)
1257 1255 return (-1);
1258 1256
1259 1257 /* copy descriptor info to caller */
1260 1258 PDI_COPY(&(pd->pd_pdi), pdi);
1261 1259
1262 1260 return (0);
1263 1261 }
1264 1262
1265 1263 /*
1266 1264 * Add a global or local attribute to a Multidata. Global attribute
1267 1265 * association is specified by a NULL packet descriptor.
1268 1266 */
1269 1267 pattr_t *
1270 1268 mmd_addpattr(multidata_t *mmd, pdesc_t *pd, pattrinfo_t *pai,
1271 1269 boolean_t persistent, int kmflags)
1272 1270 {
1273 1271 patbkt_t **tbl_p;
1274 1272 patbkt_t *tbl, *o_tbl;
1275 1273 patbkt_t *bkt;
1276 1274 pattr_t *pa;
1277 1275 uint_t size;
1278 1276
1279 1277 ASSERT(mmd != NULL);
1280 1278 ASSERT(mmd->mmd_magic == MULTIDATA_MAGIC);
1281 1279 ASSERT(pd == NULL || pd->pd_magic == PDESC_MAGIC);
1282 1280 ASSERT(pai != NULL);
1283 1281
1284 1282 /* pointer to the attribute hash table (local or global) */
1285 1283 tbl_p = pd != NULL ? &(pd->pd_pattbl) : &(mmd->mmd_pattbl);
1286 1284
↓ open down ↓ |
1248 lines elided |
↑ open up ↑ |
1287 1285 /*
1288 1286 * See if the hash table has not yet been created; if so,
1289 1287 * we create the table and store its address atomically.
1290 1288 */
1291 1289 if ((tbl = *tbl_p) == NULL) {
1292 1290 tbl = kmem_cache_alloc(pattbl_cache, kmflags);
1293 1291 if (tbl == NULL)
1294 1292 return (NULL);
1295 1293
1296 1294 /* if someone got there first, use his table instead */
1297 - if ((o_tbl = casptr(tbl_p, NULL, tbl)) != NULL) {
1295 + if ((o_tbl = atomic_cas_ptr(tbl_p, NULL, tbl)) != NULL) {
1298 1296 kmem_cache_free(pattbl_cache, tbl);
1299 1297 tbl = o_tbl;
1300 1298 }
1301 1299 }
1302 1300
1303 1301 ASSERT(tbl->pbkt_tbl_sz > 0);
1304 1302 bkt = &(tbl[PATTBL_HASH(pai->type, tbl->pbkt_tbl_sz)]);
1305 1303
1306 1304 /* attribute of the same type already exists? */
1307 1305 if ((pa = mmd_find_pattr(bkt, pai->type)) != NULL)
1308 1306 return (NULL);
1309 1307
1310 1308 size = sizeof (*pa) + pai->len;
1311 1309 if ((pa = kmem_zalloc(size, kmflags)) == NULL)
1312 1310 return (NULL);
1313 1311
1314 1312 pa->pat_magic = PATTR_MAGIC;
1315 1313 pa->pat_lock = &(bkt->pbkt_lock);
1316 1314 pa->pat_mmd = mmd;
1317 1315 pa->pat_buflen = size;
1318 1316 pa->pat_type = pai->type;
1319 1317 pai->buf = pai->len > 0 ? ((uchar_t *)(pa + 1)) : NULL;
1320 1318
1321 1319 if (persistent)
1322 1320 pa->pat_flags = PATTR_PERSIST;
1323 1321
1324 1322 /* insert attribute at end of hash chain */
1325 1323 mutex_enter(&(bkt->pbkt_lock));
1326 1324 insque(&(pa->pat_next), bkt->pbkt_pattr_q.ql_prev);
1327 1325 mutex_exit(&(bkt->pbkt_lock));
1328 1326
1329 1327 return (pa);
1330 1328 }
1331 1329
1332 1330 /*
1333 1331 * Attribute hash table kmem cache constructor routine.
1334 1332 */
1335 1333 /* ARGSUSED */
1336 1334 static int
1337 1335 pattbl_constructor(void *buf, void *cdrarg, int kmflags)
1338 1336 {
1339 1337 patbkt_t *bkt;
1340 1338 uint_t tbl_sz = (uint_t)(uintptr_t)cdrarg;
1341 1339 uint_t i;
1342 1340
1343 1341 ASSERT(tbl_sz > 0); /* table size can't be zero */
1344 1342
1345 1343 for (i = 0, bkt = (patbkt_t *)buf; i < tbl_sz; i++, bkt++) {
1346 1344 mutex_init(&(bkt->pbkt_lock), NULL, MUTEX_DRIVER, NULL);
1347 1345 QL_INIT(&(bkt->pbkt_pattr_q));
1348 1346
1349 1347 /* first bucket contains the table size */
1350 1348 bkt->pbkt_tbl_sz = i == 0 ? tbl_sz : 0;
1351 1349 }
1352 1350 return (0);
1353 1351 }
1354 1352
1355 1353 /*
1356 1354 * Attribute hash table kmem cache destructor routine.
1357 1355 */
1358 1356 /* ARGSUSED */
1359 1357 static void
1360 1358 pattbl_destructor(void *buf, void *cdrarg)
1361 1359 {
1362 1360 patbkt_t *bkt;
1363 1361 uint_t tbl_sz = (uint_t)(uintptr_t)cdrarg;
1364 1362 uint_t i;
1365 1363
1366 1364 ASSERT(tbl_sz > 0); /* table size can't be zero */
1367 1365
1368 1366 for (i = 0, bkt = (patbkt_t *)buf; i < tbl_sz; i++, bkt++) {
1369 1367 mutex_destroy(&(bkt->pbkt_lock));
1370 1368 ASSERT(bkt->pbkt_pattr_q.ql_next == &(bkt->pbkt_pattr_q));
1371 1369 ASSERT(i > 0 || bkt->pbkt_tbl_sz == tbl_sz);
1372 1370 }
1373 1371 }
1374 1372
/*
 * Destroy an attribute hash table, called by mmd_rempdesc or during free.
 *
 * Frees every attribute (including deferred-removed ones) in every
 * bucket, returns the table to its kmem cache and clears *tbl.  The
 * caller must be the sole user of the table at this point.
 */
static void
mmd_destroy_pattbl(patbkt_t **tbl)
{
	patbkt_t *bkt;
	pattr_t *pa, *pa_next;
	uint_t i, tbl_sz;

	ASSERT(tbl != NULL);
	bkt = *tbl;
	tbl_sz = bkt->pbkt_tbl_sz;

	/* make sure caller passes in the first bucket */
	ASSERT(tbl_sz > 0);

	/* destroy the contents of each bucket */
	for (i = 0; i < tbl_sz; i++, bkt++) {
		/* we ought to be exclusive at this point */
		ASSERT(MUTEX_NOT_HELD(&(bkt->pbkt_lock)));

		pa = Q2PATTR(bkt->pbkt_pattr_q.ql_next);
		while (pa != Q2PATTR(&(bkt->pbkt_pattr_q))) {
			ASSERT(pa->pat_magic == PATTR_MAGIC);
			/* save the successor before freeing this entry */
			pa_next = Q2PATTR(pa->pat_next);
			remque(&(pa->pat_next));
			kmem_free(pa, pa->pat_buflen);
			pa = pa_next;
		}
	}

	kmem_cache_free(pattbl_cache, *tbl);
	*tbl = NULL;

	/* commit all previous stores */
	membar_producer();
}
1413 1411
/*
 * Copy the contents of an attribute hash table, called by mmd_copy.
 *
 * Walks every bucket of src_tbl and re-adds each live (not
 * deferred-removed) attribute to the new Multidata n_mmd (and, for local
 * attributes, to the new descriptor n_pd) via mmd_addpattr(), then copies
 * the attribute payload.  Returns 0 on success, -1 on allocation failure.
 */
static int
mmd_copy_pattbl(patbkt_t *src_tbl, multidata_t *n_mmd, pdesc_t *n_pd,
    int kmflags)
{
	patbkt_t *bkt;
	pattr_t *pa;
	pattrinfo_t pai;
	uint_t i, tbl_sz;

	ASSERT(src_tbl != NULL);
	bkt = src_tbl;
	tbl_sz = bkt->pbkt_tbl_sz;

	/* make sure caller passes in the first bucket */
	ASSERT(tbl_sz > 0);

	for (i = 0; i < tbl_sz; i++, bkt++) {
		/* lock the source bucket while we replicate its entries */
		mutex_enter(&(bkt->pbkt_lock));
		pa = Q2PATTR(bkt->pbkt_pattr_q.ql_next);
		while (pa != Q2PATTR(&(bkt->pbkt_pattr_q))) {
			pattr_t *pa_next = Q2PATTR(pa->pat_next);

			/* skip if it's removed */
			if (pa->pat_flags & PATTR_REM_DEFER) {
				pa = pa_next;
				continue;
			}

			/* pat_buflen includes the pattr_t header itself */
			pai.type = pa->pat_type;
			pai.len = pa->pat_buflen - sizeof (*pa);
			if (mmd_addpattr(n_mmd, n_pd, &pai, (pa->pat_flags &
			    PATTR_PERSIST) != 0, kmflags) == NULL) {
				mutex_exit(&(bkt->pbkt_lock));
				return (-1);
			}

			/* copy over the contents */
			if (pai.buf != NULL)
				bcopy(pa + 1, pai.buf, pai.len);

			pa = pa_next;
		}
		mutex_exit(&(bkt->pbkt_lock));
	}

	return (0);
}
1464 1462
1465 1463 /*
1466 1464 * Search for an attribute type within an attribute hash bucket.
1467 1465 */
1468 1466 static pattr_t *
1469 1467 mmd_find_pattr(patbkt_t *bkt, uint_t type)
1470 1468 {
1471 1469 pattr_t *pa_head, *pa;
1472 1470
1473 1471 mutex_enter(&(bkt->pbkt_lock));
1474 1472 pa_head = Q2PATTR(&(bkt->pbkt_pattr_q));
1475 1473 pa = Q2PATTR(bkt->pbkt_pattr_q.ql_next);
1476 1474
1477 1475 while (pa != pa_head) {
1478 1476 ASSERT(pa->pat_magic == PATTR_MAGIC);
1479 1477
1480 1478 /* return a match; we treat removed entry as non-existent */
1481 1479 if (pa->pat_type == type && !(pa->pat_flags & PATTR_REM_DEFER))
1482 1480 break;
1483 1481 pa = Q2PATTR(pa->pat_next);
1484 1482 }
1485 1483 mutex_exit(&(bkt->pbkt_lock));
1486 1484
1487 1485 return (pa == pa_head ? NULL : pa);
1488 1486 }
1489 1487
/*
 * Remove an attribute from a Multidata.
 *
 * Persistent attributes are never removed.  If the underlying dblk is
 * shared (db_ref > 1) the attribute is only flagged PATTR_REM_DEFER;
 * otherwise it is unlinked and freed immediately.
 */
void
mmd_rempattr(pattr_t *pa)
{
	/* grab the lock pointer first; pa may be freed below */
	kmutex_t *pat_lock = pa->pat_lock;

	ASSERT(pa->pat_magic == PATTR_MAGIC);

	/* ignore if attribute was marked as persistent */
	if ((pa->pat_flags & PATTR_PERSIST) != 0)
		return;

	mutex_enter(pat_lock);
	/*
	 * We can't deallocate the associated resources if the Multidata
	 * is shared with other threads, because it's possible that the
	 * attribute handle value is held by those threads.  That's why
	 * we simply mark the entry as "removed".  If there are no other
	 * threads, then we free the attribute.
	 */
	if (pa->pat_mmd->mmd_dp->db_ref > 1) {
		pa->pat_flags |= PATTR_REM_DEFER;
	} else {
		remque(&(pa->pat_next));
		kmem_free(pa, pa->pat_buflen);
	}
	mutex_exit(pat_lock);
}
1520 1518
1521 1519 /*
1522 1520 * Find an attribute (according to its type) and return its handle.
1523 1521 */
1524 1522 pattr_t *
1525 1523 mmd_getpattr(multidata_t *mmd, pdesc_t *pd, pattrinfo_t *pai)
1526 1524 {
1527 1525 patbkt_t *tbl, *bkt;
1528 1526 pattr_t *pa;
1529 1527
1530 1528 ASSERT(mmd != NULL);
1531 1529 ASSERT(mmd->mmd_magic == MULTIDATA_MAGIC);
1532 1530 ASSERT(pai != NULL);
1533 1531
1534 1532 /* get the right attribute hash table (local or global) */
1535 1533 tbl = pd != NULL ? pd->pd_pattbl : mmd->mmd_pattbl;
1536 1534
1537 1535 /* attribute hash table doesn't exist? */
1538 1536 if (tbl == NULL)
1539 1537 return (NULL);
1540 1538
1541 1539 ASSERT(tbl->pbkt_tbl_sz > 0);
1542 1540 bkt = &(tbl[PATTBL_HASH(pai->type, tbl->pbkt_tbl_sz)]);
1543 1541
1544 1542 if ((pa = mmd_find_pattr(bkt, pai->type)) != NULL) {
1545 1543 ASSERT(pa->pat_buflen >= sizeof (*pa));
1546 1544 pai->len = pa->pat_buflen - sizeof (*pa);
1547 1545 pai->buf = pai->len > 0 ?
1548 1546 (uchar_t *)pa + sizeof (pattr_t) : NULL;
1549 1547 }
1550 1548 ASSERT(pa == NULL || pa->pat_magic == PATTR_MAGIC);
1551 1549 return (pa);
1552 1550 }
1553 1551
/*
 * Return total size of buffers and total size of areas referenced
 * by all in-use (unremoved) packet descriptors.
 *
 * Either output pointer may be NULL to skip that computation.  The slab
 * lock is held for the whole walk, so mmd_getpdesc() is called with
 * mutex_held == B_TRUE.
 */
void
mmd_getsize(multidata_t *mmd, uint_t *ptotal, uint_t *pinuse)
{
	pdesc_t *pd;
	pdescinfo_t *pdi;
	int i;

	ASSERT(mmd != NULL);
	ASSERT(mmd->mmd_magic == MULTIDATA_MAGIC);

	mutex_enter(&mmd->mmd_pd_slab_lock);
	if (ptotal != NULL) {
		*ptotal = 0;

		/* total is the sum of the header and all payload buffers */
		if (mmd->mmd_hbuf != NULL)
			*ptotal += MBLKL(mmd->mmd_hbuf);

		for (i = 0; i < mmd->mmd_pbuf_cnt; i++) {
			ASSERT(mmd->mmd_pbuf[i] != NULL);
			*ptotal += MBLKL(mmd->mmd_pbuf[i]);
		}
	}
	if (pinuse != NULL) {
		*pinuse = 0;

		/* first pdesc */
		pd = mmd_getpdesc(mmd, NULL, NULL, 1, B_TRUE);
		while (pd != NULL) {
			pdi = &pd->pd_pdi;

			/* next pdesc */
			pd = mmd_getpdesc(mmd, pd, NULL, 1, B_TRUE);

			/* skip over removed descriptor */
			if (pdi->flags & PDESC_REM_DEFER)
				continue;

			if (pdi->flags & PDESC_HBUF_REF)
				*pinuse += PDESC_HDRL(pdi);

			if (pdi->flags & PDESC_PBUF_REF) {
				/* add the length of every payload span */
				for (i = 0; i < pdi->pld_cnt; i++)
					*pinuse += PDESC_PLDL(pdi, i);
			}
		}
	}
	mutex_exit(&mmd->mmd_pd_slab_lock);
}
↓ open down ↓ |
298 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX