Print this page
6583 remove whole-process swapping
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/cmd/mdb/common/modules/genunix/leaky_subr.c
+++ new/usr/src/cmd/mdb/common/modules/genunix/leaky_subr.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License, Version 1.0 only
6 6 * (the "License"). You may not use this file except in compliance
7 7 * with the License.
8 8 *
9 9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10 10 * or http://www.opensolaris.org/os/licensing.
11 11 * See the License for the specific language governing permissions
12 12 * and limitations under the License.
13 13 *
14 14 * When distributing Covered Code, include this CDDL HEADER in each
15 15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16 16 * If applicable, add the following below this CDDL HEADER, with the
↓ open down ↓ |
16 lines elided |
↑ open up ↑ |
17 17 * fields enclosed by brackets "[]" replaced with your own identifying
18 18 * information: Portions Copyright [yyyy] [name of copyright owner]
19 19 *
20 20 * CDDL HEADER END
21 21 */
22 22 /*
23 23 * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
24 24 * Use is subject to license terms.
25 25 */
26 26
27 -#pragma ident "%Z%%M% %I% %E% SMI"
28 -
29 27 #include <mdb/mdb_param.h>
30 28 #include <mdb/mdb_modapi.h>
31 29
32 30 #include <sys/fs/ufs_inode.h>
33 31 #include <sys/kmem_impl.h>
34 32 #include <sys/vmem_impl.h>
35 33 #include <sys/modctl.h>
36 34 #include <sys/kobj.h>
37 35 #include <sys/kobj_impl.h>
38 36 #include <vm/seg_vn.h>
39 37 #include <vm/as.h>
40 38 #include <vm/seg_map.h>
41 39 #include <mdb/mdb_ctf.h>
42 40
43 41 #include "kmem.h"
44 42 #include "leaky_impl.h"
45 43
46 44 /*
47 45 * This file defines the genunix target for leaky.c. There are three types
48 46 * of buffers in the kernel's heap: TYPE_VMEM, for kmem_oversize allocations,
49 47 * TYPE_KMEM, for kmem_cache_alloc() allocations bufctl_audit_ts, and
50 48 * TYPE_CACHE, for kmem_cache_alloc() allocation without bufctl_audit_ts.
51 49 *
52 50 * See "leaky_impl.h" for the target interface definition.
53 51 */
54 52
/* leak_bufctl_t type tags (lkb_type) */
#define	TYPE_VMEM	0	/* lkb_data is the vmem_seg's size */
#define	TYPE_CACHE	1	/* lkb_cid is the bufctl's cache */
#define	TYPE_KMEM	2	/* lkb_cid is the bufctl's cache */

/* low-bit tags packed into lkm_bufctl alongside the pointer */
#define	LKM_CTL_BUFCTL	0	/* normal allocation, PTR is bufctl */
#define	LKM_CTL_VMSEG	1	/* oversize allocation, PTR is vmem_seg_t */
#define	LKM_CTL_CACHE	2	/* normal alloc, non-debug, PTR is cache */
#define	LKM_CTL_MASK	3L

/* pack/unpack a pointer and its type tag in a single uintptr_t */
#define	LKM_CTL(ptr, type)	(LKM_CTLPTR(ptr) | (type))
#define	LKM_CTLPTR(ctl)		((uintptr_t)(ctl) & ~(LKM_CTL_MASK))
#define	LKM_CTLTYPE(ctl)	((uintptr_t)(ctl) & (LKM_CTL_MASK))

static int kmem_lite_count = 0;	/* cache of the kernel's version */
69 67
70 68 /*ARGSUSED*/
71 69 static int
72 70 leaky_mtab(uintptr_t addr, const kmem_bufctl_audit_t *bcp, leak_mtab_t **lmp)
73 71 {
74 72 leak_mtab_t *lm = (*lmp)++;
75 73
76 74 lm->lkm_base = (uintptr_t)bcp->bc_addr;
77 75 lm->lkm_bufctl = LKM_CTL(addr, LKM_CTL_BUFCTL);
78 76
79 77 return (WALK_NEXT);
80 78 }
81 79
82 80 /*ARGSUSED*/
83 81 static int
84 82 leaky_mtab_addr(uintptr_t addr, void *ignored, leak_mtab_t **lmp)
85 83 {
86 84 leak_mtab_t *lm = (*lmp)++;
87 85
88 86 lm->lkm_base = addr;
89 87
90 88 return (WALK_NEXT);
91 89 }
92 90
93 91 static int
94 92 leaky_seg(uintptr_t addr, const vmem_seg_t *seg, leak_mtab_t **lmp)
95 93 {
96 94 leak_mtab_t *lm = (*lmp)++;
97 95
98 96 lm->lkm_base = seg->vs_start;
99 97 lm->lkm_limit = seg->vs_end;
100 98 lm->lkm_bufctl = LKM_CTL(addr, LKM_CTL_VMSEG);
101 99
102 100 return (WALK_NEXT);
103 101 }
104 102
105 103 static int
106 104 leaky_vmem_interested(const vmem_t *vmem)
107 105 {
108 106 if (strcmp(vmem->vm_name, "kmem_oversize") != 0 &&
109 107 strcmp(vmem->vm_name, "static_alloc") != 0)
110 108 return (0);
111 109 return (1);
112 110 }
113 111
114 112 static int
115 113 leaky_vmem(uintptr_t addr, const vmem_t *vmem, leak_mtab_t **lmp)
116 114 {
117 115 if (!leaky_vmem_interested(vmem))
118 116 return (WALK_NEXT);
119 117
120 118 if (mdb_pwalk("vmem_alloc", (mdb_walk_cb_t)leaky_seg, lmp, addr) == -1)
121 119 mdb_warn("can't walk vmem_alloc for kmem_oversize (%p)", addr);
122 120
123 121 return (WALK_NEXT);
124 122 }
125 123
126 124 /*ARGSUSED*/
127 125 static int
128 126 leaky_estimate_vmem(uintptr_t addr, const vmem_t *vmem, size_t *est)
129 127 {
130 128 if (!leaky_vmem_interested(vmem))
131 129 return (WALK_NEXT);
132 130
133 131 *est += (int)(vmem->vm_kstat.vk_alloc.value.ui64 -
134 132 vmem->vm_kstat.vk_free.value.ui64);
135 133
136 134 return (WALK_NEXT);
137 135 }
138 136
139 137 static int
140 138 leaky_interested(const kmem_cache_t *c)
141 139 {
142 140 vmem_t vmem;
143 141
144 142 /*
145 143 * ignore HAT-related caches that happen to derive from kmem_default
146 144 */
147 145 if (strcmp(c->cache_name, "sfmmu1_cache") == 0 ||
148 146 strcmp(c->cache_name, "sf_hment_cache") == 0 ||
149 147 strcmp(c->cache_name, "pa_hment_cache") == 0)
150 148 return (0);
151 149
152 150 if (mdb_vread(&vmem, sizeof (vmem), (uintptr_t)c->cache_arena) == -1) {
153 151 mdb_warn("cannot read arena %p for cache '%s'",
154 152 (uintptr_t)c->cache_arena, c->cache_name);
155 153 return (0);
156 154 }
157 155
158 156 /*
159 157 * If this cache isn't allocating from the kmem_default,
160 158 * kmem_firewall, or static vmem arenas, we're not interested.
161 159 */
162 160 if (strcmp(vmem.vm_name, "kmem_default") != 0 &&
163 161 strcmp(vmem.vm_name, "kmem_firewall") != 0 &&
164 162 strcmp(vmem.vm_name, "static") != 0)
165 163 return (0);
166 164
167 165 return (1);
168 166 }
169 167
170 168 static int
171 169 leaky_estimate(uintptr_t addr, const kmem_cache_t *c, size_t *est)
172 170 {
173 171 if (!leaky_interested(c))
174 172 return (WALK_NEXT);
175 173
176 174 *est += kmem_estimate_allocated(addr, c);
177 175
178 176 return (WALK_NEXT);
179 177 }
180 178
181 179 /*ARGSUSED*/
182 180 static int
183 181 leaky_cache(uintptr_t addr, const kmem_cache_t *c, leak_mtab_t **lmp)
184 182 {
185 183 leak_mtab_t *lm = *lmp;
186 184 mdb_walk_cb_t cb;
187 185 const char *walk;
188 186 int audit = (c->cache_flags & KMF_AUDIT);
189 187
190 188 if (!leaky_interested(c))
191 189 return (WALK_NEXT);
192 190
193 191 if (audit) {
194 192 walk = "bufctl";
195 193 cb = (mdb_walk_cb_t)leaky_mtab;
196 194 } else {
197 195 walk = "kmem";
198 196 cb = (mdb_walk_cb_t)leaky_mtab_addr;
199 197 }
200 198 if (mdb_pwalk(walk, cb, lmp, addr) == -1) {
201 199 mdb_warn("can't walk kmem for cache %p (%s)", addr,
202 200 c->cache_name);
203 201 return (WALK_DONE);
204 202 }
205 203
206 204 for (; lm < *lmp; lm++) {
207 205 lm->lkm_limit = lm->lkm_base + c->cache_bufsize;
208 206 if (!audit)
209 207 lm->lkm_bufctl = LKM_CTL(addr, LKM_CTL_CACHE);
210 208 }
211 209
212 210 return (WALK_NEXT);
213 211 }
214 212
215 213 /*ARGSUSED*/
216 214 static int
217 215 leaky_scan_buffer(uintptr_t addr, const void *ignored, const kmem_cache_t *c)
218 216 {
219 217 leaky_grep(addr, c->cache_bufsize);
220 218
221 219 /*
222 220 * free, constructed KMF_LITE buffers keep their first uint64_t in
223 221 * their buftag's redzone.
224 222 */
225 223 if (c->cache_flags & KMF_LITE) {
226 224 /* LINTED alignment */
227 225 kmem_buftag_t *btp = KMEM_BUFTAG(c, addr);
228 226 leaky_grep((uintptr_t)&btp->bt_redzone,
229 227 sizeof (btp->bt_redzone));
230 228 }
231 229
232 230 return (WALK_NEXT);
233 231 }
234 232
235 233 /*ARGSUSED*/
236 234 static int
237 235 leaky_scan_cache(uintptr_t addr, const kmem_cache_t *c, void *ignored)
238 236 {
239 237 if (!leaky_interested(c))
240 238 return (WALK_NEXT);
241 239
242 240 /*
243 241 * Scan all of the free, constructed buffers, since they may have
244 242 * pointers to allocated objects.
245 243 */
246 244 if (mdb_pwalk("freemem_constructed",
247 245 (mdb_walk_cb_t)leaky_scan_buffer, (void *)c, addr) == -1) {
248 246 mdb_warn("can't walk freemem_constructed for cache %p (%s)",
249 247 addr, c->cache_name);
250 248 return (WALK_DONE);
251 249 }
252 250
253 251 return (WALK_NEXT);
254 252 }
255 253
256 254 /*ARGSUSED*/
257 255 static int
258 256 leaky_modctl(uintptr_t addr, const struct modctl *m, int *ignored)
259 257 {
260 258 struct module mod;
261 259 char name[MODMAXNAMELEN];
262 260
263 261 if (m->mod_mp == NULL)
264 262 return (WALK_NEXT);
265 263
266 264 if (mdb_vread(&mod, sizeof (mod), (uintptr_t)m->mod_mp) == -1) {
267 265 mdb_warn("couldn't read modctl %p's module", addr);
268 266 return (WALK_NEXT);
269 267 }
270 268
↓ open down ↓ |
232 lines elided |
↑ open up ↑ |
271 269 if (mdb_readstr(name, sizeof (name), (uintptr_t)m->mod_modname) == -1)
272 270 (void) mdb_snprintf(name, sizeof (name), "0x%p", addr);
273 271
274 272 leaky_grep((uintptr_t)m->mod_mp, sizeof (struct module));
275 273 leaky_grep((uintptr_t)mod.data, mod.data_size);
276 274 leaky_grep((uintptr_t)mod.bss, mod.bss_size);
277 275
278 276 return (WALK_NEXT);
279 277 }
280 278
279 +/*ARGSUSED*/
281 280 static int
282 281 leaky_thread(uintptr_t addr, const kthread_t *t, unsigned long *pagesize)
283 282 {
284 283 uintptr_t size, base = (uintptr_t)t->t_stkbase;
285 284 uintptr_t stk = (uintptr_t)t->t_stk;
286 -
287 - /*
288 - * If this thread isn't in memory, we can't look at its stack. This
289 - * may result in false positives, so we print a warning.
290 - */
291 - if (!(t->t_schedflag & TS_LOAD)) {
292 - mdb_printf("findleaks: thread %p's stack swapped out; "
293 - "false positives possible\n", addr);
294 - return (WALK_NEXT);
295 - }
296 285
297 286 if (t->t_state != TS_FREE)
298 287 leaky_grep(base, stk - base);
299 288
300 289 /*
301 290 * There is always gunk hanging out between t_stk and the page
302 291 * boundary. If this thread structure wasn't kmem allocated,
303 292 * this will include the thread structure itself. If the thread
304 293 * _is_ kmem allocated, we'll be able to get to it via allthreads.
305 294 */
306 295 size = *pagesize - (stk & (*pagesize - 1));
307 296
308 297 leaky_grep(stk, size);
309 298
310 299 return (WALK_NEXT);
311 300 }
312 301
313 302 /*ARGSUSED*/
314 303 static int
315 304 leaky_kstat(uintptr_t addr, vmem_seg_t *seg, void *ignored)
316 305 {
317 306 leaky_grep(seg->vs_start, seg->vs_end - seg->vs_start);
318 307
319 308 return (WALK_NEXT);
320 309 }
321 310
322 311 static void
323 312 leaky_kludge(void)
324 313 {
325 314 GElf_Sym sym;
326 315 mdb_ctf_id_t id, rid;
327 316
328 317 int max_mem_nodes;
329 318 uintptr_t *counters;
330 319 size_t ncounters;
331 320 ssize_t hwpm_size;
332 321 int idx;
333 322
334 323 /*
335 324 * Because of DR, the page counters (which live in the kmem64 segment)
336 325 * can point into kmem_alloc()ed memory. The "page_counters" array
337 326 * is multi-dimensional, and each entry points to an array of
338 327 * "hw_page_map_t"s which is "max_mem_nodes" in length.
339 328 *
340 329 * To keep this from having too much grotty knowledge of internals,
341 330 * we use CTF data to get the size of the structure. For simplicity,
342 331 * we treat the page_counters array as a flat array of pointers, and
343 332 * use its size to determine how much to scan. Unused entries will
344 333 * be NULL.
345 334 */
346 335 if (mdb_lookup_by_name("page_counters", &sym) == -1) {
347 336 mdb_warn("unable to lookup page_counters");
348 337 return;
349 338 }
350 339
351 340 if (mdb_readvar(&max_mem_nodes, "max_mem_nodes") == -1) {
352 341 mdb_warn("unable to read max_mem_nodes");
353 342 return;
354 343 }
355 344
356 345 if (mdb_ctf_lookup_by_name("unix`hw_page_map_t", &id) == -1 ||
357 346 mdb_ctf_type_resolve(id, &rid) == -1 ||
358 347 (hwpm_size = mdb_ctf_type_size(rid)) < 0) {
359 348 mdb_warn("unable to lookup unix`hw_page_map_t");
360 349 return;
361 350 }
362 351
363 352 counters = mdb_alloc(sym.st_size, UM_SLEEP | UM_GC);
364 353
365 354 if (mdb_vread(counters, sym.st_size, (uintptr_t)sym.st_value) == -1) {
366 355 mdb_warn("unable to read page_counters");
367 356 return;
368 357 }
369 358
370 359 ncounters = sym.st_size / sizeof (counters);
371 360
372 361 for (idx = 0; idx < ncounters; idx++) {
373 362 uintptr_t addr = counters[idx];
374 363 if (addr != 0)
375 364 leaky_grep(addr, hwpm_size * max_mem_nodes);
376 365 }
377 366 }
378 367
379 368 int
380 369 leaky_subr_estimate(size_t *estp)
381 370 {
382 371 uintptr_t panicstr;
383 372 int state;
384 373
385 374 if ((state = mdb_get_state()) == MDB_STATE_RUNNING) {
386 375 mdb_warn("findleaks: can only be run on a system "
387 376 "dump or under kmdb; see dumpadm(1M)\n");
388 377 return (DCMD_ERR);
389 378 }
390 379
391 380 if (mdb_readvar(&panicstr, "panicstr") == -1) {
392 381 mdb_warn("can't read variable 'panicstr'");
393 382 return (DCMD_ERR);
394 383 }
395 384
396 385 if (state != MDB_STATE_STOPPED && panicstr == NULL) {
397 386 mdb_warn("findleaks: cannot be run on a live dump.\n");
398 387 return (DCMD_ERR);
399 388 }
400 389
401 390 if (mdb_walk("kmem_cache", (mdb_walk_cb_t)leaky_estimate, estp) == -1) {
402 391 mdb_warn("couldn't walk 'kmem_cache'");
403 392 return (DCMD_ERR);
404 393 }
405 394
406 395 if (*estp == 0) {
407 396 mdb_warn("findleaks: no buffers found\n");
408 397 return (DCMD_ERR);
409 398 }
410 399
411 400 if (mdb_walk("vmem", (mdb_walk_cb_t)leaky_estimate_vmem, estp) == -1) {
412 401 mdb_warn("couldn't walk 'vmem'");
413 402 return (DCMD_ERR);
414 403 }
415 404
416 405 return (DCMD_OK);
417 406 }
418 407
419 408 int
420 409 leaky_subr_fill(leak_mtab_t **lmpp)
421 410 {
422 411 if (mdb_walk("vmem", (mdb_walk_cb_t)leaky_vmem, lmpp) == -1) {
423 412 mdb_warn("couldn't walk 'vmem'");
424 413 return (DCMD_ERR);
425 414 }
426 415
427 416 if (mdb_walk("kmem_cache", (mdb_walk_cb_t)leaky_cache, lmpp) == -1) {
428 417 mdb_warn("couldn't walk 'kmem_cache'");
429 418 return (DCMD_ERR);
430 419 }
431 420
432 421 if (mdb_readvar(&kmem_lite_count, "kmem_lite_count") == -1) {
433 422 mdb_warn("couldn't read 'kmem_lite_count'");
434 423 kmem_lite_count = 0;
435 424 } else if (kmem_lite_count > 16) {
436 425 mdb_warn("kmem_lite_count nonsensical, ignored\n");
437 426 kmem_lite_count = 0;
438 427 }
439 428
440 429 return (DCMD_OK);
441 430 }
442 431
443 432 int
444 433 leaky_subr_run(void)
445 434 {
446 435 unsigned long ps = PAGESIZE;
447 436 uintptr_t kstat_arena;
448 437 uintptr_t dmods;
449 438
450 439 leaky_kludge();
451 440
452 441 if (mdb_walk("kmem_cache", (mdb_walk_cb_t)leaky_scan_cache,
453 442 NULL) == -1) {
454 443 mdb_warn("couldn't walk 'kmem_cache'");
455 444 return (DCMD_ERR);
456 445 }
457 446
458 447 if (mdb_walk("modctl", (mdb_walk_cb_t)leaky_modctl, NULL) == -1) {
459 448 mdb_warn("couldn't walk 'modctl'");
460 449 return (DCMD_ERR);
461 450 }
462 451
463 452 /*
464 453 * If kmdb is loaded, we need to walk it's module list, since kmdb
465 454 * modctl structures can reference kmem allocations.
466 455 */
467 456 if ((mdb_readvar(&dmods, "kdi_dmods") != -1) && (dmods != NULL))
468 457 (void) mdb_pwalk("modctl", (mdb_walk_cb_t)leaky_modctl,
469 458 NULL, dmods);
470 459
471 460 if (mdb_walk("thread", (mdb_walk_cb_t)leaky_thread, &ps) == -1) {
472 461 mdb_warn("couldn't walk 'thread'");
473 462 return (DCMD_ERR);
474 463 }
475 464
476 465 if (mdb_walk("deathrow", (mdb_walk_cb_t)leaky_thread, &ps) == -1) {
477 466 mdb_warn("couldn't walk 'deathrow'");
478 467 return (DCMD_ERR);
479 468 }
480 469
481 470 if (mdb_readvar(&kstat_arena, "kstat_arena") == -1) {
482 471 mdb_warn("couldn't read 'kstat_arena'");
483 472 return (DCMD_ERR);
484 473 }
485 474
486 475 if (mdb_pwalk("vmem_alloc", (mdb_walk_cb_t)leaky_kstat,
487 476 NULL, kstat_arena) == -1) {
488 477 mdb_warn("couldn't walk kstat vmem arena");
489 478 return (DCMD_ERR);
490 479 }
491 480
492 481 return (DCMD_OK);
493 482 }
494 483
495 484 void
496 485 leaky_subr_add_leak(leak_mtab_t *lmp)
497 486 {
498 487 uintptr_t addr = LKM_CTLPTR(lmp->lkm_bufctl);
499 488 size_t depth;
500 489
501 490 switch (LKM_CTLTYPE(lmp->lkm_bufctl)) {
502 491 case LKM_CTL_VMSEG: {
503 492 vmem_seg_t vs;
504 493
505 494 if (mdb_vread(&vs, sizeof (vs), addr) == -1) {
506 495 mdb_warn("couldn't read leaked vmem_seg at addr %p",
507 496 addr);
508 497 return;
509 498 }
510 499 depth = MIN(vs.vs_depth, VMEM_STACK_DEPTH);
511 500
512 501 leaky_add_leak(TYPE_VMEM, addr, vs.vs_start, vs.vs_timestamp,
513 502 vs.vs_stack, depth, 0, (vs.vs_end - vs.vs_start));
514 503 break;
515 504 }
516 505 case LKM_CTL_BUFCTL: {
517 506 kmem_bufctl_audit_t bc;
518 507
519 508 if (mdb_vread(&bc, sizeof (bc), addr) == -1) {
520 509 mdb_warn("couldn't read leaked bufctl at addr %p",
521 510 addr);
522 511 return;
523 512 }
524 513
525 514 depth = MIN(bc.bc_depth, KMEM_STACK_DEPTH);
526 515
527 516 /*
528 517 * The top of the stack will be kmem_cache_alloc+offset.
529 518 * Since the offset in kmem_cache_alloc() isn't interesting
530 519 * we skip that frame for the purposes of uniquifying stacks.
531 520 *
532 521 * We also use the cache pointer as the leaks's cid, to
533 522 * prevent the coalescing of leaks from different caches.
534 523 */
535 524 if (depth > 0)
536 525 depth--;
537 526 leaky_add_leak(TYPE_KMEM, addr, (uintptr_t)bc.bc_addr,
538 527 bc.bc_timestamp, bc.bc_stack + 1, depth,
539 528 (uintptr_t)bc.bc_cache, 0);
540 529 break;
541 530 }
542 531 case LKM_CTL_CACHE: {
543 532 kmem_cache_t cache;
544 533 kmem_buftag_lite_t bt;
545 534 pc_t caller;
546 535 int depth = 0;
547 536
548 537 /*
549 538 * For KMF_LITE caches, we can get the allocation PC
550 539 * out of the buftag structure.
551 540 */
552 541 if (mdb_vread(&cache, sizeof (cache), addr) != -1 &&
553 542 (cache.cache_flags & KMF_LITE) &&
554 543 kmem_lite_count > 0 &&
555 544 mdb_vread(&bt, sizeof (bt),
556 545 /* LINTED alignment */
557 546 (uintptr_t)KMEM_BUFTAG(&cache, lmp->lkm_base)) != -1) {
558 547 caller = bt.bt_history[0];
559 548 depth = 1;
560 549 }
561 550 leaky_add_leak(TYPE_CACHE, lmp->lkm_base, lmp->lkm_base, 0,
562 551 &caller, depth, addr, addr);
563 552 break;
564 553 }
565 554 default:
566 555 mdb_warn("internal error: invalid leak_bufctl_t\n");
567 556 break;
568 557 }
569 558 }
570 559
/*
 * Walk a leak's saved stack trace and report the first frame that isn't
 * part of the allocator itself (i.e. whose symbol name doesn't begin with
 * "kmem_" or "vmem_").  The symbol name is left in buf (which must be at
 * least MDB_SYM_NAMLEN bytes) and the PC in *pcp.  If every frame is an
 * allocator frame, the last frame's PC is used and buf holds whatever the
 * final lookup left there (possibly the empty string).
 */
static void
leaky_subr_caller(const pc_t *stack, uint_t depth, char *buf, uintptr_t *pcp)
{
	int i;
	GElf_Sym sym;
	uintptr_t pc = 0;

	buf[0] = 0;

	for (i = 0; i < depth; i++) {
		pc = stack[i];

		/* unresolvable PCs are skipped; buf keeps its prior contents */
		if (mdb_lookup_by_addr(pc,
		    MDB_SYM_FUZZY, buf, MDB_SYM_NAMLEN, &sym) == -1)
			continue;
		if (strncmp(buf, "kmem_", 5) == 0)
			continue;
		if (strncmp(buf, "vmem_", 5) == 0)
			continue;
		*pcp = pc;

		return;
	}

	/*
	 * We're only here if the entire call chain begins with "kmem_";
	 * this shouldn't happen, but we'll just use the last caller.
	 */
	*pcp = pc;
}
601 590
602 591 int
603 592 leaky_subr_bufctl_cmp(const leak_bufctl_t *lhs, const leak_bufctl_t *rhs)
604 593 {
605 594 char lbuf[MDB_SYM_NAMLEN], rbuf[MDB_SYM_NAMLEN];
606 595 uintptr_t lcaller, rcaller;
607 596 int rval;
608 597
609 598 leaky_subr_caller(lhs->lkb_stack, lhs->lkb_depth, lbuf, &lcaller);
610 599 leaky_subr_caller(rhs->lkb_stack, lhs->lkb_depth, rbuf, &rcaller);
611 600
612 601 if (rval = strcmp(lbuf, rbuf))
613 602 return (rval);
614 603
615 604 if (lcaller < rcaller)
616 605 return (-1);
617 606
618 607 if (lcaller > rcaller)
619 608 return (1);
620 609
621 610 if (lhs->lkb_data < rhs->lkb_data)
622 611 return (-1);
623 612
624 613 if (lhs->lkb_data > rhs->lkb_data)
625 614 return (1);
626 615
627 616 return (0);
628 617 }
629 618
/*
 * Global state variables used by the leaky_subr_dump_* routines.  Note that
 * they are carefully cleared before use.
 */
static int lk_vmem_seen;	/* header printed for TYPE_VMEM leaks? */
static int lk_cache_seen;	/* header printed for TYPE_CACHE leaks? */
static int lk_kmem_seen;	/* header printed for TYPE_KMEM leaks? */
static size_t lk_ttl;		/* running count of leaks reported */
static size_t lk_bytes;		/* running total of leaked bytes */
639 628
640 629 void
641 630 leaky_subr_dump_start(int type)
642 631 {
643 632 switch (type) {
644 633 case TYPE_VMEM:
645 634 lk_vmem_seen = 0;
646 635 break;
647 636 case TYPE_CACHE:
648 637 lk_cache_seen = 0;
649 638 break;
650 639 case TYPE_KMEM:
651 640 lk_kmem_seen = 0;
652 641 break;
653 642 default:
654 643 break;
655 644 }
656 645
657 646 lk_ttl = 0;
658 647 lk_bytes = 0;
659 648 }
660 649
/*
 * Target entry point: report one coalesced leak.  In the non-verbose case
 * a one-line table row is printed (with a header the first time each type
 * is seen); in the verbose case per-leak totals are printed along with
 * detail from ::vmem_seg or ::bufctl where available.  lk_ttl/lk_bytes
 * accumulate across calls in non-verbose mode, and are reset per-leak in
 * verbose mode.
 */
void
leaky_subr_dump(const leak_bufctl_t *lkb, int verbose)
{
	const leak_bufctl_t *cur;
	kmem_cache_t cache;
	size_t min, max, size;
	char sz[30];
	char c[MDB_SYM_NAMLEN];
	uintptr_t caller;

	if (verbose) {
		lk_ttl = 0;
		lk_bytes = 0;
	}

	switch (lkb->lkb_type) {
	case TYPE_VMEM:
		if (!verbose && !lk_vmem_seen) {
			lk_vmem_seen = 1;
			mdb_printf("%-16s %7s %?s %s\n",
			    "BYTES", "LEAKED", "VMEM_SEG", "CALLER");
		}

		/* compute the min/max size over all coalesced duplicates */
		min = max = lkb->lkb_data;

		for (cur = lkb; cur != NULL; cur = cur->lkb_next) {
			size = cur->lkb_data;

			if (size < min)
				min = size;
			if (size > max)
				max = size;

			lk_ttl++;
			lk_bytes += size;
		}

		/* format as "N" or "MIN-MAX" depending on uniformity */
		if (min == max)
			(void) mdb_snprintf(sz, sizeof (sz), "%ld", min);
		else
			(void) mdb_snprintf(sz, sizeof (sz), "%ld-%ld",
			    min, max);

		if (!verbose) {
			leaky_subr_caller(lkb->lkb_stack, lkb->lkb_depth,
			    c, &caller);

			if (caller != 0) {
				(void) mdb_snprintf(c, sizeof (c),
				    "%a", caller);
			} else {
				(void) mdb_snprintf(c, sizeof (c),
				    "%s", "?");
			}
			mdb_printf("%-16s %7d %?p %s\n", sz, lkb->lkb_dups + 1,
			    lkb->lkb_addr, c);
		} else {
			mdb_arg_t v;

			if (lk_ttl == 1)
				mdb_printf("kmem_oversize leak: 1 vmem_seg, "
				    "%ld bytes\n", lk_bytes);
			else
				mdb_printf("kmem_oversize leak: %d vmem_segs, "
				    "%s bytes each, %ld bytes total\n",
				    lk_ttl, sz, lk_bytes);

			v.a_type = MDB_TYPE_STRING;
			v.a_un.a_str = "-v";

			/* show full segment detail via the vmem_seg dcmd */
			if (mdb_call_dcmd("vmem_seg", lkb->lkb_addr,
			    DCMD_ADDRSPEC, 1, &v) == -1) {
				mdb_warn("'%p::vmem_seg -v' failed",
				    lkb->lkb_addr);
			}
		}
		return;

	case TYPE_CACHE:
		if (!verbose && !lk_cache_seen) {
			lk_cache_seen = 1;
			if (lk_vmem_seen)
				mdb_printf("\n");
			mdb_printf("%-?s %7s %?s %s\n",
			    "CACHE", "LEAKED", "BUFFER", "CALLER");
		}

		if (mdb_vread(&cache, sizeof (cache), lkb->lkb_data) == -1) {
			/*
			 * This _really_ shouldn't happen; we shouldn't
			 * have been able to get this far if this
			 * cache wasn't readable.
			 */
			mdb_warn("can't read cache %p for leaked "
			    "buffer %p", lkb->lkb_data, lkb->lkb_addr);
			return;
		}

		lk_ttl += lkb->lkb_dups + 1;
		lk_bytes += (lkb->lkb_dups + 1) * cache.cache_bufsize;

		/* non-debug caches record at most one caller PC */
		caller = (lkb->lkb_depth == 0) ? 0 : lkb->lkb_stack[0];
		if (caller != 0) {
			(void) mdb_snprintf(c, sizeof (c), "%a", caller);
		} else {
			(void) mdb_snprintf(c, sizeof (c),
			    "%s", (verbose) ? "" : "?");
		}

		if (!verbose) {
			mdb_printf("%0?p %7d %0?p %s\n", lkb->lkb_cid,
			    lkb->lkb_dups + 1, lkb->lkb_addr, c);
		} else {
			if (lk_ttl == 1)
				mdb_printf("%s leak: 1 buffer, %ld bytes,\n",
				    cache.cache_name, lk_bytes);
			else
				mdb_printf("%s leak: %d buffers, "
				    "%ld bytes each, %ld bytes total,\n",
				    cache.cache_name, lk_ttl,
				    cache.cache_bufsize, lk_bytes);

			mdb_printf("    sample addr %p%s%s\n",
			    lkb->lkb_addr, (caller == 0) ? "" : ", caller ", c);
		}
		return;

	case TYPE_KMEM:
		if (!verbose && !lk_kmem_seen) {
			lk_kmem_seen = 1;
			if (lk_vmem_seen || lk_cache_seen)
				mdb_printf("\n");
			mdb_printf("%-?s %7s %?s %s\n",
			    "CACHE", "LEAKED", "BUFCTL", "CALLER");
		}

		if (mdb_vread(&cache, sizeof (cache), lkb->lkb_cid) == -1) {
			/*
			 * This _really_ shouldn't happen; we shouldn't
			 * have been able to get this far if this
			 * cache wasn't readable.
			 */
			mdb_warn("can't read cache %p for leaked "
			    "bufctl %p", lkb->lkb_cid, lkb->lkb_addr);
			return;
		}

		lk_ttl += lkb->lkb_dups + 1;
		lk_bytes += (lkb->lkb_dups + 1) * cache.cache_bufsize;

		if (!verbose) {
			leaky_subr_caller(lkb->lkb_stack, lkb->lkb_depth,
			    c, &caller);

			if (caller != 0) {
				(void) mdb_snprintf(c, sizeof (c),
				    "%a", caller);
			} else {
				(void) mdb_snprintf(c, sizeof (c),
				    "%s", "?");
			}
			mdb_printf("%0?p %7d %0?p %s\n", lkb->lkb_cid,
			    lkb->lkb_dups + 1, lkb->lkb_addr, c);
		} else {
			mdb_arg_t v;

			if (lk_ttl == 1)
				mdb_printf("%s leak: 1 buffer, %ld bytes\n",
				    cache.cache_name, lk_bytes);
			else
				mdb_printf("%s leak: %d buffers, "
				    "%ld bytes each, %ld bytes total\n",
				    cache.cache_name, lk_ttl,
				    cache.cache_bufsize, lk_bytes);

			v.a_type = MDB_TYPE_STRING;
			v.a_un.a_str = "-v";

			/* show the full allocation stack via ::bufctl -v */
			if (mdb_call_dcmd("bufctl", lkb->lkb_addr,
			    DCMD_ADDRSPEC, 1, &v) == -1) {
				mdb_warn("'%p::bufctl -v' failed",
				    lkb->lkb_addr);
			}
		}
		return;

	default:
		return;
	}
}
851 840
852 841 void
853 842 leaky_subr_dump_end(int type)
854 843 {
855 844 int i;
856 845 int width;
857 846 const char *leaks;
858 847
859 848 switch (type) {
860 849 case TYPE_VMEM:
861 850 if (!lk_vmem_seen)
862 851 return;
863 852
864 853 width = 16;
865 854 leaks = "kmem_oversize leak";
866 855 break;
867 856
868 857 case TYPE_CACHE:
869 858 if (!lk_cache_seen)
870 859 return;
871 860
872 861 width = sizeof (uintptr_t) * 2;
873 862 leaks = "buffer";
874 863 break;
875 864
876 865 case TYPE_KMEM:
877 866 if (!lk_kmem_seen)
878 867 return;
879 868
880 869 width = sizeof (uintptr_t) * 2;
881 870 leaks = "buffer";
882 871 break;
883 872
884 873 default:
885 874 return;
886 875 }
887 876
888 877 for (i = 0; i < 72; i++)
889 878 mdb_printf("-");
890 879 mdb_printf("\n%*s %7ld %s%s, %ld byte%s\n",
891 880 width, "Total", lk_ttl, leaks, (lk_ttl == 1) ? "" : "s",
892 881 lk_bytes, (lk_bytes == 1) ? "" : "s");
893 882 }
894 883
895 884 int
896 885 leaky_subr_invoke_callback(const leak_bufctl_t *lkb, mdb_walk_cb_t cb,
897 886 void *cbdata)
898 887 {
899 888 kmem_bufctl_audit_t bc;
900 889 vmem_seg_t vs;
901 890
902 891 switch (lkb->lkb_type) {
903 892 case TYPE_VMEM:
904 893 if (mdb_vread(&vs, sizeof (vs), lkb->lkb_addr) == -1) {
905 894 mdb_warn("unable to read vmem_seg at %p",
906 895 lkb->lkb_addr);
907 896 return (WALK_NEXT);
908 897 }
909 898 return (cb(lkb->lkb_addr, &vs, cbdata));
910 899
911 900 case TYPE_CACHE:
912 901 return (cb(lkb->lkb_addr, NULL, cbdata));
913 902
914 903 case TYPE_KMEM:
915 904 if (mdb_vread(&bc, sizeof (bc), lkb->lkb_addr) == -1) {
916 905 mdb_warn("unable to read bufctl at %p",
917 906 lkb->lkb_addr);
918 907 return (WALK_NEXT);
919 908 }
920 909 return (cb(lkb->lkb_addr, &bc, cbdata));
921 910 default:
922 911 return (WALK_NEXT);
923 912 }
924 913 }
↓ open down ↓ |
619 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX