5255 uts shouldn't open-code ISP2
1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
25 * Copyright (c) 2012, 2014 by Delphix. All rights reserved.
26 */
27
28 /*
29 * DTrace - Dynamic Tracing for Solaris
30 *
31 * This is the implementation of the Solaris Dynamic Tracing framework
32 * (DTrace). The user-visible interface to DTrace is described at length in
33 * the "Solaris Dynamic Tracing Guide". The interfaces between the libdtrace
34 * library, the in-kernel DTrace framework, and the DTrace providers are
35 * described in the block comments in the <sys/dtrace.h> header file. The
36 * internal architecture of DTrace is described in the block comments in the
37 * <sys/dtrace_impl.h> header file. The comments contained within the DTrace
38 * implementation very much assume mastery of all of these sources; if one has
39 * an unanswered question about the implementation, one should consult them
40 * first.
41 *
42 * The functions here are ordered roughly as follows:
43 *
44 * - Probe context functions
45 * - Probe hashing functions
46 * - Non-probe context utility functions
47 * - Matching functions
48 * - Provider-to-Framework API functions
49 * - Probe management functions
50 * - DIF object functions
51 * - Format functions
52 * - Predicate functions
53 * - ECB functions
54 * - Buffer functions
55 * - Enabling functions
56 * - DOF functions
57 * - Anonymous enabling functions
58 * - Consumer state functions
59 * - Helper functions
60 * - Hook functions
61 * - Driver cookbook functions
62 *
63 * Each group of functions begins with a block comment labelled the "DTrace
64 * [Group] Functions", allowing one to find each block by searching forward
65 * on capital-f functions.
66 */
67 #include <sys/errno.h>
68 #include <sys/stat.h>
69 #include <sys/modctl.h>
70 #include <sys/conf.h>
71 #include <sys/systm.h>
72 #include <sys/ddi.h>
73 #include <sys/sunddi.h>
74 #include <sys/cpuvar.h>
75 #include <sys/kmem.h>
76 #include <sys/strsubr.h>
77 #include <sys/sysmacros.h>
78 #include <sys/dtrace_impl.h>
79 #include <sys/atomic.h>
80 #include <sys/cmn_err.h>
81 #include <sys/mutex_impl.h>
82 #include <sys/rwlock_impl.h>
83 #include <sys/ctf_api.h>
84 #include <sys/panic.h>
85 #include <sys/priv_impl.h>
86 #include <sys/policy.h>
87 #include <sys/cred_impl.h>
88 #include <sys/procfs_isa.h>
89 #include <sys/taskq.h>
90 #include <sys/mkdev.h>
91 #include <sys/kdi.h>
92 #include <sys/zone.h>
93 #include <sys/socket.h>
94 #include <netinet/in.h>
95 #include "strtolctype.h"
96
97 /*
98 * DTrace Tunable Variables
99 *
100 * The following variables may be tuned by adding a line to /etc/system that
101 * includes both the name of the DTrace module ("dtrace") and the name of the
102 * variable. For example:
103 *
104 * set dtrace:dtrace_destructive_disallow = 1
105 *
106 * In general, the only variables that one should be tuning this way are those
107 * that affect system-wide DTrace behavior, and for which the default behavior
108 * is undesirable. Most of these variables are tunable on a per-consumer
109 * basis using DTrace options, and need not be tuned on a system-wide basis.
110 * When tuning these variables, avoid pathological values; while some attempt
111 * is made to verify the integrity of these variables, they are not considered
112 * part of the supported interface to DTrace, and they are therefore not
113 * checked comprehensively. Further, these variables should not be tuned
114 * dynamically via "mdb -kw" or other means; they should only be tuned via
115 * /etc/system.
116 */
117 int dtrace_destructive_disallow = 0;
118 dtrace_optval_t dtrace_nonroot_maxsize = (16 * 1024 * 1024);
119 size_t dtrace_difo_maxsize = (256 * 1024);
120 dtrace_optval_t dtrace_dof_maxsize = (8 * 1024 * 1024);
121 size_t dtrace_global_maxsize = (16 * 1024);
122 size_t dtrace_actions_max = (16 * 1024);
123 size_t dtrace_retain_max = 1024;
124 dtrace_optval_t dtrace_helper_actions_max = 1024;
125 dtrace_optval_t dtrace_helper_providers_max = 32;
126 dtrace_optval_t dtrace_dstate_defsize = (1 * 1024 * 1024);
127 size_t dtrace_strsize_default = 256;
128 dtrace_optval_t dtrace_cleanrate_default = 9900990; /* 101 hz */
129 dtrace_optval_t dtrace_cleanrate_min = 200000; /* 5000 hz */
130 dtrace_optval_t dtrace_cleanrate_max = (uint64_t)60 * NANOSEC; /* 1/minute */
131 dtrace_optval_t dtrace_aggrate_default = NANOSEC; /* 1 hz */
132 dtrace_optval_t dtrace_statusrate_default = NANOSEC; /* 1 hz */
133 dtrace_optval_t dtrace_statusrate_max = (hrtime_t)10 * NANOSEC; /* 6/minute */
134 dtrace_optval_t dtrace_switchrate_default = NANOSEC; /* 1 hz */
135 dtrace_optval_t dtrace_nspec_default = 1;
136 dtrace_optval_t dtrace_specsize_default = 32 * 1024;
137 dtrace_optval_t dtrace_stackframes_default = 20;
138 dtrace_optval_t dtrace_ustackframes_default = 20;
139 dtrace_optval_t dtrace_jstackframes_default = 50;
140 dtrace_optval_t dtrace_jstackstrsize_default = 512;
141 int dtrace_msgdsize_max = 128;
142 hrtime_t dtrace_chill_max = MSEC2NSEC(500); /* 500 ms */
143 hrtime_t dtrace_chill_interval = NANOSEC; /* 1000 ms */
144 int dtrace_devdepth_max = 32;
145 int dtrace_err_verbose;
146 hrtime_t dtrace_deadman_interval = NANOSEC;
147 hrtime_t dtrace_deadman_timeout = (hrtime_t)10 * NANOSEC;
148 hrtime_t dtrace_deadman_user = (hrtime_t)30 * NANOSEC;
149 hrtime_t dtrace_unregister_defunct_reap = (hrtime_t)60 * NANOSEC;
150
151 /*
152 * DTrace External Variables
153 *
154 * As dtrace(7D) is a kernel module, any DTrace variables are obviously
155 * available to DTrace consumers via the backtick (`) syntax. One of these,
156 * dtrace_zero, is made deliberately so: it is provided as a source of
157 * well-known, zero-filled memory. While this variable is not documented,
158 * it is used by some translators as an implementation detail.
159 */
160 const char dtrace_zero[256] = { 0 }; /* zero-filled memory */
161
162 /*
163 * DTrace Internal Variables
164 */
165 static dev_info_t *dtrace_devi; /* device info */
166 static vmem_t *dtrace_arena; /* probe ID arena */
167 static vmem_t *dtrace_minor; /* minor number arena */
168 static taskq_t *dtrace_taskq; /* task queue */
169 static dtrace_probe_t **dtrace_probes; /* array of all probes */
170 static int dtrace_nprobes; /* number of probes */
171 static dtrace_provider_t *dtrace_provider; /* provider list */
172 static dtrace_meta_t *dtrace_meta_pid; /* user-land meta provider */
173 static int dtrace_opens; /* number of opens */
174 static int dtrace_helpers; /* number of helpers */
175 static int dtrace_getf; /* number of unpriv getf()s */
176 static void *dtrace_softstate; /* softstate pointer */
177 static dtrace_hash_t *dtrace_bymod; /* probes hashed by module */
178 static dtrace_hash_t *dtrace_byfunc; /* probes hashed by function */
179 static dtrace_hash_t *dtrace_byname; /* probes hashed by name */
180 static dtrace_toxrange_t *dtrace_toxrange; /* toxic range array */
181 static int dtrace_toxranges; /* number of toxic ranges */
182 static int dtrace_toxranges_max; /* size of toxic range array */
183 static dtrace_anon_t dtrace_anon; /* anonymous enabling */
184 static kmem_cache_t *dtrace_state_cache; /* cache for dynamic state */
185 static uint64_t dtrace_vtime_references; /* number of vtimestamp refs */
186 static kthread_t *dtrace_panicked; /* panicking thread */
187 static dtrace_ecb_t *dtrace_ecb_create_cache; /* cached created ECB */
188 static dtrace_genid_t dtrace_probegen; /* current probe generation */
189 static dtrace_helpers_t *dtrace_deferred_pid; /* deferred helper list */
190 static dtrace_enabling_t *dtrace_retained; /* list of retained enablings */
191 static dtrace_genid_t dtrace_retained_gen; /* current retained enab gen */
192 static dtrace_dynvar_t dtrace_dynhash_sink; /* end of dynamic hash chains */
193 static int dtrace_dynvar_failclean; /* dynvars failed to clean */
194
195 /*
196 * DTrace Locking
197 * DTrace is protected by three (relatively coarse-grained) locks:
198 *
199 * (1) dtrace_lock is required to manipulate essentially any DTrace state,
200 * including enabling state, probes, ECBs, consumer state, helper state,
201 * etc. Importantly, dtrace_lock is _not_ required when in probe context;
202 * probe context is lock-free -- synchronization is handled via the
203 * dtrace_sync() cross call mechanism.
204 *
205 * (2) dtrace_provider_lock is required when manipulating provider state, or
206 * when provider state must be held constant.
207 *
208 * (3) dtrace_meta_lock is required when manipulating meta provider state, or
209 * when meta provider state must be held constant.
210 *
211 * The lock ordering between these three locks is dtrace_meta_lock before
212 * dtrace_provider_lock before dtrace_lock. (In particular, there are
213 * several places where dtrace_provider_lock is held by the framework as it
214 * calls into the providers -- which then call back into the framework,
215 * grabbing dtrace_lock.)
216 *
217 * There are two other locks in the mix: mod_lock and cpu_lock. With respect
218 * to dtrace_provider_lock and dtrace_lock, cpu_lock continues its historical
219 * role as a coarse-grained lock; it is acquired before both of these locks.
220 * With respect to dtrace_meta_lock, its behavior is stranger: cpu_lock must
221 * be acquired _between_ dtrace_meta_lock and any other DTrace locks.
222 * mod_lock is similar with respect to dtrace_provider_lock in that it must be
223 * acquired _between_ dtrace_provider_lock and dtrace_lock.
224 */
225 static kmutex_t dtrace_lock; /* probe state lock */
226 static kmutex_t dtrace_provider_lock; /* provider state lock */
227 static kmutex_t dtrace_meta_lock; /* meta-provider state lock */
228
229 /*
230 * DTrace Provider Variables
231 *
232 * These are the variables relating to DTrace as a provider (that is, the
233 * provider of the BEGIN, END, and ERROR probes).
234 */
235 static dtrace_pattr_t dtrace_provider_attr = {
236 { DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
237 { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
238 { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
239 { DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
240 { DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
241 };
242
243 static void
244 dtrace_nullop(void)
245 {}
246
247 static int
248 dtrace_enable_nullop(void)
249 {
250 return (0);
251 }
252
253 static dtrace_pops_t dtrace_provider_ops = {
254 (void (*)(void *, const dtrace_probedesc_t *))dtrace_nullop,
255 (void (*)(void *, struct modctl *))dtrace_nullop,
256 (int (*)(void *, dtrace_id_t, void *))dtrace_enable_nullop,
257 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
258 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
259 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
260 NULL,
261 NULL,
262 NULL,
263 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop
264 };
265
266 static dtrace_id_t dtrace_probeid_begin; /* special BEGIN probe */
267 static dtrace_id_t dtrace_probeid_end; /* special END probe */
268 dtrace_id_t dtrace_probeid_error; /* special ERROR probe */
269
270 /*
271 * DTrace Helper Tracing Variables
272 *
273 * These variables should be set dynamically to enable helper tracing. The
274 * only variables that should be set are dtrace_helptrace_enable (which should
275 * be set to a non-zero value to allocate helper tracing buffers on the next
276 * open of /dev/dtrace) and dtrace_helptrace_disable (which should be set to a
277 * non-zero value to deallocate helper tracing buffers on the next close of
278 * /dev/dtrace). When (and only when) helper tracing is disabled, the
279 * buffer size may also be set via dtrace_helptrace_bufsize.
280 */
281 int dtrace_helptrace_enable = 0;
282 int dtrace_helptrace_disable = 0;
283 int dtrace_helptrace_bufsize = 16 * 1024 * 1024;
284 uint32_t dtrace_helptrace_nlocals;
285 static dtrace_helptrace_t *dtrace_helptrace_buffer;
286 static uint32_t dtrace_helptrace_next = 0;
287 static int dtrace_helptrace_wrapped = 0;
288
289 /*
290 * DTrace Error Hashing
291 *
292 * On DEBUG kernels, DTrace will track the errors that has seen in a hash
293 * table. This is very useful for checking coverage of tests that are
294 * expected to induce DIF or DOF processing errors, and may be useful for
295 * debugging problems in the DIF code generator or in DOF generation . The
296 * error hash may be examined with the ::dtrace_errhash MDB dcmd.
297 */
298 #ifdef DEBUG
299 static dtrace_errhash_t dtrace_errhash[DTRACE_ERRHASHSZ];
300 static const char *dtrace_errlast;
301 static kthread_t *dtrace_errthread;
302 static kmutex_t dtrace_errlock;
303 #endif
304
305 /*
306 * DTrace Macros and Constants
307 *
308 * These are various macros that are useful in various spots in the
309 * implementation, along with a few random constants that have no meaning
310 * outside of the implementation. There is no real structure to this cpp
311 * mishmash -- but is there ever?
312 */
313 #define DTRACE_HASHSTR(hash, probe) \
314 dtrace_hash_str(*((char **)((uintptr_t)(probe) + (hash)->dth_stroffs)))
315
316 #define DTRACE_HASHNEXT(hash, probe) \
317 (dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_nextoffs)
318
319 #define DTRACE_HASHPREV(hash, probe) \
320 (dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_prevoffs)
321
322 #define DTRACE_HASHEQ(hash, lhs, rhs) \
323 (strcmp(*((char **)((uintptr_t)(lhs) + (hash)->dth_stroffs)), \
324 *((char **)((uintptr_t)(rhs) + (hash)->dth_stroffs))) == 0)
325
326 #define DTRACE_AGGHASHSIZE_SLEW 17
327
328 #define DTRACE_V4MAPPED_OFFSET (sizeof (uint32_t) * 3)
329
330 /*
331 * The key for a thread-local variable consists of the lower 61 bits of the
332 * t_did, plus the 3 bits of the highest active interrupt above LOCK_LEVEL.
333 * We add DIF_VARIABLE_MAX to t_did to assure that the thread key is never
334 * equal to a variable identifier. This is necessary (but not sufficient) to
335 * assure that global associative arrays never collide with thread-local
336 * variables. To guarantee that they cannot collide, we must also define the
337 * order for keying dynamic variables. That order is:
338 *
339 * [ key0 ] ... [ keyn ] [ variable-key ] [ tls-key ]
340 *
341 * Because the variable-key and the tls-key are in orthogonal spaces, there is
342 * no way for a global variable key signature to match a thread-local key
343 * signature.
344 */
345 #define DTRACE_TLS_THRKEY(where) { \
346 uint_t intr = 0; \
347 uint_t actv = CPU->cpu_intr_actv >> (LOCK_LEVEL + 1); \
348 for (; actv; actv >>= 1) \
349 intr++; \
350 ASSERT(intr < (1 << 3)); \
351 (where) = ((curthread->t_did + DIF_VARIABLE_MAX) & \
352 (((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \
353 }
354
355 #define DT_BSWAP_8(x) ((x) & 0xff)
356 #define DT_BSWAP_16(x) ((DT_BSWAP_8(x) << 8) | DT_BSWAP_8((x) >> 8))
357 #define DT_BSWAP_32(x) ((DT_BSWAP_16(x) << 16) | DT_BSWAP_16((x) >> 16))
358 #define DT_BSWAP_64(x) ((DT_BSWAP_32(x) << 32) | DT_BSWAP_32((x) >> 32))
359
360 #define DT_MASK_LO 0x00000000FFFFFFFFULL
361
362 #define DTRACE_STORE(type, tomax, offset, what) \
363 *((type *)((uintptr_t)(tomax) + (uintptr_t)offset)) = (type)(what);
364
365 #ifndef __x86
366 #define DTRACE_ALIGNCHECK(addr, size, flags) \
367 if (addr & (size - 1)) { \
368 *flags |= CPU_DTRACE_BADALIGN; \
369 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = addr; \
370 return (0); \
371 }
372 #else
373 #define DTRACE_ALIGNCHECK(addr, size, flags)
374 #endif
375
376 /*
377 * Test whether a range of memory starting at testaddr of size testsz falls
378 * within the range of memory described by addr, sz. We take care to avoid
379 * problems with overflow and underflow of the unsigned quantities, and
380 * disallow all negative sizes. Ranges of size 0 are allowed.
381 */
382 #define DTRACE_INRANGE(testaddr, testsz, baseaddr, basesz) \
383 ((testaddr) - (uintptr_t)(baseaddr) < (basesz) && \
384 (testaddr) + (testsz) - (uintptr_t)(baseaddr) <= (basesz) && \
385 (testaddr) + (testsz) >= (testaddr))
386
387 /*
388 * Test whether alloc_sz bytes will fit in the scratch region. We isolate
389 * alloc_sz on the righthand side of the comparison in order to avoid overflow
390 * or underflow in the comparison with it. This is simpler than the INRANGE
391 * check above, because we know that the dtms_scratch_ptr is valid in the
392 * range. Allocations of size zero are allowed.
393 */
394 #define DTRACE_INSCRATCH(mstate, alloc_sz) \
395 ((mstate)->dtms_scratch_base + (mstate)->dtms_scratch_size - \
396 (mstate)->dtms_scratch_ptr >= (alloc_sz))
397
398 #define DTRACE_LOADFUNC(bits) \
399 /*CSTYLED*/ \
400 uint##bits##_t \
401 dtrace_load##bits(uintptr_t addr) \
402 { \
403 size_t size = bits / NBBY; \
404 /*CSTYLED*/ \
405 uint##bits##_t rval; \
406 int i; \
407 volatile uint16_t *flags = (volatile uint16_t *) \
408 &cpu_core[CPU->cpu_id].cpuc_dtrace_flags; \
409 \
410 DTRACE_ALIGNCHECK(addr, size, flags); \
411 \
412 for (i = 0; i < dtrace_toxranges; i++) { \
413 if (addr >= dtrace_toxrange[i].dtt_limit) \
414 continue; \
415 \
416 if (addr + size <= dtrace_toxrange[i].dtt_base) \
417 continue; \
418 \
419 /* \
420 * This address falls within a toxic region; return 0. \
421 */ \
422 *flags |= CPU_DTRACE_BADADDR; \
423 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = addr; \
424 return (0); \
425 } \
426 \
427 *flags |= CPU_DTRACE_NOFAULT; \
428 /*CSTYLED*/ \
429 rval = *((volatile uint##bits##_t *)addr); \
430 *flags &= ~CPU_DTRACE_NOFAULT; \
431 \
432 return (!(*flags & CPU_DTRACE_FAULT) ? rval : 0); \
433 }
434
435 #ifdef _LP64
436 #define dtrace_loadptr dtrace_load64
437 #else
438 #define dtrace_loadptr dtrace_load32
439 #endif
440
441 #define DTRACE_DYNHASH_FREE 0
442 #define DTRACE_DYNHASH_SINK 1
443 #define DTRACE_DYNHASH_VALID 2
444
445 #define DTRACE_MATCH_FAIL -1
446 #define DTRACE_MATCH_NEXT 0
447 #define DTRACE_MATCH_DONE 1
448 #define DTRACE_ANCHORED(probe) ((probe)->dtpr_func[0] != '\0')
449 #define DTRACE_STATE_ALIGN 64
450
451 #define DTRACE_FLAGS2FLT(flags) \
452 (((flags) & CPU_DTRACE_BADADDR) ? DTRACEFLT_BADADDR : \
453 ((flags) & CPU_DTRACE_ILLOP) ? DTRACEFLT_ILLOP : \
454 ((flags) & CPU_DTRACE_DIVZERO) ? DTRACEFLT_DIVZERO : \
455 ((flags) & CPU_DTRACE_KPRIV) ? DTRACEFLT_KPRIV : \
456 ((flags) & CPU_DTRACE_UPRIV) ? DTRACEFLT_UPRIV : \
457 ((flags) & CPU_DTRACE_TUPOFLOW) ? DTRACEFLT_TUPOFLOW : \
458 ((flags) & CPU_DTRACE_BADALIGN) ? DTRACEFLT_BADALIGN : \
459 ((flags) & CPU_DTRACE_NOSCRATCH) ? DTRACEFLT_NOSCRATCH : \
460 ((flags) & CPU_DTRACE_BADSTACK) ? DTRACEFLT_BADSTACK : \
461 DTRACEFLT_UNKNOWN)
462
463 #define DTRACEACT_ISSTRING(act) \
464 ((act)->dta_kind == DTRACEACT_DIFEXPR && \
465 (act)->dta_difo->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING)
466
467 static size_t dtrace_strlen(const char *, size_t);
468 static dtrace_probe_t *dtrace_probe_lookup_id(dtrace_id_t id);
469 static void dtrace_enabling_provide(dtrace_provider_t *);
470 static int dtrace_enabling_match(dtrace_enabling_t *, int *);
471 static void dtrace_enabling_matchall(void);
472 static void dtrace_enabling_reap(void);
473 static dtrace_state_t *dtrace_anon_grab(void);
474 static uint64_t dtrace_helper(int, dtrace_mstate_t *,
475 dtrace_state_t *, uint64_t, uint64_t);
476 static dtrace_helpers_t *dtrace_helpers_create(proc_t *);
477 static void dtrace_buffer_drop(dtrace_buffer_t *);
478 static int dtrace_buffer_consumed(dtrace_buffer_t *, hrtime_t when);
479 static intptr_t dtrace_buffer_reserve(dtrace_buffer_t *, size_t, size_t,
480 dtrace_state_t *, dtrace_mstate_t *);
481 static int dtrace_state_option(dtrace_state_t *, dtrace_optid_t,
482 dtrace_optval_t);
483 static int dtrace_ecb_create_enable(dtrace_probe_t *, void *);
484 static void dtrace_helper_provider_destroy(dtrace_helper_provider_t *);
485 static int dtrace_priv_proc(dtrace_state_t *, dtrace_mstate_t *);
486 static void dtrace_getf_barrier(void);
487
488 /*
489 * DTrace Probe Context Functions
490 *
491 * These functions are called from probe context. Because probe context is
492 * any context in which C may be called, arbitrarily locks may be held,
493 * interrupts may be disabled, we may be in arbitrary dispatched state, etc.
494 * As a result, functions called from probe context may only call other DTrace
495 * support functions -- they may not interact at all with the system at large.
496 * (Note that the ASSERT macro is made probe-context safe by redefining it in
497 * terms of dtrace_assfail(), a probe-context safe function.) If arbitrary
498 * loads are to be performed from probe context, they _must_ be in terms of
499 * the safe dtrace_load*() variants.
500 *
501 * Some functions in this block are not actually called from probe context;
502 * for these functions, there will be a comment above the function reading
503 * "Note: not called from probe context."
504 */
505 void
506 dtrace_panic(const char *format, ...)
507 {
508 va_list alist;
509
510 va_start(alist, format);
511 dtrace_vpanic(format, alist);
512 va_end(alist);
513 }
514
515 int
516 dtrace_assfail(const char *a, const char *f, int l)
517 {
518 dtrace_panic("assertion failed: %s, file: %s, line: %d", a, f, l);
519
520 /*
521 * We just need something here that even the most clever compiler
522 * cannot optimize away.
523 */
524 return (a[(uintptr_t)f]);
525 }
526
527 /*
528 * Atomically increment a specified error counter from probe context.
529 */
530 static void
531 dtrace_error(uint32_t *counter)
532 {
533 /*
534 * Most counters stored to in probe context are per-CPU counters.
535 * However, there are some error conditions that are sufficiently
536 * arcane that they don't merit per-CPU storage. If these counters
537 * are incremented concurrently on different CPUs, scalability will be
538 * adversely affected -- but we don't expect them to be white-hot in a
539 * correctly constructed enabling...
540 */
541 uint32_t oval, nval;
542
543 do {
544 oval = *counter;
545
546 if ((nval = oval + 1) == 0) {
547 /*
548 * If the counter would wrap, set it to 1 -- assuring
549 * that the counter is never zero when we have seen
550 * errors. (The counter must be 32-bits because we
551 * aren't guaranteed a 64-bit compare&swap operation.)
552 * To save this code both the infamy of being fingered
553 * by a priggish news story and the indignity of being
554 * the target of a neo-puritan witch trial, we're
555 * carefully avoiding any colorful description of the
556 * likelihood of this condition -- but suffice it to
557 * say that it is only slightly more likely than the
558 * overflow of predicate cache IDs, as discussed in
559 * dtrace_predicate_create().
560 */
561 nval = 1;
562 }
563 } while (dtrace_cas32(counter, oval, nval) != oval);
564 }
565
566 /*
567 * Use the DTRACE_LOADFUNC macro to define functions for each of loading a
568 * uint8_t, a uint16_t, a uint32_t and a uint64_t.
569 */
570 DTRACE_LOADFUNC(8)
571 DTRACE_LOADFUNC(16)
572 DTRACE_LOADFUNC(32)
573 DTRACE_LOADFUNC(64)
574
575 static int
576 dtrace_inscratch(uintptr_t dest, size_t size, dtrace_mstate_t *mstate)
577 {
578 if (dest < mstate->dtms_scratch_base)
579 return (0);
580
581 if (dest + size < dest)
582 return (0);
583
584 if (dest + size > mstate->dtms_scratch_ptr)
585 return (0);
586
587 return (1);
588 }
589
590 static int
591 dtrace_canstore_statvar(uint64_t addr, size_t sz,
592 dtrace_statvar_t **svars, int nsvars)
593 {
594 int i;
595
596 for (i = 0; i < nsvars; i++) {
597 dtrace_statvar_t *svar = svars[i];
598
599 if (svar == NULL || svar->dtsv_size == 0)
600 continue;
601
602 if (DTRACE_INRANGE(addr, sz, svar->dtsv_data, svar->dtsv_size))
603 return (1);
604 }
605
606 return (0);
607 }
608
609 /*
610 * Check to see if the address is within a memory region to which a store may
611 * be issued. This includes the DTrace scratch areas, and any DTrace variable
612 * region. The caller of dtrace_canstore() is responsible for performing any
613 * alignment checks that are needed before stores are actually executed.
614 */
615 static int
616 dtrace_canstore(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
617 dtrace_vstate_t *vstate)
618 {
619 /*
620 * First, check to see if the address is in scratch space...
621 */
622 if (DTRACE_INRANGE(addr, sz, mstate->dtms_scratch_base,
623 mstate->dtms_scratch_size))
624 return (1);
625
626 /*
627 * Now check to see if it's a dynamic variable. This check will pick
628 * up both thread-local variables and any global dynamically-allocated
629 * variables.
630 */
631 if (DTRACE_INRANGE(addr, sz, vstate->dtvs_dynvars.dtds_base,
632 vstate->dtvs_dynvars.dtds_size)) {
633 dtrace_dstate_t *dstate = &vstate->dtvs_dynvars;
634 uintptr_t base = (uintptr_t)dstate->dtds_base +
635 (dstate->dtds_hashsize * sizeof (dtrace_dynhash_t));
636 uintptr_t chunkoffs;
637
638 /*
639 * Before we assume that we can store here, we need to make
640 * sure that it isn't in our metadata -- storing to our
641 * dynamic variable metadata would corrupt our state. For
642 * the range to not include any dynamic variable metadata,
643 * it must:
644 *
645 * (1) Start above the hash table that is at the base of
646 * the dynamic variable space
647 *
648 * (2) Have a starting chunk offset that is beyond the
649 * dtrace_dynvar_t that is at the base of every chunk
650 *
651 * (3) Not span a chunk boundary
652 *
653 */
654 if (addr < base)
655 return (0);
656
657 chunkoffs = (addr - base) % dstate->dtds_chunksize;
658
659 if (chunkoffs < sizeof (dtrace_dynvar_t))
660 return (0);
661
662 if (chunkoffs + sz > dstate->dtds_chunksize)
663 return (0);
664
665 return (1);
666 }
667
668 /*
669 * Finally, check the static local and global variables. These checks
670 * take the longest, so we perform them last.
671 */
672 if (dtrace_canstore_statvar(addr, sz,
673 vstate->dtvs_locals, vstate->dtvs_nlocals))
674 return (1);
675
676 if (dtrace_canstore_statvar(addr, sz,
677 vstate->dtvs_globals, vstate->dtvs_nglobals))
678 return (1);
679
680 return (0);
681 }
682
683
684 /*
685 * Convenience routine to check to see if the address is within a memory
686 * region in which a load may be issued given the user's privilege level;
687 * if not, it sets the appropriate error flags and loads 'addr' into the
688 * illegal value slot.
689 *
690 * DTrace subroutines (DIF_SUBR_*) should use this helper to implement
691 * appropriate memory access protection.
692 */
693 static int
694 dtrace_canload(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
695 dtrace_vstate_t *vstate)
696 {
697 volatile uintptr_t *illval = &cpu_core[CPU->cpu_id].cpuc_dtrace_illval;
698 file_t *fp;
699
700 /*
701 * If we hold the privilege to read from kernel memory, then
702 * everything is readable.
703 */
704 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
705 return (1);
706
707 /*
708 * You can obviously read that which you can store.
709 */
710 if (dtrace_canstore(addr, sz, mstate, vstate))
711 return (1);
712
713 /*
714 * We're allowed to read from our own string table.
715 */
716 if (DTRACE_INRANGE(addr, sz, mstate->dtms_difo->dtdo_strtab,
717 mstate->dtms_difo->dtdo_strlen))
718 return (1);
719
720 if (vstate->dtvs_state != NULL &&
721 dtrace_priv_proc(vstate->dtvs_state, mstate)) {
722 proc_t *p;
723
724 /*
725 * When we have privileges to the current process, there are
726 * several context-related kernel structures that are safe to
727 * read, even absent the privilege to read from kernel memory.
728 * These reads are safe because these structures contain only
729 * state that (1) we're permitted to read, (2) is harmless or
730 * (3) contains pointers to additional kernel state that we're
731 * not permitted to read (and as such, do not present an
732 * opportunity for privilege escalation). Finally (and
733 * critically), because of the nature of their relation with
734 * the current thread context, the memory associated with these
735 * structures cannot change over the duration of probe context,
736 * and it is therefore impossible for this memory to be
737 * deallocated and reallocated as something else while it's
738 * being operated upon.
739 */
740 if (DTRACE_INRANGE(addr, sz, curthread, sizeof (kthread_t)))
741 return (1);
742
743 if ((p = curthread->t_procp) != NULL && DTRACE_INRANGE(addr,
744 sz, curthread->t_procp, sizeof (proc_t))) {
745 return (1);
746 }
747
748 if (curthread->t_cred != NULL && DTRACE_INRANGE(addr, sz,
749 curthread->t_cred, sizeof (cred_t))) {
750 return (1);
751 }
752
753 if (p != NULL && p->p_pidp != NULL && DTRACE_INRANGE(addr, sz,
754 &(p->p_pidp->pid_id), sizeof (pid_t))) {
755 return (1);
756 }
757
758 if (curthread->t_cpu != NULL && DTRACE_INRANGE(addr, sz,
759 curthread->t_cpu, offsetof(cpu_t, cpu_pause_thread))) {
760 return (1);
761 }
762 }
763
764 if ((fp = mstate->dtms_getf) != NULL) {
765 uintptr_t psz = sizeof (void *);
766 vnode_t *vp;
767 vnodeops_t *op;
768
769 /*
770 * When getf() returns a file_t, the enabling is implicitly
771 * granted the (transient) right to read the returned file_t
772 * as well as the v_path and v_op->vnop_name of the underlying
773 * vnode. These accesses are allowed after a successful
774 * getf() because the members that they refer to cannot change
775 * once set -- and the barrier logic in the kernel's closef()
776 * path assures that the file_t and its referenced vode_t
777 * cannot themselves be stale (that is, it impossible for
778 * either dtms_getf itself or its f_vnode member to reference
779 * freed memory).
780 */
781 if (DTRACE_INRANGE(addr, sz, fp, sizeof (file_t)))
782 return (1);
783
784 if ((vp = fp->f_vnode) != NULL) {
785 if (DTRACE_INRANGE(addr, sz, &vp->v_path, psz))
786 return (1);
787
788 if (vp->v_path != NULL && DTRACE_INRANGE(addr, sz,
789 vp->v_path, strlen(vp->v_path) + 1)) {
790 return (1);
791 }
792
793 if (DTRACE_INRANGE(addr, sz, &vp->v_op, psz))
794 return (1);
795
796 if ((op = vp->v_op) != NULL &&
797 DTRACE_INRANGE(addr, sz, &op->vnop_name, psz)) {
798 return (1);
799 }
800
801 if (op != NULL && op->vnop_name != NULL &&
802 DTRACE_INRANGE(addr, sz, op->vnop_name,
803 strlen(op->vnop_name) + 1)) {
804 return (1);
805 }
806 }
807 }
808
809 DTRACE_CPUFLAG_SET(CPU_DTRACE_KPRIV);
810 *illval = addr;
811 return (0);
812 }
813
814 /*
815 * Convenience routine to check to see if a given string is within a memory
816 * region in which a load may be issued given the user's privilege level;
817 * this exists so that we don't need to issue unnecessary dtrace_strlen()
818 * calls in the event that the user has all privileges.
819 */
820 static int
821 dtrace_strcanload(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
822 dtrace_vstate_t *vstate)
823 {
824 size_t strsz;
825
826 /*
827 * If we hold the privilege to read from kernel memory, then
828 * everything is readable.
829 */
830 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
831 return (1);
832
833 strsz = 1 + dtrace_strlen((char *)(uintptr_t)addr, sz);
834 if (dtrace_canload(addr, strsz, mstate, vstate))
835 return (1);
836
837 return (0);
838 }
839
840 /*
841 * Convenience routine to check to see if a given variable is within a memory
842 * region in which a load may be issued given the user's privilege level.
843 */
844 static int
845 dtrace_vcanload(void *src, dtrace_diftype_t *type, dtrace_mstate_t *mstate,
846 dtrace_vstate_t *vstate)
847 {
848 size_t sz;
849 ASSERT(type->dtdt_flags & DIF_TF_BYREF);
850
851 /*
852 * If we hold the privilege to read from kernel memory, then
853 * everything is readable.
854 */
855 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
856 return (1);
857
858 if (type->dtdt_kind == DIF_TYPE_STRING)
859 sz = dtrace_strlen(src,
860 vstate->dtvs_state->dts_options[DTRACEOPT_STRSIZE]) + 1;
861 else
862 sz = type->dtdt_size;
863
864 return (dtrace_canload((uintptr_t)src, sz, mstate, vstate));
865 }
866
867 /*
868 * Convert a string to a signed integer using safe loads.
869 *
870 * NOTE: This function uses various macros from strtolctype.h to manipulate
871 * digit values, etc -- these have all been checked to ensure they make
872 * no additional function calls.
873 */
874 static int64_t
875 dtrace_strtoll(char *input, int base, size_t limit)
876 {
877 uintptr_t pos = (uintptr_t)input;
878 int64_t val = 0;
879 int x;
880 boolean_t neg = B_FALSE;
881 char c, cc, ccc;
882 uintptr_t end = pos + limit;
883
884 /*
885 * Consume any whitespace preceding digits.
886 */
887 while ((c = dtrace_load8(pos)) == ' ' || c == '\t')
888 pos++;
889
890 /*
891 * Handle an explicit sign if one is present.
892 */
893 if (c == '-' || c == '+') {
894 if (c == '-')
895 neg = B_TRUE;
896 c = dtrace_load8(++pos);
897 }
898
899 /*
900 * Check for an explicit hexadecimal prefix ("0x" or "0X") and skip it
901 * if present.
902 */
903 if (base == 16 && c == '0' && ((cc = dtrace_load8(pos + 1)) == 'x' ||
904 cc == 'X') && isxdigit(ccc = dtrace_load8(pos + 2))) {
905 pos += 2;
906 c = ccc;
907 }
908
909 /*
910 * Read in contiguous digits until the first non-digit character.
911 */
912 for (; pos < end && c != '\0' && lisalnum(c) && (x = DIGIT(c)) < base;
913 c = dtrace_load8(++pos))
914 val = val * base + x;
915
916 return (neg ? -val : val);
917 }
918
919 /*
920 * Compare two strings using safe loads.
921 */
922 static int
923 dtrace_strncmp(char *s1, char *s2, size_t limit)
924 {
925 uint8_t c1, c2;
926 volatile uint16_t *flags;
927
928 if (s1 == s2 || limit == 0)
929 return (0);
930
931 flags = (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
932
933 do {
934 if (s1 == NULL) {
935 c1 = '\0';
936 } else {
937 c1 = dtrace_load8((uintptr_t)s1++);
938 }
939
940 if (s2 == NULL) {
941 c2 = '\0';
942 } else {
943 c2 = dtrace_load8((uintptr_t)s2++);
944 }
945
946 if (c1 != c2)
947 return (c1 - c2);
948 } while (--limit && c1 != '\0' && !(*flags & CPU_DTRACE_FAULT));
949
950 return (0);
951 }
952
953 /*
954 * Compute strlen(s) for a string using safe memory accesses. The additional
955 * len parameter is used to specify a maximum length to ensure completion.
956 */
957 static size_t
958 dtrace_strlen(const char *s, size_t lim)
959 {
960 uint_t len;
961
962 for (len = 0; len != lim; len++) {
963 if (dtrace_load8((uintptr_t)s++) == '\0')
964 break;
965 }
966
967 return (len);
968 }
969
970 /*
971 * Check if an address falls within a toxic region.
972 */
973 static int
974 dtrace_istoxic(uintptr_t kaddr, size_t size)
975 {
976 uintptr_t taddr, tsize;
977 int i;
978
979 for (i = 0; i < dtrace_toxranges; i++) {
980 taddr = dtrace_toxrange[i].dtt_base;
981 tsize = dtrace_toxrange[i].dtt_limit - taddr;
982
983 if (kaddr - taddr < tsize) {
984 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
985 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = kaddr;
986 return (1);
987 }
988
989 if (taddr - kaddr < size) {
990 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
991 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = taddr;
992 return (1);
993 }
994 }
995
996 return (0);
997 }
998
999 /*
1000 * Copy src to dst using safe memory accesses. The src is assumed to be unsafe
1001 * memory specified by the DIF program. The dst is assumed to be safe memory
1002 * that we can store to directly because it is managed by DTrace. As with
1003 * standard bcopy, overlapping copies are handled properly.
1004 */
1005 static void
1006 dtrace_bcopy(const void *src, void *dst, size_t len)
1007 {
1008 if (len != 0) {
1009 uint8_t *s1 = dst;
1010 const uint8_t *s2 = src;
1011
1012 if (s1 <= s2) {
1013 do {
1014 *s1++ = dtrace_load8((uintptr_t)s2++);
1015 } while (--len != 0);
1016 } else {
1017 s2 += len;
1018 s1 += len;
1019
1020 do {
1021 *--s1 = dtrace_load8((uintptr_t)--s2);
1022 } while (--len != 0);
1023 }
1024 }
1025 }
1026
1027 /*
1028 * Copy src to dst using safe memory accesses, up to either the specified
1029 * length, or the point that a nul byte is encountered. The src is assumed to
1030 * be unsafe memory specified by the DIF program. The dst is assumed to be
1031 * safe memory that we can store to directly because it is managed by DTrace.
1032 * Unlike dtrace_bcopy(), overlapping regions are not handled.
1033 */
1034 static void
1035 dtrace_strcpy(const void *src, void *dst, size_t len)
1036 {
1037 if (len != 0) {
1038 uint8_t *s1 = dst, c;
1039 const uint8_t *s2 = src;
1040
1041 do {
1042 *s1++ = c = dtrace_load8((uintptr_t)s2++);
1043 } while (--len != 0 && c != '\0');
1044 }
1045 }
1046
1047 /*
1048 * Copy src to dst, deriving the size and type from the specified (BYREF)
1049 * variable type. The src is assumed to be unsafe memory specified by the DIF
1050 * program. The dst is assumed to be DTrace variable memory that is of the
1051 * specified type; we assume that we can store to directly.
1052 */
1053 static void
1054 dtrace_vcopy(void *src, void *dst, dtrace_diftype_t *type)
1055 {
1056 ASSERT(type->dtdt_flags & DIF_TF_BYREF);
1057
1058 if (type->dtdt_kind == DIF_TYPE_STRING) {
1059 dtrace_strcpy(src, dst, type->dtdt_size);
1060 } else {
1061 dtrace_bcopy(src, dst, type->dtdt_size);
1062 }
1063 }
1064
1065 /*
1066 * Compare s1 to s2 using safe memory accesses. The s1 data is assumed to be
1067 * unsafe memory specified by the DIF program. The s2 data is assumed to be
1068 * safe memory that we can access directly because it is managed by DTrace.
1069 */
1070 static int
1071 dtrace_bcmp(const void *s1, const void *s2, size_t len)
1072 {
1073 volatile uint16_t *flags;
1074
1075 flags = (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
1076
1077 if (s1 == s2)
1078 return (0);
1079
1080 if (s1 == NULL || s2 == NULL)
1081 return (1);
1082
1083 if (s1 != s2 && len != 0) {
1084 const uint8_t *ps1 = s1;
1085 const uint8_t *ps2 = s2;
1086
1087 do {
1088 if (dtrace_load8((uintptr_t)ps1++) != *ps2++)
1089 return (1);
1090 } while (--len != 0 && !(*flags & CPU_DTRACE_FAULT));
1091 }
1092 return (0);
1093 }
1094
1095 /*
1096 * Zero the specified region using a simple byte-by-byte loop. Note that this
1097 * is for safe DTrace-managed memory only.
1098 */
1099 static void
1100 dtrace_bzero(void *dst, size_t len)
1101 {
1102 uchar_t *cp;
1103
1104 for (cp = dst; len != 0; len--)
1105 *cp++ = 0;
1106 }
1107
1108 static void
1109 dtrace_add_128(uint64_t *addend1, uint64_t *addend2, uint64_t *sum)
1110 {
1111 uint64_t result[2];
1112
1113 result[0] = addend1[0] + addend2[0];
1114 result[1] = addend1[1] + addend2[1] +
1115 (result[0] < addend1[0] || result[0] < addend2[0] ? 1 : 0);
1116
1117 sum[0] = result[0];
1118 sum[1] = result[1];
1119 }
1120
1121 /*
1122 * Shift the 128-bit value in a by b. If b is positive, shift left.
1123 * If b is negative, shift right.
1124 */
1125 static void
1126 dtrace_shift_128(uint64_t *a, int b)
1127 {
1128 uint64_t mask;
1129
1130 if (b == 0)
1131 return;
1132
1133 if (b < 0) {
1134 b = -b;
1135 if (b >= 64) {
1136 a[0] = a[1] >> (b - 64);
1137 a[1] = 0;
1138 } else {
1139 a[0] >>= b;
1140 mask = 1LL << (64 - b);
1141 mask -= 1;
1142 a[0] |= ((a[1] & mask) << (64 - b));
1143 a[1] >>= b;
1144 }
1145 } else {
1146 if (b >= 64) {
1147 a[1] = a[0] << (b - 64);
1148 a[0] = 0;
1149 } else {
1150 a[1] <<= b;
1151 mask = a[0] >> (64 - b);
1152 a[1] |= mask;
1153 a[0] <<= b;
1154 }
1155 }
1156 }
1157
1158 /*
1159 * The basic idea is to break the 2 64-bit values into 4 32-bit values,
1160 * use native multiplication on those, and then re-combine into the
1161 * resulting 128-bit value.
1162 *
1163 * (hi1 << 32 + lo1) * (hi2 << 32 + lo2) =
1164 * hi1 * hi2 << 64 +
1165 * hi1 * lo2 << 32 +
1166 * hi2 * lo1 << 32 +
1167 * lo1 * lo2
1168 */
1169 static void
1170 dtrace_multiply_128(uint64_t factor1, uint64_t factor2, uint64_t *product)
1171 {
1172 uint64_t hi1, hi2, lo1, lo2;
1173 uint64_t tmp[2];
1174
1175 hi1 = factor1 >> 32;
1176 hi2 = factor2 >> 32;
1177
1178 lo1 = factor1 & DT_MASK_LO;
1179 lo2 = factor2 & DT_MASK_LO;
1180
1181 product[0] = lo1 * lo2;
1182 product[1] = hi1 * hi2;
1183
1184 tmp[0] = hi1 * lo2;
1185 tmp[1] = 0;
1186 dtrace_shift_128(tmp, 32);
1187 dtrace_add_128(product, tmp, product);
1188
1189 tmp[0] = hi2 * lo1;
1190 tmp[1] = 0;
1191 dtrace_shift_128(tmp, 32);
1192 dtrace_add_128(product, tmp, product);
1193 }
1194
1195 /*
1196 * This privilege check should be used by actions and subroutines to
1197 * verify that the user credentials of the process that enabled the
1198 * invoking ECB match the target credentials
1199 */
1200 static int
1201 dtrace_priv_proc_common_user(dtrace_state_t *state)
1202 {
1203 cred_t *cr, *s_cr = state->dts_cred.dcr_cred;
1204
1205 /*
1206 * We should always have a non-NULL state cred here, since if cred
1207 * is null (anonymous tracing), we fast-path bypass this routine.
1208 */
1209 ASSERT(s_cr != NULL);
1210
1211 if ((cr = CRED()) != NULL &&
1212 s_cr->cr_uid == cr->cr_uid &&
1213 s_cr->cr_uid == cr->cr_ruid &&
1214 s_cr->cr_uid == cr->cr_suid &&
1215 s_cr->cr_gid == cr->cr_gid &&
1216 s_cr->cr_gid == cr->cr_rgid &&
1217 s_cr->cr_gid == cr->cr_sgid)
1218 return (1);
1219
1220 return (0);
1221 }
1222
1223 /*
1224 * This privilege check should be used by actions and subroutines to
1225 * verify that the zone of the process that enabled the invoking ECB
1226 * matches the target credentials
1227 */
1228 static int
1229 dtrace_priv_proc_common_zone(dtrace_state_t *state)
1230 {
1231 cred_t *cr, *s_cr = state->dts_cred.dcr_cred;
1232
1233 /*
1234 * We should always have a non-NULL state cred here, since if cred
1235 * is null (anonymous tracing), we fast-path bypass this routine.
1236 */
1237 ASSERT(s_cr != NULL);
1238
1239 if ((cr = CRED()) != NULL && s_cr->cr_zone == cr->cr_zone)
1240 return (1);
1241
1242 return (0);
1243 }
1244
1245 /*
1246 * This privilege check should be used by actions and subroutines to
1247 * verify that the process has not setuid or changed credentials.
1248 */
1249 static int
1250 dtrace_priv_proc_common_nocd()
1251 {
1252 proc_t *proc;
1253
1254 if ((proc = ttoproc(curthread)) != NULL &&
1255 !(proc->p_flag & SNOCD))
1256 return (1);
1257
1258 return (0);
1259 }
1260
1261 static int
1262 dtrace_priv_proc_destructive(dtrace_state_t *state, dtrace_mstate_t *mstate)
1263 {
1264 int action = state->dts_cred.dcr_action;
1265
1266 if (!(mstate->dtms_access & DTRACE_ACCESS_PROC))
1267 goto bad;
1268
1269 if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE) == 0) &&
1270 dtrace_priv_proc_common_zone(state) == 0)
1271 goto bad;
1272
1273 if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER) == 0) &&
1274 dtrace_priv_proc_common_user(state) == 0)
1275 goto bad;
1276
1277 if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG) == 0) &&
1278 dtrace_priv_proc_common_nocd() == 0)
1279 goto bad;
1280
1281 return (1);
1282
1283 bad:
1284 cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;
1285
1286 return (0);
1287 }
1288
1289 static int
1290 dtrace_priv_proc_control(dtrace_state_t *state, dtrace_mstate_t *mstate)
1291 {
1292 if (mstate->dtms_access & DTRACE_ACCESS_PROC) {
1293 if (state->dts_cred.dcr_action & DTRACE_CRA_PROC_CONTROL)
1294 return (1);
1295
1296 if (dtrace_priv_proc_common_zone(state) &&
1297 dtrace_priv_proc_common_user(state) &&
1298 dtrace_priv_proc_common_nocd())
1299 return (1);
1300 }
1301
1302 cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;
1303
1304 return (0);
1305 }
1306
1307 static int
1308 dtrace_priv_proc(dtrace_state_t *state, dtrace_mstate_t *mstate)
1309 {
1310 if ((mstate->dtms_access & DTRACE_ACCESS_PROC) &&
1311 (state->dts_cred.dcr_action & DTRACE_CRA_PROC))
1312 return (1);
1313
1314 cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;
1315
1316 return (0);
1317 }
1318
1319 static int
1320 dtrace_priv_kernel(dtrace_state_t *state)
1321 {
1322 if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL)
1323 return (1);
1324
1325 cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV;
1326
1327 return (0);
1328 }
1329
1330 static int
1331 dtrace_priv_kernel_destructive(dtrace_state_t *state)
1332 {
1333 if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL_DESTRUCTIVE)
1334 return (1);
1335
1336 cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV;
1337
1338 return (0);
1339 }
1340
1341 /*
1342 * Determine if the dte_cond of the specified ECB allows for processing of
1343 * the current probe to continue. Note that this routine may allow continued
1344 * processing, but with access(es) stripped from the mstate's dtms_access
1345 * field.
1346 */
1347 static int
1348 dtrace_priv_probe(dtrace_state_t *state, dtrace_mstate_t *mstate,
1349 dtrace_ecb_t *ecb)
1350 {
1351 dtrace_probe_t *probe = ecb->dte_probe;
1352 dtrace_provider_t *prov = probe->dtpr_provider;
1353 dtrace_pops_t *pops = &prov->dtpv_pops;
1354 int mode = DTRACE_MODE_NOPRIV_DROP;
1355
1356 ASSERT(ecb->dte_cond);
1357
1358 if (pops->dtps_mode != NULL) {
1359 mode = pops->dtps_mode(prov->dtpv_arg,
1360 probe->dtpr_id, probe->dtpr_arg);
1361
1362 ASSERT(mode & (DTRACE_MODE_USER | DTRACE_MODE_KERNEL));
1363 ASSERT(mode & (DTRACE_MODE_NOPRIV_RESTRICT |
1364 DTRACE_MODE_NOPRIV_DROP));
1365 }
1366
1367 /*
1368 * If the dte_cond bits indicate that this consumer is only allowed to
1369 * see user-mode firings of this probe, check that the probe was fired
1370 * while in a user context. If that's not the case, use the policy
1371 * specified by the provider to determine if we drop the probe or
1372 * merely restrict operation.
1373 */
1374 if (ecb->dte_cond & DTRACE_COND_USERMODE) {
1375 ASSERT(mode != DTRACE_MODE_NOPRIV_DROP);
1376
1377 if (!(mode & DTRACE_MODE_USER)) {
1378 if (mode & DTRACE_MODE_NOPRIV_DROP)
1379 return (0);
1380
1381 mstate->dtms_access &= ~DTRACE_ACCESS_ARGS;
1382 }
1383 }
1384
1385 /*
1386 * This is more subtle than it looks. We have to be absolutely certain
1387 * that CRED() isn't going to change out from under us so it's only
1388 * legit to examine that structure if we're in constrained situations.
1389 * Currently, the only times we'll this check is if a non-super-user
1390 * has enabled the profile or syscall providers -- providers that
1391 * allow visibility of all processes. For the profile case, the check
1392 * above will ensure that we're examining a user context.
1393 */
1394 if (ecb->dte_cond & DTRACE_COND_OWNER) {
1395 cred_t *cr;
1396 cred_t *s_cr = state->dts_cred.dcr_cred;
1397 proc_t *proc;
1398
1399 ASSERT(s_cr != NULL);
1400
1401 if ((cr = CRED()) == NULL ||
1402 s_cr->cr_uid != cr->cr_uid ||
1403 s_cr->cr_uid != cr->cr_ruid ||
1404 s_cr->cr_uid != cr->cr_suid ||
1405 s_cr->cr_gid != cr->cr_gid ||
1406 s_cr->cr_gid != cr->cr_rgid ||
1407 s_cr->cr_gid != cr->cr_sgid ||
1408 (proc = ttoproc(curthread)) == NULL ||
1409 (proc->p_flag & SNOCD)) {
1410 if (mode & DTRACE_MODE_NOPRIV_DROP)
1411 return (0);
1412
1413 mstate->dtms_access &= ~DTRACE_ACCESS_PROC;
1414 }
1415 }
1416
1417 /*
1418 * If our dte_cond is set to DTRACE_COND_ZONEOWNER and we are not
1419 * in our zone, check to see if our mode policy is to restrict rather
1420 * than to drop; if to restrict, strip away both DTRACE_ACCESS_PROC
1421 * and DTRACE_ACCESS_ARGS
1422 */
1423 if (ecb->dte_cond & DTRACE_COND_ZONEOWNER) {
1424 cred_t *cr;
1425 cred_t *s_cr = state->dts_cred.dcr_cred;
1426
1427 ASSERT(s_cr != NULL);
1428
1429 if ((cr = CRED()) == NULL ||
1430 s_cr->cr_zone->zone_id != cr->cr_zone->zone_id) {
1431 if (mode & DTRACE_MODE_NOPRIV_DROP)
1432 return (0);
1433
1434 mstate->dtms_access &=
1435 ~(DTRACE_ACCESS_PROC | DTRACE_ACCESS_ARGS);
1436 }
1437 }
1438
1439 /*
1440 * By merits of being in this code path at all, we have limited
1441 * privileges. If the provider has indicated that limited privileges
1442 * are to denote restricted operation, strip off the ability to access
1443 * arguments.
1444 */
1445 if (mode & DTRACE_MODE_LIMITEDPRIV_RESTRICT)
1446 mstate->dtms_access &= ~DTRACE_ACCESS_ARGS;
1447
1448 return (1);
1449 }
1450
1451 /*
1452 * Note: not called from probe context. This function is called
1453 * asynchronously (and at a regular interval) from outside of probe context to
1454 * clean the dirty dynamic variable lists on all CPUs. Dynamic variable
1455 * cleaning is explained in detail in <sys/dtrace_impl.h>.
1456 */
1457 void
1458 dtrace_dynvar_clean(dtrace_dstate_t *dstate)
1459 {
1460 dtrace_dynvar_t *dirty;
1461 dtrace_dstate_percpu_t *dcpu;
1462 dtrace_dynvar_t **rinsep;
1463 int i, j, work = 0;
1464
1465 for (i = 0; i < NCPU; i++) {
1466 dcpu = &dstate->dtds_percpu[i];
1467 rinsep = &dcpu->dtdsc_rinsing;
1468
1469 /*
1470 * If the dirty list is NULL, there is no dirty work to do.
1471 */
1472 if (dcpu->dtdsc_dirty == NULL)
1473 continue;
1474
1475 if (dcpu->dtdsc_rinsing != NULL) {
1476 /*
1477 * If the rinsing list is non-NULL, then it is because
1478 * this CPU was selected to accept another CPU's
1479 * dirty list -- and since that time, dirty buffers
1480 * have accumulated. This is a highly unlikely
1481 * condition, but we choose to ignore the dirty
1482 * buffers -- they'll be picked up a future cleanse.
1483 */
1484 continue;
1485 }
1486
1487 if (dcpu->dtdsc_clean != NULL) {
1488 /*
1489 * If the clean list is non-NULL, then we're in a
1490 * situation where a CPU has done deallocations (we
1491 * have a non-NULL dirty list) but no allocations (we
1492 * also have a non-NULL clean list). We can't simply
1493 * move the dirty list into the clean list on this
1494 * CPU, yet we also don't want to allow this condition
1495 * to persist, lest a short clean list prevent a
1496 * massive dirty list from being cleaned (which in
1497 * turn could lead to otherwise avoidable dynamic
1498 * drops). To deal with this, we look for some CPU
1499 * with a NULL clean list, NULL dirty list, and NULL
1500 * rinsing list -- and then we borrow this CPU to
1501 * rinse our dirty list.
1502 */
1503 for (j = 0; j < NCPU; j++) {
1504 dtrace_dstate_percpu_t *rinser;
1505
1506 rinser = &dstate->dtds_percpu[j];
1507
1508 if (rinser->dtdsc_rinsing != NULL)
1509 continue;
1510
1511 if (rinser->dtdsc_dirty != NULL)
1512 continue;
1513
1514 if (rinser->dtdsc_clean != NULL)
1515 continue;
1516
1517 rinsep = &rinser->dtdsc_rinsing;
1518 break;
1519 }
1520
1521 if (j == NCPU) {
1522 /*
1523 * We were unable to find another CPU that
1524 * could accept this dirty list -- we are
1525 * therefore unable to clean it now.
1526 */
1527 dtrace_dynvar_failclean++;
1528 continue;
1529 }
1530 }
1531
1532 work = 1;
1533
1534 /*
1535 * Atomically move the dirty list aside.
1536 */
1537 do {
1538 dirty = dcpu->dtdsc_dirty;
1539
1540 /*
1541 * Before we zap the dirty list, set the rinsing list.
1542 * (This allows for a potential assertion in
1543 * dtrace_dynvar(): if a free dynamic variable appears
1544 * on a hash chain, either the dirty list or the
1545 * rinsing list for some CPU must be non-NULL.)
1546 */
1547 *rinsep = dirty;
1548 dtrace_membar_producer();
1549 } while (dtrace_casptr(&dcpu->dtdsc_dirty,
1550 dirty, NULL) != dirty);
1551 }
1552
1553 if (!work) {
1554 /*
1555 * We have no work to do; we can simply return.
1556 */
1557 return;
1558 }
1559
1560 dtrace_sync();
1561
1562 for (i = 0; i < NCPU; i++) {
1563 dcpu = &dstate->dtds_percpu[i];
1564
1565 if (dcpu->dtdsc_rinsing == NULL)
1566 continue;
1567
1568 /*
1569 * We are now guaranteed that no hash chain contains a pointer
1570 * into this dirty list; we can make it clean.
1571 */
1572 ASSERT(dcpu->dtdsc_clean == NULL);
1573 dcpu->dtdsc_clean = dcpu->dtdsc_rinsing;
1574 dcpu->dtdsc_rinsing = NULL;
1575 }
1576
1577 /*
1578 * Before we actually set the state to be DTRACE_DSTATE_CLEAN, make
1579 * sure that all CPUs have seen all of the dtdsc_clean pointers.
1580 * This prevents a race whereby a CPU incorrectly decides that
1581 * the state should be something other than DTRACE_DSTATE_CLEAN
1582 * after dtrace_dynvar_clean() has completed.
1583 */
1584 dtrace_sync();
1585
1586 dstate->dtds_state = DTRACE_DSTATE_CLEAN;
1587 }
1588
1589 /*
1590 * Depending on the value of the op parameter, this function looks-up,
1591 * allocates or deallocates an arbitrarily-keyed dynamic variable. If an
1592 * allocation is requested, this function will return a pointer to a
1593 * dtrace_dynvar_t corresponding to the allocated variable -- or NULL if no
1594 * variable can be allocated. If NULL is returned, the appropriate counter
1595 * will be incremented.
1596 */
1597 dtrace_dynvar_t *
1598 dtrace_dynvar(dtrace_dstate_t *dstate, uint_t nkeys,
1599 dtrace_key_t *key, size_t dsize, dtrace_dynvar_op_t op,
1600 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate)
1601 {
1602 uint64_t hashval = DTRACE_DYNHASH_VALID;
1603 dtrace_dynhash_t *hash = dstate->dtds_hash;
1604 dtrace_dynvar_t *free, *new_free, *next, *dvar, *start, *prev = NULL;
1605 processorid_t me = CPU->cpu_id, cpu = me;
1606 dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[me];
1607 size_t bucket, ksize;
1608 size_t chunksize = dstate->dtds_chunksize;
1609 uintptr_t kdata, lock, nstate;
1610 uint_t i;
1611
1612 ASSERT(nkeys != 0);
1613
1614 /*
1615 * Hash the key. As with aggregations, we use Jenkins' "One-at-a-time"
1616 * algorithm. For the by-value portions, we perform the algorithm in
1617 * 16-bit chunks (as opposed to 8-bit chunks). This speeds things up a
1618 * bit, and seems to have only a minute effect on distribution. For
1619 * the by-reference data, we perform "One-at-a-time" iterating (safely)
1620 * over each referenced byte. It's painful to do this, but it's much
1621 * better than pathological hash distribution. The efficacy of the
1622 * hashing algorithm (and a comparison with other algorithms) may be
1623 * found by running the ::dtrace_dynstat MDB dcmd.
1624 */
1625 for (i = 0; i < nkeys; i++) {
1626 if (key[i].dttk_size == 0) {
1627 uint64_t val = key[i].dttk_value;
1628
1629 hashval += (val >> 48) & 0xffff;
1630 hashval += (hashval << 10);
1631 hashval ^= (hashval >> 6);
1632
1633 hashval += (val >> 32) & 0xffff;
1634 hashval += (hashval << 10);
1635 hashval ^= (hashval >> 6);
1636
1637 hashval += (val >> 16) & 0xffff;
1638 hashval += (hashval << 10);
1639 hashval ^= (hashval >> 6);
1640
1641 hashval += val & 0xffff;
1642 hashval += (hashval << 10);
1643 hashval ^= (hashval >> 6);
1644 } else {
1645 /*
1646 * This is incredibly painful, but it beats the hell
1647 * out of the alternative.
1648 */
1649 uint64_t j, size = key[i].dttk_size;
1650 uintptr_t base = (uintptr_t)key[i].dttk_value;
1651
1652 if (!dtrace_canload(base, size, mstate, vstate))
1653 break;
1654
1655 for (j = 0; j < size; j++) {
1656 hashval += dtrace_load8(base + j);
1657 hashval += (hashval << 10);
1658 hashval ^= (hashval >> 6);
1659 }
1660 }
1661 }
1662
1663 if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
1664 return (NULL);
1665
1666 hashval += (hashval << 3);
1667 hashval ^= (hashval >> 11);
1668 hashval += (hashval << 15);
1669
1670 /*
1671 * There is a remote chance (ideally, 1 in 2^31) that our hashval
1672 * comes out to be one of our two sentinel hash values. If this
1673 * actually happens, we set the hashval to be a value known to be a
1674 * non-sentinel value.
1675 */
1676 if (hashval == DTRACE_DYNHASH_FREE || hashval == DTRACE_DYNHASH_SINK)
1677 hashval = DTRACE_DYNHASH_VALID;
1678
1679 /*
1680 * Yes, it's painful to do a divide here. If the cycle count becomes
1681 * important here, tricks can be pulled to reduce it. (However, it's
1682 * critical that hash collisions be kept to an absolute minimum;
1683 * they're much more painful than a divide.) It's better to have a
1684 * solution that generates few collisions and still keeps things
1685 * relatively simple.
1686 */
1687 bucket = hashval % dstate->dtds_hashsize;
1688
1689 if (op == DTRACE_DYNVAR_DEALLOC) {
1690 volatile uintptr_t *lockp = &hash[bucket].dtdh_lock;
1691
1692 for (;;) {
1693 while ((lock = *lockp) & 1)
1694 continue;
1695
1696 if (dtrace_casptr((void *)lockp,
1697 (void *)lock, (void *)(lock + 1)) == (void *)lock)
1698 break;
1699 }
1700
1701 dtrace_membar_producer();
1702 }
1703
1704 top:
1705 prev = NULL;
1706 lock = hash[bucket].dtdh_lock;
1707
1708 dtrace_membar_consumer();
1709
1710 start = hash[bucket].dtdh_chain;
1711 ASSERT(start != NULL && (start->dtdv_hashval == DTRACE_DYNHASH_SINK ||
1712 start->dtdv_hashval != DTRACE_DYNHASH_FREE ||
1713 op != DTRACE_DYNVAR_DEALLOC));
1714
1715 for (dvar = start; dvar != NULL; dvar = dvar->dtdv_next) {
1716 dtrace_tuple_t *dtuple = &dvar->dtdv_tuple;
1717 dtrace_key_t *dkey = &dtuple->dtt_key[0];
1718
1719 if (dvar->dtdv_hashval != hashval) {
1720 if (dvar->dtdv_hashval == DTRACE_DYNHASH_SINK) {
1721 /*
1722 * We've reached the sink, and therefore the
1723 * end of the hash chain; we can kick out of
1724 * the loop knowing that we have seen a valid
1725 * snapshot of state.
1726 */
1727 ASSERT(dvar->dtdv_next == NULL);
1728 ASSERT(dvar == &dtrace_dynhash_sink);
1729 break;
1730 }
1731
1732 if (dvar->dtdv_hashval == DTRACE_DYNHASH_FREE) {
1733 /*
1734 * We've gone off the rails: somewhere along
1735 * the line, one of the members of this hash
1736 * chain was deleted. Note that we could also
1737 * detect this by simply letting this loop run
1738 * to completion, as we would eventually hit
1739 * the end of the dirty list. However, we
1740 * want to avoid running the length of the
1741 * dirty list unnecessarily (it might be quite
1742 * long), so we catch this as early as
1743 * possible by detecting the hash marker. In
1744 * this case, we simply set dvar to NULL and
1745 * break; the conditional after the loop will
1746 * send us back to top.
1747 */
1748 dvar = NULL;
1749 break;
1750 }
1751
1752 goto next;
1753 }
1754
1755 if (dtuple->dtt_nkeys != nkeys)
1756 goto next;
1757
1758 for (i = 0; i < nkeys; i++, dkey++) {
1759 if (dkey->dttk_size != key[i].dttk_size)
1760 goto next; /* size or type mismatch */
1761
1762 if (dkey->dttk_size != 0) {
1763 if (dtrace_bcmp(
1764 (void *)(uintptr_t)key[i].dttk_value,
1765 (void *)(uintptr_t)dkey->dttk_value,
1766 dkey->dttk_size))
1767 goto next;
1768 } else {
1769 if (dkey->dttk_value != key[i].dttk_value)
1770 goto next;
1771 }
1772 }
1773
1774 if (op != DTRACE_DYNVAR_DEALLOC)
1775 return (dvar);
1776
1777 ASSERT(dvar->dtdv_next == NULL ||
1778 dvar->dtdv_next->dtdv_hashval != DTRACE_DYNHASH_FREE);
1779
1780 if (prev != NULL) {
1781 ASSERT(hash[bucket].dtdh_chain != dvar);
1782 ASSERT(start != dvar);
1783 ASSERT(prev->dtdv_next == dvar);
1784 prev->dtdv_next = dvar->dtdv_next;
1785 } else {
1786 if (dtrace_casptr(&hash[bucket].dtdh_chain,
1787 start, dvar->dtdv_next) != start) {
1788 /*
1789 * We have failed to atomically swing the
1790 * hash table head pointer, presumably because
1791 * of a conflicting allocation on another CPU.
1792 * We need to reread the hash chain and try
1793 * again.
1794 */
1795 goto top;
1796 }
1797 }
1798
1799 dtrace_membar_producer();
1800
1801 /*
1802 * Now set the hash value to indicate that it's free.
1803 */
1804 ASSERT(hash[bucket].dtdh_chain != dvar);
1805 dvar->dtdv_hashval = DTRACE_DYNHASH_FREE;
1806
1807 dtrace_membar_producer();
1808
1809 /*
1810 * Set the next pointer to point at the dirty list, and
1811 * atomically swing the dirty pointer to the newly freed dvar.
1812 */
1813 do {
1814 next = dcpu->dtdsc_dirty;
1815 dvar->dtdv_next = next;
1816 } while (dtrace_casptr(&dcpu->dtdsc_dirty, next, dvar) != next);
1817
1818 /*
1819 * Finally, unlock this hash bucket.
1820 */
1821 ASSERT(hash[bucket].dtdh_lock == lock);
1822 ASSERT(lock & 1);
1823 hash[bucket].dtdh_lock++;
1824
1825 return (NULL);
1826 next:
1827 prev = dvar;
1828 continue;
1829 }
1830
1831 if (dvar == NULL) {
1832 /*
1833 * If dvar is NULL, it is because we went off the rails:
1834 * one of the elements that we traversed in the hash chain
1835 * was deleted while we were traversing it. In this case,
1836 * we assert that we aren't doing a dealloc (deallocs lock
1837 * the hash bucket to prevent themselves from racing with
1838 * one another), and retry the hash chain traversal.
1839 */
1840 ASSERT(op != DTRACE_DYNVAR_DEALLOC);
1841 goto top;
1842 }
1843
1844 if (op != DTRACE_DYNVAR_ALLOC) {
1845 /*
1846 * If we are not to allocate a new variable, we want to
1847 * return NULL now. Before we return, check that the value
1848 * of the lock word hasn't changed. If it has, we may have
1849 * seen an inconsistent snapshot.
1850 */
1851 if (op == DTRACE_DYNVAR_NOALLOC) {
1852 if (hash[bucket].dtdh_lock != lock)
1853 goto top;
1854 } else {
1855 ASSERT(op == DTRACE_DYNVAR_DEALLOC);
1856 ASSERT(hash[bucket].dtdh_lock == lock);
1857 ASSERT(lock & 1);
1858 hash[bucket].dtdh_lock++;
1859 }
1860
1861 return (NULL);
1862 }
1863
1864 /*
1865 * We need to allocate a new dynamic variable. The size we need is the
1866 * size of dtrace_dynvar plus the size of nkeys dtrace_key_t's plus the
1867 * size of any auxiliary key data (rounded up to 8-byte alignment) plus
1868 * the size of any referred-to data (dsize). We then round the final
1869 * size up to the chunksize for allocation.
1870 */
1871 for (ksize = 0, i = 0; i < nkeys; i++)
1872 ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t));
1873
1874 /*
1875 * This should be pretty much impossible, but could happen if, say,
1876 * strange DIF specified the tuple. Ideally, this should be an
1877 * assertion and not an error condition -- but that requires that the
1878 * chunksize calculation in dtrace_difo_chunksize() be absolutely
1879 * bullet-proof. (That is, it must not be able to be fooled by
1880 * malicious DIF.) Given the lack of backwards branches in DIF,
1881 * solving this would presumably not amount to solving the Halting
1882 * Problem -- but it still seems awfully hard.
1883 */
1884 if (sizeof (dtrace_dynvar_t) + sizeof (dtrace_key_t) * (nkeys - 1) +
1885 ksize + dsize > chunksize) {
1886 dcpu->dtdsc_drops++;
1887 return (NULL);
1888 }
1889
1890 nstate = DTRACE_DSTATE_EMPTY;
1891
1892 do {
1893 retry:
1894 free = dcpu->dtdsc_free;
1895
1896 if (free == NULL) {
1897 dtrace_dynvar_t *clean = dcpu->dtdsc_clean;
1898 void *rval;
1899
1900 if (clean == NULL) {
1901 /*
1902 * We're out of dynamic variable space on
1903 * this CPU. Unless we have tried all CPUs,
1904 * we'll try to allocate from a different
1905 * CPU.
1906 */
1907 switch (dstate->dtds_state) {
1908 case DTRACE_DSTATE_CLEAN: {
1909 void *sp = &dstate->dtds_state;
1910
1911 if (++cpu >= NCPU)
1912 cpu = 0;
1913
1914 if (dcpu->dtdsc_dirty != NULL &&
1915 nstate == DTRACE_DSTATE_EMPTY)
1916 nstate = DTRACE_DSTATE_DIRTY;
1917
1918 if (dcpu->dtdsc_rinsing != NULL)
1919 nstate = DTRACE_DSTATE_RINSING;
1920
1921 dcpu = &dstate->dtds_percpu[cpu];
1922
1923 if (cpu != me)
1924 goto retry;
1925
1926 (void) dtrace_cas32(sp,
1927 DTRACE_DSTATE_CLEAN, nstate);
1928
1929 /*
1930 * To increment the correct bean
1931 * counter, take another lap.
1932 */
1933 goto retry;
1934 }
1935
1936 case DTRACE_DSTATE_DIRTY:
1937 dcpu->dtdsc_dirty_drops++;
1938 break;
1939
1940 case DTRACE_DSTATE_RINSING:
1941 dcpu->dtdsc_rinsing_drops++;
1942 break;
1943
1944 case DTRACE_DSTATE_EMPTY:
1945 dcpu->dtdsc_drops++;
1946 break;
1947 }
1948
1949 DTRACE_CPUFLAG_SET(CPU_DTRACE_DROP);
1950 return (NULL);
1951 }
1952
1953 /*
1954 * The clean list appears to be non-empty. We want to
1955 * move the clean list to the free list; we start by
1956 * moving the clean pointer aside.
1957 */
1958 if (dtrace_casptr(&dcpu->dtdsc_clean,
1959 clean, NULL) != clean) {
1960 /*
1961 * We are in one of two situations:
1962 *
1963 * (a) The clean list was switched to the
1964 * free list by another CPU.
1965 *
1966 * (b) The clean list was added to by the
1967 * cleansing cyclic.
1968 *
1969 * In either of these situations, we can
1970 * just reattempt the free list allocation.
1971 */
1972 goto retry;
1973 }
1974
1975 ASSERT(clean->dtdv_hashval == DTRACE_DYNHASH_FREE);
1976
1977 /*
1978 * Now we'll move the clean list to our free list.
1979 * It's impossible for this to fail: the only way
1980 * the free list can be updated is through this
1981 * code path, and only one CPU can own the clean list.
1982 * Thus, it would only be possible for this to fail if
1983 * this code were racing with dtrace_dynvar_clean().
1984 * (That is, if dtrace_dynvar_clean() updated the clean
1985 * list, and we ended up racing to update the free
1986 * list.) This race is prevented by the dtrace_sync()
1987 * in dtrace_dynvar_clean() -- which flushes the
1988 * owners of the clean lists out before resetting
1989 * the clean lists.
1990 */
1991 dcpu = &dstate->dtds_percpu[me];
1992 rval = dtrace_casptr(&dcpu->dtdsc_free, NULL, clean);
1993 ASSERT(rval == NULL);
1994 goto retry;
1995 }
1996
1997 dvar = free;
1998 new_free = dvar->dtdv_next;
1999 } while (dtrace_casptr(&dcpu->dtdsc_free, free, new_free) != free);
2000
2001 /*
2002 * We have now allocated a new chunk. We copy the tuple keys into the
2003 * tuple array and copy any referenced key data into the data space
2004 * following the tuple array. As we do this, we relocate dttk_value
2005 * in the final tuple to point to the key data address in the chunk.
2006 */
2007 kdata = (uintptr_t)&dvar->dtdv_tuple.dtt_key[nkeys];
2008 dvar->dtdv_data = (void *)(kdata + ksize);
2009 dvar->dtdv_tuple.dtt_nkeys = nkeys;
2010
2011 for (i = 0; i < nkeys; i++) {
2012 dtrace_key_t *dkey = &dvar->dtdv_tuple.dtt_key[i];
2013 size_t kesize = key[i].dttk_size;
2014
2015 if (kesize != 0) {
2016 dtrace_bcopy(
2017 (const void *)(uintptr_t)key[i].dttk_value,
2018 (void *)kdata, kesize);
2019 dkey->dttk_value = kdata;
2020 kdata += P2ROUNDUP(kesize, sizeof (uint64_t));
2021 } else {
2022 dkey->dttk_value = key[i].dttk_value;
2023 }
2024
2025 dkey->dttk_size = kesize;
2026 }
2027
2028 ASSERT(dvar->dtdv_hashval == DTRACE_DYNHASH_FREE);
2029 dvar->dtdv_hashval = hashval;
2030 dvar->dtdv_next = start;
2031
2032 if (dtrace_casptr(&hash[bucket].dtdh_chain, start, dvar) == start)
2033 return (dvar);
2034
2035 /*
2036 * The cas has failed. Either another CPU is adding an element to
2037 * this hash chain, or another CPU is deleting an element from this
2038 * hash chain. The simplest way to deal with both of these cases
2039 * (though not necessarily the most efficient) is to free our
2040 * allocated block and tail-call ourselves. Note that the free is
2041 * to the dirty list and _not_ to the free list. This is to prevent
2042 * races with allocators, above.
2043 */
2044 dvar->dtdv_hashval = DTRACE_DYNHASH_FREE;
2045
2046 dtrace_membar_producer();
2047
2048 do {
2049 free = dcpu->dtdsc_dirty;
2050 dvar->dtdv_next = free;
2051 } while (dtrace_casptr(&dcpu->dtdsc_dirty, free, dvar) != free);
2052
2053 return (dtrace_dynvar(dstate, nkeys, key, dsize, op, mstate, vstate));
2054 }
2055
2056 /*ARGSUSED*/
2057 static void
2058 dtrace_aggregate_min(uint64_t *oval, uint64_t nval, uint64_t arg)
2059 {
2060 if ((int64_t)nval < (int64_t)*oval)
2061 *oval = nval;
2062 }
2063
2064 /*ARGSUSED*/
2065 static void
2066 dtrace_aggregate_max(uint64_t *oval, uint64_t nval, uint64_t arg)
2067 {
2068 if ((int64_t)nval > (int64_t)*oval)
2069 *oval = nval;
2070 }
2071
2072 static void
2073 dtrace_aggregate_quantize(uint64_t *quanta, uint64_t nval, uint64_t incr)
2074 {
2075 int i, zero = DTRACE_QUANTIZE_ZEROBUCKET;
2076 int64_t val = (int64_t)nval;
2077
2078 if (val < 0) {
2079 for (i = 0; i < zero; i++) {
2080 if (val <= DTRACE_QUANTIZE_BUCKETVAL(i)) {
2081 quanta[i] += incr;
2082 return;
2083 }
2084 }
2085 } else {
2086 for (i = zero + 1; i < DTRACE_QUANTIZE_NBUCKETS; i++) {
2087 if (val < DTRACE_QUANTIZE_BUCKETVAL(i)) {
2088 quanta[i - 1] += incr;
2089 return;
2090 }
2091 }
2092
2093 quanta[DTRACE_QUANTIZE_NBUCKETS - 1] += incr;
2094 return;
2095 }
2096
2097 ASSERT(0);
2098 }
2099
2100 static void
2101 dtrace_aggregate_lquantize(uint64_t *lquanta, uint64_t nval, uint64_t incr)
2102 {
2103 uint64_t arg = *lquanta++;
2104 int32_t base = DTRACE_LQUANTIZE_BASE(arg);
2105 uint16_t step = DTRACE_LQUANTIZE_STEP(arg);
2106 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(arg);
2107 int32_t val = (int32_t)nval, level;
2108
2109 ASSERT(step != 0);
2110 ASSERT(levels != 0);
2111
2112 if (val < base) {
2113 /*
2114 * This is an underflow.
2115 */
2116 lquanta[0] += incr;
2117 return;
2118 }
2119
2120 level = (val - base) / step;
2121
2122 if (level < levels) {
2123 lquanta[level + 1] += incr;
2124 return;
2125 }
2126
2127 /*
2128 * This is an overflow.
2129 */
2130 lquanta[levels + 1] += incr;
2131 }
2132
2133 static int
2134 dtrace_aggregate_llquantize_bucket(uint16_t factor, uint16_t low,
2135 uint16_t high, uint16_t nsteps, int64_t value)
2136 {
2137 int64_t this = 1, last, next;
2138 int base = 1, order;
2139
2140 ASSERT(factor <= nsteps);
2141 ASSERT(nsteps % factor == 0);
2142
2143 for (order = 0; order < low; order++)
2144 this *= factor;
2145
2146 /*
2147 * If our value is less than our factor taken to the power of the
2148 * low order of magnitude, it goes into the zeroth bucket.
2149 */
2150 if (value < (last = this))
2151 return (0);
2152
2153 for (this *= factor; order <= high; order++) {
2154 int nbuckets = this > nsteps ? nsteps : this;
2155
2156 if ((next = this * factor) < this) {
2157 /*
2158 * We should not generally get log/linear quantizations
2159 * with a high magnitude that allows 64-bits to
2160 * overflow, but we nonetheless protect against this
2161 * by explicitly checking for overflow, and clamping
2162 * our value accordingly.
2163 */
2164 value = this - 1;
2165 }
2166
2167 if (value < this) {
2168 /*
2169 * If our value lies within this order of magnitude,
2170 * determine its position by taking the offset within
2171 * the order of magnitude, dividing by the bucket
2172 * width, and adding to our (accumulated) base.
2173 */
2174 return (base + (value - last) / (this / nbuckets));
2175 }
2176
2177 base += nbuckets - (nbuckets / factor);
2178 last = this;
2179 this = next;
2180 }
2181
2182 /*
2183 * Our value is greater than or equal to our factor taken to the
2184 * power of one plus the high magnitude -- return the top bucket.
2185 */
2186 return (base);
2187 }
2188
2189 static void
2190 dtrace_aggregate_llquantize(uint64_t *llquanta, uint64_t nval, uint64_t incr)
2191 {
2192 uint64_t arg = *llquanta++;
2193 uint16_t factor = DTRACE_LLQUANTIZE_FACTOR(arg);
2194 uint16_t low = DTRACE_LLQUANTIZE_LOW(arg);
2195 uint16_t high = DTRACE_LLQUANTIZE_HIGH(arg);
2196 uint16_t nsteps = DTRACE_LLQUANTIZE_NSTEP(arg);
2197
2198 llquanta[dtrace_aggregate_llquantize_bucket(factor,
2199 low, high, nsteps, nval)] += incr;
2200 }
2201
2202 /*ARGSUSED*/
2203 static void
2204 dtrace_aggregate_avg(uint64_t *data, uint64_t nval, uint64_t arg)
2205 {
2206 data[0]++;
2207 data[1] += nval;
2208 }
2209
2210 /*ARGSUSED*/
2211 static void
2212 dtrace_aggregate_stddev(uint64_t *data, uint64_t nval, uint64_t arg)
2213 {
2214 int64_t snval = (int64_t)nval;
2215 uint64_t tmp[2];
2216
2217 data[0]++;
2218 data[1] += nval;
2219
2220 /*
2221 * What we want to say here is:
2222 *
2223 * data[2] += nval * nval;
2224 *
2225 * But given that nval is 64-bit, we could easily overflow, so
2226 * we do this as 128-bit arithmetic.
2227 */
2228 if (snval < 0)
2229 snval = -snval;
2230
2231 dtrace_multiply_128((uint64_t)snval, (uint64_t)snval, tmp);
2232 dtrace_add_128(data + 2, tmp, data + 2);
2233 }
2234
2235 /*ARGSUSED*/
2236 static void
2237 dtrace_aggregate_count(uint64_t *oval, uint64_t nval, uint64_t arg)
2238 {
2239 *oval = *oval + 1;
2240 }
2241
2242 /*ARGSUSED*/
2243 static void
2244 dtrace_aggregate_sum(uint64_t *oval, uint64_t nval, uint64_t arg)
2245 {
2246 *oval += nval;
2247 }
2248
2249 /*
2250 * Aggregate given the tuple in the principal data buffer, and the aggregating
2251 * action denoted by the specified dtrace_aggregation_t. The aggregation
2252 * buffer is specified as the buf parameter. This routine does not return
2253 * failure; if there is no space in the aggregation buffer, the data will be
2254 * dropped, and a corresponding counter incremented.
2255 */
2256 static void
2257 dtrace_aggregate(dtrace_aggregation_t *agg, dtrace_buffer_t *dbuf,
2258 intptr_t offset, dtrace_buffer_t *buf, uint64_t expr, uint64_t arg)
2259 {
2260 dtrace_recdesc_t *rec = &agg->dtag_action.dta_rec;
2261 uint32_t i, ndx, size, fsize;
2262 uint32_t align = sizeof (uint64_t) - 1;
2263 dtrace_aggbuffer_t *agb;
2264 dtrace_aggkey_t *key;
2265 uint32_t hashval = 0, limit, isstr;
2266 caddr_t tomax, data, kdata;
2267 dtrace_actkind_t action;
2268 dtrace_action_t *act;
2269 uintptr_t offs;
2270
2271 if (buf == NULL)
2272 return;
2273
2274 if (!agg->dtag_hasarg) {
2275 /*
2276 * Currently, only quantize() and lquantize() take additional
2277 * arguments, and they have the same semantics: an increment
2278 * value that defaults to 1 when not present. If additional
2279 * aggregating actions take arguments, the setting of the
2280 * default argument value will presumably have to become more
2281 * sophisticated...
2282 */
2283 arg = 1;
2284 }
2285
2286 action = agg->dtag_action.dta_kind - DTRACEACT_AGGREGATION;
2287 size = rec->dtrd_offset - agg->dtag_base;
2288 fsize = size + rec->dtrd_size;
2289
2290 ASSERT(dbuf->dtb_tomax != NULL);
2291 data = dbuf->dtb_tomax + offset + agg->dtag_base;
2292
2293 if ((tomax = buf->dtb_tomax) == NULL) {
2294 dtrace_buffer_drop(buf);
2295 return;
2296 }
2297
2298 /*
2299 * The metastructure is always at the bottom of the buffer.
2300 */
2301 agb = (dtrace_aggbuffer_t *)(tomax + buf->dtb_size -
2302 sizeof (dtrace_aggbuffer_t));
2303
2304 if (buf->dtb_offset == 0) {
2305 /*
2306 * We just kludge up approximately 1/8th of the size to be
2307 * buckets. If this guess ends up being routinely
2308 * off-the-mark, we may need to dynamically readjust this
2309 * based on past performance.
2310 */
2311 uintptr_t hashsize = (buf->dtb_size >> 3) / sizeof (uintptr_t);
2312
2313 if ((uintptr_t)agb - hashsize * sizeof (dtrace_aggkey_t *) <
2314 (uintptr_t)tomax || hashsize == 0) {
2315 /*
2316 * We've been given a ludicrously small buffer;
2317 * increment our drop count and leave.
2318 */
2319 dtrace_buffer_drop(buf);
2320 return;
2321 }
2322
2323 /*
2324 * And now, a pathetic attempt to try to get a an odd (or
2325 * perchance, a prime) hash size for better hash distribution.
2326 */
2327 if (hashsize > (DTRACE_AGGHASHSIZE_SLEW << 3))
2328 hashsize -= DTRACE_AGGHASHSIZE_SLEW;
2329
2330 agb->dtagb_hashsize = hashsize;
2331 agb->dtagb_hash = (dtrace_aggkey_t **)((uintptr_t)agb -
2332 agb->dtagb_hashsize * sizeof (dtrace_aggkey_t *));
2333 agb->dtagb_free = (uintptr_t)agb->dtagb_hash;
2334
2335 for (i = 0; i < agb->dtagb_hashsize; i++)
2336 agb->dtagb_hash[i] = NULL;
2337 }
2338
2339 ASSERT(agg->dtag_first != NULL);
2340 ASSERT(agg->dtag_first->dta_intuple);
2341
2342 /*
2343 * Calculate the hash value based on the key. Note that we _don't_
2344 * include the aggid in the hashing (but we will store it as part of
2345 * the key). The hashing algorithm is Bob Jenkins' "One-at-a-time"
2346 * algorithm: a simple, quick algorithm that has no known funnels, and
2347 * gets good distribution in practice. The efficacy of the hashing
2348 * algorithm (and a comparison with other algorithms) may be found by
2349 * running the ::dtrace_aggstat MDB dcmd.
2350 */
2351 for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) {
2352 i = act->dta_rec.dtrd_offset - agg->dtag_base;
2353 limit = i + act->dta_rec.dtrd_size;
2354 ASSERT(limit <= size);
2355 isstr = DTRACEACT_ISSTRING(act);
2356
2357 for (; i < limit; i++) {
2358 hashval += data[i];
2359 hashval += (hashval << 10);
2360 hashval ^= (hashval >> 6);
2361
2362 if (isstr && data[i] == '\0')
2363 break;
2364 }
2365 }
2366
2367 hashval += (hashval << 3);
2368 hashval ^= (hashval >> 11);
2369 hashval += (hashval << 15);
2370
2371 /*
2372 * Yes, the divide here is expensive -- but it's generally the least
2373 * of the performance issues given the amount of data that we iterate
2374 * over to compute hash values, compare data, etc.
2375 */
2376 ndx = hashval % agb->dtagb_hashsize;
2377
2378 for (key = agb->dtagb_hash[ndx]; key != NULL; key = key->dtak_next) {
2379 ASSERT((caddr_t)key >= tomax);
2380 ASSERT((caddr_t)key < tomax + buf->dtb_size);
2381
2382 if (hashval != key->dtak_hashval || key->dtak_size != size)
2383 continue;
2384
2385 kdata = key->dtak_data;
2386 ASSERT(kdata >= tomax && kdata < tomax + buf->dtb_size);
2387
2388 for (act = agg->dtag_first; act->dta_intuple;
2389 act = act->dta_next) {
2390 i = act->dta_rec.dtrd_offset - agg->dtag_base;
2391 limit = i + act->dta_rec.dtrd_size;
2392 ASSERT(limit <= size);
2393 isstr = DTRACEACT_ISSTRING(act);
2394
2395 for (; i < limit; i++) {
2396 if (kdata[i] != data[i])
2397 goto next;
2398
2399 if (isstr && data[i] == '\0')
2400 break;
2401 }
2402 }
2403
2404 if (action != key->dtak_action) {
2405 /*
2406 * We are aggregating on the same value in the same
2407 * aggregation with two different aggregating actions.
2408 * (This should have been picked up in the compiler,
2409 * so we may be dealing with errant or devious DIF.)
2410 * This is an error condition; we indicate as much,
2411 * and return.
2412 */
2413 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
2414 return;
2415 }
2416
2417 /*
2418 * This is a hit: we need to apply the aggregator to
2419 * the value at this key.
2420 */
2421 agg->dtag_aggregate((uint64_t *)(kdata + size), expr, arg);
2422 return;
2423 next:
2424 continue;
2425 }
2426
2427 /*
2428 * We didn't find it. We need to allocate some zero-filled space,
2429 * link it into the hash table appropriately, and apply the aggregator
2430 * to the (zero-filled) value.
2431 */
2432 offs = buf->dtb_offset;
2433 while (offs & (align - 1))
2434 offs += sizeof (uint32_t);
2435
2436 /*
2437 * If we don't have enough room to both allocate a new key _and_
2438 * its associated data, increment the drop count and return.
2439 */
2440 if ((uintptr_t)tomax + offs + fsize >
2441 agb->dtagb_free - sizeof (dtrace_aggkey_t)) {
2442 dtrace_buffer_drop(buf);
2443 return;
2444 }
2445
2446 /*CONSTCOND*/
2447 ASSERT(!(sizeof (dtrace_aggkey_t) & (sizeof (uintptr_t) - 1)));
2448 key = (dtrace_aggkey_t *)(agb->dtagb_free - sizeof (dtrace_aggkey_t));
2449 agb->dtagb_free -= sizeof (dtrace_aggkey_t);
2450
2451 key->dtak_data = kdata = tomax + offs;
2452 buf->dtb_offset = offs + fsize;
2453
2454 /*
2455 * Now copy the data across.
2456 */
2457 *((dtrace_aggid_t *)kdata) = agg->dtag_id;
2458
2459 for (i = sizeof (dtrace_aggid_t); i < size; i++)
2460 kdata[i] = data[i];
2461
2462 /*
2463 * Because strings are not zeroed out by default, we need to iterate
2464 * looking for actions that store strings, and we need to explicitly
2465 * pad these strings out with zeroes.
2466 */
2467 for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) {
2468 int nul;
2469
2470 if (!DTRACEACT_ISSTRING(act))
2471 continue;
2472
2473 i = act->dta_rec.dtrd_offset - agg->dtag_base;
2474 limit = i + act->dta_rec.dtrd_size;
2475 ASSERT(limit <= size);
2476
2477 for (nul = 0; i < limit; i++) {
2478 if (nul) {
2479 kdata[i] = '\0';
2480 continue;
2481 }
2482
2483 if (data[i] != '\0')
2484 continue;
2485
2486 nul = 1;
2487 }
2488 }
2489
2490 for (i = size; i < fsize; i++)
2491 kdata[i] = 0;
2492
2493 key->dtak_hashval = hashval;
2494 key->dtak_size = size;
2495 key->dtak_action = action;
2496 key->dtak_next = agb->dtagb_hash[ndx];
2497 agb->dtagb_hash[ndx] = key;
2498
2499 /*
2500 * Finally, apply the aggregator.
2501 */
2502 *((uint64_t *)(key->dtak_data + size)) = agg->dtag_initial;
2503 agg->dtag_aggregate((uint64_t *)(key->dtak_data + size), expr, arg);
2504 }
2505
2506 /*
2507 * Given consumer state, this routine finds a speculation in the INACTIVE
2508 * state and transitions it into the ACTIVE state. If there is no speculation
2509 * in the INACTIVE state, 0 is returned. In this case, no error counter is
2510 * incremented -- it is up to the caller to take appropriate action.
2511 */
2512 static int
2513 dtrace_speculation(dtrace_state_t *state)
2514 {
2515 int i = 0;
2516 dtrace_speculation_state_t current;
2517 uint32_t *stat = &state->dts_speculations_unavail, count;
2518
2519 while (i < state->dts_nspeculations) {
2520 dtrace_speculation_t *spec = &state->dts_speculations[i];
2521
2522 current = spec->dtsp_state;
2523
2524 if (current != DTRACESPEC_INACTIVE) {
2525 if (current == DTRACESPEC_COMMITTINGMANY ||
2526 current == DTRACESPEC_COMMITTING ||
2527 current == DTRACESPEC_DISCARDING)
2528 stat = &state->dts_speculations_busy;
2529 i++;
2530 continue;
2531 }
2532
2533 if (dtrace_cas32((uint32_t *)&spec->dtsp_state,
2534 current, DTRACESPEC_ACTIVE) == current)
2535 return (i + 1);
2536 }
2537
2538 /*
2539 * We couldn't find a speculation. If we found as much as a single
2540 * busy speculation buffer, we'll attribute this failure as "busy"
2541 * instead of "unavail".
2542 */
2543 do {
2544 count = *stat;
2545 } while (dtrace_cas32(stat, count, count + 1) != count);
2546
2547 return (0);
2548 }
2549
2550 /*
2551 * This routine commits an active speculation. If the specified speculation
2552 * is not in a valid state to perform a commit(), this routine will silently do
2553 * nothing. The state of the specified speculation is transitioned according
2554 * to the state transition diagram outlined in <sys/dtrace_impl.h>
2555 */
2556 static void
2557 dtrace_speculation_commit(dtrace_state_t *state, processorid_t cpu,
2558 dtrace_specid_t which)
2559 {
2560 dtrace_speculation_t *spec;
2561 dtrace_buffer_t *src, *dest;
2562 uintptr_t daddr, saddr, dlimit, slimit;
2563 dtrace_speculation_state_t current, new;
2564 intptr_t offs;
2565 uint64_t timestamp;
2566
2567 if (which == 0)
2568 return;
2569
2570 if (which > state->dts_nspeculations) {
2571 cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;
2572 return;
2573 }
2574
2575 spec = &state->dts_speculations[which - 1];
2576 src = &spec->dtsp_buffer[cpu];
2577 dest = &state->dts_buffer[cpu];
2578
2579 do {
2580 current = spec->dtsp_state;
2581
2582 if (current == DTRACESPEC_COMMITTINGMANY)
2583 break;
2584
2585 switch (current) {
2586 case DTRACESPEC_INACTIVE:
2587 case DTRACESPEC_DISCARDING:
2588 return;
2589
2590 case DTRACESPEC_COMMITTING:
2591 /*
2592 * This is only possible if we are (a) commit()'ing
2593 * without having done a prior speculate() on this CPU
2594 * and (b) racing with another commit() on a different
2595 * CPU. There's nothing to do -- we just assert that
2596 * our offset is 0.
2597 */
2598 ASSERT(src->dtb_offset == 0);
2599 return;
2600
2601 case DTRACESPEC_ACTIVE:
2602 new = DTRACESPEC_COMMITTING;
2603 break;
2604
2605 case DTRACESPEC_ACTIVEONE:
2606 /*
2607 * This speculation is active on one CPU. If our
2608 * buffer offset is non-zero, we know that the one CPU
2609 * must be us. Otherwise, we are committing on a
2610 * different CPU from the speculate(), and we must
2611 * rely on being asynchronously cleaned.
2612 */
2613 if (src->dtb_offset != 0) {
2614 new = DTRACESPEC_COMMITTING;
2615 break;
2616 }
2617 /*FALLTHROUGH*/
2618
2619 case DTRACESPEC_ACTIVEMANY:
2620 new = DTRACESPEC_COMMITTINGMANY;
2621 break;
2622
2623 default:
2624 ASSERT(0);
2625 }
2626 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state,
2627 current, new) != current);
2628
2629 /*
2630 * We have set the state to indicate that we are committing this
2631 * speculation. Now reserve the necessary space in the destination
2632 * buffer.
2633 */
2634 if ((offs = dtrace_buffer_reserve(dest, src->dtb_offset,
2635 sizeof (uint64_t), state, NULL)) < 0) {
2636 dtrace_buffer_drop(dest);
2637 goto out;
2638 }
2639
2640 /*
2641 * We have sufficient space to copy the speculative buffer into the
2642 * primary buffer. First, modify the speculative buffer, filling
2643 * in the timestamp of all entries with the current time. The data
2644 * must have the commit() time rather than the time it was traced,
2645 * so that all entries in the primary buffer are in timestamp order.
2646 */
2647 timestamp = dtrace_gethrtime();
2648 saddr = (uintptr_t)src->dtb_tomax;
2649 slimit = saddr + src->dtb_offset;
2650 while (saddr < slimit) {
2651 size_t size;
2652 dtrace_rechdr_t *dtrh = (dtrace_rechdr_t *)saddr;
2653
2654 if (dtrh->dtrh_epid == DTRACE_EPIDNONE) {
2655 saddr += sizeof (dtrace_epid_t);
2656 continue;
2657 }
2658 ASSERT3U(dtrh->dtrh_epid, <=, state->dts_necbs);
2659 size = state->dts_ecbs[dtrh->dtrh_epid - 1]->dte_size;
2660
2661 ASSERT3U(saddr + size, <=, slimit);
2662 ASSERT3U(size, >=, sizeof (dtrace_rechdr_t));
2663 ASSERT3U(DTRACE_RECORD_LOAD_TIMESTAMP(dtrh), ==, UINT64_MAX);
2664
2665 DTRACE_RECORD_STORE_TIMESTAMP(dtrh, timestamp);
2666
2667 saddr += size;
2668 }
2669
2670 /*
2671 * Copy the buffer across. (Note that this is a
2672 * highly subobtimal bcopy(); in the unlikely event that this becomes
2673 * a serious performance issue, a high-performance DTrace-specific
2674 * bcopy() should obviously be invented.)
2675 */
2676 daddr = (uintptr_t)dest->dtb_tomax + offs;
2677 dlimit = daddr + src->dtb_offset;
2678 saddr = (uintptr_t)src->dtb_tomax;
2679
2680 /*
2681 * First, the aligned portion.
2682 */
2683 while (dlimit - daddr >= sizeof (uint64_t)) {
2684 *((uint64_t *)daddr) = *((uint64_t *)saddr);
2685
2686 daddr += sizeof (uint64_t);
2687 saddr += sizeof (uint64_t);
2688 }
2689
2690 /*
2691 * Now any left-over bit...
2692 */
2693 while (dlimit - daddr)
2694 *((uint8_t *)daddr++) = *((uint8_t *)saddr++);
2695
2696 /*
2697 * Finally, commit the reserved space in the destination buffer.
2698 */
2699 dest->dtb_offset = offs + src->dtb_offset;
2700
2701 out:
2702 /*
2703 * If we're lucky enough to be the only active CPU on this speculation
2704 * buffer, we can just set the state back to DTRACESPEC_INACTIVE.
2705 */
2706 if (current == DTRACESPEC_ACTIVE ||
2707 (current == DTRACESPEC_ACTIVEONE && new == DTRACESPEC_COMMITTING)) {
2708 uint32_t rval = dtrace_cas32((uint32_t *)&spec->dtsp_state,
2709 DTRACESPEC_COMMITTING, DTRACESPEC_INACTIVE);
2710
2711 ASSERT(rval == DTRACESPEC_COMMITTING);
2712 }
2713
2714 src->dtb_offset = 0;
2715 src->dtb_xamot_drops += src->dtb_drops;
2716 src->dtb_drops = 0;
2717 }
2718
2719 /*
2720 * This routine discards an active speculation. If the specified speculation
2721 * is not in a valid state to perform a discard(), this routine will silently
2722 * do nothing. The state of the specified speculation is transitioned
2723 * according to the state transition diagram outlined in <sys/dtrace_impl.h>
2724 */
2725 static void
2726 dtrace_speculation_discard(dtrace_state_t *state, processorid_t cpu,
2727 dtrace_specid_t which)
2728 {
2729 dtrace_speculation_t *spec;
2730 dtrace_speculation_state_t current, new;
2731 dtrace_buffer_t *buf;
2732
2733 if (which == 0)
2734 return;
2735
2736 if (which > state->dts_nspeculations) {
2737 cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;
2738 return;
2739 }
2740
2741 spec = &state->dts_speculations[which - 1];
2742 buf = &spec->dtsp_buffer[cpu];
2743
2744 do {
2745 current = spec->dtsp_state;
2746
2747 switch (current) {
2748 case DTRACESPEC_INACTIVE:
2749 case DTRACESPEC_COMMITTINGMANY:
2750 case DTRACESPEC_COMMITTING:
2751 case DTRACESPEC_DISCARDING:
2752 return;
2753
2754 case DTRACESPEC_ACTIVE:
2755 case DTRACESPEC_ACTIVEMANY:
2756 new = DTRACESPEC_DISCARDING;
2757 break;
2758
2759 case DTRACESPEC_ACTIVEONE:
2760 if (buf->dtb_offset != 0) {
2761 new = DTRACESPEC_INACTIVE;
2762 } else {
2763 new = DTRACESPEC_DISCARDING;
2764 }
2765 break;
2766
2767 default:
2768 ASSERT(0);
2769 }
2770 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state,
2771 current, new) != current);
2772
2773 buf->dtb_offset = 0;
2774 buf->dtb_drops = 0;
2775 }
2776
2777 /*
2778 * Note: not called from probe context. This function is called
2779 * asynchronously from cross call context to clean any speculations that are
2780 * in the COMMITTINGMANY or DISCARDING states. These speculations may not be
2781 * transitioned back to the INACTIVE state until all CPUs have cleaned the
2782 * speculation.
2783 */
2784 static void
2785 dtrace_speculation_clean_here(dtrace_state_t *state)
2786 {
2787 dtrace_icookie_t cookie;
2788 processorid_t cpu = CPU->cpu_id;
2789 dtrace_buffer_t *dest = &state->dts_buffer[cpu];
2790 dtrace_specid_t i;
2791
2792 cookie = dtrace_interrupt_disable();
2793
2794 if (dest->dtb_tomax == NULL) {
2795 dtrace_interrupt_enable(cookie);
2796 return;
2797 }
2798
2799 for (i = 0; i < state->dts_nspeculations; i++) {
2800 dtrace_speculation_t *spec = &state->dts_speculations[i];
2801 dtrace_buffer_t *src = &spec->dtsp_buffer[cpu];
2802
2803 if (src->dtb_tomax == NULL)
2804 continue;
2805
2806 if (spec->dtsp_state == DTRACESPEC_DISCARDING) {
2807 src->dtb_offset = 0;
2808 continue;
2809 }
2810
2811 if (spec->dtsp_state != DTRACESPEC_COMMITTINGMANY)
2812 continue;
2813
2814 if (src->dtb_offset == 0)
2815 continue;
2816
2817 dtrace_speculation_commit(state, cpu, i + 1);
2818 }
2819
2820 dtrace_interrupt_enable(cookie);
2821 }
2822
2823 /*
2824 * Note: not called from probe context. This function is called
2825 * asynchronously (and at a regular interval) to clean any speculations that
2826 * are in the COMMITTINGMANY or DISCARDING states. If it discovers that there
2827 * is work to be done, it cross calls all CPUs to perform that work;
2828 * COMMITMANY and DISCARDING speculations may not be transitioned back to the
2829 * INACTIVE state until they have been cleaned by all CPUs.
2830 */
2831 static void
2832 dtrace_speculation_clean(dtrace_state_t *state)
2833 {
2834 int work = 0, rv;
2835 dtrace_specid_t i;
2836
2837 for (i = 0; i < state->dts_nspeculations; i++) {
2838 dtrace_speculation_t *spec = &state->dts_speculations[i];
2839
2840 ASSERT(!spec->dtsp_cleaning);
2841
2842 if (spec->dtsp_state != DTRACESPEC_DISCARDING &&
2843 spec->dtsp_state != DTRACESPEC_COMMITTINGMANY)
2844 continue;
2845
2846 work++;
2847 spec->dtsp_cleaning = 1;
2848 }
2849
2850 if (!work)
2851 return;
2852
2853 dtrace_xcall(DTRACE_CPUALL,
2854 (dtrace_xcall_t)dtrace_speculation_clean_here, state);
2855
2856 /*
2857 * We now know that all CPUs have committed or discarded their
2858 * speculation buffers, as appropriate. We can now set the state
2859 * to inactive.
2860 */
2861 for (i = 0; i < state->dts_nspeculations; i++) {
2862 dtrace_speculation_t *spec = &state->dts_speculations[i];
2863 dtrace_speculation_state_t current, new;
2864
2865 if (!spec->dtsp_cleaning)
2866 continue;
2867
2868 current = spec->dtsp_state;
2869 ASSERT(current == DTRACESPEC_DISCARDING ||
2870 current == DTRACESPEC_COMMITTINGMANY);
2871
2872 new = DTRACESPEC_INACTIVE;
2873
2874 rv = dtrace_cas32((uint32_t *)&spec->dtsp_state, current, new);
2875 ASSERT(rv == current);
2876 spec->dtsp_cleaning = 0;
2877 }
2878 }
2879
2880 /*
2881 * Called as part of a speculate() to get the speculative buffer associated
2882 * with a given speculation. Returns NULL if the specified speculation is not
2883 * in an ACTIVE state. If the speculation is in the ACTIVEONE state -- and
2884 * the active CPU is not the specified CPU -- the speculation will be
2885 * atomically transitioned into the ACTIVEMANY state.
2886 */
2887 static dtrace_buffer_t *
2888 dtrace_speculation_buffer(dtrace_state_t *state, processorid_t cpuid,
2889 dtrace_specid_t which)
2890 {
2891 dtrace_speculation_t *spec;
2892 dtrace_speculation_state_t current, new;
2893 dtrace_buffer_t *buf;
2894
2895 if (which == 0)
2896 return (NULL);
2897
2898 if (which > state->dts_nspeculations) {
2899 cpu_core[cpuid].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;
2900 return (NULL);
2901 }
2902
2903 spec = &state->dts_speculations[which - 1];
2904 buf = &spec->dtsp_buffer[cpuid];
2905
2906 do {
2907 current = spec->dtsp_state;
2908
2909 switch (current) {
2910 case DTRACESPEC_INACTIVE:
2911 case DTRACESPEC_COMMITTINGMANY:
2912 case DTRACESPEC_DISCARDING:
2913 return (NULL);
2914
2915 case DTRACESPEC_COMMITTING:
2916 ASSERT(buf->dtb_offset == 0);
2917 return (NULL);
2918
2919 case DTRACESPEC_ACTIVEONE:
2920 /*
2921 * This speculation is currently active on one CPU.
2922 * Check the offset in the buffer; if it's non-zero,
2923 * that CPU must be us (and we leave the state alone).
2924 * If it's zero, assume that we're starting on a new
2925 * CPU -- and change the state to indicate that the
2926 * speculation is active on more than one CPU.
2927 */
2928 if (buf->dtb_offset != 0)
2929 return (buf);
2930
2931 new = DTRACESPEC_ACTIVEMANY;
2932 break;
2933
2934 case DTRACESPEC_ACTIVEMANY:
2935 return (buf);
2936
2937 case DTRACESPEC_ACTIVE:
2938 new = DTRACESPEC_ACTIVEONE;
2939 break;
2940
2941 default:
2942 ASSERT(0);
2943 }
2944 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state,
2945 current, new) != current);
2946
2947 ASSERT(new == DTRACESPEC_ACTIVEONE || new == DTRACESPEC_ACTIVEMANY);
2948 return (buf);
2949 }
2950
2951 /*
2952 * Return a string. In the event that the user lacks the privilege to access
2953 * arbitrary kernel memory, we copy the string out to scratch memory so that we
2954 * don't fail access checking.
2955 *
2956 * dtrace_dif_variable() uses this routine as a helper for various
2957 * builtin values such as 'execname' and 'probefunc.'
2958 */
2959 uintptr_t
2960 dtrace_dif_varstr(uintptr_t addr, dtrace_state_t *state,
2961 dtrace_mstate_t *mstate)
2962 {
2963 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
2964 uintptr_t ret;
2965 size_t strsz;
2966
2967 /*
2968 * The easy case: this probe is allowed to read all of memory, so
2969 * we can just return this as a vanilla pointer.
2970 */
2971 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
2972 return (addr);
2973
2974 /*
2975 * This is the tougher case: we copy the string in question from
2976 * kernel memory into scratch memory and return it that way: this
2977 * ensures that we won't trip up when access checking tests the
2978 * BYREF return value.
2979 */
2980 strsz = dtrace_strlen((char *)addr, size) + 1;
2981
2982 if (mstate->dtms_scratch_ptr + strsz >
2983 mstate->dtms_scratch_base + mstate->dtms_scratch_size) {
2984 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
2985 return (NULL);
2986 }
2987
2988 dtrace_strcpy((const void *)addr, (void *)mstate->dtms_scratch_ptr,
2989 strsz);
2990 ret = mstate->dtms_scratch_ptr;
2991 mstate->dtms_scratch_ptr += strsz;
2992 return (ret);
2993 }
2994
2995 /*
2996 * This function implements the DIF emulator's variable lookups. The emulator
2997 * passes a reserved variable identifier and optional built-in array index.
2998 */
2999 static uint64_t
3000 dtrace_dif_variable(dtrace_mstate_t *mstate, dtrace_state_t *state, uint64_t v,
3001 uint64_t ndx)
3002 {
3003 /*
3004 * If we're accessing one of the uncached arguments, we'll turn this
3005 * into a reference in the args array.
3006 */
3007 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9) {
3008 ndx = v - DIF_VAR_ARG0;
3009 v = DIF_VAR_ARGS;
3010 }
3011
3012 switch (v) {
3013 case DIF_VAR_ARGS:
3014 if (!(mstate->dtms_access & DTRACE_ACCESS_ARGS)) {
3015 cpu_core[CPU->cpu_id].cpuc_dtrace_flags |=
3016 CPU_DTRACE_KPRIV;
3017 return (0);
3018 }
3019
3020 ASSERT(mstate->dtms_present & DTRACE_MSTATE_ARGS);
3021 if (ndx >= sizeof (mstate->dtms_arg) /
3022 sizeof (mstate->dtms_arg[0])) {
3023 int aframes = mstate->dtms_probe->dtpr_aframes + 2;
3024 dtrace_provider_t *pv;
3025 uint64_t val;
3026
3027 pv = mstate->dtms_probe->dtpr_provider;
3028 if (pv->dtpv_pops.dtps_getargval != NULL)
3029 val = pv->dtpv_pops.dtps_getargval(pv->dtpv_arg,
3030 mstate->dtms_probe->dtpr_id,
3031 mstate->dtms_probe->dtpr_arg, ndx, aframes);
3032 else
3033 val = dtrace_getarg(ndx, aframes);
3034
3035 /*
3036 * This is regrettably required to keep the compiler
3037 * from tail-optimizing the call to dtrace_getarg().
3038 * The condition always evaluates to true, but the
3039 * compiler has no way of figuring that out a priori.
3040 * (None of this would be necessary if the compiler
3041 * could be relied upon to _always_ tail-optimize
3042 * the call to dtrace_getarg() -- but it can't.)
3043 */
3044 if (mstate->dtms_probe != NULL)
3045 return (val);
3046
3047 ASSERT(0);
3048 }
3049
3050 return (mstate->dtms_arg[ndx]);
3051
3052 case DIF_VAR_UREGS: {
3053 klwp_t *lwp;
3054
3055 if (!dtrace_priv_proc(state, mstate))
3056 return (0);
3057
3058 if ((lwp = curthread->t_lwp) == NULL) {
3059 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
3060 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = NULL;
3061 return (0);
3062 }
3063
3064 return (dtrace_getreg(lwp->lwp_regs, ndx));
3065 }
3066
3067 case DIF_VAR_VMREGS: {
3068 uint64_t rval;
3069
3070 if (!dtrace_priv_kernel(state))
3071 return (0);
3072
3073 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3074
3075 rval = dtrace_getvmreg(ndx,
3076 &cpu_core[CPU->cpu_id].cpuc_dtrace_flags);
3077
3078 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3079
3080 return (rval);
3081 }
3082
3083 case DIF_VAR_CURTHREAD:
3084 if (!dtrace_priv_proc(state, mstate))
3085 return (0);
3086 return ((uint64_t)(uintptr_t)curthread);
3087
3088 case DIF_VAR_TIMESTAMP:
3089 if (!(mstate->dtms_present & DTRACE_MSTATE_TIMESTAMP)) {
3090 mstate->dtms_timestamp = dtrace_gethrtime();
3091 mstate->dtms_present |= DTRACE_MSTATE_TIMESTAMP;
3092 }
3093 return (mstate->dtms_timestamp);
3094
3095 case DIF_VAR_VTIMESTAMP:
3096 ASSERT(dtrace_vtime_references != 0);
3097 return (curthread->t_dtrace_vtime);
3098
3099 case DIF_VAR_WALLTIMESTAMP:
3100 if (!(mstate->dtms_present & DTRACE_MSTATE_WALLTIMESTAMP)) {
3101 mstate->dtms_walltimestamp = dtrace_gethrestime();
3102 mstate->dtms_present |= DTRACE_MSTATE_WALLTIMESTAMP;
3103 }
3104 return (mstate->dtms_walltimestamp);
3105
3106 case DIF_VAR_IPL:
3107 if (!dtrace_priv_kernel(state))
3108 return (0);
3109 if (!(mstate->dtms_present & DTRACE_MSTATE_IPL)) {
3110 mstate->dtms_ipl = dtrace_getipl();
3111 mstate->dtms_present |= DTRACE_MSTATE_IPL;
3112 }
3113 return (mstate->dtms_ipl);
3114
3115 case DIF_VAR_EPID:
3116 ASSERT(mstate->dtms_present & DTRACE_MSTATE_EPID);
3117 return (mstate->dtms_epid);
3118
3119 case DIF_VAR_ID:
3120 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
3121 return (mstate->dtms_probe->dtpr_id);
3122
3123 case DIF_VAR_STACKDEPTH:
3124 if (!dtrace_priv_kernel(state))
3125 return (0);
3126 if (!(mstate->dtms_present & DTRACE_MSTATE_STACKDEPTH)) {
3127 int aframes = mstate->dtms_probe->dtpr_aframes + 2;
3128
3129 mstate->dtms_stackdepth = dtrace_getstackdepth(aframes);
3130 mstate->dtms_present |= DTRACE_MSTATE_STACKDEPTH;
3131 }
3132 return (mstate->dtms_stackdepth);
3133
3134 case DIF_VAR_USTACKDEPTH:
3135 if (!dtrace_priv_proc(state, mstate))
3136 return (0);
3137 if (!(mstate->dtms_present & DTRACE_MSTATE_USTACKDEPTH)) {
3138 /*
3139 * See comment in DIF_VAR_PID.
3140 */
3141 if (DTRACE_ANCHORED(mstate->dtms_probe) &&
3142 CPU_ON_INTR(CPU)) {
3143 mstate->dtms_ustackdepth = 0;
3144 } else {
3145 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3146 mstate->dtms_ustackdepth =
3147 dtrace_getustackdepth();
3148 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3149 }
3150 mstate->dtms_present |= DTRACE_MSTATE_USTACKDEPTH;
3151 }
3152 return (mstate->dtms_ustackdepth);
3153
3154 case DIF_VAR_CALLER:
3155 if (!dtrace_priv_kernel(state))
3156 return (0);
3157 if (!(mstate->dtms_present & DTRACE_MSTATE_CALLER)) {
3158 int aframes = mstate->dtms_probe->dtpr_aframes + 2;
3159
3160 if (!DTRACE_ANCHORED(mstate->dtms_probe)) {
3161 /*
3162 * If this is an unanchored probe, we are
3163 * required to go through the slow path:
3164 * dtrace_caller() only guarantees correct
3165 * results for anchored probes.
3166 */
3167 pc_t caller[2];
3168
3169 dtrace_getpcstack(caller, 2, aframes,
3170 (uint32_t *)(uintptr_t)mstate->dtms_arg[0]);
3171 mstate->dtms_caller = caller[1];
3172 } else if ((mstate->dtms_caller =
3173 dtrace_caller(aframes)) == -1) {
3174 /*
3175 * We have failed to do this the quick way;
3176 * we must resort to the slower approach of
3177 * calling dtrace_getpcstack().
3178 */
3179 pc_t caller;
3180
3181 dtrace_getpcstack(&caller, 1, aframes, NULL);
3182 mstate->dtms_caller = caller;
3183 }
3184
3185 mstate->dtms_present |= DTRACE_MSTATE_CALLER;
3186 }
3187 return (mstate->dtms_caller);
3188
3189 case DIF_VAR_UCALLER:
3190 if (!dtrace_priv_proc(state, mstate))
3191 return (0);
3192
3193 if (!(mstate->dtms_present & DTRACE_MSTATE_UCALLER)) {
3194 uint64_t ustack[3];
3195
3196 /*
3197 * dtrace_getupcstack() fills in the first uint64_t
3198 * with the current PID. The second uint64_t will
3199 * be the program counter at user-level. The third
3200 * uint64_t will contain the caller, which is what
3201 * we're after.
3202 */
3203 ustack[2] = NULL;
3204 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3205 dtrace_getupcstack(ustack, 3);
3206 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3207 mstate->dtms_ucaller = ustack[2];
3208 mstate->dtms_present |= DTRACE_MSTATE_UCALLER;
3209 }
3210
3211 return (mstate->dtms_ucaller);
3212
3213 case DIF_VAR_PROBEPROV:
3214 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
3215 return (dtrace_dif_varstr(
3216 (uintptr_t)mstate->dtms_probe->dtpr_provider->dtpv_name,
3217 state, mstate));
3218
3219 case DIF_VAR_PROBEMOD:
3220 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
3221 return (dtrace_dif_varstr(
3222 (uintptr_t)mstate->dtms_probe->dtpr_mod,
3223 state, mstate));
3224
3225 case DIF_VAR_PROBEFUNC:
3226 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
3227 return (dtrace_dif_varstr(
3228 (uintptr_t)mstate->dtms_probe->dtpr_func,
3229 state, mstate));
3230
3231 case DIF_VAR_PROBENAME:
3232 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
3233 return (dtrace_dif_varstr(
3234 (uintptr_t)mstate->dtms_probe->dtpr_name,
3235 state, mstate));
3236
3237 case DIF_VAR_PID:
3238 if (!dtrace_priv_proc(state, mstate))
3239 return (0);
3240
3241 /*
3242 * Note that we are assuming that an unanchored probe is
3243 * always due to a high-level interrupt. (And we're assuming
3244 * that there is only a single high level interrupt.)
3245 */
3246 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3247 return (pid0.pid_id);
3248
3249 /*
3250 * It is always safe to dereference one's own t_procp pointer:
3251 * it always points to a valid, allocated proc structure.
3252 * Further, it is always safe to dereference the p_pidp member
3253 * of one's own proc structure. (These are truisms becuase
3254 * threads and processes don't clean up their own state --
3255 * they leave that task to whomever reaps them.)
3256 */
3257 return ((uint64_t)curthread->t_procp->p_pidp->pid_id);
3258
3259 case DIF_VAR_PPID:
3260 if (!dtrace_priv_proc(state, mstate))
3261 return (0);
3262
3263 /*
3264 * See comment in DIF_VAR_PID.
3265 */
3266 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3267 return (pid0.pid_id);
3268
3269 /*
3270 * It is always safe to dereference one's own t_procp pointer:
3271 * it always points to a valid, allocated proc structure.
3272 * (This is true because threads don't clean up their own
3273 * state -- they leave that task to whomever reaps them.)
3274 */
3275 return ((uint64_t)curthread->t_procp->p_ppid);
3276
3277 case DIF_VAR_TID:
3278 /*
3279 * See comment in DIF_VAR_PID.
3280 */
3281 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3282 return (0);
3283
3284 return ((uint64_t)curthread->t_tid);
3285
3286 case DIF_VAR_EXECNAME:
3287 if (!dtrace_priv_proc(state, mstate))
3288 return (0);
3289
3290 /*
3291 * See comment in DIF_VAR_PID.
3292 */
3293 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3294 return ((uint64_t)(uintptr_t)p0.p_user.u_comm);
3295
3296 /*
3297 * It is always safe to dereference one's own t_procp pointer:
3298 * it always points to a valid, allocated proc structure.
3299 * (This is true because threads don't clean up their own
3300 * state -- they leave that task to whomever reaps them.)
3301 */
3302 return (dtrace_dif_varstr(
3303 (uintptr_t)curthread->t_procp->p_user.u_comm,
3304 state, mstate));
3305
3306 case DIF_VAR_ZONENAME:
3307 if (!dtrace_priv_proc(state, mstate))
3308 return (0);
3309
3310 /*
3311 * See comment in DIF_VAR_PID.
3312 */
3313 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3314 return ((uint64_t)(uintptr_t)p0.p_zone->zone_name);
3315
3316 /*
3317 * It is always safe to dereference one's own t_procp pointer:
3318 * it always points to a valid, allocated proc structure.
3319 * (This is true because threads don't clean up their own
3320 * state -- they leave that task to whomever reaps them.)
3321 */
3322 return (dtrace_dif_varstr(
3323 (uintptr_t)curthread->t_procp->p_zone->zone_name,
3324 state, mstate));
3325
3326 case DIF_VAR_UID:
3327 if (!dtrace_priv_proc(state, mstate))
3328 return (0);
3329
3330 /*
3331 * See comment in DIF_VAR_PID.
3332 */
3333 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3334 return ((uint64_t)p0.p_cred->cr_uid);
3335
3336 /*
3337 * It is always safe to dereference one's own t_procp pointer:
3338 * it always points to a valid, allocated proc structure.
3339 * (This is true because threads don't clean up their own
3340 * state -- they leave that task to whomever reaps them.)
3341 *
3342 * Additionally, it is safe to dereference one's own process
3343 * credential, since this is never NULL after process birth.
3344 */
3345 return ((uint64_t)curthread->t_procp->p_cred->cr_uid);
3346
3347 case DIF_VAR_GID:
3348 if (!dtrace_priv_proc(state, mstate))
3349 return (0);
3350
3351 /*
3352 * See comment in DIF_VAR_PID.
3353 */
3354 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3355 return ((uint64_t)p0.p_cred->cr_gid);
3356
3357 /*
3358 * It is always safe to dereference one's own t_procp pointer:
3359 * it always points to a valid, allocated proc structure.
3360 * (This is true because threads don't clean up their own
3361 * state -- they leave that task to whomever reaps them.)
3362 *
3363 * Additionally, it is safe to dereference one's own process
3364 * credential, since this is never NULL after process birth.
3365 */
3366 return ((uint64_t)curthread->t_procp->p_cred->cr_gid);
3367
3368 case DIF_VAR_ERRNO: {
3369 klwp_t *lwp;
3370 if (!dtrace_priv_proc(state, mstate))
3371 return (0);
3372
3373 /*
3374 * See comment in DIF_VAR_PID.
3375 */
3376 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3377 return (0);
3378
3379 /*
3380 * It is always safe to dereference one's own t_lwp pointer in
3381 * the event that this pointer is non-NULL. (This is true
3382 * because threads and lwps don't clean up their own state --
3383 * they leave that task to whomever reaps them.)
3384 */
3385 if ((lwp = curthread->t_lwp) == NULL)
3386 return (0);
3387
3388 return ((uint64_t)lwp->lwp_errno);
3389 }
3390 default:
3391 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
3392 return (0);
3393 }
3394 }
3395
3396
3397 typedef enum dtrace_json_state {
3398 DTRACE_JSON_REST = 1,
3399 DTRACE_JSON_OBJECT,
3400 DTRACE_JSON_STRING,
3401 DTRACE_JSON_STRING_ESCAPE,
3402 DTRACE_JSON_STRING_ESCAPE_UNICODE,
3403 DTRACE_JSON_COLON,
3404 DTRACE_JSON_COMMA,
3405 DTRACE_JSON_VALUE,
3406 DTRACE_JSON_IDENTIFIER,
3407 DTRACE_JSON_NUMBER,
3408 DTRACE_JSON_NUMBER_FRAC,
3409 DTRACE_JSON_NUMBER_EXP,
3410 DTRACE_JSON_COLLECT_OBJECT
3411 } dtrace_json_state_t;
3412
3413 /*
3414 * This function possesses just enough knowledge about JSON to extract a single
3415 * value from a JSON string and store it in the scratch buffer. It is able
3416 * to extract nested object values, and members of arrays by index.
3417 *
3418 * elemlist is a list of JSON keys, stored as packed NUL-terminated strings, to
3419 * be looked up as we descend into the object tree. e.g.
3420 *
3421 * foo[0].bar.baz[32] --> "foo" NUL "0" NUL "bar" NUL "baz" NUL "32" NUL
3422 * with nelems = 5.
3423 *
3424 * The run time of this function must be bounded above by strsize to limit the
3425 * amount of work done in probe context. As such, it is implemented as a
3426 * simple state machine, reading one character at a time using safe loads
3427 * until we find the requested element, hit a parsing error or run off the
3428 * end of the object or string.
3429 *
3430 * As there is no way for a subroutine to return an error without interrupting
3431 * clause execution, we simply return NULL in the event of a missing key or any
3432 * other error condition. Each NULL return in this function is commented with
3433 * the error condition it represents -- parsing or otherwise.
3434 *
3435 * The set of states for the state machine closely matches the JSON
3436 * specification (http://json.org/). Briefly:
3437 *
3438 * DTRACE_JSON_REST:
3439 * Skip whitespace until we find either a top-level Object, moving
3440 * to DTRACE_JSON_OBJECT; or an Array, moving to DTRACE_JSON_VALUE.
3441 *
3442 * DTRACE_JSON_OBJECT:
3443 * Locate the next key String in an Object. Sets a flag to denote
3444 * the next String as a key string and moves to DTRACE_JSON_STRING.
3445 *
3446 * DTRACE_JSON_COLON:
3447 * Skip whitespace until we find the colon that separates key Strings
3448 * from their values. Once found, move to DTRACE_JSON_VALUE.
3449 *
3450 * DTRACE_JSON_VALUE:
3451 * Detects the type of the next value (String, Number, Identifier, Object
3452 * or Array) and routes to the states that process that type. Here we also
3453 * deal with the element selector list if we are requested to traverse down
3454 * into the object tree.
3455 *
3456 * DTRACE_JSON_COMMA:
3457 * Skip whitespace until we find the comma that separates key-value pairs
3458 * in Objects (returning to DTRACE_JSON_OBJECT) or values in Arrays
3459 * (similarly DTRACE_JSON_VALUE). All following literal value processing
3460 * states return to this state at the end of their value, unless otherwise
3461 * noted.
3462 *
3463 * DTRACE_JSON_NUMBER, DTRACE_JSON_NUMBER_FRAC, DTRACE_JSON_NUMBER_EXP:
3464 * Processes a Number literal from the JSON, including any exponent
3465 * component that may be present. Numbers are returned as strings, which
3466 * may be passed to strtoll() if an integer is required.
3467 *
3468 * DTRACE_JSON_IDENTIFIER:
3469 * Processes a "true", "false" or "null" literal in the JSON.
3470 *
3471 * DTRACE_JSON_STRING, DTRACE_JSON_STRING_ESCAPE,
3472 * DTRACE_JSON_STRING_ESCAPE_UNICODE:
3473 * Processes a String literal from the JSON, whether the String denotes
3474 * a key, a value or part of a larger Object. Handles all escape sequences
3475 * present in the specification, including four-digit unicode characters,
3476 * but merely includes the escape sequence without converting it to the
3477 * actual escaped character. If the String is flagged as a key, we
3478 * move to DTRACE_JSON_COLON rather than DTRACE_JSON_COMMA.
3479 *
3480 * DTRACE_JSON_COLLECT_OBJECT:
3481 * This state collects an entire Object (or Array), correctly handling
3482 * embedded strings. If the full element selector list matches this nested
3483 * object, we return the Object in full as a string. If not, we use this
3484 * state to skip to the next value at this level and continue processing.
3485 *
3486 * NOTE: This function uses various macros from strtolctype.h to manipulate
3487 * digit values, etc -- these have all been checked to ensure they make
3488 * no additional function calls.
3489 */
3490 static char *
3491 dtrace_json(uint64_t size, uintptr_t json, char *elemlist, int nelems,
3492 char *dest)
3493 {
3494 dtrace_json_state_t state = DTRACE_JSON_REST;
3495 int64_t array_elem = INT64_MIN;
3496 int64_t array_pos = 0;
3497 uint8_t escape_unicount = 0;
3498 boolean_t string_is_key = B_FALSE;
3499 boolean_t collect_object = B_FALSE;
3500 boolean_t found_key = B_FALSE;
3501 boolean_t in_array = B_FALSE;
3502 uint32_t braces = 0, brackets = 0;
3503 char *elem = elemlist;
3504 char *dd = dest;
3505 uintptr_t cur;
3506
3507 for (cur = json; cur < json + size; cur++) {
3508 char cc = dtrace_load8(cur);
3509 if (cc == '\0')
3510 return (NULL);
3511
3512 switch (state) {
3513 case DTRACE_JSON_REST:
3514 if (isspace(cc))
3515 break;
3516
3517 if (cc == '{') {
3518 state = DTRACE_JSON_OBJECT;
3519 break;
3520 }
3521
3522 if (cc == '[') {
3523 in_array = B_TRUE;
3524 array_pos = 0;
3525 array_elem = dtrace_strtoll(elem, 10, size);
3526 found_key = array_elem == 0 ? B_TRUE : B_FALSE;
3527 state = DTRACE_JSON_VALUE;
3528 break;
3529 }
3530
3531 /*
3532 * ERROR: expected to find a top-level object or array.
3533 */
3534 return (NULL);
3535 case DTRACE_JSON_OBJECT:
3536 if (isspace(cc))
3537 break;
3538
3539 if (cc == '"') {
3540 state = DTRACE_JSON_STRING;
3541 string_is_key = B_TRUE;
3542 break;
3543 }
3544
3545 /*
3546 * ERROR: either the object did not start with a key
3547 * string, or we've run off the end of the object
3548 * without finding the requested key.
3549 */
3550 return (NULL);
3551 case DTRACE_JSON_STRING:
3552 if (cc == '\\') {
3553 *dd++ = '\\';
3554 state = DTRACE_JSON_STRING_ESCAPE;
3555 break;
3556 }
3557
3558 if (cc == '"') {
3559 if (collect_object) {
3560 /*
3561 * We don't reset the dest here, as
3562 * the string is part of a larger
3563 * object being collected.
3564 */
3565 *dd++ = cc;
3566 collect_object = B_FALSE;
3567 state = DTRACE_JSON_COLLECT_OBJECT;
3568 break;
3569 }
3570 *dd = '\0';
3571 dd = dest; /* reset string buffer */
3572 if (string_is_key) {
3573 if (dtrace_strncmp(dest, elem,
3574 size) == 0)
3575 found_key = B_TRUE;
3576 } else if (found_key) {
3577 if (nelems > 1) {
3578 /*
3579 * We expected an object, not
3580 * this string.
3581 */
3582 return (NULL);
3583 }
3584 return (dest);
3585 }
3586 state = string_is_key ? DTRACE_JSON_COLON :
3587 DTRACE_JSON_COMMA;
3588 string_is_key = B_FALSE;
3589 break;
3590 }
3591
3592 *dd++ = cc;
3593 break;
3594 case DTRACE_JSON_STRING_ESCAPE:
3595 *dd++ = cc;
3596 if (cc == 'u') {
3597 escape_unicount = 0;
3598 state = DTRACE_JSON_STRING_ESCAPE_UNICODE;
3599 } else {
3600 state = DTRACE_JSON_STRING;
3601 }
3602 break;
3603 case DTRACE_JSON_STRING_ESCAPE_UNICODE:
3604 if (!isxdigit(cc)) {
3605 /*
3606 * ERROR: invalid unicode escape, expected
3607 * four valid hexidecimal digits.
3608 */
3609 return (NULL);
3610 }
3611
3612 *dd++ = cc;
3613 if (++escape_unicount == 4)
3614 state = DTRACE_JSON_STRING;
3615 break;
3616 case DTRACE_JSON_COLON:
3617 if (isspace(cc))
3618 break;
3619
3620 if (cc == ':') {
3621 state = DTRACE_JSON_VALUE;
3622 break;
3623 }
3624
3625 /*
3626 * ERROR: expected a colon.
3627 */
3628 return (NULL);
3629 case DTRACE_JSON_COMMA:
3630 if (isspace(cc))
3631 break;
3632
3633 if (cc == ',') {
3634 if (in_array) {
3635 state = DTRACE_JSON_VALUE;
3636 if (++array_pos == array_elem)
3637 found_key = B_TRUE;
3638 } else {
3639 state = DTRACE_JSON_OBJECT;
3640 }
3641 break;
3642 }
3643
3644 /*
3645 * ERROR: either we hit an unexpected character, or
3646 * we reached the end of the object or array without
3647 * finding the requested key.
3648 */
3649 return (NULL);
3650 case DTRACE_JSON_IDENTIFIER:
3651 if (islower(cc)) {
3652 *dd++ = cc;
3653 break;
3654 }
3655
3656 *dd = '\0';
3657 dd = dest; /* reset string buffer */
3658
3659 if (dtrace_strncmp(dest, "true", 5) == 0 ||
3660 dtrace_strncmp(dest, "false", 6) == 0 ||
3661 dtrace_strncmp(dest, "null", 5) == 0) {
3662 if (found_key) {
3663 if (nelems > 1) {
3664 /*
3665 * ERROR: We expected an object,
3666 * not this identifier.
3667 */
3668 return (NULL);
3669 }
3670 return (dest);
3671 } else {
3672 cur--;
3673 state = DTRACE_JSON_COMMA;
3674 break;
3675 }
3676 }
3677
3678 /*
3679 * ERROR: we did not recognise the identifier as one
3680 * of those in the JSON specification.
3681 */
3682 return (NULL);
3683 case DTRACE_JSON_NUMBER:
3684 if (cc == '.') {
3685 *dd++ = cc;
3686 state = DTRACE_JSON_NUMBER_FRAC;
3687 break;
3688 }
3689
3690 if (cc == 'x' || cc == 'X') {
3691 /*
3692 * ERROR: specification explicitly excludes
3693 * hexidecimal or octal numbers.
3694 */
3695 return (NULL);
3696 }
3697
3698 /* FALLTHRU */
3699 case DTRACE_JSON_NUMBER_FRAC:
3700 if (cc == 'e' || cc == 'E') {
3701 *dd++ = cc;
3702 state = DTRACE_JSON_NUMBER_EXP;
3703 break;
3704 }
3705
3706 if (cc == '+' || cc == '-') {
3707 /*
3708 * ERROR: expect sign as part of exponent only.
3709 */
3710 return (NULL);
3711 }
3712 /* FALLTHRU */
3713 case DTRACE_JSON_NUMBER_EXP:
3714 if (isdigit(cc) || cc == '+' || cc == '-') {
3715 *dd++ = cc;
3716 break;
3717 }
3718
3719 *dd = '\0';
3720 dd = dest; /* reset string buffer */
3721 if (found_key) {
3722 if (nelems > 1) {
3723 /*
3724 * ERROR: We expected an object, not
3725 * this number.
3726 */
3727 return (NULL);
3728 }
3729 return (dest);
3730 }
3731
3732 cur--;
3733 state = DTRACE_JSON_COMMA;
3734 break;
3735 case DTRACE_JSON_VALUE:
3736 if (isspace(cc))
3737 break;
3738
3739 if (cc == '{' || cc == '[') {
3740 if (nelems > 1 && found_key) {
3741 in_array = cc == '[' ? B_TRUE : B_FALSE;
3742 /*
3743 * If our element selector directs us
3744 * to descend into this nested object,
3745 * then move to the next selector
3746 * element in the list and restart the
3747 * state machine.
3748 */
3749 while (*elem != '\0')
3750 elem++;
3751 elem++; /* skip the inter-element NUL */
3752 nelems--;
3753 dd = dest;
3754 if (in_array) {
3755 state = DTRACE_JSON_VALUE;
3756 array_pos = 0;
3757 array_elem = dtrace_strtoll(
3758 elem, 10, size);
3759 found_key = array_elem == 0 ?
3760 B_TRUE : B_FALSE;
3761 } else {
3762 found_key = B_FALSE;
3763 state = DTRACE_JSON_OBJECT;
3764 }
3765 break;
3766 }
3767
3768 /*
3769 * Otherwise, we wish to either skip this
3770 * nested object or return it in full.
3771 */
3772 if (cc == '[')
3773 brackets = 1;
3774 else
3775 braces = 1;
3776 *dd++ = cc;
3777 state = DTRACE_JSON_COLLECT_OBJECT;
3778 break;
3779 }
3780
3781 if (cc == '"') {
3782 state = DTRACE_JSON_STRING;
3783 break;
3784 }
3785
3786 if (islower(cc)) {
3787 /*
3788 * Here we deal with true, false and null.
3789 */
3790 *dd++ = cc;
3791 state = DTRACE_JSON_IDENTIFIER;
3792 break;
3793 }
3794
3795 if (cc == '-' || isdigit(cc)) {
3796 *dd++ = cc;
3797 state = DTRACE_JSON_NUMBER;
3798 break;
3799 }
3800
3801 /*
3802 * ERROR: unexpected character at start of value.
3803 */
3804 return (NULL);
3805 case DTRACE_JSON_COLLECT_OBJECT:
3806 if (cc == '\0')
3807 /*
3808 * ERROR: unexpected end of input.
3809 */
3810 return (NULL);
3811
3812 *dd++ = cc;
3813 if (cc == '"') {
3814 collect_object = B_TRUE;
3815 state = DTRACE_JSON_STRING;
3816 break;
3817 }
3818
3819 if (cc == ']') {
3820 if (brackets-- == 0) {
3821 /*
3822 * ERROR: unbalanced brackets.
3823 */
3824 return (NULL);
3825 }
3826 } else if (cc == '}') {
3827 if (braces-- == 0) {
3828 /*
3829 * ERROR: unbalanced braces.
3830 */
3831 return (NULL);
3832 }
3833 } else if (cc == '{') {
3834 braces++;
3835 } else if (cc == '[') {
3836 brackets++;
3837 }
3838
3839 if (brackets == 0 && braces == 0) {
3840 if (found_key) {
3841 *dd = '\0';
3842 return (dest);
3843 }
3844 dd = dest; /* reset string buffer */
3845 state = DTRACE_JSON_COMMA;
3846 }
3847 break;
3848 }
3849 }
3850 return (NULL);
3851 }
3852
3853 /*
3854 * Emulate the execution of DTrace ID subroutines invoked by the call opcode.
3855 * Notice that we don't bother validating the proper number of arguments or
3856 * their types in the tuple stack. This isn't needed because all argument
3857 * interpretation is safe because of our load safety -- the worst that can
3858 * happen is that a bogus program can obtain bogus results.
3859 */
3860 static void
3861 dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs,
3862 dtrace_key_t *tupregs, int nargs,
3863 dtrace_mstate_t *mstate, dtrace_state_t *state)
3864 {
3865 volatile uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
3866 volatile uintptr_t *illval = &cpu_core[CPU->cpu_id].cpuc_dtrace_illval;
3867 dtrace_vstate_t *vstate = &state->dts_vstate;
3868
3869 union {
3870 mutex_impl_t mi;
3871 uint64_t mx;
3872 } m;
3873
3874 union {
3875 krwlock_t ri;
3876 uintptr_t rw;
3877 } r;
3878
3879 switch (subr) {
3880 case DIF_SUBR_RAND:
3881 regs[rd] = (dtrace_gethrtime() * 2416 + 374441) % 1771875;
3882 break;
3883
3884 case DIF_SUBR_MUTEX_OWNED:
3885 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t),
3886 mstate, vstate)) {
3887 regs[rd] = NULL;
3888 break;
3889 }
3890
3891 m.mx = dtrace_load64(tupregs[0].dttk_value);
3892 if (MUTEX_TYPE_ADAPTIVE(&m.mi))
3893 regs[rd] = MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER;
3894 else
3895 regs[rd] = LOCK_HELD(&m.mi.m_spin.m_spinlock);
3896 break;
3897
3898 case DIF_SUBR_MUTEX_OWNER:
3899 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t),
3900 mstate, vstate)) {
3901 regs[rd] = NULL;
3902 break;
3903 }
3904
3905 m.mx = dtrace_load64(tupregs[0].dttk_value);
3906 if (MUTEX_TYPE_ADAPTIVE(&m.mi) &&
3907 MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER)
3908 regs[rd] = (uintptr_t)MUTEX_OWNER(&m.mi);
3909 else
3910 regs[rd] = 0;
3911 break;
3912
3913 case DIF_SUBR_MUTEX_TYPE_ADAPTIVE:
3914 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t),
3915 mstate, vstate)) {
3916 regs[rd] = NULL;
3917 break;
3918 }
3919
3920 m.mx = dtrace_load64(tupregs[0].dttk_value);
3921 regs[rd] = MUTEX_TYPE_ADAPTIVE(&m.mi);
3922 break;
3923
3924 case DIF_SUBR_MUTEX_TYPE_SPIN:
3925 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t),
3926 mstate, vstate)) {
3927 regs[rd] = NULL;
3928 break;
3929 }
3930
3931 m.mx = dtrace_load64(tupregs[0].dttk_value);
3932 regs[rd] = MUTEX_TYPE_SPIN(&m.mi);
3933 break;
3934
3935 case DIF_SUBR_RW_READ_HELD: {
3936 uintptr_t tmp;
3937
3938 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t),
3939 mstate, vstate)) {
3940 regs[rd] = NULL;
3941 break;
3942 }
3943
3944 r.rw = dtrace_loadptr(tupregs[0].dttk_value);
3945 regs[rd] = _RW_READ_HELD(&r.ri, tmp);
3946 break;
3947 }
3948
3949 case DIF_SUBR_RW_WRITE_HELD:
3950 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t),
3951 mstate, vstate)) {
3952 regs[rd] = NULL;
3953 break;
3954 }
3955
3956 r.rw = dtrace_loadptr(tupregs[0].dttk_value);
3957 regs[rd] = _RW_WRITE_HELD(&r.ri);
3958 break;
3959
3960 case DIF_SUBR_RW_ISWRITER:
3961 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t),
3962 mstate, vstate)) {
3963 regs[rd] = NULL;
3964 break;
3965 }
3966
3967 r.rw = dtrace_loadptr(tupregs[0].dttk_value);
3968 regs[rd] = _RW_ISWRITER(&r.ri);
3969 break;
3970
3971 case DIF_SUBR_BCOPY: {
3972 /*
3973 * We need to be sure that the destination is in the scratch
3974 * region -- no other region is allowed.
3975 */
3976 uintptr_t src = tupregs[0].dttk_value;
3977 uintptr_t dest = tupregs[1].dttk_value;
3978 size_t size = tupregs[2].dttk_value;
3979
3980 if (!dtrace_inscratch(dest, size, mstate)) {
3981 *flags |= CPU_DTRACE_BADADDR;
3982 *illval = regs[rd];
3983 break;
3984 }
3985
3986 if (!dtrace_canload(src, size, mstate, vstate)) {
3987 regs[rd] = NULL;
3988 break;
3989 }
3990
3991 dtrace_bcopy((void *)src, (void *)dest, size);
3992 break;
3993 }
3994
3995 case DIF_SUBR_ALLOCA:
3996 case DIF_SUBR_COPYIN: {
3997 uintptr_t dest = P2ROUNDUP(mstate->dtms_scratch_ptr, 8);
3998 uint64_t size =
3999 tupregs[subr == DIF_SUBR_ALLOCA ? 0 : 1].dttk_value;
4000 size_t scratch_size = (dest - mstate->dtms_scratch_ptr) + size;
4001
4002 /*
4003 * This action doesn't require any credential checks since
4004 * probes will not activate in user contexts to which the
4005 * enabling user does not have permissions.
4006 */
4007
4008 /*
4009 * Rounding up the user allocation size could have overflowed
4010 * a large, bogus allocation (like -1ULL) to 0.
4011 */
4012 if (scratch_size < size ||
4013 !DTRACE_INSCRATCH(mstate, scratch_size)) {
4014 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4015 regs[rd] = NULL;
4016 break;
4017 }
4018
4019 if (subr == DIF_SUBR_COPYIN) {
4020 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
4021 dtrace_copyin(tupregs[0].dttk_value, dest, size, flags);
4022 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
4023 }
4024
4025 mstate->dtms_scratch_ptr += scratch_size;
4026 regs[rd] = dest;
4027 break;
4028 }
4029
4030 case DIF_SUBR_COPYINTO: {
4031 uint64_t size = tupregs[1].dttk_value;
4032 uintptr_t dest = tupregs[2].dttk_value;
4033
4034 /*
4035 * This action doesn't require any credential checks since
4036 * probes will not activate in user contexts to which the
4037 * enabling user does not have permissions.
4038 */
4039 if (!dtrace_inscratch(dest, size, mstate)) {
4040 *flags |= CPU_DTRACE_BADADDR;
4041 *illval = regs[rd];
4042 break;
4043 }
4044
4045 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
4046 dtrace_copyin(tupregs[0].dttk_value, dest, size, flags);
4047 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
4048 break;
4049 }
4050
4051 case DIF_SUBR_COPYINSTR: {
4052 uintptr_t dest = mstate->dtms_scratch_ptr;
4053 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4054
4055 if (nargs > 1 && tupregs[1].dttk_value < size)
4056 size = tupregs[1].dttk_value + 1;
4057
4058 /*
4059 * This action doesn't require any credential checks since
4060 * probes will not activate in user contexts to which the
4061 * enabling user does not have permissions.
4062 */
4063 if (!DTRACE_INSCRATCH(mstate, size)) {
4064 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4065 regs[rd] = NULL;
4066 break;
4067 }
4068
4069 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
4070 dtrace_copyinstr(tupregs[0].dttk_value, dest, size, flags);
4071 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
4072
4073 ((char *)dest)[size - 1] = '\0';
4074 mstate->dtms_scratch_ptr += size;
4075 regs[rd] = dest;
4076 break;
4077 }
4078
4079 case DIF_SUBR_MSGSIZE:
4080 case DIF_SUBR_MSGDSIZE: {
4081 uintptr_t baddr = tupregs[0].dttk_value, daddr;
4082 uintptr_t wptr, rptr;
4083 size_t count = 0;
4084 int cont = 0;
4085
4086 while (baddr != NULL && !(*flags & CPU_DTRACE_FAULT)) {
4087
4088 if (!dtrace_canload(baddr, sizeof (mblk_t), mstate,
4089 vstate)) {
4090 regs[rd] = NULL;
4091 break;
4092 }
4093
4094 wptr = dtrace_loadptr(baddr +
4095 offsetof(mblk_t, b_wptr));
4096
4097 rptr = dtrace_loadptr(baddr +
4098 offsetof(mblk_t, b_rptr));
4099
4100 if (wptr < rptr) {
4101 *flags |= CPU_DTRACE_BADADDR;
4102 *illval = tupregs[0].dttk_value;
4103 break;
4104 }
4105
4106 daddr = dtrace_loadptr(baddr +
4107 offsetof(mblk_t, b_datap));
4108
4109 baddr = dtrace_loadptr(baddr +
4110 offsetof(mblk_t, b_cont));
4111
4112 /*
4113 * We want to prevent against denial-of-service here,
4114 * so we're only going to search the list for
4115 * dtrace_msgdsize_max mblks.
4116 */
4117 if (cont++ > dtrace_msgdsize_max) {
4118 *flags |= CPU_DTRACE_ILLOP;
4119 break;
4120 }
4121
4122 if (subr == DIF_SUBR_MSGDSIZE) {
4123 if (dtrace_load8(daddr +
4124 offsetof(dblk_t, db_type)) != M_DATA)
4125 continue;
4126 }
4127
4128 count += wptr - rptr;
4129 }
4130
4131 if (!(*flags & CPU_DTRACE_FAULT))
4132 regs[rd] = count;
4133
4134 break;
4135 }
4136
4137 case DIF_SUBR_PROGENYOF: {
4138 pid_t pid = tupregs[0].dttk_value;
4139 proc_t *p;
4140 int rval = 0;
4141
4142 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
4143
4144 for (p = curthread->t_procp; p != NULL; p = p->p_parent) {
4145 if (p->p_pidp->pid_id == pid) {
4146 rval = 1;
4147 break;
4148 }
4149 }
4150
4151 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
4152
4153 regs[rd] = rval;
4154 break;
4155 }
4156
4157 case DIF_SUBR_SPECULATION:
4158 regs[rd] = dtrace_speculation(state);
4159 break;
4160
4161 case DIF_SUBR_COPYOUT: {
4162 uintptr_t kaddr = tupregs[0].dttk_value;
4163 uintptr_t uaddr = tupregs[1].dttk_value;
4164 uint64_t size = tupregs[2].dttk_value;
4165
4166 if (!dtrace_destructive_disallow &&
4167 dtrace_priv_proc_control(state, mstate) &&
4168 !dtrace_istoxic(kaddr, size)) {
4169 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
4170 dtrace_copyout(kaddr, uaddr, size, flags);
4171 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
4172 }
4173 break;
4174 }
4175
4176 case DIF_SUBR_COPYOUTSTR: {
4177 uintptr_t kaddr = tupregs[0].dttk_value;
4178 uintptr_t uaddr = tupregs[1].dttk_value;
4179 uint64_t size = tupregs[2].dttk_value;
4180
4181 if (!dtrace_destructive_disallow &&
4182 dtrace_priv_proc_control(state, mstate) &&
4183 !dtrace_istoxic(kaddr, size)) {
4184 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
4185 dtrace_copyoutstr(kaddr, uaddr, size, flags);
4186 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
4187 }
4188 break;
4189 }
4190
4191 case DIF_SUBR_STRLEN: {
4192 size_t sz;
4193 uintptr_t addr = (uintptr_t)tupregs[0].dttk_value;
4194 sz = dtrace_strlen((char *)addr,
4195 state->dts_options[DTRACEOPT_STRSIZE]);
4196
4197 if (!dtrace_canload(addr, sz + 1, mstate, vstate)) {
4198 regs[rd] = NULL;
4199 break;
4200 }
4201
4202 regs[rd] = sz;
4203
4204 break;
4205 }
4206
4207 case DIF_SUBR_STRCHR:
4208 case DIF_SUBR_STRRCHR: {
4209 /*
4210 * We're going to iterate over the string looking for the
4211 * specified character. We will iterate until we have reached
4212 * the string length or we have found the character. If this
4213 * is DIF_SUBR_STRRCHR, we will look for the last occurrence
4214 * of the specified character instead of the first.
4215 */
4216 uintptr_t saddr = tupregs[0].dttk_value;
4217 uintptr_t addr = tupregs[0].dttk_value;
4218 uintptr_t limit = addr + state->dts_options[DTRACEOPT_STRSIZE];
4219 char c, target = (char)tupregs[1].dttk_value;
4220
4221 for (regs[rd] = NULL; addr < limit; addr++) {
4222 if ((c = dtrace_load8(addr)) == target) {
4223 regs[rd] = addr;
4224
4225 if (subr == DIF_SUBR_STRCHR)
4226 break;
4227 }
4228
4229 if (c == '\0')
4230 break;
4231 }
4232
4233 if (!dtrace_canload(saddr, addr - saddr, mstate, vstate)) {
4234 regs[rd] = NULL;
4235 break;
4236 }
4237
4238 break;
4239 }
4240
4241 case DIF_SUBR_STRSTR:
4242 case DIF_SUBR_INDEX:
4243 case DIF_SUBR_RINDEX: {
4244 /*
4245 * We're going to iterate over the string looking for the
4246 * specified string. We will iterate until we have reached
4247 * the string length or we have found the string. (Yes, this
4248 * is done in the most naive way possible -- but considering
4249 * that the string we're searching for is likely to be
4250 * relatively short, the complexity of Rabin-Karp or similar
4251 * hardly seems merited.)
4252 */
4253 char *addr = (char *)(uintptr_t)tupregs[0].dttk_value;
4254 char *substr = (char *)(uintptr_t)tupregs[1].dttk_value;
4255 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4256 size_t len = dtrace_strlen(addr, size);
4257 size_t sublen = dtrace_strlen(substr, size);
4258 char *limit = addr + len, *orig = addr;
4259 int notfound = subr == DIF_SUBR_STRSTR ? 0 : -1;
4260 int inc = 1;
4261
4262 regs[rd] = notfound;
4263
4264 if (!dtrace_canload((uintptr_t)addr, len + 1, mstate, vstate)) {
4265 regs[rd] = NULL;
4266 break;
4267 }
4268
4269 if (!dtrace_canload((uintptr_t)substr, sublen + 1, mstate,
4270 vstate)) {
4271 regs[rd] = NULL;
4272 break;
4273 }
4274
4275 /*
4276 * strstr() and index()/rindex() have similar semantics if
4277 * both strings are the empty string: strstr() returns a
4278 * pointer to the (empty) string, and index() and rindex()
4279 * both return index 0 (regardless of any position argument).
4280 */
4281 if (sublen == 0 && len == 0) {
4282 if (subr == DIF_SUBR_STRSTR)
4283 regs[rd] = (uintptr_t)addr;
4284 else
4285 regs[rd] = 0;
4286 break;
4287 }
4288
4289 if (subr != DIF_SUBR_STRSTR) {
4290 if (subr == DIF_SUBR_RINDEX) {
4291 limit = orig - 1;
4292 addr += len;
4293 inc = -1;
4294 }
4295
4296 /*
4297 * Both index() and rindex() take an optional position
4298 * argument that denotes the starting position.
4299 */
4300 if (nargs == 3) {
4301 int64_t pos = (int64_t)tupregs[2].dttk_value;
4302
4303 /*
4304 * If the position argument to index() is
4305 * negative, Perl implicitly clamps it at
4306 * zero. This semantic is a little surprising
4307 * given the special meaning of negative
4308 * positions to similar Perl functions like
4309 * substr(), but it appears to reflect a
4310 * notion that index() can start from a
4311 * negative index and increment its way up to
4312 * the string. Given this notion, Perl's
4313 * rindex() is at least self-consistent in
4314 * that it implicitly clamps positions greater
4315 * than the string length to be the string
4316 * length. Where Perl completely loses
4317 * coherence, however, is when the specified
4318 * substring is the empty string (""). In
4319 * this case, even if the position is
4320 * negative, rindex() returns 0 -- and even if
4321 * the position is greater than the length,
4322 * index() returns the string length. These
4323 * semantics violate the notion that index()
4324 * should never return a value less than the
4325 * specified position and that rindex() should
4326 * never return a value greater than the
4327 * specified position. (One assumes that
4328 * these semantics are artifacts of Perl's
4329 * implementation and not the results of
4330 * deliberate design -- it beggars belief that
4331 * even Larry Wall could desire such oddness.)
4332 * While in the abstract one would wish for
4333 * consistent position semantics across
4334 * substr(), index() and rindex() -- or at the
4335 * very least self-consistent position
4336 * semantics for index() and rindex() -- we
4337 * instead opt to keep with the extant Perl
4338 * semantics, in all their broken glory. (Do
4339 * we have more desire to maintain Perl's
4340 * semantics than Perl does? Probably.)
4341 */
4342 if (subr == DIF_SUBR_RINDEX) {
4343 if (pos < 0) {
4344 if (sublen == 0)
4345 regs[rd] = 0;
4346 break;
4347 }
4348
4349 if (pos > len)
4350 pos = len;
4351 } else {
4352 if (pos < 0)
4353 pos = 0;
4354
4355 if (pos >= len) {
4356 if (sublen == 0)
4357 regs[rd] = len;
4358 break;
4359 }
4360 }
4361
4362 addr = orig + pos;
4363 }
4364 }
4365
4366 for (regs[rd] = notfound; addr != limit; addr += inc) {
4367 if (dtrace_strncmp(addr, substr, sublen) == 0) {
4368 if (subr != DIF_SUBR_STRSTR) {
4369 /*
4370 * As D index() and rindex() are
4371 * modeled on Perl (and not on awk),
4372 * we return a zero-based (and not a
4373 * one-based) index. (For you Perl
4374 * weenies: no, we're not going to add
4375 * $[ -- and shouldn't you be at a con
4376 * or something?)
4377 */
4378 regs[rd] = (uintptr_t)(addr - orig);
4379 break;
4380 }
4381
4382 ASSERT(subr == DIF_SUBR_STRSTR);
4383 regs[rd] = (uintptr_t)addr;
4384 break;
4385 }
4386 }
4387
4388 break;
4389 }
4390
4391 case DIF_SUBR_STRTOK: {
4392 uintptr_t addr = tupregs[0].dttk_value;
4393 uintptr_t tokaddr = tupregs[1].dttk_value;
4394 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4395 uintptr_t limit, toklimit = tokaddr + size;
4396 uint8_t c, tokmap[32]; /* 256 / 8 */
4397 char *dest = (char *)mstate->dtms_scratch_ptr;
4398 int i;
4399
4400 /*
4401 * Check both the token buffer and (later) the input buffer,
4402 * since both could be non-scratch addresses.
4403 */
4404 if (!dtrace_strcanload(tokaddr, size, mstate, vstate)) {
4405 regs[rd] = NULL;
4406 break;
4407 }
4408
4409 if (!DTRACE_INSCRATCH(mstate, size)) {
4410 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4411 regs[rd] = NULL;
4412 break;
4413 }
4414
4415 if (addr == NULL) {
4416 /*
4417 * If the address specified is NULL, we use our saved
4418 * strtok pointer from the mstate. Note that this
4419 * means that the saved strtok pointer is _only_
4420 * valid within multiple enablings of the same probe --
4421 * it behaves like an implicit clause-local variable.
4422 */
4423 addr = mstate->dtms_strtok;
4424 } else {
4425 /*
4426 * If the user-specified address is non-NULL we must
4427 * access check it. This is the only time we have
4428 * a chance to do so, since this address may reside
4429 * in the string table of this clause-- future calls
4430 * (when we fetch addr from mstate->dtms_strtok)
4431 * would fail this access check.
4432 */
4433 if (!dtrace_strcanload(addr, size, mstate, vstate)) {
4434 regs[rd] = NULL;
4435 break;
4436 }
4437 }
4438
4439 /*
4440 * First, zero the token map, and then process the token
4441 * string -- setting a bit in the map for every character
4442 * found in the token string.
4443 */
4444 for (i = 0; i < sizeof (tokmap); i++)
4445 tokmap[i] = 0;
4446
4447 for (; tokaddr < toklimit; tokaddr++) {
4448 if ((c = dtrace_load8(tokaddr)) == '\0')
4449 break;
4450
4451 ASSERT((c >> 3) < sizeof (tokmap));
4452 tokmap[c >> 3] |= (1 << (c & 0x7));
4453 }
4454
4455 for (limit = addr + size; addr < limit; addr++) {
4456 /*
4457 * We're looking for a character that is _not_ contained
4458 * in the token string.
4459 */
4460 if ((c = dtrace_load8(addr)) == '\0')
4461 break;
4462
4463 if (!(tokmap[c >> 3] & (1 << (c & 0x7))))
4464 break;
4465 }
4466
4467 if (c == '\0') {
4468 /*
4469 * We reached the end of the string without finding
4470 * any character that was not in the token string.
4471 * We return NULL in this case, and we set the saved
4472 * address to NULL as well.
4473 */
4474 regs[rd] = NULL;
4475 mstate->dtms_strtok = NULL;
4476 break;
4477 }
4478
4479 /*
4480 * From here on, we're copying into the destination string.
4481 */
4482 for (i = 0; addr < limit && i < size - 1; addr++) {
4483 if ((c = dtrace_load8(addr)) == '\0')
4484 break;
4485
4486 if (tokmap[c >> 3] & (1 << (c & 0x7)))
4487 break;
4488
4489 ASSERT(i < size);
4490 dest[i++] = c;
4491 }
4492
4493 ASSERT(i < size);
4494 dest[i] = '\0';
4495 regs[rd] = (uintptr_t)dest;
4496 mstate->dtms_scratch_ptr += size;
4497 mstate->dtms_strtok = addr;
4498 break;
4499 }
4500
4501 case DIF_SUBR_SUBSTR: {
4502 uintptr_t s = tupregs[0].dttk_value;
4503 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4504 char *d = (char *)mstate->dtms_scratch_ptr;
4505 int64_t index = (int64_t)tupregs[1].dttk_value;
4506 int64_t remaining = (int64_t)tupregs[2].dttk_value;
4507 size_t len = dtrace_strlen((char *)s, size);
4508 int64_t i;
4509
4510 if (!dtrace_canload(s, len + 1, mstate, vstate)) {
4511 regs[rd] = NULL;
4512 break;
4513 }
4514
4515 if (!DTRACE_INSCRATCH(mstate, size)) {
4516 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4517 regs[rd] = NULL;
4518 break;
4519 }
4520
4521 if (nargs <= 2)
4522 remaining = (int64_t)size;
4523
4524 if (index < 0) {
4525 index += len;
4526
4527 if (index < 0 && index + remaining > 0) {
4528 remaining += index;
4529 index = 0;
4530 }
4531 }
4532
4533 if (index >= len || index < 0) {
4534 remaining = 0;
4535 } else if (remaining < 0) {
4536 remaining += len - index;
4537 } else if (index + remaining > size) {
4538 remaining = size - index;
4539 }
4540
4541 for (i = 0; i < remaining; i++) {
4542 if ((d[i] = dtrace_load8(s + index + i)) == '\0')
4543 break;
4544 }
4545
4546 d[i] = '\0';
4547
4548 mstate->dtms_scratch_ptr += size;
4549 regs[rd] = (uintptr_t)d;
4550 break;
4551 }
4552
4553 case DIF_SUBR_JSON: {
4554 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4555 uintptr_t json = tupregs[0].dttk_value;
4556 size_t jsonlen = dtrace_strlen((char *)json, size);
4557 uintptr_t elem = tupregs[1].dttk_value;
4558 size_t elemlen = dtrace_strlen((char *)elem, size);
4559
4560 char *dest = (char *)mstate->dtms_scratch_ptr;
4561 char *elemlist = (char *)mstate->dtms_scratch_ptr + jsonlen + 1;
4562 char *ee = elemlist;
4563 int nelems = 1;
4564 uintptr_t cur;
4565
4566 if (!dtrace_canload(json, jsonlen + 1, mstate, vstate) ||
4567 !dtrace_canload(elem, elemlen + 1, mstate, vstate)) {
4568 regs[rd] = NULL;
4569 break;
4570 }
4571
4572 if (!DTRACE_INSCRATCH(mstate, jsonlen + 1 + elemlen + 1)) {
4573 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4574 regs[rd] = NULL;
4575 break;
4576 }
4577
4578 /*
4579 * Read the element selector and split it up into a packed list
4580 * of strings.
4581 */
4582 for (cur = elem; cur < elem + elemlen; cur++) {
4583 char cc = dtrace_load8(cur);
4584
4585 if (cur == elem && cc == '[') {
4586 /*
4587 * If the first element selector key is
4588 * actually an array index then ignore the
4589 * bracket.
4590 */
4591 continue;
4592 }
4593
4594 if (cc == ']')
4595 continue;
4596
4597 if (cc == '.' || cc == '[') {
4598 nelems++;
4599 cc = '\0';
4600 }
4601
4602 *ee++ = cc;
4603 }
4604 *ee++ = '\0';
4605
4606 if ((regs[rd] = (uintptr_t)dtrace_json(size, json, elemlist,
4607 nelems, dest)) != NULL)
4608 mstate->dtms_scratch_ptr += jsonlen + 1;
4609 break;
4610 }
4611
4612 case DIF_SUBR_TOUPPER:
4613 case DIF_SUBR_TOLOWER: {
4614 uintptr_t s = tupregs[0].dttk_value;
4615 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4616 char *dest = (char *)mstate->dtms_scratch_ptr, c;
4617 size_t len = dtrace_strlen((char *)s, size);
4618 char lower, upper, convert;
4619 int64_t i;
4620
4621 if (subr == DIF_SUBR_TOUPPER) {
4622 lower = 'a';
4623 upper = 'z';
4624 convert = 'A';
4625 } else {
4626 lower = 'A';
4627 upper = 'Z';
4628 convert = 'a';
4629 }
4630
4631 if (!dtrace_canload(s, len + 1, mstate, vstate)) {
4632 regs[rd] = NULL;
4633 break;
4634 }
4635
4636 if (!DTRACE_INSCRATCH(mstate, size)) {
4637 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4638 regs[rd] = NULL;
4639 break;
4640 }
4641
4642 for (i = 0; i < size - 1; i++) {
4643 if ((c = dtrace_load8(s + i)) == '\0')
4644 break;
4645
4646 if (c >= lower && c <= upper)
4647 c = convert + (c - lower);
4648
4649 dest[i] = c;
4650 }
4651
4652 ASSERT(i < size);
4653 dest[i] = '\0';
4654 regs[rd] = (uintptr_t)dest;
4655 mstate->dtms_scratch_ptr += size;
4656 break;
4657 }
4658
4659 case DIF_SUBR_GETMAJOR:
4660 #ifdef _LP64
4661 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR64) & MAXMAJ64;
4662 #else
4663 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR) & MAXMAJ;
4664 #endif
4665 break;
4666
4667 case DIF_SUBR_GETMINOR:
4668 #ifdef _LP64
4669 regs[rd] = tupregs[0].dttk_value & MAXMIN64;
4670 #else
4671 regs[rd] = tupregs[0].dttk_value & MAXMIN;
4672 #endif
4673 break;
4674
4675 case DIF_SUBR_DDI_PATHNAME: {
4676 /*
4677 * This one is a galactic mess. We are going to roughly
4678 * emulate ddi_pathname(), but it's made more complicated
4679 * by the fact that we (a) want to include the minor name and
4680 * (b) must proceed iteratively instead of recursively.
4681 */
4682 uintptr_t dest = mstate->dtms_scratch_ptr;
4683 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4684 char *start = (char *)dest, *end = start + size - 1;
4685 uintptr_t daddr = tupregs[0].dttk_value;
4686 int64_t minor = (int64_t)tupregs[1].dttk_value;
4687 char *s;
4688 int i, len, depth = 0;
4689
4690 /*
4691 * Due to all the pointer jumping we do and context we must
4692 * rely upon, we just mandate that the user must have kernel
4693 * read privileges to use this routine.
4694 */
4695 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) == 0) {
4696 *flags |= CPU_DTRACE_KPRIV;
4697 *illval = daddr;
4698 regs[rd] = NULL;
4699 }
4700
4701 if (!DTRACE_INSCRATCH(mstate, size)) {
4702 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4703 regs[rd] = NULL;
4704 break;
4705 }
4706
4707 *end = '\0';
4708
4709 /*
4710 * We want to have a name for the minor. In order to do this,
4711 * we need to walk the minor list from the devinfo. We want
4712 * to be sure that we don't infinitely walk a circular list,
4713 * so we check for circularity by sending a scout pointer
4714 * ahead two elements for every element that we iterate over;
4715 * if the list is circular, these will ultimately point to the
4716 * same element. You may recognize this little trick as the
4717 * answer to a stupid interview question -- one that always
4718 * seems to be asked by those who had to have it laboriously
4719 * explained to them, and who can't even concisely describe
4720 * the conditions under which one would be forced to resort to
4721 * this technique. Needless to say, those conditions are
4722 * found here -- and probably only here. Is this the only use
4723 * of this infamous trick in shipping, production code? If it
4724 * isn't, it probably should be...
4725 */
4726 if (minor != -1) {
4727 uintptr_t maddr = dtrace_loadptr(daddr +
4728 offsetof(struct dev_info, devi_minor));
4729
4730 uintptr_t next = offsetof(struct ddi_minor_data, next);
4731 uintptr_t name = offsetof(struct ddi_minor_data,
4732 d_minor) + offsetof(struct ddi_minor, name);
4733 uintptr_t dev = offsetof(struct ddi_minor_data,
4734 d_minor) + offsetof(struct ddi_minor, dev);
4735 uintptr_t scout;
4736
4737 if (maddr != NULL)
4738 scout = dtrace_loadptr(maddr + next);
4739
4740 while (maddr != NULL && !(*flags & CPU_DTRACE_FAULT)) {
4741 uint64_t m;
4742 #ifdef _LP64
4743 m = dtrace_load64(maddr + dev) & MAXMIN64;
4744 #else
4745 m = dtrace_load32(maddr + dev) & MAXMIN;
4746 #endif
4747 if (m != minor) {
4748 maddr = dtrace_loadptr(maddr + next);
4749
4750 if (scout == NULL)
4751 continue;
4752
4753 scout = dtrace_loadptr(scout + next);
4754
4755 if (scout == NULL)
4756 continue;
4757
4758 scout = dtrace_loadptr(scout + next);
4759
4760 if (scout == NULL)
4761 continue;
4762
4763 if (scout == maddr) {
4764 *flags |= CPU_DTRACE_ILLOP;
4765 break;
4766 }
4767
4768 continue;
4769 }
4770
4771 /*
4772 * We have the minor data. Now we need to
4773 * copy the minor's name into the end of the
4774 * pathname.
4775 */
4776 s = (char *)dtrace_loadptr(maddr + name);
4777 len = dtrace_strlen(s, size);
4778
4779 if (*flags & CPU_DTRACE_FAULT)
4780 break;
4781
4782 if (len != 0) {
4783 if ((end -= (len + 1)) < start)
4784 break;
4785
4786 *end = ':';
4787 }
4788
4789 for (i = 1; i <= len; i++)
4790 end[i] = dtrace_load8((uintptr_t)s++);
4791 break;
4792 }
4793 }
4794
4795 while (daddr != NULL && !(*flags & CPU_DTRACE_FAULT)) {
4796 ddi_node_state_t devi_state;
4797
4798 devi_state = dtrace_load32(daddr +
4799 offsetof(struct dev_info, devi_node_state));
4800
4801 if (*flags & CPU_DTRACE_FAULT)
4802 break;
4803
4804 if (devi_state >= DS_INITIALIZED) {
4805 s = (char *)dtrace_loadptr(daddr +
4806 offsetof(struct dev_info, devi_addr));
4807 len = dtrace_strlen(s, size);
4808
4809 if (*flags & CPU_DTRACE_FAULT)
4810 break;
4811
4812 if (len != 0) {
4813 if ((end -= (len + 1)) < start)
4814 break;
4815
4816 *end = '@';
4817 }
4818
4819 for (i = 1; i <= len; i++)
4820 end[i] = dtrace_load8((uintptr_t)s++);
4821 }
4822
4823 /*
4824 * Now for the node name...
4825 */
4826 s = (char *)dtrace_loadptr(daddr +
4827 offsetof(struct dev_info, devi_node_name));
4828
4829 daddr = dtrace_loadptr(daddr +
4830 offsetof(struct dev_info, devi_parent));
4831
4832 /*
4833 * If our parent is NULL (that is, if we're the root
4834 * node), we're going to use the special path
4835 * "devices".
4836 */
4837 if (daddr == NULL)
4838 s = "devices";
4839
4840 len = dtrace_strlen(s, size);
4841 if (*flags & CPU_DTRACE_FAULT)
4842 break;
4843
4844 if ((end -= (len + 1)) < start)
4845 break;
4846
4847 for (i = 1; i <= len; i++)
4848 end[i] = dtrace_load8((uintptr_t)s++);
4849 *end = '/';
4850
4851 if (depth++ > dtrace_devdepth_max) {
4852 *flags |= CPU_DTRACE_ILLOP;
4853 break;
4854 }
4855 }
4856
4857 if (end < start)
4858 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4859
4860 if (daddr == NULL) {
4861 regs[rd] = (uintptr_t)end;
4862 mstate->dtms_scratch_ptr += size;
4863 }
4864
4865 break;
4866 }
4867
4868 case DIF_SUBR_STRJOIN: {
4869 char *d = (char *)mstate->dtms_scratch_ptr;
4870 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4871 uintptr_t s1 = tupregs[0].dttk_value;
4872 uintptr_t s2 = tupregs[1].dttk_value;
4873 int i = 0;
4874
4875 if (!dtrace_strcanload(s1, size, mstate, vstate) ||
4876 !dtrace_strcanload(s2, size, mstate, vstate)) {
4877 regs[rd] = NULL;
4878 break;
4879 }
4880
4881 if (!DTRACE_INSCRATCH(mstate, size)) {
4882 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4883 regs[rd] = NULL;
4884 break;
4885 }
4886
4887 for (;;) {
4888 if (i >= size) {
4889 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4890 regs[rd] = NULL;
4891 break;
4892 }
4893
4894 if ((d[i++] = dtrace_load8(s1++)) == '\0') {
4895 i--;
4896 break;
4897 }
4898 }
4899
4900 for (;;) {
4901 if (i >= size) {
4902 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4903 regs[rd] = NULL;
4904 break;
4905 }
4906
4907 if ((d[i++] = dtrace_load8(s2++)) == '\0')
4908 break;
4909 }
4910
4911 if (i < size) {
4912 mstate->dtms_scratch_ptr += i;
4913 regs[rd] = (uintptr_t)d;
4914 }
4915
4916 break;
4917 }
4918
4919 case DIF_SUBR_STRTOLL: {
4920 uintptr_t s = tupregs[0].dttk_value;
4921 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4922 int base = 10;
4923
4924 if (nargs > 1) {
4925 if ((base = tupregs[1].dttk_value) <= 1 ||
4926 base > ('z' - 'a' + 1) + ('9' - '0' + 1)) {
4927 *flags |= CPU_DTRACE_ILLOP;
4928 break;
4929 }
4930 }
4931
4932 if (!dtrace_strcanload(s, size, mstate, vstate)) {
4933 regs[rd] = INT64_MIN;
4934 break;
4935 }
4936
4937 regs[rd] = dtrace_strtoll((char *)s, base, size);
4938 break;
4939 }
4940
4941 case DIF_SUBR_LLTOSTR: {
4942 int64_t i = (int64_t)tupregs[0].dttk_value;
4943 uint64_t val, digit;
4944 uint64_t size = 65; /* enough room for 2^64 in binary */
4945 char *end = (char *)mstate->dtms_scratch_ptr + size - 1;
4946 int base = 10;
4947
4948 if (nargs > 1) {
4949 if ((base = tupregs[1].dttk_value) <= 1 ||
4950 base > ('z' - 'a' + 1) + ('9' - '0' + 1)) {
4951 *flags |= CPU_DTRACE_ILLOP;
4952 break;
4953 }
4954 }
4955
4956 val = (base == 10 && i < 0) ? i * -1 : i;
4957
4958 if (!DTRACE_INSCRATCH(mstate, size)) {
4959 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4960 regs[rd] = NULL;
4961 break;
4962 }
4963
4964 for (*end-- = '\0'; val; val /= base) {
4965 if ((digit = val % base) <= '9' - '0') {
4966 *end-- = '0' + digit;
4967 } else {
4968 *end-- = 'a' + (digit - ('9' - '0') - 1);
4969 }
4970 }
4971
4972 if (i == 0 && base == 16)
4973 *end-- = '0';
4974
4975 if (base == 16)
4976 *end-- = 'x';
4977
4978 if (i == 0 || base == 8 || base == 16)
4979 *end-- = '0';
4980
4981 if (i < 0 && base == 10)
4982 *end-- = '-';
4983
4984 regs[rd] = (uintptr_t)end + 1;
4985 mstate->dtms_scratch_ptr += size;
4986 break;
4987 }
4988
4989 case DIF_SUBR_HTONS:
4990 case DIF_SUBR_NTOHS:
4991 #ifdef _BIG_ENDIAN
4992 regs[rd] = (uint16_t)tupregs[0].dttk_value;
4993 #else
4994 regs[rd] = DT_BSWAP_16((uint16_t)tupregs[0].dttk_value);
4995 #endif
4996 break;
4997
4998
4999 case DIF_SUBR_HTONL:
5000 case DIF_SUBR_NTOHL:
5001 #ifdef _BIG_ENDIAN
5002 regs[rd] = (uint32_t)tupregs[0].dttk_value;
5003 #else
5004 regs[rd] = DT_BSWAP_32((uint32_t)tupregs[0].dttk_value);
5005 #endif
5006 break;
5007
5008
5009 case DIF_SUBR_HTONLL:
5010 case DIF_SUBR_NTOHLL:
5011 #ifdef _BIG_ENDIAN
5012 regs[rd] = (uint64_t)tupregs[0].dttk_value;
5013 #else
5014 regs[rd] = DT_BSWAP_64((uint64_t)tupregs[0].dttk_value);
5015 #endif
5016 break;
5017
5018
5019 case DIF_SUBR_DIRNAME:
5020 case DIF_SUBR_BASENAME: {
5021 char *dest = (char *)mstate->dtms_scratch_ptr;
5022 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
5023 uintptr_t src = tupregs[0].dttk_value;
5024 int i, j, len = dtrace_strlen((char *)src, size);
5025 int lastbase = -1, firstbase = -1, lastdir = -1;
5026 int start, end;
5027
5028 if (!dtrace_canload(src, len + 1, mstate, vstate)) {
5029 regs[rd] = NULL;
5030 break;
5031 }
5032
5033 if (!DTRACE_INSCRATCH(mstate, size)) {
5034 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5035 regs[rd] = NULL;
5036 break;
5037 }
5038
5039 /*
5040 * The basename and dirname for a zero-length string is
5041 * defined to be "."
5042 */
5043 if (len == 0) {
5044 len = 1;
5045 src = (uintptr_t)".";
5046 }
5047
5048 /*
5049 * Start from the back of the string, moving back toward the
5050 * front until we see a character that isn't a slash. That
5051 * character is the last character in the basename.
5052 */
5053 for (i = len - 1; i >= 0; i--) {
5054 if (dtrace_load8(src + i) != '/')
5055 break;
5056 }
5057
5058 if (i >= 0)
5059 lastbase = i;
5060
5061 /*
5062 * Starting from the last character in the basename, move
5063 * towards the front until we find a slash. The character
5064 * that we processed immediately before that is the first
5065 * character in the basename.
5066 */
5067 for (; i >= 0; i--) {
5068 if (dtrace_load8(src + i) == '/')
5069 break;
5070 }
5071
5072 if (i >= 0)
5073 firstbase = i + 1;
5074
5075 /*
5076 * Now keep going until we find a non-slash character. That
5077 * character is the last character in the dirname.
5078 */
5079 for (; i >= 0; i--) {
5080 if (dtrace_load8(src + i) != '/')
5081 break;
5082 }
5083
5084 if (i >= 0)
5085 lastdir = i;
5086
5087 ASSERT(!(lastbase == -1 && firstbase != -1));
5088 ASSERT(!(firstbase == -1 && lastdir != -1));
5089
5090 if (lastbase == -1) {
5091 /*
5092 * We didn't find a non-slash character. We know that
5093 * the length is non-zero, so the whole string must be
5094 * slashes. In either the dirname or the basename
5095 * case, we return '/'.
5096 */
5097 ASSERT(firstbase == -1);
5098 firstbase = lastbase = lastdir = 0;
5099 }
5100
5101 if (firstbase == -1) {
5102 /*
5103 * The entire string consists only of a basename
5104 * component. If we're looking for dirname, we need
5105 * to change our string to be just "."; if we're
5106 * looking for a basename, we'll just set the first
5107 * character of the basename to be 0.
5108 */
5109 if (subr == DIF_SUBR_DIRNAME) {
5110 ASSERT(lastdir == -1);
5111 src = (uintptr_t)".";
5112 lastdir = 0;
5113 } else {
5114 firstbase = 0;
5115 }
5116 }
5117
5118 if (subr == DIF_SUBR_DIRNAME) {
5119 if (lastdir == -1) {
5120 /*
5121 * We know that we have a slash in the name --
5122 * or lastdir would be set to 0, above. And
5123 * because lastdir is -1, we know that this
5124 * slash must be the first character. (That
5125 * is, the full string must be of the form
5126 * "/basename".) In this case, the last
5127 * character of the directory name is 0.
5128 */
5129 lastdir = 0;
5130 }
5131
5132 start = 0;
5133 end = lastdir;
5134 } else {
5135 ASSERT(subr == DIF_SUBR_BASENAME);
5136 ASSERT(firstbase != -1 && lastbase != -1);
5137 start = firstbase;
5138 end = lastbase;
5139 }
5140
5141 for (i = start, j = 0; i <= end && j < size - 1; i++, j++)
5142 dest[j] = dtrace_load8(src + i);
5143
5144 dest[j] = '\0';
5145 regs[rd] = (uintptr_t)dest;
5146 mstate->dtms_scratch_ptr += size;
5147 break;
5148 }
5149
5150 case DIF_SUBR_GETF: {
5151 uintptr_t fd = tupregs[0].dttk_value;
5152 uf_info_t *finfo = &curthread->t_procp->p_user.u_finfo;
5153 file_t *fp;
5154
5155 if (!dtrace_priv_proc(state, mstate)) {
5156 regs[rd] = NULL;
5157 break;
5158 }
5159
5160 /*
5161 * This is safe because fi_nfiles only increases, and the
5162 * fi_list array is not freed when the array size doubles.
5163 * (See the comment in flist_grow() for details on the
5164 * management of the u_finfo structure.)
5165 */
5166 fp = fd < finfo->fi_nfiles ? finfo->fi_list[fd].uf_file : NULL;
5167
5168 mstate->dtms_getf = fp;
5169 regs[rd] = (uintptr_t)fp;
5170 break;
5171 }
5172
5173 case DIF_SUBR_CLEANPATH: {
5174 char *dest = (char *)mstate->dtms_scratch_ptr, c;
5175 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
5176 uintptr_t src = tupregs[0].dttk_value;
5177 int i = 0, j = 0;
5178 zone_t *z;
5179
5180 if (!dtrace_strcanload(src, size, mstate, vstate)) {
5181 regs[rd] = NULL;
5182 break;
5183 }
5184
5185 if (!DTRACE_INSCRATCH(mstate, size)) {
5186 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5187 regs[rd] = NULL;
5188 break;
5189 }
5190
5191 /*
5192 * Move forward, loading each character.
5193 */
5194 do {
5195 c = dtrace_load8(src + i++);
5196 next:
5197 if (j + 5 >= size) /* 5 = strlen("/..c\0") */
5198 break;
5199
5200 if (c != '/') {
5201 dest[j++] = c;
5202 continue;
5203 }
5204
5205 c = dtrace_load8(src + i++);
5206
5207 if (c == '/') {
5208 /*
5209 * We have two slashes -- we can just advance
5210 * to the next character.
5211 */
5212 goto next;
5213 }
5214
5215 if (c != '.') {
5216 /*
5217 * This is not "." and it's not ".." -- we can
5218 * just store the "/" and this character and
5219 * drive on.
5220 */
5221 dest[j++] = '/';
5222 dest[j++] = c;
5223 continue;
5224 }
5225
5226 c = dtrace_load8(src + i++);
5227
5228 if (c == '/') {
5229 /*
5230 * This is a "/./" component. We're not going
5231 * to store anything in the destination buffer;
5232 * we're just going to go to the next component.
5233 */
5234 goto next;
5235 }
5236
5237 if (c != '.') {
5238 /*
5239 * This is not ".." -- we can just store the
5240 * "/." and this character and continue
5241 * processing.
5242 */
5243 dest[j++] = '/';
5244 dest[j++] = '.';
5245 dest[j++] = c;
5246 continue;
5247 }
5248
5249 c = dtrace_load8(src + i++);
5250
5251 if (c != '/' && c != '\0') {
5252 /*
5253 * This is not ".." -- it's "..[mumble]".
5254 * We'll store the "/.." and this character
5255 * and continue processing.
5256 */
5257 dest[j++] = '/';
5258 dest[j++] = '.';
5259 dest[j++] = '.';
5260 dest[j++] = c;
5261 continue;
5262 }
5263
5264 /*
5265 * This is "/../" or "/..\0". We need to back up
5266 * our destination pointer until we find a "/".
5267 */
5268 i--;
5269 while (j != 0 && dest[--j] != '/')
5270 continue;
5271
5272 if (c == '\0')
5273 dest[++j] = '/';
5274 } while (c != '\0');
5275
5276 dest[j] = '\0';
5277
5278 if (mstate->dtms_getf != NULL &&
5279 !(mstate->dtms_access & DTRACE_ACCESS_KERNEL) &&
5280 (z = state->dts_cred.dcr_cred->cr_zone) != kcred->cr_zone) {
5281 /*
5282 * If we've done a getf() as a part of this ECB and we
5283 * don't have kernel access (and we're not in the global
5284 * zone), check if the path we cleaned up begins with
5285 * the zone's root path, and trim it off if so. Note
5286 * that this is an output cleanliness issue, not a
5287 * security issue: knowing one's zone root path does
5288 * not enable privilege escalation.
5289 */
5290 if (strstr(dest, z->zone_rootpath) == dest)
5291 dest += strlen(z->zone_rootpath) - 1;
5292 }
5293
5294 regs[rd] = (uintptr_t)dest;
5295 mstate->dtms_scratch_ptr += size;
5296 break;
5297 }
5298
5299 case DIF_SUBR_INET_NTOA:
5300 case DIF_SUBR_INET_NTOA6:
5301 case DIF_SUBR_INET_NTOP: {
5302 size_t size;
5303 int af, argi, i;
5304 char *base, *end;
5305
5306 if (subr == DIF_SUBR_INET_NTOP) {
5307 af = (int)tupregs[0].dttk_value;
5308 argi = 1;
5309 } else {
5310 af = subr == DIF_SUBR_INET_NTOA ? AF_INET: AF_INET6;
5311 argi = 0;
5312 }
5313
5314 if (af == AF_INET) {
5315 ipaddr_t ip4;
5316 uint8_t *ptr8, val;
5317
5318 /*
5319 * Safely load the IPv4 address.
5320 */
5321 ip4 = dtrace_load32(tupregs[argi].dttk_value);
5322
5323 /*
5324 * Check an IPv4 string will fit in scratch.
5325 */
5326 size = INET_ADDRSTRLEN;
5327 if (!DTRACE_INSCRATCH(mstate, size)) {
5328 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5329 regs[rd] = NULL;
5330 break;
5331 }
5332 base = (char *)mstate->dtms_scratch_ptr;
5333 end = (char *)mstate->dtms_scratch_ptr + size - 1;
5334
5335 /*
5336 * Stringify as a dotted decimal quad.
5337 */
5338 *end-- = '\0';
5339 ptr8 = (uint8_t *)&ip4;
5340 for (i = 3; i >= 0; i--) {
5341 val = ptr8[i];
5342
5343 if (val == 0) {
5344 *end-- = '0';
5345 } else {
5346 for (; val; val /= 10) {
5347 *end-- = '0' + (val % 10);
5348 }
5349 }
5350
5351 if (i > 0)
5352 *end-- = '.';
5353 }
5354 ASSERT(end + 1 >= base);
5355
5356 } else if (af == AF_INET6) {
5357 struct in6_addr ip6;
5358 int firstzero, tryzero, numzero, v6end;
5359 uint16_t val;
5360 const char digits[] = "0123456789abcdef";
5361
5362 /*
5363 * Stringify using RFC 1884 convention 2 - 16 bit
5364 * hexadecimal values with a zero-run compression.
5365 * Lower case hexadecimal digits are used.
5366 * eg, fe80::214:4fff:fe0b:76c8.
5367 * The IPv4 embedded form is returned for inet_ntop,
5368 * just the IPv4 string is returned for inet_ntoa6.
5369 */
5370
5371 /*
5372 * Safely load the IPv6 address.
5373 */
5374 dtrace_bcopy(
5375 (void *)(uintptr_t)tupregs[argi].dttk_value,
5376 (void *)(uintptr_t)&ip6, sizeof (struct in6_addr));
5377
5378 /*
5379 * Check an IPv6 string will fit in scratch.
5380 */
5381 size = INET6_ADDRSTRLEN;
5382 if (!DTRACE_INSCRATCH(mstate, size)) {
5383 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5384 regs[rd] = NULL;
5385 break;
5386 }
5387 base = (char *)mstate->dtms_scratch_ptr;
5388 end = (char *)mstate->dtms_scratch_ptr + size - 1;
5389 *end-- = '\0';
5390
5391 /*
5392 * Find the longest run of 16 bit zero values
5393 * for the single allowed zero compression - "::".
5394 */
5395 firstzero = -1;
5396 tryzero = -1;
5397 numzero = 1;
5398 for (i = 0; i < sizeof (struct in6_addr); i++) {
5399 if (ip6._S6_un._S6_u8[i] == 0 &&
5400 tryzero == -1 && i % 2 == 0) {
5401 tryzero = i;
5402 continue;
5403 }
5404
5405 if (tryzero != -1 &&
5406 (ip6._S6_un._S6_u8[i] != 0 ||
5407 i == sizeof (struct in6_addr) - 1)) {
5408
5409 if (i - tryzero <= numzero) {
5410 tryzero = -1;
5411 continue;
5412 }
5413
5414 firstzero = tryzero;
5415 numzero = i - i % 2 - tryzero;
5416 tryzero = -1;
5417
5418 if (ip6._S6_un._S6_u8[i] == 0 &&
5419 i == sizeof (struct in6_addr) - 1)
5420 numzero += 2;
5421 }
5422 }
5423 ASSERT(firstzero + numzero <= sizeof (struct in6_addr));
5424
5425 /*
5426 * Check for an IPv4 embedded address.
5427 */
5428 v6end = sizeof (struct in6_addr) - 2;
5429 if (IN6_IS_ADDR_V4MAPPED(&ip6) ||
5430 IN6_IS_ADDR_V4COMPAT(&ip6)) {
5431 for (i = sizeof (struct in6_addr) - 1;
5432 i >= DTRACE_V4MAPPED_OFFSET; i--) {
5433 ASSERT(end >= base);
5434
5435 val = ip6._S6_un._S6_u8[i];
5436
5437 if (val == 0) {
5438 *end-- = '0';
5439 } else {
5440 for (; val; val /= 10) {
5441 *end-- = '0' + val % 10;
5442 }
5443 }
5444
5445 if (i > DTRACE_V4MAPPED_OFFSET)
5446 *end-- = '.';
5447 }
5448
5449 if (subr == DIF_SUBR_INET_NTOA6)
5450 goto inetout;
5451
5452 /*
5453 * Set v6end to skip the IPv4 address that
5454 * we have already stringified.
5455 */
5456 v6end = 10;
5457 }
5458
5459 /*
5460 * Build the IPv6 string by working through the
5461 * address in reverse.
5462 */
5463 for (i = v6end; i >= 0; i -= 2) {
5464 ASSERT(end >= base);
5465
5466 if (i == firstzero + numzero - 2) {
5467 *end-- = ':';
5468 *end-- = ':';
5469 i -= numzero - 2;
5470 continue;
5471 }
5472
5473 if (i < 14 && i != firstzero - 2)
5474 *end-- = ':';
5475
5476 val = (ip6._S6_un._S6_u8[i] << 8) +
5477 ip6._S6_un._S6_u8[i + 1];
5478
5479 if (val == 0) {
5480 *end-- = '0';
5481 } else {
5482 for (; val; val /= 16) {
5483 *end-- = digits[val % 16];
5484 }
5485 }
5486 }
5487 ASSERT(end + 1 >= base);
5488
5489 } else {
5490 /*
5491 * The user didn't use AH_INET or AH_INET6.
5492 */
5493 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
5494 regs[rd] = NULL;
5495 break;
5496 }
5497
5498 inetout: regs[rd] = (uintptr_t)end + 1;
5499 mstate->dtms_scratch_ptr += size;
5500 break;
5501 }
5502
5503 }
5504 }
5505
5506 /*
5507 * Emulate the execution of DTrace IR instructions specified by the given
5508 * DIF object. This function is deliberately void of assertions as all of
5509 * the necessary checks are handled by a call to dtrace_difo_validate().
5510 */
5511 static uint64_t
5512 dtrace_dif_emulate(dtrace_difo_t *difo, dtrace_mstate_t *mstate,
5513 dtrace_vstate_t *vstate, dtrace_state_t *state)
5514 {
5515 const dif_instr_t *text = difo->dtdo_buf;
5516 const uint_t textlen = difo->dtdo_len;
5517 const char *strtab = difo->dtdo_strtab;
5518 const uint64_t *inttab = difo->dtdo_inttab;
5519
5520 uint64_t rval = 0;
5521 dtrace_statvar_t *svar;
5522 dtrace_dstate_t *dstate = &vstate->dtvs_dynvars;
5523 dtrace_difv_t *v;
5524 volatile uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
5525 volatile uintptr_t *illval = &cpu_core[CPU->cpu_id].cpuc_dtrace_illval;
5526
5527 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */
5528 uint64_t regs[DIF_DIR_NREGS];
5529 uint64_t *tmp;
5530
5531 uint8_t cc_n = 0, cc_z = 0, cc_v = 0, cc_c = 0;
5532 int64_t cc_r;
5533 uint_t pc = 0, id, opc;
5534 uint8_t ttop = 0;
5535 dif_instr_t instr;
5536 uint_t r1, r2, rd;
5537
5538 /*
5539 * We stash the current DIF object into the machine state: we need it
5540 * for subsequent access checking.
5541 */
5542 mstate->dtms_difo = difo;
5543
5544 regs[DIF_REG_R0] = 0; /* %r0 is fixed at zero */
5545
5546 while (pc < textlen && !(*flags & CPU_DTRACE_FAULT)) {
5547 opc = pc;
5548
5549 instr = text[pc++];
5550 r1 = DIF_INSTR_R1(instr);
5551 r2 = DIF_INSTR_R2(instr);
5552 rd = DIF_INSTR_RD(instr);
5553
5554 switch (DIF_INSTR_OP(instr)) {
5555 case DIF_OP_OR:
5556 regs[rd] = regs[r1] | regs[r2];
5557 break;
5558 case DIF_OP_XOR:
5559 regs[rd] = regs[r1] ^ regs[r2];
5560 break;
5561 case DIF_OP_AND:
5562 regs[rd] = regs[r1] & regs[r2];
5563 break;
5564 case DIF_OP_SLL:
5565 regs[rd] = regs[r1] << regs[r2];
5566 break;
5567 case DIF_OP_SRL:
5568 regs[rd] = regs[r1] >> regs[r2];
5569 break;
5570 case DIF_OP_SUB:
5571 regs[rd] = regs[r1] - regs[r2];
5572 break;
5573 case DIF_OP_ADD:
5574 regs[rd] = regs[r1] + regs[r2];
5575 break;
5576 case DIF_OP_MUL:
5577 regs[rd] = regs[r1] * regs[r2];
5578 break;
5579 case DIF_OP_SDIV:
5580 if (regs[r2] == 0) {
5581 regs[rd] = 0;
5582 *flags |= CPU_DTRACE_DIVZERO;
5583 } else {
5584 regs[rd] = (int64_t)regs[r1] /
5585 (int64_t)regs[r2];
5586 }
5587 break;
5588
5589 case DIF_OP_UDIV:
5590 if (regs[r2] == 0) {
5591 regs[rd] = 0;
5592 *flags |= CPU_DTRACE_DIVZERO;
5593 } else {
5594 regs[rd] = regs[r1] / regs[r2];
5595 }
5596 break;
5597
5598 case DIF_OP_SREM:
5599 if (regs[r2] == 0) {
5600 regs[rd] = 0;
5601 *flags |= CPU_DTRACE_DIVZERO;
5602 } else {
5603 regs[rd] = (int64_t)regs[r1] %
5604 (int64_t)regs[r2];
5605 }
5606 break;
5607
5608 case DIF_OP_UREM:
5609 if (regs[r2] == 0) {
5610 regs[rd] = 0;
5611 *flags |= CPU_DTRACE_DIVZERO;
5612 } else {
5613 regs[rd] = regs[r1] % regs[r2];
5614 }
5615 break;
5616
5617 case DIF_OP_NOT:
5618 regs[rd] = ~regs[r1];
5619 break;
5620 case DIF_OP_MOV:
5621 regs[rd] = regs[r1];
5622 break;
5623 case DIF_OP_CMP:
5624 cc_r = regs[r1] - regs[r2];
5625 cc_n = cc_r < 0;
5626 cc_z = cc_r == 0;
5627 cc_v = 0;
5628 cc_c = regs[r1] < regs[r2];
5629 break;
5630 case DIF_OP_TST:
5631 cc_n = cc_v = cc_c = 0;
5632 cc_z = regs[r1] == 0;
5633 break;
5634 case DIF_OP_BA:
5635 pc = DIF_INSTR_LABEL(instr);
5636 break;
5637 case DIF_OP_BE:
5638 if (cc_z)
5639 pc = DIF_INSTR_LABEL(instr);
5640 break;
5641 case DIF_OP_BNE:
5642 if (cc_z == 0)
5643 pc = DIF_INSTR_LABEL(instr);
5644 break;
5645 case DIF_OP_BG:
5646 if ((cc_z | (cc_n ^ cc_v)) == 0)
5647 pc = DIF_INSTR_LABEL(instr);
5648 break;
5649 case DIF_OP_BGU:
5650 if ((cc_c | cc_z) == 0)
5651 pc = DIF_INSTR_LABEL(instr);
5652 break;
5653 case DIF_OP_BGE:
5654 if ((cc_n ^ cc_v) == 0)
5655 pc = DIF_INSTR_LABEL(instr);
5656 break;
5657 case DIF_OP_BGEU:
5658 if (cc_c == 0)
5659 pc = DIF_INSTR_LABEL(instr);
5660 break;
5661 case DIF_OP_BL:
5662 if (cc_n ^ cc_v)
5663 pc = DIF_INSTR_LABEL(instr);
5664 break;
5665 case DIF_OP_BLU:
5666 if (cc_c)
5667 pc = DIF_INSTR_LABEL(instr);
5668 break;
5669 case DIF_OP_BLE:
5670 if (cc_z | (cc_n ^ cc_v))
5671 pc = DIF_INSTR_LABEL(instr);
5672 break;
5673 case DIF_OP_BLEU:
5674 if (cc_c | cc_z)
5675 pc = DIF_INSTR_LABEL(instr);
5676 break;
5677 case DIF_OP_RLDSB:
5678 if (!dtrace_canload(regs[r1], 1, mstate, vstate))
5679 break;
5680 /*FALLTHROUGH*/
5681 case DIF_OP_LDSB:
5682 regs[rd] = (int8_t)dtrace_load8(regs[r1]);
5683 break;
5684 case DIF_OP_RLDSH:
5685 if (!dtrace_canload(regs[r1], 2, mstate, vstate))
5686 break;
5687 /*FALLTHROUGH*/
5688 case DIF_OP_LDSH:
5689 regs[rd] = (int16_t)dtrace_load16(regs[r1]);
5690 break;
5691 case DIF_OP_RLDSW:
5692 if (!dtrace_canload(regs[r1], 4, mstate, vstate))
5693 break;
5694 /*FALLTHROUGH*/
5695 case DIF_OP_LDSW:
5696 regs[rd] = (int32_t)dtrace_load32(regs[r1]);
5697 break;
5698 case DIF_OP_RLDUB:
5699 if (!dtrace_canload(regs[r1], 1, mstate, vstate))
5700 break;
5701 /*FALLTHROUGH*/
5702 case DIF_OP_LDUB:
5703 regs[rd] = dtrace_load8(regs[r1]);
5704 break;
5705 case DIF_OP_RLDUH:
5706 if (!dtrace_canload(regs[r1], 2, mstate, vstate))
5707 break;
5708 /*FALLTHROUGH*/
5709 case DIF_OP_LDUH:
5710 regs[rd] = dtrace_load16(regs[r1]);
5711 break;
5712 case DIF_OP_RLDUW:
5713 if (!dtrace_canload(regs[r1], 4, mstate, vstate))
5714 break;
5715 /*FALLTHROUGH*/
5716 case DIF_OP_LDUW:
5717 regs[rd] = dtrace_load32(regs[r1]);
5718 break;
5719 case DIF_OP_RLDX:
5720 if (!dtrace_canload(regs[r1], 8, mstate, vstate))
5721 break;
5722 /*FALLTHROUGH*/
5723 case DIF_OP_LDX:
5724 regs[rd] = dtrace_load64(regs[r1]);
5725 break;
5726 case DIF_OP_ULDSB:
5727 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
5728 regs[rd] = (int8_t)
5729 dtrace_fuword8((void *)(uintptr_t)regs[r1]);
5730 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
5731 break;
5732 case DIF_OP_ULDSH:
5733 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
5734 regs[rd] = (int16_t)
5735 dtrace_fuword16((void *)(uintptr_t)regs[r1]);
5736 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
5737 break;
5738 case DIF_OP_ULDSW:
5739 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
5740 regs[rd] = (int32_t)
5741 dtrace_fuword32((void *)(uintptr_t)regs[r1]);
5742 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
5743 break;
5744 case DIF_OP_ULDUB:
5745 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
5746 regs[rd] =
5747 dtrace_fuword8((void *)(uintptr_t)regs[r1]);
5748 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
5749 break;
5750 case DIF_OP_ULDUH:
5751 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
5752 regs[rd] =
5753 dtrace_fuword16((void *)(uintptr_t)regs[r1]);
5754 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
5755 break;
5756 case DIF_OP_ULDUW:
5757 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
5758 regs[rd] =
5759 dtrace_fuword32((void *)(uintptr_t)regs[r1]);
5760 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
5761 break;
5762 case DIF_OP_ULDX:
5763 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
5764 regs[rd] =
5765 dtrace_fuword64((void *)(uintptr_t)regs[r1]);
5766 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
5767 break;
5768 case DIF_OP_RET:
5769 rval = regs[rd];
5770 pc = textlen;
5771 break;
5772 case DIF_OP_NOP:
5773 break;
5774 case DIF_OP_SETX:
5775 regs[rd] = inttab[DIF_INSTR_INTEGER(instr)];
5776 break;
5777 case DIF_OP_SETS:
5778 regs[rd] = (uint64_t)(uintptr_t)
5779 (strtab + DIF_INSTR_STRING(instr));
5780 break;
5781 case DIF_OP_SCMP: {
5782 size_t sz = state->dts_options[DTRACEOPT_STRSIZE];
5783 uintptr_t s1 = regs[r1];
5784 uintptr_t s2 = regs[r2];
5785
5786 if (s1 != NULL &&
5787 !dtrace_strcanload(s1, sz, mstate, vstate))
5788 break;
5789 if (s2 != NULL &&
5790 !dtrace_strcanload(s2, sz, mstate, vstate))
5791 break;
5792
5793 cc_r = dtrace_strncmp((char *)s1, (char *)s2, sz);
5794
5795 cc_n = cc_r < 0;
5796 cc_z = cc_r == 0;
5797 cc_v = cc_c = 0;
5798 break;
5799 }
5800 case DIF_OP_LDGA:
5801 regs[rd] = dtrace_dif_variable(mstate, state,
5802 r1, regs[r2]);
5803 break;
5804 case DIF_OP_LDGS:
5805 id = DIF_INSTR_VAR(instr);
5806
5807 if (id >= DIF_VAR_OTHER_UBASE) {
5808 uintptr_t a;
5809
5810 id -= DIF_VAR_OTHER_UBASE;
5811 svar = vstate->dtvs_globals[id];
5812 ASSERT(svar != NULL);
5813 v = &svar->dtsv_var;
5814
5815 if (!(v->dtdv_type.dtdt_flags & DIF_TF_BYREF)) {
5816 regs[rd] = svar->dtsv_data;
5817 break;
5818 }
5819
5820 a = (uintptr_t)svar->dtsv_data;
5821
5822 if (*(uint8_t *)a == UINT8_MAX) {
5823 /*
5824 * If the 0th byte is set to UINT8_MAX
5825 * then this is to be treated as a
5826 * reference to a NULL variable.
5827 */
5828 regs[rd] = NULL;
5829 } else {
5830 regs[rd] = a + sizeof (uint64_t);
5831 }
5832
5833 break;
5834 }
5835
5836 regs[rd] = dtrace_dif_variable(mstate, state, id, 0);
5837 break;
5838
5839 case DIF_OP_STGS:
5840 id = DIF_INSTR_VAR(instr);
5841
5842 ASSERT(id >= DIF_VAR_OTHER_UBASE);
5843 id -= DIF_VAR_OTHER_UBASE;
5844
5845 svar = vstate->dtvs_globals[id];
5846 ASSERT(svar != NULL);
5847 v = &svar->dtsv_var;
5848
5849 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
5850 uintptr_t a = (uintptr_t)svar->dtsv_data;
5851
5852 ASSERT(a != NULL);
5853 ASSERT(svar->dtsv_size != 0);
5854
5855 if (regs[rd] == NULL) {
5856 *(uint8_t *)a = UINT8_MAX;
5857 break;
5858 } else {
5859 *(uint8_t *)a = 0;
5860 a += sizeof (uint64_t);
5861 }
5862 if (!dtrace_vcanload(
5863 (void *)(uintptr_t)regs[rd], &v->dtdv_type,
5864 mstate, vstate))
5865 break;
5866
5867 dtrace_vcopy((void *)(uintptr_t)regs[rd],
5868 (void *)a, &v->dtdv_type);
5869 break;
5870 }
5871
5872 svar->dtsv_data = regs[rd];
5873 break;
5874
5875 case DIF_OP_LDTA:
5876 /*
5877 * There are no DTrace built-in thread-local arrays at
5878 * present. This opcode is saved for future work.
5879 */
5880 *flags |= CPU_DTRACE_ILLOP;
5881 regs[rd] = 0;
5882 break;
5883
5884 case DIF_OP_LDLS:
5885 id = DIF_INSTR_VAR(instr);
5886
5887 if (id < DIF_VAR_OTHER_UBASE) {
5888 /*
5889 * For now, this has no meaning.
5890 */
5891 regs[rd] = 0;
5892 break;
5893 }
5894
5895 id -= DIF_VAR_OTHER_UBASE;
5896
5897 ASSERT(id < vstate->dtvs_nlocals);
5898 ASSERT(vstate->dtvs_locals != NULL);
5899
5900 svar = vstate->dtvs_locals[id];
5901 ASSERT(svar != NULL);
5902 v = &svar->dtsv_var;
5903
5904 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
5905 uintptr_t a = (uintptr_t)svar->dtsv_data;
5906 size_t sz = v->dtdv_type.dtdt_size;
5907
5908 sz += sizeof (uint64_t);
5909 ASSERT(svar->dtsv_size == NCPU * sz);
5910 a += CPU->cpu_id * sz;
5911
5912 if (*(uint8_t *)a == UINT8_MAX) {
5913 /*
5914 * If the 0th byte is set to UINT8_MAX
5915 * then this is to be treated as a
5916 * reference to a NULL variable.
5917 */
5918 regs[rd] = NULL;
5919 } else {
5920 regs[rd] = a + sizeof (uint64_t);
5921 }
5922
5923 break;
5924 }
5925
5926 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t));
5927 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data;
5928 regs[rd] = tmp[CPU->cpu_id];
5929 break;
5930
5931 case DIF_OP_STLS:
5932 id = DIF_INSTR_VAR(instr);
5933
5934 ASSERT(id >= DIF_VAR_OTHER_UBASE);
5935 id -= DIF_VAR_OTHER_UBASE;
5936 ASSERT(id < vstate->dtvs_nlocals);
5937
5938 ASSERT(vstate->dtvs_locals != NULL);
5939 svar = vstate->dtvs_locals[id];
5940 ASSERT(svar != NULL);
5941 v = &svar->dtsv_var;
5942
5943 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
5944 uintptr_t a = (uintptr_t)svar->dtsv_data;
5945 size_t sz = v->dtdv_type.dtdt_size;
5946
5947 sz += sizeof (uint64_t);
5948 ASSERT(svar->dtsv_size == NCPU * sz);
5949 a += CPU->cpu_id * sz;
5950
5951 if (regs[rd] == NULL) {
5952 *(uint8_t *)a = UINT8_MAX;
5953 break;
5954 } else {
5955 *(uint8_t *)a = 0;
5956 a += sizeof (uint64_t);
5957 }
5958
5959 if (!dtrace_vcanload(
5960 (void *)(uintptr_t)regs[rd], &v->dtdv_type,
5961 mstate, vstate))
5962 break;
5963
5964 dtrace_vcopy((void *)(uintptr_t)regs[rd],
5965 (void *)a, &v->dtdv_type);
5966 break;
5967 }
5968
5969 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t));
5970 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data;
5971 tmp[CPU->cpu_id] = regs[rd];
5972 break;
5973
5974 case DIF_OP_LDTS: {
5975 dtrace_dynvar_t *dvar;
5976 dtrace_key_t *key;
5977
5978 id = DIF_INSTR_VAR(instr);
5979 ASSERT(id >= DIF_VAR_OTHER_UBASE);
5980 id -= DIF_VAR_OTHER_UBASE;
5981 v = &vstate->dtvs_tlocals[id];
5982
5983 key = &tupregs[DIF_DTR_NREGS];
5984 key[0].dttk_value = (uint64_t)id;
5985 key[0].dttk_size = 0;
5986 DTRACE_TLS_THRKEY(key[1].dttk_value);
5987 key[1].dttk_size = 0;
5988
5989 dvar = dtrace_dynvar(dstate, 2, key,
5990 sizeof (uint64_t), DTRACE_DYNVAR_NOALLOC,
5991 mstate, vstate);
5992
5993 if (dvar == NULL) {
5994 regs[rd] = 0;
5995 break;
5996 }
5997
5998 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
5999 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data;
6000 } else {
6001 regs[rd] = *((uint64_t *)dvar->dtdv_data);
6002 }
6003
6004 break;
6005 }
6006
6007 case DIF_OP_STTS: {
6008 dtrace_dynvar_t *dvar;
6009 dtrace_key_t *key;
6010
6011 id = DIF_INSTR_VAR(instr);
6012 ASSERT(id >= DIF_VAR_OTHER_UBASE);
6013 id -= DIF_VAR_OTHER_UBASE;
6014
6015 key = &tupregs[DIF_DTR_NREGS];
6016 key[0].dttk_value = (uint64_t)id;
6017 key[0].dttk_size = 0;
6018 DTRACE_TLS_THRKEY(key[1].dttk_value);
6019 key[1].dttk_size = 0;
6020 v = &vstate->dtvs_tlocals[id];
6021
6022 dvar = dtrace_dynvar(dstate, 2, key,
6023 v->dtdv_type.dtdt_size > sizeof (uint64_t) ?
6024 v->dtdv_type.dtdt_size : sizeof (uint64_t),
6025 regs[rd] ? DTRACE_DYNVAR_ALLOC :
6026 DTRACE_DYNVAR_DEALLOC, mstate, vstate);
6027
6028 /*
6029 * Given that we're storing to thread-local data,
6030 * we need to flush our predicate cache.
6031 */
6032 curthread->t_predcache = NULL;
6033
6034 if (dvar == NULL)
6035 break;
6036
6037 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
6038 if (!dtrace_vcanload(
6039 (void *)(uintptr_t)regs[rd],
6040 &v->dtdv_type, mstate, vstate))
6041 break;
6042
6043 dtrace_vcopy((void *)(uintptr_t)regs[rd],
6044 dvar->dtdv_data, &v->dtdv_type);
6045 } else {
6046 *((uint64_t *)dvar->dtdv_data) = regs[rd];
6047 }
6048
6049 break;
6050 }
6051
6052 case DIF_OP_SRA:
6053 regs[rd] = (int64_t)regs[r1] >> regs[r2];
6054 break;
6055
6056 case DIF_OP_CALL:
6057 dtrace_dif_subr(DIF_INSTR_SUBR(instr), rd,
6058 regs, tupregs, ttop, mstate, state);
6059 break;
6060
6061 case DIF_OP_PUSHTR:
6062 if (ttop == DIF_DTR_NREGS) {
6063 *flags |= CPU_DTRACE_TUPOFLOW;
6064 break;
6065 }
6066
6067 if (r1 == DIF_TYPE_STRING) {
6068 /*
6069 * If this is a string type and the size is 0,
6070 * we'll use the system-wide default string
6071 * size. Note that we are _not_ looking at
6072 * the value of the DTRACEOPT_STRSIZE option;
6073 * had this been set, we would expect to have
6074 * a non-zero size value in the "pushtr".
6075 */
6076 tupregs[ttop].dttk_size =
6077 dtrace_strlen((char *)(uintptr_t)regs[rd],
6078 regs[r2] ? regs[r2] :
6079 dtrace_strsize_default) + 1;
6080 } else {
6081 tupregs[ttop].dttk_size = regs[r2];
6082 }
6083
6084 tupregs[ttop++].dttk_value = regs[rd];
6085 break;
6086
6087 case DIF_OP_PUSHTV:
6088 if (ttop == DIF_DTR_NREGS) {
6089 *flags |= CPU_DTRACE_TUPOFLOW;
6090 break;
6091 }
6092
6093 tupregs[ttop].dttk_value = regs[rd];
6094 tupregs[ttop++].dttk_size = 0;
6095 break;
6096
6097 case DIF_OP_POPTS:
6098 if (ttop != 0)
6099 ttop--;
6100 break;
6101
6102 case DIF_OP_FLUSHTS:
6103 ttop = 0;
6104 break;
6105
6106 case DIF_OP_LDGAA:
6107 case DIF_OP_LDTAA: {
6108 dtrace_dynvar_t *dvar;
6109 dtrace_key_t *key = tupregs;
6110 uint_t nkeys = ttop;
6111
6112 id = DIF_INSTR_VAR(instr);
6113 ASSERT(id >= DIF_VAR_OTHER_UBASE);
6114 id -= DIF_VAR_OTHER_UBASE;
6115
6116 key[nkeys].dttk_value = (uint64_t)id;
6117 key[nkeys++].dttk_size = 0;
6118
6119 if (DIF_INSTR_OP(instr) == DIF_OP_LDTAA) {
6120 DTRACE_TLS_THRKEY(key[nkeys].dttk_value);
6121 key[nkeys++].dttk_size = 0;
6122 v = &vstate->dtvs_tlocals[id];
6123 } else {
6124 v = &vstate->dtvs_globals[id]->dtsv_var;
6125 }
6126
6127 dvar = dtrace_dynvar(dstate, nkeys, key,
6128 v->dtdv_type.dtdt_size > sizeof (uint64_t) ?
6129 v->dtdv_type.dtdt_size : sizeof (uint64_t),
6130 DTRACE_DYNVAR_NOALLOC, mstate, vstate);
6131
6132 if (dvar == NULL) {
6133 regs[rd] = 0;
6134 break;
6135 }
6136
6137 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
6138 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data;
6139 } else {
6140 regs[rd] = *((uint64_t *)dvar->dtdv_data);
6141 }
6142
6143 break;
6144 }
6145
6146 case DIF_OP_STGAA:
6147 case DIF_OP_STTAA: {
6148 dtrace_dynvar_t *dvar;
6149 dtrace_key_t *key = tupregs;
6150 uint_t nkeys = ttop;
6151
6152 id = DIF_INSTR_VAR(instr);
6153 ASSERT(id >= DIF_VAR_OTHER_UBASE);
6154 id -= DIF_VAR_OTHER_UBASE;
6155
6156 key[nkeys].dttk_value = (uint64_t)id;
6157 key[nkeys++].dttk_size = 0;
6158
6159 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA) {
6160 DTRACE_TLS_THRKEY(key[nkeys].dttk_value);
6161 key[nkeys++].dttk_size = 0;
6162 v = &vstate->dtvs_tlocals[id];
6163 } else {
6164 v = &vstate->dtvs_globals[id]->dtsv_var;
6165 }
6166
6167 dvar = dtrace_dynvar(dstate, nkeys, key,
6168 v->dtdv_type.dtdt_size > sizeof (uint64_t) ?
6169 v->dtdv_type.dtdt_size : sizeof (uint64_t),
6170 regs[rd] ? DTRACE_DYNVAR_ALLOC :
6171 DTRACE_DYNVAR_DEALLOC, mstate, vstate);
6172
6173 if (dvar == NULL)
6174 break;
6175
6176 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
6177 if (!dtrace_vcanload(
6178 (void *)(uintptr_t)regs[rd], &v->dtdv_type,
6179 mstate, vstate))
6180 break;
6181
6182 dtrace_vcopy((void *)(uintptr_t)regs[rd],
6183 dvar->dtdv_data, &v->dtdv_type);
6184 } else {
6185 *((uint64_t *)dvar->dtdv_data) = regs[rd];
6186 }
6187
6188 break;
6189 }
6190
6191 case DIF_OP_ALLOCS: {
6192 uintptr_t ptr = P2ROUNDUP(mstate->dtms_scratch_ptr, 8);
6193 size_t size = ptr - mstate->dtms_scratch_ptr + regs[r1];
6194
6195 /*
6196 * Rounding up the user allocation size could have
6197 * overflowed large, bogus allocations (like -1ULL) to
6198 * 0.
6199 */
6200 if (size < regs[r1] ||
6201 !DTRACE_INSCRATCH(mstate, size)) {
6202 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
6203 regs[rd] = NULL;
6204 break;
6205 }
6206
6207 dtrace_bzero((void *) mstate->dtms_scratch_ptr, size);
6208 mstate->dtms_scratch_ptr += size;
6209 regs[rd] = ptr;
6210 break;
6211 }
6212
6213 case DIF_OP_COPYS:
6214 if (!dtrace_canstore(regs[rd], regs[r2],
6215 mstate, vstate)) {
6216 *flags |= CPU_DTRACE_BADADDR;
6217 *illval = regs[rd];
6218 break;
6219 }
6220
6221 if (!dtrace_canload(regs[r1], regs[r2], mstate, vstate))
6222 break;
6223
6224 dtrace_bcopy((void *)(uintptr_t)regs[r1],
6225 (void *)(uintptr_t)regs[rd], (size_t)regs[r2]);
6226 break;
6227
6228 case DIF_OP_STB:
6229 if (!dtrace_canstore(regs[rd], 1, mstate, vstate)) {
6230 *flags |= CPU_DTRACE_BADADDR;
6231 *illval = regs[rd];
6232 break;
6233 }
6234 *((uint8_t *)(uintptr_t)regs[rd]) = (uint8_t)regs[r1];
6235 break;
6236
6237 case DIF_OP_STH:
6238 if (!dtrace_canstore(regs[rd], 2, mstate, vstate)) {
6239 *flags |= CPU_DTRACE_BADADDR;
6240 *illval = regs[rd];
6241 break;
6242 }
6243 if (regs[rd] & 1) {
6244 *flags |= CPU_DTRACE_BADALIGN;
6245 *illval = regs[rd];
6246 break;
6247 }
6248 *((uint16_t *)(uintptr_t)regs[rd]) = (uint16_t)regs[r1];
6249 break;
6250
6251 case DIF_OP_STW:
6252 if (!dtrace_canstore(regs[rd], 4, mstate, vstate)) {
6253 *flags |= CPU_DTRACE_BADADDR;
6254 *illval = regs[rd];
6255 break;
6256 }
6257 if (regs[rd] & 3) {
6258 *flags |= CPU_DTRACE_BADALIGN;
6259 *illval = regs[rd];
6260 break;
6261 }
6262 *((uint32_t *)(uintptr_t)regs[rd]) = (uint32_t)regs[r1];
6263 break;
6264
6265 case DIF_OP_STX:
6266 if (!dtrace_canstore(regs[rd], 8, mstate, vstate)) {
6267 *flags |= CPU_DTRACE_BADADDR;
6268 *illval = regs[rd];
6269 break;
6270 }
6271 if (regs[rd] & 7) {
6272 *flags |= CPU_DTRACE_BADALIGN;
6273 *illval = regs[rd];
6274 break;
6275 }
6276 *((uint64_t *)(uintptr_t)regs[rd]) = regs[r1];
6277 break;
6278 }
6279 }
6280
6281 if (!(*flags & CPU_DTRACE_FAULT))
6282 return (rval);
6283
6284 mstate->dtms_fltoffs = opc * sizeof (dif_instr_t);
6285 mstate->dtms_present |= DTRACE_MSTATE_FLTOFFS;
6286
6287 return (0);
6288 }
6289
6290 static void
6291 dtrace_action_breakpoint(dtrace_ecb_t *ecb)
6292 {
6293 dtrace_probe_t *probe = ecb->dte_probe;
6294 dtrace_provider_t *prov = probe->dtpr_provider;
6295 char c[DTRACE_FULLNAMELEN + 80], *str;
6296 char *msg = "dtrace: breakpoint action at probe ";
6297 char *ecbmsg = " (ecb ";
6298 uintptr_t mask = (0xf << (sizeof (uintptr_t) * NBBY / 4));
6299 uintptr_t val = (uintptr_t)ecb;
6300 int shift = (sizeof (uintptr_t) * NBBY) - 4, i = 0;
6301
6302 if (dtrace_destructive_disallow)
6303 return;
6304
6305 /*
6306 * It's impossible to be taking action on the NULL probe.
6307 */
6308 ASSERT(probe != NULL);
6309
6310 /*
6311 * This is a poor man's (destitute man's?) sprintf(): we want to
6312 * print the provider name, module name, function name and name of
6313 * the probe, along with the hex address of the ECB with the breakpoint
6314 * action -- all of which we must place in the character buffer by
6315 * hand.
6316 */
6317 while (*msg != '\0')
6318 c[i++] = *msg++;
6319
6320 for (str = prov->dtpv_name; *str != '\0'; str++)
6321 c[i++] = *str;
6322 c[i++] = ':';
6323
6324 for (str = probe->dtpr_mod; *str != '\0'; str++)
6325 c[i++] = *str;
6326 c[i++] = ':';
6327
6328 for (str = probe->dtpr_func; *str != '\0'; str++)
6329 c[i++] = *str;
6330 c[i++] = ':';
6331
6332 for (str = probe->dtpr_name; *str != '\0'; str++)
6333 c[i++] = *str;
6334
6335 while (*ecbmsg != '\0')
6336 c[i++] = *ecbmsg++;
6337
6338 while (shift >= 0) {
6339 mask = (uintptr_t)0xf << shift;
6340
6341 if (val >= ((uintptr_t)1 << shift))
6342 c[i++] = "0123456789abcdef"[(val & mask) >> shift];
6343 shift -= 4;
6344 }
6345
6346 c[i++] = ')';
6347 c[i] = '\0';
6348
6349 debug_enter(c);
6350 }
6351
6352 static void
6353 dtrace_action_panic(dtrace_ecb_t *ecb)
6354 {
6355 dtrace_probe_t *probe = ecb->dte_probe;
6356
6357 /*
6358 * It's impossible to be taking action on the NULL probe.
6359 */
6360 ASSERT(probe != NULL);
6361
6362 if (dtrace_destructive_disallow)
6363 return;
6364
6365 if (dtrace_panicked != NULL)
6366 return;
6367
6368 if (dtrace_casptr(&dtrace_panicked, NULL, curthread) != NULL)
6369 return;
6370
6371 /*
6372 * We won the right to panic. (We want to be sure that only one
6373 * thread calls panic() from dtrace_probe(), and that panic() is
6374 * called exactly once.)
6375 */
6376 dtrace_panic("dtrace: panic action at probe %s:%s:%s:%s (ecb %p)",
6377 probe->dtpr_provider->dtpv_name, probe->dtpr_mod,
6378 probe->dtpr_func, probe->dtpr_name, (void *)ecb);
6379 }
6380
6381 static void
6382 dtrace_action_raise(uint64_t sig)
6383 {
6384 if (dtrace_destructive_disallow)
6385 return;
6386
6387 if (sig >= NSIG) {
6388 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
6389 return;
6390 }
6391
6392 /*
6393 * raise() has a queue depth of 1 -- we ignore all subsequent
6394 * invocations of the raise() action.
6395 */
6396 if (curthread->t_dtrace_sig == 0)
6397 curthread->t_dtrace_sig = (uint8_t)sig;
6398
6399 curthread->t_sig_check = 1;
6400 aston(curthread);
6401 }
6402
6403 static void
6404 dtrace_action_stop(void)
6405 {
6406 if (dtrace_destructive_disallow)
6407 return;
6408
6409 if (!curthread->t_dtrace_stop) {
6410 curthread->t_dtrace_stop = 1;
6411 curthread->t_sig_check = 1;
6412 aston(curthread);
6413 }
6414 }
6415
6416 static void
6417 dtrace_action_chill(dtrace_mstate_t *mstate, hrtime_t val)
6418 {
6419 hrtime_t now;
6420 volatile uint16_t *flags;
6421 cpu_t *cpu = CPU;
6422
6423 if (dtrace_destructive_disallow)
6424 return;
6425
6426 flags = (volatile uint16_t *)&cpu_core[cpu->cpu_id].cpuc_dtrace_flags;
6427
6428 now = dtrace_gethrtime();
6429
6430 if (now - cpu->cpu_dtrace_chillmark > dtrace_chill_interval) {
6431 /*
6432 * We need to advance the mark to the current time.
6433 */
6434 cpu->cpu_dtrace_chillmark = now;
6435 cpu->cpu_dtrace_chilled = 0;
6436 }
6437
6438 /*
6439 * Now check to see if the requested chill time would take us over
6440 * the maximum amount of time allowed in the chill interval. (Or
6441 * worse, if the calculation itself induces overflow.)
6442 */
6443 if (cpu->cpu_dtrace_chilled + val > dtrace_chill_max ||
6444 cpu->cpu_dtrace_chilled + val < cpu->cpu_dtrace_chilled) {
6445 *flags |= CPU_DTRACE_ILLOP;
6446 return;
6447 }
6448
6449 while (dtrace_gethrtime() - now < val)
6450 continue;
6451
6452 /*
6453 * Normally, we assure that the value of the variable "timestamp" does
6454 * not change within an ECB. The presence of chill() represents an
6455 * exception to this rule, however.
6456 */
6457 mstate->dtms_present &= ~DTRACE_MSTATE_TIMESTAMP;
6458 cpu->cpu_dtrace_chilled += val;
6459 }
6460
6461 static void
6462 dtrace_action_ustack(dtrace_mstate_t *mstate, dtrace_state_t *state,
6463 uint64_t *buf, uint64_t arg)
6464 {
6465 int nframes = DTRACE_USTACK_NFRAMES(arg);
6466 int strsize = DTRACE_USTACK_STRSIZE(arg);
6467 uint64_t *pcs = &buf[1], *fps;
6468 char *str = (char *)&pcs[nframes];
6469 int size, offs = 0, i, j;
6470 uintptr_t old = mstate->dtms_scratch_ptr, saved;
6471 uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
6472 char *sym;
6473
6474 /*
6475 * Should be taking a faster path if string space has not been
6476 * allocated.
6477 */
6478 ASSERT(strsize != 0);
6479
6480 /*
6481 * We will first allocate some temporary space for the frame pointers.
6482 */
6483 fps = (uint64_t *)P2ROUNDUP(mstate->dtms_scratch_ptr, 8);
6484 size = (uintptr_t)fps - mstate->dtms_scratch_ptr +
6485 (nframes * sizeof (uint64_t));
6486
6487 if (!DTRACE_INSCRATCH(mstate, size)) {
6488 /*
6489 * Not enough room for our frame pointers -- need to indicate
6490 * that we ran out of scratch space.
6491 */
6492 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
6493 return;
6494 }
6495
6496 mstate->dtms_scratch_ptr += size;
6497 saved = mstate->dtms_scratch_ptr;
6498
6499 /*
6500 * Now get a stack with both program counters and frame pointers.
6501 */
6502 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
6503 dtrace_getufpstack(buf, fps, nframes + 1);
6504 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
6505
6506 /*
6507 * If that faulted, we're cooked.
6508 */
6509 if (*flags & CPU_DTRACE_FAULT)
6510 goto out;
6511
6512 /*
6513 * Now we want to walk up the stack, calling the USTACK helper. For
6514 * each iteration, we restore the scratch pointer.
6515 */
6516 for (i = 0; i < nframes; i++) {
6517 mstate->dtms_scratch_ptr = saved;
6518
6519 if (offs >= strsize)
6520 break;
6521
6522 sym = (char *)(uintptr_t)dtrace_helper(
6523 DTRACE_HELPER_ACTION_USTACK,
6524 mstate, state, pcs[i], fps[i]);
6525
6526 /*
6527 * If we faulted while running the helper, we're going to
6528 * clear the fault and null out the corresponding string.
6529 */
6530 if (*flags & CPU_DTRACE_FAULT) {
6531 *flags &= ~CPU_DTRACE_FAULT;
6532 str[offs++] = '\0';
6533 continue;
6534 }
6535
6536 if (sym == NULL) {
6537 str[offs++] = '\0';
6538 continue;
6539 }
6540
6541 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
6542
6543 /*
6544 * Now copy in the string that the helper returned to us.
6545 */
6546 for (j = 0; offs + j < strsize; j++) {
6547 if ((str[offs + j] = sym[j]) == '\0')
6548 break;
6549 }
6550
6551 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
6552
6553 offs += j + 1;
6554 }
6555
6556 if (offs >= strsize) {
6557 /*
6558 * If we didn't have room for all of the strings, we don't
6559 * abort processing -- this needn't be a fatal error -- but we
6560 * still want to increment a counter (dts_stkstroverflows) to
6561 * allow this condition to be warned about. (If this is from
6562 * a jstack() action, it is easily tuned via jstackstrsize.)
6563 */
6564 dtrace_error(&state->dts_stkstroverflows);
6565 }
6566
6567 while (offs < strsize)
6568 str[offs++] = '\0';
6569
6570 out:
6571 mstate->dtms_scratch_ptr = old;
6572 }
6573
6574 static void
6575 dtrace_store_by_ref(dtrace_difo_t *dp, caddr_t tomax, size_t size,
6576 size_t *valoffsp, uint64_t *valp, uint64_t end, int intuple, int dtkind)
6577 {
6578 volatile uint16_t *flags;
6579 uint64_t val = *valp;
6580 size_t valoffs = *valoffsp;
6581
6582 flags = (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
6583 ASSERT(dtkind == DIF_TF_BYREF || dtkind == DIF_TF_BYUREF);
6584
6585 /*
6586 * If this is a string, we're going to only load until we find the zero
6587 * byte -- after which we'll store zero bytes.
6588 */
6589 if (dp->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING) {
6590 char c = '\0' + 1;
6591 size_t s;
6592
6593 for (s = 0; s < size; s++) {
6594 if (c != '\0' && dtkind == DIF_TF_BYREF) {
6595 c = dtrace_load8(val++);
6596 } else if (c != '\0' && dtkind == DIF_TF_BYUREF) {
6597 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
6598 c = dtrace_fuword8((void *)(uintptr_t)val++);
6599 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
6600 if (*flags & CPU_DTRACE_FAULT)
6601 break;
6602 }
6603
6604 DTRACE_STORE(uint8_t, tomax, valoffs++, c);
6605
6606 if (c == '\0' && intuple)
6607 break;
6608 }
6609 } else {
6610 uint8_t c;
6611 while (valoffs < end) {
6612 if (dtkind == DIF_TF_BYREF) {
6613 c = dtrace_load8(val++);
6614 } else if (dtkind == DIF_TF_BYUREF) {
6615 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
6616 c = dtrace_fuword8((void *)(uintptr_t)val++);
6617 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
6618 if (*flags & CPU_DTRACE_FAULT)
6619 break;
6620 }
6621
6622 DTRACE_STORE(uint8_t, tomax,
6623 valoffs++, c);
6624 }
6625 }
6626
6627 *valp = val;
6628 *valoffsp = valoffs;
6629 }
6630
6631 /*
6632 * If you're looking for the epicenter of DTrace, you just found it. This
6633 * is the function called by the provider to fire a probe -- from which all
6634 * subsequent probe-context DTrace activity emanates.
6635 */
6636 void
6637 dtrace_probe(dtrace_id_t id, uintptr_t arg0, uintptr_t arg1,
6638 uintptr_t arg2, uintptr_t arg3, uintptr_t arg4)
6639 {
6640 processorid_t cpuid;
6641 dtrace_icookie_t cookie;
6642 dtrace_probe_t *probe;
6643 dtrace_mstate_t mstate;
6644 dtrace_ecb_t *ecb;
6645 dtrace_action_t *act;
6646 intptr_t offs;
6647 size_t size;
6648 int vtime, onintr;
6649 volatile uint16_t *flags;
6650 hrtime_t now, end;
6651
6652 /*
6653 * Kick out immediately if this CPU is still being born (in which case
6654 * curthread will be set to -1) or the current thread can't allow
6655 * probes in its current context.
6656 */
6657 if (((uintptr_t)curthread & 1) || (curthread->t_flag & T_DONTDTRACE))
6658 return;
6659
6660 cookie = dtrace_interrupt_disable();
6661 probe = dtrace_probes[id - 1];
6662 cpuid = CPU->cpu_id;
6663 onintr = CPU_ON_INTR(CPU);
6664
6665 CPU->cpu_dtrace_probes++;
6666
6667 if (!onintr && probe->dtpr_predcache != DTRACE_CACHEIDNONE &&
6668 probe->dtpr_predcache == curthread->t_predcache) {
6669 /*
6670 * We have hit in the predicate cache; we know that
6671 * this predicate would evaluate to be false.
6672 */
6673 dtrace_interrupt_enable(cookie);
6674 return;
6675 }
6676
6677 if (panic_quiesce) {
6678 /*
6679 * We don't trace anything if we're panicking.
6680 */
6681 dtrace_interrupt_enable(cookie);
6682 return;
6683 }
6684
6685 now = mstate.dtms_timestamp = dtrace_gethrtime();
6686 mstate.dtms_present |= DTRACE_MSTATE_TIMESTAMP;
6687 vtime = dtrace_vtime_references != 0;
6688
6689 if (vtime && curthread->t_dtrace_start)
6690 curthread->t_dtrace_vtime += now - curthread->t_dtrace_start;
6691
6692 mstate.dtms_difo = NULL;
6693 mstate.dtms_probe = probe;
6694 mstate.dtms_strtok = NULL;
6695 mstate.dtms_arg[0] = arg0;
6696 mstate.dtms_arg[1] = arg1;
6697 mstate.dtms_arg[2] = arg2;
6698 mstate.dtms_arg[3] = arg3;
6699 mstate.dtms_arg[4] = arg4;
6700
6701 flags = (volatile uint16_t *)&cpu_core[cpuid].cpuc_dtrace_flags;
6702
6703 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) {
6704 dtrace_predicate_t *pred = ecb->dte_predicate;
6705 dtrace_state_t *state = ecb->dte_state;
6706 dtrace_buffer_t *buf = &state->dts_buffer[cpuid];
6707 dtrace_buffer_t *aggbuf = &state->dts_aggbuffer[cpuid];
6708 dtrace_vstate_t *vstate = &state->dts_vstate;
6709 dtrace_provider_t *prov = probe->dtpr_provider;
6710 uint64_t tracememsize = 0;
6711 int committed = 0;
6712 caddr_t tomax;
6713
6714 /*
6715 * A little subtlety with the following (seemingly innocuous)
6716 * declaration of the automatic 'val': by looking at the
6717 * code, you might think that it could be declared in the
6718 * action processing loop, below. (That is, it's only used in
6719 * the action processing loop.) However, it must be declared
6720 * out of that scope because in the case of DIF expression
6721 * arguments to aggregating actions, one iteration of the
6722 * action loop will use the last iteration's value.
6723 */
6724 #ifdef lint
6725 uint64_t val = 0;
6726 #else
6727 uint64_t val;
6728 #endif
6729
6730 mstate.dtms_present = DTRACE_MSTATE_ARGS | DTRACE_MSTATE_PROBE;
6731 mstate.dtms_access = DTRACE_ACCESS_ARGS | DTRACE_ACCESS_PROC;
6732 mstate.dtms_getf = NULL;
6733
6734 *flags &= ~CPU_DTRACE_ERROR;
6735
6736 if (prov == dtrace_provider) {
6737 /*
6738 * If dtrace itself is the provider of this probe,
6739 * we're only going to continue processing the ECB if
6740 * arg0 (the dtrace_state_t) is equal to the ECB's
6741 * creating state. (This prevents disjoint consumers
6742 * from seeing one another's metaprobes.)
6743 */
6744 if (arg0 != (uint64_t)(uintptr_t)state)
6745 continue;
6746 }
6747
6748 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE) {
6749 /*
6750 * We're not currently active. If our provider isn't
6751 * the dtrace pseudo provider, we're not interested.
6752 */
6753 if (prov != dtrace_provider)
6754 continue;
6755
6756 /*
6757 * Now we must further check if we are in the BEGIN
6758 * probe. If we are, we will only continue processing
6759 * if we're still in WARMUP -- if one BEGIN enabling
6760 * has invoked the exit() action, we don't want to
6761 * evaluate subsequent BEGIN enablings.
6762 */
6763 if (probe->dtpr_id == dtrace_probeid_begin &&
6764 state->dts_activity != DTRACE_ACTIVITY_WARMUP) {
6765 ASSERT(state->dts_activity ==
6766 DTRACE_ACTIVITY_DRAINING);
6767 continue;
6768 }
6769 }
6770
6771 if (ecb->dte_cond && !dtrace_priv_probe(state, &mstate, ecb))
6772 continue;
6773
6774 if (now - state->dts_alive > dtrace_deadman_timeout) {
6775 /*
6776 * We seem to be dead. Unless we (a) have kernel
6777 * destructive permissions (b) have explicitly enabled
6778 * destructive actions and (c) destructive actions have
6779 * not been disabled, we're going to transition into
6780 * the KILLED state, from which no further processing
6781 * on this state will be performed.
6782 */
6783 if (!dtrace_priv_kernel_destructive(state) ||
6784 !state->dts_cred.dcr_destructive ||
6785 dtrace_destructive_disallow) {
6786 void *activity = &state->dts_activity;
6787 dtrace_activity_t current;
6788
6789 do {
6790 current = state->dts_activity;
6791 } while (dtrace_cas32(activity, current,
6792 DTRACE_ACTIVITY_KILLED) != current);
6793
6794 continue;
6795 }
6796 }
6797
6798 if ((offs = dtrace_buffer_reserve(buf, ecb->dte_needed,
6799 ecb->dte_alignment, state, &mstate)) < 0)
6800 continue;
6801
6802 tomax = buf->dtb_tomax;
6803 ASSERT(tomax != NULL);
6804
6805 if (ecb->dte_size != 0) {
6806 dtrace_rechdr_t dtrh;
6807 if (!(mstate.dtms_present & DTRACE_MSTATE_TIMESTAMP)) {
6808 mstate.dtms_timestamp = dtrace_gethrtime();
6809 mstate.dtms_present |= DTRACE_MSTATE_TIMESTAMP;
6810 }
6811 ASSERT3U(ecb->dte_size, >=, sizeof (dtrace_rechdr_t));
6812 dtrh.dtrh_epid = ecb->dte_epid;
6813 DTRACE_RECORD_STORE_TIMESTAMP(&dtrh,
6814 mstate.dtms_timestamp);
6815 *((dtrace_rechdr_t *)(tomax + offs)) = dtrh;
6816 }
6817
6818 mstate.dtms_epid = ecb->dte_epid;
6819 mstate.dtms_present |= DTRACE_MSTATE_EPID;
6820
6821 if (state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL)
6822 mstate.dtms_access |= DTRACE_ACCESS_KERNEL;
6823
6824 if (pred != NULL) {
6825 dtrace_difo_t *dp = pred->dtp_difo;
6826 int rval;
6827
6828 rval = dtrace_dif_emulate(dp, &mstate, vstate, state);
6829
6830 if (!(*flags & CPU_DTRACE_ERROR) && !rval) {
6831 dtrace_cacheid_t cid = probe->dtpr_predcache;
6832
6833 if (cid != DTRACE_CACHEIDNONE && !onintr) {
6834 /*
6835 * Update the predicate cache...
6836 */
6837 ASSERT(cid == pred->dtp_cacheid);
6838 curthread->t_predcache = cid;
6839 }
6840
6841 continue;
6842 }
6843 }
6844
6845 for (act = ecb->dte_action; !(*flags & CPU_DTRACE_ERROR) &&
6846 act != NULL; act = act->dta_next) {
6847 size_t valoffs;
6848 dtrace_difo_t *dp;
6849 dtrace_recdesc_t *rec = &act->dta_rec;
6850
6851 size = rec->dtrd_size;
6852 valoffs = offs + rec->dtrd_offset;
6853
6854 if (DTRACEACT_ISAGG(act->dta_kind)) {
6855 uint64_t v = 0xbad;
6856 dtrace_aggregation_t *agg;
6857
6858 agg = (dtrace_aggregation_t *)act;
6859
6860 if ((dp = act->dta_difo) != NULL)
6861 v = dtrace_dif_emulate(dp,
6862 &mstate, vstate, state);
6863
6864 if (*flags & CPU_DTRACE_ERROR)
6865 continue;
6866
6867 /*
6868 * Note that we always pass the expression
6869 * value from the previous iteration of the
6870 * action loop. This value will only be used
6871 * if there is an expression argument to the
6872 * aggregating action, denoted by the
6873 * dtag_hasarg field.
6874 */
6875 dtrace_aggregate(agg, buf,
6876 offs, aggbuf, v, val);
6877 continue;
6878 }
6879
6880 switch (act->dta_kind) {
6881 case DTRACEACT_STOP:
6882 if (dtrace_priv_proc_destructive(state,
6883 &mstate))
6884 dtrace_action_stop();
6885 continue;
6886
6887 case DTRACEACT_BREAKPOINT:
6888 if (dtrace_priv_kernel_destructive(state))
6889 dtrace_action_breakpoint(ecb);
6890 continue;
6891
6892 case DTRACEACT_PANIC:
6893 if (dtrace_priv_kernel_destructive(state))
6894 dtrace_action_panic(ecb);
6895 continue;
6896
6897 case DTRACEACT_STACK:
6898 if (!dtrace_priv_kernel(state))
6899 continue;
6900
6901 dtrace_getpcstack((pc_t *)(tomax + valoffs),
6902 size / sizeof (pc_t), probe->dtpr_aframes,
6903 DTRACE_ANCHORED(probe) ? NULL :
6904 (uint32_t *)arg0);
6905
6906 continue;
6907
6908 case DTRACEACT_JSTACK:
6909 case DTRACEACT_USTACK:
6910 if (!dtrace_priv_proc(state, &mstate))
6911 continue;
6912
6913 /*
6914 * See comment in DIF_VAR_PID.
6915 */
6916 if (DTRACE_ANCHORED(mstate.dtms_probe) &&
6917 CPU_ON_INTR(CPU)) {
6918 int depth = DTRACE_USTACK_NFRAMES(
6919 rec->dtrd_arg) + 1;
6920
6921 dtrace_bzero((void *)(tomax + valoffs),
6922 DTRACE_USTACK_STRSIZE(rec->dtrd_arg)
6923 + depth * sizeof (uint64_t));
6924
6925 continue;
6926 }
6927
6928 if (DTRACE_USTACK_STRSIZE(rec->dtrd_arg) != 0 &&
6929 curproc->p_dtrace_helpers != NULL) {
6930 /*
6931 * This is the slow path -- we have
6932 * allocated string space, and we're
6933 * getting the stack of a process that
6934 * has helpers. Call into a separate
6935 * routine to perform this processing.
6936 */
6937 dtrace_action_ustack(&mstate, state,
6938 (uint64_t *)(tomax + valoffs),
6939 rec->dtrd_arg);
6940 continue;
6941 }
6942
6943 /*
6944 * Clear the string space, since there's no
6945 * helper to do it for us.
6946 */
6947 if (DTRACE_USTACK_STRSIZE(rec->dtrd_arg) != 0) {
6948 int depth = DTRACE_USTACK_NFRAMES(
6949 rec->dtrd_arg);
6950 size_t strsize = DTRACE_USTACK_STRSIZE(
6951 rec->dtrd_arg);
6952 uint64_t *buf = (uint64_t *)(tomax +
6953 valoffs);
6954 void *strspace = &buf[depth + 1];
6955
6956 dtrace_bzero(strspace,
6957 MIN(depth, strsize));
6958 }
6959
6960 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
6961 dtrace_getupcstack((uint64_t *)
6962 (tomax + valoffs),
6963 DTRACE_USTACK_NFRAMES(rec->dtrd_arg) + 1);
6964 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
6965 continue;
6966
6967 default:
6968 break;
6969 }
6970
6971 dp = act->dta_difo;
6972 ASSERT(dp != NULL);
6973
6974 val = dtrace_dif_emulate(dp, &mstate, vstate, state);
6975
6976 if (*flags & CPU_DTRACE_ERROR)
6977 continue;
6978
6979 switch (act->dta_kind) {
6980 case DTRACEACT_SPECULATE: {
6981 dtrace_rechdr_t *dtrh;
6982
6983 ASSERT(buf == &state->dts_buffer[cpuid]);
6984 buf = dtrace_speculation_buffer(state,
6985 cpuid, val);
6986
6987 if (buf == NULL) {
6988 *flags |= CPU_DTRACE_DROP;
6989 continue;
6990 }
6991
6992 offs = dtrace_buffer_reserve(buf,
6993 ecb->dte_needed, ecb->dte_alignment,
6994 state, NULL);
6995
6996 if (offs < 0) {
6997 *flags |= CPU_DTRACE_DROP;
6998 continue;
6999 }
7000
7001 tomax = buf->dtb_tomax;
7002 ASSERT(tomax != NULL);
7003
7004 if (ecb->dte_size == 0)
7005 continue;
7006
7007 ASSERT3U(ecb->dte_size, >=,
7008 sizeof (dtrace_rechdr_t));
7009 dtrh = ((void *)(tomax + offs));
7010 dtrh->dtrh_epid = ecb->dte_epid;
7011 /*
7012 * When the speculation is committed, all of
7013 * the records in the speculative buffer will
7014 * have their timestamps set to the commit
7015 * time. Until then, it is set to a sentinel
7016 * value, for debugability.
7017 */
7018 DTRACE_RECORD_STORE_TIMESTAMP(dtrh, UINT64_MAX);
7019 continue;
7020 }
7021
7022 case DTRACEACT_CHILL:
7023 if (dtrace_priv_kernel_destructive(state))
7024 dtrace_action_chill(&mstate, val);
7025 continue;
7026
7027 case DTRACEACT_RAISE:
7028 if (dtrace_priv_proc_destructive(state,
7029 &mstate))
7030 dtrace_action_raise(val);
7031 continue;
7032
7033 case DTRACEACT_COMMIT:
7034 ASSERT(!committed);
7035
7036 /*
7037 * We need to commit our buffer state.
7038 */
7039 if (ecb->dte_size)
7040 buf->dtb_offset = offs + ecb->dte_size;
7041 buf = &state->dts_buffer[cpuid];
7042 dtrace_speculation_commit(state, cpuid, val);
7043 committed = 1;
7044 continue;
7045
7046 case DTRACEACT_DISCARD:
7047 dtrace_speculation_discard(state, cpuid, val);
7048 continue;
7049
7050 case DTRACEACT_DIFEXPR:
7051 case DTRACEACT_LIBACT:
7052 case DTRACEACT_PRINTF:
7053 case DTRACEACT_PRINTA:
7054 case DTRACEACT_SYSTEM:
7055 case DTRACEACT_FREOPEN:
7056 case DTRACEACT_TRACEMEM:
7057 break;
7058
7059 case DTRACEACT_TRACEMEM_DYNSIZE:
7060 tracememsize = val;
7061 break;
7062
7063 case DTRACEACT_SYM:
7064 case DTRACEACT_MOD:
7065 if (!dtrace_priv_kernel(state))
7066 continue;
7067 break;
7068
7069 case DTRACEACT_USYM:
7070 case DTRACEACT_UMOD:
7071 case DTRACEACT_UADDR: {
7072 struct pid *pid = curthread->t_procp->p_pidp;
7073
7074 if (!dtrace_priv_proc(state, &mstate))
7075 continue;
7076
7077 DTRACE_STORE(uint64_t, tomax,
7078 valoffs, (uint64_t)pid->pid_id);
7079 DTRACE_STORE(uint64_t, tomax,
7080 valoffs + sizeof (uint64_t), val);
7081
7082 continue;
7083 }
7084
7085 case DTRACEACT_EXIT: {
7086 /*
7087 * For the exit action, we are going to attempt
7088 * to atomically set our activity to be
7089 * draining. If this fails (either because
7090 * another CPU has beat us to the exit action,
7091 * or because our current activity is something
7092 * other than ACTIVE or WARMUP), we will
7093 * continue. This assures that the exit action
7094 * can be successfully recorded at most once
7095 * when we're in the ACTIVE state. If we're
7096 * encountering the exit() action while in
7097 * COOLDOWN, however, we want to honor the new
7098 * status code. (We know that we're the only
7099 * thread in COOLDOWN, so there is no race.)
7100 */
7101 void *activity = &state->dts_activity;
7102 dtrace_activity_t current = state->dts_activity;
7103
7104 if (current == DTRACE_ACTIVITY_COOLDOWN)
7105 break;
7106
7107 if (current != DTRACE_ACTIVITY_WARMUP)
7108 current = DTRACE_ACTIVITY_ACTIVE;
7109
7110 if (dtrace_cas32(activity, current,
7111 DTRACE_ACTIVITY_DRAINING) != current) {
7112 *flags |= CPU_DTRACE_DROP;
7113 continue;
7114 }
7115
7116 break;
7117 }
7118
7119 default:
7120 ASSERT(0);
7121 }
7122
7123 if (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF ||
7124 dp->dtdo_rtype.dtdt_flags & DIF_TF_BYUREF) {
7125 uintptr_t end = valoffs + size;
7126
7127 if (tracememsize != 0 &&
7128 valoffs + tracememsize < end) {
7129 end = valoffs + tracememsize;
7130 tracememsize = 0;
7131 }
7132
7133 if (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF &&
7134 !dtrace_vcanload((void *)(uintptr_t)val,
7135 &dp->dtdo_rtype, &mstate, vstate))
7136 continue;
7137
7138 dtrace_store_by_ref(dp, tomax, size, &valoffs,
7139 &val, end, act->dta_intuple,
7140 dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF ?
7141 DIF_TF_BYREF: DIF_TF_BYUREF);
7142 continue;
7143 }
7144
7145 switch (size) {
7146 case 0:
7147 break;
7148
7149 case sizeof (uint8_t):
7150 DTRACE_STORE(uint8_t, tomax, valoffs, val);
7151 break;
7152 case sizeof (uint16_t):
7153 DTRACE_STORE(uint16_t, tomax, valoffs, val);
7154 break;
7155 case sizeof (uint32_t):
7156 DTRACE_STORE(uint32_t, tomax, valoffs, val);
7157 break;
7158 case sizeof (uint64_t):
7159 DTRACE_STORE(uint64_t, tomax, valoffs, val);
7160 break;
7161 default:
7162 /*
7163 * Any other size should have been returned by
7164 * reference, not by value.
7165 */
7166 ASSERT(0);
7167 break;
7168 }
7169 }
7170
7171 if (*flags & CPU_DTRACE_DROP)
7172 continue;
7173
7174 if (*flags & CPU_DTRACE_FAULT) {
7175 int ndx;
7176 dtrace_action_t *err;
7177
7178 buf->dtb_errors++;
7179
7180 if (probe->dtpr_id == dtrace_probeid_error) {
7181 /*
7182 * There's nothing we can do -- we had an
7183 * error on the error probe. We bump an
7184 * error counter to at least indicate that
7185 * this condition happened.
7186 */
7187 dtrace_error(&state->dts_dblerrors);
7188 continue;
7189 }
7190
7191 if (vtime) {
7192 /*
7193 * Before recursing on dtrace_probe(), we
7194 * need to explicitly clear out our start
7195 * time to prevent it from being accumulated
7196 * into t_dtrace_vtime.
7197 */
7198 curthread->t_dtrace_start = 0;
7199 }
7200
7201 /*
7202 * Iterate over the actions to figure out which action
7203 * we were processing when we experienced the error.
7204 * Note that act points _past_ the faulting action; if
7205 * act is ecb->dte_action, the fault was in the
7206 * predicate, if it's ecb->dte_action->dta_next it's
7207 * in action #1, and so on.
7208 */
7209 for (err = ecb->dte_action, ndx = 0;
7210 err != act; err = err->dta_next, ndx++)
7211 continue;
7212
7213 dtrace_probe_error(state, ecb->dte_epid, ndx,
7214 (mstate.dtms_present & DTRACE_MSTATE_FLTOFFS) ?
7215 mstate.dtms_fltoffs : -1, DTRACE_FLAGS2FLT(*flags),
7216 cpu_core[cpuid].cpuc_dtrace_illval);
7217
7218 continue;
7219 }
7220
7221 if (!committed)
7222 buf->dtb_offset = offs + ecb->dte_size;
7223 }
7224
7225 end = dtrace_gethrtime();
7226 if (vtime)
7227 curthread->t_dtrace_start = end;
7228
7229 CPU->cpu_dtrace_nsec += end - now;
7230
7231 dtrace_interrupt_enable(cookie);
7232 }
7233
7234 /*
7235 * DTrace Probe Hashing Functions
7236 *
7237 * The functions in this section (and indeed, the functions in remaining
7238 * sections) are not _called_ from probe context. (Any exceptions to this are
7239 * marked with a "Note:".) Rather, they are called from elsewhere in the
7240 * DTrace framework to look-up probes in, add probes to and remove probes from
7241 * the DTrace probe hashes. (Each probe is hashed by each element of the
7242 * probe tuple -- allowing for fast lookups, regardless of what was
7243 * specified.)
7244 */
7245 static uint_t
7246 dtrace_hash_str(char *p)
7247 {
7248 unsigned int g;
7249 uint_t hval = 0;
7250
7251 while (*p) {
7252 hval = (hval << 4) + *p++;
7253 if ((g = (hval & 0xf0000000)) != 0)
7254 hval ^= g >> 24;
7255 hval &= ~g;
7256 }
7257 return (hval);
7258 }
7259
7260 static dtrace_hash_t *
7261 dtrace_hash_create(uintptr_t stroffs, uintptr_t nextoffs, uintptr_t prevoffs)
7262 {
7263 dtrace_hash_t *hash = kmem_zalloc(sizeof (dtrace_hash_t), KM_SLEEP);
7264
7265 hash->dth_stroffs = stroffs;
7266 hash->dth_nextoffs = nextoffs;
7267 hash->dth_prevoffs = prevoffs;
7268
7269 hash->dth_size = 1;
7270 hash->dth_mask = hash->dth_size - 1;
7271
7272 hash->dth_tab = kmem_zalloc(hash->dth_size *
7273 sizeof (dtrace_hashbucket_t *), KM_SLEEP);
7274
7275 return (hash);
7276 }
7277
7278 static void
7279 dtrace_hash_destroy(dtrace_hash_t *hash)
7280 {
7281 #ifdef DEBUG
7282 int i;
7283
7284 for (i = 0; i < hash->dth_size; i++)
7285 ASSERT(hash->dth_tab[i] == NULL);
7286 #endif
7287
7288 kmem_free(hash->dth_tab,
7289 hash->dth_size * sizeof (dtrace_hashbucket_t *));
7290 kmem_free(hash, sizeof (dtrace_hash_t));
7291 }
7292
7293 static void
7294 dtrace_hash_resize(dtrace_hash_t *hash)
7295 {
7296 int size = hash->dth_size, i, ndx;
7297 int new_size = hash->dth_size << 1;
7298 int new_mask = new_size - 1;
7299 dtrace_hashbucket_t **new_tab, *bucket, *next;
7300
7301 ASSERT((new_size & new_mask) == 0);
7302
7303 new_tab = kmem_zalloc(new_size * sizeof (void *), KM_SLEEP);
7304
7305 for (i = 0; i < size; i++) {
7306 for (bucket = hash->dth_tab[i]; bucket != NULL; bucket = next) {
7307 dtrace_probe_t *probe = bucket->dthb_chain;
7308
7309 ASSERT(probe != NULL);
7310 ndx = DTRACE_HASHSTR(hash, probe) & new_mask;
7311
7312 next = bucket->dthb_next;
7313 bucket->dthb_next = new_tab[ndx];
7314 new_tab[ndx] = bucket;
7315 }
7316 }
7317
7318 kmem_free(hash->dth_tab, hash->dth_size * sizeof (void *));
7319 hash->dth_tab = new_tab;
7320 hash->dth_size = new_size;
7321 hash->dth_mask = new_mask;
7322 }
7323
7324 static void
7325 dtrace_hash_add(dtrace_hash_t *hash, dtrace_probe_t *new)
7326 {
7327 int hashval = DTRACE_HASHSTR(hash, new);
7328 int ndx = hashval & hash->dth_mask;
7329 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
7330 dtrace_probe_t **nextp, **prevp;
7331
7332 for (; bucket != NULL; bucket = bucket->dthb_next) {
7333 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, new))
7334 goto add;
7335 }
7336
7337 if ((hash->dth_nbuckets >> 1) > hash->dth_size) {
7338 dtrace_hash_resize(hash);
7339 dtrace_hash_add(hash, new);
7340 return;
7341 }
7342
7343 bucket = kmem_zalloc(sizeof (dtrace_hashbucket_t), KM_SLEEP);
7344 bucket->dthb_next = hash->dth_tab[ndx];
7345 hash->dth_tab[ndx] = bucket;
7346 hash->dth_nbuckets++;
7347
7348 add:
7349 nextp = DTRACE_HASHNEXT(hash, new);
7350 ASSERT(*nextp == NULL && *(DTRACE_HASHPREV(hash, new)) == NULL);
7351 *nextp = bucket->dthb_chain;
7352
7353 if (bucket->dthb_chain != NULL) {
7354 prevp = DTRACE_HASHPREV(hash, bucket->dthb_chain);
7355 ASSERT(*prevp == NULL);
7356 *prevp = new;
7357 }
7358
7359 bucket->dthb_chain = new;
7360 bucket->dthb_len++;
7361 }
7362
7363 static dtrace_probe_t *
7364 dtrace_hash_lookup(dtrace_hash_t *hash, dtrace_probe_t *template)
7365 {
7366 int hashval = DTRACE_HASHSTR(hash, template);
7367 int ndx = hashval & hash->dth_mask;
7368 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
7369
7370 for (; bucket != NULL; bucket = bucket->dthb_next) {
7371 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template))
7372 return (bucket->dthb_chain);
7373 }
7374
7375 return (NULL);
7376 }
7377
7378 static int
7379 dtrace_hash_collisions(dtrace_hash_t *hash, dtrace_probe_t *template)
7380 {
7381 int hashval = DTRACE_HASHSTR(hash, template);
7382 int ndx = hashval & hash->dth_mask;
7383 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
7384
7385 for (; bucket != NULL; bucket = bucket->dthb_next) {
7386 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template))
7387 return (bucket->dthb_len);
7388 }
7389
7390 return (NULL);
7391 }
7392
7393 static void
7394 dtrace_hash_remove(dtrace_hash_t *hash, dtrace_probe_t *probe)
7395 {
7396 int ndx = DTRACE_HASHSTR(hash, probe) & hash->dth_mask;
7397 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
7398
7399 dtrace_probe_t **prevp = DTRACE_HASHPREV(hash, probe);
7400 dtrace_probe_t **nextp = DTRACE_HASHNEXT(hash, probe);
7401
7402 /*
7403 * Find the bucket that we're removing this probe from.
7404 */
7405 for (; bucket != NULL; bucket = bucket->dthb_next) {
7406 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, probe))
7407 break;
7408 }
7409
7410 ASSERT(bucket != NULL);
7411
7412 if (*prevp == NULL) {
7413 if (*nextp == NULL) {
7414 /*
7415 * The removed probe was the only probe on this
7416 * bucket; we need to remove the bucket.
7417 */
7418 dtrace_hashbucket_t *b = hash->dth_tab[ndx];
7419
7420 ASSERT(bucket->dthb_chain == probe);
7421 ASSERT(b != NULL);
7422
7423 if (b == bucket) {
7424 hash->dth_tab[ndx] = bucket->dthb_next;
7425 } else {
7426 while (b->dthb_next != bucket)
7427 b = b->dthb_next;
7428 b->dthb_next = bucket->dthb_next;
7429 }
7430
7431 ASSERT(hash->dth_nbuckets > 0);
7432 hash->dth_nbuckets--;
7433 kmem_free(bucket, sizeof (dtrace_hashbucket_t));
7434 return;
7435 }
7436
7437 bucket->dthb_chain = *nextp;
7438 } else {
7439 *(DTRACE_HASHNEXT(hash, *prevp)) = *nextp;
7440 }
7441
7442 if (*nextp != NULL)
7443 *(DTRACE_HASHPREV(hash, *nextp)) = *prevp;
7444 }
7445
7446 /*
7447 * DTrace Utility Functions
7448 *
7449 * These are random utility functions that are _not_ called from probe context.
7450 */
7451 static int
7452 dtrace_badattr(const dtrace_attribute_t *a)
7453 {
7454 return (a->dtat_name > DTRACE_STABILITY_MAX ||
7455 a->dtat_data > DTRACE_STABILITY_MAX ||
7456 a->dtat_class > DTRACE_CLASS_MAX);
7457 }
7458
7459 /*
7460 * Return a duplicate copy of a string. If the specified string is NULL,
7461 * this function returns a zero-length string.
7462 */
7463 static char *
7464 dtrace_strdup(const char *str)
7465 {
7466 char *new = kmem_zalloc((str != NULL ? strlen(str) : 0) + 1, KM_SLEEP);
7467
7468 if (str != NULL)
7469 (void) strcpy(new, str);
7470
7471 return (new);
7472 }
7473
7474 #define DTRACE_ISALPHA(c) \
7475 (((c) >= 'a' && (c) <= 'z') || ((c) >= 'A' && (c) <= 'Z'))
7476
7477 static int
7478 dtrace_badname(const char *s)
7479 {
7480 char c;
7481
7482 if (s == NULL || (c = *s++) == '\0')
7483 return (0);
7484
7485 if (!DTRACE_ISALPHA(c) && c != '-' && c != '_' && c != '.')
7486 return (1);
7487
7488 while ((c = *s++) != '\0') {
7489 if (!DTRACE_ISALPHA(c) && (c < '0' || c > '9') &&
7490 c != '-' && c != '_' && c != '.' && c != '`')
7491 return (1);
7492 }
7493
7494 return (0);
7495 }
7496
7497 static void
7498 dtrace_cred2priv(cred_t *cr, uint32_t *privp, uid_t *uidp, zoneid_t *zoneidp)
7499 {
7500 uint32_t priv;
7501
7502 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) {
7503 /*
7504 * For DTRACE_PRIV_ALL, the uid and zoneid don't matter.
7505 */
7506 priv = DTRACE_PRIV_ALL;
7507 } else {
7508 *uidp = crgetuid(cr);
7509 *zoneidp = crgetzoneid(cr);
7510
7511 priv = 0;
7512 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE))
7513 priv |= DTRACE_PRIV_KERNEL | DTRACE_PRIV_USER;
7514 else if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE))
7515 priv |= DTRACE_PRIV_USER;
7516 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE))
7517 priv |= DTRACE_PRIV_PROC;
7518 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE))
7519 priv |= DTRACE_PRIV_OWNER;
7520 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE))
7521 priv |= DTRACE_PRIV_ZONEOWNER;
7522 }
7523
7524 *privp = priv;
7525 }
7526
7527 #ifdef DTRACE_ERRDEBUG
7528 static void
7529 dtrace_errdebug(const char *str)
7530 {
7531 int hval = dtrace_hash_str((char *)str) % DTRACE_ERRHASHSZ;
7532 int occupied = 0;
7533
7534 mutex_enter(&dtrace_errlock);
7535 dtrace_errlast = str;
7536 dtrace_errthread = curthread;
7537
7538 while (occupied++ < DTRACE_ERRHASHSZ) {
7539 if (dtrace_errhash[hval].dter_msg == str) {
7540 dtrace_errhash[hval].dter_count++;
7541 goto out;
7542 }
7543
7544 if (dtrace_errhash[hval].dter_msg != NULL) {
7545 hval = (hval + 1) % DTRACE_ERRHASHSZ;
7546 continue;
7547 }
7548
7549 dtrace_errhash[hval].dter_msg = str;
7550 dtrace_errhash[hval].dter_count = 1;
7551 goto out;
7552 }
7553
7554 panic("dtrace: undersized error hash");
7555 out:
7556 mutex_exit(&dtrace_errlock);
7557 }
7558 #endif
7559
7560 /*
7561 * DTrace Matching Functions
7562 *
7563 * These functions are used to match groups of probes, given some elements of
7564 * a probe tuple, or some globbed expressions for elements of a probe tuple.
7565 */
7566 static int
7567 dtrace_match_priv(const dtrace_probe_t *prp, uint32_t priv, uid_t uid,
7568 zoneid_t zoneid)
7569 {
7570 if (priv != DTRACE_PRIV_ALL) {
7571 uint32_t ppriv = prp->dtpr_provider->dtpv_priv.dtpp_flags;
7572 uint32_t match = priv & ppriv;
7573
7574 /*
7575 * No PRIV_DTRACE_* privileges...
7576 */
7577 if ((priv & (DTRACE_PRIV_PROC | DTRACE_PRIV_USER |
7578 DTRACE_PRIV_KERNEL)) == 0)
7579 return (0);
7580
7581 /*
7582 * No matching bits, but there were bits to match...
7583 */
7584 if (match == 0 && ppriv != 0)
7585 return (0);
7586
7587 /*
7588 * Need to have permissions to the process, but don't...
7589 */
7590 if (((ppriv & ~match) & DTRACE_PRIV_OWNER) != 0 &&
7591 uid != prp->dtpr_provider->dtpv_priv.dtpp_uid) {
7592 return (0);
7593 }
7594
7595 /*
7596 * Need to be in the same zone unless we possess the
7597 * privilege to examine all zones.
7598 */
7599 if (((ppriv & ~match) & DTRACE_PRIV_ZONEOWNER) != 0 &&
7600 zoneid != prp->dtpr_provider->dtpv_priv.dtpp_zoneid) {
7601 return (0);
7602 }
7603 }
7604
7605 return (1);
7606 }
7607
7608 /*
7609 * dtrace_match_probe compares a dtrace_probe_t to a pre-compiled key, which
7610 * consists of input pattern strings and an ops-vector to evaluate them.
7611 * This function returns >0 for match, 0 for no match, and <0 for error.
7612 */
7613 static int
7614 dtrace_match_probe(const dtrace_probe_t *prp, const dtrace_probekey_t *pkp,
7615 uint32_t priv, uid_t uid, zoneid_t zoneid)
7616 {
7617 dtrace_provider_t *pvp = prp->dtpr_provider;
7618 int rv;
7619
7620 if (pvp->dtpv_defunct)
7621 return (0);
7622
7623 if ((rv = pkp->dtpk_pmatch(pvp->dtpv_name, pkp->dtpk_prov, 0)) <= 0)
7624 return (rv);
7625
7626 if ((rv = pkp->dtpk_mmatch(prp->dtpr_mod, pkp->dtpk_mod, 0)) <= 0)
7627 return (rv);
7628
7629 if ((rv = pkp->dtpk_fmatch(prp->dtpr_func, pkp->dtpk_func, 0)) <= 0)
7630 return (rv);
7631
7632 if ((rv = pkp->dtpk_nmatch(prp->dtpr_name, pkp->dtpk_name, 0)) <= 0)
7633 return (rv);
7634
7635 if (dtrace_match_priv(prp, priv, uid, zoneid) == 0)
7636 return (0);
7637
7638 return (rv);
7639 }
7640
7641 /*
7642 * dtrace_match_glob() is a safe kernel implementation of the gmatch(3GEN)
7643 * interface for matching a glob pattern 'p' to an input string 's'. Unlike
7644 * libc's version, the kernel version only applies to 8-bit ASCII strings.
7645 * In addition, all of the recursion cases except for '*' matching have been
7646 * unwound. For '*', we still implement recursive evaluation, but a depth
7647 * counter is maintained and matching is aborted if we recurse too deep.
7648 * The function returns 0 if no match, >0 if match, and <0 if recursion error.
7649 */
7650 static int
7651 dtrace_match_glob(const char *s, const char *p, int depth)
7652 {
7653 const char *olds;
7654 char s1, c;
7655 int gs;
7656
7657 if (depth > DTRACE_PROBEKEY_MAXDEPTH)
7658 return (-1);
7659
7660 if (s == NULL)
7661 s = ""; /* treat NULL as empty string */
7662
7663 top:
7664 olds = s;
7665 s1 = *s++;
7666
7667 if (p == NULL)
7668 return (0);
7669
7670 if ((c = *p++) == '\0')
7671 return (s1 == '\0');
7672
7673 switch (c) {
7674 case '[': {
7675 int ok = 0, notflag = 0;
7676 char lc = '\0';
7677
7678 if (s1 == '\0')
7679 return (0);
7680
7681 if (*p == '!') {
7682 notflag = 1;
7683 p++;
7684 }
7685
7686 if ((c = *p++) == '\0')
7687 return (0);
7688
7689 do {
7690 if (c == '-' && lc != '\0' && *p != ']') {
7691 if ((c = *p++) == '\0')
7692 return (0);
7693 if (c == '\\' && (c = *p++) == '\0')
7694 return (0);
7695
7696 if (notflag) {
7697 if (s1 < lc || s1 > c)
7698 ok++;
7699 else
7700 return (0);
7701 } else if (lc <= s1 && s1 <= c)
7702 ok++;
7703
7704 } else if (c == '\\' && (c = *p++) == '\0')
7705 return (0);
7706
7707 lc = c; /* save left-hand 'c' for next iteration */
7708
7709 if (notflag) {
7710 if (s1 != c)
7711 ok++;
7712 else
7713 return (0);
7714 } else if (s1 == c)
7715 ok++;
7716
7717 if ((c = *p++) == '\0')
7718 return (0);
7719
7720 } while (c != ']');
7721
7722 if (ok)
7723 goto top;
7724
7725 return (0);
7726 }
7727
7728 case '\\':
7729 if ((c = *p++) == '\0')
7730 return (0);
7731 /*FALLTHRU*/
7732
7733 default:
7734 if (c != s1)
7735 return (0);
7736 /*FALLTHRU*/
7737
7738 case '?':
7739 if (s1 != '\0')
7740 goto top;
7741 return (0);
7742
7743 case '*':
7744 while (*p == '*')
7745 p++; /* consecutive *'s are identical to a single one */
7746
7747 if (*p == '\0')
7748 return (1);
7749
7750 for (s = olds; *s != '\0'; s++) {
7751 if ((gs = dtrace_match_glob(s, p, depth + 1)) != 0)
7752 return (gs);
7753 }
7754
7755 return (0);
7756 }
7757 }
7758
7759 /*ARGSUSED*/
7760 static int
7761 dtrace_match_string(const char *s, const char *p, int depth)
7762 {
7763 return (s != NULL && strcmp(s, p) == 0);
7764 }
7765
7766 /*ARGSUSED*/
7767 static int
7768 dtrace_match_nul(const char *s, const char *p, int depth)
7769 {
7770 return (1); /* always match the empty pattern */
7771 }
7772
7773 /*ARGSUSED*/
7774 static int
7775 dtrace_match_nonzero(const char *s, const char *p, int depth)
7776 {
7777 return (s != NULL && s[0] != '\0');
7778 }
7779
7780 static int
7781 dtrace_match(const dtrace_probekey_t *pkp, uint32_t priv, uid_t uid,
7782 zoneid_t zoneid, int (*matched)(dtrace_probe_t *, void *), void *arg)
7783 {
7784 dtrace_probe_t template, *probe;
7785 dtrace_hash_t *hash = NULL;
7786 int len, rc, best = INT_MAX, nmatched = 0;
7787 dtrace_id_t i;
7788
7789 ASSERT(MUTEX_HELD(&dtrace_lock));
7790
7791 /*
7792 * If the probe ID is specified in the key, just lookup by ID and
7793 * invoke the match callback once if a matching probe is found.
7794 */
7795 if (pkp->dtpk_id != DTRACE_IDNONE) {
7796 if ((probe = dtrace_probe_lookup_id(pkp->dtpk_id)) != NULL &&
7797 dtrace_match_probe(probe, pkp, priv, uid, zoneid) > 0) {
7798 if ((*matched)(probe, arg) == DTRACE_MATCH_FAIL)
7799 return (DTRACE_MATCH_FAIL);
7800 nmatched++;
7801 }
7802 return (nmatched);
7803 }
7804
7805 template.dtpr_mod = (char *)pkp->dtpk_mod;
7806 template.dtpr_func = (char *)pkp->dtpk_func;
7807 template.dtpr_name = (char *)pkp->dtpk_name;
7808
7809 /*
7810 * We want to find the most distinct of the module name, function
7811 * name, and name. So for each one that is not a glob pattern or
7812 * empty string, we perform a lookup in the corresponding hash and
7813 * use the hash table with the fewest collisions to do our search.
7814 */
7815 if (pkp->dtpk_mmatch == &dtrace_match_string &&
7816 (len = dtrace_hash_collisions(dtrace_bymod, &template)) < best) {
7817 best = len;
7818 hash = dtrace_bymod;
7819 }
7820
7821 if (pkp->dtpk_fmatch == &dtrace_match_string &&
7822 (len = dtrace_hash_collisions(dtrace_byfunc, &template)) < best) {
7823 best = len;
7824 hash = dtrace_byfunc;
7825 }
7826
7827 if (pkp->dtpk_nmatch == &dtrace_match_string &&
7828 (len = dtrace_hash_collisions(dtrace_byname, &template)) < best) {
7829 best = len;
7830 hash = dtrace_byname;
7831 }
7832
7833 /*
7834 * If we did not select a hash table, iterate over every probe and
7835 * invoke our callback for each one that matches our input probe key.
7836 */
7837 if (hash == NULL) {
7838 for (i = 0; i < dtrace_nprobes; i++) {
7839 if ((probe = dtrace_probes[i]) == NULL ||
7840 dtrace_match_probe(probe, pkp, priv, uid,
7841 zoneid) <= 0)
7842 continue;
7843
7844 nmatched++;
7845
7846 if ((rc = (*matched)(probe, arg)) !=
7847 DTRACE_MATCH_NEXT) {
7848 if (rc == DTRACE_MATCH_FAIL)
7849 return (DTRACE_MATCH_FAIL);
7850 break;
7851 }
7852 }
7853
7854 return (nmatched);
7855 }
7856
7857 /*
7858 * If we selected a hash table, iterate over each probe of the same key
7859 * name and invoke the callback for every probe that matches the other
7860 * attributes of our input probe key.
7861 */
7862 for (probe = dtrace_hash_lookup(hash, &template); probe != NULL;
7863 probe = *(DTRACE_HASHNEXT(hash, probe))) {
7864
7865 if (dtrace_match_probe(probe, pkp, priv, uid, zoneid) <= 0)
7866 continue;
7867
7868 nmatched++;
7869
7870 if ((rc = (*matched)(probe, arg)) != DTRACE_MATCH_NEXT) {
7871 if (rc == DTRACE_MATCH_FAIL)
7872 return (DTRACE_MATCH_FAIL);
7873 break;
7874 }
7875 }
7876
7877 return (nmatched);
7878 }
7879
7880 /*
7881 * Return the function pointer dtrace_probecmp() should use to compare the
7882 * specified pattern with a string. For NULL or empty patterns, we select
7883 * dtrace_match_nul(). For glob pattern strings, we use dtrace_match_glob().
7884 * For non-empty non-glob strings, we use dtrace_match_string().
7885 */
7886 static dtrace_probekey_f *
7887 dtrace_probekey_func(const char *p)
7888 {
7889 char c;
7890
7891 if (p == NULL || *p == '\0')
7892 return (&dtrace_match_nul);
7893
7894 while ((c = *p++) != '\0') {
7895 if (c == '[' || c == '?' || c == '*' || c == '\\')
7896 return (&dtrace_match_glob);
7897 }
7898
7899 return (&dtrace_match_string);
7900 }
7901
7902 /*
7903 * Build a probe comparison key for use with dtrace_match_probe() from the
7904 * given probe description. By convention, a null key only matches anchored
7905 * probes: if each field is the empty string, reset dtpk_fmatch to
7906 * dtrace_match_nonzero().
7907 */
7908 static void
7909 dtrace_probekey(const dtrace_probedesc_t *pdp, dtrace_probekey_t *pkp)
7910 {
7911 pkp->dtpk_prov = pdp->dtpd_provider;
7912 pkp->dtpk_pmatch = dtrace_probekey_func(pdp->dtpd_provider);
7913
7914 pkp->dtpk_mod = pdp->dtpd_mod;
7915 pkp->dtpk_mmatch = dtrace_probekey_func(pdp->dtpd_mod);
7916
7917 pkp->dtpk_func = pdp->dtpd_func;
7918 pkp->dtpk_fmatch = dtrace_probekey_func(pdp->dtpd_func);
7919
7920 pkp->dtpk_name = pdp->dtpd_name;
7921 pkp->dtpk_nmatch = dtrace_probekey_func(pdp->dtpd_name);
7922
7923 pkp->dtpk_id = pdp->dtpd_id;
7924
7925 if (pkp->dtpk_id == DTRACE_IDNONE &&
7926 pkp->dtpk_pmatch == &dtrace_match_nul &&
7927 pkp->dtpk_mmatch == &dtrace_match_nul &&
7928 pkp->dtpk_fmatch == &dtrace_match_nul &&
7929 pkp->dtpk_nmatch == &dtrace_match_nul)
7930 pkp->dtpk_fmatch = &dtrace_match_nonzero;
7931 }
7932
7933 /*
7934 * DTrace Provider-to-Framework API Functions
7935 *
7936 * These functions implement much of the Provider-to-Framework API, as
7937 * described in <sys/dtrace.h>. The parts of the API not in this section are
7938 * the functions in the API for probe management (found below), and
7939 * dtrace_probe() itself (found above).
7940 */
7941
7942 /*
7943 * Register the calling provider with the DTrace framework. This should
7944 * generally be called by DTrace providers in their attach(9E) entry point.
7945 */
7946 int
7947 dtrace_register(const char *name, const dtrace_pattr_t *pap, uint32_t priv,
7948 cred_t *cr, const dtrace_pops_t *pops, void *arg, dtrace_provider_id_t *idp)
7949 {
7950 dtrace_provider_t *provider;
7951
7952 if (name == NULL || pap == NULL || pops == NULL || idp == NULL) {
7953 cmn_err(CE_WARN, "failed to register provider '%s': invalid "
7954 "arguments", name ? name : "<NULL>");
7955 return (EINVAL);
7956 }
7957
7958 if (name[0] == '\0' || dtrace_badname(name)) {
7959 cmn_err(CE_WARN, "failed to register provider '%s': invalid "
7960 "provider name", name);
7961 return (EINVAL);
7962 }
7963
7964 if ((pops->dtps_provide == NULL && pops->dtps_provide_module == NULL) ||
7965 pops->dtps_enable == NULL || pops->dtps_disable == NULL ||
7966 pops->dtps_destroy == NULL ||
7967 ((pops->dtps_resume == NULL) != (pops->dtps_suspend == NULL))) {
7968 cmn_err(CE_WARN, "failed to register provider '%s': invalid "
7969 "provider ops", name);
7970 return (EINVAL);
7971 }
7972
7973 if (dtrace_badattr(&pap->dtpa_provider) ||
7974 dtrace_badattr(&pap->dtpa_mod) ||
7975 dtrace_badattr(&pap->dtpa_func) ||
7976 dtrace_badattr(&pap->dtpa_name) ||
7977 dtrace_badattr(&pap->dtpa_args)) {
7978 cmn_err(CE_WARN, "failed to register provider '%s': invalid "
7979 "provider attributes", name);
7980 return (EINVAL);
7981 }
7982
7983 if (priv & ~DTRACE_PRIV_ALL) {
7984 cmn_err(CE_WARN, "failed to register provider '%s': invalid "
7985 "privilege attributes", name);
7986 return (EINVAL);
7987 }
7988
7989 if ((priv & DTRACE_PRIV_KERNEL) &&
7990 (priv & (DTRACE_PRIV_USER | DTRACE_PRIV_OWNER)) &&
7991 pops->dtps_mode == NULL) {
7992 cmn_err(CE_WARN, "failed to register provider '%s': need "
7993 "dtps_mode() op for given privilege attributes", name);
7994 return (EINVAL);
7995 }
7996
7997 provider = kmem_zalloc(sizeof (dtrace_provider_t), KM_SLEEP);
7998 provider->dtpv_name = kmem_alloc(strlen(name) + 1, KM_SLEEP);
7999 (void) strcpy(provider->dtpv_name, name);
8000
8001 provider->dtpv_attr = *pap;
8002 provider->dtpv_priv.dtpp_flags = priv;
8003 if (cr != NULL) {
8004 provider->dtpv_priv.dtpp_uid = crgetuid(cr);
8005 provider->dtpv_priv.dtpp_zoneid = crgetzoneid(cr);
8006 }
8007 provider->dtpv_pops = *pops;
8008
8009 if (pops->dtps_provide == NULL) {
8010 ASSERT(pops->dtps_provide_module != NULL);
8011 provider->dtpv_pops.dtps_provide =
8012 (void (*)(void *, const dtrace_probedesc_t *))dtrace_nullop;
8013 }
8014
8015 if (pops->dtps_provide_module == NULL) {
8016 ASSERT(pops->dtps_provide != NULL);
8017 provider->dtpv_pops.dtps_provide_module =
8018 (void (*)(void *, struct modctl *))dtrace_nullop;
8019 }
8020
8021 if (pops->dtps_suspend == NULL) {
8022 ASSERT(pops->dtps_resume == NULL);
8023 provider->dtpv_pops.dtps_suspend =
8024 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop;
8025 provider->dtpv_pops.dtps_resume =
8026 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop;
8027 }
8028
8029 provider->dtpv_arg = arg;
8030 *idp = (dtrace_provider_id_t)provider;
8031
8032 if (pops == &dtrace_provider_ops) {
8033 ASSERT(MUTEX_HELD(&dtrace_provider_lock));
8034 ASSERT(MUTEX_HELD(&dtrace_lock));
8035 ASSERT(dtrace_anon.dta_enabling == NULL);
8036
8037 /*
8038 * We make sure that the DTrace provider is at the head of
8039 * the provider chain.
8040 */
8041 provider->dtpv_next = dtrace_provider;
8042 dtrace_provider = provider;
8043 return (0);
8044 }
8045
8046 mutex_enter(&dtrace_provider_lock);
8047 mutex_enter(&dtrace_lock);
8048
8049 /*
8050 * If there is at least one provider registered, we'll add this
8051 * provider after the first provider.
8052 */
8053 if (dtrace_provider != NULL) {
8054 provider->dtpv_next = dtrace_provider->dtpv_next;
8055 dtrace_provider->dtpv_next = provider;
8056 } else {
8057 dtrace_provider = provider;
8058 }
8059
8060 if (dtrace_retained != NULL) {
8061 dtrace_enabling_provide(provider);
8062
8063 /*
8064 * Now we need to call dtrace_enabling_matchall() -- which
8065 * will acquire cpu_lock and dtrace_lock. We therefore need
8066 * to drop all of our locks before calling into it...
8067 */
8068 mutex_exit(&dtrace_lock);
8069 mutex_exit(&dtrace_provider_lock);
8070 dtrace_enabling_matchall();
8071
8072 return (0);
8073 }
8074
8075 mutex_exit(&dtrace_lock);
8076 mutex_exit(&dtrace_provider_lock);
8077
8078 return (0);
8079 }
8080
8081 /*
8082 * Unregister the specified provider from the DTrace framework. This should
8083 * generally be called by DTrace providers in their detach(9E) entry point.
8084 */
8085 int
8086 dtrace_unregister(dtrace_provider_id_t id)
8087 {
8088 dtrace_provider_t *old = (dtrace_provider_t *)id;
8089 dtrace_provider_t *prev = NULL;
8090 int i, self = 0, noreap = 0;
8091 dtrace_probe_t *probe, *first = NULL;
8092
8093 if (old->dtpv_pops.dtps_enable ==
8094 (int (*)(void *, dtrace_id_t, void *))dtrace_enable_nullop) {
8095 /*
8096 * If DTrace itself is the provider, we're called with locks
8097 * already held.
8098 */
8099 ASSERT(old == dtrace_provider);
8100 ASSERT(dtrace_devi != NULL);
8101 ASSERT(MUTEX_HELD(&dtrace_provider_lock));
8102 ASSERT(MUTEX_HELD(&dtrace_lock));
8103 self = 1;
8104
8105 if (dtrace_provider->dtpv_next != NULL) {
8106 /*
8107 * There's another provider here; return failure.
8108 */
8109 return (EBUSY);
8110 }
8111 } else {
8112 mutex_enter(&dtrace_provider_lock);
8113 mutex_enter(&mod_lock);
8114 mutex_enter(&dtrace_lock);
8115 }
8116
8117 /*
8118 * If anyone has /dev/dtrace open, or if there are anonymous enabled
8119 * probes, we refuse to let providers slither away, unless this
8120 * provider has already been explicitly invalidated.
8121 */
8122 if (!old->dtpv_defunct &&
8123 (dtrace_opens || (dtrace_anon.dta_state != NULL &&
8124 dtrace_anon.dta_state->dts_necbs > 0))) {
8125 if (!self) {
8126 mutex_exit(&dtrace_lock);
8127 mutex_exit(&mod_lock);
8128 mutex_exit(&dtrace_provider_lock);
8129 }
8130 return (EBUSY);
8131 }
8132
8133 /*
8134 * Attempt to destroy the probes associated with this provider.
8135 */
8136 for (i = 0; i < dtrace_nprobes; i++) {
8137 if ((probe = dtrace_probes[i]) == NULL)
8138 continue;
8139
8140 if (probe->dtpr_provider != old)
8141 continue;
8142
8143 if (probe->dtpr_ecb == NULL)
8144 continue;
8145
8146 /*
8147 * If we are trying to unregister a defunct provider, and the
8148 * provider was made defunct within the interval dictated by
8149 * dtrace_unregister_defunct_reap, we'll (asynchronously)
8150 * attempt to reap our enablings. To denote that the provider
8151 * should reattempt to unregister itself at some point in the
8152 * future, we will return a differentiable error code (EAGAIN
8153 * instead of EBUSY) in this case.
8154 */
8155 if (dtrace_gethrtime() - old->dtpv_defunct >
8156 dtrace_unregister_defunct_reap)
8157 noreap = 1;
8158
8159 if (!self) {
8160 mutex_exit(&dtrace_lock);
8161 mutex_exit(&mod_lock);
8162 mutex_exit(&dtrace_provider_lock);
8163 }
8164
8165 if (noreap)
8166 return (EBUSY);
8167
8168 (void) taskq_dispatch(dtrace_taskq,
8169 (task_func_t *)dtrace_enabling_reap, NULL, TQ_SLEEP);
8170
8171 return (EAGAIN);
8172 }
8173
8174 /*
8175 * All of the probes for this provider are disabled; we can safely
8176 * remove all of them from their hash chains and from the probe array.
8177 */
8178 for (i = 0; i < dtrace_nprobes; i++) {
8179 if ((probe = dtrace_probes[i]) == NULL)
8180 continue;
8181
8182 if (probe->dtpr_provider != old)
8183 continue;
8184
8185 dtrace_probes[i] = NULL;
8186
8187 dtrace_hash_remove(dtrace_bymod, probe);
8188 dtrace_hash_remove(dtrace_byfunc, probe);
8189 dtrace_hash_remove(dtrace_byname, probe);
8190
8191 if (first == NULL) {
8192 first = probe;
8193 probe->dtpr_nextmod = NULL;
8194 } else {
8195 probe->dtpr_nextmod = first;
8196 first = probe;
8197 }
8198 }
8199
8200 /*
8201 * The provider's probes have been removed from the hash chains and
8202 * from the probe array. Now issue a dtrace_sync() to be sure that
8203 * everyone has cleared out from any probe array processing.
8204 */
8205 dtrace_sync();
8206
8207 for (probe = first; probe != NULL; probe = first) {
8208 first = probe->dtpr_nextmod;
8209
8210 old->dtpv_pops.dtps_destroy(old->dtpv_arg, probe->dtpr_id,
8211 probe->dtpr_arg);
8212 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1);
8213 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1);
8214 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1);
8215 vmem_free(dtrace_arena, (void *)(uintptr_t)(probe->dtpr_id), 1);
8216 kmem_free(probe, sizeof (dtrace_probe_t));
8217 }
8218
8219 if ((prev = dtrace_provider) == old) {
8220 ASSERT(self || dtrace_devi == NULL);
8221 ASSERT(old->dtpv_next == NULL || dtrace_devi == NULL);
8222 dtrace_provider = old->dtpv_next;
8223 } else {
8224 while (prev != NULL && prev->dtpv_next != old)
8225 prev = prev->dtpv_next;
8226
8227 if (prev == NULL) {
8228 panic("attempt to unregister non-existent "
8229 "dtrace provider %p\n", (void *)id);
8230 }
8231
8232 prev->dtpv_next = old->dtpv_next;
8233 }
8234
8235 if (!self) {
8236 mutex_exit(&dtrace_lock);
8237 mutex_exit(&mod_lock);
8238 mutex_exit(&dtrace_provider_lock);
8239 }
8240
8241 kmem_free(old->dtpv_name, strlen(old->dtpv_name) + 1);
8242 kmem_free(old, sizeof (dtrace_provider_t));
8243
8244 return (0);
8245 }
8246
8247 /*
8248 * Invalidate the specified provider. All subsequent probe lookups for the
8249 * specified provider will fail, but its probes will not be removed.
8250 */
8251 void
8252 dtrace_invalidate(dtrace_provider_id_t id)
8253 {
8254 dtrace_provider_t *pvp = (dtrace_provider_t *)id;
8255
8256 ASSERT(pvp->dtpv_pops.dtps_enable !=
8257 (int (*)(void *, dtrace_id_t, void *))dtrace_enable_nullop);
8258
8259 mutex_enter(&dtrace_provider_lock);
8260 mutex_enter(&dtrace_lock);
8261
8262 pvp->dtpv_defunct = dtrace_gethrtime();
8263
8264 mutex_exit(&dtrace_lock);
8265 mutex_exit(&dtrace_provider_lock);
8266 }
8267
8268 /*
8269 * Indicate whether or not DTrace has attached.
8270 */
8271 int
8272 dtrace_attached(void)
8273 {
8274 /*
8275 * dtrace_provider will be non-NULL iff the DTrace driver has
8276 * attached. (It's non-NULL because DTrace is always itself a
8277 * provider.)
8278 */
8279 return (dtrace_provider != NULL);
8280 }
8281
8282 /*
8283 * Remove all the unenabled probes for the given provider. This function is
8284 * not unlike dtrace_unregister(), except that it doesn't remove the provider
8285 * -- just as many of its associated probes as it can.
8286 */
8287 int
8288 dtrace_condense(dtrace_provider_id_t id)
8289 {
8290 dtrace_provider_t *prov = (dtrace_provider_t *)id;
8291 int i;
8292 dtrace_probe_t *probe;
8293
8294 /*
8295 * Make sure this isn't the dtrace provider itself.
8296 */
8297 ASSERT(prov->dtpv_pops.dtps_enable !=
8298 (int (*)(void *, dtrace_id_t, void *))dtrace_enable_nullop);
8299
8300 mutex_enter(&dtrace_provider_lock);
8301 mutex_enter(&dtrace_lock);
8302
8303 /*
8304 * Attempt to destroy the probes associated with this provider.
8305 */
8306 for (i = 0; i < dtrace_nprobes; i++) {
8307 if ((probe = dtrace_probes[i]) == NULL)
8308 continue;
8309
8310 if (probe->dtpr_provider != prov)
8311 continue;
8312
8313 if (probe->dtpr_ecb != NULL)
8314 continue;
8315
8316 dtrace_probes[i] = NULL;
8317
8318 dtrace_hash_remove(dtrace_bymod, probe);
8319 dtrace_hash_remove(dtrace_byfunc, probe);
8320 dtrace_hash_remove(dtrace_byname, probe);
8321
8322 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, i + 1,
8323 probe->dtpr_arg);
8324 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1);
8325 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1);
8326 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1);
8327 kmem_free(probe, sizeof (dtrace_probe_t));
8328 vmem_free(dtrace_arena, (void *)((uintptr_t)i + 1), 1);
8329 }
8330
8331 mutex_exit(&dtrace_lock);
8332 mutex_exit(&dtrace_provider_lock);
8333
8334 return (0);
8335 }
8336
8337 /*
8338 * DTrace Probe Management Functions
8339 *
8340 * The functions in this section perform the DTrace probe management,
8341 * including functions to create probes, look-up probes, and call into the
8342 * providers to request that probes be provided. Some of these functions are
8343 * in the Provider-to-Framework API; these functions can be identified by the
8344 * fact that they are not declared "static".
8345 */
8346
8347 /*
8348 * Create a probe with the specified module name, function name, and name.
8349 */
8350 dtrace_id_t
8351 dtrace_probe_create(dtrace_provider_id_t prov, const char *mod,
8352 const char *func, const char *name, int aframes, void *arg)
8353 {
8354 dtrace_probe_t *probe, **probes;
8355 dtrace_provider_t *provider = (dtrace_provider_t *)prov;
8356 dtrace_id_t id;
8357
8358 if (provider == dtrace_provider) {
8359 ASSERT(MUTEX_HELD(&dtrace_lock));
8360 } else {
8361 mutex_enter(&dtrace_lock);
8362 }
8363
8364 id = (dtrace_id_t)(uintptr_t)vmem_alloc(dtrace_arena, 1,
8365 VM_BESTFIT | VM_SLEEP);
8366 probe = kmem_zalloc(sizeof (dtrace_probe_t), KM_SLEEP);
8367
8368 probe->dtpr_id = id;
8369 probe->dtpr_gen = dtrace_probegen++;
8370 probe->dtpr_mod = dtrace_strdup(mod);
8371 probe->dtpr_func = dtrace_strdup(func);
8372 probe->dtpr_name = dtrace_strdup(name);
8373 probe->dtpr_arg = arg;
8374 probe->dtpr_aframes = aframes;
8375 probe->dtpr_provider = provider;
8376
8377 dtrace_hash_add(dtrace_bymod, probe);
8378 dtrace_hash_add(dtrace_byfunc, probe);
8379 dtrace_hash_add(dtrace_byname, probe);
8380
8381 if (id - 1 >= dtrace_nprobes) {
8382 size_t osize = dtrace_nprobes * sizeof (dtrace_probe_t *);
8383 size_t nsize = osize << 1;
8384
8385 if (nsize == 0) {
8386 ASSERT(osize == 0);
8387 ASSERT(dtrace_probes == NULL);
8388 nsize = sizeof (dtrace_probe_t *);
8389 }
8390
8391 probes = kmem_zalloc(nsize, KM_SLEEP);
8392
8393 if (dtrace_probes == NULL) {
8394 ASSERT(osize == 0);
8395 dtrace_probes = probes;
8396 dtrace_nprobes = 1;
8397 } else {
8398 dtrace_probe_t **oprobes = dtrace_probes;
8399
8400 bcopy(oprobes, probes, osize);
8401 dtrace_membar_producer();
8402 dtrace_probes = probes;
8403
8404 dtrace_sync();
8405
8406 /*
8407 * All CPUs are now seeing the new probes array; we can
8408 * safely free the old array.
8409 */
8410 kmem_free(oprobes, osize);
8411 dtrace_nprobes <<= 1;
8412 }
8413
8414 ASSERT(id - 1 < dtrace_nprobes);
8415 }
8416
8417 ASSERT(dtrace_probes[id - 1] == NULL);
8418 dtrace_probes[id - 1] = probe;
8419
8420 if (provider != dtrace_provider)
8421 mutex_exit(&dtrace_lock);
8422
8423 return (id);
8424 }
8425
8426 static dtrace_probe_t *
8427 dtrace_probe_lookup_id(dtrace_id_t id)
8428 {
8429 ASSERT(MUTEX_HELD(&dtrace_lock));
8430
8431 if (id == 0 || id > dtrace_nprobes)
8432 return (NULL);
8433
8434 return (dtrace_probes[id - 1]);
8435 }
8436
8437 static int
8438 dtrace_probe_lookup_match(dtrace_probe_t *probe, void *arg)
8439 {
8440 *((dtrace_id_t *)arg) = probe->dtpr_id;
8441
8442 return (DTRACE_MATCH_DONE);
8443 }
8444
8445 /*
8446 * Look up a probe based on provider and one or more of module name, function
8447 * name and probe name.
8448 */
8449 dtrace_id_t
8450 dtrace_probe_lookup(dtrace_provider_id_t prid, const char *mod,
8451 const char *func, const char *name)
8452 {
8453 dtrace_probekey_t pkey;
8454 dtrace_id_t id;
8455 int match;
8456
8457 pkey.dtpk_prov = ((dtrace_provider_t *)prid)->dtpv_name;
8458 pkey.dtpk_pmatch = &dtrace_match_string;
8459 pkey.dtpk_mod = mod;
8460 pkey.dtpk_mmatch = mod ? &dtrace_match_string : &dtrace_match_nul;
8461 pkey.dtpk_func = func;
8462 pkey.dtpk_fmatch = func ? &dtrace_match_string : &dtrace_match_nul;
8463 pkey.dtpk_name = name;
8464 pkey.dtpk_nmatch = name ? &dtrace_match_string : &dtrace_match_nul;
8465 pkey.dtpk_id = DTRACE_IDNONE;
8466
8467 mutex_enter(&dtrace_lock);
8468 match = dtrace_match(&pkey, DTRACE_PRIV_ALL, 0, 0,
8469 dtrace_probe_lookup_match, &id);
8470 mutex_exit(&dtrace_lock);
8471
8472 ASSERT(match == 1 || match == 0);
8473 return (match ? id : 0);
8474 }
8475
8476 /*
8477 * Returns the probe argument associated with the specified probe.
8478 */
8479 void *
8480 dtrace_probe_arg(dtrace_provider_id_t id, dtrace_id_t pid)
8481 {
8482 dtrace_probe_t *probe;
8483 void *rval = NULL;
8484
8485 mutex_enter(&dtrace_lock);
8486
8487 if ((probe = dtrace_probe_lookup_id(pid)) != NULL &&
8488 probe->dtpr_provider == (dtrace_provider_t *)id)
8489 rval = probe->dtpr_arg;
8490
8491 mutex_exit(&dtrace_lock);
8492
8493 return (rval);
8494 }
8495
8496 /*
8497 * Copy a probe into a probe description.
8498 */
8499 static void
8500 dtrace_probe_description(const dtrace_probe_t *prp, dtrace_probedesc_t *pdp)
8501 {
8502 bzero(pdp, sizeof (dtrace_probedesc_t));
8503 pdp->dtpd_id = prp->dtpr_id;
8504
8505 (void) strncpy(pdp->dtpd_provider,
8506 prp->dtpr_provider->dtpv_name, DTRACE_PROVNAMELEN - 1);
8507
8508 (void) strncpy(pdp->dtpd_mod, prp->dtpr_mod, DTRACE_MODNAMELEN - 1);
8509 (void) strncpy(pdp->dtpd_func, prp->dtpr_func, DTRACE_FUNCNAMELEN - 1);
8510 (void) strncpy(pdp->dtpd_name, prp->dtpr_name, DTRACE_NAMELEN - 1);
8511 }
8512
8513 /*
8514 * Called to indicate that a probe -- or probes -- should be provided by a
8515 * specfied provider. If the specified description is NULL, the provider will
8516 * be told to provide all of its probes. (This is done whenever a new
8517 * consumer comes along, or whenever a retained enabling is to be matched.) If
8518 * the specified description is non-NULL, the provider is given the
8519 * opportunity to dynamically provide the specified probe, allowing providers
8520 * to support the creation of probes on-the-fly. (So-called _autocreated_
8521 * probes.) If the provider is NULL, the operations will be applied to all
8522 * providers; if the provider is non-NULL the operations will only be applied
8523 * to the specified provider. The dtrace_provider_lock must be held, and the
8524 * dtrace_lock must _not_ be held -- the provider's dtps_provide() operation
8525 * will need to grab the dtrace_lock when it reenters the framework through
8526 * dtrace_probe_lookup(), dtrace_probe_create(), etc.
8527 */
8528 static void
8529 dtrace_probe_provide(dtrace_probedesc_t *desc, dtrace_provider_t *prv)
8530 {
8531 struct modctl *ctl;
8532 int all = 0;
8533
8534 ASSERT(MUTEX_HELD(&dtrace_provider_lock));
8535
8536 if (prv == NULL) {
8537 all = 1;
8538 prv = dtrace_provider;
8539 }
8540
8541 do {
8542 /*
8543 * First, call the blanket provide operation.
8544 */
8545 prv->dtpv_pops.dtps_provide(prv->dtpv_arg, desc);
8546
8547 /*
8548 * Now call the per-module provide operation. We will grab
8549 * mod_lock to prevent the list from being modified. Note
8550 * that this also prevents the mod_busy bits from changing.
8551 * (mod_busy can only be changed with mod_lock held.)
8552 */
8553 mutex_enter(&mod_lock);
8554
8555 ctl = &modules;
8556 do {
8557 if (ctl->mod_busy || ctl->mod_mp == NULL)
8558 continue;
8559
8560 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl);
8561
8562 } while ((ctl = ctl->mod_next) != &modules);
8563
8564 mutex_exit(&mod_lock);
8565 } while (all && (prv = prv->dtpv_next) != NULL);
8566 }
8567
8568 /*
8569 * Iterate over each probe, and call the Framework-to-Provider API function
8570 * denoted by offs.
8571 */
8572 static void
8573 dtrace_probe_foreach(uintptr_t offs)
8574 {
8575 dtrace_provider_t *prov;
8576 void (*func)(void *, dtrace_id_t, void *);
8577 dtrace_probe_t *probe;
8578 dtrace_icookie_t cookie;
8579 int i;
8580
8581 /*
8582 * We disable interrupts to walk through the probe array. This is
8583 * safe -- the dtrace_sync() in dtrace_unregister() assures that we
8584 * won't see stale data.
8585 */
8586 cookie = dtrace_interrupt_disable();
8587
8588 for (i = 0; i < dtrace_nprobes; i++) {
8589 if ((probe = dtrace_probes[i]) == NULL)
8590 continue;
8591
8592 if (probe->dtpr_ecb == NULL) {
8593 /*
8594 * This probe isn't enabled -- don't call the function.
8595 */
8596 continue;
8597 }
8598
8599 prov = probe->dtpr_provider;
8600 func = *((void(**)(void *, dtrace_id_t, void *))
8601 ((uintptr_t)&prov->dtpv_pops + offs));
8602
8603 func(prov->dtpv_arg, i + 1, probe->dtpr_arg);
8604 }
8605
8606 dtrace_interrupt_enable(cookie);
8607 }
8608
8609 static int
8610 dtrace_probe_enable(const dtrace_probedesc_t *desc, dtrace_enabling_t *enab)
8611 {
8612 dtrace_probekey_t pkey;
8613 uint32_t priv;
8614 uid_t uid;
8615 zoneid_t zoneid;
8616
8617 ASSERT(MUTEX_HELD(&dtrace_lock));
8618 dtrace_ecb_create_cache = NULL;
8619
8620 if (desc == NULL) {
8621 /*
8622 * If we're passed a NULL description, we're being asked to
8623 * create an ECB with a NULL probe.
8624 */
8625 (void) dtrace_ecb_create_enable(NULL, enab);
8626 return (0);
8627 }
8628
8629 dtrace_probekey(desc, &pkey);
8630 dtrace_cred2priv(enab->dten_vstate->dtvs_state->dts_cred.dcr_cred,
8631 &priv, &uid, &zoneid);
8632
8633 return (dtrace_match(&pkey, priv, uid, zoneid, dtrace_ecb_create_enable,
8634 enab));
8635 }
8636
8637 /*
8638 * DTrace Helper Provider Functions
8639 */
8640 static void
8641 dtrace_dofattr2attr(dtrace_attribute_t *attr, const dof_attr_t dofattr)
8642 {
8643 attr->dtat_name = DOF_ATTR_NAME(dofattr);
8644 attr->dtat_data = DOF_ATTR_DATA(dofattr);
8645 attr->dtat_class = DOF_ATTR_CLASS(dofattr);
8646 }
8647
8648 static void
8649 dtrace_dofprov2hprov(dtrace_helper_provdesc_t *hprov,
8650 const dof_provider_t *dofprov, char *strtab)
8651 {
8652 hprov->dthpv_provname = strtab + dofprov->dofpv_name;
8653 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_provider,
8654 dofprov->dofpv_provattr);
8655 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_mod,
8656 dofprov->dofpv_modattr);
8657 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_func,
8658 dofprov->dofpv_funcattr);
8659 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_name,
8660 dofprov->dofpv_nameattr);
8661 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_args,
8662 dofprov->dofpv_argsattr);
8663 }
8664
8665 static void
8666 dtrace_helper_provide_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid)
8667 {
8668 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
8669 dof_hdr_t *dof = (dof_hdr_t *)daddr;
8670 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec;
8671 dof_provider_t *provider;
8672 dof_probe_t *probe;
8673 uint32_t *off, *enoff;
8674 uint8_t *arg;
8675 char *strtab;
8676 uint_t i, nprobes;
8677 dtrace_helper_provdesc_t dhpv;
8678 dtrace_helper_probedesc_t dhpb;
8679 dtrace_meta_t *meta = dtrace_meta_pid;
8680 dtrace_mops_t *mops = &meta->dtm_mops;
8681 void *parg;
8682
8683 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset);
8684 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
8685 provider->dofpv_strtab * dof->dofh_secsize);
8686 prb_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
8687 provider->dofpv_probes * dof->dofh_secsize);
8688 arg_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
8689 provider->dofpv_prargs * dof->dofh_secsize);
8690 off_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
8691 provider->dofpv_proffs * dof->dofh_secsize);
8692
8693 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset);
8694 off = (uint32_t *)(uintptr_t)(daddr + off_sec->dofs_offset);
8695 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset);
8696 enoff = NULL;
8697
8698 /*
8699 * See dtrace_helper_provider_validate().
8700 */
8701 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 &&
8702 provider->dofpv_prenoffs != DOF_SECT_NONE) {
8703 enoff_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
8704 provider->dofpv_prenoffs * dof->dofh_secsize);
8705 enoff = (uint32_t *)(uintptr_t)(daddr + enoff_sec->dofs_offset);
8706 }
8707
8708 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize;
8709
8710 /*
8711 * Create the provider.
8712 */
8713 dtrace_dofprov2hprov(&dhpv, provider, strtab);
8714
8715 if ((parg = mops->dtms_provide_pid(meta->dtm_arg, &dhpv, pid)) == NULL)
8716 return;
8717
8718 meta->dtm_count++;
8719
8720 /*
8721 * Create the probes.
8722 */
8723 for (i = 0; i < nprobes; i++) {
8724 probe = (dof_probe_t *)(uintptr_t)(daddr +
8725 prb_sec->dofs_offset + i * prb_sec->dofs_entsize);
8726
8727 dhpb.dthpb_mod = dhp->dofhp_mod;
8728 dhpb.dthpb_func = strtab + probe->dofpr_func;
8729 dhpb.dthpb_name = strtab + probe->dofpr_name;
8730 dhpb.dthpb_base = probe->dofpr_addr;
8731 dhpb.dthpb_offs = off + probe->dofpr_offidx;
8732 dhpb.dthpb_noffs = probe->dofpr_noffs;
8733 if (enoff != NULL) {
8734 dhpb.dthpb_enoffs = enoff + probe->dofpr_enoffidx;
8735 dhpb.dthpb_nenoffs = probe->dofpr_nenoffs;
8736 } else {
8737 dhpb.dthpb_enoffs = NULL;
8738 dhpb.dthpb_nenoffs = 0;
8739 }
8740 dhpb.dthpb_args = arg + probe->dofpr_argidx;
8741 dhpb.dthpb_nargc = probe->dofpr_nargc;
8742 dhpb.dthpb_xargc = probe->dofpr_xargc;
8743 dhpb.dthpb_ntypes = strtab + probe->dofpr_nargv;
8744 dhpb.dthpb_xtypes = strtab + probe->dofpr_xargv;
8745
8746 mops->dtms_create_probe(meta->dtm_arg, parg, &dhpb);
8747 }
8748 }
8749
8750 static void
8751 dtrace_helper_provide(dof_helper_t *dhp, pid_t pid)
8752 {
8753 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
8754 dof_hdr_t *dof = (dof_hdr_t *)daddr;
8755 int i;
8756
8757 ASSERT(MUTEX_HELD(&dtrace_meta_lock));
8758
8759 for (i = 0; i < dof->dofh_secnum; i++) {
8760 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr +
8761 dof->dofh_secoff + i * dof->dofh_secsize);
8762
8763 if (sec->dofs_type != DOF_SECT_PROVIDER)
8764 continue;
8765
8766 dtrace_helper_provide_one(dhp, sec, pid);
8767 }
8768
8769 /*
8770 * We may have just created probes, so we must now rematch against
8771 * any retained enablings. Note that this call will acquire both
8772 * cpu_lock and dtrace_lock; the fact that we are holding
8773 * dtrace_meta_lock now is what defines the ordering with respect to
8774 * these three locks.
8775 */
8776 dtrace_enabling_matchall();
8777 }
8778
8779 static void
8780 dtrace_helper_provider_remove_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid)
8781 {
8782 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
8783 dof_hdr_t *dof = (dof_hdr_t *)daddr;
8784 dof_sec_t *str_sec;
8785 dof_provider_t *provider;
8786 char *strtab;
8787 dtrace_helper_provdesc_t dhpv;
8788 dtrace_meta_t *meta = dtrace_meta_pid;
8789 dtrace_mops_t *mops = &meta->dtm_mops;
8790
8791 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset);
8792 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
8793 provider->dofpv_strtab * dof->dofh_secsize);
8794
8795 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset);
8796
8797 /*
8798 * Create the provider.
8799 */
8800 dtrace_dofprov2hprov(&dhpv, provider, strtab);
8801
8802 mops->dtms_remove_pid(meta->dtm_arg, &dhpv, pid);
8803
8804 meta->dtm_count--;
8805 }
8806
8807 static void
8808 dtrace_helper_provider_remove(dof_helper_t *dhp, pid_t pid)
8809 {
8810 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
8811 dof_hdr_t *dof = (dof_hdr_t *)daddr;
8812 int i;
8813
8814 ASSERT(MUTEX_HELD(&dtrace_meta_lock));
8815
8816 for (i = 0; i < dof->dofh_secnum; i++) {
8817 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr +
8818 dof->dofh_secoff + i * dof->dofh_secsize);
8819
8820 if (sec->dofs_type != DOF_SECT_PROVIDER)
8821 continue;
8822
8823 dtrace_helper_provider_remove_one(dhp, sec, pid);
8824 }
8825 }
8826
8827 /*
8828 * DTrace Meta Provider-to-Framework API Functions
8829 *
8830 * These functions implement the Meta Provider-to-Framework API, as described
8831 * in <sys/dtrace.h>.
8832 */
8833 int
8834 dtrace_meta_register(const char *name, const dtrace_mops_t *mops, void *arg,
8835 dtrace_meta_provider_id_t *idp)
8836 {
8837 dtrace_meta_t *meta;
8838 dtrace_helpers_t *help, *next;
8839 int i;
8840
8841 *idp = DTRACE_METAPROVNONE;
8842
8843 /*
8844 * We strictly don't need the name, but we hold onto it for
8845 * debuggability. All hail error queues!
8846 */
8847 if (name == NULL) {
8848 cmn_err(CE_WARN, "failed to register meta-provider: "
8849 "invalid name");
8850 return (EINVAL);
8851 }
8852
8853 if (mops == NULL ||
8854 mops->dtms_create_probe == NULL ||
8855 mops->dtms_provide_pid == NULL ||
8856 mops->dtms_remove_pid == NULL) {
8857 cmn_err(CE_WARN, "failed to register meta-register %s: "
8858 "invalid ops", name);
8859 return (EINVAL);
8860 }
8861
8862 meta = kmem_zalloc(sizeof (dtrace_meta_t), KM_SLEEP);
8863 meta->dtm_mops = *mops;
8864 meta->dtm_name = kmem_alloc(strlen(name) + 1, KM_SLEEP);
8865 (void) strcpy(meta->dtm_name, name);
8866 meta->dtm_arg = arg;
8867
8868 mutex_enter(&dtrace_meta_lock);
8869 mutex_enter(&dtrace_lock);
8870
8871 if (dtrace_meta_pid != NULL) {
8872 mutex_exit(&dtrace_lock);
8873 mutex_exit(&dtrace_meta_lock);
8874 cmn_err(CE_WARN, "failed to register meta-register %s: "
8875 "user-land meta-provider exists", name);
8876 kmem_free(meta->dtm_name, strlen(meta->dtm_name) + 1);
8877 kmem_free(meta, sizeof (dtrace_meta_t));
8878 return (EINVAL);
8879 }
8880
8881 dtrace_meta_pid = meta;
8882 *idp = (dtrace_meta_provider_id_t)meta;
8883
8884 /*
8885 * If there are providers and probes ready to go, pass them
8886 * off to the new meta provider now.
8887 */
8888
8889 help = dtrace_deferred_pid;
8890 dtrace_deferred_pid = NULL;
8891
8892 mutex_exit(&dtrace_lock);
8893
8894 while (help != NULL) {
8895 for (i = 0; i < help->dthps_nprovs; i++) {
8896 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov,
8897 help->dthps_pid);
8898 }
8899
8900 next = help->dthps_next;
8901 help->dthps_next = NULL;
8902 help->dthps_prev = NULL;
8903 help->dthps_deferred = 0;
8904 help = next;
8905 }
8906
8907 mutex_exit(&dtrace_meta_lock);
8908
8909 return (0);
8910 }
8911
8912 int
8913 dtrace_meta_unregister(dtrace_meta_provider_id_t id)
8914 {
8915 dtrace_meta_t **pp, *old = (dtrace_meta_t *)id;
8916
8917 mutex_enter(&dtrace_meta_lock);
8918 mutex_enter(&dtrace_lock);
8919
8920 if (old == dtrace_meta_pid) {
8921 pp = &dtrace_meta_pid;
8922 } else {
8923 panic("attempt to unregister non-existent "
8924 "dtrace meta-provider %p\n", (void *)old);
8925 }
8926
8927 if (old->dtm_count != 0) {
8928 mutex_exit(&dtrace_lock);
8929 mutex_exit(&dtrace_meta_lock);
8930 return (EBUSY);
8931 }
8932
8933 *pp = NULL;
8934
8935 mutex_exit(&dtrace_lock);
8936 mutex_exit(&dtrace_meta_lock);
8937
8938 kmem_free(old->dtm_name, strlen(old->dtm_name) + 1);
8939 kmem_free(old, sizeof (dtrace_meta_t));
8940
8941 return (0);
8942 }
8943
8944
8945 /*
8946 * DTrace DIF Object Functions
8947 */
8948 static int
8949 dtrace_difo_err(uint_t pc, const char *format, ...)
8950 {
8951 if (dtrace_err_verbose) {
8952 va_list alist;
8953
8954 (void) uprintf("dtrace DIF object error: [%u]: ", pc);
8955 va_start(alist, format);
8956 (void) vuprintf(format, alist);
8957 va_end(alist);
8958 }
8959
8960 #ifdef DTRACE_ERRDEBUG
8961 dtrace_errdebug(format);
8962 #endif
8963 return (1);
8964 }
8965
8966 /*
8967 * Validate a DTrace DIF object by checking the IR instructions. The following
8968 * rules are currently enforced by dtrace_difo_validate():
8969 *
8970 * 1. Each instruction must have a valid opcode
8971 * 2. Each register, string, variable, or subroutine reference must be valid
8972 * 3. No instruction can modify register %r0 (must be zero)
8973 * 4. All instruction reserved bits must be set to zero
8974 * 5. The last instruction must be a "ret" instruction
8975 * 6. All branch targets must reference a valid instruction _after_ the branch
8976 */
8977 static int
8978 dtrace_difo_validate(dtrace_difo_t *dp, dtrace_vstate_t *vstate, uint_t nregs,
8979 cred_t *cr)
8980 {
8981 int err = 0, i;
8982 int (*efunc)(uint_t pc, const char *, ...) = dtrace_difo_err;
8983 int kcheckload;
8984 uint_t pc;
8985
8986 kcheckload = cr == NULL ||
8987 (vstate->dtvs_state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) == 0;
8988
8989 dp->dtdo_destructive = 0;
8990
8991 for (pc = 0; pc < dp->dtdo_len && err == 0; pc++) {
8992 dif_instr_t instr = dp->dtdo_buf[pc];
8993
8994 uint_t r1 = DIF_INSTR_R1(instr);
8995 uint_t r2 = DIF_INSTR_R2(instr);
8996 uint_t rd = DIF_INSTR_RD(instr);
8997 uint_t rs = DIF_INSTR_RS(instr);
8998 uint_t label = DIF_INSTR_LABEL(instr);
8999 uint_t v = DIF_INSTR_VAR(instr);
9000 uint_t subr = DIF_INSTR_SUBR(instr);
9001 uint_t type = DIF_INSTR_TYPE(instr);
9002 uint_t op = DIF_INSTR_OP(instr);
9003
9004 switch (op) {
9005 case DIF_OP_OR:
9006 case DIF_OP_XOR:
9007 case DIF_OP_AND:
9008 case DIF_OP_SLL:
9009 case DIF_OP_SRL:
9010 case DIF_OP_SRA:
9011 case DIF_OP_SUB:
9012 case DIF_OP_ADD:
9013 case DIF_OP_MUL:
9014 case DIF_OP_SDIV:
9015 case DIF_OP_UDIV:
9016 case DIF_OP_SREM:
9017 case DIF_OP_UREM:
9018 case DIF_OP_COPYS:
9019 if (r1 >= nregs)
9020 err += efunc(pc, "invalid register %u\n", r1);
9021 if (r2 >= nregs)
9022 err += efunc(pc, "invalid register %u\n", r2);
9023 if (rd >= nregs)
9024 err += efunc(pc, "invalid register %u\n", rd);
9025 if (rd == 0)
9026 err += efunc(pc, "cannot write to %r0\n");
9027 break;
9028 case DIF_OP_NOT:
9029 case DIF_OP_MOV:
9030 case DIF_OP_ALLOCS:
9031 if (r1 >= nregs)
9032 err += efunc(pc, "invalid register %u\n", r1);
9033 if (r2 != 0)
9034 err += efunc(pc, "non-zero reserved bits\n");
9035 if (rd >= nregs)
9036 err += efunc(pc, "invalid register %u\n", rd);
9037 if (rd == 0)
9038 err += efunc(pc, "cannot write to %r0\n");
9039 break;
9040 case DIF_OP_LDSB:
9041 case DIF_OP_LDSH:
9042 case DIF_OP_LDSW:
9043 case DIF_OP_LDUB:
9044 case DIF_OP_LDUH:
9045 case DIF_OP_LDUW:
9046 case DIF_OP_LDX:
9047 if (r1 >= nregs)
9048 err += efunc(pc, "invalid register %u\n", r1);
9049 if (r2 != 0)
9050 err += efunc(pc, "non-zero reserved bits\n");
9051 if (rd >= nregs)
9052 err += efunc(pc, "invalid register %u\n", rd);
9053 if (rd == 0)
9054 err += efunc(pc, "cannot write to %r0\n");
9055 if (kcheckload)
9056 dp->dtdo_buf[pc] = DIF_INSTR_LOAD(op +
9057 DIF_OP_RLDSB - DIF_OP_LDSB, r1, rd);
9058 break;
9059 case DIF_OP_RLDSB:
9060 case DIF_OP_RLDSH:
9061 case DIF_OP_RLDSW:
9062 case DIF_OP_RLDUB:
9063 case DIF_OP_RLDUH:
9064 case DIF_OP_RLDUW:
9065 case DIF_OP_RLDX:
9066 if (r1 >= nregs)
9067 err += efunc(pc, "invalid register %u\n", r1);
9068 if (r2 != 0)
9069 err += efunc(pc, "non-zero reserved bits\n");
9070 if (rd >= nregs)
9071 err += efunc(pc, "invalid register %u\n", rd);
9072 if (rd == 0)
9073 err += efunc(pc, "cannot write to %r0\n");
9074 break;
9075 case DIF_OP_ULDSB:
9076 case DIF_OP_ULDSH:
9077 case DIF_OP_ULDSW:
9078 case DIF_OP_ULDUB:
9079 case DIF_OP_ULDUH:
9080 case DIF_OP_ULDUW:
9081 case DIF_OP_ULDX:
9082 if (r1 >= nregs)
9083 err += efunc(pc, "invalid register %u\n", r1);
9084 if (r2 != 0)
9085 err += efunc(pc, "non-zero reserved bits\n");
9086 if (rd >= nregs)
9087 err += efunc(pc, "invalid register %u\n", rd);
9088 if (rd == 0)
9089 err += efunc(pc, "cannot write to %r0\n");
9090 break;
9091 case DIF_OP_STB:
9092 case DIF_OP_STH:
9093 case DIF_OP_STW:
9094 case DIF_OP_STX:
9095 if (r1 >= nregs)
9096 err += efunc(pc, "invalid register %u\n", r1);
9097 if (r2 != 0)
9098 err += efunc(pc, "non-zero reserved bits\n");
9099 if (rd >= nregs)
9100 err += efunc(pc, "invalid register %u\n", rd);
9101 if (rd == 0)
9102 err += efunc(pc, "cannot write to 0 address\n");
9103 break;
9104 case DIF_OP_CMP:
9105 case DIF_OP_SCMP:
9106 if (r1 >= nregs)
9107 err += efunc(pc, "invalid register %u\n", r1);
9108 if (r2 >= nregs)
9109 err += efunc(pc, "invalid register %u\n", r2);
9110 if (rd != 0)
9111 err += efunc(pc, "non-zero reserved bits\n");
9112 break;
9113 case DIF_OP_TST:
9114 if (r1 >= nregs)
9115 err += efunc(pc, "invalid register %u\n", r1);
9116 if (r2 != 0 || rd != 0)
9117 err += efunc(pc, "non-zero reserved bits\n");
9118 break;
9119 case DIF_OP_BA:
9120 case DIF_OP_BE:
9121 case DIF_OP_BNE:
9122 case DIF_OP_BG:
9123 case DIF_OP_BGU:
9124 case DIF_OP_BGE:
9125 case DIF_OP_BGEU:
9126 case DIF_OP_BL:
9127 case DIF_OP_BLU:
9128 case DIF_OP_BLE:
9129 case DIF_OP_BLEU:
9130 if (label >= dp->dtdo_len) {
9131 err += efunc(pc, "invalid branch target %u\n",
9132 label);
9133 }
9134 if (label <= pc) {
9135 err += efunc(pc, "backward branch to %u\n",
9136 label);
9137 }
9138 break;
9139 case DIF_OP_RET:
9140 if (r1 != 0 || r2 != 0)
9141 err += efunc(pc, "non-zero reserved bits\n");
9142 if (rd >= nregs)
9143 err += efunc(pc, "invalid register %u\n", rd);
9144 break;
9145 case DIF_OP_NOP:
9146 case DIF_OP_POPTS:
9147 case DIF_OP_FLUSHTS:
9148 if (r1 != 0 || r2 != 0 || rd != 0)
9149 err += efunc(pc, "non-zero reserved bits\n");
9150 break;
9151 case DIF_OP_SETX:
9152 if (DIF_INSTR_INTEGER(instr) >= dp->dtdo_intlen) {
9153 err += efunc(pc, "invalid integer ref %u\n",
9154 DIF_INSTR_INTEGER(instr));
9155 }
9156 if (rd >= nregs)
9157 err += efunc(pc, "invalid register %u\n", rd);
9158 if (rd == 0)
9159 err += efunc(pc, "cannot write to %r0\n");
9160 break;
9161 case DIF_OP_SETS:
9162 if (DIF_INSTR_STRING(instr) >= dp->dtdo_strlen) {
9163 err += efunc(pc, "invalid string ref %u\n",
9164 DIF_INSTR_STRING(instr));
9165 }
9166 if (rd >= nregs)
9167 err += efunc(pc, "invalid register %u\n", rd);
9168 if (rd == 0)
9169 err += efunc(pc, "cannot write to %r0\n");
9170 break;
9171 case DIF_OP_LDGA:
9172 case DIF_OP_LDTA:
9173 if (r1 > DIF_VAR_ARRAY_MAX)
9174 err += efunc(pc, "invalid array %u\n", r1);
9175 if (r2 >= nregs)
9176 err += efunc(pc, "invalid register %u\n", r2);
9177 if (rd >= nregs)
9178 err += efunc(pc, "invalid register %u\n", rd);
9179 if (rd == 0)
9180 err += efunc(pc, "cannot write to %r0\n");
9181 break;
9182 case DIF_OP_LDGS:
9183 case DIF_OP_LDTS:
9184 case DIF_OP_LDLS:
9185 case DIF_OP_LDGAA:
9186 case DIF_OP_LDTAA:
9187 if (v < DIF_VAR_OTHER_MIN || v > DIF_VAR_OTHER_MAX)
9188 err += efunc(pc, "invalid variable %u\n", v);
9189 if (rd >= nregs)
9190 err += efunc(pc, "invalid register %u\n", rd);
9191 if (rd == 0)
9192 err += efunc(pc, "cannot write to %r0\n");
9193 break;
9194 case DIF_OP_STGS:
9195 case DIF_OP_STTS:
9196 case DIF_OP_STLS:
9197 case DIF_OP_STGAA:
9198 case DIF_OP_STTAA:
9199 if (v < DIF_VAR_OTHER_UBASE || v > DIF_VAR_OTHER_MAX)
9200 err += efunc(pc, "invalid variable %u\n", v);
9201 if (rs >= nregs)
9202 err += efunc(pc, "invalid register %u\n", rd);
9203 break;
9204 case DIF_OP_CALL:
9205 if (subr > DIF_SUBR_MAX)
9206 err += efunc(pc, "invalid subr %u\n", subr);
9207 if (rd >= nregs)
9208 err += efunc(pc, "invalid register %u\n", rd);
9209 if (rd == 0)
9210 err += efunc(pc, "cannot write to %r0\n");
9211
9212 if (subr == DIF_SUBR_COPYOUT ||
9213 subr == DIF_SUBR_COPYOUTSTR) {
9214 dp->dtdo_destructive = 1;
9215 }
9216
9217 if (subr == DIF_SUBR_GETF) {
9218 /*
9219 * If we have a getf() we need to record that
9220 * in our state. Note that our state can be
9221 * NULL if this is a helper -- but in that
9222 * case, the call to getf() is itself illegal,
9223 * and will be caught (slightly later) when
9224 * the helper is validated.
9225 */
9226 if (vstate->dtvs_state != NULL)
9227 vstate->dtvs_state->dts_getf++;
9228 }
9229
9230 break;
9231 case DIF_OP_PUSHTR:
9232 if (type != DIF_TYPE_STRING && type != DIF_TYPE_CTF)
9233 err += efunc(pc, "invalid ref type %u\n", type);
9234 if (r2 >= nregs)
9235 err += efunc(pc, "invalid register %u\n", r2);
9236 if (rs >= nregs)
9237 err += efunc(pc, "invalid register %u\n", rs);
9238 break;
9239 case DIF_OP_PUSHTV:
9240 if (type != DIF_TYPE_CTF)
9241 err += efunc(pc, "invalid val type %u\n", type);
9242 if (r2 >= nregs)
9243 err += efunc(pc, "invalid register %u\n", r2);
9244 if (rs >= nregs)
9245 err += efunc(pc, "invalid register %u\n", rs);
9246 break;
9247 default:
9248 err += efunc(pc, "invalid opcode %u\n",
9249 DIF_INSTR_OP(instr));
9250 }
9251 }
9252
9253 if (dp->dtdo_len != 0 &&
9254 DIF_INSTR_OP(dp->dtdo_buf[dp->dtdo_len - 1]) != DIF_OP_RET) {
9255 err += efunc(dp->dtdo_len - 1,
9256 "expected 'ret' as last DIF instruction\n");
9257 }
9258
9259 if (!(dp->dtdo_rtype.dtdt_flags & (DIF_TF_BYREF | DIF_TF_BYUREF))) {
9260 /*
9261 * If we're not returning by reference, the size must be either
9262 * 0 or the size of one of the base types.
9263 */
9264 switch (dp->dtdo_rtype.dtdt_size) {
9265 case 0:
9266 case sizeof (uint8_t):
9267 case sizeof (uint16_t):
9268 case sizeof (uint32_t):
9269 case sizeof (uint64_t):
9270 break;
9271
9272 default:
9273 err += efunc(dp->dtdo_len - 1, "bad return size\n");
9274 }
9275 }
9276
9277 for (i = 0; i < dp->dtdo_varlen && err == 0; i++) {
9278 dtrace_difv_t *v = &dp->dtdo_vartab[i], *existing = NULL;
9279 dtrace_diftype_t *vt, *et;
9280 uint_t id, ndx;
9281
9282 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL &&
9283 v->dtdv_scope != DIFV_SCOPE_THREAD &&
9284 v->dtdv_scope != DIFV_SCOPE_LOCAL) {
9285 err += efunc(i, "unrecognized variable scope %d\n",
9286 v->dtdv_scope);
9287 break;
9288 }
9289
9290 if (v->dtdv_kind != DIFV_KIND_ARRAY &&
9291 v->dtdv_kind != DIFV_KIND_SCALAR) {
9292 err += efunc(i, "unrecognized variable type %d\n",
9293 v->dtdv_kind);
9294 break;
9295 }
9296
9297 if ((id = v->dtdv_id) > DIF_VARIABLE_MAX) {
9298 err += efunc(i, "%d exceeds variable id limit\n", id);
9299 break;
9300 }
9301
9302 if (id < DIF_VAR_OTHER_UBASE)
9303 continue;
9304
9305 /*
9306 * For user-defined variables, we need to check that this
9307 * definition is identical to any previous definition that we
9308 * encountered.
9309 */
9310 ndx = id - DIF_VAR_OTHER_UBASE;
9311
9312 switch (v->dtdv_scope) {
9313 case DIFV_SCOPE_GLOBAL:
9314 if (ndx < vstate->dtvs_nglobals) {
9315 dtrace_statvar_t *svar;
9316
9317 if ((svar = vstate->dtvs_globals[ndx]) != NULL)
9318 existing = &svar->dtsv_var;
9319 }
9320
9321 break;
9322
9323 case DIFV_SCOPE_THREAD:
9324 if (ndx < vstate->dtvs_ntlocals)
9325 existing = &vstate->dtvs_tlocals[ndx];
9326 break;
9327
9328 case DIFV_SCOPE_LOCAL:
9329 if (ndx < vstate->dtvs_nlocals) {
9330 dtrace_statvar_t *svar;
9331
9332 if ((svar = vstate->dtvs_locals[ndx]) != NULL)
9333 existing = &svar->dtsv_var;
9334 }
9335
9336 break;
9337 }
9338
9339 vt = &v->dtdv_type;
9340
9341 if (vt->dtdt_flags & DIF_TF_BYREF) {
9342 if (vt->dtdt_size == 0) {
9343 err += efunc(i, "zero-sized variable\n");
9344 break;
9345 }
9346
9347 if (v->dtdv_scope == DIFV_SCOPE_GLOBAL &&
9348 vt->dtdt_size > dtrace_global_maxsize) {
9349 err += efunc(i, "oversized by-ref global\n");
9350 break;
9351 }
9352 }
9353
9354 if (existing == NULL || existing->dtdv_id == 0)
9355 continue;
9356
9357 ASSERT(existing->dtdv_id == v->dtdv_id);
9358 ASSERT(existing->dtdv_scope == v->dtdv_scope);
9359
9360 if (existing->dtdv_kind != v->dtdv_kind)
9361 err += efunc(i, "%d changed variable kind\n", id);
9362
9363 et = &existing->dtdv_type;
9364
9365 if (vt->dtdt_flags != et->dtdt_flags) {
9366 err += efunc(i, "%d changed variable type flags\n", id);
9367 break;
9368 }
9369
9370 if (vt->dtdt_size != 0 && vt->dtdt_size != et->dtdt_size) {
9371 err += efunc(i, "%d changed variable type size\n", id);
9372 break;
9373 }
9374 }
9375
9376 return (err);
9377 }
9378
9379 /*
9380 * Validate a DTrace DIF object that it is to be used as a helper. Helpers
9381 * are much more constrained than normal DIFOs. Specifically, they may
9382 * not:
9383 *
9384 * 1. Make calls to subroutines other than copyin(), copyinstr() or
9385 * miscellaneous string routines
9386 * 2. Access DTrace variables other than the args[] array, and the
9387 * curthread, pid, ppid, tid, execname, zonename, uid and gid variables.
9388 * 3. Have thread-local variables.
9389 * 4. Have dynamic variables.
9390 */
9391 static int
9392 dtrace_difo_validate_helper(dtrace_difo_t *dp)
9393 {
9394 int (*efunc)(uint_t pc, const char *, ...) = dtrace_difo_err;
9395 int err = 0;
9396 uint_t pc;
9397
9398 for (pc = 0; pc < dp->dtdo_len; pc++) {
9399 dif_instr_t instr = dp->dtdo_buf[pc];
9400
9401 uint_t v = DIF_INSTR_VAR(instr);
9402 uint_t subr = DIF_INSTR_SUBR(instr);
9403 uint_t op = DIF_INSTR_OP(instr);
9404
9405 switch (op) {
9406 case DIF_OP_OR:
9407 case DIF_OP_XOR:
9408 case DIF_OP_AND:
9409 case DIF_OP_SLL:
9410 case DIF_OP_SRL:
9411 case DIF_OP_SRA:
9412 case DIF_OP_SUB:
9413 case DIF_OP_ADD:
9414 case DIF_OP_MUL:
9415 case DIF_OP_SDIV:
9416 case DIF_OP_UDIV:
9417 case DIF_OP_SREM:
9418 case DIF_OP_UREM:
9419 case DIF_OP_COPYS:
9420 case DIF_OP_NOT:
9421 case DIF_OP_MOV:
9422 case DIF_OP_RLDSB:
9423 case DIF_OP_RLDSH:
9424 case DIF_OP_RLDSW:
9425 case DIF_OP_RLDUB:
9426 case DIF_OP_RLDUH:
9427 case DIF_OP_RLDUW:
9428 case DIF_OP_RLDX:
9429 case DIF_OP_ULDSB:
9430 case DIF_OP_ULDSH:
9431 case DIF_OP_ULDSW:
9432 case DIF_OP_ULDUB:
9433 case DIF_OP_ULDUH:
9434 case DIF_OP_ULDUW:
9435 case DIF_OP_ULDX:
9436 case DIF_OP_STB:
9437 case DIF_OP_STH:
9438 case DIF_OP_STW:
9439 case DIF_OP_STX:
9440 case DIF_OP_ALLOCS:
9441 case DIF_OP_CMP:
9442 case DIF_OP_SCMP:
9443 case DIF_OP_TST:
9444 case DIF_OP_BA:
9445 case DIF_OP_BE:
9446 case DIF_OP_BNE:
9447 case DIF_OP_BG:
9448 case DIF_OP_BGU:
9449 case DIF_OP_BGE:
9450 case DIF_OP_BGEU:
9451 case DIF_OP_BL:
9452 case DIF_OP_BLU:
9453 case DIF_OP_BLE:
9454 case DIF_OP_BLEU:
9455 case DIF_OP_RET:
9456 case DIF_OP_NOP:
9457 case DIF_OP_POPTS:
9458 case DIF_OP_FLUSHTS:
9459 case DIF_OP_SETX:
9460 case DIF_OP_SETS:
9461 case DIF_OP_LDGA:
9462 case DIF_OP_LDLS:
9463 case DIF_OP_STGS:
9464 case DIF_OP_STLS:
9465 case DIF_OP_PUSHTR:
9466 case DIF_OP_PUSHTV:
9467 break;
9468
9469 case DIF_OP_LDGS:
9470 if (v >= DIF_VAR_OTHER_UBASE)
9471 break;
9472
9473 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9)
9474 break;
9475
9476 if (v == DIF_VAR_CURTHREAD || v == DIF_VAR_PID ||
9477 v == DIF_VAR_PPID || v == DIF_VAR_TID ||
9478 v == DIF_VAR_EXECNAME || v == DIF_VAR_ZONENAME ||
9479 v == DIF_VAR_UID || v == DIF_VAR_GID)
9480 break;
9481
9482 err += efunc(pc, "illegal variable %u\n", v);
9483 break;
9484
9485 case DIF_OP_LDTA:
9486 case DIF_OP_LDTS:
9487 case DIF_OP_LDGAA:
9488 case DIF_OP_LDTAA:
9489 err += efunc(pc, "illegal dynamic variable load\n");
9490 break;
9491
9492 case DIF_OP_STTS:
9493 case DIF_OP_STGAA:
9494 case DIF_OP_STTAA:
9495 err += efunc(pc, "illegal dynamic variable store\n");
9496 break;
9497
9498 case DIF_OP_CALL:
9499 if (subr == DIF_SUBR_ALLOCA ||
9500 subr == DIF_SUBR_BCOPY ||
9501 subr == DIF_SUBR_COPYIN ||
9502 subr == DIF_SUBR_COPYINTO ||
9503 subr == DIF_SUBR_COPYINSTR ||
9504 subr == DIF_SUBR_INDEX ||
9505 subr == DIF_SUBR_INET_NTOA ||
9506 subr == DIF_SUBR_INET_NTOA6 ||
9507 subr == DIF_SUBR_INET_NTOP ||
9508 subr == DIF_SUBR_JSON ||
9509 subr == DIF_SUBR_LLTOSTR ||
9510 subr == DIF_SUBR_STRTOLL ||
9511 subr == DIF_SUBR_RINDEX ||
9512 subr == DIF_SUBR_STRCHR ||
9513 subr == DIF_SUBR_STRJOIN ||
9514 subr == DIF_SUBR_STRRCHR ||
9515 subr == DIF_SUBR_STRSTR ||
9516 subr == DIF_SUBR_HTONS ||
9517 subr == DIF_SUBR_HTONL ||
9518 subr == DIF_SUBR_HTONLL ||
9519 subr == DIF_SUBR_NTOHS ||
9520 subr == DIF_SUBR_NTOHL ||
9521 subr == DIF_SUBR_NTOHLL)
9522 break;
9523
9524 err += efunc(pc, "invalid subr %u\n", subr);
9525 break;
9526
9527 default:
9528 err += efunc(pc, "invalid opcode %u\n",
9529 DIF_INSTR_OP(instr));
9530 }
9531 }
9532
9533 return (err);
9534 }
9535
9536 /*
9537 * Returns 1 if the expression in the DIF object can be cached on a per-thread
9538 * basis; 0 if not.
9539 */
9540 static int
9541 dtrace_difo_cacheable(dtrace_difo_t *dp)
9542 {
9543 int i;
9544
9545 if (dp == NULL)
9546 return (0);
9547
9548 for (i = 0; i < dp->dtdo_varlen; i++) {
9549 dtrace_difv_t *v = &dp->dtdo_vartab[i];
9550
9551 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL)
9552 continue;
9553
9554 switch (v->dtdv_id) {
9555 case DIF_VAR_CURTHREAD:
9556 case DIF_VAR_PID:
9557 case DIF_VAR_TID:
9558 case DIF_VAR_EXECNAME:
9559 case DIF_VAR_ZONENAME:
9560 break;
9561
9562 default:
9563 return (0);
9564 }
9565 }
9566
9567 /*
9568 * This DIF object may be cacheable. Now we need to look for any
9569 * array loading instructions, any memory loading instructions, or
9570 * any stores to thread-local variables.
9571 */
9572 for (i = 0; i < dp->dtdo_len; i++) {
9573 uint_t op = DIF_INSTR_OP(dp->dtdo_buf[i]);
9574
9575 if ((op >= DIF_OP_LDSB && op <= DIF_OP_LDX) ||
9576 (op >= DIF_OP_ULDSB && op <= DIF_OP_ULDX) ||
9577 (op >= DIF_OP_RLDSB && op <= DIF_OP_RLDX) ||
9578 op == DIF_OP_LDGA || op == DIF_OP_STTS)
9579 return (0);
9580 }
9581
9582 return (1);
9583 }
9584
9585 static void
9586 dtrace_difo_hold(dtrace_difo_t *dp)
9587 {
9588 int i;
9589
9590 ASSERT(MUTEX_HELD(&dtrace_lock));
9591
9592 dp->dtdo_refcnt++;
9593 ASSERT(dp->dtdo_refcnt != 0);
9594
9595 /*
9596 * We need to check this DIF object for references to the variable
9597 * DIF_VAR_VTIMESTAMP.
9598 */
9599 for (i = 0; i < dp->dtdo_varlen; i++) {
9600 dtrace_difv_t *v = &dp->dtdo_vartab[i];
9601
9602 if (v->dtdv_id != DIF_VAR_VTIMESTAMP)
9603 continue;
9604
9605 if (dtrace_vtime_references++ == 0)
9606 dtrace_vtime_enable();
9607 }
9608 }
9609
9610 /*
9611 * This routine calculates the dynamic variable chunksize for a given DIF
9612 * object. The calculation is not fool-proof, and can probably be tricked by
9613 * malicious DIF -- but it works for all compiler-generated DIF. Because this
9614 * calculation is likely imperfect, dtrace_dynvar() is able to gracefully fail
9615 * if a dynamic variable size exceeds the chunksize.
9616 */
9617 static void
9618 dtrace_difo_chunksize(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
9619 {
9620 uint64_t sval;
9621 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */
9622 const dif_instr_t *text = dp->dtdo_buf;
9623 uint_t pc, srd = 0;
9624 uint_t ttop = 0;
9625 size_t size, ksize;
9626 uint_t id, i;
9627
9628 for (pc = 0; pc < dp->dtdo_len; pc++) {
9629 dif_instr_t instr = text[pc];
9630 uint_t op = DIF_INSTR_OP(instr);
9631 uint_t rd = DIF_INSTR_RD(instr);
9632 uint_t r1 = DIF_INSTR_R1(instr);
9633 uint_t nkeys = 0;
9634 uchar_t scope;
9635
9636 dtrace_key_t *key = tupregs;
9637
9638 switch (op) {
9639 case DIF_OP_SETX:
9640 sval = dp->dtdo_inttab[DIF_INSTR_INTEGER(instr)];
9641 srd = rd;
9642 continue;
9643
9644 case DIF_OP_STTS:
9645 key = &tupregs[DIF_DTR_NREGS];
9646 key[0].dttk_size = 0;
9647 key[1].dttk_size = 0;
9648 nkeys = 2;
9649 scope = DIFV_SCOPE_THREAD;
9650 break;
9651
9652 case DIF_OP_STGAA:
9653 case DIF_OP_STTAA:
9654 nkeys = ttop;
9655
9656 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA)
9657 key[nkeys++].dttk_size = 0;
9658
9659 key[nkeys++].dttk_size = 0;
9660
9661 if (op == DIF_OP_STTAA) {
9662 scope = DIFV_SCOPE_THREAD;
9663 } else {
9664 scope = DIFV_SCOPE_GLOBAL;
9665 }
9666
9667 break;
9668
9669 case DIF_OP_PUSHTR:
9670 if (ttop == DIF_DTR_NREGS)
9671 return;
9672
9673 if ((srd == 0 || sval == 0) && r1 == DIF_TYPE_STRING) {
9674 /*
9675 * If the register for the size of the "pushtr"
9676 * is %r0 (or the value is 0) and the type is
9677 * a string, we'll use the system-wide default
9678 * string size.
9679 */
9680 tupregs[ttop++].dttk_size =
9681 dtrace_strsize_default;
9682 } else {
9683 if (srd == 0)
9684 return;
9685
9686 tupregs[ttop++].dttk_size = sval;
9687 }
9688
9689 break;
9690
9691 case DIF_OP_PUSHTV:
9692 if (ttop == DIF_DTR_NREGS)
9693 return;
9694
9695 tupregs[ttop++].dttk_size = 0;
9696 break;
9697
9698 case DIF_OP_FLUSHTS:
9699 ttop = 0;
9700 break;
9701
9702 case DIF_OP_POPTS:
9703 if (ttop != 0)
9704 ttop--;
9705 break;
9706 }
9707
9708 sval = 0;
9709 srd = 0;
9710
9711 if (nkeys == 0)
9712 continue;
9713
9714 /*
9715 * We have a dynamic variable allocation; calculate its size.
9716 */
9717 for (ksize = 0, i = 0; i < nkeys; i++)
9718 ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t));
9719
9720 size = sizeof (dtrace_dynvar_t);
9721 size += sizeof (dtrace_key_t) * (nkeys - 1);
9722 size += ksize;
9723
9724 /*
9725 * Now we need to determine the size of the stored data.
9726 */
9727 id = DIF_INSTR_VAR(instr);
9728
9729 for (i = 0; i < dp->dtdo_varlen; i++) {
9730 dtrace_difv_t *v = &dp->dtdo_vartab[i];
9731
9732 if (v->dtdv_id == id && v->dtdv_scope == scope) {
9733 size += v->dtdv_type.dtdt_size;
9734 break;
9735 }
9736 }
9737
9738 if (i == dp->dtdo_varlen)
9739 return;
9740
9741 /*
9742 * We have the size. If this is larger than the chunk size
9743 * for our dynamic variable state, reset the chunk size.
9744 */
9745 size = P2ROUNDUP(size, sizeof (uint64_t));
9746
9747 if (size > vstate->dtvs_dynvars.dtds_chunksize)
9748 vstate->dtvs_dynvars.dtds_chunksize = size;
9749 }
9750 }
9751
9752 static void
9753 dtrace_difo_init(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
9754 {
9755 int i, oldsvars, osz, nsz, otlocals, ntlocals;
9756 uint_t id;
9757
9758 ASSERT(MUTEX_HELD(&dtrace_lock));
9759 ASSERT(dp->dtdo_buf != NULL && dp->dtdo_len != 0);
9760
9761 for (i = 0; i < dp->dtdo_varlen; i++) {
9762 dtrace_difv_t *v = &dp->dtdo_vartab[i];
9763 dtrace_statvar_t *svar, ***svarp;
9764 size_t dsize = 0;
9765 uint8_t scope = v->dtdv_scope;
9766 int *np;
9767
9768 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE)
9769 continue;
9770
9771 id -= DIF_VAR_OTHER_UBASE;
9772
9773 switch (scope) {
9774 case DIFV_SCOPE_THREAD:
9775 while (id >= (otlocals = vstate->dtvs_ntlocals)) {
9776 dtrace_difv_t *tlocals;
9777
9778 if ((ntlocals = (otlocals << 1)) == 0)
9779 ntlocals = 1;
9780
9781 osz = otlocals * sizeof (dtrace_difv_t);
9782 nsz = ntlocals * sizeof (dtrace_difv_t);
9783
9784 tlocals = kmem_zalloc(nsz, KM_SLEEP);
9785
9786 if (osz != 0) {
9787 bcopy(vstate->dtvs_tlocals,
9788 tlocals, osz);
9789 kmem_free(vstate->dtvs_tlocals, osz);
9790 }
9791
9792 vstate->dtvs_tlocals = tlocals;
9793 vstate->dtvs_ntlocals = ntlocals;
9794 }
9795
9796 vstate->dtvs_tlocals[id] = *v;
9797 continue;
9798
9799 case DIFV_SCOPE_LOCAL:
9800 np = &vstate->dtvs_nlocals;
9801 svarp = &vstate->dtvs_locals;
9802
9803 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF)
9804 dsize = NCPU * (v->dtdv_type.dtdt_size +
9805 sizeof (uint64_t));
9806 else
9807 dsize = NCPU * sizeof (uint64_t);
9808
9809 break;
9810
9811 case DIFV_SCOPE_GLOBAL:
9812 np = &vstate->dtvs_nglobals;
9813 svarp = &vstate->dtvs_globals;
9814
9815 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF)
9816 dsize = v->dtdv_type.dtdt_size +
9817 sizeof (uint64_t);
9818
9819 break;
9820
9821 default:
9822 ASSERT(0);
9823 }
9824
9825 while (id >= (oldsvars = *np)) {
9826 dtrace_statvar_t **statics;
9827 int newsvars, oldsize, newsize;
9828
9829 if ((newsvars = (oldsvars << 1)) == 0)
9830 newsvars = 1;
9831
9832 oldsize = oldsvars * sizeof (dtrace_statvar_t *);
9833 newsize = newsvars * sizeof (dtrace_statvar_t *);
9834
9835 statics = kmem_zalloc(newsize, KM_SLEEP);
9836
9837 if (oldsize != 0) {
9838 bcopy(*svarp, statics, oldsize);
9839 kmem_free(*svarp, oldsize);
9840 }
9841
9842 *svarp = statics;
9843 *np = newsvars;
9844 }
9845
9846 if ((svar = (*svarp)[id]) == NULL) {
9847 svar = kmem_zalloc(sizeof (dtrace_statvar_t), KM_SLEEP);
9848 svar->dtsv_var = *v;
9849
9850 if ((svar->dtsv_size = dsize) != 0) {
9851 svar->dtsv_data = (uint64_t)(uintptr_t)
9852 kmem_zalloc(dsize, KM_SLEEP);
9853 }
9854
9855 (*svarp)[id] = svar;
9856 }
9857
9858 svar->dtsv_refcnt++;
9859 }
9860
9861 dtrace_difo_chunksize(dp, vstate);
9862 dtrace_difo_hold(dp);
9863 }
9864
9865 static dtrace_difo_t *
9866 dtrace_difo_duplicate(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
9867 {
9868 dtrace_difo_t *new;
9869 size_t sz;
9870
9871 ASSERT(dp->dtdo_buf != NULL);
9872 ASSERT(dp->dtdo_refcnt != 0);
9873
9874 new = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP);
9875
9876 ASSERT(dp->dtdo_buf != NULL);
9877 sz = dp->dtdo_len * sizeof (dif_instr_t);
9878 new->dtdo_buf = kmem_alloc(sz, KM_SLEEP);
9879 bcopy(dp->dtdo_buf, new->dtdo_buf, sz);
9880 new->dtdo_len = dp->dtdo_len;
9881
9882 if (dp->dtdo_strtab != NULL) {
9883 ASSERT(dp->dtdo_strlen != 0);
9884 new->dtdo_strtab = kmem_alloc(dp->dtdo_strlen, KM_SLEEP);
9885 bcopy(dp->dtdo_strtab, new->dtdo_strtab, dp->dtdo_strlen);
9886 new->dtdo_strlen = dp->dtdo_strlen;
9887 }
9888
9889 if (dp->dtdo_inttab != NULL) {
9890 ASSERT(dp->dtdo_intlen != 0);
9891 sz = dp->dtdo_intlen * sizeof (uint64_t);
9892 new->dtdo_inttab = kmem_alloc(sz, KM_SLEEP);
9893 bcopy(dp->dtdo_inttab, new->dtdo_inttab, sz);
9894 new->dtdo_intlen = dp->dtdo_intlen;
9895 }
9896
9897 if (dp->dtdo_vartab != NULL) {
9898 ASSERT(dp->dtdo_varlen != 0);
9899 sz = dp->dtdo_varlen * sizeof (dtrace_difv_t);
9900 new->dtdo_vartab = kmem_alloc(sz, KM_SLEEP);
9901 bcopy(dp->dtdo_vartab, new->dtdo_vartab, sz);
9902 new->dtdo_varlen = dp->dtdo_varlen;
9903 }
9904
9905 dtrace_difo_init(new, vstate);
9906 return (new);
9907 }
9908
9909 static void
9910 dtrace_difo_destroy(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
9911 {
9912 int i;
9913
9914 ASSERT(dp->dtdo_refcnt == 0);
9915
9916 for (i = 0; i < dp->dtdo_varlen; i++) {
9917 dtrace_difv_t *v = &dp->dtdo_vartab[i];
9918 dtrace_statvar_t *svar, **svarp;
9919 uint_t id;
9920 uint8_t scope = v->dtdv_scope;
9921 int *np;
9922
9923 switch (scope) {
9924 case DIFV_SCOPE_THREAD:
9925 continue;
9926
9927 case DIFV_SCOPE_LOCAL:
9928 np = &vstate->dtvs_nlocals;
9929 svarp = vstate->dtvs_locals;
9930 break;
9931
9932 case DIFV_SCOPE_GLOBAL:
9933 np = &vstate->dtvs_nglobals;
9934 svarp = vstate->dtvs_globals;
9935 break;
9936
9937 default:
9938 ASSERT(0);
9939 }
9940
9941 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE)
9942 continue;
9943
9944 id -= DIF_VAR_OTHER_UBASE;
9945 ASSERT(id < *np);
9946
9947 svar = svarp[id];
9948 ASSERT(svar != NULL);
9949 ASSERT(svar->dtsv_refcnt > 0);
9950
9951 if (--svar->dtsv_refcnt > 0)
9952 continue;
9953
9954 if (svar->dtsv_size != 0) {
9955 ASSERT(svar->dtsv_data != NULL);
9956 kmem_free((void *)(uintptr_t)svar->dtsv_data,
9957 svar->dtsv_size);
9958 }
9959
9960 kmem_free(svar, sizeof (dtrace_statvar_t));
9961 svarp[id] = NULL;
9962 }
9963
9964 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t));
9965 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t));
9966 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen);
9967 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t));
9968
9969 kmem_free(dp, sizeof (dtrace_difo_t));
9970 }
9971
9972 static void
9973 dtrace_difo_release(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
9974 {
9975 int i;
9976
9977 ASSERT(MUTEX_HELD(&dtrace_lock));
9978 ASSERT(dp->dtdo_refcnt != 0);
9979
9980 for (i = 0; i < dp->dtdo_varlen; i++) {
9981 dtrace_difv_t *v = &dp->dtdo_vartab[i];
9982
9983 if (v->dtdv_id != DIF_VAR_VTIMESTAMP)
9984 continue;
9985
9986 ASSERT(dtrace_vtime_references > 0);
9987 if (--dtrace_vtime_references == 0)
9988 dtrace_vtime_disable();
9989 }
9990
9991 if (--dp->dtdo_refcnt == 0)
9992 dtrace_difo_destroy(dp, vstate);
9993 }
9994
9995 /*
9996 * DTrace Format Functions
9997 */
9998 static uint16_t
9999 dtrace_format_add(dtrace_state_t *state, char *str)
10000 {
10001 char *fmt, **new;
10002 uint16_t ndx, len = strlen(str) + 1;
10003
10004 fmt = kmem_zalloc(len, KM_SLEEP);
10005 bcopy(str, fmt, len);
10006
10007 for (ndx = 0; ndx < state->dts_nformats; ndx++) {
10008 if (state->dts_formats[ndx] == NULL) {
10009 state->dts_formats[ndx] = fmt;
10010 return (ndx + 1);
10011 }
10012 }
10013
10014 if (state->dts_nformats == USHRT_MAX) {
10015 /*
10016 * This is only likely if a denial-of-service attack is being
10017 * attempted. As such, it's okay to fail silently here.
10018 */
10019 kmem_free(fmt, len);
10020 return (0);
10021 }
10022
10023 /*
10024 * For simplicity, we always resize the formats array to be exactly the
10025 * number of formats.
10026 */
10027 ndx = state->dts_nformats++;
10028 new = kmem_alloc((ndx + 1) * sizeof (char *), KM_SLEEP);
10029
10030 if (state->dts_formats != NULL) {
10031 ASSERT(ndx != 0);
10032 bcopy(state->dts_formats, new, ndx * sizeof (char *));
10033 kmem_free(state->dts_formats, ndx * sizeof (char *));
10034 }
10035
10036 state->dts_formats = new;
10037 state->dts_formats[ndx] = fmt;
10038
10039 return (ndx + 1);
10040 }
10041
10042 static void
10043 dtrace_format_remove(dtrace_state_t *state, uint16_t format)
10044 {
10045 char *fmt;
10046
10047 ASSERT(state->dts_formats != NULL);
10048 ASSERT(format <= state->dts_nformats);
10049 ASSERT(state->dts_formats[format - 1] != NULL);
10050
10051 fmt = state->dts_formats[format - 1];
10052 kmem_free(fmt, strlen(fmt) + 1);
10053 state->dts_formats[format - 1] = NULL;
10054 }
10055
10056 static void
10057 dtrace_format_destroy(dtrace_state_t *state)
10058 {
10059 int i;
10060
10061 if (state->dts_nformats == 0) {
10062 ASSERT(state->dts_formats == NULL);
10063 return;
10064 }
10065
10066 ASSERT(state->dts_formats != NULL);
10067
10068 for (i = 0; i < state->dts_nformats; i++) {
10069 char *fmt = state->dts_formats[i];
10070
10071 if (fmt == NULL)
10072 continue;
10073
10074 kmem_free(fmt, strlen(fmt) + 1);
10075 }
10076
10077 kmem_free(state->dts_formats, state->dts_nformats * sizeof (char *));
10078 state->dts_nformats = 0;
10079 state->dts_formats = NULL;
10080 }
10081
10082 /*
10083 * DTrace Predicate Functions
10084 */
10085 static dtrace_predicate_t *
10086 dtrace_predicate_create(dtrace_difo_t *dp)
10087 {
10088 dtrace_predicate_t *pred;
10089
10090 ASSERT(MUTEX_HELD(&dtrace_lock));
10091 ASSERT(dp->dtdo_refcnt != 0);
10092
10093 pred = kmem_zalloc(sizeof (dtrace_predicate_t), KM_SLEEP);
10094 pred->dtp_difo = dp;
10095 pred->dtp_refcnt = 1;
10096
10097 if (!dtrace_difo_cacheable(dp))
10098 return (pred);
10099
10100 if (dtrace_predcache_id == DTRACE_CACHEIDNONE) {
10101 /*
10102 * This is only theoretically possible -- we have had 2^32
10103 * cacheable predicates on this machine. We cannot allow any
10104 * more predicates to become cacheable: as unlikely as it is,
10105 * there may be a thread caching a (now stale) predicate cache
10106 * ID. (N.B.: the temptation is being successfully resisted to
10107 * have this cmn_err() "Holy shit -- we executed this code!")
10108 */
10109 return (pred);
10110 }
10111
10112 pred->dtp_cacheid = dtrace_predcache_id++;
10113
10114 return (pred);
10115 }
10116
10117 static void
10118 dtrace_predicate_hold(dtrace_predicate_t *pred)
10119 {
10120 ASSERT(MUTEX_HELD(&dtrace_lock));
10121 ASSERT(pred->dtp_difo != NULL && pred->dtp_difo->dtdo_refcnt != 0);
10122 ASSERT(pred->dtp_refcnt > 0);
10123
10124 pred->dtp_refcnt++;
10125 }
10126
10127 static void
10128 dtrace_predicate_release(dtrace_predicate_t *pred, dtrace_vstate_t *vstate)
10129 {
10130 dtrace_difo_t *dp = pred->dtp_difo;
10131
10132 ASSERT(MUTEX_HELD(&dtrace_lock));
10133 ASSERT(dp != NULL && dp->dtdo_refcnt != 0);
10134 ASSERT(pred->dtp_refcnt > 0);
10135
10136 if (--pred->dtp_refcnt == 0) {
10137 dtrace_difo_release(pred->dtp_difo, vstate);
10138 kmem_free(pred, sizeof (dtrace_predicate_t));
10139 }
10140 }
10141
10142 /*
10143 * DTrace Action Description Functions
10144 */
10145 static dtrace_actdesc_t *
10146 dtrace_actdesc_create(dtrace_actkind_t kind, uint32_t ntuple,
10147 uint64_t uarg, uint64_t arg)
10148 {
10149 dtrace_actdesc_t *act;
10150
10151 ASSERT(!DTRACEACT_ISPRINTFLIKE(kind) || (arg != NULL &&
10152 arg >= KERNELBASE) || (arg == NULL && kind == DTRACEACT_PRINTA));
10153
10154 act = kmem_zalloc(sizeof (dtrace_actdesc_t), KM_SLEEP);
10155 act->dtad_kind = kind;
10156 act->dtad_ntuple = ntuple;
10157 act->dtad_uarg = uarg;
10158 act->dtad_arg = arg;
10159 act->dtad_refcnt = 1;
10160
10161 return (act);
10162 }
10163
10164 static void
10165 dtrace_actdesc_hold(dtrace_actdesc_t *act)
10166 {
10167 ASSERT(act->dtad_refcnt >= 1);
10168 act->dtad_refcnt++;
10169 }
10170
10171 static void
10172 dtrace_actdesc_release(dtrace_actdesc_t *act, dtrace_vstate_t *vstate)
10173 {
10174 dtrace_actkind_t kind = act->dtad_kind;
10175 dtrace_difo_t *dp;
10176
10177 ASSERT(act->dtad_refcnt >= 1);
10178
10179 if (--act->dtad_refcnt != 0)
10180 return;
10181
10182 if ((dp = act->dtad_difo) != NULL)
10183 dtrace_difo_release(dp, vstate);
10184
10185 if (DTRACEACT_ISPRINTFLIKE(kind)) {
10186 char *str = (char *)(uintptr_t)act->dtad_arg;
10187
10188 ASSERT((str != NULL && (uintptr_t)str >= KERNELBASE) ||
10189 (str == NULL && act->dtad_kind == DTRACEACT_PRINTA));
10190
10191 if (str != NULL)
10192 kmem_free(str, strlen(str) + 1);
10193 }
10194
10195 kmem_free(act, sizeof (dtrace_actdesc_t));
10196 }
10197
10198 /*
10199 * DTrace ECB Functions
10200 */
10201 static dtrace_ecb_t *
10202 dtrace_ecb_add(dtrace_state_t *state, dtrace_probe_t *probe)
10203 {
10204 dtrace_ecb_t *ecb;
10205 dtrace_epid_t epid;
10206
10207 ASSERT(MUTEX_HELD(&dtrace_lock));
10208
10209 ecb = kmem_zalloc(sizeof (dtrace_ecb_t), KM_SLEEP);
10210 ecb->dte_predicate = NULL;
10211 ecb->dte_probe = probe;
10212
10213 /*
10214 * The default size is the size of the default action: recording
10215 * the header.
10216 */
10217 ecb->dte_size = ecb->dte_needed = sizeof (dtrace_rechdr_t);
10218 ecb->dte_alignment = sizeof (dtrace_epid_t);
10219
10220 epid = state->dts_epid++;
10221
10222 if (epid - 1 >= state->dts_necbs) {
10223 dtrace_ecb_t **oecbs = state->dts_ecbs, **ecbs;
10224 int necbs = state->dts_necbs << 1;
10225
10226 ASSERT(epid == state->dts_necbs + 1);
10227
10228 if (necbs == 0) {
10229 ASSERT(oecbs == NULL);
10230 necbs = 1;
10231 }
10232
10233 ecbs = kmem_zalloc(necbs * sizeof (*ecbs), KM_SLEEP);
10234
10235 if (oecbs != NULL)
10236 bcopy(oecbs, ecbs, state->dts_necbs * sizeof (*ecbs));
10237
10238 dtrace_membar_producer();
10239 state->dts_ecbs = ecbs;
10240
10241 if (oecbs != NULL) {
10242 /*
10243 * If this state is active, we must dtrace_sync()
10244 * before we can free the old dts_ecbs array: we're
10245 * coming in hot, and there may be active ring
10246 * buffer processing (which indexes into the dts_ecbs
10247 * array) on another CPU.
10248 */
10249 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE)
10250 dtrace_sync();
10251
10252 kmem_free(oecbs, state->dts_necbs * sizeof (*ecbs));
10253 }
10254
10255 dtrace_membar_producer();
10256 state->dts_necbs = necbs;
10257 }
10258
10259 ecb->dte_state = state;
10260
10261 ASSERT(state->dts_ecbs[epid - 1] == NULL);
10262 dtrace_membar_producer();
10263 state->dts_ecbs[(ecb->dte_epid = epid) - 1] = ecb;
10264
10265 return (ecb);
10266 }
10267
10268 static int
10269 dtrace_ecb_enable(dtrace_ecb_t *ecb)
10270 {
10271 dtrace_probe_t *probe = ecb->dte_probe;
10272
10273 ASSERT(MUTEX_HELD(&cpu_lock));
10274 ASSERT(MUTEX_HELD(&dtrace_lock));
10275 ASSERT(ecb->dte_next == NULL);
10276
10277 if (probe == NULL) {
10278 /*
10279 * This is the NULL probe -- there's nothing to do.
10280 */
10281 return (0);
10282 }
10283
10284 if (probe->dtpr_ecb == NULL) {
10285 dtrace_provider_t *prov = probe->dtpr_provider;
10286
10287 /*
10288 * We're the first ECB on this probe.
10289 */
10290 probe->dtpr_ecb = probe->dtpr_ecb_last = ecb;
10291
10292 if (ecb->dte_predicate != NULL)
10293 probe->dtpr_predcache = ecb->dte_predicate->dtp_cacheid;
10294
10295 return (prov->dtpv_pops.dtps_enable(prov->dtpv_arg,
10296 probe->dtpr_id, probe->dtpr_arg));
10297 } else {
10298 /*
10299 * This probe is already active. Swing the last pointer to
10300 * point to the new ECB, and issue a dtrace_sync() to assure
10301 * that all CPUs have seen the change.
10302 */
10303 ASSERT(probe->dtpr_ecb_last != NULL);
10304 probe->dtpr_ecb_last->dte_next = ecb;
10305 probe->dtpr_ecb_last = ecb;
10306 probe->dtpr_predcache = 0;
10307
10308 dtrace_sync();
10309 return (0);
10310 }
10311 }
10312
10313 static void
10314 dtrace_ecb_resize(dtrace_ecb_t *ecb)
10315 {
10316 dtrace_action_t *act;
10317 uint32_t curneeded = UINT32_MAX;
10318 uint32_t aggbase = UINT32_MAX;
10319
10320 /*
10321 * If we record anything, we always record the dtrace_rechdr_t. (And
10322 * we always record it first.)
10323 */
10324 ecb->dte_size = sizeof (dtrace_rechdr_t);
10325 ecb->dte_alignment = sizeof (dtrace_epid_t);
10326
10327 for (act = ecb->dte_action; act != NULL; act = act->dta_next) {
10328 dtrace_recdesc_t *rec = &act->dta_rec;
10329 ASSERT(rec->dtrd_size > 0 || rec->dtrd_alignment == 1);
10330
10331 ecb->dte_alignment = MAX(ecb->dte_alignment,
10332 rec->dtrd_alignment);
10333
10334 if (DTRACEACT_ISAGG(act->dta_kind)) {
10335 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act;
10336
10337 ASSERT(rec->dtrd_size != 0);
10338 ASSERT(agg->dtag_first != NULL);
10339 ASSERT(act->dta_prev->dta_intuple);
10340 ASSERT(aggbase != UINT32_MAX);
10341 ASSERT(curneeded != UINT32_MAX);
10342
10343 agg->dtag_base = aggbase;
10344
10345 curneeded = P2ROUNDUP(curneeded, rec->dtrd_alignment);
10346 rec->dtrd_offset = curneeded;
10347 curneeded += rec->dtrd_size;
10348 ecb->dte_needed = MAX(ecb->dte_needed, curneeded);
10349
10350 aggbase = UINT32_MAX;
10351 curneeded = UINT32_MAX;
10352 } else if (act->dta_intuple) {
10353 if (curneeded == UINT32_MAX) {
10354 /*
10355 * This is the first record in a tuple. Align
10356 * curneeded to be at offset 4 in an 8-byte
10357 * aligned block.
10358 */
10359 ASSERT(act->dta_prev == NULL ||
10360 !act->dta_prev->dta_intuple);
10361 ASSERT3U(aggbase, ==, UINT32_MAX);
10362 curneeded = P2PHASEUP(ecb->dte_size,
10363 sizeof (uint64_t), sizeof (dtrace_aggid_t));
10364
10365 aggbase = curneeded - sizeof (dtrace_aggid_t);
10366 ASSERT(IS_P2ALIGNED(aggbase,
10367 sizeof (uint64_t)));
10368 }
10369 curneeded = P2ROUNDUP(curneeded, rec->dtrd_alignment);
10370 rec->dtrd_offset = curneeded;
10371 curneeded += rec->dtrd_size;
10372 } else {
10373 /* tuples must be followed by an aggregation */
10374 ASSERT(act->dta_prev == NULL ||
10375 !act->dta_prev->dta_intuple);
10376
10377 ecb->dte_size = P2ROUNDUP(ecb->dte_size,
10378 rec->dtrd_alignment);
10379 rec->dtrd_offset = ecb->dte_size;
10380 ecb->dte_size += rec->dtrd_size;
10381 ecb->dte_needed = MAX(ecb->dte_needed, ecb->dte_size);
10382 }
10383 }
10384
10385 if ((act = ecb->dte_action) != NULL &&
10386 !(act->dta_kind == DTRACEACT_SPECULATE && act->dta_next == NULL) &&
10387 ecb->dte_size == sizeof (dtrace_rechdr_t)) {
10388 /*
10389 * If the size is still sizeof (dtrace_rechdr_t), then all
10390 * actions store no data; set the size to 0.
10391 */
10392 ecb->dte_size = 0;
10393 }
10394
10395 ecb->dte_size = P2ROUNDUP(ecb->dte_size, sizeof (dtrace_epid_t));
10396 ecb->dte_needed = P2ROUNDUP(ecb->dte_needed, (sizeof (dtrace_epid_t)));
10397 ecb->dte_state->dts_needed = MAX(ecb->dte_state->dts_needed,
10398 ecb->dte_needed);
10399 }
10400
10401 static dtrace_action_t *
10402 dtrace_ecb_aggregation_create(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc)
10403 {
10404 dtrace_aggregation_t *agg;
10405 size_t size = sizeof (uint64_t);
10406 int ntuple = desc->dtad_ntuple;
10407 dtrace_action_t *act;
10408 dtrace_recdesc_t *frec;
10409 dtrace_aggid_t aggid;
10410 dtrace_state_t *state = ecb->dte_state;
10411
10412 agg = kmem_zalloc(sizeof (dtrace_aggregation_t), KM_SLEEP);
10413 agg->dtag_ecb = ecb;
10414
10415 ASSERT(DTRACEACT_ISAGG(desc->dtad_kind));
10416
10417 switch (desc->dtad_kind) {
10418 case DTRACEAGG_MIN:
10419 agg->dtag_initial = INT64_MAX;
10420 agg->dtag_aggregate = dtrace_aggregate_min;
10421 break;
10422
10423 case DTRACEAGG_MAX:
10424 agg->dtag_initial = INT64_MIN;
10425 agg->dtag_aggregate = dtrace_aggregate_max;
10426 break;
10427
10428 case DTRACEAGG_COUNT:
10429 agg->dtag_aggregate = dtrace_aggregate_count;
10430 break;
10431
10432 case DTRACEAGG_QUANTIZE:
10433 agg->dtag_aggregate = dtrace_aggregate_quantize;
10434 size = (((sizeof (uint64_t) * NBBY) - 1) * 2 + 1) *
10435 sizeof (uint64_t);
10436 break;
10437
10438 case DTRACEAGG_LQUANTIZE: {
10439 uint16_t step = DTRACE_LQUANTIZE_STEP(desc->dtad_arg);
10440 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(desc->dtad_arg);
10441
10442 agg->dtag_initial = desc->dtad_arg;
10443 agg->dtag_aggregate = dtrace_aggregate_lquantize;
10444
10445 if (step == 0 || levels == 0)
10446 goto err;
10447
10448 size = levels * sizeof (uint64_t) + 3 * sizeof (uint64_t);
10449 break;
10450 }
10451
10452 case DTRACEAGG_LLQUANTIZE: {
10453 uint16_t factor = DTRACE_LLQUANTIZE_FACTOR(desc->dtad_arg);
10454 uint16_t low = DTRACE_LLQUANTIZE_LOW(desc->dtad_arg);
10455 uint16_t high = DTRACE_LLQUANTIZE_HIGH(desc->dtad_arg);
10456 uint16_t nsteps = DTRACE_LLQUANTIZE_NSTEP(desc->dtad_arg);
10457 int64_t v;
10458
10459 agg->dtag_initial = desc->dtad_arg;
10460 agg->dtag_aggregate = dtrace_aggregate_llquantize;
10461
10462 if (factor < 2 || low >= high || nsteps < factor)
10463 goto err;
10464
10465 /*
10466 * Now check that the number of steps evenly divides a power
10467 * of the factor. (This assures both integer bucket size and
10468 * linearity within each magnitude.)
10469 */
10470 for (v = factor; v < nsteps; v *= factor)
10471 continue;
10472
10473 if ((v % nsteps) || (nsteps % factor))
10474 goto err;
10475
10476 size = (dtrace_aggregate_llquantize_bucket(factor,
10477 low, high, nsteps, INT64_MAX) + 2) * sizeof (uint64_t);
10478 break;
10479 }
10480
10481 case DTRACEAGG_AVG:
10482 agg->dtag_aggregate = dtrace_aggregate_avg;
10483 size = sizeof (uint64_t) * 2;
10484 break;
10485
10486 case DTRACEAGG_STDDEV:
10487 agg->dtag_aggregate = dtrace_aggregate_stddev;
10488 size = sizeof (uint64_t) * 4;
10489 break;
10490
10491 case DTRACEAGG_SUM:
10492 agg->dtag_aggregate = dtrace_aggregate_sum;
10493 break;
10494
10495 default:
10496 goto err;
10497 }
10498
10499 agg->dtag_action.dta_rec.dtrd_size = size;
10500
10501 if (ntuple == 0)
10502 goto err;
10503
10504 /*
10505 * We must make sure that we have enough actions for the n-tuple.
10506 */
10507 for (act = ecb->dte_action_last; act != NULL; act = act->dta_prev) {
10508 if (DTRACEACT_ISAGG(act->dta_kind))
10509 break;
10510
10511 if (--ntuple == 0) {
10512 /*
10513 * This is the action with which our n-tuple begins.
10514 */
10515 agg->dtag_first = act;
10516 goto success;
10517 }
10518 }
10519
10520 /*
10521 * This n-tuple is short by ntuple elements. Return failure.
10522 */
10523 ASSERT(ntuple != 0);
10524 err:
10525 kmem_free(agg, sizeof (dtrace_aggregation_t));
10526 return (NULL);
10527
10528 success:
10529 /*
10530 * If the last action in the tuple has a size of zero, it's actually
10531 * an expression argument for the aggregating action.
10532 */
10533 ASSERT(ecb->dte_action_last != NULL);
10534 act = ecb->dte_action_last;
10535
10536 if (act->dta_kind == DTRACEACT_DIFEXPR) {
10537 ASSERT(act->dta_difo != NULL);
10538
10539 if (act->dta_difo->dtdo_rtype.dtdt_size == 0)
10540 agg->dtag_hasarg = 1;
10541 }
10542
10543 /*
10544 * We need to allocate an id for this aggregation.
10545 */
10546 aggid = (dtrace_aggid_t)(uintptr_t)vmem_alloc(state->dts_aggid_arena, 1,
10547 VM_BESTFIT | VM_SLEEP);
10548
10549 if (aggid - 1 >= state->dts_naggregations) {
10550 dtrace_aggregation_t **oaggs = state->dts_aggregations;
10551 dtrace_aggregation_t **aggs;
10552 int naggs = state->dts_naggregations << 1;
10553 int onaggs = state->dts_naggregations;
10554
10555 ASSERT(aggid == state->dts_naggregations + 1);
10556
10557 if (naggs == 0) {
10558 ASSERT(oaggs == NULL);
10559 naggs = 1;
10560 }
10561
10562 aggs = kmem_zalloc(naggs * sizeof (*aggs), KM_SLEEP);
10563
10564 if (oaggs != NULL) {
10565 bcopy(oaggs, aggs, onaggs * sizeof (*aggs));
10566 kmem_free(oaggs, onaggs * sizeof (*aggs));
10567 }
10568
10569 state->dts_aggregations = aggs;
10570 state->dts_naggregations = naggs;
10571 }
10572
10573 ASSERT(state->dts_aggregations[aggid - 1] == NULL);
10574 state->dts_aggregations[(agg->dtag_id = aggid) - 1] = agg;
10575
10576 frec = &agg->dtag_first->dta_rec;
10577 if (frec->dtrd_alignment < sizeof (dtrace_aggid_t))
10578 frec->dtrd_alignment = sizeof (dtrace_aggid_t);
10579
10580 for (act = agg->dtag_first; act != NULL; act = act->dta_next) {
10581 ASSERT(!act->dta_intuple);
10582 act->dta_intuple = 1;
10583 }
10584
10585 return (&agg->dtag_action);
10586 }
10587
10588 static void
10589 dtrace_ecb_aggregation_destroy(dtrace_ecb_t *ecb, dtrace_action_t *act)
10590 {
10591 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act;
10592 dtrace_state_t *state = ecb->dte_state;
10593 dtrace_aggid_t aggid = agg->dtag_id;
10594
10595 ASSERT(DTRACEACT_ISAGG(act->dta_kind));
10596 vmem_free(state->dts_aggid_arena, (void *)(uintptr_t)aggid, 1);
10597
10598 ASSERT(state->dts_aggregations[aggid - 1] == agg);
10599 state->dts_aggregations[aggid - 1] = NULL;
10600
10601 kmem_free(agg, sizeof (dtrace_aggregation_t));
10602 }
10603
10604 static int
10605 dtrace_ecb_action_add(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc)
10606 {
10607 dtrace_action_t *action, *last;
10608 dtrace_difo_t *dp = desc->dtad_difo;
10609 uint32_t size = 0, align = sizeof (uint8_t), mask;
10610 uint16_t format = 0;
10611 dtrace_recdesc_t *rec;
10612 dtrace_state_t *state = ecb->dte_state;
10613 dtrace_optval_t *opt = state->dts_options, nframes, strsize;
10614 uint64_t arg = desc->dtad_arg;
10615
10616 ASSERT(MUTEX_HELD(&dtrace_lock));
10617 ASSERT(ecb->dte_action == NULL || ecb->dte_action->dta_refcnt == 1);
10618
10619 if (DTRACEACT_ISAGG(desc->dtad_kind)) {
10620 /*
10621 * If this is an aggregating action, there must be neither
10622 * a speculate nor a commit on the action chain.
10623 */
10624 dtrace_action_t *act;
10625
10626 for (act = ecb->dte_action; act != NULL; act = act->dta_next) {
10627 if (act->dta_kind == DTRACEACT_COMMIT)
10628 return (EINVAL);
10629
10630 if (act->dta_kind == DTRACEACT_SPECULATE)
10631 return (EINVAL);
10632 }
10633
10634 action = dtrace_ecb_aggregation_create(ecb, desc);
10635
10636 if (action == NULL)
10637 return (EINVAL);
10638 } else {
10639 if (DTRACEACT_ISDESTRUCTIVE(desc->dtad_kind) ||
10640 (desc->dtad_kind == DTRACEACT_DIFEXPR &&
10641 dp != NULL && dp->dtdo_destructive)) {
10642 state->dts_destructive = 1;
10643 }
10644
10645 switch (desc->dtad_kind) {
10646 case DTRACEACT_PRINTF:
10647 case DTRACEACT_PRINTA:
10648 case DTRACEACT_SYSTEM:
10649 case DTRACEACT_FREOPEN:
10650 case DTRACEACT_DIFEXPR:
10651 /*
10652 * We know that our arg is a string -- turn it into a
10653 * format.
10654 */
10655 if (arg == NULL) {
10656 ASSERT(desc->dtad_kind == DTRACEACT_PRINTA ||
10657 desc->dtad_kind == DTRACEACT_DIFEXPR);
10658 format = 0;
10659 } else {
10660 ASSERT(arg != NULL);
10661 ASSERT(arg > KERNELBASE);
10662 format = dtrace_format_add(state,
10663 (char *)(uintptr_t)arg);
10664 }
10665
10666 /*FALLTHROUGH*/
10667 case DTRACEACT_LIBACT:
10668 case DTRACEACT_TRACEMEM:
10669 case DTRACEACT_TRACEMEM_DYNSIZE:
10670 if (dp == NULL)
10671 return (EINVAL);
10672
10673 if ((size = dp->dtdo_rtype.dtdt_size) != 0)
10674 break;
10675
10676 if (dp->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING) {
10677 if (!(dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF))
10678 return (EINVAL);
10679
10680 size = opt[DTRACEOPT_STRSIZE];
10681 }
10682
10683 break;
10684
10685 case DTRACEACT_STACK:
10686 if ((nframes = arg) == 0) {
10687 nframes = opt[DTRACEOPT_STACKFRAMES];
10688 ASSERT(nframes > 0);
10689 arg = nframes;
10690 }
10691
10692 size = nframes * sizeof (pc_t);
10693 break;
10694
10695 case DTRACEACT_JSTACK:
10696 if ((strsize = DTRACE_USTACK_STRSIZE(arg)) == 0)
10697 strsize = opt[DTRACEOPT_JSTACKSTRSIZE];
10698
10699 if ((nframes = DTRACE_USTACK_NFRAMES(arg)) == 0)
10700 nframes = opt[DTRACEOPT_JSTACKFRAMES];
10701
10702 arg = DTRACE_USTACK_ARG(nframes, strsize);
10703
10704 /*FALLTHROUGH*/
10705 case DTRACEACT_USTACK:
10706 if (desc->dtad_kind != DTRACEACT_JSTACK &&
10707 (nframes = DTRACE_USTACK_NFRAMES(arg)) == 0) {
10708 strsize = DTRACE_USTACK_STRSIZE(arg);
10709 nframes = opt[DTRACEOPT_USTACKFRAMES];
10710 ASSERT(nframes > 0);
10711 arg = DTRACE_USTACK_ARG(nframes, strsize);
10712 }
10713
10714 /*
10715 * Save a slot for the pid.
10716 */
10717 size = (nframes + 1) * sizeof (uint64_t);
10718 size += DTRACE_USTACK_STRSIZE(arg);
10719 size = P2ROUNDUP(size, (uint32_t)(sizeof (uintptr_t)));
10720
10721 break;
10722
10723 case DTRACEACT_SYM:
10724 case DTRACEACT_MOD:
10725 if (dp == NULL || ((size = dp->dtdo_rtype.dtdt_size) !=
10726 sizeof (uint64_t)) ||
10727 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF))
10728 return (EINVAL);
10729 break;
10730
10731 case DTRACEACT_USYM:
10732 case DTRACEACT_UMOD:
10733 case DTRACEACT_UADDR:
10734 if (dp == NULL ||
10735 (dp->dtdo_rtype.dtdt_size != sizeof (uint64_t)) ||
10736 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF))
10737 return (EINVAL);
10738
10739 /*
10740 * We have a slot for the pid, plus a slot for the
10741 * argument. To keep things simple (aligned with
10742 * bitness-neutral sizing), we store each as a 64-bit
10743 * quantity.
10744 */
10745 size = 2 * sizeof (uint64_t);
10746 break;
10747
10748 case DTRACEACT_STOP:
10749 case DTRACEACT_BREAKPOINT:
10750 case DTRACEACT_PANIC:
10751 break;
10752
10753 case DTRACEACT_CHILL:
10754 case DTRACEACT_DISCARD:
10755 case DTRACEACT_RAISE:
10756 if (dp == NULL)
10757 return (EINVAL);
10758 break;
10759
10760 case DTRACEACT_EXIT:
10761 if (dp == NULL ||
10762 (size = dp->dtdo_rtype.dtdt_size) != sizeof (int) ||
10763 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF))
10764 return (EINVAL);
10765 break;
10766
10767 case DTRACEACT_SPECULATE:
10768 if (ecb->dte_size > sizeof (dtrace_rechdr_t))
10769 return (EINVAL);
10770
10771 if (dp == NULL)
10772 return (EINVAL);
10773
10774 state->dts_speculates = 1;
10775 break;
10776
10777 case DTRACEACT_COMMIT: {
10778 dtrace_action_t *act = ecb->dte_action;
10779
10780 for (; act != NULL; act = act->dta_next) {
10781 if (act->dta_kind == DTRACEACT_COMMIT)
10782 return (EINVAL);
10783 }
10784
10785 if (dp == NULL)
10786 return (EINVAL);
10787 break;
10788 }
10789
10790 default:
10791 return (EINVAL);
10792 }
10793
10794 if (size != 0 || desc->dtad_kind == DTRACEACT_SPECULATE) {
10795 /*
10796 * If this is a data-storing action or a speculate,
10797 * we must be sure that there isn't a commit on the
10798 * action chain.
10799 */
10800 dtrace_action_t *act = ecb->dte_action;
10801
10802 for (; act != NULL; act = act->dta_next) {
10803 if (act->dta_kind == DTRACEACT_COMMIT)
10804 return (EINVAL);
10805 }
10806 }
10807
10808 action = kmem_zalloc(sizeof (dtrace_action_t), KM_SLEEP);
10809 action->dta_rec.dtrd_size = size;
10810 }
10811
10812 action->dta_refcnt = 1;
10813 rec = &action->dta_rec;
10814 size = rec->dtrd_size;
10815
10816 for (mask = sizeof (uint64_t) - 1; size != 0 && mask > 0; mask >>= 1) {
10817 if (!(size & mask)) {
10818 align = mask + 1;
10819 break;
10820 }
10821 }
10822
10823 action->dta_kind = desc->dtad_kind;
10824
10825 if ((action->dta_difo = dp) != NULL)
10826 dtrace_difo_hold(dp);
10827
10828 rec->dtrd_action = action->dta_kind;
10829 rec->dtrd_arg = arg;
10830 rec->dtrd_uarg = desc->dtad_uarg;
10831 rec->dtrd_alignment = (uint16_t)align;
10832 rec->dtrd_format = format;
10833
10834 if ((last = ecb->dte_action_last) != NULL) {
10835 ASSERT(ecb->dte_action != NULL);
10836 action->dta_prev = last;
10837 last->dta_next = action;
10838 } else {
10839 ASSERT(ecb->dte_action == NULL);
10840 ecb->dte_action = action;
10841 }
10842
10843 ecb->dte_action_last = action;
10844
10845 return (0);
10846 }
10847
10848 static void
10849 dtrace_ecb_action_remove(dtrace_ecb_t *ecb)
10850 {
10851 dtrace_action_t *act = ecb->dte_action, *next;
10852 dtrace_vstate_t *vstate = &ecb->dte_state->dts_vstate;
10853 dtrace_difo_t *dp;
10854 uint16_t format;
10855
10856 if (act != NULL && act->dta_refcnt > 1) {
10857 ASSERT(act->dta_next == NULL || act->dta_next->dta_refcnt == 1);
10858 act->dta_refcnt--;
10859 } else {
10860 for (; act != NULL; act = next) {
10861 next = act->dta_next;
10862 ASSERT(next != NULL || act == ecb->dte_action_last);
10863 ASSERT(act->dta_refcnt == 1);
10864
10865 if ((format = act->dta_rec.dtrd_format) != 0)
10866 dtrace_format_remove(ecb->dte_state, format);
10867
10868 if ((dp = act->dta_difo) != NULL)
10869 dtrace_difo_release(dp, vstate);
10870
10871 if (DTRACEACT_ISAGG(act->dta_kind)) {
10872 dtrace_ecb_aggregation_destroy(ecb, act);
10873 } else {
10874 kmem_free(act, sizeof (dtrace_action_t));
10875 }
10876 }
10877 }
10878
10879 ecb->dte_action = NULL;
10880 ecb->dte_action_last = NULL;
10881 ecb->dte_size = 0;
10882 }
10883
10884 static void
10885 dtrace_ecb_disable(dtrace_ecb_t *ecb)
10886 {
10887 /*
10888 * We disable the ECB by removing it from its probe.
10889 */
10890 dtrace_ecb_t *pecb, *prev = NULL;
10891 dtrace_probe_t *probe = ecb->dte_probe;
10892
10893 ASSERT(MUTEX_HELD(&dtrace_lock));
10894
10895 if (probe == NULL) {
10896 /*
10897 * This is the NULL probe; there is nothing to disable.
10898 */
10899 return;
10900 }
10901
10902 for (pecb = probe->dtpr_ecb; pecb != NULL; pecb = pecb->dte_next) {
10903 if (pecb == ecb)
10904 break;
10905 prev = pecb;
10906 }
10907
10908 ASSERT(pecb != NULL);
10909
10910 if (prev == NULL) {
10911 probe->dtpr_ecb = ecb->dte_next;
10912 } else {
10913 prev->dte_next = ecb->dte_next;
10914 }
10915
10916 if (ecb == probe->dtpr_ecb_last) {
10917 ASSERT(ecb->dte_next == NULL);
10918 probe->dtpr_ecb_last = prev;
10919 }
10920
10921 /*
10922 * The ECB has been disconnected from the probe; now sync to assure
10923 * that all CPUs have seen the change before returning.
10924 */
10925 dtrace_sync();
10926
10927 if (probe->dtpr_ecb == NULL) {
10928 /*
10929 * That was the last ECB on the probe; clear the predicate
10930 * cache ID for the probe, disable it and sync one more time
10931 * to assure that we'll never hit it again.
10932 */
10933 dtrace_provider_t *prov = probe->dtpr_provider;
10934
10935 ASSERT(ecb->dte_next == NULL);
10936 ASSERT(probe->dtpr_ecb_last == NULL);
10937 probe->dtpr_predcache = DTRACE_CACHEIDNONE;
10938 prov->dtpv_pops.dtps_disable(prov->dtpv_arg,
10939 probe->dtpr_id, probe->dtpr_arg);
10940 dtrace_sync();
10941 } else {
10942 /*
10943 * There is at least one ECB remaining on the probe. If there
10944 * is _exactly_ one, set the probe's predicate cache ID to be
10945 * the predicate cache ID of the remaining ECB.
10946 */
10947 ASSERT(probe->dtpr_ecb_last != NULL);
10948 ASSERT(probe->dtpr_predcache == DTRACE_CACHEIDNONE);
10949
10950 if (probe->dtpr_ecb == probe->dtpr_ecb_last) {
10951 dtrace_predicate_t *p = probe->dtpr_ecb->dte_predicate;
10952
10953 ASSERT(probe->dtpr_ecb->dte_next == NULL);
10954
10955 if (p != NULL)
10956 probe->dtpr_predcache = p->dtp_cacheid;
10957 }
10958
10959 ecb->dte_next = NULL;
10960 }
10961 }
10962
10963 static void
10964 dtrace_ecb_destroy(dtrace_ecb_t *ecb)
10965 {
10966 dtrace_state_t *state = ecb->dte_state;
10967 dtrace_vstate_t *vstate = &state->dts_vstate;
10968 dtrace_predicate_t *pred;
10969 dtrace_epid_t epid = ecb->dte_epid;
10970
10971 ASSERT(MUTEX_HELD(&dtrace_lock));
10972 ASSERT(ecb->dte_next == NULL);
10973 ASSERT(ecb->dte_probe == NULL || ecb->dte_probe->dtpr_ecb != ecb);
10974
10975 if ((pred = ecb->dte_predicate) != NULL)
10976 dtrace_predicate_release(pred, vstate);
10977
10978 dtrace_ecb_action_remove(ecb);
10979
10980 ASSERT(state->dts_ecbs[epid - 1] == ecb);
10981 state->dts_ecbs[epid - 1] = NULL;
10982
10983 kmem_free(ecb, sizeof (dtrace_ecb_t));
10984 }
10985
10986 static dtrace_ecb_t *
10987 dtrace_ecb_create(dtrace_state_t *state, dtrace_probe_t *probe,
10988 dtrace_enabling_t *enab)
10989 {
10990 dtrace_ecb_t *ecb;
10991 dtrace_predicate_t *pred;
10992 dtrace_actdesc_t *act;
10993 dtrace_provider_t *prov;
10994 dtrace_ecbdesc_t *desc = enab->dten_current;
10995
10996 ASSERT(MUTEX_HELD(&dtrace_lock));
10997 ASSERT(state != NULL);
10998
10999 ecb = dtrace_ecb_add(state, probe);
11000 ecb->dte_uarg = desc->dted_uarg;
11001
11002 if ((pred = desc->dted_pred.dtpdd_predicate) != NULL) {
11003 dtrace_predicate_hold(pred);
11004 ecb->dte_predicate = pred;
11005 }
11006
11007 if (probe != NULL) {
11008 /*
11009 * If the provider shows more leg than the consumer is old
11010 * enough to see, we need to enable the appropriate implicit
11011 * predicate bits to prevent the ecb from activating at
11012 * revealing times.
11013 *
11014 * Providers specifying DTRACE_PRIV_USER at register time
11015 * are stating that they need the /proc-style privilege
11016 * model to be enforced, and this is what DTRACE_COND_OWNER
11017 * and DTRACE_COND_ZONEOWNER will then do at probe time.
11018 */
11019 prov = probe->dtpr_provider;
11020 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLPROC) &&
11021 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER))
11022 ecb->dte_cond |= DTRACE_COND_OWNER;
11023
11024 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLZONE) &&
11025 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER))
11026 ecb->dte_cond |= DTRACE_COND_ZONEOWNER;
11027
11028 /*
11029 * If the provider shows us kernel innards and the user
11030 * is lacking sufficient privilege, enable the
11031 * DTRACE_COND_USERMODE implicit predicate.
11032 */
11033 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) &&
11034 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_KERNEL))
11035 ecb->dte_cond |= DTRACE_COND_USERMODE;
11036 }
11037
11038 if (dtrace_ecb_create_cache != NULL) {
11039 /*
11040 * If we have a cached ecb, we'll use its action list instead
11041 * of creating our own (saving both time and space).
11042 */
11043 dtrace_ecb_t *cached = dtrace_ecb_create_cache;
11044 dtrace_action_t *act = cached->dte_action;
11045
11046 if (act != NULL) {
11047 ASSERT(act->dta_refcnt > 0);
11048 act->dta_refcnt++;
11049 ecb->dte_action = act;
11050 ecb->dte_action_last = cached->dte_action_last;
11051 ecb->dte_needed = cached->dte_needed;
11052 ecb->dte_size = cached->dte_size;
11053 ecb->dte_alignment = cached->dte_alignment;
11054 }
11055
11056 return (ecb);
11057 }
11058
11059 for (act = desc->dted_action; act != NULL; act = act->dtad_next) {
11060 if ((enab->dten_error = dtrace_ecb_action_add(ecb, act)) != 0) {
11061 dtrace_ecb_destroy(ecb);
11062 return (NULL);
11063 }
11064 }
11065
11066 dtrace_ecb_resize(ecb);
11067
11068 return (dtrace_ecb_create_cache = ecb);
11069 }
11070
11071 static int
11072 dtrace_ecb_create_enable(dtrace_probe_t *probe, void *arg)
11073 {
11074 dtrace_ecb_t *ecb;
11075 dtrace_enabling_t *enab = arg;
11076 dtrace_state_t *state = enab->dten_vstate->dtvs_state;
11077
11078 ASSERT(state != NULL);
11079
11080 if (probe != NULL && probe->dtpr_gen < enab->dten_probegen) {
11081 /*
11082 * This probe was created in a generation for which this
11083 * enabling has previously created ECBs; we don't want to
11084 * enable it again, so just kick out.
11085 */
11086 return (DTRACE_MATCH_NEXT);
11087 }
11088
11089 if ((ecb = dtrace_ecb_create(state, probe, enab)) == NULL)
11090 return (DTRACE_MATCH_DONE);
11091
11092 if (dtrace_ecb_enable(ecb) < 0)
11093 return (DTRACE_MATCH_FAIL);
11094
11095 return (DTRACE_MATCH_NEXT);
11096 }
11097
11098 static dtrace_ecb_t *
11099 dtrace_epid2ecb(dtrace_state_t *state, dtrace_epid_t id)
11100 {
11101 dtrace_ecb_t *ecb;
11102
11103 ASSERT(MUTEX_HELD(&dtrace_lock));
11104
11105 if (id == 0 || id > state->dts_necbs)
11106 return (NULL);
11107
11108 ASSERT(state->dts_necbs > 0 && state->dts_ecbs != NULL);
11109 ASSERT((ecb = state->dts_ecbs[id - 1]) == NULL || ecb->dte_epid == id);
11110
11111 return (state->dts_ecbs[id - 1]);
11112 }
11113
11114 static dtrace_aggregation_t *
11115 dtrace_aggid2agg(dtrace_state_t *state, dtrace_aggid_t id)
11116 {
11117 dtrace_aggregation_t *agg;
11118
11119 ASSERT(MUTEX_HELD(&dtrace_lock));
11120
11121 if (id == 0 || id > state->dts_naggregations)
11122 return (NULL);
11123
11124 ASSERT(state->dts_naggregations > 0 && state->dts_aggregations != NULL);
11125 ASSERT((agg = state->dts_aggregations[id - 1]) == NULL ||
11126 agg->dtag_id == id);
11127
11128 return (state->dts_aggregations[id - 1]);
11129 }
11130
11131 /*
11132 * DTrace Buffer Functions
11133 *
11134 * The following functions manipulate DTrace buffers. Most of these functions
11135 * are called in the context of establishing or processing consumer state;
11136 * exceptions are explicitly noted.
11137 */
11138
11139 /*
11140 * Note: called from cross call context. This function switches the two
11141 * buffers on a given CPU. The atomicity of this operation is assured by
11142 * disabling interrupts while the actual switch takes place; the disabling of
11143 * interrupts serializes the execution with any execution of dtrace_probe() on
11144 * the same CPU.
11145 */
11146 static void
11147 dtrace_buffer_switch(dtrace_buffer_t *buf)
11148 {
11149 caddr_t tomax = buf->dtb_tomax;
11150 caddr_t xamot = buf->dtb_xamot;
11151 dtrace_icookie_t cookie;
11152 hrtime_t now;
11153
11154 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH));
11155 ASSERT(!(buf->dtb_flags & DTRACEBUF_RING));
11156
11157 cookie = dtrace_interrupt_disable();
11158 now = dtrace_gethrtime();
11159 buf->dtb_tomax = xamot;
11160 buf->dtb_xamot = tomax;
11161 buf->dtb_xamot_drops = buf->dtb_drops;
11162 buf->dtb_xamot_offset = buf->dtb_offset;
11163 buf->dtb_xamot_errors = buf->dtb_errors;
11164 buf->dtb_xamot_flags = buf->dtb_flags;
11165 buf->dtb_offset = 0;
11166 buf->dtb_drops = 0;
11167 buf->dtb_errors = 0;
11168 buf->dtb_flags &= ~(DTRACEBUF_ERROR | DTRACEBUF_DROPPED);
11169 buf->dtb_interval = now - buf->dtb_switched;
11170 buf->dtb_switched = now;
11171 dtrace_interrupt_enable(cookie);
11172 }
11173
11174 /*
11175 * Note: called from cross call context. This function activates a buffer
11176 * on a CPU. As with dtrace_buffer_switch(), the atomicity of the operation
11177 * is guaranteed by the disabling of interrupts.
11178 */
11179 static void
11180 dtrace_buffer_activate(dtrace_state_t *state)
11181 {
11182 dtrace_buffer_t *buf;
11183 dtrace_icookie_t cookie = dtrace_interrupt_disable();
11184
11185 buf = &state->dts_buffer[CPU->cpu_id];
11186
11187 if (buf->dtb_tomax != NULL) {
11188 /*
11189 * We might like to assert that the buffer is marked inactive,
11190 * but this isn't necessarily true: the buffer for the CPU
11191 * that processes the BEGIN probe has its buffer activated
11192 * manually. In this case, we take the (harmless) action
11193 * re-clearing the bit INACTIVE bit.
11194 */
11195 buf->dtb_flags &= ~DTRACEBUF_INACTIVE;
11196 }
11197
11198 dtrace_interrupt_enable(cookie);
11199 }
11200
11201 static int
11202 dtrace_buffer_alloc(dtrace_buffer_t *bufs, size_t size, int flags,
11203 processorid_t cpu, int *factor)
11204 {
11205 cpu_t *cp;
11206 dtrace_buffer_t *buf;
11207 int allocated = 0, desired = 0;
11208
11209 ASSERT(MUTEX_HELD(&cpu_lock));
11210 ASSERT(MUTEX_HELD(&dtrace_lock));
11211
11212 *factor = 1;
11213
11214 if (size > dtrace_nonroot_maxsize &&
11215 !PRIV_POLICY_CHOICE(CRED(), PRIV_ALL, B_FALSE))
11216 return (EFBIG);
11217
11218 cp = cpu_list;
11219
11220 do {
11221 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id)
11222 continue;
11223
11224 buf = &bufs[cp->cpu_id];
11225
11226 /*
11227 * If there is already a buffer allocated for this CPU, it
11228 * is only possible that this is a DR event. In this case,
11229 * the buffer size must match our specified size.
11230 */
11231 if (buf->dtb_tomax != NULL) {
11232 ASSERT(buf->dtb_size == size);
11233 continue;
11234 }
11235
11236 ASSERT(buf->dtb_xamot == NULL);
11237
11238 if ((buf->dtb_tomax = kmem_zalloc(size,
11239 KM_NOSLEEP | KM_NORMALPRI)) == NULL)
11240 goto err;
11241
11242 buf->dtb_size = size;
11243 buf->dtb_flags = flags;
11244 buf->dtb_offset = 0;
11245 buf->dtb_drops = 0;
11246
11247 if (flags & DTRACEBUF_NOSWITCH)
11248 continue;
11249
11250 if ((buf->dtb_xamot = kmem_zalloc(size,
11251 KM_NOSLEEP | KM_NORMALPRI)) == NULL)
11252 goto err;
11253 } while ((cp = cp->cpu_next) != cpu_list);
11254
11255 return (0);
11256
11257 err:
11258 cp = cpu_list;
11259
11260 do {
11261 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id)
11262 continue;
11263
11264 buf = &bufs[cp->cpu_id];
11265 desired += 2;
11266
11267 if (buf->dtb_xamot != NULL) {
11268 ASSERT(buf->dtb_tomax != NULL);
11269 ASSERT(buf->dtb_size == size);
11270 kmem_free(buf->dtb_xamot, size);
11271 allocated++;
11272 }
11273
11274 if (buf->dtb_tomax != NULL) {
11275 ASSERT(buf->dtb_size == size);
11276 kmem_free(buf->dtb_tomax, size);
11277 allocated++;
11278 }
11279
11280 buf->dtb_tomax = NULL;
11281 buf->dtb_xamot = NULL;
11282 buf->dtb_size = 0;
11283 } while ((cp = cp->cpu_next) != cpu_list);
11284
11285 *factor = desired / (allocated > 0 ? allocated : 1);
11286
11287 return (ENOMEM);
11288 }
11289
11290 /*
11291 * Note: called from probe context. This function just increments the drop
11292 * count on a buffer. It has been made a function to allow for the
11293 * possibility of understanding the source of mysterious drop counts. (A
11294 * problem for which one may be particularly disappointed that DTrace cannot
11295 * be used to understand DTrace.)
11296 */
11297 static void
11298 dtrace_buffer_drop(dtrace_buffer_t *buf)
11299 {
11300 buf->dtb_drops++;
11301 }
11302
11303 /*
11304 * Note: called from probe context. This function is called to reserve space
11305 * in a buffer. If mstate is non-NULL, sets the scratch base and size in the
11306 * mstate. Returns the new offset in the buffer, or a negative value if an
11307 * error has occurred.
11308 */
11309 static intptr_t
11310 dtrace_buffer_reserve(dtrace_buffer_t *buf, size_t needed, size_t align,
11311 dtrace_state_t *state, dtrace_mstate_t *mstate)
11312 {
11313 intptr_t offs = buf->dtb_offset, soffs;
11314 intptr_t woffs;
11315 caddr_t tomax;
11316 size_t total;
11317
11318 if (buf->dtb_flags & DTRACEBUF_INACTIVE)
11319 return (-1);
11320
11321 if ((tomax = buf->dtb_tomax) == NULL) {
11322 dtrace_buffer_drop(buf);
11323 return (-1);
11324 }
11325
11326 if (!(buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL))) {
11327 while (offs & (align - 1)) {
11328 /*
11329 * Assert that our alignment is off by a number which
11330 * is itself sizeof (uint32_t) aligned.
11331 */
11332 ASSERT(!((align - (offs & (align - 1))) &
11333 (sizeof (uint32_t) - 1)));
11334 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE);
11335 offs += sizeof (uint32_t);
11336 }
11337
11338 if ((soffs = offs + needed) > buf->dtb_size) {
11339 dtrace_buffer_drop(buf);
11340 return (-1);
11341 }
11342
11343 if (mstate == NULL)
11344 return (offs);
11345
11346 mstate->dtms_scratch_base = (uintptr_t)tomax + soffs;
11347 mstate->dtms_scratch_size = buf->dtb_size - soffs;
11348 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base;
11349
11350 return (offs);
11351 }
11352
11353 if (buf->dtb_flags & DTRACEBUF_FILL) {
11354 if (state->dts_activity != DTRACE_ACTIVITY_COOLDOWN &&
11355 (buf->dtb_flags & DTRACEBUF_FULL))
11356 return (-1);
11357 goto out;
11358 }
11359
11360 total = needed + (offs & (align - 1));
11361
11362 /*
11363 * For a ring buffer, life is quite a bit more complicated. Before
11364 * we can store any padding, we need to adjust our wrapping offset.
11365 * (If we've never before wrapped or we're not about to, no adjustment
11366 * is required.)
11367 */
11368 if ((buf->dtb_flags & DTRACEBUF_WRAPPED) ||
11369 offs + total > buf->dtb_size) {
11370 woffs = buf->dtb_xamot_offset;
11371
11372 if (offs + total > buf->dtb_size) {
11373 /*
11374 * We can't fit in the end of the buffer. First, a
11375 * sanity check that we can fit in the buffer at all.
11376 */
11377 if (total > buf->dtb_size) {
11378 dtrace_buffer_drop(buf);
11379 return (-1);
11380 }
11381
11382 /*
11383 * We're going to be storing at the top of the buffer,
11384 * so now we need to deal with the wrapped offset. We
11385 * only reset our wrapped offset to 0 if it is
11386 * currently greater than the current offset. If it
11387 * is less than the current offset, it is because a
11388 * previous allocation induced a wrap -- but the
11389 * allocation didn't subsequently take the space due
11390 * to an error or false predicate evaluation. In this
11391 * case, we'll just leave the wrapped offset alone: if
11392 * the wrapped offset hasn't been advanced far enough
11393 * for this allocation, it will be adjusted in the
11394 * lower loop.
11395 */
11396 if (buf->dtb_flags & DTRACEBUF_WRAPPED) {
11397 if (woffs >= offs)
11398 woffs = 0;
11399 } else {
11400 woffs = 0;
11401 }
11402
11403 /*
11404 * Now we know that we're going to be storing to the
11405 * top of the buffer and that there is room for us
11406 * there. We need to clear the buffer from the current
11407 * offset to the end (there may be old gunk there).
11408 */
11409 while (offs < buf->dtb_size)
11410 tomax[offs++] = 0;
11411
11412 /*
11413 * We need to set our offset to zero. And because we
11414 * are wrapping, we need to set the bit indicating as
11415 * much. We can also adjust our needed space back
11416 * down to the space required by the ECB -- we know
11417 * that the top of the buffer is aligned.
11418 */
11419 offs = 0;
11420 total = needed;
11421 buf->dtb_flags |= DTRACEBUF_WRAPPED;
11422 } else {
11423 /*
11424 * There is room for us in the buffer, so we simply
11425 * need to check the wrapped offset.
11426 */
11427 if (woffs < offs) {
11428 /*
11429 * The wrapped offset is less than the offset.
11430 * This can happen if we allocated buffer space
11431 * that induced a wrap, but then we didn't
11432 * subsequently take the space due to an error
11433 * or false predicate evaluation. This is
11434 * okay; we know that _this_ allocation isn't
11435 * going to induce a wrap. We still can't
11436 * reset the wrapped offset to be zero,
11437 * however: the space may have been trashed in
11438 * the previous failed probe attempt. But at
11439 * least the wrapped offset doesn't need to
11440 * be adjusted at all...
11441 */
11442 goto out;
11443 }
11444 }
11445
11446 while (offs + total > woffs) {
11447 dtrace_epid_t epid = *(uint32_t *)(tomax + woffs);
11448 size_t size;
11449
11450 if (epid == DTRACE_EPIDNONE) {
11451 size = sizeof (uint32_t);
11452 } else {
11453 ASSERT3U(epid, <=, state->dts_necbs);
11454 ASSERT(state->dts_ecbs[epid - 1] != NULL);
11455
11456 size = state->dts_ecbs[epid - 1]->dte_size;
11457 }
11458
11459 ASSERT(woffs + size <= buf->dtb_size);
11460 ASSERT(size != 0);
11461
11462 if (woffs + size == buf->dtb_size) {
11463 /*
11464 * We've reached the end of the buffer; we want
11465 * to set the wrapped offset to 0 and break
11466 * out. However, if the offs is 0, then we're
11467 * in a strange edge-condition: the amount of
11468 * space that we want to reserve plus the size
11469 * of the record that we're overwriting is
11470 * greater than the size of the buffer. This
11471 * is problematic because if we reserve the
11472 * space but subsequently don't consume it (due
11473 * to a failed predicate or error) the wrapped
11474 * offset will be 0 -- yet the EPID at offset 0
11475 * will not be committed. This situation is
11476 * relatively easy to deal with: if we're in
11477 * this case, the buffer is indistinguishable
11478 * from one that hasn't wrapped; we need only
11479 * finish the job by clearing the wrapped bit,
11480 * explicitly setting the offset to be 0, and
11481 * zero'ing out the old data in the buffer.
11482 */
11483 if (offs == 0) {
11484 buf->dtb_flags &= ~DTRACEBUF_WRAPPED;
11485 buf->dtb_offset = 0;
11486 woffs = total;
11487
11488 while (woffs < buf->dtb_size)
11489 tomax[woffs++] = 0;
11490 }
11491
11492 woffs = 0;
11493 break;
11494 }
11495
11496 woffs += size;
11497 }
11498
11499 /*
11500 * We have a wrapped offset. It may be that the wrapped offset
11501 * has become zero -- that's okay.
11502 */
11503 buf->dtb_xamot_offset = woffs;
11504 }
11505
11506 out:
11507 /*
11508 * Now we can plow the buffer with any necessary padding.
11509 */
11510 while (offs & (align - 1)) {
11511 /*
11512 * Assert that our alignment is off by a number which
11513 * is itself sizeof (uint32_t) aligned.
11514 */
11515 ASSERT(!((align - (offs & (align - 1))) &
11516 (sizeof (uint32_t) - 1)));
11517 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE);
11518 offs += sizeof (uint32_t);
11519 }
11520
11521 if (buf->dtb_flags & DTRACEBUF_FILL) {
11522 if (offs + needed > buf->dtb_size - state->dts_reserve) {
11523 buf->dtb_flags |= DTRACEBUF_FULL;
11524 return (-1);
11525 }
11526 }
11527
11528 if (mstate == NULL)
11529 return (offs);
11530
11531 /*
11532 * For ring buffers and fill buffers, the scratch space is always
11533 * the inactive buffer.
11534 */
11535 mstate->dtms_scratch_base = (uintptr_t)buf->dtb_xamot;
11536 mstate->dtms_scratch_size = buf->dtb_size;
11537 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base;
11538
11539 return (offs);
11540 }
11541
11542 static void
11543 dtrace_buffer_polish(dtrace_buffer_t *buf)
11544 {
11545 ASSERT(buf->dtb_flags & DTRACEBUF_RING);
11546 ASSERT(MUTEX_HELD(&dtrace_lock));
11547
11548 if (!(buf->dtb_flags & DTRACEBUF_WRAPPED))
11549 return;
11550
11551 /*
11552 * We need to polish the ring buffer. There are three cases:
11553 *
11554 * - The first (and presumably most common) is that there is no gap
11555 * between the buffer offset and the wrapped offset. In this case,
11556 * there is nothing in the buffer that isn't valid data; we can
11557 * mark the buffer as polished and return.
11558 *
11559 * - The second (less common than the first but still more common
11560 * than the third) is that there is a gap between the buffer offset
11561 * and the wrapped offset, and the wrapped offset is larger than the
11562 * buffer offset. This can happen because of an alignment issue, or
11563 * can happen because of a call to dtrace_buffer_reserve() that
11564 * didn't subsequently consume the buffer space. In this case,
11565 * we need to zero the data from the buffer offset to the wrapped
11566 * offset.
11567 *
11568 * - The third (and least common) is that there is a gap between the
11569 * buffer offset and the wrapped offset, but the wrapped offset is
11570 * _less_ than the buffer offset. This can only happen because a
11571 * call to dtrace_buffer_reserve() induced a wrap, but the space
11572 * was not subsequently consumed. In this case, we need to zero the
11573 * space from the offset to the end of the buffer _and_ from the
11574 * top of the buffer to the wrapped offset.
11575 */
11576 if (buf->dtb_offset < buf->dtb_xamot_offset) {
11577 bzero(buf->dtb_tomax + buf->dtb_offset,
11578 buf->dtb_xamot_offset - buf->dtb_offset);
11579 }
11580
11581 if (buf->dtb_offset > buf->dtb_xamot_offset) {
11582 bzero(buf->dtb_tomax + buf->dtb_offset,
11583 buf->dtb_size - buf->dtb_offset);
11584 bzero(buf->dtb_tomax, buf->dtb_xamot_offset);
11585 }
11586 }
11587
11588 /*
11589 * This routine determines if data generated at the specified time has likely
11590 * been entirely consumed at user-level. This routine is called to determine
11591 * if an ECB on a defunct probe (but for an active enabling) can be safely
11592 * disabled and destroyed.
11593 */
11594 static int
11595 dtrace_buffer_consumed(dtrace_buffer_t *bufs, hrtime_t when)
11596 {
11597 int i;
11598
11599 for (i = 0; i < NCPU; i++) {
11600 dtrace_buffer_t *buf = &bufs[i];
11601
11602 if (buf->dtb_size == 0)
11603 continue;
11604
11605 if (buf->dtb_flags & DTRACEBUF_RING)
11606 return (0);
11607
11608 if (!buf->dtb_switched && buf->dtb_offset != 0)
11609 return (0);
11610
11611 if (buf->dtb_switched - buf->dtb_interval < when)
11612 return (0);
11613 }
11614
11615 return (1);
11616 }
11617
11618 static void
11619 dtrace_buffer_free(dtrace_buffer_t *bufs)
11620 {
11621 int i;
11622
11623 for (i = 0; i < NCPU; i++) {
11624 dtrace_buffer_t *buf = &bufs[i];
11625
11626 if (buf->dtb_tomax == NULL) {
11627 ASSERT(buf->dtb_xamot == NULL);
11628 ASSERT(buf->dtb_size == 0);
11629 continue;
11630 }
11631
11632 if (buf->dtb_xamot != NULL) {
11633 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH));
11634 kmem_free(buf->dtb_xamot, buf->dtb_size);
11635 }
11636
11637 kmem_free(buf->dtb_tomax, buf->dtb_size);
11638 buf->dtb_size = 0;
11639 buf->dtb_tomax = NULL;
11640 buf->dtb_xamot = NULL;
11641 }
11642 }
11643
11644 /*
11645 * DTrace Enabling Functions
11646 */
11647 static dtrace_enabling_t *
11648 dtrace_enabling_create(dtrace_vstate_t *vstate)
11649 {
11650 dtrace_enabling_t *enab;
11651
11652 enab = kmem_zalloc(sizeof (dtrace_enabling_t), KM_SLEEP);
11653 enab->dten_vstate = vstate;
11654
11655 return (enab);
11656 }
11657
11658 static void
11659 dtrace_enabling_add(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb)
11660 {
11661 dtrace_ecbdesc_t **ndesc;
11662 size_t osize, nsize;
11663
11664 /*
11665 * We can't add to enablings after we've enabled them, or after we've
11666 * retained them.
11667 */
11668 ASSERT(enab->dten_probegen == 0);
11669 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL);
11670
11671 if (enab->dten_ndesc < enab->dten_maxdesc) {
11672 enab->dten_desc[enab->dten_ndesc++] = ecb;
11673 return;
11674 }
11675
11676 osize = enab->dten_maxdesc * sizeof (dtrace_enabling_t *);
11677
11678 if (enab->dten_maxdesc == 0) {
11679 enab->dten_maxdesc = 1;
11680 } else {
11681 enab->dten_maxdesc <<= 1;
11682 }
11683
11684 ASSERT(enab->dten_ndesc < enab->dten_maxdesc);
11685
11686 nsize = enab->dten_maxdesc * sizeof (dtrace_enabling_t *);
11687 ndesc = kmem_zalloc(nsize, KM_SLEEP);
11688 bcopy(enab->dten_desc, ndesc, osize);
11689 kmem_free(enab->dten_desc, osize);
11690
11691 enab->dten_desc = ndesc;
11692 enab->dten_desc[enab->dten_ndesc++] = ecb;
11693 }
11694
11695 static void
11696 dtrace_enabling_addlike(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb,
11697 dtrace_probedesc_t *pd)
11698 {
11699 dtrace_ecbdesc_t *new;
11700 dtrace_predicate_t *pred;
11701 dtrace_actdesc_t *act;
11702
11703 /*
11704 * We're going to create a new ECB description that matches the
11705 * specified ECB in every way, but has the specified probe description.
11706 */
11707 new = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP);
11708
11709 if ((pred = ecb->dted_pred.dtpdd_predicate) != NULL)
11710 dtrace_predicate_hold(pred);
11711
11712 for (act = ecb->dted_action; act != NULL; act = act->dtad_next)
11713 dtrace_actdesc_hold(act);
11714
11715 new->dted_action = ecb->dted_action;
11716 new->dted_pred = ecb->dted_pred;
11717 new->dted_probe = *pd;
11718 new->dted_uarg = ecb->dted_uarg;
11719
11720 dtrace_enabling_add(enab, new);
11721 }
11722
11723 static void
11724 dtrace_enabling_dump(dtrace_enabling_t *enab)
11725 {
11726 int i;
11727
11728 for (i = 0; i < enab->dten_ndesc; i++) {
11729 dtrace_probedesc_t *desc = &enab->dten_desc[i]->dted_probe;
11730
11731 cmn_err(CE_NOTE, "enabling probe %d (%s:%s:%s:%s)", i,
11732 desc->dtpd_provider, desc->dtpd_mod,
11733 desc->dtpd_func, desc->dtpd_name);
11734 }
11735 }
11736
11737 static void
11738 dtrace_enabling_destroy(dtrace_enabling_t *enab)
11739 {
11740 int i;
11741 dtrace_ecbdesc_t *ep;
11742 dtrace_vstate_t *vstate = enab->dten_vstate;
11743
11744 ASSERT(MUTEX_HELD(&dtrace_lock));
11745
11746 for (i = 0; i < enab->dten_ndesc; i++) {
11747 dtrace_actdesc_t *act, *next;
11748 dtrace_predicate_t *pred;
11749
11750 ep = enab->dten_desc[i];
11751
11752 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL)
11753 dtrace_predicate_release(pred, vstate);
11754
11755 for (act = ep->dted_action; act != NULL; act = next) {
11756 next = act->dtad_next;
11757 dtrace_actdesc_release(act, vstate);
11758 }
11759
11760 kmem_free(ep, sizeof (dtrace_ecbdesc_t));
11761 }
11762
11763 kmem_free(enab->dten_desc,
11764 enab->dten_maxdesc * sizeof (dtrace_enabling_t *));
11765
11766 /*
11767 * If this was a retained enabling, decrement the dts_nretained count
11768 * and take it off of the dtrace_retained list.
11769 */
11770 if (enab->dten_prev != NULL || enab->dten_next != NULL ||
11771 dtrace_retained == enab) {
11772 ASSERT(enab->dten_vstate->dtvs_state != NULL);
11773 ASSERT(enab->dten_vstate->dtvs_state->dts_nretained > 0);
11774 enab->dten_vstate->dtvs_state->dts_nretained--;
11775 dtrace_retained_gen++;
11776 }
11777
11778 if (enab->dten_prev == NULL) {
11779 if (dtrace_retained == enab) {
11780 dtrace_retained = enab->dten_next;
11781
11782 if (dtrace_retained != NULL)
11783 dtrace_retained->dten_prev = NULL;
11784 }
11785 } else {
11786 ASSERT(enab != dtrace_retained);
11787 ASSERT(dtrace_retained != NULL);
11788 enab->dten_prev->dten_next = enab->dten_next;
11789 }
11790
11791 if (enab->dten_next != NULL) {
11792 ASSERT(dtrace_retained != NULL);
11793 enab->dten_next->dten_prev = enab->dten_prev;
11794 }
11795
11796 kmem_free(enab, sizeof (dtrace_enabling_t));
11797 }
11798
11799 static int
11800 dtrace_enabling_retain(dtrace_enabling_t *enab)
11801 {
11802 dtrace_state_t *state;
11803
11804 ASSERT(MUTEX_HELD(&dtrace_lock));
11805 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL);
11806 ASSERT(enab->dten_vstate != NULL);
11807
11808 state = enab->dten_vstate->dtvs_state;
11809 ASSERT(state != NULL);
11810
11811 /*
11812 * We only allow each state to retain dtrace_retain_max enablings.
11813 */
11814 if (state->dts_nretained >= dtrace_retain_max)
11815 return (ENOSPC);
11816
11817 state->dts_nretained++;
11818 dtrace_retained_gen++;
11819
11820 if (dtrace_retained == NULL) {
11821 dtrace_retained = enab;
11822 return (0);
11823 }
11824
11825 enab->dten_next = dtrace_retained;
11826 dtrace_retained->dten_prev = enab;
11827 dtrace_retained = enab;
11828
11829 return (0);
11830 }
11831
11832 static int
11833 dtrace_enabling_replicate(dtrace_state_t *state, dtrace_probedesc_t *match,
11834 dtrace_probedesc_t *create)
11835 {
11836 dtrace_enabling_t *new, *enab;
11837 int found = 0, err = ENOENT;
11838
11839 ASSERT(MUTEX_HELD(&dtrace_lock));
11840 ASSERT(strlen(match->dtpd_provider) < DTRACE_PROVNAMELEN);
11841 ASSERT(strlen(match->dtpd_mod) < DTRACE_MODNAMELEN);
11842 ASSERT(strlen(match->dtpd_func) < DTRACE_FUNCNAMELEN);
11843 ASSERT(strlen(match->dtpd_name) < DTRACE_NAMELEN);
11844
11845 new = dtrace_enabling_create(&state->dts_vstate);
11846
11847 /*
11848 * Iterate over all retained enablings, looking for enablings that
11849 * match the specified state.
11850 */
11851 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) {
11852 int i;
11853
11854 /*
11855 * dtvs_state can only be NULL for helper enablings -- and
11856 * helper enablings can't be retained.
11857 */
11858 ASSERT(enab->dten_vstate->dtvs_state != NULL);
11859
11860 if (enab->dten_vstate->dtvs_state != state)
11861 continue;
11862
11863 /*
11864 * Now iterate over each probe description; we're looking for
11865 * an exact match to the specified probe description.
11866 */
11867 for (i = 0; i < enab->dten_ndesc; i++) {
11868 dtrace_ecbdesc_t *ep = enab->dten_desc[i];
11869 dtrace_probedesc_t *pd = &ep->dted_probe;
11870
11871 if (strcmp(pd->dtpd_provider, match->dtpd_provider))
11872 continue;
11873
11874 if (strcmp(pd->dtpd_mod, match->dtpd_mod))
11875 continue;
11876
11877 if (strcmp(pd->dtpd_func, match->dtpd_func))
11878 continue;
11879
11880 if (strcmp(pd->dtpd_name, match->dtpd_name))
11881 continue;
11882
11883 /*
11884 * We have a winning probe! Add it to our growing
11885 * enabling.
11886 */
11887 found = 1;
11888 dtrace_enabling_addlike(new, ep, create);
11889 }
11890 }
11891
11892 if (!found || (err = dtrace_enabling_retain(new)) != 0) {
11893 dtrace_enabling_destroy(new);
11894 return (err);
11895 }
11896
11897 return (0);
11898 }
11899
11900 static void
11901 dtrace_enabling_retract(dtrace_state_t *state)
11902 {
11903 dtrace_enabling_t *enab, *next;
11904
11905 ASSERT(MUTEX_HELD(&dtrace_lock));
11906
11907 /*
11908 * Iterate over all retained enablings, destroy the enablings retained
11909 * for the specified state.
11910 */
11911 for (enab = dtrace_retained; enab != NULL; enab = next) {
11912 next = enab->dten_next;
11913
11914 /*
11915 * dtvs_state can only be NULL for helper enablings -- and
11916 * helper enablings can't be retained.
11917 */
11918 ASSERT(enab->dten_vstate->dtvs_state != NULL);
11919
11920 if (enab->dten_vstate->dtvs_state == state) {
11921 ASSERT(state->dts_nretained > 0);
11922 dtrace_enabling_destroy(enab);
11923 }
11924 }
11925
11926 ASSERT(state->dts_nretained == 0);
11927 }
11928
11929 static int
11930 dtrace_enabling_match(dtrace_enabling_t *enab, int *nmatched)
11931 {
11932 int i = 0;
11933 int total_matched = 0, matched = 0;
11934
11935 ASSERT(MUTEX_HELD(&cpu_lock));
11936 ASSERT(MUTEX_HELD(&dtrace_lock));
11937
11938 for (i = 0; i < enab->dten_ndesc; i++) {
11939 dtrace_ecbdesc_t *ep = enab->dten_desc[i];
11940
11941 enab->dten_current = ep;
11942 enab->dten_error = 0;
11943
11944 /*
11945 * If a provider failed to enable a probe then get out and
11946 * let the consumer know we failed.
11947 */
11948 if ((matched = dtrace_probe_enable(&ep->dted_probe, enab)) < 0)
11949 return (EBUSY);
11950
11951 total_matched += matched;
11952
11953 if (enab->dten_error != 0) {
11954 /*
11955 * If we get an error half-way through enabling the
11956 * probes, we kick out -- perhaps with some number of
11957 * them enabled. Leaving enabled probes enabled may
11958 * be slightly confusing for user-level, but we expect
11959 * that no one will attempt to actually drive on in
11960 * the face of such errors. If this is an anonymous
11961 * enabling (indicated with a NULL nmatched pointer),
11962 * we cmn_err() a message. We aren't expecting to
11963 * get such an error -- such as it can exist at all,
11964 * it would be a result of corrupted DOF in the driver
11965 * properties.
11966 */
11967 if (nmatched == NULL) {
11968 cmn_err(CE_WARN, "dtrace_enabling_match() "
11969 "error on %p: %d", (void *)ep,
11970 enab->dten_error);
11971 }
11972
11973 return (enab->dten_error);
11974 }
11975 }
11976
11977 enab->dten_probegen = dtrace_probegen;
11978 if (nmatched != NULL)
11979 *nmatched = total_matched;
11980
11981 return (0);
11982 }
11983
11984 static void
11985 dtrace_enabling_matchall(void)
11986 {
11987 dtrace_enabling_t *enab;
11988
11989 mutex_enter(&cpu_lock);
11990 mutex_enter(&dtrace_lock);
11991
11992 /*
11993 * Iterate over all retained enablings to see if any probes match
11994 * against them. We only perform this operation on enablings for which
11995 * we have sufficient permissions by virtue of being in the global zone
11996 * or in the same zone as the DTrace client. Because we can be called
11997 * after dtrace_detach() has been called, we cannot assert that there
11998 * are retained enablings. We can safely load from dtrace_retained,
11999 * however: the taskq_destroy() at the end of dtrace_detach() will
12000 * block pending our completion.
12001 */
12002 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) {
12003 dtrace_cred_t *dcr = &enab->dten_vstate->dtvs_state->dts_cred;
12004 cred_t *cr = dcr->dcr_cred;
12005 zoneid_t zone = cr != NULL ? crgetzoneid(cr) : 0;
12006
12007 if ((dcr->dcr_visible & DTRACE_CRV_ALLZONE) || (cr != NULL &&
12008 (zone == GLOBAL_ZONEID || getzoneid() == zone)))
12009 (void) dtrace_enabling_match(enab, NULL);
12010 }
12011
12012 mutex_exit(&dtrace_lock);
12013 mutex_exit(&cpu_lock);
12014 }
12015
12016 /*
12017 * If an enabling is to be enabled without having matched probes (that is, if
12018 * dtrace_state_go() is to be called on the underlying dtrace_state_t), the
12019 * enabling must be _primed_ by creating an ECB for every ECB description.
12020 * This must be done to assure that we know the number of speculations, the
12021 * number of aggregations, the minimum buffer size needed, etc. before we
12022 * transition out of DTRACE_ACTIVITY_INACTIVE. To do this without actually
12023 * enabling any probes, we create ECBs for every ECB decription, but with a
12024 * NULL probe -- which is exactly what this function does.
12025 */
12026 static void
12027 dtrace_enabling_prime(dtrace_state_t *state)
12028 {
12029 dtrace_enabling_t *enab;
12030 int i;
12031
12032 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) {
12033 ASSERT(enab->dten_vstate->dtvs_state != NULL);
12034
12035 if (enab->dten_vstate->dtvs_state != state)
12036 continue;
12037
12038 /*
12039 * We don't want to prime an enabling more than once, lest
12040 * we allow a malicious user to induce resource exhaustion.
12041 * (The ECBs that result from priming an enabling aren't
12042 * leaked -- but they also aren't deallocated until the
12043 * consumer state is destroyed.)
12044 */
12045 if (enab->dten_primed)
12046 continue;
12047
12048 for (i = 0; i < enab->dten_ndesc; i++) {
12049 enab->dten_current = enab->dten_desc[i];
12050 (void) dtrace_probe_enable(NULL, enab);
12051 }
12052
12053 enab->dten_primed = 1;
12054 }
12055 }
12056
12057 /*
12058 * Called to indicate that probes should be provided due to retained
12059 * enablings. This is implemented in terms of dtrace_probe_provide(), but it
12060 * must take an initial lap through the enabling calling the dtps_provide()
12061 * entry point explicitly to allow for autocreated probes.
12062 */
12063 static void
12064 dtrace_enabling_provide(dtrace_provider_t *prv)
12065 {
12066 int i, all = 0;
12067 dtrace_probedesc_t desc;
12068 dtrace_genid_t gen;
12069
12070 ASSERT(MUTEX_HELD(&dtrace_lock));
12071 ASSERT(MUTEX_HELD(&dtrace_provider_lock));
12072
12073 if (prv == NULL) {
12074 all = 1;
12075 prv = dtrace_provider;
12076 }
12077
12078 do {
12079 dtrace_enabling_t *enab;
12080 void *parg = prv->dtpv_arg;
12081
12082 retry:
12083 gen = dtrace_retained_gen;
12084 for (enab = dtrace_retained; enab != NULL;
12085 enab = enab->dten_next) {
12086 for (i = 0; i < enab->dten_ndesc; i++) {
12087 desc = enab->dten_desc[i]->dted_probe;
12088 mutex_exit(&dtrace_lock);
12089 prv->dtpv_pops.dtps_provide(parg, &desc);
12090 mutex_enter(&dtrace_lock);
12091 /*
12092 * Process the retained enablings again if
12093 * they have changed while we weren't holding
12094 * dtrace_lock.
12095 */
12096 if (gen != dtrace_retained_gen)
12097 goto retry;
12098 }
12099 }
12100 } while (all && (prv = prv->dtpv_next) != NULL);
12101
12102 mutex_exit(&dtrace_lock);
12103 dtrace_probe_provide(NULL, all ? NULL : prv);
12104 mutex_enter(&dtrace_lock);
12105 }
12106
12107 /*
12108 * Called to reap ECBs that are attached to probes from defunct providers.
12109 */
12110 static void
12111 dtrace_enabling_reap(void)
12112 {
12113 dtrace_provider_t *prov;
12114 dtrace_probe_t *probe;
12115 dtrace_ecb_t *ecb;
12116 hrtime_t when;
12117 int i;
12118
12119 mutex_enter(&cpu_lock);
12120 mutex_enter(&dtrace_lock);
12121
12122 for (i = 0; i < dtrace_nprobes; i++) {
12123 if ((probe = dtrace_probes[i]) == NULL)
12124 continue;
12125
12126 if (probe->dtpr_ecb == NULL)
12127 continue;
12128
12129 prov = probe->dtpr_provider;
12130
12131 if ((when = prov->dtpv_defunct) == 0)
12132 continue;
12133
12134 /*
12135 * We have ECBs on a defunct provider: we want to reap these
12136 * ECBs to allow the provider to unregister. The destruction
12137 * of these ECBs must be done carefully: if we destroy the ECB
12138 * and the consumer later wishes to consume an EPID that
12139 * corresponds to the destroyed ECB (and if the EPID metadata
12140 * has not been previously consumed), the consumer will abort
12141 * processing on the unknown EPID. To reduce (but not, sadly,
12142 * eliminate) the possibility of this, we will only destroy an
12143 * ECB for a defunct provider if, for the state that
12144 * corresponds to the ECB:
12145 *
12146 * (a) There is no speculative tracing (which can effectively
12147 * cache an EPID for an arbitrary amount of time).
12148 *
12149 * (b) The principal buffers have been switched twice since the
12150 * provider became defunct.
12151 *
12152 * (c) The aggregation buffers are of zero size or have been
12153 * switched twice since the provider became defunct.
12154 *
12155 * We use dts_speculates to determine (a) and call a function
12156 * (dtrace_buffer_consumed()) to determine (b) and (c). Note
12157 * that as soon as we've been unable to destroy one of the ECBs
12158 * associated with the probe, we quit trying -- reaping is only
12159 * fruitful in as much as we can destroy all ECBs associated
12160 * with the defunct provider's probes.
12161 */
12162 while ((ecb = probe->dtpr_ecb) != NULL) {
12163 dtrace_state_t *state = ecb->dte_state;
12164 dtrace_buffer_t *buf = state->dts_buffer;
12165 dtrace_buffer_t *aggbuf = state->dts_aggbuffer;
12166
12167 if (state->dts_speculates)
12168 break;
12169
12170 if (!dtrace_buffer_consumed(buf, when))
12171 break;
12172
12173 if (!dtrace_buffer_consumed(aggbuf, when))
12174 break;
12175
12176 dtrace_ecb_disable(ecb);
12177 ASSERT(probe->dtpr_ecb != ecb);
12178 dtrace_ecb_destroy(ecb);
12179 }
12180 }
12181
12182 mutex_exit(&dtrace_lock);
12183 mutex_exit(&cpu_lock);
12184 }
12185
12186 /*
12187 * DTrace DOF Functions
12188 */
12189 /*ARGSUSED*/
12190 static void
12191 dtrace_dof_error(dof_hdr_t *dof, const char *str)
12192 {
12193 if (dtrace_err_verbose)
12194 cmn_err(CE_WARN, "failed to process DOF: %s", str);
12195
12196 #ifdef DTRACE_ERRDEBUG
12197 dtrace_errdebug(str);
12198 #endif
12199 }
12200
12201 /*
12202 * Create DOF out of a currently enabled state. Right now, we only create
12203 * DOF containing the run-time options -- but this could be expanded to create
12204 * complete DOF representing the enabled state.
12205 */
12206 static dof_hdr_t *
12207 dtrace_dof_create(dtrace_state_t *state)
12208 {
12209 dof_hdr_t *dof;
12210 dof_sec_t *sec;
12211 dof_optdesc_t *opt;
12212 int i, len = sizeof (dof_hdr_t) +
12213 roundup(sizeof (dof_sec_t), sizeof (uint64_t)) +
12214 sizeof (dof_optdesc_t) * DTRACEOPT_MAX;
12215
12216 ASSERT(MUTEX_HELD(&dtrace_lock));
12217
12218 dof = kmem_zalloc(len, KM_SLEEP);
12219 dof->dofh_ident[DOF_ID_MAG0] = DOF_MAG_MAG0;
12220 dof->dofh_ident[DOF_ID_MAG1] = DOF_MAG_MAG1;
12221 dof->dofh_ident[DOF_ID_MAG2] = DOF_MAG_MAG2;
12222 dof->dofh_ident[DOF_ID_MAG3] = DOF_MAG_MAG3;
12223
12224 dof->dofh_ident[DOF_ID_MODEL] = DOF_MODEL_NATIVE;
12225 dof->dofh_ident[DOF_ID_ENCODING] = DOF_ENCODE_NATIVE;
12226 dof->dofh_ident[DOF_ID_VERSION] = DOF_VERSION;
12227 dof->dofh_ident[DOF_ID_DIFVERS] = DIF_VERSION;
12228 dof->dofh_ident[DOF_ID_DIFIREG] = DIF_DIR_NREGS;
12229 dof->dofh_ident[DOF_ID_DIFTREG] = DIF_DTR_NREGS;
12230
12231 dof->dofh_flags = 0;
12232 dof->dofh_hdrsize = sizeof (dof_hdr_t);
12233 dof->dofh_secsize = sizeof (dof_sec_t);
12234 dof->dofh_secnum = 1; /* only DOF_SECT_OPTDESC */
12235 dof->dofh_secoff = sizeof (dof_hdr_t);
12236 dof->dofh_loadsz = len;
12237 dof->dofh_filesz = len;
12238 dof->dofh_pad = 0;
12239
12240 /*
12241 * Fill in the option section header...
12242 */
12243 sec = (dof_sec_t *)((uintptr_t)dof + sizeof (dof_hdr_t));
12244 sec->dofs_type = DOF_SECT_OPTDESC;
12245 sec->dofs_align = sizeof (uint64_t);
12246 sec->dofs_flags = DOF_SECF_LOAD;
12247 sec->dofs_entsize = sizeof (dof_optdesc_t);
12248
12249 opt = (dof_optdesc_t *)((uintptr_t)sec +
12250 roundup(sizeof (dof_sec_t), sizeof (uint64_t)));
12251
12252 sec->dofs_offset = (uintptr_t)opt - (uintptr_t)dof;
12253 sec->dofs_size = sizeof (dof_optdesc_t) * DTRACEOPT_MAX;
12254
12255 for (i = 0; i < DTRACEOPT_MAX; i++) {
12256 opt[i].dofo_option = i;
12257 opt[i].dofo_strtab = DOF_SECIDX_NONE;
12258 opt[i].dofo_value = state->dts_options[i];
12259 }
12260
12261 return (dof);
12262 }
12263
12264 static dof_hdr_t *
12265 dtrace_dof_copyin(uintptr_t uarg, int *errp)
12266 {
12267 dof_hdr_t hdr, *dof;
12268
12269 ASSERT(!MUTEX_HELD(&dtrace_lock));
12270
12271 /*
12272 * First, we're going to copyin() the sizeof (dof_hdr_t).
12273 */
12274 if (copyin((void *)uarg, &hdr, sizeof (hdr)) != 0) {
12275 dtrace_dof_error(NULL, "failed to copyin DOF header");
12276 *errp = EFAULT;
12277 return (NULL);
12278 }
12279
12280 /*
12281 * Now we'll allocate the entire DOF and copy it in -- provided
12282 * that the length isn't outrageous.
12283 */
12284 if (hdr.dofh_loadsz >= dtrace_dof_maxsize) {
12285 dtrace_dof_error(&hdr, "load size exceeds maximum");
12286 *errp = E2BIG;
12287 return (NULL);
12288 }
12289
12290 if (hdr.dofh_loadsz < sizeof (hdr)) {
12291 dtrace_dof_error(&hdr, "invalid load size");
12292 *errp = EINVAL;
12293 return (NULL);
12294 }
12295
12296 dof = kmem_alloc(hdr.dofh_loadsz, KM_SLEEP);
12297
12298 if (copyin((void *)uarg, dof, hdr.dofh_loadsz) != 0 ||
12299 dof->dofh_loadsz != hdr.dofh_loadsz) {
12300 kmem_free(dof, hdr.dofh_loadsz);
12301 *errp = EFAULT;
12302 return (NULL);
12303 }
12304
12305 return (dof);
12306 }
12307
12308 static dof_hdr_t *
12309 dtrace_dof_property(const char *name)
12310 {
12311 uchar_t *buf;
12312 uint64_t loadsz;
12313 unsigned int len, i;
12314 dof_hdr_t *dof;
12315
12316 /*
12317 * Unfortunately, array of values in .conf files are always (and
12318 * only) interpreted to be integer arrays. We must read our DOF
12319 * as an integer array, and then squeeze it into a byte array.
12320 */
12321 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dtrace_devi, 0,
12322 (char *)name, (int **)&buf, &len) != DDI_PROP_SUCCESS)
12323 return (NULL);
12324
12325 for (i = 0; i < len; i++)
12326 buf[i] = (uchar_t)(((int *)buf)[i]);
12327
12328 if (len < sizeof (dof_hdr_t)) {
12329 ddi_prop_free(buf);
12330 dtrace_dof_error(NULL, "truncated header");
12331 return (NULL);
12332 }
12333
12334 if (len < (loadsz = ((dof_hdr_t *)buf)->dofh_loadsz)) {
12335 ddi_prop_free(buf);
12336 dtrace_dof_error(NULL, "truncated DOF");
12337 return (NULL);
12338 }
12339
12340 if (loadsz >= dtrace_dof_maxsize) {
12341 ddi_prop_free(buf);
12342 dtrace_dof_error(NULL, "oversized DOF");
12343 return (NULL);
12344 }
12345
12346 dof = kmem_alloc(loadsz, KM_SLEEP);
12347 bcopy(buf, dof, loadsz);
12348 ddi_prop_free(buf);
12349
12350 return (dof);
12351 }
12352
12353 static void
12354 dtrace_dof_destroy(dof_hdr_t *dof)
12355 {
12356 kmem_free(dof, dof->dofh_loadsz);
12357 }
12358
12359 /*
12360 * Return the dof_sec_t pointer corresponding to a given section index. If the
12361 * index is not valid, dtrace_dof_error() is called and NULL is returned. If
12362 * a type other than DOF_SECT_NONE is specified, the header is checked against
12363 * this type and NULL is returned if the types do not match.
12364 */
12365 static dof_sec_t *
12366 dtrace_dof_sect(dof_hdr_t *dof, uint32_t type, dof_secidx_t i)
12367 {
12368 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)
12369 ((uintptr_t)dof + dof->dofh_secoff + i * dof->dofh_secsize);
12370
12371 if (i >= dof->dofh_secnum) {
12372 dtrace_dof_error(dof, "referenced section index is invalid");
12373 return (NULL);
12374 }
12375
12376 if (!(sec->dofs_flags & DOF_SECF_LOAD)) {
12377 dtrace_dof_error(dof, "referenced section is not loadable");
12378 return (NULL);
12379 }
12380
12381 if (type != DOF_SECT_NONE && type != sec->dofs_type) {
12382 dtrace_dof_error(dof, "referenced section is the wrong type");
12383 return (NULL);
12384 }
12385
12386 return (sec);
12387 }
12388
12389 static dtrace_probedesc_t *
12390 dtrace_dof_probedesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_probedesc_t *desc)
12391 {
12392 dof_probedesc_t *probe;
12393 dof_sec_t *strtab;
12394 uintptr_t daddr = (uintptr_t)dof;
12395 uintptr_t str;
12396 size_t size;
12397
12398 if (sec->dofs_type != DOF_SECT_PROBEDESC) {
12399 dtrace_dof_error(dof, "invalid probe section");
12400 return (NULL);
12401 }
12402
12403 if (sec->dofs_align != sizeof (dof_secidx_t)) {
12404 dtrace_dof_error(dof, "bad alignment in probe description");
12405 return (NULL);
12406 }
12407
12408 if (sec->dofs_offset + sizeof (dof_probedesc_t) > dof->dofh_loadsz) {
12409 dtrace_dof_error(dof, "truncated probe description");
12410 return (NULL);
12411 }
12412
12413 probe = (dof_probedesc_t *)(uintptr_t)(daddr + sec->dofs_offset);
12414 strtab = dtrace_dof_sect(dof, DOF_SECT_STRTAB, probe->dofp_strtab);
12415
12416 if (strtab == NULL)
12417 return (NULL);
12418
12419 str = daddr + strtab->dofs_offset;
12420 size = strtab->dofs_size;
12421
12422 if (probe->dofp_provider >= strtab->dofs_size) {
12423 dtrace_dof_error(dof, "corrupt probe provider");
12424 return (NULL);
12425 }
12426
12427 (void) strncpy(desc->dtpd_provider,
12428 (char *)(str + probe->dofp_provider),
12429 MIN(DTRACE_PROVNAMELEN - 1, size - probe->dofp_provider));
12430
12431 if (probe->dofp_mod >= strtab->dofs_size) {
12432 dtrace_dof_error(dof, "corrupt probe module");
12433 return (NULL);
12434 }
12435
12436 (void) strncpy(desc->dtpd_mod, (char *)(str + probe->dofp_mod),
12437 MIN(DTRACE_MODNAMELEN - 1, size - probe->dofp_mod));
12438
12439 if (probe->dofp_func >= strtab->dofs_size) {
12440 dtrace_dof_error(dof, "corrupt probe function");
12441 return (NULL);
12442 }
12443
12444 (void) strncpy(desc->dtpd_func, (char *)(str + probe->dofp_func),
12445 MIN(DTRACE_FUNCNAMELEN - 1, size - probe->dofp_func));
12446
12447 if (probe->dofp_name >= strtab->dofs_size) {
12448 dtrace_dof_error(dof, "corrupt probe name");
12449 return (NULL);
12450 }
12451
12452 (void) strncpy(desc->dtpd_name, (char *)(str + probe->dofp_name),
12453 MIN(DTRACE_NAMELEN - 1, size - probe->dofp_name));
12454
12455 return (desc);
12456 }
12457
12458 static dtrace_difo_t *
12459 dtrace_dof_difo(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
12460 cred_t *cr)
12461 {
12462 dtrace_difo_t *dp;
12463 size_t ttl = 0;
12464 dof_difohdr_t *dofd;
12465 uintptr_t daddr = (uintptr_t)dof;
12466 size_t max = dtrace_difo_maxsize;
12467 int i, l, n;
12468
12469 static const struct {
12470 int section;
12471 int bufoffs;
12472 int lenoffs;
12473 int entsize;
12474 int align;
12475 const char *msg;
12476 } difo[] = {
12477 { DOF_SECT_DIF, offsetof(dtrace_difo_t, dtdo_buf),
12478 offsetof(dtrace_difo_t, dtdo_len), sizeof (dif_instr_t),
12479 sizeof (dif_instr_t), "multiple DIF sections" },
12480
12481 { DOF_SECT_INTTAB, offsetof(dtrace_difo_t, dtdo_inttab),
12482 offsetof(dtrace_difo_t, dtdo_intlen), sizeof (uint64_t),
12483 sizeof (uint64_t), "multiple integer tables" },
12484
12485 { DOF_SECT_STRTAB, offsetof(dtrace_difo_t, dtdo_strtab),
12486 offsetof(dtrace_difo_t, dtdo_strlen), 0,
12487 sizeof (char), "multiple string tables" },
12488
12489 { DOF_SECT_VARTAB, offsetof(dtrace_difo_t, dtdo_vartab),
12490 offsetof(dtrace_difo_t, dtdo_varlen), sizeof (dtrace_difv_t),
12491 sizeof (uint_t), "multiple variable tables" },
12492
12493 { DOF_SECT_NONE, 0, 0, 0, NULL }
12494 };
12495
12496 if (sec->dofs_type != DOF_SECT_DIFOHDR) {
12497 dtrace_dof_error(dof, "invalid DIFO header section");
12498 return (NULL);
12499 }
12500
12501 if (sec->dofs_align != sizeof (dof_secidx_t)) {
12502 dtrace_dof_error(dof, "bad alignment in DIFO header");
12503 return (NULL);
12504 }
12505
12506 if (sec->dofs_size < sizeof (dof_difohdr_t) ||
12507 sec->dofs_size % sizeof (dof_secidx_t)) {
12508 dtrace_dof_error(dof, "bad size in DIFO header");
12509 return (NULL);
12510 }
12511
12512 dofd = (dof_difohdr_t *)(uintptr_t)(daddr + sec->dofs_offset);
12513 n = (sec->dofs_size - sizeof (*dofd)) / sizeof (dof_secidx_t) + 1;
12514
12515 dp = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP);
12516 dp->dtdo_rtype = dofd->dofd_rtype;
12517
12518 for (l = 0; l < n; l++) {
12519 dof_sec_t *subsec;
12520 void **bufp;
12521 uint32_t *lenp;
12522
12523 if ((subsec = dtrace_dof_sect(dof, DOF_SECT_NONE,
12524 dofd->dofd_links[l])) == NULL)
12525 goto err; /* invalid section link */
12526
12527 if (ttl + subsec->dofs_size > max) {
12528 dtrace_dof_error(dof, "exceeds maximum size");
12529 goto err;
12530 }
12531
12532 ttl += subsec->dofs_size;
12533
12534 for (i = 0; difo[i].section != DOF_SECT_NONE; i++) {
12535 if (subsec->dofs_type != difo[i].section)
12536 continue;
12537
12538 if (!(subsec->dofs_flags & DOF_SECF_LOAD)) {
12539 dtrace_dof_error(dof, "section not loaded");
12540 goto err;
12541 }
12542
12543 if (subsec->dofs_align != difo[i].align) {
12544 dtrace_dof_error(dof, "bad alignment");
12545 goto err;
12546 }
12547
12548 bufp = (void **)((uintptr_t)dp + difo[i].bufoffs);
12549 lenp = (uint32_t *)((uintptr_t)dp + difo[i].lenoffs);
12550
12551 if (*bufp != NULL) {
12552 dtrace_dof_error(dof, difo[i].msg);
12553 goto err;
12554 }
12555
12556 if (difo[i].entsize != subsec->dofs_entsize) {
12557 dtrace_dof_error(dof, "entry size mismatch");
12558 goto err;
12559 }
12560
12561 if (subsec->dofs_entsize != 0 &&
12562 (subsec->dofs_size % subsec->dofs_entsize) != 0) {
12563 dtrace_dof_error(dof, "corrupt entry size");
12564 goto err;
12565 }
12566
12567 *lenp = subsec->dofs_size;
12568 *bufp = kmem_alloc(subsec->dofs_size, KM_SLEEP);
12569 bcopy((char *)(uintptr_t)(daddr + subsec->dofs_offset),
12570 *bufp, subsec->dofs_size);
12571
12572 if (subsec->dofs_entsize != 0)
12573 *lenp /= subsec->dofs_entsize;
12574
12575 break;
12576 }
12577
12578 /*
12579 * If we encounter a loadable DIFO sub-section that is not
12580 * known to us, assume this is a broken program and fail.
12581 */
12582 if (difo[i].section == DOF_SECT_NONE &&
12583 (subsec->dofs_flags & DOF_SECF_LOAD)) {
12584 dtrace_dof_error(dof, "unrecognized DIFO subsection");
12585 goto err;
12586 }
12587 }
12588
12589 if (dp->dtdo_buf == NULL) {
12590 /*
12591 * We can't have a DIF object without DIF text.
12592 */
12593 dtrace_dof_error(dof, "missing DIF text");
12594 goto err;
12595 }
12596
12597 /*
12598 * Before we validate the DIF object, run through the variable table
12599 * looking for the strings -- if any of their size are under, we'll set
12600 * their size to be the system-wide default string size. Note that
12601 * this should _not_ happen if the "strsize" option has been set --
12602 * in this case, the compiler should have set the size to reflect the
12603 * setting of the option.
12604 */
12605 for (i = 0; i < dp->dtdo_varlen; i++) {
12606 dtrace_difv_t *v = &dp->dtdo_vartab[i];
12607 dtrace_diftype_t *t = &v->dtdv_type;
12608
12609 if (v->dtdv_id < DIF_VAR_OTHER_UBASE)
12610 continue;
12611
12612 if (t->dtdt_kind == DIF_TYPE_STRING && t->dtdt_size == 0)
12613 t->dtdt_size = dtrace_strsize_default;
12614 }
12615
12616 if (dtrace_difo_validate(dp, vstate, DIF_DIR_NREGS, cr) != 0)
12617 goto err;
12618
12619 dtrace_difo_init(dp, vstate);
12620 return (dp);
12621
12622 err:
12623 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t));
12624 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t));
12625 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen);
12626 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t));
12627
12628 kmem_free(dp, sizeof (dtrace_difo_t));
12629 return (NULL);
12630 }
12631
12632 static dtrace_predicate_t *
12633 dtrace_dof_predicate(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
12634 cred_t *cr)
12635 {
12636 dtrace_difo_t *dp;
12637
12638 if ((dp = dtrace_dof_difo(dof, sec, vstate, cr)) == NULL)
12639 return (NULL);
12640
12641 return (dtrace_predicate_create(dp));
12642 }
12643
12644 static dtrace_actdesc_t *
12645 dtrace_dof_actdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
12646 cred_t *cr)
12647 {
12648 dtrace_actdesc_t *act, *first = NULL, *last = NULL, *next;
12649 dof_actdesc_t *desc;
12650 dof_sec_t *difosec;
12651 size_t offs;
12652 uintptr_t daddr = (uintptr_t)dof;
12653 uint64_t arg;
12654 dtrace_actkind_t kind;
12655
12656 if (sec->dofs_type != DOF_SECT_ACTDESC) {
12657 dtrace_dof_error(dof, "invalid action section");
12658 return (NULL);
12659 }
12660
12661 if (sec->dofs_offset + sizeof (dof_actdesc_t) > dof->dofh_loadsz) {
12662 dtrace_dof_error(dof, "truncated action description");
12663 return (NULL);
12664 }
12665
12666 if (sec->dofs_align != sizeof (uint64_t)) {
12667 dtrace_dof_error(dof, "bad alignment in action description");
12668 return (NULL);
12669 }
12670
12671 if (sec->dofs_size < sec->dofs_entsize) {
12672 dtrace_dof_error(dof, "section entry size exceeds total size");
12673 return (NULL);
12674 }
12675
12676 if (sec->dofs_entsize != sizeof (dof_actdesc_t)) {
12677 dtrace_dof_error(dof, "bad entry size in action description");
12678 return (NULL);
12679 }
12680
12681 if (sec->dofs_size / sec->dofs_entsize > dtrace_actions_max) {
12682 dtrace_dof_error(dof, "actions exceed dtrace_actions_max");
12683 return (NULL);
12684 }
12685
12686 for (offs = 0; offs < sec->dofs_size; offs += sec->dofs_entsize) {
12687 desc = (dof_actdesc_t *)(daddr +
12688 (uintptr_t)sec->dofs_offset + offs);
12689 kind = (dtrace_actkind_t)desc->dofa_kind;
12690
12691 if ((DTRACEACT_ISPRINTFLIKE(kind) &&
12692 (kind != DTRACEACT_PRINTA ||
12693 desc->dofa_strtab != DOF_SECIDX_NONE)) ||
12694 (kind == DTRACEACT_DIFEXPR &&
12695 desc->dofa_strtab != DOF_SECIDX_NONE)) {
12696 dof_sec_t *strtab;
12697 char *str, *fmt;
12698 uint64_t i;
12699
12700 /*
12701 * The argument to these actions is an index into the
12702 * DOF string table. For printf()-like actions, this
12703 * is the format string. For print(), this is the
12704 * CTF type of the expression result.
12705 */
12706 if ((strtab = dtrace_dof_sect(dof,
12707 DOF_SECT_STRTAB, desc->dofa_strtab)) == NULL)
12708 goto err;
12709
12710 str = (char *)((uintptr_t)dof +
12711 (uintptr_t)strtab->dofs_offset);
12712
12713 for (i = desc->dofa_arg; i < strtab->dofs_size; i++) {
12714 if (str[i] == '\0')
12715 break;
12716 }
12717
12718 if (i >= strtab->dofs_size) {
12719 dtrace_dof_error(dof, "bogus format string");
12720 goto err;
12721 }
12722
12723 if (i == desc->dofa_arg) {
12724 dtrace_dof_error(dof, "empty format string");
12725 goto err;
12726 }
12727
12728 i -= desc->dofa_arg;
12729 fmt = kmem_alloc(i + 1, KM_SLEEP);
12730 bcopy(&str[desc->dofa_arg], fmt, i + 1);
12731 arg = (uint64_t)(uintptr_t)fmt;
12732 } else {
12733 if (kind == DTRACEACT_PRINTA) {
12734 ASSERT(desc->dofa_strtab == DOF_SECIDX_NONE);
12735 arg = 0;
12736 } else {
12737 arg = desc->dofa_arg;
12738 }
12739 }
12740
12741 act = dtrace_actdesc_create(kind, desc->dofa_ntuple,
12742 desc->dofa_uarg, arg);
12743
12744 if (last != NULL) {
12745 last->dtad_next = act;
12746 } else {
12747 first = act;
12748 }
12749
12750 last = act;
12751
12752 if (desc->dofa_difo == DOF_SECIDX_NONE)
12753 continue;
12754
12755 if ((difosec = dtrace_dof_sect(dof,
12756 DOF_SECT_DIFOHDR, desc->dofa_difo)) == NULL)
12757 goto err;
12758
12759 act->dtad_difo = dtrace_dof_difo(dof, difosec, vstate, cr);
12760
12761 if (act->dtad_difo == NULL)
12762 goto err;
12763 }
12764
12765 ASSERT(first != NULL);
12766 return (first);
12767
12768 err:
12769 for (act = first; act != NULL; act = next) {
12770 next = act->dtad_next;
12771 dtrace_actdesc_release(act, vstate);
12772 }
12773
12774 return (NULL);
12775 }
12776
12777 static dtrace_ecbdesc_t *
12778 dtrace_dof_ecbdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
12779 cred_t *cr)
12780 {
12781 dtrace_ecbdesc_t *ep;
12782 dof_ecbdesc_t *ecb;
12783 dtrace_probedesc_t *desc;
12784 dtrace_predicate_t *pred = NULL;
12785
12786 if (sec->dofs_size < sizeof (dof_ecbdesc_t)) {
12787 dtrace_dof_error(dof, "truncated ECB description");
12788 return (NULL);
12789 }
12790
12791 if (sec->dofs_align != sizeof (uint64_t)) {
12792 dtrace_dof_error(dof, "bad alignment in ECB description");
12793 return (NULL);
12794 }
12795
12796 ecb = (dof_ecbdesc_t *)((uintptr_t)dof + (uintptr_t)sec->dofs_offset);
12797 sec = dtrace_dof_sect(dof, DOF_SECT_PROBEDESC, ecb->dofe_probes);
12798
12799 if (sec == NULL)
12800 return (NULL);
12801
12802 ep = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP);
12803 ep->dted_uarg = ecb->dofe_uarg;
12804 desc = &ep->dted_probe;
12805
12806 if (dtrace_dof_probedesc(dof, sec, desc) == NULL)
12807 goto err;
12808
12809 if (ecb->dofe_pred != DOF_SECIDX_NONE) {
12810 if ((sec = dtrace_dof_sect(dof,
12811 DOF_SECT_DIFOHDR, ecb->dofe_pred)) == NULL)
12812 goto err;
12813
12814 if ((pred = dtrace_dof_predicate(dof, sec, vstate, cr)) == NULL)
12815 goto err;
12816
12817 ep->dted_pred.dtpdd_predicate = pred;
12818 }
12819
12820 if (ecb->dofe_actions != DOF_SECIDX_NONE) {
12821 if ((sec = dtrace_dof_sect(dof,
12822 DOF_SECT_ACTDESC, ecb->dofe_actions)) == NULL)
12823 goto err;
12824
12825 ep->dted_action = dtrace_dof_actdesc(dof, sec, vstate, cr);
12826
12827 if (ep->dted_action == NULL)
12828 goto err;
12829 }
12830
12831 return (ep);
12832
12833 err:
12834 if (pred != NULL)
12835 dtrace_predicate_release(pred, vstate);
12836 kmem_free(ep, sizeof (dtrace_ecbdesc_t));
12837 return (NULL);
12838 }
12839
12840 /*
12841 * Apply the relocations from the specified 'sec' (a DOF_SECT_URELHDR) to the
12842 * specified DOF. At present, this amounts to simply adding 'ubase' to the
12843 * site of any user SETX relocations to account for load object base address.
12844 * In the future, if we need other relocations, this function can be extended.
12845 */
12846 static int
12847 dtrace_dof_relocate(dof_hdr_t *dof, dof_sec_t *sec, uint64_t ubase)
12848 {
12849 uintptr_t daddr = (uintptr_t)dof;
12850 dof_relohdr_t *dofr =
12851 (dof_relohdr_t *)(uintptr_t)(daddr + sec->dofs_offset);
12852 dof_sec_t *ss, *rs, *ts;
12853 dof_relodesc_t *r;
12854 uint_t i, n;
12855
12856 if (sec->dofs_size < sizeof (dof_relohdr_t) ||
12857 sec->dofs_align != sizeof (dof_secidx_t)) {
12858 dtrace_dof_error(dof, "invalid relocation header");
12859 return (-1);
12860 }
12861
12862 ss = dtrace_dof_sect(dof, DOF_SECT_STRTAB, dofr->dofr_strtab);
12863 rs = dtrace_dof_sect(dof, DOF_SECT_RELTAB, dofr->dofr_relsec);
12864 ts = dtrace_dof_sect(dof, DOF_SECT_NONE, dofr->dofr_tgtsec);
12865
12866 if (ss == NULL || rs == NULL || ts == NULL)
12867 return (-1); /* dtrace_dof_error() has been called already */
12868
12869 if (rs->dofs_entsize < sizeof (dof_relodesc_t) ||
12870 rs->dofs_align != sizeof (uint64_t)) {
12871 dtrace_dof_error(dof, "invalid relocation section");
12872 return (-1);
12873 }
12874
12875 r = (dof_relodesc_t *)(uintptr_t)(daddr + rs->dofs_offset);
12876 n = rs->dofs_size / rs->dofs_entsize;
12877
12878 for (i = 0; i < n; i++) {
12879 uintptr_t taddr = daddr + ts->dofs_offset + r->dofr_offset;
12880
12881 switch (r->dofr_type) {
12882 case DOF_RELO_NONE:
12883 break;
12884 case DOF_RELO_SETX:
12885 if (r->dofr_offset >= ts->dofs_size || r->dofr_offset +
12886 sizeof (uint64_t) > ts->dofs_size) {
12887 dtrace_dof_error(dof, "bad relocation offset");
12888 return (-1);
12889 }
12890
12891 if (!IS_P2ALIGNED(taddr, sizeof (uint64_t))) {
12892 dtrace_dof_error(dof, "misaligned setx relo");
12893 return (-1);
12894 }
12895
12896 *(uint64_t *)taddr += ubase;
12897 break;
12898 default:
12899 dtrace_dof_error(dof, "invalid relocation type");
12900 return (-1);
12901 }
12902
12903 r = (dof_relodesc_t *)((uintptr_t)r + rs->dofs_entsize);
12904 }
12905
12906 return (0);
12907 }
12908
12909 /*
12910 * The dof_hdr_t passed to dtrace_dof_slurp() should be a partially validated
12911 * header: it should be at the front of a memory region that is at least
12912 * sizeof (dof_hdr_t) in size -- and then at least dof_hdr.dofh_loadsz in
12913 * size. It need not be validated in any other way.
12914 */
12915 static int
12916 dtrace_dof_slurp(dof_hdr_t *dof, dtrace_vstate_t *vstate, cred_t *cr,
12917 dtrace_enabling_t **enabp, uint64_t ubase, int noprobes)
12918 {
12919 uint64_t len = dof->dofh_loadsz, seclen;
12920 uintptr_t daddr = (uintptr_t)dof;
12921 dtrace_ecbdesc_t *ep;
12922 dtrace_enabling_t *enab;
12923 uint_t i;
12924
12925 ASSERT(MUTEX_HELD(&dtrace_lock));
12926 ASSERT(dof->dofh_loadsz >= sizeof (dof_hdr_t));
12927
12928 /*
12929 * Check the DOF header identification bytes. In addition to checking
12930 * valid settings, we also verify that unused bits/bytes are zeroed so
12931 * we can use them later without fear of regressing existing binaries.
12932 */
12933 if (bcmp(&dof->dofh_ident[DOF_ID_MAG0],
12934 DOF_MAG_STRING, DOF_MAG_STRLEN) != 0) {
12935 dtrace_dof_error(dof, "DOF magic string mismatch");
12936 return (-1);
12937 }
12938
12939 if (dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_ILP32 &&
12940 dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_LP64) {
12941 dtrace_dof_error(dof, "DOF has invalid data model");
12942 return (-1);
12943 }
12944
12945 if (dof->dofh_ident[DOF_ID_ENCODING] != DOF_ENCODE_NATIVE) {
12946 dtrace_dof_error(dof, "DOF encoding mismatch");
12947 return (-1);
12948 }
12949
12950 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 &&
12951 dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_2) {
12952 dtrace_dof_error(dof, "DOF version mismatch");
12953 return (-1);
12954 }
12955
12956 if (dof->dofh_ident[DOF_ID_DIFVERS] != DIF_VERSION_2) {
12957 dtrace_dof_error(dof, "DOF uses unsupported instruction set");
12958 return (-1);
12959 }
12960
12961 if (dof->dofh_ident[DOF_ID_DIFIREG] > DIF_DIR_NREGS) {
12962 dtrace_dof_error(dof, "DOF uses too many integer registers");
12963 return (-1);
12964 }
12965
12966 if (dof->dofh_ident[DOF_ID_DIFTREG] > DIF_DTR_NREGS) {
12967 dtrace_dof_error(dof, "DOF uses too many tuple registers");
12968 return (-1);
12969 }
12970
12971 for (i = DOF_ID_PAD; i < DOF_ID_SIZE; i++) {
12972 if (dof->dofh_ident[i] != 0) {
12973 dtrace_dof_error(dof, "DOF has invalid ident byte set");
12974 return (-1);
12975 }
12976 }
12977
12978 if (dof->dofh_flags & ~DOF_FL_VALID) {
12979 dtrace_dof_error(dof, "DOF has invalid flag bits set");
12980 return (-1);
12981 }
12982
12983 if (dof->dofh_secsize == 0) {
12984 dtrace_dof_error(dof, "zero section header size");
12985 return (-1);
12986 }
12987
12988 /*
12989 * Check that the section headers don't exceed the amount of DOF
12990 * data. Note that we cast the section size and number of sections
12991 * to uint64_t's to prevent possible overflow in the multiplication.
12992 */
12993 seclen = (uint64_t)dof->dofh_secnum * (uint64_t)dof->dofh_secsize;
12994
12995 if (dof->dofh_secoff > len || seclen > len ||
12996 dof->dofh_secoff + seclen > len) {
12997 dtrace_dof_error(dof, "truncated section headers");
12998 return (-1);
12999 }
13000
13001 if (!IS_P2ALIGNED(dof->dofh_secoff, sizeof (uint64_t))) {
13002 dtrace_dof_error(dof, "misaligned section headers");
13003 return (-1);
13004 }
13005
13006 if (!IS_P2ALIGNED(dof->dofh_secsize, sizeof (uint64_t))) {
13007 dtrace_dof_error(dof, "misaligned section size");
13008 return (-1);
13009 }
13010
13011 /*
13012 * Take an initial pass through the section headers to be sure that
13013 * the headers don't have stray offsets. If the 'noprobes' flag is
13014 * set, do not permit sections relating to providers, probes, or args.
13015 */
13016 for (i = 0; i < dof->dofh_secnum; i++) {
13017 dof_sec_t *sec = (dof_sec_t *)(daddr +
13018 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize);
13019
13020 if (noprobes) {
13021 switch (sec->dofs_type) {
13022 case DOF_SECT_PROVIDER:
13023 case DOF_SECT_PROBES:
13024 case DOF_SECT_PRARGS:
13025 case DOF_SECT_PROFFS:
13026 dtrace_dof_error(dof, "illegal sections "
13027 "for enabling");
13028 return (-1);
13029 }
13030 }
13031
13032 if (DOF_SEC_ISLOADABLE(sec->dofs_type) &&
13033 !(sec->dofs_flags & DOF_SECF_LOAD)) {
13034 dtrace_dof_error(dof, "loadable section with load "
13035 "flag unset");
13036 return (-1);
13037 }
13038
13039 if (!(sec->dofs_flags & DOF_SECF_LOAD))
13040 continue; /* just ignore non-loadable sections */
13041
13042 if (!ISP2(sec->dofs_align)) {
13043 dtrace_dof_error(dof, "bad section alignment");
13044 return (-1);
13045 }
13046
13047 if (sec->dofs_offset & (sec->dofs_align - 1)) {
13048 dtrace_dof_error(dof, "misaligned section");
13049 return (-1);
13050 }
13051
13052 if (sec->dofs_offset > len || sec->dofs_size > len ||
13053 sec->dofs_offset + sec->dofs_size > len) {
13054 dtrace_dof_error(dof, "corrupt section header");
13055 return (-1);
13056 }
13057
13058 if (sec->dofs_type == DOF_SECT_STRTAB && *((char *)daddr +
13059 sec->dofs_offset + sec->dofs_size - 1) != '\0') {
13060 dtrace_dof_error(dof, "non-terminating string table");
13061 return (-1);
13062 }
13063 }
13064
13065 /*
13066 * Take a second pass through the sections and locate and perform any
13067 * relocations that are present. We do this after the first pass to
13068 * be sure that all sections have had their headers validated.
13069 */
13070 for (i = 0; i < dof->dofh_secnum; i++) {
13071 dof_sec_t *sec = (dof_sec_t *)(daddr +
13072 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize);
13073
13074 if (!(sec->dofs_flags & DOF_SECF_LOAD))
13075 continue; /* skip sections that are not loadable */
13076
13077 switch (sec->dofs_type) {
13078 case DOF_SECT_URELHDR:
13079 if (dtrace_dof_relocate(dof, sec, ubase) != 0)
13080 return (-1);
13081 break;
13082 }
13083 }
13084
13085 if ((enab = *enabp) == NULL)
13086 enab = *enabp = dtrace_enabling_create(vstate);
13087
13088 for (i = 0; i < dof->dofh_secnum; i++) {
13089 dof_sec_t *sec = (dof_sec_t *)(daddr +
13090 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize);
13091
13092 if (sec->dofs_type != DOF_SECT_ECBDESC)
13093 continue;
13094
13095 if ((ep = dtrace_dof_ecbdesc(dof, sec, vstate, cr)) == NULL) {
13096 dtrace_enabling_destroy(enab);
13097 *enabp = NULL;
13098 return (-1);
13099 }
13100
13101 dtrace_enabling_add(enab, ep);
13102 }
13103
13104 return (0);
13105 }
13106
13107 /*
13108 * Process DOF for any options. This routine assumes that the DOF has been
13109 * at least processed by dtrace_dof_slurp().
13110 */
13111 static int
13112 dtrace_dof_options(dof_hdr_t *dof, dtrace_state_t *state)
13113 {
13114 int i, rval;
13115 uint32_t entsize;
13116 size_t offs;
13117 dof_optdesc_t *desc;
13118
13119 for (i = 0; i < dof->dofh_secnum; i++) {
13120 dof_sec_t *sec = (dof_sec_t *)((uintptr_t)dof +
13121 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize);
13122
13123 if (sec->dofs_type != DOF_SECT_OPTDESC)
13124 continue;
13125
13126 if (sec->dofs_align != sizeof (uint64_t)) {
13127 dtrace_dof_error(dof, "bad alignment in "
13128 "option description");
13129 return (EINVAL);
13130 }
13131
13132 if ((entsize = sec->dofs_entsize) == 0) {
13133 dtrace_dof_error(dof, "zeroed option entry size");
13134 return (EINVAL);
13135 }
13136
13137 if (entsize < sizeof (dof_optdesc_t)) {
13138 dtrace_dof_error(dof, "bad option entry size");
13139 return (EINVAL);
13140 }
13141
13142 for (offs = 0; offs < sec->dofs_size; offs += entsize) {
13143 desc = (dof_optdesc_t *)((uintptr_t)dof +
13144 (uintptr_t)sec->dofs_offset + offs);
13145
13146 if (desc->dofo_strtab != DOF_SECIDX_NONE) {
13147 dtrace_dof_error(dof, "non-zero option string");
13148 return (EINVAL);
13149 }
13150
13151 if (desc->dofo_value == DTRACEOPT_UNSET) {
13152 dtrace_dof_error(dof, "unset option");
13153 return (EINVAL);
13154 }
13155
13156 if ((rval = dtrace_state_option(state,
13157 desc->dofo_option, desc->dofo_value)) != 0) {
13158 dtrace_dof_error(dof, "rejected option");
13159 return (rval);
13160 }
13161 }
13162 }
13163
13164 return (0);
13165 }
13166
13167 /*
13168 * DTrace Consumer State Functions
13169 */
13170 int
13171 dtrace_dstate_init(dtrace_dstate_t *dstate, size_t size)
13172 {
13173 size_t hashsize, maxper, min, chunksize = dstate->dtds_chunksize;
13174 void *base;
13175 uintptr_t limit;
13176 dtrace_dynvar_t *dvar, *next, *start;
13177 int i;
13178
13179 ASSERT(MUTEX_HELD(&dtrace_lock));
13180 ASSERT(dstate->dtds_base == NULL && dstate->dtds_percpu == NULL);
13181
13182 bzero(dstate, sizeof (dtrace_dstate_t));
13183
13184 if ((dstate->dtds_chunksize = chunksize) == 0)
13185 dstate->dtds_chunksize = DTRACE_DYNVAR_CHUNKSIZE;
13186
13187 if (size < (min = dstate->dtds_chunksize + sizeof (dtrace_dynhash_t)))
13188 size = min;
13189
13190 if ((base = kmem_zalloc(size, KM_NOSLEEP | KM_NORMALPRI)) == NULL)
13191 return (ENOMEM);
13192
13193 dstate->dtds_size = size;
13194 dstate->dtds_base = base;
13195 dstate->dtds_percpu = kmem_cache_alloc(dtrace_state_cache, KM_SLEEP);
13196 bzero(dstate->dtds_percpu, NCPU * sizeof (dtrace_dstate_percpu_t));
13197
13198 hashsize = size / (dstate->dtds_chunksize + sizeof (dtrace_dynhash_t));
13199
13200 if (hashsize != 1 && (hashsize & 1))
13201 hashsize--;
13202
13203 dstate->dtds_hashsize = hashsize;
13204 dstate->dtds_hash = dstate->dtds_base;
13205
13206 /*
13207 * Set all of our hash buckets to point to the single sink, and (if
13208 * it hasn't already been set), set the sink's hash value to be the
13209 * sink sentinel value. The sink is needed for dynamic variable
13210 * lookups to know that they have iterated over an entire, valid hash
13211 * chain.
13212 */
13213 for (i = 0; i < hashsize; i++)
13214 dstate->dtds_hash[i].dtdh_chain = &dtrace_dynhash_sink;
13215
13216 if (dtrace_dynhash_sink.dtdv_hashval != DTRACE_DYNHASH_SINK)
13217 dtrace_dynhash_sink.dtdv_hashval = DTRACE_DYNHASH_SINK;
13218
13219 /*
13220 * Determine number of active CPUs. Divide free list evenly among
13221 * active CPUs.
13222 */
13223 start = (dtrace_dynvar_t *)
13224 ((uintptr_t)base + hashsize * sizeof (dtrace_dynhash_t));
13225 limit = (uintptr_t)base + size;
13226
13227 maxper = (limit - (uintptr_t)start) / NCPU;
13228 maxper = (maxper / dstate->dtds_chunksize) * dstate->dtds_chunksize;
13229
13230 for (i = 0; i < NCPU; i++) {
13231 dstate->dtds_percpu[i].dtdsc_free = dvar = start;
13232
13233 /*
13234 * If we don't even have enough chunks to make it once through
13235 * NCPUs, we're just going to allocate everything to the first
13236 * CPU. And if we're on the last CPU, we're going to allocate
13237 * whatever is left over. In either case, we set the limit to
13238 * be the limit of the dynamic variable space.
13239 */
13240 if (maxper == 0 || i == NCPU - 1) {
13241 limit = (uintptr_t)base + size;
13242 start = NULL;
13243 } else {
13244 limit = (uintptr_t)start + maxper;
13245 start = (dtrace_dynvar_t *)limit;
13246 }
13247
13248 ASSERT(limit <= (uintptr_t)base + size);
13249
13250 for (;;) {
13251 next = (dtrace_dynvar_t *)((uintptr_t)dvar +
13252 dstate->dtds_chunksize);
13253
13254 if ((uintptr_t)next + dstate->dtds_chunksize >= limit)
13255 break;
13256
13257 dvar->dtdv_next = next;
13258 dvar = next;
13259 }
13260
13261 if (maxper == 0)
13262 break;
13263 }
13264
13265 return (0);
13266 }
13267
13268 void
13269 dtrace_dstate_fini(dtrace_dstate_t *dstate)
13270 {
13271 ASSERT(MUTEX_HELD(&cpu_lock));
13272
13273 if (dstate->dtds_base == NULL)
13274 return;
13275
13276 kmem_free(dstate->dtds_base, dstate->dtds_size);
13277 kmem_cache_free(dtrace_state_cache, dstate->dtds_percpu);
13278 }
13279
13280 static void
13281 dtrace_vstate_fini(dtrace_vstate_t *vstate)
13282 {
13283 /*
13284 * Logical XOR, where are you?
13285 */
13286 ASSERT((vstate->dtvs_nglobals == 0) ^ (vstate->dtvs_globals != NULL));
13287
13288 if (vstate->dtvs_nglobals > 0) {
13289 kmem_free(vstate->dtvs_globals, vstate->dtvs_nglobals *
13290 sizeof (dtrace_statvar_t *));
13291 }
13292
13293 if (vstate->dtvs_ntlocals > 0) {
13294 kmem_free(vstate->dtvs_tlocals, vstate->dtvs_ntlocals *
13295 sizeof (dtrace_difv_t));
13296 }
13297
13298 ASSERT((vstate->dtvs_nlocals == 0) ^ (vstate->dtvs_locals != NULL));
13299
13300 if (vstate->dtvs_nlocals > 0) {
13301 kmem_free(vstate->dtvs_locals, vstate->dtvs_nlocals *
13302 sizeof (dtrace_statvar_t *));
13303 }
13304 }
13305
13306 static void
13307 dtrace_state_clean(dtrace_state_t *state)
13308 {
13309 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE)
13310 return;
13311
13312 dtrace_dynvar_clean(&state->dts_vstate.dtvs_dynvars);
13313 dtrace_speculation_clean(state);
13314 }
13315
13316 static void
13317 dtrace_state_deadman(dtrace_state_t *state)
13318 {
13319 hrtime_t now;
13320
13321 dtrace_sync();
13322
13323 now = dtrace_gethrtime();
13324
13325 if (state != dtrace_anon.dta_state &&
13326 now - state->dts_laststatus >= dtrace_deadman_user)
13327 return;
13328
13329 /*
13330 * We must be sure that dts_alive never appears to be less than the
13331 * value upon entry to dtrace_state_deadman(), and because we lack a
13332 * dtrace_cas64(), we cannot store to it atomically. We thus instead
13333 * store INT64_MAX to it, followed by a memory barrier, followed by
13334 * the new value. This assures that dts_alive never appears to be
13335 * less than its true value, regardless of the order in which the
13336 * stores to the underlying storage are issued.
13337 */
13338 state->dts_alive = INT64_MAX;
13339 dtrace_membar_producer();
13340 state->dts_alive = now;
13341 }
13342
13343 dtrace_state_t *
13344 dtrace_state_create(dev_t *devp, cred_t *cr)
13345 {
13346 minor_t minor;
13347 major_t major;
13348 char c[30];
13349 dtrace_state_t *state;
13350 dtrace_optval_t *opt;
13351 int bufsize = NCPU * sizeof (dtrace_buffer_t), i;
13352
13353 ASSERT(MUTEX_HELD(&dtrace_lock));
13354 ASSERT(MUTEX_HELD(&cpu_lock));
13355
13356 minor = (minor_t)(uintptr_t)vmem_alloc(dtrace_minor, 1,
13357 VM_BESTFIT | VM_SLEEP);
13358
13359 if (ddi_soft_state_zalloc(dtrace_softstate, minor) != DDI_SUCCESS) {
13360 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1);
13361 return (NULL);
13362 }
13363
13364 state = ddi_get_soft_state(dtrace_softstate, minor);
13365 state->dts_epid = DTRACE_EPIDNONE + 1;
13366
13367 (void) snprintf(c, sizeof (c), "dtrace_aggid_%d", minor);
13368 state->dts_aggid_arena = vmem_create(c, (void *)1, UINT32_MAX, 1,
13369 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER);
13370
13371 if (devp != NULL) {
13372 major = getemajor(*devp);
13373 } else {
13374 major = ddi_driver_major(dtrace_devi);
13375 }
13376
13377 state->dts_dev = makedevice(major, minor);
13378
13379 if (devp != NULL)
13380 *devp = state->dts_dev;
13381
13382 /*
13383 * We allocate NCPU buffers. On the one hand, this can be quite
13384 * a bit of memory per instance (nearly 36K on a Starcat). On the
13385 * other hand, it saves an additional memory reference in the probe
13386 * path.
13387 */
13388 state->dts_buffer = kmem_zalloc(bufsize, KM_SLEEP);
13389 state->dts_aggbuffer = kmem_zalloc(bufsize, KM_SLEEP);
13390 state->dts_cleaner = CYCLIC_NONE;
13391 state->dts_deadman = CYCLIC_NONE;
13392 state->dts_vstate.dtvs_state = state;
13393
13394 for (i = 0; i < DTRACEOPT_MAX; i++)
13395 state->dts_options[i] = DTRACEOPT_UNSET;
13396
13397 /*
13398 * Set the default options.
13399 */
13400 opt = state->dts_options;
13401 opt[DTRACEOPT_BUFPOLICY] = DTRACEOPT_BUFPOLICY_SWITCH;
13402 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_AUTO;
13403 opt[DTRACEOPT_NSPEC] = dtrace_nspec_default;
13404 opt[DTRACEOPT_SPECSIZE] = dtrace_specsize_default;
13405 opt[DTRACEOPT_CPU] = (dtrace_optval_t)DTRACE_CPUALL;
13406 opt[DTRACEOPT_STRSIZE] = dtrace_strsize_default;
13407 opt[DTRACEOPT_STACKFRAMES] = dtrace_stackframes_default;
13408 opt[DTRACEOPT_USTACKFRAMES] = dtrace_ustackframes_default;
13409 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_default;
13410 opt[DTRACEOPT_AGGRATE] = dtrace_aggrate_default;
13411 opt[DTRACEOPT_SWITCHRATE] = dtrace_switchrate_default;
13412 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_default;
13413 opt[DTRACEOPT_JSTACKFRAMES] = dtrace_jstackframes_default;
13414 opt[DTRACEOPT_JSTACKSTRSIZE] = dtrace_jstackstrsize_default;
13415
13416 state->dts_activity = DTRACE_ACTIVITY_INACTIVE;
13417
13418 /*
13419 * Depending on the user credentials, we set flag bits which alter probe
13420 * visibility or the amount of destructiveness allowed. In the case of
13421 * actual anonymous tracing, or the possession of all privileges, all of
13422 * the normal checks are bypassed.
13423 */
13424 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) {
13425 state->dts_cred.dcr_visible = DTRACE_CRV_ALL;
13426 state->dts_cred.dcr_action = DTRACE_CRA_ALL;
13427 } else {
13428 /*
13429 * Set up the credentials for this instantiation. We take a
13430 * hold on the credential to prevent it from disappearing on
13431 * us; this in turn prevents the zone_t referenced by this
13432 * credential from disappearing. This means that we can
13433 * examine the credential and the zone from probe context.
13434 */
13435 crhold(cr);
13436 state->dts_cred.dcr_cred = cr;
13437
13438 /*
13439 * CRA_PROC means "we have *some* privilege for dtrace" and
13440 * unlocks the use of variables like pid, zonename, etc.
13441 */
13442 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE) ||
13443 PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) {
13444 state->dts_cred.dcr_action |= DTRACE_CRA_PROC;
13445 }
13446
13447 /*
13448 * dtrace_user allows use of syscall and profile providers.
13449 * If the user also has proc_owner and/or proc_zone, we
13450 * extend the scope to include additional visibility and
13451 * destructive power.
13452 */
13453 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE)) {
13454 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) {
13455 state->dts_cred.dcr_visible |=
13456 DTRACE_CRV_ALLPROC;
13457
13458 state->dts_cred.dcr_action |=
13459 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER;
13460 }
13461
13462 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) {
13463 state->dts_cred.dcr_visible |=
13464 DTRACE_CRV_ALLZONE;
13465
13466 state->dts_cred.dcr_action |=
13467 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE;
13468 }
13469
13470 /*
13471 * If we have all privs in whatever zone this is,
13472 * we can do destructive things to processes which
13473 * have altered credentials.
13474 */
13475 if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE),
13476 cr->cr_zone->zone_privset)) {
13477 state->dts_cred.dcr_action |=
13478 DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG;
13479 }
13480 }
13481
13482 /*
13483 * Holding the dtrace_kernel privilege also implies that
13484 * the user has the dtrace_user privilege from a visibility
13485 * perspective. But without further privileges, some
13486 * destructive actions are not available.
13487 */
13488 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE)) {
13489 /*
13490 * Make all probes in all zones visible. However,
13491 * this doesn't mean that all actions become available
13492 * to all zones.
13493 */
13494 state->dts_cred.dcr_visible |= DTRACE_CRV_KERNEL |
13495 DTRACE_CRV_ALLPROC | DTRACE_CRV_ALLZONE;
13496
13497 state->dts_cred.dcr_action |= DTRACE_CRA_KERNEL |
13498 DTRACE_CRA_PROC;
13499 /*
13500 * Holding proc_owner means that destructive actions
13501 * for *this* zone are allowed.
13502 */
13503 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE))
13504 state->dts_cred.dcr_action |=
13505 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER;
13506
13507 /*
13508 * Holding proc_zone means that destructive actions
13509 * for this user/group ID in all zones is allowed.
13510 */
13511 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE))
13512 state->dts_cred.dcr_action |=
13513 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE;
13514
13515 /*
13516 * If we have all privs in whatever zone this is,
13517 * we can do destructive things to processes which
13518 * have altered credentials.
13519 */
13520 if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE),
13521 cr->cr_zone->zone_privset)) {
13522 state->dts_cred.dcr_action |=
13523 DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG;
13524 }
13525 }
13526
13527 /*
13528 * Holding the dtrace_proc privilege gives control over fasttrap
13529 * and pid providers. We need to grant wider destructive
13530 * privileges in the event that the user has proc_owner and/or
13531 * proc_zone.
13532 */
13533 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) {
13534 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE))
13535 state->dts_cred.dcr_action |=
13536 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER;
13537
13538 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE))
13539 state->dts_cred.dcr_action |=
13540 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE;
13541 }
13542 }
13543
13544 return (state);
13545 }
13546
13547 static int
13548 dtrace_state_buffer(dtrace_state_t *state, dtrace_buffer_t *buf, int which)
13549 {
13550 dtrace_optval_t *opt = state->dts_options, size;
13551 processorid_t cpu;
13552 int flags = 0, rval, factor, divisor = 1;
13553
13554 ASSERT(MUTEX_HELD(&dtrace_lock));
13555 ASSERT(MUTEX_HELD(&cpu_lock));
13556 ASSERT(which < DTRACEOPT_MAX);
13557 ASSERT(state->dts_activity == DTRACE_ACTIVITY_INACTIVE ||
13558 (state == dtrace_anon.dta_state &&
13559 state->dts_activity == DTRACE_ACTIVITY_ACTIVE));
13560
13561 if (opt[which] == DTRACEOPT_UNSET || opt[which] == 0)
13562 return (0);
13563
13564 if (opt[DTRACEOPT_CPU] != DTRACEOPT_UNSET)
13565 cpu = opt[DTRACEOPT_CPU];
13566
13567 if (which == DTRACEOPT_SPECSIZE)
13568 flags |= DTRACEBUF_NOSWITCH;
13569
13570 if (which == DTRACEOPT_BUFSIZE) {
13571 if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_RING)
13572 flags |= DTRACEBUF_RING;
13573
13574 if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_FILL)
13575 flags |= DTRACEBUF_FILL;
13576
13577 if (state != dtrace_anon.dta_state ||
13578 state->dts_activity != DTRACE_ACTIVITY_ACTIVE)
13579 flags |= DTRACEBUF_INACTIVE;
13580 }
13581
13582 for (size = opt[which]; size >= sizeof (uint64_t); size /= divisor) {
13583 /*
13584 * The size must be 8-byte aligned. If the size is not 8-byte
13585 * aligned, drop it down by the difference.
13586 */
13587 if (size & (sizeof (uint64_t) - 1))
13588 size -= size & (sizeof (uint64_t) - 1);
13589
13590 if (size < state->dts_reserve) {
13591 /*
13592 * Buffers always must be large enough to accommodate
13593 * their prereserved space. We return E2BIG instead
13594 * of ENOMEM in this case to allow for user-level
13595 * software to differentiate the cases.
13596 */
13597 return (E2BIG);
13598 }
13599
13600 rval = dtrace_buffer_alloc(buf, size, flags, cpu, &factor);
13601
13602 if (rval != ENOMEM) {
13603 opt[which] = size;
13604 return (rval);
13605 }
13606
13607 if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL)
13608 return (rval);
13609
13610 for (divisor = 2; divisor < factor; divisor <<= 1)
13611 continue;
13612 }
13613
13614 return (ENOMEM);
13615 }
13616
13617 static int
13618 dtrace_state_buffers(dtrace_state_t *state)
13619 {
13620 dtrace_speculation_t *spec = state->dts_speculations;
13621 int rval, i;
13622
13623 if ((rval = dtrace_state_buffer(state, state->dts_buffer,
13624 DTRACEOPT_BUFSIZE)) != 0)
13625 return (rval);
13626
13627 if ((rval = dtrace_state_buffer(state, state->dts_aggbuffer,
13628 DTRACEOPT_AGGSIZE)) != 0)
13629 return (rval);
13630
13631 for (i = 0; i < state->dts_nspeculations; i++) {
13632 if ((rval = dtrace_state_buffer(state,
13633 spec[i].dtsp_buffer, DTRACEOPT_SPECSIZE)) != 0)
13634 return (rval);
13635 }
13636
13637 return (0);
13638 }
13639
13640 static void
13641 dtrace_state_prereserve(dtrace_state_t *state)
13642 {
13643 dtrace_ecb_t *ecb;
13644 dtrace_probe_t *probe;
13645
13646 state->dts_reserve = 0;
13647
13648 if (state->dts_options[DTRACEOPT_BUFPOLICY] != DTRACEOPT_BUFPOLICY_FILL)
13649 return;
13650
13651 /*
13652 * If our buffer policy is a "fill" buffer policy, we need to set the
13653 * prereserved space to be the space required by the END probes.
13654 */
13655 probe = dtrace_probes[dtrace_probeid_end - 1];
13656 ASSERT(probe != NULL);
13657
13658 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) {
13659 if (ecb->dte_state != state)
13660 continue;
13661
13662 state->dts_reserve += ecb->dte_needed + ecb->dte_alignment;
13663 }
13664 }
13665
13666 static int
13667 dtrace_state_go(dtrace_state_t *state, processorid_t *cpu)
13668 {
13669 dtrace_optval_t *opt = state->dts_options, sz, nspec;
13670 dtrace_speculation_t *spec;
13671 dtrace_buffer_t *buf;
13672 cyc_handler_t hdlr;
13673 cyc_time_t when;
13674 int rval = 0, i, bufsize = NCPU * sizeof (dtrace_buffer_t);
13675 dtrace_icookie_t cookie;
13676
13677 mutex_enter(&cpu_lock);
13678 mutex_enter(&dtrace_lock);
13679
13680 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) {
13681 rval = EBUSY;
13682 goto out;
13683 }
13684
13685 /*
13686 * Before we can perform any checks, we must prime all of the
13687 * retained enablings that correspond to this state.
13688 */
13689 dtrace_enabling_prime(state);
13690
13691 if (state->dts_destructive && !state->dts_cred.dcr_destructive) {
13692 rval = EACCES;
13693 goto out;
13694 }
13695
13696 dtrace_state_prereserve(state);
13697
13698 /*
13699 * Now we want to do is try to allocate our speculations.
13700 * We do not automatically resize the number of speculations; if
13701 * this fails, we will fail the operation.
13702 */
13703 nspec = opt[DTRACEOPT_NSPEC];
13704 ASSERT(nspec != DTRACEOPT_UNSET);
13705
13706 if (nspec > INT_MAX) {
13707 rval = ENOMEM;
13708 goto out;
13709 }
13710
13711 spec = kmem_zalloc(nspec * sizeof (dtrace_speculation_t),
13712 KM_NOSLEEP | KM_NORMALPRI);
13713
13714 if (spec == NULL) {
13715 rval = ENOMEM;
13716 goto out;
13717 }
13718
13719 state->dts_speculations = spec;
13720 state->dts_nspeculations = (int)nspec;
13721
13722 for (i = 0; i < nspec; i++) {
13723 if ((buf = kmem_zalloc(bufsize,
13724 KM_NOSLEEP | KM_NORMALPRI)) == NULL) {
13725 rval = ENOMEM;
13726 goto err;
13727 }
13728
13729 spec[i].dtsp_buffer = buf;
13730 }
13731
13732 if (opt[DTRACEOPT_GRABANON] != DTRACEOPT_UNSET) {
13733 if (dtrace_anon.dta_state == NULL) {
13734 rval = ENOENT;
13735 goto out;
13736 }
13737
13738 if (state->dts_necbs != 0) {
13739 rval = EALREADY;
13740 goto out;
13741 }
13742
13743 state->dts_anon = dtrace_anon_grab();
13744 ASSERT(state->dts_anon != NULL);
13745 state = state->dts_anon;
13746
13747 /*
13748 * We want "grabanon" to be set in the grabbed state, so we'll
13749 * copy that option value from the grabbing state into the
13750 * grabbed state.
13751 */
13752 state->dts_options[DTRACEOPT_GRABANON] =
13753 opt[DTRACEOPT_GRABANON];
13754
13755 *cpu = dtrace_anon.dta_beganon;
13756
13757 /*
13758 * If the anonymous state is active (as it almost certainly
13759 * is if the anonymous enabling ultimately matched anything),
13760 * we don't allow any further option processing -- but we
13761 * don't return failure.
13762 */
13763 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE)
13764 goto out;
13765 }
13766
13767 if (opt[DTRACEOPT_AGGSIZE] != DTRACEOPT_UNSET &&
13768 opt[DTRACEOPT_AGGSIZE] != 0) {
13769 if (state->dts_aggregations == NULL) {
13770 /*
13771 * We're not going to create an aggregation buffer
13772 * because we don't have any ECBs that contain
13773 * aggregations -- set this option to 0.
13774 */
13775 opt[DTRACEOPT_AGGSIZE] = 0;
13776 } else {
13777 /*
13778 * If we have an aggregation buffer, we must also have
13779 * a buffer to use as scratch.
13780 */
13781 if (opt[DTRACEOPT_BUFSIZE] == DTRACEOPT_UNSET ||
13782 opt[DTRACEOPT_BUFSIZE] < state->dts_needed) {
13783 opt[DTRACEOPT_BUFSIZE] = state->dts_needed;
13784 }
13785 }
13786 }
13787
13788 if (opt[DTRACEOPT_SPECSIZE] != DTRACEOPT_UNSET &&
13789 opt[DTRACEOPT_SPECSIZE] != 0) {
13790 if (!state->dts_speculates) {
13791 /*
13792 * We're not going to create speculation buffers
13793 * because we don't have any ECBs that actually
13794 * speculate -- set the speculation size to 0.
13795 */
13796 opt[DTRACEOPT_SPECSIZE] = 0;
13797 }
13798 }
13799
13800 /*
13801 * The bare minimum size for any buffer that we're actually going to
13802 * do anything to is sizeof (uint64_t).
13803 */
13804 sz = sizeof (uint64_t);
13805
13806 if ((state->dts_needed != 0 && opt[DTRACEOPT_BUFSIZE] < sz) ||
13807 (state->dts_speculates && opt[DTRACEOPT_SPECSIZE] < sz) ||
13808 (state->dts_aggregations != NULL && opt[DTRACEOPT_AGGSIZE] < sz)) {
13809 /*
13810 * A buffer size has been explicitly set to 0 (or to a size
13811 * that will be adjusted to 0) and we need the space -- we
13812 * need to return failure. We return ENOSPC to differentiate
13813 * it from failing to allocate a buffer due to failure to meet
13814 * the reserve (for which we return E2BIG).
13815 */
13816 rval = ENOSPC;
13817 goto out;
13818 }
13819
13820 if ((rval = dtrace_state_buffers(state)) != 0)
13821 goto err;
13822
13823 if ((sz = opt[DTRACEOPT_DYNVARSIZE]) == DTRACEOPT_UNSET)
13824 sz = dtrace_dstate_defsize;
13825
13826 do {
13827 rval = dtrace_dstate_init(&state->dts_vstate.dtvs_dynvars, sz);
13828
13829 if (rval == 0)
13830 break;
13831
13832 if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL)
13833 goto err;
13834 } while (sz >>= 1);
13835
13836 opt[DTRACEOPT_DYNVARSIZE] = sz;
13837
13838 if (rval != 0)
13839 goto err;
13840
13841 if (opt[DTRACEOPT_STATUSRATE] > dtrace_statusrate_max)
13842 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_max;
13843
13844 if (opt[DTRACEOPT_CLEANRATE] == 0)
13845 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max;
13846
13847 if (opt[DTRACEOPT_CLEANRATE] < dtrace_cleanrate_min)
13848 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_min;
13849
13850 if (opt[DTRACEOPT_CLEANRATE] > dtrace_cleanrate_max)
13851 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max;
13852
13853 hdlr.cyh_func = (cyc_func_t)dtrace_state_clean;
13854 hdlr.cyh_arg = state;
13855 hdlr.cyh_level = CY_LOW_LEVEL;
13856
13857 when.cyt_when = 0;
13858 when.cyt_interval = opt[DTRACEOPT_CLEANRATE];
13859
13860 state->dts_cleaner = cyclic_add(&hdlr, &when);
13861
13862 hdlr.cyh_func = (cyc_func_t)dtrace_state_deadman;
13863 hdlr.cyh_arg = state;
13864 hdlr.cyh_level = CY_LOW_LEVEL;
13865
13866 when.cyt_when = 0;
13867 when.cyt_interval = dtrace_deadman_interval;
13868
13869 state->dts_alive = state->dts_laststatus = dtrace_gethrtime();
13870 state->dts_deadman = cyclic_add(&hdlr, &when);
13871
13872 state->dts_activity = DTRACE_ACTIVITY_WARMUP;
13873
13874 if (state->dts_getf != 0 &&
13875 !(state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL)) {
13876 /*
13877 * We don't have kernel privs but we have at least one call
13878 * to getf(); we need to bump our zone's count, and (if
13879 * this is the first enabling to have an unprivileged call
13880 * to getf()) we need to hook into closef().
13881 */
13882 state->dts_cred.dcr_cred->cr_zone->zone_dtrace_getf++;
13883
13884 if (dtrace_getf++ == 0) {
13885 ASSERT(dtrace_closef == NULL);
13886 dtrace_closef = dtrace_getf_barrier;
13887 }
13888 }
13889
13890 /*
13891 * Now it's time to actually fire the BEGIN probe. We need to disable
13892 * interrupts here both to record the CPU on which we fired the BEGIN
13893 * probe (the data from this CPU will be processed first at user
13894 * level) and to manually activate the buffer for this CPU.
13895 */
13896 cookie = dtrace_interrupt_disable();
13897 *cpu = CPU->cpu_id;
13898 ASSERT(state->dts_buffer[*cpu].dtb_flags & DTRACEBUF_INACTIVE);
13899 state->dts_buffer[*cpu].dtb_flags &= ~DTRACEBUF_INACTIVE;
13900
13901 dtrace_probe(dtrace_probeid_begin,
13902 (uint64_t)(uintptr_t)state, 0, 0, 0, 0);
13903 dtrace_interrupt_enable(cookie);
13904 /*
13905 * We may have had an exit action from a BEGIN probe; only change our
13906 * state to ACTIVE if we're still in WARMUP.
13907 */
13908 ASSERT(state->dts_activity == DTRACE_ACTIVITY_WARMUP ||
13909 state->dts_activity == DTRACE_ACTIVITY_DRAINING);
13910
13911 if (state->dts_activity == DTRACE_ACTIVITY_WARMUP)
13912 state->dts_activity = DTRACE_ACTIVITY_ACTIVE;
13913
13914 /*
13915 * Regardless of whether or not now we're in ACTIVE or DRAINING, we
13916 * want each CPU to transition its principal buffer out of the
13917 * INACTIVE state. Doing this assures that no CPU will suddenly begin
13918 * processing an ECB halfway down a probe's ECB chain; all CPUs will
13919 * atomically transition from processing none of a state's ECBs to
13920 * processing all of them.
13921 */
13922 dtrace_xcall(DTRACE_CPUALL,
13923 (dtrace_xcall_t)dtrace_buffer_activate, state);
13924 goto out;
13925
13926 err:
13927 dtrace_buffer_free(state->dts_buffer);
13928 dtrace_buffer_free(state->dts_aggbuffer);
13929
13930 if ((nspec = state->dts_nspeculations) == 0) {
13931 ASSERT(state->dts_speculations == NULL);
13932 goto out;
13933 }
13934
13935 spec = state->dts_speculations;
13936 ASSERT(spec != NULL);
13937
13938 for (i = 0; i < state->dts_nspeculations; i++) {
13939 if ((buf = spec[i].dtsp_buffer) == NULL)
13940 break;
13941
13942 dtrace_buffer_free(buf);
13943 kmem_free(buf, bufsize);
13944 }
13945
13946 kmem_free(spec, nspec * sizeof (dtrace_speculation_t));
13947 state->dts_nspeculations = 0;
13948 state->dts_speculations = NULL;
13949
13950 out:
13951 mutex_exit(&dtrace_lock);
13952 mutex_exit(&cpu_lock);
13953
13954 return (rval);
13955 }
13956
13957 static int
13958 dtrace_state_stop(dtrace_state_t *state, processorid_t *cpu)
13959 {
13960 dtrace_icookie_t cookie;
13961
13962 ASSERT(MUTEX_HELD(&dtrace_lock));
13963
13964 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE &&
13965 state->dts_activity != DTRACE_ACTIVITY_DRAINING)
13966 return (EINVAL);
13967
13968 /*
13969 * We'll set the activity to DTRACE_ACTIVITY_DRAINING, and issue a sync
13970 * to be sure that every CPU has seen it. See below for the details
13971 * on why this is done.
13972 */
13973 state->dts_activity = DTRACE_ACTIVITY_DRAINING;
13974 dtrace_sync();
13975
13976 /*
13977 * By this point, it is impossible for any CPU to be still processing
13978 * with DTRACE_ACTIVITY_ACTIVE. We can thus set our activity to
13979 * DTRACE_ACTIVITY_COOLDOWN and know that we're not racing with any
13980 * other CPU in dtrace_buffer_reserve(). This allows dtrace_probe()
13981 * and callees to know that the activity is DTRACE_ACTIVITY_COOLDOWN
13982 * iff we're in the END probe.
13983 */
13984 state->dts_activity = DTRACE_ACTIVITY_COOLDOWN;
13985 dtrace_sync();
13986 ASSERT(state->dts_activity == DTRACE_ACTIVITY_COOLDOWN);
13987
13988 /*
13989 * Finally, we can release the reserve and call the END probe. We
13990 * disable interrupts across calling the END probe to allow us to
13991 * return the CPU on which we actually called the END probe. This
13992 * allows user-land to be sure that this CPU's principal buffer is
13993 * processed last.
13994 */
13995 state->dts_reserve = 0;
13996
13997 cookie = dtrace_interrupt_disable();
13998 *cpu = CPU->cpu_id;
13999 dtrace_probe(dtrace_probeid_end,
14000 (uint64_t)(uintptr_t)state, 0, 0, 0, 0);
14001 dtrace_interrupt_enable(cookie);
14002
14003 state->dts_activity = DTRACE_ACTIVITY_STOPPED;
14004 dtrace_sync();
14005
14006 if (state->dts_getf != 0 &&
14007 !(state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL)) {
14008 /*
14009 * We don't have kernel privs but we have at least one call
14010 * to getf(); we need to lower our zone's count, and (if
14011 * this is the last enabling to have an unprivileged call
14012 * to getf()) we need to clear the closef() hook.
14013 */
14014 ASSERT(state->dts_cred.dcr_cred->cr_zone->zone_dtrace_getf > 0);
14015 ASSERT(dtrace_closef == dtrace_getf_barrier);
14016 ASSERT(dtrace_getf > 0);
14017
14018 state->dts_cred.dcr_cred->cr_zone->zone_dtrace_getf--;
14019
14020 if (--dtrace_getf == 0)
14021 dtrace_closef = NULL;
14022 }
14023
14024 return (0);
14025 }
14026
14027 static int
14028 dtrace_state_option(dtrace_state_t *state, dtrace_optid_t option,
14029 dtrace_optval_t val)
14030 {
14031 ASSERT(MUTEX_HELD(&dtrace_lock));
14032
14033 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE)
14034 return (EBUSY);
14035
14036 if (option >= DTRACEOPT_MAX)
14037 return (EINVAL);
14038
14039 if (option != DTRACEOPT_CPU && val < 0)
14040 return (EINVAL);
14041
14042 switch (option) {
14043 case DTRACEOPT_DESTRUCTIVE:
14044 if (dtrace_destructive_disallow)
14045 return (EACCES);
14046
14047 state->dts_cred.dcr_destructive = 1;
14048 break;
14049
14050 case DTRACEOPT_BUFSIZE:
14051 case DTRACEOPT_DYNVARSIZE:
14052 case DTRACEOPT_AGGSIZE:
14053 case DTRACEOPT_SPECSIZE:
14054 case DTRACEOPT_STRSIZE:
14055 if (val < 0)
14056 return (EINVAL);
14057
14058 if (val >= LONG_MAX) {
14059 /*
14060 * If this is an otherwise negative value, set it to
14061 * the highest multiple of 128m less than LONG_MAX.
14062 * Technically, we're adjusting the size without
14063 * regard to the buffer resizing policy, but in fact,
14064 * this has no effect -- if we set the buffer size to
14065 * ~LONG_MAX and the buffer policy is ultimately set to
14066 * be "manual", the buffer allocation is guaranteed to
14067 * fail, if only because the allocation requires two
14068 * buffers. (We set the the size to the highest
14069 * multiple of 128m because it ensures that the size
14070 * will remain a multiple of a megabyte when
14071 * repeatedly halved -- all the way down to 15m.)
14072 */
14073 val = LONG_MAX - (1 << 27) + 1;
14074 }
14075 }
14076
14077 state->dts_options[option] = val;
14078
14079 return (0);
14080 }
14081
14082 static void
14083 dtrace_state_destroy(dtrace_state_t *state)
14084 {
14085 dtrace_ecb_t *ecb;
14086 dtrace_vstate_t *vstate = &state->dts_vstate;
14087 minor_t minor = getminor(state->dts_dev);
14088 int i, bufsize = NCPU * sizeof (dtrace_buffer_t);
14089 dtrace_speculation_t *spec = state->dts_speculations;
14090 int nspec = state->dts_nspeculations;
14091 uint32_t match;
14092
14093 ASSERT(MUTEX_HELD(&dtrace_lock));
14094 ASSERT(MUTEX_HELD(&cpu_lock));
14095
14096 /*
14097 * First, retract any retained enablings for this state.
14098 */
14099 dtrace_enabling_retract(state);
14100 ASSERT(state->dts_nretained == 0);
14101
14102 if (state->dts_activity == DTRACE_ACTIVITY_ACTIVE ||
14103 state->dts_activity == DTRACE_ACTIVITY_DRAINING) {
14104 /*
14105 * We have managed to come into dtrace_state_destroy() on a
14106 * hot enabling -- almost certainly because of a disorderly
14107 * shutdown of a consumer. (That is, a consumer that is
14108 * exiting without having called dtrace_stop().) In this case,
14109 * we're going to set our activity to be KILLED, and then
14110 * issue a sync to be sure that everyone is out of probe
14111 * context before we start blowing away ECBs.
14112 */
14113 state->dts_activity = DTRACE_ACTIVITY_KILLED;
14114 dtrace_sync();
14115 }
14116
14117 /*
14118 * Release the credential hold we took in dtrace_state_create().
14119 */
14120 if (state->dts_cred.dcr_cred != NULL)
14121 crfree(state->dts_cred.dcr_cred);
14122
14123 /*
14124 * Now we can safely disable and destroy any enabled probes. Because
14125 * any DTRACE_PRIV_KERNEL probes may actually be slowing our progress
14126 * (especially if they're all enabled), we take two passes through the
14127 * ECBs: in the first, we disable just DTRACE_PRIV_KERNEL probes, and
14128 * in the second we disable whatever is left over.
14129 */
14130 for (match = DTRACE_PRIV_KERNEL; ; match = 0) {
14131 for (i = 0; i < state->dts_necbs; i++) {
14132 if ((ecb = state->dts_ecbs[i]) == NULL)
14133 continue;
14134
14135 if (match && ecb->dte_probe != NULL) {
14136 dtrace_probe_t *probe = ecb->dte_probe;
14137 dtrace_provider_t *prov = probe->dtpr_provider;
14138
14139 if (!(prov->dtpv_priv.dtpp_flags & match))
14140 continue;
14141 }
14142
14143 dtrace_ecb_disable(ecb);
14144 dtrace_ecb_destroy(ecb);
14145 }
14146
14147 if (!match)
14148 break;
14149 }
14150
14151 /*
14152 * Before we free the buffers, perform one more sync to assure that
14153 * every CPU is out of probe context.
14154 */
14155 dtrace_sync();
14156
14157 dtrace_buffer_free(state->dts_buffer);
14158 dtrace_buffer_free(state->dts_aggbuffer);
14159
14160 for (i = 0; i < nspec; i++)
14161 dtrace_buffer_free(spec[i].dtsp_buffer);
14162
14163 if (state->dts_cleaner != CYCLIC_NONE)
14164 cyclic_remove(state->dts_cleaner);
14165
14166 if (state->dts_deadman != CYCLIC_NONE)
14167 cyclic_remove(state->dts_deadman);
14168
14169 dtrace_dstate_fini(&vstate->dtvs_dynvars);
14170 dtrace_vstate_fini(vstate);
14171 kmem_free(state->dts_ecbs, state->dts_necbs * sizeof (dtrace_ecb_t *));
14172
14173 if (state->dts_aggregations != NULL) {
14174 #ifdef DEBUG
14175 for (i = 0; i < state->dts_naggregations; i++)
14176 ASSERT(state->dts_aggregations[i] == NULL);
14177 #endif
14178 ASSERT(state->dts_naggregations > 0);
14179 kmem_free(state->dts_aggregations,
14180 state->dts_naggregations * sizeof (dtrace_aggregation_t *));
14181 }
14182
14183 kmem_free(state->dts_buffer, bufsize);
14184 kmem_free(state->dts_aggbuffer, bufsize);
14185
14186 for (i = 0; i < nspec; i++)
14187 kmem_free(spec[i].dtsp_buffer, bufsize);
14188
14189 kmem_free(spec, nspec * sizeof (dtrace_speculation_t));
14190
14191 dtrace_format_destroy(state);
14192
14193 vmem_destroy(state->dts_aggid_arena);
14194 ddi_soft_state_free(dtrace_softstate, minor);
14195 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1);
14196 }
14197
14198 /*
14199 * DTrace Anonymous Enabling Functions
14200 */
14201 static dtrace_state_t *
14202 dtrace_anon_grab(void)
14203 {
14204 dtrace_state_t *state;
14205
14206 ASSERT(MUTEX_HELD(&dtrace_lock));
14207
14208 if ((state = dtrace_anon.dta_state) == NULL) {
14209 ASSERT(dtrace_anon.dta_enabling == NULL);
14210 return (NULL);
14211 }
14212
14213 ASSERT(dtrace_anon.dta_enabling != NULL);
14214 ASSERT(dtrace_retained != NULL);
14215
14216 dtrace_enabling_destroy(dtrace_anon.dta_enabling);
14217 dtrace_anon.dta_enabling = NULL;
14218 dtrace_anon.dta_state = NULL;
14219
14220 return (state);
14221 }
14222
14223 static void
14224 dtrace_anon_property(void)
14225 {
14226 int i, rv;
14227 dtrace_state_t *state;
14228 dof_hdr_t *dof;
14229 char c[32]; /* enough for "dof-data-" + digits */
14230
14231 ASSERT(MUTEX_HELD(&dtrace_lock));
14232 ASSERT(MUTEX_HELD(&cpu_lock));
14233
14234 for (i = 0; ; i++) {
14235 (void) snprintf(c, sizeof (c), "dof-data-%d", i);
14236
14237 dtrace_err_verbose = 1;
14238
14239 if ((dof = dtrace_dof_property(c)) == NULL) {
14240 dtrace_err_verbose = 0;
14241 break;
14242 }
14243
14244 /*
14245 * We want to create anonymous state, so we need to transition
14246 * the kernel debugger to indicate that DTrace is active. If
14247 * this fails (e.g. because the debugger has modified text in
14248 * some way), we won't continue with the processing.
14249 */
14250 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) {
14251 cmn_err(CE_NOTE, "kernel debugger active; anonymous "
14252 "enabling ignored.");
14253 dtrace_dof_destroy(dof);
14254 break;
14255 }
14256
14257 /*
14258 * If we haven't allocated an anonymous state, we'll do so now.
14259 */
14260 if ((state = dtrace_anon.dta_state) == NULL) {
14261 state = dtrace_state_create(NULL, NULL);
14262 dtrace_anon.dta_state = state;
14263
14264 if (state == NULL) {
14265 /*
14266 * This basically shouldn't happen: the only
14267 * failure mode from dtrace_state_create() is a
14268 * failure of ddi_soft_state_zalloc() that
14269 * itself should never happen. Still, the
14270 * interface allows for a failure mode, and
14271 * we want to fail as gracefully as possible:
14272 * we'll emit an error message and cease
14273 * processing anonymous state in this case.
14274 */
14275 cmn_err(CE_WARN, "failed to create "
14276 "anonymous state");
14277 dtrace_dof_destroy(dof);
14278 break;
14279 }
14280 }
14281
14282 rv = dtrace_dof_slurp(dof, &state->dts_vstate, CRED(),
14283 &dtrace_anon.dta_enabling, 0, B_TRUE);
14284
14285 if (rv == 0)
14286 rv = dtrace_dof_options(dof, state);
14287
14288 dtrace_err_verbose = 0;
14289 dtrace_dof_destroy(dof);
14290
14291 if (rv != 0) {
14292 /*
14293 * This is malformed DOF; chuck any anonymous state
14294 * that we created.
14295 */
14296 ASSERT(dtrace_anon.dta_enabling == NULL);
14297 dtrace_state_destroy(state);
14298 dtrace_anon.dta_state = NULL;
14299 break;
14300 }
14301
14302 ASSERT(dtrace_anon.dta_enabling != NULL);
14303 }
14304
14305 if (dtrace_anon.dta_enabling != NULL) {
14306 int rval;
14307
14308 /*
14309 * dtrace_enabling_retain() can only fail because we are
14310 * trying to retain more enablings than are allowed -- but
14311 * we only have one anonymous enabling, and we are guaranteed
14312 * to be allowed at least one retained enabling; we assert
14313 * that dtrace_enabling_retain() returns success.
14314 */
14315 rval = dtrace_enabling_retain(dtrace_anon.dta_enabling);
14316 ASSERT(rval == 0);
14317
14318 dtrace_enabling_dump(dtrace_anon.dta_enabling);
14319 }
14320 }
14321
14322 /*
14323 * DTrace Helper Functions
14324 */
14325 static void
14326 dtrace_helper_trace(dtrace_helper_action_t *helper,
14327 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate, int where)
14328 {
14329 uint32_t size, next, nnext, i;
14330 dtrace_helptrace_t *ent, *buffer;
14331 uint16_t flags = cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
14332
14333 if ((buffer = dtrace_helptrace_buffer) == NULL)
14334 return;
14335
14336 ASSERT(vstate->dtvs_nlocals <= dtrace_helptrace_nlocals);
14337
14338 /*
14339 * What would a tracing framework be without its own tracing
14340 * framework? (Well, a hell of a lot simpler, for starters...)
14341 */
14342 size = sizeof (dtrace_helptrace_t) + dtrace_helptrace_nlocals *
14343 sizeof (uint64_t) - sizeof (uint64_t);
14344
14345 /*
14346 * Iterate until we can allocate a slot in the trace buffer.
14347 */
14348 do {
14349 next = dtrace_helptrace_next;
14350
14351 if (next + size < dtrace_helptrace_bufsize) {
14352 nnext = next + size;
14353 } else {
14354 nnext = size;
14355 }
14356 } while (dtrace_cas32(&dtrace_helptrace_next, next, nnext) != next);
14357
14358 /*
14359 * We have our slot; fill it in.
14360 */
14361 if (nnext == size) {
14362 dtrace_helptrace_wrapped++;
14363 next = 0;
14364 }
14365
14366 ent = (dtrace_helptrace_t *)((uintptr_t)buffer + next);
14367 ent->dtht_helper = helper;
14368 ent->dtht_where = where;
14369 ent->dtht_nlocals = vstate->dtvs_nlocals;
14370
14371 ent->dtht_fltoffs = (mstate->dtms_present & DTRACE_MSTATE_FLTOFFS) ?
14372 mstate->dtms_fltoffs : -1;
14373 ent->dtht_fault = DTRACE_FLAGS2FLT(flags);
14374 ent->dtht_illval = cpu_core[CPU->cpu_id].cpuc_dtrace_illval;
14375
14376 for (i = 0; i < vstate->dtvs_nlocals; i++) {
14377 dtrace_statvar_t *svar;
14378
14379 if ((svar = vstate->dtvs_locals[i]) == NULL)
14380 continue;
14381
14382 ASSERT(svar->dtsv_size >= NCPU * sizeof (uint64_t));
14383 ent->dtht_locals[i] =
14384 ((uint64_t *)(uintptr_t)svar->dtsv_data)[CPU->cpu_id];
14385 }
14386 }
14387
14388 static uint64_t
14389 dtrace_helper(int which, dtrace_mstate_t *mstate,
14390 dtrace_state_t *state, uint64_t arg0, uint64_t arg1)
14391 {
14392 uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
14393 uint64_t sarg0 = mstate->dtms_arg[0];
14394 uint64_t sarg1 = mstate->dtms_arg[1];
14395 uint64_t rval;
14396 dtrace_helpers_t *helpers = curproc->p_dtrace_helpers;
14397 dtrace_helper_action_t *helper;
14398 dtrace_vstate_t *vstate;
14399 dtrace_difo_t *pred;
14400 int i, trace = dtrace_helptrace_buffer != NULL;
14401
14402 ASSERT(which >= 0 && which < DTRACE_NHELPER_ACTIONS);
14403
14404 if (helpers == NULL)
14405 return (0);
14406
14407 if ((helper = helpers->dthps_actions[which]) == NULL)
14408 return (0);
14409
14410 vstate = &helpers->dthps_vstate;
14411 mstate->dtms_arg[0] = arg0;
14412 mstate->dtms_arg[1] = arg1;
14413
14414 /*
14415 * Now iterate over each helper. If its predicate evaluates to 'true',
14416 * we'll call the corresponding actions. Note that the below calls
14417 * to dtrace_dif_emulate() may set faults in machine state. This is
14418 * okay: our caller (the outer dtrace_dif_emulate()) will simply plow
14419 * the stored DIF offset with its own (which is the desired behavior).
14420 * Also, note the calls to dtrace_dif_emulate() may allocate scratch
14421 * from machine state; this is okay, too.
14422 */
14423 for (; helper != NULL; helper = helper->dtha_next) {
14424 if ((pred = helper->dtha_predicate) != NULL) {
14425 if (trace)
14426 dtrace_helper_trace(helper, mstate, vstate, 0);
14427
14428 if (!dtrace_dif_emulate(pred, mstate, vstate, state))
14429 goto next;
14430
14431 if (*flags & CPU_DTRACE_FAULT)
14432 goto err;
14433 }
14434
14435 for (i = 0; i < helper->dtha_nactions; i++) {
14436 if (trace)
14437 dtrace_helper_trace(helper,
14438 mstate, vstate, i + 1);
14439
14440 rval = dtrace_dif_emulate(helper->dtha_actions[i],
14441 mstate, vstate, state);
14442
14443 if (*flags & CPU_DTRACE_FAULT)
14444 goto err;
14445 }
14446
14447 next:
14448 if (trace)
14449 dtrace_helper_trace(helper, mstate, vstate,
14450 DTRACE_HELPTRACE_NEXT);
14451 }
14452
14453 if (trace)
14454 dtrace_helper_trace(helper, mstate, vstate,
14455 DTRACE_HELPTRACE_DONE);
14456
14457 /*
14458 * Restore the arg0 that we saved upon entry.
14459 */
14460 mstate->dtms_arg[0] = sarg0;
14461 mstate->dtms_arg[1] = sarg1;
14462
14463 return (rval);
14464
14465 err:
14466 if (trace)
14467 dtrace_helper_trace(helper, mstate, vstate,
14468 DTRACE_HELPTRACE_ERR);
14469
14470 /*
14471 * Restore the arg0 that we saved upon entry.
14472 */
14473 mstate->dtms_arg[0] = sarg0;
14474 mstate->dtms_arg[1] = sarg1;
14475
14476 return (NULL);
14477 }
14478
14479 static void
14480 dtrace_helper_action_destroy(dtrace_helper_action_t *helper,
14481 dtrace_vstate_t *vstate)
14482 {
14483 int i;
14484
14485 if (helper->dtha_predicate != NULL)
14486 dtrace_difo_release(helper->dtha_predicate, vstate);
14487
14488 for (i = 0; i < helper->dtha_nactions; i++) {
14489 ASSERT(helper->dtha_actions[i] != NULL);
14490 dtrace_difo_release(helper->dtha_actions[i], vstate);
14491 }
14492
14493 kmem_free(helper->dtha_actions,
14494 helper->dtha_nactions * sizeof (dtrace_difo_t *));
14495 kmem_free(helper, sizeof (dtrace_helper_action_t));
14496 }
14497
14498 static int
14499 dtrace_helper_destroygen(int gen)
14500 {
14501 proc_t *p = curproc;
14502 dtrace_helpers_t *help = p->p_dtrace_helpers;
14503 dtrace_vstate_t *vstate;
14504 int i;
14505
14506 ASSERT(MUTEX_HELD(&dtrace_lock));
14507
14508 if (help == NULL || gen > help->dthps_generation)
14509 return (EINVAL);
14510
14511 vstate = &help->dthps_vstate;
14512
14513 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) {
14514 dtrace_helper_action_t *last = NULL, *h, *next;
14515
14516 for (h = help->dthps_actions[i]; h != NULL; h = next) {
14517 next = h->dtha_next;
14518
14519 if (h->dtha_generation == gen) {
14520 if (last != NULL) {
14521 last->dtha_next = next;
14522 } else {
14523 help->dthps_actions[i] = next;
14524 }
14525
14526 dtrace_helper_action_destroy(h, vstate);
14527 } else {
14528 last = h;
14529 }
14530 }
14531 }
14532
14533 /*
14534 * Interate until we've cleared out all helper providers with the
14535 * given generation number.
14536 */
14537 for (;;) {
14538 dtrace_helper_provider_t *prov;
14539
14540 /*
14541 * Look for a helper provider with the right generation. We
14542 * have to start back at the beginning of the list each time
14543 * because we drop dtrace_lock. It's unlikely that we'll make
14544 * more than two passes.
14545 */
14546 for (i = 0; i < help->dthps_nprovs; i++) {
14547 prov = help->dthps_provs[i];
14548
14549 if (prov->dthp_generation == gen)
14550 break;
14551 }
14552
14553 /*
14554 * If there were no matches, we're done.
14555 */
14556 if (i == help->dthps_nprovs)
14557 break;
14558
14559 /*
14560 * Move the last helper provider into this slot.
14561 */
14562 help->dthps_nprovs--;
14563 help->dthps_provs[i] = help->dthps_provs[help->dthps_nprovs];
14564 help->dthps_provs[help->dthps_nprovs] = NULL;
14565
14566 mutex_exit(&dtrace_lock);
14567
14568 /*
14569 * If we have a meta provider, remove this helper provider.
14570 */
14571 mutex_enter(&dtrace_meta_lock);
14572 if (dtrace_meta_pid != NULL) {
14573 ASSERT(dtrace_deferred_pid == NULL);
14574 dtrace_helper_provider_remove(&prov->dthp_prov,
14575 p->p_pid);
14576 }
14577 mutex_exit(&dtrace_meta_lock);
14578
14579 dtrace_helper_provider_destroy(prov);
14580
14581 mutex_enter(&dtrace_lock);
14582 }
14583
14584 return (0);
14585 }
14586
14587 static int
14588 dtrace_helper_validate(dtrace_helper_action_t *helper)
14589 {
14590 int err = 0, i;
14591 dtrace_difo_t *dp;
14592
14593 if ((dp = helper->dtha_predicate) != NULL)
14594 err += dtrace_difo_validate_helper(dp);
14595
14596 for (i = 0; i < helper->dtha_nactions; i++)
14597 err += dtrace_difo_validate_helper(helper->dtha_actions[i]);
14598
14599 return (err == 0);
14600 }
14601
14602 static int
14603 dtrace_helper_action_add(int which, dtrace_ecbdesc_t *ep)
14604 {
14605 dtrace_helpers_t *help;
14606 dtrace_helper_action_t *helper, *last;
14607 dtrace_actdesc_t *act;
14608 dtrace_vstate_t *vstate;
14609 dtrace_predicate_t *pred;
14610 int count = 0, nactions = 0, i;
14611
14612 if (which < 0 || which >= DTRACE_NHELPER_ACTIONS)
14613 return (EINVAL);
14614
14615 help = curproc->p_dtrace_helpers;
14616 last = help->dthps_actions[which];
14617 vstate = &help->dthps_vstate;
14618
14619 for (count = 0; last != NULL; last = last->dtha_next) {
14620 count++;
14621 if (last->dtha_next == NULL)
14622 break;
14623 }
14624
14625 /*
14626 * If we already have dtrace_helper_actions_max helper actions for this
14627 * helper action type, we'll refuse to add a new one.
14628 */
14629 if (count >= dtrace_helper_actions_max)
14630 return (ENOSPC);
14631
14632 helper = kmem_zalloc(sizeof (dtrace_helper_action_t), KM_SLEEP);
14633 helper->dtha_generation = help->dthps_generation;
14634
14635 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL) {
14636 ASSERT(pred->dtp_difo != NULL);
14637 dtrace_difo_hold(pred->dtp_difo);
14638 helper->dtha_predicate = pred->dtp_difo;
14639 }
14640
14641 for (act = ep->dted_action; act != NULL; act = act->dtad_next) {
14642 if (act->dtad_kind != DTRACEACT_DIFEXPR)
14643 goto err;
14644
14645 if (act->dtad_difo == NULL)
14646 goto err;
14647
14648 nactions++;
14649 }
14650
14651 helper->dtha_actions = kmem_zalloc(sizeof (dtrace_difo_t *) *
14652 (helper->dtha_nactions = nactions), KM_SLEEP);
14653
14654 for (act = ep->dted_action, i = 0; act != NULL; act = act->dtad_next) {
14655 dtrace_difo_hold(act->dtad_difo);
14656 helper->dtha_actions[i++] = act->dtad_difo;
14657 }
14658
14659 if (!dtrace_helper_validate(helper))
14660 goto err;
14661
14662 if (last == NULL) {
14663 help->dthps_actions[which] = helper;
14664 } else {
14665 last->dtha_next = helper;
14666 }
14667
14668 if (vstate->dtvs_nlocals > dtrace_helptrace_nlocals) {
14669 dtrace_helptrace_nlocals = vstate->dtvs_nlocals;
14670 dtrace_helptrace_next = 0;
14671 }
14672
14673 return (0);
14674 err:
14675 dtrace_helper_action_destroy(helper, vstate);
14676 return (EINVAL);
14677 }
14678
14679 static void
14680 dtrace_helper_provider_register(proc_t *p, dtrace_helpers_t *help,
14681 dof_helper_t *dofhp)
14682 {
14683 ASSERT(MUTEX_NOT_HELD(&dtrace_lock));
14684
14685 mutex_enter(&dtrace_meta_lock);
14686 mutex_enter(&dtrace_lock);
14687
14688 if (!dtrace_attached() || dtrace_meta_pid == NULL) {
14689 /*
14690 * If the dtrace module is loaded but not attached, or if
14691 * there aren't isn't a meta provider registered to deal with
14692 * these provider descriptions, we need to postpone creating
14693 * the actual providers until later.
14694 */
14695
14696 if (help->dthps_next == NULL && help->dthps_prev == NULL &&
14697 dtrace_deferred_pid != help) {
14698 help->dthps_deferred = 1;
14699 help->dthps_pid = p->p_pid;
14700 help->dthps_next = dtrace_deferred_pid;
14701 help->dthps_prev = NULL;
14702 if (dtrace_deferred_pid != NULL)
14703 dtrace_deferred_pid->dthps_prev = help;
14704 dtrace_deferred_pid = help;
14705 }
14706
14707 mutex_exit(&dtrace_lock);
14708
14709 } else if (dofhp != NULL) {
14710 /*
14711 * If the dtrace module is loaded and we have a particular
14712 * helper provider description, pass that off to the
14713 * meta provider.
14714 */
14715
14716 mutex_exit(&dtrace_lock);
14717
14718 dtrace_helper_provide(dofhp, p->p_pid);
14719
14720 } else {
14721 /*
14722 * Otherwise, just pass all the helper provider descriptions
14723 * off to the meta provider.
14724 */
14725
14726 int i;
14727 mutex_exit(&dtrace_lock);
14728
14729 for (i = 0; i < help->dthps_nprovs; i++) {
14730 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov,
14731 p->p_pid);
14732 }
14733 }
14734
14735 mutex_exit(&dtrace_meta_lock);
14736 }
14737
14738 static int
14739 dtrace_helper_provider_add(dof_helper_t *dofhp, int gen)
14740 {
14741 dtrace_helpers_t *help;
14742 dtrace_helper_provider_t *hprov, **tmp_provs;
14743 uint_t tmp_maxprovs, i;
14744
14745 ASSERT(MUTEX_HELD(&dtrace_lock));
14746
14747 help = curproc->p_dtrace_helpers;
14748 ASSERT(help != NULL);
14749
14750 /*
14751 * If we already have dtrace_helper_providers_max helper providers,
14752 * we're refuse to add a new one.
14753 */
14754 if (help->dthps_nprovs >= dtrace_helper_providers_max)
14755 return (ENOSPC);
14756
14757 /*
14758 * Check to make sure this isn't a duplicate.
14759 */
14760 for (i = 0; i < help->dthps_nprovs; i++) {
14761 if (dofhp->dofhp_dof ==
14762 help->dthps_provs[i]->dthp_prov.dofhp_dof)
14763 return (EALREADY);
14764 }
14765
14766 hprov = kmem_zalloc(sizeof (dtrace_helper_provider_t), KM_SLEEP);
14767 hprov->dthp_prov = *dofhp;
14768 hprov->dthp_ref = 1;
14769 hprov->dthp_generation = gen;
14770
14771 /*
14772 * Allocate a bigger table for helper providers if it's already full.
14773 */
14774 if (help->dthps_maxprovs == help->dthps_nprovs) {
14775 tmp_maxprovs = help->dthps_maxprovs;
14776 tmp_provs = help->dthps_provs;
14777
14778 if (help->dthps_maxprovs == 0)
14779 help->dthps_maxprovs = 2;
14780 else
14781 help->dthps_maxprovs *= 2;
14782 if (help->dthps_maxprovs > dtrace_helper_providers_max)
14783 help->dthps_maxprovs = dtrace_helper_providers_max;
14784
14785 ASSERT(tmp_maxprovs < help->dthps_maxprovs);
14786
14787 help->dthps_provs = kmem_zalloc(help->dthps_maxprovs *
14788 sizeof (dtrace_helper_provider_t *), KM_SLEEP);
14789
14790 if (tmp_provs != NULL) {
14791 bcopy(tmp_provs, help->dthps_provs, tmp_maxprovs *
14792 sizeof (dtrace_helper_provider_t *));
14793 kmem_free(tmp_provs, tmp_maxprovs *
14794 sizeof (dtrace_helper_provider_t *));
14795 }
14796 }
14797
14798 help->dthps_provs[help->dthps_nprovs] = hprov;
14799 help->dthps_nprovs++;
14800
14801 return (0);
14802 }
14803
14804 static void
14805 dtrace_helper_provider_destroy(dtrace_helper_provider_t *hprov)
14806 {
14807 mutex_enter(&dtrace_lock);
14808
14809 if (--hprov->dthp_ref == 0) {
14810 dof_hdr_t *dof;
14811 mutex_exit(&dtrace_lock);
14812 dof = (dof_hdr_t *)(uintptr_t)hprov->dthp_prov.dofhp_dof;
14813 dtrace_dof_destroy(dof);
14814 kmem_free(hprov, sizeof (dtrace_helper_provider_t));
14815 } else {
14816 mutex_exit(&dtrace_lock);
14817 }
14818 }
14819
14820 static int
14821 dtrace_helper_provider_validate(dof_hdr_t *dof, dof_sec_t *sec)
14822 {
14823 uintptr_t daddr = (uintptr_t)dof;
14824 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec;
14825 dof_provider_t *provider;
14826 dof_probe_t *probe;
14827 uint8_t *arg;
14828 char *strtab, *typestr;
14829 dof_stridx_t typeidx;
14830 size_t typesz;
14831 uint_t nprobes, j, k;
14832
14833 ASSERT(sec->dofs_type == DOF_SECT_PROVIDER);
14834
14835 if (sec->dofs_offset & (sizeof (uint_t) - 1)) {
14836 dtrace_dof_error(dof, "misaligned section offset");
14837 return (-1);
14838 }
14839
14840 /*
14841 * The section needs to be large enough to contain the DOF provider
14842 * structure appropriate for the given version.
14843 */
14844 if (sec->dofs_size <
14845 ((dof->dofh_ident[DOF_ID_VERSION] == DOF_VERSION_1) ?
14846 offsetof(dof_provider_t, dofpv_prenoffs) :
14847 sizeof (dof_provider_t))) {
14848 dtrace_dof_error(dof, "provider section too small");
14849 return (-1);
14850 }
14851
14852 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset);
14853 str_sec = dtrace_dof_sect(dof, DOF_SECT_STRTAB, provider->dofpv_strtab);
14854 prb_sec = dtrace_dof_sect(dof, DOF_SECT_PROBES, provider->dofpv_probes);
14855 arg_sec = dtrace_dof_sect(dof, DOF_SECT_PRARGS, provider->dofpv_prargs);
14856 off_sec = dtrace_dof_sect(dof, DOF_SECT_PROFFS, provider->dofpv_proffs);
14857
14858 if (str_sec == NULL || prb_sec == NULL ||
14859 arg_sec == NULL || off_sec == NULL)
14860 return (-1);
14861
14862 enoff_sec = NULL;
14863
14864 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 &&
14865 provider->dofpv_prenoffs != DOF_SECT_NONE &&
14866 (enoff_sec = dtrace_dof_sect(dof, DOF_SECT_PRENOFFS,
14867 provider->dofpv_prenoffs)) == NULL)
14868 return (-1);
14869
14870 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset);
14871
14872 if (provider->dofpv_name >= str_sec->dofs_size ||
14873 strlen(strtab + provider->dofpv_name) >= DTRACE_PROVNAMELEN) {
14874 dtrace_dof_error(dof, "invalid provider name");
14875 return (-1);
14876 }
14877
14878 if (prb_sec->dofs_entsize == 0 ||
14879 prb_sec->dofs_entsize > prb_sec->dofs_size) {
14880 dtrace_dof_error(dof, "invalid entry size");
14881 return (-1);
14882 }
14883
14884 if (prb_sec->dofs_entsize & (sizeof (uintptr_t) - 1)) {
14885 dtrace_dof_error(dof, "misaligned entry size");
14886 return (-1);
14887 }
14888
14889 if (off_sec->dofs_entsize != sizeof (uint32_t)) {
14890 dtrace_dof_error(dof, "invalid entry size");
14891 return (-1);
14892 }
14893
14894 if (off_sec->dofs_offset & (sizeof (uint32_t) - 1)) {
14895 dtrace_dof_error(dof, "misaligned section offset");
14896 return (-1);
14897 }
14898
14899 if (arg_sec->dofs_entsize != sizeof (uint8_t)) {
14900 dtrace_dof_error(dof, "invalid entry size");
14901 return (-1);
14902 }
14903
14904 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset);
14905
14906 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize;
14907
14908 /*
14909 * Take a pass through the probes to check for errors.
14910 */
14911 for (j = 0; j < nprobes; j++) {
14912 probe = (dof_probe_t *)(uintptr_t)(daddr +
14913 prb_sec->dofs_offset + j * prb_sec->dofs_entsize);
14914
14915 if (probe->dofpr_func >= str_sec->dofs_size) {
14916 dtrace_dof_error(dof, "invalid function name");
14917 return (-1);
14918 }
14919
14920 if (strlen(strtab + probe->dofpr_func) >= DTRACE_FUNCNAMELEN) {
14921 dtrace_dof_error(dof, "function name too long");
14922 return (-1);
14923 }
14924
14925 if (probe->dofpr_name >= str_sec->dofs_size ||
14926 strlen(strtab + probe->dofpr_name) >= DTRACE_NAMELEN) {
14927 dtrace_dof_error(dof, "invalid probe name");
14928 return (-1);
14929 }
14930
14931 /*
14932 * The offset count must not wrap the index, and the offsets
14933 * must also not overflow the section's data.
14934 */
14935 if (probe->dofpr_offidx + probe->dofpr_noffs <
14936 probe->dofpr_offidx ||
14937 (probe->dofpr_offidx + probe->dofpr_noffs) *
14938 off_sec->dofs_entsize > off_sec->dofs_size) {
14939 dtrace_dof_error(dof, "invalid probe offset");
14940 return (-1);
14941 }
14942
14943 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1) {
14944 /*
14945 * If there's no is-enabled offset section, make sure
14946 * there aren't any is-enabled offsets. Otherwise
14947 * perform the same checks as for probe offsets
14948 * (immediately above).
14949 */
14950 if (enoff_sec == NULL) {
14951 if (probe->dofpr_enoffidx != 0 ||
14952 probe->dofpr_nenoffs != 0) {
14953 dtrace_dof_error(dof, "is-enabled "
14954 "offsets with null section");
14955 return (-1);
14956 }
14957 } else if (probe->dofpr_enoffidx +
14958 probe->dofpr_nenoffs < probe->dofpr_enoffidx ||
14959 (probe->dofpr_enoffidx + probe->dofpr_nenoffs) *
14960 enoff_sec->dofs_entsize > enoff_sec->dofs_size) {
14961 dtrace_dof_error(dof, "invalid is-enabled "
14962 "offset");
14963 return (-1);
14964 }
14965
14966 if (probe->dofpr_noffs + probe->dofpr_nenoffs == 0) {
14967 dtrace_dof_error(dof, "zero probe and "
14968 "is-enabled offsets");
14969 return (-1);
14970 }
14971 } else if (probe->dofpr_noffs == 0) {
14972 dtrace_dof_error(dof, "zero probe offsets");
14973 return (-1);
14974 }
14975
14976 if (probe->dofpr_argidx + probe->dofpr_xargc <
14977 probe->dofpr_argidx ||
14978 (probe->dofpr_argidx + probe->dofpr_xargc) *
14979 arg_sec->dofs_entsize > arg_sec->dofs_size) {
14980 dtrace_dof_error(dof, "invalid args");
14981 return (-1);
14982 }
14983
14984 typeidx = probe->dofpr_nargv;
14985 typestr = strtab + probe->dofpr_nargv;
14986 for (k = 0; k < probe->dofpr_nargc; k++) {
14987 if (typeidx >= str_sec->dofs_size) {
14988 dtrace_dof_error(dof, "bad "
14989 "native argument type");
14990 return (-1);
14991 }
14992
14993 typesz = strlen(typestr) + 1;
14994 if (typesz > DTRACE_ARGTYPELEN) {
14995 dtrace_dof_error(dof, "native "
14996 "argument type too long");
14997 return (-1);
14998 }
14999 typeidx += typesz;
15000 typestr += typesz;
15001 }
15002
15003 typeidx = probe->dofpr_xargv;
15004 typestr = strtab + probe->dofpr_xargv;
15005 for (k = 0; k < probe->dofpr_xargc; k++) {
15006 if (arg[probe->dofpr_argidx + k] > probe->dofpr_nargc) {
15007 dtrace_dof_error(dof, "bad "
15008 "native argument index");
15009 return (-1);
15010 }
15011
15012 if (typeidx >= str_sec->dofs_size) {
15013 dtrace_dof_error(dof, "bad "
15014 "translated argument type");
15015 return (-1);
15016 }
15017
15018 typesz = strlen(typestr) + 1;
15019 if (typesz > DTRACE_ARGTYPELEN) {
15020 dtrace_dof_error(dof, "translated argument "
15021 "type too long");
15022 return (-1);
15023 }
15024
15025 typeidx += typesz;
15026 typestr += typesz;
15027 }
15028 }
15029
15030 return (0);
15031 }
15032
15033 static int
15034 dtrace_helper_slurp(dof_hdr_t *dof, dof_helper_t *dhp)
15035 {
15036 dtrace_helpers_t *help;
15037 dtrace_vstate_t *vstate;
15038 dtrace_enabling_t *enab = NULL;
15039 int i, gen, rv, nhelpers = 0, nprovs = 0, destroy = 1;
15040 uintptr_t daddr = (uintptr_t)dof;
15041
15042 ASSERT(MUTEX_HELD(&dtrace_lock));
15043
15044 if ((help = curproc->p_dtrace_helpers) == NULL)
15045 help = dtrace_helpers_create(curproc);
15046
15047 vstate = &help->dthps_vstate;
15048
15049 if ((rv = dtrace_dof_slurp(dof, vstate, NULL, &enab,
15050 dhp != NULL ? dhp->dofhp_addr : 0, B_FALSE)) != 0) {
15051 dtrace_dof_destroy(dof);
15052 return (rv);
15053 }
15054
15055 /*
15056 * Look for helper providers and validate their descriptions.
15057 */
15058 if (dhp != NULL) {
15059 for (i = 0; i < dof->dofh_secnum; i++) {
15060 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr +
15061 dof->dofh_secoff + i * dof->dofh_secsize);
15062
15063 if (sec->dofs_type != DOF_SECT_PROVIDER)
15064 continue;
15065
15066 if (dtrace_helper_provider_validate(dof, sec) != 0) {
15067 dtrace_enabling_destroy(enab);
15068 dtrace_dof_destroy(dof);
15069 return (-1);
15070 }
15071
15072 nprovs++;
15073 }
15074 }
15075
15076 /*
15077 * Now we need to walk through the ECB descriptions in the enabling.
15078 */
15079 for (i = 0; i < enab->dten_ndesc; i++) {
15080 dtrace_ecbdesc_t *ep = enab->dten_desc[i];
15081 dtrace_probedesc_t *desc = &ep->dted_probe;
15082
15083 if (strcmp(desc->dtpd_provider, "dtrace") != 0)
15084 continue;
15085
15086 if (strcmp(desc->dtpd_mod, "helper") != 0)
15087 continue;
15088
15089 if (strcmp(desc->dtpd_func, "ustack") != 0)
15090 continue;
15091
15092 if ((rv = dtrace_helper_action_add(DTRACE_HELPER_ACTION_USTACK,
15093 ep)) != 0) {
15094 /*
15095 * Adding this helper action failed -- we are now going
15096 * to rip out the entire generation and return failure.
15097 */
15098 (void) dtrace_helper_destroygen(help->dthps_generation);
15099 dtrace_enabling_destroy(enab);
15100 dtrace_dof_destroy(dof);
15101 return (-1);
15102 }
15103
15104 nhelpers++;
15105 }
15106
15107 if (nhelpers < enab->dten_ndesc)
15108 dtrace_dof_error(dof, "unmatched helpers");
15109
15110 gen = help->dthps_generation++;
15111 dtrace_enabling_destroy(enab);
15112
15113 if (dhp != NULL && nprovs > 0) {
15114 dhp->dofhp_dof = (uint64_t)(uintptr_t)dof;
15115 if (dtrace_helper_provider_add(dhp, gen) == 0) {
15116 mutex_exit(&dtrace_lock);
15117 dtrace_helper_provider_register(curproc, help, dhp);
15118 mutex_enter(&dtrace_lock);
15119
15120 destroy = 0;
15121 }
15122 }
15123
15124 if (destroy)
15125 dtrace_dof_destroy(dof);
15126
15127 return (gen);
15128 }
15129
15130 static dtrace_helpers_t *
15131 dtrace_helpers_create(proc_t *p)
15132 {
15133 dtrace_helpers_t *help;
15134
15135 ASSERT(MUTEX_HELD(&dtrace_lock));
15136 ASSERT(p->p_dtrace_helpers == NULL);
15137
15138 help = kmem_zalloc(sizeof (dtrace_helpers_t), KM_SLEEP);
15139 help->dthps_actions = kmem_zalloc(sizeof (dtrace_helper_action_t *) *
15140 DTRACE_NHELPER_ACTIONS, KM_SLEEP);
15141
15142 p->p_dtrace_helpers = help;
15143 dtrace_helpers++;
15144
15145 return (help);
15146 }
15147
15148 static void
15149 dtrace_helpers_destroy(void)
15150 {
15151 dtrace_helpers_t *help;
15152 dtrace_vstate_t *vstate;
15153 proc_t *p = curproc;
15154 int i;
15155
15156 mutex_enter(&dtrace_lock);
15157
15158 ASSERT(p->p_dtrace_helpers != NULL);
15159 ASSERT(dtrace_helpers > 0);
15160
15161 help = p->p_dtrace_helpers;
15162 vstate = &help->dthps_vstate;
15163
15164 /*
15165 * We're now going to lose the help from this process.
15166 */
15167 p->p_dtrace_helpers = NULL;
15168 dtrace_sync();
15169
15170 /*
15171 * Destory the helper actions.
15172 */
15173 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) {
15174 dtrace_helper_action_t *h, *next;
15175
15176 for (h = help->dthps_actions[i]; h != NULL; h = next) {
15177 next = h->dtha_next;
15178 dtrace_helper_action_destroy(h, vstate);
15179 h = next;
15180 }
15181 }
15182
15183 mutex_exit(&dtrace_lock);
15184
15185 /*
15186 * Destroy the helper providers.
15187 */
15188 if (help->dthps_maxprovs > 0) {
15189 mutex_enter(&dtrace_meta_lock);
15190 if (dtrace_meta_pid != NULL) {
15191 ASSERT(dtrace_deferred_pid == NULL);
15192
15193 for (i = 0; i < help->dthps_nprovs; i++) {
15194 dtrace_helper_provider_remove(
15195 &help->dthps_provs[i]->dthp_prov, p->p_pid);
15196 }
15197 } else {
15198 mutex_enter(&dtrace_lock);
15199 ASSERT(help->dthps_deferred == 0 ||
15200 help->dthps_next != NULL ||
15201 help->dthps_prev != NULL ||
15202 help == dtrace_deferred_pid);
15203
15204 /*
15205 * Remove the helper from the deferred list.
15206 */
15207 if (help->dthps_next != NULL)
15208 help->dthps_next->dthps_prev = help->dthps_prev;
15209 if (help->dthps_prev != NULL)
15210 help->dthps_prev->dthps_next = help->dthps_next;
15211 if (dtrace_deferred_pid == help) {
15212 dtrace_deferred_pid = help->dthps_next;
15213 ASSERT(help->dthps_prev == NULL);
15214 }
15215
15216 mutex_exit(&dtrace_lock);
15217 }
15218
15219 mutex_exit(&dtrace_meta_lock);
15220
15221 for (i = 0; i < help->dthps_nprovs; i++) {
15222 dtrace_helper_provider_destroy(help->dthps_provs[i]);
15223 }
15224
15225 kmem_free(help->dthps_provs, help->dthps_maxprovs *
15226 sizeof (dtrace_helper_provider_t *));
15227 }
15228
15229 mutex_enter(&dtrace_lock);
15230
15231 dtrace_vstate_fini(&help->dthps_vstate);
15232 kmem_free(help->dthps_actions,
15233 sizeof (dtrace_helper_action_t *) * DTRACE_NHELPER_ACTIONS);
15234 kmem_free(help, sizeof (dtrace_helpers_t));
15235
15236 --dtrace_helpers;
15237 mutex_exit(&dtrace_lock);
15238 }
15239
15240 static void
15241 dtrace_helpers_duplicate(proc_t *from, proc_t *to)
15242 {
15243 dtrace_helpers_t *help, *newhelp;
15244 dtrace_helper_action_t *helper, *new, *last;
15245 dtrace_difo_t *dp;
15246 dtrace_vstate_t *vstate;
15247 int i, j, sz, hasprovs = 0;
15248
15249 mutex_enter(&dtrace_lock);
15250 ASSERT(from->p_dtrace_helpers != NULL);
15251 ASSERT(dtrace_helpers > 0);
15252
15253 help = from->p_dtrace_helpers;
15254 newhelp = dtrace_helpers_create(to);
15255 ASSERT(to->p_dtrace_helpers != NULL);
15256
15257 newhelp->dthps_generation = help->dthps_generation;
15258 vstate = &newhelp->dthps_vstate;
15259
15260 /*
15261 * Duplicate the helper actions.
15262 */
15263 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) {
15264 if ((helper = help->dthps_actions[i]) == NULL)
15265 continue;
15266
15267 for (last = NULL; helper != NULL; helper = helper->dtha_next) {
15268 new = kmem_zalloc(sizeof (dtrace_helper_action_t),
15269 KM_SLEEP);
15270 new->dtha_generation = helper->dtha_generation;
15271
15272 if ((dp = helper->dtha_predicate) != NULL) {
15273 dp = dtrace_difo_duplicate(dp, vstate);
15274 new->dtha_predicate = dp;
15275 }
15276
15277 new->dtha_nactions = helper->dtha_nactions;
15278 sz = sizeof (dtrace_difo_t *) * new->dtha_nactions;
15279 new->dtha_actions = kmem_alloc(sz, KM_SLEEP);
15280
15281 for (j = 0; j < new->dtha_nactions; j++) {
15282 dtrace_difo_t *dp = helper->dtha_actions[j];
15283
15284 ASSERT(dp != NULL);
15285 dp = dtrace_difo_duplicate(dp, vstate);
15286 new->dtha_actions[j] = dp;
15287 }
15288
15289 if (last != NULL) {
15290 last->dtha_next = new;
15291 } else {
15292 newhelp->dthps_actions[i] = new;
15293 }
15294
15295 last = new;
15296 }
15297 }
15298
15299 /*
15300 * Duplicate the helper providers and register them with the
15301 * DTrace framework.
15302 */
15303 if (help->dthps_nprovs > 0) {
15304 newhelp->dthps_nprovs = help->dthps_nprovs;
15305 newhelp->dthps_maxprovs = help->dthps_nprovs;
15306 newhelp->dthps_provs = kmem_alloc(newhelp->dthps_nprovs *
15307 sizeof (dtrace_helper_provider_t *), KM_SLEEP);
15308 for (i = 0; i < newhelp->dthps_nprovs; i++) {
15309 newhelp->dthps_provs[i] = help->dthps_provs[i];
15310 newhelp->dthps_provs[i]->dthp_ref++;
15311 }
15312
15313 hasprovs = 1;
15314 }
15315
15316 mutex_exit(&dtrace_lock);
15317
15318 if (hasprovs)
15319 dtrace_helper_provider_register(to, newhelp, NULL);
15320 }
15321
15322 /*
15323 * DTrace Hook Functions
15324 */
15325 static void
15326 dtrace_module_loaded(struct modctl *ctl)
15327 {
15328 dtrace_provider_t *prv;
15329
15330 mutex_enter(&dtrace_provider_lock);
15331 mutex_enter(&mod_lock);
15332
15333 ASSERT(ctl->mod_busy);
15334
15335 /*
15336 * We're going to call each providers per-module provide operation
15337 * specifying only this module.
15338 */
15339 for (prv = dtrace_provider; prv != NULL; prv = prv->dtpv_next)
15340 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl);
15341
15342 mutex_exit(&mod_lock);
15343 mutex_exit(&dtrace_provider_lock);
15344
15345 /*
15346 * If we have any retained enablings, we need to match against them.
15347 * Enabling probes requires that cpu_lock be held, and we cannot hold
15348 * cpu_lock here -- it is legal for cpu_lock to be held when loading a
15349 * module. (In particular, this happens when loading scheduling
15350 * classes.) So if we have any retained enablings, we need to dispatch
15351 * our task queue to do the match for us.
15352 */
15353 mutex_enter(&dtrace_lock);
15354
15355 if (dtrace_retained == NULL) {
15356 mutex_exit(&dtrace_lock);
15357 return;
15358 }
15359
15360 (void) taskq_dispatch(dtrace_taskq,
15361 (task_func_t *)dtrace_enabling_matchall, NULL, TQ_SLEEP);
15362
15363 mutex_exit(&dtrace_lock);
15364
15365 /*
15366 * And now, for a little heuristic sleaze: in general, we want to
15367 * match modules as soon as they load. However, we cannot guarantee
15368 * this, because it would lead us to the lock ordering violation
15369 * outlined above. The common case, of course, is that cpu_lock is
15370 * _not_ held -- so we delay here for a clock tick, hoping that that's
15371 * long enough for the task queue to do its work. If it's not, it's
15372 * not a serious problem -- it just means that the module that we
15373 * just loaded may not be immediately instrumentable.
15374 */
15375 delay(1);
15376 }
15377
15378 static void
15379 dtrace_module_unloaded(struct modctl *ctl)
15380 {
15381 dtrace_probe_t template, *probe, *first, *next;
15382 dtrace_provider_t *prov;
15383
15384 template.dtpr_mod = ctl->mod_modname;
15385
15386 mutex_enter(&dtrace_provider_lock);
15387 mutex_enter(&mod_lock);
15388 mutex_enter(&dtrace_lock);
15389
15390 if (dtrace_bymod == NULL) {
15391 /*
15392 * The DTrace module is loaded (obviously) but not attached;
15393 * we don't have any work to do.
15394 */
15395 mutex_exit(&dtrace_provider_lock);
15396 mutex_exit(&mod_lock);
15397 mutex_exit(&dtrace_lock);
15398 return;
15399 }
15400
15401 for (probe = first = dtrace_hash_lookup(dtrace_bymod, &template);
15402 probe != NULL; probe = probe->dtpr_nextmod) {
15403 if (probe->dtpr_ecb != NULL) {
15404 mutex_exit(&dtrace_provider_lock);
15405 mutex_exit(&mod_lock);
15406 mutex_exit(&dtrace_lock);
15407
15408 /*
15409 * This shouldn't _actually_ be possible -- we're
15410 * unloading a module that has an enabled probe in it.
15411 * (It's normally up to the provider to make sure that
15412 * this can't happen.) However, because dtps_enable()
15413 * doesn't have a failure mode, there can be an
15414 * enable/unload race. Upshot: we don't want to
15415 * assert, but we're not going to disable the
15416 * probe, either.
15417 */
15418 if (dtrace_err_verbose) {
15419 cmn_err(CE_WARN, "unloaded module '%s' had "
15420 "enabled probes", ctl->mod_modname);
15421 }
15422
15423 return;
15424 }
15425 }
15426
15427 probe = first;
15428
15429 for (first = NULL; probe != NULL; probe = next) {
15430 ASSERT(dtrace_probes[probe->dtpr_id - 1] == probe);
15431
15432 dtrace_probes[probe->dtpr_id - 1] = NULL;
15433
15434 next = probe->dtpr_nextmod;
15435 dtrace_hash_remove(dtrace_bymod, probe);
15436 dtrace_hash_remove(dtrace_byfunc, probe);
15437 dtrace_hash_remove(dtrace_byname, probe);
15438
15439 if (first == NULL) {
15440 first = probe;
15441 probe->dtpr_nextmod = NULL;
15442 } else {
15443 probe->dtpr_nextmod = first;
15444 first = probe;
15445 }
15446 }
15447
15448 /*
15449 * We've removed all of the module's probes from the hash chains and
15450 * from the probe array. Now issue a dtrace_sync() to be sure that
15451 * everyone has cleared out from any probe array processing.
15452 */
15453 dtrace_sync();
15454
15455 for (probe = first; probe != NULL; probe = first) {
15456 first = probe->dtpr_nextmod;
15457 prov = probe->dtpr_provider;
15458 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, probe->dtpr_id,
15459 probe->dtpr_arg);
15460 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1);
15461 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1);
15462 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1);
15463 vmem_free(dtrace_arena, (void *)(uintptr_t)probe->dtpr_id, 1);
15464 kmem_free(probe, sizeof (dtrace_probe_t));
15465 }
15466
15467 mutex_exit(&dtrace_lock);
15468 mutex_exit(&mod_lock);
15469 mutex_exit(&dtrace_provider_lock);
15470 }
15471
15472 void
15473 dtrace_suspend(void)
15474 {
15475 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_suspend));
15476 }
15477
15478 void
15479 dtrace_resume(void)
15480 {
15481 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_resume));
15482 }
15483
15484 static int
15485 dtrace_cpu_setup(cpu_setup_t what, processorid_t cpu)
15486 {
15487 ASSERT(MUTEX_HELD(&cpu_lock));
15488 mutex_enter(&dtrace_lock);
15489
15490 switch (what) {
15491 case CPU_CONFIG: {
15492 dtrace_state_t *state;
15493 dtrace_optval_t *opt, rs, c;
15494
15495 /*
15496 * For now, we only allocate a new buffer for anonymous state.
15497 */
15498 if ((state = dtrace_anon.dta_state) == NULL)
15499 break;
15500
15501 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE)
15502 break;
15503
15504 opt = state->dts_options;
15505 c = opt[DTRACEOPT_CPU];
15506
15507 if (c != DTRACE_CPUALL && c != DTRACEOPT_UNSET && c != cpu)
15508 break;
15509
15510 /*
15511 * Regardless of what the actual policy is, we're going to
15512 * temporarily set our resize policy to be manual. We're
15513 * also going to temporarily set our CPU option to denote
15514 * the newly configured CPU.
15515 */
15516 rs = opt[DTRACEOPT_BUFRESIZE];
15517 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_MANUAL;
15518 opt[DTRACEOPT_CPU] = (dtrace_optval_t)cpu;
15519
15520 (void) dtrace_state_buffers(state);
15521
15522 opt[DTRACEOPT_BUFRESIZE] = rs;
15523 opt[DTRACEOPT_CPU] = c;
15524
15525 break;
15526 }
15527
15528 case CPU_UNCONFIG:
15529 /*
15530 * We don't free the buffer in the CPU_UNCONFIG case. (The
15531 * buffer will be freed when the consumer exits.)
15532 */
15533 break;
15534
15535 default:
15536 break;
15537 }
15538
15539 mutex_exit(&dtrace_lock);
15540 return (0);
15541 }
15542
15543 static void
15544 dtrace_cpu_setup_initial(processorid_t cpu)
15545 {
15546 (void) dtrace_cpu_setup(CPU_CONFIG, cpu);
15547 }
15548
15549 static void
15550 dtrace_toxrange_add(uintptr_t base, uintptr_t limit)
15551 {
15552 if (dtrace_toxranges >= dtrace_toxranges_max) {
15553 int osize, nsize;
15554 dtrace_toxrange_t *range;
15555
15556 osize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t);
15557
15558 if (osize == 0) {
15559 ASSERT(dtrace_toxrange == NULL);
15560 ASSERT(dtrace_toxranges_max == 0);
15561 dtrace_toxranges_max = 1;
15562 } else {
15563 dtrace_toxranges_max <<= 1;
15564 }
15565
15566 nsize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t);
15567 range = kmem_zalloc(nsize, KM_SLEEP);
15568
15569 if (dtrace_toxrange != NULL) {
15570 ASSERT(osize != 0);
15571 bcopy(dtrace_toxrange, range, osize);
15572 kmem_free(dtrace_toxrange, osize);
15573 }
15574
15575 dtrace_toxrange = range;
15576 }
15577
15578 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_base == NULL);
15579 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_limit == NULL);
15580
15581 dtrace_toxrange[dtrace_toxranges].dtt_base = base;
15582 dtrace_toxrange[dtrace_toxranges].dtt_limit = limit;
15583 dtrace_toxranges++;
15584 }
15585
15586 static void
15587 dtrace_getf_barrier()
15588 {
15589 /*
15590 * When we have unprivileged (that is, non-DTRACE_CRV_KERNEL) enablings
15591 * that contain calls to getf(), this routine will be called on every
15592 * closef() before either the underlying vnode is released or the
15593 * file_t itself is freed. By the time we are here, it is essential
15594 * that the file_t can no longer be accessed from a call to getf()
15595 * in probe context -- that assures that a dtrace_sync() can be used
15596 * to clear out any enablings referring to the old structures.
15597 */
15598 if (curthread->t_procp->p_zone->zone_dtrace_getf != 0 ||
15599 kcred->cr_zone->zone_dtrace_getf != 0)
15600 dtrace_sync();
15601 }
15602
15603 /*
15604 * DTrace Driver Cookbook Functions
15605 */
15606 /*ARGSUSED*/
15607 static int
15608 dtrace_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
15609 {
15610 dtrace_provider_id_t id;
15611 dtrace_state_t *state = NULL;
15612 dtrace_enabling_t *enab;
15613
15614 mutex_enter(&cpu_lock);
15615 mutex_enter(&dtrace_provider_lock);
15616 mutex_enter(&dtrace_lock);
15617
15618 if (ddi_soft_state_init(&dtrace_softstate,
15619 sizeof (dtrace_state_t), 0) != 0) {
15620 cmn_err(CE_NOTE, "/dev/dtrace failed to initialize soft state");
15621 mutex_exit(&cpu_lock);
15622 mutex_exit(&dtrace_provider_lock);
15623 mutex_exit(&dtrace_lock);
15624 return (DDI_FAILURE);
15625 }
15626
15627 if (ddi_create_minor_node(devi, DTRACEMNR_DTRACE, S_IFCHR,
15628 DTRACEMNRN_DTRACE, DDI_PSEUDO, NULL) == DDI_FAILURE ||
15629 ddi_create_minor_node(devi, DTRACEMNR_HELPER, S_IFCHR,
15630 DTRACEMNRN_HELPER, DDI_PSEUDO, NULL) == DDI_FAILURE) {
15631 cmn_err(CE_NOTE, "/dev/dtrace couldn't create minor nodes");
15632 ddi_remove_minor_node(devi, NULL);
15633 ddi_soft_state_fini(&dtrace_softstate);
15634 mutex_exit(&cpu_lock);
15635 mutex_exit(&dtrace_provider_lock);
15636 mutex_exit(&dtrace_lock);
15637 return (DDI_FAILURE);
15638 }
15639
15640 ddi_report_dev(devi);
15641 dtrace_devi = devi;
15642
15643 dtrace_modload = dtrace_module_loaded;
15644 dtrace_modunload = dtrace_module_unloaded;
15645 dtrace_cpu_init = dtrace_cpu_setup_initial;
15646 dtrace_helpers_cleanup = dtrace_helpers_destroy;
15647 dtrace_helpers_fork = dtrace_helpers_duplicate;
15648 dtrace_cpustart_init = dtrace_suspend;
15649 dtrace_cpustart_fini = dtrace_resume;
15650 dtrace_debugger_init = dtrace_suspend;
15651 dtrace_debugger_fini = dtrace_resume;
15652
15653 register_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL);
15654
15655 ASSERT(MUTEX_HELD(&cpu_lock));
15656
15657 dtrace_arena = vmem_create("dtrace", (void *)1, UINT32_MAX, 1,
15658 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER);
15659 dtrace_minor = vmem_create("dtrace_minor", (void *)DTRACEMNRN_CLONE,
15660 UINT32_MAX - DTRACEMNRN_CLONE, 1, NULL, NULL, NULL, 0,
15661 VM_SLEEP | VMC_IDENTIFIER);
15662 dtrace_taskq = taskq_create("dtrace_taskq", 1, maxclsyspri,
15663 1, INT_MAX, 0);
15664
15665 dtrace_state_cache = kmem_cache_create("dtrace_state_cache",
15666 sizeof (dtrace_dstate_percpu_t) * NCPU, DTRACE_STATE_ALIGN,
15667 NULL, NULL, NULL, NULL, NULL, 0);
15668
15669 ASSERT(MUTEX_HELD(&cpu_lock));
15670 dtrace_bymod = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_mod),
15671 offsetof(dtrace_probe_t, dtpr_nextmod),
15672 offsetof(dtrace_probe_t, dtpr_prevmod));
15673
15674 dtrace_byfunc = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_func),
15675 offsetof(dtrace_probe_t, dtpr_nextfunc),
15676 offsetof(dtrace_probe_t, dtpr_prevfunc));
15677
15678 dtrace_byname = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_name),
15679 offsetof(dtrace_probe_t, dtpr_nextname),
15680 offsetof(dtrace_probe_t, dtpr_prevname));
15681
15682 if (dtrace_retain_max < 1) {
15683 cmn_err(CE_WARN, "illegal value (%lu) for dtrace_retain_max; "
15684 "setting to 1", dtrace_retain_max);
15685 dtrace_retain_max = 1;
15686 }
15687
15688 /*
15689 * Now discover our toxic ranges.
15690 */
15691 dtrace_toxic_ranges(dtrace_toxrange_add);
15692
15693 /*
15694 * Before we register ourselves as a provider to our own framework,
15695 * we would like to assert that dtrace_provider is NULL -- but that's
15696 * not true if we were loaded as a dependency of a DTrace provider.
15697 * Once we've registered, we can assert that dtrace_provider is our
15698 * pseudo provider.
15699 */
15700 (void) dtrace_register("dtrace", &dtrace_provider_attr,
15701 DTRACE_PRIV_NONE, 0, &dtrace_provider_ops, NULL, &id);
15702
15703 ASSERT(dtrace_provider != NULL);
15704 ASSERT((dtrace_provider_id_t)dtrace_provider == id);
15705
15706 dtrace_probeid_begin = dtrace_probe_create((dtrace_provider_id_t)
15707 dtrace_provider, NULL, NULL, "BEGIN", 0, NULL);
15708 dtrace_probeid_end = dtrace_probe_create((dtrace_provider_id_t)
15709 dtrace_provider, NULL, NULL, "END", 0, NULL);
15710 dtrace_probeid_error = dtrace_probe_create((dtrace_provider_id_t)
15711 dtrace_provider, NULL, NULL, "ERROR", 1, NULL);
15712
15713 dtrace_anon_property();
15714 mutex_exit(&cpu_lock);
15715
15716 /*
15717 * If there are already providers, we must ask them to provide their
15718 * probes, and then match any anonymous enabling against them. Note
15719 * that there should be no other retained enablings at this time:
15720 * the only retained enablings at this time should be the anonymous
15721 * enabling.
15722 */
15723 if (dtrace_anon.dta_enabling != NULL) {
15724 ASSERT(dtrace_retained == dtrace_anon.dta_enabling);
15725
15726 dtrace_enabling_provide(NULL);
15727 state = dtrace_anon.dta_state;
15728
15729 /*
15730 * We couldn't hold cpu_lock across the above call to
15731 * dtrace_enabling_provide(), but we must hold it to actually
15732 * enable the probes. We have to drop all of our locks, pick
15733 * up cpu_lock, and regain our locks before matching the
15734 * retained anonymous enabling.
15735 */
15736 mutex_exit(&dtrace_lock);
15737 mutex_exit(&dtrace_provider_lock);
15738
15739 mutex_enter(&cpu_lock);
15740 mutex_enter(&dtrace_provider_lock);
15741 mutex_enter(&dtrace_lock);
15742
15743 if ((enab = dtrace_anon.dta_enabling) != NULL)
15744 (void) dtrace_enabling_match(enab, NULL);
15745
15746 mutex_exit(&cpu_lock);
15747 }
15748
15749 mutex_exit(&dtrace_lock);
15750 mutex_exit(&dtrace_provider_lock);
15751
15752 if (state != NULL) {
15753 /*
15754 * If we created any anonymous state, set it going now.
15755 */
15756 (void) dtrace_state_go(state, &dtrace_anon.dta_beganon);
15757 }
15758
15759 return (DDI_SUCCESS);
15760 }
15761
15762 /*ARGSUSED*/
15763 static int
15764 dtrace_open(dev_t *devp, int flag, int otyp, cred_t *cred_p)
15765 {
15766 dtrace_state_t *state;
15767 uint32_t priv;
15768 uid_t uid;
15769 zoneid_t zoneid;
15770
15771 if (getminor(*devp) == DTRACEMNRN_HELPER)
15772 return (0);
15773
15774 /*
15775 * If this wasn't an open with the "helper" minor, then it must be
15776 * the "dtrace" minor.
15777 */
15778 if (getminor(*devp) != DTRACEMNRN_DTRACE)
15779 return (ENXIO);
15780
15781 /*
15782 * If no DTRACE_PRIV_* bits are set in the credential, then the
15783 * caller lacks sufficient permission to do anything with DTrace.
15784 */
15785 dtrace_cred2priv(cred_p, &priv, &uid, &zoneid);
15786 if (priv == DTRACE_PRIV_NONE)
15787 return (EACCES);
15788
15789 /*
15790 * Ask all providers to provide all their probes.
15791 */
15792 mutex_enter(&dtrace_provider_lock);
15793 dtrace_probe_provide(NULL, NULL);
15794 mutex_exit(&dtrace_provider_lock);
15795
15796 mutex_enter(&cpu_lock);
15797 mutex_enter(&dtrace_lock);
15798 dtrace_opens++;
15799 dtrace_membar_producer();
15800
15801 /*
15802 * If the kernel debugger is active (that is, if the kernel debugger
15803 * modified text in some way), we won't allow the open.
15804 */
15805 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) {
15806 dtrace_opens--;
15807 mutex_exit(&cpu_lock);
15808 mutex_exit(&dtrace_lock);
15809 return (EBUSY);
15810 }
15811
15812 if (dtrace_helptrace_enable && dtrace_helptrace_buffer == NULL) {
15813 /*
15814 * If DTrace helper tracing is enabled, we need to allocate the
15815 * trace buffer and initialize the values.
15816 */
15817 dtrace_helptrace_buffer =
15818 kmem_zalloc(dtrace_helptrace_bufsize, KM_SLEEP);
15819 dtrace_helptrace_next = 0;
15820 dtrace_helptrace_wrapped = 0;
15821 dtrace_helptrace_enable = 0;
15822 }
15823
15824 state = dtrace_state_create(devp, cred_p);
15825 mutex_exit(&cpu_lock);
15826
15827 if (state == NULL) {
15828 if (--dtrace_opens == 0 && dtrace_anon.dta_enabling == NULL)
15829 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE);
15830 mutex_exit(&dtrace_lock);
15831 return (EAGAIN);
15832 }
15833
15834 mutex_exit(&dtrace_lock);
15835
15836 return (0);
15837 }
15838
15839 /*ARGSUSED*/
15840 static int
15841 dtrace_close(dev_t dev, int flag, int otyp, cred_t *cred_p)
15842 {
15843 minor_t minor = getminor(dev);
15844 dtrace_state_t *state;
15845 dtrace_helptrace_t *buf = NULL;
15846
15847 if (minor == DTRACEMNRN_HELPER)
15848 return (0);
15849
15850 state = ddi_get_soft_state(dtrace_softstate, minor);
15851
15852 mutex_enter(&cpu_lock);
15853 mutex_enter(&dtrace_lock);
15854
15855 if (state->dts_anon) {
15856 /*
15857 * There is anonymous state. Destroy that first.
15858 */
15859 ASSERT(dtrace_anon.dta_state == NULL);
15860 dtrace_state_destroy(state->dts_anon);
15861 }
15862
15863 if (dtrace_helptrace_disable) {
15864 /*
15865 * If we have been told to disable helper tracing, set the
15866 * buffer to NULL before calling into dtrace_state_destroy();
15867 * we take advantage of its dtrace_sync() to know that no
15868 * CPU is in probe context with enabled helper tracing
15869 * after it returns.
15870 */
15871 buf = dtrace_helptrace_buffer;
15872 dtrace_helptrace_buffer = NULL;
15873 }
15874
15875 dtrace_state_destroy(state);
15876 ASSERT(dtrace_opens > 0);
15877
15878 /*
15879 * Only relinquish control of the kernel debugger interface when there
15880 * are no consumers and no anonymous enablings.
15881 */
15882 if (--dtrace_opens == 0 && dtrace_anon.dta_enabling == NULL)
15883 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE);
15884
15885 if (buf != NULL) {
15886 kmem_free(buf, dtrace_helptrace_bufsize);
15887 dtrace_helptrace_disable = 0;
15888 }
15889
15890 mutex_exit(&dtrace_lock);
15891 mutex_exit(&cpu_lock);
15892
15893 return (0);
15894 }
15895
15896 /*ARGSUSED*/
15897 static int
15898 dtrace_ioctl_helper(int cmd, intptr_t arg, int *rv)
15899 {
15900 int rval;
15901 dof_helper_t help, *dhp = NULL;
15902
15903 switch (cmd) {
15904 case DTRACEHIOC_ADDDOF:
15905 if (copyin((void *)arg, &help, sizeof (help)) != 0) {
15906 dtrace_dof_error(NULL, "failed to copyin DOF helper");
15907 return (EFAULT);
15908 }
15909
15910 dhp = &help;
15911 arg = (intptr_t)help.dofhp_dof;
15912 /*FALLTHROUGH*/
15913
15914 case DTRACEHIOC_ADD: {
15915 dof_hdr_t *dof = dtrace_dof_copyin(arg, &rval);
15916
15917 if (dof == NULL)
15918 return (rval);
15919
15920 mutex_enter(&dtrace_lock);
15921
15922 /*
15923 * dtrace_helper_slurp() takes responsibility for the dof --
15924 * it may free it now or it may save it and free it later.
15925 */
15926 if ((rval = dtrace_helper_slurp(dof, dhp)) != -1) {
15927 *rv = rval;
15928 rval = 0;
15929 } else {
15930 rval = EINVAL;
15931 }
15932
15933 mutex_exit(&dtrace_lock);
15934 return (rval);
15935 }
15936
15937 case DTRACEHIOC_REMOVE: {
15938 mutex_enter(&dtrace_lock);
15939 rval = dtrace_helper_destroygen(arg);
15940 mutex_exit(&dtrace_lock);
15941
15942 return (rval);
15943 }
15944
15945 default:
15946 break;
15947 }
15948
15949 return (ENOTTY);
15950 }
15951
15952 /*ARGSUSED*/
15953 static int
15954 dtrace_ioctl(dev_t dev, int cmd, intptr_t arg, int md, cred_t *cr, int *rv)
15955 {
15956 minor_t minor = getminor(dev);
15957 dtrace_state_t *state;
15958 int rval;
15959
15960 if (minor == DTRACEMNRN_HELPER)
15961 return (dtrace_ioctl_helper(cmd, arg, rv));
15962
15963 state = ddi_get_soft_state(dtrace_softstate, minor);
15964
15965 if (state->dts_anon) {
15966 ASSERT(dtrace_anon.dta_state == NULL);
15967 state = state->dts_anon;
15968 }
15969
15970 switch (cmd) {
15971 case DTRACEIOC_PROVIDER: {
15972 dtrace_providerdesc_t pvd;
15973 dtrace_provider_t *pvp;
15974
15975 if (copyin((void *)arg, &pvd, sizeof (pvd)) != 0)
15976 return (EFAULT);
15977
15978 pvd.dtvd_name[DTRACE_PROVNAMELEN - 1] = '\0';
15979 mutex_enter(&dtrace_provider_lock);
15980
15981 for (pvp = dtrace_provider; pvp != NULL; pvp = pvp->dtpv_next) {
15982 if (strcmp(pvp->dtpv_name, pvd.dtvd_name) == 0)
15983 break;
15984 }
15985
15986 mutex_exit(&dtrace_provider_lock);
15987
15988 if (pvp == NULL)
15989 return (ESRCH);
15990
15991 bcopy(&pvp->dtpv_priv, &pvd.dtvd_priv, sizeof (dtrace_ppriv_t));
15992 bcopy(&pvp->dtpv_attr, &pvd.dtvd_attr, sizeof (dtrace_pattr_t));
15993 if (copyout(&pvd, (void *)arg, sizeof (pvd)) != 0)
15994 return (EFAULT);
15995
15996 return (0);
15997 }
15998
15999 case DTRACEIOC_EPROBE: {
16000 dtrace_eprobedesc_t epdesc;
16001 dtrace_ecb_t *ecb;
16002 dtrace_action_t *act;
16003 void *buf;
16004 size_t size;
16005 uintptr_t dest;
16006 int nrecs;
16007
16008 if (copyin((void *)arg, &epdesc, sizeof (epdesc)) != 0)
16009 return (EFAULT);
16010
16011 mutex_enter(&dtrace_lock);
16012
16013 if ((ecb = dtrace_epid2ecb(state, epdesc.dtepd_epid)) == NULL) {
16014 mutex_exit(&dtrace_lock);
16015 return (EINVAL);
16016 }
16017
16018 if (ecb->dte_probe == NULL) {
16019 mutex_exit(&dtrace_lock);
16020 return (EINVAL);
16021 }
16022
16023 epdesc.dtepd_probeid = ecb->dte_probe->dtpr_id;
16024 epdesc.dtepd_uarg = ecb->dte_uarg;
16025 epdesc.dtepd_size = ecb->dte_size;
16026
16027 nrecs = epdesc.dtepd_nrecs;
16028 epdesc.dtepd_nrecs = 0;
16029 for (act = ecb->dte_action; act != NULL; act = act->dta_next) {
16030 if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple)
16031 continue;
16032
16033 epdesc.dtepd_nrecs++;
16034 }
16035
16036 /*
16037 * Now that we have the size, we need to allocate a temporary
16038 * buffer in which to store the complete description. We need
16039 * the temporary buffer to be able to drop dtrace_lock()
16040 * across the copyout(), below.
16041 */
16042 size = sizeof (dtrace_eprobedesc_t) +
16043 (epdesc.dtepd_nrecs * sizeof (dtrace_recdesc_t));
16044
16045 buf = kmem_alloc(size, KM_SLEEP);
16046 dest = (uintptr_t)buf;
16047
16048 bcopy(&epdesc, (void *)dest, sizeof (epdesc));
16049 dest += offsetof(dtrace_eprobedesc_t, dtepd_rec[0]);
16050
16051 for (act = ecb->dte_action; act != NULL; act = act->dta_next) {
16052 if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple)
16053 continue;
16054
16055 if (nrecs-- == 0)
16056 break;
16057
16058 bcopy(&act->dta_rec, (void *)dest,
16059 sizeof (dtrace_recdesc_t));
16060 dest += sizeof (dtrace_recdesc_t);
16061 }
16062
16063 mutex_exit(&dtrace_lock);
16064
16065 if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) {
16066 kmem_free(buf, size);
16067 return (EFAULT);
16068 }
16069
16070 kmem_free(buf, size);
16071 return (0);
16072 }
16073
16074 case DTRACEIOC_AGGDESC: {
16075 dtrace_aggdesc_t aggdesc;
16076 dtrace_action_t *act;
16077 dtrace_aggregation_t *agg;
16078 int nrecs;
16079 uint32_t offs;
16080 dtrace_recdesc_t *lrec;
16081 void *buf;
16082 size_t size;
16083 uintptr_t dest;
16084
16085 if (copyin((void *)arg, &aggdesc, sizeof (aggdesc)) != 0)
16086 return (EFAULT);
16087
16088 mutex_enter(&dtrace_lock);
16089
16090 if ((agg = dtrace_aggid2agg(state, aggdesc.dtagd_id)) == NULL) {
16091 mutex_exit(&dtrace_lock);
16092 return (EINVAL);
16093 }
16094
16095 aggdesc.dtagd_epid = agg->dtag_ecb->dte_epid;
16096
16097 nrecs = aggdesc.dtagd_nrecs;
16098 aggdesc.dtagd_nrecs = 0;
16099
16100 offs = agg->dtag_base;
16101 lrec = &agg->dtag_action.dta_rec;
16102 aggdesc.dtagd_size = lrec->dtrd_offset + lrec->dtrd_size - offs;
16103
16104 for (act = agg->dtag_first; ; act = act->dta_next) {
16105 ASSERT(act->dta_intuple ||
16106 DTRACEACT_ISAGG(act->dta_kind));
16107
16108 /*
16109 * If this action has a record size of zero, it
16110 * denotes an argument to the aggregating action.
16111 * Because the presence of this record doesn't (or
16112 * shouldn't) affect the way the data is interpreted,
16113 * we don't copy it out to save user-level the
16114 * confusion of dealing with a zero-length record.
16115 */
16116 if (act->dta_rec.dtrd_size == 0) {
16117 ASSERT(agg->dtag_hasarg);
16118 continue;
16119 }
16120
16121 aggdesc.dtagd_nrecs++;
16122
16123 if (act == &agg->dtag_action)
16124 break;
16125 }
16126
16127 /*
16128 * Now that we have the size, we need to allocate a temporary
16129 * buffer in which to store the complete description. We need
16130 * the temporary buffer to be able to drop dtrace_lock()
16131 * across the copyout(), below.
16132 */
16133 size = sizeof (dtrace_aggdesc_t) +
16134 (aggdesc.dtagd_nrecs * sizeof (dtrace_recdesc_t));
16135
16136 buf = kmem_alloc(size, KM_SLEEP);
16137 dest = (uintptr_t)buf;
16138
16139 bcopy(&aggdesc, (void *)dest, sizeof (aggdesc));
16140 dest += offsetof(dtrace_aggdesc_t, dtagd_rec[0]);
16141
16142 for (act = agg->dtag_first; ; act = act->dta_next) {
16143 dtrace_recdesc_t rec = act->dta_rec;
16144
16145 /*
16146 * See the comment in the above loop for why we pass
16147 * over zero-length records.
16148 */
16149 if (rec.dtrd_size == 0) {
16150 ASSERT(agg->dtag_hasarg);
16151 continue;
16152 }
16153
16154 if (nrecs-- == 0)
16155 break;
16156
16157 rec.dtrd_offset -= offs;
16158 bcopy(&rec, (void *)dest, sizeof (rec));
16159 dest += sizeof (dtrace_recdesc_t);
16160
16161 if (act == &agg->dtag_action)
16162 break;
16163 }
16164
16165 mutex_exit(&dtrace_lock);
16166
16167 if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) {
16168 kmem_free(buf, size);
16169 return (EFAULT);
16170 }
16171
16172 kmem_free(buf, size);
16173 return (0);
16174 }
16175
16176 case DTRACEIOC_ENABLE: {
16177 dof_hdr_t *dof;
16178 dtrace_enabling_t *enab = NULL;
16179 dtrace_vstate_t *vstate;
16180 int err = 0;
16181
16182 *rv = 0;
16183
16184 /*
16185 * If a NULL argument has been passed, we take this as our
16186 * cue to reevaluate our enablings.
16187 */
16188 if (arg == NULL) {
16189 dtrace_enabling_matchall();
16190
16191 return (0);
16192 }
16193
16194 if ((dof = dtrace_dof_copyin(arg, &rval)) == NULL)
16195 return (rval);
16196
16197 mutex_enter(&cpu_lock);
16198 mutex_enter(&dtrace_lock);
16199 vstate = &state->dts_vstate;
16200
16201 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) {
16202 mutex_exit(&dtrace_lock);
16203 mutex_exit(&cpu_lock);
16204 dtrace_dof_destroy(dof);
16205 return (EBUSY);
16206 }
16207
16208 if (dtrace_dof_slurp(dof, vstate, cr, &enab, 0, B_TRUE) != 0) {
16209 mutex_exit(&dtrace_lock);
16210 mutex_exit(&cpu_lock);
16211 dtrace_dof_destroy(dof);
16212 return (EINVAL);
16213 }
16214
16215 if ((rval = dtrace_dof_options(dof, state)) != 0) {
16216 dtrace_enabling_destroy(enab);
16217 mutex_exit(&dtrace_lock);
16218 mutex_exit(&cpu_lock);
16219 dtrace_dof_destroy(dof);
16220 return (rval);
16221 }
16222
16223 if ((err = dtrace_enabling_match(enab, rv)) == 0) {
16224 err = dtrace_enabling_retain(enab);
16225 } else {
16226 dtrace_enabling_destroy(enab);
16227 }
16228
16229 mutex_exit(&cpu_lock);
16230 mutex_exit(&dtrace_lock);
16231 dtrace_dof_destroy(dof);
16232
16233 return (err);
16234 }
16235
16236 case DTRACEIOC_REPLICATE: {
16237 dtrace_repldesc_t desc;
16238 dtrace_probedesc_t *match = &desc.dtrpd_match;
16239 dtrace_probedesc_t *create = &desc.dtrpd_create;
16240 int err;
16241
16242 if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
16243 return (EFAULT);
16244
16245 match->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0';
16246 match->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0';
16247 match->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0';
16248 match->dtpd_name[DTRACE_NAMELEN - 1] = '\0';
16249
16250 create->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0';
16251 create->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0';
16252 create->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0';
16253 create->dtpd_name[DTRACE_NAMELEN - 1] = '\0';
16254
16255 mutex_enter(&dtrace_lock);
16256 err = dtrace_enabling_replicate(state, match, create);
16257 mutex_exit(&dtrace_lock);
16258
16259 return (err);
16260 }
16261
16262 case DTRACEIOC_PROBEMATCH:
16263 case DTRACEIOC_PROBES: {
16264 dtrace_probe_t *probe = NULL;
16265 dtrace_probedesc_t desc;
16266 dtrace_probekey_t pkey;
16267 dtrace_id_t i;
16268 int m = 0;
16269 uint32_t priv;
16270 uid_t uid;
16271 zoneid_t zoneid;
16272
16273 if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
16274 return (EFAULT);
16275
16276 desc.dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0';
16277 desc.dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0';
16278 desc.dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0';
16279 desc.dtpd_name[DTRACE_NAMELEN - 1] = '\0';
16280
16281 /*
16282 * Before we attempt to match this probe, we want to give
16283 * all providers the opportunity to provide it.
16284 */
16285 if (desc.dtpd_id == DTRACE_IDNONE) {
16286 mutex_enter(&dtrace_provider_lock);
16287 dtrace_probe_provide(&desc, NULL);
16288 mutex_exit(&dtrace_provider_lock);
16289 desc.dtpd_id++;
16290 }
16291
16292 if (cmd == DTRACEIOC_PROBEMATCH) {
16293 dtrace_probekey(&desc, &pkey);
16294 pkey.dtpk_id = DTRACE_IDNONE;
16295 }
16296
16297 dtrace_cred2priv(cr, &priv, &uid, &zoneid);
16298
16299 mutex_enter(&dtrace_lock);
16300
16301 if (cmd == DTRACEIOC_PROBEMATCH) {
16302 for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) {
16303 if ((probe = dtrace_probes[i - 1]) != NULL &&
16304 (m = dtrace_match_probe(probe, &pkey,
16305 priv, uid, zoneid)) != 0)
16306 break;
16307 }
16308
16309 if (m < 0) {
16310 mutex_exit(&dtrace_lock);
16311 return (EINVAL);
16312 }
16313
16314 } else {
16315 for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) {
16316 if ((probe = dtrace_probes[i - 1]) != NULL &&
16317 dtrace_match_priv(probe, priv, uid, zoneid))
16318 break;
16319 }
16320 }
16321
16322 if (probe == NULL) {
16323 mutex_exit(&dtrace_lock);
16324 return (ESRCH);
16325 }
16326
16327 dtrace_probe_description(probe, &desc);
16328 mutex_exit(&dtrace_lock);
16329
16330 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
16331 return (EFAULT);
16332
16333 return (0);
16334 }
16335
16336 case DTRACEIOC_PROBEARG: {
16337 dtrace_argdesc_t desc;
16338 dtrace_probe_t *probe;
16339 dtrace_provider_t *prov;
16340
16341 if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
16342 return (EFAULT);
16343
16344 if (desc.dtargd_id == DTRACE_IDNONE)
16345 return (EINVAL);
16346
16347 if (desc.dtargd_ndx == DTRACE_ARGNONE)
16348 return (EINVAL);
16349
16350 mutex_enter(&dtrace_provider_lock);
16351 mutex_enter(&mod_lock);
16352 mutex_enter(&dtrace_lock);
16353
16354 if (desc.dtargd_id > dtrace_nprobes) {
16355 mutex_exit(&dtrace_lock);
16356 mutex_exit(&mod_lock);
16357 mutex_exit(&dtrace_provider_lock);
16358 return (EINVAL);
16359 }
16360
16361 if ((probe = dtrace_probes[desc.dtargd_id - 1]) == NULL) {
16362 mutex_exit(&dtrace_lock);
16363 mutex_exit(&mod_lock);
16364 mutex_exit(&dtrace_provider_lock);
16365 return (EINVAL);
16366 }
16367
16368 mutex_exit(&dtrace_lock);
16369
16370 prov = probe->dtpr_provider;
16371
16372 if (prov->dtpv_pops.dtps_getargdesc == NULL) {
16373 /*
16374 * There isn't any typed information for this probe.
16375 * Set the argument number to DTRACE_ARGNONE.
16376 */
16377 desc.dtargd_ndx = DTRACE_ARGNONE;
16378 } else {
16379 desc.dtargd_native[0] = '\0';
16380 desc.dtargd_xlate[0] = '\0';
16381 desc.dtargd_mapping = desc.dtargd_ndx;
16382
16383 prov->dtpv_pops.dtps_getargdesc(prov->dtpv_arg,
16384 probe->dtpr_id, probe->dtpr_arg, &desc);
16385 }
16386
16387 mutex_exit(&mod_lock);
16388 mutex_exit(&dtrace_provider_lock);
16389
16390 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
16391 return (EFAULT);
16392
16393 return (0);
16394 }
16395
16396 case DTRACEIOC_GO: {
16397 processorid_t cpuid;
16398 rval = dtrace_state_go(state, &cpuid);
16399
16400 if (rval != 0)
16401 return (rval);
16402
16403 if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0)
16404 return (EFAULT);
16405
16406 return (0);
16407 }
16408
16409 case DTRACEIOC_STOP: {
16410 processorid_t cpuid;
16411
16412 mutex_enter(&dtrace_lock);
16413 rval = dtrace_state_stop(state, &cpuid);
16414 mutex_exit(&dtrace_lock);
16415
16416 if (rval != 0)
16417 return (rval);
16418
16419 if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0)
16420 return (EFAULT);
16421
16422 return (0);
16423 }
16424
16425 case DTRACEIOC_DOFGET: {
16426 dof_hdr_t hdr, *dof;
16427 uint64_t len;
16428
16429 if (copyin((void *)arg, &hdr, sizeof (hdr)) != 0)
16430 return (EFAULT);
16431
16432 mutex_enter(&dtrace_lock);
16433 dof = dtrace_dof_create(state);
16434 mutex_exit(&dtrace_lock);
16435
16436 len = MIN(hdr.dofh_loadsz, dof->dofh_loadsz);
16437 rval = copyout(dof, (void *)arg, len);
16438 dtrace_dof_destroy(dof);
16439
16440 return (rval == 0 ? 0 : EFAULT);
16441 }
16442
16443 case DTRACEIOC_AGGSNAP:
16444 case DTRACEIOC_BUFSNAP: {
16445 dtrace_bufdesc_t desc;
16446 caddr_t cached;
16447 dtrace_buffer_t *buf;
16448
16449 if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
16450 return (EFAULT);
16451
16452 if (desc.dtbd_cpu < 0 || desc.dtbd_cpu >= NCPU)
16453 return (EINVAL);
16454
16455 mutex_enter(&dtrace_lock);
16456
16457 if (cmd == DTRACEIOC_BUFSNAP) {
16458 buf = &state->dts_buffer[desc.dtbd_cpu];
16459 } else {
16460 buf = &state->dts_aggbuffer[desc.dtbd_cpu];
16461 }
16462
16463 if (buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL)) {
16464 size_t sz = buf->dtb_offset;
16465
16466 if (state->dts_activity != DTRACE_ACTIVITY_STOPPED) {
16467 mutex_exit(&dtrace_lock);
16468 return (EBUSY);
16469 }
16470
16471 /*
16472 * If this buffer has already been consumed, we're
16473 * going to indicate that there's nothing left here
16474 * to consume.
16475 */
16476 if (buf->dtb_flags & DTRACEBUF_CONSUMED) {
16477 mutex_exit(&dtrace_lock);
16478
16479 desc.dtbd_size = 0;
16480 desc.dtbd_drops = 0;
16481 desc.dtbd_errors = 0;
16482 desc.dtbd_oldest = 0;
16483 sz = sizeof (desc);
16484
16485 if (copyout(&desc, (void *)arg, sz) != 0)
16486 return (EFAULT);
16487
16488 return (0);
16489 }
16490
16491 /*
16492 * If this is a ring buffer that has wrapped, we want
16493 * to copy the whole thing out.
16494 */
16495 if (buf->dtb_flags & DTRACEBUF_WRAPPED) {
16496 dtrace_buffer_polish(buf);
16497 sz = buf->dtb_size;
16498 }
16499
16500 if (copyout(buf->dtb_tomax, desc.dtbd_data, sz) != 0) {
16501 mutex_exit(&dtrace_lock);
16502 return (EFAULT);
16503 }
16504
16505 desc.dtbd_size = sz;
16506 desc.dtbd_drops = buf->dtb_drops;
16507 desc.dtbd_errors = buf->dtb_errors;
16508 desc.dtbd_oldest = buf->dtb_xamot_offset;
16509 desc.dtbd_timestamp = dtrace_gethrtime();
16510
16511 mutex_exit(&dtrace_lock);
16512
16513 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
16514 return (EFAULT);
16515
16516 buf->dtb_flags |= DTRACEBUF_CONSUMED;
16517
16518 return (0);
16519 }
16520
16521 if (buf->dtb_tomax == NULL) {
16522 ASSERT(buf->dtb_xamot == NULL);
16523 mutex_exit(&dtrace_lock);
16524 return (ENOENT);
16525 }
16526
16527 cached = buf->dtb_tomax;
16528 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH));
16529
16530 dtrace_xcall(desc.dtbd_cpu,
16531 (dtrace_xcall_t)dtrace_buffer_switch, buf);
16532
16533 state->dts_errors += buf->dtb_xamot_errors;
16534
16535 /*
16536 * If the buffers did not actually switch, then the cross call
16537 * did not take place -- presumably because the given CPU is
16538 * not in the ready set. If this is the case, we'll return
16539 * ENOENT.
16540 */
16541 if (buf->dtb_tomax == cached) {
16542 ASSERT(buf->dtb_xamot != cached);
16543 mutex_exit(&dtrace_lock);
16544 return (ENOENT);
16545 }
16546
16547 ASSERT(cached == buf->dtb_xamot);
16548
16549 /*
16550 * We have our snapshot; now copy it out.
16551 */
16552 if (copyout(buf->dtb_xamot, desc.dtbd_data,
16553 buf->dtb_xamot_offset) != 0) {
16554 mutex_exit(&dtrace_lock);
16555 return (EFAULT);
16556 }
16557
16558 desc.dtbd_size = buf->dtb_xamot_offset;
16559 desc.dtbd_drops = buf->dtb_xamot_drops;
16560 desc.dtbd_errors = buf->dtb_xamot_errors;
16561 desc.dtbd_oldest = 0;
16562 desc.dtbd_timestamp = buf->dtb_switched;
16563
16564 mutex_exit(&dtrace_lock);
16565
16566 /*
16567 * Finally, copy out the buffer description.
16568 */
16569 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
16570 return (EFAULT);
16571
16572 return (0);
16573 }
16574
16575 case DTRACEIOC_CONF: {
16576 dtrace_conf_t conf;
16577
16578 bzero(&conf, sizeof (conf));
16579 conf.dtc_difversion = DIF_VERSION;
16580 conf.dtc_difintregs = DIF_DIR_NREGS;
16581 conf.dtc_diftupregs = DIF_DTR_NREGS;
16582 conf.dtc_ctfmodel = CTF_MODEL_NATIVE;
16583
16584 if (copyout(&conf, (void *)arg, sizeof (conf)) != 0)
16585 return (EFAULT);
16586
16587 return (0);
16588 }
16589
16590 case DTRACEIOC_STATUS: {
16591 dtrace_status_t stat;
16592 dtrace_dstate_t *dstate;
16593 int i, j;
16594 uint64_t nerrs;
16595
16596 /*
16597 * See the comment in dtrace_state_deadman() for the reason
16598 * for setting dts_laststatus to INT64_MAX before setting
16599 * it to the correct value.
16600 */
16601 state->dts_laststatus = INT64_MAX;
16602 dtrace_membar_producer();
16603 state->dts_laststatus = dtrace_gethrtime();
16604
16605 bzero(&stat, sizeof (stat));
16606
16607 mutex_enter(&dtrace_lock);
16608
16609 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) {
16610 mutex_exit(&dtrace_lock);
16611 return (ENOENT);
16612 }
16613
16614 if (state->dts_activity == DTRACE_ACTIVITY_DRAINING)
16615 stat.dtst_exiting = 1;
16616
16617 nerrs = state->dts_errors;
16618 dstate = &state->dts_vstate.dtvs_dynvars;
16619
16620 for (i = 0; i < NCPU; i++) {
16621 dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[i];
16622
16623 stat.dtst_dyndrops += dcpu->dtdsc_drops;
16624 stat.dtst_dyndrops_dirty += dcpu->dtdsc_dirty_drops;
16625 stat.dtst_dyndrops_rinsing += dcpu->dtdsc_rinsing_drops;
16626
16627 if (state->dts_buffer[i].dtb_flags & DTRACEBUF_FULL)
16628 stat.dtst_filled++;
16629
16630 nerrs += state->dts_buffer[i].dtb_errors;
16631
16632 for (j = 0; j < state->dts_nspeculations; j++) {
16633 dtrace_speculation_t *spec;
16634 dtrace_buffer_t *buf;
16635
16636 spec = &state->dts_speculations[j];
16637 buf = &spec->dtsp_buffer[i];
16638 stat.dtst_specdrops += buf->dtb_xamot_drops;
16639 }
16640 }
16641
16642 stat.dtst_specdrops_busy = state->dts_speculations_busy;
16643 stat.dtst_specdrops_unavail = state->dts_speculations_unavail;
16644 stat.dtst_stkstroverflows = state->dts_stkstroverflows;
16645 stat.dtst_dblerrors = state->dts_dblerrors;
16646 stat.dtst_killed =
16647 (state->dts_activity == DTRACE_ACTIVITY_KILLED);
16648 stat.dtst_errors = nerrs;
16649
16650 mutex_exit(&dtrace_lock);
16651
16652 if (copyout(&stat, (void *)arg, sizeof (stat)) != 0)
16653 return (EFAULT);
16654
16655 return (0);
16656 }
16657
16658 case DTRACEIOC_FORMAT: {
16659 dtrace_fmtdesc_t fmt;
16660 char *str;
16661 int len;
16662
16663 if (copyin((void *)arg, &fmt, sizeof (fmt)) != 0)
16664 return (EFAULT);
16665
16666 mutex_enter(&dtrace_lock);
16667
16668 if (fmt.dtfd_format == 0 ||
16669 fmt.dtfd_format > state->dts_nformats) {
16670 mutex_exit(&dtrace_lock);
16671 return (EINVAL);
16672 }
16673
16674 /*
16675 * Format strings are allocated contiguously and they are
16676 * never freed; if a format index is less than the number
16677 * of formats, we can assert that the format map is non-NULL
16678 * and that the format for the specified index is non-NULL.
16679 */
16680 ASSERT(state->dts_formats != NULL);
16681 str = state->dts_formats[fmt.dtfd_format - 1];
16682 ASSERT(str != NULL);
16683
16684 len = strlen(str) + 1;
16685
16686 if (len > fmt.dtfd_length) {
16687 fmt.dtfd_length = len;
16688
16689 if (copyout(&fmt, (void *)arg, sizeof (fmt)) != 0) {
16690 mutex_exit(&dtrace_lock);
16691 return (EINVAL);
16692 }
16693 } else {
16694 if (copyout(str, fmt.dtfd_string, len) != 0) {
16695 mutex_exit(&dtrace_lock);
16696 return (EINVAL);
16697 }
16698 }
16699
16700 mutex_exit(&dtrace_lock);
16701 return (0);
16702 }
16703
16704 default:
16705 break;
16706 }
16707
16708 return (ENOTTY);
16709 }
16710
16711 /*ARGSUSED*/
16712 static int
16713 dtrace_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
16714 {
16715 dtrace_state_t *state;
16716
16717 switch (cmd) {
16718 case DDI_DETACH:
16719 break;
16720
16721 case DDI_SUSPEND:
16722 return (DDI_SUCCESS);
16723
16724 default:
16725 return (DDI_FAILURE);
16726 }
16727
16728 mutex_enter(&cpu_lock);
16729 mutex_enter(&dtrace_provider_lock);
16730 mutex_enter(&dtrace_lock);
16731
16732 ASSERT(dtrace_opens == 0);
16733
16734 if (dtrace_helpers > 0) {
16735 mutex_exit(&dtrace_provider_lock);
16736 mutex_exit(&dtrace_lock);
16737 mutex_exit(&cpu_lock);
16738 return (DDI_FAILURE);
16739 }
16740
16741 if (dtrace_unregister((dtrace_provider_id_t)dtrace_provider) != 0) {
16742 mutex_exit(&dtrace_provider_lock);
16743 mutex_exit(&dtrace_lock);
16744 mutex_exit(&cpu_lock);
16745 return (DDI_FAILURE);
16746 }
16747
16748 dtrace_provider = NULL;
16749
16750 if ((state = dtrace_anon_grab()) != NULL) {
16751 /*
16752 * If there were ECBs on this state, the provider should
16753 * have not been allowed to detach; assert that there is
16754 * none.
16755 */
16756 ASSERT(state->dts_necbs == 0);
16757 dtrace_state_destroy(state);
16758
16759 /*
16760 * If we're being detached with anonymous state, we need to
16761 * indicate to the kernel debugger that DTrace is now inactive.
16762 */
16763 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE);
16764 }
16765
16766 bzero(&dtrace_anon, sizeof (dtrace_anon_t));
16767 unregister_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL);
16768 dtrace_cpu_init = NULL;
16769 dtrace_helpers_cleanup = NULL;
16770 dtrace_helpers_fork = NULL;
16771 dtrace_cpustart_init = NULL;
16772 dtrace_cpustart_fini = NULL;
16773 dtrace_debugger_init = NULL;
16774 dtrace_debugger_fini = NULL;
16775 dtrace_modload = NULL;
16776 dtrace_modunload = NULL;
16777
16778 ASSERT(dtrace_getf == 0);
16779 ASSERT(dtrace_closef == NULL);
16780
16781 mutex_exit(&cpu_lock);
16782
16783 kmem_free(dtrace_probes, dtrace_nprobes * sizeof (dtrace_probe_t *));
16784 dtrace_probes = NULL;
16785 dtrace_nprobes = 0;
16786
16787 dtrace_hash_destroy(dtrace_bymod);
16788 dtrace_hash_destroy(dtrace_byfunc);
16789 dtrace_hash_destroy(dtrace_byname);
16790 dtrace_bymod = NULL;
16791 dtrace_byfunc = NULL;
16792 dtrace_byname = NULL;
16793
16794 kmem_cache_destroy(dtrace_state_cache);
16795 vmem_destroy(dtrace_minor);
16796 vmem_destroy(dtrace_arena);
16797
16798 if (dtrace_toxrange != NULL) {
16799 kmem_free(dtrace_toxrange,
16800 dtrace_toxranges_max * sizeof (dtrace_toxrange_t));
16801 dtrace_toxrange = NULL;
16802 dtrace_toxranges = 0;
16803 dtrace_toxranges_max = 0;
16804 }
16805
16806 ddi_remove_minor_node(dtrace_devi, NULL);
16807 dtrace_devi = NULL;
16808
16809 ddi_soft_state_fini(&dtrace_softstate);
16810
16811 ASSERT(dtrace_vtime_references == 0);
16812 ASSERT(dtrace_opens == 0);
16813 ASSERT(dtrace_retained == NULL);
16814
16815 mutex_exit(&dtrace_lock);
16816 mutex_exit(&dtrace_provider_lock);
16817
16818 /*
16819 * We don't destroy the task queue until after we have dropped our
16820 * locks (taskq_destroy() may block on running tasks). To prevent
16821 * attempting to do work after we have effectively detached but before
16822 * the task queue has been destroyed, all tasks dispatched via the
16823 * task queue must check that DTrace is still attached before
16824 * performing any operation.
16825 */
16826 taskq_destroy(dtrace_taskq);
16827 dtrace_taskq = NULL;
16828
16829 return (DDI_SUCCESS);
16830 }
16831
16832 /*ARGSUSED*/
16833 static int
16834 dtrace_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
16835 {
16836 int error;
16837
16838 switch (infocmd) {
16839 case DDI_INFO_DEVT2DEVINFO:
16840 *result = (void *)dtrace_devi;
16841 error = DDI_SUCCESS;
16842 break;
16843 case DDI_INFO_DEVT2INSTANCE:
16844 *result = (void *)0;
16845 error = DDI_SUCCESS;
16846 break;
16847 default:
16848 error = DDI_FAILURE;
16849 }
16850 return (error);
16851 }
16852
16853 static struct cb_ops dtrace_cb_ops = {
16854 dtrace_open, /* open */
16855 dtrace_close, /* close */
16856 nulldev, /* strategy */
16857 nulldev, /* print */
16858 nodev, /* dump */
16859 nodev, /* read */
16860 nodev, /* write */
16861 dtrace_ioctl, /* ioctl */
16862 nodev, /* devmap */
16863 nodev, /* mmap */
16864 nodev, /* segmap */
16865 nochpoll, /* poll */
16866 ddi_prop_op, /* cb_prop_op */
16867 0, /* streamtab */
16868 D_NEW | D_MP /* Driver compatibility flag */
16869 };
16870
16871 static struct dev_ops dtrace_ops = {
16872 DEVO_REV, /* devo_rev */
16873 0, /* refcnt */
16874 dtrace_info, /* get_dev_info */
16875 nulldev, /* identify */
16876 nulldev, /* probe */
16877 dtrace_attach, /* attach */
16878 dtrace_detach, /* detach */
16879 nodev, /* reset */
16880 &dtrace_cb_ops, /* driver operations */
16881 NULL, /* bus operations */
16882 nodev, /* dev power */
16883 ddi_quiesce_not_needed, /* quiesce */
16884 };
16885
16886 static struct modldrv modldrv = {
16887 &mod_driverops, /* module type (this is a pseudo driver) */
16888 "Dynamic Tracing", /* name of module */
16889 &dtrace_ops, /* driver ops */
16890 };
16891
16892 static struct modlinkage modlinkage = {
16893 MODREV_1,
16894 (void *)&modldrv,
16895 NULL
16896 };
16897
16898 int
16899 _init(void)
16900 {
16901 return (mod_install(&modlinkage));
16902 }
16903
16904 int
16905 _info(struct modinfo *modinfop)
16906 {
16907 return (mod_info(&modlinkage, modinfop));
16908 }
16909
16910 int
16911 _fini(void)
16912 {
16913 return (mod_remove(&modlinkage));
16914 }
--- EOF ---