5042 stop using deprecated atomic functions
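For context on the change itself: cas64() is one of the old, undocumented atomic wrappers that this issue retires in favor of the documented atomic_cas_*() family from <sys/atomic.h>. The two have identical compare-and-swap semantics, so every call site below converts mechanically. A minimal sketch of the interface being relied on (per atomic_cas(9F); illustration only, not part of the webrev):

	#include <sys/atomic.h>

	/*
	 * atomic_cas_64() atomically compares *target with cmp and, if they
	 * are equal, stores newval in *target.  In either case it returns
	 * the value that *target held before the call.
	 */
	extern uint64_t atomic_cas_64(volatile uint64_t *target, uint64_t cmp,
	    uint64_t newval);

	/* A legacy call of the form cas64(addr, old, new) thus becomes: */
	/* (void) atomic_cas_64(addr, old, new); */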
--- old/usr/src/uts/common/os/clock_highres.c
+++ new/usr/src/uts/common/os/clock_highres.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License, Version 1.0 only
6 6 * (the "License"). You may not use this file except in compliance
7 7 * with the License.
8 8 *
9 9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10 10 * or http://www.opensolaris.org/os/licensing.
11 11 * See the License for the specific language governing permissions
12 12 * and limitations under the License.
13 13 *
14 14 * When distributing Covered Code, include this CDDL HEADER in each
15 15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16 16 * If applicable, add the following below this CDDL HEADER, with the
17 17 * fields enclosed by brackets "[]" replaced with your own identifying
18 18 * information: Portions Copyright [yyyy] [name of copyright owner]
19 19 *
20 20 * CDDL HEADER END
21 21 */
22 22 /*
23 23 * Copyright 2003 Sun Microsystems, Inc. All rights reserved.
24 24 * Use is subject to license terms.
25 25 */
26 26
27 27 /*
28 28 * Copyright (c) 2012, Joyent Inc. All rights reserved.
29 29 */
30 30
31 31 #include <sys/timer.h>
32 32 #include <sys/systm.h>
33 33 #include <sys/param.h>
34 34 #include <sys/kmem.h>
35 35 #include <sys/debug.h>
36 36 #include <sys/cyclic.h>
37 37 #include <sys/cmn_err.h>
38 38 #include <sys/pset.h>
39 39 #include <sys/atomic.h>
40 40 #include <sys/policy.h>
41 41
42 42 static clock_backend_t clock_highres;
43 43
44 44 /*ARGSUSED*/
45 45 static int
46 46 clock_highres_settime(timespec_t *ts)
47 47 {
48 48 return (EINVAL);
49 49 }
50 50
51 51 static int
52 52 clock_highres_gettime(timespec_t *ts)
53 53 {
54 54 hrt2ts(gethrtime(), (timestruc_t *)ts);
55 55
56 56 return (0);
57 57 }
58 58
59 59 static int
60 60 clock_highres_getres(timespec_t *ts)
61 61 {
62 62 hrt2ts(cyclic_getres(), (timestruc_t *)ts);
63 63
64 64 return (0);
65 65 }
66 66
67 67 /*ARGSUSED*/
68 68 static int
69 69 clock_highres_timer_create(itimer_t *it, struct sigevent *ev)
70 70 {
71 71 /*
72 72 * CLOCK_HIGHRES timers of sufficiently high resolution can deny
73 73 * service; only allow privileged users to create such timers.
74 74 * Sites that do not wish to have this restriction should
75 75 * give users the "proc_clock_highres" privilege.
76 76 */
77 77 if (secpolicy_clock_highres(CRED()) != 0) {
78 78 it->it_arg = NULL;
79 79 return (EPERM);
80 80 }
81 81
82 82 it->it_arg = kmem_zalloc(sizeof (cyclic_id_t), KM_SLEEP);
83 83
84 84 return (0);
85 85 }
86 86
87 87 static void
88 88 clock_highres_fire(void *arg)
89 89 {
90 90 itimer_t *it = (itimer_t *)arg;
91 91 hrtime_t *addr = &it->it_hrtime;
92 92 hrtime_t old = *addr, new = gethrtime();
93 93
94 94 do {
95 95 old = *addr;
96 - } while (cas64((uint64_t *)addr, old, new) != old);
96 + } while (atomic_cas_64((uint64_t *)addr, old, new) != old);
97 97
98 98 timer_fire(it);
99 99 }
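The loop in clock_highres_fire() is the standard compare-and-swap retry idiom: snapshot the current value, try to swap in the new one, and retry if another writer slipped in between the read and the swap. A self-contained userland sketch of the same pattern, with hypothetical names, using the libc atomic_cas(3C) interface:

	#include <atomic.h>
	#include <stdint.h>

	/* Hypothetical shared 64-bit timestamp, analogous to it_hrtime. */
	static volatile uint64_t latest;

	static void
	publish(uint64_t now)
	{
		uint64_t old;

		do {
			old = latest;	/* snapshot the current value */
		} while (atomic_cas_64(&latest, old, now) != old);
	}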
100 100
101 101 static int
102 102 clock_highres_timer_settime(itimer_t *it, int flags,
103 103 const struct itimerspec *when)
104 104 {
105 105 cyclic_id_t cyc, *cycp = it->it_arg;
106 106 proc_t *p = curproc;
107 107 kthread_t *t = curthread;
108 108 cyc_time_t cyctime;
109 109 cyc_handler_t hdlr;
110 110 cpu_t *cpu;
111 111 cpupart_t *cpupart;
112 112 int pset;
113 113
114 114 cyctime.cyt_when = ts2hrt(&when->it_value);
115 115 cyctime.cyt_interval = ts2hrt(&when->it_interval);
116 116
117 117 if (cyctime.cyt_when != 0 && cyctime.cyt_interval == 0 &&
118 118 it->it_itime.it_interval.tv_sec == 0 &&
119 119 it->it_itime.it_interval.tv_nsec == 0 &&
120 120 (cyc = *cycp) != CYCLIC_NONE) {
121 121 /*
122 122 * If our existing timer is a one-shot and our new timer is a
123 123 * one-shot, we'll save ourselves a world of grief and just
124 124 * reprogram the cyclic.
125 125 */
126 126 it->it_itime = *when;
127 127
128 128 if (!(flags & TIMER_ABSTIME))
129 129 cyctime.cyt_when += gethrtime();
130 130
131 131 hrt2ts(cyctime.cyt_when, &it->it_itime.it_value);
132 132 (void) cyclic_reprogram(cyc, cyctime.cyt_when);
133 133 return (0);
134 134 }
135 135
136 136 mutex_enter(&cpu_lock);
137 137 if ((cyc = *cycp) != CYCLIC_NONE) {
138 138 cyclic_remove(cyc);
139 139 *cycp = CYCLIC_NONE;
140 140 }
141 141
142 142 if (cyctime.cyt_when == 0) {
143 143 mutex_exit(&cpu_lock);
144 144 return (0);
145 145 }
146 146
147 147 if (!(flags & TIMER_ABSTIME))
148 148 cyctime.cyt_when += gethrtime();
149 149
150 150 /*
151 151 * Now we will check for overflow (that is, we will check to see
152 152 * that the start time plus the interval time doesn't exceed
153 153 * INT64_MAX). The astute code reviewer will observe that this
154 154 * one-time check doesn't guarantee that a future expiration
155 155 * will not wrap. We wish to prove, then, that if a future
156 156 * expiration does wrap, the earliest the problem can be encountered
157 157 * is (INT64_MAX / 2) nanoseconds (146 years) after boot. Formally:
158 158 *
159 159 * Given: s + i < m s > 0 i > 0
160 160 * s + ni > m n > 1
161 161 *
162 162 * (where "s" is the start time, "i" is the interval, "n" is the
163 163 * number of times the cyclic has fired and "m" is INT64_MAX)
164 164 *
165 165 * Prove:
166 166 * (a) s + (n - 1)i > (m / 2)
167 167 * (b) s + (n - 1)i < m
168 168 *
169 169 * That is, prove that we must have fired at least once 146 years
170 170 * after boot. The proof is very straightforward; since the left
171 171 * side of (a) is minimized when i is small, it is sufficient to show
172 172 * that the statement is true for i's smallest possible value
173 173 * (((m - s) / n) + epsilon). The same goes for (b); showing that the
174 174 * statement is true for i's largest possible value (m - s + epsilon)
175 175 * is sufficient to prove the statement.
176 176 *
177 177 * The actual arithmetic manipulation is left up to the reader.
178 178 */
179 179 if (cyctime.cyt_when > INT64_MAX - cyctime.cyt_interval) {
180 180 mutex_exit(&cpu_lock);
181 181 return (EOVERFLOW);
182 182 }
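As a quick sanity check on the figure quoted in the comment, (INT64_MAX / 2) nanoseconds does work out to roughly 146 years. A throwaway computation (illustration only, assuming 365.25-day years):

	#include <stdio.h>
	#include <stdint.h>

	int
	main(void)
	{
		/* INT64_MAX ns is about 292 years, so half of it is about 146. */
		double years = (INT64_MAX / 2) / 1e9 / (365.25 * 24 * 3600);

		(void) printf("%.1f years\n", years);	/* prints about 146.1 */
		return (0);
	}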
183 183
184 184 if (cyctime.cyt_interval == 0) {
185 185 /*
186 186 * If this is a one-shot, then we set the interval to be
187 187 * infinite. If this timer is never touched, this cyclic will
188 188 * simply consume space in the cyclic subsystem. As soon as
189 189 * timer_settime() or timer_delete() is called, the cyclic is
190 190 * removed (so it's not possible to run the machine out
191 191 * of resources by creating one-shots).
192 192 */
193 193 cyctime.cyt_interval = CY_INFINITY;
194 194 }
195 195
196 196 it->it_itime = *when;
197 197
198 198 hrt2ts(cyctime.cyt_when, &it->it_itime.it_value);
199 199
200 200 hdlr.cyh_func = (cyc_func_t)clock_highres_fire;
201 201 hdlr.cyh_arg = it;
202 202 hdlr.cyh_level = CY_LOW_LEVEL;
203 203
204 204 if (cyctime.cyt_when != 0)
205 205 *cycp = cyc = cyclic_add(&hdlr, &cyctime);
206 206
207 207 /*
208 208 * Now that we have the cyclic created, we need to bind it to our
209 209 * bound CPU and processor set (if any).
210 210 */
211 211 mutex_enter(&p->p_lock);
212 212 cpu = t->t_bound_cpu;
213 213 cpupart = t->t_cpupart;
214 214 pset = t->t_bind_pset;
215 215
216 216 mutex_exit(&p->p_lock);
217 217
218 218 cyclic_bind(cyc, cpu, pset == PS_NONE ? NULL : cpupart);
219 219
220 220 mutex_exit(&cpu_lock);
221 221
222 222 return (0);
223 223 }
224 224
225 225 static int
226 226 clock_highres_timer_gettime(itimer_t *it, struct itimerspec *when)
227 227 {
228 228 /*
229 229 * CLOCK_HIGHRES doesn't update it_itime.
230 230 */
231 231 hrtime_t start = ts2hrt(&it->it_itime.it_value);
232 232 hrtime_t interval = ts2hrt(&it->it_itime.it_interval);
233 233 hrtime_t diff, now = gethrtime();
234 234 hrtime_t *addr = &it->it_hrtime;
235 235 hrtime_t last;
236 236
237 237 /*
238 - * We're using cas64() here only to assure that we slurp the entire
239 - * timestamp atomically.
238 + * We're using atomic_cas_64() here only to assure that we slurp the
239 + * entire timestamp atomically.
240 240 */
241 - last = cas64((uint64_t *)addr, 0, 0);
241 + last = atomic_cas_64((uint64_t *)addr, 0, 0);
242 242
243 243 *when = it->it_itime;
244 244
245 245 if (!timerspecisset(&when->it_value))
246 246 return (0);
247 247
248 248 if (start > now) {
249 249 /*
250 250 * We haven't gone off yet...
251 251 */
252 252 diff = start - now;
253 253 } else {
254 254 if (interval == 0) {
255 255 /*
256 256 * This is a one-shot which should have already
257 257 * fired; set it_value to 0.
258 258 */
259 259 timerspecclear(&when->it_value);
260 260 return (0);
261 261 }
262 262
263 263 /*
264 264 * Calculate how far we are into this interval.
265 265 */
266 266 diff = (now - start) % interval;
267 267
268 268 /*
269 269 * Now check to see if we've dealt with the last interval
270 270 * yet.
271 271 */
272 272 if (now - diff > last) {
273 273 /*
274 274 * The last interval hasn't fired; set it_value to 0.
275 275 */
276 276 timerspecclear(&when->it_value);
277 277 return (0);
278 278 }
279 279
280 280 /*
281 281 * The last interval _has_ fired; we can return the amount
282 282 * of time left in this interval.
283 283 */
284 284 diff = interval - diff;
285 285 }
286 286
287 287 hrt2ts(diff, &when->it_value);
288 288
289 289 return (0);
290 290 }
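A note on the atomic_cas_64(addr, 0, 0) call above: comparing against 0 and "swapping in" 0 either fails (value nonzero, nothing stored) or rewrites 0 with 0 (a no-op), and in both cases returns the current contents. It is simply a way to read the 64-bit timestamp as a single atomic unit, which matters on 32-bit kernels where a plain load of a 64-bit word can tear. A hedged userland illustration of the same idiom, with a hypothetical shared variable:

	#include <atomic.h>
	#include <stdint.h>

	/* Hypothetical 64-bit value updated concurrently by another thread. */
	extern volatile uint64_t shared;

	static uint64_t
	read_64_atomically(void)
	{
		/*
		 * The compare-and-swap never changes a nonzero value and
		 * rewrites 0 with 0 if it does match; either way it returns
		 * the current value, loaded atomically.
		 */
		return (atomic_cas_64(&shared, 0, 0));
	}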
291 291
292 292 static int
293 293 clock_highres_timer_delete(itimer_t *it)
294 294 {
295 295 cyclic_id_t cyc;
296 296
297 297 if (it->it_arg == NULL) {
298 298 /*
299 299 * This timer was never fully created; we must have failed
300 300 * in the clock_highres_timer_create() routine.
301 301 */
302 302 return (0);
303 303 }
304 304
305 305 mutex_enter(&cpu_lock);
306 306
307 307 if ((cyc = *((cyclic_id_t *)it->it_arg)) != CYCLIC_NONE)
308 308 cyclic_remove(cyc);
309 309
310 310 mutex_exit(&cpu_lock);
311 311
312 312 kmem_free(it->it_arg, sizeof (cyclic_id_t));
313 313
314 314 return (0);
315 315 }
316 316
317 317 static void
318 318 clock_highres_timer_lwpbind(itimer_t *it)
319 319 {
320 320 proc_t *p = curproc;
321 321 kthread_t *t = curthread;
322 322 cyclic_id_t cyc = *((cyclic_id_t *)it->it_arg);
323 323 cpu_t *cpu;
324 324 cpupart_t *cpupart;
325 325 int pset;
326 326
327 327 if (cyc == CYCLIC_NONE)
328 328 return;
329 329
330 330 mutex_enter(&cpu_lock);
331 331 mutex_enter(&p->p_lock);
332 332
333 333 /*
334 334 * Okay, now we can safely look at the bindings.
335 335 */
336 336 cpu = t->t_bound_cpu;
337 337 cpupart = t->t_cpupart;
338 338 pset = t->t_bind_pset;
339 339
340 340 /*
341 341 * Now we drop p_lock. We haven't dropped cpu_lock; we're guaranteed
342 342 * that even if the bindings change, the CPU and/or processor set
343 343 * that this timer was bound to remain valid (and the combination
344 344 * remains self-consistent).
345 345 */
346 346 mutex_exit(&p->p_lock);
347 347
348 348 cyclic_bind(cyc, cpu, pset == PS_NONE ? NULL : cpupart);
349 349
350 350 mutex_exit(&cpu_lock);
351 351 }
352 352
353 353 void
354 354 clock_highres_init()
355 355 {
356 356 clock_backend_t *be = &clock_highres;
357 357 struct sigevent *ev = &be->clk_default;
358 358
359 359 ev->sigev_signo = SIGALRM;
360 360 ev->sigev_notify = SIGEV_SIGNAL;
361 361 ev->sigev_value.sival_ptr = NULL;
362 362
363 363 be->clk_clock_settime = clock_highres_settime;
364 364 be->clk_clock_gettime = clock_highres_gettime;
365 365 be->clk_clock_getres = clock_highres_getres;
366 366 be->clk_timer_create = clock_highres_timer_create;
367 367 be->clk_timer_gettime = clock_highres_timer_gettime;
368 368 be->clk_timer_settime = clock_highres_timer_settime;
369 369 be->clk_timer_delete = clock_highres_timer_delete;
370 370 be->clk_timer_lwpbind = clock_highres_timer_lwpbind;
371 371
372 372 clock_add_backend(CLOCK_HIGHRES, &clock_highres);
373 373 }
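Finally, for readers who want to see how this backend is driven: from userland these hooks sit behind the ordinary POSIX timer calls, with CLOCK_HIGHRES as the clock ID. A minimal, hedged example (error handling omitted; per the comment in clock_highres_timer_create(), the caller needs the proc_clock_highres privilege unless the site has granted it broadly):

	#include <signal.h>
	#include <time.h>
	#include <unistd.h>

	/*ARGSUSED*/
	static void
	on_alarm(int sig)
	{
		/* Nothing to do; returning from pause() is enough. */
	}

	int
	main(void)
	{
		timer_t tid;
		struct sigevent ev = { 0 };
		struct itimerspec its = { 0 };

		(void) signal(SIGALRM, on_alarm);

		/* Reaches clock_highres_timer_create() in the kernel. */
		ev.sigev_notify = SIGEV_SIGNAL;
		ev.sigev_signo = SIGALRM;
		(void) timer_create(CLOCK_HIGHRES, &ev, &tid);

		/* 1ms periodic timer; reaches clock_highres_timer_settime(). */
		its.it_value.tv_nsec = 1000000;
		its.it_interval = its.it_value;
		(void) timer_settime(tid, 0, &its, NULL);

		(void) pause();			/* wait for the first SIGALRM */
		(void) timer_delete(tid);	/* clock_highres_timer_delete() */
		return (0);
	}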