5045 use atomic_{inc,dec}_* instead of atomic_add_*
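The substitution is mechanical: every atomic add of an explicit +1 or -1 delta becomes a call to the dedicated increment or decrement routine from <sys/atomic.h>, which states the intent without the literal delta. The first listing below shows the affected fasttrap.c hunks before the change (atomic_add_64/atomic_add_32); the second shows the same hunks after it (atomic_inc_64, atomic_dec_64, atomic_inc_32, atomic_dec_32). A minimal sketch of the pattern, using hypothetical counters rather than the fasttrap fields themselves:

#include <sys/types.h>
#include <sys/atomic.h>

/* Hypothetical counters, for illustration only. */
static volatile uint64_t example_acount;
static volatile uint32_t example_total;

static void
example_before(void)
{
	atomic_add_64(&example_acount, 1);	/* bump by an explicit +1 */
	atomic_add_64(&example_acount, -1);	/* ... and drop by an explicit -1 */
	atomic_add_32(&example_total, 1);
	atomic_add_32(&example_total, -1);
}

static void
example_after(void)
{
	atomic_inc_64(&example_acount);		/* replaces atomic_add_64(..., 1) */
	atomic_dec_64(&example_acount);		/* replaces atomic_add_64(..., -1) */
	atomic_inc_32(&example_total);		/* replaces atomic_add_32(..., 1) */
	atomic_dec_32(&example_total);		/* replaces atomic_add_32(..., -1) */
}

Both spellings are equivalent for a delta of plus or minus one; only the call site changes, as the hunks below show.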


1165         fasttrap_pid_getargdesc,
1166         fasttrap_usdt_getarg,
1167         NULL,
1168         fasttrap_pid_destroy
1169 };
1170 
1171 static fasttrap_proc_t *
1172 fasttrap_proc_lookup(pid_t pid)
1173 {
1174         fasttrap_bucket_t *bucket;
1175         fasttrap_proc_t *fprc, *new_fprc;
1176 
1177         bucket = &fasttrap_procs.fth_table[FASTTRAP_PROCS_INDEX(pid)];
1178         mutex_enter(&bucket->ftb_mtx);
1179 
1180         for (fprc = bucket->ftb_data; fprc != NULL; fprc = fprc->ftpc_next) {
1181                 if (fprc->ftpc_pid == pid && fprc->ftpc_acount != 0) {
1182                         mutex_enter(&fprc->ftpc_mtx);
1183                         mutex_exit(&bucket->ftb_mtx);
1184                         fprc->ftpc_rcount++;
1185                         atomic_add_64(&fprc->ftpc_acount, 1);
1186                         ASSERT(fprc->ftpc_acount <= fprc->ftpc_rcount);
1187                         mutex_exit(&fprc->ftpc_mtx);
1188 
1189                         return (fprc);
1190                 }
1191         }
1192 
1193         /*
1194          * Drop the bucket lock so we don't try to perform a sleeping
1195          * allocation under it.
1196          */
1197         mutex_exit(&bucket->ftb_mtx);
1198 
1199         new_fprc = kmem_zalloc(sizeof (fasttrap_proc_t), KM_SLEEP);
1200         new_fprc->ftpc_pid = pid;
1201         new_fprc->ftpc_rcount = 1;
1202         new_fprc->ftpc_acount = 1;
1203 
1204         mutex_enter(&bucket->ftb_mtx);
1205 
1206         /*
1207          * Take another lap through the list to make sure a proc hasn't
1208          * been created for this pid while we weren't under the bucket lock.
1209          */
1210         for (fprc = bucket->ftb_data; fprc != NULL; fprc = fprc->ftpc_next) {
1211                 if (fprc->ftpc_pid == pid && fprc->ftpc_acount != 0) {
1212                         mutex_enter(&fprc->ftpc_mtx);
1213                         mutex_exit(&bucket->ftb_mtx);
1214                         fprc->ftpc_rcount++;
1215                         atomic_add_64(&fprc->ftpc_acount, 1);
1216                         ASSERT(fprc->ftpc_acount <= fprc->ftpc_rcount);
1217                         mutex_exit(&fprc->ftpc_mtx);
1218 
1219                         kmem_free(new_fprc, sizeof (fasttrap_proc_t));
1220 
1221                         return (fprc);
1222                 }
1223         }
1224 
1225         new_fprc->ftpc_next = bucket->ftb_data;
1226         bucket->ftb_data = new_fprc;
1227 
1228         mutex_exit(&bucket->ftb_mtx);
1229 
1230         return (new_fprc);
1231 }
1232 
1233 static void
1234 fasttrap_proc_release(fasttrap_proc_t *proc)
1235 {


1407 
1408 static void
1409 fasttrap_provider_free(fasttrap_provider_t *provider)
1410 {
1411         pid_t pid = provider->ftp_pid;
1412         proc_t *p;
1413 
1414         /*
1415          * There need to be no associated enabled probes, no consumers
1416          * creating probes, and no meta providers referencing this provider.
1417          */
1418         ASSERT(provider->ftp_rcount == 0);
1419         ASSERT(provider->ftp_ccount == 0);
1420         ASSERT(provider->ftp_mcount == 0);
1421 
1422         /*
1423          * If this provider hasn't been retired, we need to explicitly drop the
1424          * count of active providers on the associated process structure.
1425          */
1426         if (!provider->ftp_retired) {
1427                 atomic_add_64(&provider->ftp_proc->ftpc_acount, -1);
1428                 ASSERT(provider->ftp_proc->ftpc_acount <
1429                     provider->ftp_proc->ftpc_rcount);
1430         }
1431 
1432         fasttrap_proc_release(provider->ftp_proc);
1433 
1434         kmem_free(provider, sizeof (fasttrap_provider_t));
1435 
1436         /*
1437          * Decrement p_dtrace_probes on the process whose provider we're
1438          * freeing. We don't have to worry about clobbering someone else's
1439          * modifications to it because we have locked the bucket that
1440          * corresponds to this process's hash chain in the provider hash
1441          * table. Don't sweat it if we can't find the process.
1442          */
1443         mutex_enter(&pidlock);
1444         if ((p = prfind(pid)) == NULL) {
1445                 mutex_exit(&pidlock);
1446                 return;
1447         }


1482                 mutex_exit(&fp->ftp_mtx);
1483                 mutex_exit(&bucket->ftb_mtx);
1484                 return;
1485         }
1486 
1487         /*
1488          * Mark the provider to be removed in our post-processing step, mark it
1489          * retired, and drop the active count on its proc. Marking it indicates
1490          * that we should try to remove it; setting the retired flag indicates
1491          * that we're done with this provider; dropping the active count on the proc
1492          * releases our hold, and when this reaches zero (as it will during
1493          * exit or exec) the proc and associated providers become defunct.
1494          *
1495          * We obviously need to take the bucket lock before the provider lock
1496          * to perform the lookup, but we need to drop the provider lock
1497          * before calling into the DTrace framework since we acquire the
1498          * provider lock in callbacks invoked from the DTrace framework. The
1499          * bucket lock therefore protects the integrity of the provider hash
1500          * table.
1501          */
1502         atomic_add_64(&fp->ftp_proc->ftpc_acount, -1);
1503         ASSERT(fp->ftp_proc->ftpc_acount < fp->ftp_proc->ftpc_rcount);
1504 
1505         fp->ftp_retired = 1;
1506         fp->ftp_marked = 1;
1507         provid = fp->ftp_provid;
1508         mutex_exit(&fp->ftp_mtx);
1509 
1510         /*
1511          * We don't have to worry about invalidating the same provider twice
1512          * since fasttrap_provider_lookup() will ignore providers that have
1513          * been marked as retired.
1514          */
1515         dtrace_invalidate(provid);
1516 
1517         mutex_exit(&bucket->ftb_mtx);
1518 
1519         fasttrap_pid_cleanup();
1520 }
1521 
1522 static int


1578         /*
1579          * Grab the creation lock to ensure consistency between calls to
1580          * dtrace_probe_lookup() and dtrace_probe_create() in the face of
1581          * other threads creating probes. We must drop the provider lock
1582          * before taking this lock to avoid a three-way deadlock with the
1583          * DTrace framework.
1584          */
1585         mutex_enter(&provider->ftp_cmtx);
1586 
1587         if (name == NULL) {
1588                 for (i = 0; i < pdata->ftps_noffs; i++) {
1589                         char name_str[17];
1590 
1591                         (void) sprintf(name_str, "%llx",
1592                             (unsigned long long)pdata->ftps_offs[i]);
1593 
1594                         if (dtrace_probe_lookup(provider->ftp_provid,
1595                             pdata->ftps_mod, pdata->ftps_func, name_str) != 0)
1596                                 continue;
1597 
1598                         atomic_add_32(&fasttrap_total, 1);
1599 
1600                         if (fasttrap_total > fasttrap_max) {
1601                                 atomic_add_32(&fasttrap_total, -1);
1602                                 goto no_mem;
1603                         }
1604 
1605                         pp = kmem_zalloc(sizeof (fasttrap_probe_t), KM_SLEEP);
1606 
1607                         pp->ftp_prov = provider;
1608                         pp->ftp_faddr = pdata->ftps_pc;
1609                         pp->ftp_fsize = pdata->ftps_size;
1610                         pp->ftp_pid = pdata->ftps_pid;
1611                         pp->ftp_ntps = 1;
1612 
1613                         tp = kmem_zalloc(sizeof (fasttrap_tracepoint_t),
1614                             KM_SLEEP);
1615 
1616                         tp->ftt_proc = provider->ftp_proc;
1617                         tp->ftt_pc = pdata->ftps_offs[i] + pdata->ftps_pc;
1618                         tp->ftt_pid = pdata->ftps_pid;
1619 
1620                         pp->ftp_tps[0].fit_tp = tp;
1621                         pp->ftp_tps[0].fit_id.fti_probe = pp;




1165         fasttrap_pid_getargdesc,
1166         fasttrap_usdt_getarg,
1167         NULL,
1168         fasttrap_pid_destroy
1169 };
1170 
1171 static fasttrap_proc_t *
1172 fasttrap_proc_lookup(pid_t pid)
1173 {
1174         fasttrap_bucket_t *bucket;
1175         fasttrap_proc_t *fprc, *new_fprc;
1176 
1177         bucket = &fasttrap_procs.fth_table[FASTTRAP_PROCS_INDEX(pid)];
1178         mutex_enter(&bucket->ftb_mtx);
1179 
1180         for (fprc = bucket->ftb_data; fprc != NULL; fprc = fprc->ftpc_next) {
1181                 if (fprc->ftpc_pid == pid && fprc->ftpc_acount != 0) {
1182                         mutex_enter(&fprc->ftpc_mtx);
1183                         mutex_exit(&bucket->ftb_mtx);
1184                         fprc->ftpc_rcount++;
1185                         atomic_inc_64(&fprc->ftpc_acount);
1186                         ASSERT(fprc->ftpc_acount <= fprc->ftpc_rcount);
1187                         mutex_exit(&fprc->ftpc_mtx);
1188 
1189                         return (fprc);
1190                 }
1191         }
1192 
1193         /*
1194          * Drop the bucket lock so we don't try to perform a sleeping
1195          * allocation under it.
1196          */
1197         mutex_exit(&bucket->ftb_mtx);
1198 
1199         new_fprc = kmem_zalloc(sizeof (fasttrap_proc_t), KM_SLEEP);
1200         new_fprc->ftpc_pid = pid;
1201         new_fprc->ftpc_rcount = 1;
1202         new_fprc->ftpc_acount = 1;
1203 
1204         mutex_enter(&bucket->ftb_mtx);
1205 
1206         /*
1207          * Take another lap through the list to make sure a proc hasn't
1208          * been created for this pid while we weren't under the bucket lock.
1209          */
1210         for (fprc = bucket->ftb_data; fprc != NULL; fprc = fprc->ftpc_next) {
1211                 if (fprc->ftpc_pid == pid && fprc->ftpc_acount != 0) {
1212                         mutex_enter(&fprc->ftpc_mtx);
1213                         mutex_exit(&bucket->ftb_mtx);
1214                         fprc->ftpc_rcount++;
1215                         atomic_inc_64(&fprc->ftpc_acount);
1216                         ASSERT(fprc->ftpc_acount <= fprc->ftpc_rcount);
1217                         mutex_exit(&fprc->ftpc_mtx);
1218 
1219                         kmem_free(new_fprc, sizeof (fasttrap_proc_t));
1220 
1221                         return (fprc);
1222                 }
1223         }
1224 
1225         new_fprc->ftpc_next = bucket->ftb_data;
1226         bucket->ftb_data = new_fprc;
1227 
1228         mutex_exit(&bucket->ftb_mtx);
1229 
1230         return (new_fprc);
1231 }
1232 
1233 static void
1234 fasttrap_proc_release(fasttrap_proc_t *proc)
1235 {


1407 
1408 static void
1409 fasttrap_provider_free(fasttrap_provider_t *provider)
1410 {
1411         pid_t pid = provider->ftp_pid;
1412         proc_t *p;
1413 
1414         /*
1415          * There need to be no associated enabled probes, no consumers
1416          * creating probes, and no meta providers referencing this provider.
1417          */
1418         ASSERT(provider->ftp_rcount == 0);
1419         ASSERT(provider->ftp_ccount == 0);
1420         ASSERT(provider->ftp_mcount == 0);
1421 
1422         /*
1423          * If this provider hasn't been retired, we need to explicitly drop the
1424          * count of active providers on the associated process structure.
1425          */
1426         if (!provider->ftp_retired) {
1427                 atomic_dec_64(&provider->ftp_proc->ftpc_acount);
1428                 ASSERT(provider->ftp_proc->ftpc_acount <
1429                     provider->ftp_proc->ftpc_rcount);
1430         }
1431 
1432         fasttrap_proc_release(provider->ftp_proc);
1433 
1434         kmem_free(provider, sizeof (fasttrap_provider_t));
1435 
1436         /*
1437          * Decrement p_dtrace_probes on the process whose provider we're
1438          * freeing. We don't have to worry about clobbering someone else's
1439          * modifications to it because we have locked the bucket that
1440          * corresponds to this process's hash chain in the provider hash
1441          * table. Don't sweat it if we can't find the process.
1442          */
1443         mutex_enter(&pidlock);
1444         if ((p = prfind(pid)) == NULL) {
1445                 mutex_exit(&pidlock);
1446                 return;
1447         }


1482                 mutex_exit(&fp->ftp_mtx);
1483                 mutex_exit(&bucket->ftb_mtx);
1484                 return;
1485         }
1486 
1487         /*
1488          * Mark the provider to be removed in our post-processing step, mark it
1489          * retired, and drop the active count on its proc. Marking it indicates
1490          * that we should try to remove it; setting the retired flag indicates
1491          * that we're done with this provider; dropping the active count on the proc
1492          * releases our hold, and when this reaches zero (as it will during
1493          * exit or exec) the proc and associated providers become defunct.
1494          *
1495          * We obviously need to take the bucket lock before the provider lock
1496          * to perform the lookup, but we need to drop the provider lock
1497          * before calling into the DTrace framework since we acquire the
1498          * provider lock in callbacks invoked from the DTrace framework. The
1499          * bucket lock therefore protects the integrity of the provider hash
1500          * table.
1501          */
1502         atomic_dec_64(&fp->ftp_proc->ftpc_acount);
1503         ASSERT(fp->ftp_proc->ftpc_acount < fp->ftp_proc->ftpc_rcount);
1504 
1505         fp->ftp_retired = 1;
1506         fp->ftp_marked = 1;
1507         provid = fp->ftp_provid;
1508         mutex_exit(&fp->ftp_mtx);
1509 
1510         /*
1511          * We don't have to worry about invalidating the same provider twice
1512          * since fasttrap_provider_lookup() will ignore providers that have
1513          * been marked as retired.
1514          */
1515         dtrace_invalidate(provid);
1516 
1517         mutex_exit(&bucket->ftb_mtx);
1518 
1519         fasttrap_pid_cleanup();
1520 }
1521 
1522 static int


1578         /*
1579          * Grab the creation lock to ensure consistency between calls to
1580          * dtrace_probe_lookup() and dtrace_probe_create() in the face of
1581          * other threads creating probes. We must drop the provider lock
1582          * before taking this lock to avoid a three-way deadlock with the
1583          * DTrace framework.
1584          */
1585         mutex_enter(&provider->ftp_cmtx);
1586 
1587         if (name == NULL) {
1588                 for (i = 0; i < pdata->ftps_noffs; i++) {
1589                         char name_str[17];
1590 
1591                         (void) sprintf(name_str, "%llx",
1592                             (unsigned long long)pdata->ftps_offs[i]);
1593 
1594                         if (dtrace_probe_lookup(provider->ftp_provid,
1595                             pdata->ftps_mod, pdata->ftps_func, name_str) != 0)
1596                                 continue;
1597 
1598                         atomic_inc_32(&fasttrap_total);
1599 
1600                         if (fasttrap_total > fasttrap_max) {
1601                                 atomic_dec_32(&fasttrap_total);
1602                                 goto no_mem;
1603                         }
1604 
1605                         pp = kmem_zalloc(sizeof (fasttrap_probe_t), KM_SLEEP);
1606 
1607                         pp->ftp_prov = provider;
1608                         pp->ftp_faddr = pdata->ftps_pc;
1609                         pp->ftp_fsize = pdata->ftps_size;
1610                         pp->ftp_pid = pdata->ftps_pid;
1611                         pp->ftp_ntps = 1;
1612 
1613                         tp = kmem_zalloc(sizeof (fasttrap_tracepoint_t),
1614                             KM_SLEEP);
1615 
1616                         tp->ftt_proc = provider->ftp_proc;
1617                         tp->ftt_pc = pdata->ftps_offs[i] + pdata->ftps_pc;
1618                         tp->ftt_pid = pdata->ftps_pid;
1619 
1620                         pp->ftp_tps[0].fit_tp = tp;
1621                         pp->ftp_tps[0].fit_id.fti_probe = pp;