5045 use atomic_{inc,dec}_* instead of atomic_add_*
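For background: atomic_inc_32() and atomic_dec_32() are the by-one forms of
atomic_add_32(), so the substitution below is mechanical and does not change
behavior. A minimal sketch of the two idioms (the counter and function names
here are illustrative, not taken from memscrub.c):

	#include <sys/atomic.h>

	static volatile uint32_t pause_count;	/* illustrative counter */

	static void
	pause_example(void)
	{
		atomic_add_32(&pause_count, 1);		/* old idiom: add a delta of 1 */
		atomic_add_32(&pause_count, -1);	/* old idiom: add a delta of -1 */

		atomic_inc_32(&pause_count);		/* new idiom: increment by one */
		atomic_dec_32(&pause_count);		/* new idiom: decrement by one */
	}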


1397                 old_list = old_list->ml_next;
1398                 kmem_free(el, sizeof (struct memlist));
1399         }
1400 
1401         return (0);
1402 }
1403 
1404 /*ARGSUSED*/
1405 static void
1406 memscrub_mem_config_post_add(
1407         void *arg,
1408         pgcnt_t delta_pages)
1409 {
1410         /*
1411          * We increment pause_memscrub before entering new_memscrub(). This
1412          * will force the memscrubber to sleep, allowing the DR callback
1413          * thread to acquire memscrub_lock in new_memscrub(). The use of
1414          * atomic_add_32() allows concurrent memory DR operations to use the
1415          * callbacks safely.
1416          */
1417         atomic_add_32(&pause_memscrub, 1);
1418         ASSERT(pause_memscrub != 0);
1419 
1420         /*
1421          * "Don't care" if we are not scrubbing new memory.
1422          */
1423         (void) new_memscrub(0);         /* retain page retire list */
1424 
1425         /* Restore the pause setting. */
1426         atomic_add_32(&pause_memscrub, -1);
1427 }
1428 
1429 /*ARGSUSED*/
1430 static int
1431 memscrub_mem_config_pre_del(
1432         void *arg,
1433         pgcnt_t delta_pages)
1434 {
1435         /* Nothing to do. */
1436         return (0);
1437 }
1438 
1439 /*ARGSUSED*/
1440 static void
1441 memscrub_mem_config_post_del(
1442         void *arg,
1443         pgcnt_t delta_pages,
1444         int cancelled)
1445 {
1446         /*
1447          * We increment pause_memscrub before entering new_memscrub(). This
1448          * will force the memscrubber to sleep, allowing the DR callback
1449          * thread to acquire memscrub_lock in new_memscrub(). The use of
1450          * atomic_add_32() allows concurrent memory DR operations to use the
1451          * callbacks safely.
1452          */
1453         atomic_add_32(&pause_memscrub, 1);
1454         ASSERT(pause_memscrub != 0);
1455 
1456         /*
1457          * Must stop scrubbing deleted memory as it may be disconnected.
1458          */
1459         if (new_memscrub(1)) {  /* update page retire list */
1460                 disable_memscrub = 1;
1461         }
1462 
1463         /* Restore the pause setting. */
1464         atomic_add_32(&pause_memscrub, -1);
1465 }
1466 
1467 static kphysm_setup_vector_t memscrub_mem_config_vec = {
1468         KPHYSM_SETUP_VECTOR_VERSION,
1469         memscrub_mem_config_post_add,
1470         memscrub_mem_config_pre_del,
1471         memscrub_mem_config_post_del,
1472 };
1473 
1474 static void
1475 memscrub_init_mem_config()
1476 {
1477         int ret;
1478 
1479         ret = kphysm_setup_func_register(&memscrub_mem_config_vec,
1480             (void *)NULL);
1481         ASSERT(ret == 0);
1482 }
1483 
1484 static void


1397                 old_list = old_list->ml_next;
1398                 kmem_free(el, sizeof (struct memlist));
1399         }
1400 
1401         return (0);
1402 }
1403 
1404 /*ARGSUSED*/
1405 static void
1406 memscrub_mem_config_post_add(
1407         void *arg,
1408         pgcnt_t delta_pages)
1409 {
1410         /*
1411          * We increment pause_memscrub before entering new_memscrub(). This
1412          * will force the memscrubber to sleep, allowing the DR callback
1413          * thread to acquire memscrub_lock in new_memscrub(). The use of
1414          * atomic_inc_32() allows concurrent memory DR operations to use the
1415          * callbacks safely.
1416          */
1417         atomic_inc_32(&pause_memscrub);
1418         ASSERT(pause_memscrub != 0);
1419 
1420         /*
1421          * "Don't care" if we are not scrubbing new memory.
1422          */
1423         (void) new_memscrub(0);         /* retain page retire list */
1424 
1425         /* Restore the pause setting. */
1426         atomic_dec_32(&pause_memscrub);
1427 }
1428 
1429 /*ARGSUSED*/
1430 static int
1431 memscrub_mem_config_pre_del(
1432         void *arg,
1433         pgcnt_t delta_pages)
1434 {
1435         /* Nothing to do. */
1436         return (0);
1437 }
1438 
1439 /*ARGSUSED*/
1440 static void
1441 memscrub_mem_config_post_del(
1442         void *arg,
1443         pgcnt_t delta_pages,
1444         int cancelled)
1445 {
1446         /*
1447          * We increment pause_memscrub before entering new_memscrub(). This
1448          * will force the memscrubber to sleep, allowing the DR callback
1449          * thread to acquire memscrub_lock in new_memscrub(). The use of
1450          * atomic_inc_32() allows concurrent memory DR operations to use the
1451          * callbacks safely.
1452          */
1453         atomic_inc_32(&pause_memscrub);
1454         ASSERT(pause_memscrub != 0);
1455 
1456         /*
1457          * Must stop scrubbing deleted memory as it may be disconnected.
1458          */
1459         if (new_memscrub(1)) {  /* update page retire list */
1460                 disable_memscrub = 1;
1461         }
1462 
1463         /* Restore the pause setting. */
1464         atomic_dec_32(&pause_memscrub);
1465 }
1466 
1467 static kphysm_setup_vector_t memscrub_mem_config_vec = {
1468         KPHYSM_SETUP_VECTOR_VERSION,
1469         memscrub_mem_config_post_add,
1470         memscrub_mem_config_pre_del,
1471         memscrub_mem_config_post_del,
1472 };
1473 
1474 static void
1475 memscrub_init_mem_config()
1476 {
1477         int ret;
1478 
1479         ret = kphysm_setup_func_register(&memscrub_mem_config_vec,
1480             (void *)NULL);
1481         ASSERT(ret == 0);
1482 }
1483 
1484 static void
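
The comments above describe the pause protocol these callbacks rely on: a DR
callback bumps pause_memscrub, which makes the scrubber thread go to sleep so
the callback can take memscrub_lock in new_memscrub(), and the callback drops
the count again when it is done. A simplified, hypothetical sketch of that
pattern (this is not the actual memscrubber loop; the lock, condvar, and
function names are invented for illustration, and initialization is omitted):

	#include <sys/atomic.h>
	#include <sys/mutex.h>
	#include <sys/condvar.h>

	static kmutex_t scrub_lock;		/* hypothetical; stands in for memscrub_lock */
	static kcondvar_t scrub_cv;		/* hypothetical wakeup condvar */
	static volatile uint32_t scrub_pause;	/* stands in for pause_memscrub */

	/* Callback side: pause the worker, update scan state, resume it. */
	static void
	dr_callback_sketch(void)
	{
		atomic_inc_32(&scrub_pause);	/* force the worker to sleep */
		mutex_enter(&scrub_lock);
		/* ... rebuild the scan list while the worker is idle ... */
		mutex_exit(&scrub_lock);
		atomic_dec_32(&scrub_pause);	/* allow the worker to continue */
		cv_signal(&scrub_cv);
	}

	/* Worker side: block for as long as any caller holds the pause count. */
	static void
	scrub_worker_wait_sketch(void)
	{
		mutex_enter(&scrub_lock);
		while (scrub_pause != 0)
			cv_wait(&scrub_cv, &scrub_lock);
		mutex_exit(&scrub_lock);
	}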