5045 use atomic_{inc,dec}_* instead of atomic_add_*
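
The transformation is mechanical: every atomic_add_*() call whose second
argument is a constant 1 or -1 becomes the corresponding atomic_inc_*()
or atomic_dec_*() entry point from <sys/atomic.h>. A minimal sketch of
the before/after pattern, using hypothetical counter names rather than
the actual seg_kp variables:

	#include <sys/types.h>
	#include <sys/atomic.h>

	static volatile uint32_t demo_count;	/* hypothetical counter */
	static volatile ulong_t demo_resv;	/* hypothetical counter */

	static void
	demo_before(void)
	{
		atomic_add_32(&demo_count, 1);		/* add of constant 1 */
		atomic_add_long(&demo_resv, -1);	/* add of constant -1 */
	}

	static void
	demo_after(void)
	{
		atomic_inc_32(&demo_count);	/* same effect, intent explicit */
		atomic_dec_ulong(&demo_resv);	/* same effect, intent explicit */
	}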

          --- old/usr/src/uts/common/vm/seg_kp.c
          +++ new/usr/src/uts/common/vm/seg_kp.c
[... 706 lines elided ...]
 707  707                                  }
 708  708                                  if (PP_ISRAF(pp))
 709  709                                          PP_CLRRAF(pp);
 710  710  
 711  711                                  page_unlock(pp);
 712  712                          }
 713  713                          if ((kpd->kp_flags & KPD_HASAMP) == 0) {
 714  714                                  anon_free(kpd->kp_anon, kpd->kp_anon_idx + i,
 715  715                                      PAGESIZE);
 716  716                                  anon_unresv_zone(PAGESIZE, NULL);
 717      -                                atomic_add_long(&anon_segkp_pages_resv,
 718      -                                    -1);
      717 +                                atomic_dec_ulong(&anon_segkp_pages_resv);
 719  718                          }
 720  719                          TRACE_5(TR_FAC_VM,
 721  720                              TR_ANON_SEGKP, "anon segkp:%p %p %lu %u %u",
 722  721                              kpd, va, PAGESIZE, 0, 0);
 723  722                  } else {
 724  723                          if (kpd->kp_flags & KPD_LOCKED) {
 725  724                                  pp = page_find(&kvp, (u_offset_t)(uintptr_t)va);
 726  725                                  if (pp == NULL) {
 727  726                                          panic("segkp_release: "
 728  727                                              "no page to unlock");
[... 102 lines elided ...]
 831  830                  hat_memload(kas.a_hat, red_va, red_pp,
 832  831                      (PROT_READ|PROT_WRITE), HAT_LOAD_LOCK);
 833  832                  page_downgrade(red_pp);
 834  833  
 835  834                  /*
 836  835                   * The page is left SE_SHARED locked so we can hold on to
 837  836                   * the page_t pointer.
 838  837                   */
 839  838                  curthread->t_red_pp = red_pp;
 840  839  
 841      -                atomic_add_32(&red_nmapped, 1);
      840 +                atomic_inc_32(&red_nmapped);
 842  841                  while (fp - (uintptr_t)curthread->t_stkbase < red_closest) {
 843  842                          (void) atomic_cas_32(&red_closest, red_closest,
 844  843                              (uint32_t)(fp - (uintptr_t)curthread->t_stkbase));
 845  844                  }
 846  845                  return (1);
 847  846          }
 848  847  
 849  848          stkbase = (caddr_t)(((uintptr_t)curthread->t_stkbase &
 850  849              (uintptr_t)PAGEMASK) - PAGESIZE);
 851  850  
 852      -        atomic_add_32(&red_ndoubles, 1);
      851 +        atomic_inc_32(&red_ndoubles);
 853  852  
 854  853          if (fp - (uintptr_t)stkbase < RED_DEEP_THRESHOLD) {
 855  854                  /*
 856  855                   * Oh boy.  We're already deep within the mapped-in
 857  856                   * redzone page, and the caller is trying to prepare
 858  857                   * for a deep stack run.  We're running without a
 859  858                   * redzone right now:  if the caller plows off the
 860  859                   * end of the stack, it'll plow another thread or
 861  860                   * LWP structure.  That situation could result in
 862  861                   * a very hard-to-debug panic, so, in the spirit of
[... 567 lines elided ...]
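
Note that the atomic_cas_32() loop above, which records red_closest, is
correctly left untouched by this change: it is a lock-free "keep the
minimum" update, not an add of a constant. A standalone sketch of that
pattern, with hypothetical names:

	#include <sys/types.h>
	#include <sys/atomic.h>

	static volatile uint32_t demo_min = (uint32_t)-1;	/* hypothetical */

	static void
	demo_record_min(uint32_t val)
	{
		uint32_t cur;

		/*
		 * If the CAS loses a race, cur is re-read on the next
		 * pass and the loop either retries or exits because a
		 * smaller value is already installed.
		 */
		while (val < (cur = demo_min))
			(void) atomic_cas_32(&demo_min, cur, val);
	}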
1430 1429  /*
1431 1430   * During memory delete, turn off caches so that pages are not held.
1432 1431   * A better solution may be to unlock the pages while they are
1433 1432   * in the cache so that they may be collected naturally.
1434 1433   */
1435 1434  
1436 1435  /*ARGSUSED*/
1437 1436  static int
1438 1437  segkp_mem_config_pre_del(void *arg, pgcnt_t delta_pages)
1439 1438  {
1440      -        atomic_add_32(&segkp_indel, 1);
     1439 +        atomic_inc_32(&segkp_indel);
1441 1440          segkp_cache_free();
1442 1441          return (0);
1443 1442  }
1444 1443  
1445 1444  /*ARGSUSED*/
1446 1445  static void
1447 1446  segkp_mem_config_post_del(void *arg, pgcnt_t delta_pages, int cancelled)
1448 1447  {
1449      -        atomic_add_32(&segkp_indel, -1);
     1448 +        atomic_dec_32(&segkp_indel);
1450 1449  }
1451 1450  
1452 1451  static kphysm_setup_vector_t segkp_mem_config_vec = {
1453 1452          KPHYSM_SETUP_VECTOR_VERSION,
1454 1453          segkp_mem_config_post_add,
1455 1454          segkp_mem_config_pre_del,
1456 1455          segkp_mem_config_post_del,
1457 1456  };
1458 1457  
1459 1458  static void
1460 1459  segkpinit_mem_config(struct seg *seg)
1461 1460  {
1462 1461          int ret;
1463 1462  
1464 1463          ret = kphysm_setup_func_register(&segkp_mem_config_vec, (void *)seg);
1465 1464          ASSERT(ret == 0);
1466 1465  }
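
The segkp_indel counter adjusted above acts as a gate, matching the
comment in the source: while any memory delete is in progress, segkp
stops caching so that pages are not held. A sketch of that gate pattern;
the reader side here is illustrative, not the actual seg_kp code:

	#include <sys/types.h>
	#include <sys/atomic.h>

	static volatile uint32_t demo_indel;	/* hypothetical gate counter */

	static void
	demo_pre_del(void)
	{
		atomic_inc_32(&demo_indel);	/* delete starting: close gate */
	}

	static void
	demo_post_del(void)
	{
		atomic_dec_32(&demo_indel);	/* delete done or cancelled */
	}

	static int
	demo_may_cache(void)
	{
		/* Skip caching while any delete is pending. */
		return (demo_indel == 0);
	}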
    