5045 use atomic_{inc,dec}_* instead of atomic_add_*

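The change below is mechanical: every atomic_add_32(&x, 1) becomes atomic_inc_32(&x), and every atomic_add_32(&x, -1) becomes atomic_dec_32(&x). As a minimal sketch of the before/after pattern using the atomic_ops(3C) interface (the counter name example_count is made up here; it stands in for active_ptables and htable_dont_cache):

        #include <sys/types.h>
        #include <atomic.h>
        #include <stdio.h>

        /* hypothetical counter, standing in for active_ptables et al. */
        static volatile uint32_t example_count;

        int
        main(void)
        {
                /* old style: express +1/-1 as an explicit delta */
                atomic_add_32(&example_count, 1);
                atomic_add_32(&example_count, -1);

                /* new style: dedicated increment/decrement entry points */
                atomic_inc_32(&example_count);
                atomic_dec_32(&example_count);

                (void) printf("%u\n", example_count);   /* prints 0 */
                return (0);
        }

Both forms are atomic; the inc/dec variants simply state the intent directly and avoid the signed delta argument.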
--- old/usr/src/uts/i86pc/vm/htable.c
+++ new/usr/src/uts/i86pc/vm/htable.c
[ 295 lines elided ]
 296  296          }
 297  297  #endif /* DEBUG */
 298  298  
 299  299          pp = page_get_physical(seed);
 300  300          if (pp == NULL)
 301  301                  return (PFN_INVALID);
 302  302          ASSERT(PAGE_SHARED(pp));
 303  303          pfn = pp->p_pagenum;
 304  304          if (pfn == PFN_INVALID)
 305  305                  panic("ptable_alloc(): Invalid PFN!!");
 306      -        atomic_add_32(&active_ptables, 1);
      306 +        atomic_inc_32(&active_ptables);
 307  307          HATSTAT_INC(hs_ptable_allocs);
 308  308          return (pfn);
 309  309  }
 310  310  
 311  311  /*
 312  312   * Free an htable's associated page table page.  See the comments
 313  313   * for ptable_alloc().
 314  314   */
 315  315  static void
 316  316  ptable_free(pfn_t pfn)
 317  317  {
 318  318          page_t *pp = page_numtopp_nolock(pfn);
 319  319  
 320  320          /*
 321  321           * need to destroy the page used for the pagetable
 322  322           */
 323  323          ASSERT(pfn != PFN_INVALID);
 324  324          HATSTAT_INC(hs_ptable_frees);
 325      -        atomic_add_32(&active_ptables, -1);
      325 +        atomic_dec_32(&active_ptables);
 326  326          if (pp == NULL)
 327  327                  panic("ptable_free(): no page for pfn!");
 328  328          ASSERT(PAGE_SHARED(pp));
 329  329          ASSERT(pfn == pp->p_pagenum);
 330  330          ASSERT(!IN_XPV_PANIC());
 331  331  
 332  332          /*
 333  333           * Get an exclusive lock, might have to wait for a kmem reader.
 334  334           */
 335  335          if (!page_tryupgrade(pp)) {
[ 117 lines elided ]
 453  453           */
 454  454          if (htable_steal_passes == 0)
 455  455                  htable_steal_passes = 1;
 456  456          if (htable_steal_passes > mmu.ptes_per_table)
 457  457                  htable_steal_passes = mmu.ptes_per_table;
 458  458  
 459  459          /*
 460  460           * Loop through all user hats. The 1st pass takes cached htables that
 461  461           * aren't in use. The later passes steal by removing mappings, too.
 462  462           */
 463      -        atomic_add_32(&htable_dont_cache, 1);
      463 +        atomic_inc_32(&htable_dont_cache);
 464  464          for (pass = 0; pass <= htable_steal_passes && stolen < cnt; ++pass) {
 465  465                  threshold = pass * mmu.ptes_per_table / htable_steal_passes;
 466  466                  hat = kas.a_hat;
 467  467                  for (;;) {
 468  468  
 469  469                          /*
 470  470                           * Clear the victim flag and move to next hat
 471  471                           */
 472  472                          mutex_enter(&hat_list_lock);
 473  473                          if (hat != kas.a_hat) {
[ 188 lines elided ]
 662  662                                          break;
 663  663                                  }
 664  664                                  HTABLE_EXIT(h);
 665  665                                  if (higher != NULL)
 666  666                                          htable_release(higher);
 667  667                                  if (++h == hat->hat_num_hash)
 668  668                                          h = 0;
 669  669                          } while (stolen < cnt && h != h_start);
 670  670                  }
 671  671          }
 672      -        atomic_add_32(&htable_dont_cache, -1);
      672 +        atomic_dec_32(&htable_dont_cache);
 673  673          return (list);
 674  674  }
 675  675  
 676  676  /*
 677  677   * This is invoked from kmem when the system is low on memory.  We try
 678  678   * to free hments, htables, and ptables to improve the memory situation.
 679  679   */
 680  680  /*ARGSUSED*/
 681  681  static void
 682  682  htable_reap(void *handle)
[ 295 lines elided ]
 978  978  void
 979  979  htable_purge_hat(hat_t *hat)
 980  980  {
 981  981          htable_t *ht;
 982  982          int h;
 983  983  
 984  984          /*
 985  985           * Purge the htable cache if just reaping.
 986  986           */
 987  987          if (!(hat->hat_flags & HAT_FREEING)) {
 988      -                atomic_add_32(&htable_dont_cache, 1);
      988 +                atomic_inc_32(&htable_dont_cache);
 989  989                  for (;;) {
 990  990                          hat_enter(hat);
 991  991                          ht = hat->hat_ht_cached;
 992  992                          if (ht == NULL) {
 993  993                                  hat_exit(hat);
 994  994                                  break;
 995  995                          }
 996  996                          hat->hat_ht_cached = ht->ht_next;
 997  997                          hat_exit(hat);
 998  998                          htable_free(ht);
 999  999                  }
1000      -                atomic_add_32(&htable_dont_cache, -1);
     1000 +                atomic_dec_32(&htable_dont_cache);
1001 1001                  return;
1002 1002          }
1003 1003  
1004 1004          /*
1005 1005           * if freeing, no locking is needed
1006 1006           */
1007 1007          while ((ht = hat->hat_ht_cached) != NULL) {
1008 1008                  hat->hat_ht_cached = ht->ht_next;
1009 1009                  htable_free(ht);
1010 1010          }
[ 1446 lines elided ]