patch cpu-pause-func-deglobalize

--- old/usr/src/uts/common/os/cpu.c
+++ new/usr/src/uts/common/os/cpu.c
[... 159 lines elided ...]
 160  160   * Variables used in pause_cpus().
 161  161   */
 162  162  static volatile char safe_list[NCPU];
 163  163  
 164  164  static struct _cpu_pause_info {
 165  165          int             cp_spl;         /* spl saved in pause_cpus() */
 166  166          volatile int    cp_go;          /* Go signal sent after all ready */
 167  167          int             cp_count;       /* # of CPUs to pause */
 168  168          ksema_t         cp_sem;         /* synch pause_cpus & cpu_pause */
 169  169          kthread_id_t    cp_paused;
      170 +        void            *(*cp_func)(void *);
 170  171  } cpu_pause_info;
 171  172  
 172  173  static kmutex_t pause_free_mutex;
 173  174  static kcondvar_t pause_free_cv;
 174  175  
 175      -void *(*cpu_pause_func)(void *) = NULL;
 176      -
 177  176  
 178  177  static struct cpu_sys_stats_ks_data {
 179  178          kstat_named_t cpu_ticks_idle;
 180  179          kstat_named_t cpu_ticks_user;
 181  180          kstat_named_t cpu_ticks_kernel;
 182  181          kstat_named_t cpu_ticks_wait;
 183  182          kstat_named_t cpu_nsec_idle;
 184  183          kstat_named_t cpu_nsec_user;
 185  184          kstat_named_t cpu_nsec_kernel;
 186  185          kstat_named_t cpu_nsec_dtrace;
[... 598 lines elided ...]
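
Taken together, the hunk above moves the pause-time hook out of the writable global cpu_pause_func and into the pause request itself. A minimal sketch of the two calling conventions, assuming a hypothetical hook my_pause_hook() (the real consumer, cpr_suspend_cpus(), is not part of this file):

    /* Before this patch: publish the hook in a global, then pause. */
    extern void *(*cpu_pause_func)(void *);

    static void *
    my_pause_hook(void *arg)            /* hypothetical; arg is the CPU index */
    {
            /* per-CPU suspend/resume work would go here */
            return (NULL);
    }

    void
    old_convention(void)                /* caller holds cpu_lock */
    {
            cpu_pause_func = my_pause_hook; /* global, visible to all callers */
            pause_cpus(NULL);
            start_cpus();
            cpu_pause_func = NULL;
    }

    /* After this patch: the hook is scoped to one pause_cpus() call. */
    void
    new_convention(void)                /* caller holds cpu_lock */
    {
            pause_cpus(NULL, my_pause_hook);
            start_cpus();
    }
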
 785  784                  while (cpi->cp_go == 0)
 786  785                          ;
 787  786                  /*
 788  787                   * Even though we are at the highest disp prio, we need
 789  788                   * to block out all interrupts below LOCK_LEVEL so that
 790  789                   * an intr doesn't come in, wake up a thread, and call
 791  790                   * setbackdq/setfrontdq.
 792  791                   */
 793  792                  s = splhigh();
 794  793                  /*
 795      -                 * if cpu_pause_func() has been set then call it using
 796      -                 * index as the argument, currently only used by
 797      -                 * cpr_suspend_cpus().  This function is used as the
 798      -                 * code to execute on the "paused" cpu's when a machine
 799      -                 * comes out of a sleep state and CPU's were powered off.
 800      -                 * (could also be used for hotplugging CPU's).
      794 +                 * If cp_func has been set, call it with the CPU index
      795 +                 * as the argument; currently this is only used by
      796 +                 * cpr_suspend_cpus().  The function provides the code to
      797 +                 * execute on the "paused" CPUs when a machine comes out
      798 +                 * of a sleep state in which the CPUs were powered off.
      799 +                 * (It could also be used for hotplugging CPUs.)
 801  800                   */
 802      -                if (cpu_pause_func != NULL)
 803      -                        (*cpu_pause_func)((void *)lindex);
      801 +                if (cpi->cp_func != NULL)
      802 +                        (*cpi->cp_func)((void *)lindex);
 804  803  
 805  804                  mach_cpu_pause(safe);
 806  805  
 807  806                  splx(s);
 808  807                  /*
 809  808                   * Waiting is at an end. Switch out of cpu_pause
 810  809                   * loop and resume useful work.
 811  810                   */
 812  811                  swtch();
 813  812          }
[... 167 lines elided ...]
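
The contract visible in this hunk: cp_func runs on every paused CPU at splhigh(), receives that CPU's safe_list index cast through void *, and (per the pause_cpus() block comment below) must neither block nor acquire adaptive locks. A hedged sketch of a conforming callback; the names are illustrative, not from this patch:

    /* Hypothetical pause-time callback; not part of this webrev. */
    static volatile uint32_t pause_visits[NCPU];

    static void *
    count_pause_visits(void *arg)
    {
            uintptr_t lindex = (uintptr_t)arg;  /* index from cpu_pause() */

            /*
             * Runs at splhigh() on a paused CPU: touch only per-CPU or
             * lock-free state; no mutexes, no blocking, no wakeups.
             */
            pause_visits[lindex]++;
            return (NULL);
    }
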
 981  980   * lock, code executed with CPUs paused must not acquire adaptive
 982  981   * (or low-level spin) locks.  Also, such code must not block,
 983  982   * since the thread that is supposed to initiate the wakeup may
 984  983   * never run.
 985  984   *
 986  985   * With a few exceptions, the restrictions on code executed with CPUs
 987  986   * paused match those for code executed at high-level interrupt
 988  987   * context.
 989  988   */
 990  989  void
 991      -pause_cpus(cpu_t *off_cp)
      990 +pause_cpus(cpu_t *off_cp, void *(*func)(void *))
 992  991  {
 993  992          processorid_t   cpu_id;
 994  993          int             i;
 995  994          struct _cpu_pause_info  *cpi = &cpu_pause_info;
 996  995  
 997  996          ASSERT(MUTEX_HELD(&cpu_lock));
 998  997          ASSERT(cpi->cp_paused == NULL);
 999  998          cpi->cp_count = 0;
1000  999          cpi->cp_go = 0;
1001 1000          for (i = 0; i < NCPU; i++)
1002 1001                  safe_list[i] = PAUSE_IDLE;
1003 1002          kpreempt_disable();
1004 1003  
     1004 +        cpi->cp_func = func;
     1005 +
1005 1006          /*
1006 1007           * If running on the cpu that is going offline, get off it.
1007 1008           * This is so that it won't be necessary to rechoose a CPU
1008 1009           * when done.
1009 1010           */
1010 1011          if (CPU == off_cp)
1011 1012                  cpu_id = off_cp->cpu_next_part->cpu_id;
1012 1013          else
1013 1014                  cpu_id = CPU->cpu_id;
1014 1015          affinity_set(cpu_id);
[... 184 lines elided ...]
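
For callers that need no pause-time hook, the new signature simply takes NULL, as the updated call sites below show. A minimal sketch of the usual pause/update/start bracket, where update_shared_state() is a hypothetical stand-in for a short, lock-free update:

    void
    update_under_pause(void)
    {
            mutex_enter(&cpu_lock);
            pause_cpus(NULL, NULL);         /* no callback needed */

            /*
             * All other CPUs now spin in cpu_pause(); structures they
             * traverse may be rewritten without locks, provided this
             * code neither blocks nor takes adaptive locks.
             */
            update_shared_state();          /* hypothetical */

            start_cpus();
            mutex_exit(&cpu_lock);
    }
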
1199 1200  
1200 1201          ASSERT(MUTEX_HELD(&cpu_lock));
1201 1202  
1202 1203          /*
1203 1204           * Put all the cpus into a known safe place.
1204 1205           * No mutexes can be entered while CPUs are paused.
1205 1206           */
1206 1207          error = mp_cpu_start(cp);       /* arch-dep hook */
1207 1208          if (error == 0) {
1208 1209                  pg_cpupart_in(cp, cp->cpu_part);
1209      -                pause_cpus(NULL);
     1210 +                pause_cpus(NULL, NULL);
1210 1211                  cpu_add_active_internal(cp);
1211 1212                  if (cp->cpu_flags & CPU_FAULTED) {
1212 1213                          cp->cpu_flags &= ~CPU_FAULTED;
1213 1214                          mp_cpu_faulted_exit(cp);
1214 1215                  }
1215 1216                  cp->cpu_flags &= ~(CPU_QUIESCED | CPU_OFFLINE | CPU_FROZEN |
1216 1217                      CPU_SPARE);
1217 1218                  CPU_NEW_GENERATION(cp);
1218 1219                  start_cpus();
1219 1220                  cpu_stats_kstat_create(cp);
[... 178 lines elided ...]
1398 1399  
1399 1400          /*
1400 1401           * If that all worked, take the CPU offline and decrement
1401 1402           * ncpus_online.
1402 1403           */
1403 1404          if (error == 0) {
1404 1405                  /*
1405 1406                   * Put all the cpus into a known safe place.
1406 1407                   * No mutexes can be entered while CPUs are paused.
1407 1408                   */
1408      -                pause_cpus(cp);
     1409 +                pause_cpus(cp, NULL);
1409 1410                  /*
1410 1411                   * Repeat the operation, if necessary, to make sure that
1411 1412                   * all outstanding low-level interrupts run to completion
1412 1413                   * before we set the CPU_QUIESCED flag.  It's also possible
 1413 1414           * that a thread is weakly bound to the cpu despite our raising
1414 1415                   * cpu_inmotion above since it may have loaded that
1415 1416                   * value before the barrier became visible (this would have
1416 1417                   * to be the thread that was on the target cpu at the time
1417 1418                   * we raised the barrier).
1418 1419                   */
[... 332 lines elided ...]
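
The "repeat the operation" comment corresponds to a retry loop around the pause. A sketch of its shape only; cpu_fully_quiesced() is a hypothetical predicate standing in for the real checks on outstanding low-level interrupts and weakly bound threads:

    /* Illustrative shape only; not the verbatim cpu_offline() logic. */
    for (;;) {
            pause_cpus(cp, NULL);
            if (cpu_fully_quiesced(cp))     /* hypothetical check */
                    break;
            start_cpus();   /* let intrs and bound threads drain, retry */
    }
    /* ... set CPU_QUIESCED while still paused, then ... */
    start_cpus();
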
1751 1752           * be done with the cpu_lock held or kernel preemption
1752 1753           * disabled.  This check relies upon the fact that
 1753 1754          * old cpu structures are not freed or cleared after
 1754 1755          * they are removed from the cpu_list.
1755 1756           *
1756 1757           * Note that the clock code walks the cpu list dereferencing
1757 1758           * the cpu_part pointer, so we need to initialize it before
1758 1759           * adding the cpu to the list.
1759 1760           */
1760 1761          cp->cpu_part = &cp_default;
1761      -        (void) pause_cpus(NULL);
     1762 +        (void) pause_cpus(NULL, NULL);
1762 1763          cp->cpu_next = cpu_list;
1763 1764          cp->cpu_prev = cpu_list->cpu_prev;
1764 1765          cpu_list->cpu_prev->cpu_next = cp;
1765 1766          cpu_list->cpu_prev = cp;
1766 1767          start_cpus();
1767 1768  
1768 1769          for (seqid = 0; CPU_IN_SET(cpu_seqid_inuse, seqid); seqid++)
1769 1770                  continue;
1770 1771          CPUSET_ADD(cpu_seqid_inuse, seqid);
1771 1772          cp->cpu_seqid = seqid;
[... 74 lines elided ...]
1846 1847           * other routines holding cpu pointers while possibly sleeping
1847 1848           * must be sure to call kpreempt_disable before processing the
1848 1849           * list and be sure to check that the cpu has not been deleted
1849 1850           * after any sleeps (check cp->cpu_next != NULL). We guarantee
1850 1851           * to keep the deleted cpu structure around.
1851 1852           *
1852 1853           * Note that this MUST be done AFTER cpu_available
1853 1854           * has been updated so that we don't waste time
1854 1855           * trying to pause the cpu we're trying to delete.
1855 1856           */
1856      -        (void) pause_cpus(NULL);
     1857 +        (void) pause_cpus(NULL, NULL);
1857 1858  
1858 1859          cpnext = cp->cpu_next;
1859 1860          cp->cpu_prev->cpu_next = cp->cpu_next;
1860 1861          cp->cpu_next->cpu_prev = cp->cpu_prev;
1861 1862          if (cp == cpu_list)
1862 1863                  cpu_list = cpnext;
1863 1864  
1864 1865          /*
1865 1866           * Signals that the cpu has been deleted (see above).
1866 1867           */
[... 51 lines elided ...]
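
The comment at the top of this hunk spells out the protocol for walking cpu_list without cpu_lock: disable kernel preemption for the traversal and, after any sleep, confirm cp->cpu_next is still non-NULL, since it is cleared for deleted CPUs. A sketch of a non-sleeping walker under that protocol (hypothetical helper, not in cpu.c):

    /* Hypothetical lockless walker honoring the protocol above. */
    void
    walk_cpus(void (*fn)(cpu_t *))
    {
            cpu_t *cp;

            kpreempt_disable();     /* a concurrent pause_cpus() can't finish */
            cp = cpu_list;
            do {
                    fn(cp);                 /* fn must not block */
                    cp = cp->cpu_next;
            } while (cp != cpu_list);
            kpreempt_enable();
    }
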
1918 1919  
1919 1920  /*
1920 1921   * Add a CPU to the list of active CPUs.
1921 1922   *      This is called from machine-dependent layers when a new CPU is started.
1922 1923   */
1923 1924  void
1924 1925  cpu_add_active(cpu_t *cp)
1925 1926  {
1926 1927          pg_cpupart_in(cp, cp->cpu_part);
1927 1928  
1928      -        pause_cpus(NULL);
     1929 +        pause_cpus(NULL, NULL);
1929 1930          cpu_add_active_internal(cp);
1930 1931          start_cpus();
1931 1932  
1932 1933          cpu_stats_kstat_create(cp);
1933 1934          cpu_create_intrstat(cp);
1934 1935          lgrp_kstat_create(cp);
1935 1936          cpu_state_change_notify(cp->cpu_id, CPU_INIT);
1936 1937  }
1937 1938  
1938 1939  
[... 1512 lines elided ...]