5045 use atomic_{inc,dec}_* instead of atomic_add_*

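The change is mechanical: every atomic_add_{16,32,long}() call whose delta is a literal 1 or -1 becomes the matching atomic_{inc,dec}_*() call, and the _nv (new-value) variants convert the same way; the 16-bit and ulong flavors (atomic_{inc,dec}_16, atomic_{inc,dec}_ulong) follow the same naming. A minimal sketch of the pattern, assuming only the standard <sys/atomic.h> interfaces:

    #include <sys/atomic.h>

    /* Each before/after pair below is semantically identical. */
    static void
    conversion_pattern(volatile uint32_t *cnt)
    {
            atomic_add_32(cnt, 1);          /* before */
            atomic_inc_32(cnt);             /* after */

            atomic_add_32(cnt, -1);         /* before */
            atomic_dec_32(cnt);             /* after */
    }

    static int
    conversion_pattern_nv(volatile uint32_t *cnt)
    {
            /* before: return (atomic_add_32_nv(cnt, -1) == 0); */
            return (atomic_dec_32_nv(cnt) == 0);
    }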
          --- old/usr/src/uts/sfmmu/vm/hat_sfmmu.c
          +++ new/usr/src/uts/sfmmu/vm/hat_sfmmu.c
[ 2143 lines elided ]
2144 2144  
2145 2145          if (flag == HAT_DUP_COW) {
2146 2146                  panic("hat_dup: HAT_DUP_COW not supported");
2147 2147          }
2148 2148  
2149 2149          if (flag == HAT_DUP_SRD && ((srdp = hat->sfmmu_srdp) != NULL)) {
2150 2150                  ASSERT(srdp->srd_evp != NULL);
2151 2151                  VN_HOLD(srdp->srd_evp);
2152 2152                  ASSERT(srdp->srd_refcnt > 0);
2153 2153                  newhat->sfmmu_srdp = srdp;
2154      -                atomic_add_32((volatile uint_t *)&srdp->srd_refcnt, 1);
     2154 +                atomic_inc_32((volatile uint_t *)&srdp->srd_refcnt);
2155 2155          }
2156 2156  
2157 2157          /*
2158 2158           * HAT_DUP_ALL flag is used after as (address space) duplication is done.
2159 2159           */
2160 2160          if (flag == HAT_DUP_ALL && ((srdp = newhat->sfmmu_srdp) != NULL)) {
2161 2161                  ASSERT(newhat->sfmmu_srdp->srd_refcnt >= 2);
2162 2162                  newhat->sfmmu_rtteflags = hat->sfmmu_rtteflags;
2163 2163                  if (hat->sfmmu_flags & HAT_4MTEXT_FLAG) {
2164 2164                          newhat->sfmmu_flags |= HAT_4MTEXT_FLAG;
[ 1055 lines elided ]
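One detail that survives the conversion unchanged: the (volatile uint_t *) cast on &srdp->srd_refcnt, presumably because srd_refcnt is not declared volatile while atomic_inc_32() takes a volatile uint32_t *. A sketch of why the cast is still needed, assuming the standard prototype:

    #include <sys/atomic.h>     /* void atomic_inc_32(volatile uint32_t *); */

    /* 'refcnt' stands in for a field declared without volatile. */
    static void
    take_ref(uint_t *refcnt)
    {
            atomic_inc_32((volatile uint_t *)refcnt);   /* cast matches the prototype */
    }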
3220 3220          ASSERT(remap || (sfhme->hme_page == NULL));
3221 3221  
3222 3222          /* if it is not a remap then hme->next better be NULL */
3223 3223          ASSERT((!remap) ? sfhme->hme_next == NULL : 1);
3224 3224  
3225 3225          if (flags & HAT_LOAD_LOCK) {
3226 3226                  if ((hmeblkp->hblk_lckcnt + 1) >= MAX_HBLK_LCKCNT) {
3227 3227                          panic("too high lckcnt-hmeblk %p",
3228 3228                              (void *)hmeblkp);
3229 3229                  }
3230      -                atomic_add_32(&hmeblkp->hblk_lckcnt, 1);
     3230 +                atomic_inc_32(&hmeblkp->hblk_lckcnt);
3231 3231  
3232 3232                  HBLK_STACK_TRACE(hmeblkp, HBLK_LOCK);
3233 3233          }
3234 3234  
3235 3235  #ifdef VAC
3236 3236          if (pp && PP_ISNC(pp)) {
3237 3237                  /*
3238 3238                   * If the physical page is marked to be uncacheable, like
3239 3239                   * by a vac conflict, make sure the new mapping is also
3240 3240                   * uncacheable.
[ 14 lines elided ]
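In the HAT_LOAD_LOCK hunk above (old lines 3226-3230), the MAX_HBLK_LCKCNT guard and the increment are two separate steps; presumably the surrounding serialization makes that benign, and the limit is a panic threshold rather than a hard invariant in any case. Purely as an illustration of how the two steps could be fused into one atomic operation (not something this code does or needs), using atomic_cas_32() from <sys/atomic.h>:

    /* Illustrative only: a limit-checked increment in a single atomic step. */
    static int
    inc_below_limit(volatile uint32_t *cnt, uint32_t limit)
    {
            uint32_t old;

            do {
                    old = *cnt;
                    if (old + 1 >= limit)
                            return (0);     /* refuse: would hit the limit */
            } while (atomic_cas_32(cnt, old, old + 1) != old);
            return (1);
    }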
3255 3255                          sfmmu_copytte(&sfhme->hme_tte, &tteold);
3256 3256                  }
3257 3257  #ifdef DEBUG
3258 3258                  chk_tte(&orig_old, &tteold, ttep, hmeblkp);
3259 3259  #endif /* DEBUG */
3260 3260          }
3261 3261          ASSERT(TTE_IS_VALID(&sfhme->hme_tte));
3262 3262  
3263 3263          if (!TTE_IS_VALID(&tteold)) {
3264 3264  
3265      -                atomic_add_16(&hmeblkp->hblk_vcnt, 1);
     3265 +                atomic_inc_16(&hmeblkp->hblk_vcnt);
3266 3266                  if (rid == SFMMU_INVALID_SHMERID) {
3267      -                        atomic_add_long(&sfmmup->sfmmu_ttecnt[size], 1);
     3267 +                        atomic_inc_ulong(&sfmmup->sfmmu_ttecnt[size]);
3268 3268                  } else {
3269 3269                          sf_srd_t *srdp = sfmmup->sfmmu_srdp;
3270 3270                          sf_region_t *rgnp = srdp->srd_hmergnp[rid];
3271 3271                          /*
3272 3272                           * We already accounted for region ttecnt's in sfmmu
3273 3273                           * during hat_join_region() processing. Here we
3274 3274                           * only update ttecnt's in region structure.
3275 3275                           */
3276      -                        atomic_add_long(&rgnp->rgn_ttecnt[size], 1);
     3276 +                        atomic_inc_ulong(&rgnp->rgn_ttecnt[size]);
3277 3277                  }
3278 3278          }
3279 3279  
3280 3280          myflt = (astosfmmu(curthread->t_procp->p_as) == sfmmup);
3281 3281          if (size > TTE8K && (flags & HAT_LOAD_SHARE) == 0 &&
3282 3282              sfmmup != ksfmmup) {
3283 3283                  uchar_t tteflag = 1 << size;
3284 3284                  if (rid == SFMMU_INVALID_SHMERID) {
3285 3285                          if (!(sfmmup->sfmmu_tteflags & tteflag)) {
3286 3286                                  hatlockp = sfmmu_hat_enter(sfmmup);
[ 87 lines elided ]
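The ttecnt bookkeeping in the hunk above (old lines 3266-3276) splits by mapping type; restated with comments:

    if (rid == SFMMU_INVALID_SHMERID) {
            /* private mapping: count against the process hat */
            atomic_inc_ulong(&sfmmup->sfmmu_ttecnt[size]);
    } else {
            /*
             * shared region: the hat's share was counted at
             * hat_join_region() time; only the region's count moves here
             */
            atomic_inc_ulong(&rgnp->rgn_ttecnt[size]);
    }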
3374 3374                              !SF_RGNMAP_TEST(scdp->scd_hmeregion_map, rid)) {
3375 3375                                  sfmmu_load_tsb(sfmmup, vaddr, &sfhme->hme_tte,
3376 3376                                      size);
3377 3377                          }
3378 3378                          sfmmu_hat_exit(hatlockp);
3379 3379                  }
3380 3380          }
3381 3381          if (pp) {
3382 3382                  if (!remap) {
3383 3383                          HME_ADD(sfhme, pp);
3384      -                        atomic_add_16(&hmeblkp->hblk_hmecnt, 1);
     3384 +                        atomic_inc_16(&hmeblkp->hblk_hmecnt);
3385 3385                          ASSERT(hmeblkp->hblk_hmecnt > 0);
3386 3386  
3387 3387                          /*
3388 3388                           * Cannot ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS)
3389 3389                           * see pageunload() for comment.
3390 3390                           */
3391 3391                  }
3392 3392                  sfmmu_mlist_exit(pml);
3393 3393          }
3394 3394  
[ 752 lines elided ]
4147 4147                                  goto readtte;
4148 4148  
4149 4149                          if (hmeblkp->hblk_lckcnt == 0)
4150 4150                                  panic("zero hblk lckcnt");
4151 4151  
4152 4152                          if (((uintptr_t)addr + TTEBYTES(ttesz)) >
4153 4153                              (uintptr_t)endaddr)
4154 4154                                  panic("can't unlock large tte");
4155 4155  
4156 4156                          ASSERT(hmeblkp->hblk_lckcnt > 0);
4157      -                        atomic_add_32(&hmeblkp->hblk_lckcnt, -1);
     4157 +                        atomic_dec_32(&hmeblkp->hblk_lckcnt);
4158 4158                          HBLK_STACK_TRACE(hmeblkp, HBLK_UNLOCK);
4159 4159                  } else {
4160 4160                          panic("sfmmu_hblk_unlock: invalid tte");
4161 4161                  }
4162 4162                  addr += TTEBYTES(ttesz);
4163 4163                  sfhme++;
4164 4164          }
4165 4165          return (addr);
4166 4166  }
4167 4167  
[ 1958 lines elided ]
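As with the increment path, atomic_dec_32() has no underflow protection of its own, which is why the zero check and panic precede it at old lines 4149-4150. An alternative sketch that decrements first and detects the wrap after the fact via the new-value variant (illustrative only, not what the code above does):

    /* A decrement past zero wraps a uint32_t to UINT32_MAX. */
    static void
    lckcnt_dec(volatile uint32_t *lckcnt)
    {
            if (atomic_dec_32_nv(lckcnt) == UINT32_MAX)
                    panic("zero hblk lckcnt");      /* wrapped past zero */
    }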
6126 6126                                  sfmmu_ttesync(sfmmup, addr, &tte, pp);
6127 6127                          }
6128 6128  
6129 6129                          /*
6130 6130                           * Ok- we invalidated the tte. Do the rest of the job.
6131 6131                           */
6132 6132                          ttecnt++;
6133 6133  
6134 6134                          if (flags & HAT_UNLOAD_UNLOCK) {
6135 6135                                  ASSERT(hmeblkp->hblk_lckcnt > 0);
6136      -                                atomic_add_32(&hmeblkp->hblk_lckcnt, -1);
     6136 +                                atomic_dec_32(&hmeblkp->hblk_lckcnt);
6137 6137                                  HBLK_STACK_TRACE(hmeblkp, HBLK_UNLOCK);
6138 6138                          }
6139 6139  
6140 6140                          /*
6141 6141                           * Normally we would need to flush the page
6142 6142                           * from the virtual cache at this point in
6143 6143                           * order to prevent a potential cache alias
6144 6144                           * inconsistency.
6145 6145                           * The particular scenario we need to worry
6146 6146                           * about is:
[ 33 lines elided ]
6180 6180                                   * Remove the hment from the mapping list
6181 6181                                   */
6182 6182                                  ASSERT(hmeblkp->hblk_hmecnt > 0);
6183 6183  
6184 6184                                  /*
6185 6185                                   * Again, we cannot
6186 6186                                   * ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS);
6187 6187                                   */
6188 6188                                  HME_SUB(sfhmep, pp);
6189 6189                                  membar_stst();
6190      -                                atomic_add_16(&hmeblkp->hblk_hmecnt, -1);
     6190 +                                atomic_dec_16(&hmeblkp->hblk_hmecnt);
6191 6191                          }
6192 6192  
6193 6193                          ASSERT(hmeblkp->hblk_vcnt > 0);
6194      -                        atomic_add_16(&hmeblkp->hblk_vcnt, -1);
     6194 +                        atomic_dec_16(&hmeblkp->hblk_vcnt);
6195 6195  
6196 6196                          ASSERT(hmeblkp->hblk_hmecnt || hmeblkp->hblk_vcnt ||
6197 6197                              !hmeblkp->hblk_lckcnt);
6198 6198  
6199 6199  #ifdef VAC
6200 6200                          if (pp && (pp->p_nrm & (P_KPMC | P_KPMS | P_TNC))) {
6201 6201                                  if (PP_ISTNC(pp)) {
6202 6202                                          /*
6203 6203                                           * If the page was temporarily
6204 6204                                           * uncached, try to recache
[ 1137 lines elided ]
7342 7342                          uint_t rid = hmeblkp->hblk_tag.htag_rid;
7343 7343                          sf_region_t *rgnp;
7344 7344                          ASSERT(SFMMU_IS_SHMERID_VALID(rid));
7345 7345                          ASSERT(rid < SFMMU_MAX_HME_REGIONS);
7346 7346                          ASSERT(srdp != NULL);
7347 7347                          rgnp = srdp->srd_hmergnp[rid];
7348 7348                          SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid);
7349 7349                          cpuset = sfmmu_rgntlb_demap(addr, rgnp, hmeblkp, 1);
7350 7350                          sfmmu_ttesync(NULL, addr, &tte, pp);
7351 7351                          ASSERT(rgnp->rgn_ttecnt[ttesz] > 0);
7352      -                        atomic_add_long(&rgnp->rgn_ttecnt[ttesz], -1);
     7352 +                        atomic_dec_ulong(&rgnp->rgn_ttecnt[ttesz]);
7353 7353                  } else {
7354 7354                          sfmmu_ttesync(sfmmup, addr, &tte, pp);
7355      -                        atomic_add_long(&sfmmup->sfmmu_ttecnt[ttesz], -1);
     7355 +                        atomic_dec_ulong(&sfmmup->sfmmu_ttecnt[ttesz]);
7356 7356  
7357 7357                          /*
7358 7358                           * We need to flush the page from the virtual cache
7359 7359                           * in order to prevent a virtual cache alias
7360 7360                           * inconsistency. The particular scenario we need
7361 7361                           * to worry about is:
7362 7362                           * Given:  va1 and va2 are two virtual addresses that
7363 7363                           * alias and will map the same physical address.
7364 7364                           * 1.   mapping exists from va1 to pa and data has
7365 7365                           *      been read into the cache.
[ 40 lines elided ]
7406 7406                   * we did the HME_SUB() above. Hmecnt is now maintained
7407 7407                   * by cas only; no lock guarantees its value. The only
7408 7408                   * guarantee we have is that the hmecnt should not be less
7409 7409                   * than what it should be, so the hblk will not be taken
7410 7410                   * away. It's also important that we decrement the hmecnt
7411 7411                   * after we are done with hmeblkp so that this hmeblk won't
7412 7412                   * be stolen.
7413 7413                   */
7414 7414                  ASSERT(hmeblkp->hblk_hmecnt > 0);
7415 7415                  ASSERT(hmeblkp->hblk_vcnt > 0);
7416      -                atomic_add_16(&hmeblkp->hblk_vcnt, -1);
7417      -                atomic_add_16(&hmeblkp->hblk_hmecnt, -1);
     7416 +                atomic_dec_16(&hmeblkp->hblk_vcnt);
     7417 +                atomic_dec_16(&hmeblkp->hblk_hmecnt);
7418 7418                  /*
7419 7419                   * This is bug 4063182.
7420 7420                   * XXX: fixme
7421 7421                   * ASSERT(hmeblkp->hblk_hmecnt || hmeblkp->hblk_vcnt ||
7422 7422                   *      !hmeblkp->hblk_lckcnt);
7423 7423                   */
7424 7424          } else {
7425 7425                  panic("invalid tte? pp %p &tte %p",
7426 7426                      (void *)pp, (void *)&tte);
7427 7427          }
[ 6378 lines elided ]
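The comment above (old lines 7406-7413) explains the discipline visible both here and in the hunk at old line 6190: hblk_hmecnt is maintained lock-free, so the hment must be fully unlinked and those stores ordered ahead of the count update, because a zero count makes the hmeblk fair game for stealing. The sequence from the earlier hunk, restated with comments (a reading of the code, not new logic):

    HME_SUB(sfhmep, pp);                    /* unlink the hment from the page's list */
    membar_stst();                          /* order the list stores first ... */
    atomic_dec_16(&hmeblkp->hblk_hmecnt);   /* ... then let the count drop; once
                                             * hmecnt/vcnt can read as zero the
                                             * hmeblk may be stolen */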
13806 13806  
13807 13807          VN_HOLD(evp);
13808 13808  
13809 13809          if (srd_buckets[hash].srdb_srdp != NULL) {
13810 13810                  mutex_enter(&srd_buckets[hash].srdb_lock);
13811 13811                  for (srdp = srd_buckets[hash].srdb_srdp; srdp != NULL;
13812 13812                      srdp = srdp->srd_hash) {
13813 13813                          if (srdp->srd_evp == evp) {
13814 13814                                  ASSERT(srdp->srd_refcnt >= 0);
13815 13815                                  sfmmup->sfmmu_srdp = srdp;
13816      -                                atomic_add_32(
13817      -                                    (volatile uint_t *)&srdp->srd_refcnt, 1);
     13816 +                                atomic_inc_32(
     13817 +                                    (volatile uint_t *)&srdp->srd_refcnt);
13818 13818                                  mutex_exit(&srd_buckets[hash].srdb_lock);
13819 13819                                  return;
13820 13820                          }
13821 13821                  }
13822 13822                  mutex_exit(&srd_buckets[hash].srdb_lock);
13823 13823          }
13824 13824          newsrdp = kmem_cache_alloc(srd_cache, KM_SLEEP);
13825 13825          ASSERT(newsrdp->srd_next_ismrid == 0 && newsrdp->srd_next_hmerid == 0);
13826 13826  
13827 13827          newsrdp->srd_evp = evp;
13828 13828          newsrdp->srd_refcnt = 1;
13829 13829          newsrdp->srd_hmergnfree = NULL;
13830 13830          newsrdp->srd_ismrgnfree = NULL;
13831 13831  
13832 13832          mutex_enter(&srd_buckets[hash].srdb_lock);
13833 13833          for (srdp = srd_buckets[hash].srdb_srdp; srdp != NULL;
13834 13834              srdp = srdp->srd_hash) {
13835 13835                  if (srdp->srd_evp == evp) {
13836 13836                          ASSERT(srdp->srd_refcnt >= 0);
13837 13837                          sfmmup->sfmmu_srdp = srdp;
13838      -                        atomic_add_32((volatile uint_t *)&srdp->srd_refcnt, 1);
     13838 +                        atomic_inc_32((volatile uint_t *)&srdp->srd_refcnt);
13839 13839                          mutex_exit(&srd_buckets[hash].srdb_lock);
13840 13840                          kmem_cache_free(srd_cache, newsrdp);
13841 13841                          return;
13842 13842                  }
13843 13843          }
13844 13844          newsrdp->srd_hash = srd_buckets[hash].srdb_srdp;
13845 13845          srd_buckets[hash].srdb_srdp = newsrdp;
13846 13846          sfmmup->sfmmu_srdp = newsrdp;
13847 13847  
13848 13848          mutex_exit(&srd_buckets[hash].srdb_lock);
[ 16 lines elided ]
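The two bucket walks around the kmem_cache_alloc() above are the standard optimistic find-or-create shape: search under the bucket lock, drop it to allocate (KM_SLEEP may block), then search again before inserting in case another thread won the race. A condensed sketch of that shape; every name here is hypothetical, not the hat_sfmmu.c code itself:

    #include <sys/atomic.h>
    #include <sys/kmem.h>
    #include <sys/mutex.h>

    typedef struct obj {
            struct obj        *next;
            uintptr_t         key;
            volatile uint32_t refcnt;
    } obj_t;

    extern kmem_cache_t *obj_cache;         /* hypothetical cache */

    static obj_t *
    find_or_create(kmutex_t *lock, obj_t **headp, uintptr_t key)
    {
            obj_t *p, *newp;

            mutex_enter(lock);
            for (p = *headp; p != NULL; p = p->next) {
                    if (p->key == key) {
                            atomic_inc_32(&p->refcnt);  /* found: take a hold */
                            mutex_exit(lock);
                            return (p);
                    }
            }
            mutex_exit(lock);               /* drop the lock: allocation may sleep */

            newp = kmem_cache_alloc(obj_cache, KM_SLEEP);
            newp->key = key;
            newp->refcnt = 1;

            mutex_enter(lock);              /* re-check: someone may have raced us */
            for (p = *headp; p != NULL; p = p->next) {
                    if (p->key == key) {
                            atomic_inc_32(&p->refcnt);
                            mutex_exit(lock);
                            kmem_cache_free(obj_cache, newp);
                            return (p);
                    }
            }
            newp->next = *headp;            /* we won: insert and keep our ref */
            *headp = newp;
            mutex_exit(lock);
            return (newp);
    }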
13865 13865  
13866 13866          ASSERT(sfmmup != ksfmmup);
13867 13867          ASSERT(srdp != NULL);
13868 13868          ASSERT(srdp->srd_refcnt > 0);
13869 13869          ASSERT(sfmmup->sfmmu_scdp == NULL);
13870 13870          ASSERT(sfmmup->sfmmu_free == 1);
13871 13871  
13872 13872          sfmmup->sfmmu_srdp = NULL;
13873 13873          evp = srdp->srd_evp;
13874 13874          ASSERT(evp != NULL);
13875      -        if (atomic_add_32_nv(
13876      -            (volatile uint_t *)&srdp->srd_refcnt, -1)) {
     13875 +        if (atomic_dec_32_nv((volatile uint_t *)&srdp->srd_refcnt)) {
13877 13876                  VN_RELE(evp);
13878 13877                  return;
13879 13878          }
13880 13879  
13881 13880          hash = SRD_HASH_FUNCTION(evp);
13882 13881          mutex_enter(&srd_buckets[hash].srdb_lock);
13883 13882          for (prev_srdpp = &srd_buckets[hash].srdb_srdp;
13884 13883              (srdp = *prev_srdpp) != NULL; prev_srdpp = &srdp->srd_hash) {
13885 13884                  if (srdp->srd_evp == evp) {
13886 13885                          break;
[ 196 lines elided ]
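The sfmmu_srd_free() hunk above (old lines 13875-13876) is the one spot where the conversion has to preserve a return value: atomic_add_32_nv(ptr, -1) and atomic_dec_32_nv(ptr) both return the new (post-operation) value, so the zero test keeps working. The release half of the pattern, restated with comments:

    evp = srdp->srd_evp;
    if (atomic_dec_32_nv((volatile uint_t *)&srdp->srd_refcnt) != 0) {
            VN_RELE(evp);   /* drop the vnode hold; other hats remain */
            return;
    }
    /* new value was 0: we dropped the last hold; unhash and free below */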
14083 14082  
14084 14083  rfound:
14085 14084          if (rgnp != NULL) {
14086 14085                  ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == r_type);
14087 14086                  ASSERT(rgnp->rgn_cb_function == r_cb_function);
14088 14087                  ASSERT(rgnp->rgn_refcnt >= 0);
14089 14088                  rid = rgnp->rgn_id;
14090 14089                  ASSERT(rid < maxids);
14091 14090                  ASSERT(rarrp[rid] == rgnp);
14092 14091                  ASSERT(rid < *nextidp);
14093      -                atomic_add_32((volatile uint_t *)&rgnp->rgn_refcnt, 1);
     14092 +                atomic_inc_32((volatile uint_t *)&rgnp->rgn_refcnt);
14094 14093                  mutex_exit(&srdp->srd_mutex);
14095 14094                  if (new_rgnp != NULL) {
14096 14095                          kmem_cache_free(region_cache, new_rgnp);
14097 14096                  }
14098 14097                  if (r_type == SFMMU_REGION_HME) {
14099 14098                          int myjoin =
14100 14099                              (sfmmup == astosfmmu(curthread->t_procp->p_as));
14101 14100  
14102 14101                          sfmmu_link_to_hmeregion(sfmmup, rgnp);
14103 14102                          /*
[ 329 lines elided ]
14433 14432                  } else {
14434 14433                          sfmmu_check_page_sizes(sfmmup, 0);
14435 14434                  }
14436 14435          }
14437 14436  
14438 14437          if (r_type == SFMMU_REGION_HME) {
14439 14438                  sfmmu_unlink_from_hmeregion(sfmmup, rgnp);
14440 14439          }
14441 14440  
14442 14441          r_obj = rgnp->rgn_obj;
14443      -        if (atomic_add_32_nv((volatile uint_t *)&rgnp->rgn_refcnt, -1)) {
     14442 +        if (atomic_dec_32_nv((volatile uint_t *)&rgnp->rgn_refcnt)) {
14444 14443                  return;
14445 14444          }
14446 14445  
14447 14446          /*
14448 14447           * Looks like nobody uses this region anymore. Free it.
14449 14448           */
14450 14449          rhash = RGN_HASH_FUNCTION(r_obj);
14451 14450          mutex_enter(&srdp->srd_mutex);
14452 14451          for (prev_rgnpp = &srdp->srd_rgnhash[rhash];
14453 14452              (cur_rgnp = *prev_rgnpp) != NULL;
[ 64 lines elided ]
14518 14517          ASSERT(rid < srdp->srd_next_hmerid);
14519 14518          ASSERT(SFMMU_IS_SHMERID_VALID(rid));
14520 14519          ASSERT(rid < SFMMU_MAX_HME_REGIONS);
14521 14520  
14522 14521          rgnp = srdp->srd_hmergnp[rid];
14523 14522          ASSERT(rgnp->rgn_refcnt > 0);
14524 14523          ASSERT(rgnp->rgn_id == rid);
14525 14524          ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == SFMMU_REGION_HME);
14526 14525          ASSERT(!(rgnp->rgn_flags & SFMMU_REGION_FREE));
14527 14526  
14528      -        atomic_add_32((volatile uint_t *)&rgnp->rgn_refcnt, 1);
     14527 +        atomic_inc_32((volatile uint_t *)&rgnp->rgn_refcnt);
14529 14528  
14530 14529          /* LINTED: constant in conditional context */
14531 14530          SFMMU_HMERID2RLINKP(sfmmup, rid, rlink, 1, 0);
14532 14531          ASSERT(rlink != NULL);
14533 14532          mutex_enter(&rgnp->rgn_mutex);
14534 14533          ASSERT(rgnp->rgn_sfmmu_head != NULL);
14535 14534          /* LINTED: constant in conditional context */
14536 14535          SFMMU_HMERID2RLINKP(rgnp->rgn_sfmmu_head, rid, hrlink, 0, 0);
14537 14536          ASSERT(hrlink != NULL);
14538 14537          ASSERT(hrlink->prev == NULL);
[ 709 lines elided ]
15248 15247          mutex_enter(&srdp->srd_scd_mutex);
15249 15248          for (scdp = srdp->srd_scdp; scdp != NULL;
15250 15249              scdp = scdp->scd_next) {
15251 15250                  SF_RGNMAP_EQUAL(&scdp->scd_region_map,
15252 15251                      &sfmmup->sfmmu_region_map, ret);
15253 15252                  if (ret == 1) {
15254 15253                          SF_SCD_INCR_REF(scdp);
15255 15254                          mutex_exit(&srdp->srd_scd_mutex);
15256 15255                          sfmmu_join_scd(scdp, sfmmup);
15257 15256                          ASSERT(scdp->scd_refcnt >= 2);
15258      -                        atomic_add_32((volatile uint32_t *)
15259      -                            &scdp->scd_refcnt, -1);
     15257 +                        atomic_dec_32((volatile uint32_t *)&scdp->scd_refcnt);
15260 15258                          return;
15261 15259                  } else {
15262 15260                          /*
15263 15261                           * If the sfmmu region map is a subset of the scd
15264 15262                           * region map, then the assumption is that this process
15265 15263                           * will continue attaching to ISM segments until the
15266 15264                           * region maps are equal.
15267 15265                           */
15268 15266                          SF_RGNMAP_IS_SUBSET(&scdp->scd_region_map,
15269 15267                              &sfmmup->sfmmu_region_map, ret);
[ 24 lines elided ]
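The SF_SCD_INCR_REF()/atomic_dec_32() bracket in the hunk above, and again in the new-SCD hunk below, is a hold-across-call: the extra reference keeps the SCD from disappearing once srd_scd_mutex is dropped, and is released after sfmmu_join_scd() has apparently taken its own (hence the >= 2 assertion). Restated with comments (a reading of the code, not new logic):

    SF_SCD_INCR_REF(scdp);          /* pin the scd before dropping the list lock */
    mutex_exit(&srdp->srd_scd_mutex);
    sfmmu_join_scd(scdp, sfmmup);   /* the join takes its own reference */
    ASSERT(scdp->scd_refcnt >= 2);  /* ours plus (at least) the join's */
    atomic_dec_32((volatile uint32_t *)&scdp->scd_refcnt);  /* drop the pin */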
15294 15292          /*
15295 15293           * Link scd onto srd_scdp list and scd sfmmu onto region/iment lists.
15296 15294           */
15297 15295          sfmmu_link_scd_to_regions(srdp, new_scdp);
15298 15296          sfmmu_add_scd(&srdp->srd_scdp, new_scdp);
15299 15297          SFMMU_STAT_ADD(sf_create_scd, 1);
15300 15298  
15301 15299          mutex_exit(&srdp->srd_scd_mutex);
15302 15300          sfmmu_join_scd(new_scdp, sfmmup);
15303 15301          ASSERT(new_scdp->scd_refcnt >= 2);
15304      -        atomic_add_32((volatile uint32_t *)&new_scdp->scd_refcnt, -1);
     15302 +        atomic_dec_32((volatile uint32_t *)&new_scdp->scd_refcnt);
15305 15303  }
15306 15304  
15307 15305  /*
15308 15306   * This routine is called by a process to remove itself from an SCD. It is
15309 15307   * either called when the process has detached from a segment or from
15310 15308   * hat_free_start() as a result of calling exit.
15311 15309   */
15312 15310  static void
15313 15311  sfmmu_leave_scd(sfmmu_t *sfmmup, uchar_t r_type)
15314 15312  {
[ 541 lines elided ]