Print this page
5045 use atomic_{inc,dec}_* instead of atomic_add_*


 257                                                                         \
 258         while (_s < (ep)) {                                          \
 259                 if ((_c = *_s) == '%') {                                \
 260                         H2A(_s, (ep), _c);                              \
 261                 }                                                       \
 262                 CHASH(hv, _c);                                          \
 263                 _s++;                                                   \
 264         }                                                               \
 265 }
 266 
/* Fold a 32-bit URI hash value into a bucket index for hash table a or b. */
 267 #define URI_HASH_IX(hix, which) (hix) = (hix) % (uri_hash_sz[(which)])
 268 
/*
 * URI_HASH_MIGRATE(from, hp, to): with bucket *hp of hash table `from`
 * locked, unlink every uri_desc_t chained on it, recompute each URI's
 * bucket from its saved hvalue, and relink it (new-bucket lock held)
 * onto the matching bucket of hash table `to`, zeroing its hit count.
 * Per-table element counts are maintained with atomic_add_32(..., -1/1)
 * — the pattern changeset 5045 replaces with atomic_dec_32/atomic_inc_32.
 */
 269 #define URI_HASH_MIGRATE(from, hp, to) {                                \
 270         uri_desc_t      *_nuri;                                         \
 271         uint32_t        _nhix;                                          \
 272         uri_hash_t      *_nhp;                                          \
 273                                                                         \
 274         mutex_enter(&(hp)->lock);                                        \
 275         while ((_nuri = (hp)->list) != NULL) {                               \
 276                 (hp)->list = _nuri->hash;                         \
 277                 atomic_add_32(&uri_hash_cnt[(from)], -1);           \
 278                 atomic_add_32(&uri_hash_cnt[(to)], 1);                      \
 279                 _nhix = _nuri->hvalue;                                       \
 280                 URI_HASH_IX(_nhix, to);                                 \
 281                 _nhp = &uri_hash_ab[(to)][_nhix];                   \
 282                 mutex_enter(&_nhp->lock);                                \
 283                 _nuri->hash = _nhp->list;                         \
 284                 _nhp->list = _nuri;                                  \
 285                 _nuri->hit = 0;                                              \
 286                 mutex_exit(&_nhp->lock);                         \
 287         }                                                               \
 288         mutex_exit(&(hp)->lock);                                 \
 289 }
 290 
/*
 * URI_HASH_UNLINK(cur, new, hp, puri, uri): unlink `uri` from the chain
 * on bucket *hp (puri is its predecessor, or NULL when uri heads the
 * list), then atomically drop the `cur` table's element count.  If the
 * count reaches zero and a successor table exists, free the now-empty
 * `cur` table and flip uri_hash_which to `new`; otherwise remember *hp
 * as the table's LRU bucket.  Uses atomic_add_32_nv(..., -1) — the form
 * changeset 5045 rewrites as atomic_dec_32_nv.
 */
 291 #define URI_HASH_UNLINK(cur, new, hp, puri, uri) {                      \
 292         if ((puri) != NULL) {                                           \
 293                 (puri)->hash = (uri)->hash;                               \
 294         } else {                                                        \
 295                 (hp)->list = (uri)->hash;                         \
 296         }                                                               \
 297         if (atomic_add_32_nv(&uri_hash_cnt[(cur)], -1) == 0 &&              \
 298             uri_hash_ab[(new)] != NULL) {                               \
 299                 kmem_free(uri_hash_ab[cur],                             \
 300                     sizeof (uri_hash_t) * uri_hash_sz[cur]);            \
 301                 uri_hash_ab[(cur)] = NULL;                              \
 302                 uri_hash_lru[(cur)] = NULL;                             \
 303                 uri_hash_which = (new);                                 \
 304         } else {                                                        \
 305                 uri_hash_lru[(cur)] = (hp);                             \
 306         }                                                               \
 307 }
 308 
 309 void
 310 nl7c_uri_init(void)
 311 {
 312         uint32_t        cur = uri_hash_which;
 313 
 314         rw_init(&uri_hash_access, NULL, RW_DEFAULT, NULL);
 315 
 316         uri_hash_sz[cur] = P2Ps[URI_HASH_N_INIT];
 317         uri_hash_overflow[cur] = P2Ps[URI_HASH_N_INIT] * URI_HASH_AVRG;


 580             (rwlock == RW_WRITER && RW_WRITE_HELD(&uri_hash_access)));
 581         /*
 582          * uri_add() always succeeds so add a hash ref to the URI now.
 583          */
 584         REF_HOLD(uri);
 585 again:
 586         hix = uri->hvalue;
 587         URI_HASH_IX(hix, cur);
 588         if (uri_hash_ab[new] == NULL &&
 589             uri_hash_cnt[cur] < uri_hash_overflow[cur]) {
 590                 /*
 591                  * Easy case, no new hash and current hasn't overflowed,
 592                  * add URI to current hash and return.
 593                  *
 594          * Note, the check for uri_hash_cnt[] above isn't done
 595          * atomically, i.e. multiple threads can be in this code
 596                  * as RW_READER and update the cnt[], this isn't a problem
 597                  * as the check is only advisory.
 598                  */
 599         fast:
 600                 atomic_add_32(&uri_hash_cnt[cur], 1);
 601                 hp = &uri_hash_ab[cur][hix];
 602                 mutex_enter(&hp->lock);
 603                 uri->hash = hp->list;
 604                 hp->list = uri;
 605                 mutex_exit(&hp->lock);
 606                 rw_exit(&uri_hash_access);
 607                 return;
 608         }
 609         if (uri_hash_ab[new] == NULL) {
 610                 /*
 611                  * Need a new a or b hash, if not already RW_WRITER
 612                  * try to upgrade our lock to writer.
 613                  */
 614                 if (rwlock != RW_WRITER && ! rw_tryupgrade(&uri_hash_access)) {
 615                         /*
 616          * Upgrade failed, we can't simply exit and reenter
 617                          * the lock as after the exit and before the reenter
 618                          * the whole world can change so just wait for writer
 619                          * then do everything again.
 620                          */


 672         }
 673         /*
 674          * Hashed against current hash so migrate any current hash chain
 675          * members, if any.
 676          *
 677          * Note, the hash chain list can be checked for a non empty list
 678          * outside of the hash chain list lock as the hash chain struct
 679          * can't be destroyed while in the uri_hash_access rwlock, worst
 680          * case is that a non empty list is found and after acquiring the
 681          * lock another thread beats us to it (i.e. migrated the list).
 682          */
 683         hp = &uri_hash_ab[cur][hix];
 684         if (hp->list != NULL) {
 685                 URI_HASH_MIGRATE(cur, hp, new);
 686         }
 687         /*
 688          * If new hash has overflowed before current hash has been
 689          * completely migrated then walk all current hash chains and
 690          * migrate list members now.
 691          */
 692         if (atomic_add_32_nv(&uri_hash_cnt[new], 1) >= uri_hash_overflow[new]) {
 693                 for (hix = 0; hix < uri_hash_sz[cur]; hix++) {
 694                         hp = &uri_hash_ab[cur][hix];
 695                         if (hp->list != NULL) {
 696                                 URI_HASH_MIGRATE(cur, hp, new);
 697                         }
 698                 }
 699         }
 700         /*
 701          * Add URI to new hash.
 702          */
 703         hix = uri->hvalue;
 704         URI_HASH_IX(hix, new);
 705         hp = &uri_hash_ab[new][hix];
 706         mutex_enter(&hp->lock);
 707         uri->hash = hp->list;
 708         hp->list = uri;
 709         mutex_exit(&hp->lock);
 710         /*
 711          * Last, check to see if last cur hash chain has been
 712          * migrated, if so free cur hash and make new hash cur.


 820                  * of requested URI, check for expire or request no cache
 821                  * purge.
 822                  */
 823                 if (uri->expire >= 0 && uri->expire <= ddi_get_lbolt() ||
 824                     ruri->nocache) {
 825                         /*
 826                          * URI has expired or request specified to not use
 827                          * the cached version, unlink the URI from the hash
 828                          * chain, release all locks, release the hash ref
 829                          * on the URI, and last look it up again.
 830                          *
 831                          * Note, this will cause all variants of the named
 832                          * URI to be purged.
 833                          */
 834                         if (puri != NULL) {
 835                                 puri->hash = uri->hash;
 836                         } else {
 837                                 hp->list = uri->hash;
 838                         }
 839                         mutex_exit(&hp->lock);
 840                         atomic_add_32(&uri_hash_cnt[cur], -1);
 841                         rw_exit(&uri_hash_access);
 842                         if (ruri->nocache)
 843                                 nl7c_uri_purge++;
 844                         else
 845                                 nl7c_uri_expire++;
 846                         REF_RELE(uri);
 847                         goto again;
 848                 }
 849                 if (uri->scheme != NULL) {
 850                         /*
 851                          * URI has scheme private qualifier(s), if request
 852                          * URI doesn't or if no match skip this URI.
 853                          */
 854                         if (ruri->scheme == NULL ||
 855                             ! nl7c_http_cmp(uri->scheme, ruri->scheme))
 856                                 goto nexturi;
 857                 } else if (ruri->scheme != NULL) {
 858                         /*
 859                          * URI doesn't have scheme private qualifiers but
 860                          * request URI does, no match, skip this URI.




 257                                                                         \
 258         while (_s < (ep)) {                                          \
 259                 if ((_c = *_s) == '%') {                                \
 260                         H2A(_s, (ep), _c);                              \
 261                 }                                                       \
 262                 CHASH(hv, _c);                                          \
 263                 _s++;                                                   \
 264         }                                                               \
 265 }
 266 
/* Fold a 32-bit URI hash value into a bucket index for hash table a or b. */
 267 #define URI_HASH_IX(hix, which) (hix) = (hix) % (uri_hash_sz[(which)])
 268 
/*
 * URI_HASH_MIGRATE(from, hp, to): with bucket *hp of hash table `from`
 * locked, unlink every uri_desc_t chained on it, recompute each URI's
 * bucket from its saved hvalue, and relink it (new-bucket lock held)
 * onto the matching bucket of hash table `to`, zeroing its hit count.
 * Per-table element counts are maintained with atomic_dec_32/
 * atomic_inc_32 — the post-5045 replacement for atomic_add_32(..., -1/1).
 */
 269 #define URI_HASH_MIGRATE(from, hp, to) {                                \
 270         uri_desc_t      *_nuri;                                         \
 271         uint32_t        _nhix;                                          \
 272         uri_hash_t      *_nhp;                                          \
 273                                                                         \
 274         mutex_enter(&(hp)->lock);                                        \
 275         while ((_nuri = (hp)->list) != NULL) {                               \
 276                 (hp)->list = _nuri->hash;                         \
 277                 atomic_dec_32(&uri_hash_cnt[(from)]);               \
 278                 atomic_inc_32(&uri_hash_cnt[(to)]);                 \
 279                 _nhix = _nuri->hvalue;                                       \
 280                 URI_HASH_IX(_nhix, to);                                 \
 281                 _nhp = &uri_hash_ab[(to)][_nhix];                   \
 282                 mutex_enter(&_nhp->lock);                                \
 283                 _nuri->hash = _nhp->list;                         \
 284                 _nhp->list = _nuri;                                  \
 285                 _nuri->hit = 0;                                              \
 286                 mutex_exit(&_nhp->lock);                         \
 287         }                                                               \
 288         mutex_exit(&(hp)->lock);                                 \
 289 }
 290 
/*
 * URI_HASH_UNLINK(cur, new, hp, puri, uri): unlink `uri` from the chain
 * on bucket *hp (puri is its predecessor, or NULL when uri heads the
 * list), then atomically drop the `cur` table's element count via
 * atomic_dec_32_nv (the post-5045 form of atomic_add_32_nv(..., -1)).
 * If the count reaches zero and a successor table exists, free the
 * now-empty `cur` table and flip uri_hash_which to `new`; otherwise
 * remember *hp as the table's LRU bucket.
 */
 291 #define URI_HASH_UNLINK(cur, new, hp, puri, uri) {                      \
 292         if ((puri) != NULL) {                                           \
 293                 (puri)->hash = (uri)->hash;                               \
 294         } else {                                                        \
 295                 (hp)->list = (uri)->hash;                         \
 296         }                                                               \
 297         if (atomic_dec_32_nv(&uri_hash_cnt[(cur)]) == 0 &&          \
 298             uri_hash_ab[(new)] != NULL) {                               \
 299                 kmem_free(uri_hash_ab[cur],                             \
 300                     sizeof (uri_hash_t) * uri_hash_sz[cur]);            \
 301                 uri_hash_ab[(cur)] = NULL;                              \
 302                 uri_hash_lru[(cur)] = NULL;                             \
 303                 uri_hash_which = (new);                                 \
 304         } else {                                                        \
 305                 uri_hash_lru[(cur)] = (hp);                             \
 306         }                                                               \
 307 }
 308 
 309 void
 310 nl7c_uri_init(void)
 311 {
 312         uint32_t        cur = uri_hash_which;
 313 
 314         rw_init(&uri_hash_access, NULL, RW_DEFAULT, NULL);
 315 
 316         uri_hash_sz[cur] = P2Ps[URI_HASH_N_INIT];
 317         uri_hash_overflow[cur] = P2Ps[URI_HASH_N_INIT] * URI_HASH_AVRG;


 580             (rwlock == RW_WRITER && RW_WRITE_HELD(&uri_hash_access)));
 581         /*
 582          * uri_add() always succeeds so add a hash ref to the URI now.
 583          */
 584         REF_HOLD(uri);
 585 again:
 586         hix = uri->hvalue;
 587         URI_HASH_IX(hix, cur);
 588         if (uri_hash_ab[new] == NULL &&
 589             uri_hash_cnt[cur] < uri_hash_overflow[cur]) {
 590                 /*
 591                  * Easy case, no new hash and current hasn't overflowed,
 592                  * add URI to current hash and return.
 593                  *
 594          * Note, the check for uri_hash_cnt[] above isn't done
 595          * atomically, i.e. multiple threads can be in this code
 596                  * as RW_READER and update the cnt[], this isn't a problem
 597                  * as the check is only advisory.
 598                  */
 599         fast:
 600                 atomic_inc_32(&uri_hash_cnt[cur]);
 601                 hp = &uri_hash_ab[cur][hix];
 602                 mutex_enter(&hp->lock);
 603                 uri->hash = hp->list;
 604                 hp->list = uri;
 605                 mutex_exit(&hp->lock);
 606                 rw_exit(&uri_hash_access);
 607                 return;
 608         }
 609         if (uri_hash_ab[new] == NULL) {
 610                 /*
 611                  * Need a new a or b hash, if not already RW_WRITER
 612                  * try to upgrade our lock to writer.
 613                  */
 614                 if (rwlock != RW_WRITER && ! rw_tryupgrade(&uri_hash_access)) {
 615                         /*
 616          * Upgrade failed, we can't simply exit and reenter
 617                          * the lock as after the exit and before the reenter
 618                          * the whole world can change so just wait for writer
 619                          * then do everything again.
 620                          */


 672         }
 673         /*
 674          * Hashed against current hash so migrate any current hash chain
 675          * members, if any.
 676          *
 677          * Note, the hash chain list can be checked for a non empty list
 678          * outside of the hash chain list lock as the hash chain struct
 679          * can't be destroyed while in the uri_hash_access rwlock, worst
 680          * case is that a non empty list is found and after acquiring the
 681          * lock another thread beats us to it (i.e. migrated the list).
 682          */
 683         hp = &uri_hash_ab[cur][hix];
 684         if (hp->list != NULL) {
 685                 URI_HASH_MIGRATE(cur, hp, new);
 686         }
 687         /*
 688          * If new hash has overflowed before current hash has been
 689          * completely migrated then walk all current hash chains and
 690          * migrate list members now.
 691          */
 692         if (atomic_inc_32_nv(&uri_hash_cnt[new]) >= uri_hash_overflow[new]) {
 693                 for (hix = 0; hix < uri_hash_sz[cur]; hix++) {
 694                         hp = &uri_hash_ab[cur][hix];
 695                         if (hp->list != NULL) {
 696                                 URI_HASH_MIGRATE(cur, hp, new);
 697                         }
 698                 }
 699         }
 700         /*
 701          * Add URI to new hash.
 702          */
 703         hix = uri->hvalue;
 704         URI_HASH_IX(hix, new);
 705         hp = &uri_hash_ab[new][hix];
 706         mutex_enter(&hp->lock);
 707         uri->hash = hp->list;
 708         hp->list = uri;
 709         mutex_exit(&hp->lock);
 710         /*
 711          * Last, check to see if last cur hash chain has been
 712          * migrated, if so free cur hash and make new hash cur.


 820                  * of requested URI, check for expire or request no cache
 821                  * purge.
 822                  */
 823                 if (uri->expire >= 0 && uri->expire <= ddi_get_lbolt() ||
 824                     ruri->nocache) {
 825                         /*
 826                          * URI has expired or request specified to not use
 827                          * the cached version, unlink the URI from the hash
 828                          * chain, release all locks, release the hash ref
 829                          * on the URI, and last look it up again.
 830                          *
 831                          * Note, this will cause all variants of the named
 832                          * URI to be purged.
 833                          */
 834                         if (puri != NULL) {
 835                                 puri->hash = uri->hash;
 836                         } else {
 837                                 hp->list = uri->hash;
 838                         }
 839                         mutex_exit(&hp->lock);
 840                         atomic_dec_32(&uri_hash_cnt[cur]);
 841                         rw_exit(&uri_hash_access);
 842                         if (ruri->nocache)
 843                                 nl7c_uri_purge++;
 844                         else
 845                                 nl7c_uri_expire++;
 846                         REF_RELE(uri);
 847                         goto again;
 848                 }
 849                 if (uri->scheme != NULL) {
 850                         /*
 851                          * URI has scheme private qualifier(s), if request
 852                          * URI doesn't or if no match skip this URI.
 853                          */
 854                         if (ruri->scheme == NULL ||
 855                             ! nl7c_http_cmp(uri->scheme, ruri->scheme))
 856                                 goto nexturi;
 857                 } else if (ruri->scheme != NULL) {
 858                         /*
 859                          * URI doesn't have scheme private qualifiers but
 860                          * request URI does, no match, skip this URI.