Print this page
5045 use atomic_{inc,dec}_* instead of atomic_add_*


/*
 * Per-credential klpd state: at most one registered policy daemon
 * plus a reference count.
 */
  70 typedef struct credklpd {
  71         kmutex_t        crkl_lock;      /* protects crkl_reg (see klpd_call) */
  72         klpd_reg_t      *crkl_reg;      /* registered klpd, or NULL */
  73         uint32_t        crkl_ref;       /* refcount, manipulated atomically */
  74 } credklpd_t;
  75 
/* Global list of klpd registrations (see klpd_unlink/klpd_rele_next). */
  76 klpd_reg_t *klpd_list;
  77 
/* Forward declarations for helpers defined later in this file. */
  78 static void klpd_unlink(klpd_reg_t *);
  79 static int klpd_unreg_dh(door_handle_t);
  80 
  81 static credklpd_t *crklpd_alloc(void);
  82 
  83 void crklpd_setreg(credklpd_t *, klpd_reg_t *);
  84 
  85 extern size_t max_vnode_path;
  86 
  87 void
  88 klpd_rele(klpd_reg_t *p)
  89 {
  90         if (atomic_add_32_nv(&p->klpd_ref, -1) == 0) {
  91                 if (p->klpd_refp != NULL)
  92                         klpd_unlink(p);
  93                 if (p->klpd_cred != NULL)
  94                         crfree(p->klpd_cred);
  95                 door_ki_rele(p->klpd_door);
  96                 kmem_free(p, sizeof (*p));
  97         }
  98 }
  99 
 100 /*
 101  * In order to be able to walk the lists, we can't unlink the entry
 102  * until the reference count drops to 0.  If we remove it too soon,
 103  * list walkers will terminate when they happen to call a now orphaned
 104  * entry.
 105  */
 106 static klpd_reg_t *
 107 klpd_rele_next(klpd_reg_t *p)
 108 {
 109         klpd_reg_t *r = p->klpd_next;
 110 
 111         klpd_rele(p);
 112         return (r);
 113 }
 114 
 115 
 116 static void
 117 klpd_hold(klpd_reg_t *p)
 118 {
 119         atomic_add_32(&p->klpd_ref, 1);
 120 }
 121 
 122 /*
 123  * Remove registration from where it is registered.  The entry itself
 124  * is freed by klpd_rele(), not here.
 125  */
 125 static void
 126 klpd_unlink(klpd_reg_t *p)
 127 {
 128         ASSERT(p->klpd_refp == NULL || *p->klpd_refp == p);
 129 
        /* Point the predecessor's link past p. */
 130         if (p->klpd_refp != NULL)
 131                 *p->klpd_refp = p->klpd_next;
 132 
        /* Fix up the successor's back-pointer. */
 133         if (p->klpd_next != NULL)
 134                 p->klpd_next->klpd_refp = p->klpd_refp;
        /* Mark p unlinked so klpd_rele() won't try to unlink it again. */
 135         p->klpd_refp = NULL;
 136 }
 137 
 138 /*
 139  * Remove all elements of the klpd list and decrement their refcnts.


 333 
/* Count of klpd_call() invocations rejected because a lock was held. */
 334 uint32_t klpd_bad_locks;
 335 
 336 int
 337 klpd_call(const cred_t *cr, const priv_set_t *req, va_list ap)
 338 {
 339         klpd_reg_t *p;
 340         int rv = -1;
 341         credklpd_t *ckp;
 342         zone_t *ckzone;
 343 
 344         /*
 345          * These locks must not be held when this code is called;
 346          * callbacks to userland with these locks held will result
 347          * in issues.  That said, the code at the call sides was
 348          * restructured not to call with any of the locks held and
 349          * no policies operate by default on most processes.
 350          */
 351         if (mutex_owned(&pidlock) || mutex_owned(&curproc->p_lock) ||
 352             mutex_owned(&curproc->p_crlock)) {
 353                 atomic_add_32(&klpd_bad_locks, 1);
 354                 return (-1);
 355         }
 356 
 357         /*
 358          * Enforce the limit set for the call process (still).
 359          */
 360         if (!priv_issubset(req, &CR_LPRIV(cr)))
 361                 return (-1);
 362 
 363         /* Try 1: get the credential specific klpd */
 364         if ((ckp = crgetcrklpd(cr)) != NULL) {
 365                 mutex_enter(&ckp->crkl_lock);
 366                 if ((p = ckp->crkl_reg) != NULL &&
 367                     p->klpd_indel == 0 &&
 368                     priv_issubset(req, &p->klpd_pset)) {
 369                         klpd_hold(p);
 370                         mutex_exit(&ckp->crkl_lock);
 371                         rv = klpd_do_call(p, req, ap);
 372                         mutex_enter(&ckp->crkl_lock);
 373                         klpd_rele(p);


 657 
 658         ckp = crgetcrklpd(p->p_cred);
 659         if (ckp != NULL) {
 660                 crklpd_setreg(ckp, NULL);
 661         } else {
 662                 res = ESRCH;
 663         }
 664         mutex_exit(&p->p_crlock);
 665 
 666 out:
 667         door_ki_rele(dh);
 668 
 669         if (res != 0)
 670                 return (set_errno(res));
 671         return (0);
 672 }
 673 
 674 void
 675 crklpd_hold(credklpd_t *crkpd)
 676 {
 677         atomic_add_32(&crkpd->crkl_ref, 1);
 678 }
 679 
 680 void
 681 crklpd_rele(credklpd_t *crkpd)
 682 {
 683         if (atomic_add_32_nv(&crkpd->crkl_ref, -1) == 0) {
 684                 if (crkpd->crkl_reg != NULL)
 685                         klpd_rele(crkpd->crkl_reg);
 686                 mutex_destroy(&crkpd->crkl_lock);
 687                 kmem_free(crkpd, sizeof (*crkpd));
 688         }
 689 }
 690 
 691 static credklpd_t *
 692 crklpd_alloc(void)
 693 {
 694         credklpd_t *res = kmem_alloc(sizeof (*res), KM_SLEEP);
 695 
 696         mutex_init(&res->crkl_lock, NULL, MUTEX_DEFAULT, NULL);
 697         res->crkl_ref = 1;
 698         res->crkl_reg = NULL;
 699 
 700         return (res);
 701 }
 702 
 703 void




/*
 * Per-credential klpd state: at most one registered policy daemon
 * plus a reference count.
 */
  70 typedef struct credklpd {
  71         kmutex_t        crkl_lock;      /* protects crkl_reg (see klpd_call) */
  72         klpd_reg_t      *crkl_reg;      /* registered klpd, or NULL */
  73         uint32_t        crkl_ref;       /* refcount, manipulated atomically */
  74 } credklpd_t;
  75 
/* Global list of klpd registrations (see klpd_unlink/klpd_rele_next). */
  76 klpd_reg_t *klpd_list;
  77 
/* Forward declarations for helpers defined later in this file. */
  78 static void klpd_unlink(klpd_reg_t *);
  79 static int klpd_unreg_dh(door_handle_t);
  80 
  81 static credklpd_t *crklpd_alloc(void);
  82 
  83 void crklpd_setreg(credklpd_t *, klpd_reg_t *);
  84 
  85 extern size_t max_vnode_path;
  86 
/*
 * Drop one reference on a klpd registration.  The last release
 * unlinks the entry from its list (if still linked), releases the
 * credential and door handle it holds, and frees the structure.
 */
  87 void
  88 klpd_rele(klpd_reg_t *p)
  89 {
  90         if (atomic_dec_32_nv(&p->klpd_ref) == 0) {
  91                 if (p->klpd_refp != NULL)
  92                         klpd_unlink(p);
  93                 if (p->klpd_cred != NULL)
  94                         crfree(p->klpd_cred);
  95                 door_ki_rele(p->klpd_door);
  96                 kmem_free(p, sizeof (*p));
  97         }
  98 }
  99 
 100 /*
 101  * In order to be able to walk the lists, we can't unlink the entry
 102  * until the reference count drops to 0.  If we remove it too soon,
 103  * list walkers will terminate when they happen to call a now orphaned
 104  * entry.
 105  */
 106 static klpd_reg_t *
 107 klpd_rele_next(klpd_reg_t *p)
 108 {
        /* Capture the successor before the release can free p. */
 109         klpd_reg_t *r = p->klpd_next;
 110 
 111         klpd_rele(p);
 112         return (r);
 113 }
 114 
 115 
/* Take an additional reference on a klpd registration. */
 116 static void
 117 klpd_hold(klpd_reg_t *p)
 118 {
 119         atomic_inc_32(&p->klpd_ref);
 120 }
 121 
 122 /*
 123  * Remove registration from where it is registered.  The entry itself
 124  * is freed by klpd_rele(), not here.
 125  */
 125 static void
 126 klpd_unlink(klpd_reg_t *p)
 127 {
 128         ASSERT(p->klpd_refp == NULL || *p->klpd_refp == p);
 129 
        /* Point the predecessor's link past p. */
 130         if (p->klpd_refp != NULL)
 131                 *p->klpd_refp = p->klpd_next;
 132 
        /* Fix up the successor's back-pointer. */
 133         if (p->klpd_next != NULL)
 134                 p->klpd_next->klpd_refp = p->klpd_refp;
        /* Mark p unlinked so klpd_rele() won't try to unlink it again. */
 135         p->klpd_refp = NULL;
 136 }
 137 
 138 /*
 139  * Remove all elements of the klpd list and decrement their refcnts.


 333 
/* Count of klpd_call() invocations rejected because a lock was held. */
 334 uint32_t klpd_bad_locks;
 335 
 336 int
 337 klpd_call(const cred_t *cr, const priv_set_t *req, va_list ap)
 338 {
 339         klpd_reg_t *p;
 340         int rv = -1;
 341         credklpd_t *ckp;
 342         zone_t *ckzone;
 343 
 344         /*
 345          * These locks must not be held when this code is called;
 346          * callbacks to userland with these locks held will result
 347          * in issues.  That said, the code at the call sides was
 348          * restructured not to call with any of the locks held and
 349          * no policies operate by default on most processes.
 350          */
 351         if (mutex_owned(&pidlock) || mutex_owned(&curproc->p_lock) ||
 352             mutex_owned(&curproc->p_crlock)) {
 353                 atomic_inc_32(&klpd_bad_locks);
 354                 return (-1);
 355         }
 356 
 357         /*
 358          * Enforce the limit set for the call process (still).
 359          */
 360         if (!priv_issubset(req, &CR_LPRIV(cr)))
 361                 return (-1);
 362 
 363         /* Try 1: get the credential specific klpd */
 364         if ((ckp = crgetcrklpd(cr)) != NULL) {
 365                 mutex_enter(&ckp->crkl_lock);
 366                 if ((p = ckp->crkl_reg) != NULL &&
 367                     p->klpd_indel == 0 &&
 368                     priv_issubset(req, &p->klpd_pset)) {
 369                         klpd_hold(p);
 370                         mutex_exit(&ckp->crkl_lock);
 371                         rv = klpd_do_call(p, req, ap);
 372                         mutex_enter(&ckp->crkl_lock);
 373                         klpd_rele(p);


 657 
 658         ckp = crgetcrklpd(p->p_cred);
 659         if (ckp != NULL) {
 660                 crklpd_setreg(ckp, NULL);
 661         } else {
 662                 res = ESRCH;
 663         }
 664         mutex_exit(&p->p_crlock);
 665 
 666 out:
 667         door_ki_rele(dh);
 668 
 669         if (res != 0)
 670                 return (set_errno(res));
 671         return (0);
 672 }
 673 
/* Take an additional reference on a per-credential klpd. */
 674 void
 675 crklpd_hold(credklpd_t *crkpd)
 676 {
 677         atomic_inc_32(&crkpd->crkl_ref);
 678 }
 679 
/*
 * Drop one reference on a per-credential klpd.  The last release also
 * drops the reference held on the registered klpd (if any), destroys
 * the lock and frees the structure.
 */
 680 void
 681 crklpd_rele(credklpd_t *crkpd)
 682 {
 683         if (atomic_dec_32_nv(&crkpd->crkl_ref) == 0) {
 684                 if (crkpd->crkl_reg != NULL)
 685                         klpd_rele(crkpd->crkl_reg);
 686                 mutex_destroy(&crkpd->crkl_lock);
 687                 kmem_free(crkpd, sizeof (*crkpd));
 688         }
 689 }
 690 
/*
 * Allocate a fresh credklpd: one reference held by the caller, no
 * registration attached yet.
 */
 691 static credklpd_t *
 692 crklpd_alloc(void)
 693 {
 694         credklpd_t *res = kmem_alloc(sizeof (*res), KM_SLEEP);
 695 
 696         mutex_init(&res->crkl_lock, NULL, MUTEX_DEFAULT, NULL);
 697         res->crkl_ref = 1;
 698         res->crkl_reg = NULL;
 699 
 700         return (res);
 701 }
 702 
 703 void