5045 use atomic_{inc,dec}_* instead of atomic_add_*
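
The change is mechanical: wherever a 64-bit statistic moves by exactly one, the explicit-delta calls atomic_add_64(&x, 1) and atomic_add_64(&x, -1) are replaced by the dedicated atomic_inc_64(&x) and atomic_dec_64(&x) from atomic_ops(3C). A minimal sketch of the pattern; `counter' and `bump_and_drop' are hypothetical names, not part of this change:

    #include <sys/types.h>
    #include <sys/atomic.h>

    static volatile uint64_t counter;       /* hypothetical 64-bit statistic */

    static void
    bump_and_drop(void)
    {
            /* Old spelling: explicit deltas of +1 and -1. */
            atomic_add_64(&counter, 1);
            atomic_add_64(&counter, -1);

            /* New spelling: the dedicated single-step operations. */
            atomic_inc_64(&counter);
            atomic_dec_64(&counter);
    }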

old (atomic_add_*):

 349  * Acquire a grant reference.
 350  */
 351 static grant_ref_t
 352 gref_get(xnf_t *xnfp)
 353 {
 354         grant_ref_t gref;
 355 
 356         mutex_enter(&xnfp->xnf_gref_lock);
 357 
 358         do {
 359                 gref = gnttab_claim_grant_reference(&xnfp->xnf_gref_head);
 360 
 361         } while ((gref == INVALID_GRANT_REF) &&
 362             (gnttab_alloc_grant_references(16, &xnfp->xnf_gref_head) == 0));
 363 
 364         mutex_exit(&xnfp->xnf_gref_lock);
 365 
 366         if (gref == INVALID_GRANT_REF) {
 367                 xnfp->xnf_stat_gref_failure++;
 368         } else {
 369                 atomic_add_64(&xnfp->xnf_stat_gref_outstanding, 1);
 370                 if (xnfp->xnf_stat_gref_outstanding > xnfp->xnf_stat_gref_peak)
 371                         xnfp->xnf_stat_gref_peak =
 372                             xnfp->xnf_stat_gref_outstanding;
 373         }
 374 
 375         return (gref);
 376 }
 377 
 378 /*
 379  * Release a grant reference.
 380  */
 381 static void
 382 gref_put(xnf_t *xnfp, grant_ref_t gref)
 383 {
 384         ASSERT(gref != INVALID_GRANT_REF);
 385 
 386         mutex_enter(&xnfp->xnf_gref_lock);
 387         gnttab_release_grant_reference(&xnfp->xnf_gref_head, gref);
 388         mutex_exit(&xnfp->xnf_gref_lock);
 389 
 390         atomic_add_64(&xnfp->xnf_stat_gref_outstanding, -1);
 391 }
 392 
 393 /*
 394  * Acquire a transmit id.
 395  */
 396 static xnf_txid_t *
 397 txid_get(xnf_t *xnfp)
 398 {
 399         xnf_txid_t *tidp;
 400 
 401         ASSERT(MUTEX_HELD(&xnfp->xnf_txlock));
 402 
 403         if (xnfp->xnf_tx_pkt_id_head == INVALID_TX_ID)
 404                 return (NULL);
 405 
 406         ASSERT(TX_ID_VALID(xnfp->xnf_tx_pkt_id_head));
 407 
 408         tidp = TX_ID_TO_TXID(xnfp, xnfp->xnf_tx_pkt_id_head);
 409         xnfp->xnf_tx_pkt_id_head = tidp->next;
 410         tidp->next = INVALID_TX_ID;


2335             PAGESIZE, &data_accattr, DDI_DMA_STREAMING, ddiflags, 0,
2336             &bdesc->buf, &len, &bdesc->acc_handle) != DDI_SUCCESS)
2337                 goto failure_1;
2338 
2339         /* Bind to virtual address of buffer to get physical address. */
2340         if (ddi_dma_addr_bind_handle(bdesc->dma_handle, NULL,
2341             bdesc->buf, len, DDI_DMA_RDWR | DDI_DMA_STREAMING,
2342             ddiflags, 0, &dma_cookie, &ncookies) != DDI_DMA_MAPPED)
2343                 goto failure_2;
2344         ASSERT(ncookies == 1);
2345 
2346         bdesc->free_rtn.free_func = xnf_buf_recycle;
2347         bdesc->free_rtn.free_arg = (caddr_t)bdesc;
2348         bdesc->xnfp = xnfp;
2349         bdesc->buf_phys = dma_cookie.dmac_laddress;
2350         bdesc->buf_mfn = pfn_to_mfn(xnf_btop(bdesc->buf_phys));
2351         bdesc->len = dma_cookie.dmac_size;
2352         bdesc->grant_ref = INVALID_GRANT_REF;
2353         bdesc->gen = xnfp->xnf_gen;
2354 
2355         atomic_add_64(&xnfp->xnf_stat_buf_allocated, 1);
2356 
2357         return (0);
2358 
2359 failure_2:
2360         ddi_dma_mem_free(&bdesc->acc_handle);
2361 
2362 failure_1:
2363         ddi_dma_free_handle(&bdesc->dma_handle);
2364 
2365 failure:
2366 
2367         ASSERT(kmflag & KM_NOSLEEP); /* Cannot fail for KM_SLEEP. */
2368         return (-1);
2369 }
2370 
2371 static void
2372 xnf_buf_destructor(void *buf, void *arg)
2373 {
2374         xnf_buf_t *bdesc = buf;
2375         xnf_t *xnfp = arg;
2376 
2377         (void) ddi_dma_unbind_handle(bdesc->dma_handle);
2378         ddi_dma_mem_free(&bdesc->acc_handle);
2379         ddi_dma_free_handle(&bdesc->dma_handle);
2380 
2381         atomic_add_64(&xnfp->xnf_stat_buf_allocated, -1);
2382 }
2383 
2384 static xnf_buf_t *
2385 xnf_buf_get(xnf_t *xnfp, int flags, boolean_t readonly)
2386 {
2387         grant_ref_t gref;
2388         xnf_buf_t *bufp;
2389 
2390         /*
2391          * Usually grant references are more scarce than memory, so we
2392          * attempt to acquire a grant reference first.
2393          */
2394         gref = gref_get(xnfp);
2395         if (gref == INVALID_GRANT_REF)
2396                 return (NULL);
2397 
2398         bufp = kmem_cache_alloc(xnfp->xnf_buf_cache, flags);
2399         if (bufp == NULL) {
2400                 gref_put(xnfp, gref);
2401                 return (NULL);
2402         }
2403 
2404         ASSERT(bufp->grant_ref == INVALID_GRANT_REF);
2405 
2406         bufp->grant_ref = gref;
2407 
2408         if (bufp->gen != xnfp->xnf_gen)
2409                 xnf_buf_refresh(bufp);
2410 
2411         gnttab_grant_foreign_access_ref(bufp->grant_ref,
2412             xvdi_get_oeid(bufp->xnfp->xnf_devinfo),
2413             bufp->buf_mfn, readonly ? 1 : 0);
2414 
2415         atomic_add_64(&xnfp->xnf_stat_buf_outstanding, 1);
2416 
2417         return (bufp);
2418 }
2419 
2420 static void
2421 xnf_buf_put(xnf_t *xnfp, xnf_buf_t *bufp, boolean_t readonly)
2422 {
2423         if (bufp->grant_ref != INVALID_GRANT_REF) {
2424                 (void) gnttab_end_foreign_access_ref(
2425                     bufp->grant_ref, readonly ? 1 : 0);
2426                 gref_put(xnfp, bufp->grant_ref);
2427                 bufp->grant_ref = INVALID_GRANT_REF;
2428         }
2429 
2430         kmem_cache_free(xnfp->xnf_buf_cache, bufp);
2431 
2432         atomic_add_64(&xnfp->xnf_stat_buf_outstanding, -1);
2433 }
2434 
2435 /*
2436  * Refresh any cached data about a buffer after resume.
2437  */
2438 static void
2439 xnf_buf_refresh(xnf_buf_t *bdesc)
2440 {
2441         bdesc->buf_mfn = pfn_to_mfn(xnf_btop(bdesc->buf_phys));
2442         bdesc->gen = bdesc->xnfp->xnf_gen;
2443 }
2444 
2445 /*
2446  * Streams `freeb' routine for `xnf_buf_t' when used as transmit
2447  * look-aside buffers.
2448  */
2449 static void
2450 xnf_buf_recycle(xnf_buf_t *bdesc)
2451 {
2452         xnf_t *xnfp = bdesc->xnfp;


new (atomic_inc_64/atomic_dec_64):

 349  * Acquire a grant reference.
 350  */
 351 static grant_ref_t
 352 gref_get(xnf_t *xnfp)
 353 {
 354         grant_ref_t gref;
 355 
 356         mutex_enter(&xnfp->xnf_gref_lock);
 357 
 358         do {
 359                 gref = gnttab_claim_grant_reference(&xnfp->xnf_gref_head);
 360 
 361         } while ((gref == INVALID_GRANT_REF) &&
 362             (gnttab_alloc_grant_references(16, &xnfp->xnf_gref_head) == 0));
 363 
 364         mutex_exit(&xnfp->xnf_gref_lock);
 365 
 366         if (gref == INVALID_GRANT_REF) {
 367                 xnfp->xnf_stat_gref_failure++;
 368         } else {
 369                 atomic_inc_64(&xnfp->xnf_stat_gref_outstanding);
 370                 if (xnfp->xnf_stat_gref_outstanding > xnfp->xnf_stat_gref_peak)
 371                         xnfp->xnf_stat_gref_peak =
 372                             xnfp->xnf_stat_gref_outstanding;
 373         }
 374 
 375         return (gref);
 376 }
 377 
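
gref_get() above is a claim-or-grow pool: it claims from the driver's free list and, only when that fails, asks gnttab_alloc_grant_references() for another batch of 16 before retrying, so the caller sees INVALID_GRANT_REF only when the pool refuses to grow. A user-level sketch of the same loop, with every name hypothetical:

    #include <pthread.h>

    #define BATCH   16

    typedef struct pool {
            pthread_mutex_t lock;
            int             nfree;      /* free items in the pool */
            int             limit;      /* hard cap, like the grant table size */
    } pool_t;

    /* Grow by n items; returns 0 on success, like gnttab_alloc_...(). */
    static int
    pool_grow(pool_t *p, int n)
    {
            if (p->nfree + n > p->limit)
                    return (-1);
            p->nfree += n;
            return (0);
    }

    /* Claim one item, growing on demand; returns -1 when exhausted. */
    static int
    pool_claim(pool_t *p)
    {
            int got;

            (void) pthread_mutex_lock(&p->lock);
            do {
                    got = (p->nfree > 0);
                    if (got)
                            p->nfree--;
            } while (!got && pool_grow(p, BATCH) == 0);
            (void) pthread_mutex_unlock(&p->lock);

            return (got ? 0 : -1);
    }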
 378 /*
 379  * Release a grant reference.
 380  */
 381 static void
 382 gref_put(xnf_t *xnfp, grant_ref_t gref)
 383 {
 384         ASSERT(gref != INVALID_GRANT_REF);
 385 
 386         mutex_enter(&xnfp->xnf_gref_lock);
 387         gnttab_release_grant_reference(&xnfp->xnf_gref_head, gref);
 388         mutex_exit(&xnfp->xnf_gref_lock);
 389 
 390         atomic_dec_64(&xnfp->xnf_stat_gref_outstanding);
 391 }
 392 
 393 /*
 394  * Acquire a transmit id.
 395  */
 396 static xnf_txid_t *
 397 txid_get(xnf_t *xnfp)
 398 {
 399         xnf_txid_t *tidp;
 400 
 401         ASSERT(MUTEX_HELD(&xnfp->xnf_txlock));
 402 
 403         if (xnfp->xnf_tx_pkt_id_head == INVALID_TX_ID)
 404                 return (NULL);
 405 
 406         ASSERT(TX_ID_VALID(xnfp->xnf_tx_pkt_id_head));
 407 
 408         tidp = TX_ID_TO_TXID(xnfp, xnfp->xnf_tx_pkt_id_head);
 409         xnfp->xnf_tx_pkt_id_head = tidp->next;
 410         tidp->next = INVALID_TX_ID;


2335             PAGESIZE, &data_accattr, DDI_DMA_STREAMING, ddiflags, 0,
2336             &bdesc->buf, &len, &bdesc->acc_handle) != DDI_SUCCESS)
2337                 goto failure_1;
2338 
2339         /* Bind to virtual address of buffer to get physical address. */
2340         if (ddi_dma_addr_bind_handle(bdesc->dma_handle, NULL,
2341             bdesc->buf, len, DDI_DMA_RDWR | DDI_DMA_STREAMING,
2342             ddiflags, 0, &dma_cookie, &ncookies) != DDI_DMA_MAPPED)
2343                 goto failure_2;
2344         ASSERT(ncookies == 1);
2345 
2346         bdesc->free_rtn.free_func = xnf_buf_recycle;
2347         bdesc->free_rtn.free_arg = (caddr_t)bdesc;
2348         bdesc->xnfp = xnfp;
2349         bdesc->buf_phys = dma_cookie.dmac_laddress;
2350         bdesc->buf_mfn = pfn_to_mfn(xnf_btop(bdesc->buf_phys));
2351         bdesc->len = dma_cookie.dmac_size;
2352         bdesc->grant_ref = INVALID_GRANT_REF;
2353         bdesc->gen = xnfp->xnf_gen;
2354 
2355         atomic_inc_64(&xnfp->xnf_stat_buf_allocated);
2356 
2357         return (0);
2358 
2359 failure_2:
2360         ddi_dma_mem_free(&bdesc->acc_handle);
2361 
2362 failure_1:
2363         ddi_dma_free_handle(&bdesc->dma_handle);
2364 
2365 failure:
2366 
2367         ASSERT(kmflag & KM_NOSLEEP); /* Cannot fail for KM_SLEEP. */
2368         return (-1);
2369 }
2370 
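
The failure/failure_1/failure_2 labels above form the usual goto-unwind ladder: each label frees exactly what was acquired before the jump, in reverse order of acquisition, mirroring the teardown sequence in xnf_buf_destructor() below. A self-contained sketch of the idiom with hypothetical resources:

    #include <stdio.h>
    #include <stdlib.h>

    /* Acquire two resources; on failure, release only what was taken. */
    static int
    acquire_both(FILE **fpp, void **bufp)
    {
            *fpp = tmpfile();
            if (*fpp == NULL)
                    goto failure;

            *bufp = malloc(4096);
            if (*bufp == NULL)
                    goto failure_1;

            return (0);

    failure_1:
            (void) fclose(*fpp);

    failure:
            return (-1);
    }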
2371 static void
2372 xnf_buf_destructor(void *buf, void *arg)
2373 {
2374         xnf_buf_t *bdesc = buf;
2375         xnf_t *xnfp = arg;
2376 
2377         (void) ddi_dma_unbind_handle(bdesc->dma_handle);
2378         ddi_dma_mem_free(&bdesc->acc_handle);
2379         ddi_dma_free_handle(&bdesc->dma_handle);
2380 
2381         atomic_dec_64(&xnfp->xnf_stat_buf_allocated);
2382 }
2383 
2384 static xnf_buf_t *
2385 xnf_buf_get(xnf_t *xnfp, int flags, boolean_t readonly)
2386 {
2387         grant_ref_t gref;
2388         xnf_buf_t *bufp;
2389 
2390         /*
2391          * Usually grant references are more scarce than memory, so we
2392          * attempt to acquire a grant reference first.
2393          */
2394         gref = gref_get(xnfp);
2395         if (gref == INVALID_GRANT_REF)
2396                 return (NULL);
2397 
2398         bufp = kmem_cache_alloc(xnfp->xnf_buf_cache, flags);
2399         if (bufp == NULL) {
2400                 gref_put(xnfp, gref);
2401                 return (NULL);
2402         }
2403 
2404         ASSERT(bufp->grant_ref == INVALID_GRANT_REF);
2405 
2406         bufp->grant_ref = gref;
2407 
2408         if (bufp->gen != xnfp->xnf_gen)
2409                 xnf_buf_refresh(bufp);
2410 
2411         gnttab_grant_foreign_access_ref(bufp->grant_ref,
2412             xvdi_get_oeid(bufp->xnfp->xnf_devinfo),
2413             bufp->buf_mfn, readonly ? 1 : 0);
2414 
2415         atomic_inc_64(&xnfp->xnf_stat_buf_outstanding);
2416 
2417         return (bufp);
2418 }
2419 
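
xnf_buf_get() orders its acquisitions by scarcity, as its comment explains: the grant reference first, then the kmem cache buffer, undoing the grant with gref_put() if the allocation fails. The same shape in a self-contained sketch; all names here are hypothetical:

    #include <stdlib.h>

    static int navail = 4;              /* pretend only four scarce items exist */

    static int
    scarce_get(void)
    {
            return (navail > 0 ? --navail : -1);
    }

    static void
    scarce_put(int item)
    {
            (void) item;
            navail++;
    }

    /* Acquire the scarce item first; roll it back if malloc() fails. */
    static int
    pair_get(int *itemp, void **bufp)
    {
            *itemp = scarce_get();
            if (*itemp == -1)
                    return (-1);        /* nothing yet to undo */

            *bufp = malloc(64);
            if (*bufp == NULL) {
                    scarce_put(*itemp); /* undo the scarce acquisition */
                    return (-1);
            }
            return (0);
    }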
2420 static void
2421 xnf_buf_put(xnf_t *xnfp, xnf_buf_t *bufp, boolean_t readonly)
2422 {
2423         if (bufp->grant_ref != INVALID_GRANT_REF) {
2424                 (void) gnttab_end_foreign_access_ref(
2425                     bufp->grant_ref, readonly ? 1 : 0);
2426                 gref_put(xnfp, bufp->grant_ref);
2427                 bufp->grant_ref = INVALID_GRANT_REF;
2428         }
2429 
2430         kmem_cache_free(xnfp->xnf_buf_cache, bufp);
2431 
2432         atomic_dec_64(&xnfp->xnf_stat_buf_outstanding);
2433 }
2434 
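
Callers pair the two functions, passing the same readonly flag to both so the grant is ended with the access mode it was created with. A hypothetical caller (not from this driver) might look like:

    /*
     * Hypothetical caller: take a buffer granted read/write to the
     * peer and give it straight back, e.g. on a setup failure.
     */
    static int
    xnf_example(xnf_t *xnfp)
    {
            xnf_buf_t *bufp;

            bufp = xnf_buf_get(xnfp, KM_NOSLEEP, B_FALSE);
            if (bufp == NULL)
                    return (ENOMEM);

            /* ... hand bufp->grant_ref to the backend here ... */

            xnf_buf_put(xnfp, bufp, B_FALSE);
            return (0);
    }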
2435 /*
2436  * Refresh any cached data about a buffer after resume.
2437  */
2438 static void
2439 xnf_buf_refresh(xnf_buf_t *bdesc)
2440 {
2441         bdesc->buf_mfn = pfn_to_mfn(xnf_btop(bdesc->buf_phys));
2442         bdesc->gen = bdesc->xnfp->xnf_gen;
2443 }
2444 
2445 /*
2446  * Streams `freeb' routine for `xnf_buf_t' when used as transmit
2447  * look-aside buffers.
2448  */
2449 static void
2450 xnf_buf_recycle(xnf_buf_t *bdesc)
2451 {
2452         xnf_t *xnfp = bdesc->xnfp;