/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/* Copyright © 2003-2011 Emulex. All rights reserved.  */

/*
 * Source file containing the Receive Path handling
 * functions
 */
#include <oce_impl.h>


void oce_rx_pool_free(char *arg);
static void oce_rqb_dtor(oce_rq_bdesc_t *rqbd);
static int oce_rqb_ctor(oce_rq_bdesc_t *rqbd, struct oce_rq *rq,
    size_t size, int flags);

static inline mblk_t *oce_rx(struct oce_dev *dev, struct oce_rq *rq,
    struct oce_nic_rx_cqe *cqe);
static inline mblk_t *oce_rx_bcopy(struct oce_dev *dev,
        struct oce_rq *rq, struct oce_nic_rx_cqe *cqe);
static int oce_rq_charge(struct oce_rq *rq, uint32_t nbufs, boolean_t repost);
static void oce_rx_insert_tag(mblk_t *mp, uint16_t vtag);
static void oce_set_rx_oflags(mblk_t *mp, struct oce_nic_rx_cqe *cqe);
static inline void oce_rx_drop_pkt(struct oce_rq *rq,
    struct oce_nic_rx_cqe *cqe);
static oce_rq_bdesc_t *oce_rqb_alloc(struct oce_rq *rq);
static void oce_rqb_free(struct oce_rq *rq, oce_rq_bdesc_t *rqbd);
static void oce_rq_post_buffer(struct oce_rq *rq, int nbufs);

#pragma inline(oce_rx)
#pragma inline(oce_rx_bcopy)
#pragma inline(oce_rq_charge)
#pragma inline(oce_rx_insert_tag)
#pragma inline(oce_set_rx_oflags)
#pragma inline(oce_rx_drop_pkt)
#pragma inline(oce_rqb_alloc)
#pragma inline(oce_rqb_free)
#pragma inline(oce_rq_post_buffer)

static ddi_dma_attr_t oce_rx_buf_attr = {
        DMA_ATTR_V0,            /* version number */
        0x0000000000000000ull,  /* low address */
        0xFFFFFFFFFFFFFFFFull,  /* high address */
        0x00000000FFFFFFFFull,  /* dma counter max */
        OCE_DMA_ALIGNMENT,      /* alignment */
        0x000007FF,             /* burst sizes */
        0x00000001,             /* minimum transfer size */
        0x00000000FFFFFFFFull,  /* maximum transfer size */
        0xFFFFFFFFFFFFFFFFull,  /* maximum segment size */
        1,                      /* scatter/gather list length */
        0x00000001,             /* granularity */
        DDI_DMA_FLAGERR|DDI_DMA_RELAXED_ORDERING                /* DMA flags */
};
/*
 * function to create a DMA buffer pool for an RQ
 *
 * rq - pointer to the RQ for which buffers are created
 * buf_size - requested size of each buffer (currently unused; the
 *     fragment size configured in the RQ plus headroom is used instead)
 *
 * return DDI_SUCCESS => success, DDI_FAILURE otherwise
 */
int
oce_rqb_cache_create(struct oce_rq *rq, size_t buf_size)
{
        int size;
        int cnt;
        int ret;
        oce_rq_bdesc_t *rqbd;

        _NOTE(ARGUNUSED(buf_size));
        rqbd = rq->rq_bdesc_array;
        size = rq->cfg.frag_size + OCE_RQE_BUF_HEADROOM;
        for (cnt = 0; cnt < rq->cfg.nbufs; cnt++, rqbd++) {
                rq->rqb_freelist[cnt] = rqbd;
                ret = oce_rqb_ctor(rqbd, rq,
                    size, (DDI_DMA_RDWR|DDI_DMA_STREAMING));
                if (ret != DDI_SUCCESS) {
                        goto rqb_fail;
                }
        }
        rq->rqb_free = rq->cfg.nbufs;
        rq->rqb_rc_head = 0;
        rq->rqb_next_free = 0;
        return (DDI_SUCCESS);

rqb_fail:
        oce_rqb_cache_destroy(rq);
        return (DDI_FAILURE);
} /* oce_rqb_cache_create */

/*
 * function to destroy the RQ DMA buffer cache
 *
 * rq - pointer to the RQ structure
 *
 * return none
 */
void
oce_rqb_cache_destroy(struct oce_rq *rq)
{
        oce_rq_bdesc_t *rqbd = NULL;
        int cnt;

        rqbd = rq->rq_bdesc_array;
        for (cnt = 0; cnt < rq->cfg.nbufs; cnt++, rqbd++) {
                oce_rqb_dtor(rqbd);
        }
} /* oce_rqb_cache_destroy */

/*
 * RQ buffer destructor function
 *
 * rqbd - pointer to RQ buffer descriptor
 *
 * return none
 */
static void
oce_rqb_dtor(oce_rq_bdesc_t *rqbd)
{
        if ((rqbd == NULL) || (rqbd->rq == NULL)) {
                return;
        }
        if (rqbd->mp != NULL) {
                rqbd->fr_rtn.free_arg = NULL;
                freemsg(rqbd->mp);
                rqbd->mp = NULL;
        }
        oce_free_dma_buffer(rqbd->rq->parent, rqbd->rqb);
} /* oce_rqb_dtor */

/*
 * RQ buffer constructor function
 *
 * rqbd - pointer to RQ buffer descriptor
 * rq - pointer to RQ structure
 * size - size of the buffer
 * flags - DDI DMA access flags for the buffer allocation
 *     (e.g. DDI_DMA_RDWR | DDI_DMA_STREAMING)
 *
 * return DDI_SUCCESS => success, DDI_FAILURE otherwise
 */
static int
oce_rqb_ctor(oce_rq_bdesc_t *rqbd, struct oce_rq *rq, size_t size, int flags)
{
        struct oce_dev *dev;
        oce_dma_buf_t *dbuf;

        dev = rq->parent;

        dbuf = oce_alloc_dma_buffer(dev, size, &oce_rx_buf_attr, flags);
        if (dbuf == NULL) {
                return (DDI_FAILURE);
        }

        /* Set the call back function parameters */
        rqbd->fr_rtn.free_func = (void (*)())oce_rx_pool_free;
        rqbd->fr_rtn.free_arg = (caddr_t)(void *)rqbd;
        rqbd->mp = desballoc((uchar_t *)(dbuf->base),
            dbuf->size, 0, &rqbd->fr_rtn);
        if (rqbd->mp == NULL) {
                oce_free_dma_buffer(dev, dbuf);
                return (DDI_FAILURE);
        }
        rqbd->rqb = dbuf;
        rqbd->rq = rq;
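        /*
         * Both the fragment address programmed into the RQE and the mblk
         * read pointer are offset by OCE_RQE_BUF_HEADROOM, leaving headroom
         * in front of the received frame (used, for example, to prepend a
         * VLAN tag in oce_rx_insert_tag() without reallocating the buffer).
         */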
        rqbd->frag_addr.dw.addr_lo = ADDR_LO(dbuf->addr + OCE_RQE_BUF_HEADROOM);
        rqbd->frag_addr.dw.addr_hi = ADDR_HI(dbuf->addr + OCE_RQE_BUF_HEADROOM);
        rqbd->mp->b_rptr = (uchar_t *)rqbd->rqb->base + OCE_RQE_BUF_HEADROOM;

        return (DDI_SUCCESS);
} /* oce_rqb_ctor */

/*
 * RQ buffer allocator function
 *
 * rq - pointer to RQ structure
 *
 * return pointer to RQ buffer descriptor
 */
static inline oce_rq_bdesc_t *
oce_rqb_alloc(struct oce_rq *rq)
{
        oce_rq_bdesc_t *rqbd;
        uint32_t free_index;
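
        /*
         * Callers reserve descriptors by decrementing rq->rqb_free
         * (see oce_atomic_reserve() in oce_drain_rq_cq()) before calling,
         * so the slot at rqb_next_free is expected to hold a valid buffer.
         */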
        free_index = rq->rqb_next_free;
        rqbd = rq->rqb_freelist[free_index];
        rq->rqb_freelist[free_index] = NULL;
        rq->rqb_next_free = GET_Q_NEXT(free_index, 1, rq->cfg.nbufs);
        return (rqbd);
} /* oce_rqb_alloc */

/*
 * function to free an RQ buffer
 *
 * rq - pointer to RQ structure
 * rqbd - pointer to receive buffer descriptor
 *
 * return none
 */
static inline void
oce_rqb_free(struct oce_rq *rq, oce_rq_bdesc_t *rqbd)
{
        uint32_t free_index;
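
        /*
         * Buffers may be returned both from the RX path and from the
         * mblk free callback (oce_rx_pool_free()), so the return-cache
         * head is protected by rc_lock.
         */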
        mutex_enter(&rq->rc_lock);
        free_index = rq->rqb_rc_head;
        rq->rqb_freelist[free_index] = rqbd;
        rq->rqb_rc_head = GET_Q_NEXT(free_index, 1, rq->cfg.nbufs);
        mutex_exit(&rq->rc_lock);
        atomic_add_32(&rq->rqb_free, 1);
} /* oce_rqb_free */

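/*
 * function to post receive buffers to the device by ringing the
 * RQ doorbell
 *
 * rq - pointer to RQ structure
 * nbufs - number of buffers being posted
 *
 * return none
 */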
static void
oce_rq_post_buffer(struct oce_rq *rq, int nbufs)
{
        pd_rxulp_db_t rxdb_reg;
        int count;
        struct oce_dev *dev = rq->parent;

        rxdb_reg.dw0 = 0;
        rxdb_reg.bits.qid = rq->rq_id & DB_RQ_ID_MASK;

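        /* Post in chunks of OCE_MAX_RQ_POSTS per doorbell write. */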
        for (count = nbufs/OCE_MAX_RQ_POSTS; count > 0; count--) {
                rxdb_reg.bits.num_posted = OCE_MAX_RQ_POSTS;
                OCE_DB_WRITE32(dev, PD_RXULP_DB, rxdb_reg.dw0);
                rq->buf_avail += OCE_MAX_RQ_POSTS;
                nbufs -= OCE_MAX_RQ_POSTS;
        }
        if (nbufs > 0) {
                rxdb_reg.bits.num_posted = nbufs;
                OCE_DB_WRITE32(dev, PD_RXULP_DB, rxdb_reg.dw0);
                rq->buf_avail += nbufs;
        }
}

/*
 * function to charge a given RQ with buffers from the free list
 *
 * rq - pointer to the RQ to charge
 * nbufs - number of buffers to be charged
 * repost - B_TRUE to repost the buffers already in the shadow ring,
 *     B_FALSE to pull fresh buffers from the free list
 *
 * return number of RQEs charged
 */
static inline int
oce_rq_charge(struct oce_rq *rq, uint32_t nbufs, boolean_t repost)
{
        struct oce_nic_rqe *rqe;
        oce_rq_bdesc_t *rqbd;
        oce_rq_bdesc_t **shadow_rq;
        int cnt;
        int cur_index;
        oce_ring_buffer_t *ring;

        shadow_rq = rq->shadow_ring;
        ring = rq->ring;
        cur_index = ring->cidx;

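        /*
         * On repost, descriptors are taken back from the shadow ring
         * starting at the consumer index; otherwise fresh descriptors are
         * pulled from the free list.  Each RQE is filled with the buffer's
         * DMA address and the descriptor is recorded in the shadow ring at
         * the producer index so it can be located at completion time.
         */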
        for (cnt = 0; cnt < nbufs; cnt++) {
                if (!repost) {
                        rqbd = oce_rqb_alloc(rq);
                } else {
                        /* just repost the buffers from shadow ring */
                        rqbd = shadow_rq[cur_index];
                        cur_index = GET_Q_NEXT(cur_index, 1, ring->num_items);
                }
                /* fill the rqes */
                rqe = RING_GET_PRODUCER_ITEM_VA(rq->ring,
                    struct oce_nic_rqe);
                rqe->u0.s.frag_pa_lo = rqbd->frag_addr.dw.addr_lo;
                rqe->u0.s.frag_pa_hi = rqbd->frag_addr.dw.addr_hi;
                shadow_rq[rq->ring->pidx] = rqbd;
                DW_SWAP(u32ptr(rqe), sizeof (struct oce_nic_rqe));
                RING_PUT(rq->ring, 1);
        }

        return (cnt);
} /* oce_rq_charge */

/*
 * function to release the posted buffers
 *
 * rq - pointer to the RQ to discharge
 *
 * return none
 */
void
oce_rq_discharge(struct oce_rq *rq)
{
        oce_rq_bdesc_t *rqbd;
        oce_rq_bdesc_t **shadow_rq;

        shadow_rq = rq->shadow_ring;
        /* Free the posted buffers since the RQ is already being destroyed */
        while ((int32_t)rq->buf_avail > 0) {
                rqbd = shadow_rq[rq->ring->cidx];
                oce_rqb_free(rq, rqbd);
                RING_GET(rq->ring, 1);
                rq->buf_avail--;
        }
}

/*
 * function to process a single packet
 *
 * dev - software handle to the device
 * rq - pointer to the RQ being processed
 * cqe - pointer to the completion queue entry
 *
 * return mblk pointer => success, NULL => error
 */
static inline mblk_t *
oce_rx(struct oce_dev *dev, struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
{
        mblk_t *mp;
        int pkt_len;
        int32_t frag_cnt = 0;
        mblk_t **mblk_tail;
        mblk_t  *mblk_head;
        int frag_size;
        oce_rq_bdesc_t *rqbd;
        uint16_t cur_index;
        oce_ring_buffer_t *ring;
        int i;

        frag_cnt  = cqe->u0.s.num_fragments & 0x7;
        mblk_head = NULL;
        mblk_tail = &mblk_head;

        /* Get the relevant queue pointers and the total packet length */
        ring = rq->ring;
        cur_index = ring->cidx;
        pkt_len = cqe->u0.s.pkt_size;
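
        /*
         * Loaned-buffer path: each fragment's pre-allocated mblk is handed
         * up the stack and chained via b_cont; the consumed descriptors are
         * replaced with fresh ones from the free list after the loop.
         */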
        for (i = 0; i < frag_cnt; i++) {
                rqbd = rq->shadow_ring[cur_index];
                if (rqbd->mp == NULL) {
                        rqbd->mp = desballoc((uchar_t *)rqbd->rqb->base,
                            rqbd->rqb->size, 0, &rqbd->fr_rtn);
                        if (rqbd->mp == NULL) {
                                return (NULL);
                        }

                        rqbd->mp->b_rptr =
                            (uchar_t *)rqbd->rqb->base + OCE_RQE_BUF_HEADROOM;
                }

                mp = rqbd->mp;
                frag_size  = (pkt_len > rq->cfg.frag_size) ?
                    rq->cfg.frag_size : pkt_len;
                mp->b_wptr = mp->b_rptr + frag_size;
                pkt_len   -= frag_size;
                mp->b_next = mp->b_cont = NULL;
                /* Chain the message mblks */
                *mblk_tail = mp;
                mblk_tail = &mp->b_cont;
                (void) DBUF_SYNC(rqbd->rqb, DDI_DMA_SYNC_FORCPU);
                cur_index = GET_Q_NEXT(cur_index, 1, ring->num_items);
        }

        if (mblk_head == NULL) {
                oce_log(dev, CE_WARN, MOD_RX, "%s", "oce_rx:no frags?");
                return (NULL);
        }

        /* replace the buffers with new ones */
        (void) oce_rq_charge(rq, frag_cnt, B_FALSE);
        atomic_add_32(&rq->pending, frag_cnt);
        return (mblk_head);
} /* oce_rx */

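/*
 * function to process a single packet by copying it out of the posted
 * DMA buffers (used for small frames or when loanable buffers are scarce)
 *
 * dev - software handle to the device (unused)
 * rq - pointer to the RQ being processed
 * cqe - pointer to the completion queue entry
 *
 * return mblk pointer => success, NULL => allocation failure
 */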
static inline mblk_t *
oce_rx_bcopy(struct oce_dev *dev, struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
{
        mblk_t *mp;
        int pkt_len;
        int alloc_len;
        int32_t frag_cnt = 0;
        int frag_size;
        oce_rq_bdesc_t *rqbd;
        unsigned char  *rptr;
        uint32_t cur_index;
        oce_ring_buffer_t *ring;
        oce_rq_bdesc_t **shadow_rq;
        int cnt = 0;

        _NOTE(ARGUNUSED(dev));

        shadow_rq = rq->shadow_ring;
        pkt_len = cqe->u0.s.pkt_size;
        alloc_len = pkt_len + OCE_RQE_BUF_HEADROOM;
        frag_cnt = cqe->u0.s.num_fragments & 0x7;

        mp = allocb(alloc_len, BPRI_HI);
        if (mp == NULL) {
                return (NULL);
        }

        mp->b_rptr += OCE_RQE_BUF_HEADROOM;
        rptr = mp->b_rptr;
        mp->b_wptr = mp->b_rptr + pkt_len;
        ring = rq->ring;

        cur_index = ring->cidx;
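
        /*
         * Copy each fragment out of the posted DMA buffers into the new
         * mblk; the same descriptors are then reposted to the hardware
         * (repost == B_TRUE in the oce_rq_charge() call below).
         */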
        for (cnt = 0; cnt < frag_cnt; cnt++) {
                rqbd = shadow_rq[cur_index];
                frag_size  = (pkt_len > rq->cfg.frag_size) ?
                    rq->cfg.frag_size : pkt_len;
                (void) DBUF_SYNC(rqbd->rqb, DDI_DMA_SYNC_FORCPU);
                bcopy(rqbd->rqb->base + OCE_RQE_BUF_HEADROOM, rptr, frag_size);
                rptr += frag_size;
                pkt_len   -= frag_size;
                cur_index = GET_Q_NEXT(cur_index, 1, ring->num_items);
        }
        (void) oce_rq_charge(rq, frag_cnt, B_TRUE);
        return (mp);
}

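/*
 * function to set the hardware checksum offload flags on a received mblk
 * based on the checksum results reported in the completion entry
 *
 * mp - mblk carrying the received packet
 * cqe - pointer to the completion queue entry
 *
 * return none
 */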
static inline void
oce_set_rx_oflags(mblk_t *mp, struct oce_nic_rx_cqe *cqe)
{
        int csum_flags = 0;

        /* set flags */
        if (cqe->u0.s.ip_cksum_pass) {
                csum_flags |= HCK_IPV4_HDRCKSUM_OK;
        }

        if (cqe->u0.s.l4_cksum_pass) {
                csum_flags |= (HCK_FULLCKSUM | HCK_FULLCKSUM_OK);
        }

        if (csum_flags) {
                (void) mac_hcksum_set(mp, 0, 0, 0, 0, csum_flags);
        }
}

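/*
 * function to insert a VLAN tag into a received frame by moving the
 * Ethernet header back into the mblk headroom
 *
 * mp - mblk carrying the received packet
 * vtag - VLAN tag from the completion queue entry
 *
 * return none
 */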
static inline void
oce_rx_insert_tag(mblk_t *mp, uint16_t vtag)
{
        struct ether_vlan_header *ehp;

        (void) memmove(mp->b_rptr - VTAG_SIZE,
            mp->b_rptr, 2 * ETHERADDRL);
        mp->b_rptr -= VTAG_SIZE;
        ehp = (struct ether_vlan_header *)voidptr(mp->b_rptr);
        ehp->ether_tpid = htons(ETHERTYPE_VLAN);
        ehp->ether_tci = LE_16(vtag);
}

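/*
 * function to drop a received packet by returning its buffers
 * to the free list
 *
 * rq - pointer to the RQ being processed
 * cqe - pointer to the completion queue entry
 *
 * return none
 */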
static inline void
oce_rx_drop_pkt(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
{
        int frag_cnt;
        oce_rq_bdesc_t *rqbd;
        oce_rq_bdesc_t  **shadow_rq;

        shadow_rq = rq->shadow_ring;
        for (frag_cnt = 0; frag_cnt < cqe->u0.s.num_fragments; frag_cnt++) {
                rqbd = shadow_rq[rq->ring->cidx];
                oce_rqb_free(rq, rqbd);
                RING_GET(rq->ring, 1);
        }
}

/*
 * function to process the completion queue of a receive queue
 *
 * arg - pointer to the RQ to process
 *
 * return number of cqes processed
 */
uint16_t
oce_drain_rq_cq(void *arg)
{
        struct oce_nic_rx_cqe *cqe;
        struct oce_rq *rq;
        mblk_t *mp = NULL;
        mblk_t *mblk_head;
        mblk_t **mblk_tail;
        uint16_t num_cqe = 0;
        struct oce_cq  *cq;
        struct oce_dev *dev;
        int32_t frag_cnt;
        uint32_t nbufs = 0;

        rq = (struct oce_rq *)arg;
        dev = rq->parent;
        cq = rq->cq;
        mblk_head = NULL;
        mblk_tail = &mblk_head;

        cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);

        (void) DBUF_SYNC(cq->ring->dbuf, DDI_DMA_SYNC_FORKERNEL);
        /* dequeue till you reach an invalid cqe */
        while (RQ_CQE_VALID(cqe)) {
                DW_SWAP(u32ptr(cqe), sizeof (struct oce_nic_rx_cqe));
                frag_cnt = cqe->u0.s.num_fragments & 0x7;
                /*
                 * Copy small frames; loan buffers only when the frame is
                 * large enough and enough free buffers remain to recharge
                 * the ring.
                 */
                if ((cqe->u0.s.pkt_size < dev->rx_bcopy_limit) ||
                    (oce_atomic_reserve(&rq->rqb_free, frag_cnt) < 0)) {
                        mp = oce_rx_bcopy(dev, rq, cqe);
                } else {
                        mp = oce_rx(dev, rq, cqe);
                        if (mp == NULL) {
                                atomic_add_32(&rq->rqb_free, frag_cnt);
                                mp = oce_rx_bcopy(dev, rq, cqe);
                        }
                }
                if (mp != NULL) {
                        if (dev->function_mode & FLEX10_MODE) {
                                if (cqe->u0.s.vlan_tag_present &&
                                    cqe->u0.s.qnq) {
                                        oce_rx_insert_tag(mp,
                                            cqe->u0.s.vlan_tag);
                                }
                        } else if (cqe->u0.s.vlan_tag_present) {
                                oce_rx_insert_tag(mp, cqe->u0.s.vlan_tag);
                        }
                        oce_set_rx_oflags(mp, cqe);

                        *mblk_tail = mp;
                        mblk_tail = &mp->b_next;
                } else {
                        (void) oce_rq_charge(rq, frag_cnt, B_TRUE);
                }
                RING_GET(rq->ring, frag_cnt);
                rq->buf_avail -= frag_cnt;
                nbufs += frag_cnt;

                oce_rq_post_buffer(rq, frag_cnt);
                RQ_CQE_INVALIDATE(cqe);
                RING_GET(cq->ring, 1);
                cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring,
                    struct oce_nic_rx_cqe);
                num_cqe++;
                /* limit the number of CQEs processed in one invocation */
                if (num_cqe > dev->rx_pkt_per_intr) {
                        break;
                }
        } /* for all valid CQEs */

        if (mblk_head) {
                mac_rx(dev->mac_handle, NULL, mblk_head);
        }
        oce_arm_cq(dev, cq->cq_id, num_cqe, B_TRUE);
        return (num_cqe);
} /* oce_drain_rq_cq */

/*
 * function to free an mblk data buffer back to the RQ pool
 *
 * arg - pointer to the receive buffer descriptor
 *
 * return none
 */
void
oce_rx_pool_free(char *arg)
{
        oce_rq_bdesc_t *rqbd;
        struct oce_rq  *rq;

        /* During destroy, arg will be NULL */
        if (arg == NULL) {
                return;
        }

        /* retrieve the pointers from arg */
        rqbd = (oce_rq_bdesc_t *)(void *)arg;
        rq = rqbd->rq;
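
        /*
         * Re-arm the descriptor with a fresh mblk before returning it to
         * the free list so the buffer can be loaned up the stack again.
         */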
        rqbd->mp = desballoc((uchar_t *)rqbd->rqb->base,
            rqbd->rqb->size, 0, &rqbd->fr_rtn);

        if (rqbd->mp) {
                rqbd->mp->b_rptr =
                    (uchar_t *)rqbd->rqb->base + OCE_RQE_BUF_HEADROOM;
        }

        oce_rqb_free(rq, rqbd);
        (void) atomic_add_32(&rq->pending, -1);
} /* oce_rx_pool_free */

/*
 * function to stop the RX by draining and dropping any completions
 * that arrive during the drain period
 *
 * rq - pointer to RQ structure
 *
 * return none
 */
void
oce_clean_rq(struct oce_rq *rq)
{
        uint16_t num_cqe = 0;
        struct oce_cq  *cq;
        struct oce_dev *dev;
        struct oce_nic_rx_cqe *cqe;
        int32_t ti = 0;

        dev = rq->parent;
        cq = rq->cq;
        cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
        /* for up to DEFAULT_DRAIN_TIME ms, drop any completions that arrive */
        for (ti = 0; ti < DEFAULT_DRAIN_TIME; ti++) {

                while (RQ_CQE_VALID(cqe)) {
                        DW_SWAP(u32ptr(cqe), sizeof (struct oce_nic_rx_cqe));
                        oce_rx_drop_pkt(rq, cqe);
                        atomic_add_32(&rq->buf_avail,
                            -(cqe->u0.s.num_fragments & 0x7));
                        oce_arm_cq(dev, cq->cq_id, 1, B_TRUE);
                        RQ_CQE_INVALIDATE(cqe);
                        RING_GET(cq->ring, 1);
                        cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring,
                            struct oce_nic_rx_cqe);
                        num_cqe++;
                }
                OCE_MSDELAY(1);
        }
} /* oce_clean_rq */

/*
 * function to start the RX
 *
 * rq - pointer to RQ structure
 *
 * return 0 => success
 */
int
oce_start_rq(struct oce_rq *rq)
{
        int ret = 0;
        int to_charge = 0;
        struct oce_dev *dev = rq->parent;
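
        /*
         * Charge the RQ with as many buffers as are currently free (up to
         * the configured queue length), post them to the hardware and arm
         * the completion queue.
         */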
        to_charge = rq->cfg.q_len - rq->buf_avail;
        to_charge = min(to_charge, rq->rqb_free);
        atomic_add_32(&rq->rqb_free, -to_charge);
        (void) oce_rq_charge(rq, to_charge, B_FALSE);
        /* ok to do it here since Rx has not even started */
        oce_rq_post_buffer(rq, to_charge);
        oce_arm_cq(dev, rq->cq->cq_id, 0, B_TRUE);
        return (ret);
} /* oce_start_rq */

/* Checks for RX buffers still pending with the stack */
int
oce_rx_pending(struct oce_dev *dev, struct oce_rq *rq, int32_t timeout)
{
        int ti;
        _NOTE(ARGUNUSED(dev));

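        /*
         * Wait (up to timeout * 10 ms) for the stack to free any loaned
         * buffers; rq->pending is decremented in oce_rx_pool_free().
         */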
        for (ti = 0; ti < timeout; ti++) {
                if (rq->pending > 0) {
                        OCE_MSDELAY(10);
                        continue;
                } else {
                        rq->pending = 0;
                        break;
                }
        }
        return (rq->pending);
}