1 /*
   2  * CDDL HEADER START
   3  *
   4  * The contents of this file are subject to the terms of the
   5  * Common Development and Distribution License (the "License").
   6  * You may not use this file except in compliance with the License.
   7  *
   8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9  * or http://www.opensolaris.org/os/licensing.
  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 /*
  22  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
  23  * Use is subject to license terms.
  24  */
  25 
  26 #include <hxge_impl.h>
  27 #include <hxge_rxdma.h>
  28 #include <hpi.h>
  29 #include <hpi_vir.h>
  30 
  31 /*
  32  * Number of buffer blocks to accumulate before re-enabling the DMA
  33  * channel after an RBR empty condition is seen.
  34  */
  35 #define HXGE_RBR_EMPTY_THRESHOLD        64
  36 
  37 /*
  38  * Globals: tunable parameters (/etc/system or adb)
  39  *
  40  */
  41 extern uint32_t hxge_rbr_size;
  42 extern uint32_t hxge_rcr_size;
  43 extern uint32_t hxge_rbr_spare_size;
  44 extern uint32_t hxge_mblks_pending;
  45 
  46 /*
  47  * Tunables to manage the receive buffer blocks.
  48  *
  49  * hxge_rx_threshold_hi: copy all buffers.
  50  * hxge_rx_buf_size_type: receive buffer block size type.
  51  * hxge_rx_threshold_lo: copy only up to the tunable block size type.
  52  */
  53 extern hxge_rxbuf_threshold_t hxge_rx_threshold_hi;
  54 extern hxge_rxbuf_type_t hxge_rx_buf_size_type;
  55 extern hxge_rxbuf_threshold_t hxge_rx_threshold_lo;
  56 
  57 /*
  58  * Static local functions.
  59  */
  60 static hxge_status_t hxge_map_rxdma(p_hxge_t hxgep);
  61 static void hxge_unmap_rxdma(p_hxge_t hxgep);
  62 static hxge_status_t hxge_rxdma_hw_start_common(p_hxge_t hxgep);
  63 static hxge_status_t hxge_rxdma_hw_start(p_hxge_t hxgep);
  64 static void hxge_rxdma_hw_stop(p_hxge_t hxgep);
  65 static hxge_status_t hxge_map_rxdma_channel(p_hxge_t hxgep, uint16_t channel,
  66     p_hxge_dma_common_t *dma_buf_p, p_rx_rbr_ring_t *rbr_p,
  67     uint32_t num_chunks, p_hxge_dma_common_t *dma_rbr_cntl_p,
  68     p_hxge_dma_common_t *dma_rcr_cntl_p, p_hxge_dma_common_t *dma_mbox_cntl_p,
  69     p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p);
  70 static void hxge_unmap_rxdma_channel(p_hxge_t hxgep, uint16_t channel,
  71         p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p);
  72 static hxge_status_t hxge_map_rxdma_channel_cfg_ring(p_hxge_t hxgep,
  73     uint16_t dma_channel, p_hxge_dma_common_t *dma_rbr_cntl_p,
  74     p_hxge_dma_common_t *dma_rcr_cntl_p, p_hxge_dma_common_t *dma_mbox_cntl_p,
  75     p_rx_rbr_ring_t *rbr_p, p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p);
  76 static void hxge_unmap_rxdma_channel_cfg_ring(p_hxge_t hxgep,
  77         p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p);
  78 static hxge_status_t hxge_map_rxdma_channel_buf_ring(p_hxge_t hxgep,
  79         uint16_t channel, p_hxge_dma_common_t *dma_buf_p,
  80         p_rx_rbr_ring_t *rbr_p, uint32_t num_chunks);
  81 static void hxge_unmap_rxdma_channel_buf_ring(p_hxge_t hxgep,
  82         p_rx_rbr_ring_t rbr_p);
  83 static hxge_status_t hxge_rxdma_start_channel(p_hxge_t hxgep, uint16_t channel,
  84         p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p,
  85         int n_init_kick);
  86 static hxge_status_t hxge_rxdma_stop_channel(p_hxge_t hxgep, uint16_t channel);
  87 static mblk_t *hxge_rx_pkts(p_hxge_t hxgep, uint_t vindex, p_hxge_ldv_t ldvp,
  88         p_rx_rcr_ring_t rcr_p, rdc_stat_t cs, int bytes_to_read);
  89 static uint32_t hxge_scan_for_last_eop(p_rx_rcr_ring_t rcr_p,
  90     p_rcr_entry_t rcr_desc_rd_head_p, uint32_t num_rcrs);
  91 static void hxge_receive_packet(p_hxge_t hxgep, p_rx_rcr_ring_t rcr_p,
  92         p_rcr_entry_t rcr_desc_rd_head_p, boolean_t *multi_p,
  93         mblk_t ** mp, mblk_t ** mp_cont, uint32_t *invalid_rcr_entry);
  94 static hxge_status_t hxge_disable_rxdma_channel(p_hxge_t hxgep,
  95         uint16_t channel);
  96 static p_rx_msg_t hxge_allocb(size_t, uint32_t, p_hxge_dma_common_t);
  97 static void hxge_freeb(p_rx_msg_t);
  98 static hxge_status_t hxge_rx_err_evnts(p_hxge_t hxgep, uint_t index,
  99         p_hxge_ldv_t ldvp, rdc_stat_t cs);
 100 static hxge_status_t hxge_rxbuf_index_info_init(p_hxge_t hxgep,
 101         p_rx_rbr_ring_t rx_dmap);
 102 static hxge_status_t hxge_rxdma_fatal_err_recover(p_hxge_t hxgep,
 103         uint16_t channel);
 104 static hxge_status_t hxge_rx_port_fatal_err_recover(p_hxge_t hxgep);
 105 static void hxge_rbr_empty_restore(p_hxge_t hxgep,
 106         p_rx_rbr_ring_t rx_rbr_p);
 107 
 108 hxge_status_t
 109 hxge_init_rxdma_channels(p_hxge_t hxgep)
 110 {
 111         hxge_status_t           status = HXGE_OK;
 112         block_reset_t           reset_reg;
 113         int                     i;
 114 
 115         HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_init_rxdma_channels"));
 116 
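        /*
         * Mark every RDC as awaiting its first interrupt; hxge_rx_pkts()
         * uses this flag when computing how many queued entries to process.
         */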
 117         for (i = 0; i < HXGE_MAX_RDCS; i++)
 118                 hxgep->rdc_first_intr[i] = B_TRUE;
 119 
 120         /* Reset RDC block from PEU to clear any previous state */
 121         reset_reg.value = 0;
 122         reset_reg.bits.rdc_rst = 1;
 123         HXGE_REG_WR32(hxgep->hpi_handle, BLOCK_RESET, reset_reg.value);
 124         HXGE_DELAY(1000);
 125 
 126         status = hxge_map_rxdma(hxgep);
 127         if (status != HXGE_OK) {
 128                 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
 129                     "<== hxge_init_rxdma: status 0x%x", status));
 130                 return (status);
 131         }
 132 
 133         status = hxge_rxdma_hw_start_common(hxgep);
 134         if (status != HXGE_OK) {
 135                 hxge_unmap_rxdma(hxgep);
 136         }
 137 
 138         status = hxge_rxdma_hw_start(hxgep);
 139         if (status != HXGE_OK) {
 140                 hxge_unmap_rxdma(hxgep);
 141         }
 142 
 143         HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
 144             "<== hxge_init_rxdma_channels: status 0x%x", status));
 145         return (status);
 146 }
 147 
 148 void
 149 hxge_uninit_rxdma_channels(p_hxge_t hxgep)
 150 {
 151         HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_uninit_rxdma_channels"));
 152 
 153         hxge_rxdma_hw_stop(hxgep);
 154         hxge_unmap_rxdma(hxgep);
 155 
 156         HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_uninit_rxdma_channels"));
 157 }
 158 
 159 hxge_status_t
 160 hxge_init_rxdma_channel_cntl_stat(p_hxge_t hxgep, uint16_t channel,
 161     rdc_stat_t *cs_p)
 162 {
 163         hpi_handle_t    handle;
 164         hpi_status_t    rs = HPI_SUCCESS;
 165         hxge_status_t   status = HXGE_OK;
 166 
 167         HXGE_DEBUG_MSG((hxgep, DMA_CTL,
 168             "==> hxge_init_rxdma_channel_cntl_stat"));
 169 
 170         handle = HXGE_DEV_HPI_HANDLE(hxgep);
 171         rs = hpi_rxdma_control_status(handle, OP_SET, channel, cs_p);
 172 
 173         if (rs != HPI_SUCCESS) {
 174                 status = HXGE_ERROR | rs;
 175         }
 176         return (status);
 177 }
 178 
 179 
 180 hxge_status_t
 181 hxge_enable_rxdma_channel(p_hxge_t hxgep, uint16_t channel,
 182     p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p,
 183     int n_init_kick)
 184 {
 185         hpi_handle_t            handle;
 186         rdc_desc_cfg_t          rdc_desc;
 187         rdc_rcr_cfg_b_t         *cfgb_p;
 188         hpi_status_t            rs = HPI_SUCCESS;
 189 
 190         HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_enable_rxdma_channel"));
 191         handle = HXGE_DEV_HPI_HANDLE(hxgep);
 192 
 193         /*
 194          * Use the configuration data composed at init time and write the
 195          * receive ring configuration to the hardware.
 196          */
 197         rdc_desc.mbox_enable = 1;
 198         rdc_desc.mbox_addr = mbox_p->mbox_addr;
 199         HXGE_DEBUG_MSG((hxgep, RX_CTL,
 200             "==> hxge_enable_rxdma_channel: mboxp $%p($%p)",
 201             mbox_p->mbox_addr, rdc_desc.mbox_addr));
 202 
 203         rdc_desc.rbr_len = rbr_p->rbb_max;
 204         rdc_desc.rbr_addr = rbr_p->rbr_addr;
 205 
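        /* Translate the receive buffer block size code into the RDC page size. */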
 206         switch (hxgep->rx_bksize_code) {
 207         case RBR_BKSIZE_4K:
 208                 rdc_desc.page_size = SIZE_4KB;
 209                 break;
 210         case RBR_BKSIZE_8K:
 211                 rdc_desc.page_size = SIZE_8KB;
 212                 break;
 213         }
 214 
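        /* The three packet buffer sizes this RDC will use, all marked valid. */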
 215         rdc_desc.size0 = rbr_p->hpi_pkt_buf_size0;
 216         rdc_desc.valid0 = 1;
 217 
 218         rdc_desc.size1 = rbr_p->hpi_pkt_buf_size1;
 219         rdc_desc.valid1 = 1;
 220 
 221         rdc_desc.size2 = rbr_p->hpi_pkt_buf_size2;
 222         rdc_desc.valid2 = 1;
 223 
 224         rdc_desc.full_hdr = rcr_p->full_hdr_flag;
 225         rdc_desc.offset = rcr_p->sw_priv_hdr_len;
 226 
 227         rdc_desc.rcr_len = rcr_p->comp_size;
 228         rdc_desc.rcr_addr = rcr_p->rcr_addr;
 229 
 230         cfgb_p = &(rcr_p->rcr_cfgb);
 231         rdc_desc.rcr_threshold = cfgb_p->bits.pthres;
 232         rdc_desc.rcr_timeout = cfgb_p->bits.timeout;
 233         rdc_desc.rcr_timeout_enable = cfgb_p->bits.entout;
 234 
 235         HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_enable_rxdma_channel: "
 236             "rbr_len qlen %d pagesize code %d rcr_len %d",
 237             rdc_desc.rbr_len, rdc_desc.page_size, rdc_desc.rcr_len));
 238         HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_enable_rxdma_channel: "
 239             "size 0 %d size 1 %d size 2 %d",
 240             rbr_p->hpi_pkt_buf_size0, rbr_p->hpi_pkt_buf_size1,
 241             rbr_p->hpi_pkt_buf_size2));
 242 
 243         rs = hpi_rxdma_cfg_rdc_ring(handle, rbr_p->rdc, &rdc_desc);
 244         if (rs != HPI_SUCCESS) {
 245                 return (HXGE_ERROR | rs);
 246         }
 247 
 248         /*
 249          * Enable the timeout and threshold.
 250          */
 251         rs = hpi_rxdma_cfg_rdc_rcr_threshold(handle, channel,
 252             rdc_desc.rcr_threshold);
 253         if (rs != HPI_SUCCESS) {
 254                 return (HXGE_ERROR | rs);
 255         }
 256 
 257         rs = hpi_rxdma_cfg_rdc_rcr_timeout(handle, channel,
 258             rdc_desc.rcr_timeout);
 259         if (rs != HPI_SUCCESS) {
 260                 return (HXGE_ERROR | rs);
 261         }
 262 
 263         /* Kick the DMA engine */
 264         hpi_rxdma_rdc_rbr_kick(handle, channel, n_init_kick);
 265 
 266         /* Clear the rbr empty bit */
 267         (void) hpi_rxdma_channel_rbr_empty_clear(handle, channel);
 268 
 269         /*
 270          * Enable the DMA
 271          */
 272         rs = hpi_rxdma_cfg_rdc_enable(handle, channel);
 273         if (rs != HPI_SUCCESS) {
 274                 return (HXGE_ERROR | rs);
 275         }
 276 
 277         HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_enable_rxdma_channel"));
 278 
 279         return (HXGE_OK);
 280 }
 281 
 282 static hxge_status_t
 283 hxge_disable_rxdma_channel(p_hxge_t hxgep, uint16_t channel)
 284 {
 285         hpi_handle_t handle;
 286         hpi_status_t rs = HPI_SUCCESS;
 287 
 288         HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_disable_rxdma_channel"));
 289 
 290         handle = HXGE_DEV_HPI_HANDLE(hxgep);
 291 
 292         /* disable the DMA */
 293         rs = hpi_rxdma_cfg_rdc_disable(handle, channel);
 294         if (rs != HPI_SUCCESS) {
 295                 HXGE_DEBUG_MSG((hxgep, RX_CTL,
 296                     "<== hxge_disable_rxdma_channel:failed (0x%x)", rs));
 297                 return (HXGE_ERROR | rs);
 298         }
 299         HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_disable_rxdma_channel"));
 300         return (HXGE_OK);
 301 }
 302 
 303 hxge_status_t
 304 hxge_rxdma_channel_rcrflush(p_hxge_t hxgep, uint8_t channel)
 305 {
 306         hpi_handle_t    handle;
 307         hxge_status_t   status = HXGE_OK;
 308 
 309         HXGE_DEBUG_MSG((hxgep, DMA_CTL,
 310             "==> hxge_rxdma_channel_rcrflush"));
 311 
 312         handle = HXGE_DEV_HPI_HANDLE(hxgep);
 313         hpi_rxdma_rdc_rcr_flush(handle, channel);
 314 
 315         HXGE_DEBUG_MSG((hxgep, DMA_CTL,
 316             "<== hxge_rxdma_channel_rcrflush"));
 317         return (status);
 318 
 319 }
 320 
 321 #define MID_INDEX(l, r) (((r) + (l) + 1) >> 1)
 322 
 323 #define TO_LEFT -1
 324 #define TO_RIGHT 1
 325 #define BOTH_RIGHT (TO_RIGHT + TO_RIGHT)
 326 #define BOTH_LEFT (TO_LEFT + TO_LEFT)
 327 #define IN_MIDDLE (TO_RIGHT + TO_LEFT)
 328 #define NO_HINT 0xffffffff
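
/*
 * These macros drive the binary search in hxge_rxbuf_pp_to_vp(): base_side
 * and end_side each take TO_LEFT or TO_RIGHT depending on how the packet
 * address compares with the current chunk, and their sum (IN_MIDDLE,
 * BOTH_LEFT or BOTH_RIGHT) selects whether the search stops, moves left,
 * or moves right.
 */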
 329 
 330 /*ARGSUSED*/
 331 hxge_status_t
 332 hxge_rxbuf_pp_to_vp(p_hxge_t hxgep, p_rx_rbr_ring_t rbr_p,
 333     uint8_t pktbufsz_type, uint64_t *pkt_buf_addr_pp,
 334     uint64_t **pkt_buf_addr_p, uint32_t *bufoffset, uint32_t *msg_index)
 335 {
 336         int                     bufsize;
 337         uint64_t                pktbuf_pp;
 338         uint64_t                dvma_addr;
 339         rxring_info_t           *ring_info;
 340         int                     base_side, end_side;
 341         int                     r_index, l_index, anchor_index;
 342         int                     found, search_done;
 343         uint32_t                offset, chunk_size, block_size, page_size_mask;
 344         uint32_t                chunk_index, block_index, total_index;
 345         int                     max_iterations, iteration;
 346         rxbuf_index_info_t      *bufinfo;
 347 
 348         HXGE_DEBUG_MSG((hxgep, RX2_CTL, "==> hxge_rxbuf_pp_to_vp"));
 349 
 350         HXGE_DEBUG_MSG((hxgep, RX2_CTL,
 351             "==> hxge_rxbuf_pp_to_vp: buf_pp $%p btype %d",
 352             pkt_buf_addr_pp, pktbufsz_type));
 353 
 354 #if defined(__i386)
 355         pktbuf_pp = (uint64_t)(uint32_t)pkt_buf_addr_pp;
 356 #else
 357         pktbuf_pp = (uint64_t)pkt_buf_addr_pp;
 358 #endif
 359 
 360         switch (pktbufsz_type) {
 361         case 0:
 362                 bufsize = rbr_p->pkt_buf_size0;
 363                 break;
 364         case 1:
 365                 bufsize = rbr_p->pkt_buf_size1;
 366                 break;
 367         case 2:
 368                 bufsize = rbr_p->pkt_buf_size2;
 369                 break;
 370         case RCR_SINGLE_BLOCK:
 371                 bufsize = 0;
 372                 anchor_index = 0;
 373                 break;
 374         default:
 375                 return (HXGE_ERROR);
 376         }
 377 
 378         if (rbr_p->num_blocks == 1) {
 379                 anchor_index = 0;
 380                 ring_info = rbr_p->ring_info;
 381                 bufinfo = (rxbuf_index_info_t *)ring_info->buffer;
 382 
 383                 HXGE_DEBUG_MSG((hxgep, RX2_CTL,
 384                     "==> hxge_rxbuf_pp_to_vp: (found, 1 block) "
 385                     "buf_pp $%p btype %d anchor_index %d bufinfo $%p",
 386                     pkt_buf_addr_pp, pktbufsz_type, anchor_index, bufinfo));
 387 
 388                 goto found_index;
 389         }
 390 
 391         HXGE_DEBUG_MSG((hxgep, RX2_CTL,
 392             "==> hxge_rxbuf_pp_to_vp: buf_pp $%p btype %d anchor_index %d",
 393             pkt_buf_addr_pp, pktbufsz_type, anchor_index));
 394 
 395         ring_info = rbr_p->ring_info;
 396         found = B_FALSE;
 397         bufinfo = (rxbuf_index_info_t *)ring_info->buffer;
 398         iteration = 0;
 399         max_iterations = ring_info->max_iterations;
 400 
 401         /*
 402          * First check if this block has been seen recently. This is
 403          * indicated by a hint which is initialized when the first buffer
 404          * of the block is seen. The hint is reset when the last buffer of
 405          * the block has been processed. As three block sizes are
 406          * supported, three hints are kept. The idea behind the hints is
 407          * that once the hardware uses a block for a buffer of that size,
 408          * it will use it exclusively for that size until it is exhausted.
 409          * It is assumed that only a single block is in use for a given
 410          * buffer size at any given time.
 411          */
 412         if (ring_info->hint[pktbufsz_type] != NO_HINT) {
 413                 anchor_index = ring_info->hint[pktbufsz_type];
 414                 dvma_addr = bufinfo[anchor_index].dvma_addr;
 415                 chunk_size = bufinfo[anchor_index].buf_size;
 416                 if ((pktbuf_pp >= dvma_addr) &&
 417                     (pktbuf_pp < (dvma_addr + chunk_size))) {
 418                         found = B_TRUE;
 419                         /*
 420                          * Check if this is the last buffer in the block.
 421                          * If so, reset the hint for this buffer size.
 422                          */
 423 
 424                         if ((pktbuf_pp + bufsize) >= (dvma_addr + chunk_size))
 425                                 ring_info->hint[pktbufsz_type] = NO_HINT;
 426                 }
 427         }
 428 
 429         if (found == B_FALSE) {
 430                 HXGE_DEBUG_MSG((hxgep, RX2_CTL,
 431                     "==> hxge_rxbuf_pp_to_vp: (!found)"
 432                     "buf_pp $%p btype %d anchor_index %d",
 433                     pkt_buf_addr_pp, pktbufsz_type, anchor_index));
 434 
 435                 /*
 436                  * This is the first buffer of the block of this size, so the
 437                  * whole information array must be searched. The search uses a
 438                  * binary search algorithm and assumes that the information is
 439                  * already sorted in increasing order: info[0] < info[1] <
 440                  * info[2] < ... < info[n-1], where n is the size of the
 441                  * information array.
 442                  */
 443                 r_index = rbr_p->num_blocks - 1;
 444                 l_index = 0;
 445                 search_done = B_FALSE;
 446                 anchor_index = MID_INDEX(r_index, l_index);
 447                 while (search_done == B_FALSE) {
 448                         if ((r_index == l_index) ||
 449                             (iteration >= max_iterations))
 450                                 search_done = B_TRUE;
 451 
 452                         end_side = TO_RIGHT;    /* to the right */
 453                         base_side = TO_LEFT;    /* to the left */
 454                         /* read the DVMA address information and sort it */
 455                         dvma_addr = bufinfo[anchor_index].dvma_addr;
 456                         chunk_size = bufinfo[anchor_index].buf_size;
 457 
 458                         HXGE_DEBUG_MSG((hxgep, RX2_CTL,
 459                             "==> hxge_rxbuf_pp_to_vp: (searching)"
 460                             "buf_pp $%p btype %d "
 461                             "anchor_index %d chunk_size %d dvmaaddr $%p",
 462                             pkt_buf_addr_pp, pktbufsz_type, anchor_index,
 463                             chunk_size, dvma_addr));
 464 
 465                         if (pktbuf_pp >= dvma_addr)
 466                                 base_side = TO_RIGHT;   /* to the right */
 467                         if (pktbuf_pp < (dvma_addr + chunk_size))
 468                                 end_side = TO_LEFT;     /* to the left */
 469 
 470                         switch (base_side + end_side) {
 471                         case IN_MIDDLE:
 472                                 /* found */
 473                                 found = B_TRUE;
 474                                 search_done = B_TRUE;
 475                                 if ((pktbuf_pp + bufsize) <
 476                                     (dvma_addr + chunk_size))
 477                                         ring_info->hint[pktbufsz_type] =
 478                                             bufinfo[anchor_index].buf_index;
 479                                 break;
 480                         case BOTH_RIGHT:
 481                                 /* not found: go to the right */
 482                                 l_index = anchor_index + 1;
 483                                 anchor_index = MID_INDEX(r_index, l_index);
 484                                 break;
 485 
 486                         case BOTH_LEFT:
 487                                 /* not found: go to the left */
 488                                 r_index = anchor_index - 1;
 489                                 anchor_index = MID_INDEX(r_index, l_index);
 490                                 break;
 491                         default:        /* should not come here */
 492                                 return (HXGE_ERROR);
 493                         }
 494                         iteration++;
 495                 }
 496 
 497                 HXGE_DEBUG_MSG((hxgep, RX2_CTL,
 498                     "==> hxge_rxbuf_pp_to_vp: (search done)"
 499                     "buf_pp $%p btype %d anchor_index %d",
 500                     pkt_buf_addr_pp, pktbufsz_type, anchor_index));
 501         }
 502 
 503         if (found == B_FALSE) {
 504                 HXGE_DEBUG_MSG((hxgep, RX2_CTL,
 505                     "==> hxge_rxbuf_pp_to_vp: (search failed)"
 506                     "buf_pp $%p btype %d anchor_index %d",
 507                     pkt_buf_addr_pp, pktbufsz_type, anchor_index));
 508                 return (HXGE_ERROR);
 509         }
 510 
 511 found_index:
 512         HXGE_DEBUG_MSG((hxgep, RX2_CTL,
 513             "==> hxge_rxbuf_pp_to_vp: (FOUND1)"
 514             "buf_pp $%p btype %d bufsize %d anchor_index %d",
 515             pkt_buf_addr_pp, pktbufsz_type, bufsize, anchor_index));
 516 
 517         /* index of the first block in this chunk */
 518         chunk_index = bufinfo[anchor_index].start_index;
 519         dvma_addr = bufinfo[anchor_index].dvma_addr;
 520         page_size_mask = ring_info->block_size_mask;
 521 
 522         HXGE_DEBUG_MSG((hxgep, RX2_CTL,
 523             "==> hxge_rxbuf_pp_to_vp: (FOUND3), get chunk)"
 524             "buf_pp $%p btype %d bufsize %d "
 525             "anchor_index %d chunk_index %d dvma $%p",
 526             pkt_buf_addr_pp, pktbufsz_type, bufsize,
 527             anchor_index, chunk_index, dvma_addr));
 528 
 529         offset = pktbuf_pp - dvma_addr; /* offset within the chunk */
 530         block_size = rbr_p->block_size;      /* System  block(page) size */
 531 
 532         HXGE_DEBUG_MSG((hxgep, RX2_CTL,
 533             "==> hxge_rxbuf_pp_to_vp: (FOUND4), get chunk)"
 534             "buf_pp $%p btype %d bufsize %d "
 535             "anchor_index %d chunk_index %d dvma $%p "
 536             "offset %d block_size %d",
 537             pkt_buf_addr_pp, pktbufsz_type, bufsize, anchor_index,
 538             chunk_index, dvma_addr, offset, block_size));
 539         HXGE_DEBUG_MSG((hxgep, RX2_CTL, "==> getting total index"));
 540 
 541         block_index = (offset / block_size);    /* index within chunk */
 542         total_index = chunk_index + block_index;
 543 
 544         HXGE_DEBUG_MSG((hxgep, RX2_CTL,
 545             "==> hxge_rxbuf_pp_to_vp: "
 546             "total_index %d dvma_addr $%p "
 547             "offset %d block_size %d "
 548             "block_index %d ",
 549             total_index, dvma_addr, offset, block_size, block_index));
 550 
 551 #if defined(__i386)
 552         *pkt_buf_addr_p = (uint64_t *)((uint32_t)bufinfo[anchor_index].kaddr +
 553             (uint32_t)offset);
 554 #else
 555         *pkt_buf_addr_p = (uint64_t *)((uint64_t)bufinfo[anchor_index].kaddr +
 556             offset);
 557 #endif
 558 
 559         HXGE_DEBUG_MSG((hxgep, RX2_CTL,
 560             "==> hxge_rxbuf_pp_to_vp: "
 561             "total_index %d dvma_addr $%p "
 562             "offset %d block_size %d "
 563             "block_index %d "
 564             "*pkt_buf_addr_p $%p",
 565             total_index, dvma_addr, offset, block_size,
 566             block_index, *pkt_buf_addr_p));
 567 
 568         *msg_index = total_index;
 569         *bufoffset = (offset & page_size_mask);
 570 
 571         HXGE_DEBUG_MSG((hxgep, RX2_CTL,
 572             "==> hxge_rxbuf_pp_to_vp: get msg index: "
 573             "msg_index %d bufoffset_index %d",
 574             *msg_index, *bufoffset));
 575         HXGE_DEBUG_MSG((hxgep, RX2_CTL, "<== hxge_rxbuf_pp_to_vp"));
 576 
 577         return (HXGE_OK);
 578 }
 579 
 580 
 581 /*
 582  * Comparison function used by the sort routine (hxge_ksort)
 583  * below.
 584  */
 585 static int
 586 hxge_sort_compare(const void *p1, const void *p2)
 587 {
 588 
 589         rxbuf_index_info_t *a, *b;
 590 
 591         a = (rxbuf_index_info_t *)p1;
 592         b = (rxbuf_index_info_t *)p2;
 593 
 594         if (a->dvma_addr > b->dvma_addr)
 595                 return (1);
 596         if (a->dvma_addr < b->dvma_addr)
 597                 return (-1);
 598         return (0);
 599 }
 600 
 601 /*
 602  * Grabbed this sort implementation from common/syscall/avl.c
 603  *
 604  * Generic shellsort, from K&R (1st ed, p 58.), somewhat modified.
 605  * v = Ptr to array/vector of objs
 606  * n = # objs in the array
 607  * s = size of each obj (must be a multiple of the word size)
 608  * f = ptr to function to compare two objs;
 609  *      returns -1 (less than), 0 (equal), or 1 (greater than)
 610  */
 611 void
 612 hxge_ksort(caddr_t v, int n, int s, int (*f) ())
 613 {
 614         int             g, i, j, ii;
 615         unsigned int    *p1, *p2;
 616         unsigned int    tmp;
 617 
 618         /* No work to do */
 619         if (v == NULL || n <= 1)
 620                 return;
 621         /* Sanity check on arguments */
 622         ASSERT(((uintptr_t)v & 0x3) == 0 && (s & 0x3) == 0);
 623         ASSERT(s > 0);
 624 
 625         for (g = n / 2; g > 0; g /= 2) {
 626                 for (i = g; i < n; i++) {
 627                         for (j = i - g; j >= 0 &&
 628                             (*f) (v + j * s, v + (j + g) * s) == 1; j -= g) {
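                                /* The two objects are out of order: swap them word by word. */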
 629                                 p1 = (unsigned *)(v + j * s);
 630                                 p2 = (unsigned *)(v + (j + g) * s);
 631                                 for (ii = 0; ii < s / 4; ii++) {
 632                                         tmp = *p1;
 633                                         *p1++ = *p2;
 634                                         *p2++ = tmp;
 635                                 }
 636                         }
 637                 }
 638         }
 639 }
 640 
 641 /*
 642  * Initialize data structures required for rxdma
 643  * buffer dvma->vmem address lookup
 644  */
 645 /*ARGSUSED*/
 646 static hxge_status_t
 647 hxge_rxbuf_index_info_init(p_hxge_t hxgep, p_rx_rbr_ring_t rbrp)
 648 {
 649         int             index;
 650         rxring_info_t   *ring_info;
 651         int             max_iteration = 0, max_index = 0;
 652 
 653         HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_rxbuf_index_info_init"));
 654 
 655         ring_info = rbrp->ring_info;
 656         ring_info->hint[0] = NO_HINT;
 657         ring_info->hint[1] = NO_HINT;
 658         ring_info->hint[2] = NO_HINT;
 659         ring_info->hint[3] = NO_HINT;
 660         max_index = rbrp->num_blocks;
 661 
 662         /* read the DVMA address information and sort it */
 663         /* do init of the information array */
 664 
 665         HXGE_DEBUG_MSG((hxgep, DMA2_CTL,
 666             " hxge_rxbuf_index_info_init Sort ptrs"));
 667 
 668         /* sort the array */
 669         hxge_ksort((void *) ring_info->buffer, max_index,
 670             sizeof (rxbuf_index_info_t), hxge_sort_compare);
 671 
 672         for (index = 0; index < max_index; index++) {
 673                 HXGE_DEBUG_MSG((hxgep, DMA2_CTL,
 674                     " hxge_rxbuf_index_info_init: sorted chunk %d "
 675                     " ioaddr $%p kaddr $%p size %x",
 676                     index, ring_info->buffer[index].dvma_addr,
 677                     ring_info->buffer[index].kaddr,
 678                     ring_info->buffer[index].buf_size));
 679         }
 680 
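        /*
         * Compute an upper bound on the number of binary search iterations
         * needed to cover num_blocks entries; hxge_rxbuf_pp_to_vp() uses
         * this value to bound its search loop.
         */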
 681         max_iteration = 0;
 682         while (max_index >= (1ULL << max_iteration))
 683                 max_iteration++;
 684         ring_info->max_iterations = max_iteration + 1;
 685 
 686         HXGE_DEBUG_MSG((hxgep, DMA2_CTL,
 687             " hxge_rxbuf_index_info_init Find max iter %d",
 688             ring_info->max_iterations));
 689         HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_rxbuf_index_info_init"));
 690 
 691         return (HXGE_OK);
 692 }
 693 
 694 /*ARGSUSED*/
 695 void
 696 hxge_dump_rcr_entry(p_hxge_t hxgep, p_rcr_entry_t entry_p)
 697 {
 698 #ifdef  HXGE_DEBUG
 699 
 700         uint32_t bptr;
 701         uint64_t pp;
 702 
 703         bptr = entry_p->bits.pkt_buf_addr;
 704 
 705         HXGE_DEBUG_MSG((hxgep, RX_CTL,
 706             "\trcr entry $%p "
 707             "\trcr entry 0x%0llx "
 708             "\trcr entry 0x%08x "
 709             "\trcr entry 0x%08x "
 710             "\tvalue 0x%0llx\n"
 711             "\tmulti = %d\n"
 712             "\tpkt_type = 0x%x\n"
 713             "\terror = 0x%04x\n"
 714             "\tl2_len = %d\n"
 715             "\tpktbufsize = %d\n"
 716             "\tpkt_buf_addr = $%p\n"
 717             "\tpkt_buf_addr (<< 6) = $%p\n",
 718             entry_p,
 719             *(int64_t *)entry_p,
 720             *(int32_t *)entry_p,
 721             *(int32_t *)((char *)entry_p + 32),
 722             entry_p->value,
 723             entry_p->bits.multi,
 724             entry_p->bits.pkt_type,
 725             entry_p->bits.error,
 726             entry_p->bits.l2_len,
 727             entry_p->bits.pktbufsz,
 728             bptr,
 729             entry_p->bits.pkt_buf_addr_l));
 730 
 731         pp = (entry_p->value & RCR_PKT_BUF_ADDR_MASK) <<
 732             RCR_PKT_BUF_ADDR_SHIFT;
 733 
 734         HXGE_DEBUG_MSG((hxgep, RX_CTL, "rcr pp 0x%llx l2 len %d",
 735             pp, (*(int64_t *)entry_p >> 40) & 0x3fff));
 736 #endif
 737 }
 738 
 739 /*ARGSUSED*/
 740 void
 741 hxge_rxdma_stop(p_hxge_t hxgep)
 742 {
 743         HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rxdma_stop"));
 744 
 745         MUTEX_ENTER(&hxgep->vmac_lock);
 746         (void) hxge_rx_vmac_disable(hxgep);
 747         (void) hxge_rxdma_hw_mode(hxgep, HXGE_DMA_STOP);
 748         MUTEX_EXIT(&hxgep->vmac_lock);
 749 
 750         HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_rxdma_stop"));
 751 }
 752 
 753 void
 754 hxge_rxdma_stop_reinit(p_hxge_t hxgep)
 755 {
 756         HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rxdma_stop_reinit"));
 757 
 758         (void) hxge_rxdma_stop(hxgep);
 759         (void) hxge_uninit_rxdma_channels(hxgep);
 760         (void) hxge_init_rxdma_channels(hxgep);
 761 
 762         MUTEX_ENTER(&hxgep->vmac_lock);
 763         (void) hxge_rx_vmac_enable(hxgep);
 764         MUTEX_EXIT(&hxgep->vmac_lock);
 765 
 766         HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_rxdma_stop_reinit"));
 767 }
 768 
 769 hxge_status_t
 770 hxge_rxdma_hw_mode(p_hxge_t hxgep, boolean_t enable)
 771 {
 772         int                     i, ndmas;
 773         uint16_t                channel;
 774         p_rx_rbr_rings_t        rx_rbr_rings;
 775         p_rx_rbr_ring_t         *rbr_rings;
 776         hpi_handle_t            handle;
 777         hpi_status_t            rs = HPI_SUCCESS;
 778         hxge_status_t           status = HXGE_OK;
 779 
 780         HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
 781             "==> hxge_rxdma_hw_mode: mode %d", enable));
 782 
 783         if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
 784                 HXGE_DEBUG_MSG((hxgep, RX_CTL,
 785                     "<== hxge_rxdma_mode: not initialized"));
 786                 return (HXGE_ERROR);
 787         }
 788 
 789         rx_rbr_rings = hxgep->rx_rbr_rings;
 790         if (rx_rbr_rings == NULL) {
 791                 HXGE_DEBUG_MSG((hxgep, RX_CTL,
 792                     "<== hxge_rxdma_mode: NULL ring pointer"));
 793                 return (HXGE_ERROR);
 794         }
 795 
 796         if (rx_rbr_rings->rbr_rings == NULL) {
 797                 HXGE_DEBUG_MSG((hxgep, RX_CTL,
 798                     "<== hxge_rxdma_mode: NULL rbr rings pointer"));
 799                 return (HXGE_ERROR);
 800         }
 801 
 802         ndmas = rx_rbr_rings->ndmas;
 803         if (!ndmas) {
 804                 HXGE_DEBUG_MSG((hxgep, RX_CTL,
 805                     "<== hxge_rxdma_mode: no channel"));
 806                 return (HXGE_ERROR);
 807         }
 808 
 809         HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
 810             "==> hxge_rxdma_mode (ndmas %d)", ndmas));
 811 
 812         rbr_rings = rx_rbr_rings->rbr_rings;
 813 
 814         handle = HXGE_DEV_HPI_HANDLE(hxgep);
 815 
 816         for (i = 0; i < ndmas; i++) {
 817                 if (rbr_rings == NULL || rbr_rings[i] == NULL) {
 818                         continue;
 819                 }
 820                 channel = rbr_rings[i]->rdc;
 821                 if (enable) {
 822                         HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
 823                             "==> hxge_rxdma_hw_mode: channel %d (enable)",
 824                             channel));
 825                         rs = hpi_rxdma_cfg_rdc_enable(handle, channel);
 826                 } else {
 827                         HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
 828                             "==> hxge_rxdma_hw_mode: channel %d (disable)",
 829                             channel));
 830                         rs = hpi_rxdma_cfg_rdc_disable(handle, channel);
 831                 }
 832         }
 833 
 834         status = ((rs == HPI_SUCCESS) ? HXGE_OK : HXGE_ERROR | rs);
 835         HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
 836             "<== hxge_rxdma_hw_mode: status 0x%x", status));
 837 
 838         return (status);
 839 }
 840 
 841 /*
 842  * Static functions start here.
 843  */
 844 static p_rx_msg_t
 845 hxge_allocb(size_t size, uint32_t pri, p_hxge_dma_common_t dmabuf_p)
 846 {
 847         p_rx_msg_t              hxge_mp = NULL;
 848         p_hxge_dma_common_t     dmamsg_p;
 849         uchar_t                 *buffer;
 850 
 851         hxge_mp = KMEM_ZALLOC(sizeof (rx_msg_t), KM_NOSLEEP);
 852         if (hxge_mp == NULL) {
 853                 HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL,
 854                     "Allocation of a rx msg failed."));
 855                 goto hxge_allocb_exit;
 856         }
 857 
 858         hxge_mp->use_buf_pool = B_FALSE;
 859         if (dmabuf_p) {
 860                 hxge_mp->use_buf_pool = B_TRUE;
 861 
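                /*
                 * Carve a 'size'-byte block off the front of the shared DMA
                 * buffer and advance its kernel address, I/O address, length,
                 * offset and DMA cookie to account for the piece handed out.
                 */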
 862                 dmamsg_p = (p_hxge_dma_common_t)&hxge_mp->buf_dma;
 863                 *dmamsg_p = *dmabuf_p;
 864                 dmamsg_p->nblocks = 1;
 865                 dmamsg_p->block_size = size;
 866                 dmamsg_p->alength = size;
 867                 buffer = (uchar_t *)dmabuf_p->kaddrp;
 868 
 869                 dmabuf_p->kaddrp = (void *)((char *)dmabuf_p->kaddrp + size);
 870                 dmabuf_p->ioaddr_pp = (void *)
 871                     ((char *)dmabuf_p->ioaddr_pp + size);
 872 
 873                 dmabuf_p->alength -= size;
 874                 dmabuf_p->offset += size;
 875                 dmabuf_p->dma_cookie.dmac_laddress += size;
 876                 dmabuf_p->dma_cookie.dmac_size -= size;
 877         } else {
 878                 buffer = KMEM_ALLOC(size, KM_NOSLEEP);
 879                 if (buffer == NULL) {
 880                         HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL,
 881                             "Allocation of a receive page failed."));
 882                         goto hxge_allocb_fail1;
 883                 }
 884         }
 885 
 886         hxge_mp->rx_mblk_p = desballoc(buffer, size, pri, &hxge_mp->freeb);
 887         if (hxge_mp->rx_mblk_p == NULL) {
 888                 HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL, "desballoc failed."));
 889                 goto hxge_allocb_fail2;
 890         }
 891         hxge_mp->buffer = buffer;
 892         hxge_mp->block_size = size;
 893         hxge_mp->freeb.free_func = (void (*) ()) hxge_freeb;
 894         hxge_mp->freeb.free_arg = (caddr_t)hxge_mp;
 895         hxge_mp->ref_cnt = 1;
 896         hxge_mp->free = B_TRUE;
 897         hxge_mp->rx_use_bcopy = B_FALSE;
 898 
 899         atomic_inc_32(&hxge_mblks_pending);
 900 
 901         goto hxge_allocb_exit;
 902 
 903 hxge_allocb_fail2:
 904         if (!hxge_mp->use_buf_pool) {
 905                 KMEM_FREE(buffer, size);
 906         }
 907 hxge_allocb_fail1:
 908         KMEM_FREE(hxge_mp, sizeof (rx_msg_t));
 909         hxge_mp = NULL;
 910 
 911 hxge_allocb_exit:
 912         return (hxge_mp);
 913 }
 914 
 915 p_mblk_t
 916 hxge_dupb(p_rx_msg_t hxge_mp, uint_t offset, size_t size)
 917 {
 918         p_mblk_t mp;
 919 
 920         HXGE_DEBUG_MSG((NULL, MEM_CTL, "==> hxge_dupb"));
 921         HXGE_DEBUG_MSG((NULL, MEM_CTL, "hxge_mp = $%p "
 922             "offset = 0x%08X " "size = 0x%08X", hxge_mp, offset, size));
 923 
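        /*
         * Wrap the existing receive buffer at the given offset in a new mblk
         * without copying; freeing the mblk invokes hxge_freeb() to drop the
         * reference taken below.
         */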
 924         mp = desballoc(&hxge_mp->buffer[offset], size, 0, &hxge_mp->freeb);
 925         if (mp == NULL) {
 926                 HXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed"));
 927                 goto hxge_dupb_exit;
 928         }
 929 
 930         atomic_inc_32(&hxge_mp->ref_cnt);
 931 
 932 hxge_dupb_exit:
 933         HXGE_DEBUG_MSG((NULL, MEM_CTL, "<== hxge_dupb mp = $%p", hxge_mp));
 934         return (mp);
 935 }
 936 
 937 p_mblk_t
 938 hxge_dupb_bcopy(p_rx_msg_t hxge_mp, uint_t offset, size_t size)
 939 {
 940         p_mblk_t        mp;
 941         uchar_t         *dp;
 942 
 943         mp = allocb(size + HXGE_RXBUF_EXTRA, 0);
 944         if (mp == NULL) {
 945                 HXGE_DEBUG_MSG((NULL, RX_CTL, "allocb failed"));
 946                 goto hxge_dupb_bcopy_exit;
 947         }
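        /* Reserve HXGE_RXBUF_EXTRA bytes of headroom, then bcopy the payload in. */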
 948         dp = mp->b_rptr = mp->b_rptr + HXGE_RXBUF_EXTRA;
 949         bcopy((void *) &hxge_mp->buffer[offset], dp, size);
 950         mp->b_wptr = dp + size;
 951 
 952 hxge_dupb_bcopy_exit:
 953 
 954         HXGE_DEBUG_MSG((NULL, MEM_CTL, "<== hxge_dupb_bcopy mp = $%p", hxge_mp));
 955 
 956         return (mp);
 957 }
 958 
 959 void hxge_post_page(p_hxge_t hxgep, p_rx_rbr_ring_t rx_rbr_p,
 960     p_rx_msg_t rx_msg_p);
 961 
 962 void
 963 hxge_post_page(p_hxge_t hxgep, p_rx_rbr_ring_t rx_rbr_p, p_rx_msg_t rx_msg_p)
 964 {
 965         HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_post_page"));
 966 
 967         /* Reuse this buffer */
 968         rx_msg_p->free = B_FALSE;
 969         rx_msg_p->cur_usage_cnt = 0;
 970         rx_msg_p->max_usage_cnt = 0;
 971         rx_msg_p->pkt_buf_size = 0;
 972 
 973         if (rx_rbr_p->rbr_use_bcopy) {
 974                 rx_msg_p->rx_use_bcopy = B_FALSE;
 975                 atomic_dec_32(&rx_rbr_p->rbr_consumed);
 976         }
 977         atomic_dec_32(&rx_rbr_p->rbr_used);
 978 
 979         /*
 980          * Advance the RBR write index and post this buffer's block address.
 981          */
 982         rx_rbr_p->rbr_wr_index = ((rx_rbr_p->rbr_wr_index + 1) &
 983             rx_rbr_p->rbr_wrap_mask);
 984         rx_rbr_p->rbr_desc_vp[rx_rbr_p->rbr_wr_index] = rx_msg_p->shifted_addr;
 985 
 986         /*
 987          * Accumulate some buffers in the ring before re-enabling the
 988          * DMA channel, if rbr empty was signaled.
 989          */
 990         hpi_rxdma_rdc_rbr_kick(HXGE_DEV_HPI_HANDLE(hxgep), rx_rbr_p->rdc, 1);
 991         if (rx_rbr_p->rbr_is_empty && (rx_rbr_p->rbb_max -
 992             rx_rbr_p->rbr_used) >= HXGE_RBR_EMPTY_THRESHOLD) {
 993                 hxge_rbr_empty_restore(hxgep, rx_rbr_p);
 994         }
 995 
 996         HXGE_DEBUG_MSG((hxgep, RX_CTL,
 997             "<== hxge_post_page (channel %d post_next_index %d)",
 998             rx_rbr_p->rdc, rx_rbr_p->rbr_wr_index));
 999         HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_post_page"));
1000 }
1001 
1002 void
1003 hxge_freeb(p_rx_msg_t rx_msg_p)
1004 {
1005         size_t          size;
1006         uchar_t         *buffer = NULL;
1007         int             ref_cnt;
1008         boolean_t       free_state = B_FALSE;
1009         rx_rbr_ring_t   *ring = rx_msg_p->rx_rbr_p;
1010 
1011         HXGE_DEBUG_MSG((NULL, MEM2_CTL, "==> hxge_freeb"));
1012         HXGE_DEBUG_MSG((NULL, MEM2_CTL,
1013             "hxge_freeb:rx_msg_p = $%p (block pending %d)",
1014             rx_msg_p, hxge_mblks_pending));
1015 
1016         if (ring == NULL)
1017                 return;
1018 
1019         /*
1020          * This is to prevent posting activities while we are recovering
1021          * from fatal errors. This should not be a performance drag since
1022          * ref_cnt != 0 most times.
1023          */
1024         if (ring->rbr_state == RBR_POSTING)
1025                 MUTEX_ENTER(&ring->post_lock);
1026 
1027         /*
1028          * First get the free state, then atomically decrement the
1029          * reference count. This ordering prevents a race with the
1030          * interrupt thread that is processing a buffer block that has
1031          * been loaned up to the stack.
1032          */
1033         free_state = rx_msg_p->free;
1034         ref_cnt = atomic_dec_32_nv(&rx_msg_p->ref_cnt);
1035         if (!ref_cnt) {
1036                 atomic_dec_32(&hxge_mblks_pending);
1037 
1038                 buffer = rx_msg_p->buffer;
1039                 size = rx_msg_p->block_size;
1040 
1041                 HXGE_DEBUG_MSG((NULL, MEM2_CTL, "hxge_freeb: "
1042                     "will free: rx_msg_p = $%p (block pending %d)",
1043                     rx_msg_p, hxge_mblks_pending));
1044 
1045                 if (!rx_msg_p->use_buf_pool) {
1046                         KMEM_FREE(buffer, size);
1047                 }
1048 
1049                 KMEM_FREE(rx_msg_p, sizeof (rx_msg_t));
1050                 /*
1051                  * Decrement the receive buffer ring's reference
1052                  * count, too.
1053                  */
1054                 atomic_dec_32(&ring->rbr_ref_cnt);
1055 
1056                 /*
1057                  * Free the receive buffer ring itself, iff
1058                  * 1. all the receive buffers have been freed, and
1059                  * 2. we are in the proper state (that is, the ring has
1060                  *    already been unmapped, RBR_UNMAPPED).
1061                  */
1062                 if (ring->rbr_ref_cnt == 0 &&
1063                     ring->rbr_state == RBR_UNMAPPED) {
1064                         KMEM_FREE(ring, sizeof (*ring));
1065                         /* post_lock has been destroyed already */
1066                         return;
1067                 }
1068         }
1069 
1070         /*
1071          * Repost buffer.
1072          */
1073         if (free_state && (ref_cnt == 1)) {
1074                 HXGE_DEBUG_MSG((NULL, RX_CTL,
1075                     "hxge_freeb: post page $%p:", rx_msg_p));
1076                 if (ring->rbr_state == RBR_POSTING)
1077                         hxge_post_page(rx_msg_p->hxgep, ring, rx_msg_p);
1078         }
1079 
1080         if (ring->rbr_state == RBR_POSTING)
1081                 MUTEX_EXIT(&ring->post_lock);
1082 
1083         HXGE_DEBUG_MSG((NULL, MEM2_CTL, "<== hxge_freeb"));
1084 }
1085 
1086 uint_t
1087 hxge_rx_intr(caddr_t arg1, caddr_t arg2)
1088 {
1089         p_hxge_ring_handle_t    rhp;
1090         p_hxge_ldv_t            ldvp = (p_hxge_ldv_t)arg1;
1091         p_hxge_t                hxgep = (p_hxge_t)arg2;
1092         p_hxge_ldg_t            ldgp;
1093         uint8_t                 channel;
1094         hpi_handle_t            handle;
1095         rdc_stat_t              cs;
1096         p_rx_rcr_ring_t         ring;
1097         p_rx_rbr_ring_t         rbrp;
1098         mblk_t                  *mp = NULL;
1099 
1100         if (ldvp == NULL) {
1101                 HXGE_DEBUG_MSG((NULL, RX_INT_CTL,
1102                     "<== hxge_rx_intr: arg2 $%p arg1 $%p", hxgep, ldvp));
1103                 return (DDI_INTR_UNCLAIMED);
1104         }
1105 
1106         if (arg2 == NULL || (void *) ldvp->hxgep != arg2) {
1107                 hxgep = ldvp->hxgep;
1108         }
1109 
1110         HXGE_DEBUG_MSG((hxgep, RX_INT_CTL,
1111             "==> hxge_rx_intr: arg2 $%p arg1 $%p", hxgep, ldvp));
1112 
1113         /*
1114          * This interrupt handler is for a specific receive dma channel.
1115          */
1116         handle = HXGE_DEV_HPI_HANDLE(hxgep);
1117 
1118         /*
1119          * Get the control and status for this channel.
1120          */
1121         channel = ldvp->vdma_index;
1122         ring = hxgep->rx_rcr_rings->rcr_rings[channel];
1123         rhp = &hxgep->rx_ring_handles[channel];
1124         ldgp = ldvp->ldgp;
1125 
1126         ASSERT(ring != NULL);
1127 #if defined(DEBUG)
1128         if (rhp->started) {
1129                 ASSERT(ring->ldgp == ldgp);
1130                 ASSERT(ring->ldvp == ldvp);
1131         }
1132 #endif
1133 
1134         MUTEX_ENTER(&ring->lock);
1135 
1136         if (!ring->poll_flag) {
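                /*
                 * Read the control/status register and write the value back
                 * to acknowledge the pending events; the packet and pointer
                 * read counts are not advanced here.
                 */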
1137                 RXDMA_REG_READ64(handle, RDC_STAT, channel, &cs.value);
1138                 cs.bits.ptrread = 0;
1139                 cs.bits.pktread = 0;
1140                 RXDMA_REG_WRITE64(handle, RDC_STAT, channel, cs.value);
1141 
1142                 /*
1143                  * Process packets only if we are not in polling mode, the
1144                  * ring has been started, and the interface is started. Under
1145                  * load, the MAC layer operates in polling mode for RX traffic.
1146                  */
1147                 if ((rhp->started) &&
1148                     (hxgep->hxge_mac_state == HXGE_MAC_STARTED)) {
1149                         mp = hxge_rx_pkts(hxgep, ldvp->vdma_index,
1150                             ldvp, ring, cs, -1);
1151                 }
1152 
1153                 /* Process error events. */
1154                 if (cs.value & RDC_STAT_ERROR) {
1155                         MUTEX_EXIT(&ring->lock);
1156                         (void) hxge_rx_err_evnts(hxgep, channel, ldvp, cs);
1157                         MUTEX_ENTER(&ring->lock);
1158                 }
1159 
1160                 /*
1161                  * Enable the mailbox update interrupt if we want to use the
1162                  * mailbox. We probably don't need the mailbox since it only
1163                  * saves us one PIO read. Also write 1 to rcrthres and rcrto
1164                  * to clear these two edge-triggered bits.
1165                  */
1166                 rbrp = hxgep->rx_rbr_rings->rbr_rings[channel];
1167                 MUTEX_ENTER(&rbrp->post_lock);
1168                 if (!rbrp->rbr_is_empty) {
1169                         cs.value = 0;
1170                         cs.bits.mex = 1;
1171                         cs.bits.ptrread = 0;
1172                         cs.bits.pktread = 0;
1173                         RXDMA_REG_WRITE64(handle, RDC_STAT, channel, cs.value);
1174                 }
1175                 MUTEX_EXIT(&rbrp->post_lock);
1176 
1177                 if (ldgp->nldvs == 1) {
1178                         /*
1179                          * Re-arm the group.
1180                          */
1181                         (void) hpi_intr_ldg_mgmt_set(handle, ldgp->ldg, B_TRUE,
1182                             ldgp->ldg_timer);
1183                 }
1184         } else if ((ldgp->nldvs == 1) && (ring->poll_flag)) {
1185                 /*
1186                  * Disarm the group, if we are not a shared interrupt.
1187                  */
1188                 (void) hpi_intr_ldg_mgmt_set(handle, ldgp->ldg, B_FALSE, 0);
1189         } else if (ring->poll_flag) {
1190                 /*
1191                  * Mask-off this device from the group.
1192                  */
1193                 (void) hpi_intr_mask_set(handle, ldvp->ldv, 1);
1194         }
1195 
1196         MUTEX_EXIT(&ring->lock);
1197 
1198         /*
1199          * Send the packets up the stack.
1200          */
1201         if (mp != NULL) {
1202                 mac_rx_ring(hxgep->mach, ring->rcr_mac_handle, mp,
1203                     ring->rcr_gen_num);
1204         }
1205 
1206         HXGE_DEBUG_MSG((NULL, RX_INT_CTL, "<== hxge_rx_intr"));
1207         return (DDI_INTR_CLAIMED);
1208 }
1209 
1210 /*
1211  * Enable polling for a ring. The ring's interrupt is disabled when the
1212  * hxge interrupt arrives (see hxge_rx_intr).
1213  */
1214 int
1215 hxge_enable_poll(void *arg)
1216 {
1217         p_hxge_ring_handle_t    ring_handle = (p_hxge_ring_handle_t)arg;
1218         p_rx_rcr_ring_t         ringp;
1219         p_hxge_t                hxgep;
1220         p_hxge_ldg_t            ldgp;
1221 
1222         if (ring_handle == NULL) {
1223                 ASSERT(ring_handle != NULL);
1224                 return (1);
1225         }
1226 
1227 
1228         hxgep = ring_handle->hxgep;
1229         ringp = hxgep->rx_rcr_rings->rcr_rings[ring_handle->index];
1230 
1231         MUTEX_ENTER(&ringp->lock);
1232 
1233         /*
1234          * Are we already polling ?
1235          */
1236         if (ringp->poll_flag) {
1237                 MUTEX_EXIT(&ringp->lock);
1238                 return (1);
1239         }
1240 
1241         ldgp = ringp->ldgp;
1242         if (ldgp == NULL) {
1243                 MUTEX_EXIT(&ringp->lock);
1244                 return (1);
1245         }
1246 
1247         /*
1248          * Enable polling
1249          */
1250         ringp->poll_flag = B_TRUE;
1251 
1252         MUTEX_EXIT(&ringp->lock);
1253         return (0);
1254 }
1255 
1256 /*
1257  * Disable polling for a ring and enable its interrupt.
1258  */
1259 int
1260 hxge_disable_poll(void *arg)
1261 {
1262         p_hxge_ring_handle_t    ring_handle = (p_hxge_ring_handle_t)arg;
1263         p_rx_rcr_ring_t         ringp;
1264         p_hxge_t                hxgep;
1265 
1266         if (ring_handle == NULL) {
1267                 ASSERT(ring_handle != NULL);
1268                 return (0);
1269         }
1270 
1271         hxgep = ring_handle->hxgep;
1272         ringp = hxgep->rx_rcr_rings->rcr_rings[ring_handle->index];
1273 
1274         MUTEX_ENTER(&ringp->lock);
1275 
1276         /*
1277          * Disable polling: enable interrupt
1278          */
1279         if (ringp->poll_flag) {
1280                 hpi_handle_t            handle;
1281                 rdc_stat_t              cs;
1282                 p_hxge_ldg_t            ldgp;
1283 
1284                 /*
1285                  * Get the control and status for this channel.
1286                  */
1287                 handle = HXGE_DEV_HPI_HANDLE(hxgep);
1288 
1289                 /*
1290                  * Rearm this logical group if this is a single device
1291                  * group.
1292                  */
1293                 ldgp = ringp->ldgp;
1294                 if (ldgp == NULL) {
1295                         MUTEX_EXIT(&ringp->lock);
1296                         return (1);
1297                 }
1298 
1299                 ringp->poll_flag = B_FALSE;
1300 
1301                 /*
1302                  * Enable mailbox update, to start interrupts again.
1303                  */
1304                 cs.value = 0ULL;
1305                 cs.bits.mex = 1;
1306                 cs.bits.pktread = 0;
1307                 cs.bits.ptrread = 0;
1308                 RXDMA_REG_WRITE64(handle, RDC_STAT, ringp->rdc, cs.value);
1309 
1310                 if (ldgp->nldvs == 1) {
1311                         /*
1312                          * Re-arm the group, since it is the only member
1313                          * of the group.
1314                          */
1315                         (void) hpi_intr_ldg_mgmt_set(handle, ldgp->ldg, B_TRUE,
1316                             ldgp->ldg_timer);
1317                 } else {
1318                         /*
1319                          * Mask-on interrupts for the device and re-arm
1320                          * the group.
1321                          */
1322                         (void) hpi_intr_mask_set(handle, ringp->ldvp->ldv, 0);
1323                         (void) hpi_intr_ldg_mgmt_set(handle, ldgp->ldg, B_TRUE,
1324                             ldgp->ldg_timer);
1325                 }
1326         }
1327         MUTEX_EXIT(&ringp->lock);
1328         return (0);
1329 }
1330 
1331 /*
1332  * Poll 'bytes_to_pickup' bytes of message from the rx ring.
1333  */
1334 mblk_t *
1335 hxge_rx_poll(void *arg, int bytes_to_pickup)
1336 {
1337         p_hxge_ring_handle_t    rhp = (p_hxge_ring_handle_t)arg;
1338         p_rx_rcr_ring_t         ring;
1339         p_hxge_t                hxgep;
1340         hpi_handle_t            handle;
1341         rdc_stat_t              cs;
1342         mblk_t                  *mblk;
1343         p_hxge_ldv_t            ldvp;
1344 
1345         hxgep = rhp->hxgep;
1346 
1347         /*
1348          * Get the control and status for this channel.
1349          */
1350         handle = HXGE_DEV_HPI_HANDLE(hxgep);
1351         ring = hxgep->rx_rcr_rings->rcr_rings[rhp->index];
1352 
1353         MUTEX_ENTER(&ring->lock);
1354         ASSERT(ring->poll_flag == B_TRUE);
1355         ASSERT(rhp->started);
1356 
1357         if (!ring->poll_flag) {
1358                 MUTEX_EXIT(&ring->lock);
1359                 return ((mblk_t *)NULL);
1360         }
1361 
1362         /*
1363          * Get the control and status bits for the ring.
1364          */
1365         RXDMA_REG_READ64(handle, RDC_STAT, rhp->index, &cs.value);
1366         cs.bits.ptrread = 0;
1367         cs.bits.pktread = 0;
1368         RXDMA_REG_WRITE64(handle, RDC_STAT, rhp->index, cs.value);
1369 
1370         /*
1371          * Process packets.
1372          */
1373         mblk = hxge_rx_pkts(hxgep, ring->ldvp->vdma_index,
1374             ring->ldvp, ring, cs, bytes_to_pickup);
1375         ldvp = ring->ldvp;
1376 
1377         /*
1378          * Process Error Events.
1379          */
1380         if (ldvp && (cs.value & RDC_STAT_ERROR)) {
1381                 /*
1382                  * Recovery routines will grab the RCR ring lock.
1383                  */
1384                 MUTEX_EXIT(&ring->lock);
1385                 (void) hxge_rx_err_evnts(hxgep, ldvp->vdma_index, ldvp, cs);
1386                 MUTEX_ENTER(&ring->lock);
1387         }
1388 
1389         MUTEX_EXIT(&ring->lock);
1390         return (mblk);
1391 }
1392 
1393 /*ARGSUSED*/
1394 mblk_t *
1395 hxge_rx_pkts(p_hxge_t hxgep, uint_t vindex, p_hxge_ldv_t ldvp,
1396     p_rx_rcr_ring_t rcrp, rdc_stat_t cs, int bytes_to_read)
1397 {
1398         hpi_handle_t            handle;
1399         uint8_t                 channel;
1400         uint32_t                comp_rd_index;
1401         p_rcr_entry_t           rcr_desc_rd_head_p;
1402         p_rcr_entry_t           rcr_desc_rd_head_pp;
1403         p_mblk_t                nmp, mp_cont, head_mp, *tail_mp;
1404         uint16_t                qlen, nrcr_read, npkt_read;
1405         uint32_t                qlen_hw, npkts, num_rcrs;
1406         uint32_t                invalid_rcr_entry;
1407         boolean_t               multi;
1408         rdc_stat_t              pktcs;
1409         rdc_rcr_cfg_b_t         rcr_cfg_b;
1410         uint64_t                rcr_head_index, rcr_tail_index;
1411         uint64_t                rcr_tail;
1412         rdc_rcr_tail_t          rcr_tail_reg;
1413         p_hxge_rx_ring_stats_t  rdc_stats;
1414         int                     totallen = 0;
1415 
1416         HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, "==> hxge_rx_pkts:vindex %d "
1417             "channel %d", vindex, ldvp->channel));
1418 
1419         handle = HXGE_DEV_HPI_HANDLE(hxgep);
1420         channel = rcrp->rdc;
1421         if (channel != ldvp->channel) {
1422                 HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, "==> hxge_rx_pkts:index %d "
1423                     "channel %d, and rcr channel %d not matched.",
1424                     vindex, ldvp->channel, channel));
1425                 return (NULL);
1426         }
1427 
1428         HXGE_DEBUG_MSG((hxgep, RX_INT_CTL,
1429             "==> hxge_rx_pkts: START: rcr channel %d "
1430             "head_p $%p head_pp $%p  index %d ",
1431             channel, rcrp->rcr_desc_rd_head_p,
1432             rcrp->rcr_desc_rd_head_pp, rcrp->comp_rd_index));
1433 
1434         (void) hpi_rxdma_rdc_rcr_qlen_get(handle, channel, &qlen);
1435         RXDMA_REG_READ64(handle, RDC_RCR_TAIL, channel, &rcr_tail_reg.value);
1436         rcr_tail = rcr_tail_reg.bits.tail;
1437 
1438         if (!qlen) {
1439                 HXGE_DEBUG_MSG((hxgep, RX_INT_CTL,
1440                     "<== hxge_rx_pkts:rcr channel %d qlen %d (no pkts)",
1441                     channel, qlen));
1442                 return (NULL);
1443         }
1444 
1445         HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rx_pkts:rcr channel %d "
1446             "qlen %d", channel, qlen));
1447 
1448         comp_rd_index = rcrp->comp_rd_index;
1449 
1450         rcr_desc_rd_head_p = rcrp->rcr_desc_rd_head_p;
1451         rcr_desc_rd_head_pp = rcrp->rcr_desc_rd_head_pp;
1452         nrcr_read = npkt_read = 0;
1453 
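             /*
              * On the first interrupt the full qlen is processed, but one
              * fewer packet is acknowledged in RDC_STAT below; after that
              * the hardware qlen runs one high, so process qlen - 1.
              */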
1454         if (hxgep->rdc_first_intr[channel])
1455                 qlen_hw = qlen;
1456         else
1457                 qlen_hw = qlen - 1;
1458 
1459         head_mp = NULL;
1460         tail_mp = &head_mp;
1461         nmp = mp_cont = NULL;
1462         multi = B_FALSE;
1463 
1464         rcr_head_index = rcrp->rcr_desc_rd_head_p - rcrp->rcr_desc_first_p;
1465         rcr_tail_index = rcr_tail - rcrp->rcr_tail_begin;
1466 
1467         if (rcr_tail_index >= rcr_head_index) {
1468                 num_rcrs = rcr_tail_index - rcr_head_index;
1469         } else {
1470                 /* rcr_tail has wrapped around */
1471                 num_rcrs = (rcrp->comp_size - rcr_head_index) + rcr_tail_index;
1472         }
1473 
1474         npkts = hxge_scan_for_last_eop(rcrp, rcr_desc_rd_head_p, num_rcrs);
1475         if (!npkts)
1476                 return (NULL);
1477 
1478         if (qlen_hw > npkts) {
1479                 HXGE_DEBUG_MSG((hxgep, RX_INT_CTL,
1480                     "Channel %d, rcr_qlen from reg %d and from rcr_tail %d\n",
1481                     channel, qlen_hw, npkts));
1482                 qlen_hw = npkts;
1483         }
1484 
1485         while (qlen_hw) {
1486 #ifdef HXGE_DEBUG
1487                 hxge_dump_rcr_entry(hxgep, rcr_desc_rd_head_p);
1488 #endif
1489                 /*
1490                  * Process one completion ring entry.
1491                  */
1492                 invalid_rcr_entry = 0;
1493                 hxge_receive_packet(hxgep,
1494                     rcrp, rcr_desc_rd_head_p, &multi, &nmp, &mp_cont,
1495                     &invalid_rcr_entry);
1496                 if (invalid_rcr_entry != 0) {
1497                         rdc_stats = rcrp->rdc_stats;
1498                         rdc_stats->rcr_invalids++;
1499                         HXGE_DEBUG_MSG((hxgep, RX_INT_CTL,
1500                             "Channel %d could only read 0x%x packets, "
1501                             "but 0x%x pending\n", channel, npkt_read, qlen_hw));
1502                         break;
1503                 }
1504 
1505                 /*
1506                  * message chaining modes (nemo msg chaining)
1507                  */
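                     /*
                      * A frame that fits a single buffer is linked directly
                      * onto the b_next chain.  For multi-buffer frames the
                      * first segment becomes the head, middle segments are
                      * linked through b_cont, and the final segment closes
                      * the frame and advances the b_next tail.
                      */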
1508                 if (nmp) {
1509                         nmp->b_next = NULL;
1510                         if (!multi && !mp_cont) { /* frame fits a partition */
1511                                 *tail_mp = nmp;
1512                                 tail_mp = &nmp->b_next;
1513                                 nmp = NULL;
1514                         } else if (multi && !mp_cont) { /* first segment */
1515                                 *tail_mp = nmp;
1516                                 tail_mp = &nmp->b_cont;
1517                         } else if (multi && mp_cont) {  /* mid of multi segs */
1518                                 *tail_mp = mp_cont;
1519                                 tail_mp = &mp_cont->b_cont;
1520                         } else if (!multi && mp_cont) { /* last segment */
1521                                 *tail_mp = mp_cont;
1522                                 tail_mp = &nmp->b_next;
1523                                 totallen += MBLKL(mp_cont);
1524                                 nmp = NULL;
1525                         }
1526                 }
1527 
1528                 HXGE_DEBUG_MSG((hxgep, RX_INT_CTL,
1529                     "==> hxge_rx_pkts: loop: rcr channel %d "
1530                     "before updating: multi %d "
1531                     "nrcr_read %d "
1532                     "npkt read %d "
1533                     "head_pp $%p  index %d ",
1534                     channel, multi,
1535                     nrcr_read, npkt_read, rcr_desc_rd_head_pp, comp_rd_index));
1536 
1537                 if (!multi) {
1538                         qlen_hw--;
1539                         npkt_read++;
1540                 }
1541 
1542                 /*
1543                  * Update the next read entry.
1544                  */
1545                 comp_rd_index = NEXT_ENTRY(comp_rd_index,
1546                     rcrp->comp_wrap_mask);
1547 
1548                 rcr_desc_rd_head_p = NEXT_ENTRY_PTR(rcr_desc_rd_head_p,
1549                     rcrp->rcr_desc_first_p, rcrp->rcr_desc_last_p);
1550 
1551                 nrcr_read++;
1552 
1553                 HXGE_DEBUG_MSG((hxgep, RX_INT_CTL,
1554                     "<== hxge_rx_pkts: (SAM, process one packet) "
1555                     "nrcr_read %d", nrcr_read));
1556                 HXGE_DEBUG_MSG((hxgep, RX_INT_CTL,
1557                     "==> hxge_rx_pkts: loop: rcr channel %d "
1558                     "multi %d nrcr_read %d npkt read %d head_pp $%p  index %d ",
1559                     channel, multi, nrcr_read, npkt_read, rcr_desc_rd_head_pp,
1560                     comp_rd_index));
1561 
1562                 if ((bytes_to_read != -1) &&
1563                     (totallen >= bytes_to_read)) {
1564                         break;
1565                 }
1566         }
1567 
1568         rcrp->rcr_desc_rd_head_pp = rcr_desc_rd_head_pp;
1569         rcrp->comp_rd_index = comp_rd_index;
1570         rcrp->rcr_desc_rd_head_p = rcr_desc_rd_head_p;
1571 
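             /*
              * If the interrupt timeout or threshold tunables have changed
              * since this ring was configured, push the new values out to
              * RDC_RCR_CFG_B.
              */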
1572         if ((hxgep->intr_timeout != rcrp->intr_timeout) ||
1573             (hxgep->intr_threshold != rcrp->intr_threshold)) {
1574                 rcrp->intr_timeout = hxgep->intr_timeout;
1575                 rcrp->intr_threshold = hxgep->intr_threshold;
1576                 rcr_cfg_b.value = 0x0ULL;
1577                 if (rcrp->intr_timeout)
1578                         rcr_cfg_b.bits.entout = 1;
1579                 rcr_cfg_b.bits.timeout = rcrp->intr_timeout;
1580                 rcr_cfg_b.bits.pthres = rcrp->intr_threshold;
1581                 RXDMA_REG_WRITE64(handle, RDC_RCR_CFG_B,
1582                     channel, rcr_cfg_b.value);
1583         }
1584 
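             /*
              * Acknowledge the work done in RDC_STAT.  On the first
              * interrupt after the channel is started, report one packet
              * fewer than was read; this pairs with the qlen adjustment
              * made above.
              */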
1585         pktcs.value = 0;
1586         if (hxgep->rdc_first_intr[channel] && (npkt_read > 0)) {
1587                 hxgep->rdc_first_intr[channel] = B_FALSE;
1588                 pktcs.bits.pktread = npkt_read - 1;
1589         } else
1590                 pktcs.bits.pktread = npkt_read;
1591         pktcs.bits.ptrread = nrcr_read;
1592         RXDMA_REG_WRITE64(handle, RDC_STAT, channel, pktcs.value);
1593 
1594         HXGE_DEBUG_MSG((hxgep, RX_INT_CTL,
1595             "==> hxge_rx_pkts: EXIT: rcr channel %d "
1596             "head_pp $%p  index %016llx ",
1597             channel, rcrp->rcr_desc_rd_head_pp, rcrp->comp_rd_index));
1598 
1599         HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, "<== hxge_rx_pkts"));
1600         return (head_mp);
1601 }
1602 
1603 #define RCR_ENTRY_PATTERN       0x5a5a6b6b7c7c8d8dULL
1604 #define NO_PORT_BIT             0x20
1605 #define L4_CS_EQ_BIT            0x40
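     /*
      * NO_PORT_BIT and L4_CS_EQ_BIT are tested against the first two bytes
      * of the receive packet header (header0 and header1 in
      * hxge_receive_packet) when deciding whether to claim a full hardware
      * L4 checksum.
      */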
1606 
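     /*
      * Walk up to num_rcrs entries forward from the current read head and
      * count complete packets (entries with the MULTI bit clear).  Stop at
      * the first entry the hardware has not written yet, i.e. one that is
      * still zero or still holds the sentinel RCR_ENTRY_PATTERN.
      */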
1607 static uint32_t hxge_scan_for_last_eop(p_rx_rcr_ring_t rcrp,
1608     p_rcr_entry_t rcr_desc_rd_head_p, uint32_t num_rcrs)
1609 {
1610         uint64_t        rcr_entry;
1611         uint32_t        rcrs = 0;
1612         uint32_t        pkts = 0;
1613 
1614         while (rcrs < num_rcrs) {
1615                 rcr_entry = *((uint64_t *)rcr_desc_rd_head_p);
1616 
1617                 if ((rcr_entry == 0x0) || (rcr_entry == RCR_ENTRY_PATTERN))
1618                         break;
1619 
1620                 if (!(rcr_entry & RCR_MULTI_MASK))
1621                         pkts++;
1622 
1623                 rcr_desc_rd_head_p = NEXT_ENTRY_PTR(rcr_desc_rd_head_p,
1624                     rcrp->rcr_desc_first_p, rcrp->rcr_desc_last_p);
1625 
1626                 rcrs++;
1627         }
1628 
1629         return (pkts);
1630 }
1631 
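     /*
      * Process a single RCR entry.  On return, *mp holds the first segment
      * of a new frame, *mp_cont holds a continuation segment, *multi_p is
      * set when more entries belong to the same frame, and
      * *invalid_rcr_entry is set if the entry has not been written by the
      * hardware yet.
      */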
1632 /*ARGSUSED*/
1633 void
1634 hxge_receive_packet(p_hxge_t hxgep, p_rx_rcr_ring_t rcr_p,
1635     p_rcr_entry_t rcr_desc_rd_head_p, boolean_t *multi_p, mblk_t **mp,
1636     mblk_t **mp_cont, uint32_t *invalid_rcr_entry)
1637 {
1638         p_mblk_t nmp = NULL;
1639         uint64_t multi;
1640         uint8_t channel;
1641         boolean_t first_entry = B_TRUE;
1642         boolean_t is_tcp_udp = B_FALSE;
1643         boolean_t buffer_free = B_FALSE;
1644         boolean_t error_send_up = B_FALSE;
1645         uint8_t error_type;
1646         uint16_t l2_len;
1647         uint16_t skip_len;
1648         uint8_t pktbufsz_type;
1649         uint64_t rcr_entry;
1650         uint64_t *pkt_buf_addr_pp;
1651         uint64_t *pkt_buf_addr_p;
1652         uint32_t buf_offset;
1653         uint32_t bsize;
1654         uint32_t msg_index;
1655         p_rx_rbr_ring_t rx_rbr_p;
1656         p_rx_msg_t *rx_msg_ring_p;
1657         p_rx_msg_t rx_msg_p;
1658         uint16_t sw_offset_bytes = 0, hdr_size = 0;
1659         hxge_status_t status = HXGE_OK;
1660         boolean_t is_valid = B_FALSE;
1661         p_hxge_rx_ring_stats_t rdc_stats;
1662         uint32_t bytes_read;
1663         uint8_t header0 = 0;
1664         uint8_t header1 = 0;
1665         uint64_t pkt_type;
1666         uint8_t no_port_bit = 0;
1667         uint8_t l4_cs_eq_bit = 0;
1668 
1669         channel = rcr_p->rdc;
1670 
1671         HXGE_DEBUG_MSG((hxgep, RX2_CTL, "==> hxge_receive_packet"));
1672 
1673         first_entry = (*mp == NULL) ? B_TRUE : B_FALSE;
1674         rcr_entry = *((uint64_t *)rcr_desc_rd_head_p);
1675 
1676         /* Verify the content of the rcr_entry for a hardware bug workaround */
1677         if ((rcr_entry == 0x0) || (rcr_entry == RCR_ENTRY_PATTERN)) {
1678                 *invalid_rcr_entry = 1;
1679                 HXGE_DEBUG_MSG((hxgep, RX2_CTL, "hxge_receive_packet "
1680                     "Channel %d invalid RCR entry 0x%llx found, returning\n",
1681                     channel, (long long) rcr_entry));
1682                 return;
1683         }
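             /*
              * Stamp the consumed entry with the sentinel pattern so that a
              * stale re-read of this descriptor is caught by the check
              * above.
              */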
1684         *((uint64_t *)rcr_desc_rd_head_p) = RCR_ENTRY_PATTERN;
1685 
1686         multi = (rcr_entry & RCR_MULTI_MASK);
1687         pkt_type = (rcr_entry & RCR_PKT_TYPE_MASK);
1688 
1689         error_type = ((rcr_entry & RCR_ERROR_MASK) >> RCR_ERROR_SHIFT);
1690         l2_len = ((rcr_entry & RCR_L2_LEN_MASK) >> RCR_L2_LEN_SHIFT);
1691 
1692         /*
1693          * Hardware does not strip the CRC due to bug ID 11451, where
1694          * the hardware mishandles minimum-size packets.
1695          */
1696         l2_len -= ETHERFCSL;
1697 
1698         pktbufsz_type = ((rcr_entry & RCR_PKTBUFSZ_MASK) >>
1699             RCR_PKTBUFSZ_SHIFT);
1700 #if defined(__i386)
1701         pkt_buf_addr_pp = (uint64_t *)(uint32_t)((rcr_entry &
1702             RCR_PKT_BUF_ADDR_MASK) << RCR_PKT_BUF_ADDR_SHIFT);
1703 #else
1704         pkt_buf_addr_pp = (uint64_t *)((rcr_entry & RCR_PKT_BUF_ADDR_MASK) <<
1705             RCR_PKT_BUF_ADDR_SHIFT);
1706 #endif
1707 
1708         HXGE_DEBUG_MSG((hxgep, RX2_CTL,
1709             "==> hxge_receive_packet: entryp $%p entry 0x%0llx "
1710             "pkt_buf_addr_pp $%p l2_len %d multi %d "
1711             "error_type 0x%x pktbufsz_type %d ",
1712             rcr_desc_rd_head_p, rcr_entry, pkt_buf_addr_pp, l2_len,
1713             multi, error_type, pktbufsz_type));
1714 
1715         HXGE_DEBUG_MSG((hxgep, RX2_CTL,
1716             "==> hxge_receive_packet: entryp $%p entry 0x%0llx "
1717             "pkt_buf_addr_pp $%p l2_len %d multi %d "
1718             "error_type 0x%x ", rcr_desc_rd_head_p,
1719             rcr_entry, pkt_buf_addr_pp, l2_len, multi, error_type));
1720 
1721         HXGE_DEBUG_MSG((hxgep, RX2_CTL,
1722             "==> (rbr) hxge_receive_packet: entry 0x%0llx "
1723             "full pkt_buf_addr_pp $%p l2_len %d",
1724             rcr_entry, pkt_buf_addr_pp, l2_len));
1725 
1726         /* get the stats ptr */
1727         rdc_stats = rcr_p->rdc_stats;
1728 
1729         if (!l2_len) {
1730                 HXGE_DEBUG_MSG((hxgep, RX_CTL,
1731                     "<== hxge_receive_packet: failed: l2 length is 0."));
1732                 return;
1733         }
1734 
1735         /* shift 6 bits to get the full io address */
1736 #if defined(__i386)
1737         pkt_buf_addr_pp = (uint64_t *)((uint32_t)pkt_buf_addr_pp <<
1738             RCR_PKT_BUF_ADDR_SHIFT_FULL);
1739 #else
1740         pkt_buf_addr_pp = (uint64_t *)((uint64_t)pkt_buf_addr_pp <<
1741             RCR_PKT_BUF_ADDR_SHIFT_FULL);
1742 #endif
1743         HXGE_DEBUG_MSG((hxgep, RX2_CTL,
1744             "==> (rbr) hxge_receive_packet: entry 0x%0llx "
1745             "full pkt_buf_addr_pp $%p l2_len %d",
1746             rcr_entry, pkt_buf_addr_pp, l2_len));
1747 
1748         rx_rbr_p = rcr_p->rx_rbr_p;
1749         rx_msg_ring_p = rx_rbr_p->rx_msg_ring;
1750 
1751         if (first_entry) {
1752                 hdr_size = (rcr_p->full_hdr_flag ? RXDMA_HDR_SIZE_FULL :
1753                     RXDMA_HDR_SIZE_DEFAULT);
1754 
1755                 HXGE_DEBUG_MSG((hxgep, RX_CTL,
1756                     "==> hxge_receive_packet: first entry 0x%016llx "
1757                     "pkt_buf_addr_pp $%p l2_len %d hdr %d",
1758                     rcr_entry, pkt_buf_addr_pp, l2_len, hdr_size));
1759         }
1760 
1761         MUTEX_ENTER(&rx_rbr_p->lock);
1762 
1763         HXGE_DEBUG_MSG((hxgep, RX_CTL,
1764             "==> (rbr 1) hxge_receive_packet: entry 0x%0llx "
1765             "full pkt_buf_addr_pp $%p l2_len %d",
1766             rcr_entry, pkt_buf_addr_pp, l2_len));
1767 
1768         /*
1769          * Packet buffer address in the completion entry points to the starting
1770          * buffer address (offset 0). Use the starting buffer address to locate
1771          * the corresponding kernel address.
1772          */
1773         status = hxge_rxbuf_pp_to_vp(hxgep, rx_rbr_p,
1774             pktbufsz_type, pkt_buf_addr_pp, &pkt_buf_addr_p,
1775             &buf_offset, &msg_index);
1776 
1777         HXGE_DEBUG_MSG((hxgep, RX_CTL,
1778             "==> (rbr 2) hxge_receive_packet: entry 0x%0llx "
1779             "full pkt_buf_addr_pp $%p l2_len %d",
1780             rcr_entry, pkt_buf_addr_pp, l2_len));
1781 
1782         if (status != HXGE_OK) {
1783                 MUTEX_EXIT(&rx_rbr_p->lock);
1784                 HXGE_DEBUG_MSG((hxgep, RX_CTL,
1785                     "<== hxge_receive_packet: found vaddr failed %d", status));
1786                 return;
1787         }
1788 
1789         HXGE_DEBUG_MSG((hxgep, RX2_CTL,
1790             "==> (rbr 3) hxge_receive_packet: entry 0x%0llx "
1791             "full pkt_buf_addr_pp $%p l2_len %d",
1792             rcr_entry, pkt_buf_addr_pp, l2_len));
1793         HXGE_DEBUG_MSG((hxgep, RX2_CTL,
1794             "==> (rbr 4 msgindex %d) hxge_receive_packet: entry 0x%0llx "
1795             "full pkt_buf_addr_pp $%p l2_len %d",
1796             msg_index, rcr_entry, pkt_buf_addr_pp, l2_len));
1797 
1798         if (msg_index >= rx_rbr_p->tnblocks) {
1799                 MUTEX_EXIT(&rx_rbr_p->lock);
1800                 HXGE_DEBUG_MSG((hxgep, RX2_CTL,
1801                     "==> hxge_receive_packet: FATAL msg_index (%d) "
1802                     "should be smaller than tnblocks (%d)\n",
1803                     msg_index, rx_rbr_p->tnblocks));
1804                 return;
1805         }
1806 
1807         rx_msg_p = rx_msg_ring_p[msg_index];
1808 
1809         HXGE_DEBUG_MSG((hxgep, RX2_CTL,
1810             "==> (rbr 4 msgindex %d) hxge_receive_packet: entry 0x%0llx "
1811             "full pkt_buf_addr_pp $%p l2_len %d",
1812             msg_index, rcr_entry, pkt_buf_addr_pp, l2_len));
1813 
1814         switch (pktbufsz_type) {
1815         case RCR_PKTBUFSZ_0:
1816                 bsize = rx_rbr_p->pkt_buf_size0_bytes;
1817                 HXGE_DEBUG_MSG((hxgep, RX2_CTL,
1818                     "==> hxge_receive_packet: 0 buf %d", bsize));
1819                 break;
1820         case RCR_PKTBUFSZ_1:
1821                 bsize = rx_rbr_p->pkt_buf_size1_bytes;
1822                 HXGE_DEBUG_MSG((hxgep, RX2_CTL,
1823                     "==> hxge_receive_packet: 1 buf %d", bsize));
1824                 break;
1825         case RCR_PKTBUFSZ_2:
1826                 bsize = rx_rbr_p->pkt_buf_size2_bytes;
1827                 HXGE_DEBUG_MSG((hxgep, RX_CTL,
1828                     "==> hxge_receive_packet: 2 buf %d", bsize));
1829                 break;
1830         case RCR_SINGLE_BLOCK:
1831                 bsize = rx_msg_p->block_size;
1832                 HXGE_DEBUG_MSG((hxgep, RX2_CTL,
1833                     "==> hxge_receive_packet: single %d", bsize));
1834 
1835                 break;
1836         default:
1837                 MUTEX_EXIT(&rx_rbr_p->lock);
1838                 return;
1839         }
1840 
1841         DMA_COMMON_SYNC_OFFSET(rx_msg_p->buf_dma,
1842             (buf_offset + sw_offset_bytes), (hdr_size + l2_len),
1843             DDI_DMA_SYNC_FORCPU);
1844 
1845         HXGE_DEBUG_MSG((hxgep, RX2_CTL,
1846             "==> hxge_receive_packet: after first dump:usage count"));
1847 
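             /*
              * First use of this buffer block: decide whether its packets
              * should be bcopied.  Below the hi threshold, bcopy only when
              * the lo threshold is zero or has been reached and the ring's
              * buffer size type covers this packet's size type; at or above
              * the hi threshold, always bcopy.
              */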
1848         if (rx_msg_p->cur_usage_cnt == 0) {
1849                 atomic_inc_32(&rx_rbr_p->rbr_used);
1850                 if (rx_rbr_p->rbr_use_bcopy) {
1851                         atomic_inc_32(&rx_rbr_p->rbr_consumed);
1852                         if (rx_rbr_p->rbr_consumed <
1853                             rx_rbr_p->rbr_threshold_hi) {
1854                                 if (rx_rbr_p->rbr_threshold_lo == 0 ||
1855                                     ((rx_rbr_p->rbr_consumed >=
1856                                     rx_rbr_p->rbr_threshold_lo) &&
1857                                     (rx_rbr_p->rbr_bufsize_type >=
1858                                     pktbufsz_type))) {
1859                                         rx_msg_p->rx_use_bcopy = B_TRUE;
1860                                 }
1861                         } else {
1862                                 rx_msg_p->rx_use_bcopy = B_TRUE;
1863                         }
1864                 }
1865                 HXGE_DEBUG_MSG((hxgep, RX2_CTL,
1866                     "==> hxge_receive_packet: buf %d (new block) ", bsize));
1867 
1868                 rx_msg_p->pkt_buf_size_code = pktbufsz_type;
1869                 rx_msg_p->pkt_buf_size = bsize;
1870                 rx_msg_p->cur_usage_cnt = 1;
1871                 if (pktbufsz_type == RCR_SINGLE_BLOCK) {
1872                         HXGE_DEBUG_MSG((hxgep, RX2_CTL,
1873                             "==> hxge_receive_packet: buf %d (single block) ",
1874                             bsize));
1875                         /*
1876                          * Buffer can be reused once the free function is
1877                          * called.
1878                          */
1879                         rx_msg_p->max_usage_cnt = 1;
1880                         buffer_free = B_TRUE;
1881                 } else {
1882                         rx_msg_p->max_usage_cnt = rx_msg_p->block_size / bsize;
1883                         if (rx_msg_p->max_usage_cnt == 1) {
1884                                 buffer_free = B_TRUE;
1885                         }
1886                 }
1887         } else {
1888                 rx_msg_p->cur_usage_cnt++;
1889                 if (rx_msg_p->cur_usage_cnt == rx_msg_p->max_usage_cnt) {
1890                         buffer_free = B_TRUE;
1891                 }
1892         }
1893 
1894         HXGE_DEBUG_MSG((hxgep, RX_CTL,
1895             "msgbuf index = %d l2len %d bytes usage %d max_usage %d ",
1896             msg_index, l2_len,
1897             rx_msg_p->cur_usage_cnt, rx_msg_p->max_usage_cnt));
1898 
1899         if (error_type) {
1900                 rdc_stats->ierrors++;
1901                 /* Update error stats */
1902                 rdc_stats->errlog.compl_err_type = error_type;
1903                 HXGE_FM_REPORT_ERROR(hxgep, NULL, HXGE_FM_EREPORT_RDMC_RCR_ERR);
1904 
1905                 if (error_type & RCR_CTRL_FIFO_DED) {
1906                         rdc_stats->ctrl_fifo_ecc_err++;
1907                         HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1908                             " hxge_receive_packet: "
1909                             " channel %d RCR ctrl_fifo_ded error", channel));
1910                 } else if (error_type & RCR_DATA_FIFO_DED) {
1911                         rdc_stats->data_fifo_ecc_err++;
1912                         HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1913                             " hxge_receive_packet: channel %d"
1914                             " RCR data_fifo_ded error", channel));
1915                 }
1916 
1917                 /*
1918                  * Update and repost buffer block if max usage count is
1919                  * reached.
1920                  */
1921                 if (error_send_up == B_FALSE) {
1922                         atomic_inc_32(&rx_msg_p->ref_cnt);
1923                         if (buffer_free == B_TRUE) {
1924                                 rx_msg_p->free = B_TRUE;
1925                         }
1926 
1927                         MUTEX_EXIT(&rx_rbr_p->lock);
1928                         hxge_freeb(rx_msg_p);
1929                         return;
1930                 }
1931         }
1932 
1933         HXGE_DEBUG_MSG((hxgep, RX2_CTL,
1934             "==> hxge_receive_packet: DMA sync second "));
1935 
1936         bytes_read = rcr_p->rcvd_pkt_bytes;
1937         skip_len = sw_offset_bytes + hdr_size;
1938 
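             /*
              * The first segment starts with the hardware receive header,
              * which is skipped below; header bytes 0 and 1 carry the
              * no-port and L4-checksum-equal flags used for checksum
              * offload.
              */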
1939         if (first_entry) {
1940                 header0 = rx_msg_p->buffer[buf_offset];
1941                 no_port_bit = header0 & NO_PORT_BIT;
1942                 header1 = rx_msg_p->buffer[buf_offset + 1];
1943                 l4_cs_eq_bit = header1 & L4_CS_EQ_BIT;
1944         }
1945 
1946         if (!rx_msg_p->rx_use_bcopy) {
1947                 /*
1948                  * For loaned-up buffers, the driver reference count is
1949                  * incremented first and the free state is set afterwards.
1950                  */
1951                 if ((nmp = hxge_dupb(rx_msg_p, buf_offset, bsize)) != NULL) {
1952                         if (first_entry) {
1953                                 nmp->b_rptr = &nmp->b_rptr[skip_len];
1954                                 if (l2_len < bsize - skip_len) {
1955                                         nmp->b_wptr = &nmp->b_rptr[l2_len];
1956                                 } else {
1957                                         nmp->b_wptr = &nmp->b_rptr[bsize
1958                                             - skip_len];
1959                                 }
1960                         } else {
1961                                 if (l2_len - bytes_read < bsize) {
1962                                         nmp->b_wptr =
1963                                             &nmp->b_rptr[l2_len - bytes_read];
1964                                 } else {
1965                                         nmp->b_wptr = &nmp->b_rptr[bsize];
1966                                 }
1967                         }
1968                 }
1969         } else {
1970                 if (first_entry) {
1971                         nmp = hxge_dupb_bcopy(rx_msg_p, buf_offset + skip_len,
1972                             l2_len < bsize - skip_len ?
1973                             l2_len : bsize - skip_len);
1974                 } else {
1975                         nmp = hxge_dupb_bcopy(rx_msg_p, buf_offset,
1976                             l2_len - bytes_read < bsize ?
1977                             l2_len - bytes_read : bsize);
1978                 }
1979         }
1980 
1981         if (nmp != NULL) {
1982                 if (first_entry)
1983                         bytes_read  = nmp->b_wptr - nmp->b_rptr;
1984                 else
1985                         bytes_read += nmp->b_wptr - nmp->b_rptr;
1986 
1987                 HXGE_DEBUG_MSG((hxgep, RX_CTL,
1988                     "==> hxge_receive_packet after dupb: "
1989                     "rbr consumed %d "
1990                     "pktbufsz_type %d "
1991                     "nmp $%p rptr $%p wptr $%p "
1992                     "buf_offset %d bsize %d l2_len %d skip_len %d",
1993                     rx_rbr_p->rbr_consumed,
1994                     pktbufsz_type,
1995                     nmp, nmp->b_rptr, nmp->b_wptr,
1996                     buf_offset, bsize, l2_len, skip_len));
1997         } else {
1998                 cmn_err(CE_WARN, "!hxge_receive_packet: update stats (error)");
1999 
2000                 atomic_inc_32(&rx_msg_p->ref_cnt);
2001                 if (buffer_free == B_TRUE) {
2002                         rx_msg_p->free = B_TRUE;
2003                 }
2004 
2005                 MUTEX_EXIT(&rx_rbr_p->lock);
2006                 hxge_freeb(rx_msg_p);
2007                 return;
2008         }
2009 
2010         if (buffer_free == B_TRUE) {
2011                 rx_msg_p->free = B_TRUE;
2012         }
2013 
2014         /*
2015          * ERROR, FRAG and PKT_TYPE are only reported in the first entry. If a
2016          * packet is not fragmented and no error bit is set, then L4 checksum
2017          * is OK.
2018          */
2019         is_valid = (nmp != NULL);
2020         if (first_entry) {
2021                 rdc_stats->ipackets++; /* count only 1st seg for jumbo */
2022                 if (l2_len > (STD_FRAME_SIZE - ETHERFCSL))
2023                         rdc_stats->jumbo_pkts++;
2024                 rdc_stats->ibytes += skip_len + l2_len < bsize ?
2025                     l2_len : bsize;
2026         } else {
2027                 /*
2028                  * Add the current portion of the packet to the kstats.
2029                  * The current portion of the packet is calculated by using
2030                  * length of the packet and the previously received portion.
2031                  */
2032                 rdc_stats->ibytes += l2_len - rcr_p->rcvd_pkt_bytes < bsize ?
2033                     l2_len - rcr_p->rcvd_pkt_bytes : bsize;
2034         }
2035 
2036         rcr_p->rcvd_pkt_bytes = bytes_read;
2037 
2038         if (rx_msg_p->free && rx_msg_p->rx_use_bcopy) {
2039                 atomic_inc_32(&rx_msg_p->ref_cnt);
2040                 MUTEX_EXIT(&rx_rbr_p->lock);
2041                 hxge_freeb(rx_msg_p);
2042         } else
2043                 MUTEX_EXIT(&rx_rbr_p->lock);
2044 
2045         if (is_valid) {
2046                 nmp->b_cont = NULL;
2047                 if (first_entry) {
2048                         *mp = nmp;
2049                         *mp_cont = NULL;
2050                 } else {
2051                         *mp_cont = nmp;
2052                 }
2053         }
2054 
2055         /*
2056          * Claim hardware L4 checksum for error-free TCP/UDP packets.
2057          */
2058         if (is_valid && !multi) {
2059                 is_tcp_udp = ((pkt_type == RCR_PKT_IS_TCP ||
2060                     pkt_type == RCR_PKT_IS_UDP) ? B_TRUE : B_FALSE);
2061 
2062                 if (!no_port_bit && l4_cs_eq_bit && is_tcp_udp && !error_type) {
2063                         mac_hcksum_set(nmp, 0, 0, 0, 0, HCK_FULLCKSUM_OK);
2064 
2065                         HXGE_DEBUG_MSG((hxgep, RX_CTL,
2066                             "==> hxge_receive_packet: Full tcp/udp cksum "
2067                             "is_valid 0x%x multi %d error %d",
2068                             is_valid, multi, error_type));
2069                 }
2070         }
2071 
2072         HXGE_DEBUG_MSG((hxgep, RX2_CTL,
2073             "==> hxge_receive_packet: *mp 0x%016llx", *mp));
2074 
2075         *multi_p = (multi == RCR_MULTI_MASK);
2076 
2077         HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_receive_packet: "
2078             "multi %d nmp 0x%016llx *mp 0x%016llx *mp_cont 0x%016llx",
2079             *multi_p, nmp, *mp, *mp_cont));
2080 }
2081 
2082 static void
2083 hxge_rx_rbr_empty_recover(p_hxge_t hxgep, uint8_t channel)
2084 {
2085         hpi_handle_t    handle;
2086         p_rx_rcr_ring_t rcrp;
2087         p_rx_rbr_ring_t rbrp;
2088 
2089         rcrp = hxgep->rx_rcr_rings->rcr_rings[channel];
2090         rbrp = rcrp->rx_rbr_p;
2091         handle = HXGE_DEV_HPI_HANDLE(hxgep);
2092 
2093         /*
2094          * Wait for the channel to be quiet
2095          */
2096         (void) hpi_rxdma_cfg_rdc_wait_for_qst(handle, channel);
2097 
2098         /*
2099          * The buffer post path will accumulate some free buffers
2100          * before the DMA channel is re-enabled.
2101          */
2102 
2103         MUTEX_ENTER(&rbrp->post_lock);
2104         if ((rbrp->rbb_max - rbrp->rbr_used) >= HXGE_RBR_EMPTY_THRESHOLD) {
2105                 hxge_rbr_empty_restore(hxgep, rbrp);
2106         } else {
2107                 rbrp->rbr_is_empty = B_TRUE;
2108         }
2109         MUTEX_EXIT(&rbrp->post_lock);
2110 }
2111 
2112 
2113 /*ARGSUSED*/
2114 static hxge_status_t
2115 hxge_rx_err_evnts(p_hxge_t hxgep, uint_t index, p_hxge_ldv_t ldvp,
2116     rdc_stat_t cs)
2117 {
2118         p_hxge_rx_ring_stats_t  rdc_stats;
2119         hpi_handle_t            handle;
2120         boolean_t               rxchan_fatal = B_FALSE;
2121         uint8_t                 channel;
2122         hxge_status_t           status = HXGE_OK;
2123 
2124         HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_rx_err_evnts"));
2125 
2126         handle = HXGE_DEV_HPI_HANDLE(hxgep);
2127         channel = ldvp->channel;
2128 
2129         rdc_stats = &hxgep->statsp->rdc_stats[ldvp->vdma_index];
2130 
2131         if (cs.bits.rbr_cpl_to) {
2132                 rdc_stats->rbr_tmout++;
2133                 HXGE_FM_REPORT_ERROR(hxgep, channel,
2134                     HXGE_FM_EREPORT_RDMC_RBR_CPL_TO);
2135                 rxchan_fatal = B_TRUE;
2136                 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2137                     "==> hxge_rx_err_evnts(channel %d): "
2138                     "fatal error: rx_rbr_timeout", channel));
2139         }
2140 
2141         if ((cs.bits.rcr_shadow_par_err) || (cs.bits.rbr_prefetch_par_err)) {
2142                 (void) hpi_rxdma_ring_perr_stat_get(handle,
2143                     &rdc_stats->errlog.pre_par, &rdc_stats->errlog.sha_par);
2144         }
2145 
2146         if (cs.bits.rcr_shadow_par_err) {
2147                 rdc_stats->rcr_sha_par++;
2148                 HXGE_FM_REPORT_ERROR(hxgep, channel,
2149                     HXGE_FM_EREPORT_RDMC_RCR_SHA_PAR);
2150                 rxchan_fatal = B_TRUE;
2151                 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2152                     "==> hxge_rx_err_evnts(channel %d): "
2153                     "fatal error: rcr_shadow_par_err", channel));
2154         }
2155 
2156         if (cs.bits.rbr_prefetch_par_err) {
2157                 rdc_stats->rbr_pre_par++;
2158                 HXGE_FM_REPORT_ERROR(hxgep, channel,
2159                     HXGE_FM_EREPORT_RDMC_RBR_PRE_PAR);
2160                 rxchan_fatal = B_TRUE;
2161                 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2162                     "==> hxge_rx_err_evnts(channel %d): "
2163                     "fatal error: rbr_prefetch_par_err", channel));
2164         }
2165 
2166         if (cs.bits.rbr_pre_empty) {
2167                 rdc_stats->rbr_pre_empty++;
2168                 HXGE_FM_REPORT_ERROR(hxgep, channel,
2169                     HXGE_FM_EREPORT_RDMC_RBR_PRE_EMPTY);
2170                 rxchan_fatal = B_TRUE;
2171                 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2172                     "==> hxge_rx_err_evnts(channel %d): "
2173                     "fatal error: rbr_pre_empty", channel));
2174         }
2175 
2176         if (cs.bits.peu_resp_err) {
2177                 rdc_stats->peu_resp_err++;
2178                 HXGE_FM_REPORT_ERROR(hxgep, channel,
2179                     HXGE_FM_EREPORT_RDMC_PEU_RESP_ERR);
2180                 rxchan_fatal = B_TRUE;
2181                 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2182                     "==> hxge_rx_err_evnts(channel %d): "
2183                     "fatal error: peu_resp_err", channel));
2184         }
2185 
2186         if (cs.bits.rcr_thres) {
2187                 rdc_stats->rcr_thres++;
2188         }
2189 
2190         if (cs.bits.rcr_to) {
2191                 rdc_stats->rcr_to++;
2192         }
2193 
2194         if (cs.bits.rcr_shadow_full) {
2195                 rdc_stats->rcr_shadow_full++;
2196                 HXGE_FM_REPORT_ERROR(hxgep, channel,
2197                     HXGE_FM_EREPORT_RDMC_RCR_SHA_FULL);
2198                 rxchan_fatal = B_TRUE;
2199                 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2200                     "==> hxge_rx_err_evnts(channel %d): "
2201                     "fatal error: rcr_shadow_full", channel));
2202         }
2203 
2204         if (cs.bits.rcr_full) {
2205                 rdc_stats->rcrfull++;
2206                 HXGE_FM_REPORT_ERROR(hxgep, channel,
2207                     HXGE_FM_EREPORT_RDMC_RCRFULL);
2208                 rxchan_fatal = B_TRUE;
2209                 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2210                     "==> hxge_rx_err_evnts(channel %d): "
2211                     "fatal error: rcrfull error", channel));
2212         }
2213 
2214         if (cs.bits.rbr_empty) {
2215                 rdc_stats->rbr_empty++;
2216                 hxge_rx_rbr_empty_recover(hxgep, channel);
2217         }
2218 
2219         if (cs.bits.rbr_full) {
2220                 rdc_stats->rbrfull++;
2221                 HXGE_FM_REPORT_ERROR(hxgep, channel,
2222                     HXGE_FM_EREPORT_RDMC_RBRFULL);
2223                 rxchan_fatal = B_TRUE;
2224                 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2225                     "==> hxge_rx_err_evnts(channel %d): "
2226                     "fatal error: rbr_full error", channel));
2227         }
2228 
2229         if (rxchan_fatal) {
2230                 p_rx_rcr_ring_t rcrp;
2231                 p_rx_rbr_ring_t rbrp;
2232 
2233                 rcrp = hxgep->rx_rcr_rings->rcr_rings[channel];
2234                 rbrp = rcrp->rx_rbr_p;
2235 
2236                 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2237                     " hxge_rx_err_evnts: fatal error on Channel #%d\n",
2238                     channel));
2239 
2240                 MUTEX_ENTER(&rbrp->post_lock);
2241                 /* This function needs to be inside the post_lock */
2242                 status = hxge_rxdma_fatal_err_recover(hxgep, channel);
2243                 MUTEX_EXIT(&rbrp->post_lock);
2244                 if (status == HXGE_OK) {
2245                         FM_SERVICE_RESTORED(hxgep);
2246                 }
2247         }
2248 
2249         HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_rx_err_evnts"));
2250         return (status);
2251 }
2252 
2253 static hxge_status_t
2254 hxge_map_rxdma(p_hxge_t hxgep)
2255 {
2256         int                     i, ndmas;
2257         uint16_t                channel;
2258         p_rx_rbr_rings_t        rx_rbr_rings;
2259         p_rx_rbr_ring_t         *rbr_rings;
2260         p_rx_rcr_rings_t        rx_rcr_rings;
2261         p_rx_rcr_ring_t         *rcr_rings;
2262         p_rx_mbox_areas_t       rx_mbox_areas_p;
2263         p_rx_mbox_t             *rx_mbox_p;
2264         p_hxge_dma_pool_t       dma_buf_poolp;
2265         p_hxge_dma_common_t     *dma_buf_p;
2266         p_hxge_dma_pool_t       dma_rbr_cntl_poolp;
2267         p_hxge_dma_common_t     *dma_rbr_cntl_p;
2268         p_hxge_dma_pool_t       dma_rcr_cntl_poolp;
2269         p_hxge_dma_common_t     *dma_rcr_cntl_p;
2270         p_hxge_dma_pool_t       dma_mbox_cntl_poolp;
2271         p_hxge_dma_common_t     *dma_mbox_cntl_p;
2272         uint32_t                *num_chunks;
2273         hxge_status_t           status = HXGE_OK;
2274 
2275         HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_map_rxdma"));
2276 
2277         dma_buf_poolp = hxgep->rx_buf_pool_p;
2278         dma_rbr_cntl_poolp = hxgep->rx_rbr_cntl_pool_p;
2279         dma_rcr_cntl_poolp = hxgep->rx_rcr_cntl_pool_p;
2280         dma_mbox_cntl_poolp = hxgep->rx_mbox_cntl_pool_p;
2281 
2282         if (!dma_buf_poolp->buf_allocated ||
2283             !dma_rbr_cntl_poolp->buf_allocated ||
2284             !dma_rcr_cntl_poolp->buf_allocated ||
2285             !dma_mbox_cntl_poolp->buf_allocated) {
2286                 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2287                     "<== hxge_map_rxdma: buf not allocated"));
2288                 return (HXGE_ERROR);
2289         }
2290 
2291         ndmas = dma_buf_poolp->ndmas;
2292         if (!ndmas) {
2293                 HXGE_DEBUG_MSG((hxgep, RX_CTL,
2294                     "<== hxge_map_rxdma: no dma allocated"));
2295                 return (HXGE_ERROR);
2296         }
2297 
2298         num_chunks = dma_buf_poolp->num_chunks;
2299         dma_buf_p = dma_buf_poolp->dma_buf_pool_p;
2300         dma_rbr_cntl_p = dma_rbr_cntl_poolp->dma_buf_pool_p;
2301         dma_rcr_cntl_p = dma_rcr_cntl_poolp->dma_buf_pool_p;
2302         dma_mbox_cntl_p = dma_mbox_cntl_poolp->dma_buf_pool_p;
2303 
2304         rx_rbr_rings = (p_rx_rbr_rings_t)
2305             KMEM_ZALLOC(sizeof (rx_rbr_rings_t), KM_SLEEP);
2306         rbr_rings = (p_rx_rbr_ring_t *)KMEM_ZALLOC(
2307             sizeof (p_rx_rbr_ring_t) * ndmas, KM_SLEEP);
2308 
2309         rx_rcr_rings = (p_rx_rcr_rings_t)
2310             KMEM_ZALLOC(sizeof (rx_rcr_rings_t), KM_SLEEP);
2311         rcr_rings = (p_rx_rcr_ring_t *)KMEM_ZALLOC(
2312             sizeof (p_rx_rcr_ring_t) * ndmas, KM_SLEEP);
2313 
2314         rx_mbox_areas_p = (p_rx_mbox_areas_t)
2315             KMEM_ZALLOC(sizeof (rx_mbox_areas_t), KM_SLEEP);
2316         rx_mbox_p = (p_rx_mbox_t *)KMEM_ZALLOC(
2317             sizeof (p_rx_mbox_t) * ndmas, KM_SLEEP);
2318 
2319         /*
2320          * Timeout should be set based on the system clock divider.
2321          * The following timeout value of 1 assumes that the
2322          * granularity (1000) is 3 microseconds running at 300MHz.
2323          */
2324 
2325         hxgep->intr_threshold = RXDMA_RCR_PTHRES_DEFAULT;
2326         hxgep->intr_timeout = RXDMA_RCR_TO_DEFAULT;
2327 
2328         /*
2329          * Map descriptors from the buffer pools for each DMA channel.
2330          */
2331         for (i = 0; i < ndmas; i++) {
2332                 if (((p_hxge_dma_common_t)dma_buf_p[i]) == NULL) {
2333                         status = HXGE_ERROR;
2334                         goto hxge_map_rxdma_fail1;
2335                 }
2336 
2337                 /*
2338                  * Set up and prepare buffer blocks, descriptors and mailbox.
2339                  */
2340                 channel = ((p_hxge_dma_common_t)dma_buf_p[i])->dma_channel;
2341                 status = hxge_map_rxdma_channel(hxgep, channel,
2342                     (p_hxge_dma_common_t *)&dma_buf_p[i],
2343                     (p_rx_rbr_ring_t *)&rbr_rings[i],
2344                     num_chunks[i],
2345                     (p_hxge_dma_common_t *)&dma_rbr_cntl_p[i],
2346                     (p_hxge_dma_common_t *)&dma_rcr_cntl_p[i],
2347                     (p_hxge_dma_common_t *)&dma_mbox_cntl_p[i],
2348                     (p_rx_rcr_ring_t *)&rcr_rings[i],
2349                     (p_rx_mbox_t *)&rx_mbox_p[i]);
2350                 if (status != HXGE_OK) {
2351                         goto hxge_map_rxdma_fail1;
2352                 }
2353                 rbr_rings[i]->index = (uint16_t)i;
2354                 rcr_rings[i]->index = (uint16_t)i;
2355                 rcr_rings[i]->rdc_stats = &hxgep->statsp->rdc_stats[i];
2356         }
2357 
2358         rx_rbr_rings->ndmas = rx_rcr_rings->ndmas = ndmas;
2359         rx_rbr_rings->rbr_rings = rbr_rings;
2360         hxgep->rx_rbr_rings = rx_rbr_rings;
2361         rx_rcr_rings->rcr_rings = rcr_rings;
2362         hxgep->rx_rcr_rings = rx_rcr_rings;
2363 
2364         rx_mbox_areas_p->rxmbox_areas = rx_mbox_p;
2365         hxgep->rx_mbox_areas_p = rx_mbox_areas_p;
2366 
2367         goto hxge_map_rxdma_exit;
2368 
2369 hxge_map_rxdma_fail1:
2370         HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2371             "==> hxge_map_rxdma: unmap rbr,rcr (status 0x%x channel %d i %d)",
2372             status, channel, i));
2373         i--;
2374         for (; i >= 0; i--) {
2375                 channel = ((p_hxge_dma_common_t)dma_buf_p[i])->dma_channel;
2376                 hxge_unmap_rxdma_channel(hxgep, channel,
2377                     rbr_rings[i], rcr_rings[i], rx_mbox_p[i]);
2378         }
2379 
2380         KMEM_FREE(rbr_rings, sizeof (p_rx_rbr_ring_t) * ndmas);
2381         KMEM_FREE(rx_rbr_rings, sizeof (rx_rbr_rings_t));
2382         KMEM_FREE(rcr_rings, sizeof (p_rx_rcr_ring_t) * ndmas);
2383         KMEM_FREE(rx_rcr_rings, sizeof (rx_rcr_rings_t));
2384         KMEM_FREE(rx_mbox_p, sizeof (p_rx_mbox_t) * ndmas);
2385         KMEM_FREE(rx_mbox_areas_p, sizeof (rx_mbox_areas_t));
2386 
2387 hxge_map_rxdma_exit:
2388         HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2389             "<== hxge_map_rxdma: (status 0x%x channel %d)", status, channel));
2390 
2391         return (status);
2392 }
2393 
2394 static void
2395 hxge_unmap_rxdma(p_hxge_t hxgep)
2396 {
2397         int                     i, ndmas;
2398         uint16_t                channel;
2399         p_rx_rbr_rings_t        rx_rbr_rings;
2400         p_rx_rbr_ring_t         *rbr_rings;
2401         p_rx_rcr_rings_t        rx_rcr_rings;
2402         p_rx_rcr_ring_t         *rcr_rings;
2403         p_rx_mbox_areas_t       rx_mbox_areas_p;
2404         p_rx_mbox_t             *rx_mbox_p;
2405         p_hxge_dma_pool_t       dma_buf_poolp;
2406         p_hxge_dma_pool_t       dma_rbr_cntl_poolp;
2407         p_hxge_dma_pool_t       dma_rcr_cntl_poolp;
2408         p_hxge_dma_pool_t       dma_mbox_cntl_poolp;
2409         p_hxge_dma_common_t     *dma_buf_p;
2410 
2411         HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_unmap_rxdma"));
2412 
2413         dma_buf_poolp = hxgep->rx_buf_pool_p;
2414         dma_rbr_cntl_poolp = hxgep->rx_rbr_cntl_pool_p;
2415         dma_rcr_cntl_poolp = hxgep->rx_rcr_cntl_pool_p;
2416         dma_mbox_cntl_poolp = hxgep->rx_mbox_cntl_pool_p;
2417 
2418         if (!dma_buf_poolp->buf_allocated ||
2419             !dma_rbr_cntl_poolp->buf_allocated ||
2420             !dma_rcr_cntl_poolp->buf_allocated ||
2421             !dma_mbox_cntl_poolp->buf_allocated) {
2422                 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2423                     "<== hxge_unmap_rxdma: NULL buf pointers"));
2424                 return;
2425         }
2426 
2427         rx_rbr_rings = hxgep->rx_rbr_rings;
2428         rx_rcr_rings = hxgep->rx_rcr_rings;
2429         if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) {
2430                 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2431                     "<== hxge_unmap_rxdma: NULL pointers"));
2432                 return;
2433         }
2434 
2435         ndmas = rx_rbr_rings->ndmas;
2436         if (!ndmas) {
2437                 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2438                     "<== hxge_unmap_rxdma: no channel"));
2439                 return;
2440         }
2441 
2442         HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2443             "==> hxge_unmap_rxdma (ndmas %d)", ndmas));
2444 
2445         rbr_rings = rx_rbr_rings->rbr_rings;
2446         rcr_rings = rx_rcr_rings->rcr_rings;
2447         rx_mbox_areas_p = hxgep->rx_mbox_areas_p;
2448         rx_mbox_p = rx_mbox_areas_p->rxmbox_areas;
2449         dma_buf_p = dma_buf_poolp->dma_buf_pool_p;
2450 
2451         for (i = 0; i < ndmas; i++) {
2452                 channel = ((p_hxge_dma_common_t)dma_buf_p[i])->dma_channel;
2453                 HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2454                     "==> hxge_unmap_rxdma (ndmas %d) channel %d",
2455                     ndmas, channel));
2456                 (void) hxge_unmap_rxdma_channel(hxgep, channel,
2457                     (p_rx_rbr_ring_t)rbr_rings[i],
2458                     (p_rx_rcr_ring_t)rcr_rings[i],
2459                     (p_rx_mbox_t)rx_mbox_p[i]);
2460         }
2461 
2462         KMEM_FREE(rx_rbr_rings, sizeof (rx_rbr_rings_t));
2463         KMEM_FREE(rbr_rings, sizeof (p_rx_rbr_ring_t) * ndmas);
2464         KMEM_FREE(rx_rcr_rings, sizeof (rx_rcr_rings_t));
2465         KMEM_FREE(rcr_rings, sizeof (p_rx_rcr_ring_t) * ndmas);
2466         KMEM_FREE(rx_mbox_areas_p, sizeof (rx_mbox_areas_t));
2467         KMEM_FREE(rx_mbox_p, sizeof (p_rx_mbox_t) * ndmas);
2468 
2469         HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_unmap_rxdma"));
2470 }
2471 
2472 hxge_status_t
2473 hxge_map_rxdma_channel(p_hxge_t hxgep, uint16_t channel,
2474     p_hxge_dma_common_t *dma_buf_p, p_rx_rbr_ring_t *rbr_p,
2475     uint32_t num_chunks, p_hxge_dma_common_t *dma_rbr_cntl_p,
2476     p_hxge_dma_common_t *dma_rcr_cntl_p, p_hxge_dma_common_t *dma_mbox_cntl_p,
2477     p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p)
2478 {
2479         int status = HXGE_OK;
2480 
2481         /*
2482          * Set up and prepare buffer blocks, descriptors and mailbox.
2483          */
2484         HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2485             "==> hxge_map_rxdma_channel (channel %d)", channel));
2486 
2487         /*
2488          * Receive buffer blocks
2489          */
2490         status = hxge_map_rxdma_channel_buf_ring(hxgep, channel,
2491             dma_buf_p, rbr_p, num_chunks);
2492         if (status != HXGE_OK) {
2493                 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2494                     "==> hxge_map_rxdma_channel (channel %d): "
2495                     "map buffer failed 0x%x", channel, status));
2496                 goto hxge_map_rxdma_channel_exit;
2497         }
2498 
2499         /*
2500          * Receive block ring, completion ring and mailbox.
2501          */
2502         status = hxge_map_rxdma_channel_cfg_ring(hxgep, channel,
2503             dma_rbr_cntl_p, dma_rcr_cntl_p, dma_mbox_cntl_p,
2504             rbr_p, rcr_p, rx_mbox_p);
2505         if (status != HXGE_OK) {
2506                 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2507                     "==> hxge_map_rxdma_channel (channel %d): "
2508                     "map config failed 0x%x", channel, status));
2509                 goto hxge_map_rxdma_channel_fail2;
2510         }
2511         goto hxge_map_rxdma_channel_exit;
2512 
2513 hxge_map_rxdma_channel_fail3:
2514         /* Free rbr, rcr */
2515         HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2516             "==> hxge_map_rxdma_channel: free rbr/rcr (status 0x%x channel %d)",
2517             status, channel));
2518         hxge_unmap_rxdma_channel_cfg_ring(hxgep, *rcr_p, *rx_mbox_p);
2519 
2520 hxge_map_rxdma_channel_fail2:
2521         /* Free buffer blocks */
2522         HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2523             "==> hxge_map_rxdma_channel: free rx buffers "
2524             "(hxgep 0x%x status 0x%x channel %d)",
2525             hxgep, status, channel));
2526         hxge_unmap_rxdma_channel_buf_ring(hxgep, *rbr_p);
2527 
2528         status = HXGE_ERROR;
2529 
2530 hxge_map_rxdma_channel_exit:
2531         HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2532             "<== hxge_map_rxdma_channel: (hxgep 0x%x status 0x%x channel %d)",
2533             hxgep, status, channel));
2534 
2535         return (status);
2536 }
2537 
2538 /*ARGSUSED*/
2539 static void
2540 hxge_unmap_rxdma_channel(p_hxge_t hxgep, uint16_t channel,
2541     p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p)
2542 {
2543         HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2544             "==> hxge_unmap_rxdma_channel (channel %d)", channel));
2545 
2546         /*
2547          * unmap receive block ring, completion ring and mailbox.
2548          */
2549         (void) hxge_unmap_rxdma_channel_cfg_ring(hxgep, rcr_p, rx_mbox_p);
2550 
2551         /* unmap buffer blocks */
2552         (void) hxge_unmap_rxdma_channel_buf_ring(hxgep, rbr_p);
2553 
2554         HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_unmap_rxdma_channel"));
2555 }
2556 
2557 /*ARGSUSED*/
2558 static hxge_status_t
2559 hxge_map_rxdma_channel_cfg_ring(p_hxge_t hxgep, uint16_t dma_channel,
2560     p_hxge_dma_common_t *dma_rbr_cntl_p, p_hxge_dma_common_t *dma_rcr_cntl_p,
2561     p_hxge_dma_common_t *dma_mbox_cntl_p, p_rx_rbr_ring_t *rbr_p,
2562     p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p)
2563 {
2564         p_rx_rbr_ring_t         rbrp;
2565         p_rx_rcr_ring_t         rcrp;
2566         p_rx_mbox_t             mboxp;
2567         p_hxge_dma_common_t     cntl_dmap;
2568         p_hxge_dma_common_t     dmap;
2569         p_rx_msg_t              *rx_msg_ring;
2570         p_rx_msg_t              rx_msg_p;
2571         rdc_rbr_cfg_a_t         *rcfga_p;
2572         rdc_rbr_cfg_b_t         *rcfgb_p;
2573         rdc_rcr_cfg_a_t         *cfga_p;
2574         rdc_rcr_cfg_b_t         *cfgb_p;
2575         rdc_rx_cfg1_t           *cfig1_p;
2576         rdc_rx_cfg2_t           *cfig2_p;
2577         rdc_rbr_kick_t          *kick_p;
2578         uint32_t                dmaaddrp;
2579         uint32_t                *rbr_vaddrp;
2580         uint32_t                bkaddr;
2581         hxge_status_t           status = HXGE_OK;
2582         int                     i;
2583         uint32_t                hxge_port_rcr_size;
2584 
2585         HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2586             "==> hxge_map_rxdma_channel_cfg_ring"));
2587 
2588         cntl_dmap = *dma_rbr_cntl_p;
2589 
2590         /*
2591          * Map in the receive block ring
2592          */
2593         rbrp = *rbr_p;
2594         dmap = (p_hxge_dma_common_t)&rbrp->rbr_desc;
2595         hxge_setup_dma_common(dmap, cntl_dmap, rbrp->rbb_max, 4);
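             /* Each RBR descriptor is a 4-byte packed buffer block address. */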
2596 
2597         /*
2598          * Zero out buffer block ring descriptors.
2599          */
2600         bzero((caddr_t)dmap->kaddrp, dmap->alength);
2601 
2602         rcfga_p = &(rbrp->rbr_cfga);
2603         rcfgb_p = &(rbrp->rbr_cfgb);
2604         kick_p = &(rbrp->rbr_kick);
2605         rcfga_p->value = 0;
2606         rcfgb_p->value = 0;
2607         kick_p->value = 0;
2608         rbrp->rbr_addr = dmap->dma_cookie.dmac_laddress;
2609         rcfga_p->value = (rbrp->rbr_addr &
2610             (RBR_CFIG_A_STDADDR_MASK | RBR_CFIG_A_STDADDR_BASE_MASK));
2611         rcfga_p->value |= ((uint64_t)rbrp->rbb_max << RBR_CFIG_A_LEN_SHIFT);
2612 
2613         /* XXXX: how to choose packet buffer sizes */
2614         rcfgb_p->bits.bufsz0 = rbrp->pkt_buf_size0;
2615         rcfgb_p->bits.vld0 = 1;
2616         rcfgb_p->bits.bufsz1 = rbrp->pkt_buf_size1;
2617         rcfgb_p->bits.vld1 = 1;
2618         rcfgb_p->bits.bufsz2 = rbrp->pkt_buf_size2;
2619         rcfgb_p->bits.vld2 = 1;
2620         rcfgb_p->bits.bksize = hxgep->rx_bksize_code;
2621 
2622         /*
2623          * For each buffer block, enter receive block address to the ring.
2624          */
2625         rbr_vaddrp = (uint32_t *)dmap->kaddrp;
2626         rbrp->rbr_desc_vp = (uint32_t *)dmap->kaddrp;
2627         HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2628             "==> hxge_map_rxdma_channel_cfg_ring: channel %d "
2629             "rbr_vaddrp $%p", dma_channel, rbr_vaddrp));
2630 
2631         rx_msg_ring = rbrp->rx_msg_ring;
2632         for (i = 0; i < rbrp->tnblocks; i++) {
2633                 rx_msg_p = rx_msg_ring[i];
2634                 rx_msg_p->hxgep = hxgep;
2635                 rx_msg_p->rx_rbr_p = rbrp;
2636                 bkaddr = (uint32_t)
2637                     ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress >>
2638                     RBR_BKADDR_SHIFT));
2639                 rx_msg_p->free = B_FALSE;
2640                 rx_msg_p->max_usage_cnt = 0xbaddcafe;
2641 
2642                 *rbr_vaddrp++ = bkaddr;
2643         }
2644 
2645         kick_p->bits.bkadd = rbrp->rbb_max;
2646         rbrp->rbr_wr_index = (rbrp->rbb_max - 1);
2647 
2648         rbrp->rbr_rd_index = 0;
2649 
2650         rbrp->rbr_consumed = 0;
2651         rbrp->rbr_used = 0;
2652         rbrp->rbr_use_bcopy = B_TRUE;
2653         rbrp->rbr_bufsize_type = RCR_PKTBUFSZ_0;
2654 
2655         /*
2656          * Do bcopy on packets greater than bcopy size once the lo threshold is
2657          * reached. This lo threshold should be less than the hi threshold.
2658          *
2659          * Do bcopy on every packet once the hi threshold is reached.
2660          */
2661         if (hxge_rx_threshold_lo >= hxge_rx_threshold_hi) {
2662                 /* default it to use hi */
2663                 hxge_rx_threshold_lo = hxge_rx_threshold_hi;
2664         }
2665         if (hxge_rx_buf_size_type > HXGE_RBR_TYPE2) {
2666                 hxge_rx_buf_size_type = HXGE_RBR_TYPE2;
2667         }
2668         rbrp->rbr_bufsize_type = hxge_rx_buf_size_type;
2669 
2670         switch (hxge_rx_threshold_hi) {
2671         default:
2672         case HXGE_RX_COPY_NONE:
2673                 /* Do not do bcopy at all */
2674                 rbrp->rbr_use_bcopy = B_FALSE;
2675                 rbrp->rbr_threshold_hi = rbrp->rbb_max;
2676                 break;
2677 
2678         case HXGE_RX_COPY_1:
2679         case HXGE_RX_COPY_2:
2680         case HXGE_RX_COPY_3:
2681         case HXGE_RX_COPY_4:
2682         case HXGE_RX_COPY_5:
2683         case HXGE_RX_COPY_6:
2684         case HXGE_RX_COPY_7:
2685                 rbrp->rbr_threshold_hi =
2686                     rbrp->rbb_max * (hxge_rx_threshold_hi) /
2687                     HXGE_RX_BCOPY_SCALE;
2688                 break;
2689 
2690         case HXGE_RX_COPY_ALL:
2691                 rbrp->rbr_threshold_hi = 0;
2692                 break;
2693         }
2694 
2695         switch (hxge_rx_threshold_lo) {
2696         default:
2697         case HXGE_RX_COPY_NONE:
2698                 /* Do not do bcopy at all */
2699                 if (rbrp->rbr_use_bcopy) {
2700                         rbrp->rbr_use_bcopy = B_FALSE;
2701                 }
2702                 rbrp->rbr_threshold_lo = rbrp->rbb_max;
2703                 break;
2704 
2705         case HXGE_RX_COPY_1:
2706         case HXGE_RX_COPY_2:
2707         case HXGE_RX_COPY_3:
2708         case HXGE_RX_COPY_4:
2709         case HXGE_RX_COPY_5:
2710         case HXGE_RX_COPY_6:
2711         case HXGE_RX_COPY_7:
2712                 rbrp->rbr_threshold_lo =
2713                     rbrp->rbb_max * (hxge_rx_threshold_lo) /
2714                     HXGE_RX_BCOPY_SCALE;
2715                 break;
2716 
2717         case HXGE_RX_COPY_ALL:
2718                 rbrp->rbr_threshold_lo = 0;
2719                 break;
2720         }
2721 
2722         HXGE_DEBUG_MSG((hxgep, RX_CTL,
2723             "hxge_map_rxdma_channel_cfg_ring: channel %d rbb_max %d "
2724             "rbrp->rbr_bufsize_type %d rbb_threshold_hi %d "
2725             "rbb_threshold_lo %d",
2726             dma_channel, rbrp->rbb_max, rbrp->rbr_bufsize_type,
2727             rbrp->rbr_threshold_hi, rbrp->rbr_threshold_lo));
2728 
2729         /* Map in the receive completion ring */
2730         rcrp = (p_rx_rcr_ring_t)KMEM_ZALLOC(sizeof (rx_rcr_ring_t), KM_SLEEP);
2731         MUTEX_INIT(&rcrp->lock, NULL, MUTEX_DRIVER,
2732             (void *) hxgep->interrupt_cookie);
2733         rcrp->rdc = dma_channel;
2734         rcrp->hxgep = hxgep;
2735 
2736         hxge_port_rcr_size = hxgep->hxge_port_rcr_size;
2737         rcrp->comp_size = hxge_port_rcr_size;
2738         rcrp->comp_wrap_mask = hxge_port_rcr_size - 1;
2739 
2740         cntl_dmap = *dma_rcr_cntl_p;
2741 
2742         dmap = (p_hxge_dma_common_t)&rcrp->rcr_desc;
2743         hxge_setup_dma_common(dmap, cntl_dmap, rcrp->comp_size,
2744             sizeof (rcr_entry_t));
2745         rcrp->comp_rd_index = 0;
2746         rcrp->comp_wt_index = 0;
2747         rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p =
2748             (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc);
2749 #if defined(__i386)
2750         rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp =
2751             (p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc);
2752 #else
2753         rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp =
2754             (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc);
2755 #endif
2756         rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p +
2757             (hxge_port_rcr_size - 1);
2758         rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp +
2759             (hxge_port_rcr_size - 1);
2760 
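             /*
              * Record the starting hardware tail value: keep the low bits of
              * the RCR I/O address and shift by 3 to convert bytes to 8-byte
              * entries, matching the tail index arithmetic in hxge_rx_pkts.
              */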
2761         rcrp->rcr_tail_begin = DMA_COMMON_IOADDR(rcrp->rcr_desc);
2762         rcrp->rcr_tail_begin = (rcrp->rcr_tail_begin & 0x7ffffULL) >> 3;
2763 
2764         HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2765             "==> hxge_map_rxdma_channel_cfg_ring: channel %d "
2766             "rbr_vaddrp $%p rcr_desc_rd_head_p $%p "
2767             "rcr_desc_rd_head_pp $%p rcr_desc_last_p $%p "
2768             "rcr_desc_last_pp $%p ",
2769             dma_channel, rbr_vaddrp, rcrp->rcr_desc_rd_head_p,
2770             rcrp->rcr_desc_rd_head_pp, rcrp->rcr_desc_last_p,
2771             rcrp->rcr_desc_last_pp));
2772 
2773         /*
2774          * Zero out the receive completion ring descriptors.
2775          */
2776         bzero((caddr_t)dmap->kaddrp, dmap->alength);
2777         rcrp->intr_timeout = hxgep->intr_timeout;
2778         rcrp->intr_threshold = hxgep->intr_threshold;
2779         rcrp->full_hdr_flag = B_FALSE;
2780         rcrp->sw_priv_hdr_len = 0;
2781 
2782         cfga_p = &(rcrp->rcr_cfga);
2783         cfgb_p = &(rcrp->rcr_cfgb);
2784         cfga_p->value = 0;
2785         cfgb_p->value = 0;
2786         rcrp->rcr_addr = dmap->dma_cookie.dmac_laddress;
2787 
2788         cfga_p->value = (rcrp->rcr_addr &
2789             (RCRCFIG_A_STADDR_MASK | RCRCFIG_A_STADDR_BASE_MASK));
2790 
2791         cfga_p->value |= ((uint64_t)rcrp->comp_size << RCRCFIG_A_LEN_SHIF);
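             /*
              * RCR configuration A now carries both the ring's DMA base
              * address (masked to the hardware start-address fields) and the
              * ring length in the bits above RCRCFIG_A_LEN_SHIF.
              */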
2792 
2793         /*
2794          * Timeout should be set based on the system clock divider. The
2795          * following timeout value of 1 assumes that the granularity (1000) is
2796          * 3 microseconds running at 300MHz.
2797          */
2798         cfgb_p->bits.pthres = rcrp->intr_threshold;
2799         cfgb_p->bits.timeout = rcrp->intr_timeout;
2800         cfgb_p->bits.entout = 1;
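             /*
              * These cfgb fields are the interrupt coalescing knobs: pthres is
              * the completion-count threshold, timeout is the coalescing
              * timer, and entout = 1 is presumed to enable the timeout
              * mechanism.
              */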
2801 
2802         /* Map in the mailbox */
2803         cntl_dmap = *dma_mbox_cntl_p;
2804         mboxp = (p_rx_mbox_t)KMEM_ZALLOC(sizeof (rx_mbox_t), KM_SLEEP);
2805         dmap = (p_hxge_dma_common_t)&mboxp->rx_mbox;
2806         hxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (rxdma_mailbox_t));
2807         cfig1_p = (rdc_rx_cfg1_t *)&mboxp->rx_cfg1;
2808         cfig2_p = (rdc_rx_cfg2_t *)&mboxp->rx_cfg2;
2809         cfig1_p->value = cfig2_p->value = 0;
2810 
2811         mboxp->mbox_addr = dmap->dma_cookie.dmac_laddress;
2812         HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2813             "==> hxge_map_rxdma_channel_cfg_ring: "
2814             "channel %d cfig1 0x%016llx cfig2 0x%016llx cookie 0x%016llx",
2815             dma_channel, cfig1_p->value, cfig2_p->value,
2816             mboxp->mbox_addr));
2817 
2818         dmaaddrp = (uint32_t)((dmap->dma_cookie.dmac_laddress >> 32) & 0xfff);
2819         cfig1_p->bits.mbaddr_h = dmaaddrp;
2820 
2822         dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress &
2823             RXDMA_CFIG2_MBADDR_L_MASK);
2824 
2825         cfig2_p->bits.mbaddr_l = (dmaaddrp >> RXDMA_CFIG2_MBADDR_L_SHIFT);
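             /*
              * Net effect: cfig1.mbaddr_h holds the 12 address bits above bit
              * 31 and cfig2.mbaddr_l holds the masked low bits shifted down by
              * RXDMA_CFIG2_MBADDR_L_SHIFT, splitting the mailbox DMA address
              * across the two registers.
              */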
2826 
2827         HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2828             "==> hxge_map_rxdma_channel_cfg_ring: channel %d dmaaddrp $%p "
2829             "cfig1 0x%016llx cfig2 0x%016llx",
2830             dma_channel, dmaaddrp, cfig1_p->value, cfig2_p->value));
2831 
2832         cfig2_p->bits.full_hdr = rcrp->full_hdr_flag;
2833         cfig2_p->bits.offset = rcrp->sw_priv_hdr_len;
2834 
2835         rbrp->rx_rcr_p = rcrp;
2836         rcrp->rx_rbr_p = rbrp;
2837         *rcr_p = rcrp;
2838         *rx_mbox_p = mboxp;
2839 
2840         HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2841             "<== hxge_map_rxdma_channel_cfg_ring status 0x%08x", status));
2842         return (status);
2843 }
2844 
2845 /*ARGSUSED*/
2846 static void
2847 hxge_unmap_rxdma_channel_cfg_ring(p_hxge_t hxgep,
2848     p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p)
2849 {
2850         HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2851             "==> hxge_unmap_rxdma_channel_cfg_ring: channel %d", rcr_p->rdc));
2852 
2853         MUTEX_DESTROY(&rcr_p->lock);
2854         KMEM_FREE(rcr_p, sizeof (rx_rcr_ring_t));
2855         KMEM_FREE(rx_mbox_p, sizeof (rx_mbox_t));
2856 
2857         HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2858             "<== hxge_unmap_rxdma_channel_cfg_ring"));
2859 }
2860 
2861 static hxge_status_t
2862 hxge_map_rxdma_channel_buf_ring(p_hxge_t hxgep, uint16_t channel,
2863     p_hxge_dma_common_t *dma_buf_p,
2864     p_rx_rbr_ring_t *rbr_p, uint32_t num_chunks)
2865 {
2866         p_rx_rbr_ring_t         rbrp;
2867         p_hxge_dma_common_t     dma_bufp, tmp_bufp;
2868         p_rx_msg_t              *rx_msg_ring;
2869         p_rx_msg_t              rx_msg_p;
2870         p_mblk_t                mblk_p;
2871 
2872         rxring_info_t *ring_info;
2873         hxge_status_t status = HXGE_OK;
2874         int i, j, index;
2875         uint32_t size, bsize, nblocks, nmsgs;
2876 
2877         HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2878             "==> hxge_map_rxdma_channel_buf_ring: channel %d", channel));
2879 
2880         dma_bufp = tmp_bufp = *dma_buf_p;
2881         HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2882             " hxge_map_rxdma_channel_buf_ring: channel %d to map %d "
2883             "chunks bufp 0x%016llx", channel, num_chunks, dma_bufp));
2884 
2885         nmsgs = 0;
2886         for (i = 0; i < num_chunks; i++, tmp_bufp++) {
2887                 HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2888                     "==> hxge_map_rxdma_channel_buf_ring: channel %d "
2889                     "bufp 0x%016llx nblocks %d nmsgs %d",
2890                     channel, tmp_bufp, tmp_bufp->nblocks, nmsgs));
2891                 nmsgs += tmp_bufp->nblocks;
2892         }
2893         if (!nmsgs) {
2894                 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2895                     "<== hxge_map_rxdma_channel_buf_ring: channel %d "
2896                     "no msg blocks", channel));
2897                 status = HXGE_ERROR;
2898                 goto hxge_map_rxdma_channel_buf_ring_exit;
2899         }
2900         rbrp = (p_rx_rbr_ring_t)KMEM_ZALLOC(sizeof (rx_rbr_ring_t), KM_SLEEP);
2901 
2902         size = nmsgs * sizeof (p_rx_msg_t);
2903         rx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP);
2904         ring_info = (rxring_info_t *)KMEM_ZALLOC(sizeof (rxring_info_t),
2905             KM_SLEEP);
2906 
2907         MUTEX_INIT(&rbrp->lock, NULL, MUTEX_DRIVER,
2908             (void *) hxgep->interrupt_cookie);
2909         MUTEX_INIT(&rbrp->post_lock, NULL, MUTEX_DRIVER,
2910             (void *) hxgep->interrupt_cookie);
2911 
2912         rbrp->rdc = channel;
2913         rbrp->num_blocks = num_chunks;
2914         rbrp->tnblocks = nmsgs;
2915         rbrp->rbb_max = nmsgs;
2916         rbrp->rbr_max_size = nmsgs;
2917         rbrp->rbr_wrap_mask = (rbrp->rbb_max - 1);
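             /*
              * (rbb_max - 1) only behaves as a wrap-around mask when rbb_max
              * is a power of two; the RBR sizing tunables are expected to keep
              * it that way.
              */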
2918 
2919         /*
2920          * Buffer sizes: 256, 1K, and 2K.
2921          *
2922          * Blk 0 size.
2923          */
2924         rbrp->pkt_buf_size0 = RBR_BUFSZ0_256B;
2925         rbrp->pkt_buf_size0_bytes = RBR_BUFSZ0_256_BYTES;
2926         rbrp->hpi_pkt_buf_size0 = SIZE_256B;
2927 
2928         /*
2929          * Blk 1 size.
2930          */
2931         rbrp->pkt_buf_size1 = RBR_BUFSZ1_1K;
2932         rbrp->pkt_buf_size1_bytes = RBR_BUFSZ1_1K_BYTES;
2933         rbrp->hpi_pkt_buf_size1 = SIZE_1KB;
2934 
2935         /*
2936          * Blk 2 size.
2937          */
2938         rbrp->pkt_buf_size2 = RBR_BUFSZ2_2K;
2939         rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_2K_BYTES;
2940         rbrp->hpi_pkt_buf_size2 = SIZE_2KB;
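             /*
              * These three classes (256 bytes, 1 KB and 2 KB) are the packet
              * buffer sizes the hardware can carve out of each RBR block; the
              * class used for a given frame is presumably selected by packet
              * length at receive time.
              */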
2941 
2942         rbrp->block_size = hxgep->rx_default_block_size;
2943 
2944         HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2945             "==> hxge_map_rxdma_channel_buf_ring: channel %d "
2946             "actual rbr max %d rbb_max %d nmsgs %d "
2947             "rbrp->block_size %d default_block_size %d "
2948             "(config hxge_rbr_size %d hxge_rbr_spare_size %d)",
2949             channel, rbrp->rbr_max_size, rbrp->rbb_max, nmsgs,
2950             rbrp->block_size, hxgep->rx_default_block_size,
2951             hxge_rbr_size, hxge_rbr_spare_size));
2952 
2953         /*
2954          * Map in buffers from the buffer pool.
2955          * Note that num_blocks is the number of chunks (num_chunks). On SPARC
2956          * there is likely only one chunk; on x86 there will be many chunks.
2957          * Loop over chunks.
2958          */
2959         index = 0;
2960         for (i = 0; i < rbrp->num_blocks; i++, dma_bufp++) {
2961                 bsize = dma_bufp->block_size;
2962                 nblocks = dma_bufp->nblocks;
2963 #if defined(__i386)
2964                 ring_info->buffer[i].dvma_addr = (uint32_t)dma_bufp->ioaddr_pp;
2965 #else
2966                 ring_info->buffer[i].dvma_addr = (uint64_t)dma_bufp->ioaddr_pp;
2967 #endif
2968                 ring_info->buffer[i].buf_index = i;
2969                 ring_info->buffer[i].buf_size = dma_bufp->alength;
2970                 ring_info->buffer[i].start_index = index;
2971 #if defined(__i386)
2972                 ring_info->buffer[i].kaddr = (uint32_t)dma_bufp->kaddrp;
2973 #else
2974                 ring_info->buffer[i].kaddr = (uint64_t)dma_bufp->kaddrp;
2975 #endif
2976 
2977                 HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2978                     " hxge_map_rxdma_channel_buf_ring: map channel %d "
2979                     "chunk %d nblocks %d chunk_size %x block_size 0x%x "
2980                     "dma_bufp $%p dvma_addr $%p", channel, i,
2981                     dma_bufp->nblocks,
2982                     ring_info->buffer[i].buf_size, bsize, dma_bufp,
2983                     ring_info->buffer[i].dvma_addr));
2984 
2985                 /* loop over blocks within a chunk */
2986                 for (j = 0; j < nblocks; j++) {
2987                         if ((rx_msg_p = hxge_allocb(bsize, BPRI_LO,
2988                             dma_bufp)) == NULL) {
2989                                 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2990                                     "allocb failed (index %d i %d j %d)",
2991                                     index, i, j));
2992                                 goto hxge_map_rxdma_channel_buf_ring_fail1;
2993                         }
2994                         rx_msg_ring[index] = rx_msg_p;
2995                         rx_msg_p->block_index = index;
2996                         rx_msg_p->shifted_addr = (uint32_t)
2997                             ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress >>
2998                             RBR_BKADDR_SHIFT));
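                             /*
                              * The RBR does not hold full 64-bit buffer
                              * addresses; shifted_addr is the block address
                              * shifted down by RBR_BKADDR_SHIFT, which is what
                              * is eventually posted to the ring.
                              */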
2999                         /*
3000                          * Too much output
3001                          * HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
3002                          *      "index %d j %d rx_msg_p $%p mblk %p",
3003                          *      index, j, rx_msg_p, rx_msg_p->rx_mblk_p));
3004                          */
3005                         mblk_p = rx_msg_p->rx_mblk_p;
3006                         mblk_p->b_wptr = mblk_p->b_rptr + bsize;
3007 
3008                         rbrp->rbr_ref_cnt++;
3009                         index++;
3010                         rx_msg_p->buf_dma.dma_channel = channel;
3011                 }
3012         }
3013         if (i < rbrp->num_blocks) {
3014                 goto hxge_map_rxdma_channel_buf_ring_fail1;
3015         }
3016         HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
3017             "hxge_map_rxdma_channel_buf_ring: done buf init "
3018             "channel %d msg block entries %d", channel, index));
3019         ring_info->block_size_mask = bsize - 1;
3020         rbrp->rx_msg_ring = rx_msg_ring;
3021         rbrp->dma_bufp = dma_buf_p;
3022         rbrp->ring_info = ring_info;
3023 
3024         status = hxge_rxbuf_index_info_init(hxgep, rbrp);
3025         HXGE_DEBUG_MSG((hxgep, MEM2_CTL, " hxge_map_rxdma_channel_buf_ring: "
3026             "channel %d done buf info init", channel));
3027 
3028         /*
3029          * Finally, permit hxge_freeb() to call hxge_post_page().
3030          */
3031         rbrp->rbr_state = RBR_POSTING;
3032 
3033         *rbr_p = rbrp;
3034 
3035         goto hxge_map_rxdma_channel_buf_ring_exit;
3036 
3037 hxge_map_rxdma_channel_buf_ring_fail1:
3038         HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
3039             " hxge_map_rxdma_channel_buf_ring: failed channel %d (status 0x%x)",
3040             channel, status));
3041 
3042         index--;
3043         for (; index >= 0; index--) {
3044                 rx_msg_p = rx_msg_ring[index];
3045                 if (rx_msg_p != NULL) {
3046                         freeb(rx_msg_p->rx_mblk_p);
3047                         rx_msg_ring[index] = NULL;
3048                 }
3049         }
3050 
3051 hxge_map_rxdma_channel_buf_ring_fail:
3052         MUTEX_DESTROY(&rbrp->post_lock);
3053         MUTEX_DESTROY(&rbrp->lock);
3054         KMEM_FREE(ring_info, sizeof (rxring_info_t));
3055         KMEM_FREE(rx_msg_ring, size);
3056         KMEM_FREE(rbrp, sizeof (rx_rbr_ring_t));
3057 
3058         status = HXGE_ERROR;
3059 
3060 hxge_map_rxdma_channel_buf_ring_exit:
3061         HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
3062             "<== hxge_map_rxdma_channel_buf_ring status 0x%08x", status));
3063 
3064         return (status);
3065 }
3066 
3067 /*ARGSUSED*/
3068 static void
3069 hxge_unmap_rxdma_channel_buf_ring(p_hxge_t hxgep,
3070     p_rx_rbr_ring_t rbr_p)
3071 {
3072         p_rx_msg_t      *rx_msg_ring;
3073         p_rx_msg_t      rx_msg_p;
3074         rxring_info_t   *ring_info;
3075         int             i;
3076         uint32_t        size;
3077 
3078         HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
3079             "==> hxge_unmap_rxdma_channel_buf_ring"));
3080         if (rbr_p == NULL) {
3081                 HXGE_DEBUG_MSG((hxgep, RX_CTL,
3082                     "<== hxge_unmap_rxdma_channel_buf_ring: NULL rbrp"));
3083                 return;
3084         }
3085         HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
3086             "==> hxge_unmap_rxdma_channel_buf_ring: channel %d", rbr_p->rdc));
3087 
3088         rx_msg_ring = rbr_p->rx_msg_ring;
3089         ring_info = rbr_p->ring_info;
3090 
3091         if (rx_msg_ring == NULL || ring_info == NULL) {
3092                 HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
3093                     "<== hxge_unmap_rxdma_channel_buf_ring: "
3094                     "rx_msg_ring $%p ring_info $%p", rx_msg_ring, ring_info));
3095                 return;
3096         }
3097 
3098         size = rbr_p->tnblocks * sizeof (p_rx_msg_t);
3099         HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
3100             " hxge_unmap_rxdma_channel_buf_ring: channel %d chunks %d "
3101             "tnblocks %d (max %d) size ptrs %d ", rbr_p->rdc, rbr_p->num_blocks,
3102             rbr_p->tnblocks, rbr_p->rbr_max_size, size));
3103 
3104         for (i = 0; i < rbr_p->tnblocks; i++) {
3105                 rx_msg_p = rx_msg_ring[i];
3106                 HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
3107                     " hxge_unmap_rxdma_channel_buf_ring: "
3108                     "rx_msg_p $%p", rx_msg_p));
3109                 if (rx_msg_p != NULL) {
3110                         freeb(rx_msg_p->rx_mblk_p);
3111                         rx_msg_ring[i] = NULL;
3112                 }
3113         }
3114 
3115         /*
3116          * We may no longer use the mutex <post_lock>. By setting
3117          * <rbr_state> to anything but POSTING, we prevent
3118          * hxge_post_page() from accessing a destroyed mutex.
3119          */
3120         rbr_p->rbr_state = RBR_UNMAPPING;
3121         MUTEX_DESTROY(&rbr_p->post_lock);
3122 
3123         MUTEX_DESTROY(&rbr_p->lock);
3124         KMEM_FREE(ring_info, sizeof (rxring_info_t));
3125         KMEM_FREE(rx_msg_ring, size);
3126 
3127         if (rbr_p->rbr_ref_cnt == 0) {
3128                 /* This is the normal state of affairs. */
3129                 KMEM_FREE(rbr_p, sizeof (*rbr_p));
3130         } else {
3131                 /*
3132                  * Some of our buffers are still being used.
3133                  * Therefore, tell hxge_freeb() this ring is
3134                  * unmapped, so it may free <rbr_p> for us.
3135                  */
3136                 rbr_p->rbr_state = RBR_UNMAPPED;
3137                 HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
3138                     "unmap_rxdma_buf_ring: %d %s outstanding.",
3139                     rbr_p->rbr_ref_cnt,
3140                     rbr_p->rbr_ref_cnt == 1 ? "msg" : "msgs"));
3141         }
3142 
3143         HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
3144             "<== hxge_unmap_rxdma_channel_buf_ring"));
3145 }
3146 
3147 static hxge_status_t
3148 hxge_rxdma_hw_start_common(p_hxge_t hxgep)
3149 {
3150         hxge_status_t status = HXGE_OK;
3151 
3152         HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_hw_start_common"));
3153 
3154         /*
3155          * Load the sharable parameters by writing to the function zero control
3156          * registers. These FZC registers should be initialized only once for
3157          * the entire chip.
3158          */
3159         (void) hxge_init_fzc_rx_common(hxgep);
3160 
3161         HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_rxdma_hw_start_common"));
3162 
3163         return (status);
3164 }
3165 
3166 static hxge_status_t
3167 hxge_rxdma_hw_start(p_hxge_t hxgep)
3168 {
3169         int                     i, ndmas;
3170         uint16_t                channel;
3171         p_rx_rbr_rings_t        rx_rbr_rings;
3172         p_rx_rbr_ring_t         *rbr_rings;
3173         p_rx_rcr_rings_t        rx_rcr_rings;
3174         p_rx_rcr_ring_t         *rcr_rings;
3175         p_rx_mbox_areas_t       rx_mbox_areas_p;
3176         p_rx_mbox_t             *rx_mbox_p;
3177         hxge_status_t           status = HXGE_OK;
3178 
3179         HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_hw_start"));
3180 
3181         rx_rbr_rings = hxgep->rx_rbr_rings;
3182         rx_rcr_rings = hxgep->rx_rcr_rings;
3183         if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) {
3184                 HXGE_DEBUG_MSG((hxgep, RX_CTL,
3185                     "<== hxge_rxdma_hw_start: NULL ring pointers"));
3186                 return (HXGE_ERROR);
3187         }
3188 
3189         ndmas = rx_rbr_rings->ndmas;
3190         if (ndmas == 0) {
3191                 HXGE_DEBUG_MSG((hxgep, RX_CTL,
3192                     "<== hxge_rxdma_hw_start: no dma channel allocated"));
3193                 return (HXGE_ERROR);
3194         }
3195         HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
3196             "==> hxge_rxdma_hw_start (ndmas %d)", ndmas));
3197 
3198         /*
3199          * Scrub the RDC Rx DMA Prefetch Buffer Command.
3200          */
3201         for (i = 0; i < 128; i++) {
3202                 HXGE_REG_WR64(hxgep->hpi_handle, RDC_PREF_CMD, i);
3203         }
3204 
3205         /*
3206          * Scrub Rx DMA Shadow Tail Command.
3207          */
3208         for (i = 0; i < 64; i++) {
3209                 HXGE_REG_WR64(hxgep->hpi_handle, RDC_SHADOW_CMD, i);
3210         }
3211 
3212         /*
3213          * Scrub Rx DMA Control Fifo Command.
3214          */
3215         for (i = 0; i < 512; i++) {
3216                 HXGE_REG_WR64(hxgep->hpi_handle, RDC_CTRL_FIFO_CMD, i);
3217         }
3218 
3219         /*
3220          * Scrub Rx DMA Data Fifo Command.
3221          */
3222         for (i = 0; i < 1536; i++) {
3223                 HXGE_REG_WR64(hxgep->hpi_handle, RDC_DATA_FIFO_CMD, i);
3224         }
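
             /*
              * The four loops above walk every entry of the prefetch, shadow
              * tail, control FIFO and data FIFO RAMs (128, 64, 512 and 1536
              * entries respectively), presumably so the internal RAMs start
              * from a known state before any channel is enabled.
              */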
3225 
3226         /*
3227          * Reset the FIFO Error Stat.
3228          */
3229         HXGE_REG_WR64(hxgep->hpi_handle, RDC_FIFO_ERR_STAT, 0xFF);
3230 
3231         /* Set the error mask to receive interrupts */
3232         HXGE_REG_WR64(hxgep->hpi_handle, RDC_FIFO_ERR_INT_MASK, 0x0);
3233 
3234         rbr_rings = rx_rbr_rings->rbr_rings;
3235         rcr_rings = rx_rcr_rings->rcr_rings;
3236         rx_mbox_areas_p = hxgep->rx_mbox_areas_p;
3237         if (rx_mbox_areas_p) {
3238                 rx_mbox_p = rx_mbox_areas_p->rxmbox_areas;
3239         }
3240 
3241         for (i = 0; i < ndmas; i++) {
3242                 channel = rbr_rings[i]->rdc;
3243                 HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
3244                     "==> hxge_rxdma_hw_start (ndmas %d) channel %d",
3245                     ndmas, channel));
3246                 status = hxge_rxdma_start_channel(hxgep, channel,
3247                     (p_rx_rbr_ring_t)rbr_rings[i],
3248                     (p_rx_rcr_ring_t)rcr_rings[i],
3249                     (p_rx_mbox_t)rx_mbox_p[i], rbr_rings[i]->rbb_max);
3250                 if (status != HXGE_OK) {
3251                         goto hxge_rxdma_hw_start_fail1;
3252                 }
3253         }
3254 
3255         HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_hw_start: "
3256             "rx_rbr_rings 0x%016llx rings 0x%016llx",
3257             rx_rbr_rings, rx_rcr_rings));
3258         goto hxge_rxdma_hw_start_exit;
3259 
3260 hxge_rxdma_hw_start_fail1:
3261         HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3262             "==> hxge_rxdma_hw_start: disable "
3263             "(status 0x%x channel %d i %d)", status, channel, i));
3264         for (; i >= 0; i--) {
3265                 channel = rbr_rings[i]->rdc;
3266                 (void) hxge_rxdma_stop_channel(hxgep, channel);
3267         }
3268 
3269 hxge_rxdma_hw_start_exit:
3270         HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
3271             "<== hxge_rxdma_hw_start: (status 0x%x)", status));
3272         return (status);
3273 }
3274 
3275 static void
3276 hxge_rxdma_hw_stop(p_hxge_t hxgep)
3277 {
3278         int                     i, ndmas;
3279         uint16_t                channel;
3280         p_rx_rbr_rings_t        rx_rbr_rings;
3281         p_rx_rbr_ring_t         *rbr_rings;
3282         p_rx_rcr_rings_t        rx_rcr_rings;
3283 
3284         HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_hw_stop"));
3285 
3286         rx_rbr_rings = hxgep->rx_rbr_rings;
3287         rx_rcr_rings = hxgep->rx_rcr_rings;
3288 
3289         if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) {
3290                 HXGE_DEBUG_MSG((hxgep, RX_CTL,
3291                     "<== hxge_rxdma_hw_stop: NULL ring pointers"));
3292                 return;
3293         }
3294 
3295         ndmas = rx_rbr_rings->ndmas;
3296         if (!ndmas) {
3297                 HXGE_DEBUG_MSG((hxgep, RX_CTL,
3298                     "<== hxge_rxdma_hw_stop: no dma channel allocated"));
3299                 return;
3300         }
3301 
3302         HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
3303             "==> hxge_rxdma_hw_stop (ndmas %d)", ndmas));
3304 
3305         rbr_rings = rx_rbr_rings->rbr_rings;
3306         for (i = 0; i < ndmas; i++) {
3307                 channel = rbr_rings[i]->rdc;
3308                 HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
3309                     "==> hxge_rxdma_hw_stop (ndmas %d) channel %d",
3310                     ndmas, channel));
3311                 (void) hxge_rxdma_stop_channel(hxgep, channel);
3312         }
3313 
3314         HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_hw_stop: "
3315             "rx_rbr_rings 0x%016llx rings 0x%016llx",
3316             rx_rbr_rings, rx_rcr_rings));
3317 
3318         HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_rxdma_hw_stop"));
3319 }
3320 
3321 static hxge_status_t
3322 hxge_rxdma_start_channel(p_hxge_t hxgep, uint16_t channel,
3323     p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p,
3324     int n_init_kick)
3325 {
3326         hpi_handle_t            handle;
3327         hpi_status_t            rs = HPI_SUCCESS;
3328         rdc_stat_t              cs;
3329         rdc_int_mask_t          ent_mask;
3330         hxge_status_t           status = HXGE_OK;
3331 
3332         HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_start_channel"));
3333 
3334         handle = HXGE_DEV_HPI_HANDLE(hxgep);
3335 
3336         HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "hxge_rxdma_start_channel: "
3337             "hpi handle addr $%p acc $%p",
3338             hxgep->hpi_handle.regp, hxgep->hpi_handle.regh));
3339 
3340         /* Reset RXDMA channel */
3341         rs = hpi_rxdma_cfg_rdc_reset(handle, channel);
3342         if (rs != HPI_SUCCESS) {
3343                 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3344                     "==> hxge_rxdma_start_channel: "
3345                     "reset rxdma failed (0x%08x channel %d)",
3346                     status, channel));
3347                 return (HXGE_ERROR | rs);
3348         }
3349         HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
3350             "==> hxge_rxdma_start_channel: reset done: channel %d", channel));
3351 
3352         /*
3353          * Initialize the RXDMA channel-specific FZC control configurations.
3354          * These FZC registers pertain to each RX channel (logical
3355          * pages).
3356          */
3357         status = hxge_init_fzc_rxdma_channel(hxgep,
3358             channel, rbr_p, rcr_p, mbox_p);
3359         if (status != HXGE_OK) {
3360                 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3361                     "==> hxge_rxdma_start_channel: "
3362                     "init fzc rxdma failed (0x%08x channel %d)",
3363                     status, channel));
3364                 return (status);
3365         }
3366         HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
3367             "==> hxge_rxdma_start_channel: fzc done"));
3368 
3369         /*
3370          * The shadow and prefetch RAMs are scrubbed in hxge_rxdma_hw_start().
3371          */
3372         HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
3373             "==> hxge_rxdma_start_channel: ram done"));
3374 
3375         /* Set up the interrupt event masks. */
3376         ent_mask.value = 0;
3377         rs = hpi_rxdma_event_mask(handle, OP_SET, channel, &ent_mask);
3378         if (rs != HPI_SUCCESS) {
3379                 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3380                     "==> hxge_rxdma_start_channel: "
3381                     "init rxdma event masks failed (0x%08x channel %d)",
3382                     status, channel));
3383                 return (HXGE_ERROR | rs);
3384         }
3385         HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_start_channel: "
3386             "event done: channel %d (mask 0x%016llx)",
3387             channel, ent_mask.value));
3388 
3389         /*
3390          * Load the RXDMA descriptors, buffers, and mailbox, then initialize
3391          * and enable this receive DMA channel.
3392          */
3393         status = hxge_enable_rxdma_channel(hxgep,
3394             channel, rbr_p, rcr_p, mbox_p, n_init_kick);
3395         if (status != HXGE_OK) {
3396                 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3397                     " hxge_rxdma_start_channel: "
3398                     " init enable rxdma failed (0x%08x channel %d)",
3399                     status, channel));
3400                 return (status);
3401         }
3402 
3403         HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_start_channel: "
3404             "enable done: channel %d", channel));
3405 
3406         /*
3407          * Initialize the receive DMA control and status register.
3408          * Note that rdc_stat HAS to be set after the RBR and RCR rings are set.
3409          */
3410         cs.value = 0;
3411         cs.bits.mex = 1;
3412         cs.bits.rcr_thres = 1;
3413         cs.bits.rcr_to = 1;
3414         cs.bits.rbr_empty = 1;
3415         status = hxge_init_rxdma_channel_cntl_stat(hxgep, channel, &cs);
3416         HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_start_channel: "
3417             "channel %d rx_dma_cntl_stat 0x%0016llx", channel, cs.value));
3418         if (status != HXGE_OK) {
3419                 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3420                     "==> hxge_rxdma_start_channel: "
3421                     "init rxdma control register failed (0x%08x channel %d",
3422                     status, channel));
3423                 return (status);
3424         }
3425 
3426         HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_start_channel: "
3427             "control done - channel %d cs 0x%016llx", channel, cs.value));
3428         HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
3429             "==> hxge_rxdma_start_channel: enable done"));
3430         HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_rxdma_start_channel"));
3431         return (HXGE_OK);
3432 }
3433 
3434 static hxge_status_t
3435 hxge_rxdma_stop_channel(p_hxge_t hxgep, uint16_t channel)
3436 {
3437         hpi_handle_t            handle;
3438         hpi_status_t            rs = HPI_SUCCESS;
3439         rdc_stat_t              cs;
3440         rdc_int_mask_t          ent_mask;
3441         hxge_status_t           status = HXGE_OK;
3442 
3443         HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rxdma_stop_channel"));
3444 
3445         handle = HXGE_DEV_HPI_HANDLE(hxgep);
3446 
3447         HXGE_DEBUG_MSG((hxgep, RX_CTL, "hxge_rxdma_stop_channel: "
3448             "hpi handle addr $%p acc $%p",
3449             hxgep->hpi_handle.regp, hxgep->hpi_handle.regh));
3450 
3451         /* Reset RXDMA channel */
3452         rs = hpi_rxdma_cfg_rdc_reset(handle, channel);
3453         if (rs != HPI_SUCCESS) {
3454                 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3455                     " hxge_rxdma_stop_channel: "
3456                     " reset rxdma failed (0x%08x channel %d)",
3457                     rs, channel));
3458                 return (HXGE_ERROR | rs);
3459         }
3460         HXGE_DEBUG_MSG((hxgep, RX_CTL,
3461             "==> hxge_rxdma_stop_channel: reset done"));
3462 
3463         /* Set up the interrupt event masks. */
3464         ent_mask.value = RDC_INT_MASK_ALL;
3465         rs = hpi_rxdma_event_mask(handle, OP_SET, channel, &ent_mask);
3466         if (rs != HPI_SUCCESS) {
3467                 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3468                     "==> hxge_rxdma_stop_channel: "
3469                     "set rxdma event masks failed (0x%08x channel %d)",
3470                     rs, channel));
3471                 return (HXGE_ERROR | rs);
3472         }
3473         HXGE_DEBUG_MSG((hxgep, RX_CTL,
3474             "==> hxge_rxdma_stop_channel: event done"));
3475 
3476         /* Initialize the receive DMA control and status register */
3477         cs.value = 0;
3478         status = hxge_init_rxdma_channel_cntl_stat(hxgep, channel, &cs);
3479 
3480         HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rxdma_stop_channel: control "
3481             " to default (all 0s) 0x%08x", cs.value));
3482 
3483         if (status != HXGE_OK) {
3484                 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3485                     " hxge_rxdma_stop_channel: init rxdma"
3486                     " control register failed (0x%08x channel %d",
3487                     status, channel));
3488                 return (status);
3489         }
3490 
3491         HXGE_DEBUG_MSG((hxgep, RX_CTL,
3492             "==> hxge_rxdma_stop_channel: control done"));
3493 
3494         /* disable dma channel */
3495         status = hxge_disable_rxdma_channel(hxgep, channel);
3496 
3497         if (status != HXGE_OK) {
3498                 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3499                     " hxge_rxdma_stop_channel: "
3500                     " init enable rxdma failed (0x%08x channel %d)",
3501                     status, channel));
3502                 return (status);
3503         }
3504 
3505         HXGE_DEBUG_MSG((hxgep, RX_CTL,
3506             "==> hxge_rxdma_stop_channel: disable done"));
3507         HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_rxdma_stop_channel"));
3508 
3509         return (HXGE_OK);
3510 }
3511 
3512 hxge_status_t
3513 hxge_rxdma_handle_sys_errors(p_hxge_t hxgep)
3514 {
3515         hpi_handle_t            handle;
3516         p_hxge_rdc_sys_stats_t  statsp;
3517         rdc_fifo_err_stat_t     stat;
3518         hxge_status_t           status = HXGE_OK;
3519 
3520         handle = hxgep->hpi_handle;
3521         statsp = (p_hxge_rdc_sys_stats_t)&hxgep->statsp->rdc_sys_stats;
3522 
3523         /* Get the error status and clear the register */
3524         HXGE_REG_RD64(handle, RDC_FIFO_ERR_STAT, &stat.value);
3525         HXGE_REG_WR64(handle, RDC_FIFO_ERR_STAT, stat.value);
3526 
3527         if (stat.bits.rx_ctrl_fifo_sec) {
3528                 statsp->ctrl_fifo_sec++;
3529                 if (statsp->ctrl_fifo_sec == 1)
3530                         HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3531                             "==> hxge_rxdma_handle_sys_errors: "
3532                             "rx_ctrl_fifo_sec"));
3533         }
3534 
3535         if (stat.bits.rx_ctrl_fifo_ded) {
3536                 /* Global fatal error encountered */
3537                 statsp->ctrl_fifo_ded++;
3538                 HXGE_FM_REPORT_ERROR(hxgep, NULL,
3539                     HXGE_FM_EREPORT_RDMC_CTRL_FIFO_DED);
3540                 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3541                     "==> hxge_rxdma_handle_sys_errors: "
3542                     "fatal error: rx_ctrl_fifo_ded error"));
3543         }
3544 
3545         if (stat.bits.rx_data_fifo_sec) {
3546                 statsp->data_fifo_sec++;
3547                 if (statsp->data_fifo_sec == 1)
3548                         HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3549                             "==> hxge_rxdma_handle_sys_errors: "
3550                             "rx_data_fifo_sec"));
3551         }
3552 
3553         if (stat.bits.rx_data_fifo_ded) {
3554                 /* Global fatal error encountered */
3555                 statsp->data_fifo_ded++;
3556                 HXGE_FM_REPORT_ERROR(hxgep, NULL,
3557                     HXGE_FM_EREPORT_RDMC_DATA_FIFO_DED);
3558                 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3559                     "==> hxge_rxdma_handle_sys_errors: "
3560                     "fatal error: rx_data_fifo_ded error"));
3561         }
3562 
3563         if (stat.bits.rx_ctrl_fifo_ded || stat.bits.rx_data_fifo_ded) {
3564                 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3565                     " hxge_rxdma_handle_sys_errors: fatal error\n"));
3566                 status = hxge_rx_port_fatal_err_recover(hxgep);
3567                 if (status == HXGE_OK) {
3568                         FM_SERVICE_RESTORED(hxgep);
3569                 }
3570         }
3571 
3572         return (HXGE_OK);
3573 }
3574 
3575 static hxge_status_t
3576 hxge_rxdma_fatal_err_recover(p_hxge_t hxgep, uint16_t channel)
3577 {
3578         hpi_handle_t            handle;
3579         hpi_status_t            rs = HPI_SUCCESS;
3580         p_rx_rbr_ring_t         rbrp;
3581         p_rx_rcr_ring_t         rcrp;
3582         p_rx_mbox_t             mboxp;
3583         rdc_int_mask_t          ent_mask;
3584         p_hxge_dma_common_t     dmap;
3585         p_rx_msg_t              rx_msg_p;
3586         int                     i;
3587         uint32_t                hxge_port_rcr_size;
3588         uint64_t                tmp;
3589         int                     n_init_kick = 0;
3590 
3591         HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rxdma_fatal_err_recover"));
3592 
3593         /*
3594          * Stop the DMA channel and wait for the stop to complete. If the
3595          * stop-done bit is not set, report an error.
3596          */
3597 
3598         handle = HXGE_DEV_HPI_HANDLE(hxgep);
3599 
3600         HXGE_DEBUG_MSG((hxgep, RX_CTL, "Rx DMA stop..."));
3601 
3602         rbrp = (p_rx_rbr_ring_t)hxgep->rx_rbr_rings->rbr_rings[channel];
3603         rcrp = (p_rx_rcr_ring_t)hxgep->rx_rcr_rings->rcr_rings[channel];
3604 
3605         MUTEX_ENTER(&rcrp->lock);
3606         MUTEX_ENTER(&rbrp->lock);
3607 
3608         HXGE_DEBUG_MSG((hxgep, RX_CTL, "Disable RxDMA channel..."));
3609 
3610         rs = hpi_rxdma_cfg_rdc_disable(handle, channel);
3611         if (rs != HPI_SUCCESS) {
3612                 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3613                     "hxge_disable_rxdma_channel:failed"));
3614                 goto fail;
3615         }
3616         HXGE_DEBUG_MSG((hxgep, RX_CTL, "Disable RxDMA interrupt..."));
3617 
3618         /* Disable interrupt */
3619         ent_mask.value = RDC_INT_MASK_ALL;
3620         rs = hpi_rxdma_event_mask(handle, OP_SET, channel, &ent_mask);
3621         if (rs != HPI_SUCCESS) {
3622                 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3623                     "Set rxdma event masks failed (channel %d)", channel));
3624         }
3625         HXGE_DEBUG_MSG((hxgep, RX_CTL, "RxDMA channel reset..."));
3626 
3627         /* Reset RXDMA channel */
3628         rs = hpi_rxdma_cfg_rdc_reset(handle, channel);
3629         if (rs != HPI_SUCCESS) {
3630                 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3631                     "Reset rxdma failed (channel %d)", channel));
3632                 goto fail;
3633         }
3634         hxge_port_rcr_size = hxgep->hxge_port_rcr_size;
3635         mboxp = (p_rx_mbox_t)hxgep->rx_mbox_areas_p->rxmbox_areas[channel];
3636 
3637         rbrp->rbr_wr_index = (rbrp->rbb_max - 1);
3638         rbrp->rbr_rd_index = 0;
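             /*
              * Resetting the software indices (write index at the last slot,
              * read index back at 0) lets the ring be rebuilt from scratch
              * during the restart below.
              */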
3639 
3640         rcrp->comp_rd_index = 0;
3641         rcrp->comp_wt_index = 0;
3642         rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p =
3643             (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc);
3644 #if defined(__i386)
3645         rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp =
3646             (p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc);
3647 #else
3648         rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp =
3649             (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc);
3650 #endif
3651 
3652         rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p +
3653             (hxge_port_rcr_size - 1);
3654         rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp +
3655             (hxge_port_rcr_size - 1);
3656 
3657         rcrp->rcr_tail_begin = DMA_COMMON_IOADDR(rcrp->rcr_desc);
3658         rcrp->rcr_tail_begin = (rcrp->rcr_tail_begin & 0x7ffffULL) >> 3;
3659 
3660         dmap = (p_hxge_dma_common_t)&rcrp->rcr_desc;
3661         bzero((caddr_t)dmap->kaddrp, dmap->alength);
3662 
3663         HXGE_DEBUG_MSG((hxgep, RX_CTL, "rbr entries = %d\n",
3664             rbrp->rbr_max_size));
3665 
3666         /* Count the number of buffers owned by the hardware at this moment */
3667         for (i = 0; i < rbrp->rbr_max_size; i++) {
3668                 rx_msg_p = rbrp->rx_msg_ring[i];
3669                 if (rx_msg_p->ref_cnt == 1) {
3670                         n_init_kick++;
3671                 }
3672         }
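
             /*
              * A ref_cnt of 1 is taken here to mean the buffer has not been
              * loaned up the stack, i.e. it is still owned by the driver or
              * the hardware, so only those buffers are kicked back below.
              */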
3673 
3674         HXGE_DEBUG_MSG((hxgep, RX_CTL, "RxDMA channel re-start..."));
3675 
3676         /*
3677          * This is error recovery. Some buffers are owned by the hardware
3678          * and the rest are owned by the apps. We should only kick in those
3679          * owned by the hardware initially; the apps will post theirs
3680          * eventually.
3681          */
3682         (void) hxge_rxdma_start_channel(hxgep, channel, rbrp, rcrp, mboxp,
3683             n_init_kick);
3684 
3685         /*
3686          * The DMA channel may disable itself automatically.
3687          * The following is a work-around.
3688          */
3689         HXGE_REG_RD64(handle, RDC_RX_CFG1, &tmp);
3690         rs = hpi_rxdma_cfg_rdc_enable(handle, channel);
3691         if (rs != HPI_SUCCESS) {
3692                 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3693                     "hpi_rxdma_cfg_rdc_enable (channel %d)", channel));
3694         }
3695 
3696         /*
3697          * Delay a bit of time by doing reads.
3698          */
3699         for (i = 0; i < 1024; i++) {
3700                 uint64_t value;
3701                 RXDMA_REG_READ64(HXGE_DEV_HPI_HANDLE(hxgep),
3702                     RDC_INT_MASK, i & 3, &value);
3703         }
3704 
3705         MUTEX_EXIT(&rbrp->lock);
3706         MUTEX_EXIT(&rcrp->lock);
3707 
3708         HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_rxdma_fatal_err_recover"));
3709         return (HXGE_OK);
3710 
3711 fail:
3712         MUTEX_EXIT(&rbrp->lock);
3713         MUTEX_EXIT(&rcrp->lock);
3714         HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3715             "Error Recovery failed for channel(%d)", channel));
3716         return (HXGE_ERROR | rs);
3717 }
3718 
3719 static hxge_status_t
3720 hxge_rx_port_fatal_err_recover(p_hxge_t hxgep)
3721 {
3722         hxge_status_t           status = HXGE_OK;
3723         p_hxge_dma_common_t     *dma_buf_p;
3724         uint16_t                channel;
3725         int                     ndmas;
3726         int                     i;
3727         block_reset_t           reset_reg;
3728         p_rx_rcr_ring_t rcrp;
3729         p_rx_rbr_ring_t rbrp;
3730 
3731         HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rx_port_fatal_err_recover"));
3732         HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "Recovering from RDC error ..."));
3733 
3734         /* Disable RxMAC */
3735         HXGE_DEBUG_MSG((hxgep, RX_CTL, "Disable RxMAC...\n"));
3736         MUTEX_ENTER(&hxgep->vmac_lock);
3737         if (hxge_rx_vmac_disable(hxgep) != HXGE_OK)
3738                 goto fail;
3739 
3740         HXGE_DELAY(1000);
3741 
3742         /*
3743          * Reset RDC block from PEU for this fatal error
3744          */
3745         reset_reg.value = 0;
3746         reset_reg.bits.rdc_rst = 1;
3747         HXGE_REG_WR32(hxgep->hpi_handle, BLOCK_RESET, reset_reg.value);
3748 
3749         HXGE_DELAY(1000);
3750 
3751         /* Restore any common settings after PEU reset */
3752         if (hxge_rxdma_hw_start_common(hxgep) != HXGE_OK)
3753                 goto fail;
3754 
3755         HXGE_DEBUG_MSG((hxgep, RX_CTL, "Stop all RxDMA channels..."));
3756 
3757         ndmas = hxgep->rx_buf_pool_p->ndmas;
3758         dma_buf_p = hxgep->rx_buf_pool_p->dma_buf_pool_p;
3759 
3760         for (i = 0; i < ndmas; i++) {
3761                 channel = ((p_hxge_dma_common_t)dma_buf_p[i])->dma_channel;
3762                 rcrp = hxgep->rx_rcr_rings->rcr_rings[channel];
3763                 rbrp = rcrp->rx_rbr_p;
3764 
3765                 MUTEX_ENTER(&rbrp->post_lock);
3766 
3767                 /*
3768                  * This function must be called while holding post_lock.
3769                  */
3770                 if (hxge_rxdma_fatal_err_recover(hxgep, channel) != HXGE_OK) {
3771                         HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3772                             "Could not recover channel %d", channel));
3773                 }
3774                 MUTEX_EXIT(&rbrp->post_lock);
3775         }
3776 
3777         HXGE_DEBUG_MSG((hxgep, RX_CTL, "Reset RxMAC..."));
3778 
3779         /* Reset RxMAC */
3780         if (hxge_rx_vmac_reset(hxgep) != HXGE_OK) {
3781                 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3782                     "hxge_rx_port_fatal_err_recover: Failed to reset RxMAC"));
3783                 goto fail;
3784         }
3785 
3786         HXGE_DEBUG_MSG((hxgep, RX_CTL, "Re-initialize RxMAC..."));
3787 
3788         /* Re-Initialize RxMAC */
3789         if ((status = hxge_rx_vmac_init(hxgep)) != HXGE_OK) {
3790                 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3791                     "hxge_rx_port_fatal_err_recover: Failed to re-initialize RxMAC"));
3792                 goto fail;
3793         }
3794         HXGE_DEBUG_MSG((hxgep, RX_CTL, "Re-enable RxMAC..."));
3795 
3796         /* Re-enable RxMAC */
3797         if ((status = hxge_rx_vmac_enable(hxgep)) != HXGE_OK) {
3798                 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3799                     "hxge_rx_port_fatal_err_recover: Failed to enable RxMAC"));
3800                 goto fail;
3801         }
3802         MUTEX_EXIT(&hxgep->vmac_lock);
3803 
3804         /* Reset the error mask since PEU reset cleared it */
3805         HXGE_REG_WR64(hxgep->hpi_handle, RDC_FIFO_ERR_INT_MASK, 0x0);
3806 
3807         HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3808             "Recovery Successful, RxPort Restored"));
3809         HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_rx_port_fatal_err_recover"));
3810         return (HXGE_OK);
3811 
3812 fail:
3813         MUTEX_EXIT(&hxgep->vmac_lock);
3814         HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3815             "Error Recovery failed for hxge(%d)", hxgep->instance));
3816         return (status);
3817 }
3818 
3819 static void
3820 hxge_rbr_empty_restore(p_hxge_t hxgep, p_rx_rbr_ring_t rx_rbr_p)
3821 {
3822         hpi_status_t            hpi_status;
3823         hxge_status_t           status;
3824         rdc_stat_t              cs;
3825         p_hxge_rx_ring_stats_t  rdc_stats;
3826 
3827         rdc_stats = &hxgep->statsp->rdc_stats[rx_rbr_p->rdc];
3828 
3829         /*
3830          * Complete the processing for the RBR Empty condition by:
3831          *      0) kicking back HXGE_RBR_EMPTY_THRESHOLD
3832          *         packets,
3833          *      1) disabling the RX VMAC,
3834          *      2) re-enabling the affected DMA channel, and
3835          *      3) re-enabling the RX VMAC.
3836          */
3837 
3838         /*
3839          * Disable the RX VMAC by setting the frame length
3840          * to 0, since there is a hardware bug when disabling
3841          * the VMAC.
3842          */
3843         MUTEX_ENTER(&hxgep->vmac_lock);
3844         (void) hxge_rx_vmac_disable(hxgep);
3845 
3846         /*
3847          * Re-arm the mex bit for interrupts to be enabled.
3848          */
3849         cs.value = 0;
3850         cs.bits.mex = 1;
3851         RXDMA_REG_WRITE64(HXGE_DEV_HPI_HANDLE(hxgep), RDC_STAT,
3852             rx_rbr_p->rdc, cs.value);
3853 
3854         hpi_status = hpi_rxdma_cfg_rdc_enable(
3855             HXGE_DEV_HPI_HANDLE(hxgep), rx_rbr_p->rdc);
3856         if (hpi_status != HPI_SUCCESS) {
3857                 rdc_stats->rbr_empty_fail++;
3858 
3859                 /* Assume we are already inside the post_lock */
3860                 status = hxge_rxdma_fatal_err_recover(hxgep, rx_rbr_p->rdc);
3861                 if (status != HXGE_OK) {
3862                         HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3863                             "hxge(%d): channel(%d) is empty.",
3864                             hxgep->instance, rx_rbr_p->rdc));
3865                 }
3866         }
3867 
3868         /*
3869          * Re-enable the RX VMAC.
3870          */
3871         (void) hxge_rx_vmac_enable(hxgep);
3872         MUTEX_EXIT(&hxgep->vmac_lock);
3873 
3874         rdc_stats->rbr_empty_restore++;
3875         rx_rbr_p->rbr_is_empty = B_FALSE;
3876 }