/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <hxge_impl.h>
#include <hxge_txdma.h>
#include <sys/llc1.h>

uint32_t hxge_reclaim_pending = TXDMA_RECLAIM_PENDING_DEFAULT;
uint32_t hxge_tx_minfree = 64;
uint32_t hxge_tx_intr_thres = 0;
uint32_t hxge_tx_max_gathers = TX_MAX_GATHER_POINTERS;
uint32_t hxge_tx_tiny_pack = 1;
uint32_t hxge_tx_use_bcopy = 1;

extern uint32_t hxge_tx_ring_size;
extern uint32_t hxge_bcopy_thresh;
extern uint32_t hxge_dvma_thresh;
extern uint32_t hxge_dma_stream_thresh;
extern dma_method_t hxge_force_dma;

/* Device register access attributes for PIO.  */
extern ddi_device_acc_attr_t hxge_dev_reg_acc_attr;

/* Device descriptor access attributes for DMA.  */
extern ddi_device_acc_attr_t hxge_dev_desc_dma_acc_attr;

/* Device buffer access attributes for DMA.  */
extern ddi_device_acc_attr_t hxge_dev_buf_dma_acc_attr;
extern ddi_dma_attr_t hxge_desc_dma_attr;
extern ddi_dma_attr_t hxge_tx_dma_attr;

static hxge_status_t hxge_map_txdma(p_hxge_t hxgep);
static void hxge_unmap_txdma(p_hxge_t hxgep);
static hxge_status_t hxge_txdma_hw_start(p_hxge_t hxgep);
static void hxge_txdma_hw_stop(p_hxge_t hxgep);

static hxge_status_t hxge_map_txdma_channel(p_hxge_t hxgep, uint16_t channel,
    p_hxge_dma_common_t *dma_buf_p, p_tx_ring_t *tx_desc_p,
    uint32_t num_chunks, p_hxge_dma_common_t *dma_cntl_p,
    p_tx_mbox_t *tx_mbox_p);
static void hxge_unmap_txdma_channel(p_hxge_t hxgep, uint16_t channel,
    p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p);
static hxge_status_t hxge_map_txdma_channel_buf_ring(p_hxge_t hxgep, uint16_t,
    p_hxge_dma_common_t *, p_tx_ring_t *, uint32_t);
static void hxge_unmap_txdma_channel_buf_ring(p_hxge_t hxgep,
    p_tx_ring_t tx_ring_p);
static void hxge_map_txdma_channel_cfg_ring(p_hxge_t, uint16_t,
    p_hxge_dma_common_t *, p_tx_ring_t, p_tx_mbox_t *);
static void hxge_unmap_txdma_channel_cfg_ring(p_hxge_t hxgep,
    p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p);
static hxge_status_t hxge_txdma_start_channel(p_hxge_t hxgep, uint16_t channel,
    p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p);
static hxge_status_t hxge_txdma_stop_channel(p_hxge_t hxgep, uint16_t channel,
    p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p);
static p_tx_ring_t hxge_txdma_get_ring(p_hxge_t hxgep, uint16_t channel);
static hxge_status_t hxge_tx_err_evnts(p_hxge_t hxgep, uint_t index,
    p_hxge_ldv_t ldvp, tdc_stat_t cs);
static p_tx_mbox_t hxge_txdma_get_mbox(p_hxge_t hxgep, uint16_t channel);
static hxge_status_t hxge_txdma_fatal_err_recover(p_hxge_t hxgep,
    uint16_t channel, p_tx_ring_t tx_ring_p);
static hxge_status_t hxge_tx_port_fatal_err_recover(p_hxge_t hxgep);

hxge_status_t
hxge_init_txdma_channels(p_hxge_t hxgep)
{
        hxge_status_t   status = HXGE_OK;
        block_reset_t   reset_reg;

        HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_init_txdma_channels"));

        /*
         * Reset the TDC block from the PEU to clean up any unknown
         * configuration that may be left over from a previous reboot.
         */
        reset_reg.value = 0;
        reset_reg.bits.tdc_rst = 1;
        HXGE_REG_WR32(hxgep->hpi_handle, BLOCK_RESET, reset_reg.value);

        HXGE_DELAY(1000);

        status = hxge_map_txdma(hxgep);
        if (status != HXGE_OK) {
                HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
                    "<== hxge_init_txdma_channels: status 0x%x", status));
                return (status);
        }

        status = hxge_txdma_hw_start(hxgep);
        if (status != HXGE_OK) {
                hxge_unmap_txdma(hxgep);
                return (status);
        }

        HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
            "<== hxge_init_txdma_channels: status 0x%x", status));

        return (HXGE_OK);
}

void
hxge_uninit_txdma_channels(p_hxge_t hxgep)
{
        HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_uninit_txdma_channels"));

        hxge_txdma_hw_stop(hxgep);
        hxge_unmap_txdma(hxgep);

        HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "<== hxge_uninit_txdma_channels"));
}

void
hxge_setup_dma_common(p_hxge_dma_common_t dest_p, p_hxge_dma_common_t src_p,
    uint32_t entries, uint32_t size)
{
        size_t tsize;

        *dest_p = *src_p;
        tsize = size * entries;
        dest_p->alength = tsize;
        dest_p->nblocks = entries;
        dest_p->block_size = size;
        dest_p->offset += tsize;

        src_p->kaddrp = (caddr_t)dest_p->kaddrp + tsize;
        src_p->alength -= tsize;
        src_p->dma_cookie.dmac_laddress += tsize;
        src_p->dma_cookie.dmac_size -= tsize;
}
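
/*
 * Usage sketch (illustrative only; ring_area, mbox_area, dma_chunk_p and
 * mbox_size are hypothetical names): carving consecutive sub-areas out of
 * one pre-allocated DMA chunk. Each call copies the chunk state into
 * *dest_p and then advances the source chunk by entries * size bytes, so
 * successive calls hand out adjacent regions of the same chunk:
 *
 *	hxge_dma_common_t ring_area, mbox_area;
 *
 *	hxge_setup_dma_common(&ring_area, dma_chunk_p, tx_ring_size,
 *	    sizeof (tx_desc_t));
 *	hxge_setup_dma_common(&mbox_area, dma_chunk_p, 1, mbox_size);
 *
 * The real callers are the channel-mapping routines declared above
 * (hxge_map_txdma_channel_cfg_ring() and friends).
 */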

hxge_status_t
hxge_reset_txdma_channel(p_hxge_t hxgep, uint16_t channel, uint64_t reg_data)
{
        hpi_status_t    rs = HPI_SUCCESS;
        hxge_status_t   status = HXGE_OK;
        hpi_handle_t    handle;

        HXGE_DEBUG_MSG((hxgep, TX_CTL, " ==> hxge_reset_txdma_channel"));

        handle = HXGE_DEV_HPI_HANDLE(hxgep);
        if ((reg_data & TDC_TDR_RST_MASK) == TDC_TDR_RST_MASK) {
                rs = hpi_txdma_channel_reset(handle, channel);
        } else {
                rs = hpi_txdma_channel_control(handle, TXDMA_RESET, channel);
        }

        if (rs != HPI_SUCCESS) {
                status = HXGE_ERROR | rs;
        }

        /*
         * Reset the tail (kick) register to 0. The hardware does not reset
         * it, and a fatal Tx ring overflow error results if the tail is not
         * zero after a reset.
         */
        TXDMA_REG_WRITE64(handle, TDC_TDR_KICK, channel, 0);

        HXGE_DEBUG_MSG((hxgep, TX_CTL, " <== hxge_reset_txdma_channel"));

        return (status);
}

hxge_status_t
hxge_init_txdma_channel_event_mask(p_hxge_t hxgep, uint16_t channel,
    tdc_int_mask_t *mask_p)
{
        hpi_handle_t    handle;
        hpi_status_t    rs = HPI_SUCCESS;
        hxge_status_t   status = HXGE_OK;

        HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
            "==> hxge_init_txdma_channel_event_mask"));

        handle = HXGE_DEV_HPI_HANDLE(hxgep);

        /*
         * Mask off tx_rng_oflow since it is a false alarm. The driver
         * ensures that it does not overflow the hardware and checks the
         * hardware status itself.
         */
        mask_p->bits.tx_rng_oflow = 1;
        rs = hpi_txdma_event_mask(handle, OP_SET, channel, mask_p);
        if (rs != HPI_SUCCESS) {
                status = HXGE_ERROR | rs;
        }

        HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
            "<== hxge_init_txdma_channel_event_mask"));
        return (status);
}

hxge_status_t
hxge_enable_txdma_channel(p_hxge_t hxgep,
    uint16_t channel, p_tx_ring_t tx_desc_p, p_tx_mbox_t mbox_p)
{
        hpi_handle_t    handle;
        hpi_status_t    rs = HPI_SUCCESS;
        hxge_status_t   status = HXGE_OK;

        HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_enable_txdma_channel"));

        handle = HXGE_DEV_HPI_HANDLE(hxgep);
        /*
         * Use configuration data composed at init time. Write to hardware the
         * transmit ring configurations.
         */
        rs = hpi_txdma_ring_config(handle, OP_SET, channel,
            (uint64_t *)&(tx_desc_p->tx_ring_cfig.value));

        if (rs != HPI_SUCCESS) {
                return (HXGE_ERROR | rs);
        }

        /* Write to hardware the mailbox */
        rs = hpi_txdma_mbox_config(handle, OP_SET, channel,
            (uint64_t *)&mbox_p->tx_mbox.dma_cookie.dmac_laddress);

        if (rs != HPI_SUCCESS) {
                return (HXGE_ERROR | rs);
        }

        /* Start the DMA engine. */
        rs = hpi_txdma_channel_init_enable(handle, channel);
        if (rs != HPI_SUCCESS) {
                return (HXGE_ERROR | rs);
        }
        HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "<== hxge_enable_txdma_channel"));
        return (status);
}

void
hxge_fill_tx_hdr(p_mblk_t mp, boolean_t fill_len, boolean_t l4_cksum,
    int pkt_len, uint8_t npads, p_tx_pkt_hdr_all_t pkthdrp)
{
        p_tx_pkt_header_t       hdrp;
        p_mblk_t                nmp;
        uint64_t                tmp;
        size_t                  mblk_len;
        size_t                  iph_len;
        size_t                  hdrs_size;
        uint8_t                 *ip_buf;
        uint16_t                eth_type;
        uint8_t                 ipproto;
        boolean_t               is_vlan = B_FALSE;
        size_t                  eth_hdr_size;
        uint8_t hdrs_buf[sizeof (struct ether_header) + 64 + sizeof (uint32_t)];

        HXGE_DEBUG_MSG((NULL, TX_CTL, "==> hxge_fill_tx_hdr: mp $%p", mp));

        /*
         * Caller should zero out the headers first.
         */
        hdrp = (p_tx_pkt_header_t)&pkthdrp->pkthdr;

        if (fill_len) {
                HXGE_DEBUG_MSG((NULL, TX_CTL,
                    "==> hxge_fill_tx_hdr: pkt_len %d npads %d",
                    pkt_len, npads));
                tmp = (uint64_t)pkt_len;
                hdrp->value |= (tmp << TX_PKT_HEADER_TOT_XFER_LEN_SHIFT);

                goto fill_tx_header_done;
        }
        tmp = (uint64_t)npads;
        hdrp->value |= (tmp << TX_PKT_HEADER_PAD_SHIFT);

        /*
         * mp is the original data packet (does not include the Neptune
         * transmit header).
         */
        nmp = mp;
        mblk_len = (size_t)nmp->b_wptr - (size_t)nmp->b_rptr;
        HXGE_DEBUG_MSG((NULL, TX_CTL,
            "==> hxge_fill_tx_hdr: mp $%p b_rptr $%p len %d",
            mp, nmp->b_rptr, mblk_len));
        ip_buf = NULL;
        bcopy(nmp->b_rptr, &hdrs_buf[0], sizeof (struct ether_vlan_header));
        eth_type = ntohs(((p_ether_header_t)hdrs_buf)->ether_type);
        HXGE_DEBUG_MSG((NULL, TX_CTL,
            "==> hxge_fill_tx_hdr: (value 0x%llx) ether type 0x%x",
            hdrp->value, eth_type));

        if (eth_type < ETHERMTU) {
                tmp = 1ull;
                hdrp->value |= (tmp << TX_PKT_HEADER_LLC_SHIFT);
                HXGE_DEBUG_MSG((NULL, TX_CTL,
                    "==> hxge_tx_pkt_hdr_init: LLC value 0x%llx", hdrp->value));
                if (*(hdrs_buf + sizeof (struct ether_header)) ==
                    LLC_SNAP_SAP) {
                        eth_type = ntohs(*((uint16_t *)(hdrs_buf +
                            sizeof (struct ether_header) + 6)));
                        HXGE_DEBUG_MSG((NULL, TX_CTL,
                            "==> hxge_tx_pkt_hdr_init: LLC ether type 0x%x",
                            eth_type));
                } else {
                        goto fill_tx_header_done;
                }
        } else if (eth_type == VLAN_ETHERTYPE) {
                tmp = 1ull;
                hdrp->value |= (tmp << TX_PKT_HEADER_VLAN__SHIFT);

                eth_type = ntohs(((struct ether_vlan_header *)
                    hdrs_buf)->ether_type);
                is_vlan = B_TRUE;
                HXGE_DEBUG_MSG((NULL, TX_CTL,
                    "==> hxge_tx_pkt_hdr_init: VLAN value 0x%llx",
                    hdrp->value));
        }
        if (!is_vlan) {
                eth_hdr_size = sizeof (struct ether_header);
        } else {
                eth_hdr_size = sizeof (struct ether_vlan_header);
        }

        switch (eth_type) {
        case ETHERTYPE_IP:
                if (mblk_len > eth_hdr_size + sizeof (uint8_t)) {
                        ip_buf = nmp->b_rptr + eth_hdr_size;
                        mblk_len -= eth_hdr_size;
                        iph_len = ((*ip_buf) & 0x0f);
                        if (mblk_len > (iph_len + sizeof (uint32_t))) {
                                ip_buf = nmp->b_rptr;
                                ip_buf += eth_hdr_size;
                        } else {
                                ip_buf = NULL;
                        }
                }
                if (ip_buf == NULL) {
                        hdrs_size = 0;
                        ((p_ether_header_t)hdrs_buf)->ether_type = 0;
                        while ((nmp) && (hdrs_size < sizeof (hdrs_buf))) {
                                mblk_len = (size_t)nmp->b_wptr -
                                    (size_t)nmp->b_rptr;
                                if (mblk_len >=
                                    (sizeof (hdrs_buf) - hdrs_size))
                                        mblk_len = sizeof (hdrs_buf) -
                                            hdrs_size;
                                bcopy(nmp->b_rptr,
                                    &hdrs_buf[hdrs_size], mblk_len);
                                hdrs_size += mblk_len;
                                nmp = nmp->b_cont;
                        }
                        ip_buf = hdrs_buf;
                        ip_buf += eth_hdr_size;
                        iph_len = ((*ip_buf) & 0x0f);
                }
                ipproto = ip_buf[9];

                tmp = (uint64_t)iph_len;
                hdrp->value |= (tmp << TX_PKT_HEADER_IHL_SHIFT);
                tmp = (uint64_t)(eth_hdr_size >> 1);
                hdrp->value |= (tmp << TX_PKT_HEADER_L3START_SHIFT);

                HXGE_DEBUG_MSG((NULL, TX_CTL, "==> hxge_fill_tx_hdr: IPv4 "
                    " iph_len %d l3start %d eth_hdr_size %d proto 0x%x "
                    "tmp 0x%x", iph_len, hdrp->bits.l3start, eth_hdr_size,
                    ipproto, tmp));
                HXGE_DEBUG_MSG((NULL, TX_CTL,
                    "==> hxge_tx_pkt_hdr_init: IP value 0x%llx", hdrp->value));
                break;

        case ETHERTYPE_IPV6:
                hdrs_size = 0;
                ((p_ether_header_t)hdrs_buf)->ether_type = 0;
                while ((nmp) && (hdrs_size < sizeof (hdrs_buf))) {
                        mblk_len = (size_t)nmp->b_wptr - (size_t)nmp->b_rptr;
                        if (mblk_len >= (sizeof (hdrs_buf) - hdrs_size))
                                mblk_len = sizeof (hdrs_buf) - hdrs_size;
                        bcopy(nmp->b_rptr, &hdrs_buf[hdrs_size], mblk_len);
                        hdrs_size += mblk_len;
                        nmp = nmp->b_cont;
                }
                ip_buf = hdrs_buf;
                ip_buf += eth_hdr_size;

                tmp = 1ull;
                hdrp->value |= (tmp << TX_PKT_HEADER_IP_VER_SHIFT);

                tmp = (eth_hdr_size >> 1);
                hdrp->value |= (tmp << TX_PKT_HEADER_L3START_SHIFT);

                /* byte 6 is the next header protocol */
                ipproto = ip_buf[6];

                HXGE_DEBUG_MSG((NULL, TX_CTL, "==> hxge_fill_tx_hdr: IPv6 "
                    " iph_len %d l3start %d eth_hdr_size %d proto 0x%x",
                    iph_len, hdrp->bits.l3start, eth_hdr_size, ipproto));
                HXGE_DEBUG_MSG((NULL, TX_CTL, "==> hxge_tx_pkt_hdr_init: IPv6 "
                    "value 0x%llx", hdrp->value));
                break;

        default:
                HXGE_DEBUG_MSG((NULL, TX_CTL, "==> hxge_fill_tx_hdr: non-IP"));
                goto fill_tx_header_done;
        }

        switch (ipproto) {
        case IPPROTO_TCP:
                HXGE_DEBUG_MSG((NULL, TX_CTL,
                    "==> hxge_fill_tx_hdr: TCP (cksum flag %d)", l4_cksum));
                if (l4_cksum) {
                        tmp = 1ull;
                        hdrp->value |= (tmp << TX_PKT_HEADER_PKT_TYPE_SHIFT);
                        HXGE_DEBUG_MSG((NULL, TX_CTL,
                            "==> hxge_tx_pkt_hdr_init: TCP CKSUM "
                            "value 0x%llx", hdrp->value));
                }
                HXGE_DEBUG_MSG((NULL, TX_CTL,
                    "==> hxge_tx_pkt_hdr_init: TCP value 0x%llx", hdrp->value));
                break;

        case IPPROTO_UDP:
                HXGE_DEBUG_MSG((NULL, TX_CTL, "==> hxge_fill_tx_hdr: UDP"));
                if (l4_cksum) {
                        tmp = 0x2ull;
                        hdrp->value |= (tmp << TX_PKT_HEADER_PKT_TYPE_SHIFT);
                }
                HXGE_DEBUG_MSG((NULL, TX_CTL,
                    "==> hxge_tx_pkt_hdr_init: UDP value 0x%llx",
                    hdrp->value));
                break;

        default:
                goto fill_tx_header_done;
        }

fill_tx_header_done:
        HXGE_DEBUG_MSG((NULL, TX_CTL,
            "==> hxge_fill_tx_hdr: pkt_len %d npads %d value 0x%llx",
            pkt_len, npads, hdrp->value));
        HXGE_DEBUG_MSG((NULL, TX_CTL, "<== hxge_fill_tx_hdr"));
}
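
/*
 * Worked example (a sketch based on the shifts used above): for an
 * untagged IPv4/TCP frame with a 20-byte IP header and hardware L4
 * checksum requested, hxge_fill_tx_hdr() effectively composes:
 *
 *	ihl      = 5	(20-byte IP header, in 32-bit words)
 *	l3start  = 7	(14-byte Ethernet header, in 16-bit units)
 *	pkt_type = 1	(TCP with checksum enable)
 *
 *	hdrp->value |= (5ull << TX_PKT_HEADER_IHL_SHIFT);
 *	hdrp->value |= (7ull << TX_PKT_HEADER_L3START_SHIFT);
 *	hdrp->value |= (1ull << TX_PKT_HEADER_PKT_TYPE_SHIFT);
 */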

/*ARGSUSED*/
p_mblk_t
hxge_tx_pkt_header_reserve(p_mblk_t mp, uint8_t *npads)
{
        p_mblk_t newmp = NULL;

        if ((newmp = allocb(TX_PKT_HEADER_SIZE, BPRI_MED)) == NULL) {
                HXGE_DEBUG_MSG((NULL, TX_CTL,
                    "<== hxge_tx_pkt_header_reserve: allocb failed"));
                return (NULL);
        }
        HXGE_DEBUG_MSG((NULL, TX_CTL,
            "==> hxge_tx_pkt_header_reserve: get new mp"));
        DB_TYPE(newmp) = M_DATA;
        newmp->b_rptr = newmp->b_wptr = DB_LIM(newmp);
        linkb(newmp, mp);
        newmp->b_rptr -= TX_PKT_HEADER_SIZE;

        HXGE_DEBUG_MSG((NULL, TX_CTL,
            "==> hxge_tx_pkt_header_reserve: b_rptr $%p b_wptr $%p",
            newmp->b_rptr, newmp->b_wptr));
        HXGE_DEBUG_MSG((NULL, TX_CTL,
            "<== hxge_tx_pkt_header_reserve: use new mp"));
        return (newmp);
}
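
/*
 * Caller-side sketch (hmp, npads and l4_cksum are hypothetical locals):
 * reserve room for the transmit header, then fill it in. The reserve
 * routine points b_rptr/b_wptr at the buffer limit, links the original
 * message behind the new mblk, and backs b_rptr up by exactly
 * TX_PKT_HEADER_SIZE bytes:
 *
 *	if ((hmp = hxge_tx_pkt_header_reserve(mp, &npads)) == NULL)
 *		return (NULL);
 *	hxge_fill_tx_hdr(mp, B_FALSE, l4_cksum, 0, npads,
 *	    (p_tx_pkt_hdr_all_t)hmp->b_rptr);
 */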

int
hxge_tx_pkt_nmblocks(p_mblk_t mp, int *tot_xfer_len_p)
{
        uint_t          nmblks;
        ssize_t         len;
        uint_t          pkt_len;
        p_mblk_t        nmp, bmp, tmp;
        uint8_t         *b_wptr;

        HXGE_DEBUG_MSG((NULL, TX_CTL,
            "==> hxge_tx_pkt_nmblocks: mp $%p rptr $%p wptr $%p len %d",
            mp, mp->b_rptr, mp->b_wptr, MBLKL(mp)));

        nmp = mp;
        bmp = mp;
        nmblks = 0;
        pkt_len = 0;
        *tot_xfer_len_p = 0;

        while (nmp) {
                len = MBLKL(nmp);
                HXGE_DEBUG_MSG((NULL, TX_CTL, "==> hxge_tx_pkt_nmblocks: "
                    "len %d pkt_len %d nmblks %d tot_xfer_len %d",
                    len, pkt_len, nmblks, *tot_xfer_len_p));

                if (len <= 0) {
                        bmp = nmp;
                        nmp = nmp->b_cont;
                        HXGE_DEBUG_MSG((NULL, TX_CTL,
                            "==> hxge_tx_pkt_nmblocks:"
                            " len (0) pkt_len %d nmblks %d", pkt_len, nmblks));
                        continue;
                }
                *tot_xfer_len_p += len;
                HXGE_DEBUG_MSG((NULL, TX_CTL, "==> hxge_tx_pkt_nmblocks: "
                    "len %d pkt_len %d nmblks %d tot_xfer_len %d",
                    len, pkt_len, nmblks, *tot_xfer_len_p));

                if (len < hxge_bcopy_thresh) {
                        HXGE_DEBUG_MSG((NULL, TX_CTL,
                            "==> hxge_tx_pkt_nmblocks: "
                            "len %d (< thresh) pkt_len %d nmblks %d",
                            len, pkt_len, nmblks));
                        if (pkt_len == 0)
                                nmblks++;
                        pkt_len += len;
                        if (pkt_len >= hxge_bcopy_thresh) {
                                pkt_len = 0;
                                len = 0;
                                nmp = bmp;
                        }
                } else {
                        HXGE_DEBUG_MSG((NULL, TX_CTL,
                            "==> hxge_tx_pkt_nmblocks: "
                            "len %d (> thresh) pkt_len %d nmblks %d",
                            len, pkt_len, nmblks));
                        pkt_len = 0;
                        nmblks++;
                        /*
                         * Hardware limits the transfer length to 4K. If len is
                         * more than 4K, we need to break it up to at most 2
                         * more blocks.
                         */
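                        /*
                         * For example (a sketch, assuming
                         * TX_MAX_TRANSFER_LENGTH is 4096): len = 9000 gives
                         * nsegs = 2 below (9000 is not a multiple of 8192),
                         * so the loop dupb()s twice, leaving fragments of
                         * 4096, 4096 and 808 bytes and adding 2 to nmblks.
                         */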
                        if (len > TX_MAX_TRANSFER_LENGTH) {
                                uint32_t nsegs;

                                /*
                                 * Compute nsegs before logging it so the
                                 * debug message does not read an
                                 * uninitialized variable.
                                 */
                                nsegs = 1;
                                if (len % (TX_MAX_TRANSFER_LENGTH * 2)) {
                                        ++nsegs;
                                }
                                HXGE_DEBUG_MSG((NULL, TX_CTL,
                                    "==> hxge_tx_pkt_nmblocks: "
                                    "len %d pkt_len %d nmblks %d nsegs %d",
                                    len, pkt_len, nmblks, nsegs));
                                do {
                                        b_wptr = nmp->b_rptr +
                                            TX_MAX_TRANSFER_LENGTH;
                                        nmp->b_wptr = b_wptr;
                                        if ((tmp = dupb(nmp)) == NULL) {
                                                return (0);
                                        }
                                        tmp->b_rptr = b_wptr;
                                        tmp->b_wptr = nmp->b_wptr;
                                        tmp->b_cont = nmp->b_cont;
                                        nmp->b_cont = tmp;
                                        nmblks++;
                                        if (--nsegs) {
                                                nmp = tmp;
                                        }
                                } while (nsegs);
                                nmp = tmp;
                        }
                }

                /*
                 * Hardware limits the transmit gather pointers to 15.
                 */
                if (nmp->b_cont && (nmblks + TX_GATHER_POINTERS_THRESHOLD) >
                    TX_MAX_GATHER_POINTERS) {
                        HXGE_DEBUG_MSG((NULL, TX_CTL,
                            "==> hxge_tx_pkt_nmblocks: pull msg - "
                            "len %d pkt_len %d nmblks %d",
                            len, pkt_len, nmblks));
                        /* Pull all message blocks from b_cont */
                        if ((tmp = msgpullup(nmp->b_cont, -1)) == NULL) {
                                return (0);
                        }
                        freemsg(nmp->b_cont);
                        nmp->b_cont = tmp;
                        pkt_len = 0;
                }
                bmp = nmp;
                nmp = nmp->b_cont;
        }

        HXGE_DEBUG_MSG((NULL, TX_CTL,
            "<== hxge_tx_pkt_nmblocks: rptr $%p wptr $%p "
            "nmblks %d len %d tot_xfer_len %d",
            mp->b_rptr, mp->b_wptr, nmblks, MBLKL(mp), *tot_xfer_len_p));
        return (nmblks);
}

boolean_t
hxge_txdma_reclaim(p_hxge_t hxgep, p_tx_ring_t tx_ring_p, int nmblks)
{
        boolean_t               status = B_TRUE;
        p_hxge_dma_common_t     tx_desc_dma_p;
        hxge_dma_common_t       desc_area;
        p_tx_desc_t             tx_desc_ring_vp;
        p_tx_desc_t             tx_desc_p;
        p_tx_desc_t             tx_desc_pp;
        tx_desc_t               r_tx_desc;
        p_tx_msg_t              tx_msg_ring;
        p_tx_msg_t              tx_msg_p;
        hpi_handle_t            handle;
        tdc_tdr_head_t          tx_head;
        uint32_t                pkt_len;
        uint_t                  tx_rd_index;
        uint16_t                head_index, tail_index;
        uint8_t                 tdc;
        boolean_t               head_wrap, tail_wrap;
        p_hxge_tx_ring_stats_t  tdc_stats;
        tdc_byte_cnt_t          byte_cnt;
        tdc_tdr_qlen_t          qlen;
        int                     rc;

        HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_reclaim"));

        status = ((tx_ring_p->descs_pending < hxge_reclaim_pending) &&
            (nmblks != 0));
        HXGE_DEBUG_MSG((hxgep, TX_CTL,
            "==> hxge_txdma_reclaim: pending %d  reclaim %d nmblks %d",
            tx_ring_p->descs_pending, hxge_reclaim_pending, nmblks));

        if (!status) {
                tx_desc_dma_p = &tx_ring_p->tdc_desc;
                desc_area = tx_ring_p->tdc_desc;
                tx_desc_ring_vp = (p_tx_desc_t)DMA_COMMON_VPTR(desc_area);
                tx_rd_index = tx_ring_p->rd_index;
                tx_desc_p = &tx_desc_ring_vp[tx_rd_index];
                tx_msg_ring = tx_ring_p->tx_msg_ring;
                tx_msg_p = &tx_msg_ring[tx_rd_index];
                tdc = tx_ring_p->tdc;
                tdc_stats = tx_ring_p->tdc_stats;
                if (tx_ring_p->descs_pending > tdc_stats->tx_max_pend) {
                        tdc_stats->tx_max_pend = tx_ring_p->descs_pending;
                }
                tail_index = tx_ring_p->wr_index;
                tail_wrap = tx_ring_p->wr_index_wrap;

                /*
                 * The tdc_byte_cnt register reports the number of bytes
                 * transmitted; the count includes padding in the case of
                 * runt packets.
                 */
                handle = HXGE_DEV_HPI_HANDLE(hxgep);
                TXDMA_REG_READ64(handle, TDC_BYTE_CNT, tdc, &byte_cnt.value);
                tdc_stats->obytes_with_pad += byte_cnt.bits.byte_count;

                HXGE_DEBUG_MSG((hxgep, TX_CTL,
                    "==> hxge_txdma_reclaim: tdc %d tx_rd_index %d "
                    "tail_index %d tail_wrap %d tx_desc_p $%p ($%p) ",
                    tdc, tx_rd_index, tail_index, tail_wrap,
                    tx_desc_p, (*(uint64_t *)tx_desc_p)));

                /*
                 * Read the hardware maintained transmit head and wrap around
                 * bit.
                 */
                TXDMA_REG_READ64(handle, TDC_TDR_HEAD, tdc, &tx_head.value);
                head_index = tx_head.bits.head;
                head_wrap = tx_head.bits.wrap;
                HXGE_DEBUG_MSG((hxgep, TX_CTL,
                    "==> hxge_txdma_reclaim: "
                    "tx_rd_index %d tail %d tail_wrap %d head %d wrap %d",
                    tx_rd_index, tail_index, tail_wrap, head_index, head_wrap));

                /*
                 * For debug only. This can be used to verify the qlen and make
                 * sure the hardware is wrapping the Tdr correctly.
                 */
                TXDMA_REG_READ64(handle, TDC_TDR_QLEN, tdc, &qlen.value);
                HXGE_DEBUG_MSG((hxgep, TX_CTL,
                    "==> hxge_txdma_reclaim: tdr_qlen %d tdr_pref_qlen %d",
                    qlen.bits.tdr_qlen, qlen.bits.tdr_pref_qlen));

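                /*
                 * The head and tail indices each carry a wrap bit that
                 * toggles every time the index wraps past the end of the
                 * ring, which disambiguates head == tail. A sketch of the
                 * intended semantics (the real TXDMA_RING_EMPTY/FULL macros
                 * are defined in the headers):
                 *
                 *	empty: head == tail && head_wrap == tail_wrap
                 *	full:  head == tail && head_wrap != tail_wrap
                 */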
                if (head_index == tail_index) {
                        if (TXDMA_RING_EMPTY(head_index, head_wrap, tail_index,
                            tail_wrap) && (head_index == tx_rd_index)) {
                                HXGE_DEBUG_MSG((hxgep, TX_CTL,
                                    "==> hxge_txdma_reclaim: EMPTY"));
                                return (B_TRUE);
                        }
                        HXGE_DEBUG_MSG((hxgep, TX_CTL,
                            "==> hxge_txdma_reclaim: Checking if ring full"));
                        if (TXDMA_RING_FULL(head_index, head_wrap, tail_index,
                            tail_wrap)) {
                                HXGE_DEBUG_MSG((hxgep, TX_CTL,
                                    "==> hxge_txdma_reclaim: full"));
                                return (B_FALSE);
                        }
                }
                HXGE_DEBUG_MSG((hxgep, TX_CTL,
                    "==> hxge_txdma_reclaim: tx_rd_index and head_index"));

                /* XXXX: limit the # of reclaims */
                tx_desc_pp = &r_tx_desc;
                while ((tx_rd_index != head_index) &&
                    (tx_ring_p->descs_pending != 0)) {
                        HXGE_DEBUG_MSG((hxgep, TX_CTL,
                            "==> hxge_txdma_reclaim: Checking if pending"));
                        HXGE_DEBUG_MSG((hxgep, TX_CTL,
                            "==> hxge_txdma_reclaim: descs_pending %d ",
                            tx_ring_p->descs_pending));
                        HXGE_DEBUG_MSG((hxgep, TX_CTL,
                            "==> hxge_txdma_reclaim: "
                            "(tx_rd_index %d head_index %d (tx_desc_p $%p)",
                            tx_rd_index, head_index, tx_desc_p));

                        tx_desc_pp->value = tx_desc_p->value;
                        HXGE_DEBUG_MSG((hxgep, TX_CTL,
                            "==> hxge_txdma_reclaim: "
                            "(tx_rd_index %d head_index %d "
                            "tx_desc_p $%p (desc value 0x%llx) ",
                            tx_rd_index, head_index,
                            tx_desc_pp, (*(uint64_t *)tx_desc_pp)));
                        HXGE_DEBUG_MSG((hxgep, TX_CTL,
                            "==> hxge_txdma_reclaim: dump desc:"));

                        /*
                         * Use the descriptor transfer length to count the
                         * transmitted bytes and packets for this descriptor
                         * (the padded byte count was already taken from
                         * tdc_byte_cnt above).
                         */
                        pkt_len = tx_desc_pp->bits.tr_len;
                        tdc_stats->obytes += pkt_len;
                        tdc_stats->opackets += tx_desc_pp->bits.sop;
                        HXGE_DEBUG_MSG((hxgep, TX_CTL,
                            "==> hxge_txdma_reclaim: pkt_len %d "
                            "tdc channel %d opackets %d",
                            pkt_len, tdc, tdc_stats->opackets));

                        if (tx_msg_p->flags.dma_type == USE_DVMA) {
                                HXGE_DEBUG_MSG((hxgep, TX_CTL,
                                    "tx_desc_p = $%p tx_desc_pp = $%p "
                                    "index = %d",
                                    tx_desc_p, tx_desc_pp,
                                    tx_ring_p->rd_index));
                                (void) dvma_unload(tx_msg_p->dvma_handle,
                                    0, -1);
                                tx_msg_p->dvma_handle = NULL;
                                if (tx_ring_p->dvma_wr_index ==
                                    tx_ring_p->dvma_wrap_mask) {
                                        tx_ring_p->dvma_wr_index = 0;
                                } else {
                                        tx_ring_p->dvma_wr_index++;
                                }
                                tx_ring_p->dvma_pending--;
                        } else if (tx_msg_p->flags.dma_type == USE_DMA) {
                                HXGE_DEBUG_MSG((hxgep, TX_CTL,
                                    "==> hxge_txdma_reclaim: USE DMA"));
                                if (rc = ddi_dma_unbind_handle
                                    (tx_msg_p->dma_handle)) {
                                        cmn_err(CE_WARN, "hxge_reclaim: "
                                            "ddi_dma_unbind_handle "
                                            "failed. status %d", rc);
                                }
                        }

                        HXGE_DEBUG_MSG((hxgep, TX_CTL,
                            "==> hxge_txdma_reclaim: count packets"));

                        /*
                         * count a chained packet only once.
                         */
                        if (tx_msg_p->tx_message != NULL) {
                                freemsg(tx_msg_p->tx_message);
                                tx_msg_p->tx_message = NULL;
                        }
                        tx_msg_p->flags.dma_type = USE_NONE;
                        tx_rd_index = tx_ring_p->rd_index;
                        tx_rd_index = (tx_rd_index + 1) &
                            tx_ring_p->tx_wrap_mask;
                        tx_ring_p->rd_index = tx_rd_index;
                        tx_ring_p->descs_pending--;
                        tx_desc_p = &tx_desc_ring_vp[tx_rd_index];
                        tx_msg_p = &tx_msg_ring[tx_rd_index];
                }

                status = (nmblks <= ((int)tx_ring_p->tx_ring_size -
                    (int)tx_ring_p->descs_pending - TX_FULL_MARK));
                if (status) {
                        (void) atomic_cas_32((uint32_t *)&tx_ring_p->queueing,
                            1, 0);
                }
        } else {
                status = (nmblks <= ((int)tx_ring_p->tx_ring_size -
                    (int)tx_ring_p->descs_pending - TX_FULL_MARK));
        }

        HXGE_DEBUG_MSG((hxgep, TX_CTL,
            "<== hxge_txdma_reclaim status = 0x%08x", status));
        return (status);
}
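
/*
 * Caller-side sketch (hypothetical, for illustration): the transmit path
 * calls the reclaim routine under the ring lock before queueing nmblks
 * more descriptors:
 *
 *	MUTEX_ENTER(&tx_ring_p->lock);
 *	if (!hxge_txdma_reclaim(hxgep, tx_ring_p, nmblks)) {
 *		tx_ring_p->queueing = 1;
 *		MUTEX_EXIT(&tx_ring_p->lock);
 *		return (B_FALSE);
 *	}
 *
 * B_TRUE means at least nmblks descriptors fit below the TX_FULL_MARK
 * threshold; B_FALSE tells the caller to back off until an interrupt
 * reclaims more descriptors.
 */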

uint_t
hxge_tx_intr(caddr_t arg1, caddr_t arg2)
{
        p_hxge_ldv_t    ldvp = (p_hxge_ldv_t)arg1;
        p_hxge_t        hxgep = (p_hxge_t)arg2;
        p_hxge_ldg_t    ldgp;
        uint8_t         channel;
        uint32_t        vindex;
        hpi_handle_t    handle;
        tdc_stat_t      cs;
        p_tx_ring_t     *tx_rings;
        p_tx_ring_t     tx_ring_p;
        hpi_status_t    rs = HPI_SUCCESS;
        uint_t          serviced = DDI_INTR_UNCLAIMED;
        hxge_status_t   status = HXGE_OK;

        if (ldvp == NULL) {
                HXGE_DEBUG_MSG((NULL, INT_CTL,
                    "<== hxge_tx_intr: hxgep $%p ldvp $%p", hxgep, ldvp));
                return (DDI_INTR_UNCLAIMED);
        }

        if (arg2 == NULL || (void *) ldvp->hxgep != arg2) {
                hxgep = ldvp->hxgep;
        }

        /*
         * If the interface is not started, just swallow the interrupt
         * and don't rearm the logical device.
         */
        if (hxgep->hxge_mac_state != HXGE_MAC_STARTED)
                return (DDI_INTR_CLAIMED);

        HXGE_DEBUG_MSG((hxgep, INT_CTL,
            "==> hxge_tx_intr: hxgep(arg2) $%p ldvp(arg1) $%p", hxgep, ldvp));

        /*
         * This interrupt handler is for a specific transmit dma channel.
         */
        handle = HXGE_DEV_HPI_HANDLE(hxgep);

        /* Get the control and status for this channel. */
        channel = ldvp->channel;
        ldgp = ldvp->ldgp;
        HXGE_DEBUG_MSG((hxgep, INT_CTL,
            "==> hxge_tx_intr: hxgep $%p ldvp (ldvp) $%p channel %d",
            hxgep, ldvp, channel));

        rs = hpi_txdma_control_status(handle, OP_GET, channel, &cs);
        vindex = ldvp->vdma_index;
        HXGE_DEBUG_MSG((hxgep, INT_CTL,
            "==> hxge_tx_intr:channel %d ring index %d status 0x%08x",
            channel, vindex, rs));

        if (!rs && cs.bits.marked) {
                HXGE_DEBUG_MSG((hxgep, INT_CTL,
                    "==> hxge_tx_intr:channel %d ring index %d "
                    "status 0x%08x (marked bit set)", channel, vindex, rs));
                tx_rings = hxgep->tx_rings->rings;
                tx_ring_p = tx_rings[vindex];
                HXGE_DEBUG_MSG((hxgep, INT_CTL,
                    "==> hxge_tx_intr:channel %d ring index %d "
                    "status 0x%08x (marked bit set, calling reclaim)",
                    channel, vindex, rs));

                MUTEX_ENTER(&tx_ring_p->lock);
                (void) hxge_txdma_reclaim(hxgep, tx_rings[vindex], 0);
                MUTEX_EXIT(&tx_ring_p->lock);
                mac_tx_update(hxgep->mach);
        }

        /*
         * Process other transmit control and status. Check the ldv state.
         */
        status = hxge_tx_err_evnts(hxgep, ldvp->vdma_index, ldvp, cs);

        /* Clear the error bits (TDC_STAT is a transmit-side register) */
        TXDMA_REG_WRITE64(handle, TDC_STAT, channel, cs.value);

        /*
         * Rearm this logical group if this is a single device group.
         */
        if (ldgp->nldvs == 1) {
                HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_tx_intr: rearm"));
                if (status == HXGE_OK) {
                        (void) hpi_intr_ldg_mgmt_set(handle, ldgp->ldg,
                            B_TRUE, ldgp->ldg_timer);
                }
        }
        HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_tx_intr"));
        serviced = DDI_INTR_CLAIMED;
        return (serviced);
}

void
hxge_txdma_stop(p_hxge_t hxgep)
{
        HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_stop"));

        (void) hxge_tx_vmac_disable(hxgep);
        (void) hxge_txdma_hw_mode(hxgep, HXGE_DMA_STOP);

        HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_stop"));
}

hxge_status_t
hxge_txdma_hw_mode(p_hxge_t hxgep, boolean_t enable)
{
        int             i, ndmas;
        uint16_t        channel;
        p_tx_rings_t    tx_rings;
        p_tx_ring_t     *tx_desc_rings;
        hpi_handle_t    handle;
        hpi_status_t    rs = HPI_SUCCESS;
        hxge_status_t   status = HXGE_OK;

        HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
            "==> hxge_txdma_hw_mode: enable mode %d", enable));

        if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
                HXGE_DEBUG_MSG((hxgep, TX_CTL,
                    "<== hxge_txdma_hw_mode: not initialized"));
                return (HXGE_ERROR);
        }
        tx_rings = hxgep->tx_rings;
        if (tx_rings == NULL) {
                HXGE_DEBUG_MSG((hxgep, TX_CTL,
                    "<== hxge_txdma_hw_mode: NULL global ring pointer"));
                return (HXGE_ERROR);
        }
        tx_desc_rings = tx_rings->rings;
        if (tx_desc_rings == NULL) {
                HXGE_DEBUG_MSG((hxgep, TX_CTL,
                    "<== hxge_txdma_hw_mode: NULL rings pointer"));
                return (HXGE_ERROR);
        }
        ndmas = tx_rings->ndmas;
        if (!ndmas) {
                HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
                    "<== hxge_txdma_hw_mode: no dma channel allocated"));
                return (HXGE_ERROR);
        }
        HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_txdma_hw_mode: "
            "tx_rings $%p tx_desc_rings $%p ndmas %d",
            tx_rings, tx_desc_rings, ndmas));

        handle = HXGE_DEV_HPI_HANDLE(hxgep);
        for (i = 0; i < ndmas; i++) {
                if (tx_desc_rings[i] == NULL) {
                        continue;
                }
                channel = tx_desc_rings[i]->tdc;
                HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
                    "==> hxge_txdma_hw_mode: channel %d", channel));
                if (enable) {
                        rs = hpi_txdma_channel_enable(handle, channel);
                        HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
                            "==> hxge_txdma_hw_mode: channel %d (enable) "
                            "rs 0x%x", channel, rs));
                } else {
                        /*
                         * Stop the DMA channel and wait for the stop-done
                         * bit. If the stop-done bit is not set, force an
                         * error so the TXC will stop. All channels bound to
                         * this port need to be stopped and reset after
                         * injecting an interrupt error.
                         */
                        rs = hpi_txdma_channel_disable(handle, channel);
                        HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
                            "==> hxge_txdma_hw_mode: channel %d (disable) "
                            "rs 0x%x", channel, rs));
                }
        }

        status = ((rs == HPI_SUCCESS) ? HXGE_OK : HXGE_ERROR | rs);

        HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
            "<== hxge_txdma_hw_mode: status 0x%x", status));

        return (status);
}

void
hxge_txdma_enable_channel(p_hxge_t hxgep, uint16_t channel)
{
        hpi_handle_t handle;

        HXGE_DEBUG_MSG((hxgep, DMA_CTL,
            "==> hxge_txdma_enable_channel: channel %d", channel));

        handle = HXGE_DEV_HPI_HANDLE(hxgep);
        /* enable the transmit dma channels */
        (void) hpi_txdma_channel_enable(handle, channel);

        HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_txdma_enable_channel"));
}

void
hxge_txdma_disable_channel(p_hxge_t hxgep, uint16_t channel)
{
        hpi_handle_t handle;

        HXGE_DEBUG_MSG((hxgep, DMA_CTL,
            "==> hxge_txdma_disable_channel: channel %d", channel));

        handle = HXGE_DEV_HPI_HANDLE(hxgep);
        /* stop the transmit dma channels */
        (void) hpi_txdma_channel_disable(handle, channel);

        HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_disable_channel"));
}

int
hxge_txdma_stop_inj_err(p_hxge_t hxgep, int channel)
{
        hpi_handle_t    handle;
        int             status;
        hpi_status_t    rs = HPI_SUCCESS;

        HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_stop_inj_err"));

        /*
         * Stop the DMA channel and wait for the stop-done bit. If the
         * stop-done bit is not set, then inject an error.
         */
        handle = HXGE_DEV_HPI_HANDLE(hxgep);
        rs = hpi_txdma_channel_disable(handle, channel);
        status = ((rs == HPI_SUCCESS) ? HXGE_OK : HXGE_ERROR | rs);
        if (status == HXGE_OK) {
                HXGE_DEBUG_MSG((hxgep, TX_CTL,
                    "<== hxge_txdma_stop_inj_err (channel %d): "
                    "stopped OK", channel));
                return (status);
        }

        HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
            "==> hxge_txdma_stop_inj_err (channel %d): stop failed (0x%x) "
            "(injected error but still not stopped)", channel, rs));

        HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_stop_inj_err"));

        return (status);
}

/*ARGSUSED*/
void
hxge_fixup_txdma_rings(p_hxge_t hxgep)
{
        int             index, ndmas;
        uint16_t        channel;
        p_tx_rings_t    tx_rings;

        HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_fixup_txdma_rings"));

        /*
         * For each transmit channel, reclaim each descriptor and free buffers.
         */
        tx_rings = hxgep->tx_rings;
        if (tx_rings == NULL) {
                HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
                    "<== hxge_fixup_txdma_rings: NULL ring pointer"));
                return;
        }

        ndmas = tx_rings->ndmas;
        if (!ndmas) {
                HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
                    "<== hxge_fixup_txdma_rings: no channel allocated"));
                return;
        }

        if (tx_rings->rings == NULL) {
                HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
                    "<== hxge_fixup_txdma_rings: NULL rings pointer"));
                return;
        }

        HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_fixup_txdma_rings: "
            "tx_rings $%p tx_desc_rings $%p ndmas %d",
            tx_rings, tx_rings->rings, ndmas));

        for (index = 0; index < ndmas; index++) {
                channel = tx_rings->rings[index]->tdc;
                HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
                    "==> hxge_fixup_txdma_rings: channel %d", channel));
                hxge_txdma_fixup_channel(hxgep, tx_rings->rings[index],
                    channel);
        }

        HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_fixup_txdma_rings"));
}

/*ARGSUSED*/
void
hxge_txdma_fix_channel(p_hxge_t hxgep, uint16_t channel)
{
        p_tx_ring_t ring_p;

        HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_fix_channel"));

        ring_p = hxge_txdma_get_ring(hxgep, channel);
        if (ring_p == NULL) {
                HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_fix_channel"));
                return;
        }

        if (ring_p->tdc != channel) {
                HXGE_DEBUG_MSG((hxgep, TX_CTL,
                    "<== hxge_txdma_fix_channel: channel not matched "
                    "ring tdc %d passed channel %d", ring_p->tdc, channel));
                return;
        }

        hxge_txdma_fixup_channel(hxgep, ring_p, channel);

        HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_fix_channel"));
}

/*ARGSUSED*/
void
hxge_txdma_fixup_channel(p_hxge_t hxgep, p_tx_ring_t ring_p, uint16_t channel)
{
        HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_fixup_channel"));

        if (ring_p == NULL) {
                HXGE_DEBUG_MSG((hxgep, TX_CTL,
                    "<== hxge_txdma_fixup_channel: NULL ring pointer"));
                return;
        }
        if (ring_p->tdc != channel) {
                HXGE_DEBUG_MSG((hxgep, TX_CTL,
                    "<== hxge_txdma_fixup_channel: channel not matched "
                    "ring tdc %d passed channel %d", ring_p->tdc, channel));
                return;
        }
        MUTEX_ENTER(&ring_p->lock);
        (void) hxge_txdma_reclaim(hxgep, ring_p, 0);

        ring_p->rd_index = 0;
        ring_p->wr_index = 0;
        ring_p->ring_head.value = 0;
        ring_p->ring_kick_tail.value = 0;
        ring_p->descs_pending = 0;
        MUTEX_EXIT(&ring_p->lock);

        HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_fixup_channel"));
}

/*ARGSUSED*/
void
hxge_txdma_hw_kick(p_hxge_t hxgep)
{
        int             index, ndmas;
        uint16_t        channel;
        p_tx_rings_t    tx_rings;

        HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_hw_kick"));

        tx_rings = hxgep->tx_rings;
        if (tx_rings == NULL) {
                HXGE_DEBUG_MSG((hxgep, TX_CTL,
                    "<== hxge_txdma_hw_kick: NULL ring pointer"));
                return;
        }
        ndmas = tx_rings->ndmas;
        if (!ndmas) {
                HXGE_DEBUG_MSG((hxgep, TX_CTL,
                    "<== hxge_txdma_hw_kick: no channel allocated"));
                return;
        }
        if (tx_rings->rings == NULL) {
                HXGE_DEBUG_MSG((hxgep, TX_CTL,
                    "<== hxge_txdma_hw_kick: NULL rings pointer"));
                return;
        }
        HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_txdma_hw_kick: "
            "tx_rings $%p tx_desc_rings $%p ndmas %d",
            tx_rings, tx_rings->rings, ndmas));

        for (index = 0; index < ndmas; index++) {
                channel = tx_rings->rings[index]->tdc;
                HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
                    "==> hxge_txdma_hw_kick: channel %d", channel));
                hxge_txdma_hw_kick_channel(hxgep, tx_rings->rings[index],
                    channel);
        }

        HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_hw_kick"));
}

/*ARGSUSED*/
void
hxge_txdma_kick_channel(p_hxge_t hxgep, uint16_t channel)
{
        p_tx_ring_t ring_p;

        HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_kick_channel"));

        ring_p = hxge_txdma_get_ring(hxgep, channel);
        if (ring_p == NULL) {
                HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_kick_channel"));
                return;
        }

        if (ring_p->tdc != channel) {
                HXGE_DEBUG_MSG((hxgep, TX_CTL,
                    "<== hxge_txdma_kick_channel: channel not matched "
                    "ring tdc %d passed channel %d", ring_p->tdc, channel));
                return;
        }

        hxge_txdma_hw_kick_channel(hxgep, ring_p, channel);

        HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_kick_channel"));
}

/*ARGSUSED*/
void
hxge_txdma_hw_kick_channel(p_hxge_t hxgep, p_tx_ring_t ring_p, uint16_t channel)
{
        HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_hw_kick_channel"));

        if (ring_p == NULL) {
                HXGE_DEBUG_MSG((hxgep, TX_CTL,
                    "<== hxge_txdma_hw_kick_channel: NULL ring pointer"));
                return;
        }

        HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_hw_kick_channel"));
}

/*ARGSUSED*/
void
hxge_check_tx_hang(p_hxge_t hxgep)
{
        HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_check_tx_hang"));

        /*
         * Needs input from the hardware: whether the head index has moved
         * since the last timeout, and the packets-not-transmitted or
         * stuffed registers.
         */
1244         if (hxge_txdma_hung(hxgep)) {
1245                 hxge_fixup_hung_txdma_rings(hxgep);
1246         }
1247 
1248         HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_check_tx_hang"));
1249 }
1250 
1251 int
1252 hxge_txdma_hung(p_hxge_t hxgep)
1253 {
1254         int             index, ndmas;
1255         uint16_t        channel;
1256         p_tx_rings_t    tx_rings;
1257         p_tx_ring_t     tx_ring_p;
1258 
1259         HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_hung"));
1260 
1261         tx_rings = hxgep->tx_rings;
1262         if (tx_rings == NULL) {
1263                 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1264                     "<== hxge_txdma_hung: NULL ring pointer"));
1265                 return (B_FALSE);
1266         }
1267 
1268         ndmas = tx_rings->ndmas;
1269         if (!ndmas) {
1270                 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1271                     "<== hxge_txdma_hung: no channel allocated"));
1272                 return (B_FALSE);
1273         }
1274 
1275         if (tx_rings->rings == NULL) {
1276                 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1277                     "<== hxge_txdma_hung: NULL rings pointer"));
1278                 return (B_FALSE);
1279         }
1280 
1281         for (index = 0; index < ndmas; index++) {
1282                 channel = tx_rings->rings[index]->tdc;
1283                 tx_ring_p = tx_rings->rings[index];
1284                 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1285                     "==> hxge_txdma_hung: channel %d", channel));
1286                 if (hxge_txdma_channel_hung(hxgep, tx_ring_p, channel)) {
1287                         return (B_TRUE);
1288                 }
1289         }
1290 
1291         HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_hung"));
1292 
1293         return (B_FALSE);
1294 }
1295 
1296 int
1297 hxge_txdma_channel_hung(p_hxge_t hxgep, p_tx_ring_t tx_ring_p, uint16_t channel)
1298 {
1299         uint16_t        head_index, tail_index;
1300         boolean_t       head_wrap, tail_wrap;
1301         hpi_handle_t    handle;
1302         tdc_tdr_head_t  tx_head;
1303         uint_t          tx_rd_index;
1304 
1305         HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_channel_hung"));
1306 
1307         handle = HXGE_DEV_HPI_HANDLE(hxgep);
1308         HXGE_DEBUG_MSG((hxgep, TX_CTL,
1309             "==> hxge_txdma_channel_hung: channel %d", channel));
1310         MUTEX_ENTER(&tx_ring_p->lock);
1311         (void) hxge_txdma_reclaim(hxgep, tx_ring_p, 0);
1312 
1313         tail_index = tx_ring_p->wr_index;
1314         tail_wrap = tx_ring_p->wr_index_wrap;
1315         tx_rd_index = tx_ring_p->rd_index;
1316         MUTEX_EXIT(&tx_ring_p->lock);
1317 
1318         HXGE_DEBUG_MSG((hxgep, TX_CTL,
1319             "==> hxge_txdma_channel_hung: tdc %d tx_rd_index %d "
1320             "tail_index %d tail_wrap %d ",
1321             channel, tx_rd_index, tail_index, tail_wrap));
1322         /*
1323          * Read the hardware maintained transmit head and wrap around bit.
1324          */
1325         (void) hpi_txdma_ring_head_get(handle, channel, &tx_head);
1326         head_index = tx_head.bits.head;
1327         head_wrap = tx_head.bits.wrap;
1328         HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_channel_hung: "
1329             "tx_rd_index %d tail %d tail_wrap %d head %d wrap %d",
1330             tx_rd_index, tail_index, tail_wrap, head_index, head_wrap));
1331 
1332         if (TXDMA_RING_EMPTY(head_index, head_wrap, tail_index, tail_wrap) &&
1333             (head_index == tx_rd_index)) {
1334                 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1335                     "==> hxge_txdma_channel_hung: EMPTY"));
1336                 return (B_FALSE);
1337         }
1338         HXGE_DEBUG_MSG((hxgep, TX_CTL,
1339             "==> hxge_txdma_channel_hung: Checking if ring full"));
1340         if (TXDMA_RING_FULL(head_index, head_wrap, tail_index, tail_wrap)) {
1341                 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1342                     "==> hxge_txdma_channel_hung: full"));
1343                 return (B_TRUE);
1344         }
1345 
1346         /* Not empty and not full: no further hardware hang check is done */
1347         HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_channel_hung"));
1348 
1349         return (B_FALSE);
1350 }
1351 
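     /*
      * Editorial sketch (illustrative; not compiled into the driver): the
      * wrap bits are what make the head/tail comparisons above unambiguous
      * when head_index == tail_index.  Assuming TXDMA_RING_EMPTY and
      * TXDMA_RING_FULL follow the usual wrap-bit convention, consistent
      * with their use in hxge_txdma_channel_hung():
      *
      *	empty: head == tail && head_wrap == tail_wrap
      *	full:  head == tail && head_wrap != tail_wrap
      *
      * Example: in a 32-entry ring where the producer has queued all 32
      * descriptors (tail wrapped back to 0, tail_wrap toggled) and the
      * consumer has reclaimed none (head 0, head_wrap unchanged),
      * head == tail == 0 but the wrap bits differ, so the ring reads as
      * full rather than empty.
      */
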
1352 /*ARGSUSED*/
1353 void
1354 hxge_fixup_hung_txdma_rings(p_hxge_t hxgep)
1355 {
1356         int             index, ndmas;
1357         uint16_t        channel;
1358         p_tx_rings_t    tx_rings;
1359 
1360         HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_fixup_hung_txdma_rings"));
1361         tx_rings = hxgep->tx_rings;
1362         if (tx_rings == NULL) {
1363                 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1364                     "<== hxge_fixup_hung_txdma_rings: NULL ring pointer"));
1365                 return;
1366         }
1367         ndmas = tx_rings->ndmas;
1368         if (!ndmas) {
1369                 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1370                     "<== hxge_fixup_hung_txdma_rings: no channel allocated"));
1371                 return;
1372         }
1373         if (tx_rings->rings == NULL) {
1374                 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1375                     "<== hxge_fixup_hung_txdma_rings: NULL rings pointer"));
1376                 return;
1377         }
1378         HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_fixup_hung_txdma_rings: "
1379             "tx_rings $%p tx_desc_rings $%p ndmas %d",
1380             tx_rings, tx_rings->rings, ndmas));
1381 
1382         for (index = 0; index < ndmas; index++) {
1383                 channel = tx_rings->rings[index]->tdc;
1384                 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1385                     "==> hxge_fixup_hung_txdma_rings: channel %d", channel));
1386                 hxge_txdma_fixup_hung_channel(hxgep, tx_rings->rings[index],
1387                     channel);
1388         }
1389 
1390         HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_fixup_hung_txdma_rings"));
1391 }
1392 
1393 /*ARGSUSED*/
1394 void
1395 hxge_txdma_fix_hung_channel(p_hxge_t hxgep, uint16_t channel)
1396 {
1397         p_tx_ring_t ring_p;
1398 
1399         HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_fix_hung_channel"));
1400         ring_p = hxge_txdma_get_ring(hxgep, channel);
1401         if (ring_p == NULL) {
1402                 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1403                     "<== hxge_txdma_fix_hung_channel"));
1404                 return;
1405         }
1406         if (ring_p->tdc != channel) {
1407                 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1408                     "<== hxge_txdma_fix_hung_channel: channel not matched "
1409                     "ring tdc %d passed channel %d", ring_p->tdc, channel));
1410                 return;
1411         }
1412         hxge_txdma_fixup_channel(hxgep, ring_p, channel);
1413 
1414         HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_fix_hung_channel"));
1415 }
1416 
1417 /*ARGSUSED*/
1418 void
1419 hxge_txdma_fixup_hung_channel(p_hxge_t hxgep, p_tx_ring_t ring_p,
1420     uint16_t channel)
1421 {
1422         hpi_handle_t    handle;
1423         int             status = HXGE_OK;
1424 
1425         HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_fixup_hung_channel"));
1426 
1427         if (ring_p == NULL) {
1428                 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1429                     "<== hxge_txdma_fixup_hung_channel: NULL ring pointer"));
1430                 return;
1431         }
1432         if (ring_p->tdc != channel) {
1433                 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1434                     "<== hxge_txdma_fixup_hung_channel: channel "
1435                     "not matched ring tdc %d passed channel %d",
1436                     ring_p->tdc, channel));
1437                 return;
1438         }
1439         /* Reclaim descriptors */
1440         MUTEX_ENTER(&ring_p->lock);
1441         (void) hxge_txdma_reclaim(hxgep, ring_p, 0);
1442         MUTEX_EXIT(&ring_p->lock);
1443 
1444         handle = HXGE_DEV_HPI_HANDLE(hxgep);
1445         /*
1446          * Stop the DMA channel and wait for the stop done. If the stop done
1447          * bit is not set, then force an error.
1448          */
1449         status = hpi_txdma_channel_disable(handle, channel);
1450         if (!(status & HPI_TXDMA_STOP_FAILED)) {
1451                 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1452                     "<== hxge_txdma_fixup_hung_channel: stopped OK "
1453                     "ring tdc %d passed channel %d", ring_p->tdc, channel));
1454                 return;
1455         }
1456         /* Stop done bit will be set as a result of error injection */
1457         status = hpi_txdma_channel_disable(handle, channel);
1458         if (!(status & HPI_TXDMA_STOP_FAILED)) {
1459                 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1460                     "<== hxge_txdma_fixup_hung_channel: stopped again "
1461                     "ring tdc %d passed channel %d", ring_p->tdc, channel));
1462                 return;
1463         }
1464 
1465         HXGE_DEBUG_MSG((hxgep, TX_CTL,
1466             "<== hxge_txdma_fixup_hung_channel: stop done still not set!! "
1467             "ring tdc %d passed channel %d", ring_p->tdc, channel));
1468         HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_fixup_hung_channel"));
1469 }
1470 
1471 /*ARGSUSED*/
1472 void
1473 hxge_reclaim_rings(p_hxge_t hxgep)
1474 {
1475         int             index, ndmas;
1476         uint16_t        channel;
1477         p_tx_rings_t    tx_rings;
1478         p_tx_ring_t     tx_ring_p;
1479 
1480         HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_reclaim_rings"));
1481         tx_rings = hxgep->tx_rings;
1482         if (tx_rings == NULL) {
1483                 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1484                     "<== hxge_reclaim_rings: NULL ring pointer"));
1485                 return;
1486         }
1487         ndmas = tx_rings->ndmas;
1488         if (!ndmas) {
1489                 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1490                     "<== hxge_reclaim_rings: no channel allocated"));
1491                 return;
1492         }
1493         if (tx_rings->rings == NULL) {
1494                 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1495                     "<== hxge_reclaim_rings: NULL rings pointer"));
1496                 return;
1497         }
1498         HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_reclaim_rings: "
1499             "tx_rings $%p tx_desc_rings $%p ndmas %d",
1500             tx_rings, tx_rings->rings, ndmas));
1501 
1502         for (index = 0; index < ndmas; index++) {
1503                 channel = tx_rings->rings[index]->tdc;
1504                 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1505                     "==> hxge_reclaim_rings: channel %d", channel));
1506                 tx_ring_p = tx_rings->rings[index];
1507                 MUTEX_ENTER(&tx_ring_p->lock);
1508                 (void) hxge_txdma_reclaim(hxgep, tx_ring_p, channel);
1509                 MUTEX_EXIT(&tx_ring_p->lock);
1510         }
1511 
1512         HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_reclaim_rings"));
1513 }
1514 
1515 /*
1516  * Static functions start here.
1517  */
1518 static hxge_status_t
1519 hxge_map_txdma(p_hxge_t hxgep)
1520 {
1521         int                     i, ndmas;
1522         uint16_t                channel;
1523         p_tx_rings_t            tx_rings;
1524         p_tx_ring_t             *tx_desc_rings;
1525         p_tx_mbox_areas_t       tx_mbox_areas_p;
1526         p_tx_mbox_t             *tx_mbox_p;
1527         p_hxge_dma_pool_t       dma_buf_poolp;
1528         p_hxge_dma_pool_t       dma_cntl_poolp;
1529         p_hxge_dma_common_t     *dma_buf_p;
1530         p_hxge_dma_common_t     *dma_cntl_p;
1531         hxge_status_t           status = HXGE_OK;
1532 
1533         HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_map_txdma"));
1534 
1535         dma_buf_poolp = hxgep->tx_buf_pool_p;
1536         dma_cntl_poolp = hxgep->tx_cntl_pool_p;
1537 
1538         if (!dma_buf_poolp->buf_allocated || !dma_cntl_poolp->buf_allocated) {
1539                 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1540                     "==> hxge_map_txdma: buf not allocated"));
1541                 return (HXGE_ERROR);
1542         }
1543         ndmas = dma_buf_poolp->ndmas;
1544         if (!ndmas) {
1545                 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1546                     "<== hxge_map_txdma: no dma allocated"));
1547                 return (HXGE_ERROR);
1548         }
1549         dma_buf_p = dma_buf_poolp->dma_buf_pool_p;
1550         dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p;
1551 
1552         tx_rings = (p_tx_rings_t)KMEM_ZALLOC(sizeof (tx_rings_t), KM_SLEEP);
1553         tx_desc_rings = (p_tx_ring_t *)KMEM_ZALLOC(
1554             sizeof (p_tx_ring_t) * ndmas, KM_SLEEP);
1555 
1556         HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_map_txdma: "
1557             "tx_rings $%p tx_desc_rings $%p", tx_rings, tx_desc_rings));
1558 
1559         tx_mbox_areas_p = (p_tx_mbox_areas_t)
1560             KMEM_ZALLOC(sizeof (tx_mbox_areas_t), KM_SLEEP);
1561         tx_mbox_p = (p_tx_mbox_t *)KMEM_ZALLOC(
1562             sizeof (p_tx_mbox_t) * ndmas, KM_SLEEP);
1563 
1564         /*
1565          * Map descriptors from the buffer pools for each dma channel.
1566          */
1567         for (i = 0; i < ndmas; i++) {
1568                 /*
1569                  * Set up and prepare buffer blocks, descriptors and mailbox.
1570                  */
1571                 channel = ((p_hxge_dma_common_t)dma_buf_p[i])->dma_channel;
1572                 status = hxge_map_txdma_channel(hxgep, channel,
1573                     (p_hxge_dma_common_t *)&dma_buf_p[i],
1574                     (p_tx_ring_t *)&tx_desc_rings[i],
1575                     dma_buf_poolp->num_chunks[i],
1576                     (p_hxge_dma_common_t *)&dma_cntl_p[i],
1577                     (p_tx_mbox_t *)&tx_mbox_p[i]);
1578                 if (status != HXGE_OK) {
1579                         goto hxge_map_txdma_fail1;
1580                 }
1581                 tx_desc_rings[i]->index = (uint16_t)i;
1582                 tx_desc_rings[i]->tdc_stats = &hxgep->statsp->tdc_stats[i];
1583         }
1584 
1585         tx_rings->ndmas = ndmas;
1586         tx_rings->rings = tx_desc_rings;
1587         hxgep->tx_rings = tx_rings;
1588         tx_mbox_areas_p->txmbox_areas_p = tx_mbox_p;
1589         hxgep->tx_mbox_areas_p = tx_mbox_areas_p;
1590 
1591         HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_map_txdma: "
1592             "tx_rings $%p rings $%p", hxgep->tx_rings, hxgep->tx_rings->rings));
1593         HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_map_txdma: "
1594             "tx_rings $%p tx_desc_rings $%p",
1595             hxgep->tx_rings, tx_desc_rings));
1596 
1597         goto hxge_map_txdma_exit;
1598 
1599 hxge_map_txdma_fail1:
1600         HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
1601             "==> hxge_map_txdma: uninit tx desc "
1602             "(status 0x%x channel %d i %d)", status, channel, i));
1603         i--;
1604         for (; i >= 0; i--) {
1605                 channel = ((p_hxge_dma_common_t)dma_buf_p[i])->dma_channel;
1606                 hxge_unmap_txdma_channel(hxgep, channel, tx_desc_rings[i],
1607                     tx_mbox_p[i]);
1608         }
1609 
1610         KMEM_FREE(tx_desc_rings, sizeof (p_tx_ring_t) * ndmas);
1611         KMEM_FREE(tx_rings, sizeof (tx_rings_t));
1612         KMEM_FREE(tx_mbox_p, sizeof (p_tx_mbox_t) * ndmas);
1613         KMEM_FREE(tx_mbox_areas_p, sizeof (tx_mbox_areas_t));
1614 
1615 hxge_map_txdma_exit:
1616         HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
1617             "==> hxge_map_txdma: (status 0x%x channel %d)", status, channel));
1618 
1619         return (status);
1620 }
1621 
1622 static void
1623 hxge_unmap_txdma(p_hxge_t hxgep)
1624 {
1625         int                     i, ndmas;
1626         uint8_t                 channel;
1627         p_tx_rings_t            tx_rings;
1628         p_tx_ring_t             *tx_desc_rings;
1629         p_tx_mbox_areas_t       tx_mbox_areas_p;
1630         p_tx_mbox_t             *tx_mbox_p;
1631         p_hxge_dma_pool_t       dma_buf_poolp;
1632 
1633         HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_unmap_txdma"));
1634 
1635         dma_buf_poolp = hxgep->tx_buf_pool_p;
1636         if (!dma_buf_poolp->buf_allocated) {
1637                 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1638                     "==> hxge_unmap_txdma: buf not allocated"));
1639                 return;
1640         }
1641         ndmas = dma_buf_poolp->ndmas;
1642         if (!ndmas) {
1643                 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1644                     "<== hxge_unmap_txdma: no dma allocated"));
1645                 return;
1646         }
1647         tx_rings = hxgep->tx_rings;
1649         if (tx_rings == NULL) {
1650                 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1651                     "<== hxge_unmap_txdma: NULL ring pointer"));
1652                 return;
1653         }
1654         tx_desc_rings = tx_rings->rings;
1655         if (tx_desc_rings == NULL) {
1656                 HXGE_DEBUG_MSG((hxgep, TX_CTL,
1657                     "<== hxge_unmap_txdma: NULL ring pointers"));
1658                 return;
1659         }
1660         HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_unmap_txdma: "
1661             "tx_rings $%p tx_desc_rings $%p ndmas %d",
1662             tx_rings, tx_desc_rings, ndmas));
1663 
1664         tx_mbox_areas_p = hxgep->tx_mbox_areas_p;
1665         tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p;
1666 
1667         for (i = 0; i < ndmas; i++) {
1668                 channel = tx_desc_rings[i]->tdc;
1669                 (void) hxge_unmap_txdma_channel(hxgep, channel,
1670                     (p_tx_ring_t)tx_desc_rings[i],
1671                     (p_tx_mbox_t)tx_mbox_p[i]);
1672         }
1673 
1674         KMEM_FREE(tx_desc_rings, sizeof (p_tx_ring_t) * ndmas);
1675         KMEM_FREE(tx_rings, sizeof (tx_rings_t));
1676         KMEM_FREE(tx_mbox_p, sizeof (p_tx_mbox_t) * ndmas);
1677         KMEM_FREE(tx_mbox_areas_p, sizeof (tx_mbox_areas_t));
1678 
1679         HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "<== hxge_unmap_txdma"));
1680 }
1681 
1682 static hxge_status_t
1683 hxge_map_txdma_channel(p_hxge_t hxgep, uint16_t channel,
1684     p_hxge_dma_common_t *dma_buf_p, p_tx_ring_t *tx_desc_p,
1685     uint32_t num_chunks, p_hxge_dma_common_t *dma_cntl_p,
1686     p_tx_mbox_t *tx_mbox_p)
1687 {
1688         int status = HXGE_OK;
1689 
1690         /*
1691          * Set up and prepare buffer blocks, descriptors and mailbox.
1692          */
1693         HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
1694             "==> hxge_map_txdma_channel (channel %d)", channel));
1695 
1696         /*
1697          * Transmit buffer blocks
1698          */
1699         status = hxge_map_txdma_channel_buf_ring(hxgep, channel,
1700             dma_buf_p, tx_desc_p, num_chunks);
1701         if (status != HXGE_OK) {
1702                 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1703                     "==> hxge_map_txdma_channel (channel %d): "
1704                     "map buffer failed 0x%x", channel, status));
1705                 goto hxge_map_txdma_channel_exit;
1706         }
1707         /*
1708          * Transmit block ring, and mailbox.
1709          */
1710         hxge_map_txdma_channel_cfg_ring(hxgep, channel, dma_cntl_p, *tx_desc_p,
1711             tx_mbox_p);
1712 
1713         goto hxge_map_txdma_channel_exit;
1714 
1715 hxge_map_txdma_channel_fail1:
1716         HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
1717             "==> hxge_map_txdma_channel: unmap buf"
1718             "(status 0x%x channel %d)", status, channel));
1719         hxge_unmap_txdma_channel_buf_ring(hxgep, *tx_desc_p);
1720 
1721 hxge_map_txdma_channel_exit:
1722         HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
1723             "<== hxge_map_txdma_channel: (status 0x%x channel %d)",
1724             status, channel));
1725 
1726         return (status);
1727 }
1728 
1729 /*ARGSUSED*/
1730 static void
1731 hxge_unmap_txdma_channel(p_hxge_t hxgep, uint16_t channel,
1732     p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p)
1733 {
1734         HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
1735             "==> hxge_unmap_txdma_channel (channel %d)", channel));
1736 
1737         /* unmap tx block ring, and mailbox.  */
1738         (void) hxge_unmap_txdma_channel_cfg_ring(hxgep, tx_ring_p, tx_mbox_p);
1739 
1740         /* unmap buffer blocks */
1741         (void) hxge_unmap_txdma_channel_buf_ring(hxgep, tx_ring_p);
1742 
1743         HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "<== hxge_unmap_txdma_channel"));
1744 }
1745 
1746 /*ARGSUSED*/
1747 static void
1748 hxge_map_txdma_channel_cfg_ring(p_hxge_t hxgep, uint16_t dma_channel,
1749     p_hxge_dma_common_t *dma_cntl_p, p_tx_ring_t tx_ring_p,
1750     p_tx_mbox_t *tx_mbox_p)
1751 {
1752         p_tx_mbox_t             mboxp;
1753         p_hxge_dma_common_t     cntl_dmap;
1754         p_hxge_dma_common_t     dmap;
1755         tdc_tdr_cfg_t           *tx_ring_cfig_p;
1756         tdc_tdr_kick_t          *tx_ring_kick_p;
1757         tdc_tdr_cfg_t           *tx_cs_p;
1758         tdc_int_mask_t          *tx_evmask_p;
1759         tdc_mbh_t               *mboxh_p;
1760         tdc_mbl_t               *mboxl_p;
1761         uint64_t                tx_desc_len;
1762 
1763         HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
1764             "==> hxge_map_txdma_channel_cfg_ring"));
1765 
1766         cntl_dmap = *dma_cntl_p;
1767 
1768         dmap = (p_hxge_dma_common_t)&tx_ring_p->tdc_desc;
1769         hxge_setup_dma_common(dmap, cntl_dmap, tx_ring_p->tx_ring_size,
1770             sizeof (tx_desc_t));
1771 
1772         /*
1773          * Zero out transmit ring descriptors.
1774          */
1775         bzero((caddr_t)dmap->kaddrp, dmap->alength);
1776         tx_ring_cfig_p = &(tx_ring_p->tx_ring_cfig);
1777         tx_ring_kick_p = &(tx_ring_p->tx_ring_kick);
1778         tx_cs_p = &(tx_ring_p->tx_cs);
1779         tx_evmask_p = &(tx_ring_p->tx_evmask);
1780         tx_ring_cfig_p->value = 0;
1781         tx_ring_kick_p->value = 0;
1782         tx_cs_p->value = 0;
1783         tx_evmask_p->value = 0;
1784 
1785         HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
1786             "==> hxge_map_txdma_channel_cfg_ring: channel %d des $%p",
1787             dma_channel, dmap->dma_cookie.dmac_laddress));
1788 
1789         tx_ring_cfig_p->value = 0;
1790 
1791         /* Hydra len is 11 bits and the lower 5 bits are 0s */
1792         tx_desc_len = (uint64_t)(tx_ring_p->tx_ring_size >> 5);
1793         tx_ring_cfig_p->value =
1794             (dmap->dma_cookie.dmac_laddress & TDC_TDR_CFG_ADDR_MASK) |
1795             (tx_desc_len << TDC_TDR_CFG_LEN_SHIFT);
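     	/*
     	 * Worked example (illustrative): a 2048-descriptor ring gives
     	 * tx_desc_len = 2048 >> 5 = 64; the LEN field counts the ring in
     	 * units of 32 descriptors, which is why the low 5 bits of the
     	 * length are always zeros, as noted above.
     	 */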
1796 
1797         HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
1798             "==> hxge_map_txdma_channel_cfg_ring: channel %d cfg 0x%llx",
1799             dma_channel, tx_ring_cfig_p->value));
1800 
1801         tx_cs_p->bits.reset = 1;
1802 
1803         /* Map in mailbox */
1804         mboxp = (p_tx_mbox_t)KMEM_ZALLOC(sizeof (tx_mbox_t), KM_SLEEP);
1805         dmap = (p_hxge_dma_common_t)&mboxp->tx_mbox;
1806         hxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (txdma_mailbox_t));
1807         mboxh_p = (tdc_mbh_t *)&tx_ring_p->tx_mbox_mbh;
1808         mboxl_p = (tdc_mbl_t *)&tx_ring_p->tx_mbox_mbl;
1809         mboxh_p->value = mboxl_p->value = 0;
1810 
1811         HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
1812             "==> hxge_map_txdma_channel_cfg_ring: mbox 0x%lx",
1813             dmap->dma_cookie.dmac_laddress));
1814 
1815         mboxh_p->bits.mbaddr = ((dmap->dma_cookie.dmac_laddress >>
1816             TDC_MBH_ADDR_SHIFT) & TDC_MBH_MASK);
1817         mboxl_p->bits.mbaddr = ((dmap->dma_cookie.dmac_laddress &
1818             TDC_MBL_MASK) >> TDC_MBL_SHIFT);
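     	/*
     	 * Illustrative note: the 64-bit mailbox DMA address is split across
     	 * the MBH/MBL register pair, MBH holding the bits above
     	 * TDC_MBH_ADDR_SHIFT and MBL the masked low-order bits.  Purely as
     	 * an example, if the shift were 32, an address such as 0x123456000
     	 * would program mbh.mbaddr with 0x1 and mbl.mbaddr with the
     	 * 0x23456000 portion; the real field widths come from the TDC_MB*
     	 * macros, not from this note.
     	 */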
1819 
1820         HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
1821             "==> hxge_map_txdma_channel_cfg_ring: mbox 0x%lx",
1822             dmap->dma_cookie.dmac_laddress));
1823         HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
1824             "==> hxge_map_txdma_channel_cfg_ring: hmbox $%p mbox $%p",
1825             mboxh_p->bits.mbaddr, mboxl_p->bits.mbaddr));
1826 
1827         /*
1828          * Set page valid and no mask
1829          */
1830         tx_ring_p->page_hdl.value = 0;
1831 
1832         *tx_mbox_p = mboxp;
1833 
1834         HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
1835             "<== hxge_map_txdma_channel_cfg_ring"));
1836 }
1837 
1838 /*ARGSUSED*/
1839 static void
1840 hxge_unmap_txdma_channel_cfg_ring(p_hxge_t hxgep,
1841     p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p)
1842 {
1843         HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
1844             "==> hxge_unmap_txdma_channel_cfg_ring: channel %d",
1845             tx_ring_p->tdc));
1846 
1847         KMEM_FREE(tx_mbox_p, sizeof (tx_mbox_t));
1848 
1849         HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
1850             "<== hxge_unmap_txdma_channel_cfg_ring"));
1851 }
1852 
1853 static hxge_status_t
1854 hxge_map_txdma_channel_buf_ring(p_hxge_t hxgep, uint16_t channel,
1855     p_hxge_dma_common_t *dma_buf_p,
1856     p_tx_ring_t *tx_desc_p, uint32_t num_chunks)
1857 {
1858         p_hxge_dma_common_t     dma_bufp, tmp_bufp;
1859         p_hxge_dma_common_t     dmap;
1860         hxge_os_dma_handle_t    tx_buf_dma_handle;
1861         p_tx_ring_t             tx_ring_p;
1862         p_tx_msg_t              tx_msg_ring = NULL;
1863         hxge_status_t           status = HXGE_OK;
1864         int                     ddi_status = DDI_SUCCESS;
1865         int                     i, j, index;
1866         uint32_t                size, bsize;
1867         uint32_t                nblocks, nmsgs;
1868         char                    qname[TASKQ_NAMELEN];
1869 
1870         HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
1871             "==> hxge_map_txdma_channel_buf_ring"));
1872 
1873         dma_bufp = tmp_bufp = *dma_buf_p;
1874         HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
1875             " hxge_map_txdma_channel_buf_ring: channel %d to map %d "
1876             "chunks bufp $%p", channel, num_chunks, dma_bufp));
1877 
1878         nmsgs = 0;
1879         for (i = 0; i < num_chunks; i++, tmp_bufp++) {
1880                 nmsgs += tmp_bufp->nblocks;
1881                 HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
1882                     "==> hxge_map_txdma_channel_buf_ring: channel %d "
1883                     "bufp $%p nblocks %d nmsgs %d",
1884                     channel, tmp_bufp, tmp_bufp->nblocks, nmsgs));
1885         }
1886         if (!nmsgs) {
1887                 HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
1888                     "<== hxge_map_txdma_channel_buf_ring: channel %d "
1889                     "no msg blocks", channel));
1890                 status = HXGE_ERROR;
1891 
1892                 goto hxge_map_txdma_channel_buf_ring_exit;
1893         }
1894 
1895         tx_ring_p = (p_tx_ring_t)KMEM_ZALLOC(sizeof (tx_ring_t), KM_SLEEP);
1896         tx_ring_p->hxgep = hxgep;
1897         (void) snprintf(qname, TASKQ_NAMELEN, "hxge_%d_%d",
1898             hxgep->instance, channel);
1899         tx_ring_p->taskq = ddi_taskq_create(hxgep->dip, qname, 1,
1900             TASKQ_DEFAULTPRI, 0);
1901         if (tx_ring_p->taskq == NULL) {
1902                 goto hxge_map_txdma_channel_buf_ring_fail1;
1903         }
1904 
1905         MUTEX_INIT(&tx_ring_p->lock, NULL, MUTEX_DRIVER,
1906             (void *) hxgep->interrupt_cookie);
1907         /*
1908          * Allocate transmit message rings and handles for packets not to be
1909          * copied to premapped buffers.
1910          */
1911         size = nmsgs * sizeof (tx_msg_t);
1912         tx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP);
1913         for (i = 0; i < nmsgs; i++) {
1914                 ddi_status = ddi_dma_alloc_handle(hxgep->dip, &hxge_tx_dma_attr,
1915                     DDI_DMA_DONTWAIT, 0, &tx_msg_ring[i].dma_handle);
1916                 if (ddi_status != DDI_SUCCESS) {
1917                         status |= HXGE_DDI_FAILED;
1918                         break;
1919                 }
1920         }
1921 
1922         if (i < nmsgs) {
1923                 HXGE_DEBUG_MSG((hxgep, HXGE_ERR_CTL,
1924                     "Allocate handles failed."));
1925 
1926                 goto hxge_map_txdma_channel_buf_ring_fail1;
1927         }
1928         tx_ring_p->tdc = channel;
1929         tx_ring_p->tx_msg_ring = tx_msg_ring;
1930         tx_ring_p->tx_ring_size = nmsgs;
1931         tx_ring_p->num_chunks = num_chunks;
1932         if (!hxge_tx_intr_thres) {
1933                 hxge_tx_intr_thres = tx_ring_p->tx_ring_size / 4;
1934         }
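     	/*
     	 * Example (illustrative): a 4096-entry ring with no tuned
     	 * hxge_tx_intr_thres gets the default of one quarter of the ring,
     	 * 4096 / 4 = 1024.
     	 */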
1935         tx_ring_p->tx_wrap_mask = tx_ring_p->tx_ring_size - 1;
1936         tx_ring_p->rd_index = 0;
1937         tx_ring_p->wr_index = 0;
1938         tx_ring_p->ring_head.value = 0;
1939         tx_ring_p->ring_kick_tail.value = 0;
1940         tx_ring_p->descs_pending = 0;
1941 
1942         HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
1943             "==> hxge_map_txdma_channel_buf_ring: channel %d "
1944             "actual tx desc max %d nmsgs %d (config hxge_tx_ring_size %d)",
1945             channel, tx_ring_p->tx_ring_size, nmsgs, hxge_tx_ring_size));
1946 
1947         /*
1948          * Map in buffers from the buffer pool.
1949          */
1950         index = 0;
1951         bsize = dma_bufp->block_size;
1952 
1953         HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_map_txdma_channel_buf_ring: "
1954             "dma_bufp $%p tx_rng_p $%p tx_msg_rng_p $%p bsize %d",
1955             dma_bufp, tx_ring_p, tx_msg_ring, bsize));
1956 
1957         for (i = 0; i < num_chunks; i++, dma_bufp++) {
1958                 bsize = dma_bufp->block_size;
1959                 nblocks = dma_bufp->nblocks;
1960                 tx_buf_dma_handle = dma_bufp->dma_handle;
1961                 HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
1962                     "==> hxge_map_txdma_channel_buf_ring: dma chunk %d "
1963                     "size %d dma_bufp $%p",
1964                     i, sizeof (hxge_dma_common_t), dma_bufp));
1965 
1966                 for (j = 0; j < nblocks; j++) {
1967                         tx_msg_ring[index].buf_dma_handle = tx_buf_dma_handle;
1968                         tx_msg_ring[index].offset_index = j;
1969                         dmap = &tx_msg_ring[index++].buf_dma;
1970                         HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
1971                             "==> hxge_map_txdma_channel_buf_ring: j %d "
1972                             "dmap $%p", j, dmap));
1973                         hxge_setup_dma_common(dmap, dma_bufp, 1, bsize);
1974                 }
1975         }
1976 
1977         if (i < num_chunks) {
1978                 status = HXGE_ERROR;
1979 
1980                 goto hxge_map_txdma_channel_buf_ring_fail1;
1981         }
1982 
1983         *tx_desc_p = tx_ring_p;
1984 
1985         goto hxge_map_txdma_channel_buf_ring_exit;
1986 
1987 hxge_map_txdma_channel_buf_ring_fail1:
1988         if (tx_ring_p->taskq) {
1989                 ddi_taskq_destroy(tx_ring_p->taskq);
1990                 tx_ring_p->taskq = NULL;
1991         }
1992 
1993         if (tx_msg_ring != NULL) {
1994                 /* The ring was zero-filled: unallocated handles are NULL */
1995                 for (index = 0; index < nmsgs; index++) {
1996                         if (tx_msg_ring[index].dma_handle != NULL)
1997                                 ddi_dma_free_handle(
1998                                     &tx_msg_ring[index].dma_handle);
1999                 }
2000                 MUTEX_DESTROY(&tx_ring_p->lock);
                     KMEM_FREE(tx_msg_ring, size);
             }
2001         KMEM_FREE(tx_ring_p, sizeof (tx_ring_t));
2002 
2003         status = HXGE_ERROR;
2004 
2005 hxge_map_txdma_channel_buf_ring_exit:
2006         HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
2007             "<== hxge_map_txdma_channel_buf_ring status 0x%x", status));
2008 
2009         return (status);
2010 }
2011 
2012 /*ARGSUSED*/
2013 static void
2014 hxge_unmap_txdma_channel_buf_ring(p_hxge_t hxgep, p_tx_ring_t tx_ring_p)
2015 {
2016         p_tx_msg_t      tx_msg_ring;
2017         p_tx_msg_t      tx_msg_p;
2018         int             i;
2019 
2020         HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
2021             "==> hxge_unmap_txdma_channel_buf_ring"));
2022         if (tx_ring_p == NULL) {
2023                 HXGE_DEBUG_MSG((hxgep, TX_CTL,
2024                     "<== hxge_unmap_txdma_channel_buf_ring: NULL ringp"));
2025                 return;
2026         }
2027         HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
2028             "==> hxge_unmap_txdma_channel_buf_ring: channel %d",
2029             tx_ring_p->tdc));
2030 
2031         MUTEX_ENTER(&tx_ring_p->lock);
2032         tx_msg_ring = tx_ring_p->tx_msg_ring;
2033         for (i = 0; i < tx_ring_p->tx_ring_size; i++) {
2034                 tx_msg_p = &tx_msg_ring[i];
2035                 if (tx_msg_p->flags.dma_type == USE_DVMA) {
2036                         HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "entry = %d", i));
2037                         (void) dvma_unload(tx_msg_p->dvma_handle, 0, -1);
2038                         tx_msg_p->dvma_handle = NULL;
2039                         if (tx_ring_p->dvma_wr_index ==
2040                             tx_ring_p->dvma_wrap_mask) {
2041                                 tx_ring_p->dvma_wr_index = 0;
2042                         } else {
2043                                 tx_ring_p->dvma_wr_index++;
2044                         }
2045                         tx_ring_p->dvma_pending--;
2046                 } else if (tx_msg_p->flags.dma_type == USE_DMA) {
2047                         if (ddi_dma_unbind_handle(tx_msg_p->dma_handle)) {
2048                                 cmn_err(CE_WARN, "hxge_unmap_tx_buf_ring: "
2049                                     "ddi_dma_unbind_handle failed.");
2050                         }
2051                 }
2052                 if (tx_msg_p->tx_message != NULL) {
2053                         freemsg(tx_msg_p->tx_message);
2054                         tx_msg_p->tx_message = NULL;
2055                 }
2056         }
2057 
2058         for (i = 0; i < tx_ring_p->tx_ring_size; i++) {
2059                 if (tx_msg_ring[i].dma_handle != NULL) {
2060                         ddi_dma_free_handle(&tx_msg_ring[i].dma_handle);
2061                 }
2062         }
2063         MUTEX_EXIT(&tx_ring_p->lock);
2064 
2065         if (tx_ring_p->taskq) {
2066                 ddi_taskq_destroy(tx_ring_p->taskq);
2067                 tx_ring_p->taskq = NULL;
2068         }
2069 
2070         MUTEX_DESTROY(&tx_ring_p->lock);
2071         KMEM_FREE(tx_msg_ring, sizeof (tx_msg_t) * tx_ring_p->tx_ring_size);
2072         KMEM_FREE(tx_ring_p, sizeof (tx_ring_t));
2073 
2074         HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
2075             "<== hxge_unmap_txdma_channel_buf_ring"));
2076 }
2077 
2078 static hxge_status_t
2079 hxge_txdma_hw_start(p_hxge_t hxgep)
2080 {
2081         int                     i, ndmas;
2082         uint16_t                channel;
2083         p_tx_rings_t            tx_rings;
2084         p_tx_ring_t             *tx_desc_rings;
2085         p_tx_mbox_areas_t       tx_mbox_areas_p;
2086         p_tx_mbox_t             *tx_mbox_p;
2087         hxge_status_t           status = HXGE_OK;
2088         uint64_t                tmp;
2089 
2090         HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_txdma_hw_start"));
2091 
2092         /*
2093          * Initialize the REORD table: 1. Disable the VMAC. 2. Reset the
2094          * FIFO Err Stat. 3. Scrub the memory and check for errors.
2095          */
2096         (void) hxge_tx_vmac_disable(hxgep);
2097 
2098         /*
2099          * Clear the error status
2100          */
2101         HXGE_REG_WR64(hxgep->hpi_handle, TDC_FIFO_ERR_STAT, 0x7);
2102 
2103         /*
2104          * Scrub the rtab memory for the TDC and reset the TDC.
2105          */
2106         HXGE_REG_WR64(hxgep->hpi_handle, TDC_REORD_TBL_DATA_HI, 0x0ULL);
2107         HXGE_REG_WR64(hxgep->hpi_handle, TDC_REORD_TBL_DATA_LO, 0x0ULL);
2108 
2109         for (i = 0; i < 256; i++) {
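     		/*
     		 * Write the command register with an indirect write
     		 * instruction (bit 30 clear, in contrast with the read
     		 * instruction below) to commit the zeroed data registers
     		 * to entry i.
     		 */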
2110                 HXGE_REG_WR64(hxgep->hpi_handle, TDC_REORD_TBL_CMD,
2111                     (uint64_t)i);
2112 
2113                 /*
2114                  * Write the command register with an indirect read instruction
2115                  */
2116                 tmp = (0x1ULL << 30) | i;
2117                 HXGE_REG_WR64(hxgep->hpi_handle, TDC_REORD_TBL_CMD, tmp);
2118 
2119                 /*
2120                  * Wait for status done
2121                  */
2122                 tmp = 0;
2123                 do {
2124                         HXGE_REG_RD64(hxgep->hpi_handle, TDC_REORD_TBL_CMD,
2125                             &tmp);
2126                 } while (((tmp >> 31) & 0x1ULL) == 0x0);
2127         }
2128 
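     	/*
     	 * Sketch of the indirect access protocol, as inferred from this
     	 * routine rather than from a programming manual:
     	 *
     	 *	CMD = entry;                 write DATA_HI/LO to entry
     	 *	CMD = (1 << 30) | entry;     read entry into DATA_HI/LO
     	 *	CMD bit 31                   set by hardware when done
     	 */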
2129         for (i = 0; i < 256; i++) {
2130                 /*
2131                  * Write the command register with an indirect read instruction
2132                  */
2133                 tmp = (0x1ULL << 30) | i;
2134                 HXGE_REG_WR64(hxgep->hpi_handle, TDC_REORD_TBL_CMD, tmp);
2135 
2136                 /*
2137                  * Wait for status done
2138                  */
2139                 tmp = 0;
2140                 do {
2141                         HXGE_REG_RD64(hxgep->hpi_handle, TDC_REORD_TBL_CMD,
2142                             &tmp);
2143                 } while (((tmp >> 31) & 0x1ULL) == 0x0);
2144 
2145                 HXGE_REG_RD64(hxgep->hpi_handle, TDC_REORD_TBL_DATA_HI, &tmp);
2146                 if (0x1ff00ULL != (0x1ffffULL & tmp)) {
2147                         HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "PANIC ReordTbl "
2148                             "unexpected data (hi), entry: %x, value: 0x%0llx\n",
2149                             i, (unsigned long long)tmp));
2150                         status = HXGE_ERROR;
2151                 }
2152 
2153                 HXGE_REG_RD64(hxgep->hpi_handle, TDC_REORD_TBL_DATA_LO, &tmp);
2154                 if (tmp != 0) {
2155                         HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "PANIC ReordTbl "
2156                             "unexpected data (lo), entry: %x\n", i));
2157                         status = HXGE_ERROR;
2158                 }
2159 
2160                 HXGE_REG_RD64(hxgep->hpi_handle, TDC_FIFO_ERR_STAT, &tmp);
2161                 if (tmp != 0) {
2162                         HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "PANIC ReordTbl "
2163                             "parity error, entry: %x, val 0x%llx\n",
2164                             i, (unsigned long long)tmp));
2165                         status = HXGE_ERROR;
2166                 }
2174         }
2175 
2176         if (status != HXGE_OK)
2177                 goto hxge_txdma_hw_start_exit;
2178 
2179         /*
2180          * Reset FIFO Error Status for the TDC and enable FIFO error events.
2181          */
2182         HXGE_REG_WR64(hxgep->hpi_handle, TDC_FIFO_ERR_STAT, 0x7);
2183         HXGE_REG_WR64(hxgep->hpi_handle, TDC_FIFO_ERR_MASK, 0x0);
2184 
2185         /*
2186          * Initialize the Transmit DMAs.
2187          */
2188         tx_rings = hxgep->tx_rings;
2189         if (tx_rings == NULL) {
2190                 HXGE_DEBUG_MSG((hxgep, TX_CTL,
2191                     "<== hxge_txdma_hw_start: NULL ring pointer"));
2192                 return (HXGE_ERROR);
2193         }
2194 
2195         tx_desc_rings = tx_rings->rings;
2196         if (tx_desc_rings == NULL) {
2197                 HXGE_DEBUG_MSG((hxgep, TX_CTL,
2198                     "<== hxge_txdma_hw_start: NULL ring pointers"));
2199                 return (HXGE_ERROR);
2200         }
2201         ndmas = tx_rings->ndmas;
2202         if (!ndmas) {
2203                 HXGE_DEBUG_MSG((hxgep, TX_CTL,
2204                     "<== hxge_txdma_hw_start: no dma channel allocated"));
2205                 return (HXGE_ERROR);
2206         }
2207         HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_txdma_hw_start: "
2208             "tx_rings $%p tx_desc_rings $%p ndmas %d",
2209             tx_rings, tx_desc_rings, ndmas));
2210 
2211         tx_mbox_areas_p = hxgep->tx_mbox_areas_p;
2212         tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p;
2213 
2214         /*
2215          * Init the DMAs.
2216          */
2217         for (i = 0; i < ndmas; i++) {
2218                 channel = tx_desc_rings[i]->tdc;
2219                 status = hxge_txdma_start_channel(hxgep, channel,
2220                     (p_tx_ring_t)tx_desc_rings[i],
2221                     (p_tx_mbox_t)tx_mbox_p[i]);
2222                 if (status != HXGE_OK) {
2223                         goto hxge_txdma_hw_start_fail1;
2224                 }
2225         }
2226 
2227         (void) hxge_tx_vmac_enable(hxgep);
2228 
2229         HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
2230             "==> hxge_txdma_hw_start: tx_rings $%p rings $%p",
2231             hxgep->tx_rings, hxgep->tx_rings->rings));
2232         HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
2233             "==> hxge_txdma_hw_start: tx_rings $%p tx_desc_rings $%p",
2234             hxgep->tx_rings, tx_desc_rings));
2235 
2236         goto hxge_txdma_hw_start_exit;
2237 
2238 hxge_txdma_hw_start_fail1:
2239         HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
2240             "==> hxge_txdma_hw_start: disable (status 0x%x channel %d i %d)",
2241             status, channel, i));
2242 
2243         for (; i >= 0; i--) {
2244                 channel = tx_desc_rings[i]->tdc;
2245                 (void) hxge_txdma_stop_channel(hxgep, channel,
2246                     (p_tx_ring_t)tx_desc_rings[i],
2247                     (p_tx_mbox_t)tx_mbox_p[i]);
2248         }
2249 
2250 hxge_txdma_hw_start_exit:
2251         HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
2252             "==> hxge_txdma_hw_start: (status 0x%x)", status));
2253 
2254         return (status);
2255 }
2256 
2257 static void
2258 hxge_txdma_hw_stop(p_hxge_t hxgep)
2259 {
2260         int                     i, ndmas;
2261         uint16_t                channel;
2262         p_tx_rings_t            tx_rings;
2263         p_tx_ring_t             *tx_desc_rings;
2264         p_tx_mbox_areas_t       tx_mbox_areas_p;
2265         p_tx_mbox_t             *tx_mbox_p;
2266 
2267         HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_txdma_hw_stop"));
2268 
2269         tx_rings = hxgep->tx_rings;
2270         if (tx_rings == NULL) {
2271                 HXGE_DEBUG_MSG((hxgep, TX_CTL,
2272                     "<== hxge_txdma_hw_stop: NULL ring pointer"));
2273                 return;
2274         }
2275 
2276         tx_desc_rings = tx_rings->rings;
2277         if (tx_desc_rings == NULL) {
2278                 HXGE_DEBUG_MSG((hxgep, TX_CTL,
2279                     "<== hxge_txdma_hw_stop: NULL ring pointers"));
2280                 return;
2281         }
2282 
2283         ndmas = tx_rings->ndmas;
2284         if (!ndmas) {
2285                 HXGE_DEBUG_MSG((hxgep, TX_CTL,
2286                     "<== hxge_txdma_hw_stop: no dma channel allocated"));
2287                 return;
2288         }
2289 
2290         HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_txdma_hw_stop: "
2291             "tx_rings $%p tx_desc_rings $%p", tx_rings, tx_desc_rings));
2292 
2293         tx_mbox_areas_p = hxgep->tx_mbox_areas_p;
2294         tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p;
2295 
2296         for (i = 0; i < ndmas; i++) {
2297                 channel = tx_desc_rings[i]->tdc;
2298                 (void) hxge_txdma_stop_channel(hxgep, channel,
2299                     (p_tx_ring_t)tx_desc_rings[i],
2300                     (p_tx_mbox_t)tx_mbox_p[i]);
2301         }
2302 
2303         HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_txdma_hw_stop: "
2304             "tx_rings $%p tx_desc_rings $%p", tx_rings, tx_desc_rings));
2305         HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "<== hxge_txdma_hw_stop"));
2306 }
2307 
2308 static hxge_status_t
2309 hxge_txdma_start_channel(p_hxge_t hxgep, uint16_t channel,
2310     p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p)
2311 {
2312         hxge_status_t status = HXGE_OK;
2313 
2314         HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
2315             "==> hxge_txdma_start_channel (channel %d)", channel));
2316         /*
2317          * TXDMA/TXC must be in stopped state.
2318          */
2319         (void) hxge_txdma_stop_inj_err(hxgep, channel);
2320 
2321         /*
2322          * Reset TXDMA channel
2323          */
2324         tx_ring_p->tx_cs.value = 0;
2325         tx_ring_p->tx_cs.bits.reset = 1;
2326         status = hxge_reset_txdma_channel(hxgep, channel,
2327             tx_ring_p->tx_cs.value);
2328         if (status != HXGE_OK) {
2329                 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2330                     "==> hxge_txdma_start_channel (channel %d)"
2331                     " reset channel failed 0x%x", channel, status));
2332 
2333                 goto hxge_txdma_start_channel_exit;
2334         }
2335 
2336         /*
2337          * Initialize the TXDMA channel specific FZC control configurations.
2338          * These FZC registers are pertaining to each TX channel (i.e. logical
2339          * pages).
2340          */
2341         status = hxge_init_fzc_txdma_channel(hxgep, channel,
2342             tx_ring_p, tx_mbox_p);
2343         if (status != HXGE_OK) {
2344                 goto hxge_txdma_start_channel_exit;
2345         }
2346 
2347         /*
2348          * Initialize the event masks.
2349          */
2350         tx_ring_p->tx_evmask.value = 0;
2351         status = hxge_init_txdma_channel_event_mask(hxgep,
2352             channel, &tx_ring_p->tx_evmask);
2353         if (status != HXGE_OK) {
2354                 goto hxge_txdma_start_channel_exit;
2355         }
2356 
2357         /*
2358          * Load TXDMA descriptors, buffers, mailbox, initialise the DMA
2359          * channels and enable each DMA channel.
2360          */
2361         status = hxge_enable_txdma_channel(hxgep, channel,
2362             tx_ring_p, tx_mbox_p);
2363         if (status != HXGE_OK) {
2364                 goto hxge_txdma_start_channel_exit;
2365         }
2366 
2367 hxge_txdma_start_channel_exit:
2368         HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "<== hxge_txdma_start_channel"));
2369 
2370         return (status);
2371 }
2372 
2373 /*ARGSUSED*/
2374 static hxge_status_t
2375 hxge_txdma_stop_channel(p_hxge_t hxgep, uint16_t channel,
2376     p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p)
2377 {
2378         int status = HXGE_OK;
2379 
2380         HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
2381             "==> hxge_txdma_stop_channel: channel %d", channel));
2382 
2383         /*
2384          * Stop (disable) the TXDMA and TXC. If the stop bit is set and the
2385          * STOP_N_GO bit is not set, a TXDMA reset will not set the reset state.
2386          */
2387         (void) hxge_txdma_stop_inj_err(hxgep, channel);
2388 
2389         /*
2390          * Reset TXDMA channel
2391          */
2392         tx_ring_p->tx_cs.value = 0;
2393         tx_ring_p->tx_cs.bits.reset = 1;
2394         status = hxge_reset_txdma_channel(hxgep, channel,
2395             tx_ring_p->tx_cs.value);
2396         if (status != HXGE_OK) {
2397                 goto hxge_txdma_stop_channel_exit;
2398         }
2399 
2400 hxge_txdma_stop_channel_exit:
2401         HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "<== hxge_txdma_stop_channel"));
2402 
2403         return (status);
2404 }
2405 
2406 static p_tx_ring_t
2407 hxge_txdma_get_ring(p_hxge_t hxgep, uint16_t channel)
2408 {
2409         int             index, ndmas;
2410         uint16_t        tdc;
2411         p_tx_rings_t    tx_rings;
2412 
2413         HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_get_ring"));
2414 
2415         tx_rings = hxgep->tx_rings;
2416         if (tx_rings == NULL) {
2417                 HXGE_DEBUG_MSG((hxgep, TX_CTL,
2418                     "<== hxge_txdma_get_ring: NULL ring pointer"));
2419                 return (NULL);
2420         }
2421         ndmas = tx_rings->ndmas;
2422         if (!ndmas) {
2423                 HXGE_DEBUG_MSG((hxgep, TX_CTL,
2424                     "<== hxge_txdma_get_ring: no channel allocated"));
2425                 return (NULL);
2426         }
2427         if (tx_rings->rings == NULL) {
2428                 HXGE_DEBUG_MSG((hxgep, TX_CTL,
2429                     "<== hxge_txdma_get_ring: NULL rings pointer"));
2430                 return (NULL);
2431         }
2432         HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_txdma_get_ring: "
2433             "tx_rings $%p tx_desc_rings $%p ndmas %d",
2434             tx_rings, tx_rings->rings, ndmas));
2435 
2436         for (index = 0; index < ndmas; index++) {
2437                 tdc = tx_rings->rings[index]->tdc;
2438                 HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
2439                     "==> hxge_txdma_get_ring: channel %d", tdc));
2440                 if (channel == tdc) {
2441                         HXGE_DEBUG_MSG((hxgep, TX_CTL,
2442                             "<== hxge_txdma_get_ring: tdc %d ring $%p",
2443                             tdc, tx_rings->rings[index]));
2444                         return (p_tx_ring_t)(tx_rings->rings[index]);
2445                 }
2446         }
2447 
2448         HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_get_ring"));
2449 
2450         return (NULL);
2451 }
2452 
2453 static p_tx_mbox_t
2454 hxge_txdma_get_mbox(p_hxge_t hxgep, uint16_t channel)
2455 {
2456         int                     index, tdc, ndmas;
2457         p_tx_rings_t            tx_rings;
2458         p_tx_mbox_areas_t       tx_mbox_areas_p;
2459         p_tx_mbox_t             *tx_mbox_p;
2460 
2461         HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_get_mbox"));
2462 
2463         tx_rings = hxgep->tx_rings;
2464         if (tx_rings == NULL) {
2465                 HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
2466                     "<== hxge_txdma_get_mbox: NULL ring pointer"));
2467                 return (NULL);
2468         }
2469         tx_mbox_areas_p = hxgep->tx_mbox_areas_p;
2470         if (tx_mbox_areas_p == NULL) {
2471                 HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
2472                     "<== hxge_txdma_get_mbox: NULL mbox pointer"));
2473                 return (NULL);
2474         }
2475         tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p;
2476 
2477         ndmas = tx_rings->ndmas;
2478         if (!ndmas) {
2479                 HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
2480                     "<== hxge_txdma_get_mbox: no channel allocated"));
2481                 return (NULL);
2482         }
2483         if (tx_rings->rings == NULL) {
2484                 HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
2485                     "<== hxge_txdma_get_mbox: NULL rings pointer"));
2486                 return (NULL);
2487         }
2488         HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_txdma_get_mbox: "
2489             "tx_rings $%p tx_desc_rings $%p ndmas %d",
2490             tx_rings, tx_rings->rings, ndmas));
2491 
2492         for (index = 0; index < ndmas; index++) {
2493                 tdc = tx_rings->rings[index]->tdc;
2494                 HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
2495                     "==> hxge_txdma_get_mbox: channel %d", tdc));
2496                 if (channel == tdc) {
2497                         HXGE_DEBUG_MSG((hxgep, TX_CTL,
2498                             "<== hxge_txdma_get_mbox: tdc %d ring $%p",
2499                             tdc, tx_rings->rings[index]));
2500                         return (p_tx_mbox_t)(tx_mbox_p[index]);
2501                 }
2502         }
2503 
2504         HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_get_mbox"));
2505 
2506         return (NULL);
2507 }
2508 
2509 /*ARGSUSED*/
2510 static hxge_status_t
2511 hxge_tx_err_evnts(p_hxge_t hxgep, uint_t index, p_hxge_ldv_t ldvp,
2512     tdc_stat_t cs)
2513 {
2514         hpi_handle_t            handle;
2515         uint8_t                 channel;
2516         p_tx_ring_t             *tx_rings;
2517         p_tx_ring_t             tx_ring_p;
2518         p_hxge_tx_ring_stats_t  tdc_stats;
2519         boolean_t               txchan_fatal = B_FALSE;
2520         hxge_status_t           status = HXGE_OK;
2521         tdc_drop_cnt_t          drop_cnt;
2522 
2523         HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "==> hxge_tx_err_evnts"));
2524         handle = HXGE_DEV_HPI_HANDLE(hxgep);
2525         channel = ldvp->channel;
2526 
2527         tx_rings = hxgep->tx_rings->rings;
2528         tx_ring_p = tx_rings[index];
2529         tdc_stats = tx_ring_p->tdc_stats;
2530 
2531         /* Get the error counts if any */
2532         TXDMA_REG_READ64(handle, TDC_DROP_CNT, channel, &drop_cnt.value);
2533         tdc_stats->count_hdr_size_err += drop_cnt.bits.hdr_size_error_count;
2534         tdc_stats->count_runt += drop_cnt.bits.runt_count;
2535         tdc_stats->count_abort += drop_cnt.bits.abort_count;
2536 
2537         if (cs.bits.peu_resp_err) {
2538                 tdc_stats->peu_resp_err++;
2539                 HXGE_FM_REPORT_ERROR(hxgep, channel,
2540                     HXGE_FM_EREPORT_TDMC_PEU_RESP_ERR);
2541                 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2542                     "==> hxge_tx_err_evnts(channel %d): "
2543                     "fatal error: peu_resp_err", channel));
2544                 txchan_fatal = B_TRUE;
2545         }
2546 
2547         if (cs.bits.pkt_size_hdr_err) {
2548                 tdc_stats->pkt_size_hdr_err++;
2549                 HXGE_FM_REPORT_ERROR(hxgep, channel,
2550                     HXGE_FM_EREPORT_TDMC_PKT_SIZE_HDR_ERR);
2551                 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2552                     "==> hxge_tx_err_evnts(channel %d): "
2553                     "fatal error: pkt_size_hdr_err", channel));
2554                 txchan_fatal = B_TRUE;
2555         }
2556 
2557         if (cs.bits.runt_pkt_drop_err) {
2558                 tdc_stats->runt_pkt_drop_err++;
2559                 HXGE_FM_REPORT_ERROR(hxgep, channel,
2560                     HXGE_FM_EREPORT_TDMC_RUNT_PKT_DROP_ERR);
2561                 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2562                     "==> hxge_tx_err_evnts(channel %d): "
2563                     "fatal error: runt_pkt_drop_err", channel));
2564                 txchan_fatal = B_TRUE;
2565         }
2566 
2567         if (cs.bits.pkt_size_err) {
2568                 tdc_stats->pkt_size_err++;
2569                 HXGE_FM_REPORT_ERROR(hxgep, channel,
2570                     HXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR);
2571                 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2572                     "==> hxge_tx_err_evnts(channel %d): "
2573                     "fatal error: pkt_size_err", channel));
2574                 txchan_fatal = B_TRUE;
2575         }
2576 
2577         if (cs.bits.tx_rng_oflow) {
2578                 tdc_stats->tx_rng_oflow++;
2579                 if (tdc_stats->tx_rng_oflow == 1)
2580                         HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2581                             "==> hxge_tx_err_evnts(channel %d): "
2582                             "fatal error: tx_rng_oflow", channel));
2583         }
2584 
2585         if (cs.bits.pref_par_err) {
2586                 tdc_stats->pref_par_err++;
2587 
2588                 /* Get the address of parity error read data */
2589                 TXDMA_REG_READ64(hxgep->hpi_handle, TDC_PREF_PAR_LOG,
2590                     channel, &tdc_stats->errlog.value);
2591 
2592                 HXGE_FM_REPORT_ERROR(hxgep, channel,
2593                     HXGE_FM_EREPORT_TDMC_PREF_PAR_ERR);
2594                 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2595                     "==> hxge_tx_err_evnts(channel %d): "
2596                     "fatal error: pref_par_err", channel));
2597                 txchan_fatal = B_TRUE;
2598         }
2599 
2600         if (cs.bits.tdr_pref_cpl_to) {
2601                 tdc_stats->tdr_pref_cpl_to++;
2602                 HXGE_FM_REPORT_ERROR(hxgep, channel,
2603                     HXGE_FM_EREPORT_TDMC_TDR_PREF_CPL_TO);
2604                 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2605                     "==> hxge_tx_err_evnts(channel %d): "
2606                     "fatal error: tdr_pref_cpl_to", channel));
2607                 txchan_fatal = B_TRUE;
2608         }
2609 
2610         if (cs.bits.pkt_cpl_to) {
2611                 tdc_stats->pkt_cpl_to++;
2612                 HXGE_FM_REPORT_ERROR(hxgep, channel,
2613                     HXGE_FM_EREPORT_TDMC_PKT_CPL_TO);
2614                 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2615                     "==> hxge_tx_err_evnts(channel %d): "
2616                     "fatal error: pkt_cpl_to", channel));
2617                 txchan_fatal = B_TRUE;
2618         }
2619 
2620         if (cs.bits.invalid_sop) {
2621                 tdc_stats->invalid_sop++;
2622                 HXGE_FM_REPORT_ERROR(hxgep, channel,
2623                     HXGE_FM_EREPORT_TDMC_INVALID_SOP);
2624                 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2625                     "==> hxge_tx_err_evnts(channel %d): "
2626                     "fatal error: invalid_sop", channel));
2627                 txchan_fatal = B_TRUE;
2628         }
2629 
2630         if (cs.bits.unexpected_sop) {
2631                 tdc_stats->unexpected_sop++;
2632                 HXGE_FM_REPORT_ERROR(hxgep, channel,
2633                     HXGE_FM_EREPORT_TDMC_UNEXPECTED_SOP);
2634                 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2635                     "==> hxge_tx_err_evnts(channel %d): "
2636                     "fatal error: unexpected_sop", channel));
2637                 txchan_fatal = B_TRUE;
2638         }
2639 
2640         /* Clear error injection source in case this is an injected error */
2641         TXDMA_REG_WRITE64(hxgep->hpi_handle, TDC_STAT_INT_DBG, channel, 0);
2642 
2643         if (txchan_fatal) {
2644                 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2645                     " hxge_tx_err_evnts: "
2646                     " fatal error on channel %d cs 0x%llx\n",
2647                     channel, cs.value));
2648                 status = hxge_txdma_fatal_err_recover(hxgep, channel,
2649                     tx_ring_p);
2650                 if (status == HXGE_OK) {
2651                         FM_SERVICE_RESTORED(hxgep);
2652                 }
2653         }
2654 
2655         HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "<== hxge_tx_err_evnts"));
2656 
2657         return (status);
2658 }
2659 
2660 hxge_status_t
2661 hxge_txdma_handle_sys_errors(p_hxge_t hxgep)
2662 {
2663         hpi_handle_t            handle;
2664         hxge_status_t           status = HXGE_OK;
2665         tdc_fifo_err_stat_t     fifo_stat;
2666         hxge_tdc_sys_stats_t    *tdc_sys_stats;
2667 
2668         HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_handle_sys_errors"));
2669 
2670         handle = HXGE_DEV_HPI_HANDLE(hxgep);
2671 
2672         /*
2673          * The FIFO is shared by all channels.
2674          * Get the status of Reorder Buffer and Reorder Table Buffer Errors
2675          */
	HXGE_REG_RD64(handle, TDC_FIFO_ERR_STAT, &fifo_stat.value);

	/*
	 * Clear the error bits. Note that writing a 1 clears the bit. Writing
	 * a 0 does nothing.
	 */
	HXGE_REG_WR64(handle, TDC_FIFO_ERR_STAT, fifo_stat.value);

	tdc_sys_stats = &hxgep->statsp->tdc_sys_stats;
	if (fifo_stat.bits.reord_tbl_par_err) {
		tdc_sys_stats->reord_tbl_par_err++;
		HXGE_FM_REPORT_ERROR(hxgep, NULL,
		    HXGE_FM_EREPORT_TDMC_REORD_TBL_PAR);
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_txdma_handle_sys_errors: fatal error: "
		    "reord_tbl_par_err"));
	}

	if (fifo_stat.bits.reord_buf_ded_err) {
		tdc_sys_stats->reord_buf_ded_err++;
		HXGE_FM_REPORT_ERROR(hxgep, NULL,
		    HXGE_FM_EREPORT_TDMC_REORD_BUF_DED);
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_txdma_handle_sys_errors: "
		    "fatal error: reord_buf_ded_err"));
	}

	if (fifo_stat.bits.reord_buf_sec_err) {
		tdc_sys_stats->reord_buf_sec_err++;
		if (tdc_sys_stats->reord_buf_sec_err == 1)
			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
			    "==> hxge_txdma_handle_sys_errors: "
			    "reord_buf_sec_err"));
	}

	if (fifo_stat.bits.reord_tbl_par_err ||
	    fifo_stat.bits.reord_buf_ded_err) {
		status = hxge_tx_port_fatal_err_recover(hxgep);
		if (status == HXGE_OK) {
			FM_SERVICE_RESTORED(hxgep);
		}
	}

	HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_handle_sys_errors"));

	return (status);
}

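/*
 * hxge_txdma_fatal_err_recover
 *
 *	Attempt to recover a single TX DMA channel after a fatal error:
 *	stop the channel, reclaim completed descriptors, reset the
 *	channel, reprogram its FZC configuration and event mask, reset
 *	the software ring indices, and re-enable the channel.  The ring
 *	lock is held across the stop/reset/re-enable sequence.
 */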
static hxge_status_t
hxge_txdma_fatal_err_recover(p_hxge_t hxgep, uint16_t channel,
    p_tx_ring_t tx_ring_p)
{
	hpi_handle_t	handle;
	hpi_status_t	rs = HPI_SUCCESS;
	p_tx_mbox_t	tx_mbox_p;
	hxge_status_t	status = HXGE_OK;

	HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "==> hxge_txdma_fatal_err_recover"));
	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
	    "Recovering from TxDMAChannel#%d error...", channel));

	/*
	 * Stop the DMA channel and wait for the stop-done indication.
	 * If the stop-done bit is not set, report an error.
	 */
	handle = HXGE_DEV_HPI_HANDLE(hxgep);
	HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "stopping txdma channel(%d)",
	    channel));
	MUTEX_ENTER(&tx_ring_p->lock);
	rs = hpi_txdma_channel_control(handle, TXDMA_STOP, channel);
	if (rs != HPI_SUCCESS) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_txdma_fatal_err_recover (channel %d): "
		    "stop failed ", channel));

		goto fail;
	}
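	/*
	 * Reclaim descriptors the hardware has already completed so that
	 * their buffers are freed before the channel is reset.
	 */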
	HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "reclaiming txdma channel(%d)",
	    channel));
	(void) hxge_txdma_reclaim(hxgep, tx_ring_p, 0);

	/*
	 * Reset TXDMA channel
	 */
	HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "resetting txdma channel(%d)",
	    channel));
	if ((rs = hpi_txdma_channel_control(handle, TXDMA_RESET, channel)) !=
	    HPI_SUCCESS) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_txdma_fatal_err_recover (channel %d)"
		    " reset channel failed 0x%x", channel, rs));

		goto fail;
	}
	/*
	 * Reset the tail (kick) register to 0. Hardware will not reset it;
	 * a Tx overflow fatal error occurs if the tail is not 0 after reset.
	 */
	TXDMA_REG_WRITE64(handle, TDC_TDR_KICK, channel, 0);

	/*
	 * Restart the TXDMA channel.
	 *
	 * Initialize the TXDMA channel-specific FZC control configurations.
	 * These FZC registers pertain to each TX channel (i.e. logical
	 * pages).
	 */
	tx_mbox_p = hxge_txdma_get_mbox(hxgep, channel);
	HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "restarting txdma channel(%d)",
	    channel));
	status = hxge_init_fzc_txdma_channel(hxgep, channel,
	    tx_ring_p, tx_mbox_p);
	if (status != HXGE_OK)
		goto fail;

	/*
	 * Initialize the event masks.
	 */
	tx_ring_p->tx_evmask.value = 0;
	status = hxge_init_txdma_channel_event_mask(hxgep, channel,
	    &tx_ring_p->tx_evmask);
	if (status != HXGE_OK)
		goto fail;

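	/*
	 * The channel and its tail (kick) register were reset above, so
	 * restart the software producer/consumer indices from slot 0 to
	 * match the hardware.
	 */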
	tx_ring_p->wr_index_wrap = B_FALSE;
	tx_ring_p->wr_index = 0;
	tx_ring_p->rd_index = 0;

	/*
	 * Load the TXDMA descriptors, buffers, and mailbox, then initialize
	 * and enable the DMA channel.
	 */
	HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "enabling txdma channel(%d)",
	    channel));
	status = hxge_enable_txdma_channel(hxgep, channel,
	    tx_ring_p, tx_mbox_p);
	MUTEX_EXIT(&tx_ring_p->lock);
	if (status != HXGE_OK)
		goto fail;

	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
	    "Recovery Successful, TxDMAChannel#%d Restored", channel));
	HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "<== hxge_txdma_fatal_err_recover"));

	return (HXGE_OK);

fail:
	MUTEX_EXIT(&tx_ring_p->lock);
	HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL,
	    "hxge_txdma_fatal_err_recover (channel %d): "
	    "failed to recover this txdma channel", channel));
	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "Recovery failed"));

	return (status);
}

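/*
 * hxge_tx_port_fatal_err_recover
 *
 *	Recover the whole TX port after an uncorrectable TDC FIFO error:
 *	reset the TDC block from the PEU, stop every DMA channel (holding
 *	all ring locks for the duration), reclaim each ring, and restart
 *	the transmit hardware.
 */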
static hxge_status_t
hxge_tx_port_fatal_err_recover(p_hxge_t hxgep)
{
	hpi_handle_t	handle;
	hpi_status_t	rs = HPI_SUCCESS;
	hxge_status_t	status = HXGE_OK;
	p_tx_ring_t	*tx_desc_rings;
	p_tx_rings_t	tx_rings;
	p_tx_ring_t	tx_ring_p;
	int		i, ndmas;
	uint16_t	channel;
	block_reset_t	reset_reg;

	HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL,
	    "==> hxge_tx_port_fatal_err_recover"));
	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
	    "Recovering from TxPort error..."));

	handle = HXGE_DEV_HPI_HANDLE(hxgep);

	/* Reset TDC block from PEU for this fatal error */
	reset_reg.value = 0;
	reset_reg.bits.tdc_rst = 1;
	HXGE_REG_WR32(handle, BLOCK_RESET, reset_reg.value);

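	/* Give the block reset time to take effect before continuing. */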
	HXGE_DELAY(1000);

	/*
	 * Stop each DMA channel and wait for the stop-done indication.
	 * If the stop-done bit is not set, report an error.
	 */
	HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "stopping all DMA channels..."));

	tx_rings = hxgep->tx_rings;
	tx_desc_rings = tx_rings->rings;
	ndmas = tx_rings->ndmas;

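	/*
	 * Take every ring lock first so that no other thread can transmit
	 * on any channel while the port is being recovered.
	 */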
	for (i = 0; i < ndmas; i++) {
		if (tx_desc_rings[i] == NULL) {
			continue;
		}
		tx_ring_p = tx_rings->rings[i];
		MUTEX_ENTER(&tx_ring_p->lock);
	}

	for (i = 0; i < ndmas; i++) {
		if (tx_desc_rings[i] == NULL) {
			continue;
		}
		channel = tx_desc_rings[i]->tdc;
		tx_ring_p = tx_rings->rings[i];
		rs = hpi_txdma_channel_control(handle, TXDMA_STOP, channel);
		if (rs != HPI_SUCCESS) {
			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
			    "==> hxge_tx_port_fatal_err_recover (channel %d): "
			    "stop failed ", channel));

			goto fail;
		}
	}

	/*
	 * Do reclaim on all of the DMAs.
	 */
	HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "reclaiming all DMA channels..."));
	for (i = 0; i < ndmas; i++) {
		if (tx_desc_rings[i] == NULL) {
			continue;
		}
		tx_ring_p = tx_rings->rings[i];
		(void) hxge_txdma_reclaim(hxgep, tx_ring_p, 0);
	}

	/* Restart the TDC */
	if ((status = hxge_txdma_hw_start(hxgep)) != HXGE_OK)
		goto fail;

	for (i = 0; i < ndmas; i++) {
		if (tx_desc_rings[i] == NULL) {
			continue;
		}
		tx_ring_p = tx_rings->rings[i];
		MUTEX_EXIT(&tx_ring_p->lock);
	}

	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
	    "Recovery Successful, TxPort Restored"));
	HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL,
	    "<== hxge_tx_port_fatal_err_recover"));
	return (HXGE_OK);

fail:
	for (i = 0; i < ndmas; i++) {
		if (tx_desc_rings[i] == NULL) {
			continue;
		}
		tx_ring_p = tx_rings->rings[i];
		MUTEX_EXIT(&tx_ring_p->lock);
	}

	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "Recovery failed"));
	HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL,
	    "hxge_tx_port_fatal_err_recover: "
	    "failed to recover the TX port"));

	return (status);
}