552 ring->cur = ring->next = ring->stat = 0;
553 err = mwl_alloc_dma_mem(sc->sc_dev, &mwl_dma_attr,
554 count * sizeof (struct mwl_txdesc), &mwl_desc_accattr,
555 DDI_DMA_CONSISTENT, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
556 &ring->txdesc_dma);
557 if (err) {
558 MWL_DBG(MWL_DBG_DMA, "mwl: mwl_alloc_tx_ring(): "
559 "alloc tx ring failed, size %d\n",
560 (uint32_t)(count * sizeof (struct mwl_txdesc)));
561 return (DDI_FAILURE);
562 }
563
564 MWL_DBG(MWL_DBG_DMA, "mwl: mwl_alloc_tx_ring(): "
565 "dma len = %d\n", (uint32_t)(ring->txdesc_dma.alength));
566 ring->desc = (struct mwl_txdesc *)ring->txdesc_dma.mem_va;
567 ring->physaddr = ring->txdesc_dma.cookie.dmac_address;
568 bzero(ring->desc, count * sizeof (struct mwl_txdesc));
569
570 datadlen = count * sizeof (struct mwl_txbuf);
571 ring->buf = kmem_zalloc(datadlen, KM_SLEEP);
572 if (ring->buf == NULL) {
573 MWL_DBG(MWL_DBG_DMA, "mwl: mwl_alloc_tx_ring(): "
574 "could not alloc tx ring data buffer\n");
575 return (DDI_FAILURE);
576 }
577 bzero(ring->buf, count * sizeof (struct mwl_txbuf));
578
579 for (i = 0; i < count; i++) {
580 ds = &ring->desc[i];
581 bf = &ring->buf[i];
582 /* alloc DMA memory */
583 (void) mwl_alloc_dma_mem(sc->sc_dev, &mwl_dma_attr,
584 sc->sc_dmabuf_size,
585 &mwl_buf_accattr,
586 DDI_DMA_STREAMING,
587 DDI_DMA_WRITE | DDI_DMA_STREAMING,
588 &bf->txbuf_dma);
589 bf->bf_baddr = bf->txbuf_dma.cookie.dmac_address;
590 bf->bf_mem = (uint8_t *)(bf->txbuf_dma.mem_va);
591 bf->bf_daddr = ring->physaddr + _PTRDIFF(ds, ring->desc);
592 bf->bf_desc = ds;
593 }
594
595 (void) ddi_dma_sync(ring->txdesc_dma.dma_hdl,
596 0,
2209 pCmd->KeyParam.Length = LE_16(sizeof (pCmd->KeyParam));
2210 pCmd->KeyParam.KeyTypeId = LE_16(kv->keyTypeId);
2211 pCmd->KeyParam.KeyInfo = LE_32(kv->keyFlags);
2212 pCmd->KeyParam.KeyIndex = LE_32(kv->keyIndex);
2213 #ifdef MWL_MBSS_SUPPORT
2214 IEEE80211_ADDR_COPY(pCmd->KeyParam.Macaddr, mac);
2215 #else
2216 IEEE80211_ADDR_COPY(pCmd->Macaddr, mac);
2217 #endif
2218 retval = mwlExecuteCmd(sc, HostCmd_CMD_UPDATE_ENCRYPTION);
2219 return (retval);
2220 }
2221
2222 /* ARGSUSED */
2223 static struct ieee80211_node *
2224 mwl_node_alloc(struct ieee80211com *ic)
2225 {
2226 struct mwl_node *mn;
2227
2228 mn = kmem_zalloc(sizeof (struct mwl_node), KM_SLEEP);
2229 if (mn == NULL) {
2230 /* XXX stat+msg */
2231 MWL_DBG(MWL_DBG_MSG, "mwl: mwl_node_alloc(): "
2232 "alloc node failed\n");
2233 return (NULL);
2234 }
2235 return (&mn->mn_node);
2236 }
2237
/*
 * net80211 node-free hook: clear driver-private station state, run
 * the generic net80211 cleanup, then release the enclosing mwl_node.
 */
static void
mwl_node_free(struct ieee80211_node *ni)
{
	struct ieee80211com *ic = ni->in_ic;
	struct mwl_node *mn = MWL_NODE(ni);

	if (mn->mn_staid != 0) {
		/* NOTE(review): HAL station teardown appears disabled — confirm */
		// mwl_hal_delstation(mn->mn_hvap, vap->iv_myaddr);
		// delstaid(sc, mn->mn_staid);
		mn->mn_staid = 0;
	}
	/* let net80211 tear down its portion of the node first */
	ic->ic_node_cleanup(ni);
	/* assumes mn_node is the first member of mwl_node — TODO confirm */
	kmem_free(ni, sizeof (struct mwl_node));
}
2252
2253 /*
2254 * Allocate a key cache slot for a unicast key. The
|
552 ring->cur = ring->next = ring->stat = 0;
553 err = mwl_alloc_dma_mem(sc->sc_dev, &mwl_dma_attr,
554 count * sizeof (struct mwl_txdesc), &mwl_desc_accattr,
555 DDI_DMA_CONSISTENT, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
556 &ring->txdesc_dma);
557 if (err) {
558 MWL_DBG(MWL_DBG_DMA, "mwl: mwl_alloc_tx_ring(): "
559 "alloc tx ring failed, size %d\n",
560 (uint32_t)(count * sizeof (struct mwl_txdesc)));
561 return (DDI_FAILURE);
562 }
563
564 MWL_DBG(MWL_DBG_DMA, "mwl: mwl_alloc_tx_ring(): "
565 "dma len = %d\n", (uint32_t)(ring->txdesc_dma.alength));
566 ring->desc = (struct mwl_txdesc *)ring->txdesc_dma.mem_va;
567 ring->physaddr = ring->txdesc_dma.cookie.dmac_address;
568 bzero(ring->desc, count * sizeof (struct mwl_txdesc));
569
570 datadlen = count * sizeof (struct mwl_txbuf);
571 ring->buf = kmem_zalloc(datadlen, KM_SLEEP);
572 bzero(ring->buf, count * sizeof (struct mwl_txbuf));
573
574 for (i = 0; i < count; i++) {
575 ds = &ring->desc[i];
576 bf = &ring->buf[i];
577 /* alloc DMA memory */
578 (void) mwl_alloc_dma_mem(sc->sc_dev, &mwl_dma_attr,
579 sc->sc_dmabuf_size,
580 &mwl_buf_accattr,
581 DDI_DMA_STREAMING,
582 DDI_DMA_WRITE | DDI_DMA_STREAMING,
583 &bf->txbuf_dma);
584 bf->bf_baddr = bf->txbuf_dma.cookie.dmac_address;
585 bf->bf_mem = (uint8_t *)(bf->txbuf_dma.mem_va);
586 bf->bf_daddr = ring->physaddr + _PTRDIFF(ds, ring->desc);
587 bf->bf_desc = ds;
588 }
589
590 (void) ddi_dma_sync(ring->txdesc_dma.dma_hdl,
591 0,
2204 pCmd->KeyParam.Length = LE_16(sizeof (pCmd->KeyParam));
2205 pCmd->KeyParam.KeyTypeId = LE_16(kv->keyTypeId);
2206 pCmd->KeyParam.KeyInfo = LE_32(kv->keyFlags);
2207 pCmd->KeyParam.KeyIndex = LE_32(kv->keyIndex);
2208 #ifdef MWL_MBSS_SUPPORT
2209 IEEE80211_ADDR_COPY(pCmd->KeyParam.Macaddr, mac);
2210 #else
2211 IEEE80211_ADDR_COPY(pCmd->Macaddr, mac);
2212 #endif
2213 retval = mwlExecuteCmd(sc, HostCmd_CMD_UPDATE_ENCRYPTION);
2214 return (retval);
2215 }
2216
2217 /* ARGSUSED */
2218 static struct ieee80211_node *
2219 mwl_node_alloc(struct ieee80211com *ic)
2220 {
2221 struct mwl_node *mn;
2222
2223 mn = kmem_zalloc(sizeof (struct mwl_node), KM_SLEEP);
2224 return (&mn->mn_node);
2225 }
2226
/*
 * net80211 node-free hook: clear driver-private station state, run
 * the generic net80211 cleanup, then release the enclosing mwl_node.
 */
static void
mwl_node_free(struct ieee80211_node *ni)
{
	struct ieee80211com *ic = ni->in_ic;
	struct mwl_node *mn = MWL_NODE(ni);

	if (mn->mn_staid != 0) {
		/* NOTE(review): HAL station teardown appears disabled — confirm */
		// mwl_hal_delstation(mn->mn_hvap, vap->iv_myaddr);
		// delstaid(sc, mn->mn_staid);
		mn->mn_staid = 0;
	}
	/* let net80211 tear down its portion of the node first */
	ic->ic_node_cleanup(ni);
	/* assumes mn_node is the first member of mwl_node — TODO confirm */
	kmem_free(ni, sizeof (struct mwl_node));
}
2241
2242 /*
2243 * Allocate a key cache slot for a unicast key. The
|