Analysis of stmmac_xmit() in stmmac_main.c
/**
 * stmmac_xmit - Tx entry point of the driver
 * @skb : the socket buffer
 * @dev : device pointer
 * Description : this is the tx entry point of the driver.
 * It programs the chain or the ring and supports oversized frames
 * and SG feature.
 */
static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
{
    unsigned int first_entry, tx_packets, enh_desc;
    struct stmmac_priv *priv = netdev_priv(dev);
    unsigned int nopaged_len = skb_headlen(skb);
    int i, csum_insertion = 0, is_jumbo = 0;
    u32 queue = skb_get_queue_mapping(skb);
    int nfrags = skb_shinfo(skb)->nr_frags;
    int gso = skb_shinfo(skb)->gso_type;
    struct dma_edesc *tbs_desc = NULL;
    int entry, desc_size, first_tx;
    struct dma_desc *desc, *first;
    struct stmmac_tx_queue *tx_q;
    bool has_vlan, set_ic;
    dma_addr_t des;

    tx_q = &priv->tx_queue[queue];
    first_tx = tx_q->cur_tx;

    if (priv->tx_path_in_lpi_mode)
        stmmac_disable_eee_mode(priv);

    /* Manage oversized TCP frames for GMAC4 device */
    if (skb_is_gso(skb) && priv->tso) {
        if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
            return stmmac_tso_xmit(skb, dev);
        if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
            return stmmac_tso_xmit(skb, dev);
    }

    if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
        if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
            netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
            /* This is a hard error, log it. */
            netdev_err(priv->dev,
                       "%s: Tx Ring full when queue awake\n",
                       __func__);
        }
        return NETDEV_TX_BUSY;
    }

    /* Check if VLAN can be inserted by HW */
    has_vlan = stmmac_vlan_insert(priv, skb, tx_q);

    entry = tx_q->cur_tx;
    first_entry = entry;
    WARN_ON(tx_q->tx_skbuff[first_entry]);

    csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);

    if (likely(priv->extend_desc))
        desc = (struct dma_desc *)(tx_q->dma_etx + entry);
    else if (tx_q->tbs & STMMAC_TBS_AVAIL)
        desc = &tx_q->dma_entx[entry].basic;
    else
        desc = tx_q->dma_tx + entry;

    first = desc;

    if (has_vlan)
        stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);

    enh_desc = priv->plat->enh_desc;
    /* To program the descriptors according to the size of the frame */
    if (enh_desc)
        is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);

    if (unlikely(is_jumbo)) {
        entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
        if (unlikely(entry < 0) && (entry != -EINVAL))
            goto dma_map_err;
    }

    for (i = 0; i < nfrags; i++) {
        const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
        int len = skb_frag_size(frag);
        bool last_segment = (i == (nfrags - 1));

        entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
        WARN_ON(tx_q->tx_skbuff[entry]);

        if (likely(priv->extend_desc))
            desc = (struct dma_desc *)(tx_q->dma_etx + entry);
        else if (tx_q->tbs & STMMAC_TBS_AVAIL)
            desc = &tx_q->dma_entx[entry].basic;
        else
            desc = tx_q->dma_tx + entry;

        des = skb_frag_dma_map(priv->device, frag, 0, len,
                               DMA_TO_DEVICE);
        if (dma_mapping_error(priv->device, des))
            goto dma_map_err; /* should reuse desc w/o issues */

        tx_q->tx_skbuff_dma[entry].buf = des;

        stmmac_set_desc_addr(priv, desc, des);

        tx_q->tx_skbuff_dma[entry].map_as_page = true;
        tx_q->tx_skbuff_dma[entry].len = len;
        tx_q->tx_skbuff_dma[entry].last_segment = last_segment;

        /* Prepare the descriptor and set the own bit too */
        stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
                               priv->mode, 1, last_segment, skb->len);
    }

    /* Only the last descriptor gets to point to the skb. */
    tx_q->tx_skbuff[entry] = skb;

    /* According to the coalesce parameter the IC bit for the latest
     * segment is reset and the timer re-started to clean the tx status.
     * This approach takes care about the fragments: desc is the first
     * element in case of no SG.
     */
    tx_packets = (entry + 1) - first_tx;
    tx_q->tx_count_frames += tx_packets;

    if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
        set_ic = true;
    else if (!priv->tx_coal_frames)
        set_ic = false;
    else if (tx_packets > priv->tx_coal_frames)
        set_ic = true;
    else if ((tx_q->tx_count_frames % priv->tx_coal_frames) < tx_packets)
        set_ic = true;
    else
        set_ic = false;

    if (set_ic) {
        if (likely(priv->extend_desc))
            desc = &tx_q->dma_etx[entry].basic;
        else if (tx_q->tbs & STMMAC_TBS_AVAIL)
            desc = &tx_q->dma_entx[entry].basic;
        else
            desc = &tx_q->dma_tx[entry];

        tx_q->tx_count_frames = 0;
        stmmac_set_tx_ic(priv, desc);
        priv->xstats.tx_set_ic_bit++;
    }

    /* We've used all descriptors we need for this skb, however,
     * advance cur_tx so that it references a fresh descriptor.
     * ndo_start_xmit will fill this descriptor the next time it's
     * called and stmmac_tx_clean may clean up to this descriptor.
     */
    entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
    tx_q->cur_tx = entry;

    if (netif_msg_pktdata(priv)) {
        netdev_dbg(priv->dev,
                   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
                   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
                   entry, first, nfrags);

        netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
        print_pkt(skb->data, skb->len);
    }

    if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
        netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
                  __func__);
        netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
    }

    dev->stats.tx_bytes += skb->len;

    if (priv->sarc_type)
        stmmac_set_desc_sarc(priv, first, priv->sarc_type);

    skb_tx_timestamp(skb);

    /* Ready to fill the first descriptor and set the OWN bit w/o any
     * problems because all the descriptors are actually ready to be
     * passed to the DMA engine.
     */
    if (likely(!is_jumbo)) {
        bool last_segment = (nfrags == 0);

        des = dma_map_single(priv->device, skb->data,
                             nopaged_len, DMA_TO_DEVICE); /* DMA-map the linear part of the skb; des is the bus address */
        if (dma_mapping_error(priv->device, des))
            goto dma_map_err;

        tx_q->tx_skbuff_dma[first_entry].buf = des; /* saved so stmmac_tx_clean() can dma_unmap_single() it on TX completion */

        stmmac_set_desc_addr(priv, first, des);

        tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
        tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;

        if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
                     priv->hwts_tx_en)) {
            /* declare that device is doing timestamping */
            skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
            stmmac_enable_tx_timestamp(priv, first);
        }

        /* Prepare the first descriptor; its OWN bit is set separately
         * below by stmmac_set_tx_owner() */
        stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
                               csum_insertion, priv->mode, 0, last_segment,
                               skb->len);
    }

    if (tx_q->tbs & STMMAC_TBS_EN) {
        struct timespec64 ts = ns_to_timespec64(skb->tstamp);

        tbs_desc = &tx_q->dma_entx[first_entry];
        stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
    }

    stmmac_set_tx_owner(priv, first); /* hand the first descriptor over to the DMA engine */

    /* The own bit must be the latest setting done when prepare the
     * descriptor and then barrier is needed to make sure that
     * all is coherent before granting the DMA engine.
     */
    wmb();

    netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);

    stmmac_enable_dma_transmission(priv, priv->ioaddr);

    if (likely(priv->extend_desc))
        desc_size = sizeof(struct dma_extended_desc);
    else if (tx_q->tbs & STMMAC_TBS_AVAIL)
        desc_size = sizeof(struct dma_edesc);
    else
        desc_size = sizeof(struct dma_desc);

    tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
    stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue); /* program the tail pointer; this kicks off the DMA transmission */
    stmmac_tx_timer_arm(priv, queue);

    return NETDEV_TX_OK;

dma_map_err:
    netdev_err(priv->dev, "Tx DMA map failed\n");
    dev_kfree_skb(skb);
    priv->dev->stats.tx_dropped++;
    return NETDEV_TX_OK;
}
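Two pieces of ring arithmetic do most of the bookkeeping above: STMMAC_GET_ENTRY() advances an index with wrap-around (the ring size must be a power of two), and stmmac_tx_avail() computes the number of free descriptors from the producer index cur_tx and the consumer index dirty_tx. Below is a minimal stand-alone sketch of that logic; GET_ENTRY and tx_avail here are demo names mirroring the driver helpers, and the struct-free style and concrete values are purely illustrative:

#include <stdio.h>

#define DMA_TX_SIZE 512                                  /* ring size, power of two */
#define GET_ENTRY(x, size) (((x) + 1) & ((size) - 1))    /* wrap-around increment */

/* Free descriptors between producer (cur) and consumer (dirty).
 * One slot is always kept unused so that a full ring can be
 * distinguished from an empty one. */
static unsigned int tx_avail(unsigned int cur, unsigned int dirty)
{
    if (dirty > cur)
        return dirty - cur - 1;
    return DMA_TX_SIZE - cur + dirty - 1;
}

int main(void)
{
    unsigned int cur = 510, dirty = 4;

    printf("avail=%u\n", tx_avail(cur, dirty));              /* 5 */
    cur = GET_ENTRY(cur, DMA_TX_SIZE);                       /* 511 */
    cur = GET_ENTRY(cur, DMA_TX_SIZE);                       /* wraps to 0 */
    printf("cur=%u avail=%u\n", cur, tx_avail(cur, dirty));  /* cur=0 avail=3 */
    return 0;
}

Keeping one slot unused is why the full-ring check at the top of stmmac_xmit() is `stmmac_tx_avail() < nfrags + 1` rather than a comparison against the raw ring size.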
From this point the DMA engine takes over: it fetches each descriptor on its own, reads the buffer the descriptor points to, and pushes the buffer contents into the MAC's transmit queue (TX FIFO).
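The handshake that makes this safe is the OWN bit: stmmac_xmit() hands descriptors to the hardware by setting OWN (after the wmb()), the DMA clears it once a frame has been sent, and the driver's TX-clean path walks from dirty_tx, stops at the first descriptor the hardware still owns, then unmaps buffers and frees skbs. The following is a simplified stand-alone model of that producer/consumer contract, not the driver's actual code; xmit, dma_run, tx_clean, and RING_SIZE are illustrative names, and the real driver reads OWN out of a descriptor control word:

#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE 8
#define NEXT(x) (((x) + 1) & (RING_SIZE - 1))

struct desc { bool own; int len; };        /* own=true: hardware owns it */

static struct desc ring[RING_SIZE];
static unsigned int cur_tx, dirty_tx;      /* producer / consumer index */

/* xmit side: fill a descriptor, then hand it to the "DMA" */
static void xmit(int len)
{
    ring[cur_tx].len = len;
    /* a wmb() would go here: descriptor body visible before OWN */
    ring[cur_tx].own = true;
    cur_tx = NEXT(cur_tx);
}

/* "hardware": fetches owned descriptors in order and clears OWN */
static void dma_run(unsigned int frames)
{
    static unsigned int hw;
    while (frames-- && ring[hw].own) {
        ring[hw].own = false;              /* frame sent */
        hw = NEXT(hw);
    }
}

/* tx-clean side: reclaim up to the first descriptor still owned by HW */
static void tx_clean(void)
{
    while (dirty_tx != cur_tx && !ring[dirty_tx].own) {
        printf("reclaim entry %u (len %d)\n", dirty_tx, ring[dirty_tx].len);
        dirty_tx = NEXT(dirty_tx);         /* unmap + free skb would happen here */
    }
}

int main(void)
{
    xmit(60); xmit(1500); xmit(900);
    dma_run(2);                            /* HW has sent only two frames */
    tx_clean();                            /* reclaims entries 0 and 1; entry 2 still owned */
    return 0;
}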
