sfc/siena: Remove build references to missing functionality

Functionality not supported or needed on Siena includes:
- Anything for EF100.
- EF10 specifics such as register access, PIO and TSO offload.
Also, bind only to Siena NICs.

Remove EF10 specifics from nic.h.
The functions that start with efx_farch_ will be removed from sfc.ko
with a subsequent patch.
Add the efx_ prefix to siena_prepare_flush() to make it consistent
with the other APIs.

Signed-off-by: Martin Habets <habetsm.xilinx@gmail.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Author: Martin Habets, 2022-05-09 16:31:55 +01:00 (committed by Jakub Kicinski)
parent d48523cb88
commit 956f2d86cb
10 changed files with 17 additions and 458 deletions

drivers/net/ethernet/sfc/siena/efx.c

@@ -26,7 +26,6 @@
#include "efx.h"
#include "efx_common.h"
#include "efx_channels.h"
#include "ef100.h"
#include "rx_common.h"
#include "tx_common.h"
#include "nic.h"
@@ -795,22 +794,10 @@ static void efx_unregister_netdev(struct efx_nic *efx)
/* PCI device ID table */
static const struct pci_device_id efx_pci_table[] = {
{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0903), /* SFC9120 PF */
.driver_data = (unsigned long) &efx_hunt_a0_nic_type},
{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1903), /* SFC9120 VF */
.driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type},
{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0923), /* SFC9140 PF */
.driver_data = (unsigned long) &efx_hunt_a0_nic_type},
{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1923), /* SFC9140 VF */
.driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type},
{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0a03), /* SFC9220 PF */
.driver_data = (unsigned long) &efx_hunt_a0_nic_type},
{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1a03), /* SFC9220 VF */
.driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type},
{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0b03), /* SFC9250 PF */
.driver_data = (unsigned long) &efx_hunt_a0_nic_type},
{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1b03), /* SFC9250 VF */
.driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type},
{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0803), /* SFC9020 */
.driver_data = (unsigned long)&siena_a0_nic_type},
{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0813), /* SFL9021 */
.driver_data = (unsigned long)&siena_a0_nic_type},
{0} /* end of list */
};
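For context, the .driver_data in each table entry above is cast back to an efx_nic_type ops table at probe time; after this patch the only value it can carry is siena_a0_nic_type. A minimal sketch of that pattern, assuming the usual sfc probe flow (not the verbatim function):

static int efx_pci_probe(struct pci_dev *pci_dev,
			 const struct pci_device_id *entry)
{
	/* The match entry carries the NIC-type ops table registered in
	 * efx_pci_table; with the EF10 rows gone this can only ever be
	 * siena_a0_nic_type. */
	const struct efx_nic_type *type =
		(const struct efx_nic_type *)entry->driver_data;

	/* ... allocate the net_device and stash @type as efx->type ... */
	return 0;
}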
@@ -1298,14 +1285,8 @@ static int __init efx_init_module(void)
if (rc < 0)
goto err_pci;
rc = pci_register_driver(&ef100_pci_driver);
if (rc < 0)
goto err_pci_ef100;
return 0;
err_pci_ef100:
pci_unregister_driver(&efx_pci_driver);
err_pci:
efx_destroy_reset_workqueue();
err_reset:
@@ -1318,7 +1299,6 @@ static void __exit efx_exit_module(void)
{
printk(KERN_INFO "Solarflare NET driver unloading\n");
pci_unregister_driver(&ef100_pci_driver);
pci_unregister_driver(&efx_pci_driver);
efx_destroy_reset_workqueue();
unregister_netdevice_notifier(&efx_netdev_notifier);
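The two hunks above shrink the usual register-then-unwind ladder: with the EF100 driver gone there is one less registration to make and one less err label to unwind through. A generic sketch of the idiom, where setup_a/setup_b/teardown_a are hypothetical stand-ins for the real workqueue and PCI registration steps:

static int setup_a(void);	/* hypothetical: e.g. create reset workqueue */
static int setup_b(void);	/* hypothetical: e.g. pci_register_driver() */
static void teardown_a(void);

static int __init example_init(void)
{
	int rc;

	rc = setup_a();
	if (rc)
		goto err_a;
	rc = setup_b();
	if (rc)
		goto err_b;
	return 0;	/* nothing more to register, so no extra unwind step */

err_b:
	teardown_a();
err_a:
	return rc;
}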

drivers/net/ethernet/sfc/siena/efx.h

@@ -10,8 +10,6 @@
#include <linux/indirect_call_wrapper.h>
#include "net_driver.h"
#include "ef100_rx.h"
#include "ef100_tx.h"
#include "filter.h"
int efx_net_open(struct net_device *net_dev);
@@ -24,9 +22,8 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
netdev_tx_t __efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
static inline netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
return INDIRECT_CALL_2(tx_queue->efx->type->tx_enqueue,
ef100_enqueue_skb, __efx_enqueue_skb,
tx_queue, skb);
return INDIRECT_CALL_1(tx_queue->efx->type->tx_enqueue,
__efx_enqueue_skb, tx_queue, skb);
}
void efx_xmit_done_single(struct efx_tx_queue *tx_queue);
int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
@@ -40,16 +37,10 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
static inline void efx_rx_flush_packet(struct efx_channel *channel)
{
if (channel->rx_pkt_n_frags)
INDIRECT_CALL_2(channel->efx->type->rx_packet,
__ef100_rx_packet, __efx_rx_packet,
channel);
__efx_rx_packet(channel);
}
static inline bool efx_rx_buf_hash_valid(struct efx_nic *efx, const u8 *prefix)
{
if (efx->type->rx_buf_hash_valid)
return INDIRECT_CALL_1(efx->type->rx_buf_hash_valid,
ef100_rx_buf_hash_valid,
prefix);
return true;
}
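Both hunks in this file swap INDIRECT_CALL_2() for either INDIRECT_CALL_1() or a plain direct call, since the EF100 handlers no longer exist. Roughly (a simplified sketch of <linux/indirect_call_wrapper.h> with retpolines enabled, not the exact kernel text), the wrappers compare the function pointer against likely candidates so the common case becomes a direct, retpoline-free call:

#define INDIRECT_CALL_1(f, f1, ...)					\
	(likely((f) == (f1)) ? f1(__VA_ARGS__) : (f)(__VA_ARGS__))

#define INDIRECT_CALL_2(f, f2, f1, ...)					\
	(likely((f) == (f2)) ? f2(__VA_ARGS__)				\
			     : INDIRECT_CALL_1(f, f1, __VA_ARGS__))

With only one possible tx_enqueue implementation left, INDIRECT_CALL_1() keeps the devirtualised fast path, and the rx_packet case collapses to a direct __efx_rx_packet() call.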

drivers/net/ethernet/sfc/siena/nic.c

@@ -16,7 +16,6 @@
#include "bitfield.h"
#include "efx.h"
#include "nic.h"
#include "ef10_regs.h"
#include "farch_regs.h"
#include "io.h"
#include "workarounds.h"
@@ -195,7 +194,6 @@ struct efx_nic_reg {
#define REGISTER_BB(name) REGISTER(name, F, B, B)
#define REGISTER_BZ(name) REGISTER(name, F, B, Z)
#define REGISTER_CZ(name) REGISTER(name, F, C, Z)
#define REGISTER_DZ(name) REGISTER(name, E, D, Z)
static const struct efx_nic_reg efx_nic_regs[] = {
REGISTER_AZ(ADR_REGION),
@@ -302,9 +300,6 @@ static const struct efx_nic_reg efx_nic_regs[] = {
REGISTER_AB(XX_TXDRV_CTL),
/* XX_PRBS_CTL, XX_PRBS_CHK and XX_PRBS_ERR are not used */
/* XX_CORE_STAT is partly RC */
REGISTER_DZ(BIU_HW_REV_ID),
REGISTER_DZ(MC_DB_LWRD),
REGISTER_DZ(MC_DB_HWRD),
};
struct efx_nic_reg_table {
@@ -337,7 +332,6 @@ struct efx_nic_reg_table {
FR_BZ_ ## name ## _STEP, \
FR_CZ_ ## name ## _ROWS)
#define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, F, C, Z)
#define REGISTER_TABLE_DZ(name) REGISTER_TABLE(name, E, D, Z)
static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
/* DRIVER is not used */
@@ -368,7 +362,6 @@ static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
/* MSIX_PBA_TABLE is not mapped */
/* SRM_DBG is not mapped (and is redundant with BUF_FLL_TBL) */
REGISTER_TABLE_BZ(RX_FILTER_TBL0),
REGISTER_TABLE_DZ(BIU_MC_SFT_STATUS),
};
size_t efx_nic_get_regs_len(struct efx_nic *efx)
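The REGISTER_DZ()/REGISTER_TABLE_DZ() rows dropped above are the EF10-only entries in the register-dump tables: 'E' selects the EF10 register map and 'D'..'Z' the silicon revisions it spans. A sketch of the expansion, assuming the REGISTER() definition used earlier in nic.c:

#define REGISTER(name, arch, min_rev, max_rev) {		\
	arch ## R_ ## min_rev ## max_rev ## _ ## name,		\
	REGISTER_REVISION_ ## arch ## min_rev,			\
	REGISTER_REVISION_ ## arch ## max_rev			\
}

/* So REGISTER_DZ(BIU_HW_REV_ID) expands to
 *   { ER_DZ_BIU_HW_REV_ID, REGISTER_REVISION_ED, REGISTER_REVISION_EZ }
 * which can only ever match an EF10 (Huntington and later) NIC. */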

drivers/net/ethernet/sfc/siena/nic.h

@@ -116,193 +116,7 @@ struct siena_nic_data {
#endif
};
enum {
EF10_STAT_port_tx_bytes = GENERIC_STAT_COUNT,
EF10_STAT_port_tx_packets,
EF10_STAT_port_tx_pause,
EF10_STAT_port_tx_control,
EF10_STAT_port_tx_unicast,
EF10_STAT_port_tx_multicast,
EF10_STAT_port_tx_broadcast,
EF10_STAT_port_tx_lt64,
EF10_STAT_port_tx_64,
EF10_STAT_port_tx_65_to_127,
EF10_STAT_port_tx_128_to_255,
EF10_STAT_port_tx_256_to_511,
EF10_STAT_port_tx_512_to_1023,
EF10_STAT_port_tx_1024_to_15xx,
EF10_STAT_port_tx_15xx_to_jumbo,
EF10_STAT_port_rx_bytes,
EF10_STAT_port_rx_bytes_minus_good_bytes,
EF10_STAT_port_rx_good_bytes,
EF10_STAT_port_rx_bad_bytes,
EF10_STAT_port_rx_packets,
EF10_STAT_port_rx_good,
EF10_STAT_port_rx_bad,
EF10_STAT_port_rx_pause,
EF10_STAT_port_rx_control,
EF10_STAT_port_rx_unicast,
EF10_STAT_port_rx_multicast,
EF10_STAT_port_rx_broadcast,
EF10_STAT_port_rx_lt64,
EF10_STAT_port_rx_64,
EF10_STAT_port_rx_65_to_127,
EF10_STAT_port_rx_128_to_255,
EF10_STAT_port_rx_256_to_511,
EF10_STAT_port_rx_512_to_1023,
EF10_STAT_port_rx_1024_to_15xx,
EF10_STAT_port_rx_15xx_to_jumbo,
EF10_STAT_port_rx_gtjumbo,
EF10_STAT_port_rx_bad_gtjumbo,
EF10_STAT_port_rx_overflow,
EF10_STAT_port_rx_align_error,
EF10_STAT_port_rx_length_error,
EF10_STAT_port_rx_nodesc_drops,
EF10_STAT_port_rx_pm_trunc_bb_overflow,
EF10_STAT_port_rx_pm_discard_bb_overflow,
EF10_STAT_port_rx_pm_trunc_vfifo_full,
EF10_STAT_port_rx_pm_discard_vfifo_full,
EF10_STAT_port_rx_pm_trunc_qbb,
EF10_STAT_port_rx_pm_discard_qbb,
EF10_STAT_port_rx_pm_discard_mapping,
EF10_STAT_port_rx_dp_q_disabled_packets,
EF10_STAT_port_rx_dp_di_dropped_packets,
EF10_STAT_port_rx_dp_streaming_packets,
EF10_STAT_port_rx_dp_hlb_fetch,
EF10_STAT_port_rx_dp_hlb_wait,
EF10_STAT_rx_unicast,
EF10_STAT_rx_unicast_bytes,
EF10_STAT_rx_multicast,
EF10_STAT_rx_multicast_bytes,
EF10_STAT_rx_broadcast,
EF10_STAT_rx_broadcast_bytes,
EF10_STAT_rx_bad,
EF10_STAT_rx_bad_bytes,
EF10_STAT_rx_overflow,
EF10_STAT_tx_unicast,
EF10_STAT_tx_unicast_bytes,
EF10_STAT_tx_multicast,
EF10_STAT_tx_multicast_bytes,
EF10_STAT_tx_broadcast,
EF10_STAT_tx_broadcast_bytes,
EF10_STAT_tx_bad,
EF10_STAT_tx_bad_bytes,
EF10_STAT_tx_overflow,
EF10_STAT_V1_COUNT,
EF10_STAT_fec_uncorrected_errors = EF10_STAT_V1_COUNT,
EF10_STAT_fec_corrected_errors,
EF10_STAT_fec_corrected_symbols_lane0,
EF10_STAT_fec_corrected_symbols_lane1,
EF10_STAT_fec_corrected_symbols_lane2,
EF10_STAT_fec_corrected_symbols_lane3,
EF10_STAT_ctpio_vi_busy_fallback,
EF10_STAT_ctpio_long_write_success,
EF10_STAT_ctpio_missing_dbell_fail,
EF10_STAT_ctpio_overflow_fail,
EF10_STAT_ctpio_underflow_fail,
EF10_STAT_ctpio_timeout_fail,
EF10_STAT_ctpio_noncontig_wr_fail,
EF10_STAT_ctpio_frm_clobber_fail,
EF10_STAT_ctpio_invalid_wr_fail,
EF10_STAT_ctpio_vi_clobber_fallback,
EF10_STAT_ctpio_unqualified_fallback,
EF10_STAT_ctpio_runt_fallback,
EF10_STAT_ctpio_success,
EF10_STAT_ctpio_fallback,
EF10_STAT_ctpio_poison,
EF10_STAT_ctpio_erase,
EF10_STAT_COUNT
};
/* Maximum number of TX PIO buffers we may allocate to a function.
* This matches the total number of buffers on each SFC9100-family
* controller.
*/
#define EF10_TX_PIOBUF_COUNT 16
/**
* struct efx_ef10_nic_data - EF10 architecture NIC state
* @mcdi_buf: DMA buffer for MCDI
* @warm_boot_count: Last seen MC warm boot count
* @vi_base: Absolute index of first VI in this function
* @n_allocated_vis: Number of VIs allocated to this function
* @n_piobufs: Number of PIO buffers allocated to this function
* @wc_membase: Base address of write-combining mapping of the memory BAR
* @pio_write_base: Base address for writing PIO buffers
* @pio_write_vi_base: Relative VI number for @pio_write_base
* @piobuf_handle: Handle of each PIO buffer allocated
* @piobuf_size: size of a single PIO buffer
* @must_restore_piobufs: Flag: PIO buffers have yet to be restored after MC
* reboot
* @mc_stats: Scratch buffer for converting statistics to the kernel's format
* @stats: Hardware statistics
* @workaround_35388: Flag: firmware supports workaround for bug 35388
* @workaround_26807: Flag: firmware supports workaround for bug 26807
* @workaround_61265: Flag: firmware supports workaround for bug 61265
* @must_check_datapath_caps: Flag: @datapath_caps needs to be revalidated
* after MC reboot
* @datapath_caps: Capabilities of datapath firmware (FLAGS1 field of
* %MC_CMD_GET_CAPABILITIES response)
* @datapath_caps2: Further Capabilities of datapath firmware (FLAGS2 field of
* %MC_CMD_GET_CAPABILITIES response)
* @rx_dpcpu_fw_id: Firmware ID of the RxDPCPU
* @tx_dpcpu_fw_id: Firmware ID of the TxDPCPU
* @must_probe_vswitching: Flag: vswitching has yet to be setup after MC reboot
* @pf_index: The number for this PF, or the parent PF if this is a VF
#ifdef CONFIG_SFC_SRIOV
* @vf: Pointer to VF data structure
#endif
* @vport_mac: The MAC address on the vport, only for PFs; VFs will be zero
* @vlan_list: List of VLANs added over the interface. Serialised by vlan_lock.
* @vlan_lock: Lock to serialize access to vlan_list.
* @udp_tunnels: UDP tunnel port numbers and types.
* @udp_tunnels_dirty: flag indicating a reboot occurred while pushing
* @udp_tunnels to hardware and thus the push must be re-done.
* @udp_tunnels_lock: Serialises writes to @udp_tunnels and @udp_tunnels_dirty.
*/
struct efx_ef10_nic_data {
struct efx_buffer mcdi_buf;
u16 warm_boot_count;
unsigned int vi_base;
unsigned int n_allocated_vis;
unsigned int n_piobufs;
void __iomem *wc_membase, *pio_write_base;
unsigned int pio_write_vi_base;
unsigned int piobuf_handle[EF10_TX_PIOBUF_COUNT];
u16 piobuf_size;
bool must_restore_piobufs;
__le64 *mc_stats;
u64 stats[EF10_STAT_COUNT];
bool workaround_35388;
bool workaround_26807;
bool workaround_61265;
bool must_check_datapath_caps;
u32 datapath_caps;
u32 datapath_caps2;
unsigned int rx_dpcpu_fw_id;
unsigned int tx_dpcpu_fw_id;
bool must_probe_vswitching;
unsigned int pf_index;
u8 port_id[ETH_ALEN];
#ifdef CONFIG_SFC_SRIOV
unsigned int vf_index;
struct ef10_vf *vf;
#endif
u8 vport_mac[ETH_ALEN];
struct list_head vlan_list;
struct mutex vlan_lock;
struct efx_udp_tunnel udp_tunnels[16];
bool udp_tunnels_dirty;
struct mutex udp_tunnels_lock;
u64 licensed_features;
};
/* TSOv2 */
int efx_ef10_tx_tso_desc(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
bool *data_mapped);
extern const struct efx_nic_type efx_hunt_a0_nic_type;
extern const struct efx_nic_type efx_hunt_a0_vf_nic_type;
extern const struct efx_nic_type siena_a0_nic_type;
int falcon_probe_board(struct efx_nic *efx, u16 revision_info);
@@ -364,7 +178,7 @@ irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id);
irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx);
/* Global Resources */
void siena_prepare_flush(struct efx_nic *efx);
void efx_siena_prepare_flush(struct efx_nic *efx);
int efx_farch_fini_dmaq(struct efx_nic *efx);
void efx_farch_finish_flr(struct efx_nic *efx);
void siena_finish_flush(struct efx_nic *efx);

drivers/net/ethernet/sfc/siena/nic_common.h

@@ -75,9 +75,6 @@ static inline bool efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue, unsigned i
return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0;
}
int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
bool *data_mapped);
/* Decide whether to push a TX descriptor to the NIC vs merely writing
* the doorbell. This can reduce latency when we are adding a single
* descriptor to an empty queue, but is otherwise pointless. Further,

drivers/net/ethernet/sfc/siena/ptp.c

@@ -1790,17 +1790,6 @@ void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info)
ts_info->so_timestamping |= (SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE);
/* Check licensed features. If we don't have the license for TX
* timestamps, the NIC will not support them.
*/
if (efx_ptp_use_mac_tx_timestamps(efx)) {
struct efx_ef10_nic_data *nic_data = efx->nic_data;
if (!(nic_data->licensed_features &
(1 << LICENSED_V3_FEATURES_TX_TIMESTAMPS_LBN)))
ts_info->so_timestamping &=
~SOF_TIMESTAMPING_TX_HARDWARE;
}
if (primary && primary->ptp_data && primary->ptp_data->phc_clock)
ts_info->phc_index =
ptp_clock_index(primary->ptp_data->phc_clock);
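The removed block gated TX hardware timestamps on an EF10 MC license by reaching into struct efx_ef10_nic_data, which no longer exists in this driver, so the Siena path now advertises the flags unconditionally. For reference, userspace consumes these capability bits through the standard SO_TIMESTAMPING socket option; a minimal, sfc-agnostic example:

#include <linux/net_tstamp.h>
#include <sys/socket.h>

/* Ask the kernel for hardware TX/RX timestamps on a socket; this is
 * only useful if the NIC reports the matching so_timestamping bits
 * via ethtool_ts_info, as set up in efx_ptp_get_ts_info() above. */
static int enable_hw_timestamps(int fd)
{
	int flags = SOF_TIMESTAMPING_TX_HARDWARE |
		    SOF_TIMESTAMPING_RX_HARDWARE |
		    SOF_TIMESTAMPING_RAW_HARDWARE;

	return setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING,
			  &flags, sizeof(flags));
}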

drivers/net/ethernet/sfc/siena/siena.c

@@ -56,7 +56,7 @@ static void siena_push_irq_moderation(struct efx_channel *channel)
channel->channel);
}
void siena_prepare_flush(struct efx_nic *efx)
void efx_siena_prepare_flush(struct efx_nic *efx)
{
if (efx->fc_disable++ == 0)
efx_mcdi_set_mac(efx);
@@ -992,7 +992,7 @@ const struct efx_nic_type siena_a0_nic_type = {
.probe_port = efx_mcdi_port_probe,
.remove_port = efx_mcdi_port_remove,
.fini_dmaq = efx_farch_fini_dmaq,
.prepare_flush = siena_prepare_flush,
.prepare_flush = efx_siena_prepare_flush,
.finish_flush = siena_finish_flush,
.prepare_flr = efx_port_dummy_op_void,
.finish_flr = efx_farch_finish_flr,
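efx_siena_prepare_flush() uses fc_disable as a refcount: the first caller turns flow control off (via efx_mcdi_set_mac()) so RX queue flushes cannot be stalled by pause frames. Its counterpart siena_finish_flush(), unchanged by this patch, undoes that when the last flusher finishes; roughly:

void siena_finish_flush(struct efx_nic *efx)
{
	/* Re-enable flow control once the last in-flight flush is done */
	if (--efx->fc_disable == 0)
		efx_mcdi_set_mac(efx);
}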

drivers/net/ethernet/sfc/siena/siena_sriov.c

@@ -689,7 +689,7 @@ static int efx_vfdi_fini_all_queues(struct siena_vf *vf)
MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM);
rtnl_lock();
siena_prepare_flush(efx);
efx_siena_prepare_flush(efx);
rtnl_unlock();
/* Flush all the initialized queues */

drivers/net/ethernet/sfc/siena/tx.c

@@ -22,14 +22,6 @@
#include "tx.h"
#include "tx_common.h"
#include "workarounds.h"
#include "ef10_regs.h"
#ifdef EFX_USE_PIO
#define EFX_PIOBUF_SIZE_DEF ALIGN(256, L1_CACHE_BYTES)
unsigned int efx_piobuf_size __read_mostly = EFX_PIOBUF_SIZE_DEF;
#endif /* EFX_USE_PIO */
static inline u8 *efx_tx_get_copy_buffer(struct efx_tx_queue *tx_queue,
struct efx_tx_buffer *buffer)
@@ -123,173 +115,6 @@ static int efx_enqueue_skb_copy(struct efx_tx_queue *tx_queue,
return rc;
}
#ifdef EFX_USE_PIO
struct efx_short_copy_buffer {
int used;
u8 buf[L1_CACHE_BYTES];
};
/* Copy to PIO, respecting that writes to PIO buffers must be dword aligned.
* Advances piobuf pointer. Leaves additional data in the copy buffer.
*/
static void efx_memcpy_toio_aligned(struct efx_nic *efx, u8 __iomem **piobuf,
u8 *data, int len,
struct efx_short_copy_buffer *copy_buf)
{
int block_len = len & ~(sizeof(copy_buf->buf) - 1);
__iowrite64_copy(*piobuf, data, block_len >> 3);
*piobuf += block_len;
len -= block_len;
if (len) {
data += block_len;
BUG_ON(copy_buf->used);
BUG_ON(len > sizeof(copy_buf->buf));
memcpy(copy_buf->buf, data, len);
copy_buf->used = len;
}
}
/* Copy to PIO, respecting dword alignment, popping data from copy buffer first.
* Advances piobuf pointer. Leaves additional data in the copy buffer.
*/
static void efx_memcpy_toio_aligned_cb(struct efx_nic *efx, u8 __iomem **piobuf,
u8 *data, int len,
struct efx_short_copy_buffer *copy_buf)
{
if (copy_buf->used) {
/* if the copy buffer is partially full, fill it up and write */
int copy_to_buf =
min_t(int, sizeof(copy_buf->buf) - copy_buf->used, len);
memcpy(copy_buf->buf + copy_buf->used, data, copy_to_buf);
copy_buf->used += copy_to_buf;
/* if we didn't fill it up then we're done for now */
if (copy_buf->used < sizeof(copy_buf->buf))
return;
__iowrite64_copy(*piobuf, copy_buf->buf,
sizeof(copy_buf->buf) >> 3);
*piobuf += sizeof(copy_buf->buf);
data += copy_to_buf;
len -= copy_to_buf;
copy_buf->used = 0;
}
efx_memcpy_toio_aligned(efx, piobuf, data, len, copy_buf);
}
static void efx_flush_copy_buffer(struct efx_nic *efx, u8 __iomem *piobuf,
struct efx_short_copy_buffer *copy_buf)
{
/* if there's anything in it, write the whole buffer, including junk */
if (copy_buf->used)
__iowrite64_copy(piobuf, copy_buf->buf,
sizeof(copy_buf->buf) >> 3);
}
/* Traverse skb structure and copy fragments in to PIO buffer.
* Advances piobuf pointer.
*/
static void efx_skb_copy_bits_to_pio(struct efx_nic *efx, struct sk_buff *skb,
u8 __iomem **piobuf,
struct efx_short_copy_buffer *copy_buf)
{
int i;
efx_memcpy_toio_aligned(efx, piobuf, skb->data, skb_headlen(skb),
copy_buf);
for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
skb_frag_t *f = &skb_shinfo(skb)->frags[i];
u8 *vaddr;
vaddr = kmap_atomic(skb_frag_page(f));
efx_memcpy_toio_aligned_cb(efx, piobuf, vaddr + skb_frag_off(f),
skb_frag_size(f), copy_buf);
kunmap_atomic(vaddr);
}
EFX_WARN_ON_ONCE_PARANOID(skb_shinfo(skb)->frag_list);
}
static int efx_enqueue_skb_pio(struct efx_tx_queue *tx_queue,
struct sk_buff *skb)
{
struct efx_tx_buffer *buffer =
efx_tx_queue_get_insert_buffer(tx_queue);
u8 __iomem *piobuf = tx_queue->piobuf;
/* Copy to PIO buffer. Ensure the writes are padded to the end
* of a cache line, as this is required for write-combining to be
* effective on at least x86.
*/
if (skb_shinfo(skb)->nr_frags) {
/* The size of the copy buffer will ensure all writes
* are the size of a cache line.
*/
struct efx_short_copy_buffer copy_buf;
copy_buf.used = 0;
efx_skb_copy_bits_to_pio(tx_queue->efx, skb,
&piobuf, &copy_buf);
efx_flush_copy_buffer(tx_queue->efx, piobuf, &copy_buf);
} else {
/* Pad the write to the size of a cache line.
* We can do this because we know the skb_shared_info struct is
* after the source, and the destination buffer is big enough.
*/
BUILD_BUG_ON(L1_CACHE_BYTES >
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
__iowrite64_copy(tx_queue->piobuf, skb->data,
ALIGN(skb->len, L1_CACHE_BYTES) >> 3);
}
buffer->skb = skb;
buffer->flags = EFX_TX_BUF_SKB | EFX_TX_BUF_OPTION;
EFX_POPULATE_QWORD_5(buffer->option,
ESF_DZ_TX_DESC_IS_OPT, 1,
ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_PIO,
ESF_DZ_TX_PIO_CONT, 0,
ESF_DZ_TX_PIO_BYTE_CNT, skb->len,
ESF_DZ_TX_PIO_BUF_ADDR,
tx_queue->piobuf_offset);
++tx_queue->insert_count;
return 0;
}
/* Decide whether we can use TX PIO, ie. write packet data directly into
* a buffer on the device. This can reduce latency at the expense of
* throughput, so we only do this if both hardware and software TX rings
* are empty, including all queues for the channel. This also ensures that
* only one packet at a time can be using the PIO buffer. If the xmit_more
* flag is set then we don't use this - there'll be another packet along
* shortly and we want to hold off the doorbell.
*/
static bool efx_tx_may_pio(struct efx_tx_queue *tx_queue)
{
struct efx_channel *channel = tx_queue->channel;
if (!tx_queue->piobuf)
return false;
EFX_WARN_ON_ONCE_PARANOID(!channel->efx->type->option_descriptors);
efx_for_each_channel_tx_queue(tx_queue, channel)
if (!efx_nic_tx_is_empty(tx_queue, tx_queue->packet_write_count))
return false;
return true;
}
#endif /* EFX_USE_PIO */
/* Send any pending traffic for a channel. xmit_more is shared across all
* queues for a channel, so we must check all of them.
*/
@@ -338,35 +163,11 @@ netdev_tx_t __efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb
* size limit.
*/
if (segments) {
switch (tx_queue->tso_version) {
case 1:
rc = efx_enqueue_skb_tso(tx_queue, skb, &data_mapped);
break;
case 2:
rc = efx_ef10_tx_tso_desc(tx_queue, skb, &data_mapped);
break;
case 0: /* No TSO on this queue, SW fallback needed */
default:
rc = -EINVAL;
break;
}
if (rc == -EINVAL) {
rc = efx_tx_tso_fallback(tx_queue, skb);
tx_queue->tso_fallbacks++;
if (rc == 0)
return 0;
}
if (rc)
goto err;
#ifdef EFX_USE_PIO
} else if (skb_len <= efx_piobuf_size && !xmit_more &&
efx_tx_may_pio(tx_queue)) {
/* Use PIO for short packets with an empty queue. */
if (efx_enqueue_skb_pio(tx_queue, skb))
goto err;
tx_queue->pio_packets++;
data_mapped = true;
#endif
rc = efx_tx_tso_fallback(tx_queue, skb);
tx_queue->tso_fallbacks++;
if (rc == 0)
return 0;
goto err;
} else if (skb->data_len && skb_len <= EFX_TX_CB_SIZE) {
/* Pad short packets or coalesce short fragmented packets. */
if (efx_enqueue_skb_copy(tx_queue, skb))
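With the TSO version switch gone, any GSO skb reaching __efx_enqueue_skb() now always takes the software fallback. efx_tx_tso_fallback() (in tx_common.c) segments the skb with the kernel's GSO machinery and re-enqueues each resulting packet; roughly, a sketch from memory of the upstream helper:

int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
	struct sk_buff *segments, *next;

	/* Let the core network stack do the segmentation in software */
	segments = skb_gso_segment(skb, 0);
	if (IS_ERR(segments))
		return PTR_ERR(segments);

	dev_consume_skb_any(skb);

	/* Requeue each segment as an ordinary (non-TSO) transmit */
	skb_list_walk_safe(segments, skb, next) {
		skb_mark_not_on_list(skb);
		efx_enqueue_skb(tx_queue, skb);
	}

	return 0;
}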

drivers/net/ethernet/sfc/siena/workarounds.h

@@ -21,12 +21,6 @@
/* Legacy interrupt storm when interrupt fifo fills */
#define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA
/* Lockup when writing event block registers at gen2/gen3 */
#define EFX_EF10_WORKAROUND_35388(efx) \
(((struct efx_ef10_nic_data *)efx->nic_data)->workaround_35388)
#define EFX_WORKAROUND_35388(efx) \
(efx_nic_rev(efx) == EFX_REV_HUNT_A0 && EFX_EF10_WORKAROUND_35388(efx))
/* Moderation timer access must go through MCDI */
#define EFX_EF10_WORKAROUND_61265(efx) \
(((struct efx_ef10_nic_data *)efx->nic_data)->workaround_61265)
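The EF10 workaround macros removed above dereferenced struct efx_ef10_nic_data, which this driver no longer defines. The Siena workarounds that remain gate purely on the NIC revision; assuming the definition earlier in this header, roughly:

#define EFX_WORKAROUND_SIENA(efx) (efx_nic_rev(efx) == EFX_REV_SIENA_A0)

/* So code guarded by EFX_WORKAROUND_17213(efx) runs only on Siena A0
 * silicon and needs no per-NIC nic_data state at all. */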