Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
Merge branch 'cxgb4-next'
Hariprasad Shenai says:

====================
cxgb4/cxgb4vf: Cleanup and minor fixes

This series sets FBMIN to 64 bytes for Chelsio's T6 series of adapters, revises the check used to replenish the free list, cleans up some cxgb4vf SGE initialization code, and removes dead code.

This patch series has been created against the net-next tree and includes patches for the cxgb4 and cxgb4vf drivers. We have included all the maintainers of the respective drivers. Kindly review the changes and let us know in case of any review comments.
====================
commit 3ebeac1d02
6 changed files with 85 additions and 97 deletions
drivers/net/ethernet/chelsio/cxgb4/sge.c

@@ -2226,7 +2226,7 @@ static int process_responses(struct sge_rspq *q, int budget)
 		budget_left--;
 	}
 
-	if (q->offset >= 0 && rxq->fl.size - rxq->fl.avail >= 16)
+	if (q->offset >= 0 && fl_cap(&rxq->fl) - rxq->fl.avail >= 16)
 		__refill_fl(q->adap, &rxq->fl);
 	return budget - budget_left;
 }
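For context: the refill check now compares against the Free List's usable capacity rather than its raw ring size. fl_cap() is a small cxgb4 helper; a minimal sketch of what it computes, assuming the driver holds back one descriptor's worth (8 entries) of the ring, is:

static inline unsigned int fl_cap(const struct sge_fl *fl)
{
	/* Usable Free List capacity: the ring size minus the entries the
	 * hardware never hands back (assumed: one descriptor = 8 buffers).
	 */
	return fl->size - 8;
}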
@@ -2611,8 +2611,18 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
 			htonl(FW_IQ_CMD_FL0CNGCHMAP_V(cong) |
 			      FW_IQ_CMD_FL0CONGCIF_F |
 			      FW_IQ_CMD_FL0CONGEN_F);
+		/* In T6, for egress queue type FL there is internal overhead
+		 * of 16B for header going into FLM module. Hence the maximum
+		 * allowed burst size is 448 bytes. For T4/T5, the hardware
+		 * doesn't coalesce fetch requests if more than 64 bytes of
+		 * Free List pointers are provided, so we use a 128-byte Fetch
+		 * Burst Minimum there (T6 implements coalescing so we can use
+		 * the smaller 64-byte value there).
+		 */
 		c.fl0dcaen_to_fl0cidxfthresh =
-			htons(FW_IQ_CMD_FL0FBMIN_V(FETCHBURSTMIN_64B_X) |
+			htons(FW_IQ_CMD_FL0FBMIN_V(chip <= CHELSIO_T5 ?
+						   FETCHBURSTMIN_128B_X :
+						   FETCHBURSTMIN_64B_X) |
 			      FW_IQ_CMD_FL0FBMAX_V((chip <= CHELSIO_T5) ?
 			      FETCHBURSTMAX_512B_X :
 			      FETCHBURSTMAX_256B_X));
drivers/net/ethernet/chelsio/cxgb4/t4_values.h

@@ -65,6 +65,7 @@
 #define TIMERREG_COUNTER0_X		0
 
 #define FETCHBURSTMIN_64B_X		2
+#define FETCHBURSTMIN_128B_X		3
 
 #define FETCHBURSTMAX_256B_X		2
 #define FETCHBURSTMAX_512B_X		3
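Note that these *_X values are SGE register field encodings, not byte counts. They are consistent with the fetch-burst fields holding a power-of-two exponent (our assumption: FetchBurstMin over a 16-byte unit, FetchBurstMax over a 64-byte unit), which the following illustrative snippet decodes:

#include <stdio.h>

/* Illustration only, not driver code: assume FBMIN is encoded as
 * 16B << value and FBMAX as 64B << value, so field values 2/3 map to
 * 64B/128B for FBMIN and 256B/512B for FBMAX, matching the names above.
 */
static unsigned int fbmin_bytes(unsigned int x) { return 16u << x; }
static unsigned int fbmax_bytes(unsigned int x) { return 64u << x; }

int main(void)
{
	printf("FBMIN: 2 -> %uB, 3 -> %uB\n", fbmin_bytes(2), fbmin_bytes(3));
	printf("FBMAX: 2 -> %uB, 3 -> %uB\n", fbmax_bytes(2), fbmax_bytes(3));
	return 0;
}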
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c

@@ -862,52 +862,6 @@ static struct net_device_stats *cxgb4vf_get_stats(struct net_device *dev)
 	return ns;
 }
 
-/*
- * Collect up to maxaddrs worth of a netdevice's unicast addresses, starting
- * at a specified offset within the list, into an array of addrss pointers and
- * return the number collected.
- */
-static inline unsigned int collect_netdev_uc_list_addrs(const struct net_device *dev,
-							const u8 **addr,
-							unsigned int offset,
-							unsigned int maxaddrs)
-{
-	unsigned int index = 0;
-	unsigned int naddr = 0;
-	const struct netdev_hw_addr *ha;
-
-	for_each_dev_addr(dev, ha)
-		if (index++ >= offset) {
-			addr[naddr++] = ha->addr;
-			if (naddr >= maxaddrs)
-				break;
-		}
-	return naddr;
-}
-
-/*
- * Collect up to maxaddrs worth of a netdevice's multicast addresses, starting
- * at a specified offset within the list, into an array of addrss pointers and
- * return the number collected.
- */
-static inline unsigned int collect_netdev_mc_list_addrs(const struct net_device *dev,
-							const u8 **addr,
-							unsigned int offset,
-							unsigned int maxaddrs)
-{
-	unsigned int index = 0;
-	unsigned int naddr = 0;
-	const struct netdev_hw_addr *ha;
-
-	netdev_for_each_mc_addr(ha, dev)
-		if (index++ >= offset) {
-			addr[naddr++] = ha->addr;
-			if (naddr >= maxaddrs)
-				break;
-		}
-	return naddr;
-}
-
 static inline int cxgb4vf_set_addr_hash(struct port_info *pi)
 {
 	struct adapter *adapter = pi->adapter;
@@ -2236,16 +2190,6 @@ static int adap_init0(struct adapter *adapter)
 	int err;
 	u32 param, val = 0;
 
-	/*
-	 * Wait for the device to become ready before proceeding ...
-	 */
-	err = t4vf_wait_dev_ready(adapter);
-	if (err) {
-		dev_err(adapter->pdev_dev, "device didn't become ready:"
-			" err=%d\n", err);
-		return err;
-	}
-
 	/*
 	 * Some environments do not properly handle PCIE FLRs -- e.g. in Linux
 	 * 2.6.31 and later we can't call pci_reset_function() in order to
drivers/net/ethernet/chelsio/cxgb4vf/sge.c

@@ -1864,7 +1864,7 @@ static int process_responses(struct sge_rspq *rspq, int budget)
 	 * for new buffer pointers, refill the Free List.
 	 */
 	if (rspq->offset >= 0 &&
-	    rxq->fl.size - rxq->fl.avail >= 2*FL_PER_EQ_UNIT)
+	    fl_cap(&rxq->fl) - rxq->fl.avail >= 2*FL_PER_EQ_UNIT)
 		__refill_fl(rspq->adapter, &rxq->fl);
 	return budget - budget_left;
 }
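The VF driver sizes its refill threshold in Egress Queue units rather than a raw entry count. A sketch of the constants behind 2*FL_PER_EQ_UNIT, assuming the definitions used elsewhere in cxgb4vf/sge.c (they are not part of this diff):

typedef unsigned long long __be64;	/* stand-in for the kernel type */

/* Assumed constants: the SGE tracks egress queues (including Free
 * Lists) in 64-byte units, and each Free List entry is an 8-byte DMA
 * address, so one unit covers 8 pointers.  The refill check above then
 * fires once at least two units' worth of entries can be replenished.
 */
enum {
	EQ_UNIT        = 64,				/* SGE_EQ_IDXSIZE */
	FL_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),	/* 8 entries */
};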
@@ -2300,9 +2300,20 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
 				FW_IQ_CMD_FL0HOSTFCMODE_V(SGE_HOSTFCMODE_NONE) |
 				FW_IQ_CMD_FL0PACKEN_F |
 				FW_IQ_CMD_FL0PADEN_F);
+
+		/* In T6, for egress queue type FL there is internal overhead
+		 * of 16B for header going into FLM module. Hence the maximum
+		 * allowed burst size is 448 bytes. For T4/T5, the hardware
+		 * doesn't coalesce fetch requests if more than 64 bytes of
+		 * Free List pointers are provided, so we use a 128-byte Fetch
+		 * Burst Minimum there (T6 implements coalescing so we can use
+		 * the smaller 64-byte value there).
+		 */
 		cmd.fl0dcaen_to_fl0cidxfthresh =
 			cpu_to_be16(
-				FW_IQ_CMD_FL0FBMIN_V(SGE_FETCHBURSTMIN_64B) |
+				FW_IQ_CMD_FL0FBMIN_V(chip <= CHELSIO_T5 ?
+						     FETCHBURSTMIN_128B_X :
+						     FETCHBURSTMIN_64B_X) |
 				FW_IQ_CMD_FL0FBMAX_V((chip <= CHELSIO_T5) ?
 				FETCHBURSTMAX_512B_X :
 				FETCHBURSTMAX_256B_X));
@@ -2607,7 +2618,6 @@ int t4vf_sge_init(struct adapter *adapter)
 	u32 fl0 = sge_params->sge_fl_buffer_size[0];
 	u32 fl1 = sge_params->sge_fl_buffer_size[1];
 	struct sge *s = &adapter->sge;
-	unsigned int ingpadboundary, ingpackboundary, ingpad_shift;
 
 	/*
 	 * Start by vetting the basic SGE parameters which have been set up by
@@ -2619,7 +2629,8 @@ int t4vf_sge_init(struct adapter *adapter)
 			fl0, fl1);
 		return -EINVAL;
 	}
-	if ((sge_params->sge_control & RXPKTCPLMODE_F) == 0) {
+	if ((sge_params->sge_control & RXPKTCPLMODE_F) !=
+	    RXPKTCPLMODE_V(RXPKTCPLMODE_SPLIT_X)) {
 		dev_err(adapter->pdev_dev, "bad SGE CPL MODE\n");
 		return -EINVAL;
 	}
@@ -2632,41 +2643,7 @@ int t4vf_sge_init(struct adapter *adapter)
 	s->stat_len = ((sge_params->sge_control & EGRSTATUSPAGESIZE_F)
 			? 128 : 64);
 	s->pktshift = PKTSHIFT_G(sge_params->sge_control);
-
-	/* T4 uses a single control field to specify both the PCIe Padding and
-	 * Packing Boundary. T5 introduced the ability to specify these
-	 * separately. The actual Ingress Packet Data alignment boundary
-	 * within Packed Buffer Mode is the maximum of these two
-	 * specifications. (Note that it makes no real practical sense to
-	 * have the Pading Boudary be larger than the Packing Boundary but you
-	 * could set the chip up that way and, in fact, legacy T4 code would
-	 * end doing this because it would initialize the Padding Boundary and
-	 * leave the Packing Boundary initialized to 0 (16 bytes).)
-	 * Padding Boundary values in T6 starts from 8B,
-	 * where as it is 32B for T4 and T5.
-	 */
-	if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
-		ingpad_shift = INGPADBOUNDARY_SHIFT_X;
-	else
-		ingpad_shift = T6_INGPADBOUNDARY_SHIFT_X;
-
-	ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_params->sge_control) +
-			       ingpad_shift);
-	if (is_t4(adapter->params.chip)) {
-		s->fl_align = ingpadboundary;
-	} else {
-		/* T5 has a different interpretation of one of the PCIe Packing
-		 * Boundary values.
-		 */
-		ingpackboundary = INGPACKBOUNDARY_G(sge_params->sge_control2);
-		if (ingpackboundary == INGPACKBOUNDARY_16B_X)
-			ingpackboundary = 16;
-		else
-			ingpackboundary = 1 << (ingpackboundary +
-						INGPACKBOUNDARY_SHIFT_X);
-
-		s->fl_align = max(ingpadboundary, ingpackboundary);
-	}
+	s->fl_align = t4vf_fl_pkt_align(adapter);
 
 	/* A FL with <= fl_starve_thres buffers is starving and a periodic
 	 * timer will attempt to refill it. This needs to be larger than the
drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h

@@ -309,6 +309,7 @@ int t4vf_port_init(struct adapter *, int);
 int t4vf_fw_reset(struct adapter *);
 int t4vf_set_params(struct adapter *, unsigned int, const u32 *, const u32 *);
 
+int t4vf_fl_pkt_align(struct adapter *adapter);
 enum t4_bar2_qtype { T4_BAR2_QTYPE_EGRESS, T4_BAR2_QTYPE_INGRESS };
 int t4vf_bar2_sge_qregs(struct adapter *adapter,
 			unsigned int qid,
drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c

@@ -417,6 +417,61 @@ int t4vf_set_params(struct adapter *adapter, unsigned int nparams,
 	return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
 }
 
+/**
+ *	t4vf_fl_pkt_align - return the fl packet alignment
+ *	@adapter: the adapter
+ *
+ *	T4 has a single field to specify the packing and padding boundary.
+ *	T5 onwards has separate fields for this and hence the alignment for
+ *	next packet offset is maximum of these two. And T6 changes the
+ *	Ingress Padding Boundary Shift, so it's all a mess and it's best
+ *	if we put this in low-level Common Code ...
+ *
+ */
+int t4vf_fl_pkt_align(struct adapter *adapter)
+{
+	u32 sge_control, sge_control2;
+	unsigned int ingpadboundary, ingpackboundary, fl_align, ingpad_shift;
+
+	sge_control = adapter->params.sge.sge_control;
+
+	/* T4 uses a single control field to specify both the PCIe Padding and
+	 * Packing Boundary. T5 introduced the ability to specify these
+	 * separately. The actual Ingress Packet Data alignment boundary
+	 * within Packed Buffer Mode is the maximum of these two
+	 * specifications. (Note that it makes no real practical sense to
+	 * have the Pading Boudary be larger than the Packing Boundary but you
+	 * could set the chip up that way and, in fact, legacy T4 code would
+	 * end doing this because it would initialize the Padding Boundary and
+	 * leave the Packing Boundary initialized to 0 (16 bytes).)
+	 * Padding Boundary values in T6 starts from 8B,
+	 * where as it is 32B for T4 and T5.
+	 */
+	if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
+		ingpad_shift = INGPADBOUNDARY_SHIFT_X;
+	else
+		ingpad_shift = T6_INGPADBOUNDARY_SHIFT_X;
+
+	ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_control) + ingpad_shift);
+
+	fl_align = ingpadboundary;
+	if (!is_t4(adapter->params.chip)) {
+		/* T5 has a different interpretation of one of the PCIe Packing
+		 * Boundary values.
+		 */
+		sge_control2 = adapter->params.sge.sge_control2;
+		ingpackboundary = INGPACKBOUNDARY_G(sge_control2);
+		if (ingpackboundary == INGPACKBOUNDARY_16B_X)
+			ingpackboundary = 16;
+		else
+			ingpackboundary = 1 << (ingpackboundary +
+						INGPACKBOUNDARY_SHIFT_X);
+
+		fl_align = max(ingpadboundary, ingpackboundary);
+	}
+	return fl_align;
+}
+
 /**
  *	t4vf_bar2_sge_qregs - return BAR2 SGE Queue register information
  *	@adapter: the adapter
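As a worked example of the boundary arithmetic in t4vf_fl_pkt_align(), the standalone sketch below reproduces the computation with hypothetical register field values. The *_SHIFT_X and *_16B_X constants are our reading of t4_values.h and should be treated as assumptions here; for a T5 with a padding-boundary field of 0 and a packing-boundary field of 1 it yields max(32, 64) = 64 bytes.

#include <stdio.h>

/* Standalone model of the fl_align computation above.  The *_SHIFT_X /
 * *_16B_X values are assumed from t4_values.h; pass is_t6 = 1 to use
 * the smaller T6 padding-boundary base (8B instead of 32B).
 */
#define INGPADBOUNDARY_SHIFT_X		5	/* 32B base for T4/T5 */
#define T6_INGPADBOUNDARY_SHIFT_X	3	/* 8B base for T6 */
#define INGPACKBOUNDARY_SHIFT_X		5	/* 32B base */
#define INGPACKBOUNDARY_16B_X		0	/* special case: 16B */

static unsigned int max_u(unsigned int a, unsigned int b)
{
	return a > b ? a : b;
}

static unsigned int model_fl_pkt_align(unsigned int ingpad_field,
				       unsigned int ingpack_field,
				       int is_t4, int is_t6)
{
	unsigned int ingpad_shift = is_t6 ? T6_INGPADBOUNDARY_SHIFT_X
					  : INGPADBOUNDARY_SHIFT_X;
	unsigned int ingpadboundary = 1u << (ingpad_field + ingpad_shift);
	unsigned int ingpackboundary;

	if (is_t4)
		return ingpadboundary;

	if (ingpack_field == INGPACKBOUNDARY_16B_X)
		ingpackboundary = 16;
	else
		ingpackboundary = 1u << (ingpack_field +
					 INGPACKBOUNDARY_SHIFT_X);

	return max_u(ingpadboundary, ingpackboundary);
}

int main(void)
{
	/* T5, padding field 0 (32B), packing field 1 (64B) -> 64B */
	printf("T5 example: fl_align = %uB\n",
	       model_fl_pkt_align(0, 1, 0, 0));
	return 0;
}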