mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-10-14 12:37:32 +00:00
liquidio: fix tx completions in napi poll
If there are no egress packets pending, then don't look for tx completions in napi poll. Also, fix broken tx queue wakeup logic.

Signed-off-by: VSR Burru <veerasenareddy.burru@cavium.com>
Signed-off-by: Felix Manlunas <felix.manlunas@cavium.com>
Signed-off-by: Satanand Burla <satananda.burla@cavium.com>
Signed-off-by: Derek Chickles <derek.chickles@cavium.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
031d4f1210
commit
6069f3fbde
2 changed files with 22 additions and 17 deletions
|
@@ -932,14 +932,13 @@ static void update_txq_status(struct octeon_device *oct, int iq_num)
 			INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq_num,
 						  tx_restart, 1);
 			netif_wake_subqueue(netdev, iq->q_index);
-		} else {
-			if (!octnet_iq_is_full(oct, lio->txq)) {
-				INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev,
-							  lio->txq,
-							  tx_restart, 1);
-				wake_q(netdev, lio->txq);
-			}
 		}
+	} else if (netif_queue_stopped(netdev) &&
+		   lio->linfo.link.s.link_up &&
+		   (!octnet_iq_is_full(oct, lio->txq))) {
+		INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev,
+					  lio->txq, tx_restart, 1);
+		netif_wake_queue(netdev);
 	}
 }
 
@@ -2454,8 +2453,11 @@ static int liquidio_napi_poll(struct napi_struct *napi, int budget)
 	/* Flush the instruction queue */
 	iq = oct->instr_queue[iq_no];
 	if (iq) {
-		/* Process iq buffers with in the budget limits */
-		tx_done = octeon_flush_iq(oct, iq, budget);
+		if (atomic_read(&iq->instr_pending))
+			/* Process iq buffers with in the budget limits */
+			tx_done = octeon_flush_iq(oct, iq, budget);
+		else
+			tx_done = 1;
 	/* Update iq read-index rather than waiting for next interrupt.
 	 * Return back if tx_done is false.
 	 */
@@ -687,13 +687,12 @@ static void update_txq_status(struct octeon_device *oct, int iq_num)
 			netif_wake_subqueue(netdev, iq->q_index);
 			INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq_num,
 						  tx_restart, 1);
-		} else {
-			if (!octnet_iq_is_full(oct, lio->txq)) {
-				INCR_INSTRQUEUE_PKT_COUNT(
-					lio->oct_dev, lio->txq, tx_restart, 1);
-				wake_q(netdev, lio->txq);
-			}
 		}
+	} else if (netif_queue_stopped(netdev) && lio->linfo.link.s.link_up &&
+		   (!octnet_iq_is_full(oct, lio->txq))) {
+		INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev,
+					  lio->txq, tx_restart, 1);
+		netif_wake_queue(netdev);
 	}
 }
 
@@ -1636,8 +1635,12 @@ static int liquidio_napi_poll(struct napi_struct *napi, int budget)
 	/* Flush the instruction queue */
 	iq = oct->instr_queue[iq_no];
 	if (iq) {
-		/* Process iq buffers with in the budget limits */
-		tx_done = octeon_flush_iq(oct, iq, budget);
+		if (atomic_read(&iq->instr_pending))
+			/* Process iq buffers with in the budget limits */
+			tx_done = octeon_flush_iq(oct, iq, budget);
+		else
+			tx_done = 1;
+
 	/* Update iq read-index rather than waiting for next interrupt.
 	 * Return back if tx_done is false.
 	 */
Loading…
Reference in a new issue