mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-09-30 22:26:55 +00:00
gve: Fix GFP flags when allocing pages
Use GFP_ATOMIC when allocating pages out of the hotpath,
continue to use GFP_KERNEL when allocating pages during setup.
GFP_KERNEL will allow blocking which allows it to succeed
more often in a low memory environment but in the hotpath we do
not want to allow the allocation to block.
Fixes: f5cedc84a3
("gve: Add transmit and receive support")
Signed-off-by: Catherine Sullivan <csully@google.com>
Signed-off-by: David Awogbemila <awogbemila@google.com>
Link: https://lore.kernel.org/r/20220126003843.3584521-1-awogbemila@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
parent
2f65132843
commit
a92f7a6fee
4 changed files with 7 additions and 6 deletions
|
@ -843,7 +843,7 @@ static inline bool gve_is_gqi(struct gve_priv *priv)
|
||||||
/* buffers */
|
/* buffers */
|
||||||
int gve_alloc_page(struct gve_priv *priv, struct device *dev,
|
int gve_alloc_page(struct gve_priv *priv, struct device *dev,
|
||||||
struct page **page, dma_addr_t *dma,
|
struct page **page, dma_addr_t *dma,
|
||||||
enum dma_data_direction);
|
enum dma_data_direction, gfp_t gfp_flags);
|
||||||
void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
|
void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
|
||||||
enum dma_data_direction);
|
enum dma_data_direction);
|
||||||
/* tx handling */
|
/* tx handling */
|
||||||
|
|
|
@ -766,9 +766,9 @@ static void gve_free_rings(struct gve_priv *priv)
|
||||||
|
|
||||||
int gve_alloc_page(struct gve_priv *priv, struct device *dev,
|
int gve_alloc_page(struct gve_priv *priv, struct device *dev,
|
||||||
struct page **page, dma_addr_t *dma,
|
struct page **page, dma_addr_t *dma,
|
||||||
enum dma_data_direction dir)
|
enum dma_data_direction dir, gfp_t gfp_flags)
|
||||||
{
|
{
|
||||||
*page = alloc_page(GFP_KERNEL);
|
*page = alloc_page(gfp_flags);
|
||||||
if (!*page) {
|
if (!*page) {
|
||||||
priv->page_alloc_fail++;
|
priv->page_alloc_fail++;
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
|
@ -811,7 +811,7 @@ static int gve_alloc_queue_page_list(struct gve_priv *priv, u32 id,
|
||||||
for (i = 0; i < pages; i++) {
|
for (i = 0; i < pages; i++) {
|
||||||
err = gve_alloc_page(priv, &priv->pdev->dev, &qpl->pages[i],
|
err = gve_alloc_page(priv, &priv->pdev->dev, &qpl->pages[i],
|
||||||
&qpl->page_buses[i],
|
&qpl->page_buses[i],
|
||||||
gve_qpl_dma_dir(priv, id));
|
gve_qpl_dma_dir(priv, id), GFP_KERNEL);
|
||||||
/* caller handles clean up */
|
/* caller handles clean up */
|
||||||
if (err)
|
if (err)
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
|
|
|
@ -86,7 +86,8 @@ static int gve_rx_alloc_buffer(struct gve_priv *priv, struct device *dev,
|
||||||
dma_addr_t dma;
|
dma_addr_t dma;
|
||||||
int err;
|
int err;
|
||||||
|
|
||||||
err = gve_alloc_page(priv, dev, &page, &dma, DMA_FROM_DEVICE);
|
err = gve_alloc_page(priv, dev, &page, &dma, DMA_FROM_DEVICE,
|
||||||
|
GFP_ATOMIC);
|
||||||
if (err)
|
if (err)
|
||||||
return err;
|
return err;
|
||||||
|
|
||||||
|
|
|
@ -157,7 +157,7 @@ static int gve_alloc_page_dqo(struct gve_priv *priv,
|
||||||
int err;
|
int err;
|
||||||
|
|
||||||
err = gve_alloc_page(priv, &priv->pdev->dev, &buf_state->page_info.page,
|
err = gve_alloc_page(priv, &priv->pdev->dev, &buf_state->page_info.page,
|
||||||
&buf_state->addr, DMA_FROM_DEVICE);
|
&buf_state->addr, DMA_FROM_DEVICE, GFP_KERNEL);
|
||||||
if (err)
|
if (err)
|
||||||
return err;
|
return err;
|
||||||
|
|
||||||
|
|
Loading…
Reference in a new issue