mmc: tmio_mmc_core: Remove local_irq_{save,restore}() around k[un]map_atomic()

A long time ago the kmap_atomic API required the caller to provide a slot,
which carried the risk that other code might use the same slot at the
same time. Disabling interrupts prevented an interrupt handler from doing
that. However, that requirement went away with
commit 3e4d3af501 ("mm: stack based kmap_atomic()").

When the second argument to kmap_atomic was removed by commit 482fce997e
("mmc: remove the second argument of k[un]map_atomic()"),
local_irq_{save,restore}() should have been removed also.

Remove it now.
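
For context, here is a minimal sketch (not part of this patch) contrasting the
old fixed-slot kmap_atomic() usage with the stack-based API. The
copy_one_sg_old()/copy_one_sg_new() helpers and the KM_USER0 slot are purely
illustrative, and the old two-argument calls no longer compile on current
kernels; the pattern simply mirrors what tmio_mmc_kmap_atomic() had to guard
against before commit 3e4d3af501.

#include <linux/highmem.h>
#include <linux/irqflags.h>
#include <linux/scatterlist.h>
#include <linux/string.h>

/*
 * Old API: every mapping used a caller-chosen fixed per-CPU slot, so an
 * interrupt handler picking the same slot could clobber the mapping.
 * Callers therefore disabled interrupts around the map/unmap pair.
 */
static void copy_one_sg_old(struct scatterlist *sg, void *dst)
{
        unsigned long flags;
        void *vaddr;

        local_irq_save(flags);
        vaddr = kmap_atomic(sg_page(sg), KM_USER0) + sg->offset;
        memcpy(dst, vaddr, sg->length);
        kunmap_atomic(vaddr - sg->offset, KM_USER0);
        local_irq_restore(flags);
}

/*
 * Stack-based API: the mapping is pushed on a small per-CPU stack and nests
 * safely with interrupt handlers, so no slot and no IRQ disabling is needed.
 */
static void copy_one_sg_new(struct scatterlist *sg, void *dst)
{
        void *vaddr = kmap_atomic(sg_page(sg)) + sg->offset;

        memcpy(dst, vaddr, sg->length);
        kunmap_atomic(vaddr - sg->offset);
}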

Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
Reviewed-by: Wolfram Sang <wsa+renesas@sang-engineering.com>
Link: https://lore.kernel.org/r/20221005101951.3165-12-adrian.hunter@intel.com
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
Adrian Hunter 2022-10-05 13:19:48 +03:00 committed by Ulf Hansson
parent 4526cdaf9d
commit 14c9825f96
3 changed files with 8 additions and 14 deletions

drivers/mmc/host/renesas_sdhi_sys_dmac.c

@@ -254,12 +254,11 @@ static void renesas_sdhi_sys_dmac_start_dma_tx(struct tmio_mmc_host *host)
 
 	/* The only sg element can be unaligned, use our bounce buffer then */
 	if (!aligned) {
-		unsigned long flags;
-		void *sg_vaddr = tmio_mmc_kmap_atomic(sg, &flags);
+		void *sg_vaddr = tmio_mmc_kmap_atomic(sg);
 
 		sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
 		memcpy(host->bounce_buf, sg_vaddr, host->bounce_sg.length);
-		tmio_mmc_kunmap_atomic(sg, &flags, sg_vaddr);
+		tmio_mmc_kunmap_atomic(sg, sg_vaddr);
 		host->sg_ptr = &host->bounce_sg;
 		sg = host->sg_ptr;
 	}

drivers/mmc/host/tmio_mmc.h

@@ -204,18 +204,15 @@ void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i);
 void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i);
 irqreturn_t tmio_mmc_irq(int irq, void *devid);
 
-static inline char *tmio_mmc_kmap_atomic(struct scatterlist *sg,
-					 unsigned long *flags)
+static inline char *tmio_mmc_kmap_atomic(struct scatterlist *sg)
 {
-	local_irq_save(*flags);
 	return kmap_atomic(sg_page(sg)) + sg->offset;
 }
 
 static inline void tmio_mmc_kunmap_atomic(struct scatterlist *sg,
-					  unsigned long *flags, void *virt)
+					  void *virt)
 {
 	kunmap_atomic(virt - sg->offset);
-	local_irq_restore(*flags);
 }
 
 #ifdef CONFIG_PM

drivers/mmc/host/tmio_mmc_core.c

@@ -412,7 +412,6 @@ static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
 	void *sg_virt;
 	unsigned short *buf;
 	unsigned int count;
-	unsigned long flags;
 
 	if (host->dma_on) {
 		pr_err("PIO IRQ in DMA mode!\n");
@@ -422,7 +421,7 @@
 		return;
 	}
 
-	sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags);
+	sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr);
 	buf = (unsigned short *)(sg_virt + host->sg_off);
 
 	count = host->sg_ptr->length - host->sg_off;
@@ -437,7 +436,7 @@
 
 	host->sg_off += count;
 
-	tmio_mmc_kunmap_atomic(host->sg_ptr, &flags, sg_virt);
+	tmio_mmc_kunmap_atomic(host->sg_ptr, sg_virt);
 
 	if (host->sg_off == host->sg_ptr->length)
 		tmio_mmc_next_sg(host);
@@ -446,11 +445,10 @@
 static void tmio_mmc_check_bounce_buffer(struct tmio_mmc_host *host)
 {
 	if (host->sg_ptr == &host->bounce_sg) {
-		unsigned long flags;
-		void *sg_vaddr = tmio_mmc_kmap_atomic(host->sg_orig, &flags);
+		void *sg_vaddr = tmio_mmc_kmap_atomic(host->sg_orig);
 
 		memcpy(sg_vaddr, host->bounce_buf, host->bounce_sg.length);
-		tmio_mmc_kunmap_atomic(host->sg_orig, &flags, sg_vaddr);
+		tmio_mmc_kunmap_atomic(host->sg_orig, sg_vaddr);
 	}
 }