mmc_block: bounce buffer highmem support

Support highmem pages in the bounce buffer code by using the
sg_copy_from/to_buffer() functions.

Signed-off-by: Pierre Ossman <drzeus@drzeus.cx>
commit 2ff1fa6791
parent 719a61b452
Author: Pierre Ossman
Date:   2008-07-22 14:35:42 +02:00

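Why this works: the old open-coded copy_sg() addressed both scatterlists through sg_virt(), which only yields a usable kernel address for lowmem pages, so the queue had to be capped at BLK_BOUNCE_HIGH. sg_copy_to_buffer()/sg_copy_from_buffer() map each scatterlist page as they walk the list, so the request segments may live in highmem and the limit can be relaxed to BLK_BOUNCE_ANY. A minimal sketch of the bounce pattern, assuming the 2.6.26-era scatterlist API (the helper names below are illustrative and not part of the driver):

#include <linux/scatterlist.h>
#include <linux/irqflags.h>

/*
 * Illustrative helpers (hypothetical names): copy a possibly
 * highmem-backed scatterlist into a contiguous bounce buffer before
 * a write, and back out of it after a read.  The sg_copy_* helpers
 * kmap each sg page internally, so sg_virt() is never needed.
 */
static void bounce_out(struct scatterlist *sg, unsigned int sg_len,
		       void *bounce_buf, size_t buflen)
{
	unsigned long flags;

	local_irq_save(flags);	/* the copy takes atomic kmaps */
	sg_copy_to_buffer(sg, sg_len, bounce_buf, buflen);
	local_irq_restore(flags);
}

static void bounce_in(struct scatterlist *sg, unsigned int sg_len,
		      void *bounce_buf, size_t buflen)
{
	unsigned long flags;

	local_irq_save(flags);
	sg_copy_from_buffer(sg, sg_len, bounce_buf, buflen);
	local_irq_restore(flags);
}

In the driver the same two calls operate on mq->bounce_sg and mq->bounce_buf, with the length taken from the single sg entry built in mmc_queue_map_sg().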

--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -148,7 +148,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
 				printk(KERN_WARNING "%s: unable to allocate "
 					"bounce buffer\n", mmc_card_name(card));
 			} else {
-				blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_HIGH);
+				blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
 				blk_queue_max_sectors(mq->queue, bouncesz / 512);
 				blk_queue_max_phys_segments(mq->queue, bouncesz / 512);
 				blk_queue_max_hw_segments(mq->queue, bouncesz / 512);
@@ -290,55 +290,15 @@ void mmc_queue_resume(struct mmc_queue *mq)
 	}
 }
 
-static void copy_sg(struct scatterlist *dst, unsigned int dst_len,
-	struct scatterlist *src, unsigned int src_len)
-{
-	unsigned int chunk;
-	char *dst_buf, *src_buf;
-	unsigned int dst_size, src_size;
-
-	dst_buf = NULL;
-	src_buf = NULL;
-	dst_size = 0;
-	src_size = 0;
-
-	while (src_len) {
-		BUG_ON(dst_len == 0);
-
-		if (dst_size == 0) {
-			dst_buf = sg_virt(dst);
-			dst_size = dst->length;
-		}
-
-		if (src_size == 0) {
-			src_buf = sg_virt(src);
-			src_size = src->length;
-		}
-
-		chunk = min(dst_size, src_size);
-
-		memcpy(dst_buf, src_buf, chunk);
-
-		dst_buf += chunk;
-		src_buf += chunk;
-		dst_size -= chunk;
-		src_size -= chunk;
-
-		if (dst_size == 0) {
-			dst++;
-			dst_len--;
-		}
-
-		if (src_size == 0) {
-			src++;
-			src_len--;
-		}
-	}
-}
-
+/*
+ * Prepare the sg list(s) to be handed off to the host driver
+ */
 unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
 {
 	unsigned int sg_len;
+	size_t buflen;
+	struct scatterlist *sg;
+	int i;
 
 	if (!mq->bounce_buf)
 		return blk_rq_map_sg(mq->queue, mq->req, mq->sg);
@@ -349,47 +309,52 @@ unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
 
 	mq->bounce_sg_len = sg_len;
 
-	/*
-	 * Shortcut in the event we only get a single entry.
-	 */
-	if (sg_len == 1) {
-		memcpy(mq->sg, mq->bounce_sg, sizeof(struct scatterlist));
-		return 1;
-	}
+	buflen = 0;
+	for_each_sg(mq->bounce_sg, sg, sg_len, i)
+		buflen += sg->length;
 
-	sg_init_one(mq->sg, mq->bounce_buf, 0);
-
-	while (sg_len) {
-		mq->sg[0].length += mq->bounce_sg[sg_len - 1].length;
-		sg_len--;
-	}
+	sg_init_one(mq->sg, mq->bounce_buf, buflen);
 
 	return 1;
 }
 
+/*
+ * If writing, bounce the data to the buffer before the request
+ * is sent to the host driver
+ */
 void mmc_queue_bounce_pre(struct mmc_queue *mq)
 {
+	unsigned long flags;
+
 	if (!mq->bounce_buf)
 		return;
 
-	if (mq->bounce_sg_len == 1)
-		return;
 	if (rq_data_dir(mq->req) != WRITE)
 		return;
 
-	copy_sg(mq->sg, 1, mq->bounce_sg, mq->bounce_sg_len);
+	local_irq_save(flags);
+	sg_copy_to_buffer(mq->bounce_sg, mq->bounce_sg_len,
+		mq->bounce_buf, mq->sg[0].length);
+	local_irq_restore(flags);
 }
 
+/*
+ * If reading, bounce the data from the buffer after the request
+ * has been handled by the host driver
+ */
 void mmc_queue_bounce_post(struct mmc_queue *mq)
 {
+	unsigned long flags;
+
 	if (!mq->bounce_buf)
 		return;
 
-	if (mq->bounce_sg_len == 1)
-		return;
 	if (rq_data_dir(mq->req) != READ)
 		return;
 
-	copy_sg(mq->bounce_sg, mq->bounce_sg_len, mq->sg, 1);
+	local_irq_save(flags);
+	sg_copy_from_buffer(mq->bounce_sg, mq->bounce_sg_len,
+		mq->bounce_buf, mq->sg[0].length);
+	local_irq_restore(flags);
 }
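A few consequences of the new code, as read from the diff: the sg_len == 1 shortcut is gone, so even single-segment requests are now staged through the bounce buffer; mmc_queue_map_sg() always hands the host driver one sg entry spanning the whole transfer, which is why mq->sg[0].length can serve as the copy length in both bounce paths; and the local_irq_save()/local_irq_restore() pairs around the copies appear to be needed because sg_copy_to_buffer()/sg_copy_from_buffer() take atomic kmaps while walking the list.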