// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  linux/drivers/mmc/core/mmc_ops.c
 *
 *  Copyright 2006-2007 Pierre Ossman
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/scatterlist.h>

#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>

#include "core.h"
#include "card.h"
#include "host.h"
#include "mmc_ops.h"

#define MMC_BKOPS_TIMEOUT_MS		(120 * 1000) /* 120s */
#define MMC_SANITIZE_TIMEOUT_MS		(240 * 1000) /* 240s */
#define MMC_OP_COND_PERIOD_US		(1 * 1000) /* 1ms */
#define MMC_OP_COND_TIMEOUT_MS		1000 /* 1s */

static const u8 tuning_blk_pattern_4bit[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};

static const u8 tuning_blk_pattern_8bit[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};

struct mmc_busy_data {
	struct mmc_card *card;
	bool retry_crc_err;
	enum mmc_busy_cmd busy_cmd;
};

struct mmc_op_cond_busy_data {
	struct mmc_host *host;
	u32 ocr;
	struct mmc_command *cmd;
};

int __mmc_send_status(struct mmc_card *card, u32 *status, unsigned int retries)
{
	int err;
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SEND_STATUS;
	if (!mmc_host_is_spi(card->host))
		cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, retries);
	if (err)
		return err;

	/* NOTE: callers are required to understand the difference
	 * between "native" and SPI format status words!
	 */
	if (status)
		*status = cmd.resp[0];

	return 0;
}
EXPORT_SYMBOL_GPL(__mmc_send_status);

int mmc_send_status(struct mmc_card *card, u32 *status)
{
	return __mmc_send_status(card, status, MMC_CMD_RETRIES);
}
EXPORT_SYMBOL_GPL(mmc_send_status);
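
/*
 * Illustrative sketch (not from the original source): a native-mode caller
 * usually decodes the returned R1 status with the R1_* helpers, whereas an
 * SPI-mode caller must interpret the R1_SPI_*/R2_SPI_* bits instead, e.g.:
 *
 *	u32 status;
 *	bool in_tran = false;
 *
 *	err = mmc_send_status(card, &status);
 *	if (!err && !mmc_host_is_spi(card->host))
 *		in_tran = R1_CURRENT_STATE(status) == R1_STATE_TRAN;
 */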

static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SELECT_CARD;

	if (card) {
		cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
	} else {
		cmd.arg = 0;
		cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
	}

	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
}

int mmc_select_card(struct mmc_card *card)
{
	return _mmc_select_card(card->host, card);
}

int mmc_deselect_cards(struct mmc_host *host)
{
	return _mmc_select_card(host, NULL);
}

/*
 * Write the value specified in the device tree or board code into the optional
 * 16 bit Driver Stage Register. This can be used to tune rise/fall times and
 * drive strength of the DAT and CMD outputs. The actual meaning of a given
 * value is hardware dependent.
 * The presence of the DSR register can be determined from the CSD register,
 * bit 76.
 */
int mmc_set_dsr(struct mmc_host *host)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SET_DSR;

	cmd.arg = (host->dsr << 16) | 0xffff;
	cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;

	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
}
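
/*
 * Illustrative sketch (assumptions, not part of the original file): host->dsr
 * is normally populated before card init, e.g. by board code or from an
 * optional "dsr" device-tree property, and CMD4 is only worth issuing when
 * the card actually implements the DSR:
 *
 *	host->dsr = 0x0404;	// hypothetical driver-stage value
 *	host->dsr_req = true;
 *	...
 *	if (card->csd.dsr_imp && host->dsr_req)
 *		mmc_set_dsr(host);
 */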

int mmc_go_idle(struct mmc_host *host)
{
	int err;
	struct mmc_command cmd = {};

	/*
	 * Non-SPI hosts need to prevent chipselect going active during
	 * GO_IDLE; that would put chips into SPI mode. Remind them of
	 * that in case of hardware that won't pull up DAT3/nCS otherwise.
	 *
	 * SPI hosts ignore ios.chip_select; it's managed according to
	 * rules that must accommodate non-MMC slaves which this layer
	 * won't even know about.
	 */
	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_HIGH);
		mmc_delay(1);
	}

	cmd.opcode = MMC_GO_IDLE_STATE;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;

	err = mmc_wait_for_cmd(host, &cmd, 0);

	mmc_delay(1);

	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_DONTCARE);
		mmc_delay(1);
	}

	host->use_spi_crc = 0;

	return err;
}

static int __mmc_send_op_cond_cb(void *cb_data, bool *busy)
{
	struct mmc_op_cond_busy_data *data = cb_data;
	struct mmc_host *host = data->host;
	struct mmc_command *cmd = data->cmd;
	u32 ocr = data->ocr;
	int err = 0;

	err = mmc_wait_for_cmd(host, cmd, 0);
	if (err)
		return err;

	if (mmc_host_is_spi(host)) {
		if (!(cmd->resp[0] & R1_SPI_IDLE)) {
			*busy = false;
			return 0;
		}
	} else {
		if (cmd->resp[0] & MMC_CARD_BUSY) {
			*busy = false;
			return 0;
		}
	}

	*busy = true;

	/*
	 * According to eMMC specification v5.1 section 6.4.3, we
	 * should issue CMD1 repeatedly in the idle state until
	 * the eMMC is ready. Otherwise some eMMC devices seem to enter
	 * the inactive mode after mmc_init_card() issued CMD0 when
	 * the eMMC device is busy.
	 */
	if (!ocr && !mmc_host_is_spi(host))
		cmd->arg = cmd->resp[0] | BIT(30);

	return 0;
}
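
/*
 * Worked example (illustrative, not from the original source): if a busy eMMC
 * reports an OCR of 0x00ff8080 in resp[0] and the caller passed ocr == 0, the
 * retry path above re-issues CMD1 with arg = 0x40ff8080, i.e. the card's own
 * voltage window with bit 30 (sector access mode) set.
 */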

int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
{
	struct mmc_command cmd = {};
	int err = 0;
	struct mmc_op_cond_busy_data cb_data = {
		.host = host,
		.ocr = ocr,
		.cmd = &cmd
	};

	cmd.opcode = MMC_SEND_OP_COND;
	cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;

	err = __mmc_poll_for_busy(host, MMC_OP_COND_PERIOD_US,
				  MMC_OP_COND_TIMEOUT_MS,
				  &__mmc_send_op_cond_cb, &cb_data);
	if (err)
		return err;

	if (rocr && !mmc_host_is_spi(host))
		*rocr = cmd.resp[0];

	return err;
}
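
/*
 * Illustrative usage sketch (assumptions, not part of the original file): the
 * MMC attach path typically probes the OCR first and then restarts power-up
 * polling with the negotiated voltage window, roughly:
 *
 *	u32 ocr, rocr;
 *
 *	mmc_go_idle(host);			// CMD0
 *	err = mmc_send_op_cond(host, 0, &ocr);	// query the OCR only
 *	...
 *	err = mmc_send_op_cond(host, ocr | (1 << 30), &rocr);
 */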

int mmc_set_relative_addr(struct mmc_card *card)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SET_RELATIVE_ADDR;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	return mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
}

static int
mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
{
	int err;
	struct mmc_command cmd = {};

	cmd.opcode = opcode;
	cmd.arg = arg;
	cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (err)
		return err;

	memcpy(cxd, cmd.resp, sizeof(u32) * 4);

	return 0;
}

/*
 * NOTE: for *buf the caller is required to use a DMA-capable buffer,
 * or an on-stack buffer (at the cost of some overhead in the callee).
 */
int mmc_send_adtc_data(struct mmc_card *card, struct mmc_host *host, u32 opcode,
		       u32 args, void *buf, unsigned len)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.arg = args;

	/* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID. Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;

	sg_init_one(&sg, buf, len);

	if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
		/*
		 * The spec states that CSD and CID accesses have a timeout
		 * of 64 clock cycles.
		 */
		data.timeout_ns = 0;
		data.timeout_clks = 64;
	} else
		mmc_set_data_timeout(&data, card);

	mmc_wait_for_req(host, &mrq);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return 0;
}
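
/*
 * Minimal usage sketch (illustrative only): because the buffer is mapped into
 * a scatterlist, callers conventionally pass a heap-allocated bounce buffer,
 * in the same style as mmc_spi_send_cxd() and mmc_get_ext_csd() below:
 *
 *	u8 *buf = kzalloc(512, GFP_KERNEL);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	err = mmc_send_adtc_data(card, card->host, MMC_SEND_EXT_CSD, 0,
 *				 buf, 512);
 *	...
 *	kfree(buf);
 */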

static int mmc_spi_send_cxd(struct mmc_host *host, u32 *cxd, u32 opcode)
{
	int ret, i;
	__be32 *cxd_tmp;

	cxd_tmp = kzalloc(16, GFP_KERNEL);
	if (!cxd_tmp)
		return -ENOMEM;

	ret = mmc_send_adtc_data(NULL, host, opcode, 0, cxd_tmp, 16);
	if (ret)
		goto err;

	for (i = 0; i < 4; i++)
		cxd[i] = be32_to_cpu(cxd_tmp[i]);

err:
	kfree(cxd_tmp);
	return ret;
}

int mmc_send_csd(struct mmc_card *card, u32 *csd)
{
	if (mmc_host_is_spi(card->host))
		return mmc_spi_send_cxd(card->host, csd, MMC_SEND_CSD);

	return mmc_send_cxd_native(card->host, card->rca << 16, csd,
				   MMC_SEND_CSD);
}

int mmc_send_cid(struct mmc_host *host, u32 *cid)
{
	if (mmc_host_is_spi(host))
		return mmc_spi_send_cxd(host, cid, MMC_SEND_CID);

	return mmc_send_cxd_native(host, 0, cid, MMC_ALL_SEND_CID);
}

int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
{
	int err;
	u8 *ext_csd;

	if (!card || !new_ext_csd)
		return -EINVAL;

	if (!mmc_can_ext_csd(card))
		return -EOPNOTSUPP;

	/*
	 * As the ext_csd is so large and mostly unused, we don't store the
	 * raw block in mmc_card.
	 */
	ext_csd = kzalloc(512, GFP_KERNEL);
	if (!ext_csd)
		return -ENOMEM;

	err = mmc_send_adtc_data(card, card->host, MMC_SEND_EXT_CSD, 0, ext_csd,
				 512);
	if (err)
		kfree(ext_csd);
	else
		*new_ext_csd = ext_csd;

	return err;
}
EXPORT_SYMBOL_GPL(mmc_get_ext_csd);
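
/*
 * Illustrative caller sketch (not from the original source): on success the
 * caller owns the 512-byte EXT_CSD buffer and is responsible for freeing it:
 *
 *	u8 *ext_csd;
 *
 *	err = mmc_get_ext_csd(card, &ext_csd);
 *	if (!err) {
 *		pr_debug("EXT_CSD rev %u\n", ext_csd[EXT_CSD_REV]);
 *		kfree(ext_csd);
 *	}
 */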
|
|
|
|
|
MMC core learns about SPI
Teach the MMC/SD/SDIO core about using SPI mode.
- Use mmc_host_is_spi() so enumeration works through SPI signaling
and protocols, not just the native versions.
- Provide the SPI response type flags with each request issued,
including requests from the new lock/unlock code.
- Understand that cmd->resp[0] and mmc_get_status() results for SPI
return different values than for "native" MMC/SD protocol; this
affects resetting, checking card lock status, and some others.
- Understand that some commands act a bit differently ... notably:
* OP_COND command doesn't return the OCR
* APP_CMD status doesn't have an R1_APP_CMD analogue
Those changes required some new and updated primitives:
- Provide utilities to access two SPI-only requests, and one
request that wasn't previously needed:
* mmc_spi_read_ocr() ... SPI only
* mmc_spi_set_crc() ... SPI only (override by module parm)
* mmc_send_cid() ... for use without broadcast mode
- Updated internal routines:
* Previous mmc_send_csd() modified into mmc_send_cxd_native();
it uses native "R2" responses, which include 16 bytes of data.
* Previous mmc_send_ext_csd() becomes new mmc_send_cxd_data()
helper for command-and-data access
* Bugfix to that mmc_send_cxd_data() code: dma-to-stack is
unsafe/nonportable, so kmalloc a bounce buffer instead.
- Modified mmc_send_ext_csd() now uses mmc_send_cxd_data() helper
- Modified mmc_send_csd(), and new mmc_spi_send_cid(), routines use
those helper routines based on whether they're native or SPI
The newest categories of cards supported by the MMC stack aren't expected
to work yet with SPI: MMC or SD cards with over 4GB data, and SDIO.
All those cards support SPI mode, so eventually they should work too.
Signed-off-by: David Brownell <dbrownell@users.sourceforge.net>
Signed-off-by: Pierre Ossman <drzeus@drzeus.cx>
2007-08-08 16:11:32 +00:00
|
|
|
int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
|
|
|
|
{
|
2016-12-19 11:51:18 +00:00
|
|
|
struct mmc_command cmd = {};
|
MMC core learns about SPI
Teach the MMC/SD/SDIO core about using SPI mode.
- Use mmc_host_is_spi() so enumeration works through SPI signaling
and protocols, not just the native versions.
- Provide the SPI response type flags with each request issued,
including requests from the new lock/unlock code.
- Understand that cmd->resp[0] and mmc_get_status() results for SPI
return different values than for "native" MMC/SD protocol; this
affects resetting, checking card lock status, and some others.
- Understand that some commands act a bit differently ... notably:
* OP_COND command doesn't return the OCR
* APP_CMD status doesn't have an R1_APP_CMD analogue
Those changes required some new and updated primitives:
- Provide utilities to access two SPI-only requests, and one
request that wasn't previously needed:
* mmc_spi_read_ocr() ... SPI only
* mmc_spi_set_crc() ... SPI only (override by module parm)
* mmc_send_cid() ... for use without broadcast mode
- Updated internal routines:
* Previous mmc_send_csd() modified into mmc_send_cxd_native();
it uses native "R2" responses, which include 16 bytes of data.
* Previous mmc_send_ext_csd() becomes new mmc_send_cxd_data()
helper for command-and-data access
* Bugfix to that mmc_send_cxd_data() code: dma-to-stack is
unsafe/nonportable, so kmalloc a bounce buffer instead.
- Modified mmc_send_ext_csd() now uses mmc_send_cxd_data() helper
- Modified mmc_send_csd(), and new mmc_spi_send_cid(), routines use
those helper routines based on whether they're native or SPI
The newest categories of cards supported by the MMC stack aren't expected
to work yet with SPI: MMC or SD cards with over 4GB data, and SDIO.
All those cards support SPI mode, so eventually they should work too.
Signed-off-by: David Brownell <dbrownell@users.sourceforge.net>
Signed-off-by: Pierre Ossman <drzeus@drzeus.cx>
2007-08-08 16:11:32 +00:00
|
|
|
int err;
|
|
|
|
|
|
|
|
cmd.opcode = MMC_SPI_READ_OCR;
|
|
|
|
cmd.arg = highcap ? (1 << 30) : 0;
|
|
|
|
cmd.flags = MMC_RSP_SPI_R3;
|
|
|
|
|
|
|
|
err = mmc_wait_for_cmd(host, &cmd, 0);
|
|
|
|
|
|
|
|
*ocrp = cmd.resp[1];
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
|
|
|
|
{
|
2016-12-19 11:51:18 +00:00
|
|
|
struct mmc_command cmd = {};
|
MMC core learns about SPI
Teach the MMC/SD/SDIO core about using SPI mode.
- Use mmc_host_is_spi() so enumeration works through SPI signaling
and protocols, not just the native versions.
- Provide the SPI response type flags with each request issued,
including requests from the new lock/unlock code.
- Understand that cmd->resp[0] and mmc_get_status() results for SPI
return different values than for "native" MMC/SD protocol; this
affects resetting, checking card lock status, and some others.
- Understand that some commands act a bit differently ... notably:
* OP_COND command doesn't return the OCR
* APP_CMD status doesn't have an R1_APP_CMD analogue
Those changes required some new and updated primitives:
- Provide utilities to access two SPI-only requests, and one
request that wasn't previously needed:
* mmc_spi_read_ocr() ... SPI only
* mmc_spi_set_crc() ... SPI only (override by module parm)
* mmc_send_cid() ... for use without broadcast mode
- Updated internal routines:
* Previous mmc_send_csd() modified into mmc_send_cxd_native();
it uses native "R2" responses, which include 16 bytes of data.
* Previous mmc_send_ext_csd() becomes new mmc_send_cxd_data()
helper for command-and-data access
* Bugfix to that mmc_send_cxd_data() code: dma-to-stack is
unsafe/nonportable, so kmalloc a bounce buffer instead.
- Modified mmc_send_ext_csd() now uses mmc_send_cxd_data() helper
- Modified mmc_send_csd(), and new mmc_spi_send_cid(), routines use
those helper routines based on whether they're native or SPI
The newest categories of cards supported by the MMC stack aren't expected
to work yet with SPI: MMC or SD cards with over 4GB data, and SDIO.
All those cards support SPI mode, so eventually they should work too.
Signed-off-by: David Brownell <dbrownell@users.sourceforge.net>
Signed-off-by: Pierre Ossman <drzeus@drzeus.cx>
2007-08-08 16:11:32 +00:00
|
|
|
int err;
|
|
|
|
|
|
|
|
cmd.opcode = MMC_SPI_CRC_ON_OFF;
|
|
|
|
cmd.flags = MMC_RSP_SPI_R1;
|
|
|
|
cmd.arg = use_crc;
|
|
|
|
|
|
|
|
err = mmc_wait_for_cmd(host, &cmd, 0);
|
|
|
|
if (!err)
|
|
|
|
host->use_spi_crc = use_crc;
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2016-10-19 09:04:42 +00:00
|
|
|
static int mmc_switch_status_error(struct mmc_host *host, u32 status)
|
2015-05-07 10:10:19 +00:00
|
|
|
{
|
|
|
|
if (mmc_host_is_spi(host)) {
|
|
|
|
if (status & R1_SPI_ILLEGAL_COMMAND)
|
|
|
|
return -EBADMSG;
|
|
|
|
} else {
|
mmc: core: Adjust and reuse the macro of R1_STATUS(x)
R1_STATUS(x) is now only used by ioctl_rpmb_card_status_poll(),
which checks as many bits as possible. But according to the spec,
bits 17 and 18 should be ignored, as should bit 14, which is
reserved (must be set to 0); these rules apply to all places that
check the device status. So change its mask from 0xFFFFE000 to
0xFFF9A000.
As a bonus, we reuse it for mmc_do_erase() as well as
mmc_switch_status_error().
(1) Currently mmc_switch_status_error() doesn't check bit 25, which
means the device is locked but was not unlocked by CMD42 prior to an
operation that needs a busy check, which is also not allowed.
(2) mmc_do_erase() also forgot to check bit 15, WP_ERASE_SKIP.
The spec says "Only partial address space was erased due to existing
write protected blocks.", which obviously means we should fail this I/O.
Otherwise, the partially erased data stored in nonvolatile flash violates
data integrity from the I/O owner's point of view, which will probably
confuse it when the data is used later.
So reusing R1_STATUS for them not only improves readability but also
solves real problems.
Signed-off-by: Shawn Lin <shawn.lin@rock-chips.com>
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
2018-05-30 02:11:43 +00:00
|
|
|
if (R1_STATUS(status))
|
2015-05-07 10:10:19 +00:00
|
|
|
pr_warn("%s: unexpected status %#x after switch\n",
|
|
|
|
mmc_hostname(host), status);
|
|
|
|
if (status & R1_SWITCH_ERROR)
|
|
|
|
return -EBADMSG;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
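The mask change described in the R1_STATUS commit above is easy to sanity-check on its own; a stand-alone user-space sketch, using only the two mask values quoted in the commit text:

	#include <assert.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned int old_mask = 0xFFFFE000;	/* bits 13..31 */
		unsigned int dropped  = (1u << 14) | (1u << 17) | (1u << 18);

		/* Removing reserved bit 14 and ignored bits 17/18 from the
		 * old mask yields the new R1_STATUS() mask. */
		assert((old_mask & ~dropped) == 0xFFF9A000);
		printf("R1_STATUS mask: %#x\n", old_mask & ~dropped);
		return 0;
	}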
|
|
|
|
|
2016-10-19 09:04:42 +00:00
|
|
|
/* Caller must hold re-tuning */
|
2020-02-04 08:54:40 +00:00
|
|
|
int mmc_switch_status(struct mmc_card *card, bool crc_err_fatal)
|
2016-10-19 09:04:42 +00:00
|
|
|
{
|
|
|
|
u32 status;
|
|
|
|
int err;
|
|
|
|
|
|
|
|
err = mmc_send_status(card, &status);
|
2016-12-02 11:16:35 +00:00
|
|
|
if (!crc_err_fatal && err == -EILSEQ)
|
|
|
|
return 0;
|
2016-10-19 09:04:42 +00:00
|
|
|
if (err)
|
|
|
|
return err;
|
|
|
|
|
|
|
|
return mmc_switch_status_error(card->host, status);
|
|
|
|
}
|
|
|
|
|
2021-05-04 16:12:15 +00:00
|
|
|
static int mmc_busy_cb(void *cb_data, bool *busy)
|
2020-02-04 08:54:42 +00:00
|
|
|
{
|
2021-05-04 16:12:15 +00:00
|
|
|
struct mmc_busy_data *data = cb_data;
|
|
|
|
struct mmc_host *host = data->card->host;
|
2020-02-04 08:54:42 +00:00
|
|
|
u32 status = 0;
|
|
|
|
int err;
|
|
|
|
|
2021-07-02 13:42:27 +00:00
|
|
|
if (data->busy_cmd != MMC_BUSY_IO && host->ops->card_busy) {
|
2020-02-04 08:54:42 +00:00
|
|
|
*busy = host->ops->card_busy(host);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2021-05-04 16:12:15 +00:00
|
|
|
err = mmc_send_status(data->card, &status);
|
|
|
|
if (data->retry_crc_err && err == -EILSEQ) {
|
2020-02-04 08:54:42 +00:00
|
|
|
*busy = true;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
if (err)
|
|
|
|
return err;
|
|
|
|
|
2021-05-04 16:12:15 +00:00
|
|
|
switch (data->busy_cmd) {
|
2020-02-04 08:54:45 +00:00
|
|
|
case MMC_BUSY_CMD6:
|
2021-05-04 16:12:15 +00:00
|
|
|
err = mmc_switch_status_error(host, status);
|
2020-02-04 08:54:45 +00:00
|
|
|
break;
|
|
|
|
case MMC_BUSY_ERASE:
|
|
|
|
err = R1_STATUS(status) ? -EIO : 0;
|
|
|
|
break;
|
2020-02-04 08:54:47 +00:00
|
|
|
case MMC_BUSY_HPI:
|
2021-05-11 10:13:59 +00:00
|
|
|
case MMC_BUSY_EXTR_SINGLE:
|
2021-07-02 13:42:27 +00:00
|
|
|
case MMC_BUSY_IO:
|
2020-02-04 08:54:47 +00:00
|
|
|
break;
|
2020-02-04 08:54:45 +00:00
|
|
|
default:
|
|
|
|
err = -EINVAL;
|
|
|
|
}
|
|
|
|
|
2020-02-04 08:54:42 +00:00
|
|
|
if (err)
|
|
|
|
return err;
|
|
|
|
|
mmc: core: Update CMD13 busy check for CMD6 commands
Through mmc_poll_for_busy() a CMD13 may be sent to get the status of the
(e)MMC card. If the state of the card is R1_STATE_PRG, the card is
considered to be busy, which means we continue to poll with CMD13. This
seems to be sufficient, but it's also unnecessarily fragile, as it means a
new command/request could potentially be sent to the card while it's in an
unknown state.
To try to improve the situation, but also to move towards a more consistent
CMD13 polling behaviour in the mmc core, let's deploy the same policy we
use for regular I/O write requests. In other words, let's check that the
card returns to R1_STATE_TRAN and that the R1_READY_FOR_DATA bit is set in
the CMD13 response, before exiting the polling loop.
Note that, potentially, this changed behaviour could lead to unnecessarily
waiting for the timeout to expire, if the card, for some reason, moves to an
unexpected error state. However, as we bail out from the polling loop when
the R1_SWITCH_ERROR bit is set or when the CMD13 fails, this shouldn't be an
issue.
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
Tested-by: Baolin Wang <baolin.wang7@gmail.com>
Tested-by: Ludovic Barre <ludovic.barre@st.com>
Reviewed-by: Ludovic Barre <ludovic.barre@st.com>
Link: https://lore.kernel.org/r/20200204085449.32585-8-ulf.hansson@linaro.org
2020-02-04 08:54:44 +00:00
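/*
 * Per the commit text above, "ready for data" means the CMD13 response
 * shows R1_CURRENT_STATE(status) == R1_STATE_TRAN with R1_READY_FOR_DATA
 * set; mmc_ready_for_data() wraps that check.
 */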
|
|
|
*busy = !mmc_ready_for_data(status);
|
2020-02-04 08:54:42 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2022-03-04 10:56:56 +00:00
|
|
|
int __mmc_poll_for_busy(struct mmc_host *host, unsigned int period_us,
|
|
|
|
unsigned int timeout_ms,
|
2021-05-04 16:12:15 +00:00
|
|
|
int (*busy_cb)(void *cb_data, bool *busy),
|
|
|
|
void *cb_data)
|
2016-10-19 11:20:49 +00:00
|
|
|
{
|
|
|
|
int err;
|
|
|
|
unsigned long timeout;
|
2022-03-04 10:56:56 +00:00
|
|
|
unsigned int udelay = period_us ? period_us : 32, udelay_max = 32768;
|
2016-10-19 11:20:49 +00:00
|
|
|
bool expired = false;
|
|
|
|
bool busy = false;
|
|
|
|
|
|
|
|
timeout = jiffies + msecs_to_jiffies(timeout_ms) + 1;
|
|
|
|
do {
|
|
|
|
/*
|
2016-10-19 14:15:31 +00:00
|
|
|
* Due to the possibility of being preempted while polling,
|
|
|
|
* check the expiration time first.
|
2016-10-19 11:20:49 +00:00
|
|
|
*/
|
|
|
|
expired = time_after(jiffies, timeout);
|
2016-10-19 14:15:31 +00:00
|
|
|
|
2021-05-04 16:12:15 +00:00
|
|
|
err = (*busy_cb)(cb_data, &busy);
|
2020-02-04 08:54:42 +00:00
|
|
|
if (err)
|
|
|
|
return err;
|
2016-10-19 11:20:49 +00:00
|
|
|
|
2016-10-19 14:15:31 +00:00
|
|
|
/* Timeout if the device still remains busy. */
|
|
|
|
if (expired && busy) {
|
|
|
|
pr_err("%s: Card stuck being busy! %s\n",
|
2016-10-19 11:20:49 +00:00
|
|
|
mmc_hostname(host), __func__);
|
|
|
|
return -ETIMEDOUT;
|
|
|
|
}
|
2020-02-04 08:54:38 +00:00
|
|
|
|
|
|
|
/* Throttle the polling rate to avoid hogging the CPU. */
|
|
|
|
if (busy) {
|
|
|
|
usleep_range(udelay, udelay * 2);
|
|
|
|
if (udelay < udelay_max)
|
|
|
|
udelay *= 2;
|
|
|
|
}
|
2016-10-19 14:15:31 +00:00
|
|
|
} while (busy);
|
2016-10-19 11:20:49 +00:00
|
|
|
|
2016-11-21 14:49:48 +00:00
|
|
|
return 0;
|
2016-10-19 11:20:49 +00:00
|
|
|
}
|
2021-07-02 13:42:29 +00:00
|
|
|
EXPORT_SYMBOL_GPL(__mmc_poll_for_busy);
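Because __mmc_poll_for_busy() is exported, a host or card driver can supply its own busy callback. A rough sketch; the foo_* names and register layout are hypothetical, and only the exported signature above is taken from this file (assumes the usual kernel headers):

	struct foo_priv { void __iomem *base; };	/* hypothetical driver state */

	static int foo_busy_cb(void *cb_data, bool *busy)
	{
		struct foo_priv *priv = cb_data;

		*busy = readl(priv->base + 0x30) & BIT(0);	/* assumed busy bit */
		return 0;
	}

	static int foo_wait_not_busy(struct mmc_host *mmc, struct foo_priv *priv)
	{
		/* Poll every 100us, give up after 500ms. */
		return __mmc_poll_for_busy(mmc, 100, 500, foo_busy_cb, priv);
	}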
|
2016-10-19 11:20:49 +00:00
|
|
|
|
2020-02-04 08:54:45 +00:00
|
|
|
int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
|
2021-05-04 16:12:15 +00:00
|
|
|
bool retry_crc_err, enum mmc_busy_cmd busy_cmd)
|
2020-02-04 08:54:45 +00:00
|
|
|
{
|
2021-11-04 06:32:30 +00:00
|
|
|
struct mmc_host *host = card->host;
|
2021-05-04 16:12:15 +00:00
|
|
|
struct mmc_busy_data cb_data;
|
|
|
|
|
|
|
|
cb_data.card = card;
|
|
|
|
cb_data.retry_crc_err = retry_crc_err;
|
|
|
|
cb_data.busy_cmd = busy_cmd;
|
|
|
|
|
2022-03-04 10:56:56 +00:00
|
|
|
return __mmc_poll_for_busy(host, 0, timeout_ms, &mmc_busy_cb, &cb_data);
|
2020-02-04 08:54:45 +00:00
|
|
|
}
|
2021-07-02 13:42:27 +00:00
|
|
|
EXPORT_SYMBOL_GPL(mmc_poll_for_busy);
|
2020-02-04 08:54:45 +00:00
|
|
|
|
2021-05-04 16:12:12 +00:00
|
|
|
bool mmc_prepare_busy_cmd(struct mmc_host *host, struct mmc_command *cmd,
|
|
|
|
unsigned int timeout_ms)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* If the max_busy_timeout of the host is specified, make sure it's
|
|
|
|
* enough to fit the used timeout_ms. In case it's not, let's instruct
|
|
|
|
* the host to avoid HW busy detection, by converting to an R1 response
|
|
|
|
* instead of an R1B. Note, some hosts require R1B, which also means
|
|
|
|
* they are on their own when it comes to dealing with the busy timeout.
|
|
|
|
*/
|
|
|
|
if (!(host->caps & MMC_CAP_NEED_RSP_BUSY) && host->max_busy_timeout &&
|
|
|
|
(timeout_ms > host->max_busy_timeout)) {
|
|
|
|
cmd->flags = MMC_CMD_AC | MMC_RSP_SPI_R1 | MMC_RSP_R1;
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
cmd->flags = MMC_CMD_AC | MMC_RSP_SPI_R1B | MMC_RSP_R1B;
|
|
|
|
cmd->busy_timeout = timeout_ms;
|
|
|
|
return true;
|
|
|
|
}
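The return value tells the caller whether HW busy detection can be relied on; condensed from how __mmc_switch() below consumes it (sketch only, assuming the obvious locals):

	use_r1b_resp = mmc_prepare_busy_cmd(host, &cmd, timeout_ms);
	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (err)
		return err;

	/* Only skip CMD13 polling when the host really waits out R1B busy. */
	if (!(host->caps & MMC_CAP_WAIT_WHILE_BUSY) || !use_r1b_resp)
		err = mmc_poll_for_busy(card, timeout_ms, false, MMC_BUSY_CMD6);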
|
|
|
|
|
2011-04-11 21:13:43 +00:00
|
|
|
/**
|
2012-09-17 08:42:02 +00:00
|
|
|
* __mmc_switch - modify EXT_CSD register
|
2011-04-11 21:13:43 +00:00
|
|
|
* @card: the MMC card associated with the data transfer
|
|
|
|
* @set: cmd set values
|
|
|
|
* @index: EXT_CSD register index
|
|
|
|
* @value: value to program into EXT_CSD register
|
|
|
|
* @timeout_ms: timeout (ms) for operation performed by register write,
|
|
|
|
* timeout of zero implies maximum possible timeout
|
mmc: core: Enable __mmc_switch() to change bus speed timing for the host
In cases when a speed mode change is requested for mmc cards, a CMD6 is
sent by calling __mmc_switch() during the card initialization. The CMD6
leads to the card entering a busy period. When that is completed, the host
must parse the CMD6 status to find out whether the change of the speed mode
succeeded.
To enable the mmc core to poll the card by using CMD13 to find out when the
busy period is completed, it's reasonable to make sure polling is done with
the mmc host and the mmc card configured to operate at the same selected
bus speed timing.
Therefore, let's extend __mmc_switch() to take yet another parameter, which
allows its callers to update the bus speed timing of the mmc host. In this
way, __mmc_switch() also becomes capable of reading and validating the CMD6
status by sending a CMD13, in cases when that's desired.
If __mmc_switch() encounters a failure, we make sure to restore the old
bus speed timing for the mmc host, before propagating the error code.
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Acked-by: Adrian Hunter <adrian.hunter@intel.com>
2016-11-09 16:33:36 +00:00
|
|
|
* @timing: new timing to change to
|
2013-09-09 09:57:57 +00:00
|
|
|
* @send_status: send status cmd to poll for busy
|
2016-11-08 14:39:13 +00:00
|
|
|
* @retry_crc_err: retry when CRC errors when polling with CMD13 for busy
|
2021-04-14 21:22:35 +00:00
|
|
|
* @retries: number of retries
|
2011-04-11 21:13:43 +00:00
|
|
|
*
|
|
|
|
* Modifies the EXT_CSD register for selected card.
|
|
|
|
*/
|
2012-09-17 08:42:02 +00:00
|
|
|
int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
|
2016-11-09 16:33:36 +00:00
|
|
|
unsigned int timeout_ms, unsigned char timing,
|
2021-04-14 21:22:35 +00:00
|
|
|
bool send_status, bool retry_crc_err, unsigned int retries)
|
2006-12-24 21:46:55 +00:00
|
|
|
{
|
2014-01-28 13:05:39 +00:00
|
|
|
struct mmc_host *host = card->host;
|
2006-12-24 21:46:55 +00:00
|
|
|
int err;
|
2016-12-19 11:51:18 +00:00
|
|
|
struct mmc_command cmd = {};
|
2021-05-04 16:12:12 +00:00
|
|
|
bool use_r1b_resp;
|
2016-11-09 16:33:36 +00:00
|
|
|
unsigned char old_timing = host->ios.timing;
|
2014-01-28 13:15:34 +00:00
|
|
|
|
2015-05-07 10:10:15 +00:00
|
|
|
mmc_retune_hold(host);
|
|
|
|
|
2020-01-22 14:27:47 +00:00
|
|
|
if (!timeout_ms) {
|
|
|
|
pr_warn("%s: unspecified timeout for CMD6 - use generic\n",
|
|
|
|
mmc_hostname(host));
|
|
|
|
timeout_ms = card->ext_csd.generic_cmd6_time;
|
|
|
|
}
|
|
|
|
|
2006-12-24 21:46:55 +00:00
|
|
|
cmd.opcode = MMC_SWITCH;
|
|
|
|
cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
|
|
|
|
(index << 16) |
|
|
|
|
(value << 8) |
|
|
|
|
set;
|
2021-05-04 16:12:12 +00:00
|
|
|
use_r1b_resp = mmc_prepare_busy_cmd(host, &cmd, timeout_ms);
|
2012-09-17 08:42:02 +00:00
|
|
|
|
2021-04-14 21:22:35 +00:00
|
|
|
err = mmc_wait_for_cmd(host, &cmd, retries);
|
2007-07-22 20:18:46 +00:00
|
|
|
if (err)
|
2015-05-07 10:10:15 +00:00
|
|
|
goto out;
|
2006-12-24 21:46:55 +00:00
|
|
|
|
2016-10-19 09:35:12 +00:00
|
|
|
/* If we used HW busy detection above, or the host is SPI, we don't need to poll. */
|
|
|
|
if (((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp) ||
|
2017-01-13 11:05:03 +00:00
|
|
|
mmc_host_is_spi(host))
|
2016-11-09 16:33:36 +00:00
|
|
|
goto out_tim;
|
2013-09-04 12:21:05 +00:00
|
|
|
|
2021-05-04 16:12:14 +00:00
|
|
|
/*
|
|
|
|
* If the host doesn't support HW polling via the ->card_busy() ops and
|
|
|
|
* it's not allowed to poll by using CMD13, then we need to rely on
|
|
|
|
* waiting out the stated timeout and trusting it to be sufficient.
|
|
|
|
*/
|
|
|
|
if (!send_status && !host->ops->card_busy) {
|
|
|
|
mmc_delay(timeout_ms);
|
|
|
|
goto out_tim;
|
|
|
|
}
|
|
|
|
|
2016-10-19 11:20:49 +00:00
|
|
|
/* Let's try to poll to find out when the command is completed. */
|
2021-05-04 16:12:15 +00:00
|
|
|
err = mmc_poll_for_busy(card, timeout_ms, retry_crc_err, MMC_BUSY_CMD6);
|
2017-01-13 11:05:03 +00:00
|
|
|
if (err)
|
|
|
|
goto out;
|
2016-11-09 16:33:36 +00:00
|
|
|
|
|
|
|
out_tim:
|
2017-01-13 11:05:03 +00:00
|
|
|
/* Switch to the new timing before checking the switch status. */
|
|
|
|
if (timing)
|
|
|
|
mmc_set_timing(host, timing);
|
|
|
|
|
|
|
|
if (send_status) {
|
2020-02-04 08:54:40 +00:00
|
|
|
err = mmc_switch_status(card, true);
|
2017-01-13 11:05:03 +00:00
|
|
|
if (err && timing)
|
|
|
|
mmc_set_timing(host, old_timing);
|
|
|
|
}
|
2015-05-07 10:10:15 +00:00
|
|
|
out:
|
|
|
|
mmc_retune_release(host);
|
2009-09-22 23:44:37 +00:00
|
|
|
|
2015-05-07 10:10:15 +00:00
|
|
|
return err;
|
2006-12-24 21:46:55 +00:00
|
|
|
}
|
2012-09-17 08:42:02 +00:00
|
|
|
|
|
|
|
int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
|
|
|
|
unsigned int timeout_ms)
|
|
|
|
{
|
2016-11-09 16:33:36 +00:00
|
|
|
return __mmc_switch(card, set, index, value, timeout_ms, 0,
|
2021-04-14 21:22:35 +00:00
|
|
|
true, false, MMC_CMD_RETRIES);
|
2012-09-17 08:42:02 +00:00
|
|
|
}
|
2011-04-11 21:13:43 +00:00
|
|
|
EXPORT_SYMBOL_GPL(mmc_switch);
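For reference, a typical mmc_switch() call from card init code looks roughly like the following; the HS-timing switch is just an illustrative choice:

	/* Enable high-speed timing in EXT_CSD, using the generic CMD6 timeout. */
	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING, 1,
			 card->ext_csd.generic_cmd6_time);
	if (err)
		pr_warn("%s: switch to high-speed failed\n",
			mmc_hostname(card->host));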
|
2006-12-24 21:46:55 +00:00
|
|
|
|
2015-10-27 06:24:28 +00:00
|
|
|
int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error)
|
2014-11-26 05:05:33 +00:00
|
|
|
{
|
2016-12-19 11:51:18 +00:00
|
|
|
struct mmc_request mrq = {};
|
|
|
|
struct mmc_command cmd = {};
|
|
|
|
struct mmc_data data = {};
|
2014-11-26 05:05:33 +00:00
|
|
|
struct scatterlist sg;
|
2014-12-05 10:31:22 +00:00
|
|
|
struct mmc_ios *ios = &host->ios;
|
2014-11-26 05:05:33 +00:00
|
|
|
const u8 *tuning_block_pattern;
|
|
|
|
int size, err = 0;
|
|
|
|
u8 *data_buf;
|
|
|
|
|
|
|
|
if (ios->bus_width == MMC_BUS_WIDTH_8) {
|
|
|
|
tuning_block_pattern = tuning_blk_pattern_8bit;
|
|
|
|
size = sizeof(tuning_blk_pattern_8bit);
|
|
|
|
} else if (ios->bus_width == MMC_BUS_WIDTH_4) {
|
|
|
|
tuning_block_pattern = tuning_blk_pattern_4bit;
|
|
|
|
size = sizeof(tuning_blk_pattern_4bit);
|
|
|
|
} else
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
data_buf = kzalloc(size, GFP_KERNEL);
|
|
|
|
if (!data_buf)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
mrq.cmd = &cmd;
|
|
|
|
mrq.data = &data;
|
|
|
|
|
|
|
|
cmd.opcode = opcode;
|
|
|
|
cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
|
|
|
|
|
|
|
|
data.blksz = size;
|
|
|
|
data.blocks = 1;
|
|
|
|
data.flags = MMC_DATA_READ;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* According to the tuning specs, the tuning process
|
|
|
|
* normally takes fewer than 40 executions of CMD19,
|
|
|
|
* and the timeout value should be shorter than 150 ms.
|
|
|
|
*/
|
|
|
|
data.timeout_ns = 150 * NSEC_PER_MSEC;
|
|
|
|
|
|
|
|
data.sg = &sg;
|
|
|
|
data.sg_len = 1;
|
|
|
|
sg_init_one(&sg, data_buf, size);
|
|
|
|
|
2014-12-05 10:31:22 +00:00
|
|
|
mmc_wait_for_req(host, &mrq);
|
2014-11-26 05:05:33 +00:00
|
|
|
|
2015-10-27 06:24:28 +00:00
|
|
|
if (cmd_error)
|
|
|
|
*cmd_error = cmd.error;
|
|
|
|
|
2014-11-26 05:05:33 +00:00
|
|
|
if (cmd.error) {
|
|
|
|
err = cmd.error;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (data.error) {
|
|
|
|
err = data.error;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (memcmp(data_buf, tuning_block_pattern, size))
|
|
|
|
err = -EIO;
|
|
|
|
|
|
|
|
out:
|
|
|
|
kfree(data_buf);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(mmc_send_tuning);
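Host drivers typically call mmc_send_tuning() from their ->execute_tuning() hook, stepping through sample phases until the returned block matches the tuning pattern. A rough sketch; foo_set_sample_phase() and the tap count are assumptions:

	static int foo_execute_tuning(struct mmc_host *mmc, u32 opcode)
	{
		int phase;

		for (phase = 0; phase < 16; phase++) {	/* assumed number of taps */
			foo_set_sample_phase(mmc, phase);	/* hypothetical helper */
			if (!mmc_send_tuning(mmc, opcode, NULL))
				return 0;	/* pattern matched at this phase */
		}
		return -EIO;
	}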
|
|
|
|
|
2021-06-08 18:06:20 +00:00
|
|
|
int mmc_send_abort_tuning(struct mmc_host *host, u32 opcode)
|
2016-12-02 13:14:23 +00:00
|
|
|
{
|
2016-12-19 11:51:18 +00:00
|
|
|
struct mmc_command cmd = {};
|
2016-12-02 13:14:23 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* The eMMC specification states that CMD12 can be used to stop a tuning
|
|
|
|
* command, but SD specification does not, so do nothing unless it is
|
|
|
|
* eMMC.
|
|
|
|
*/
|
|
|
|
if (opcode != MMC_SEND_TUNING_BLOCK_HS200)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
cmd.opcode = MMC_STOP_TRANSMISSION;
|
|
|
|
cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* For drivers that override R1 to R1b, set an arbitrary timeout based
|
|
|
|
* on the tuning timeout i.e. 150ms.
|
|
|
|
*/
|
|
|
|
cmd.busy_timeout = 150;
|
|
|
|
|
|
|
|
return mmc_wait_for_cmd(host, &cmd, 0);
|
|
|
|
}
|
2021-06-08 18:06:20 +00:00
|
|
|
EXPORT_SYMBOL_GPL(mmc_send_abort_tuning);
|
2016-12-02 13:14:23 +00:00
|
|
|
|
2010-12-15 07:14:24 +00:00
|
|
|
static int
|
|
|
|
mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
|
|
|
|
u8 len)
|
|
|
|
{
|
2016-12-19 11:51:18 +00:00
|
|
|
struct mmc_request mrq = {};
|
|
|
|
struct mmc_command cmd = {};
|
|
|
|
struct mmc_data data = {};
|
2010-12-15 07:14:24 +00:00
|
|
|
struct scatterlist sg;
|
|
|
|
u8 *data_buf;
|
|
|
|
u8 *test_buf;
|
|
|
|
int i, err;
|
|
|
|
static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
|
|
|
|
static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };
|
|
|
|
|
|
|
|
/* dma onto stack is unsafe/nonportable, but callers to this
|
|
|
|
* routine normally provide temporary on-stack buffers ...
|
|
|
|
*/
|
|
|
|
data_buf = kmalloc(len, GFP_KERNEL);
|
|
|
|
if (!data_buf)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
if (len == 8)
|
|
|
|
test_buf = testdata_8bit;
|
|
|
|
else if (len == 4)
|
|
|
|
test_buf = testdata_4bit;
|
|
|
|
else {
|
2011-10-11 06:14:09 +00:00
|
|
|
pr_err("%s: Invalid bus_width %d\n",
|
2010-12-15 07:14:24 +00:00
|
|
|
mmc_hostname(host), len);
|
|
|
|
kfree(data_buf);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (opcode == MMC_BUS_TEST_W)
|
|
|
|
memcpy(data_buf, test_buf, len);
|
|
|
|
|
|
|
|
mrq.cmd = &cmd;
|
|
|
|
mrq.data = &data;
|
|
|
|
cmd.opcode = opcode;
|
|
|
|
cmd.arg = 0;
|
|
|
|
|
|
|
|
/* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we
|
|
|
|
* rely on callers to never use this with "native" calls for reading
|
|
|
|
* CSD or CID. Native versions of those commands use the R2 type,
|
|
|
|
* not R1 plus a data block.
|
|
|
|
*/
|
|
|
|
cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
|
|
|
|
|
|
|
|
data.blksz = len;
|
|
|
|
data.blocks = 1;
|
|
|
|
if (opcode == MMC_BUS_TEST_R)
|
|
|
|
data.flags = MMC_DATA_READ;
|
|
|
|
else
|
|
|
|
data.flags = MMC_DATA_WRITE;
|
|
|
|
|
|
|
|
data.sg = &sg;
|
|
|
|
data.sg_len = 1;
|
2013-08-25 03:25:12 +00:00
|
|
|
mmc_set_data_timeout(&data, card);
|
2010-12-15 07:14:24 +00:00
|
|
|
sg_init_one(&sg, data_buf, len);
|
|
|
|
mmc_wait_for_req(host, &mrq);
|
|
|
|
err = 0;
|
|
|
|
if (opcode == MMC_BUS_TEST_R) {
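/*
 * The card is expected to return the inverse of the pattern written by
 * BUS_TEST_W, so XOR-ing each read byte with the original pattern should
 * give 0xff on a healthy bus.
 */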
|
|
|
|
for (i = 0; i < len / 4; i++)
|
|
|
|
if ((test_buf[i] ^ data_buf[i]) != 0xff) {
|
|
|
|
err = -EIO;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
kfree(data_buf);
|
|
|
|
|
|
|
|
if (cmd.error)
|
|
|
|
return cmd.error;
|
|
|
|
if (data.error)
|
|
|
|
return data.error;
|
|
|
|
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
int mmc_bus_test(struct mmc_card *card, u8 bus_width)
|
|
|
|
{
|
2016-01-08 02:15:25 +00:00
|
|
|
int width;
|
2010-12-15 07:14:24 +00:00
|
|
|
|
|
|
|
if (bus_width == MMC_BUS_WIDTH_8)
|
|
|
|
width = 8;
|
|
|
|
else if (bus_width == MMC_BUS_WIDTH_4)
|
|
|
|
width = 4;
|
|
|
|
else if (bus_width == MMC_BUS_WIDTH_1)
|
|
|
|
return 0; /* no need for test */
|
|
|
|
else
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Ignore errors from BUS_TEST_W. BUS_TEST_R will fail if there
|
|
|
|
* is a problem. This improves chances that the test will work.
|
|
|
|
*/
|
|
|
|
mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
|
2016-01-08 02:15:25 +00:00
|
|
|
return mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
|
2010-12-15 07:14:24 +00:00
|
|
|
}
|
2011-10-18 05:26:42 +00:00
|
|
|
|
2020-02-04 08:54:46 +00:00
|
|
|
static int mmc_send_hpi_cmd(struct mmc_card *card)
|
2011-10-18 05:26:42 +00:00
|
|
|
{
|
2020-02-04 08:54:47 +00:00
|
|
|
unsigned int busy_timeout_ms = card->ext_csd.out_of_int_time;
|
2020-02-04 08:54:48 +00:00
|
|
|
struct mmc_host *host = card->host;
|
2021-05-04 16:12:13 +00:00
|
|
|
bool use_r1b_resp = false;
|
2016-12-19 11:51:18 +00:00
|
|
|
struct mmc_command cmd = {};
|
2011-10-18 05:26:42 +00:00
|
|
|
int err;
|
|
|
|
|
2020-02-04 08:54:48 +00:00
|
|
|
cmd.opcode = card->ext_csd.hpi_cmd;
|
|
|
|
cmd.arg = card->rca << 16 | 1;
|
2021-05-04 16:12:13 +00:00
|
|
|
cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
|
2020-02-04 08:54:48 +00:00
|
|
|
|
2021-05-04 16:12:13 +00:00
|
|
|
if (cmd.opcode == MMC_STOP_TRANSMISSION)
|
|
|
|
use_r1b_resp = mmc_prepare_busy_cmd(host, &cmd,
|
|
|
|
busy_timeout_ms);
|
2011-10-18 05:26:42 +00:00
|
|
|
|
2020-02-04 08:54:48 +00:00
|
|
|
err = mmc_wait_for_cmd(host, &cmd, 0);
|
2011-10-18 05:26:42 +00:00
|
|
|
if (err) {
|
2020-02-04 08:54:48 +00:00
|
|
|
pr_warn("%s: HPI error %d. Command response %#x\n",
|
|
|
|
mmc_hostname(host), err, cmd.resp[0]);
|
2011-10-18 05:26:42 +00:00
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2020-02-04 08:54:48 +00:00
|
|
|
/* No need to poll when using HW busy detection. */
|
|
|
|
if (host->caps & MMC_CAP_WAIT_WHILE_BUSY && use_r1b_resp)
|
|
|
|
return 0;
|
|
|
|
|
2020-02-04 08:54:47 +00:00
|
|
|
/* Let's poll to find out when the HPI request completes. */
|
2021-05-04 16:12:15 +00:00
|
|
|
return mmc_poll_for_busy(card, busy_timeout_ms, false, MMC_BUSY_HPI);
|
2011-10-18 05:26:42 +00:00
|
|
|
}
|
2014-10-20 09:33:53 +00:00
|
|
|
|
2017-06-08 13:27:39 +00:00
|
|
|
/**
|
|
|
|
* mmc_interrupt_hpi - Issue a High Priority Interrupt (HPI)
|
|
|
|
* @card: the MMC card associated with the HPI transfer
|
|
|
|
*
|
|
|
|
* Issue a High Priority Interrupt and poll the card status
|
|
|
|
* until it leaves the prg-state.
|
|
|
|
*/
|
2020-04-03 03:47:27 +00:00
|
|
|
static int mmc_interrupt_hpi(struct mmc_card *card)
|
2017-06-08 13:27:39 +00:00
|
|
|
{
|
|
|
|
int err;
|
|
|
|
u32 status;
|
|
|
|
|
|
|
|
if (!card->ext_csd.hpi_en) {
|
|
|
|
pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
err = mmc_send_status(card, &status);
|
|
|
|
if (err) {
|
|
|
|
pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
switch (R1_CURRENT_STATE(status)) {
|
|
|
|
case R1_STATE_IDLE:
|
|
|
|
case R1_STATE_READY:
|
|
|
|
case R1_STATE_STBY:
|
|
|
|
case R1_STATE_TRAN:
|
|
|
|
/*
|
|
|
|
* In idle and transfer states, HPI is not needed and the caller
|
|
|
|
* can issue the next intended command immediately
|
|
|
|
*/
|
|
|
|
goto out;
|
|
|
|
case R1_STATE_PRG:
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
/* In all other states, it's illegal to issue HPI */
|
|
|
|
pr_debug("%s: HPI cannot be sent. Card state=%d\n",
|
|
|
|
mmc_hostname(card->host), R1_CURRENT_STATE(status));
|
|
|
|
err = -EINVAL;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
2020-02-04 08:54:46 +00:00
|
|
|
err = mmc_send_hpi_cmd(card);
|
2017-06-08 13:27:39 +00:00
|
|
|
out:
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2014-10-20 09:33:53 +00:00
|
|
|
int mmc_can_ext_csd(struct mmc_card *card)
|
|
|
|
{
|
|
|
|
return (card && card->csd.mmca_vsn > CSD_SPEC_VER_3);
|
|
|
|
}
|
2017-03-13 12:36:37 +00:00
|
|
|
|
2017-06-09 12:15:01 +00:00
|
|
|
static int mmc_read_bkops_status(struct mmc_card *card)
|
|
|
|
{
|
|
|
|
int err;
|
|
|
|
u8 *ext_csd;
|
|
|
|
|
|
|
|
err = mmc_get_ext_csd(card, &ext_csd);
|
|
|
|
if (err)
|
|
|
|
return err;
|
|
|
|
|
|
|
|
card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
|
|
|
|
card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
|
|
|
|
kfree(ext_csd);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
2018-12-10 16:52:40 +00:00
|
|
|
* mmc_run_bkops - Run BKOPS for supported cards
|
|
|
|
* @card: MMC card to run BKOPS for
|
2017-06-09 12:15:01 +00:00
|
|
|
*
|
2018-12-10 16:52:40 +00:00
|
|
|
* Run background operations synchronously for cards having manual BKOPS
|
|
|
|
* enabled, when the card reports an urgent BKOPS level.
|
2017-06-09 12:15:01 +00:00
|
|
|
*/
|
2018-12-10 16:52:40 +00:00
|
|
|
void mmc_run_bkops(struct mmc_card *card)
|
2017-06-09 12:15:01 +00:00
|
|
|
{
|
|
|
|
int err;
|
|
|
|
|
2018-12-10 16:52:40 +00:00
|
|
|
if (!card->ext_csd.man_bkops_en)
|
2017-06-09 12:15:01 +00:00
|
|
|
return;
|
|
|
|
|
|
|
|
err = mmc_read_bkops_status(card);
|
|
|
|
if (err) {
|
|
|
|
pr_err("%s: Failed to read bkops status: %d\n",
|
|
|
|
mmc_hostname(card->host), err);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2018-12-10 16:52:40 +00:00
|
|
|
if (!card->ext_csd.raw_bkops_status ||
|
|
|
|
card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2)
|
2017-06-09 12:15:01 +00:00
|
|
|
return;
|
|
|
|
|
|
|
|
mmc_retune_hold(card->host);
|
|
|
|
|
2018-12-10 16:52:40 +00:00
|
|
|
/*
|
|
|
|
* For urgent BKOPS status, LEVEL_2 and higher, let's execute
|
|
|
|
* synchronously. In the future, we may consider starting BKOPS for less
|
|
|
|
* urgent levels by using an asynchronous background task, when idle.
|
|
|
|
*/
|
|
|
|
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
|
2020-01-22 14:27:45 +00:00
|
|
|
EXT_CSD_BKOPS_START, 1, MMC_BKOPS_TIMEOUT_MS);
|
2021-08-17 22:42:07 +00:00
|
|
|
/*
|
|
|
|
* If the BKOPS timed out, the card is probably still busy in the
|
|
|
|
* R1_STATE_PRG. Rather than continue to wait, let's try to abort
|
|
|
|
* it with an HPI command to get back into R1_STATE_TRAN.
|
|
|
|
*/
|
|
|
|
if (err == -ETIMEDOUT && !mmc_interrupt_hpi(card))
|
|
|
|
pr_warn("%s: BKOPS aborted\n", mmc_hostname(card->host));
|
|
|
|
else if (err)
|
|
|
|
pr_warn("%s: Error %d running bkops\n",
|
2017-06-09 12:15:01 +00:00
|
|
|
mmc_hostname(card->host), err);
|
|
|
|
|
2018-12-10 16:52:40 +00:00
|
|
|
mmc_retune_release(card->host);
|
2017-06-09 12:15:01 +00:00
|
|
|
}
|
2018-12-10 16:52:40 +00:00
|
|
|
EXPORT_SYMBOL(mmc_run_bkops);
|
2017-06-09 12:15:01 +00:00
|
|
|
|
2017-03-13 12:36:37 +00:00
|
|
|
static int mmc_cmdq_switch(struct mmc_card *card, bool enable)
|
|
|
|
{
|
|
|
|
u8 val = enable ? EXT_CSD_CMDQ_MODE_ENABLED : 0;
|
|
|
|
int err;
|
|
|
|
|
|
|
|
if (!card->ext_csd.cmdq_support)
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
|
|
|
|
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_CMDQ_MODE_EN,
|
|
|
|
val, card->ext_csd.generic_cmd6_time);
|
|
|
|
if (!err)
|
|
|
|
card->ext_csd.cmdq_en = enable;
|
|
|
|
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
int mmc_cmdq_enable(struct mmc_card *card)
|
|
|
|
{
|
|
|
|
return mmc_cmdq_switch(card, true);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(mmc_cmdq_enable);
|
|
|
|
|
|
|
|
int mmc_cmdq_disable(struct mmc_card *card)
|
|
|
|
{
|
|
|
|
return mmc_cmdq_switch(card, false);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(mmc_cmdq_disable);
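Callers that must issue legacy (non-CQ) commands are expected to bracket them with these helpers; a minimal sketch of that pattern (the surrounding context is assumed, not taken from this file):

	err = mmc_cmdq_disable(card);
	if (err)
		return err;

	/* ... issue legacy, non-command-queue requests here ... */

	err = mmc_cmdq_enable(card);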
|
2020-03-16 15:21:52 +00:00
|
|
|
|
2021-04-02 09:24:31 +00:00
|
|
|
int mmc_sanitize(struct mmc_card *card, unsigned int timeout_ms)
|
2020-03-16 15:21:52 +00:00
|
|
|
{
|
|
|
|
struct mmc_host *host = card->host;
|
|
|
|
int err;
|
|
|
|
|
|
|
|
if (!mmc_can_sanitize(card)) {
|
|
|
|
pr_warn("%s: Sanitize not supported\n", mmc_hostname(host));
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
}
|
|
|
|
|
2021-04-02 09:24:31 +00:00
|
|
|
if (!timeout_ms)
|
|
|
|
timeout_ms = MMC_SANITIZE_TIMEOUT_MS;
|
|
|
|
|
2020-03-16 15:21:52 +00:00
|
|
|
pr_debug("%s: Sanitize in progress...\n", mmc_hostname(host));
|
|
|
|
|
|
|
|
mmc_retune_hold(host);
|
|
|
|
|
2021-04-14 21:22:36 +00:00
|
|
|
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_SANITIZE_START,
|
|
|
|
1, timeout_ms, 0, true, false, 0);
|
2020-03-16 15:21:52 +00:00
|
|
|
if (err)
|
|
|
|
pr_err("%s: Sanitize failed err=%d\n", mmc_hostname(host), err);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If the sanitize operation timed out, the card is probably still busy
|
|
|
|
* in the R1_STATE_PRG. Rather than continue to wait, let's try to abort
|
|
|
|
* it with an HPI command to get back into R1_STATE_TRAN.
|
|
|
|
*/
|
|
|
|
if (err == -ETIMEDOUT && !mmc_interrupt_hpi(card))
|
|
|
|
pr_warn("%s: Sanitize aborted\n", mmc_hostname(host));
|
|
|
|
|
|
|
|
mmc_retune_release(host);
|
|
|
|
|
|
|
|
pr_debug("%s: Sanitize completed\n", mmc_hostname(host));
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(mmc_sanitize);
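A caller without a better bound can pass 0 for the timeout and rely on the fallback shown above; sketch:

	/* 0 makes mmc_sanitize() fall back to MMC_SANITIZE_TIMEOUT_MS. */
	err = mmc_sanitize(card, 0);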
|