Merge patch series "Prepare for upstreaming Pixel 6 and 7 UFS support"

Bart Van Assche <bvanassche@acm.org> says:

The patches in this series are a first step towards integrating
support in the upstream kernel for the UFS controller in the Pixel 6
and 7.

[mkp: resolve conflict with RPMB series]

Link: https://lore.kernel.org/r/20221208234358.252031-1-bvanassche@acm.org
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Commit 4a5bd1a928 by Martin K. Petersen, 2022-12-30 21:21:53 +00:00
4 changed files with 75 additions and 27 deletions


@@ -531,7 +531,7 @@ void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
 		prdt_length = le16_to_cpu(
 			lrbp->utr_descriptor_ptr->prd_table_length);
 		if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
-			prdt_length /= sizeof(struct ufshcd_sg_entry);
+			prdt_length /= ufshcd_sg_entry_size(hba);
 
 		dev_err(hba->dev,
 			"UPIU[%d] - PRDT - %d entries  phys@0x%llx\n",
@@ -540,7 +540,7 @@ void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
 		if (pr_prdt)
 			ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr,
-				sizeof(struct ufshcd_sg_entry) * prdt_length);
+				ufshcd_sg_entry_size(hba) * prdt_length);
 	}
 }
@@ -1124,6 +1124,12 @@ static u32 ufshcd_pending_cmds(struct ufs_hba *hba)
 	return pending;
 }
 
+/*
+ * Wait until all pending SCSI commands and TMFs have finished or the timeout
+ * has expired.
+ *
+ * Return: 0 upon success; -EBUSY upon timeout.
+ */
 static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
 					u64 wait_timeout_us)
 {
@@ -1157,7 +1163,7 @@ static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
 		}
 
 		spin_unlock_irqrestore(hba->host->host_lock, flags);
-		schedule();
+		io_schedule_timeout(msecs_to_jiffies(20));
 		if (ktime_to_us(ktime_sub(ktime_get(), start)) >
 		    wait_timeout_us) {
 			timeout = true;
@@ -1228,9 +1234,14 @@ static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
 	return ret;
 }
 
-static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
+/*
+ * Wait until all pending SCSI commands and TMFs have finished or the timeout
+ * has expired.
+ *
+ * Return: 0 upon success; -EBUSY upon timeout.
+ */
+static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba, u64 timeout_us)
 {
-	#define DOORBELL_CLR_TOUT_US		(1000 * 1000) /* 1 sec */
 	int ret = 0;
 	/*
 	 * make sure that there are no outstanding requests when
@@ -1240,7 +1251,7 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba, u64 timeout_us)
 	down_write(&hba->clk_scaling_lock);
 
 	if (!hba->clk_scaling.is_allowed ||
-	    ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
+	    ufshcd_wait_for_doorbell_clr(hba, timeout_us)) {
 		ret = -EBUSY;
 		up_write(&hba->clk_scaling_lock);
 		ufshcd_scsi_unblock_requests(hba);
@@ -1278,7 +1289,7 @@ static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
 	int ret = 0;
 	bool is_writelock = true;
 
-	ret = ufshcd_clock_scaling_prepare(hba);
+	ret = ufshcd_clock_scaling_prepare(hba, 1 * USEC_PER_SEC);
 	if (ret)
 		return ret;
@@ -2411,7 +2422,7 @@ int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
 static void ufshcd_sgl_to_prdt(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, int sg_entries,
 			       struct scatterlist *sg_list)
 {
-	struct ufshcd_sg_entry *prd_table;
+	struct ufshcd_sg_entry *prd;
 	struct scatterlist *sg;
 	int i;
@@ -2419,11 +2430,11 @@ static void ufshcd_sgl_to_prdt(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, int
 		if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
 			lrbp->utr_descriptor_ptr->prd_table_length =
-				cpu_to_le16((sg_entries * sizeof(struct ufshcd_sg_entry)));
+				cpu_to_le16(sg_entries * ufshcd_sg_entry_size(hba));
 		else
 			lrbp->utr_descriptor_ptr->prd_table_length = cpu_to_le16(sg_entries);
 
-		prd_table = lrbp->ucd_prdt_ptr;
+		prd = lrbp->ucd_prdt_ptr;
 
 		for_each_sg(sg_list, sg, sg_entries, i) {
 			const unsigned int len = sg_dma_len(sg);
@@ -2437,9 +2448,10 @@ static void ufshcd_sgl_to_prdt(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, int
 			 * indicates 4 bytes, '7' indicates 8 bytes, etc."
 			 */
 			WARN_ONCE(len > 256 * 1024, "len = %#x\n", len);
-			prd_table[i].size = cpu_to_le32(len - 1);
-			prd_table[i].addr = cpu_to_le64(sg->dma_address);
-			prd_table[i].reserved = 0;
+			prd->size = cpu_to_le32(len - 1);
+			prd->addr = cpu_to_le64(sg->dma_address);
+			prd->reserved = 0;
+			prd = (void *)prd + ufshcd_sg_entry_size(hba);
 		}
 	} else {
 		lrbp->utr_descriptor_ptr->prd_table_length = 0;
@@ -2746,10 +2758,11 @@ static void ufshcd_map_queues(struct Scsi_Host *shost)
 static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i)
 {
-	struct utp_transfer_cmd_desc *cmd_descp = hba->ucdl_base_addr;
+	struct utp_transfer_cmd_desc *cmd_descp = (void *)hba->ucdl_base_addr +
+		i * sizeof_utp_transfer_cmd_desc(hba);
 	struct utp_transfer_req_desc *utrdlp = hba->utrdl_base_addr;
 	dma_addr_t cmd_desc_element_addr = hba->ucdl_dma_addr +
-		i * sizeof(struct utp_transfer_cmd_desc);
+		i * sizeof_utp_transfer_cmd_desc(hba);
 	u16 response_offset = offsetof(struct utp_transfer_cmd_desc,
 				       response_upiu);
 	u16 prdt_offset = offsetof(struct utp_transfer_cmd_desc, prd_table);
@@ -2757,11 +2770,11 @@ static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i)
 	lrb->utr_descriptor_ptr = utrdlp + i;
 	lrb->utrd_dma_addr = hba->utrdl_dma_addr +
 		i * sizeof(struct utp_transfer_req_desc);
-	lrb->ucd_req_ptr = (struct utp_upiu_req *)(cmd_descp + i);
+	lrb->ucd_req_ptr = (struct utp_upiu_req *)cmd_descp->command_upiu;
 	lrb->ucd_req_dma_addr = cmd_desc_element_addr;
-	lrb->ucd_rsp_ptr = (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
+	lrb->ucd_rsp_ptr = (struct utp_upiu_rsp *)cmd_descp->response_upiu;
 	lrb->ucd_rsp_dma_addr = cmd_desc_element_addr + response_offset;
-	lrb->ucd_prdt_ptr = cmd_descp[i].prd_table;
+	lrb->ucd_prdt_ptr = (struct ufshcd_sg_entry *)cmd_descp->prd_table;
 	lrb->ucd_prdt_dma_addr = cmd_desc_element_addr + prdt_offset;
 }
@@ -3676,7 +3689,7 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba)
 	size_t utmrdl_size, utrdl_size, ucdl_size;
 
 	/* Allocate memory for UTP command descriptors */
-	ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
+	ucdl_size = sizeof_utp_transfer_cmd_desc(hba) * hba->nutrs;
 	hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
 						  ucdl_size,
 						  &hba->ucdl_dma_addr,
@@ -3770,7 +3783,7 @@ static void ufshcd_host_memory_configure(struct ufs_hba *hba)
 	prdt_offset =
 		offsetof(struct utp_transfer_cmd_desc, prd_table);
 
-	cmd_desc_size = sizeof(struct utp_transfer_cmd_desc);
+	cmd_desc_size = sizeof_utp_transfer_cmd_desc(hba);
 	cmd_desc_dma_addr = hba->ucdl_dma_addr;
 
 	for (i = 0; i < hba->nutrs; i++) {
@@ -9766,6 +9779,7 @@ int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
 	hba->dev = dev;
 	hba->dev_ref_clk_freq = REF_CLK_FREQ_INVAL;
 	hba->nop_out_timeout = NOP_OUT_TIMEOUT;
+	ufshcd_set_sg_entry_size(hba, sizeof(struct ufshcd_sg_entry));
 	INIT_LIST_HEAD(&hba->clk_list_head);
 	spin_lock_init(&hba->outstanding_lock);
@@ -10145,11 +10159,6 @@ static int __init ufshcd_core_init(void)
 {
 	int ret;
 
-	/* Verify that there are no gaps in struct utp_transfer_cmd_desc. */
-	static_assert(sizeof(struct utp_transfer_cmd_desc) ==
-		      2 * ALIGNED_UPIU_SIZE +
-		      SG_ALL * sizeof(struct ufshcd_sg_entry));
-
 	ufs_debugfs_init();
 
 	ret = scsi_register_driver(&ufs_dev_wlun_template.gendrv);
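
The ufshcd_sgl_to_prdt() hunks above carry the core idea of the series: fixed-size array indexing (prd_table[i]) becomes a byte-granular pointer walk, because each PRDT slot may now be followed by variant-specific fields. Below is a minimal, hedged sketch of that stride idiom as standalone C; sg_entry, fill_prdt and the 32-byte stride are illustrative only, endianness conversions are omitted, and arithmetic on void * relies on the same GCC extension the kernel uses:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Standard part of a PRDT entry; a variant may append extra fields. */
struct sg_entry {
	uint64_t addr;
	uint32_t reserved;
	uint32_t size;
};

/*
 * Fill 'count' entries in a buffer whose per-entry stride may be larger
 * than sizeof(struct sg_entry); 'stride' plays the role of
 * ufshcd_sg_entry_size(hba) in the patched driver.
 */
static void fill_prdt(void *prdt, size_t stride, int count,
		      const uint64_t *addrs, const uint32_t *lens)
{
	struct sg_entry *prd = prdt;

	for (int i = 0; i < count; i++) {
		prd->size = lens[i] - 1;	/* hardware encodes length - 1 */
		prd->addr = addrs[i];
		prd->reserved = 0;
		/* Advance by the full, possibly variant-extended stride. */
		prd = (void *)prd + stride;
	}
}

int main(void)
{
	uint64_t addrs[2] = { 0x1000, 0x2000 };
	uint32_t lens[2] = { 4096, 4096 };
	_Alignas(uint64_t) unsigned char buf[2 * 32];	/* stride 32 > 16 */

	fill_prdt(buf, 32, 2, addrs, lens);
	printf("entry 1 addr: %#llx\n",
	       (unsigned long long)((struct sg_entry *)(buf + 32))->addr);
	return 0;
}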


@@ -124,3 +124,7 @@ config SCSI_UFS_EXYNOS
 	  Select this if you have UFS host controller on Samsung Exynos SoC.
 	  If unsure, say N.
 
+config SCSI_UFS_VARIABLE_SG_ENTRY_SIZE
+	bool
+	default y if SCSI_UFS_EXYNOS && SCSI_UFS_CRYPTO


@@ -755,6 +755,7 @@ struct ufs_hba_monitor {
  * @vops: pointer to variant specific operations
  * @vps: pointer to variant specific parameters
  * @priv: pointer to variant specific private data
+ * @sg_entry_size: size of struct ufshcd_sg_entry (may include variant fields)
  * @irq: Irq number of the controller
  * @is_irq_enabled: whether or not the UFS controller interrupt is enabled.
  * @dev_ref_clk_freq: reference clock frequency
@@ -878,6 +879,9 @@ struct ufs_hba {
 	const struct ufs_hba_variant_ops *vops;
 	struct ufs_hba_variant_params *vps;
 	void *priv;
+#ifdef CONFIG_SCSI_UFS_VARIABLE_SG_ENTRY_SIZE
+	size_t sg_entry_size;
+#endif
 	unsigned int irq;
 	bool is_irq_enabled;
 	enum ufs_ref_clk_freq dev_ref_clk_freq;
@@ -981,6 +985,32 @@ struct ufs_hba {
 	bool complete_put;
 };
 
+#ifdef CONFIG_SCSI_UFS_VARIABLE_SG_ENTRY_SIZE
+static inline size_t ufshcd_sg_entry_size(const struct ufs_hba *hba)
+{
+	return hba->sg_entry_size;
+}
+
+static inline void ufshcd_set_sg_entry_size(struct ufs_hba *hba, size_t sg_entry_size)
+{
+	WARN_ON_ONCE(sg_entry_size < sizeof(struct ufshcd_sg_entry));
+	hba->sg_entry_size = sg_entry_size;
+}
+#else
+static inline size_t ufshcd_sg_entry_size(const struct ufs_hba *hba)
+{
+	return sizeof(struct ufshcd_sg_entry);
+}
+
+#define ufshcd_set_sg_entry_size(hba, sg_entry_size)			\
+	({ (void)(hba); BUILD_BUG_ON(sg_entry_size != sizeof(struct ufshcd_sg_entry)); })
+#endif
+
+static inline size_t sizeof_utp_transfer_cmd_desc(const struct ufs_hba *hba)
+{
+	return sizeof(struct utp_transfer_cmd_desc) + SG_ALL * ufshcd_sg_entry_size(hba);
+}
+
 /* Returns true if clocks can be gated. Otherwise false */
 static inline bool ufshcd_is_clkgating_allowed(struct ufs_hba *hba)
 {
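
To illustrate the intended use of these accessors, here is a hedged sketch of a variant driver's init hook; struct example_sg_entry and its extra words are hypothetical, not the actual Exynos layout. The stride has to be set before the host sizes its command descriptor area, which is why ufshcd_alloc_host() installs the default (see the ufshcd.c hunk above) and a variant would override it from its init callback:

/* Hypothetical variant-extended PRDT entry (illustration only). */
struct example_sg_entry {
	struct ufshcd_sg_entry base;	/* standard 16-byte entry */
	__le32 extra[4];		/* made-up variant fields */
};

static int example_hba_init(struct ufs_hba *hba)
{
	/*
	 * After this call, sizeof_utp_transfer_cmd_desc(hba) returns
	 * 2 * ALIGNED_UPIU_SIZE + SG_ALL * sizeof(struct example_sg_entry).
	 */
	ufshcd_set_sg_entry_size(hba, sizeof(struct example_sg_entry));
	return 0;
}

Note that when CONFIG_SCSI_UFS_VARIABLE_SG_ENTRY_SIZE is off, the macro stub turns any size other than sizeof(struct ufshcd_sg_entry) into a BUILD_BUG_ON() failure, so a variant needing a larger stride must also enable the option.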


@@ -423,18 +423,23 @@ struct ufshcd_sg_entry {
 	__le64 addr;
 	__le32 reserved;
 	__le32 size;
+	/*
+	 * followed by variant-specific fields if
+	 * CONFIG_SCSI_UFS_VARIABLE_SG_ENTRY_SIZE has been defined.
+	 */
 };
 
 /**
  * struct utp_transfer_cmd_desc - UTP Command Descriptor (UCD)
  * @command_upiu: Command UPIU Frame address
  * @response_upiu: Response UPIU Frame address
- * @prd_table: Physical Region Descriptor
+ * @prd_table: Physical Region Descriptor: an array of SG_ALL struct
+ *	ufshcd_sg_entry's. Variant-specific fields may be present after each.
  */
 struct utp_transfer_cmd_desc {
 	u8 command_upiu[ALIGNED_UPIU_SIZE];
 	u8 response_upiu[ALIGNED_UPIU_SIZE];
-	struct ufshcd_sg_entry prd_table[SG_ALL];
+	u8 prd_table[];
 };
 
 /**
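
With prd_table[] now a flexible array, sizeof(struct utp_transfer_cmd_desc) covers only the two UPIU buffers, and the per-descriptor size comes from sizeof_utp_transfer_cmd_desc() at run time. A quick standalone check of that arithmetic, assuming the in-tree constants ALIGNED_UPIU_SIZE = 512 and SG_ALL = 128 (the 32-byte extended stride is a made-up example):

#include <stddef.h>
#include <stdio.h>

#define ALIGNED_UPIU_SIZE 512	/* value assumed from ufshci.h */
#define SG_ALL 128		/* SCSI scatter-gather limit */

int main(void)
{
	size_t std_entry = 16;	/* __le64 + __le32 + __le32 */
	size_t big_entry = 32;	/* hypothetical variant stride */

	/* Two UPIU buffers plus SG_ALL PRDT slots of the runtime stride. */
	printf("standard UCD: %zu bytes\n",
	       2 * (size_t)ALIGNED_UPIU_SIZE + SG_ALL * std_entry);	/* 3072 */
	printf("extended UCD: %zu bytes\n",
	       2 * (size_t)ALIGNED_UPIU_SIZE + SG_ALL * big_entry);	/* 5120 */
	return 0;
}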