remoteproc: qcom: q6v5-mss: Refactor mba load/unload sequence

Refactor the reusable parts of the MBA load/unload sequence into
q6v5_mba_load() and q6v5_mba_reclaim() respectively. This avoids
duplicating the sequence for modem coredump, which requires the MBA to
be loaded before dumping the segments. The following changes in
functionality are intended:

* Add a software bypass to avoid high MX current in the mpss error
  path.
* Remove the proxy votes for clocks/regulators only after disabling the
  active/reset clocks/regulators.
* Reclaim the MBA memory after an mpss_load failure, in the mba_reclaim
  function.
* Set/clear the dump_mba_loaded flag in mba_load/mba_reclaim
  respectively.

Signed-off-by: Sibi Sankar <sibis@codeaurora.org>
Signed-off-by: Bjorn Andersson <bjorn.andersson@linaro.org>
Sibi Sankar authored on 2018-10-17 19:25:25 +05:30; committed by Bjorn Andersson
parent ab8f873bb9
commit 0304530ddd
1 file changed, 170 insertions(+), 138 deletions(-)
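
The consumers of these helpers are the modem coredump paths, which must
be able to bring the MBA up and tear it down outside the normal rproc
start/stop flow. A minimal sketch of how such a dump hook could use
them, assuming a rproc_coredump_add_custom_segment()-style callback and
treating segment->da as an offset into the mpss region; the function
below is a hypothetical illustration, not part of this patch:

	/* Illustrative only: a hypothetical coredump hook built on the
	 * new helpers; the real dump support lands in a later patch.
	 */
	static void q6v5_dump_segment(struct rproc *rproc,
				      struct rproc_dump_segment *segment,
				      void *dest)
	{
		struct q6v5 *qproc = rproc->priv;
		bool mba_was_loaded = qproc->dump_mba_loaded;
		void __iomem *ptr;

		/* Dumping mpss segments requires a booted MBA; load it
		 * on demand if the remoteproc core does not hold it.
		 */
		if (!mba_was_loaded && q6v5_mba_load(qproc))
			return;

		ptr = ioremap_wc(qproc->mpss_phys + segment->da, segment->size);
		if (ptr) {
			memcpy_fromio(dest, ptr, segment->size);
			iounmap(ptr);
		} else {
			memset(dest, 0xff, segment->size);
		}

		/* If the MBA was loaded only for this dump, tear it
		 * back down again.
		 */
		if (!mba_was_loaded)
			q6v5_mba_reclaim(qproc);
	}

Tracking dump_mba_loaded is what lets such a hook know whether it
borrowed the MBA for the dump and therefore owes a q6v5_mba_reclaim(),
or whether the remoteproc core already holds it.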

@@ -167,6 +167,7 @@ struct q6v5 {
 	bool running;
 
+	bool dump_mba_loaded;
 	phys_addr_t mba_phys;
 	void *mba_region;
 	size_t mba_size;
@@ -679,6 +680,171 @@ static bool q6v5_phdr_valid(const struct elf32_phdr *phdr)
 	return true;
 }
 
+static int q6v5_mba_load(struct q6v5 *qproc)
+{
+	int ret;
+	int xfermemop_ret;
+
+	qcom_q6v5_prepare(&qproc->q6v5);
+
+	ret = q6v5_regulator_enable(qproc, qproc->proxy_regs,
+				    qproc->proxy_reg_count);
+	if (ret) {
+		dev_err(qproc->dev, "failed to enable proxy supplies\n");
+		goto disable_irqs;
+	}
+
+	ret = q6v5_clk_enable(qproc->dev, qproc->proxy_clks,
+			      qproc->proxy_clk_count);
+	if (ret) {
+		dev_err(qproc->dev, "failed to enable proxy clocks\n");
+		goto disable_proxy_reg;
+	}
+
+	ret = q6v5_regulator_enable(qproc, qproc->active_regs,
+				    qproc->active_reg_count);
+	if (ret) {
+		dev_err(qproc->dev, "failed to enable supplies\n");
+		goto disable_proxy_clk;
+	}
+
+	ret = q6v5_clk_enable(qproc->dev, qproc->reset_clks,
+			      qproc->reset_clk_count);
+	if (ret) {
+		dev_err(qproc->dev, "failed to enable reset clocks\n");
+		goto disable_vdd;
+	}
+
+	ret = q6v5_reset_deassert(qproc);
+	if (ret) {
+		dev_err(qproc->dev, "failed to deassert mss restart\n");
+		goto disable_reset_clks;
+	}
+
+	ret = q6v5_clk_enable(qproc->dev, qproc->active_clks,
+			      qproc->active_clk_count);
+	if (ret) {
+		dev_err(qproc->dev, "failed to enable clocks\n");
+		goto assert_reset;
+	}
+
+	/* Assign MBA image access in DDR to q6 */
+	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true,
+				      qproc->mba_phys, qproc->mba_size);
+	if (ret) {
+		dev_err(qproc->dev,
+			"assigning Q6 access to mba memory failed: %d\n", ret);
+		goto disable_active_clks;
+	}
+
+	writel(qproc->mba_phys, qproc->rmb_base + RMB_MBA_IMAGE_REG);
+
+	ret = q6v5proc_reset(qproc);
+	if (ret)
+		goto reclaim_mba;
+
+	ret = q6v5_rmb_mba_wait(qproc, 0, 5000);
+	if (ret == -ETIMEDOUT) {
+		dev_err(qproc->dev, "MBA boot timed out\n");
+		goto halt_axi_ports;
+	} else if (ret != RMB_MBA_XPU_UNLOCKED &&
+		   ret != RMB_MBA_XPU_UNLOCKED_SCRIBBLED) {
+		dev_err(qproc->dev, "MBA returned unexpected status %d\n", ret);
+		ret = -EINVAL;
+		goto halt_axi_ports;
+	}
+
+	qproc->dump_mba_loaded = true;
+	return 0;
+
+halt_axi_ports:
+	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
+	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
+	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
+
+reclaim_mba:
+	xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false,
+						qproc->mba_phys,
+						qproc->mba_size);
+	if (xfermemop_ret) {
+		dev_err(qproc->dev,
+			"Failed to reclaim mba buffer, system may become unstable\n");
+	}
+
+disable_active_clks:
+	q6v5_clk_disable(qproc->dev, qproc->active_clks,
+			 qproc->active_clk_count);
+assert_reset:
+	q6v5_reset_assert(qproc);
+disable_reset_clks:
+	q6v5_clk_disable(qproc->dev, qproc->reset_clks,
+			 qproc->reset_clk_count);
+disable_vdd:
+	q6v5_regulator_disable(qproc, qproc->active_regs,
+			       qproc->active_reg_count);
+disable_proxy_clk:
+	q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
+			 qproc->proxy_clk_count);
+disable_proxy_reg:
+	q6v5_regulator_disable(qproc, qproc->proxy_regs,
+			       qproc->proxy_reg_count);
+disable_irqs:
+	qcom_q6v5_unprepare(&qproc->q6v5);
+
+	return ret;
+}
+
+static void q6v5_mba_reclaim(struct q6v5 *qproc)
+{
+	int ret;
+	u32 val;
+
+	qproc->dump_mba_loaded = false;
+
+	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
+	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
+	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
+	if (qproc->version == MSS_MSM8996) {
+		/*
+		 * To avoid high MX current during LPASS/MSS restart.
+		 */
+		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+		val |= Q6SS_CLAMP_IO | QDSP6v56_CLAMP_WL |
+			QDSP6v56_CLAMP_QMC_MEM;
+		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+	}
+
+	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
+				      false, qproc->mpss_phys,
+				      qproc->mpss_size);
+	WARN_ON(ret);
+
+	q6v5_reset_assert(qproc);
+
+	q6v5_clk_disable(qproc->dev, qproc->reset_clks,
+			 qproc->reset_clk_count);
+	q6v5_clk_disable(qproc->dev, qproc->active_clks,
+			 qproc->active_clk_count);
+	q6v5_regulator_disable(qproc, qproc->active_regs,
+			       qproc->active_reg_count);
+
+	/*
+	 * In case of a failure or coredump scenario where reclaiming MBA
+	 * memory could not happen, reclaim it here.
+	 */
+	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false,
+				      qproc->mba_phys,
+				      qproc->mba_size);
+	WARN_ON(ret);
+
+	ret = qcom_q6v5_unprepare(&qproc->q6v5);
+	if (ret) {
+		q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
+				 qproc->proxy_clk_count);
+		q6v5_regulator_disable(qproc, qproc->proxy_regs,
+				       qproc->proxy_reg_count);
+	}
+}
+
 static int q6v5_mpss_load(struct q6v5 *qproc)
 {
 	const struct elf32_phdr *phdrs;
@@ -801,74 +967,9 @@ static int q6v5_start(struct rproc *rproc)
 	int xfermemop_ret;
 	int ret;
 
-	qcom_q6v5_prepare(&qproc->q6v5);
-
-	ret = q6v5_regulator_enable(qproc, qproc->proxy_regs,
-				    qproc->proxy_reg_count);
-	if (ret) {
-		dev_err(qproc->dev, "failed to enable proxy supplies\n");
-		goto disable_irqs;
-	}
-
-	ret = q6v5_clk_enable(qproc->dev, qproc->proxy_clks,
-			      qproc->proxy_clk_count);
-	if (ret) {
-		dev_err(qproc->dev, "failed to enable proxy clocks\n");
-		goto disable_proxy_reg;
-	}
-
-	ret = q6v5_regulator_enable(qproc, qproc->active_regs,
-				    qproc->active_reg_count);
-	if (ret) {
-		dev_err(qproc->dev, "failed to enable supplies\n");
-		goto disable_proxy_clk;
-	}
-
-	ret = q6v5_clk_enable(qproc->dev, qproc->reset_clks,
-			      qproc->reset_clk_count);
-	if (ret) {
-		dev_err(qproc->dev, "failed to enable reset clocks\n");
-		goto disable_vdd;
-	}
-
-	ret = q6v5_reset_deassert(qproc);
-	if (ret) {
-		dev_err(qproc->dev, "failed to deassert mss restart\n");
-		goto disable_reset_clks;
-	}
-
-	ret = q6v5_clk_enable(qproc->dev, qproc->active_clks,
-			      qproc->active_clk_count);
-	if (ret) {
-		dev_err(qproc->dev, "failed to enable clocks\n");
-		goto assert_reset;
-	}
-
-	/* Assign MBA image access in DDR to q6 */
-	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true,
-				      qproc->mba_phys, qproc->mba_size);
-	if (ret) {
-		dev_err(qproc->dev,
-			"assigning Q6 access to mba memory failed: %d\n", ret);
-		goto disable_active_clks;
-	}
-
-	writel(qproc->mba_phys, qproc->rmb_base + RMB_MBA_IMAGE_REG);
-
-	ret = q6v5proc_reset(qproc);
+	ret = q6v5_mba_load(qproc);
 	if (ret)
-		goto reclaim_mba;
-
-	ret = q6v5_rmb_mba_wait(qproc, 0, 5000);
-	if (ret == -ETIMEDOUT) {
-		dev_err(qproc->dev, "MBA boot timed out\n");
-		goto halt_axi_ports;
-	} else if (ret != RMB_MBA_XPU_UNLOCKED &&
-		   ret != RMB_MBA_XPU_UNLOCKED_SCRIBBLED) {
-		dev_err(qproc->dev, "MBA returned unexpected status %d\n", ret);
-		ret = -EINVAL;
-		goto halt_axi_ports;
-	}
+		return ret;
 
 	dev_info(qproc->dev, "MBA booted, loading mpss\n");
@@ -897,42 +998,7 @@ reclaim_mpss:
 						false, qproc->mpss_phys,
 						qproc->mpss_size);
 	WARN_ON(xfermemop_ret);
 
-halt_axi_ports:
-	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
-	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
-	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
-
-reclaim_mba:
-	xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false,
-						qproc->mba_phys,
-						qproc->mba_size);
-	if (xfermemop_ret) {
-		dev_err(qproc->dev,
-			"Failed to reclaim mba buffer, system may become unstable\n");
-	}
-
-disable_active_clks:
-	q6v5_clk_disable(qproc->dev, qproc->active_clks,
-			 qproc->active_clk_count);
-assert_reset:
-	q6v5_reset_assert(qproc);
-disable_reset_clks:
-	q6v5_clk_disable(qproc->dev, qproc->reset_clks,
-			 qproc->reset_clk_count);
-disable_vdd:
-	q6v5_regulator_disable(qproc, qproc->active_regs,
-			       qproc->active_reg_count);
-disable_proxy_clk:
-	q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
-			 qproc->proxy_clk_count);
-disable_proxy_reg:
-	q6v5_regulator_disable(qproc, qproc->proxy_regs,
-			       qproc->proxy_reg_count);
-disable_irqs:
-	qcom_q6v5_unprepare(&qproc->q6v5);
+	q6v5_mba_reclaim(qproc);
 
 	return ret;
 }
@@ -941,7 +1007,6 @@ static int q6v5_stop(struct rproc *rproc)
 {
 	struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
 	int ret;
-	u32 val;
 
 	qproc->running = false;
@@ -949,40 +1014,7 @@ static int q6v5_stop(struct rproc *rproc)
 	if (ret == -ETIMEDOUT)
 		dev_err(qproc->dev, "timed out on wait\n");
 
-	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
-	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
-	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
-
-	if (qproc->version == MSS_MSM8996) {
-		/*
-		 * To avoid high MX current during LPASS/MSS restart.
-		 */
-		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
-		val |= Q6SS_CLAMP_IO | QDSP6v56_CLAMP_WL |
-			QDSP6v56_CLAMP_QMC_MEM;
-		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
-	}
-
-	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, false,
-				      qproc->mpss_phys, qproc->mpss_size);
-	WARN_ON(ret);
-
-	q6v5_reset_assert(qproc);
-
-	ret = qcom_q6v5_unprepare(&qproc->q6v5);
-	if (ret) {
-		q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
-				 qproc->proxy_clk_count);
-		q6v5_regulator_disable(qproc, qproc->proxy_regs,
-				       qproc->proxy_reg_count);
-	}
-
-	q6v5_clk_disable(qproc->dev, qproc->reset_clks,
-			 qproc->reset_clk_count);
-	q6v5_clk_disable(qproc->dev, qproc->active_clks,
-			 qproc->active_clk_count);
-	q6v5_regulator_disable(qproc, qproc->active_regs,
-			       qproc->active_reg_count);
+	q6v5_mba_reclaim(qproc);
 
 	return 0;
 }
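
After this refactor the start/stop paths reduce to the shape below; a
readability sketch assembled from the hunks above, with the unchanged
mpss load/handoff and stop-request steps elided as comments rather than
reproduced:

	static int q6v5_start(struct rproc *rproc)
	{
		struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
		int ret;

		ret = q6v5_mba_load(qproc);	/* power up, boot and verify the MBA */
		if (ret)
			return ret;

		dev_info(qproc->dev, "MBA booted, loading mpss\n");

		/* ... unchanged: load the mpss firmware and hand off; on
		 * failure, reclaim mpss memory and call q6v5_mba_reclaim()
		 * as shown in the reclaim_mpss hunk above ...
		 */
	}

	static int q6v5_stop(struct rproc *rproc)
	{
		struct q6v5 *qproc = (struct q6v5 *)rproc->priv;

		qproc->running = false;

		/* ... unchanged: request stop and wait for the ack ... */

		q6v5_mba_reclaim(qproc);	/* halt AXI, clamp on 8996, power down */

		return 0;
	}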