Merge tag 'ti-driver-soc-for-v5.19' of git://git.kernel.org/pub/scm/linux/kernel/git/ti/linux into arm/drivers

TI Driver updates for v5.19

* wkup_m3: IO isolation, voltage scaling, VTT regulator toggling, and a debug option to halt the M3 in suspend.
* tisci: support for polled mode during system suspend; the reset driver can now be built under COMPILE_TEST.
* knav, dma: misc cleanups for IS_ERR, pm_runtime_*, and various other fixups.

* tag 'ti-driver-soc-for-v5.19' of git://git.kernel.org/pub/scm/linux/kernel/git/ti/linux:
  soc: ti: wkup_m3_ipc: Add debug option to halt m3 in suspend
  soc: ti: wkup_m3_ipc: Add support for i2c voltage scaling
  soc: ti: wkup_m3_ipc: Add support for IO Isolation
  soc: ti: knav_qmss_queue: Use IS_ERR instead of IS_ERR_OR_NULL when checking knav_queue_open() result
  soc: ti: pm33xx: using pm_runtime_resume_and_get instead of pm_runtime_get_sync
  firmware: ti_sci: Switch transport to polled mode during system suspend
  soc: ti: wkup_m3_ipc: Add support for toggling VTT regulator
  soc: ti: knav_qmss_queue: Use pm_runtime_resume_and_get instead of pm_runtime_get_sync
  soc: ti: knav_dma: Use pm_runtime_resume_and_get instead of pm_runtime_get_sync
  reset: ti-sci: Allow building under COMPILE_TEST
  soc: ti: ti_sci_pm_domains: Check for null return of devm_kcalloc
  soc: ti: omap_prm: Use of_device_get_match_data()
  soc: ti: pruss: using pm_runtime_resume_and_get instead of pm_runtime_get_sync
  soc: ti: replace usage of found with dedicated list iterator variable
  soc: ti: wkup_m3_ipc: fix platform_get_irq.cocci warning

Link: https://lore.kernel.org/r/20220507163424.pvqnwrxpoo73lmp2@debtless
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
commit 1901300bf3 (Arnd Bergmann, 2022-05-09 23:09:09 +02:00)
10 files changed, 302 insertions(+), 52 deletions(-)

diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c
@@ -2,7 +2,7 @@
 /*
  * Texas Instruments System Control Interface Protocol Driver
  *
- * Copyright (C) 2015-2016 Texas Instruments Incorporated - https://www.ti.com/
+ * Copyright (C) 2015-2022 Texas Instruments Incorporated - https://www.ti.com/
  *	Nishanth Menon
  */
@@ -12,6 +12,7 @@
 #include <linux/debugfs.h>
 #include <linux/export.h>
 #include <linux/io.h>
+#include <linux/iopoll.h>
 #include <linux/kernel.h>
 #include <linux/mailbox_client.h>
 #include <linux/module.h>
@@ -96,6 +97,7 @@ struct ti_sci_desc {
  * @node:	list head
  * @host_id:	Host ID
  * @users:	Number of users of this instance
+ * @is_suspending: Flag set to indicate in suspend path.
  */
 struct ti_sci_info {
	struct device *dev;
@@ -114,7 +116,7 @@ struct ti_sci_info {
	u8 host_id;
	/* protected by ti_sci_list_mutex */
	int users;
-
+	bool is_suspending;
 };

 #define cl_to_ti_sci_info(c)	container_of(c, struct ti_sci_info, cl)
@@ -349,6 +351,8 @@ static struct ti_sci_xfer *ti_sci_get_one_xfer(struct ti_sci_info *info,

	hdr = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
	xfer->tx_message.len = tx_message_size;
+	xfer->tx_message.chan_rx = info->chan_rx;
+	xfer->tx_message.timeout_rx_ms = info->desc->max_rx_timeout_ms;
	xfer->rx_len = (u8)rx_message_size;

	reinit_completion(&xfer->done);
@@ -406,6 +410,7 @@ static inline int ti_sci_do_xfer(struct ti_sci_info *info,
	int ret;
	int timeout;
	struct device *dev = info->dev;
+	bool done_state = true;

	ret = mbox_send_message(info->chan_tx, &xfer->tx_message);
	if (ret < 0)
@@ -413,13 +418,27 @@ static inline int ti_sci_do_xfer(struct ti_sci_info *info,

	ret = 0;

-	/* And we wait for the response. */
-	timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
-	if (!wait_for_completion_timeout(&xfer->done, timeout)) {
+	if (!info->is_suspending) {
+		/* And we wait for the response. */
+		timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
+		if (!wait_for_completion_timeout(&xfer->done, timeout))
+			ret = -ETIMEDOUT;
+	} else {
+		/*
+		 * If we are suspending, we cannot use wait_for_completion_timeout
+		 * during noirq phase, so we must manually poll the completion.
+		 */
+		ret = read_poll_timeout_atomic(try_wait_for_completion, done_state,
+					       true, 1,
+					       info->desc->max_rx_timeout_ms * 1000,
+					       false, &xfer->done);
+	}
+
+	if (ret == -ETIMEDOUT || !done_state) {
		dev_err(dev, "Mbox timedout in resp(caller: %pS)\n",
			(void *)_RET_IP_);
-		ret = -ETIMEDOUT;
	}

	/*
	 * NOTE: we might prefer not to need the mailbox ticker to manage the
	 * transfer queueing since the protocol layer queues things by itself.
@@ -3264,6 +3283,35 @@ static int tisci_reboot_handler(struct notifier_block *nb, unsigned long mode,
	return NOTIFY_BAD;
 }

+static void ti_sci_set_is_suspending(struct ti_sci_info *info, bool is_suspending)
+{
+	info->is_suspending = is_suspending;
+}
+
+static int ti_sci_suspend(struct device *dev)
+{
+	struct ti_sci_info *info = dev_get_drvdata(dev);
+	/*
+	 * We must switch operation to polled mode now as drivers and the genpd
+	 * layer may make late TI SCI calls to change clock and device states
+	 * from the noirq phase of suspend.
+	 */
+	ti_sci_set_is_suspending(info, true);
+
+	return 0;
+}
+
+static int ti_sci_resume(struct device *dev)
+{
+	struct ti_sci_info *info = dev_get_drvdata(dev);
+
+	ti_sci_set_is_suspending(info, false);
+
+	return 0;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(ti_sci_pm_ops, ti_sci_suspend, ti_sci_resume);
+
 /* Description for K2G */
 static const struct ti_sci_desc ti_sci_pmmc_k2g_desc = {
	.default_host_id = 2,
@@ -3472,6 +3520,7 @@ static struct platform_driver ti_sci_driver = {
	.driver = {
		   .name = "ti-sci",
		   .of_match_table = of_match_ptr(ti_sci_of_match),
+		   .pm = &ti_sci_pm_ops,
		   },
 };
 module_platform_driver(ti_sci_driver);
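
The ti_sci.c change above avoids sleeping on the mailbox completion while the firmware transport is in the suspend path: once is_suspending is set, the driver busy-polls the completion with read_poll_timeout_atomic(try_wait_for_completion, ...) instead of calling wait_for_completion_timeout(), since the noirq phase cannot rely on an interrupt waking a sleeper. A rough userspace model of that polled wait is sketched below; xfer_done and poll_done_flag() are illustrative stand-ins, not kernel APIs.

/* Userspace model of the polled wait: spin on a "done" flag with a small
 * delay and an overall timeout, roughly what
 * read_poll_timeout_atomic(try_wait_for_completion, ...) does in the driver.
 */
#include <errno.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static atomic_bool xfer_done;	/* stands in for the struct completion */

static int poll_done_flag(long timeout_us, long delay_us)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		if (atomic_load(&xfer_done))
			return 0;		/* response arrived */

		clock_gettime(CLOCK_MONOTONIC, &now);
		long elapsed_us = (now.tv_sec - start.tv_sec) * 1000000L +
				  (now.tv_nsec - start.tv_nsec) / 1000;
		if (elapsed_us >= timeout_us)
			return -ETIMEDOUT;	/* same error the mbox path reports */

		struct timespec d = { 0, delay_us * 1000L };
		nanosleep(&d, NULL);		/* the atomic kernel variant uses udelay() */
	}
}

int main(void)
{
	atomic_store(&xfer_done, true);	/* pretend the firmware already answered */
	printf("poll result: %d\n", poll_done_flag(1000000L, 1));
	return 0;
}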

diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig
@@ -240,7 +240,7 @@ config RESET_SUNXI

 config RESET_TI_SCI
	tristate "TI System Control Interface (TI-SCI) reset driver"
-	depends on TI_SCI_PROTOCOL
+	depends on TI_SCI_PROTOCOL || COMPILE_TEST
	help
	  This enables the reset driver support over TI System Control Interface
	  available on some new TI's SoCs. If you wish to use reset resources

diff --git a/drivers/soc/ti/knav_dma.c b/drivers/soc/ti/knav_dma.c
@@ -415,9 +415,8 @@ static int of_channel_match_helper(struct device_node *np, const char *name,
 void *knav_dma_open_channel(struct device *dev, const char *name,
			    struct knav_dma_cfg *config)
 {
-	struct knav_dma_chan *chan;
-	struct knav_dma_device *dma;
-	bool found = false;
+	struct knav_dma_device *dma = NULL, *iter1;
+	struct knav_dma_chan *chan = NULL, *iter2;
	int chan_num = -1;
	const char *instance;
@@ -444,33 +443,32 @@ void *knav_dma_open_channel(struct device *dev, const char *name,
	}

	/* Look for correct dma instance */
-	list_for_each_entry(dma, &kdev->list, list) {
-		if (!strcmp(dma->name, instance)) {
-			found = true;
+	list_for_each_entry(iter1, &kdev->list, list) {
+		if (!strcmp(iter1->name, instance)) {
+			dma = iter1;
			break;
		}
	}
-	if (!found) {
+	if (!dma) {
		dev_err(kdev->dev, "No DMA instance with name %s\n", instance);
		return (void *)-EINVAL;
	}

	/* Look for correct dma channel from dma instance */
-	found = false;
-	list_for_each_entry(chan, &dma->chan_list, list) {
+	list_for_each_entry(iter2, &dma->chan_list, list) {
		if (config->direction == DMA_MEM_TO_DEV) {
-			if (chan->channel == chan_num) {
-				found = true;
+			if (iter2->channel == chan_num) {
+				chan = iter2;
				break;
			}
		} else {
-			if (chan->flow == chan_num) {
-				found = true;
+			if (iter2->flow == chan_num) {
+				chan = iter2;
				break;
			}
		}
	}
-	if (!found) {
+	if (!chan) {
		dev_err(kdev->dev, "channel %d is not in DMA %s\n",
			chan_num, instance);
		return (void *)-EINVAL;
@@ -747,9 +745,8 @@ static int knav_dma_probe(struct platform_device *pdev)
	INIT_LIST_HEAD(&kdev->list);

	pm_runtime_enable(kdev->dev);
-	ret = pm_runtime_get_sync(kdev->dev);
+	ret = pm_runtime_resume_and_get(kdev->dev);
	if (ret < 0) {
-		pm_runtime_put_noidle(kdev->dev);
		dev_err(kdev->dev, "unable to enable pktdma, err %d\n", ret);
		goto err_pm_disable;
	}
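
The knav_dma rework above follows the kernel-wide move to a dedicated list iterator: the loop walks with its own cursor and only assigns the result pointer on a match, so a NULL result replaces the old found flag and the cursor never escapes the loop. A standalone sketch of the same idea with an illustrative linked list (struct dma_instance and find_instance() are made up for the example, not the kernel types):

#include <stdio.h>
#include <string.h>

struct dma_instance {
	const char *name;
	struct dma_instance *next;
};

static struct dma_instance *find_instance(struct dma_instance *head,
					  const char *name)
{
	struct dma_instance *found = NULL;

	for (struct dma_instance *iter = head; iter; iter = iter->next) {
		if (!strcmp(iter->name, name)) {
			found = iter;	/* remember the match... */
			break;		/* ...and stop walking */
		}
	}
	/* NULL here means "no such instance"; no separate flag needed */
	return found;
}

int main(void)
{
	struct dma_instance b = { "netcp-rx", NULL };
	struct dma_instance a = { "netcp-tx", &b };

	printf("%s\n", find_instance(&a, "netcp-rx") ? "hit" : "miss");
	printf("%s\n", find_instance(&a, "qmss") ? "hit" : "miss");
	return 0;
}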

diff --git a/drivers/soc/ti/knav_qmss_queue.c b/drivers/soc/ti/knav_qmss_queue.c
@@ -758,10 +758,9 @@ void *knav_pool_create(const char *name,
		       int num_desc, int region_id)
 {
	struct knav_region *reg_itr, *region = NULL;
-	struct knav_pool *pool, *pi;
+	struct knav_pool *pool, *pi = NULL, *iter;
	struct list_head *node;
	unsigned last_offset;
-	bool slot_found;
	int ret;

	if (!kdev)
@@ -790,7 +789,7 @@ void *knav_pool_create(const char *name,
	}

	pool->queue = knav_queue_open(name, KNAV_QUEUE_GP, 0);
-	if (IS_ERR_OR_NULL(pool->queue)) {
+	if (IS_ERR(pool->queue)) {
		dev_err(kdev->dev,
			"failed to open queue for pool(%s), error %ld\n",
			name, PTR_ERR(pool->queue));
@@ -816,18 +815,17 @@ void *knav_pool_create(const char *name,
	 * the request
	 */
	last_offset = 0;
-	slot_found = false;
	node = &region->pools;
-	list_for_each_entry(pi, &region->pools, region_inst) {
-		if ((pi->region_offset - last_offset) >= num_desc) {
-			slot_found = true;
+	list_for_each_entry(iter, &region->pools, region_inst) {
+		if ((iter->region_offset - last_offset) >= num_desc) {
+			pi = iter;
			break;
		}
-		last_offset = pi->region_offset + pi->num_desc;
+		last_offset = iter->region_offset + iter->num_desc;
	}
-	node = &pi->region_inst;

-	if (slot_found) {
+	if (pi) {
+		node = &pi->region_inst;
		pool->region = region;
		pool->num_desc = num_desc;
		pool->region_offset = last_offset;
@@ -1785,9 +1783,8 @@ static int knav_queue_probe(struct platform_device *pdev)
	INIT_LIST_HEAD(&kdev->pdsps);

	pm_runtime_enable(&pdev->dev);
-	ret = pm_runtime_get_sync(&pdev->dev);
+	ret = pm_runtime_resume_and_get(&pdev->dev);
	if (ret < 0) {
-		pm_runtime_put_noidle(&pdev->dev);
		dev_err(dev, "Failed to enable QMSS\n");
		return ret;
	}

diff --git a/drivers/soc/ti/omap_prm.c b/drivers/soc/ti/omap_prm.c
@@ -941,23 +941,20 @@ static int omap_prm_probe(struct platform_device *pdev)
	struct resource *res;
	const struct omap_prm_data *data;
	struct omap_prm *prm;
-	const struct of_device_id *match;
	int ret;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

-	match = of_match_device(omap_prm_id_table, &pdev->dev);
-	if (!match)
+	data = of_device_get_match_data(&pdev->dev);
+	if (!data)
		return -ENOTSUPP;

	prm = devm_kzalloc(&pdev->dev, sizeof(*prm), GFP_KERNEL);
	if (!prm)
		return -ENOMEM;

-	data = match->data;
-
	while (data->base != res->start) {
		if (!data->base)
			return -EINVAL;

diff --git a/drivers/soc/ti/pm33xx.c b/drivers/soc/ti/pm33xx.c
@@ -555,11 +555,9 @@ static int am33xx_pm_probe(struct platform_device *pdev)
 #endif /* CONFIG_SUSPEND */

	pm_runtime_enable(dev);
-	ret = pm_runtime_get_sync(dev);
-	if (ret < 0) {
-		pm_runtime_put_noidle(dev);
+	ret = pm_runtime_resume_and_get(dev);
+	if (ret < 0)
		goto err_pm_runtime_disable;
-	}

	ret = pm_ops->init(am33xx_do_sram_idle);
	if (ret) {

diff --git a/drivers/soc/ti/pruss.c b/drivers/soc/ti/pruss.c
@@ -279,10 +279,9 @@ static int pruss_probe(struct platform_device *pdev)
	platform_set_drvdata(pdev, pruss);

	pm_runtime_enable(dev);
-	ret = pm_runtime_get_sync(dev);
+	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0) {
		dev_err(dev, "couldn't enable module\n");
-		pm_runtime_put_noidle(dev);
		goto rpm_disable;
	}
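
The pm_runtime conversions in knav_dma, knav_qmss_queue, pm33xx and pruss above all drop the error-path pm_runtime_put_noidle(), because pm_runtime_resume_and_get() already releases the usage count itself when the resume fails. A standalone model of that difference, with illustrative stand-ins for the runtime-PM core:

#include <stdio.h>

struct fake_dev {
	int usage_count;
	int resume_fails;	/* force a failure for the demo */
};

static int fake_resume(struct fake_dev *d)
{
	return d->resume_fails ? -5 /* behaves like -EIO */ : 0;
}

/* old style: the usage count is bumped no matter what */
static int get_sync(struct fake_dev *d)
{
	d->usage_count++;
	return fake_resume(d);
}

/* new style: the failure path releases the reference before returning */
static int resume_and_get(struct fake_dev *d)
{
	int ret;

	d->usage_count++;
	ret = fake_resume(d);
	if (ret < 0)
		d->usage_count--;	/* what pm_runtime_put_noidle() did at call sites */
	return ret;
}

int main(void)
{
	struct fake_dev d = { 0, 1 };

	get_sync(&d);		/* leaves usage_count at 1 unless the caller cleans up */
	printf("after get_sync: %d\n", d.usage_count);

	d.usage_count = 0;
	resume_and_get(&d);	/* balanced automatically on failure */
	printf("after resume_and_get: %d\n", d.usage_count);
	return 0;
}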

diff --git a/drivers/soc/ti/ti_sci_pm_domains.c b/drivers/soc/ti/ti_sci_pm_domains.c
@@ -183,6 +183,8 @@ static int ti_sci_pm_domain_probe(struct platform_device *pdev)
		devm_kcalloc(dev, max_id + 1,
			     sizeof(*pd_provider->data.domains),
			     GFP_KERNEL);
+	if (!pd_provider->data.domains)
+		return -ENOMEM;

	pd_provider->data.num_domains = max_id + 1;
	pd_provider->data.xlate = ti_sci_pd_xlate;

diff --git a/drivers/soc/ti/wkup_m3_ipc.c b/drivers/soc/ti/wkup_m3_ipc.c
@@ -7,7 +7,9 @@
  *	Dave Gerlach <d-gerlach@ti.com>
  */

+#include <linux/debugfs.h>
 #include <linux/err.h>
+#include <linux/firmware.h>
 #include <linux/kernel.h>
 #include <linux/kthread.h>
 #include <linux/interrupt.h>
@@ -40,12 +42,30 @@
 #define M3_FW_VERSION_MASK		0xffff
 #define M3_WAKE_SRC_MASK		0xff

+#define IPC_MEM_TYPE_SHIFT		(0x0)
+#define IPC_MEM_TYPE_MASK		(0x7 << 0)
+#define IPC_VTT_STAT_SHIFT		(0x3)
+#define IPC_VTT_STAT_MASK		(0x1 << 3)
+#define IPC_VTT_GPIO_PIN_SHIFT		(0x4)
+#define IPC_VTT_GPIO_PIN_MASK		(0x3f << 4)
+#define IPC_IO_ISOLATION_STAT_SHIFT	(10)
+#define IPC_IO_ISOLATION_STAT_MASK	(0x1 << 10)
+
+#define IPC_DBG_HALT_SHIFT		(11)
+#define IPC_DBG_HALT_MASK		(0x1 << 11)
+
 #define M3_STATE_UNKNOWN		0
 #define M3_STATE_RESET			1
 #define M3_STATE_INITED			2
 #define M3_STATE_MSG_FOR_LP		3
 #define M3_STATE_MSG_FOR_RESET		4

+#define WKUP_M3_SD_FW_MAGIC		0x570C
+
+#define WKUP_M3_DMEM_START		0x80000
+#define WKUP_M3_AUXDATA_OFFSET		0x1000
+#define WKUP_M3_AUXDATA_SIZE		0xFF
+
 static struct wkup_m3_ipc *m3_ipc_state;

 static const struct wkup_m3_wakeup_src wakeups[] = {
@@ -66,6 +86,148 @@ static const struct wkup_m3_wakeup_src wakeups[] = {
	{.irq_nr = 0,	.src = "Unknown"},
 };

+/**
+ * wkup_m3_copy_aux_data - Copy auxiliary data to special region of m3 dmem
+ * @data - pointer to data
+ * @sz - size of data to copy (limit 256 bytes)
+ *
+ * Copies any additional blob of data to the wkup_m3 dmem to be used by the
+ * firmware
+ */
+static unsigned long wkup_m3_copy_aux_data(struct wkup_m3_ipc *m3_ipc,
+					   const void *data, int sz)
+{
+	unsigned long aux_data_dev_addr;
+	void *aux_data_addr;
+
+	aux_data_dev_addr = WKUP_M3_DMEM_START + WKUP_M3_AUXDATA_OFFSET;
+	aux_data_addr = rproc_da_to_va(m3_ipc->rproc,
+				       aux_data_dev_addr,
+				       WKUP_M3_AUXDATA_SIZE,
+				       NULL);
+	memcpy(aux_data_addr, data, sz);
+
+	return WKUP_M3_AUXDATA_OFFSET;
+}
+
+static void wkup_m3_scale_data_fw_cb(const struct firmware *fw, void *context)
+{
+	unsigned long val, aux_base;
+	struct wkup_m3_scale_data_header hdr;
+	struct wkup_m3_ipc *m3_ipc = context;
+	struct device *dev = m3_ipc->dev;
+
+	if (!fw) {
+		dev_err(dev, "Voltage scale fw name given but file missing.\n");
+		return;
+	}
+
+	memcpy(&hdr, fw->data, sizeof(hdr));
+
+	if (hdr.magic != WKUP_M3_SD_FW_MAGIC) {
+		dev_err(dev, "PM: Voltage Scale Data binary does not appear valid.\n");
+		goto release_sd_fw;
+	}
+
+	aux_base = wkup_m3_copy_aux_data(m3_ipc, fw->data + sizeof(hdr),
+					 fw->size - sizeof(hdr));
+
+	val = (aux_base + hdr.sleep_offset);
+	val |= ((aux_base + hdr.wake_offset) << 16);
+
+	m3_ipc->volt_scale_offsets = val;
+
+release_sd_fw:
+	release_firmware(fw);
+};
+
+static int wkup_m3_init_scale_data(struct wkup_m3_ipc *m3_ipc,
+				   struct device *dev)
+{
+	int ret = 0;
+
+	/*
+	 * If no name is provided, user has already been warned, pm will
+	 * still work so return 0
+	 */
+
+	if (!m3_ipc->sd_fw_name)
+		return ret;
+
+	ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT,
+				      m3_ipc->sd_fw_name, dev, GFP_ATOMIC,
+				      m3_ipc, wkup_m3_scale_data_fw_cb);
+
+	return ret;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static void wkup_m3_set_halt_late(bool enabled)
+{
+	if (enabled)
+		m3_ipc_state->halt = (1 << IPC_DBG_HALT_SHIFT);
+	else
+		m3_ipc_state->halt = 0;
+}
+
+static int option_get(void *data, u64 *val)
+{
+	u32 *option = data;
+
+	*val = *option;
+
+	return 0;
+}
+
+static int option_set(void *data, u64 val)
+{
+	u32 *option = data;
+
+	*option = val;
+
+	if (option == &m3_ipc_state->halt) {
+		if (val)
+			wkup_m3_set_halt_late(true);
+		else
+			wkup_m3_set_halt_late(false);
+	}
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(wkup_m3_ipc_option_fops, option_get, option_set,
+			"%llu\n");
+
+static int wkup_m3_ipc_dbg_init(struct wkup_m3_ipc *m3_ipc)
+{
+	m3_ipc->dbg_path = debugfs_create_dir("wkup_m3_ipc", NULL);
+	if (!m3_ipc->dbg_path)
+		return -EINVAL;
+
+	(void)debugfs_create_file("enable_late_halt", 0644,
+				  m3_ipc->dbg_path,
+				  &m3_ipc->halt,
+				  &wkup_m3_ipc_option_fops);
+
+	return 0;
+}
+
+static inline void wkup_m3_ipc_dbg_destroy(struct wkup_m3_ipc *m3_ipc)
+{
+	debugfs_remove_recursive(m3_ipc->dbg_path);
+}
+#else
+static inline int wkup_m3_ipc_dbg_init(struct wkup_m3_ipc *m3_ipc)
+{
+	return 0;
+}
+
+static inline void wkup_m3_ipc_dbg_destroy(struct wkup_m3_ipc *m3_ipc)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+
 static void am33xx_txev_eoi(struct wkup_m3_ipc *m3_ipc)
 {
	writel(AM33XX_M3_TXEV_ACK,
@@ -130,6 +292,7 @@ static irqreturn_t wkup_m3_txev_handler(int irq, void *ipc_data)
		}
		m3_ipc->state = M3_STATE_INITED;
+		wkup_m3_init_scale_data(m3_ipc, dev);
		complete(&m3_ipc->sync_complete);
		break;
	case M3_STATE_MSG_FOR_RESET:
@@ -215,6 +378,17 @@ static int wkup_m3_is_available(struct wkup_m3_ipc *m3_ipc)
		(m3_ipc->state != M3_STATE_UNKNOWN));
 }

+static void wkup_m3_set_vtt_gpio(struct wkup_m3_ipc *m3_ipc, int gpio)
+{
+	m3_ipc->vtt_conf = (1 << IPC_VTT_STAT_SHIFT) |
+			   (gpio << IPC_VTT_GPIO_PIN_SHIFT);
+}
+
+static void wkup_m3_set_io_isolation(struct wkup_m3_ipc *m3_ipc)
+{
+	m3_ipc->isolation_conf = (1 << IPC_IO_ISOLATION_STAT_SHIFT);
+}
+
 /* Public functions */
 /**
  * wkup_m3_set_mem_type - Pass wkup_m3 which type of memory is in use
@@ -280,12 +454,15 @@ static int wkup_m3_prepare_low_power(struct wkup_m3_ipc *m3_ipc, int state)
	switch (state) {
	case WKUP_M3_DEEPSLEEP:
		m3_power_state = IPC_CMD_DS0;
+		wkup_m3_ctrl_ipc_write(m3_ipc, m3_ipc->volt_scale_offsets, 5);
		break;
	case WKUP_M3_STANDBY:
		m3_power_state = IPC_CMD_STANDBY;
+		wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 5);
		break;
	case WKUP_M3_IDLE:
		m3_power_state = IPC_CMD_IDLE;
+		wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 5);
		break;
	default:
		return 1;
@@ -294,11 +471,13 @@ static int wkup_m3_prepare_low_power(struct wkup_m3_ipc *m3_ipc, int state)

	/* Program each required IPC register then write defaults to others */
	wkup_m3_ctrl_ipc_write(m3_ipc, m3_ipc->resume_addr, 0);
	wkup_m3_ctrl_ipc_write(m3_ipc, m3_power_state, 1);
-	wkup_m3_ctrl_ipc_write(m3_ipc, m3_ipc->mem_type, 4);
+	wkup_m3_ctrl_ipc_write(m3_ipc, m3_ipc->mem_type |
+			       m3_ipc->vtt_conf |
+			       m3_ipc->isolation_conf |
+			       m3_ipc->halt, 4);

	wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 2);
	wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 3);
-	wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 5);
	wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 6);
	wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 7);
@@ -433,12 +612,13 @@ static int wkup_m3_rproc_boot_thread(void *arg)
 static int wkup_m3_ipc_probe(struct platform_device *pdev)
 {
	struct device *dev = &pdev->dev;
-	int irq, ret;
+	int irq, ret, temp;
	phandle rproc_phandle;
	struct rproc *m3_rproc;
	struct resource *res;
	struct task_struct *task;
	struct wkup_m3_ipc *m3_ipc;
+	struct device_node *np = dev->of_node;

	m3_ipc = devm_kzalloc(dev, sizeof(*m3_ipc), GFP_KERNEL);
	if (!m3_ipc)
@@ -450,10 +630,8 @@ static int wkup_m3_ipc_probe(struct platform_device *pdev)
		return PTR_ERR(m3_ipc->ipc_mem_base);

	irq = platform_get_irq(pdev, 0);
-	if (irq < 0) {
-		dev_err(&pdev->dev, "no irq resource\n");
+	if (irq < 0)
		return irq;
-	}

	ret = devm_request_irq(dev, irq, wkup_m3_txev_handler,
			       0, "wkup_m3_txev", m3_ipc);
@@ -496,6 +674,22 @@ static int wkup_m3_ipc_probe(struct platform_device *pdev)

	m3_ipc->ops = &ipc_ops;

+	if (!of_property_read_u32(np, "ti,vtt-gpio-pin", &temp)) {
+		if (temp >= 0 && temp <= 31)
+			wkup_m3_set_vtt_gpio(m3_ipc, temp);
+		else
+			dev_warn(dev, "Invalid VTT GPIO(%d) pin\n", temp);
+	}
+
+	if (of_find_property(np, "ti,set-io-isolation", NULL))
+		wkup_m3_set_io_isolation(m3_ipc);
+
+	ret = of_property_read_string(np, "firmware-name",
+				      &m3_ipc->sd_fw_name);
+	if (ret) {
+		dev_dbg(dev, "Voltage scaling data blob not provided from DT.\n");
+	};
+
	/*
	 * Wait for firmware loading completion in a thread so we
	 * can boot the wkup_m3 as soon as it's ready without holding
@@ -510,6 +704,8 @@ static int wkup_m3_ipc_probe(struct platform_device *pdev)
		goto err_put_rproc;
	}

+	wkup_m3_ipc_dbg_init(m3_ipc);
+
	return 0;

 err_put_rproc:
@@ -521,6 +717,8 @@ static int wkup_m3_ipc_probe(struct platform_device *pdev)
 static int wkup_m3_ipc_remove(struct platform_device *pdev)
 {
+	wkup_m3_ipc_dbg_destroy(m3_ipc_state);
+
	mbox_free_channel(m3_ipc_state->mbox);

	rproc_shutdown(m3_ipc_state->rproc);
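
In wkup_m3_prepare_low_power() above, the word written to IPC register 4 now ORs together the memory type, the VTT regulator configuration, the IO-isolation flag and the debug-halt flag. A standalone sketch of how that word is packed, reusing the shift values added in this series; the input values below are arbitrary demo numbers:

#include <stdio.h>

#define IPC_MEM_TYPE_SHIFT		(0x0)
#define IPC_VTT_STAT_SHIFT		(0x3)
#define IPC_VTT_GPIO_PIN_SHIFT		(0x4)
#define IPC_IO_ISOLATION_STAT_SHIFT	(10)
#define IPC_DBG_HALT_SHIFT		(11)

int main(void)
{
	unsigned int mem_type = 0x3;	/* demo memory type, bits 0-2 */
	unsigned int vtt_gpio = 7;	/* demo VTT toggle GPIO pin, bits 4-9 */
	unsigned int word;

	/* mirrors: mem_type | vtt_conf | isolation_conf | halt */
	word = (mem_type << IPC_MEM_TYPE_SHIFT) |
	       (1 << IPC_VTT_STAT_SHIFT) |
	       (vtt_gpio << IPC_VTT_GPIO_PIN_SHIFT) |
	       (1 << IPC_IO_ISOLATION_STAT_SHIFT) |
	       (1 << IPC_DBG_HALT_SHIFT);

	printf("IPC reg4 word: 0x%x\n", word);
	return 0;
}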

diff --git a/include/linux/wkup_m3_ipc.h b/include/linux/wkup_m3_ipc.h
@@ -33,7 +33,13 @@ struct wkup_m3_ipc {
	int mem_type;
	unsigned long resume_addr;
+	int vtt_conf;
+	int isolation_conf;
	int state;
+	u32 halt;
+
+	unsigned long volt_scale_offsets;
+	const char *sd_fw_name;

	struct completion sync_complete;
	struct mbox_client mbox_client;
@@ -41,6 +47,7 @@ struct wkup_m3_ipc {
	struct wkup_m3_ipc_ops *ops;
	int is_rtc_only;
+	struct dentry *dbg_path;
 };

 struct wkup_m3_wakeup_src {
@@ -48,6 +55,12 @@ struct wkup_m3_wakeup_src {
	char src[10];
 };

+struct wkup_m3_scale_data_header {
+	u16 magic;
+	u8 sleep_offset;
+	u8 wake_offset;
+} __packed;
+
 struct wkup_m3_ipc_ops {
	void (*set_mem_type)(struct wkup_m3_ipc *m3_ipc, int mem_type);
	void (*set_resume_address)(struct wkup_m3_ipc *m3_ipc, void *addr);
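
The wkup_m3_scale_data_header added above describes the voltage-scaling blob loaded via firmware-name: a 16-bit magic followed by sleep and wake offsets, which wkup_m3_scale_data_fw_cb() packs into one 32-bit word (sleep offset in the low half, wake offset in the high half, both relative to the aux-data area). A standalone sketch of that parsing and packing; the sample bytes and the little-endian assumption are illustrative only:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define WKUP_M3_SD_FW_MAGIC	0x570C
#define WKUP_M3_AUXDATA_OFFSET	0x1000

struct scale_data_header {
	uint16_t magic;
	uint8_t sleep_offset;
	uint8_t wake_offset;
} __attribute__((packed));

int main(void)
{
	/* little-endian magic 0x570C, sleep opcodes at +0x10, wake at +0x40 */
	const uint8_t blob[] = { 0x0C, 0x57, 0x10, 0x40 /* ...opcodes follow... */ };
	struct scale_data_header hdr;
	uint32_t val;

	memcpy(&hdr, blob, sizeof(hdr));
	if (hdr.magic != WKUP_M3_SD_FW_MAGIC) {
		fprintf(stderr, "bad magic 0x%x\n", hdr.magic);
		return 1;
	}

	/* mirrors the packing done in wkup_m3_scale_data_fw_cb() */
	val  = WKUP_M3_AUXDATA_OFFSET + hdr.sleep_offset;
	val |= (uint32_t)(WKUP_M3_AUXDATA_OFFSET + hdr.wake_offset) << 16;
	printf("volt_scale_offsets = 0x%08x\n", val);
	return 0;
}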