net: wwan: iosm: enable runtime pm support for 7560

Add runtime PM support for the 7560 modem.

As part of the probe procedure, autosuspend is enabled and the autosuspend
delay is set to 5000 ms for runtime PM use. Later, the auto flag is set
(pm_runtime_allow()) so that the device can be power managed at run time.
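
Condensed from the ipc_imem_init() hunk below, the probe-time setup
amounts to roughly this:

	/* enable runtime PM and autosuspend with a 5000 ms delay */
	if (!pm_runtime_enabled(ipc_imem->dev))
		pm_runtime_enable(ipc_imem->dev);
	pm_runtime_set_autosuspend_delay(ipc_imem->dev,
					 IPC_MEM_AUTO_SUSPEND_DELAY_MS);
	pm_runtime_use_autosuspend(ipc_imem->dev);
	/* set the auto flag so runtime PM is permitted by default */
	pm_runtime_allow(ipc_imem->dev);
	pm_runtime_mark_last_busy(ipc_imem->dev);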

Once communication between host and device is successfully established,
the device usage counter is dropped and a request is made to put the
device into the sleep state (suspend).
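
Gated on the 7560 device ID, this is the usual autosuspend put pair
(see the ipc_imem_run_state_worker() hunk below):

	if (ipc_imem->pcie->pci->device == INTEL_CP_DEVICE_7560_ID) {
		/* drop the device usage counter; once it reaches zero the
		 * autosuspend timer may suspend the device after the delay
		 */
		pm_runtime_mark_last_busy(ipc_imem->dev);
		pm_runtime_put_autosuspend(ipc_imem->dev);
	}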

In the TX path, the device usage counter is raised and the device is moved
out of sleep (resumed) for data transmission. In the RX path, if the device
has data to send, it requests the host platform to change the power state
by issuing a PCI PME message.
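
The data path wraps transmit in the usual get/put pattern; a rough
outline of the ipc_wwan_link_transmit() change below:

	/* raise the usage counter and trigger an (async) resume */
	pm_runtime_get(ipc_wwan->ipc_imem->dev);

	ret = ipc_imem_sys_wwan_transmit(ipc_wwan->ipc_imem,
					 if_id, priv->ch_id, skb);

	/* rearm the autosuspend timer and drop the usage counter */
	pm_runtime_mark_last_busy(ipc_wwan->ipc_imem->dev);
	pm_runtime_put_autosuspend(ipc_wwan->ipc_imem->dev);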

Signed-off-by: M Chetan Kumar <m.chetan.kumar@linux.intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
M Chetan Kumar 2023-06-08 15:38:03 +05:30 committed by David S. Miller
parent cbb1ca6d5f
commit e4f5073d53
6 changed files with 65 additions and 4 deletions

drivers/net/wwan/iosm/iosm_ipc_imem.c

@@ -4,6 +4,7 @@
*/
#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include "iosm_ipc_chnl_cfg.h"
#include "iosm_ipc_devlink.h"
@@ -631,6 +632,11 @@ static void ipc_imem_run_state_worker(struct work_struct *instance)
/* Complete all memory stores after setting bit */
smp_mb__after_atomic();
if (ipc_imem->pcie->pci->device == INTEL_CP_DEVICE_7560_ID) {
pm_runtime_mark_last_busy(ipc_imem->dev);
pm_runtime_put_autosuspend(ipc_imem->dev);
}
return;
err_ipc_mux_deinit:
@@ -1234,6 +1240,7 @@ void ipc_imem_cleanup(struct iosm_imem *ipc_imem)
/* forward MDM_NOT_READY to listeners */
ipc_uevent_send(ipc_imem->dev, UEVENT_MDM_NOT_READY);
pm_runtime_get_sync(ipc_imem->dev);
hrtimer_cancel(&ipc_imem->td_alloc_timer);
hrtimer_cancel(&ipc_imem->tdupdate_timer);
@@ -1419,6 +1426,16 @@ struct iosm_imem *ipc_imem_init(struct iosm_pcie *pcie, unsigned int device_id,
set_bit(IOSM_DEVLINK_INIT, &ipc_imem->flag);
}
if (!pm_runtime_enabled(ipc_imem->dev))
pm_runtime_enable(ipc_imem->dev);
pm_runtime_set_autosuspend_delay(ipc_imem->dev,
IPC_MEM_AUTO_SUSPEND_DELAY_MS);
pm_runtime_use_autosuspend(ipc_imem->dev);
pm_runtime_allow(ipc_imem->dev);
pm_runtime_mark_last_busy(ipc_imem->dev);
return ipc_imem;
devlink_channel_fail:
ipc_devlink_deinit(ipc_imem->ipc_devlink);

drivers/net/wwan/iosm/iosm_ipc_imem.h

@@ -103,6 +103,8 @@ struct ipc_chnl_cfg;
#define FULLY_FUNCTIONAL 0
#define IOSM_DEVLINK_INIT 1
#define IPC_MEM_AUTO_SUSPEND_DELAY_MS 5000
/* List of the supported UL/DL pipes. */
enum ipc_mem_pipes {
IPC_MEM_PIPE_0 = 0,

drivers/net/wwan/iosm/iosm_ipc_pcie.c

@@ -6,6 +6,7 @@
#include <linux/acpi.h>
#include <linux/bitfield.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <net/rtnetlink.h>
#include "iosm_ipc_imem.h"
@@ -437,7 +438,8 @@ static int __maybe_unused ipc_pcie_resume_cb(struct device *dev)
return 0;
}
static SIMPLE_DEV_PM_OPS(iosm_ipc_pm, ipc_pcie_suspend_cb, ipc_pcie_resume_cb);
static DEFINE_RUNTIME_DEV_PM_OPS(iosm_ipc_pm, ipc_pcie_suspend_cb,
ipc_pcie_resume_cb, NULL);
static struct pci_driver iosm_ipc_driver = {
.name = KBUILD_MODNAME,

drivers/net/wwan/iosm/iosm_ipc_port.c

@@ -3,6 +3,8 @@
* Copyright (C) 2020-21 Intel Corporation.
*/
#include <linux/pm_runtime.h>
#include "iosm_ipc_chnl_cfg.h"
#include "iosm_ipc_imem_ops.h"
#include "iosm_ipc_port.h"
@@ -13,12 +15,16 @@ static int ipc_port_ctrl_start(struct wwan_port *port)
struct iosm_cdev *ipc_port = wwan_port_get_drvdata(port);
int ret = 0;
pm_runtime_get_sync(ipc_port->ipc_imem->dev);
ipc_port->channel = ipc_imem_sys_port_open(ipc_port->ipc_imem,
ipc_port->chl_id,
IPC_HP_CDEV_OPEN);
if (!ipc_port->channel)
ret = -EIO;
pm_runtime_mark_last_busy(ipc_port->ipc_imem->dev);
pm_runtime_put_autosuspend(ipc_port->ipc_imem->dev);
return ret;
}
@@ -27,15 +33,24 @@ static void ipc_port_ctrl_stop(struct wwan_port *port)
{
struct iosm_cdev *ipc_port = wwan_port_get_drvdata(port);
pm_runtime_get_sync(ipc_port->ipc_imem->dev);
ipc_imem_sys_port_close(ipc_port->ipc_imem, ipc_port->channel);
pm_runtime_mark_last_busy(ipc_port->ipc_imem->dev);
pm_runtime_put_autosuspend(ipc_port->ipc_imem->dev);
}
/* transfer control data to modem */
static int ipc_port_ctrl_tx(struct wwan_port *port, struct sk_buff *skb)
{
struct iosm_cdev *ipc_port = wwan_port_get_drvdata(port);
int ret;
return ipc_imem_sys_cdev_write(ipc_port, skb);
pm_runtime_get_sync(ipc_port->ipc_imem->dev);
ret = ipc_imem_sys_cdev_write(ipc_port, skb);
pm_runtime_mark_last_busy(ipc_port->ipc_imem->dev);
pm_runtime_put_autosuspend(ipc_port->ipc_imem->dev);
return ret;
}
static const struct wwan_port_ops ipc_wwan_ctrl_ops = {

drivers/net/wwan/iosm/iosm_ipc_trace.c

@@ -3,7 +3,9 @@
* Copyright (C) 2020-2021 Intel Corporation.
*/
#include <linux/pm_runtime.h>
#include <linux/wwan.h>
#include "iosm_ipc_trace.h"
/* sub buffer size and number of sub buffer */
@@ -97,6 +99,8 @@ static ssize_t ipc_trace_ctrl_file_write(struct file *filp,
if (ret)
return ret;
pm_runtime_get_sync(ipc_trace->ipc_imem->dev);
mutex_lock(&ipc_trace->trc_mutex);
if (val == TRACE_ENABLE && ipc_trace->mode != TRACE_ENABLE) {
ipc_trace->channel = ipc_imem_sys_port_open(ipc_trace->ipc_imem,
@@ -117,6 +121,10 @@ static ssize_t ipc_trace_ctrl_file_write(struct file *filp,
ret = count;
unlock:
mutex_unlock(&ipc_trace->trc_mutex);
pm_runtime_mark_last_busy(ipc_trace->ipc_imem->dev);
pm_runtime_put_autosuspend(ipc_trace->ipc_imem->dev);
return ret;
}

drivers/net/wwan/iosm/iosm_ipc_wwan.c

@@ -6,6 +6,7 @@
#include <linux/etherdevice.h>
#include <linux/if_arp.h>
#include <linux/if_link.h>
#include <linux/pm_runtime.h>
#include <linux/rtnetlink.h>
#include <linux/wwan.h>
#include <net/pkt_sched.h>
@@ -51,11 +52,13 @@ static int ipc_wwan_link_open(struct net_device *netdev)
struct iosm_netdev_priv *priv = wwan_netdev_drvpriv(netdev);
struct iosm_wwan *ipc_wwan = priv->ipc_wwan;
int if_id = priv->if_id;
int ret = 0;
if (if_id < IP_MUX_SESSION_START ||
if_id >= ARRAY_SIZE(ipc_wwan->sub_netlist))
return -EINVAL;
pm_runtime_get_sync(ipc_wwan->ipc_imem->dev);
/* get channel id */
priv->ch_id = ipc_imem_sys_wwan_open(ipc_wwan->ipc_imem, if_id);
@@ -63,7 +66,8 @@ static int ipc_wwan_link_open(struct net_device *netdev)
dev_err(ipc_wwan->dev,
"cannot connect wwan0 & id %d to the IPC mem layer",
if_id);
return -ENODEV;
ret = -ENODEV;
goto err_out;
}
/* enable tx path, DL data may follow */
@@ -72,7 +76,11 @@ static int ipc_wwan_link_open(struct net_device *netdev)
dev_dbg(ipc_wwan->dev, "Channel id %d allocated to if_id %d",
priv->ch_id, priv->if_id);
return 0;
err_out:
pm_runtime_mark_last_busy(ipc_wwan->ipc_imem->dev);
pm_runtime_put_autosuspend(ipc_wwan->ipc_imem->dev);
return ret;
}
/* Bring-down the wwan net link */
@@ -82,9 +90,12 @@ static int ipc_wwan_link_stop(struct net_device *netdev)
netif_stop_queue(netdev);
pm_runtime_get_sync(priv->ipc_wwan->ipc_imem->dev);
ipc_imem_sys_wwan_close(priv->ipc_wwan->ipc_imem, priv->if_id,
priv->ch_id);
priv->ch_id = -1;
pm_runtime_mark_last_busy(priv->ipc_wwan->ipc_imem->dev);
pm_runtime_put_autosuspend(priv->ipc_wwan->ipc_imem->dev);
return 0;
}
@@ -106,6 +117,7 @@ static netdev_tx_t ipc_wwan_link_transmit(struct sk_buff *skb,
if_id >= ARRAY_SIZE(ipc_wwan->sub_netlist))
return -EINVAL;
pm_runtime_get(ipc_wwan->ipc_imem->dev);
/* Send the SKB to device for transmission */
ret = ipc_imem_sys_wwan_transmit(ipc_wwan->ipc_imem,
if_id, priv->ch_id, skb);
@@ -119,9 +131,14 @@ static netdev_tx_t ipc_wwan_link_transmit(struct sk_buff *skb,
ret = NETDEV_TX_BUSY;
dev_err(ipc_wwan->dev, "unable to push packets");
} else {
pm_runtime_mark_last_busy(ipc_wwan->ipc_imem->dev);
pm_runtime_put_autosuspend(ipc_wwan->ipc_imem->dev);
goto exit;
}
pm_runtime_mark_last_busy(ipc_wwan->ipc_imem->dev);
pm_runtime_put_autosuspend(ipc_wwan->ipc_imem->dev);
return ret;
exit: