// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2018-2021 Linaro Ltd.
 */
|
|
|
|
|
|
|
|
#include <linux/errno.h>
|
|
|
|
#include <linux/if_arp.h>
|
|
|
|
#include <linux/netdevice.h>
|
|
|
|
#include <linux/skbuff.h>
|
|
|
|
#include <linux/if_rmnet.h>
|
2021-08-12 19:50:32 +00:00
|
|
|
#include <linux/pm_runtime.h>
|
2020-07-24 18:11:41 +00:00
|
|
|
#include <linux/remoteproc/qcom_rproc.h>
|
2020-03-06 04:28:27 +00:00
|
|
|
|
|
|
|
#include "ipa.h"
|
|
|
|
#include "ipa_data.h"
|
|
|
|
#include "ipa_endpoint.h"
|
|
|
|
#include "ipa_table.h"
|
|
|
|
#include "ipa_mem.h"
|
|
|
|
#include "ipa_modem.h"
|
|
|
|
#include "ipa_smp2p.h"
|
|
|
|
#include "ipa_qmi.h"
|
2021-07-26 20:11:36 +00:00
|
|
|
#include "ipa_uc.h"
|
2021-07-27 21:19:31 +00:00
|
|
|
#include "ipa_clock.h"
|
2020-03-06 04:28:27 +00:00
|
|
|
|
|
|
|
/* Name template for the modem network device ("%d" is filled in at
 * registration time by the networking core).
 */
#define IPA_NETDEV_NAME "rmnet_ipa%d"
#define IPA_NETDEV_TAILROOM 0 /* for padding by mux layer */
#define IPA_NETDEV_TIMEOUT 10 /* seconds */
|
|
|
|
|
|
|
|
/**
 * enum ipa_modem_state - modem netdev state machine
 * @IPA_MODEM_STATE_STOPPED:	modem network device not started
 * @IPA_MODEM_STATE_STARTING:	start in progress (transient)
 * @IPA_MODEM_STATE_RUNNING:	modem network device up and registered
 * @IPA_MODEM_STATE_STOPPING:	stop in progress (transient)
 *
 * Transitions are made with atomic_cmpxchg() in ipa_modem_start() and
 * ipa_modem_stop(), so concurrent start/stop attempts are serialized.
 */
enum ipa_modem_state {
	IPA_MODEM_STATE_STOPPED = 0,
	IPA_MODEM_STATE_STARTING,
	IPA_MODEM_STATE_RUNNING,
	IPA_MODEM_STATE_STOPPING,
};
|
|
|
|
|
2021-08-12 19:50:32 +00:00
|
|
|
/**
 * struct ipa_priv - IPA network device private data
 * @ipa: IPA pointer
 * @work: Work structure used to wake the modem netdev TX queue
 *	  (scheduled from ipa_modem_resume(), run by the PM workqueue)
 *
 * Embedded in the net_device via alloc_netdev(sizeof(struct ipa_priv), ...)
 * and retrieved with netdev_priv().
 */
struct ipa_priv {
	struct ipa *ipa;
	struct work_struct work;
};
|
|
|
|
|
|
|
|
/** ipa_open() - Opens the modem network interface */
static int ipa_open(struct net_device *netdev)
{
	struct ipa_priv *priv = netdev_priv(netdev);
	struct ipa *ipa = priv->ipa;
	int ret;

	/* ipa_clock_get() takes a reference even when it returns an error
	 * (mimicking pm_runtime_get()), so the error path must still drop
	 * the reference.  On error we must not touch hardware.
	 */
	ret = ipa_clock_get(ipa);
	if (WARN_ON(ret < 0))
		goto err_clock_put;

	/* Enable the two endpoints carrying modem traffic: TX then RX */
	ret = ipa_endpoint_enable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]);
	if (ret)
		goto err_clock_put;

	ret = ipa_endpoint_enable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]);
	if (ret)
		goto err_disable_tx;

	netif_start_queue(netdev);

	/* Drop the reference taken above; errors here are already logged */
	(void)ipa_clock_put(ipa);

	return 0;

err_disable_tx:
	ipa_endpoint_disable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]);
err_clock_put:
	(void)ipa_clock_put(ipa);

	return ret;
}
|
|
|
|
|
|
|
|
/** ipa_stop() - Stops the modem network interface. */
static int ipa_stop(struct net_device *netdev)
{
	struct ipa_priv *priv = netdev_priv(netdev);
	struct ipa *ipa = priv->ipa;
	int ret;

	/* A clock-get error means we must not touch hardware, but a
	 * reference is held regardless, so it is dropped at the label.
	 * Stopping always "succeeds"; the endpoints are simply left as-is.
	 */
	ret = ipa_clock_get(ipa);
	if (WARN_ON(ret < 0))
		goto out_clock_put;

	/* Quiesce the TX queue before disabling the endpoints beneath it */
	netif_stop_queue(netdev);

	ipa_endpoint_disable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]);
	ipa_endpoint_disable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]);
out_clock_put:
	(void)ipa_clock_put(ipa);

	return 0;
}
|
|
|
|
|
|
|
|
/** ipa_start_xmit() - Transmits an skb.
 * @skb: skb to be transmitted
 * @netdev: network device
 *
 * Return codes:
 * NETDEV_TX_OK: Success
 * NETDEV_TX_BUSY: Error while transmitting the skb. Try again later
 */
static netdev_tx_t
ipa_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct net_device_stats *stats = &netdev->stats;
	struct ipa_priv *priv = netdev_priv(netdev);
	struct ipa_endpoint *endpoint;
	struct ipa *ipa = priv->ipa;
	u32 skb_len = skb->len;	/* capture now; skb may be consumed below */
	struct device *dev;
	int ret;

	/* Zero-length packets are silently dropped */
	if (!skb_len)
		goto err_drop_skb;

	/* A QMAP endpoint only carries ETH_P_MAP (QMAP-encapsulated) skbs */
	endpoint = ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX];
	if (endpoint->data->qmap && skb->protocol != htons(ETH_P_MAP))
		goto err_drop_skb;

	/* The hardware must be powered for us to transmit.  A positive
	 * pm_runtime_get() return means the device is already active;
	 * otherwise a resume has (at best) only been requested.
	 */
	dev = &ipa->pdev->dev;
	ret = pm_runtime_get(dev);
	if (ret < 1) {
		/* If a resume won't happen, just drop the packet */
		if (ret < 0 && ret != -EINPROGRESS) {
			pm_runtime_put_noidle(dev);
			goto err_drop_skb;
		}

		/* No power (yet). Stop the network stack from transmitting
		 * until we're resumed; ipa_modem_resume() arranges for the
		 * TX queue to be started again.
		 */
		netif_stop_queue(netdev);

		(void)pm_runtime_put(dev);

		return NETDEV_TX_BUSY;
	}

	ret = ipa_endpoint_skb_tx(endpoint, skb);

	(void)pm_runtime_put(dev);

	if (ret) {
		/* An oversized skb (-E2BIG) won't succeed on retry, so it
		 * is dropped; any other error asks the stack to retry.
		 */
		if (ret != -E2BIG)
			return NETDEV_TX_BUSY;
		goto err_drop_skb;
	}

	stats->tx_packets++;
	stats->tx_bytes += skb_len;

	return NETDEV_TX_OK;

err_drop_skb:
	dev_kfree_skb_any(skb);
	stats->tx_dropped++;

	return NETDEV_TX_OK;
}
|
|
|
|
|
|
|
|
void ipa_modem_skb_rx(struct net_device *netdev, struct sk_buff *skb)
|
|
|
|
{
|
|
|
|
struct net_device_stats *stats = &netdev->stats;
|
|
|
|
|
|
|
|
if (skb) {
|
|
|
|
skb->dev = netdev;
|
|
|
|
skb->protocol = htons(ETH_P_MAP);
|
|
|
|
stats->rx_packets++;
|
|
|
|
stats->rx_bytes += skb->len;
|
|
|
|
|
|
|
|
(void)netif_receive_skb(skb);
|
|
|
|
} else {
|
|
|
|
stats->rx_dropped++;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Network device operations implemented for the modem netdev */
static const struct net_device_ops ipa_modem_ops = {
	.ndo_open = ipa_open,
	.ndo_stop = ipa_stop,
	.ndo_start_xmit = ipa_start_xmit,
};
|
|
|
|
|
|
|
|
/** ipa_modem_netdev_setup() - netdev setup function for the modem */
static void ipa_modem_netdev_setup(struct net_device *netdev)
{
	netdev->netdev_ops = &ipa_modem_ops;
	ether_setup(netdev);
	/* No header ops (override value set by ether_setup()) */
	netdev->header_ops = NULL;
	/* Raw IP device: no link-layer header and no hardware address */
	netdev->type = ARPHRD_RAWIP;
	netdev->hard_header_len = 0;
	netdev->max_mtu = IPA_MTU;
	netdev->mtu = netdev->max_mtu;
	netdev->addr_len = 0;
	/* Point-to-point style link: no broadcast or multicast */
	netdev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
	/* The endpoint is configured for QMAP */
	netdev->needed_headroom = sizeof(struct rmnet_map_header);
	netdev->needed_tailroom = IPA_NETDEV_TAILROOM;
	netdev->watchdog_timeo = IPA_NETDEV_TIMEOUT * HZ;
	netdev->hw_features = NETIF_F_SG;
}
|
|
|
|
|
|
|
|
/** ipa_modem_suspend() - suspend callback
 * @netdev: Network device
 *
 * Suspend the modem's endpoints.
 */
void ipa_modem_suspend(struct net_device *netdev)
{
	struct ipa_priv *priv = netdev_priv(netdev);
	struct ipa *ipa = priv->ipa;

	/* Endpoints are only enabled while the interface is up
	 * (see ipa_open()/ipa_stop()); nothing to suspend otherwise.
	 */
	if (!(netdev->flags & IFF_UP))
		return;

	ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]);
	ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]);
}
|
|
|
|
|
2021-08-12 19:50:32 +00:00
|
|
|
/**
|
|
|
|
* ipa_modem_wake_queue_work() - enable modem netdev queue
|
|
|
|
* @work: Work structure
|
|
|
|
*
|
|
|
|
* Re-enable transmit on the modem network device. This is called
|
|
|
|
* in (power management) work queue context, scheduled when resuming
|
2021-08-12 19:50:33 +00:00
|
|
|
* the modem. We can't enable the queue directly in ipa_modem_resume()
|
|
|
|
* because transmits restart the instant the queue is awakened; but the
|
|
|
|
* device power state won't be ACTIVE until *after* ipa_modem_resume()
|
|
|
|
* returns.
|
2021-08-12 19:50:32 +00:00
|
|
|
*/
|
|
|
|
static void ipa_modem_wake_queue_work(struct work_struct *work)
|
|
|
|
{
|
|
|
|
struct ipa_priv *priv = container_of(work, struct ipa_priv, work);
|
|
|
|
|
|
|
|
netif_wake_queue(priv->ipa->modem_netdev);
|
|
|
|
}
|
|
|
|
|
2020-03-06 04:28:27 +00:00
|
|
|
/** ipa_modem_resume() - resume callback for runtime_pm
 * @netdev: Network device
 *
 * Resume the modem's endpoints.
 */
void ipa_modem_resume(struct net_device *netdev)
{
	struct ipa_priv *priv = netdev_priv(netdev);
	struct ipa *ipa = priv->ipa;

	/* Endpoints are only enabled while the interface is up */
	if (!(netdev->flags & IFF_UP))
		return;

	ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]);
	ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]);

	/* Arrange for the TX queue to be restarted.  This is deferred to
	 * work queue context; see ipa_modem_wake_queue_work() for why.
	 */
	(void)queue_pm_work(&priv->work);
}
|
|
|
|
|
|
|
|
/* ipa_modem_start() - Allocate and register the modem network device */
int ipa_modem_start(struct ipa *ipa)
{
	enum ipa_modem_state state;
	struct net_device *netdev;
	struct ipa_priv *priv;
	int ret;

	/* Only attempt to start the modem if it's stopped */
	state = atomic_cmpxchg(&ipa->modem_state, IPA_MODEM_STATE_STOPPED,
			       IPA_MODEM_STATE_STARTING);

	/* Silently ignore attempts when running, or when changing state */
	if (state != IPA_MODEM_STATE_STOPPED)
		return 0;

	netdev = alloc_netdev(sizeof(struct ipa_priv), IPA_NETDEV_NAME,
			      NET_NAME_UNKNOWN, ipa_modem_netdev_setup);
	if (!netdev) {
		ret = -ENOMEM;
		goto out_set_state;
	}

	SET_NETDEV_DEV(netdev, &ipa->pdev->dev);
	priv = netdev_priv(netdev);
	priv->ipa = ipa;
	INIT_WORK(&priv->work, ipa_modem_wake_queue_work);
	/* Record the netdev pointers before registering, so they're
	 * already valid if the device starts carrying traffic.
	 */
	ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]->netdev = netdev;
	ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]->netdev = netdev;
	ipa->modem_netdev = netdev;

	ret = register_netdev(netdev);
	if (ret) {
		/* Unwind the pointer assignments above, then free */
		ipa->modem_netdev = NULL;
		ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]->netdev = NULL;
		ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]->netdev = NULL;
		free_netdev(netdev);
	}

out_set_state:
	/* Settle the state machine: back to STOPPED on error, RUNNING
	 * on success, with a barrier so the new state is visible.
	 */
	if (ret)
		atomic_set(&ipa->modem_state, IPA_MODEM_STATE_STOPPED);
	else
		atomic_set(&ipa->modem_state, IPA_MODEM_STATE_RUNNING);
	smp_mb__after_atomic();

	return ret;
}
|
|
|
|
|
|
|
|
/* ipa_modem_stop() - Stop and unregister the modem network device */
int ipa_modem_stop(struct ipa *ipa)
{
	struct net_device *netdev = ipa->modem_netdev;
	enum ipa_modem_state state;

	/* Only attempt to stop the modem if it's running */
	state = atomic_cmpxchg(&ipa->modem_state, IPA_MODEM_STATE_RUNNING,
			       IPA_MODEM_STATE_STOPPING);

	/* Silently ignore attempts when already stopped */
	if (state == IPA_MODEM_STATE_STOPPED)
		return 0;

	/* If we're somewhere between stopped and starting, we're busy */
	if (state != IPA_MODEM_STATE_RUNNING)
		return -EBUSY;

	/* Prevent the modem from triggering a call to ipa_setup() */
	ipa_smp2p_disable(ipa);

	/* Clean up the netdev and endpoints if it was started */
	if (netdev) {
		struct ipa_priv *priv = netdev_priv(netdev);

		/* Don't let a deferred queue-wake run during teardown */
		cancel_work_sync(&priv->work);
		/* If it was opened, stop it first */
		if (netdev->flags & IFF_UP)
			(void)ipa_stop(netdev);
		unregister_netdev(netdev);
		ipa->modem_netdev = NULL;
		ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]->netdev = NULL;
		ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]->netdev = NULL;
		free_netdev(netdev);
	}

	atomic_set(&ipa->modem_state, IPA_MODEM_STATE_STOPPED);
	smp_mb__after_atomic();

	return 0;
}
|
|
|
|
|
|
|
|
/* Treat a "clean" modem stop the same as a crash */
static void ipa_modem_crashed(struct ipa *ipa)
{
	struct device *dev = &ipa->pdev->dev;
	int ret;

	/* A reference is held even on error, but on error we must not
	 * touch hardware, so skip straight to dropping the reference.
	 */
	ret = ipa_clock_get(ipa);
	if (WARN_ON(ret < 0))
		goto out_clock_put;

	/* Pause modem traffic while recovery proceeds */
	ipa_endpoint_modem_pause_all(ipa, true);

	ipa_endpoint_modem_hol_block_clear_all(ipa);

	/* Reset filter/route tables (the flag presumably selects the
	 * modem-owned entries -- see ipa_table_reset())
	 */
	ipa_table_reset(ipa, true);

	/* Recovery errors below are logged but otherwise non-fatal */
	ret = ipa_table_hash_flush(ipa);
	if (ret)
		dev_err(dev, "error %d flushing hash caches\n", ret);

	ret = ipa_endpoint_modem_exception_reset_all(ipa);
	if (ret)
		dev_err(dev, "error %d resetting exception endpoint\n", ret);

	ipa_endpoint_modem_pause_all(ipa, false);

	ret = ipa_modem_stop(ipa);
	if (ret)
		dev_err(dev, "error %d stopping modem\n", ret);

	/* Now prepare for the next modem boot */
	ret = ipa_mem_zero_modem(ipa);
	if (ret)
		dev_err(dev, "error %d zeroing modem memory regions\n", ret);

out_clock_put:
	(void)ipa_clock_put(ipa);
}
|
|
|
|
|
2020-07-24 18:11:41 +00:00
|
|
|
/* Notifier callback invoked for modem subsystem (SSR) state changes */
static int ipa_modem_notify(struct notifier_block *nb, unsigned long action,
			    void *data)
{
	struct ipa *ipa = container_of(nb, struct ipa, nb);
	struct qcom_ssr_notify_data *notify_data = data;
	struct device *dev = &ipa->pdev->dev;

	switch (action) {
	case QCOM_SSR_BEFORE_POWERUP:
		/* Modem is about to boot: prepare the microcontroller
		 * clock and reset the SMP2P notification state.
		 */
		dev_info(dev, "received modem starting event\n");
		ipa_uc_clock(ipa);
		ipa_smp2p_notify_reset(ipa);
		break;

	case QCOM_SSR_AFTER_POWERUP:
		dev_info(dev, "received modem running event\n");
		break;

	case QCOM_SSR_BEFORE_SHUTDOWN:
		/* Both a crash and a clean stop trigger the same
		 * recovery, but only if hardware setup has completed.
		 */
		dev_info(dev, "received modem %s event\n",
			 notify_data->crashed ? "crashed" : "stopping");
		if (ipa->setup_complete)
			ipa_modem_crashed(ipa);
		break;

	case QCOM_SSR_AFTER_SHUTDOWN:
		dev_info(dev, "received modem offline event\n");
		break;

	default:
		dev_err(dev, "received unrecognized event %lu\n", action);
		break;
	}

	return NOTIFY_OK;
}
|
|
|
|
|
|
|
|
/* ipa_modem_init() - Early modem initialization (currently just SMP2P) */
int ipa_modem_init(struct ipa *ipa, bool modem_init)
{
	return ipa_smp2p_init(ipa, modem_init);
}
|
|
|
|
|
|
|
|
/* ipa_modem_exit() - Inverse of ipa_modem_init() */
void ipa_modem_exit(struct ipa *ipa)
{
	ipa_smp2p_exit(ipa);
}
|
|
|
|
|
|
|
|
int ipa_modem_config(struct ipa *ipa)
|
|
|
|
{
|
2020-07-24 18:11:41 +00:00
|
|
|
void *notifier;
|
|
|
|
|
|
|
|
ipa->nb.notifier_call = ipa_modem_notify;
|
|
|
|
|
|
|
|
notifier = qcom_register_ssr_notifier("mpss", &ipa->nb);
|
|
|
|
if (IS_ERR(notifier))
|
|
|
|
return PTR_ERR(notifier);
|
|
|
|
|
|
|
|
ipa->notifier = notifier;
|
|
|
|
|
|
|
|
return 0;
|
2020-03-06 04:28:27 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* ipa_modem_deconfig() - Inverse of ipa_modem_config() */
void ipa_modem_deconfig(struct ipa *ipa)
{
	struct device *dev = &ipa->pdev->dev;
	int ret;

	ret = qcom_unregister_ssr_notifier(ipa->notifier, &ipa->nb);
	if (ret)
		dev_err(dev, "error %d unregistering notifier", ret);

	/* Clear the saved handle and notifier block regardless of error */
	ipa->notifier = NULL;
	memset(&ipa->nb, 0, sizeof(ipa->nb));
}
|