nfp: flower: avoid rmmod nfp crash issues

When there are CT table entries and nfp is removed with rmmod, the
following sequence of events can occur:

task1:
    nfp_net_pci_remove
          ↓
    nfp_flower_stop->(asynchronous)tcf_ct_flow_table_cleanup_work(3)
          ↓
    nfp_zone_table_entry_destroy(1)

task2:
    nfp_fl_ct_handle_nft_flow(2)

When the execution order is (1)->(2)->(3), the nft callback in (2) is
invoked on a zone table entry that (1) has already destroyed, and the
driver crashes. Therefore, nf_flow_table_offload_del_cb needs to be
executed synchronously in nfp_fl_ct_del_flow, so that the callback is
unregistered before the zone table entry is destroyed.

At the same time, in order to solve the deadlock problem and the problem
of rtnl_lock sometimes failing to be acquired, replace rtnl_lock with the
driver's private nfp_fl_lock.
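
Below is a minimal sketch of the resulting synchronisation pattern, written
as userspace C with pthreads rather than the kernel mutex API. The names
zone_entry, fl_lock, flow_block_lock, handle_nft_flow and del_flow are
illustrative stand-ins (for zt, nfp_fl_lock, the nf_flowtable callback lock,
nfp_fl_ct_handle_nft_flow and nfp_fl_ct_del_flow), not driver symbols;
memory-ordering details are elided just as in the driver code.

/*
 * Minimal userspace sketch of the locking handshake described above,
 * using pthreads instead of kernel mutexes. All names are illustrative.
 */
#include <pthread.h>
#include <stddef.h>
#include <unistd.h>

struct zone_entry {
	void *nft;                       /* non-NULL while the nft callback is registered */
	pthread_mutex_t fl_lock;         /* stand-in for priv->nfp_fl_lock */
	pthread_mutex_t flow_block_lock; /* stand-in for the nf_flowtable callback lock */
};

/*
 * nft-callback path: runs with flow_block_lock held and needs fl_lock.
 * It must not block on fl_lock unconditionally, because the teardown
 * path below holds fl_lock and wants flow_block_lock (classic ABBA).
 */
static int handle_nft_flow(struct zone_entry *zt)
{
	while (pthread_mutex_trylock(&zt->fl_lock) != 0) {
		if (!zt->nft)       /* teardown already started: give up */
			return -1;
		usleep(20 * 1000);  /* back off, mirroring msleep(20) */
	}
	/* ... offload the nft flow while holding fl_lock ... */
	pthread_mutex_unlock(&zt->fl_lock);
	return 0;
}

/*
 * Teardown path: called with fl_lock held. Clearing zt->nft first lets a
 * concurrent handle_nft_flow() bail out instead of waiting forever, and
 * the callback is then removed synchronously (under flow_block_lock)
 * before the zone entry itself is freed, so it can no longer fire.
 */
static void del_flow(struct zone_entry *zt)
{
	zt->nft = NULL;
	pthread_mutex_lock(&zt->flow_block_lock);
	/* remove handle_nft_flow() from the callback list here */
	pthread_mutex_unlock(&zt->flow_block_lock);
}

The trylock-plus-bail-out loop is what breaks the ABBA cycle: the callback
side backs off instead of blocking once teardown has begun.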

Fixes: 7cc93d888d ("nfp: flower-ct: remove callback delete deadlock")
Cc: stable@vger.kernel.org
Signed-off-by: Yanguo Li <yanguo.li@corigine.com>
Signed-off-by: Louis Peens <louis.peens@corigine.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
@@ -210,6 +210,7 @@ nfp_flower_cmsg_merge_hint_rx(struct nfp_app *app, struct sk_buff *skb)
 	unsigned int msg_len = nfp_flower_cmsg_get_data_len(skb);
 	struct nfp_flower_cmsg_merge_hint *msg;
 	struct nfp_fl_payload *sub_flows[2];
+	struct nfp_flower_priv *priv;
 	int err, i, flow_cnt;
 
 	msg = nfp_flower_cmsg_get_data(skb);
@@ -228,14 +229,15 @@ nfp_flower_cmsg_merge_hint_rx(struct nfp_app *app, struct sk_buff *skb)
 		return;
 	}
 
-	rtnl_lock();
+	priv = app->priv;
+	mutex_lock(&priv->nfp_fl_lock);
 	for (i = 0; i < flow_cnt; i++) {
 		u32 ctx = be32_to_cpu(msg->flow[i].host_ctx);
 
 		sub_flows[i] = nfp_flower_get_fl_payload_from_ctx(app, ctx);
 		if (!sub_flows[i]) {
 			nfp_flower_cmsg_warn(app, "Invalid flow in merge hint\n");
-			goto err_rtnl_unlock;
+			goto err_mutex_unlock;
 		}
 	}
 
@@ -244,8 +246,8 @@ nfp_flower_cmsg_merge_hint_rx(struct nfp_app *app, struct sk_buff *skb)
 	if (err == -ENOMEM)
 		nfp_flower_cmsg_warn(app, "Flow merge memory fail.\n");
 
-err_rtnl_unlock:
-	rtnl_unlock();
+err_mutex_unlock:
+	mutex_unlock(&priv->nfp_fl_lock);
 }
 
 static void

--- a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
@@ -2131,8 +2131,6 @@ nfp_fl_ct_offload_nft_flow(struct nfp_fl_ct_zone_entry *zt, struct flow_cls_offl
 	struct nfp_fl_ct_flow_entry *ct_entry;
 	struct netlink_ext_ack *extack = NULL;
 
-	ASSERT_RTNL();
-
 	extack = flow->common.extack;
 	switch (flow->command) {
 	case FLOW_CLS_REPLACE:
@@ -2178,9 +2176,13 @@ int nfp_fl_ct_handle_nft_flow(enum tc_setup_type type, void *type_data, void *cb
 
 	switch (type) {
 	case TC_SETUP_CLSFLOWER:
-		rtnl_lock();
+		while (!mutex_trylock(&zt->priv->nfp_fl_lock)) {
+			if (!zt->nft) /* avoid deadlock */
+				return err;
+			msleep(20);
+		}
 		err = nfp_fl_ct_offload_nft_flow(zt, flow);
-		rtnl_unlock();
+		mutex_unlock(&zt->priv->nfp_fl_lock);
 		break;
 	default:
 		return -EOPNOTSUPP;
@@ -2208,6 +2210,7 @@ int nfp_fl_ct_del_flow(struct nfp_fl_ct_map_entry *ct_map_ent)
 	struct nfp_fl_ct_flow_entry *ct_entry;
 	struct nfp_fl_ct_zone_entry *zt;
 	struct rhashtable *m_table;
+	struct nf_flowtable *nft;
 
 	if (!ct_map_ent)
 		return -ENOENT;
@@ -2226,8 +2229,12 @@ int nfp_fl_ct_del_flow(struct nfp_fl_ct_map_entry *ct_map_ent)
 		if (ct_map_ent->cookie > 0)
 			kfree(ct_map_ent);
 
-		if (!zt->pre_ct_count) {
-			zt->nft = NULL;
+		if (!zt->pre_ct_count && zt->nft) {
+			nft = zt->nft;
+			zt->nft = NULL; /* avoid deadlock */
+			nf_flow_table_offload_del_cb(nft,
+						     nfp_fl_ct_handle_nft_flow,
+						     zt);
 			nfp_fl_ct_clean_nft_entries(zt);
 		}
 		break;

--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -297,6 +297,7 @@ struct nfp_fl_internal_ports {
  * @predt_list:		List to keep track of decap pretun flows
  * @neigh_table:	Table to keep track of neighbor entries
  * @predt_lock:		Lock to serialise predt/neigh table updates
+ * @nfp_fl_lock:	Lock to protect the flow offload operation
  */
 struct nfp_flower_priv {
 	struct nfp_app *app;
@@ -339,6 +340,7 @@ struct nfp_flower_priv {
 	struct list_head predt_list;
 	struct rhashtable neigh_table;
 	spinlock_t predt_lock; /* Lock to serialise predt/neigh table updates */
+	struct mutex nfp_fl_lock; /* Protect the flow operation */
 };
 
 /**

--- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
@@ -528,6 +528,8 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
 	if (err)
 		goto err_free_stats_ctx_table;
 
+	mutex_init(&priv->nfp_fl_lock);
+
 	err = rhashtable_init(&priv->ct_zone_table, &nfp_zone_table_params);
 	if (err)
 		goto err_free_merge_table;

--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -1009,8 +1009,6 @@ int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
 	u64 parent_ctx = 0;
 	int err;
 
-	ASSERT_RTNL();
-
 	if (sub_flow1 == sub_flow2 ||
 	    nfp_flower_is_merge_flow(sub_flow1) ||
 	    nfp_flower_is_merge_flow(sub_flow2))
@@ -1727,19 +1725,30 @@ static int
 nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
 			struct flow_cls_offload *flower)
 {
+	struct nfp_flower_priv *priv = app->priv;
+	int ret;
+
 	if (!eth_proto_is_802_3(flower->common.protocol))
 		return -EOPNOTSUPP;
 
+	mutex_lock(&priv->nfp_fl_lock);
 	switch (flower->command) {
 	case FLOW_CLS_REPLACE:
-		return nfp_flower_add_offload(app, netdev, flower);
+		ret = nfp_flower_add_offload(app, netdev, flower);
+		break;
 	case FLOW_CLS_DESTROY:
-		return nfp_flower_del_offload(app, netdev, flower);
+		ret = nfp_flower_del_offload(app, netdev, flower);
+		break;
 	case FLOW_CLS_STATS:
-		return nfp_flower_get_stats(app, netdev, flower);
+		ret = nfp_flower_get_stats(app, netdev, flower);
+		break;
 	default:
-		return -EOPNOTSUPP;
+		ret = -EOPNOTSUPP;
+		break;
 	}
+	mutex_unlock(&priv->nfp_fl_lock);
+
+	return ret;
 }
 
 static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type,
@@ -1778,6 +1787,7 @@ static int nfp_flower_setup_tc_block(struct net_device *netdev,
 	repr_priv = repr->app_priv;
 	repr_priv->block_shared = f->block_shared;
 	f->driver_block_list = &nfp_block_cb_list;
+	f->unlocked_driver_cb = true;
 
 	switch (f->command) {
 	case FLOW_BLOCK_BIND:
@@ -1876,6 +1886,8 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct Qdisc *sch, str
 	     nfp_flower_internal_port_can_offload(app, netdev)))
 		return -EOPNOTSUPP;
 
+	f->unlocked_driver_cb = true;
+
 	switch (f->command) {
 	case FLOW_BLOCK_BIND:
 		cb_priv = nfp_flower_indr_block_cb_priv_lookup(app, netdev);

--- a/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
@@ -523,25 +523,31 @@ int nfp_flower_setup_qos_offload(struct nfp_app *app, struct net_device *netdev,
 {
 	struct netlink_ext_ack *extack = flow->common.extack;
 	struct nfp_flower_priv *fl_priv = app->priv;
+	int ret;
 
 	if (!(fl_priv->flower_ext_feats & NFP_FL_FEATS_VF_RLIM)) {
 		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support qos rate limit offload");
 		return -EOPNOTSUPP;
 	}
 
+	mutex_lock(&fl_priv->nfp_fl_lock);
 	switch (flow->command) {
 	case TC_CLSMATCHALL_REPLACE:
-		return nfp_flower_install_rate_limiter(app, netdev, flow,
-						       extack);
+		ret = nfp_flower_install_rate_limiter(app, netdev, flow, extack);
+		break;
 	case TC_CLSMATCHALL_DESTROY:
-		return nfp_flower_remove_rate_limiter(app, netdev, flow,
-						      extack);
+		ret = nfp_flower_remove_rate_limiter(app, netdev, flow, extack);
+		break;
 	case TC_CLSMATCHALL_STATS:
-		return nfp_flower_stats_rate_limiter(app, netdev, flow,
-						     extack);
+		ret = nfp_flower_stats_rate_limiter(app, netdev, flow, extack);
+		break;
 	default:
-		return -EOPNOTSUPP;
+		ret = -EOPNOTSUPP;
+		break;
 	}
+	mutex_unlock(&fl_priv->nfp_fl_lock);
+
+	return ret;
 }
 
 /* Offload tc action, currently only for tc police */