hsr: use rcu_read_lock() in hsr_get_node_{list/status}()

[ Upstream commit 173756b868 ]

hsr_get_node_{list/status}() are not under rtnl_lock() because
they are callback functions of generic netlink.
But they use __dev_get_by_index(), which requires rtnl_lock() to be
held, so they could operate on unsafe (concurrently modified) data.
In order to fix this, rcu_read_lock() and dev_get_by_index_rcu()
are used instead of __dev_get_by_index().

Fixes: f421436a59 ("net/hsr: Add support for the High-availability Seamless Redundancy protocol (HSRv0)")
Signed-off-by: Taehee Yoo <ap420073@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in:
Taehee Yoo 2020-03-13 06:50:14 +00:00 committed by Greg Kroah-Hartman
parent b200a210a6
commit 16da9c5d0e
2 changed files with 23 additions and 30 deletions

View file

@ -468,13 +468,9 @@ int hsr_get_node_data(struct hsr_priv *hsr,
struct hsr_port *port; struct hsr_port *port;
unsigned long tdiff; unsigned long tdiff;
rcu_read_lock();
node = find_node_by_AddrA(&hsr->node_db, addr); node = find_node_by_AddrA(&hsr->node_db, addr);
if (!node) { if (!node)
rcu_read_unlock(); return -ENOENT;
return -ENOENT; /* No such entry */
}
ether_addr_copy(addr_b, node->MacAddressB); ether_addr_copy(addr_b, node->MacAddressB);
@ -509,7 +505,5 @@ int hsr_get_node_data(struct hsr_priv *hsr,
*addr_b_ifindex = -1; *addr_b_ifindex = -1;
} }
rcu_read_unlock();
return 0; return 0;
} }

View file

@ -259,17 +259,16 @@ static int hsr_get_node_status(struct sk_buff *skb_in, struct genl_info *info)
if (!na) if (!na)
goto invalid; goto invalid;
hsr_dev = __dev_get_by_index(genl_info_net(info), rcu_read_lock();
nla_get_u32(info->attrs[HSR_A_IFINDEX])); hsr_dev = dev_get_by_index_rcu(genl_info_net(info),
nla_get_u32(info->attrs[HSR_A_IFINDEX]));
if (!hsr_dev) if (!hsr_dev)
goto invalid; goto rcu_unlock;
if (!is_hsr_master(hsr_dev)) if (!is_hsr_master(hsr_dev))
goto invalid; goto rcu_unlock;
/* Send reply */ /* Send reply */
skb_out = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
skb_out = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
if (!skb_out) { if (!skb_out) {
res = -ENOMEM; res = -ENOMEM;
goto fail; goto fail;
@ -321,12 +320,10 @@ static int hsr_get_node_status(struct sk_buff *skb_in, struct genl_info *info)
res = nla_put_u16(skb_out, HSR_A_IF1_SEQ, hsr_node_if1_seq); res = nla_put_u16(skb_out, HSR_A_IF1_SEQ, hsr_node_if1_seq);
if (res < 0) if (res < 0)
goto nla_put_failure; goto nla_put_failure;
rcu_read_lock();
port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A); port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
if (port) if (port)
res = nla_put_u32(skb_out, HSR_A_IF1_IFINDEX, res = nla_put_u32(skb_out, HSR_A_IF1_IFINDEX,
port->dev->ifindex); port->dev->ifindex);
rcu_read_unlock();
if (res < 0) if (res < 0)
goto nla_put_failure; goto nla_put_failure;
@ -336,20 +333,22 @@ static int hsr_get_node_status(struct sk_buff *skb_in, struct genl_info *info)
res = nla_put_u16(skb_out, HSR_A_IF2_SEQ, hsr_node_if2_seq); res = nla_put_u16(skb_out, HSR_A_IF2_SEQ, hsr_node_if2_seq);
if (res < 0) if (res < 0)
goto nla_put_failure; goto nla_put_failure;
rcu_read_lock();
port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B); port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
if (port) if (port)
res = nla_put_u32(skb_out, HSR_A_IF2_IFINDEX, res = nla_put_u32(skb_out, HSR_A_IF2_IFINDEX,
port->dev->ifindex); port->dev->ifindex);
rcu_read_unlock();
if (res < 0) if (res < 0)
goto nla_put_failure; goto nla_put_failure;
rcu_read_unlock();
genlmsg_end(skb_out, msg_head); genlmsg_end(skb_out, msg_head);
genlmsg_unicast(genl_info_net(info), skb_out, info->snd_portid); genlmsg_unicast(genl_info_net(info), skb_out, info->snd_portid);
return 0; return 0;
rcu_unlock:
rcu_read_unlock();
invalid: invalid:
netlink_ack(skb_in, nlmsg_hdr(skb_in), -EINVAL, NULL); netlink_ack(skb_in, nlmsg_hdr(skb_in), -EINVAL, NULL);
return 0; return 0;
@ -359,6 +358,7 @@ static int hsr_get_node_status(struct sk_buff *skb_in, struct genl_info *info)
/* Fall through */ /* Fall through */
fail: fail:
rcu_read_unlock();
return res; return res;
} }
@ -385,17 +385,16 @@ static int hsr_get_node_list(struct sk_buff *skb_in, struct genl_info *info)
if (!na) if (!na)
goto invalid; goto invalid;
hsr_dev = __dev_get_by_index(genl_info_net(info), rcu_read_lock();
nla_get_u32(info->attrs[HSR_A_IFINDEX])); hsr_dev = dev_get_by_index_rcu(genl_info_net(info),
nla_get_u32(info->attrs[HSR_A_IFINDEX]));
if (!hsr_dev) if (!hsr_dev)
goto invalid; goto rcu_unlock;
if (!is_hsr_master(hsr_dev)) if (!is_hsr_master(hsr_dev))
goto invalid; goto rcu_unlock;
/* Send reply */ /* Send reply */
skb_out = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
skb_out = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
if (!skb_out) { if (!skb_out) {
res = -ENOMEM; res = -ENOMEM;
goto fail; goto fail;
@ -415,14 +414,11 @@ static int hsr_get_node_list(struct sk_buff *skb_in, struct genl_info *info)
hsr = netdev_priv(hsr_dev); hsr = netdev_priv(hsr_dev);
rcu_read_lock();
pos = hsr_get_next_node(hsr, NULL, addr); pos = hsr_get_next_node(hsr, NULL, addr);
while (pos) { while (pos) {
res = nla_put(skb_out, HSR_A_NODE_ADDR, ETH_ALEN, addr); res = nla_put(skb_out, HSR_A_NODE_ADDR, ETH_ALEN, addr);
if (res < 0) { if (res < 0)
rcu_read_unlock();
goto nla_put_failure; goto nla_put_failure;
}
pos = hsr_get_next_node(hsr, pos, addr); pos = hsr_get_next_node(hsr, pos, addr);
} }
rcu_read_unlock(); rcu_read_unlock();
@ -432,6 +428,8 @@ static int hsr_get_node_list(struct sk_buff *skb_in, struct genl_info *info)
return 0; return 0;
rcu_unlock:
rcu_read_unlock();
invalid: invalid:
netlink_ack(skb_in, nlmsg_hdr(skb_in), -EINVAL, NULL); netlink_ack(skb_in, nlmsg_hdr(skb_in), -EINVAL, NULL);
return 0; return 0;
@ -441,6 +439,7 @@ static int hsr_get_node_list(struct sk_buff *skb_in, struct genl_info *info)
/* Fall through */ /* Fall through */
fail: fail:
rcu_read_unlock();
return res; return res;
} }