2019-05-05 10:19:27 +00:00
|
|
|
// SPDX-License-Identifier: GPL-2.0
|
|
|
|
/* Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com>
|
|
|
|
*/
|
|
|
|
#include <linux/if_vlan.h>
|
|
|
|
#include <linux/dsa/sja1105.h>
|
|
|
|
#include <linux/dsa/8021q.h>
|
|
|
|
#include <linux/packing.h>
|
|
|
|
#include "dsa_priv.h"
|
|
|
|
|
|
|
|
/* Similar to is_link_local_ether_addr(hdr->h_dest) but also covers PTP */
|
|
|
|
static inline bool sja1105_is_link_local(const struct sk_buff *skb)
|
|
|
|
{
|
|
|
|
const struct ethhdr *hdr = eth_hdr(skb);
|
|
|
|
u64 dmac = ether_addr_to_u64(hdr->h_dest);
|
|
|
|
|
2019-06-08 12:04:38 +00:00
|
|
|
if (ntohs(hdr->h_proto) == ETH_P_SJA1105_META)
|
|
|
|
return false;
|
2019-05-05 10:19:27 +00:00
|
|
|
if ((dmac & SJA1105_LINKLOCAL_FILTER_A_MASK) ==
|
|
|
|
SJA1105_LINKLOCAL_FILTER_A)
|
|
|
|
return true;
|
|
|
|
if ((dmac & SJA1105_LINKLOCAL_FILTER_B_MASK) ==
|
|
|
|
SJA1105_LINKLOCAL_FILTER_B)
|
|
|
|
return true;
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2019-06-08 12:04:39 +00:00
|
|
|
/* Fields of the meta follow-up frame, as unpacked by sja1105_meta_unpack().
 * All values are in host order after unpacking.
 */
struct sja1105_meta {
	/* Partial RX timestamp of the preceding link-local frame;
	 * only bits 31:0 are populated (24 bits on E/T, 32 on P/Q/R/S).
	 */
	u64 tstamp;
	/* Bytes 4 and 3 of the original DMAC of the timestamped frame,
	 * which the switch mangled via incl_srcpt; restored in
	 * sja1105_transfer_meta().
	 */
	u64 dmac_byte_4;
	u64 dmac_byte_3;
	/* Switch port on which the timestamped frame was received */
	u64 source_port;
	/* ID of the switch that generated this meta frame */
	u64 switch_id;
};
|
|
|
|
|
|
|
|
/* Decode the 8-byte payload of a meta frame (located right after the
 * Ethernet header) into @meta. The buffer layout is fixed by hardware.
 */
static void sja1105_meta_unpack(const struct sk_buff *skb,
				struct sja1105_meta *meta)
{
	/* Meta payload starts immediately after the 14-byte Ethernet header */
	u8 *buf = skb_mac_header(skb) + ETH_HLEN;

	/* UM10944.pdf section 4.2.17 AVB Parameters:
	 * Structure of the meta-data follow-up frame.
	 * It is in network byte order, so there are no quirks
	 * while unpacking the meta frame.
	 *
	 * Also SJA1105 E/T only populates bits 23:0 of the timestamp
	 * whereas P/Q/R/S does 32 bits. Since the structure is the
	 * same and the E/T puts zeroes in the high-order byte, use
	 * a unified unpacking command for both device series.
	 */
	packing(buf,     &meta->tstamp,     31, 0, 4, UNPACK, 0);
	packing(buf + 4, &meta->dmac_byte_4, 7, 0, 1, UNPACK, 0);
	packing(buf + 5, &meta->dmac_byte_3, 7, 0, 1, UNPACK, 0);
	packing(buf + 6, &meta->source_port, 7, 0, 1, UNPACK, 0);
	packing(buf + 7, &meta->switch_id,   7, 0, 1, UNPACK, 0);
}
|
|
|
|
|
2019-06-08 12:04:36 +00:00
|
|
|
static inline bool sja1105_is_meta_frame(const struct sk_buff *skb)
|
|
|
|
{
|
|
|
|
const struct ethhdr *hdr = eth_hdr(skb);
|
|
|
|
u64 smac = ether_addr_to_u64(hdr->h_source);
|
|
|
|
u64 dmac = ether_addr_to_u64(hdr->h_dest);
|
|
|
|
|
|
|
|
if (smac != SJA1105_META_SMAC)
|
|
|
|
return false;
|
|
|
|
if (dmac != SJA1105_META_DMAC)
|
|
|
|
return false;
|
|
|
|
if (ntohs(hdr->h_proto) != ETH_P_SJA1105_META)
|
|
|
|
return false;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2020-05-12 17:20:32 +00:00
|
|
|
/* Decide whether the VLAN tag on this frame (if any) encodes a DSA 8021q
 * tag that this tagger can decode.
 */
static bool sja1105_can_use_vlan_as_tags(const struct sk_buff *skb)
{
	struct vlan_ethhdr *hdr = vlan_eth_hdr(skb);
	u16 vlan_tci;

	/* Custom TPID: always ours */
	if (hdr->h_vlan_proto == htons(ETH_P_SJA1105))
		return true;

	/* Neither an in-band 802.1Q header nor a hwaccel-stripped tag
	 * is present, so there is no VID to inspect.
	 */
	if (hdr->h_vlan_proto != htons(ETH_P_8021Q) &&
	    !skb_vlan_tag_present(skb))
		return false;

	/* Prefer the hwaccel tag (already stripped from the payload by the
	 * master) over the in-band one; note skb_vlan_tag_get() is already
	 * in host order, while the in-band TCI needs ntohs().
	 */
	if (skb_vlan_tag_present(skb))
		vlan_tci = skb_vlan_tag_get(skb);
	else
		vlan_tci = ntohs(hdr->h_vlan_TCI);

	return vid_is_dsa_8021q(vlan_tci & VLAN_VID_MASK);
}
|
|
|
|
|
2019-05-05 10:19:27 +00:00
|
|
|
/* This is the first time the tagger sees the frame on RX.
|
2019-06-08 12:04:39 +00:00
|
|
|
* Figure out if we can decode it.
|
2019-05-05 10:19:27 +00:00
|
|
|
*/
|
|
|
|
static bool sja1105_filter(const struct sk_buff *skb, struct net_device *dev)
|
|
|
|
{
|
2020-05-12 17:20:32 +00:00
|
|
|
if (sja1105_can_use_vlan_as_tags(skb))
|
2019-06-08 12:04:39 +00:00
|
|
|
return true;
|
2019-05-29 21:51:26 +00:00
|
|
|
if (sja1105_is_link_local(skb))
|
2019-05-05 10:19:27 +00:00
|
|
|
return true;
|
2019-06-08 12:04:39 +00:00
|
|
|
if (sja1105_is_meta_frame(skb))
|
2019-05-05 10:19:27 +00:00
|
|
|
return true;
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
net: dsa: Make deferred_xmit private to sja1105
There are 3 things that are wrong with the DSA deferred xmit mechanism:
1. Its introduction has made the DSA hotpath ever so slightly more
inefficient for everybody, since DSA_SKB_CB(skb)->deferred_xmit needs
to be initialized to false for every transmitted frame, in order to
figure out whether the driver requested deferral or not (a very rare
occasion, rare even for the only driver that does use this mechanism:
sja1105). That was necessary to avoid kfree_skb from freeing the skb.
2. Because L2 PTP is a link-local protocol like STP, it requires
management routes and deferred xmit with this switch. But as opposed
to STP, the deferred work mechanism needs to schedule the packet
rather quickly for the TX timestamp to be collected in time and sent
to user space. But there is no provision for controlling the
scheduling priority of this deferred xmit workqueue. Too bad this is
a rather specific requirement for a feature that nobody else uses
(more below).
3. Perhaps most importantly, it makes the DSA core adhere a bit too
much to the NXP company-wide policy "Innovate Where It Doesn't
Matter". The sja1105 is probably the only DSA switch that requires
some frames sent from the CPU to be routed to the slave port via an
out-of-band configuration (register write) rather than in-band (DSA
tag). And there are indeed very good reasons to not want to do that:
if that out-of-band register is at the other end of a slow bus such
as SPI, then you limit that Ethernet flow's throughput to effectively
the throughput of the SPI bus. So hardware vendors should definitely
not be encouraged to design this way. We do _not_ want more
widespread use of this mechanism.
Luckily we have a solution for each of the 3 issues:
For 1, we can just remove that variable in the skb->cb and counteract
the effect of kfree_skb with skb_get, much to the same effect. The
advantage, of course, being that anybody who doesn't use deferred xmit
doesn't need to do any extra operation in the hotpath.
For 2, we can create a kernel thread for each port's deferred xmit work.
If the user switch ports are named swp0, swp1, swp2, the kernel threads
will be named swp0_xmit, swp1_xmit, swp2_xmit (there appears to be a 15
character length limit on kernel thread names). With this, the user can
change the scheduling priority with chrt $(pidof swp2_xmit).
For 3, we can actually move the entire implementation to the sja1105
driver.
So this patch deletes the generic implementation from the DSA core and
adds a new one, more adequate to the requirements of PTP TX
timestamping, in sja1105_main.c.
Suggested-by: Florian Fainelli <f.fainelli@gmail.com>
Signed-off-by: Vladimir Oltean <olteanv@gmail.com>
Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2020-01-04 00:37:10 +00:00
|
|
|
/* Calls sja1105_port_deferred_xmit in sja1105_main.c */
static struct sk_buff *sja1105_defer_xmit(struct sja1105_port *sp,
					  struct sk_buff *skb)
{
	/* Increase refcount so the kfree_skb in dsa_slave_xmit
	 * won't really free the packet.
	 */
	skb_queue_tail(&sp->xmit_queue, skb_get(skb));
	/* Wake the per-port kthread that will transmit via an
	 * SPI-installed management route.
	 */
	kthread_queue_work(sp->xmit_worker, &sp->xmit_work);

	/* NULL tells the DSA core that this frame was consumed */
	return NULL;
}
|
|
|
|
|
2020-05-12 17:20:32 +00:00
|
|
|
/* Return the TPID this port's tagger should use for the TX VLAN header */
static u16 sja1105_xmit_tpid(struct sja1105_port *sp)
{
	return sp->xmit_tpid;
}
|
|
|
|
|
2019-05-05 10:19:27 +00:00
|
|
|
/* TX hook: insert a dsa_8021q VLAN tag carrying the port's TX VID and the
 * frame's priority, except for link-local (management) frames which are
 * deferred to the driver's out-of-band xmit path.
 */
static struct sk_buff *sja1105_xmit(struct sk_buff *skb,
				    struct net_device *netdev)
{
	struct dsa_port *dp = dsa_slave_to_port(netdev);
	u16 tx_vid = dsa_8021q_tx_vid(dp->ds, dp->index);
	u16 queue_mapping = skb_get_queue_mapping(skb);
	u8 pcp = netdev_txq_to_tc(netdev, queue_mapping);

	/* Transmitting management traffic does not rely upon switch tagging,
	 * but instead SPI-installed management routes. Part 2 of this
	 * is the .port_deferred_xmit driver callback.
	 */
	if (unlikely(sja1105_is_link_local(skb)))
		return sja1105_defer_xmit(dp->priv, skb);

	/* TCI = PCP in the priority bits, TX VID in the VID bits */
	return dsa_8021q_xmit(skb, netdev, sja1105_xmit_tpid(dp->priv),
			      ((pcp << VLAN_PRIO_SHIFT) | tx_vid));
}
|
|
|
|
|
2019-06-08 12:04:42 +00:00
|
|
|
/* Annotate the buffered link-local skb with data recovered from its meta
 * follow-up frame: restore the DMAC bytes the switch mangled (incl_srcpt)
 * and stash the partial RX timestamp for later reconstruction.
 */
static void sja1105_transfer_meta(struct sk_buff *skb,
				  const struct sja1105_meta *meta)
{
	struct ethhdr *hdr = eth_hdr(skb);

	hdr->h_dest[3] = meta->dmac_byte_3;
	hdr->h_dest[4] = meta->dmac_byte_4;
	SJA1105_SKB_CB(skb)->tstamp = meta->tstamp;
}
|
|
|
|
|
|
|
|
/* This is a simple state machine which follows the hardware mechanism of
 * generating RX timestamps:
 *
 * After each timestampable skb (all traffic for which send_meta1 and
 * send_meta0 is true, aka all MAC-filtered link-local traffic) a meta frame
 * containing a partial timestamp is immediately generated by the switch and
 * sent as a follow-up to the link-local frame on the CPU port.
 *
 * The meta frames have no unique identifier (such as sequence number) by which
 * one may pair them to the correct timestampable frame.
 * Instead, the switch has internal logic that ensures no frames are sent on
 * the CPU port between a link-local timestampable frame and its corresponding
 * meta follow-up. It also ensures strict ordering between ports (lower ports
 * have higher priority towards the CPU port). For this reason, a per-port
 * data structure is not needed/desirable.
 *
 * This function pairs the link-local frame with its partial timestamp from the
 * meta follow-up frame. The full timestamp will be reconstructed later in a
 * work queue.
 */
static struct sk_buff
*sja1105_rcv_meta_state_machine(struct sk_buff *skb,
				struct sja1105_meta *meta,
				bool is_link_local,
				bool is_meta)
{
	struct sja1105_port *sp;
	struct dsa_port *dp;

	dp = dsa_slave_to_port(skb->dev);
	sp = dp->priv;

	/* Step 1: A timestampable frame was received.
	 * Buffer it until we get its meta frame.
	 */
	if (is_link_local) {
		/* RX timestamping disabled: nothing to pair */
		if (!test_bit(SJA1105_HWTS_RX_EN, &sp->data->state))
			/* Do normal processing. */
			return skb;

		spin_lock(&sp->data->meta_lock);
		/* Was this a link-local frame instead of the meta
		 * that we were expecting?
		 */
		if (sp->data->stampable_skb) {
			dev_err_ratelimited(dp->ds->dev,
					    "Expected meta frame, is %12llx "
					    "in the DSA master multicast filter?\n",
					    SJA1105_META_DMAC);
			/* Drop the stale buffered frame; its meta never came */
			kfree_skb(sp->data->stampable_skb);
		}

		/* Hold a reference to avoid dsa_switch_rcv
		 * from freeing the skb.
		 */
		sp->data->stampable_skb = skb_get(skb);
		spin_unlock(&sp->data->meta_lock);

		/* Tell DSA we got nothing */
		return NULL;

	/* Step 2: The meta frame arrived.
	 * Time to take the stampable skb out of the closet, annotate it
	 * with the partial timestamp, and pretend that we received it
	 * just now (basically masquerade the buffered frame as the meta
	 * frame, which serves no further purpose).
	 */
	} else if (is_meta) {
		struct sk_buff *stampable_skb;

		/* Drop the meta frame if we're not in the right state
		 * to process it.
		 */
		if (!test_bit(SJA1105_HWTS_RX_EN, &sp->data->state))
			return NULL;

		spin_lock(&sp->data->meta_lock);

		/* Claim the buffered frame under the lock */
		stampable_skb = sp->data->stampable_skb;
		sp->data->stampable_skb = NULL;

		/* Was this a meta frame instead of the link-local
		 * that we were expecting?
		 */
		if (!stampable_skb) {
			dev_err_ratelimited(dp->ds->dev,
					    "Unexpected meta frame\n");
			spin_unlock(&sp->data->meta_lock);
			return NULL;
		}

		/* The switch guarantees the meta follows on the same port;
		 * a mismatch means the pairing logic was violated.
		 */
		if (stampable_skb->dev != skb->dev) {
			dev_err_ratelimited(dp->ds->dev,
					    "Meta frame on wrong port\n");
			spin_unlock(&sp->data->meta_lock);
			return NULL;
		}

		/* Free the meta frame and give DSA the buffered stampable_skb
		 * for further processing up the network stack.
		 */
		kfree_skb(skb);
		skb = stampable_skb;
		sja1105_transfer_meta(skb, meta);

		spin_unlock(&sp->data->meta_lock);
	}

	return skb;
}
|
|
|
|
|
2020-05-12 17:20:34 +00:00
|
|
|
/* Map a received dsa_8021q sub-VLAN index back to the VID the user
 * configured, and re-attach it as a hwaccel 802.1Q tag.
 */
static void sja1105_decode_subvlan(struct sk_buff *skb, u16 subvlan)
{
	struct dsa_port *dp = dsa_slave_to_port(skb->dev);
	struct sja1105_port *sp = dp->priv;
	u16 user_vid = sp->subvlan_map[subvlan];

	/* VLAN_N_VID marks an unused sub-VLAN slot */
	if (user_vid == VLAN_N_VID)
		return;

	__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
			       (skb->priority << VLAN_PRIO_SHIFT) | user_vid);
}
|
|
|
|
|
2021-06-11 19:01:27 +00:00
|
|
|
static bool sja1105_skb_has_tag_8021q(const struct sk_buff *skb)
|
|
|
|
{
|
|
|
|
u16 tpid = ntohs(eth_hdr(skb)->h_proto);
|
|
|
|
|
|
|
|
return tpid == ETH_P_SJA1105 || tpid == ETH_P_8021Q ||
|
|
|
|
skb_vlan_tag_present(skb);
|
|
|
|
}
|
|
|
|
|
2019-05-05 10:19:27 +00:00
|
|
|
/* RX hook: recover the source port and switch ID from whichever encoding
 * the frame uses (dsa_8021q tag, mangled DMAC for link-local traffic, or
 * meta frame payload), reassign skb->dev to the slave netdev, then run the
 * meta-frame pairing state machine.
 */
static struct sk_buff *sja1105_rcv(struct sk_buff *skb,
				   struct net_device *netdev,
				   struct packet_type *pt)
{
	int source_port, switch_id, subvlan = 0;
	struct sja1105_meta meta = {0};
	struct ethhdr *hdr;
	bool is_link_local;
	bool is_meta;

	hdr = eth_hdr(skb);
	is_link_local = sja1105_is_link_local(skb);
	is_meta = sja1105_is_meta_frame(skb);

	skb->offload_fwd_mark = 1;

	if (sja1105_skb_has_tag_8021q(skb)) {
		/* Normal traffic path. */
		dsa_8021q_rcv(skb, &source_port, &switch_id, &subvlan);
	} else if (is_link_local) {
		/* Management traffic path. Switch embeds the switch ID and
		 * port ID into bytes of the destination MAC, courtesy of
		 * the incl_srcpt options.
		 */
		source_port = hdr->h_dest[3];
		switch_id = hdr->h_dest[4];
		/* Clear the DMAC bytes that were mangled by the switch */
		hdr->h_dest[3] = 0;
		hdr->h_dest[4] = 0;
	} else if (is_meta) {
		sja1105_meta_unpack(skb, &meta);
		source_port = meta.source_port;
		switch_id = meta.switch_id;
	} else {
		/* Not decodable by this tagger */
		return NULL;
	}

	skb->dev = dsa_master_find_slave(netdev, switch_id, source_port);
	if (!skb->dev) {
		netdev_warn(netdev, "Couldn't decode source port\n");
		return NULL;
	}

	/* Sub-VLAN 0 means "no sub-VLAN"; others map to a user VID */
	if (subvlan)
		sja1105_decode_subvlan(skb, subvlan);

	return sja1105_rcv_meta_state_machine(skb, &meta, is_link_local,
					      is_meta);
}
|
|
|
|
|
2020-09-26 19:32:14 +00:00
|
|
|
/* Point the flow dissector past our tag, except for management frames
 * which are transmitted without one.
 */
static void sja1105_flow_dissect(const struct sk_buff *skb, __be16 *proto,
				 int *offset)
{
	/* No tag added for management frames, all ok */
	if (likely(!sja1105_is_link_local(skb)))
		dsa_tag_generic_flow_dissect(skb, proto, offset);
}
|
|
|
|
|
2020-05-11 23:47:15 +00:00
|
|
|
/* Tagging protocol operations registered with the DSA core */
static const struct dsa_device_ops sja1105_netdev_ops = {
	.name = "sja1105",
	.proto = DSA_TAG_PROTO_SJA1105,
	.xmit = sja1105_xmit,
	.rcv = sja1105_rcv,
	.filter = sja1105_filter,
	/* Room for the dsa_8021q VLAN header inserted on TX */
	.needed_headroom = VLAN_HLEN,
	.flow_dissect = sja1105_flow_dissect,
	/* Link-local/meta DMACs are mangled by incl_srcpt and too numerous
	 * for the master's RX filter, so the master must be promiscuous for
	 * trapped traffic (and hence PTP) to work.
	 */
	.promisc_on_master = true,
};
|
|
|
|
|
|
|
|
MODULE_LICENSE("GPL v2");
|
|
|
|
MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_SJA1105);
|
|
|
|
|
|
|
|
module_dsa_tag_driver(sja1105_netdev_ops);
|