
Merge tag 'net-5.14-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from Jakub Kicinski:
 "Including fixes from bpf and netfilter.

  Current release - regressions:

   - sock: fix parameter order in sock_setsockopt()

  Current release - new code bugs:

   - netfilter: nft_last:
       - fix incorrect arithmetic when restoring last used
       - honor NFTA_LAST_SET on restoration

  Previous releases - regressions:

   - udp: properly flush normal packet at GRO time

   - sfc: ensure correct number of XDP queues; don't allow enabling the
     feature if there aren't sufficient resources to Tx from any CPU

   - dsa: sja1105: fix address learning getting disabled on the CPU port

   - mptcp: address an rmem accounting issue that could keep packets in
     subflow receive buffers longer than necessary, delaying MPTCP-level
     ACKs

   - ip_tunnel: fix mtu calculation for ETHER tunnel devices

   - do not reuse skbs allocated from skbuff_fclone_cache in the napi
     skb cache, as we'd try to return them to the wrong slab cache

   - tcp: consistently disable header prediction for mptcp

  Previous releases - always broken:

   - bpf: fix subprog poke descriptor tracking use-after-free

   - ipv6:
       - allocate enough headroom in ip6_finish_output2() in case
         iptables TEE is used
       - tcp: drop silly ICMPv6 packet too big messages to avoid
         expensive and pointless lookups (which may serve as a DDoS
         vector)
       - make sure fwmark is copied in SYNACK packets
       - fix 'disable_policy' for forwarded packets (align with IPv4)

   - netfilter: conntrack:
       - do not renew entry stuck in tcp SYN_SENT state
       - do not mark RST in the reply direction coming after SYN packet
         for an out-of-sync entry

   - mptcp: cleanly handle error conditions with MP_JOIN and syncookies

   - mptcp: fix double free when rejecting a join due to port mismatch

   - validate lwtstate->data before returning from skb_tunnel_info()

   - tcp: call sk_wmem_schedule before sk_mem_charge in zerocopy path

   - mt76: mt7921: continue to probe driver when fw already downloaded

   - bonding: fix multiple issues with offloading IPsec through a bond

   - stmmac: ptp: fix issues around Qbv support and setting time back

   - bcmgenet: always clear wake-up based on energy detection

  Misc:

   - sctp: move 198 addresses from unusable to private scope

   - ptp: support virtual clocks and timestamping

   - openvswitch: optimize operation for key comparison"

* tag 'net-5.14-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (158 commits)
  net: dsa: properly check for the bridge_leave methods in dsa_switch_bridge_leave()
  sfc: add logs explaining XDP_TX/REDIRECT is not available
  sfc: ensure correct number of XDP queues
  sfc: fix lack of XDP TX queues - error XDP TX failed (-22)
  net: fddi: fix UAF in fza_probe
  net: dsa: sja1105: fix address learning getting disabled on the CPU port
  net: ocelot: fix switchdev objects synced for wrong netdev with LAG offload
  net: Use nlmsg_unicast() instead of netlink_unicast()
  octeontx2-pf: Fix uninitialized boolean variable pps
  ipv6: allocate enough headroom in ip6_finish_output2()
  net: hdlc: rename 'mod_init' & 'mod_exit' functions to be module-specific
  net: bridge: multicast: fix MRD advertisement router port marking race
  net: bridge: multicast: fix PIM hello router port marking race
  net: phy: marvell10g: fix differentiation of 88X3310 from 88X3340
  dsa: fix for_each_child.cocci warnings
  virtio_net: check virtqueue_add_sgs() return value
  mptcp: properly account bulk freed memory
  selftests: mptcp: fix case multiple subflows limited by server
  mptcp: avoid processing packet if a subflow reset
  mptcp: fix syncookie process if mptcp can not_accept new subflow
  ...
Author: Linus Torvalds
Date:   2021-07-14 09:24:32 -07:00
commit 8096acd744
191 changed files with 3598 additions and 2349 deletions


@ -33,6 +33,13 @@ Description:
frequency adjustment value (a positive integer) in
parts per billion.
What: /sys/class/ptp/ptpN/max_vclocks
Date: May 2021
Contact: Yangbo Lu <yangbo.lu@nxp.com>
Description:
This file contains the maximum number of ptp vclocks.
Write an integer to re-configure it.
What: /sys/class/ptp/ptpN/n_alarms
Date: September 2010
Contact: Richard Cochran <richardcochran@gmail.com>
@ -61,6 +68,19 @@ Description:
This file contains the number of programmable pins
offered by the PTP hardware clock.
What: /sys/class/ptp/ptpN/n_vclocks
Date: May 2021
Contact: Yangbo Lu <yangbo.lu@nxp.com>
Description:
This file contains the number of virtual PTP clocks in
use. By default, the value is 0 meaning that only the
physical clock is in use. Setting the value creates
the corresponding number of virtual clocks and causes
the physical clock to become free running. Setting the
value back to 0 deletes the virtual clocks and
switches the physical clock back to normal, adjustable
operation.
What: /sys/class/ptp/ptpN/pins
Date: March 2014
Contact: Richard Cochran <richardcochran@gmail.com>
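
A minimal user-space sketch of the vclock ABI documented above (illustrative
only, not part of this series; the ptp0 index and the count of two vclocks
are assumptions):

/* Create two PTP virtual clocks behind /dev/ptp0 by writing to the new
 * n_vclocks attribute. Per the description above, this also puts the
 * physical clock into free-running mode.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/sys/class/ptp/ptp0/n_vclocks", O_WRONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        if (write(fd, "2", 1) != 1)
                perror("write");
        close(fd);
        return 0;
}

Writing 0 back deletes the vclocks and switches the physical clock back to
normal, adjustable operation, as described above.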


@ -13,7 +13,7 @@ Documentation/devicetree/bindings/memory-controllers/omap-gpmc.txt
For the properties relevant to the ethernet controller connected to the GPMC
refer to the binding documentation of the device. For example, the documentation
for the SMSC 911x is Documentation/devicetree/bindings/net/smsc911x.txt
for the SMSC 911x is Documentation/devicetree/bindings/net/smsc,lan9115.yaml
Child nodes need to specify the GPMC bus address width using the "bank-width"
property but is possible that an ethernet controller also has a property to


@ -0,0 +1,110 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/net/smsc,lan9115.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Smart Mixed-Signal Connectivity (SMSC) LAN911x/912x Controller
maintainers:
- Shawn Guo <shawnguo@kernel.org>
allOf:
- $ref: ethernet-controller.yaml#
properties:
compatible:
oneOf:
- const: smsc,lan9115
- items:
- enum:
- smsc,lan89218
- smsc,lan9117
- smsc,lan9118
- smsc,lan9220
- smsc,lan9221
- const: smsc,lan9115
reg:
maxItems: 1
reg-shift: true
reg-io-width:
enum: [ 2, 4 ]
default: 2
interrupts:
minItems: 1
items:
- description:
LAN interrupt line
- description:
Optional PME (power management event) interrupt that is able to wake
up the host system with a 50ms pulse on network activity
clocks:
maxItems: 1
phy-mode: true
smsc,irq-active-high:
type: boolean
description: Indicates the IRQ polarity is active-high
smsc,irq-push-pull:
type: boolean
description: Indicates the IRQ type is push-pull
smsc,force-internal-phy:
type: boolean
description: Forces SMSC LAN controller to use internal PHY
smsc,force-external-phy:
type: boolean
description: Forces SMSC LAN controller to use external PHY
smsc,save-mac-address:
type: boolean
description:
Indicates that MAC address needs to be saved before resetting the
controller
reset-gpios:
maxItems: 1
description:
A GPIO line connected to the RESET (active low) signal of the device.
On many systems this is wired high so the device goes out of reset at
power-on, but if it is under program control, this optional GPIO can
wake up in response to it.
vdd33a-supply:
description: 3.3V analog power supply
vddvario-supply:
description: IO logic power supply
required:
- compatible
- reg
- interrupts
# There are lots of bus-specific properties ("qcom,*", "samsung,*", "fsl,*",
# "gpmc,*", ...) to be found, that actually depend on the compatible value of
# the parent node.
additionalProperties: true
examples:
- |
#include <dt-bindings/gpio/gpio.h>
ethernet@f4000000 {
compatible = "smsc,lan9220", "smsc,lan9115";
reg = <0xf4000000 0x2000000>;
phy-mode = "mii";
interrupt-parent = <&gpio1>;
interrupts = <31>, <32>;
reset-gpios = <&gpio1 30 GPIO_ACTIVE_LOW>;
reg-io-width = <4>;
smsc,irq-push-pull;
};


@ -1,43 +0,0 @@
* Smart Mixed-Signal Connectivity (SMSC) LAN911x/912x Controller
Required properties:
- compatible : Should be "smsc,lan<model>", "smsc,lan9115"
- reg : Address and length of the io space for SMSC LAN
- interrupts : one or two interrupt specifiers
- The first interrupt is the SMSC LAN interrupt line
- The second interrupt (if present) is the PME (power
management event) interrupt that is able to wake up the host
system with a 50ms pulse on network activity
- phy-mode : See ethernet.txt file in the same directory
Optional properties:
- reg-shift : Specify the quantity to shift the register offsets by
- reg-io-width : Specify the size (in bytes) of the IO accesses that
should be performed on the device. Valid value for SMSC LAN is
2 or 4. If it's omitted or invalid, the size would be 2.
- smsc,irq-active-high : Indicates the IRQ polarity is active-high
- smsc,irq-push-pull : Indicates the IRQ type is push-pull
- smsc,force-internal-phy : Forces SMSC LAN controller to use
internal PHY
- smsc,force-external-phy : Forces SMSC LAN controller to use
external PHY
- smsc,save-mac-address : Indicates that mac address needs to be saved
before resetting the controller
- reset-gpios : a GPIO line connected to the RESET (active low) signal
of the device. On many systems this is wired high so the device goes
out of reset at power-on, but if it is under program control, this
optional GPIO can wake up in response to it.
- vdd33a-supply, vddvario-supply : 3.3V analog and IO logic power supplies
Examples:
lan9220@f4000000 {
compatible = "smsc,lan9220", "smsc,lan9115";
reg = <0xf4000000 0x2000000>;
phy-mode = "mii";
interrupt-parent = <&gpio1>;
interrupts = <31>, <32>;
reset-gpios = <&gpio1 30 GPIO_ACTIVE_LOW>;
reg-io-width = <4>;
smsc,irq-push-pull;
};


@ -212,6 +212,7 @@ Userspace to kernel:
``ETHTOOL_MSG_FEC_SET`` set FEC settings
``ETHTOOL_MSG_MODULE_EEPROM_GET`` read SFP module EEPROM
``ETHTOOL_MSG_STATS_GET`` get standard statistics
``ETHTOOL_MSG_PHC_VCLOCKS_GET`` get PHC virtual clocks info
===================================== ================================
Kernel to userspace:
@ -250,6 +251,7 @@ Kernel to userspace:
``ETHTOOL_MSG_FEC_NTF`` FEC settings
``ETHTOOL_MSG_MODULE_EEPROM_GET_REPLY`` read SFP module EEPROM
``ETHTOOL_MSG_STATS_GET_REPLY`` standard statistics
``ETHTOOL_MSG_PHC_VCLOCKS_GET_REPLY`` PHC virtual clocks info
======================================== =================================
``GET`` requests are sent by userspace applications to retrieve device
@ -1477,6 +1479,25 @@ Low and high bounds are inclusive, for example:
etherStatsPkts512to1023Octets 512 1023
============================= ==== ====
PHC_VCLOCKS_GET
===============
Query device PHC virtual clocks information.
Request contents:
==================================== ====== ==========================
``ETHTOOL_A_PHC_VCLOCKS_HEADER`` nested request header
==================================== ====== ==========================
Kernel response contents:
==================================== ====== ==========================
``ETHTOOL_A_PHC_VCLOCKS_HEADER`` nested reply header
``ETHTOOL_A_PHC_VCLOCKS_NUM`` u32 PHC virtual clocks number
``ETHTOOL_A_PHC_VCLOCKS_INDEX`` s32 PHC index array
==================================== ====== ==========================
Request translation
===================
@ -1575,4 +1596,5 @@ are netlink only.
n/a ``ETHTOOL_MSG_CABLE_TEST_ACT``
n/a ``ETHTOOL_MSG_CABLE_TEST_TDR_ACT``
n/a ``ETHTOOL_MSG_TUNNEL_INFO_GET``
n/a ``ETHTOOL_MSG_PHC_VCLOCKS_GET``
=================================== =====================================
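
A hedged sketch of driving the new PHC_VCLOCKS_GET message from user space
with libmnl; resolve_ethtool_family() is a hypothetical helper standing in
for the usual CTRL_CMD_GETFAMILY lookup of the "ethtool" generic-netlink
family, and reply parsing is omitted:

/* Sketch (not from this series): request PHC virtual clock info via the
 * new ETHTOOL_MSG_PHC_VCLOCKS_GET generic-netlink message.
 */
#include <libmnl/libmnl.h>
#include <linux/ethtool_netlink.h>
#include <linux/genetlink.h>
#include <stdint.h>
#include <time.h>

uint16_t resolve_ethtool_family(struct mnl_socket *nl); /* hypothetical */

int phc_vclocks_request(struct mnl_socket *nl, const char *ifname)
{
        char buf[MNL_SOCKET_BUFFER_SIZE];
        struct nlmsghdr *nlh = mnl_nlmsg_put_header(buf);
        struct genlmsghdr *genl;
        struct nlattr *nest;

        nlh->nlmsg_type = resolve_ethtool_family(nl);
        nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
        nlh->nlmsg_seq = time(NULL);

        genl = mnl_nlmsg_put_extra_header(nlh, sizeof(*genl));
        genl->cmd = ETHTOOL_MSG_PHC_VCLOCKS_GET;
        genl->version = ETHTOOL_GENL_VERSION;

        /* The request carries only the nested header naming the device. */
        nest = mnl_attr_nest_start(nlh, ETHTOOL_A_PHC_VCLOCKS_HEADER);
        mnl_attr_put_strz(nlh, ETHTOOL_A_HEADER_DEV_NAME, ifname);
        mnl_attr_nest_end(nlh, nest);

        /* The reply (parsing omitted) carries ETHTOOL_A_PHC_VCLOCKS_NUM and
         * the ETHTOOL_A_PHC_VCLOCKS_INDEX array described in the table above.
         */
        return mnl_socket_sendto(nl, nlh, nlh->nlmsg_len) < 0 ? -1 : 0;
}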


@ -110,6 +110,12 @@ nf_conntrack_tcp_be_liberal - BOOLEAN
Be conservative in what you do, be liberal in what you accept from others.
If it's non-zero, we mark only out of window RST segments as INVALID.
nf_conntrack_tcp_ignore_invalid_rst - BOOLEAN
- 0 - disabled (default)
- 1 - enabled
If it's 1, we don't mark out of window RST segments as INVALID.
nf_conntrack_tcp_loose - BOOLEAN
- 0 - disabled
- not 0 - enabled (default)
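
For completeness, a small sketch (not part of this series) of enabling the
new toggle from C, assuming the conventional /proc/sys mount; it mirrors
"sysctl -w net.netfilter.nf_conntrack_tcp_ignore_invalid_rst=1":

/* Illustrative only: flip the new conntrack toggle documented above. */
#include <fcntl.h>
#include <unistd.h>

static int set_ignore_invalid_rst(int on)
{
        const char *path =
                "/proc/sys/net/netfilter/nf_conntrack_tcp_ignore_invalid_rst";
        int fd = open(path, O_WRONLY);
        int ret = -1;

        if (fd < 0)
                return -1;
        if (write(fd, on ? "1" : "0", 1) == 1)
                ret = 0;
        close(fd);
        return ret;
}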


@ -4,10 +4,125 @@
Linux Kernel TIPC
=================
TIPC (Transparent Inter Process Communication) is a protocol that is
specially designed for intra-cluster communication.
Introduction
============
For more information about TIPC, see http://tipc.sourceforge.net.
TIPC (Transparent Inter Process Communication) is a protocol that is specially
designed for intra-cluster communication. It can be configured to transmit
messages either on UDP or directly across Ethernet. Message delivery is
sequence guaranteed, loss free and flow controlled. Latency times are shorter
than with any other known protocol, while maximal throughput is comparable to
that of TCP.
TIPC Features
-------------
- Cluster wide IPC service
Have you ever wished you had the convenience of Unix Domain Sockets even when
transmitting data between cluster nodes? Where you yourself determine the
addresses you want to bind to and use? Where you don't have to perform DNS
lookups and worry about IP addresses? Where you don't have to start timers
to monitor the continuous existence of peer sockets? And yet without the
downsides of that socket type, such as the risk of lingering inodes?
Welcome to the Transparent Inter Process Communication service, TIPC in short,
which gives you all of this, and a lot more.
- Service Addressing
A fundamental concept in TIPC is that of Service Addressing which makes it
possible for a programmer to choose their own address, bind it to a server
socket and let client programs use only that address for sending messages.
- Service Tracking
A client wanting to wait for the availability of a server uses the Service
Tracking mechanism to subscribe for binding and unbinding/close events for
sockets with the associated service address.
The service tracking mechanism can also be used for Cluster Topology Tracking,
i.e., subscribing for availability/non-availability of cluster nodes.
Likewise, the service tracking mechanism can be used for Cluster Connectivity
Tracking, i.e., subscribing for up/down events for individual links between
cluster nodes.
- Transmission Modes
Using a service address, a client can send datagram messages to a server socket.
Using the same address type, it can establish a connection towards an accepting
server socket.
It can also use a service address to create and join a Communication Group,
which is the TIPC manifestation of a brokerless message bus.
Multicast with very good performance and scalability is available both in
datagram mode and in communication group mode.
- Inter Node Links
Communication between any two nodes in a cluster is maintained by one or two
Inter Node Links, which both guarantee data traffic integrity and monitor
the peer node's availability.
- Cluster Scalability
By applying the Overlapping Ring Monitoring algorithm on the inter node links
it is possible to scale TIPC clusters up to 1000 nodes with a maintained
neighbor failure discovery time of 1-2 seconds. For smaller clusters this
time can be made much shorter.
- Neighbor Discovery
Neighbor Node Discovery in the cluster is done by Ethernet broadcast or UDP
multicast, when any of those services are available. If not, configured peer
IP addresses can be used.
- Configuration
When running TIPC in single node mode no configuration whatsoever is needed.
When running in cluster mode TIPC must as a minimum be given a node address
(before Linux 4.17) and told which interface to attach to. The "tipc"
configuration tool makes it possible to add and maintain many more
configuration parameters.
- Performance
TIPC message transfer latency times are better than in any other known protocol.
Maximal byte throughput for inter-node connections is still somewhat lower than
for TCP, while it is superior for intra-node and inter-container throughput
on the same host.
- Language Support
The TIPC user API has support for C, Python, Perl, Ruby, D and Go.
More Information
----------------
- How to set up TIPC:
http://tipc.io/getting_started.html
- How to program with TIPC:
http://tipc.io/programming.html
- How to contribute to TIPC:
http://tipc.io/contacts.html
- More details about TIPC specification:
http://tipc.io/protocol.html
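
As a minimal illustration of the Service Addressing and Transmission Modes
points above, a client can reach a service anywhere in the cluster without
knowing any IP addresses. This sketch is illustrative only and not part of
this patch; the service type and instance values are arbitrary:

/* Send one datagram to service type 18888, instance 17, cluster-wide. */
#include <linux/tipc.h>
#include <sys/socket.h>
#include <unistd.h>

int tipc_send_hello(void)
{
        struct sockaddr_tipc server = {
                .family = AF_TIPC,
                .addrtype = TIPC_SERVICE_ADDR,
                .scope = TIPC_CLUSTER_SCOPE,
                .addr.name.name.type = 18888,   /* arbitrary service type */
                .addr.name.name.instance = 17,  /* arbitrary instance */
        };
        const char msg[] = "hello";
        int sd = socket(AF_TIPC, SOCK_RDM, 0); /* reliable datagram mode */
        int ret = -1;

        if (sd < 0)
                return -1;
        if (sendto(sd, msg, sizeof(msg), 0,
                   (struct sockaddr *)&server, sizeof(server)) == sizeof(msg))
                ret = 0;
        close(sd);
        return ret;
}

A server would simply bind() the same service address and receive the
message, with neither side needing to know where the other is located.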
Implementation
==============
TIPC is implemented as a kernel module in net/tipc/ directory.
TIPC Base Types
---------------


@ -15009,6 +15009,13 @@ F: drivers/net/phy/dp83640*
F: drivers/ptp/*
F: include/linux/ptp_cl*
PTP VIRTUAL CLOCK SUPPORT
M: Yangbo Lu <yangbo.lu@nxp.com>
L: netdev@vger.kernel.org
S: Maintained
F: drivers/ptp/ptp_vclock.c
F: net/ethtool/phc_vclocks.c
PTRACE SUPPORT
M: Oleg Nesterov <oleg@redhat.com>
S: Maintained


@ -581,7 +581,7 @@
* EBI2. This has a 25MHz chrystal next to it, so no
* clocking is needed.
*/
ethernet-ebi2@2,0 {
ethernet@2,0 {
compatible = "smsc,lan9221", "smsc,lan9115";
reg = <2 0x0 0x100>;
/*
@ -598,8 +598,6 @@
phy-mode = "mii";
reg-io-width = <2>;
smsc,force-external-phy;
/* IRQ on edge falling = active low */
smsc,irq-active-low;
smsc,irq-push-pull;
/*


@ -570,6 +570,9 @@ static void bpf_tail_call_direct_fixup(struct bpf_prog *prog)
for (i = 0; i < prog->aux->size_poke_tab; i++) {
poke = &prog->aux->poke_tab[i];
if (poke->aux && poke->aux != prog->aux)
continue;
WARN_ON_ONCE(READ_ONCE(poke->tailcall_target_stable));
if (poke->reason != BPF_POKE_REASON_TAIL_CALL)


@ -401,24 +401,85 @@ static int bond_vlan_rx_kill_vid(struct net_device *bond_dev,
static int bond_ipsec_add_sa(struct xfrm_state *xs)
{
struct net_device *bond_dev = xs->xso.dev;
struct bond_ipsec *ipsec;
struct bonding *bond;
struct slave *slave;
int err;
if (!bond_dev)
return -EINVAL;
rcu_read_lock();
bond = netdev_priv(bond_dev);
slave = rcu_dereference(bond->curr_active_slave);
xs->xso.real_dev = slave->dev;
bond->xs = xs;
if (!slave) {
rcu_read_unlock();
return -ENODEV;
}
if (!(slave->dev->xfrmdev_ops
&& slave->dev->xfrmdev_ops->xdo_dev_state_add)) {
if (!slave->dev->xfrmdev_ops ||
!slave->dev->xfrmdev_ops->xdo_dev_state_add ||
netif_is_bond_master(slave->dev)) {
slave_warn(bond_dev, slave->dev, "Slave does not support ipsec offload\n");
rcu_read_unlock();
return -EINVAL;
}
return slave->dev->xfrmdev_ops->xdo_dev_state_add(xs);
ipsec = kmalloc(sizeof(*ipsec), GFP_ATOMIC);
if (!ipsec) {
rcu_read_unlock();
return -ENOMEM;
}
xs->xso.real_dev = slave->dev;
err = slave->dev->xfrmdev_ops->xdo_dev_state_add(xs);
if (!err) {
ipsec->xs = xs;
INIT_LIST_HEAD(&ipsec->list);
spin_lock_bh(&bond->ipsec_lock);
list_add(&ipsec->list, &bond->ipsec_list);
spin_unlock_bh(&bond->ipsec_lock);
} else {
kfree(ipsec);
}
rcu_read_unlock();
return err;
}
static void bond_ipsec_add_sa_all(struct bonding *bond)
{
struct net_device *bond_dev = bond->dev;
struct bond_ipsec *ipsec;
struct slave *slave;
rcu_read_lock();
slave = rcu_dereference(bond->curr_active_slave);
if (!slave)
goto out;
if (!slave->dev->xfrmdev_ops ||
!slave->dev->xfrmdev_ops->xdo_dev_state_add ||
netif_is_bond_master(slave->dev)) {
spin_lock_bh(&bond->ipsec_lock);
if (!list_empty(&bond->ipsec_list))
slave_warn(bond_dev, slave->dev,
"%s: no slave xdo_dev_state_add\n",
__func__);
spin_unlock_bh(&bond->ipsec_lock);
goto out;
}
spin_lock_bh(&bond->ipsec_lock);
list_for_each_entry(ipsec, &bond->ipsec_list, list) {
ipsec->xs->xso.real_dev = slave->dev;
if (slave->dev->xfrmdev_ops->xdo_dev_state_add(ipsec->xs)) {
slave_warn(bond_dev, slave->dev, "%s: failed to add SA\n", __func__);
ipsec->xs->xso.real_dev = NULL;
}
}
spin_unlock_bh(&bond->ipsec_lock);
out:
rcu_read_unlock();
}
/**
@ -428,27 +489,77 @@ static int bond_ipsec_add_sa(struct xfrm_state *xs)
static void bond_ipsec_del_sa(struct xfrm_state *xs)
{
struct net_device *bond_dev = xs->xso.dev;
struct bond_ipsec *ipsec;
struct bonding *bond;
struct slave *slave;
if (!bond_dev)
return;
rcu_read_lock();
bond = netdev_priv(bond_dev);
slave = rcu_dereference(bond->curr_active_slave);
if (!slave)
return;
goto out;
xs->xso.real_dev = slave->dev;
if (!xs->xso.real_dev)
goto out;
if (!(slave->dev->xfrmdev_ops
&& slave->dev->xfrmdev_ops->xdo_dev_state_delete)) {
WARN_ON(xs->xso.real_dev != slave->dev);
if (!slave->dev->xfrmdev_ops ||
!slave->dev->xfrmdev_ops->xdo_dev_state_delete ||
netif_is_bond_master(slave->dev)) {
slave_warn(bond_dev, slave->dev, "%s: no slave xdo_dev_state_delete\n", __func__);
return;
goto out;
}
slave->dev->xfrmdev_ops->xdo_dev_state_delete(xs);
out:
spin_lock_bh(&bond->ipsec_lock);
list_for_each_entry(ipsec, &bond->ipsec_list, list) {
if (ipsec->xs == xs) {
list_del(&ipsec->list);
kfree(ipsec);
break;
}
}
spin_unlock_bh(&bond->ipsec_lock);
rcu_read_unlock();
}
static void bond_ipsec_del_sa_all(struct bonding *bond)
{
struct net_device *bond_dev = bond->dev;
struct bond_ipsec *ipsec;
struct slave *slave;
rcu_read_lock();
slave = rcu_dereference(bond->curr_active_slave);
if (!slave) {
rcu_read_unlock();
return;
}
spin_lock_bh(&bond->ipsec_lock);
list_for_each_entry(ipsec, &bond->ipsec_list, list) {
if (!ipsec->xs->xso.real_dev)
continue;
if (!slave->dev->xfrmdev_ops ||
!slave->dev->xfrmdev_ops->xdo_dev_state_delete ||
netif_is_bond_master(slave->dev)) {
slave_warn(bond_dev, slave->dev,
"%s: no slave xdo_dev_state_delete\n",
__func__);
} else {
slave->dev->xfrmdev_ops->xdo_dev_state_delete(ipsec->xs);
}
ipsec->xs->xso.real_dev = NULL;
}
spin_unlock_bh(&bond->ipsec_lock);
rcu_read_unlock();
}
/**
@ -459,21 +570,37 @@ static void bond_ipsec_del_sa(struct xfrm_state *xs)
static bool bond_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
{
struct net_device *bond_dev = xs->xso.dev;
struct bonding *bond = netdev_priv(bond_dev);
struct slave *curr_active = rcu_dereference(bond->curr_active_slave);
struct net_device *slave_dev = curr_active->dev;
struct net_device *real_dev;
struct slave *curr_active;
struct bonding *bond;
int err;
if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP)
return true;
bond = netdev_priv(bond_dev);
rcu_read_lock();
curr_active = rcu_dereference(bond->curr_active_slave);
real_dev = curr_active->dev;
if (!(slave_dev->xfrmdev_ops
&& slave_dev->xfrmdev_ops->xdo_dev_offload_ok)) {
slave_warn(bond_dev, slave_dev, "%s: no slave xdo_dev_offload_ok\n", __func__);
return false;
if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
err = false;
goto out;
}
xs->xso.real_dev = slave_dev;
return slave_dev->xfrmdev_ops->xdo_dev_offload_ok(skb, xs);
if (!xs->xso.real_dev) {
err = false;
goto out;
}
if (!real_dev->xfrmdev_ops ||
!real_dev->xfrmdev_ops->xdo_dev_offload_ok ||
netif_is_bond_master(real_dev)) {
err = false;
goto out;
}
err = real_dev->xfrmdev_ops->xdo_dev_offload_ok(skb, xs);
out:
rcu_read_unlock();
return err;
}
static const struct xfrmdev_ops bond_xfrmdev_ops = {
@ -990,8 +1117,7 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
return;
#ifdef CONFIG_XFRM_OFFLOAD
if (old_active && bond->xs)
bond_ipsec_del_sa(bond->xs);
bond_ipsec_del_sa_all(bond);
#endif /* CONFIG_XFRM_OFFLOAD */
if (new_active) {
@ -1066,10 +1192,7 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
}
#ifdef CONFIG_XFRM_OFFLOAD
if (new_active && bond->xs) {
xfrm_dev_state_flush(dev_net(bond->dev), bond->dev, true);
bond_ipsec_add_sa(bond->xs);
}
bond_ipsec_add_sa_all(bond);
#endif /* CONFIG_XFRM_OFFLOAD */
/* resend IGMP joins since active slave has changed or
@ -3327,6 +3450,7 @@ static int bond_master_netdev_event(unsigned long event,
return bond_event_changename(event_bond);
case NETDEV_UNREGISTER:
bond_remove_proc_entry(event_bond);
xfrm_dev_state_flush(dev_net(bond_dev), bond_dev, true);
break;
case NETDEV_REGISTER:
bond_create_proc_entry(event_bond);
@ -4894,7 +5018,8 @@ void bond_setup(struct net_device *bond_dev)
#ifdef CONFIG_XFRM_OFFLOAD
/* set up xfrm device ops (only supported in active-backup right now) */
bond_dev->xfrmdev_ops = &bond_xfrmdev_ops;
bond->xs = NULL;
INIT_LIST_HEAD(&bond->ipsec_list);
spin_lock_init(&bond->ipsec_lock);
#endif /* CONFIG_XFRM_OFFLOAD */
/* don't acquire bond device's netif_tx_lock when transmitting */


@ -20,15 +20,6 @@ config CAIF_TTY
identified as N_CAIF. When this ldisc is opened from user space
it will redirect the TTY's traffic into the CAIF stack.
config CAIF_HSI
tristate "CAIF HSI transport driver"
depends on CAIF
default n
help
The CAIF low level driver for CAIF over HSI.
Be aware that if you enable this then you also need to
enable a low-level HSI driver.
config CAIF_VIRTIO
tristate "CAIF virtio transport driver"
depends on CAIF && HAS_DMA


@ -4,8 +4,5 @@ ccflags-$(CONFIG_CAIF_DEBUG) := -DDEBUG
# Serial interface
obj-$(CONFIG_CAIF_TTY) += caif_serial.o
# HSI interface
obj-$(CONFIG_CAIF_HSI) += caif_hsi.o
# Virtio interface
obj-$(CONFIG_CAIF_VIRTIO) += caif_virtio.o

[File diff suppressed because it is too large]


@ -419,8 +419,10 @@ int ksz_switch_register(struct ksz_device *dev,
if (of_property_read_u32(port, "reg",
&port_num))
continue;
if (!(dev->port_mask & BIT(port_num)))
if (!(dev->port_mask & BIT(port_num))) {
of_node_put(port);
return -EINVAL;
}
of_get_phy_mode(port,
&dev->ports[port_num].interface);
}


@ -3583,6 +3583,7 @@ static const struct mv88e6xxx_ops mv88e6141_ops = {
.port_set_speed_duplex = mv88e6341_port_set_speed_duplex,
.port_max_speed_mode = mv88e6341_port_max_speed_mode,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_policy = mv88e6352_port_set_policy,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
.port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
@ -3596,7 +3597,7 @@ static const struct mv88e6xxx_ops mv88e6141_ops = {
.port_set_cmode = mv88e6341_port_set_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
.stats_get_strings = mv88e6320_stats_get_strings,
.stats_get_stats = mv88e6390_stats_get_stats,
@ -3606,6 +3607,9 @@ static const struct mv88e6xxx_ops mv88e6141_ops = {
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6390_g1_rmu_disable,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
@ -3619,6 +3623,11 @@ static const struct mv88e6xxx_ops mv88e6141_ops = {
.serdes_irq_enable = mv88e6390_serdes_irq_enable,
.serdes_irq_status = mv88e6390_serdes_irq_status,
.gpio_ops = &mv88e6352_gpio_ops,
.serdes_get_sset_count = mv88e6390_serdes_get_sset_count,
.serdes_get_strings = mv88e6390_serdes_get_strings,
.serdes_get_stats = mv88e6390_serdes_get_stats,
.serdes_get_regs_len = mv88e6390_serdes_get_regs_len,
.serdes_get_regs = mv88e6390_serdes_get_regs,
.phylink_validate = mv88e6341_phylink_validate,
};
@ -4383,6 +4392,7 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
.port_set_speed_duplex = mv88e6341_port_set_speed_duplex,
.port_max_speed_mode = mv88e6341_port_max_speed_mode,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_policy = mv88e6352_port_set_policy,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
.port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
@ -4396,7 +4406,7 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
.port_set_cmode = mv88e6341_port_set_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
.stats_get_strings = mv88e6320_stats_get_strings,
.stats_get_stats = mv88e6390_stats_get_stats,
@ -4406,6 +4416,9 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6390_g1_rmu_disable,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
@ -4421,6 +4434,11 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6390_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
.serdes_get_sset_count = mv88e6390_serdes_get_sset_count,
.serdes_get_strings = mv88e6390_serdes_get_strings,
.serdes_get_stats = mv88e6390_serdes_get_stats,
.serdes_get_regs_len = mv88e6390_serdes_get_regs_len,
.serdes_get_regs = mv88e6390_serdes_get_regs,
.phylink_validate = mv88e6341_phylink_validate,
};


@ -722,7 +722,7 @@ static struct mv88e6390_serdes_hw_stat mv88e6390_serdes_hw_stats[] = {
int mv88e6390_serdes_get_sset_count(struct mv88e6xxx_chip *chip, int port)
{
if (mv88e6390_serdes_get_lane(chip, port) < 0)
if (mv88e6xxx_serdes_get_lane(chip, port) < 0)
return 0;
return ARRAY_SIZE(mv88e6390_serdes_hw_stats);
@ -734,7 +734,7 @@ int mv88e6390_serdes_get_strings(struct mv88e6xxx_chip *chip,
struct mv88e6390_serdes_hw_stat *stat;
int i;
if (mv88e6390_serdes_get_lane(chip, port) < 0)
if (mv88e6xxx_serdes_get_lane(chip, port) < 0)
return 0;
for (i = 0; i < ARRAY_SIZE(mv88e6390_serdes_hw_stats); i++) {
@ -770,7 +770,7 @@ int mv88e6390_serdes_get_stats(struct mv88e6xxx_chip *chip, int port,
int lane;
int i;
lane = mv88e6390_serdes_get_lane(chip, port);
lane = mv88e6xxx_serdes_get_lane(chip, port);
if (lane < 0)
return 0;


@ -122,14 +122,12 @@ static int sja1105_init_mac_settings(struct sja1105_private *priv)
for (i = 0; i < ds->num_ports; i++) {
mac[i] = default_mac;
if (i == dsa_upstream_port(priv->ds, i)) {
/* STP doesn't get called for CPU port, so we need to
* set the I/O parameters statically.
*/
mac[i].dyn_learn = true;
mac[i].ingress = true;
mac[i].egress = true;
}
/* Let sja1105_bridge_stp_state_set() keep address learning
* enabled for the CPU port.
*/
if (dsa_is_cpu_port(ds, i))
priv->learn_ena |= BIT(i);
}
return 0;


@ -594,6 +594,11 @@ int atl1c_phy_init(struct atl1c_hw *hw)
int ret_val;
u16 mii_bmcr_data = BMCR_RESET;
if (hw->nic_type == athr_mt) {
hw->phy_configured = true;
return 0;
}
if ((atl1c_read_phy_reg(hw, MII_PHYSID1, &hw->phy_id1) != 0) ||
(atl1c_read_phy_reg(hw, MII_PHYSID2, &hw->phy_id2) != 0)) {
dev_err(&pdev->dev, "Error get phy ID\n");


@ -1640,7 +1640,8 @@ static void bcmgenet_power_up(struct bcmgenet_priv *priv,
switch (mode) {
case GENET_POWER_PASSIVE:
reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS);
reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS |
EXT_ENERGY_DET_MASK);
if (GENET_IS_V5(priv)) {
reg &= ~(EXT_PWR_DOWN_PHY_EN |
EXT_PWR_DOWN_PHY_RD |
@ -3237,15 +3238,21 @@ static void bcmgenet_get_hw_addr(struct bcmgenet_priv *priv,
/* Returns a reusable dma control register value */
static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv)
{
unsigned int i;
u32 reg;
u32 dma_ctrl;
/* disable DMA */
dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN;
for (i = 0; i < priv->hw_params->tx_queues; i++)
dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
reg &= ~dma_ctrl;
bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN;
for (i = 0; i < priv->hw_params->rx_queues; i++)
dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
reg &= ~dma_ctrl;
bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
@ -3292,7 +3299,6 @@ static int bcmgenet_open(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
unsigned long dma_ctrl;
u32 reg;
int ret;
netif_dbg(priv, ifup, dev, "bcmgenet_open\n");
@ -3318,12 +3324,6 @@ static int bcmgenet_open(struct net_device *dev)
bcmgenet_set_hw_addr(priv, dev->dev_addr);
if (priv->internal_phy) {
reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
reg |= EXT_ENERGY_DET_MASK;
bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
}
/* Disable RX/TX DMA and flush TX queues */
dma_ctrl = bcmgenet_dma_disable(priv);
@ -4139,7 +4139,6 @@ static int bcmgenet_resume(struct device *d)
struct bcmgenet_priv *priv = netdev_priv(dev);
struct bcmgenet_rxnfc_rule *rule;
unsigned long dma_ctrl;
u32 reg;
int ret;
if (!netif_running(dev))
@ -4176,12 +4175,6 @@ static int bcmgenet_resume(struct device *d)
if (rule->state != BCMGENET_RXNFC_STATE_UNUSED)
bcmgenet_hfb_create_rxnfc_filter(priv, rule);
if (priv->internal_phy) {
reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
reg |= EXT_ENERGY_DET_MASK;
bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
}
/* Disable RX/TX DMA and flush TX queues */
dma_ctrl = bcmgenet_dma_disable(priv);


@ -186,12 +186,6 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
reg |= CMD_RX_EN;
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
if (priv->hw_params->flags & GENET_HAS_EXT) {
reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
reg &= ~EXT_ENERGY_DET_MASK;
bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
}
reg = UMAC_IRQ_MPD_R;
if (hfb_enable)
reg |= UMAC_IRQ_HFB_SM | UMAC_IRQ_HFB_MM;


@ -2643,6 +2643,9 @@ static void detach_ulds(struct adapter *adap)
{
unsigned int i;
if (!is_uld(adap))
return;
mutex_lock(&uld_mutex);
list_del(&adap->list_node);
@ -7141,10 +7144,13 @@ static void remove_one(struct pci_dev *pdev)
*/
destroy_workqueue(adapter->workq);
if (is_uld(adapter)) {
detach_ulds(adapter);
t4_uld_clean_up(adapter);
}
detach_ulds(adapter);
for_each_port(adapter, i)
if (adapter->port[i]->reg_state == NETREG_REGISTERED)
unregister_netdev(adapter->port[i]);
t4_uld_clean_up(adapter);
adap_free_hma_mem(adapter);
@ -7152,10 +7158,6 @@ static void remove_one(struct pci_dev *pdev)
cxgb4_free_mps_ref_entries(adapter);
for_each_port(adapter, i)
if (adapter->port[i]->reg_state == NETREG_REGISTERED)
unregister_netdev(adapter->port[i]);
debugfs_remove_recursive(adapter->debugfs_root);
if (!is_t4(adapter->params.chip))


@ -581,6 +581,9 @@ void t4_uld_clean_up(struct adapter *adap)
{
unsigned int i;
if (!is_uld(adap))
return;
mutex_lock(&uld_mutex);
for (i = 0; i < CXGB4_ULD_MAX; i++) {
if (!adap->uld[i].handle)


@ -1469,7 +1469,7 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = pci_enable_device(pdev);
if (err)
return -ENXIO;
return err;
err = pci_request_regions(pdev, "gvnic-cfg");
if (err)
@ -1477,19 +1477,12 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_master(pdev);
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (err) {
dev_err(&pdev->dev, "Failed to set dma mask: err=%d\n", err);
goto abort_with_pci_region;
}
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
if (err) {
dev_err(&pdev->dev,
"Failed to set consistent dma mask: err=%d\n", err);
goto abort_with_pci_region;
}
reg_bar = pci_iomap(pdev, GVE_REGISTER_BAR, 0);
if (!reg_bar) {
dev_err(&pdev->dev, "Failed to map pci bar!\n");
@ -1512,6 +1505,7 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev = alloc_etherdev_mqs(sizeof(*priv), max_tx_queues, max_rx_queues);
if (!dev) {
dev_err(&pdev->dev, "could not allocate netdev\n");
err = -ENOMEM;
goto abort_with_db_bar;
}
SET_NETDEV_DEV(dev, &pdev->dev);
@ -1565,7 +1559,7 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = register_netdev(dev);
if (err)
goto abort_with_wq;
goto abort_with_gve_init;
dev_info(&pdev->dev, "GVE version %s\n", gve_version_str);
dev_info(&pdev->dev, "GVE queue format %d\n", (int)priv->queue_format);
@ -1573,6 +1567,9 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
queue_work(priv->gve_wq, &priv->service_task);
return 0;
abort_with_gve_init:
gve_teardown_priv_resources(priv);
abort_with_wq:
destroy_workqueue(priv->gve_wq);
@ -1590,7 +1587,7 @@ abort_with_pci_region:
abort_with_enabled:
pci_disable_device(pdev);
return -ENXIO;
return err;
}
static void gve_remove(struct pci_dev *pdev)


@ -566,13 +566,6 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
return 0;
}
/* Prefetch the payload header. */
prefetch((char *)buf_state->addr + buf_state->page_info.page_offset);
#if L1_CACHE_BYTES < 128
prefetch((char *)buf_state->addr + buf_state->page_info.page_offset +
L1_CACHE_BYTES);
#endif
if (eop && buf_len <= priv->rx_copybreak) {
rx->skb_head = gve_rx_copy(priv->dev, napi,
&buf_state->page_info, buf_len, 0);


@ -2420,9 +2420,10 @@ out:
static void __ibmvnic_reset(struct work_struct *work)
{
struct ibmvnic_rwi *rwi;
struct ibmvnic_adapter *adapter;
bool saved_state = false;
struct ibmvnic_rwi *tmprwi;
struct ibmvnic_rwi *rwi;
unsigned long flags;
u32 reset_state;
int rc = 0;
@ -2489,7 +2490,7 @@ static void __ibmvnic_reset(struct work_struct *work)
} else {
rc = do_reset(adapter, rwi, reset_state);
}
kfree(rwi);
tmprwi = rwi;
adapter->last_reset_time = jiffies;
if (rc)
@ -2497,8 +2498,23 @@ static void __ibmvnic_reset(struct work_struct *work)
rwi = get_next_rwi(adapter);
/*
* If there is another reset queued, free the previous rwi
* and process the new reset even if previous reset failed
* (the previous reset could have failed because of a fail
* over for instance, so process the fail over).
*
* If there are no resets queued and the previous reset failed,
* the adapter would be in an undefined state. So retry the
* previous reset as a hard reset.
*/
if (rwi)
kfree(tmprwi);
else if (rc)
rwi = tmprwi;
if (rwi && (rwi->reset_reason == VNIC_RESET_FAILOVER ||
rwi->reset_reason == VNIC_RESET_MOBILITY))
rwi->reset_reason == VNIC_RESET_MOBILITY || rc))
adapter->force_reset_recovery = true;
}


@ -7664,6 +7664,7 @@ err_flashmap:
err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
pci_disable_pcie_error_reporting(pdev);
pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:


@ -2227,6 +2227,7 @@ err_sw_init:
err_ioremap:
free_netdev(netdev);
err_alloc_netdev:
pci_disable_pcie_error_reporting(pdev);
pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:


@ -3798,6 +3798,7 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
pci_disable_pcie_error_reporting(pdev);
pci_release_regions(pdev);
err_pci_reg:
err_dma:


@ -931,6 +931,7 @@ static void igb_configure_msix(struct igb_adapter *adapter)
**/
static int igb_request_msix(struct igb_adapter *adapter)
{
unsigned int num_q_vectors = adapter->num_q_vectors;
struct net_device *netdev = adapter->netdev;
int i, err = 0, vector = 0, free_vector = 0;
@ -939,7 +940,13 @@ static int igb_request_msix(struct igb_adapter *adapter)
if (err)
goto err_out;
for (i = 0; i < adapter->num_q_vectors; i++) {
if (num_q_vectors > MAX_Q_VECTORS) {
num_q_vectors = MAX_Q_VECTORS;
dev_warn(&adapter->pdev->dev,
"The number of queue vectors (%d) is higher than max allowed (%d)\n",
adapter->num_q_vectors, MAX_Q_VECTORS);
}
for (i = 0; i < num_q_vectors; i++) {
struct igb_q_vector *q_vector = adapter->q_vector[i];
vector++;
@ -1678,14 +1685,15 @@ static bool is_any_txtime_enabled(struct igb_adapter *adapter)
**/
static void igb_config_tx_modes(struct igb_adapter *adapter, int queue)
{
struct igb_ring *ring = adapter->tx_ring[queue];
struct net_device *netdev = adapter->netdev;
struct e1000_hw *hw = &adapter->hw;
struct igb_ring *ring;
u32 tqavcc, tqavctrl;
u16 value;
WARN_ON(hw->mac.type != e1000_i210);
WARN_ON(queue < 0 || queue > 1);
ring = adapter->tx_ring[queue];
/* If any of the Qav features is enabled, configure queues as SR and
* with HIGH PRIO. If none is, then configure them with LOW PRIO and
@ -3615,6 +3623,7 @@ err_sw_init:
err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
pci_disable_pcie_error_reporting(pdev);
pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
@ -4835,6 +4844,8 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring)
DMA_TO_DEVICE);
}
tx_buffer->next_to_watch = NULL;
/* move us one more past the eop_desc for start of next pkt */
tx_buffer++;
i++;


@ -578,7 +578,7 @@ static inline s32 igc_read_phy_reg(struct igc_hw *hw, u32 offset, u16 *data)
if (hw->phy.ops.read_reg)
return hw->phy.ops.read_reg(hw, offset, data);
return 0;
return -EOPNOTSUPP;
}
void igc_reinit_locked(struct igc_adapter *);


@ -232,6 +232,8 @@ static void igc_clean_tx_ring(struct igc_ring *tx_ring)
igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
}
tx_buffer->next_to_watch = NULL;
/* move us one more past the eop_desc for start of next pkt */
tx_buffer++;
i++;
@ -6054,6 +6056,7 @@ err_sw_init:
err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
pci_disable_pcie_error_reporting(pdev);
pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:


@ -11067,6 +11067,7 @@ err_ioremap:
disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
free_netdev(netdev);
err_alloc_etherdev:
pci_disable_pcie_error_reporting(pdev);
pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:


@ -211,7 +211,7 @@ struct xfrm_state *ixgbevf_ipsec_find_rx_state(struct ixgbevf_ipsec *ipsec,
static int ixgbevf_ipsec_parse_proto_keys(struct xfrm_state *xs,
u32 *mykey, u32 *mysalt)
{
struct net_device *dev = xs->xso.dev;
struct net_device *dev = xs->xso.real_dev;
unsigned char *key_data;
char *alg_name = NULL;
int key_len;
@ -260,12 +260,15 @@ static int ixgbevf_ipsec_parse_proto_keys(struct xfrm_state *xs,
**/
static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs)
{
struct net_device *dev = xs->xso.dev;
struct ixgbevf_adapter *adapter = netdev_priv(dev);
struct ixgbevf_ipsec *ipsec = adapter->ipsec;
struct net_device *dev = xs->xso.real_dev;
struct ixgbevf_adapter *adapter;
struct ixgbevf_ipsec *ipsec;
u16 sa_idx;
int ret;
adapter = netdev_priv(dev);
ipsec = adapter->ipsec;
if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) {
netdev_err(dev, "Unsupported protocol 0x%04x for IPsec offload\n",
xs->id.proto);
@ -383,11 +386,14 @@ static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs)
**/
static void ixgbevf_ipsec_del_sa(struct xfrm_state *xs)
{
struct net_device *dev = xs->xso.dev;
struct ixgbevf_adapter *adapter = netdev_priv(dev);
struct ixgbevf_ipsec *ipsec = adapter->ipsec;
struct net_device *dev = xs->xso.real_dev;
struct ixgbevf_adapter *adapter;
struct ixgbevf_ipsec *ipsec;
u16 sa_idx;
adapter = netdev_priv(dev);
ipsec = adapter->ipsec;
if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) {
sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_RX_INDEX;


@ -2299,19 +2299,19 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
skb_frag_off_set(frag, pp->rx_offset_correction);
skb_frag_size_set(frag, data_len);
__skb_frag_set_page(frag, page);
/* last fragment */
if (len == *size) {
struct skb_shared_info *sinfo;
sinfo = xdp_get_shared_info_from_buff(xdp);
sinfo->nr_frags = xdp_sinfo->nr_frags;
memcpy(sinfo->frags, xdp_sinfo->frags,
sinfo->nr_frags * sizeof(skb_frag_t));
}
} else {
page_pool_put_full_page(rxq->page_pool, page, true);
}
/* last fragment */
if (len == *size) {
struct skb_shared_info *sinfo;
sinfo = xdp_get_shared_info_from_buff(xdp);
sinfo->nr_frags = xdp_sinfo->nr_frags;
memcpy(sinfo->frags, xdp_sinfo->frags,
sinfo->nr_frags * sizeof(skb_frag_t));
}
*size -= len;
}


@ -86,6 +86,22 @@ bool is_lmac_valid(struct cgx *cgx, int lmac_id)
return test_bit(lmac_id, &cgx->lmac_bmap);
}
/* Helper function to get sequential index
* given the enabled LMAC of a CGX
*/
static int get_sequence_id_of_lmac(struct cgx *cgx, int lmac_id)
{
int tmp, id = 0;
for_each_set_bit(tmp, &cgx->lmac_bmap, MAX_LMAC_PER_CGX) {
if (tmp == lmac_id)
break;
id++;
}
return id;
}
struct mac_ops *get_mac_ops(void *cgxd)
{
if (!cgxd)
@ -211,37 +227,257 @@ static u64 mac2u64 (u8 *mac_addr)
return mac;
}
static void cfg2mac(u64 cfg, u8 *mac_addr)
{
int i, index = 0;
for (i = ETH_ALEN - 1; i >= 0; i--, index++)
mac_addr[i] = (cfg >> (8 * index)) & 0xFF;
}
int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr)
{
struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
struct mac_ops *mac_ops;
int index, id;
u64 cfg;
/* access mac_ops to know csr_offset */
mac_ops = cgx_dev->mac_ops;
/* copy 6bytes from macaddr */
/* memcpy(&cfg, mac_addr, 6); */
cfg = mac2u64 (mac_addr);
cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (lmac_id * 0x8)),
id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
index = id * lmac->mac_to_index_bmap.max;
cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)),
cfg | CGX_DMAC_CAM_ADDR_ENABLE | ((u64)lmac_id << 49));
cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
cfg |= CGX_DMAC_CTL0_CAM_ENABLE;
cfg |= (CGX_DMAC_CTL0_CAM_ENABLE | CGX_DMAC_BCAST_MODE |
CGX_DMAC_MCAST_MODE);
cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
return 0;
}
u64 cgx_read_dmac_ctrl(void *cgxd, int lmac_id)
{
struct mac_ops *mac_ops;
struct cgx *cgx = cgxd;
if (!cgxd || !is_lmac_valid(cgxd, lmac_id))
return 0;
cgx = cgxd;
/* Get mac_ops to know csr offset */
mac_ops = cgx->mac_ops;
return cgx_read(cgxd, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
}
u64 cgx_read_dmac_entry(void *cgxd, int index)
{
struct mac_ops *mac_ops;
struct cgx *cgx;
if (!cgxd)
return 0;
cgx = cgxd;
mac_ops = cgx->mac_ops;
return cgx_read(cgx, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 8)));
}
int cgx_lmac_addr_add(u8 cgx_id, u8 lmac_id, u8 *mac_addr)
{
struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
struct mac_ops *mac_ops;
int index, idx;
u64 cfg = 0;
int id;
if (!lmac)
return -ENODEV;
mac_ops = cgx_dev->mac_ops;
/* Get available index where entry is to be installed */
idx = rvu_alloc_rsrc(&lmac->mac_to_index_bmap);
if (idx < 0)
return idx;
id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
index = id * lmac->mac_to_index_bmap.max + idx;
cfg = mac2u64 (mac_addr);
cfg |= CGX_DMAC_CAM_ADDR_ENABLE;
cfg |= ((u64)lmac_id << 49);
cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), cfg);
cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_CAM_ACCEPT);
if (is_multicast_ether_addr(mac_addr)) {
cfg &= ~GENMASK_ULL(2, 1);
cfg |= CGX_DMAC_MCAST_MODE_CAM;
lmac->mcast_filters_count++;
} else if (!lmac->mcast_filters_count) {
cfg |= CGX_DMAC_MCAST_MODE;
}
cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
return idx;
}
int cgx_lmac_addr_reset(u8 cgx_id, u8 lmac_id)
{
struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
struct mac_ops *mac_ops;
u8 index = 0, id;
u64 cfg;
if (!lmac)
return -ENODEV;
mac_ops = cgx_dev->mac_ops;
/* Restore index 0 to its default init value as done during
* cgx_lmac_init
*/
set_bit(0, lmac->mac_to_index_bmap.bmap);
id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
index = id * lmac->mac_to_index_bmap.max + index;
cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), 0);
/* Reset CGXX_CMRX_RX_DMAC_CTL0 register to default state */
cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
cfg &= ~CGX_DMAC_CAM_ACCEPT;
cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_MCAST_MODE);
cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
return 0;
}
/* Allows caller to change macaddress associated with index
* in dmac filter table including index 0 reserved for
* interface mac address
*/
int cgx_lmac_addr_update(u8 cgx_id, u8 lmac_id, u8 *mac_addr, u8 index)
{
struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
struct mac_ops *mac_ops;
struct lmac *lmac;
u64 cfg;
int id;
lmac = lmac_pdata(lmac_id, cgx_dev);
if (!lmac)
return -ENODEV;
mac_ops = cgx_dev->mac_ops;
/* Validate the index */
if (index >= lmac->mac_to_index_bmap.max)
return -EINVAL;
/* ensure index is already set */
if (!test_bit(index, lmac->mac_to_index_bmap.bmap))
return -EINVAL;
id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
index = id * lmac->mac_to_index_bmap.max + index;
cfg = cgx_read(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)));
cfg &= ~CGX_RX_DMAC_ADR_MASK;
cfg |= mac2u64 (mac_addr);
cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), cfg);
return 0;
}
int cgx_lmac_addr_del(u8 cgx_id, u8 lmac_id, u8 index)
{
struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
struct mac_ops *mac_ops;
u8 mac[ETH_ALEN];
u64 cfg;
int id;
if (!lmac)
return -ENODEV;
mac_ops = cgx_dev->mac_ops;
/* Validate the index */
if (index >= lmac->mac_to_index_bmap.max)
return -EINVAL;
/* Skip deletion for reserved index i.e. index 0 */
if (index == 0)
return 0;
rvu_free_rsrc(&lmac->mac_to_index_bmap, index);
id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
index = id * lmac->mac_to_index_bmap.max + index;
/* Read MAC address to check whether it is ucast or mcast */
cfg = cgx_read(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)));
cfg2mac(cfg, mac);
if (is_multicast_ether_addr(mac))
lmac->mcast_filters_count--;
if (!lmac->mcast_filters_count) {
cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
cfg &= ~GENMASK_ULL(2, 1);
cfg |= CGX_DMAC_MCAST_MODE;
cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
}
cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), 0);
return 0;
}
int cgx_lmac_addr_max_entries_get(u8 cgx_id, u8 lmac_id)
{
struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
if (lmac)
return lmac->mac_to_index_bmap.max;
return 0;
}
u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id)
{
struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
struct mac_ops *mac_ops;
int index;
u64 cfg;
int id;
mac_ops = cgx_dev->mac_ops;
cfg = cgx_read(cgx_dev, 0, CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8);
id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
index = id * lmac->mac_to_index_bmap.max;
cfg = cgx_read(cgx_dev, 0, CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8);
return cfg & CGX_RX_DMAC_ADR_MASK;
}
@ -297,35 +533,51 @@ int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable)
void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable)
{
struct cgx *cgx = cgx_get_pdata(cgx_id);
struct lmac *lmac = lmac_pdata(lmac_id, cgx);
u16 max_dmac = lmac->mac_to_index_bmap.max;
struct mac_ops *mac_ops;
int index, i;
u64 cfg = 0;
int id;
if (!cgx)
return;
id = get_sequence_id_of_lmac(cgx, lmac_id);
mac_ops = cgx->mac_ops;
if (enable) {
/* Enable promiscuous mode on LMAC */
cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
cfg &= ~(CGX_DMAC_CAM_ACCEPT | CGX_DMAC_MCAST_MODE);
cfg |= CGX_DMAC_BCAST_MODE;
cfg &= ~CGX_DMAC_CAM_ACCEPT;
cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_MCAST_MODE);
cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
cfg = cgx_read(cgx, 0,
(CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8));
cfg &= ~CGX_DMAC_CAM_ADDR_ENABLE;
cgx_write(cgx, 0,
(CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8), cfg);
for (i = 0; i < max_dmac; i++) {
index = id * max_dmac + i;
cfg = cgx_read(cgx, 0,
(CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8));
cfg &= ~CGX_DMAC_CAM_ADDR_ENABLE;
cgx_write(cgx, 0,
(CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8), cfg);
}
} else {
/* Disable promiscuous mode */
cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
cfg |= CGX_DMAC_CAM_ACCEPT | CGX_DMAC_MCAST_MODE;
cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
cfg = cgx_read(cgx, 0,
(CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8));
cfg |= CGX_DMAC_CAM_ADDR_ENABLE;
cgx_write(cgx, 0,
(CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8), cfg);
for (i = 0; i < max_dmac; i++) {
index = id * max_dmac + i;
cfg = cgx_read(cgx, 0,
(CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8));
if ((cfg & CGX_RX_DMAC_ADR_MASK) != 0) {
cfg |= CGX_DMAC_CAM_ADDR_ENABLE;
cgx_write(cgx, 0,
(CGXX_CMRX_RX_DMAC_CAM0 +
index * 0x8),
cfg);
}
}
}
}
@ -1234,6 +1486,15 @@ static int cgx_lmac_init(struct cgx *cgx)
}
lmac->cgx = cgx;
lmac->mac_to_index_bmap.max =
MAX_DMAC_ENTRIES_PER_CGX / cgx->lmac_count;
err = rvu_alloc_bitmap(&lmac->mac_to_index_bmap);
if (err)
return err;
/* Reserve first entry for default MAC address */
set_bit(0, lmac->mac_to_index_bmap.bmap);
init_waitqueue_head(&lmac->wq_cmd_cmplt);
mutex_init(&lmac->cmd_lock);
spin_lock_init(&lmac->event_cb_lock);
@ -1274,6 +1535,7 @@ static int cgx_lmac_exit(struct cgx *cgx)
continue;
cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, false);
cgx_configure_interrupt(cgx, lmac, lmac->lmac_id, true);
kfree(lmac->mac_to_index_bmap.bmap);
kfree(lmac->name);
kfree(lmac);
}


@ -23,6 +23,7 @@
#define CGX_ID_MASK 0x7
#define MAX_LMAC_PER_CGX 4
#define MAX_DMAC_ENTRIES_PER_CGX 32
#define CGX_FIFO_LEN 65536 /* 64K for both Rx & Tx */
#define CGX_OFFSET(x) ((x) * MAX_LMAC_PER_CGX)
@ -46,10 +47,12 @@
#define CGXX_CMRX_RX_DMAC_CTL0 (0x1F8 + mac_ops->csr_offset)
#define CGX_DMAC_CTL0_CAM_ENABLE BIT_ULL(3)
#define CGX_DMAC_CAM_ACCEPT BIT_ULL(3)
#define CGX_DMAC_MCAST_MODE_CAM BIT_ULL(2)
#define CGX_DMAC_MCAST_MODE BIT_ULL(1)
#define CGX_DMAC_BCAST_MODE BIT_ULL(0)
#define CGXX_CMRX_RX_DMAC_CAM0 (0x200 + mac_ops->csr_offset)
#define CGX_DMAC_CAM_ADDR_ENABLE BIT_ULL(48)
#define CGX_DMAC_CAM_ENTRY_LMACID GENMASK_ULL(50, 49)
#define CGXX_CMRX_RX_DMAC_CAM1 0x400
#define CGX_RX_DMAC_ADR_MASK GENMASK_ULL(47, 0)
#define CGXX_CMRX_TX_STAT0 0x700
@ -139,7 +142,11 @@ int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat);
int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable);
int cgx_lmac_tx_enable(void *cgxd, int lmac_id, bool enable);
int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr);
int cgx_lmac_addr_reset(u8 cgx_id, u8 lmac_id);
u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id);
int cgx_lmac_addr_add(u8 cgx_id, u8 lmac_id, u8 *mac_addr);
int cgx_lmac_addr_del(u8 cgx_id, u8 lmac_id, u8 index);
int cgx_lmac_addr_max_entries_get(u8 cgx_id, u8 lmac_id);
void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable);
void cgx_lmac_enadis_rx_pause_fwding(void *cgxd, int lmac_id, bool enable);
int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable);
@ -165,4 +172,7 @@ u8 cgx_get_lmacid(void *cgxd, u8 lmac_index);
unsigned long cgx_get_lmac_bmap(void *cgxd);
void cgx_lmac_write(int cgx_id, int lmac_id, u64 offset, u64 val);
u64 cgx_lmac_read(int cgx_id, int lmac_id, u64 offset);
int cgx_lmac_addr_update(u8 cgx_id, u8 lmac_id, u8 *mac_addr, u8 index);
u64 cgx_read_dmac_ctrl(void *cgxd, int lmac_id);
u64 cgx_read_dmac_entry(void *cgxd, int index);
#endif /* CGX_H */


@ -10,17 +10,19 @@
#include "rvu.h"
#include "cgx.h"
/**
* struct lmac
* struct lmac - per lmac locks and properties
* @wq_cmd_cmplt: waitq to keep the process blocked until cmd completion
* @cmd_lock: Lock to serialize the command interface
* @resp: command response
* @link_info: link related information
* @mac_to_index_bmap: Mac address to CGX table index mapping
* @event_cb: callback for linkchange events
* @event_cb_lock: lock for serializing callback with unregister
* @cgx: parent cgx port
* @mcast_filters_count: Number of multicast filters installed
* @lmac_id: lmac port id
* @cmd_pend: flag set before new command is started
* flag cleared after command response is received
* @cgx: parent cgx port
* @lmac_id: lmac port id
* @name: lmac port name
*/
struct lmac {
@ -29,12 +31,14 @@ struct lmac {
struct mutex cmd_lock;
u64 resp;
struct cgx_link_user_info link_info;
struct rsrc_bmap mac_to_index_bmap;
struct cgx_event_cb event_cb;
/* lock for serializing callback with unregister */
spinlock_t event_cb_lock;
bool cmd_pend;
struct cgx *cgx;
u8 mcast_filters_count;
u8 lmac_id;
bool cmd_pend;
char *name;
};


@ -134,6 +134,8 @@ M(MSIX_OFFSET, 0x005, msix_offset, msg_req, msix_offset_rsp) \
M(VF_FLR, 0x006, vf_flr, msg_req, msg_rsp) \
M(PTP_OP, 0x007, ptp_op, ptp_req, ptp_rsp) \
M(GET_HW_CAP, 0x008, get_hw_cap, msg_req, get_hw_cap_rsp) \
M(LMTST_TBL_SETUP, 0x00a, lmtst_tbl_setup, lmtst_tbl_setup_req, \
msg_rsp) \
M(SET_VF_PERM, 0x00b, set_vf_perm, set_vf_perm, msg_rsp) \
/* CGX mbox IDs (range 0x200 - 0x3FF) */ \
M(CGX_START_RXTX, 0x200, cgx_start_rxtx, msg_req, msg_rsp) \
@ -163,7 +165,15 @@ M(CGX_SET_LINK_MODE, 0x214, cgx_set_link_mode, cgx_set_link_mode_req,\
M(CGX_FEATURES_GET, 0x215, cgx_features_get, msg_req, \
cgx_features_info_msg) \
M(RPM_STATS, 0x216, rpm_stats, msg_req, rpm_stats_rsp) \
/* NPA mbox IDs (range 0x400 - 0x5FF) */ \
M(CGX_MAC_ADDR_ADD, 0x217, cgx_mac_addr_add, cgx_mac_addr_add_req, \
cgx_mac_addr_add_rsp) \
M(CGX_MAC_ADDR_DEL, 0x218, cgx_mac_addr_del, cgx_mac_addr_del_req, \
msg_rsp) \
M(CGX_MAC_MAX_ENTRIES_GET, 0x219, cgx_mac_max_entries_get, msg_req, \
cgx_max_dmac_entries_get_rsp) \
M(CGX_MAC_ADDR_RESET, 0x21A, cgx_mac_addr_reset, msg_req, msg_rsp) \
M(CGX_MAC_ADDR_UPDATE, 0x21B, cgx_mac_addr_update, cgx_mac_addr_update_req, \
msg_rsp) \
/* NPA mbox IDs (range 0x400 - 0x5FF) */ \
M(NPA_LF_ALLOC, 0x400, npa_lf_alloc, \
npa_lf_alloc_req, npa_lf_alloc_rsp) \
@ -401,6 +411,38 @@ struct cgx_mac_addr_set_or_get {
u8 mac_addr[ETH_ALEN];
};
/* Structure for requesting the operation to
* add DMAC filter entry into CGX interface
*/
struct cgx_mac_addr_add_req {
struct mbox_msghdr hdr;
u8 mac_addr[ETH_ALEN];
};
/* Structure for response against the operation to
* add DMAC filter entry into CGX interface
*/
struct cgx_mac_addr_add_rsp {
struct mbox_msghdr hdr;
u8 index;
};
/* Structure for requesting the operation to
* delete DMAC filter entry from CGX interface
*/
struct cgx_mac_addr_del_req {
struct mbox_msghdr hdr;
u8 index;
};
/* Structure for response against the operation to
* get maximum supported DMAC filter entries
*/
struct cgx_max_dmac_entries_get_rsp {
struct mbox_msghdr hdr;
u8 max_dmac_filters;
};
struct cgx_link_user_info {
uint64_t link_up:1;
uint64_t full_duplex:1;
@ -499,6 +541,12 @@ struct cgx_set_link_mode_rsp {
int status;
};
struct cgx_mac_addr_update_req {
struct mbox_msghdr hdr;
u8 mac_addr[ETH_ALEN];
u8 index;
};
#define RVU_LMAC_FEAT_FC BIT_ULL(0) /* pause frames */
#define RVU_LMAC_FEAT_PTP BIT_ULL(1) /* precision time protocol */
#define RVU_MAC_VERSION BIT_ULL(2)
@ -1278,6 +1326,14 @@ struct set_vf_perm {
u64 flags;
};
struct lmtst_tbl_setup_req {
struct mbox_msghdr hdr;
u16 base_pcifunc;
u8 use_local_lmt_region;
u64 lmt_iova;
u64 rsvd[4];
};
/* CPT mailbox error codes
* Range 901 - 1000.
*/


@ -2333,6 +2333,7 @@ static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc)
rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSOW);
rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSO);
rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NPA);
rvu_reset_lmt_map_tbl(rvu, pcifunc);
rvu_detach_rsrcs(rvu, NULL, pcifunc);
mutex_unlock(&rvu->flr_lock);
}


@ -243,6 +243,7 @@ struct rvu_pfvf {
u8 nix_blkaddr; /* BLKADDR_NIX0/1 assigned to this PF */
u8 nix_rx_intf; /* NIX0_RX/NIX1_RX interface to NPC */
u8 nix_tx_intf; /* NIX0_TX/NIX1_TX interface to NPC */
u64 lmt_base_addr; /* Preserving the pcifunc's lmtst base addr */
unsigned long flags;
};
@ -656,6 +657,8 @@ void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable);
int rvu_cgx_start_stop_io(struct rvu *rvu, u16 pcifunc, bool start);
int rvu_cgx_nix_cuml_stats(struct rvu *rvu, void *cgxd, int lmac_id, int index,
int rxtxflag, u64 *stat);
void rvu_cgx_disable_dmac_entries(struct rvu *rvu, u16 pcifunc);
/* NPA APIs */
int rvu_npa_init(struct rvu *rvu);
void rvu_npa_freemem(struct rvu *rvu);
@ -741,6 +744,7 @@ void npc_read_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature);
u32 rvu_cgx_get_fifolen(struct rvu *rvu);
void *rvu_first_cgx_pdata(struct rvu *rvu);
int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id);
int npc_get_nixlf_mcam_index(struct npc_mcam *mcam, u16 pcifunc, int nixlf,
int type);
@ -754,6 +758,9 @@ int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int lf, int slot);
int rvu_set_channels_base(struct rvu *rvu);
void rvu_program_channels(struct rvu *rvu);
/* CN10K RVU - LMT*/
void rvu_reset_lmt_map_tbl(struct rvu *rvu, u16 pcifunc);
#ifdef CONFIG_DEBUG_FS
void rvu_dbg_init(struct rvu *rvu);
void rvu_dbg_exit(struct rvu *rvu);


@ -63,7 +63,7 @@ static u16 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id)
return rvu->cgxlmac2pf_map[CGX_OFFSET(cgx_id) + lmac_id];
}
static int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id)
int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id)
{
unsigned long pfmap;
@ -454,6 +454,31 @@ int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start)
return 0;
}
void rvu_cgx_disable_dmac_entries(struct rvu *rvu, u16 pcifunc)
{
int pf = rvu_get_pf(pcifunc);
int i = 0, lmac_count = 0;
u8 max_dmac_filters;
u8 cgx_id, lmac_id;
void *cgx_dev;
if (!is_cgx_config_permitted(rvu, pcifunc))
return;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
cgx_dev = cgx_get_pdata(cgx_id);
lmac_count = cgx_get_lmac_cnt(cgx_dev);
max_dmac_filters = MAX_DMAC_ENTRIES_PER_CGX / lmac_count;
for (i = 0; i < max_dmac_filters; i++)
cgx_lmac_addr_del(cgx_id, lmac_id, i);
/* cgx_lmac_addr_del() does not clear the entry at index 0,
* so reset it explicitly
*/
cgx_lmac_addr_reset(cgx_id, lmac_id);
}
int rvu_mbox_handler_cgx_start_rxtx(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
@ -557,6 +582,63 @@ int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu,
return 0;
}
int rvu_mbox_handler_cgx_mac_addr_add(struct rvu *rvu,
struct cgx_mac_addr_add_req *req,
struct cgx_mac_addr_add_rsp *rsp)
{
int pf = rvu_get_pf(req->hdr.pcifunc);
u8 cgx_id, lmac_id;
int rc = 0;
if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
return -EPERM;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
rc = cgx_lmac_addr_add(cgx_id, lmac_id, req->mac_addr);
if (rc >= 0) {
rsp->index = rc;
return 0;
}
return rc;
}
int rvu_mbox_handler_cgx_mac_addr_del(struct rvu *rvu,
struct cgx_mac_addr_del_req *req,
struct msg_rsp *rsp)
{
int pf = rvu_get_pf(req->hdr.pcifunc);
u8 cgx_id, lmac_id;
if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
return -EPERM;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
return cgx_lmac_addr_del(cgx_id, lmac_id, req->index);
}
int rvu_mbox_handler_cgx_mac_max_entries_get(struct rvu *rvu,
struct msg_req *req,
struct cgx_max_dmac_entries_get_rsp
*rsp)
{
int pf = rvu_get_pf(req->hdr.pcifunc);
u8 cgx_id, lmac_id;
/* If the msg is received from a PF that is not mapped to a CGX
* LMAC, or from a VF, then no DMAC filter entries are allocated
* at the CGX level. So return zero.
*/
if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) {
rsp->max_dmac_filters = 0;
return 0;
}
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
rsp->max_dmac_filters = cgx_lmac_addr_max_entries_get(cgx_id, lmac_id);
return 0;
}
int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu,
struct cgx_mac_addr_set_or_get *req,
struct cgx_mac_addr_set_or_get *rsp)
@ -953,3 +1035,30 @@ int rvu_mbox_handler_cgx_set_link_mode(struct rvu *rvu,
rsp->status = cgx_set_link_mode(cgxd, req->args, cgx_idx, lmac);
return 0;
}
int rvu_mbox_handler_cgx_mac_addr_reset(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
int pf = rvu_get_pf(req->hdr.pcifunc);
u8 cgx_id, lmac_id;
if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
return -EPERM;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
return cgx_lmac_addr_reset(cgx_id, lmac_id);
}
int rvu_mbox_handler_cgx_mac_addr_update(struct rvu *rvu,
struct cgx_mac_addr_update_req *req,
struct msg_rsp *rsp)
{
int pf = rvu_get_pf(req->hdr.pcifunc);
u8 cgx_id, lmac_id;
if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
return -EPERM;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
return cgx_lmac_addr_update(cgx_id, lmac_id, req->mac_addr, req->index);
}


@ -10,6 +10,206 @@
#include "cgx.h"
#include "rvu_reg.h"
/* RVU LMTST */
#define LMT_TBL_OP_READ 0
#define LMT_TBL_OP_WRITE 1
#define LMT_MAP_TABLE_SIZE (128 * 1024)
#define LMT_MAPTBL_ENTRY_SIZE 16
/* Function to perform operations (read/write) on lmtst map table */
static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val,
int lmt_tbl_op)
{
void __iomem *lmt_map_base;
u64 tbl_base;
tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE);
lmt_map_base = ioremap_wc(tbl_base, LMT_MAP_TABLE_SIZE);
if (!lmt_map_base) {
dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n");
return -ENOMEM;
}
if (lmt_tbl_op == LMT_TBL_OP_READ) {
*val = readq(lmt_map_base + index);
} else {
writeq((*val), (lmt_map_base + index));
/* Flush the AP interceptor cache to make APR_LMT_MAP_ENTRY_S
* changes effective. Writing 1 triggers the flush; the read back
* acts as a barrier and sets up a data dependency. A write of 0
* after the write of 1 completes the flush.
*/
rvu_write64(rvu, BLKADDR_APR, APR_AF_LMT_CTL, BIT_ULL(0));
rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_CTL);
rvu_write64(rvu, BLKADDR_APR, APR_AF_LMT_CTL, 0x00);
}
iounmap(lmt_map_base);
return 0;
}
static u32 rvu_get_lmtst_tbl_index(struct rvu *rvu, u16 pcifunc)
{
return ((rvu_get_pf(pcifunc) * rvu->hw->total_vfs) +
(pcifunc & RVU_PFVF_FUNC_MASK)) * LMT_MAPTBL_ENTRY_SIZE;
}
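rvu_get_lmtst_tbl_index() linearizes (PF, function) into a byte offset, one 16-byte entry per function. A hedged stand-alone sketch, assuming the PF number sits above a 10-bit function field (the actual RVU_PFVF_* layout is defined elsewhere in the driver):

/* Sketch only; RVU_PFVF_FUNC_MASK and the PF shift are assumptions. */
#include <stdint.h>
#include <stdio.h>

#define LMT_MAPTBL_ENTRY_SIZE 16
#define RVU_PFVF_FUNC_MASK 0x3ff	/* assumed 10-bit function field */

static uint32_t lmtst_tbl_index(uint16_t pcifunc, uint16_t total_vfs)
{
	uint32_t pf = pcifunc >> 10;	/* assumed PF position */

	return (pf * total_vfs + (pcifunc & RVU_PFVF_FUNC_MASK)) *
	       LMT_MAPTBL_ENTRY_SIZE;
}

int main(void)
{
	/* PF 2, function 3, 128 functions per PF -> offset 4144 */
	printf("offset = %u\n", lmtst_tbl_index((2 << 10) | 3, 128));
	return 0;
}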
static int rvu_get_lmtaddr(struct rvu *rvu, u16 pcifunc,
u64 iova, u64 *lmt_addr)
{
u64 pa, val, pf;
int err;
if (!iova) {
dev_err(rvu->dev, "%s Requested Null address for transulation\n", __func__);
return -EINVAL;
}
rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_REQ, iova);
pf = rvu_get_pf(pcifunc) & 0x1F;
val = BIT_ULL(63) | BIT_ULL(14) | BIT_ULL(13) | pf << 8 |
((pcifunc & RVU_PFVF_FUNC_MASK) & 0xFF);
rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_TXN_REQ, val);
err = rvu_poll_reg(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_RSP_STS, BIT_ULL(0), false);
if (err) {
dev_err(rvu->dev, "%s LMTLINE iova transulation failed\n", __func__);
return err;
}
val = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_RSP_STS);
if (val & ~0x1ULL) {
dev_err(rvu->dev, "%s LMTLINE iova transulation failed err:%llx\n", __func__, val);
return -EIO;
}
/* PA[51:12] = RVU_AF_SMMU_TLN_FLIT1[60:21]
* PA[11:0] = IOVA[11:0]
*/
pa = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_TLN_FLIT1) >> 21;
pa &= GENMASK_ULL(39, 0);
*lmt_addr = (pa << 12) | (iova & 0xFFF);
return 0;
}
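The PA reconstruction above follows the comment: PA[51:12] comes from RVU_AF_SMMU_TLN_FLIT1[60:21] and PA[11:0] from the IOVA. A minimal stand-alone sketch of that bit surgery:

/* Sketch of the IOVA -> PA composition above; not kernel code. */
#include <stdint.h>
#include <stdio.h>

static uint64_t lmt_pa(uint64_t flit1, uint64_t iova)
{
	uint64_t pfn = (flit1 >> 21) & ((1ULL << 40) - 1);	/* PA[51:12] */

	return (pfn << 12) | (iova & 0xFFF);	/* keep page offset PA[11:0] */
}

int main(void)
{
	uint64_t flit1 = 0x12345ULL << 21;	/* made-up register value */

	printf("pa = 0x%llx\n", (unsigned long long)lmt_pa(flit1, 0xabc));
	return 0;
}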
static int rvu_update_lmtaddr(struct rvu *rvu, u16 pcifunc, u64 lmt_addr)
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
u32 tbl_idx;
int err = 0;
u64 val;
/* Read the current lmt addr of pcifunc */
tbl_idx = rvu_get_lmtst_tbl_index(rvu, pcifunc);
err = lmtst_map_table_ops(rvu, tbl_idx, &val, LMT_TBL_OP_READ);
if (err) {
dev_err(rvu->dev,
"Failed to read LMT map table: index 0x%x err %d\n",
tbl_idx, err);
return err;
}
/* Store the secondary's lmt base address, as it needs to be
* restored on FLR. Also make sure this default value doesn't
* get overwritten on repeated calls to this mailbox.
*/
if (!pfvf->lmt_base_addr)
pfvf->lmt_base_addr = val;
/* Update the LMT table with new addr */
err = lmtst_map_table_ops(rvu, tbl_idx, &lmt_addr, LMT_TBL_OP_WRITE);
if (err) {
dev_err(rvu->dev,
"Failed to update LMT map table: index 0x%x err %d\n",
tbl_idx, err);
return err;
}
return 0;
}
int rvu_mbox_handler_lmtst_tbl_setup(struct rvu *rvu,
struct lmtst_tbl_setup_req *req,
struct msg_rsp *rsp)
{
u64 lmt_addr, val;
u32 pri_tbl_idx;
int err = 0;
/* Check if the PF_FUNC wants to use its own local memory as the
* LMTLINE region; if so, convert that IOVA to a physical address
* and populate the LMT table with that address
*/
if (req->use_local_lmt_region) {
err = rvu_get_lmtaddr(rvu, req->hdr.pcifunc,
req->lmt_iova, &lmt_addr);
if (err < 0)
return err;
/* Update the lmt addr for this PFFUNC in the LMT table */
err = rvu_update_lmtaddr(rvu, req->hdr.pcifunc, lmt_addr);
if (err)
return err;
}
/* Reconfigure the lmtst map table for lmt region shared mode, i.e.
* multiple PF_FUNCs sharing one LMTLINE region. The primary/base
* pcifunc (passed as an argument to the mailbox) is the one whose
* lmt base address is shared with the secondary pcifuncs (the
* callers of this mailbox).
*/
if (req->base_pcifunc) {
/* Calculate the LMT table index of the primary
* pcifunc.
*/
pri_tbl_idx = rvu_get_lmtst_tbl_index(rvu, req->base_pcifunc);
/* Read the base lmt addr of the primary pcifunc */
err = lmtst_map_table_ops(rvu, pri_tbl_idx, &val,
LMT_TBL_OP_READ);
if (err) {
dev_err(rvu->dev,
"Failed to read LMT map table: index 0x%x err %d\n",
pri_tbl_idx, err);
return err;
}
/* Update the base lmt addr of secondary with primary's base
* lmt addr.
*/
err = rvu_update_lmtaddr(rvu, req->hdr.pcifunc, val);
if (err)
return err;
}
return 0;
}
/* Resetting the lmtst map table to original base addresses */
void rvu_reset_lmt_map_tbl(struct rvu *rvu, u16 pcifunc)
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
u32 tbl_idx;
int err;
if (is_rvu_otx2(rvu))
return;
if (pfvf->lmt_base_addr) {
/* This corresponds to lmt map table index */
tbl_idx = rvu_get_lmtst_tbl_index(rvu, pcifunc);
/* Restore the original lmt base addr for the respective
* pcifunc.
*/
err = lmtst_map_table_ops(rvu, tbl_idx, &pfvf->lmt_base_addr,
LMT_TBL_OP_WRITE);
if (err)
dev_err(rvu->dev,
"Failed to update LMT map table: index 0x%x err %d\n",
tbl_idx, err);
pfvf->lmt_base_addr = 0;
}
}
int rvu_set_channels_base(struct rvu *rvu)
{
struct rvu_hwinfo *hw = rvu->hw;


@ -1971,10 +1971,9 @@ static int cgx_print_stats(struct seq_file *s, int lmac_id)
return err;
}
static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused)
static int rvu_dbg_derive_lmacid(struct seq_file *filp, int *lmac_id)
{
struct dentry *current_dir;
int err, lmac_id;
char *buf;
current_dir = filp->file->f_path.dentry->d_parent;
@ -1982,17 +1981,87 @@ static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused)
if (!buf)
return -EINVAL;
err = kstrtoint(buf + 1, 10, &lmac_id);
if (!err) {
err = cgx_print_stats(filp, lmac_id);
if (err)
return err;
}
return kstrtoint(buf + 1, 10, lmac_id);
}
static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused)
{
int lmac_id, err;
err = rvu_dbg_derive_lmacid(filp, &lmac_id);
if (!err)
return cgx_print_stats(filp, lmac_id);
return err;
}
RVU_DEBUG_SEQ_FOPS(cgx_stat, cgx_stat_display, NULL);
static int cgx_print_dmac_flt(struct seq_file *s, int lmac_id)
{
struct pci_dev *pdev = NULL;
void *cgxd = s->private;
char *bcast, *mcast;
u16 index, domain;
u8 dmac[ETH_ALEN];
struct rvu *rvu;
u64 cfg, mac;
int pf;
rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
if (!rvu)
return -ENODEV;
pf = cgxlmac_to_pf(rvu, cgx_get_cgxid(cgxd), lmac_id);
domain = 2;
pdev = pci_get_domain_bus_and_slot(domain, pf + 1, 0);
if (!pdev)
return 0;
cfg = cgx_read_dmac_ctrl(cgxd, lmac_id);
bcast = cfg & CGX_DMAC_BCAST_MODE ? "ACCEPT" : "REJECT";
mcast = cfg & CGX_DMAC_MCAST_MODE ? "ACCEPT" : "REJECT";
seq_puts(s,
"PCI dev RVUPF BROADCAST MULTICAST FILTER-MODE\n");
seq_printf(s, "%s PF%d %9s %9s",
dev_name(&pdev->dev), pf, bcast, mcast);
if (cfg & CGX_DMAC_CAM_ACCEPT)
seq_printf(s, "%12s\n\n", "UNICAST");
else
seq_printf(s, "%16s\n\n", "PROMISCUOUS");
seq_puts(s, "\nDMAC-INDEX ADDRESS\n");
for (index = 0 ; index < 32 ; index++) {
cfg = cgx_read_dmac_entry(cgxd, index);
/* Display enabled dmac entries associated with current lmac */
if (lmac_id == FIELD_GET(CGX_DMAC_CAM_ENTRY_LMACID, cfg) &&
FIELD_GET(CGX_DMAC_CAM_ADDR_ENABLE, cfg)) {
mac = FIELD_GET(CGX_RX_DMAC_ADR_MASK, cfg);
u64_to_ether_addr(mac, dmac);
seq_printf(s, "%7d %pM\n", index, dmac);
}
}
return 0;
}
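The debugfs dump above decodes each 64-bit CAM word with the field definitions added to cgx.h: MAC address in bits 47:0, entry enable in bit 48, owning LMAC in bits 50:49. A stand-alone sketch of that decode:

/* Sketch of the DMAC CAM entry decode; masks mirror the cgx.h defines. */
#include <stdint.h>
#include <stdio.h>

struct dmac_cam_entry {
	uint64_t mac;			/* CGX_RX_DMAC_ADR_MASK, bits 47:0 */
	unsigned int enabled;		/* CGX_DMAC_CAM_ADDR_ENABLE, bit 48 */
	unsigned int lmac_id;		/* CGX_DMAC_CAM_ENTRY_LMACID, 50:49 */
};

static struct dmac_cam_entry decode_cam(uint64_t cfg)
{
	struct dmac_cam_entry e = {
		.mac = cfg & ((1ULL << 48) - 1),
		.enabled = (unsigned int)((cfg >> 48) & 1),
		.lmac_id = (unsigned int)((cfg >> 49) & 3),
	};

	return e;
}

int main(void)
{
	struct dmac_cam_entry e =
		decode_cam((1ULL << 48) | (2ULL << 49) | 0x001122334455ULL);

	printf("lmac %u enabled %u mac %012llx\n", e.lmac_id, e.enabled,
	       (unsigned long long)e.mac);
	return 0;
}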
static int rvu_dbg_cgx_dmac_flt_display(struct seq_file *filp, void *unused)
{
int err, lmac_id;
err = rvu_dbg_derive_lmacid(filp, &lmac_id);
if (!err)
return cgx_print_dmac_flt(filp, lmac_id);
return err;
}
RVU_DEBUG_SEQ_FOPS(cgx_dmac_flt, cgx_dmac_flt_display, NULL);
static void rvu_dbg_cgx_init(struct rvu *rvu)
{
struct mac_ops *mac_ops;
@ -2029,6 +2098,9 @@ static void rvu_dbg_cgx_init(struct rvu *rvu)
debugfs_create_file("stats", 0600, rvu->rvu_dbg.lmac,
cgx, &rvu_dbg_cgx_stat_fops);
debugfs_create_file("mac_filter", 0600,
rvu->rvu_dbg.lmac, cgx,
&rvu_dbg_cgx_dmac_flt_fops);
}
}
}


@ -346,6 +346,9 @@ static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
/* Free and disable any MCAM entries used by this NIX LF */
rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
/* Disable DMAC filters used */
rvu_cgx_disable_dmac_entries(rvu, pcifunc);
}
int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,


@ -49,6 +49,11 @@
#define RVU_AF_PFX_VF_BAR4_ADDR (0x5400 | (a) << 4)
#define RVU_AF_PFX_VF_BAR4_CFG (0x5600 | (a) << 4)
#define RVU_AF_PFX_LMTLINE_ADDR (0x5800 | (a) << 4)
#define RVU_AF_SMMU_ADDR_REQ (0x6000)
#define RVU_AF_SMMU_TXN_REQ (0x6008)
#define RVU_AF_SMMU_ADDR_RSP_STS (0x6010)
#define RVU_AF_SMMU_ADDR_TLN (0x6018)
#define RVU_AF_SMMU_TLN_FLIT1 (0x6030)
/* Admin function's privileged PF/VF registers */
#define RVU_PRIV_CONST (0x8000000)
@ -692,4 +697,9 @@
#define LBK_LINK_CFG_ID_MASK GENMASK_ULL(11, 6)
#define LBK_LINK_CFG_BASE_MASK GENMASK_ULL(5, 0)
/* APR */
#define APR_AF_LMT_CFG (0x000ull)
#define APR_AF_LMT_MAP_BASE (0x008ull)
#define APR_AF_LMT_CTL (0x010ull)
#endif /* RVU_REG_H */


@ -35,7 +35,8 @@ enum rvu_block_addr_e {
BLKADDR_NDC_NPA0 = 0xeULL,
BLKADDR_NDC_NIX1_RX = 0x10ULL,
BLKADDR_NDC_NIX1_TX = 0x11ULL,
BLK_COUNT = 0x12ULL,
BLKADDR_APR = 0x16ULL,
BLK_COUNT = 0x17ULL,
};
/* RVU Block Type Enumeration */


@ -7,7 +7,7 @@ obj-$(CONFIG_OCTEONTX2_PF) += rvu_nicpf.o
obj-$(CONFIG_OCTEONTX2_VF) += rvu_nicvf.o
rvu_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \
otx2_ptp.o otx2_flows.o otx2_tc.o cn10k.o
otx2_ptp.o otx2_flows.o otx2_tc.o cn10k.o otx2_dmac_flt.o
rvu_nicvf-y := otx2_vf.o
ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af


@ -22,69 +22,52 @@ static struct dev_hw_ops cn10k_hw_ops = {
.refill_pool_ptrs = cn10k_refill_pool_ptrs,
};
int cn10k_pf_lmtst_init(struct otx2_nic *pf)
int cn10k_lmtst_init(struct otx2_nic *pfvf)
{
int size, num_lines;
u64 base;
if (!test_bit(CN10K_LMTST, &pf->hw.cap_flag)) {
pf->hw_ops = &otx2_hw_ops;
struct lmtst_tbl_setup_req *req;
int qcount, err;
if (!test_bit(CN10K_LMTST, &pfvf->hw.cap_flag)) {
pfvf->hw_ops = &otx2_hw_ops;
return 0;
}
pf->hw_ops = &cn10k_hw_ops;
base = pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM) +
(MBOX_SIZE * (pf->total_vfs + 1));
pfvf->hw_ops = &cn10k_hw_ops;
qcount = pfvf->hw.max_queues;
/* LMTST lines allocation
* qcount = num_online_cpus();
* NPA = TX + RX + XDP.
* NIX = TX * 32 (For Burst SQE flush).
*/
pfvf->tot_lmt_lines = (qcount * 3) + (qcount * 32);
pfvf->npa_lmt_lines = qcount * 3;
pfvf->nix_lmt_size = LMT_BURST_SIZE * LMT_LINE_SIZE;
size = pci_resource_len(pf->pdev, PCI_MBOX_BAR_NUM) -
(MBOX_SIZE * (pf->total_vfs + 1));
pf->hw.lmt_base = ioremap(base, size);
if (!pf->hw.lmt_base) {
dev_err(pf->dev, "Unable to map PF LMTST region\n");
mutex_lock(&pfvf->mbox.lock);
req = otx2_mbox_alloc_msg_lmtst_tbl_setup(&pfvf->mbox);
if (!req) {
mutex_unlock(&pfvf->mbox.lock);
return -ENOMEM;
}
/* FIXME: Get the num of LMTST lines from LMT table */
pf->tot_lmt_lines = size / LMT_LINE_SIZE;
num_lines = (pf->tot_lmt_lines - NIX_LMTID_BASE) /
pf->hw.tx_queues;
/* Number of LMT lines per SQ queues */
pf->nix_lmt_lines = num_lines > 32 ? 32 : num_lines;
req->use_local_lmt_region = true;
err = qmem_alloc(pfvf->dev, &pfvf->dync_lmt, pfvf->tot_lmt_lines,
LMT_LINE_SIZE);
if (err) {
mutex_unlock(&pfvf->mbox.lock);
return err;
}
pfvf->hw.lmt_base = (u64 *)pfvf->dync_lmt->base;
req->lmt_iova = (u64)pfvf->dync_lmt->iova;
err = otx2_sync_mbox_msg(&pfvf->mbox);
mutex_unlock(&pfvf->mbox.lock);
pf->nix_lmt_size = pf->nix_lmt_lines * LMT_LINE_SIZE;
return 0;
}
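The allocation above budgets, per queue, 3 LMT lines for NPA (TX + RX + XDP) and LMT_BURST_SIZE lines for NIX burst SQE flush. A small arithmetic sketch (queue count illustrative):

/* Sketch of the LMT line budgeting; the qcount value is made up. */
#include <stdio.h>

#define LMT_LINE_SIZE 128
#define LMT_BURST_SIZE 32

int main(void)
{
	int qcount = 8;
	int npa_lines = qcount * 3;			/* TX + RX + XDP */
	int nix_lines = qcount * LMT_BURST_SIZE;	/* burst SQE flush */
	int tot_lines = npa_lines + nix_lines;

	printf("npa=%d nix=%d total=%d (%d bytes)\n", npa_lines, nix_lines,
	       tot_lines, tot_lines * LMT_LINE_SIZE);
	return 0;
}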
int cn10k_vf_lmtst_init(struct otx2_nic *vf)
{
int size, num_lines;
if (!test_bit(CN10K_LMTST, &vf->hw.cap_flag)) {
vf->hw_ops = &otx2_hw_ops;
return 0;
}
vf->hw_ops = &cn10k_hw_ops;
size = pci_resource_len(vf->pdev, PCI_MBOX_BAR_NUM);
vf->hw.lmt_base = ioremap_wc(pci_resource_start(vf->pdev,
PCI_MBOX_BAR_NUM),
size);
if (!vf->hw.lmt_base) {
dev_err(vf->dev, "Unable to map VF LMTST region\n");
return -ENOMEM;
}
vf->tot_lmt_lines = size / LMT_LINE_SIZE;
/* LMTST lines per SQ */
num_lines = (vf->tot_lmt_lines - NIX_LMTID_BASE) /
vf->hw.tx_queues;
vf->nix_lmt_lines = num_lines > 32 ? 32 : num_lines;
vf->nix_lmt_size = vf->nix_lmt_lines * LMT_LINE_SIZE;
return 0;
}
EXPORT_SYMBOL(cn10k_vf_lmtst_init);
EXPORT_SYMBOL(cn10k_lmtst_init);
int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
{
@ -93,9 +76,11 @@ int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
struct otx2_snd_queue *sq;
sq = &pfvf->qset.sq[qidx];
sq->lmt_addr = (__force u64 *)((u64)pfvf->hw.nix_lmt_base +
sq->lmt_addr = (u64 *)((u64)pfvf->hw.nix_lmt_base +
(qidx * pfvf->nix_lmt_size));
sq->lmt_id = pfvf->npa_lmt_lines + (qidx * LMT_BURST_SIZE);
/* Get memory to put this msg */
aq = otx2_mbox_alloc_msg_nix_cn10k_aq_enq(&pfvf->mbox);
if (!aq)
@ -158,15 +143,13 @@ void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx)
{
struct otx2_nic *pfvf = dev;
int lmt_id = NIX_LMTID_BASE + (qidx * pfvf->nix_lmt_lines);
u64 val = 0, tar_addr = 0;
/* FIXME: val[0:10] LMT_ID.
* [12:15] no of LMTST - 1 in the burst.
* [19:63] data size of each LMTST in the burst except first.
*/
val = (lmt_id & 0x7FF);
val = (sq->lmt_id & 0x7FF);
/* Target address for LMTST flush tells HW how many 128bit
* words are present.
* tar_addr[6:4] size of first LMTST - 1 in units of 128b.


@ -12,8 +12,7 @@
void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx);
int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura);
int cn10k_pf_lmtst_init(struct otx2_nic *pf);
int cn10k_vf_lmtst_init(struct otx2_nic *vf);
int cn10k_lmtst_init(struct otx2_nic *pfvf);
int cn10k_free_all_ipolicers(struct otx2_nic *pfvf);
int cn10k_alloc_matchall_ipolicer(struct otx2_nic *pfvf);
int cn10k_free_matchall_ipolicer(struct otx2_nic *pfvf);

View File

@ -210,6 +210,9 @@ int otx2_set_mac_address(struct net_device *netdev, void *p)
/* update dmac field in vlan offload rule */
if (pfvf->flags & OTX2_FLAG_RX_VLAN_SUPPORT)
otx2_install_rxvlan_offload_flow(pfvf);
/* update dmac address in ntuple and DMAC filter list */
if (pfvf->flags & OTX2_FLAG_DMACFLTR_SUPPORT)
otx2_dmacflt_update_pfmac_flow(pfvf);
} else {
return -EPERM;
}


@ -218,8 +218,8 @@ struct otx2_hw {
unsigned long cap_flag;
#define LMT_LINE_SIZE 128
#define NIX_LMTID_BASE 72 /* RX + TX + XDP */
void __iomem *lmt_base;
#define LMT_BURST_SIZE 32 /* 32 LMTST lines for burst SQE flush */
u64 *lmt_base;
u64 *npa_lmt_base;
u64 *nix_lmt_base;
};
@ -288,6 +288,9 @@ struct otx2_flow_config {
u16 tc_flower_offset;
u16 ntuple_max_flows;
u16 tc_max_flows;
u8 dmacflt_max_flows;
u8 *bmap_to_dmacindex;
unsigned long dmacflt_bmap;
struct list_head flow_list;
};
@ -329,6 +332,7 @@ struct otx2_nic {
#define OTX2_FLAG_TC_FLOWER_SUPPORT BIT_ULL(11)
#define OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED BIT_ULL(12)
#define OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED BIT_ULL(13)
#define OTX2_FLAG_DMACFLTR_SUPPORT BIT_ULL(14)
u64 flags;
struct otx2_qset qset;
@ -363,8 +367,9 @@ struct otx2_nic {
/* Block address of NIX either BLKADDR_NIX0 or BLKADDR_NIX1 */
int nix_blkaddr;
/* LMTST Lines info */
struct qmem *dync_lmt;
u16 tot_lmt_lines;
u16 nix_lmt_lines;
u16 npa_lmt_lines;
u32 nix_lmt_size;
struct otx2_ptp *ptp;
@ -833,4 +838,11 @@ int otx2_init_tc(struct otx2_nic *nic);
void otx2_shutdown_tc(struct otx2_nic *nic);
int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type,
void *type_data);
/* CGX/RPM DMAC filters support */
int otx2_dmacflt_get_max_cnt(struct otx2_nic *pf);
int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u8 bit_pos);
int otx2_dmacflt_remove(struct otx2_nic *pf, const u8 *mac, u8 bit_pos);
int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u8 bit_pos);
void otx2_dmacflt_reinstall_flows(struct otx2_nic *pf);
void otx2_dmacflt_update_pfmac_flow(struct otx2_nic *pfvf);
#endif /* OTX2_COMMON_H */


@ -0,0 +1,173 @@
// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Physical Function ethernet driver
*
* Copyright (C) 2021 Marvell.
*/
#include "otx2_common.h"
static int otx2_dmacflt_do_add(struct otx2_nic *pf, const u8 *mac,
u8 *dmac_index)
{
struct cgx_mac_addr_add_req *req;
struct cgx_mac_addr_add_rsp *rsp;
int err;
mutex_lock(&pf->mbox.lock);
req = otx2_mbox_alloc_msg_cgx_mac_addr_add(&pf->mbox);
if (!req) {
mutex_unlock(&pf->mbox.lock);
return -ENOMEM;
}
ether_addr_copy(req->mac_addr, mac);
err = otx2_sync_mbox_msg(&pf->mbox);
if (!err) {
rsp = (struct cgx_mac_addr_add_rsp *)
otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr);
*dmac_index = rsp->index;
}
mutex_unlock(&pf->mbox.lock);
return err;
}
static int otx2_dmacflt_add_pfmac(struct otx2_nic *pf)
{
struct cgx_mac_addr_set_or_get *req;
int err;
mutex_lock(&pf->mbox.lock);
req = otx2_mbox_alloc_msg_cgx_mac_addr_set(&pf->mbox);
if (!req) {
mutex_unlock(&pf->mbox.lock);
return -ENOMEM;
}
ether_addr_copy(req->mac_addr, pf->netdev->dev_addr);
err = otx2_sync_mbox_msg(&pf->mbox);
mutex_unlock(&pf->mbox.lock);
return err;
}
int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u8 bit_pos)
{
u8 *dmacindex;
/* Store the dmac index returned by the CGX/RPM driver; it is
* used later for macaddr update/remove
*/
dmacindex = &pf->flow_cfg->bmap_to_dmacindex[bit_pos];
if (ether_addr_equal(mac, pf->netdev->dev_addr))
return otx2_dmacflt_add_pfmac(pf);
else
return otx2_dmacflt_do_add(pf, mac, dmacindex);
}
static int otx2_dmacflt_do_remove(struct otx2_nic *pfvf, const u8 *mac,
u8 dmac_index)
{
struct cgx_mac_addr_del_req *req;
int err;
mutex_lock(&pfvf->mbox.lock);
req = otx2_mbox_alloc_msg_cgx_mac_addr_del(&pfvf->mbox);
if (!req) {
mutex_unlock(&pfvf->mbox.lock);
return -ENOMEM;
}
req->index = dmac_index;
err = otx2_sync_mbox_msg(&pfvf->mbox);
mutex_unlock(&pfvf->mbox.lock);
return err;
}
static int otx2_dmacflt_remove_pfmac(struct otx2_nic *pf)
{
struct msg_req *req;
int err;
mutex_lock(&pf->mbox.lock);
req = otx2_mbox_alloc_msg_cgx_mac_addr_reset(&pf->mbox);
if (!req) {
mutex_unlock(&pf->mbox.lock);
return -ENOMEM;
}
err = otx2_sync_mbox_msg(&pf->mbox);
mutex_unlock(&pf->mbox.lock);
return err;
}
int otx2_dmacflt_remove(struct otx2_nic *pf, const u8 *mac,
u8 bit_pos)
{
u8 dmacindex = pf->flow_cfg->bmap_to_dmacindex[bit_pos];
if (ether_addr_equal(mac, pf->netdev->dev_addr))
return otx2_dmacflt_remove_pfmac(pf);
else
return otx2_dmacflt_do_remove(pf, mac, dmacindex);
}
/* CGX/RPM blocks support at most 32 unicast entries.
* In a typical configuration the MAC block is associated
* with 4 lmacs, so each lmac gets 8 dmac entries
*/
int otx2_dmacflt_get_max_cnt(struct otx2_nic *pf)
{
struct cgx_max_dmac_entries_get_rsp *rsp;
struct msg_req *msg;
int err;
mutex_lock(&pf->mbox.lock);
msg = otx2_mbox_alloc_msg_cgx_mac_max_entries_get(&pf->mbox);
if (!msg) {
mutex_unlock(&pf->mbox.lock);
return -ENOMEM;
}
err = otx2_sync_mbox_msg(&pf->mbox);
if (err)
goto out;
rsp = (struct cgx_max_dmac_entries_get_rsp *)
otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &msg->hdr);
pf->flow_cfg->dmacflt_max_flows = rsp->max_dmac_filters;
out:
mutex_unlock(&pf->mbox.lock);
return err;
}
int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u8 bit_pos)
{
struct cgx_mac_addr_update_req *req;
int rc;
mutex_lock(&pf->mbox.lock);
req = otx2_mbox_alloc_msg_cgx_mac_addr_update(&pf->mbox);
if (!req) {
mutex_unlock(&pf->mbox.lock);
return -ENOMEM;
}
ether_addr_copy(req->mac_addr, mac);
req->index = pf->flow_cfg->bmap_to_dmacindex[bit_pos];
rc = otx2_sync_mbox_msg(&pf->mbox);
mutex_unlock(&pf->mbox.lock);
return rc;
}


@ -18,6 +18,12 @@ struct otx2_flow {
bool is_vf;
u8 rss_ctx_id;
int vf;
bool dmac_filter;
};
enum dmac_req {
DMAC_ADDR_UPDATE,
DMAC_ADDR_DEL
};
static void otx2_clear_ntuple_flow_info(struct otx2_nic *pfvf, struct otx2_flow_config *flow_cfg)
@ -219,6 +225,22 @@ int otx2_mcam_flow_init(struct otx2_nic *pf)
if (!pf->mac_table)
return -ENOMEM;
otx2_dmacflt_get_max_cnt(pf);
/* DMAC filters are not allocated */
if (!pf->flow_cfg->dmacflt_max_flows)
return 0;
pf->flow_cfg->bmap_to_dmacindex =
devm_kzalloc(pf->dev, sizeof(u8) *
pf->flow_cfg->dmacflt_max_flows,
GFP_KERNEL);
if (!pf->flow_cfg->bmap_to_dmacindex)
return -ENOMEM;
pf->flags |= OTX2_FLAG_DMACFLTR_SUPPORT;
return 0;
}
@ -280,6 +302,12 @@ int otx2_add_macfilter(struct net_device *netdev, const u8 *mac)
{
struct otx2_nic *pf = netdev_priv(netdev);
if (bitmap_weight(&pf->flow_cfg->dmacflt_bmap,
pf->flow_cfg->dmacflt_max_flows))
netdev_warn(netdev,
"Add %pM to CGX/RPM DMAC filters list as well\n",
mac);
return otx2_do_add_macfilter(pf, mac);
}
@ -351,12 +379,22 @@ static void otx2_add_flow_to_list(struct otx2_nic *pfvf, struct otx2_flow *flow)
list_add(&flow->list, head);
}
static int otx2_get_maxflows(struct otx2_flow_config *flow_cfg)
{
if (flow_cfg->nr_flows == flow_cfg->ntuple_max_flows ||
bitmap_weight(&flow_cfg->dmacflt_bmap,
flow_cfg->dmacflt_max_flows))
return flow_cfg->ntuple_max_flows + flow_cfg->dmacflt_max_flows;
else
return flow_cfg->ntuple_max_flows;
}
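otx2_get_maxflows() exposes a combined location space once any DMAC filter exists: ntuple rules sit at [0, ntuple_max) and DMAC rules are stacked after them. A small sketch of that split (helper name hypothetical):

/* Sketch of the combined ethtool location space implied above. */
#include <stdbool.h>
#include <stdio.h>

static bool is_dmac_location(unsigned int loc, unsigned int ntuple_max,
			     unsigned int dmacflt_max)
{
	return loc >= ntuple_max && loc < ntuple_max + dmacflt_max;
}

int main(void)
{
	/* e.g. 64 ntuple slots followed by 8 DMAC slots */
	printf("loc 3: %d, loc 66: %d\n",
	       is_dmac_location(3, 64, 8), is_dmac_location(66, 64, 8));
	return 0;
}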
int otx2_get_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
u32 location)
{
struct otx2_flow *iter;
if (location >= pfvf->flow_cfg->ntuple_max_flows)
if (location >= otx2_get_maxflows(pfvf->flow_cfg))
return -EINVAL;
list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
@ -378,7 +416,7 @@ int otx2_get_all_flows(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
int idx = 0;
int err = 0;
nfc->data = pfvf->flow_cfg->ntuple_max_flows;
nfc->data = otx2_get_maxflows(pfvf->flow_cfg);
while ((!err || err == -ENOENT) && idx < rule_cnt) {
err = otx2_get_flow(pfvf, nfc, location);
if (!err)
@ -760,6 +798,32 @@ int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp,
return 0;
}
static int otx2_is_flow_rule_dmacfilter(struct otx2_nic *pfvf,
struct ethtool_rx_flow_spec *fsp)
{
struct ethhdr *eth_mask = &fsp->m_u.ether_spec;
struct ethhdr *eth_hdr = &fsp->h_u.ether_spec;
u64 ring_cookie = fsp->ring_cookie;
u32 flow_type;
if (!(pfvf->flags & OTX2_FLAG_DMACFLTR_SUPPORT))
return false;
flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
/* CGX/RPM block dmac filtering is configured for white listing;
* check for an action other than DROP
*/
if (flow_type == ETHER_FLOW && ring_cookie != RX_CLS_FLOW_DISC &&
!ethtool_get_flow_spec_ring_vf(ring_cookie)) {
if (is_zero_ether_addr(eth_mask->h_dest) &&
is_valid_ether_addr(eth_hdr->h_dest))
return true;
}
return false;
}
static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow)
{
u64 ring_cookie = flow->flow_spec.ring_cookie;
@ -818,14 +882,46 @@ static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow)
return err;
}
static int otx2_add_flow_with_pfmac(struct otx2_nic *pfvf,
struct otx2_flow *flow)
{
struct otx2_flow *pf_mac;
struct ethhdr *eth_hdr;
pf_mac = kzalloc(sizeof(*pf_mac), GFP_KERNEL);
if (!pf_mac)
return -ENOMEM;
pf_mac->entry = 0;
pf_mac->dmac_filter = true;
pf_mac->location = pfvf->flow_cfg->ntuple_max_flows;
memcpy(&pf_mac->flow_spec, &flow->flow_spec,
sizeof(struct ethtool_rx_flow_spec));
pf_mac->flow_spec.location = pf_mac->location;
/* Copy PF mac address */
eth_hdr = &pf_mac->flow_spec.h_u.ether_spec;
ether_addr_copy(eth_hdr->h_dest, pfvf->netdev->dev_addr);
/* Install DMAC filter with PF mac address */
otx2_dmacflt_add(pfvf, eth_hdr->h_dest, 0);
otx2_add_flow_to_list(pfvf, pf_mac);
pfvf->flow_cfg->nr_flows++;
set_bit(0, &pfvf->flow_cfg->dmacflt_bmap);
return 0;
}
int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
{
struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
struct ethtool_rx_flow_spec *fsp = &nfc->fs;
struct otx2_flow *flow;
struct ethhdr *eth_hdr;
bool new = false;
int err = 0;
u32 ring;
int err;
ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT))
@ -834,16 +930,15 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
if (ring >= pfvf->hw.rx_queues && fsp->ring_cookie != RX_CLS_FLOW_DISC)
return -EINVAL;
if (fsp->location >= flow_cfg->ntuple_max_flows)
if (fsp->location >= otx2_get_maxflows(flow_cfg))
return -EINVAL;
flow = otx2_find_flow(pfvf, fsp->location);
if (!flow) {
flow = kzalloc(sizeof(*flow), GFP_ATOMIC);
flow = kzalloc(sizeof(*flow), GFP_KERNEL);
if (!flow)
return -ENOMEM;
flow->location = fsp->location;
flow->entry = flow_cfg->flow_ent[flow->location];
new = true;
}
/* struct copy */
@ -852,7 +947,54 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
if (fsp->flow_type & FLOW_RSS)
flow->rss_ctx_id = nfc->rss_context;
err = otx2_add_flow_msg(pfvf, flow);
if (otx2_is_flow_rule_dmacfilter(pfvf, &flow->flow_spec)) {
eth_hdr = &flow->flow_spec.h_u.ether_spec;
/* Sync dmac filter table with updated fields */
if (flow->dmac_filter)
return otx2_dmacflt_update(pfvf, eth_hdr->h_dest,
flow->entry);
if (bitmap_full(&flow_cfg->dmacflt_bmap,
flow_cfg->dmacflt_max_flows)) {
netdev_warn(pfvf->netdev,
"Can't insert the rule %d as max allowed dmac filters are %d\n",
flow->location +
flow_cfg->dmacflt_max_flows,
flow_cfg->dmacflt_max_flows);
err = -EINVAL;
if (new)
kfree(flow);
return err;
}
/* Install PF mac address to DMAC filter list */
if (!test_bit(0, &flow_cfg->dmacflt_bmap))
otx2_add_flow_with_pfmac(pfvf, flow);
flow->dmac_filter = true;
flow->entry = find_first_zero_bit(&flow_cfg->dmacflt_bmap,
flow_cfg->dmacflt_max_flows);
fsp->location = flow_cfg->ntuple_max_flows + flow->entry;
flow->flow_spec.location = fsp->location;
flow->location = fsp->location;
set_bit(flow->entry, &flow_cfg->dmacflt_bmap);
otx2_dmacflt_add(pfvf, eth_hdr->h_dest, flow->entry);
} else {
if (flow->location >= pfvf->flow_cfg->ntuple_max_flows) {
netdev_warn(pfvf->netdev,
"Can't insert non dmac ntuple rule at %d, allowed range %d-0\n",
flow->location,
flow_cfg->ntuple_max_flows - 1);
err = -EINVAL;
} else {
flow->entry = flow_cfg->flow_ent[flow->location];
err = otx2_add_flow_msg(pfvf, flow);
}
}
if (err) {
if (new)
kfree(flow);
@ -890,20 +1032,70 @@ static int otx2_remove_flow_msg(struct otx2_nic *pfvf, u16 entry, bool all)
return err;
}
static void otx2_update_rem_pfmac(struct otx2_nic *pfvf, int req)
{
struct otx2_flow *iter;
struct ethhdr *eth_hdr;
bool found = false;
list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
if (iter->dmac_filter && iter->entry == 0) {
eth_hdr = &iter->flow_spec.h_u.ether_spec;
if (req == DMAC_ADDR_DEL) {
otx2_dmacflt_remove(pfvf, eth_hdr->h_dest,
0);
clear_bit(0, &pfvf->flow_cfg->dmacflt_bmap);
found = true;
} else {
ether_addr_copy(eth_hdr->h_dest,
pfvf->netdev->dev_addr);
otx2_dmacflt_update(pfvf, eth_hdr->h_dest, 0);
}
break;
}
}
if (found) {
list_del(&iter->list);
kfree(iter);
pfvf->flow_cfg->nr_flows--;
}
}
int otx2_remove_flow(struct otx2_nic *pfvf, u32 location)
{
struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
struct otx2_flow *flow;
int err;
if (location >= flow_cfg->ntuple_max_flows)
if (location >= otx2_get_maxflows(flow_cfg))
return -EINVAL;
flow = otx2_find_flow(pfvf, location);
if (!flow)
return -ENOENT;
err = otx2_remove_flow_msg(pfvf, flow->entry, false);
if (flow->dmac_filter) {
struct ethhdr *eth_hdr = &flow->flow_spec.h_u.ether_spec;
/* user not allowed to remove dmac filter with interface mac */
if (ether_addr_equal(pfvf->netdev->dev_addr, eth_hdr->h_dest))
return -EPERM;
err = otx2_dmacflt_remove(pfvf, eth_hdr->h_dest,
flow->entry);
clear_bit(flow->entry, &flow_cfg->dmacflt_bmap);
/* If all dmac filters have been removed, delete the macfilter
* for the interface mac address and configure the CGX/RPM block
* in promiscuous mode
*/
if (bitmap_weight(&flow_cfg->dmacflt_bmap,
flow_cfg->dmacflt_max_flows) == 1)
otx2_update_rem_pfmac(pfvf, DMAC_ADDR_DEL);
} else {
err = otx2_remove_flow_msg(pfvf, flow->entry, false);
}
if (err)
return err;
@ -1100,3 +1292,22 @@ int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable)
mutex_unlock(&pf->mbox.lock);
return rsp_hdr->rc;
}
void otx2_dmacflt_reinstall_flows(struct otx2_nic *pf)
{
struct otx2_flow *iter;
struct ethhdr *eth_hdr;
list_for_each_entry(iter, &pf->flow_cfg->flow_list, list) {
if (iter->dmac_filter) {
eth_hdr = &iter->flow_spec.h_u.ether_spec;
otx2_dmacflt_add(pf, eth_hdr->h_dest,
iter->entry);
}
}
}
void otx2_dmacflt_update_pfmac_flow(struct otx2_nic *pfvf)
{
otx2_update_rem_pfmac(pfvf, DMAC_ADDR_UPDATE);
}


@ -1110,6 +1110,11 @@ static int otx2_cgx_config_loopback(struct otx2_nic *pf, bool enable)
struct msg_req *msg;
int err;
if (enable && bitmap_weight(&pf->flow_cfg->dmacflt_bmap,
pf->flow_cfg->dmacflt_max_flows))
netdev_warn(pf->netdev,
"CGX/RPM internal loopback might not work as DMAC filters are active\n");
mutex_lock(&pf->mbox.lock);
if (enable)
msg = otx2_mbox_alloc_msg_cgx_intlbk_enable(&pf->mbox);
@ -1533,10 +1538,10 @@ int otx2_open(struct net_device *netdev)
if (test_bit(CN10K_LMTST, &pf->hw.cap_flag)) {
/* Reserve LMT lines for NPA AURA batch free */
pf->hw.npa_lmt_base = (__force u64 *)pf->hw.lmt_base;
pf->hw.npa_lmt_base = pf->hw.lmt_base;
/* Reserve LMT lines for NIX TX */
pf->hw.nix_lmt_base = (__force u64 *)((u64)pf->hw.npa_lmt_base +
(NIX_LMTID_BASE * LMT_LINE_SIZE));
pf->hw.nix_lmt_base = (u64 *)((u64)pf->hw.npa_lmt_base +
(pf->npa_lmt_lines * LMT_LINE_SIZE));
}
err = otx2_init_hw_resources(pf);
@ -1644,6 +1649,10 @@ int otx2_open(struct net_device *netdev)
/* Restore pause frame settings */
otx2_config_pause_frm(pf);
/* Install DMAC Filters */
if (pf->flags & OTX2_FLAG_DMACFLTR_SUPPORT)
otx2_dmacflt_reinstall_flows(pf);
err = otx2_rxtx_enable(pf, true);
if (err)
goto err_tx_stop_queues;
@ -2526,7 +2535,7 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (err)
goto err_detach_rsrc;
err = cn10k_pf_lmtst_init(pf);
err = cn10k_lmtst_init(pf);
if (err)
goto err_detach_rsrc;
@ -2630,8 +2639,8 @@ err_del_mcam_entries:
err_ptp_destroy:
otx2_ptp_destroy(pf);
err_detach_rsrc:
if (hw->lmt_base)
iounmap(hw->lmt_base);
if (test_bit(CN10K_LMTST, &pf->hw.cap_flag))
qmem_free(pf->dev, pf->dync_lmt);
otx2_detach_resources(&pf->mbox);
err_disable_mbox_intr:
otx2_disable_mbox_intr(pf);
@ -2772,9 +2781,8 @@ static void otx2_remove(struct pci_dev *pdev)
otx2_mcam_flow_del(pf);
otx2_shutdown_tc(pf);
otx2_detach_resources(&pf->mbox);
if (pf->hw.lmt_base)
iounmap(pf->hw.lmt_base);
if (test_bit(CN10K_LMTST, &pf->hw.cap_flag))
qmem_free(pf->dev, pf->dync_lmt);
otx2_disable_mbox_intr(pf);
otx2_pfaf_mbox_destroy(pf);
pci_free_irq_vectors(pf->pdev);


@ -288,7 +288,7 @@ static int otx2_tc_parse_actions(struct otx2_nic *nic,
struct otx2_nic *priv;
u32 burst, mark = 0;
u8 nr_police = 0;
bool pps;
bool pps = false;
u64 rate;
int i;


@ -83,6 +83,7 @@ struct otx2_snd_queue {
u16 num_sqbs;
u16 sqe_thresh;
u8 sqe_per_sqb;
u32 lmt_id;
u64 io_addr;
u64 *aura_fc_addr;
u64 *lmt_addr;


@ -609,7 +609,7 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (err)
goto err_detach_rsrc;
err = cn10k_vf_lmtst_init(vf);
err = cn10k_lmtst_init(vf);
if (err)
goto err_detach_rsrc;
@ -667,8 +667,8 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err_unreg_netdev:
unregister_netdev(netdev);
err_detach_rsrc:
if (hw->lmt_base)
iounmap(hw->lmt_base);
if (test_bit(CN10K_LMTST, &vf->hw.cap_flag))
qmem_free(vf->dev, vf->dync_lmt);
otx2_detach_resources(&vf->mbox);
err_disable_mbox_intr:
otx2vf_disable_mbox_intr(vf);
@ -700,10 +700,8 @@ static void otx2vf_remove(struct pci_dev *pdev)
destroy_workqueue(vf->otx2_wq);
otx2vf_disable_mbox_intr(vf);
otx2_detach_resources(&vf->mbox);
if (vf->hw.lmt_base)
iounmap(vf->hw.lmt_base);
if (test_bit(CN10K_LMTST, &vf->hw.cap_flag))
qmem_free(vf->dev, vf->dync_lmt);
otx2vf_vfaf_mbox_destroy(vf);
pci_free_irq_vectors(vf->pdev);
pci_set_drvdata(pdev, NULL);


@ -2,6 +2,7 @@ config SPARX5_SWITCH
tristate "Sparx5 switch driver"
depends on NET_SWITCHDEV
depends on HAS_IOMEM
depends on OF
select PHYLINK
select PHY_SPARX5_SERDES
select RESET_CONTROLLER


@ -540,10 +540,8 @@ static int moxart_mac_probe(struct platform_device *pdev)
SET_NETDEV_DEV(ndev, &pdev->dev);
ret = register_netdev(ndev);
if (ret) {
free_netdev(ndev);
if (ret)
goto init_fail;
}
netdev_dbg(ndev, "%s: IRQ=%d address=%pM\n",
__func__, ndev->irq, ndev->dev_addr);


@ -1298,6 +1298,7 @@ static int ocelot_netdevice_lag_leave(struct net_device *dev,
}
static int ocelot_netdevice_changeupper(struct net_device *dev,
struct net_device *brport_dev,
struct netdev_notifier_changeupper_info *info)
{
struct netlink_ext_ack *extack;
@ -1307,11 +1308,11 @@ static int ocelot_netdevice_changeupper(struct net_device *dev,
if (netif_is_bridge_master(info->upper_dev)) {
if (info->linking)
err = ocelot_netdevice_bridge_join(dev, dev,
err = ocelot_netdevice_bridge_join(dev, brport_dev,
info->upper_dev,
extack);
else
err = ocelot_netdevice_bridge_leave(dev, dev,
err = ocelot_netdevice_bridge_leave(dev, brport_dev,
info->upper_dev);
}
if (netif_is_lag_master(info->upper_dev)) {
@ -1346,7 +1347,7 @@ ocelot_netdevice_lag_changeupper(struct net_device *dev,
if (ocelot_port->bond != dev)
return NOTIFY_OK;
err = ocelot_netdevice_changeupper(lower, info);
err = ocelot_netdevice_changeupper(lower, dev, info);
if (err)
return notifier_from_errno(err);
}
@ -1385,7 +1386,7 @@ static int ocelot_netdevice_event(struct notifier_block *unused,
struct netdev_notifier_changeupper_info *info = ptr;
if (ocelot_netdevice_dev_check(dev))
return ocelot_netdevice_changeupper(dev, info);
return ocelot_netdevice_changeupper(dev, dev, info);
if (netif_is_lag_master(dev))
return ocelot_netdevice_lag_changeupper(dev, info);


@ -1141,20 +1141,7 @@ int nfp_fl_ct_del_flow(struct nfp_fl_ct_map_entry *ct_map_ent)
nfp_fl_ct_clean_flow_entry(ct_entry);
kfree(ct_map_ent);
/* If this is the last pre_ct_rule it means that it is
* very likely that the nft table will be cleaned up next,
* as this happens on the removal of the last act_ct flow.
* However we cannot deregister the callback on the removal
* of the last nft flow as this runs into a deadlock situation.
* So deregister the callback on removal of the last pre_ct flow
* and remove any remaining nft flow entries. We also cannot
* save this state and delete the callback later since the
* nft table would already have been freed at that time.
*/
if (!zt->pre_ct_count) {
nf_flow_table_offload_del_cb(zt->nft,
nfp_fl_ct_handle_nft_flow,
zt);
zt->nft = NULL;
nfp_fl_ct_clean_nft_entries(zt);
}


@ -735,12 +735,13 @@ static int emac_remove(struct platform_device *pdev)
put_device(&adpt->phydev->mdio.dev);
mdiobus_unregister(adpt->mii_bus);
free_netdev(netdev);
if (adpt->phy.digital)
iounmap(adpt->phy.digital);
iounmap(adpt->phy.base);
free_netdev(netdev);
return 0;
}


@ -152,6 +152,7 @@ static int efx_allocate_msix_channels(struct efx_nic *efx,
* maximum size.
*/
tx_per_ev = EFX_MAX_EVQ_SIZE / EFX_TXQ_MAX_ENT(efx);
tx_per_ev = min(tx_per_ev, EFX_MAX_TXQ_PER_CHANNEL);
n_xdp_tx = num_possible_cpus();
n_xdp_ev = DIV_ROUND_UP(n_xdp_tx, tx_per_ev);
@ -169,6 +170,8 @@ static int efx_allocate_msix_channels(struct efx_nic *efx,
netif_err(efx, drv, efx->net_dev,
"Insufficient resources for %d XDP event queues (%d other channels, max %d)\n",
n_xdp_ev, n_channels, max_channels);
netif_err(efx, drv, efx->net_dev,
"XDP_TX and XDP_REDIRECT will not work on this interface");
efx->n_xdp_channels = 0;
efx->xdp_tx_per_channel = 0;
efx->xdp_tx_queue_count = 0;
@ -176,12 +179,14 @@ static int efx_allocate_msix_channels(struct efx_nic *efx,
netif_err(efx, drv, efx->net_dev,
"Insufficient resources for %d XDP TX queues (%d other channels, max VIs %d)\n",
n_xdp_tx, n_channels, efx->max_vis);
netif_err(efx, drv, efx->net_dev,
"XDP_TX and XDP_REDIRECT will not work on this interface");
efx->n_xdp_channels = 0;
efx->xdp_tx_per_channel = 0;
efx->xdp_tx_queue_count = 0;
} else {
efx->n_xdp_channels = n_xdp_ev;
efx->xdp_tx_per_channel = EFX_MAX_TXQ_PER_CHANNEL;
efx->xdp_tx_per_channel = tx_per_ev;
efx->xdp_tx_queue_count = n_xdp_tx;
n_channels += n_xdp_ev;
netif_dbg(efx, drv, efx->net_dev,
@ -891,18 +896,20 @@ int efx_set_channels(struct efx_nic *efx)
if (efx_channel_is_xdp_tx(channel)) {
efx_for_each_channel_tx_queue(tx_queue, channel) {
tx_queue->queue = next_queue++;
netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is XDP %u, HW %u\n",
channel->channel, tx_queue->label,
xdp_queue_number, tx_queue->queue);
/* We may have a few left-over XDP TX
* queues owing to xdp_tx_queue_count
* not dividing evenly by EFX_MAX_TXQ_PER_CHANNEL.
* We still allocate and probe those
* TXQs, but never use them.
*/
if (xdp_queue_number < efx->xdp_tx_queue_count)
if (xdp_queue_number < efx->xdp_tx_queue_count) {
netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is XDP %u, HW %u\n",
channel->channel, tx_queue->label,
xdp_queue_number, tx_queue->queue);
efx->xdp_tx_queues[xdp_queue_number] = tx_queue;
xdp_queue_number++;
xdp_queue_number++;
}
}
} else {
efx_for_each_channel_tx_queue(tx_queue, channel) {
@ -914,8 +921,7 @@ int efx_set_channels(struct efx_nic *efx)
}
}
}
if (xdp_queue_number)
efx->xdp_tx_queue_count = xdp_queue_number;
WARN_ON(xdp_queue_number != efx->xdp_tx_queue_count);
rc = netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
if (rc)


@ -49,9 +49,9 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
{
struct plat_stmmacenet_data *plat;
struct stmmac_resources res;
bool mdio = false;
int ret, i;
struct device_node *np;
int ret, i, phy_mode;
bool mdio = false;
np = dev_of_node(&pdev->dev);
@ -108,10 +108,11 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
if (plat->bus_id < 0)
plat->bus_id = pci_dev_id(pdev);
plat->phy_interface = device_get_phy_mode(&pdev->dev);
if (plat->phy_interface < 0)
phy_mode = device_get_phy_mode(&pdev->dev);
if (phy_mode < 0)
dev_err(&pdev->dev, "phy_mode not found\n");
plat->phy_interface = phy_mode;
plat->interface = PHY_INTERFACE_MODE_GMII;
pci_set_master(pdev);


@ -349,6 +349,9 @@ void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue);
void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue);
void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue);
int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags);
struct timespec64 stmmac_calc_tas_basetime(ktime_t old_base_time,
ktime_t current_time,
u64 cycle_time);
#if IS_ENABLED(CONFIG_STMMAC_SELFTESTS)
void stmmac_selftest_run(struct net_device *dev,

View File

@ -7171,6 +7171,7 @@ int stmmac_suspend(struct device *dev)
priv->plat->rx_queues_to_use, false);
stmmac_fpe_handshake(priv, false);
stmmac_fpe_stop_wq(priv);
}
priv->speed = SPEED_UNKNOWN;


@ -397,6 +397,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
struct device_node *np = pdev->dev.of_node;
struct plat_stmmacenet_data *plat;
struct stmmac_dma_cfg *dma_cfg;
int phy_mode;
void *ret;
int rc;
@ -412,10 +413,11 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
eth_zero_addr(mac);
}
plat->phy_interface = device_get_phy_mode(&pdev->dev);
if (plat->phy_interface < 0)
return ERR_PTR(plat->phy_interface);
phy_mode = device_get_phy_mode(&pdev->dev);
if (phy_mode < 0)
return ERR_PTR(phy_mode);
plat->phy_interface = phy_mode;
plat->interface = stmmac_of_get_mac_mode(np);
if (plat->interface < 0)
plat->interface = plat->phy_interface;


@ -62,7 +62,8 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
u32 sec, nsec;
u32 quotient, reminder;
int neg_adj = 0;
bool xmac;
bool xmac, est_rst = false;
int ret;
xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
@ -75,10 +76,48 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
sec = quotient;
nsec = reminder;
/* If EST is enabled, disable it before adjusting the ptp time. */
if (priv->plat->est && priv->plat->est->enable) {
est_rst = true;
mutex_lock(&priv->plat->est->lock);
priv->plat->est->enable = false;
stmmac_est_configure(priv, priv->ioaddr, priv->plat->est,
priv->plat->clk_ptp_rate);
mutex_unlock(&priv->plat->est->lock);
}
spin_lock_irqsave(&priv->ptp_lock, flags);
stmmac_adjust_systime(priv, priv->ptpaddr, sec, nsec, neg_adj, xmac);
spin_unlock_irqrestore(&priv->ptp_lock, flags);
/* Calculate the new basetime and re-configure EST after the PTP time adjustment. */
if (est_rst) {
struct timespec64 current_time, time;
ktime_t current_time_ns, basetime;
u64 cycle_time;
mutex_lock(&priv->plat->est->lock);
priv->ptp_clock_ops.gettime64(&priv->ptp_clock_ops, &current_time);
current_time_ns = timespec64_to_ktime(current_time);
time.tv_nsec = priv->plat->est->btr_reserve[0];
time.tv_sec = priv->plat->est->btr_reserve[1];
basetime = timespec64_to_ktime(time);
cycle_time = priv->plat->est->ctr[1] * NSEC_PER_SEC +
priv->plat->est->ctr[0];
time = stmmac_calc_tas_basetime(basetime,
current_time_ns,
cycle_time);
priv->plat->est->btr[0] = (u32)time.tv_nsec;
priv->plat->est->btr[1] = (u32)time.tv_sec;
priv->plat->est->enable = true;
ret = stmmac_est_configure(priv, priv->ioaddr, priv->plat->est,
priv->plat->clk_ptp_rate);
mutex_unlock(&priv->plat->est->lock);
if (ret)
netdev_err(priv->dev, "failed to configure EST\n");
}
return 0;
}


@ -711,12 +711,35 @@ static int tc_setup_cls(struct stmmac_priv *priv,
return ret;
}
struct timespec64 stmmac_calc_tas_basetime(ktime_t old_base_time,
ktime_t current_time,
u64 cycle_time)
{
struct timespec64 time;
if (ktime_after(old_base_time, current_time)) {
time = ktime_to_timespec64(old_base_time);
} else {
s64 n;
ktime_t base_time;
n = div64_s64(ktime_sub_ns(current_time, old_base_time),
cycle_time);
base_time = ktime_add_ns(old_base_time,
(n + 1) * cycle_time);
time = ktime_to_timespec64(base_time);
}
return time;
}
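stmmac_calc_tas_basetime() keeps a base time that is still in the future; otherwise it advances the base by whole cycles to the first slot strictly after the current time. A stand-alone check of that rule with plain nanosecond integers in place of ktime_t:

/* Sketch mirroring the rollover rule above; ktime_t replaced by int64_t. */
#include <stdint.h>
#include <stdio.h>

static int64_t calc_basetime(int64_t old_base, int64_t now, int64_t cycle)
{
	int64_t n;

	if (old_base > now)
		return old_base;	/* base time still in the future */
	n = (now - old_base) / cycle;
	return old_base + (n + 1) * cycle;	/* first slot after "now" */
}

int main(void)
{
	/* base 1000ns, now 5300ns, cycle 1000ns -> next slot at 6000ns */
	printf("%lld\n", (long long)calc_basetime(1000, 5300, 1000));
	return 0;
}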
static int tc_setup_taprio(struct stmmac_priv *priv,
struct tc_taprio_qopt_offload *qopt)
{
u32 size, wid = priv->dma_cap.estwid, dep = priv->dma_cap.estdep;
struct plat_stmmacenet_data *plat = priv->plat;
struct timespec64 time, current_time;
struct timespec64 time, current_time, qopt_time;
ktime_t current_time_ns;
bool fpe = false;
int i, ret = 0;
@ -773,14 +796,18 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
GFP_KERNEL);
if (!plat->est)
return -ENOMEM;
mutex_init(&priv->plat->est->lock);
} else {
memset(plat->est, 0, sizeof(*plat->est));
}
size = qopt->num_entries;
mutex_lock(&priv->plat->est->lock);
priv->plat->est->gcl_size = size;
priv->plat->est->enable = qopt->enable;
mutex_unlock(&priv->plat->est->lock);
for (i = 0; i < size; i++) {
s64 delta_ns = qopt->entries[i].interval;
@ -811,32 +838,28 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
priv->plat->est->gcl[i] = delta_ns | (gates << wid);
}
mutex_lock(&priv->plat->est->lock);
/* Adjust for real system time */
priv->ptp_clock_ops.gettime64(&priv->ptp_clock_ops, &current_time);
current_time_ns = timespec64_to_ktime(current_time);
if (ktime_after(qopt->base_time, current_time_ns)) {
time = ktime_to_timespec64(qopt->base_time);
} else {
ktime_t base_time;
s64 n;
n = div64_s64(ktime_sub_ns(current_time_ns, qopt->base_time),
qopt->cycle_time);
base_time = ktime_add_ns(qopt->base_time,
(n + 1) * qopt->cycle_time);
time = ktime_to_timespec64(base_time);
}
time = stmmac_calc_tas_basetime(qopt->base_time, current_time_ns,
qopt->cycle_time);
priv->plat->est->btr[0] = (u32)time.tv_nsec;
priv->plat->est->btr[1] = (u32)time.tv_sec;
qopt_time = ktime_to_timespec64(qopt->base_time);
priv->plat->est->btr_reserve[0] = (u32)qopt_time.tv_nsec;
priv->plat->est->btr_reserve[1] = (u32)qopt_time.tv_sec;
ctr = qopt->cycle_time;
priv->plat->est->ctr[0] = do_div(ctr, NSEC_PER_SEC);
priv->plat->est->ctr[1] = (u32)ctr;
if (fpe && !priv->dma_cap.fpesel)
if (fpe && !priv->dma_cap.fpesel) {
mutex_unlock(&priv->plat->est->lock);
return -EOPNOTSUPP;
}
/* Actual FPE register configuration will be done after FPE handshake
* is success.
@ -845,6 +868,7 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
ret = stmmac_est_configure(priv, priv->ioaddr, priv->plat->est,
priv->plat->clk_ptp_rate);
mutex_unlock(&priv->plat->est->lock);
if (ret) {
netdev_err(priv->dev, "failed to configure EST\n");
goto disable;
@ -860,9 +884,11 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
return 0;
disable:
mutex_lock(&priv->plat->est->lock);
priv->plat->est->enable = false;
stmmac_est_configure(priv, priv->ioaddr, priv->plat->est,
priv->plat->clk_ptp_rate);
mutex_unlock(&priv->plat->est->lock);
priv->plat->fpe_cfg->enable = false;
stmmac_fpe_configure(priv, priv->ioaddr,


@ -313,9 +313,8 @@ static void tlan_remove_one(struct pci_dev *pdev)
pci_release_regions(pdev);
#endif
free_netdev(dev);
cancel_work_sync(&priv->tlan_tqueue);
free_netdev(dev);
}
static void tlan_start(struct net_device *dev)


@ -1504,9 +1504,8 @@ err_out_resource:
release_mem_region(start, len);
err_out_kfree:
free_netdev(dev);
pr_err("%s: initialization failure, aborting!\n", fp->name);
free_netdev(dev);
return ret;
}


@ -85,7 +85,7 @@ static int nsim_ipsec_parse_proto_keys(struct xfrm_state *xs,
u32 *mykey, u32 *mysalt)
{
const char aes_gcm_name[] = "rfc4106(gcm(aes))";
struct net_device *dev = xs->xso.dev;
struct net_device *dev = xs->xso.real_dev;
unsigned char *key_data;
char *alg_name = NULL;
int key_len;
@ -134,7 +134,7 @@ static int nsim_ipsec_add_sa(struct xfrm_state *xs)
u16 sa_idx;
int ret;
dev = xs->xso.dev;
dev = xs->xso.real_dev;
ns = netdev_priv(dev);
ipsec = &ns->ipsec;
@ -194,7 +194,7 @@ static int nsim_ipsec_add_sa(struct xfrm_state *xs)
static void nsim_ipsec_del_sa(struct xfrm_state *xs)
{
struct netdevsim *ns = netdev_priv(xs->xso.dev);
struct netdevsim *ns = netdev_priv(xs->xso.real_dev);
struct nsim_ipsec *ipsec = &ns->ipsec;
u16 sa_idx;
@ -211,7 +211,7 @@ static void nsim_ipsec_del_sa(struct xfrm_state *xs)
static bool nsim_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
{
struct netdevsim *ns = netdev_priv(xs->xso.dev);
struct netdevsim *ns = netdev_priv(xs->xso.real_dev);
struct nsim_ipsec *ipsec = &ns->ipsec;
ipsec->ok++;


@ -78,6 +78,11 @@ enum {
/* Temperature read register (88E2110 only) */
MV_PCS_TEMP = 0x8042,
/* Number of ports on the device */
MV_PCS_PORT_INFO = 0xd00d,
MV_PCS_PORT_INFO_NPORTS_MASK = 0x0380,
MV_PCS_PORT_INFO_NPORTS_SHIFT = 7,
/* These registers appear at 0x800X and 0xa00X - the 0xa00X control
* registers appear to set themselves to the 0x800X when AN is
* restarted, but status registers appear readable from either.
@ -966,6 +971,30 @@ static const struct mv3310_chip mv2111_type = {
#endif
};
static int mv3310_get_number_of_ports(struct phy_device *phydev)
{
int ret;
ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MV_PCS_PORT_INFO);
if (ret < 0)
return ret;
ret &= MV_PCS_PORT_INFO_NPORTS_MASK;
ret >>= MV_PCS_PORT_INFO_NPORTS_SHIFT;
return ret + 1;
}
static int mv3310_match_phy_device(struct phy_device *phydev)
{
return mv3310_get_number_of_ports(phydev) == 1;
}
static int mv3340_match_phy_device(struct phy_device *phydev)
{
return mv3310_get_number_of_ports(phydev) == 4;
}
static int mv211x_match_phy_device(struct phy_device *phydev, bool has_5g)
{
int val;
@@ -994,7 +1023,8 @@ static int mv2111_match_phy_device(struct phy_device *phydev)
static struct phy_driver mv3310_drivers[] = {
{
.phy_id = MARVELL_PHY_ID_88X3310,
-.phy_id_mask = MARVELL_PHY_ID_88X33X0_MASK,
+.phy_id_mask = MARVELL_PHY_ID_MASK,
+.match_phy_device = mv3310_match_phy_device,
.name = "mv88x3310",
.driver_data = &mv3310_type,
.get_features = mv3310_get_features,
@@ -1011,8 +1041,9 @@ static struct phy_driver mv3310_drivers[] = {
.set_loopback = genphy_c45_loopback,
},
{
-.phy_id = MARVELL_PHY_ID_88X3340,
-.phy_id_mask = MARVELL_PHY_ID_88X33X0_MASK,
+.phy_id = MARVELL_PHY_ID_88X3310,
+.phy_id_mask = MARVELL_PHY_ID_MASK,
+.match_phy_device = mv3340_match_phy_device,
.name = "mv88x3340",
.driver_data = &mv3340_type,
.get_features = mv3310_get_features,
@@ -1069,8 +1100,7 @@ static struct phy_driver mv3310_drivers[] = {
module_phy_driver(mv3310_drivers);
static struct mdio_device_id __maybe_unused mv3310_tbl[] = {
-{ MARVELL_PHY_ID_88X3310, MARVELL_PHY_ID_88X33X0_MASK },
-{ MARVELL_PHY_ID_88X3340, MARVELL_PHY_ID_88X33X0_MASK },
+{ MARVELL_PHY_ID_88X3310, MARVELL_PHY_ID_MASK },
{ MARVELL_PHY_ID_88E2110, MARVELL_PHY_ID_MASK },
{ },
};
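
Net effect of the marvell10g changes: the 88X3340 reports the same PHY ID
as the 88X3310, so an ID/mask pair alone cannot tell the two apart. Both
driver entries now match the 88X3310 ID against the full MARVELL_PHY_ID_MASK
and are disambiguated in .match_phy_device by the port count read from
MV_PCS_PORT_INFO: one port matches as 88X3310, four ports as 88X3340.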


@@ -701,6 +701,7 @@ static int ax88772_init_phy(struct usbnet *dev)
return ret;
}
phy_suspend(priv->phydev);
priv->phydev->mac_managed_pm = 1;
phy_attached_info(priv->phydev);


@@ -1771,6 +1771,7 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
{
struct scatterlist *sgs[4], hdr, stat;
unsigned out_num = 0, tmp;
int ret;
/* Caller should know better */
BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));
@@ -1790,7 +1791,12 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
sgs[out_num] = &stat;
BUG_ON(out_num + 1 > ARRAY_SIZE(sgs));
-virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC);
+ret = virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC);
+if (ret < 0) {
+dev_warn(&vi->vdev->dev,
+"Failed to add sgs for command vq: %d\n.", ret);
+return false;
+}
if (unlikely(!virtqueue_kick(vi->cvq)))
return vi->ctrl->status == VIRTIO_NET_OK;
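
Rationale, as far as the hunk shows: virtqueue_add_sgs() can fail (for
example when descriptor allocation fails), and the old code ignored its
return value, so virtnet_send_command() would kick the control queue and
then busy-wait for a completion the device was never handed. Returning
false on failure avoids that hang.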


@@ -1,7 +1,7 @@
/*
* Linux driver for VMware's vmxnet3 ethernet NIC.
*
-* Copyright (C) 2008-2020, VMware, Inc. All Rights Reserved.
+* Copyright (C) 2008-2021, VMware, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -26,6 +26,10 @@
#include "vmxnet3_int.h"
#include <net/vxlan.h>
#include <net/geneve.h>
#define VXLAN_UDP_PORT 8472
struct vmxnet3_stat_desc {
char desc[ETH_GSTRING_LEN];
@@ -262,6 +266,8 @@ netdev_features_t vmxnet3_features_check(struct sk_buff *skb,
if (VMXNET3_VERSION_GE_4(adapter) &&
skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL) {
u8 l4_proto = 0;
u16 port;
struct udphdr *udph;
switch (vlan_get_protocol(skb)) {
case htons(ETH_P_IP):
@@ -274,8 +280,20 @@ netdev_features_t vmxnet3_features_check(struct sk_buff *skb,
return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}
-if (l4_proto != IPPROTO_UDP)
-return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
+switch (l4_proto) {
+case IPPROTO_UDP:
+udph = udp_hdr(skb);
+port = be16_to_cpu(udph->dest);
+/* Check if offloaded port is supported */
+if (port != GENEVE_UDP_PORT &&
+port != IANA_VXLAN_UDP_PORT &&
+port != VXLAN_UDP_PORT) {
+return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
+}
+break;
+default:
+return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
+}
}
return features;
}
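
For reference, the three accepted destination ports are 6081
(GENEVE_UDP_PORT), 4789 (IANA_VXLAN_UDP_PORT) and 8472 (the newly added
VXLAN_UDP_PORT, the pre-IANA port that Linux still uses as its VXLAN
default). Tunnel checksum/GSO offload is now advertised only for these
ports instead of for any UDP destination.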


@@ -364,19 +364,19 @@ static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr)
return -EINVAL;
}
-static int __init mod_init(void)
+static int __init hdlc_cisco_init(void)
{
register_hdlc_protocol(&proto);
return 0;
}
-static void __exit mod_exit(void)
+static void __exit hdlc_cisco_exit(void)
{
unregister_hdlc_protocol(&proto);
}
-module_init(mod_init);
-module_exit(mod_exit);
+module_init(hdlc_cisco_init);
+module_exit(hdlc_cisco_exit);
MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
MODULE_DESCRIPTION("Cisco HDLC protocol support for generic HDLC");


@@ -1279,19 +1279,19 @@ static int fr_ioctl(struct net_device *dev, struct ifreq *ifr)
return -EINVAL;
}
-static int __init mod_init(void)
+static int __init hdlc_fr_init(void)
{
register_hdlc_protocol(&proto);
return 0;
}
-static void __exit mod_exit(void)
+static void __exit hdlc_fr_exit(void)
{
unregister_hdlc_protocol(&proto);
}
-module_init(mod_init);
-module_exit(mod_exit);
+module_init(hdlc_fr_init);
+module_exit(hdlc_fr_exit);
MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
MODULE_DESCRIPTION("Frame-Relay protocol support for generic HDLC");


@@ -705,20 +705,20 @@ static int ppp_ioctl(struct net_device *dev, struct ifreq *ifr)
return -EINVAL;
}
-static int __init mod_init(void)
+static int __init hdlc_ppp_init(void)
{
skb_queue_head_init(&tx_queue);
register_hdlc_protocol(&proto);
return 0;
}
-static void __exit mod_exit(void)
+static void __exit hdlc_ppp_exit(void)
{
unregister_hdlc_protocol(&proto);
}
-module_init(mod_init);
-module_exit(mod_exit);
+module_init(hdlc_ppp_init);
+module_exit(hdlc_ppp_exit);
MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
MODULE_DESCRIPTION("PPP protocol support for generic HDLC");


@@ -90,7 +90,7 @@ static int raw_ioctl(struct net_device *dev, struct ifreq *ifr)
}
-static int __init mod_init(void)
+static int __init hdlc_raw_init(void)
{
register_hdlc_protocol(&proto);
return 0;
@@ -98,14 +98,14 @@ static int __init mod_init(void)
-static void __exit mod_exit(void)
+static void __exit hdlc_raw_exit(void)
{
unregister_hdlc_protocol(&proto);
}
-module_init(mod_init);
-module_exit(mod_exit);
+module_init(hdlc_raw_init);
+module_exit(hdlc_raw_exit);
MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
MODULE_DESCRIPTION("Raw HDLC protocol support for generic HDLC");


@@ -110,7 +110,7 @@ static int raw_eth_ioctl(struct net_device *dev, struct ifreq *ifr)
}
-static int __init mod_init(void)
+static int __init hdlc_eth_init(void)
{
register_hdlc_protocol(&proto);
return 0;
@@ -118,14 +118,14 @@ static int __init mod_init(void)
-static void __exit mod_exit(void)
+static void __exit hdlc_eth_exit(void)
{
unregister_hdlc_protocol(&proto);
}
-module_init(mod_init);
-module_exit(mod_exit);
+module_init(hdlc_eth_init);
+module_exit(hdlc_eth_exit);
MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
MODULE_DESCRIPTION("Ethernet encapsulation support for generic HDLC");


@@ -365,19 +365,19 @@ static int x25_ioctl(struct net_device *dev, struct ifreq *ifr)
return -EINVAL;
}
-static int __init mod_init(void)
+static int __init hdlc_x25_init(void)
{
register_hdlc_protocol(&proto);
return 0;
}
-static void __exit mod_exit(void)
+static void __exit hdlc_x25_exit(void)
{
unregister_hdlc_protocol(&proto);
}
-module_init(mod_init);
-module_exit(mod_exit);
+module_init(hdlc_x25_init);
+module_exit(hdlc_x25_exit);
MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
MODULE_DESCRIPTION("X.25 protocol support for generic HDLC");
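
All six generic-HDLC protocol modules above defined identically named
mod_init()/mod_exit() functions; the renames give each module a distinct
symbol, presumably to disambiguate traces and initcall output. There is
no functional change.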


@@ -931,7 +931,7 @@ static int mt7921_load_firmware(struct mt7921_dev *dev)
ret = mt76_get_field(dev, MT_CONN_ON_MISC, MT_TOP_MISC2_FW_N9_RDY);
if (ret) {
dev_dbg(dev->mt76.dev, "Firmware is already download\n");
-return -EIO;
+goto fw_loaded;
}
ret = mt7921_load_patch(dev);
@@ -949,6 +949,7 @@ static int mt7921_load_firmware(struct mt7921_dev *dev)
return -EIO;
}
+fw_loaded:
mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_FWDL], false);
#ifdef CONFIG_PM


@@ -24,15 +24,7 @@ int ipc_imem_sys_wwan_open(struct iosm_imem *ipc_imem, int if_id)
return -EIO;
}
-/* check for the interafce id
-* if if_id 1 to 8 then create IP MUX channel sessions.
-* To start MUX session from 0 as network interface id would start
-* from 1 so map it to if_id = if_id - 1
-*/
-if (if_id >= IP_MUX_SESSION_START && if_id <= IP_MUX_SESSION_END)
-return ipc_mux_open_session(ipc_imem->mux, if_id - 1);
-return -EINVAL;
+return ipc_mux_open_session(ipc_imem->mux, if_id);
}
/* Release a net link to CP. */
@@ -41,7 +33,7 @@ void ipc_imem_sys_wwan_close(struct iosm_imem *ipc_imem, int if_id,
{
if (ipc_imem->mux && if_id >= IP_MUX_SESSION_START &&
if_id <= IP_MUX_SESSION_END)
-ipc_mux_close_session(ipc_imem->mux, if_id - 1);
+ipc_mux_close_session(ipc_imem->mux, if_id);
}
/* Tasklet call to do uplink transfer. */
@@ -83,13 +75,8 @@ int ipc_imem_sys_wwan_transmit(struct iosm_imem *ipc_imem,
goto out;
}
-if (if_id >= IP_MUX_SESSION_START && if_id <= IP_MUX_SESSION_END)
-/* Route the UL packet through IP MUX Layer */
-ret = ipc_mux_ul_trigger_encode(ipc_imem->mux,
-if_id - 1, skb);
-else
-dev_err(ipc_imem->dev,
-"invalid if_id %d: ", if_id);
+/* Route the UL packet through IP MUX Layer */
+ret = ipc_mux_ul_trigger_encode(ipc_imem->mux, if_id, skb);
out:
return ret;
}


@@ -27,11 +27,11 @@
#define BOOT_CHECK_DEFAULT_TIMEOUT 400
/* IP MUX channel range */
-#define IP_MUX_SESSION_START 1
-#define IP_MUX_SESSION_END 8
+#define IP_MUX_SESSION_START 0
+#define IP_MUX_SESSION_END 7
/* Default IP MUX channel */
-#define IP_MUX_SESSION_DEFAULT 1
+#define IP_MUX_SESSION_DEFAULT 0
/**
* ipc_imem_sys_port_open - Open a port link to CP.
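
Net effect of the iosm hunks: MUX session IDs become 0-based end to end.
Previously interface IDs 1..8 were mapped onto sessions 0..7 with an
"if_id - 1" at each call site; now if_id 0..7 is used directly, so the
START/END/DEFAULT defines shift down by one and the +1/-1 adjustments
disappear (see also the ipc_mux_net_receive() and ipc_wwan_receive()
hunks below).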


@@ -288,7 +288,7 @@ static int ipc_mux_net_receive(struct iosm_mux *ipc_mux, int if_id,
/* Pass the packet to the netif layer. */
dest_skb->priority = service_class;
-return ipc_wwan_receive(wwan, dest_skb, false, if_id + 1);
+return ipc_wwan_receive(wwan, dest_skb, false, if_id);
}
/* Decode Flow Credit Table in the block */


@@ -37,7 +37,7 @@ void ipc_uevent_send(struct device *dev, char *uevent)
/* Store the device and event information */
info->dev = dev;
-snprintf(info->uevent, MAX_UEVENT_LEN, "%s: %s", dev_name(dev), uevent);
+snprintf(info->uevent, MAX_UEVENT_LEN, "IOSM_EVENT=%s", uevent);
/* Schedule uevent in process context using work queue */
schedule_work(&info->work);


@@ -107,6 +107,7 @@ static int ipc_wwan_link_transmit(struct sk_buff *skb,
{
struct iosm_netdev_priv *priv = wwan_netdev_drvpriv(netdev);
struct iosm_wwan *ipc_wwan = priv->ipc_wwan;
unsigned int len = skb->len;
int if_id = priv->if_id;
int ret;
@@ -123,6 +124,8 @@
/* Return code of zero is success */
if (ret == 0) {
netdev->stats.tx_packets++;
netdev->stats.tx_bytes += len;
ret = NETDEV_TX_OK;
} else if (ret == -EBUSY) {
ret = NETDEV_TX_BUSY;
@@ -140,7 +143,8 @@ exit:
ret);
dev_kfree_skb_any(skb);
-return ret;
+netdev->stats.tx_dropped++;
+return NETDEV_TX_OK;
}
/* Ops structure for wwan net link */
@@ -158,6 +162,7 @@ static void ipc_wwan_setup(struct net_device *iosm_dev)
iosm_dev->priv_flags |= IFF_NO_QUEUE;
iosm_dev->type = ARPHRD_NONE;
iosm_dev->mtu = ETH_DATA_LEN;
iosm_dev->min_mtu = ETH_MIN_MTU;
iosm_dev->max_mtu = ETH_MAX_MTU;
@@ -252,8 +257,8 @@ int ipc_wwan_receive(struct iosm_wwan *ipc_wwan, struct sk_buff *skb_arg,
skb->pkt_type = PACKET_HOST;
-if (if_id < (IP_MUX_SESSION_START - 1) ||
-if_id > (IP_MUX_SESSION_END - 1)) {
+if (if_id < IP_MUX_SESSION_START ||
+if_id > IP_MUX_SESSION_END) {
ret = -EINVAL;
goto free;
}


@@ -3,7 +3,7 @@
# Makefile for PTP 1588 clock support.
#
-ptp-y := ptp_clock.o ptp_chardev.o ptp_sysfs.o
+ptp-y := ptp_clock.o ptp_chardev.o ptp_sysfs.o ptp_vclock.o
ptp_kvm-$(CONFIG_X86) := ptp_kvm_x86.o ptp_kvm_common.o
ptp_kvm-$(CONFIG_HAVE_ARM_SMCCC) := ptp_kvm_arm.o ptp_kvm_common.o
obj-$(CONFIG_PTP_1588_CLOCK) += ptp.o


@@ -24,10 +24,11 @@
#define PTP_PPS_EVENT PPS_CAPTUREASSERT
#define PTP_PPS_MODE (PTP_PPS_DEFAULTS | PPS_CANWAIT | PPS_TSFMT_TSPEC)
+struct class *ptp_class;
/* private globals */
static dev_t ptp_devt;
-static struct class *ptp_class;
static DEFINE_IDA(ptp_clocks_map);
@@ -76,6 +77,11 @@ static int ptp_clock_settime(struct posix_clock *pc, const struct timespec64 *tp
{
struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
if (ptp_vclock_in_use(ptp)) {
pr_err("ptp: virtual clock in use\n");
return -EBUSY;
}
return ptp->info->settime64(ptp->info, tp);
}
@@ -97,6 +103,11 @@ static int ptp_clock_adjtime(struct posix_clock *pc, struct __kernel_timex *tx)
struct ptp_clock_info *ops;
int err = -EOPNOTSUPP;
if (ptp_vclock_in_use(ptp)) {
pr_err("ptp: virtual clock in use\n");
return -EBUSY;
}
ops = ptp->info;
if (tx->modes & ADJ_SETOFFSET) {
@@ -161,6 +172,7 @@ static void ptp_clock_release(struct device *dev)
ptp_cleanup_pin_groups(ptp);
mutex_destroy(&ptp->tsevq_mux);
mutex_destroy(&ptp->pincfg_mux);
mutex_destroy(&ptp->n_vclocks_mux);
ida_simple_remove(&ptp_clocks_map, ptp->index);
kfree(ptp);
}
@@ -185,6 +197,7 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
{
struct ptp_clock *ptp;
int err = 0, index, major = MAJOR(ptp_devt);
size_t size;
if (info->n_alarm > PTP_MAX_ALARMS)
return ERR_PTR(-EINVAL);
@@ -208,6 +221,7 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
spin_lock_init(&ptp->tsevq.lock);
mutex_init(&ptp->tsevq_mux);
mutex_init(&ptp->pincfg_mux);
mutex_init(&ptp->n_vclocks_mux);
init_waitqueue_head(&ptp->tsev_wq);
if (ptp->info->do_aux_work) {
@@ -218,7 +232,22 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
pr_err("failed to create ptp aux_worker %d\n", err);
goto kworker_err;
}
-ptp->pps_source->lookup_cookie = ptp;
}
+/* PTP virtual clock is being registered under physical clock */
+if (parent && parent->class && parent->class->name &&
+strcmp(parent->class->name, "ptp") == 0)
+ptp->is_virtual_clock = true;
+if (!ptp->is_virtual_clock) {
+ptp->max_vclocks = PTP_DEFAULT_MAX_VCLOCKS;
+size = sizeof(int) * ptp->max_vclocks;
+ptp->vclock_index = kzalloc(size, GFP_KERNEL);
+if (!ptp->vclock_index) {
+err = -ENOMEM;
+goto no_mem_for_vclocks;
+}
+}
err = ptp_populate_pin_groups(ptp);
@@ -238,6 +267,7 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
pr_err("failed to register pps source\n");
goto no_pps;
}
+ptp->pps_source->lookup_cookie = ptp;
}
/* Initialize a new device of our class in our clock structure. */
@@ -265,11 +295,14 @@ no_clock:
no_pps:
ptp_cleanup_pin_groups(ptp);
no_pin_groups:
kfree(ptp->vclock_index);
no_mem_for_vclocks:
if (ptp->kworker)
kthread_destroy_worker(ptp->kworker);
kworker_err:
mutex_destroy(&ptp->tsevq_mux);
mutex_destroy(&ptp->pincfg_mux);
mutex_destroy(&ptp->n_vclocks_mux);
ida_simple_remove(&ptp_clocks_map, index);
no_slot:
kfree(ptp);
@@ -280,9 +313,16 @@ EXPORT_SYMBOL(ptp_clock_register);
int ptp_clock_unregister(struct ptp_clock *ptp)
{
if (ptp_vclock_in_use(ptp)) {
pr_err("ptp: virtual clock in use\n");
return -EBUSY;
}
ptp->defunct = 1;
wake_up_interruptible(&ptp->tsev_wq);
kfree(ptp->vclock_index);
if (ptp->kworker) {
kthread_cancel_delayed_work_sync(&ptp->aux_work);
kthread_destroy_worker(ptp->kworker);


@@ -18,6 +18,7 @@
#define PTP_MAX_TIMESTAMPS 128
#define PTP_BUF_TIMESTAMPS 30
#define PTP_DEFAULT_MAX_VCLOCKS 20
struct timestamp_event_queue {
struct ptp_extts_event buf[PTP_MAX_TIMESTAMPS];
@@ -46,6 +47,24 @@ struct ptp_clock {
const struct attribute_group *pin_attr_groups[2];
struct kthread_worker *kworker;
struct kthread_delayed_work aux_work;
unsigned int max_vclocks;
unsigned int n_vclocks;
int *vclock_index;
struct mutex n_vclocks_mux; /* protect concurrent n_vclocks access */
bool is_virtual_clock;
};
#define info_to_vclock(d) container_of((d), struct ptp_vclock, info)
#define cc_to_vclock(d) container_of((d), struct ptp_vclock, cc)
#define dw_to_vclock(d) container_of((d), struct ptp_vclock, refresh_work)
struct ptp_vclock {
struct ptp_clock *pclock;
struct ptp_clock_info info;
struct ptp_clock *clock;
struct cyclecounter cc;
struct timecounter tc;
spinlock_t lock; /* protects tc/cc */
};
/*
@@ -61,6 +80,24 @@ static inline int queue_cnt(struct timestamp_event_queue *q)
return cnt < 0 ? PTP_MAX_TIMESTAMPS + cnt : cnt;
}
/* Check if ptp virtual clock is in use */
static inline bool ptp_vclock_in_use(struct ptp_clock *ptp)
{
bool in_use = false;
if (mutex_lock_interruptible(&ptp->n_vclocks_mux))
return true;
if (!ptp->is_virtual_clock && ptp->n_vclocks)
in_use = true;
mutex_unlock(&ptp->n_vclocks_mux);
return in_use;
}
extern struct class *ptp_class;
/*
* see ptp_chardev.c
*/
@@ -89,4 +126,6 @@ extern const struct attribute_group *ptp_groups[];
int ptp_populate_pin_groups(struct ptp_clock *ptp);
void ptp_cleanup_pin_groups(struct ptp_clock *ptp);
struct ptp_vclock *ptp_vclock_register(struct ptp_clock *pclock);
void ptp_vclock_unregister(struct ptp_vclock *vclock);
#endif


@@ -3,6 +3,7 @@
* PTP 1588 clock support - sysfs interface.
*
* Copyright (C) 2010 OMICRON electronics GmbH
* Copyright 2021 NXP
*/
#include <linux/capability.h>
#include <linux/slab.h>
@@ -148,6 +149,159 @@ out:
}
static DEVICE_ATTR(pps_enable, 0220, NULL, pps_enable_store);
static int unregister_vclock(struct device *dev, void *data)
{
struct ptp_clock *ptp = dev_get_drvdata(dev);
struct ptp_clock_info *info = ptp->info;
struct ptp_vclock *vclock;
u8 *num = data;
vclock = info_to_vclock(info);
dev_info(dev->parent, "delete virtual clock ptp%d\n",
vclock->clock->index);
ptp_vclock_unregister(vclock);
(*num)--;
/* For break. Not error. */
if (*num == 0)
return -EINVAL;
return 0;
}
static ssize_t n_vclocks_show(struct device *dev,
struct device_attribute *attr, char *page)
{
struct ptp_clock *ptp = dev_get_drvdata(dev);
ssize_t size;
if (mutex_lock_interruptible(&ptp->n_vclocks_mux))
return -ERESTARTSYS;
size = snprintf(page, PAGE_SIZE - 1, "%u\n", ptp->n_vclocks);
mutex_unlock(&ptp->n_vclocks_mux);
return size;
}
static ssize_t n_vclocks_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct ptp_clock *ptp = dev_get_drvdata(dev);
struct ptp_vclock *vclock;
int err = -EINVAL;
u32 num, i;
if (kstrtou32(buf, 0, &num))
return err;
if (mutex_lock_interruptible(&ptp->n_vclocks_mux))
return -ERESTARTSYS;
if (num > ptp->max_vclocks) {
dev_err(dev, "max value is %d\n", ptp->max_vclocks);
goto out;
}
/* Need to create more vclocks */
if (num > ptp->n_vclocks) {
for (i = 0; i < num - ptp->n_vclocks; i++) {
vclock = ptp_vclock_register(ptp);
if (!vclock)
goto out;
*(ptp->vclock_index + ptp->n_vclocks + i) =
vclock->clock->index;
dev_info(dev, "new virtual clock ptp%d\n",
vclock->clock->index);
}
}
/* Need to delete vclocks */
if (num < ptp->n_vclocks) {
i = ptp->n_vclocks - num;
device_for_each_child_reverse(dev, &i,
unregister_vclock);
for (i = 1; i <= ptp->n_vclocks - num; i++)
*(ptp->vclock_index + ptp->n_vclocks - i) = -1;
}
if (num == 0)
dev_info(dev, "only physical clock in use now\n");
else
dev_info(dev, "guarantee physical clock free running\n");
ptp->n_vclocks = num;
mutex_unlock(&ptp->n_vclocks_mux);
return count;
out:
mutex_unlock(&ptp->n_vclocks_mux);
return err;
}
static DEVICE_ATTR_RW(n_vclocks);
static ssize_t max_vclocks_show(struct device *dev,
struct device_attribute *attr, char *page)
{
struct ptp_clock *ptp = dev_get_drvdata(dev);
ssize_t size;
size = snprintf(page, PAGE_SIZE - 1, "%u\n", ptp->max_vclocks);
return size;
}
static ssize_t max_vclocks_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct ptp_clock *ptp = dev_get_drvdata(dev);
unsigned int *vclock_index;
int err = -EINVAL;
size_t size;
u32 max;
if (kstrtou32(buf, 0, &max) || max == 0)
return -EINVAL;
if (max == ptp->max_vclocks)
return count;
if (mutex_lock_interruptible(&ptp->n_vclocks_mux))
return -ERESTARTSYS;
if (max < ptp->n_vclocks)
goto out;
size = sizeof(int) * max;
vclock_index = kzalloc(size, GFP_KERNEL);
if (!vclock_index) {
err = -ENOMEM;
goto out;
}
size = sizeof(int) * ptp->n_vclocks;
memcpy(vclock_index, ptp->vclock_index, size);
kfree(ptp->vclock_index);
ptp->vclock_index = vclock_index;
ptp->max_vclocks = max;
mutex_unlock(&ptp->n_vclocks_mux);
return count;
out:
mutex_unlock(&ptp->n_vclocks_mux);
return err;
}
static DEVICE_ATTR_RW(max_vclocks);
static struct attribute *ptp_attrs[] = {
&dev_attr_clock_name.attr,
@@ -162,6 +316,8 @@ static struct attribute *ptp_attrs[] = {
&dev_attr_fifo.attr,
&dev_attr_period.attr,
&dev_attr_pps_enable.attr,
&dev_attr_n_vclocks.attr,
&dev_attr_max_vclocks.attr,
NULL
};
@@ -183,6 +339,10 @@ static umode_t ptp_is_attribute_visible(struct kobject *kobj,
} else if (attr == &dev_attr_pps_enable.attr) {
if (!info->pps)
mode = 0;
} else if (attr == &dev_attr_n_vclocks.attr ||
attr == &dev_attr_max_vclocks.attr) {
if (ptp->is_virtual_clock)
mode = 0;
}
return mode;
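
Usage, for orientation (assuming a physical clock registered as ptp0):
writing e.g. 2 to /sys/class/ptp/ptp0/n_vclocks creates two virtual clocks
on top of the physical clock, which appear as ordinary /dev/ptpN devices;
writing a smaller value deletes them again. max_vclocks (default 20, from
PTP_DEFAULT_MAX_VCLOCKS) bounds how many may be created.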

drivers/ptp/ptp_vclock.c (new file, 219 lines)

@@ -0,0 +1,219 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* PTP virtual clock driver
*
* Copyright 2021 NXP
*/
#include <linux/slab.h>
#include "ptp_private.h"
#define PTP_VCLOCK_CC_SHIFT 31
#define PTP_VCLOCK_CC_MULT (1 << PTP_VCLOCK_CC_SHIFT)
#define PTP_VCLOCK_FADJ_SHIFT 9
#define PTP_VCLOCK_FADJ_DENOMINATOR 15625ULL
#define PTP_VCLOCK_REFRESH_INTERVAL (HZ * 2)
static int ptp_vclock_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
struct ptp_vclock *vclock = info_to_vclock(ptp);
unsigned long flags;
s64 adj;
adj = (s64)scaled_ppm << PTP_VCLOCK_FADJ_SHIFT;
adj = div_s64(adj, PTP_VCLOCK_FADJ_DENOMINATOR);
spin_lock_irqsave(&vclock->lock, flags);
timecounter_read(&vclock->tc);
vclock->cc.mult = PTP_VCLOCK_CC_MULT + adj;
spin_unlock_irqrestore(&vclock->lock, flags);
return 0;
}
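
/* Note on the constants above: scaled_ppm carries a 16-bit binary
 * fraction, so the requested rate offset as a pure ratio is
 * scaled_ppm / (2^16 * 10^6).  Scaled into the 2^31 cyclecounter mult:
 *
 *   adj = scaled_ppm * 2^31 / (2^16 * 10^6)
 *       = scaled_ppm * 2^9 / 15625
 *
 * which is (scaled_ppm << PTP_VCLOCK_FADJ_SHIFT) /
 * PTP_VCLOCK_FADJ_DENOMINATOR, exactly as computed above.
 */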
static int ptp_vclock_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
struct ptp_vclock *vclock = info_to_vclock(ptp);
unsigned long flags;
spin_lock_irqsave(&vclock->lock, flags);
timecounter_adjtime(&vclock->tc, delta);
spin_unlock_irqrestore(&vclock->lock, flags);
return 0;
}
static int ptp_vclock_gettime(struct ptp_clock_info *ptp,
struct timespec64 *ts)
{
struct ptp_vclock *vclock = info_to_vclock(ptp);
unsigned long flags;
u64 ns;
spin_lock_irqsave(&vclock->lock, flags);
ns = timecounter_read(&vclock->tc);
spin_unlock_irqrestore(&vclock->lock, flags);
*ts = ns_to_timespec64(ns);
return 0;
}
static int ptp_vclock_settime(struct ptp_clock_info *ptp,
const struct timespec64 *ts)
{
struct ptp_vclock *vclock = info_to_vclock(ptp);
u64 ns = timespec64_to_ns(ts);
unsigned long flags;
spin_lock_irqsave(&vclock->lock, flags);
timecounter_init(&vclock->tc, &vclock->cc, ns);
spin_unlock_irqrestore(&vclock->lock, flags);
return 0;
}
static long ptp_vclock_refresh(struct ptp_clock_info *ptp)
{
struct ptp_vclock *vclock = info_to_vclock(ptp);
struct timespec64 ts;
ptp_vclock_gettime(&vclock->info, &ts);
return PTP_VCLOCK_REFRESH_INTERVAL;
}
static const struct ptp_clock_info ptp_vclock_info = {
.owner = THIS_MODULE,
.name = "ptp virtual clock",
/* The maximum ppb value that long scaled_ppm can support */
.max_adj = 32767999,
.adjfine = ptp_vclock_adjfine,
.adjtime = ptp_vclock_adjtime,
.gettime64 = ptp_vclock_gettime,
.settime64 = ptp_vclock_settime,
.do_aux_work = ptp_vclock_refresh,
};
static u64 ptp_vclock_read(const struct cyclecounter *cc)
{
struct ptp_vclock *vclock = cc_to_vclock(cc);
struct ptp_clock *ptp = vclock->pclock;
struct timespec64 ts = {};
if (ptp->info->gettimex64)
ptp->info->gettimex64(ptp->info, &ts, NULL);
else
ptp->info->gettime64(ptp->info, &ts);
return timespec64_to_ns(&ts);
}
static const struct cyclecounter ptp_vclock_cc = {
.read = ptp_vclock_read,
.mask = CYCLECOUNTER_MASK(32),
.mult = PTP_VCLOCK_CC_MULT,
.shift = PTP_VCLOCK_CC_SHIFT,
};
struct ptp_vclock *ptp_vclock_register(struct ptp_clock *pclock)
{
struct ptp_vclock *vclock;
vclock = kzalloc(sizeof(*vclock), GFP_KERNEL);
if (!vclock)
return NULL;
vclock->pclock = pclock;
vclock->info = ptp_vclock_info;
vclock->cc = ptp_vclock_cc;
snprintf(vclock->info.name, PTP_CLOCK_NAME_LEN, "ptp%d_virt",
pclock->index);
spin_lock_init(&vclock->lock);
vclock->clock = ptp_clock_register(&vclock->info, &pclock->dev);
if (IS_ERR_OR_NULL(vclock->clock)) {
kfree(vclock);
return NULL;
}
timecounter_init(&vclock->tc, &vclock->cc, 0);
ptp_schedule_worker(vclock->clock, PTP_VCLOCK_REFRESH_INTERVAL);
return vclock;
}
void ptp_vclock_unregister(struct ptp_vclock *vclock)
{
ptp_clock_unregister(vclock->clock);
kfree(vclock);
}
int ptp_get_vclocks_index(int pclock_index, int **vclock_index)
{
char name[PTP_CLOCK_NAME_LEN] = "";
struct ptp_clock *ptp;
struct device *dev;
int num = 0;
if (pclock_index < 0)
return num;
snprintf(name, PTP_CLOCK_NAME_LEN, "ptp%d", pclock_index);
dev = class_find_device_by_name(ptp_class, name);
if (!dev)
return num;
ptp = dev_get_drvdata(dev);
if (mutex_lock_interruptible(&ptp->n_vclocks_mux)) {
put_device(dev);
return num;
}
*vclock_index = kzalloc(sizeof(int) * ptp->n_vclocks, GFP_KERNEL);
if (!(*vclock_index))
goto out;
memcpy(*vclock_index, ptp->vclock_index, sizeof(int) * ptp->n_vclocks);
num = ptp->n_vclocks;
out:
mutex_unlock(&ptp->n_vclocks_mux);
put_device(dev);
return num;
}
EXPORT_SYMBOL(ptp_get_vclocks_index);
void ptp_convert_timestamp(struct skb_shared_hwtstamps *hwtstamps,
int vclock_index)
{
char name[PTP_CLOCK_NAME_LEN] = "";
struct ptp_vclock *vclock;
struct ptp_clock *ptp;
unsigned long flags;
struct device *dev;
u64 ns;
snprintf(name, PTP_CLOCK_NAME_LEN, "ptp%d", vclock_index);
dev = class_find_device_by_name(ptp_class, name);
if (!dev)
return;
ptp = dev_get_drvdata(dev);
if (!ptp->is_virtual_clock) {
put_device(dev);
return;
}
vclock = info_to_vclock(ptp->info);
ns = ktime_to_ns(hwtstamps->hwtstamp);
spin_lock_irqsave(&vclock->lock, flags);
ns = timecounter_cyc2time(&vclock->tc, ns);
spin_unlock_irqrestore(&vclock->lock, flags);
put_device(dev);
hwtstamps->hwtstamp = ns_to_ktime(ns);
}
EXPORT_SYMBOL(ptp_convert_timestamp);


@@ -780,6 +780,7 @@ struct bpf_jit_poke_descriptor {
void *tailcall_target;
void *tailcall_bypass;
void *bypass_addr;
void *aux;
union {
struct {
struct bpf_map *map;


@@ -757,6 +757,16 @@ void
ethtool_params_from_link_mode(struct ethtool_link_ksettings *link_ksettings,
enum ethtool_link_mode_bit_indices link_mode);
/**
* ethtool_get_phc_vclocks - Derive phc vclocks information, and caller
* is responsible to free memory of vclock_index
* @dev: pointer to net_device structure
* @vclock_index: pointer to pointer of vclock index
*
* Return number of phc vclocks
*/
int ethtool_get_phc_vclocks(struct net_device *dev, int **vclock_index);
/**
* ethtool_sprintf - Write formatted string to ethtool string data
* @data: Pointer to start of string to update


@@ -22,14 +22,10 @@
#define MARVELL_PHY_ID_88E1545 0x01410ea0
#define MARVELL_PHY_ID_88E1548P 0x01410ec0
#define MARVELL_PHY_ID_88E3016 0x01410e60
+#define MARVELL_PHY_ID_88X3310 0x002b09a0
#define MARVELL_PHY_ID_88E2110 0x002b09b0
#define MARVELL_PHY_ID_88X2222 0x01410f10
-/* PHY IDs and mask for Alaska 10G PHYs */
-#define MARVELL_PHY_ID_88X33X0_MASK 0xfffffff8
-#define MARVELL_PHY_ID_88X3310 0x002b09a0
-#define MARVELL_PHY_ID_88X3340 0x002b09a8
/* Marvel 88E1111 in Finisar SFP module with modified PHY ID */
#define MARVELL_PHY_ID_88E1111_FINISAR 0x01ff0cc0


@@ -11,7 +11,10 @@
#include <linux/device.h>
#include <linux/pps_kernel.h>
#include <linux/ptp_clock.h>
#include <linux/timecounter.h>
#include <linux/skbuff.h>
#define PTP_CLOCK_NAME_LEN 32
/**
* struct ptp_clock_request - request PTP clock event
*
@@ -134,7 +137,7 @@ struct ptp_system_timestamp {
struct ptp_clock_info {
struct module *owner;
-char name[16];
+char name[PTP_CLOCK_NAME_LEN];
s32 max_adj;
int n_alarm;
int n_ext_ts;
@@ -304,6 +307,27 @@ int ptp_schedule_worker(struct ptp_clock *ptp, unsigned long delay);
*/
void ptp_cancel_worker_sync(struct ptp_clock *ptp);
/**
* ptp_get_vclocks_index() - get all vclocks index on pclock, and
* caller is responsible to free memory
* of vclock_index
*
* @pclock_index: phc index of ptp pclock.
* @vclock_index: pointer to pointer of vclock index.
*
* return number of vclocks.
*/
int ptp_get_vclocks_index(int pclock_index, int **vclock_index);
/**
* ptp_convert_timestamp() - convert timestamp to a ptp vclock time
*
* @hwtstamps: skb_shared_hwtstamps structure pointer
* @vclock_index: phc index of ptp vclock.
*/
void ptp_convert_timestamp(struct skb_shared_hwtstamps *hwtstamps,
int vclock_index);
#else
static inline struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
struct device *parent)
@@ -323,6 +347,11 @@ static inline int ptp_schedule_worker(struct ptp_clock *ptp,
{ return -EOPNOTSUPP; }
static inline void ptp_cancel_worker_sync(struct ptp_clock *ptp)
{ }
static inline int ptp_get_vclocks_index(int pclock_index, int **vclock_index)
{ return 0; }
static inline void ptp_convert_timestamp(struct skb_shared_hwtstamps *hwtstamps,
int vclock_index)
{ }
#endif
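
A minimal caller sketch for ptp_get_vclocks_index() (hypothetical snippet;
per the kernel-doc the callee allocates the index array and the caller
must free it):

	int *vclock_index = NULL;
	int n, i;

	n = ptp_get_vclocks_index(pclock_index, &vclock_index);
	for (i = 0; i < n; i++)
		pr_info("pclock ptp%d -> vclock ptp%d\n",
			pclock_index, vclock_index[i]);
	kfree(vclock_index);	/* kfree(NULL) is a no-op when n == 0 */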


@@ -115,7 +115,9 @@ struct stmmac_axi {
#define EST_GCL 1024
struct stmmac_est {
struct mutex lock;
int enable;
u32 btr_reserve[2];
u32 btr_offset[2];
u32 btr[2];
u32 ctr[2];


@@ -201,6 +201,11 @@ struct bond_up_slave {
*/
#define BOND_LINK_NOCHANGE -1
struct bond_ipsec {
struct list_head list;
struct xfrm_state *xs;
};
/*
* Here are the locking policies for the two bonding locks:
* Get rcu_read_lock when reading or RTNL when writing slave list.
@@ -249,7 +254,9 @@ struct bonding {
#endif /* CONFIG_DEBUG_FS */
struct rtnl_link_stats64 bond_stats;
#ifdef CONFIG_XFRM_OFFLOAD
-struct xfrm_state *xs;
+struct list_head ipsec_list;
+/* protecting ipsec_list */
+spinlock_t ipsec_lock;
#endif /* CONFIG_XFRM_OFFLOAD */
};
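
This is the bonding-side counterpart of the xso.real_dev changes earlier
in the diff: a bond may have several xfrm states offloaded through it, so
the single xs pointer becomes an ipsec_list of struct bond_ipsec entries
guarded by ipsec_lock.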

Some files were not shown because too many files have changed in this diff.