Merge branch 'octeontx2-macsec-offload'

Subbaraya Sundeep says:

====================
net: Introduce macsec hardware offload for cn10k platform

CN10K-B and CNF10K-B variants of the CN10K silicon have a MACsec block (MCS)
to encrypt and decrypt packets at the MAC/hardware level. This block is a
global resource with hardware resources like SecYs, SCs and SAs,
and it sits between the NIX block and the RPM LMAC. CN10K-B silicon has only
one MCS block which receives packets from all LMACs, whereas
CNF10K-B has seven MCS blocks, one per LMAC. Both MCS blocks are
similar in operation except for a few register offsets, and some
configurations require writing to different registers. This patchset
introduces MACsec hardware offloading support: the AF driver manages hardware
resources and the PF driver consumes them when MACsec hardware offloading
is needed.
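
As an illustration of this split, the PF side requests MCS resources
from the AF over the mailbox. A minimal, hypothetical sketch using the
mcs_alloc_rsrc_req message added in this series (the
otx2_mbox_alloc_msg_mcs_alloc_resources() and otx2_sync_mbox_msg()
helper names are assumed here, not shown in this excerpt):

	struct mcs_alloc_rsrc_req *req;

	/* Ask AF for one of each egress MCS resource (flow id, SecY,
	 * SC and a pair of SAs) on MCS block 0.
	 */
	req = otx2_mbox_alloc_msg_mcs_alloc_resources(&pfvf->mbox);
	if (!req)
		return -ENOMEM;

	req->rsrc_type = 0;	/* ignored when 'all' is set */
	req->all = 1;		/* one of every resource type */
	req->mcs_id = 0;	/* the only MCS block on CN10K-B */
	req->dir = MCS_TX;	/* egress side */

	return otx2_sync_mbox_msg(&pfvf->mbox);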

Patch 1 adds a basic PCI driver for both CN10K-B and CNF10K-B
silicons and initializes the hardware block.
Patches 2 and 3 add mailboxes to init, reset and manage
resources of the MCS block.
Patch 4 adds a low-priority rule in the MCS TCAM so that
traffic which does not need MACsec processing can still be sent/received.
Patch 5 adds MACsec stats collection support.
Patch 6 adds interrupt handling support; any event the AF
consumer is interested in can be notified via an mbox notification.
Patch 7 adds debugfs support, which helps in debugging the packet
path.
Patch 8 introduces the MACsec hardware offload feature in the
PF netdev driver.

v3 changes:
 Fixed clang and sparse warnings

v2 changes:
 Fix build error by changing #ifdef CONFIG_MACSEC to
 #if IS_ENABLED(CONFIG_MACSEC)
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller 2022-10-03 12:50:19 +01:00
commit f75886a045
15 changed files with 6680 additions and 6 deletions


@@ -11,4 +11,4 @@ rvu_mbox-y := mbox.o rvu_trace.o
rvu_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \
rvu_reg.o rvu_npc.o rvu_debugfs.o ptp.o rvu_npc_fs.o \
rvu_cpt.o rvu_devlink.o rpm.o rvu_cn10k.o rvu_switch.o \
rvu_sdp.o rvu_npc_hash.o
rvu_sdp.o rvu_npc_hash.o mcs.o mcs_rvu_if.o mcs_cnf10kb.o


@@ -293,20 +293,74 @@ M(NIX_BANDPROF_ALLOC, 0x801d, nix_bandprof_alloc, nix_bandprof_alloc_req, \
M(NIX_BANDPROF_FREE, 0x801e, nix_bandprof_free, nix_bandprof_free_req, \
msg_rsp) \
M(NIX_BANDPROF_GET_HWINFO, 0x801f, nix_bandprof_get_hwinfo, msg_req, \
nix_bandprof_get_hwinfo_rsp)
nix_bandprof_get_hwinfo_rsp) \
/* MCS mbox IDs (range 0xA000 - 0xBFFF) */ \
M(MCS_ALLOC_RESOURCES, 0xa000, mcs_alloc_resources, mcs_alloc_rsrc_req, \
mcs_alloc_rsrc_rsp) \
M(MCS_FREE_RESOURCES, 0xa001, mcs_free_resources, mcs_free_rsrc_req, msg_rsp) \
M(MCS_FLOWID_ENTRY_WRITE, 0xa002, mcs_flowid_entry_write, mcs_flowid_entry_write_req, \
msg_rsp) \
M(MCS_SECY_PLCY_WRITE, 0xa003, mcs_secy_plcy_write, mcs_secy_plcy_write_req, \
msg_rsp) \
M(MCS_RX_SC_CAM_WRITE, 0xa004, mcs_rx_sc_cam_write, mcs_rx_sc_cam_write_req, \
msg_rsp) \
M(MCS_SA_PLCY_WRITE, 0xa005, mcs_sa_plcy_write, mcs_sa_plcy_write_req, \
msg_rsp) \
M(MCS_TX_SC_SA_MAP_WRITE, 0xa006, mcs_tx_sc_sa_map_write, mcs_tx_sc_sa_map, \
msg_rsp) \
M(MCS_RX_SC_SA_MAP_WRITE, 0xa007, mcs_rx_sc_sa_map_write, mcs_rx_sc_sa_map, \
msg_rsp) \
M(MCS_FLOWID_ENA_ENTRY, 0xa008, mcs_flowid_ena_entry, mcs_flowid_ena_dis_entry, \
msg_rsp) \
M(MCS_PN_TABLE_WRITE, 0xa009, mcs_pn_table_write, mcs_pn_table_write_req, \
msg_rsp) \
M(MCS_SET_ACTIVE_LMAC, 0xa00a, mcs_set_active_lmac, mcs_set_active_lmac, \
msg_rsp) \
M(MCS_GET_HW_INFO, 0xa00b, mcs_get_hw_info, msg_req, mcs_hw_info) \
M(MCS_GET_FLOWID_STATS, 0xa00c, mcs_get_flowid_stats, mcs_stats_req, \
mcs_flowid_stats) \
M(MCS_GET_SECY_STATS, 0xa00d, mcs_get_secy_stats, mcs_stats_req, \
mcs_secy_stats) \
M(MCS_GET_SC_STATS, 0xa00e, mcs_get_sc_stats, mcs_stats_req, mcs_sc_stats) \
M(MCS_GET_SA_STATS, 0xa00f, mcs_get_sa_stats, mcs_stats_req, mcs_sa_stats) \
M(MCS_GET_PORT_STATS, 0xa010, mcs_get_port_stats, mcs_stats_req, \
mcs_port_stats) \
M(MCS_CLEAR_STATS, 0xa011, mcs_clear_stats, mcs_clear_stats, msg_rsp) \
M(MCS_INTR_CFG, 0xa012, mcs_intr_cfg, mcs_intr_cfg, msg_rsp) \
M(MCS_SET_LMAC_MODE, 0xa013, mcs_set_lmac_mode, mcs_set_lmac_mode, msg_rsp) \
M(MCS_SET_PN_THRESHOLD, 0xa014, mcs_set_pn_threshold, mcs_set_pn_threshold, \
msg_rsp) \
M(MCS_ALLOC_CTRL_PKT_RULE, 0xa015, mcs_alloc_ctrl_pkt_rule, \
mcs_alloc_ctrl_pkt_rule_req, \
mcs_alloc_ctrl_pkt_rule_rsp) \
M(MCS_FREE_CTRL_PKT_RULE, 0xa016, mcs_free_ctrl_pkt_rule, \
mcs_free_ctrl_pkt_rule_req, msg_rsp) \
M(MCS_CTRL_PKT_RULE_WRITE, 0xa017, mcs_ctrl_pkt_rule_write, \
mcs_ctrl_pkt_rule_write_req, msg_rsp) \
M(MCS_PORT_RESET, 0xa018, mcs_port_reset, mcs_port_reset_req, msg_rsp) \
M(MCS_PORT_CFG_SET, 0xa019, mcs_port_cfg_set, mcs_port_cfg_set_req, msg_rsp)\
M(MCS_PORT_CFG_GET, 0xa020, mcs_port_cfg_get, mcs_port_cfg_get_req, \
mcs_port_cfg_get_rsp) \
M(MCS_CUSTOM_TAG_CFG_GET, 0xa021, mcs_custom_tag_cfg_get, \
mcs_custom_tag_cfg_get_req, \
mcs_custom_tag_cfg_get_rsp)
/* Messages initiated by AF (range 0xC00 - 0xDFF) */
/* Messages initiated by AF (range 0xC00 - 0xEFF) */
#define MBOX_UP_CGX_MESSAGES \
M(CGX_LINK_EVENT, 0xC00, cgx_link_event, cgx_link_info_msg, msg_rsp)
#define MBOX_UP_CPT_MESSAGES \
M(CPT_INST_LMTST, 0xD00, cpt_inst_lmtst, cpt_inst_lmtst_req, msg_rsp)
#define MBOX_UP_MCS_MESSAGES \
M(MCS_INTR_NOTIFY, 0xE00, mcs_intr_notify, mcs_intr_info, msg_rsp)
enum {
#define M(_name, _id, _1, _2, _3) MBOX_MSG_ ## _name = _id,
MBOX_MESSAGES
MBOX_UP_CGX_MESSAGES
MBOX_UP_CPT_MESSAGES
MBOX_UP_MCS_MESSAGES
#undef M
};
@@ -1657,4 +1711,415 @@ enum cgx_af_status {
LMAC_AF_ERR_EXACT_MATCH_TBL_LOOK_UP_FAILED = -1110,
};
enum mcs_direction {
MCS_RX,
MCS_TX,
};
enum mcs_rsrc_type {
MCS_RSRC_TYPE_FLOWID,
MCS_RSRC_TYPE_SECY,
MCS_RSRC_TYPE_SC,
MCS_RSRC_TYPE_SA,
};
struct mcs_alloc_rsrc_req {
struct mbox_msghdr hdr;
u8 rsrc_type;
u8 rsrc_cnt; /* Resources count */
u8 mcs_id; /* MCS block ID */
u8 dir; /* Macsec ingress or egress side */
u8 all; /* Allocate all resource type one each */
u64 rsvd;
};
struct mcs_alloc_rsrc_rsp {
struct mbox_msghdr hdr;
u8 flow_ids[128]; /* Index of reserved entries */
u8 secy_ids[128];
u8 sc_ids[128];
u8 sa_ids[256];
u8 rsrc_type;
u8 rsrc_cnt; /* No of entries reserved */
u8 mcs_id;
u8 dir;
u8 all;
u8 rsvd[256]; /* reserved fields for future expansion */
};
struct mcs_free_rsrc_req {
struct mbox_msghdr hdr;
u8 rsrc_id; /* Index of the entry to be freed */
u8 rsrc_type;
u8 mcs_id;
u8 dir;
u8 all; /* Free all the cam resources */
u64 rsvd;
};
struct mcs_flowid_entry_write_req {
struct mbox_msghdr hdr;
u64 data[4];
u64 mask[4];
u64 sci; /* CNF10K-B for tx_secy_mem_map */
u8 flow_id;
u8 secy_id; /* secyid for which flowid is mapped */
u8 sc_id; /* Valid if dir = MCS_TX, SC_CAM id mapped to flowid */
u8 ena; /* Enable tcam entry */
u8 ctrl_pkt;
u8 mcs_id;
u8 dir;
u64 rsvd;
};
struct mcs_secy_plcy_write_req {
struct mbox_msghdr hdr;
u64 plcy;
u8 secy_id;
u8 mcs_id;
u8 dir;
u64 rsvd;
};
/* RX SC_CAM mapping */
struct mcs_rx_sc_cam_write_req {
struct mbox_msghdr hdr;
u64 sci; /* SCI */
u64 secy_id; /* secy index mapped to SC */
u8 sc_id; /* SC CAM entry index */
u8 mcs_id;
u64 rsvd;
};
struct mcs_sa_plcy_write_req {
struct mbox_msghdr hdr;
u64 plcy[2][9]; /* Support 2 SA policy */
u8 sa_index[2];
u8 sa_cnt;
u8 mcs_id;
u8 dir;
u64 rsvd;
};
struct mcs_tx_sc_sa_map {
struct mbox_msghdr hdr;
u8 sa_index0;
u8 sa_index1;
u8 rekey_ena;
u8 sa_index0_vld;
u8 sa_index1_vld;
u8 tx_sa_active;
u64 sectag_sci;
u8 sc_id; /* used as index for SA_MEM_MAP */
u8 mcs_id;
u64 rsvd;
};
struct mcs_rx_sc_sa_map {
struct mbox_msghdr hdr;
u8 sa_index;
u8 sa_in_use;
u8 sc_id;
u8 an; /* value range 0-3, sc_id + an used as index SA_MEM_MAP */
u8 mcs_id;
u64 rsvd;
};
struct mcs_flowid_ena_dis_entry {
struct mbox_msghdr hdr;
u8 flow_id;
u8 ena;
u8 mcs_id;
u8 dir;
u64 rsvd;
};
struct mcs_pn_table_write_req {
struct mbox_msghdr hdr;
u64 next_pn;
u8 pn_id;
u8 mcs_id;
u8 dir;
u64 rsvd;
};
struct mcs_hw_info {
struct mbox_msghdr hdr;
u8 num_mcs_blks; /* Number of MCS blocks */
u8 tcam_entries; /* RX/TX Tcam entries per mcs block */
u8 secy_entries; /* RX/TX SECY entries per mcs block */
u8 sc_entries; /* RX/TX SC CAM entries per mcs block */
u8 sa_entries; /* PN table entries = SA entries */
u64 rsvd[16];
};
struct mcs_set_active_lmac {
struct mbox_msghdr hdr;
u32 lmac_bmap; /* bitmap of active lmac per mcs block */
u8 mcs_id;
u16 chan_base; /* MCS channel base */
u64 rsvd;
};
struct mcs_set_lmac_mode {
struct mbox_msghdr hdr;
u8 mode; /* 1:Bypass 0:Operational */
u8 lmac_id;
u8 mcs_id;
u64 rsvd;
};
struct mcs_port_reset_req {
struct mbox_msghdr hdr;
u8 reset;
u8 mcs_id;
u8 port_id;
u64 rsvd;
};
struct mcs_port_cfg_set_req {
struct mbox_msghdr hdr;
u8 cstm_tag_rel_mode_sel;
u8 custom_hdr_enb;
u8 fifo_skid;
u8 port_mode;
u8 port_id;
u8 mcs_id;
u64 rsvd;
};
struct mcs_port_cfg_get_req {
struct mbox_msghdr hdr;
u8 port_id;
u8 mcs_id;
u64 rsvd;
};
struct mcs_port_cfg_get_rsp {
struct mbox_msghdr hdr;
u8 cstm_tag_rel_mode_sel;
u8 custom_hdr_enb;
u8 fifo_skid;
u8 port_mode;
u8 port_id;
u8 mcs_id;
u64 rsvd;
};
struct mcs_custom_tag_cfg_get_req {
struct mbox_msghdr hdr;
u8 mcs_id;
u8 dir;
u64 rsvd;
};
struct mcs_custom_tag_cfg_get_rsp {
struct mbox_msghdr hdr;
u16 cstm_etype[8];
u8 cstm_indx[8];
u8 cstm_etype_en;
u8 mcs_id;
u8 dir;
u64 rsvd;
};
/* MCS mailbox error codes
* Range 1201 - 1300.
*/
enum mcs_af_status {
MCS_AF_ERR_INVALID_MCSID = -1201,
MCS_AF_ERR_NOT_MAPPED = -1202,
};
struct mcs_set_pn_threshold {
struct mbox_msghdr hdr;
u64 threshold;
u8 xpn; /* '1' for setting xpn threshold */
u8 mcs_id;
u8 dir;
u64 rsvd;
};
enum mcs_ctrl_pkt_rulew_type {
MCS_CTRL_PKT_RULE_TYPE_ETH,
MCS_CTRL_PKT_RULE_TYPE_DA,
MCS_CTRL_PKT_RULE_TYPE_RANGE,
MCS_CTRL_PKT_RULE_TYPE_COMBO,
MCS_CTRL_PKT_RULE_TYPE_MAC,
};
struct mcs_alloc_ctrl_pkt_rule_req {
struct mbox_msghdr hdr;
u8 rule_type;
u8 mcs_id; /* MCS block ID */
u8 dir; /* Macsec ingress or egress side */
u64 rsvd;
};
struct mcs_alloc_ctrl_pkt_rule_rsp {
struct mbox_msghdr hdr;
u8 rule_idx;
u8 rule_type;
u8 mcs_id;
u8 dir;
u64 rsvd;
};
struct mcs_free_ctrl_pkt_rule_req {
struct mbox_msghdr hdr;
u8 rule_idx;
u8 rule_type;
u8 mcs_id;
u8 dir;
u8 all;
u64 rsvd;
};
struct mcs_ctrl_pkt_rule_write_req {
struct mbox_msghdr hdr;
u64 data0;
u64 data1;
u64 data2;
u8 rule_idx;
u8 rule_type;
u8 mcs_id;
u8 dir;
u64 rsvd;
};
struct mcs_stats_req {
struct mbox_msghdr hdr;
u8 id;
u8 mcs_id;
u8 dir;
u64 rsvd;
};
struct mcs_flowid_stats {
struct mbox_msghdr hdr;
u64 tcam_hit_cnt;
u64 rsvd;
};
struct mcs_secy_stats {
struct mbox_msghdr hdr;
u64 ctl_pkt_bcast_cnt;
u64 ctl_pkt_mcast_cnt;
u64 ctl_pkt_ucast_cnt;
u64 ctl_octet_cnt;
u64 unctl_pkt_bcast_cnt;
u64 unctl_pkt_mcast_cnt;
u64 unctl_pkt_ucast_cnt;
u64 unctl_octet_cnt;
/* Valid only for RX */
u64 octet_decrypted_cnt;
u64 octet_validated_cnt;
u64 pkt_port_disabled_cnt;
u64 pkt_badtag_cnt;
u64 pkt_nosa_cnt;
u64 pkt_nosaerror_cnt;
u64 pkt_tagged_ctl_cnt;
u64 pkt_untaged_cnt;
u64 pkt_ctl_cnt; /* CN10K-B */
u64 pkt_notag_cnt; /* CNF10K-B */
/* Valid only for TX */
u64 octet_encrypted_cnt;
u64 octet_protected_cnt;
u64 pkt_noactivesa_cnt;
u64 pkt_toolong_cnt;
u64 pkt_untagged_cnt;
u64 rsvd[4];
};
struct mcs_port_stats {
struct mbox_msghdr hdr;
u64 tcam_miss_cnt;
u64 parser_err_cnt;
u64 preempt_err_cnt; /* CNF10K-B */
u64 sectag_insert_err_cnt;
u64 rsvd[4];
};
/* Only for CN10K-B */
struct mcs_sa_stats {
struct mbox_msghdr hdr;
/* RX */
u64 pkt_invalid_cnt;
u64 pkt_nosaerror_cnt;
u64 pkt_notvalid_cnt;
u64 pkt_ok_cnt;
u64 pkt_nosa_cnt;
/* TX */
u64 pkt_encrypt_cnt;
u64 pkt_protected_cnt;
u64 rsvd[4];
};
struct mcs_sc_stats {
struct mbox_msghdr hdr;
/* RX */
u64 hit_cnt;
u64 pkt_invalid_cnt;
u64 pkt_late_cnt;
u64 pkt_notvalid_cnt;
u64 pkt_unchecked_cnt;
u64 pkt_delay_cnt; /* CNF10K-B */
u64 pkt_ok_cnt; /* CNF10K-B */
u64 octet_decrypt_cnt; /* CN10K-B */
u64 octet_validate_cnt; /* CN10K-B */
/* TX */
u64 pkt_encrypt_cnt;
u64 pkt_protected_cnt;
u64 octet_encrypt_cnt; /* CN10K-B */
u64 octet_protected_cnt; /* CN10K-B */
u64 rsvd[4];
};
struct mcs_clear_stats {
struct mbox_msghdr hdr;
#define MCS_FLOWID_STATS 0
#define MCS_SECY_STATS 1
#define MCS_SC_STATS 2
#define MCS_SA_STATS 3
#define MCS_PORT_STATS 4
u8 type; /* FLOWID, SECY, SC, SA, PORT */
u8 id; /* type = PORT, If id = FF(invalid) port no is derived from pcifunc */
u8 mcs_id;
u8 dir;
u8 all; /* All resources stats mapped to PF are cleared */
};
struct mcs_intr_cfg {
struct mbox_msghdr hdr;
#define MCS_CPM_RX_SECTAG_V_EQ1_INT BIT_ULL(0)
#define MCS_CPM_RX_SECTAG_E_EQ0_C_EQ1_INT BIT_ULL(1)
#define MCS_CPM_RX_SECTAG_SL_GTE48_INT BIT_ULL(2)
#define MCS_CPM_RX_SECTAG_ES_EQ1_SC_EQ1_INT BIT_ULL(3)
#define MCS_CPM_RX_SECTAG_SC_EQ1_SCB_EQ1_INT BIT_ULL(4)
#define MCS_CPM_RX_PACKET_XPN_EQ0_INT BIT_ULL(5)
#define MCS_CPM_RX_PN_THRESH_REACHED_INT BIT_ULL(6)
#define MCS_CPM_TX_PACKET_XPN_EQ0_INT BIT_ULL(7)
#define MCS_CPM_TX_PN_THRESH_REACHED_INT BIT_ULL(8)
#define MCS_CPM_TX_SA_NOT_VALID_INT BIT_ULL(9)
#define MCS_BBE_RX_DFIFO_OVERFLOW_INT BIT_ULL(10)
#define MCS_BBE_RX_PLFIFO_OVERFLOW_INT BIT_ULL(11)
#define MCS_BBE_TX_DFIFO_OVERFLOW_INT BIT_ULL(12)
#define MCS_BBE_TX_PLFIFO_OVERFLOW_INT BIT_ULL(13)
#define MCS_PAB_RX_CHAN_OVERFLOW_INT BIT_ULL(14)
#define MCS_PAB_TX_CHAN_OVERFLOW_INT BIT_ULL(15)
u64 intr_mask; /* Interrupt enable mask */
u8 mcs_id;
u8 lmac_id;
u64 rsvd;
};
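/* Event payload delivered to a PF/VF via the MCS_INTR_NOTIFY up-mbox message */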
struct mcs_intr_info {
struct mbox_msghdr hdr;
u64 intr_mask;
int sa_id;
u8 mcs_id;
u8 lmac_id;
u64 rsvd;
};
#endif /* MBOX_H */

File diff suppressed because it is too large


@@ -0,0 +1,246 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Marvell CN10K MCS driver
*
* Copyright (C) 2022 Marvell.
*/
#ifndef MCS_H
#define MCS_H
#include <linux/bits.h>
#include "rvu.h"
#define PCI_DEVID_CN10K_MCS 0xA096
#define MCSX_LINK_LMAC_RANGE_MASK GENMASK_ULL(19, 16)
#define MCSX_LINK_LMAC_BASE_MASK GENMASK_ULL(11, 0)
#define MCS_ID_MASK 0x7
#define MCS_MAX_PFS 128
#define MCS_PORT_MODE_MASK 0x3
#define MCS_PORT_FIFO_SKID_MASK 0x3F
#define MCS_MAX_CUSTOM_TAGS 0x8
#define MCS_CTRLPKT_ETYPE_RULE_MAX 8
#define MCS_CTRLPKT_DA_RULE_MAX 8
#define MCS_CTRLPKT_DA_RANGE_RULE_MAX 4
#define MCS_CTRLPKT_COMBO_RULE_MAX 4
#define MCS_CTRLPKT_MAC_RULE_MAX 1
#define MCS_MAX_CTRLPKT_RULES (MCS_CTRLPKT_ETYPE_RULE_MAX + \
MCS_CTRLPKT_DA_RULE_MAX + \
MCS_CTRLPKT_DA_RANGE_RULE_MAX + \
MCS_CTRLPKT_COMBO_RULE_MAX + \
MCS_CTRLPKT_MAC_RULE_MAX)
#define MCS_CTRLPKT_ETYPE_RULE_OFFSET 0
#define MCS_CTRLPKT_DA_RULE_OFFSET 8
#define MCS_CTRLPKT_DA_RANGE_RULE_OFFSET 16
#define MCS_CTRLPKT_COMBO_RULE_OFFSET 20
#define MCS_CTRLPKT_MAC_EN_RULE_OFFSET 24
/* Reserved resources for default bypass entry */
#define MCS_RSRC_RSVD_CNT 1
/* MCS Interrupt Vector Enumeration */
enum mcs_int_vec_e {
MCS_INT_VEC_MIL_RX_GBL = 0x0,
MCS_INT_VEC_MIL_RX_LMACX = 0x1,
MCS_INT_VEC_MIL_TX_LMACX = 0x5,
MCS_INT_VEC_HIL_RX_GBL = 0x9,
MCS_INT_VEC_HIL_RX_LMACX = 0xa,
MCS_INT_VEC_HIL_TX_GBL = 0xe,
MCS_INT_VEC_HIL_TX_LMACX = 0xf,
MCS_INT_VEC_IP = 0x13,
MCS_INT_VEC_CNT = 0x14,
};
#define MCS_MAX_BBE_INT 8ULL
#define MCS_BBE_INT_MASK 0xFFULL
#define MCS_MAX_PAB_INT 4ULL
#define MCS_PAB_INT_MASK 0xFULL
#define MCS_BBE_RX_INT_ENA BIT_ULL(0)
#define MCS_BBE_TX_INT_ENA BIT_ULL(1)
#define MCS_CPM_RX_INT_ENA BIT_ULL(2)
#define MCS_CPM_TX_INT_ENA BIT_ULL(3)
#define MCS_PAB_RX_INT_ENA BIT_ULL(4)
#define MCS_PAB_TX_INT_ENA BIT_ULL(5)
#define MCS_CPM_TX_INT_PACKET_XPN_EQ0 BIT_ULL(0)
#define MCS_CPM_TX_INT_PN_THRESH_REACHED BIT_ULL(1)
#define MCS_CPM_TX_INT_SA_NOT_VALID BIT_ULL(2)
#define MCS_CPM_RX_INT_SECTAG_V_EQ1 BIT_ULL(0)
#define MCS_CPM_RX_INT_SECTAG_E_EQ0_C_EQ1 BIT_ULL(1)
#define MCS_CPM_RX_INT_SL_GTE48 BIT_ULL(2)
#define MCS_CPM_RX_INT_ES_EQ1_SC_EQ1 BIT_ULL(3)
#define MCS_CPM_RX_INT_SC_EQ1_SCB_EQ1 BIT_ULL(4)
#define MCS_CPM_RX_INT_PACKET_XPN_EQ0 BIT_ULL(5)
#define MCS_CPM_RX_INT_PN_THRESH_REACHED BIT_ULL(6)
#define MCS_CPM_RX_INT_ALL (MCS_CPM_RX_INT_SECTAG_V_EQ1 | \
MCS_CPM_RX_INT_SECTAG_E_EQ0_C_EQ1 | \
MCS_CPM_RX_INT_SL_GTE48 | \
MCS_CPM_RX_INT_ES_EQ1_SC_EQ1 | \
MCS_CPM_RX_INT_SC_EQ1_SCB_EQ1 | \
MCS_CPM_RX_INT_PACKET_XPN_EQ0 | \
MCS_CPM_RX_INT_PN_THRESH_REACHED)
struct mcs_pfvf {
u64 intr_mask; /* Enabled Interrupt mask */
};
struct mcs_intr_event {
u16 pcifunc;
u64 intr_mask;
u64 sa_id;
u8 mcs_id;
u8 lmac_id;
};
struct mcs_intrq_entry {
struct list_head node;
struct mcs_intr_event intr_event;
};
struct secy_mem_map {
u8 flow_id;
u8 secy;
u8 ctrl_pkt;
u8 sc;
u64 sci;
};
struct mcs_rsrc_map {
u16 *flowid2pf_map;
u16 *secy2pf_map;
u16 *sc2pf_map;
u16 *sa2pf_map;
u16 *flowid2secy_map; /* bitmap flowid mapped to secy*/
u16 *ctrlpktrule2pf_map;
struct rsrc_bmap flow_ids;
struct rsrc_bmap secy;
struct rsrc_bmap sc;
struct rsrc_bmap sa;
struct rsrc_bmap ctrlpktrule;
};
struct hwinfo {
u8 tcam_entries;
u8 secy_entries;
u8 sc_entries;
u16 sa_entries;
u8 mcs_x2p_intf;
u8 lmac_cnt;
u8 mcs_blks;
unsigned long lmac_bmap; /* bitmap of enabled mcs lmac */
};
struct mcs {
void __iomem *reg_base;
struct pci_dev *pdev;
struct device *dev;
struct hwinfo *hw;
struct mcs_rsrc_map tx;
struct mcs_rsrc_map rx;
u16 pf_map[MCS_MAX_PFS]; /* List of PCIFUNC mapped to MCS */
u8 mcs_id;
struct mcs_ops *mcs_ops;
struct list_head mcs_list;
/* Lock for mcs stats */
struct mutex stats_lock;
struct mcs_pfvf *pf;
struct mcs_pfvf *vf;
u16 num_vec;
void *rvu;
u16 *tx_sa_active;
};
struct mcs_ops {
void (*mcs_set_hw_capabilities)(struct mcs *mcs);
void (*mcs_parser_cfg)(struct mcs *mcs);
void (*mcs_tx_sa_mem_map_write)(struct mcs *mcs, struct mcs_tx_sc_sa_map *map);
void (*mcs_rx_sa_mem_map_write)(struct mcs *mcs, struct mcs_rx_sc_sa_map *map);
void (*mcs_flowid_secy_map)(struct mcs *mcs, struct secy_mem_map *map, int dir);
};
extern struct pci_driver mcs_driver;
static inline void mcs_reg_write(struct mcs *mcs, u64 offset, u64 val)
{
writeq(val, mcs->reg_base + offset);
}
static inline u64 mcs_reg_read(struct mcs *mcs, u64 offset)
{
return readq(mcs->reg_base + offset);
}
/* MCS APIs */
struct mcs *mcs_get_pdata(int mcs_id);
int mcs_get_blkcnt(void);
int mcs_set_lmac_channels(int mcs_id, u16 base);
int mcs_alloc_rsrc(struct rsrc_bmap *rsrc, u16 *pf_map, u16 pcifunc);
int mcs_free_rsrc(struct rsrc_bmap *rsrc, u16 *pf_map, int rsrc_id, u16 pcifunc);
int mcs_alloc_all_rsrc(struct mcs *mcs, u8 *flowid, u8 *secy_id,
u8 *sc_id, u8 *sa1_id, u8 *sa2_id, u16 pcifunc, int dir);
int mcs_free_all_rsrc(struct mcs *mcs, int dir, u16 pcifunc);
void mcs_clear_secy_plcy(struct mcs *mcs, int secy_id, int dir);
void mcs_ena_dis_flowid_entry(struct mcs *mcs, int id, int dir, int ena);
void mcs_ena_dis_sc_cam_entry(struct mcs *mcs, int id, int ena);
void mcs_flowid_entry_write(struct mcs *mcs, u64 *data, u64 *mask, int id, int dir);
void mcs_secy_plcy_write(struct mcs *mcs, u64 plcy, int id, int dir);
void mcs_rx_sc_cam_write(struct mcs *mcs, u64 sci, u64 secy, int sc_id);
void mcs_sa_plcy_write(struct mcs *mcs, u64 *plcy, int sa, int dir);
void mcs_map_sc_to_sa(struct mcs *mcs, u64 *sa_map, int sc, int dir);
void mcs_pn_table_write(struct mcs *mcs, u8 pn_id, u64 next_pn, u8 dir);
void mcs_tx_sa_mem_map_write(struct mcs *mcs, struct mcs_tx_sc_sa_map *map);
void mcs_flowid_secy_map(struct mcs *mcs, struct secy_mem_map *map, int dir);
void mcs_rx_sa_mem_map_write(struct mcs *mcs, struct mcs_rx_sc_sa_map *map);
void mcs_pn_threshold_set(struct mcs *mcs, struct mcs_set_pn_threshold *pn);
int mcs_install_flowid_bypass_entry(struct mcs *mcs);
void mcs_set_lmac_mode(struct mcs *mcs, int lmac_id, u8 mode);
void mcs_reset_port(struct mcs *mcs, u8 port_id, u8 reset);
void mcs_set_port_cfg(struct mcs *mcs, struct mcs_port_cfg_set_req *req);
void mcs_get_port_cfg(struct mcs *mcs, struct mcs_port_cfg_get_req *req,
struct mcs_port_cfg_get_rsp *rsp);
void mcs_get_custom_tag_cfg(struct mcs *mcs, struct mcs_custom_tag_cfg_get_req *req,
struct mcs_custom_tag_cfg_get_rsp *rsp);
int mcs_alloc_ctrlpktrule(struct rsrc_bmap *rsrc, u16 *pf_map, u16 offset, u16 pcifunc);
int mcs_free_ctrlpktrule(struct mcs *mcs, struct mcs_free_ctrl_pkt_rule_req *req);
int mcs_ctrlpktrule_write(struct mcs *mcs, struct mcs_ctrl_pkt_rule_write_req *req);
/* CN10K-B APIs */
void cn10kb_mcs_set_hw_capabilities(struct mcs *mcs);
void cn10kb_mcs_tx_sa_mem_map_write(struct mcs *mcs, struct mcs_tx_sc_sa_map *map);
void cn10kb_mcs_flowid_secy_map(struct mcs *mcs, struct secy_mem_map *map, int dir);
void cn10kb_mcs_rx_sa_mem_map_write(struct mcs *mcs, struct mcs_rx_sc_sa_map *map);
void cn10kb_mcs_parser_cfg(struct mcs *mcs);
/* CNF10K-B APIs */
struct mcs_ops *cnf10kb_get_mac_ops(void);
void cnf10kb_mcs_set_hw_capabilities(struct mcs *mcs);
void cnf10kb_mcs_tx_sa_mem_map_write(struct mcs *mcs, struct mcs_tx_sc_sa_map *map);
void cnf10kb_mcs_flowid_secy_map(struct mcs *mcs, struct secy_mem_map *map, int dir);
void cnf10kb_mcs_rx_sa_mem_map_write(struct mcs *mcs, struct mcs_rx_sc_sa_map *map);
void cnf10kb_mcs_parser_cfg(struct mcs *mcs);
void cnf10kb_mcs_tx_pn_thresh_reached_handler(struct mcs *mcs);
void cnf10kb_mcs_tx_pn_wrapped_handler(struct mcs *mcs);
/* Stats APIs */
void mcs_get_sc_stats(struct mcs *mcs, struct mcs_sc_stats *stats, int id, int dir);
void mcs_get_sa_stats(struct mcs *mcs, struct mcs_sa_stats *stats, int id, int dir);
void mcs_get_port_stats(struct mcs *mcs, struct mcs_port_stats *stats, int id, int dir);
void mcs_get_flowid_stats(struct mcs *mcs, struct mcs_flowid_stats *stats, int id, int dir);
void mcs_get_rx_secy_stats(struct mcs *mcs, struct mcs_secy_stats *stats, int id);
void mcs_get_tx_secy_stats(struct mcs *mcs, struct mcs_secy_stats *stats, int id);
void mcs_clear_stats(struct mcs *mcs, u8 type, u8 id, int dir);
int mcs_clear_all_stats(struct mcs *mcs, u16 pcifunc, int dir);
int mcs_set_force_clk_en(struct mcs *mcs, bool set);
int mcs_add_intr_wq_entry(struct mcs *mcs, struct mcs_intr_event *event);
#endif /* MCS_H */


@@ -0,0 +1,214 @@
// SPDX-License-Identifier: GPL-2.0
/* Marvell MCS driver
*
* Copyright (C) 2022 Marvell.
*/
#include "mcs.h"
#include "mcs_reg.h"
static struct mcs_ops cnf10kb_mcs_ops = {
.mcs_set_hw_capabilities = cnf10kb_mcs_set_hw_capabilities,
.mcs_parser_cfg = cnf10kb_mcs_parser_cfg,
.mcs_tx_sa_mem_map_write = cnf10kb_mcs_tx_sa_mem_map_write,
.mcs_rx_sa_mem_map_write = cnf10kb_mcs_rx_sa_mem_map_write,
.mcs_flowid_secy_map = cnf10kb_mcs_flowid_secy_map,
};
struct mcs_ops *cnf10kb_get_mac_ops(void)
{
return &cnf10kb_mcs_ops;
}
void cnf10kb_mcs_set_hw_capabilities(struct mcs *mcs)
{
struct hwinfo *hw = mcs->hw;
hw->tcam_entries = 64; /* TCAM entries */
hw->secy_entries = 64; /* SecY entries */
hw->sc_entries = 64; /* SC CAM entries */
hw->sa_entries = 128; /* SA entries */
hw->lmac_cnt = 4; /* lmacs/ports per mcs block */
hw->mcs_x2p_intf = 1; /* x2p calibration intf */
hw->mcs_blks = 7; /* MCS blocks */
}
void cnf10kb_mcs_parser_cfg(struct mcs *mcs)
{
u64 reg, val;
/* VLAN Ctag */
val = (0x8100ull & 0xFFFF) | BIT_ULL(20) | BIT_ULL(22);
reg = MCSX_PEX_RX_SLAVE_CUSTOM_TAGX(0);
mcs_reg_write(mcs, reg, val);
reg = MCSX_PEX_TX_SLAVE_CUSTOM_TAGX(0);
mcs_reg_write(mcs, reg, val);
/* VLAN STag */
val = (0x88a8ull & 0xFFFF) | BIT_ULL(20) | BIT_ULL(23);
/* RX */
reg = MCSX_PEX_RX_SLAVE_CUSTOM_TAGX(1);
mcs_reg_write(mcs, reg, val);
/* TX */
reg = MCSX_PEX_TX_SLAVE_CUSTOM_TAGX(1);
mcs_reg_write(mcs, reg, val);
/* Enable custom tags 0 and 1 and sectag */
val = BIT_ULL(0) | BIT_ULL(1) | BIT_ULL(12);
reg = MCSX_PEX_RX_SLAVE_ETYPE_ENABLE;
mcs_reg_write(mcs, reg, val);
reg = MCSX_PEX_TX_SLAVE_ETYPE_ENABLE;
mcs_reg_write(mcs, reg, val);
}
void cnf10kb_mcs_flowid_secy_map(struct mcs *mcs, struct secy_mem_map *map, int dir)
{
u64 reg, val;
val = (map->secy & 0x3F) | (map->ctrl_pkt & 0x1) << 6;
if (dir == MCS_RX) {
reg = MCSX_CPM_RX_SLAVE_SECY_MAP_MEMX(map->flow_id);
} else {
reg = MCSX_CPM_TX_SLAVE_SECY_MAP_MEM_0X(map->flow_id);
mcs_reg_write(mcs, reg, map->sci);
val |= (map->sc & 0x3F) << 7;
reg = MCSX_CPM_TX_SLAVE_SECY_MAP_MEM_1X(map->flow_id);
}
mcs_reg_write(mcs, reg, val);
}
void cnf10kb_mcs_tx_sa_mem_map_write(struct mcs *mcs, struct mcs_tx_sc_sa_map *map)
{
u64 reg, val;
val = (map->sa_index0 & 0x7F) | (map->sa_index1 & 0x7F) << 7;
reg = MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(map->sc_id);
mcs_reg_write(mcs, reg, val);
reg = MCSX_CPM_TX_SLAVE_AUTO_REKEY_ENABLE_0;
val = mcs_reg_read(mcs, reg);
if (map->rekey_ena)
val |= BIT_ULL(map->sc_id);
else
val &= ~BIT_ULL(map->sc_id);
mcs_reg_write(mcs, reg, val);
mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_SA_INDEX0_VLDX(map->sc_id), map->sa_index0_vld);
mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_SA_INDEX1_VLDX(map->sc_id), map->sa_index1_vld);
mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_TX_SA_ACTIVEX(map->sc_id), map->tx_sa_active);
}
void cnf10kb_mcs_rx_sa_mem_map_write(struct mcs *mcs, struct mcs_rx_sc_sa_map *map)
{
u64 val, reg;
val = (map->sa_index & 0x7F) | (map->sa_in_use << 7);
reg = MCSX_CPM_RX_SLAVE_SA_MAP_MEMX((4 * map->sc_id) + map->an);
mcs_reg_write(mcs, reg, val);
}
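/* On CNF10K-B, MCSX_MIL_GLOBAL.FORCE_CLK_EN_IP must be set before reading
 * statistics registers and cleared afterwards to get accurate counts; this
 * helper toggles that force-clock bit.
 */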
int mcs_set_force_clk_en(struct mcs *mcs, bool set)
{
unsigned long timeout = jiffies + usecs_to_jiffies(2000);
u64 val;
val = mcs_reg_read(mcs, MCSX_MIL_GLOBAL);
if (set) {
val |= BIT_ULL(4);
mcs_reg_write(mcs, MCSX_MIL_GLOBAL, val);
/* Poll till mcsx_mil_ip_gbl_status.mcs_ip_stats_ready value is 1 */
while (!(mcs_reg_read(mcs, MCSX_MIL_IP_GBL_STATUS) & BIT_ULL(0))) {
if (time_after(jiffies, timeout)) {
dev_err(mcs->dev, "MCS set force clk enable failed\n");
break;
}
}
} else {
val &= ~BIT_ULL(4);
mcs_reg_write(mcs, MCSX_MIL_GLOBAL, val);
}
return 0;
}
/* TX SA interrupt is raised only if autorekey is enabled.
 * The MCS_CPM_TX_SLAVE_SA_MAP_MEM_0X[sc].tx_sa_active bit gets toggled when
 * one of the two SAs mapped to the SC expires. tx_sa_active=0 implies the
 * SA at SA_index1 expired; otherwise the SA at SA_index0 expired.
 */
void cnf10kb_mcs_tx_pn_thresh_reached_handler(struct mcs *mcs)
{
struct mcs_intr_event event;
struct rsrc_bmap *sc_bmap;
unsigned long rekey_ena;
u64 val, sa_status;
int sc;
sc_bmap = &mcs->tx.sc;
event.mcs_id = mcs->mcs_id;
event.intr_mask = MCS_CPM_TX_PN_THRESH_REACHED_INT;
rekey_ena = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_AUTO_REKEY_ENABLE_0);
for_each_set_bit(sc, sc_bmap->bmap, mcs->hw->sc_entries) {
/* Skip SCs on which auto rekey is not enabled */
if (!test_bit(sc, &rekey_ena))
continue;
sa_status = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_TX_SA_ACTIVEX(sc));
/* Check if tx_sa_active status had changed */
if (sa_status == mcs->tx_sa_active[sc])
continue;
/* If tx_sa_active is now 1, SA_index0 expired; otherwise SA_index1 expired */
val = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(sc));
if (sa_status)
event.sa_id = val & 0x7F;
else
event.sa_id = (val >> 7) & 0x7F;
event.pcifunc = mcs->tx.sa2pf_map[event.sa_id];
mcs_add_intr_wq_entry(mcs, &event);
}
}
void cnf10kb_mcs_tx_pn_wrapped_handler(struct mcs *mcs)
{
struct mcs_intr_event event = { 0 };
struct rsrc_bmap *sc_bmap;
u64 val;
int sc;
sc_bmap = &mcs->tx.sc;
event.mcs_id = mcs->mcs_id;
event.intr_mask = MCS_CPM_TX_PACKET_XPN_EQ0_INT;
for_each_set_bit(sc, sc_bmap->bmap, mcs->hw->sc_entries) {
val = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(sc));
if (mcs->tx_sa_active[sc])
/* SA_index1 was used and got expired */
event.sa_id = (val >> 7) & 0x7F;
else
/* SA_index0 was used and got expired */
event.sa_id = val & 0x7F;
event.pcifunc = mcs->tx.sa2pf_map[event.sa_id];
mcs_add_intr_wq_entry(mcs, &event);
}
}

File diff suppressed because it is too large


@@ -0,0 +1,889 @@
// SPDX-License-Identifier: GPL-2.0
/* Marvell CN10K MCS driver
*
* Copyright (C) 2022 Marvell.
*/
#include <linux/types.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/pci.h>
#include "mcs.h"
#include "rvu.h"
#include "lmac_common.h"
#define M(_name, _id, _fn_name, _req_type, _rsp_type) \
static struct _req_type __maybe_unused \
*otx2_mbox_alloc_msg_ ## _fn_name(struct rvu *rvu, int devid) \
{ \
struct _req_type *req; \
\
req = (struct _req_type *)otx2_mbox_alloc_msg_rsp( \
&rvu->afpf_wq_info.mbox_up, devid, sizeof(struct _req_type), \
sizeof(struct _rsp_type)); \
if (!req) \
return NULL; \
req->hdr.sig = OTX2_MBOX_REQ_SIG; \
req->hdr.id = _id; \
return req; \
}
MBOX_UP_MCS_MESSAGES
#undef M
int rvu_mbox_handler_mcs_set_lmac_mode(struct rvu *rvu,
struct mcs_set_lmac_mode *req,
struct msg_rsp *rsp)
{
struct mcs *mcs;
if (req->mcs_id >= rvu->mcs_blk_cnt)
return MCS_AF_ERR_INVALID_MCSID;
mcs = mcs_get_pdata(req->mcs_id);
if (BIT_ULL(req->lmac_id) & mcs->hw->lmac_bmap)
mcs_set_lmac_mode(mcs, req->lmac_id, req->mode);
return 0;
}
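/* Queue an interrupt event for deferred handling; the mcs_intr_wq work
 * (mcs_intr_handler_task) later notifies the owning PF/VF through an
 * MCS_INTR_NOTIFY up-mbox message.
 */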
int mcs_add_intr_wq_entry(struct mcs *mcs, struct mcs_intr_event *event)
{
struct mcs_intrq_entry *qentry;
u16 pcifunc = event->pcifunc;
struct rvu *rvu = mcs->rvu;
struct mcs_pfvf *pfvf;
/* Check if it is PF or VF */
if (pcifunc & RVU_PFVF_FUNC_MASK)
pfvf = &mcs->vf[rvu_get_hwvf(rvu, pcifunc)];
else
pfvf = &mcs->pf[rvu_get_pf(pcifunc)];
event->intr_mask &= pfvf->intr_mask;
/* Check PF/VF interrupt notification is enabled */
if (!(pfvf->intr_mask && event->intr_mask))
return 0;
qentry = kmalloc(sizeof(*qentry), GFP_ATOMIC);
if (!qentry)
return -ENOMEM;
qentry->intr_event = *event;
spin_lock(&rvu->mcs_intrq_lock);
list_add_tail(&qentry->node, &rvu->mcs_intrq_head);
spin_unlock(&rvu->mcs_intrq_lock);
queue_work(rvu->mcs_intr_wq, &rvu->mcs_intr_work);
return 0;
}
static int mcs_notify_pfvf(struct mcs_intr_event *event, struct rvu *rvu)
{
struct mcs_intr_info *req;
int err, pf;
pf = rvu_get_pf(event->pcifunc);
req = otx2_mbox_alloc_msg_mcs_intr_notify(rvu, pf);
if (!req)
return -ENOMEM;
req->mcs_id = event->mcs_id;
req->intr_mask = event->intr_mask;
req->sa_id = event->sa_id;
req->hdr.pcifunc = event->pcifunc;
req->lmac_id = event->lmac_id;
otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, pf);
err = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pf);
if (err)
dev_warn(rvu->dev, "MCS notification to pf %d failed\n", pf);
return 0;
}
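/* Work handler: drain the MCS interrupt event queue and notify each PF/VF */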
static void mcs_intr_handler_task(struct work_struct *work)
{
struct rvu *rvu = container_of(work, struct rvu, mcs_intr_work);
struct mcs_intrq_entry *qentry;
struct mcs_intr_event *event;
unsigned long flags;
do {
spin_lock_irqsave(&rvu->mcs_intrq_lock, flags);
qentry = list_first_entry_or_null(&rvu->mcs_intrq_head,
struct mcs_intrq_entry,
node);
if (qentry)
list_del(&qentry->node);
spin_unlock_irqrestore(&rvu->mcs_intrq_lock, flags);
if (!qentry)
break; /* nothing more to process */
event = &qentry->intr_event;
mcs_notify_pfvf(event, rvu);
kfree(qentry);
} while (1);
}
int rvu_mbox_handler_mcs_intr_cfg(struct rvu *rvu,
struct mcs_intr_cfg *req,
struct msg_rsp *rsp)
{
u16 pcifunc = req->hdr.pcifunc;
struct mcs_pfvf *pfvf;
struct mcs *mcs;
if (req->mcs_id >= rvu->mcs_blk_cnt)
return MCS_AF_ERR_INVALID_MCSID;
mcs = mcs_get_pdata(req->mcs_id);
/* Check if it is PF or VF */
if (pcifunc & RVU_PFVF_FUNC_MASK)
pfvf = &mcs->vf[rvu_get_hwvf(rvu, pcifunc)];
else
pfvf = &mcs->pf[rvu_get_pf(pcifunc)];
mcs->pf_map[0] = pcifunc;
pfvf->intr_mask = req->intr_mask;
return 0;
}
int rvu_mbox_handler_mcs_get_hw_info(struct rvu *rvu,
struct msg_req *req,
struct mcs_hw_info *rsp)
{
struct mcs *mcs;
if (!rvu->mcs_blk_cnt)
return MCS_AF_ERR_NOT_MAPPED;
/* MCS resources are same across all blocks */
mcs = mcs_get_pdata(0);
rsp->num_mcs_blks = rvu->mcs_blk_cnt;
rsp->tcam_entries = mcs->hw->tcam_entries;
rsp->secy_entries = mcs->hw->secy_entries;
rsp->sc_entries = mcs->hw->sc_entries;
rsp->sa_entries = mcs->hw->sa_entries;
return 0;
}
int rvu_mbox_handler_mcs_port_reset(struct rvu *rvu, struct mcs_port_reset_req *req,
struct msg_rsp *rsp)
{
struct mcs *mcs;
if (req->mcs_id >= rvu->mcs_blk_cnt)
return MCS_AF_ERR_INVALID_MCSID;
mcs = mcs_get_pdata(req->mcs_id);
mcs_reset_port(mcs, req->port_id, req->reset);
return 0;
}
int rvu_mbox_handler_mcs_clear_stats(struct rvu *rvu,
struct mcs_clear_stats *req,
struct msg_rsp *rsp)
{
u16 pcifunc = req->hdr.pcifunc;
struct mcs *mcs;
if (req->mcs_id >= rvu->mcs_blk_cnt)
return MCS_AF_ERR_INVALID_MCSID;
mcs = mcs_get_pdata(req->mcs_id);
mutex_lock(&mcs->stats_lock);
if (req->all)
mcs_clear_all_stats(mcs, pcifunc, req->dir);
else
mcs_clear_stats(mcs, req->type, req->id, req->dir);
mutex_unlock(&mcs->stats_lock);
return 0;
}
int rvu_mbox_handler_mcs_get_flowid_stats(struct rvu *rvu,
struct mcs_stats_req *req,
struct mcs_flowid_stats *rsp)
{
struct mcs *mcs;
if (req->mcs_id >= rvu->mcs_blk_cnt)
return MCS_AF_ERR_INVALID_MCSID;
mcs = mcs_get_pdata(req->mcs_id);
/* In CNF10K-B, before reading the statistics,
* MCSX_MIL_GLOBAL.FORCE_CLK_EN_IP needs to be set
* to get accurate statistics
*/
if (mcs->hw->mcs_blks > 1)
mcs_set_force_clk_en(mcs, true);
mutex_lock(&mcs->stats_lock);
mcs_get_flowid_stats(mcs, rsp, req->id, req->dir);
mutex_unlock(&mcs->stats_lock);
/* Clear MCSX_MIL_GLOBAL.FORCE_CLK_EN_IP after reading
* the statistics
*/
if (mcs->hw->mcs_blks > 1)
mcs_set_force_clk_en(mcs, false);
return 0;
}
int rvu_mbox_handler_mcs_get_secy_stats(struct rvu *rvu,
struct mcs_stats_req *req,
struct mcs_secy_stats *rsp)
{
struct mcs *mcs;
if (req->mcs_id >= rvu->mcs_blk_cnt)
return MCS_AF_ERR_INVALID_MCSID;
mcs = mcs_get_pdata(req->mcs_id);
if (mcs->hw->mcs_blks > 1)
mcs_set_force_clk_en(mcs, true);
mutex_lock(&mcs->stats_lock);
if (req->dir == MCS_RX)
mcs_get_rx_secy_stats(mcs, rsp, req->id);
else
mcs_get_tx_secy_stats(mcs, rsp, req->id);
mutex_unlock(&mcs->stats_lock);
if (mcs->hw->mcs_blks > 1)
mcs_set_force_clk_en(mcs, false);
return 0;
}
int rvu_mbox_handler_mcs_get_sc_stats(struct rvu *rvu,
struct mcs_stats_req *req,
struct mcs_sc_stats *rsp)
{
struct mcs *mcs;
if (req->mcs_id >= rvu->mcs_blk_cnt)
return MCS_AF_ERR_INVALID_MCSID;
mcs = mcs_get_pdata(req->mcs_id);
if (mcs->hw->mcs_blks > 1)
mcs_set_force_clk_en(mcs, true);
mutex_lock(&mcs->stats_lock);
mcs_get_sc_stats(mcs, rsp, req->id, req->dir);
mutex_unlock(&mcs->stats_lock);
if (mcs->hw->mcs_blks > 1)
mcs_set_force_clk_en(mcs, false);
return 0;
}
int rvu_mbox_handler_mcs_get_sa_stats(struct rvu *rvu,
struct mcs_stats_req *req,
struct mcs_sa_stats *rsp)
{
struct mcs *mcs;
if (req->mcs_id >= rvu->mcs_blk_cnt)
return MCS_AF_ERR_INVALID_MCSID;
mcs = mcs_get_pdata(req->mcs_id);
if (mcs->hw->mcs_blks > 1)
mcs_set_force_clk_en(mcs, true);
mutex_lock(&mcs->stats_lock);
mcs_get_sa_stats(mcs, rsp, req->id, req->dir);
mutex_unlock(&mcs->stats_lock);
if (mcs->hw->mcs_blks > 1)
mcs_set_force_clk_en(mcs, false);
return 0;
}
int rvu_mbox_handler_mcs_get_port_stats(struct rvu *rvu,
struct mcs_stats_req *req,
struct mcs_port_stats *rsp)
{
struct mcs *mcs;
if (req->mcs_id >= rvu->mcs_blk_cnt)
return MCS_AF_ERR_INVALID_MCSID;
mcs = mcs_get_pdata(req->mcs_id);
if (mcs->hw->mcs_blks > 1)
mcs_set_force_clk_en(mcs, true);
mutex_lock(&mcs->stats_lock);
mcs_get_port_stats(mcs, rsp, req->id, req->dir);
mutex_unlock(&mcs->stats_lock);
if (mcs->hw->mcs_blks > 1)
mcs_set_force_clk_en(mcs, false);
return 0;
}
int rvu_mbox_handler_mcs_set_active_lmac(struct rvu *rvu,
struct mcs_set_active_lmac *req,
struct msg_rsp *rsp)
{
struct mcs *mcs;
if (req->mcs_id >= rvu->mcs_blk_cnt)
return MCS_AF_ERR_INVALID_MCSID;
mcs = mcs_get_pdata(req->mcs_id);
if (!mcs)
return MCS_AF_ERR_NOT_MAPPED;
mcs->hw->lmac_bmap = req->lmac_bmap;
mcs_set_lmac_channels(req->mcs_id, req->chan_base);
return 0;
}
int rvu_mbox_handler_mcs_port_cfg_set(struct rvu *rvu, struct mcs_port_cfg_set_req *req,
struct msg_rsp *rsp)
{
struct mcs *mcs;
if (req->mcs_id >= rvu->mcs_blk_cnt)
return MCS_AF_ERR_INVALID_MCSID;
mcs = mcs_get_pdata(req->mcs_id);
if (mcs->hw->lmac_cnt <= req->port_id || !(mcs->hw->lmac_bmap & BIT_ULL(req->port_id)))
return -EINVAL;
mcs_set_port_cfg(mcs, req);
return 0;
}
int rvu_mbox_handler_mcs_port_cfg_get(struct rvu *rvu, struct mcs_port_cfg_get_req *req,
struct mcs_port_cfg_get_rsp *rsp)
{
struct mcs *mcs;
if (req->mcs_id >= rvu->mcs_blk_cnt)
return MCS_AF_ERR_INVALID_MCSID;
mcs = mcs_get_pdata(req->mcs_id);
if (mcs->hw->lmac_cnt <= req->port_id || !(mcs->hw->lmac_bmap & BIT_ULL(req->port_id)))
return -EINVAL;
mcs_get_port_cfg(mcs, req, rsp);
return 0;
}
int rvu_mbox_handler_mcs_custom_tag_cfg_get(struct rvu *rvu, struct mcs_custom_tag_cfg_get_req *req,
struct mcs_custom_tag_cfg_get_rsp *rsp)
{
struct mcs *mcs;
if (req->mcs_id >= rvu->mcs_blk_cnt)
return MCS_AF_ERR_INVALID_MCSID;
mcs = mcs_get_pdata(req->mcs_id);
mcs_get_custom_tag_cfg(mcs, req, rsp);
return 0;
}
int rvu_mcs_flr_handler(struct rvu *rvu, u16 pcifunc)
{
struct mcs *mcs;
int mcs_id;
/* CNF10K-B mcs0-6 are mapped to RPM2-8 */
if (rvu->mcs_blk_cnt > 1) {
for (mcs_id = 0; mcs_id < rvu->mcs_blk_cnt; mcs_id++) {
mcs = mcs_get_pdata(mcs_id);
mcs_free_all_rsrc(mcs, MCS_RX, pcifunc);
mcs_free_all_rsrc(mcs, MCS_TX, pcifunc);
}
} else {
/* CN10K-B has only one mcs block */
mcs = mcs_get_pdata(0);
mcs_free_all_rsrc(mcs, MCS_RX, pcifunc);
mcs_free_all_rsrc(mcs, MCS_TX, pcifunc);
}
return 0;
}
int rvu_mbox_handler_mcs_flowid_ena_entry(struct rvu *rvu,
struct mcs_flowid_ena_dis_entry *req,
struct msg_rsp *rsp)
{
struct mcs *mcs;
if (req->mcs_id >= rvu->mcs_blk_cnt)
return MCS_AF_ERR_INVALID_MCSID;
mcs = mcs_get_pdata(req->mcs_id);
mcs_ena_dis_flowid_entry(mcs, req->flow_id, req->dir, req->ena);
return 0;
}
int rvu_mbox_handler_mcs_pn_table_write(struct rvu *rvu,
struct mcs_pn_table_write_req *req,
struct msg_rsp *rsp)
{
struct mcs *mcs;
if (req->mcs_id >= rvu->mcs_blk_cnt)
return MCS_AF_ERR_INVALID_MCSID;
mcs = mcs_get_pdata(req->mcs_id);
mcs_pn_table_write(mcs, req->pn_id, req->next_pn, req->dir);
return 0;
}
int rvu_mbox_handler_mcs_set_pn_threshold(struct rvu *rvu,
struct mcs_set_pn_threshold *req,
struct msg_rsp *rsp)
{
struct mcs *mcs;
if (req->mcs_id >= rvu->mcs_blk_cnt)
return MCS_AF_ERR_INVALID_MCSID;
mcs = mcs_get_pdata(req->mcs_id);
mcs_pn_threshold_set(mcs, req);
return 0;
}
int rvu_mbox_handler_mcs_rx_sc_sa_map_write(struct rvu *rvu,
struct mcs_rx_sc_sa_map *req,
struct msg_rsp *rsp)
{
struct mcs *mcs;
if (req->mcs_id >= rvu->mcs_blk_cnt)
return MCS_AF_ERR_INVALID_MCSID;
mcs = mcs_get_pdata(req->mcs_id);
mcs->mcs_ops->mcs_rx_sa_mem_map_write(mcs, req);
return 0;
}
int rvu_mbox_handler_mcs_tx_sc_sa_map_write(struct rvu *rvu,
struct mcs_tx_sc_sa_map *req,
struct msg_rsp *rsp)
{
struct mcs *mcs;
if (req->mcs_id >= rvu->mcs_blk_cnt)
return MCS_AF_ERR_INVALID_MCSID;
mcs = mcs_get_pdata(req->mcs_id);
mcs->mcs_ops->mcs_tx_sa_mem_map_write(mcs, req);
mcs->tx_sa_active[req->sc_id] = req->tx_sa_active;
return 0;
}
int rvu_mbox_handler_mcs_sa_plcy_write(struct rvu *rvu,
struct mcs_sa_plcy_write_req *req,
struct msg_rsp *rsp)
{
struct mcs *mcs;
int i;
if (req->mcs_id >= rvu->mcs_blk_cnt)
return MCS_AF_ERR_INVALID_MCSID;
mcs = mcs_get_pdata(req->mcs_id);
for (i = 0; i < req->sa_cnt; i++)
mcs_sa_plcy_write(mcs, &req->plcy[i][0],
req->sa_index[i], req->dir);
return 0;
}
int rvu_mbox_handler_mcs_rx_sc_cam_write(struct rvu *rvu,
struct mcs_rx_sc_cam_write_req *req,
struct msg_rsp *rsp)
{
struct mcs *mcs;
if (req->mcs_id >= rvu->mcs_blk_cnt)
return MCS_AF_ERR_INVALID_MCSID;
mcs = mcs_get_pdata(req->mcs_id);
mcs_rx_sc_cam_write(mcs, req->sci, req->secy_id, req->sc_id);
return 0;
}
int rvu_mbox_handler_mcs_secy_plcy_write(struct rvu *rvu,
struct mcs_secy_plcy_write_req *req,
struct msg_rsp *rsp)
{
struct mcs *mcs;
if (req->mcs_id >= rvu->mcs_blk_cnt)
return MCS_AF_ERR_INVALID_MCSID;
mcs = mcs_get_pdata(req->mcs_id);
mcs_secy_plcy_write(mcs, req->plcy,
req->secy_id, req->dir);
return 0;
}
int rvu_mbox_handler_mcs_flowid_entry_write(struct rvu *rvu,
struct mcs_flowid_entry_write_req *req,
struct msg_rsp *rsp)
{
struct secy_mem_map map;
struct mcs *mcs;
if (req->mcs_id >= rvu->mcs_blk_cnt)
return MCS_AF_ERR_INVALID_MCSID;
mcs = mcs_get_pdata(req->mcs_id);
/* TODO validate the flowid */
mcs_flowid_entry_write(mcs, req->data, req->mask,
req->flow_id, req->dir);
map.secy = req->secy_id;
map.sc = req->sc_id;
map.ctrl_pkt = req->ctrl_pkt;
map.flow_id = req->flow_id;
map.sci = req->sci;
mcs->mcs_ops->mcs_flowid_secy_map(mcs, &map, req->dir);
if (req->ena)
mcs_ena_dis_flowid_entry(mcs, req->flow_id,
req->dir, true);
return 0;
}
int rvu_mbox_handler_mcs_free_resources(struct rvu *rvu,
struct mcs_free_rsrc_req *req,
struct msg_rsp *rsp)
{
u16 pcifunc = req->hdr.pcifunc;
struct mcs_rsrc_map *map;
struct mcs *mcs;
int rc;
if (req->mcs_id >= rvu->mcs_blk_cnt)
return MCS_AF_ERR_INVALID_MCSID;
mcs = mcs_get_pdata(req->mcs_id);
if (req->dir == MCS_RX)
map = &mcs->rx;
else
map = &mcs->tx;
mutex_lock(&rvu->rsrc_lock);
/* Free all the cam resources mapped to PF/VF */
if (req->all) {
rc = mcs_free_all_rsrc(mcs, req->dir, pcifunc);
goto exit;
}
switch (req->rsrc_type) {
case MCS_RSRC_TYPE_FLOWID:
rc = mcs_free_rsrc(&map->flow_ids, map->flowid2pf_map, req->rsrc_id, pcifunc);
mcs_ena_dis_flowid_entry(mcs, req->rsrc_id, req->dir, false);
break;
case MCS_RSRC_TYPE_SECY:
rc = mcs_free_rsrc(&map->secy, map->secy2pf_map, req->rsrc_id, pcifunc);
mcs_clear_secy_plcy(mcs, req->rsrc_id, req->dir);
break;
case MCS_RSRC_TYPE_SC:
rc = mcs_free_rsrc(&map->sc, map->sc2pf_map, req->rsrc_id, pcifunc);
/* Disable SC CAM only on RX side */
if (req->dir == MCS_RX)
mcs_ena_dis_sc_cam_entry(mcs, req->rsrc_id, false);
break;
case MCS_RSRC_TYPE_SA:
rc = mcs_free_rsrc(&map->sa, map->sa2pf_map, req->rsrc_id, pcifunc);
break;
}
exit:
mutex_unlock(&rvu->rsrc_lock);
return rc;
}
int rvu_mbox_handler_mcs_alloc_resources(struct rvu *rvu,
struct mcs_alloc_rsrc_req *req,
struct mcs_alloc_rsrc_rsp *rsp)
{
u16 pcifunc = req->hdr.pcifunc;
struct mcs_rsrc_map *map;
struct mcs *mcs;
int rsrc_id, i;
if (req->mcs_id >= rvu->mcs_blk_cnt)
return MCS_AF_ERR_INVALID_MCSID;
mcs = mcs_get_pdata(req->mcs_id);
if (req->dir == MCS_RX)
map = &mcs->rx;
else
map = &mcs->tx;
mutex_lock(&rvu->rsrc_lock);
if (req->all) {
rsrc_id = mcs_alloc_all_rsrc(mcs, &rsp->flow_ids[0],
&rsp->secy_ids[0],
&rsp->sc_ids[0],
&rsp->sa_ids[0],
&rsp->sa_ids[1],
pcifunc, req->dir);
goto exit;
}
switch (req->rsrc_type) {
case MCS_RSRC_TYPE_FLOWID:
for (i = 0; i < req->rsrc_cnt; i++) {
rsrc_id = mcs_alloc_rsrc(&map->flow_ids, map->flowid2pf_map, pcifunc);
if (rsrc_id < 0)
goto exit;
rsp->flow_ids[i] = rsrc_id;
rsp->rsrc_cnt++;
}
break;
case MCS_RSRC_TYPE_SECY:
for (i = 0; i < req->rsrc_cnt; i++) {
rsrc_id = mcs_alloc_rsrc(&map->secy, map->secy2pf_map, pcifunc);
if (rsrc_id < 0)
goto exit;
rsp->secy_ids[i] = rsrc_id;
rsp->rsrc_cnt++;
}
break;
case MCS_RSRC_TYPE_SC:
for (i = 0; i < req->rsrc_cnt; i++) {
rsrc_id = mcs_alloc_rsrc(&map->sc, map->sc2pf_map, pcifunc);
if (rsrc_id < 0)
goto exit;
rsp->sc_ids[i] = rsrc_id;
rsp->rsrc_cnt++;
}
break;
case MCS_RSRC_TYPE_SA:
for (i = 0; i < req->rsrc_cnt; i++) {
rsrc_id = mcs_alloc_rsrc(&map->sa, map->sa2pf_map, pcifunc);
if (rsrc_id < 0)
goto exit;
rsp->sa_ids[i] = rsrc_id;
rsp->rsrc_cnt++;
}
break;
}
rsp->rsrc_type = req->rsrc_type;
rsp->dir = req->dir;
rsp->mcs_id = req->mcs_id;
rsp->all = req->all;
exit:
if (rsrc_id < 0)
dev_err(rvu->dev, "Failed to allocate the mcs resources for PCIFUNC:%d\n", pcifunc);
mutex_unlock(&rvu->rsrc_lock);
return 0;
}
int rvu_mbox_handler_mcs_alloc_ctrl_pkt_rule(struct rvu *rvu,
struct mcs_alloc_ctrl_pkt_rule_req *req,
struct mcs_alloc_ctrl_pkt_rule_rsp *rsp)
{
u16 pcifunc = req->hdr.pcifunc;
struct mcs_rsrc_map *map;
struct mcs *mcs;
int rsrc_id;
u16 offset;
if (req->mcs_id >= rvu->mcs_blk_cnt)
return MCS_AF_ERR_INVALID_MCSID;
mcs = mcs_get_pdata(req->mcs_id);
map = (req->dir == MCS_RX) ? &mcs->rx : &mcs->tx;
mutex_lock(&rvu->rsrc_lock);
switch (req->rule_type) {
case MCS_CTRL_PKT_RULE_TYPE_ETH:
offset = MCS_CTRLPKT_ETYPE_RULE_OFFSET;
break;
case MCS_CTRL_PKT_RULE_TYPE_DA:
offset = MCS_CTRLPKT_DA_RULE_OFFSET;
break;
case MCS_CTRL_PKT_RULE_TYPE_RANGE:
offset = MCS_CTRLPKT_DA_RANGE_RULE_OFFSET;
break;
case MCS_CTRL_PKT_RULE_TYPE_COMBO:
offset = MCS_CTRLPKT_COMBO_RULE_OFFSET;
break;
case MCS_CTRL_PKT_RULE_TYPE_MAC:
offset = MCS_CTRLPKT_MAC_EN_RULE_OFFSET;
break;
}
rsrc_id = mcs_alloc_ctrlpktrule(&map->ctrlpktrule, map->ctrlpktrule2pf_map, offset,
pcifunc);
if (rsrc_id < 0)
goto exit;
rsp->rule_idx = rsrc_id;
rsp->rule_type = req->rule_type;
rsp->dir = req->dir;
rsp->mcs_id = req->mcs_id;
mutex_unlock(&rvu->rsrc_lock);
return 0;
exit:
if (rsrc_id < 0)
dev_err(rvu->dev, "Failed to allocate the mcs ctrl pkt rule for PCIFUNC:%d\n",
pcifunc);
mutex_unlock(&rvu->rsrc_lock);
return rsrc_id;
}
int rvu_mbox_handler_mcs_free_ctrl_pkt_rule(struct rvu *rvu,
struct mcs_free_ctrl_pkt_rule_req *req,
struct msg_rsp *rsp)
{
struct mcs *mcs;
int rc;
if (req->mcs_id >= rvu->mcs_blk_cnt)
return MCS_AF_ERR_INVALID_MCSID;
mcs = mcs_get_pdata(req->mcs_id);
mutex_lock(&rvu->rsrc_lock);
rc = mcs_free_ctrlpktrule(mcs, req);
mutex_unlock(&rvu->rsrc_lock);
return rc;
}
int rvu_mbox_handler_mcs_ctrl_pkt_rule_write(struct rvu *rvu,
struct mcs_ctrl_pkt_rule_write_req *req,
struct msg_rsp *rsp)
{
struct mcs *mcs;
int rc;
if (req->mcs_id >= rvu->mcs_blk_cnt)
return MCS_AF_ERR_INVALID_MCSID;
mcs = mcs_get_pdata(req->mcs_id);
rc = mcs_ctrlpktrule_write(mcs, req);
return rc;
}
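/* CN10K-B only: build the bitmap of usable MCS ports from the CGX LMACs
 * that are actually present.
 */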
static void rvu_mcs_set_lmac_bmap(struct rvu *rvu)
{
struct mcs *mcs = mcs_get_pdata(0);
unsigned long lmac_bmap = 0;
int cgx, lmac, port;
for (port = 0; port < mcs->hw->lmac_cnt; port++) {
cgx = port / rvu->hw->lmac_per_cgx;
lmac = port % rvu->hw->lmac_per_cgx;
if (!is_lmac_valid(rvu_cgx_pdata(cgx, rvu), lmac))
continue;
set_bit(port, &lmac_bmap);
}
mcs->hw->lmac_bmap = lmac_bmap;
}
int rvu_mcs_init(struct rvu *rvu)
{
struct rvu_hwinfo *hw = rvu->hw;
int lmac, err = 0, mcs_id;
struct mcs *mcs;
rvu->mcs_blk_cnt = mcs_get_blkcnt();
if (!rvu->mcs_blk_cnt)
return 0;
/* Needed only for CN10K-B */
if (rvu->mcs_blk_cnt == 1) {
err = mcs_set_lmac_channels(0, hw->cgx_chan_base);
if (err)
return err;
/* Set active lmacs */
rvu_mcs_set_lmac_bmap(rvu);
}
/* Install default tcam bypass entry and set port to operational mode */
for (mcs_id = 0; mcs_id < rvu->mcs_blk_cnt; mcs_id++) {
mcs = mcs_get_pdata(mcs_id);
mcs_install_flowid_bypass_entry(mcs);
for (lmac = 0; lmac < mcs->hw->lmac_cnt; lmac++)
mcs_set_lmac_mode(mcs, lmac, 0);
mcs->rvu = rvu;
/* Allocate memory for PFVF data */
mcs->pf = devm_kcalloc(mcs->dev, hw->total_pfs,
sizeof(struct mcs_pfvf), GFP_KERNEL);
if (!mcs->pf)
return -ENOMEM;
mcs->vf = devm_kcalloc(mcs->dev, hw->total_vfs,
sizeof(struct mcs_pfvf), GFP_KERNEL);
if (!mcs->vf)
return -ENOMEM;
}
/* Initialize the wq for handling mcs interrupts */
INIT_LIST_HEAD(&rvu->mcs_intrq_head);
INIT_WORK(&rvu->mcs_intr_work, mcs_intr_handler_task);
rvu->mcs_intr_wq = alloc_workqueue("mcs_intr_wq", 0, 0);
if (!rvu->mcs_intr_wq) {
dev_err(rvu->dev, "mcs alloc workqueue failed\n");
return -ENOMEM;
}
return err;
}
void rvu_mcs_exit(struct rvu *rvu)
{
if (!rvu->mcs_intr_wq)
return;
flush_workqueue(rvu->mcs_intr_wq);
destroy_workqueue(rvu->mcs_intr_wq);
rvu->mcs_intr_wq = NULL;
}


@@ -16,6 +16,7 @@
#include "rvu.h"
#include "rvu_reg.h"
#include "ptp.h"
#include "mcs.h"
#include "rvu_trace.h"
#include "rvu_npc_hash.h"
@@ -23,8 +24,6 @@
#define DRV_NAME "rvu_af"
#define DRV_STRING "Marvell OcteonTX2 RVU Admin Function Driver"
static int rvu_get_hwvf(struct rvu *rvu, int pcifunc);
static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
struct rvu_block *block, int lf);
static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
@@ -418,7 +417,7 @@ void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf)
*hwvf = cfg & 0xFFF;
}
static int rvu_get_hwvf(struct rvu *rvu, int pcifunc)
int rvu_get_hwvf(struct rvu *rvu, int pcifunc)
{
int pf, func;
u64 cfg;
@@ -1159,6 +1158,12 @@ cpt:
rvu_program_channels(rvu);
err = rvu_mcs_init(rvu);
if (err) {
dev_err(rvu->dev, "%s: Failed to initialize mcs\n", __func__);
goto nix_err;
}
return 0;
nix_err:
@@ -3293,6 +3298,7 @@ err_mbox:
err_hwsetup:
rvu_cgx_exit(rvu);
rvu_fwdata_exit(rvu);
rvu_mcs_exit(rvu);
rvu_reset_all_blocks(rvu);
rvu_free_hw_resources(rvu);
rvu_clear_rvum_blk_revid(rvu);
@@ -3319,6 +3325,7 @@ static void rvu_remove(struct pci_dev *pdev)
rvu_flr_wq_destroy(rvu);
rvu_cgx_exit(rvu);
rvu_fwdata_exit(rvu);
rvu_mcs_exit(rvu);
rvu_mbox_destroy(&rvu->afpf_wq_info);
rvu_disable_sriov(rvu);
rvu_reset_all_blocks(rvu);
@@ -3354,12 +3361,18 @@ static int __init rvu_init_module(void)
if (err < 0)
goto ptp_err;
err = pci_register_driver(&mcs_driver);
if (err < 0)
goto mcs_err;
err = pci_register_driver(&rvu_driver);
if (err < 0)
goto rvu_err;
return 0;
rvu_err:
pci_unregister_driver(&mcs_driver);
mcs_err:
pci_unregister_driver(&ptp_driver);
ptp_err:
pci_unregister_driver(&cgx_driver);
@@ -3370,6 +3383,7 @@ ptp_err:
static void __exit rvu_cleanup_module(void)
{
pci_unregister_driver(&rvu_driver);
pci_unregister_driver(&mcs_driver);
pci_unregister_driver(&ptp_driver);
pci_unregister_driver(&cgx_driver);
}


@@ -25,6 +25,8 @@
/* Subsystem Device ID */
#define PCI_SUBSYS_DEVID_96XX 0xB200
#define PCI_SUBSYS_DEVID_CN10K_A 0xB900
#define PCI_SUBSYS_DEVID_CNF10K_B 0xBC00
#define PCI_SUBSYS_DEVID_CN10K_B 0xBD00
/* PCI BAR nos */
#define PCI_AF_REG_BAR_NUM 0
@@ -62,6 +64,10 @@ struct rvu_debugfs {
struct dentry *nix;
struct dentry *npc;
struct dentry *cpt;
struct dentry *mcs_root;
struct dentry *mcs;
struct dentry *mcs_rx;
struct dentry *mcs_tx;
struct dump_ctx npa_aura_ctx;
struct dump_ctx npa_pool_ctx;
struct dump_ctx nix_cq_ctx;
@@ -497,6 +503,8 @@ struct rvu {
struct ptp *ptp;
int mcs_blk_cnt;
#ifdef CONFIG_DEBUG_FS
struct rvu_debugfs rvu_dbg;
#endif
@@ -504,6 +512,12 @@
/* RVU switch implementation over NPC with DMAC rules */
struct rvu_switch rswitch;
struct work_struct mcs_intr_work;
struct workqueue_struct *mcs_intr_wq;
struct list_head mcs_intrq_head;
/* mcs interrupt queue lock */
spinlock_t mcs_intrq_lock;
};
static inline void rvu_write64(struct rvu *rvu, u64 block, u64 offset, u64 val)
@@ -868,4 +882,11 @@ void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc);
int rvu_npc_set_parse_mode(struct rvu *rvu, u16 pcifunc, u64 mode, u8 dir,
u64 pkind, u8 var_len_off, u8 var_len_off_mask,
u8 shift_dir);
int rvu_get_hwvf(struct rvu *rvu, int pcifunc);
/* CN10K MCS */
int rvu_mcs_init(struct rvu *rvu);
int rvu_mcs_flr_handler(struct rvu *rvu, u16 pcifunc);
void rvu_mcs_exit(struct rvu *rvu);
#endif /* RVU_H */


@@ -19,6 +19,7 @@
#include "lmac_common.h"
#include "npc.h"
#include "rvu_npc_hash.h"
#include "mcs.h"
#define DEBUGFS_DIR_NAME "octeontx2"
@@ -227,6 +228,350 @@ static const struct file_operations rvu_dbg_##name##_fops = { \
static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf);
static int rvu_dbg_mcs_port_stats_display(struct seq_file *filp, void *unused, int dir)
{
struct mcs *mcs = filp->private;
struct mcs_port_stats stats;
int lmac;
seq_puts(filp, "\n port stats\n");
mutex_lock(&mcs->stats_lock);
for_each_set_bit(lmac, &mcs->hw->lmac_bmap, mcs->hw->lmac_cnt) {
mcs_get_port_stats(mcs, &stats, lmac, dir);
seq_printf(filp, "port%d: Tcam Miss: %lld\n", lmac, stats.tcam_miss_cnt);
seq_printf(filp, "port%d: Parser errors: %lld\n", lmac, stats.parser_err_cnt);
if (dir == MCS_RX && mcs->hw->mcs_blks > 1)
seq_printf(filp, "port%d: Preempt error: %lld\n", lmac,
stats.preempt_err_cnt);
if (dir == MCS_TX)
seq_printf(filp, "port%d: Sectag insert error: %lld\n", lmac,
stats.sectag_insert_err_cnt);
}
mutex_unlock(&mcs->stats_lock);
return 0;
}
static int rvu_dbg_mcs_rx_port_stats_display(struct seq_file *filp, void *unused)
{
return rvu_dbg_mcs_port_stats_display(filp, unused, MCS_RX);
}
RVU_DEBUG_SEQ_FOPS(mcs_rx_port_stats, mcs_rx_port_stats_display, NULL);
static int rvu_dbg_mcs_tx_port_stats_display(struct seq_file *filp, void *unused)
{
return rvu_dbg_mcs_port_stats_display(filp, unused, MCS_TX);
}
RVU_DEBUG_SEQ_FOPS(mcs_tx_port_stats, mcs_tx_port_stats_display, NULL);
static int rvu_dbg_mcs_sa_stats_display(struct seq_file *filp, void *unused, int dir)
{
struct mcs *mcs = filp->private;
struct mcs_sa_stats stats;
struct rsrc_bmap *map;
int sa_id;
if (dir == MCS_TX) {
map = &mcs->tx.sa;
mutex_lock(&mcs->stats_lock);
for_each_set_bit(sa_id, map->bmap, mcs->hw->sa_entries) {
seq_puts(filp, "\n TX SA stats\n");
mcs_get_sa_stats(mcs, &stats, sa_id, MCS_TX);
seq_printf(filp, "sa%d: Pkts encrypted: %lld\n", sa_id,
stats.pkt_encrypt_cnt);
seq_printf(filp, "sa%d: Pkts protected: %lld\n", sa_id,
stats.pkt_protected_cnt);
}
mutex_unlock(&mcs->stats_lock);
return 0;
}
/* RX stats */
map = &mcs->rx.sa;
mutex_lock(&mcs->stats_lock);
for_each_set_bit(sa_id, map->bmap, mcs->hw->sa_entries) {
seq_puts(filp, "\n RX SA stats\n");
mcs_get_sa_stats(mcs, &stats, sa_id, MCS_RX);
seq_printf(filp, "sa%d: Invalid pkts: %lld\n", sa_id, stats.pkt_invalid_cnt);
seq_printf(filp, "sa%d: Pkts no sa error: %lld\n", sa_id, stats.pkt_nosaerror_cnt);
seq_printf(filp, "sa%d: Pkts not valid: %lld\n", sa_id, stats.pkt_notvalid_cnt);
seq_printf(filp, "sa%d: Pkts ok: %lld\n", sa_id, stats.pkt_ok_cnt);
seq_printf(filp, "sa%d: Pkts no sa: %lld\n", sa_id, stats.pkt_nosa_cnt);
}
mutex_unlock(&mcs->stats_lock);
return 0;
}
static int rvu_dbg_mcs_rx_sa_stats_display(struct seq_file *filp, void *unused)
{
return rvu_dbg_mcs_sa_stats_display(filp, unused, MCS_RX);
}
RVU_DEBUG_SEQ_FOPS(mcs_rx_sa_stats, mcs_rx_sa_stats_display, NULL);
static int rvu_dbg_mcs_tx_sa_stats_display(struct seq_file *filp, void *unused)
{
return rvu_dbg_mcs_sa_stats_display(filp, unused, MCS_TX);
}
RVU_DEBUG_SEQ_FOPS(mcs_tx_sa_stats, mcs_tx_sa_stats_display, NULL);
static int rvu_dbg_mcs_tx_sc_stats_display(struct seq_file *filp, void *unused)
{
struct mcs *mcs = filp->private;
struct mcs_sc_stats stats;
struct rsrc_bmap *map;
int sc_id;
map = &mcs->tx.sc;
seq_puts(filp, "\n SC stats\n");
mutex_lock(&mcs->stats_lock);
for_each_set_bit(sc_id, map->bmap, mcs->hw->sc_entries) {
mcs_get_sc_stats(mcs, &stats, sc_id, MCS_TX);
seq_printf(filp, "\n=======sc%d======\n\n", sc_id);
seq_printf(filp, "sc%d: Pkts encrypted: %lld\n", sc_id, stats.pkt_encrypt_cnt);
seq_printf(filp, "sc%d: Pkts protected: %lld\n", sc_id, stats.pkt_protected_cnt);
if (mcs->hw->mcs_blks == 1) {
seq_printf(filp, "sc%d: Octets encrypted: %lld\n", sc_id,
stats.octet_encrypt_cnt);
seq_printf(filp, "sc%d: Octets protected: %lld\n", sc_id,
stats.octet_protected_cnt);
}
}
mutex_unlock(&mcs->stats_lock);
return 0;
}
RVU_DEBUG_SEQ_FOPS(mcs_tx_sc_stats, mcs_tx_sc_stats_display, NULL);
static int rvu_dbg_mcs_rx_sc_stats_display(struct seq_file *filp, void *unused)
{
struct mcs *mcs = filp->private;
struct mcs_sc_stats stats;
struct rsrc_bmap *map;
int sc_id;
map = &mcs->rx.sc;
seq_puts(filp, "\n SC stats\n");
mutex_lock(&mcs->stats_lock);
for_each_set_bit(sc_id, map->bmap, mcs->hw->sc_entries) {
mcs_get_sc_stats(mcs, &stats, sc_id, MCS_RX);
seq_printf(filp, "\n=======sc%d======\n\n", sc_id);
seq_printf(filp, "sc%d: Cam hits: %lld\n", sc_id, stats.hit_cnt);
seq_printf(filp, "sc%d: Invalid pkts: %lld\n", sc_id, stats.pkt_invalid_cnt);
seq_printf(filp, "sc%d: Late pkts: %lld\n", sc_id, stats.pkt_late_cnt);
seq_printf(filp, "sc%d: Notvalid pkts: %lld\n", sc_id, stats.pkt_notvalid_cnt);
seq_printf(filp, "sc%d: Unchecked pkts: %lld\n", sc_id, stats.pkt_unchecked_cnt);
if (mcs->hw->mcs_blks > 1) {
seq_printf(filp, "sc%d: Delay pkts: %lld\n", sc_id, stats.pkt_delay_cnt);
seq_printf(filp, "sc%d: Pkts ok: %lld\n", sc_id, stats.pkt_ok_cnt);
}
if (mcs->hw->mcs_blks == 1) {
seq_printf(filp, "sc%d: Octets decrypted: %lld\n", sc_id,
stats.octet_decrypt_cnt);
seq_printf(filp, "sc%d: Octets validated: %lld\n", sc_id,
stats.octet_validate_cnt);
}
}
mutex_unlock(&mcs->stats_lock);
return 0;
}
RVU_DEBUG_SEQ_FOPS(mcs_rx_sc_stats, mcs_rx_sc_stats_display, NULL);
static int rvu_dbg_mcs_flowid_stats_display(struct seq_file *filp, void *unused, int dir)
{
struct mcs *mcs = filp->private;
struct mcs_flowid_stats stats;
struct rsrc_bmap *map;
int flow_id;
seq_puts(filp, "\n Flowid stats\n");
if (dir == MCS_RX)
map = &mcs->rx.flow_ids;
else
map = &mcs->tx.flow_ids;
mutex_lock(&mcs->stats_lock);
for_each_set_bit(flow_id, map->bmap, mcs->hw->tcam_entries) {
mcs_get_flowid_stats(mcs, &stats, flow_id, dir);
seq_printf(filp, "Flowid%d: Hit:%lld\n", flow_id, stats.tcam_hit_cnt);
}
mutex_unlock(&mcs->stats_lock);
return 0;
}
static int rvu_dbg_mcs_tx_flowid_stats_display(struct seq_file *filp, void *unused)
{
return rvu_dbg_mcs_flowid_stats_display(filp, unused, MCS_TX);
}
RVU_DEBUG_SEQ_FOPS(mcs_tx_flowid_stats, mcs_tx_flowid_stats_display, NULL);
static int rvu_dbg_mcs_rx_flowid_stats_display(struct seq_file *filp, void *unused)
{
return rvu_dbg_mcs_flowid_stats_display(filp, unused, MCS_RX);
}
RVU_DEBUG_SEQ_FOPS(mcs_rx_flowid_stats, mcs_rx_flowid_stats_display, NULL);
static int rvu_dbg_mcs_tx_secy_stats_display(struct seq_file *filp, void *unused)
{
struct mcs *mcs = filp->private;
struct mcs_secy_stats stats;
struct rsrc_bmap *map;
int secy_id;
map = &mcs->tx.secy;
seq_puts(filp, "\n MCS TX secy stats\n");
mutex_lock(&mcs->stats_lock);
for_each_set_bit(secy_id, map->bmap, mcs->hw->secy_entries) {
mcs_get_tx_secy_stats(mcs, &stats, secy_id);
seq_printf(filp, "\n=======Secy%d======\n\n", secy_id);
seq_printf(filp, "secy%d: Ctrl bcast pkts: %lld\n", secy_id,
stats.ctl_pkt_bcast_cnt);
seq_printf(filp, "secy%d: Ctrl Mcast pkts: %lld\n", secy_id,
stats.ctl_pkt_mcast_cnt);
seq_printf(filp, "secy%d: Ctrl ucast pkts: %lld\n", secy_id,
stats.ctl_pkt_ucast_cnt);
seq_printf(filp, "secy%d: Ctrl octets: %lld\n", secy_id, stats.ctl_octet_cnt);
seq_printf(filp, "secy%d: Unctrl bcast cnt: %lld\n", secy_id,
stats.unctl_pkt_bcast_cnt);
seq_printf(filp, "secy%d: Unctrl mcast pkts: %lld\n", secy_id,
stats.unctl_pkt_mcast_cnt);
seq_printf(filp, "secy%d: Unctrl ucast pkts: %lld\n", secy_id,
stats.unctl_pkt_ucast_cnt);
seq_printf(filp, "secy%d: Unctrl octets: %lld\n", secy_id, stats.unctl_octet_cnt);
seq_printf(filp, "secy%d: Octet encrypted: %lld\n", secy_id,
stats.octet_encrypted_cnt);
seq_printf(filp, "secy%d: octet protected: %lld\n", secy_id,
stats.octet_protected_cnt);
seq_printf(filp, "secy%d: Pkts on active sa: %lld\n", secy_id,
stats.pkt_noactivesa_cnt);
seq_printf(filp, "secy%d: Pkts too long: %lld\n", secy_id, stats.pkt_toolong_cnt);
seq_printf(filp, "secy%d: Pkts untagged: %lld\n", secy_id, stats.pkt_untagged_cnt);
}
mutex_unlock(&mcs->stats_lock);
return 0;
}
RVU_DEBUG_SEQ_FOPS(mcs_tx_secy_stats, mcs_tx_secy_stats_display, NULL);
static int rvu_dbg_mcs_rx_secy_stats_display(struct seq_file *filp, void *unused)
{
struct mcs *mcs = filp->private;
struct mcs_secy_stats stats;
struct rsrc_bmap *map;
int secy_id;
map = &mcs->rx.secy;
seq_puts(filp, "\n MCS secy stats\n");
mutex_lock(&mcs->stats_lock);
for_each_set_bit(secy_id, map->bmap, mcs->hw->secy_entries) {
mcs_get_rx_secy_stats(mcs, &stats, secy_id);
seq_printf(filp, "\n=======Secy%d======\n\n", secy_id);
seq_printf(filp, "secy%d: Ctrl bcast pkts: %lld\n", secy_id,
stats.ctl_pkt_bcast_cnt);
seq_printf(filp, "secy%d: Ctrl Mcast pkts: %lld\n", secy_id,
stats.ctl_pkt_mcast_cnt);
seq_printf(filp, "secy%d: Ctrl ucast pkts: %lld\n", secy_id,
stats.ctl_pkt_ucast_cnt);
seq_printf(filp, "secy%d: Ctrl octets: %lld\n", secy_id, stats.ctl_octet_cnt);
seq_printf(filp, "secy%d: Unctrl bcast cnt: %lld\n", secy_id,
stats.unctl_pkt_bcast_cnt);
seq_printf(filp, "secy%d: Unctrl mcast pkts: %lld\n", secy_id,
stats.unctl_pkt_mcast_cnt);
seq_printf(filp, "secy%d: Unctrl ucast pkts: %lld\n", secy_id,
stats.unctl_pkt_ucast_cnt);
seq_printf(filp, "secy%d: Unctrl octets: %lld\n", secy_id, stats.unctl_octet_cnt);
seq_printf(filp, "secy%d: Octet decrypted: %lld\n", secy_id,
stats.octet_decrypted_cnt);
seq_printf(filp, "secy%d: octet validated: %lld\n", secy_id,
stats.octet_validated_cnt);
seq_printf(filp, "secy%d: Pkts on disable port: %lld\n", secy_id,
stats.pkt_port_disabled_cnt);
seq_printf(filp, "secy%d: Octets validated: %lld\n", secy_id, stats.pkt_badtag_cnt);
seq_printf(filp, "secy%d: Octets validated: %lld\n", secy_id, stats.pkt_nosa_cnt);
seq_printf(filp, "secy%d: Pkts with nosaerror: %lld\n", secy_id,
stats.pkt_nosaerror_cnt);
seq_printf(filp, "secy%d: Tagged ctrl pkts: %lld\n", secy_id,
stats.pkt_tagged_ctl_cnt);
seq_printf(filp, "secy%d: Untaged pkts: %lld\n", secy_id, stats.pkt_untaged_cnt);
seq_printf(filp, "secy%d: Ctrl pkts: %lld\n", secy_id, stats.pkt_ctl_cnt);
if (mcs->hw->mcs_blks > 1)
seq_printf(filp, "secy%d: pkts notag: %lld\n", secy_id,
stats.pkt_notag_cnt);
}
mutex_unlock(&mcs->stats_lock);
return 0;
}
RVU_DEBUG_SEQ_FOPS(mcs_rx_secy_stats, mcs_rx_secy_stats_display, NULL);
static void rvu_dbg_mcs_init(struct rvu *rvu)
{
struct mcs *mcs;
char dname[10];
int i;
if (!rvu->mcs_blk_cnt)
return;
rvu->rvu_dbg.mcs_root = debugfs_create_dir("mcs", rvu->rvu_dbg.root);
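/* One directory per MCS block, each holding rx_stats and tx_stats counter files */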
for (i = 0; i < rvu->mcs_blk_cnt; i++) {
mcs = mcs_get_pdata(i);
sprintf(dname, "mcs%d", i);
rvu->rvu_dbg.mcs = debugfs_create_dir(dname,
rvu->rvu_dbg.mcs_root);
rvu->rvu_dbg.mcs_rx = debugfs_create_dir("rx_stats", rvu->rvu_dbg.mcs);
debugfs_create_file("flowid", 0600, rvu->rvu_dbg.mcs_rx, mcs,
&rvu_dbg_mcs_rx_flowid_stats_fops);
debugfs_create_file("secy", 0600, rvu->rvu_dbg.mcs_rx, mcs,
&rvu_dbg_mcs_rx_secy_stats_fops);
debugfs_create_file("sc", 0600, rvu->rvu_dbg.mcs_rx, mcs,
&rvu_dbg_mcs_rx_sc_stats_fops);
debugfs_create_file("sa", 0600, rvu->rvu_dbg.mcs_rx, mcs,
&rvu_dbg_mcs_rx_sa_stats_fops);
debugfs_create_file("port", 0600, rvu->rvu_dbg.mcs_rx, mcs,
&rvu_dbg_mcs_rx_port_stats_fops);
rvu->rvu_dbg.mcs_tx = debugfs_create_dir("tx_stats", rvu->rvu_dbg.mcs);
debugfs_create_file("flowid", 0600, rvu->rvu_dbg.mcs_tx, mcs,
&rvu_dbg_mcs_tx_flowid_stats_fops);
debugfs_create_file("secy", 0600, rvu->rvu_dbg.mcs_tx, mcs,
&rvu_dbg_mcs_tx_secy_stats_fops);
debugfs_create_file("sc", 0600, rvu->rvu_dbg.mcs_tx, mcs,
&rvu_dbg_mcs_tx_sc_stats_fops);
debugfs_create_file("sa", 0600, rvu->rvu_dbg.mcs_tx, mcs,
&rvu_dbg_mcs_tx_sa_stats_fops);
debugfs_create_file("port", 0600, rvu->rvu_dbg.mcs_tx, mcs,
&rvu_dbg_mcs_tx_port_stats_fops);
}
}
#define LMT_MAPTBL_ENTRY_SIZE 16
/* Dump LMTST map table */
static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp,
@ -3053,6 +3398,7 @@ create:
rvu_dbg_npc_init(rvu);
rvu_dbg_cpt_init(rvu, BLKADDR_CPT0);
rvu_dbg_cpt_init(rvu, BLKADDR_CPT1);
rvu_dbg_mcs_init(rvu);
}
void rvu_dbg_exit(struct rvu *rvu)

View File

@ -13,5 +13,6 @@ rvu_nicvf-y := otx2_vf.o otx2_devlink.o
rvu_nicpf-$(CONFIG_DCB) += otx2_dcbnl.o
rvu_nicvf-$(CONFIG_DCB) += otx2_dcbnl.o
rvu_nicpf-$(CONFIG_MACSEC) += cn10k_macsec.o
ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af

File diff suppressed because it is too large

View File

@ -1827,4 +1827,5 @@ otx2_mbox_up_handler_ ## _fn_name(struct otx2_nic *pfvf, \
} \
EXPORT_SYMBOL(otx2_mbox_up_handler_ ## _fn_name);
MBOX_UP_CGX_MESSAGES
MBOX_UP_MCS_MESSAGES
#undef M

View File

@ -19,6 +19,7 @@
#include <net/devlink.h>
#include <linux/time64.h>
#include <linux/dim.h>
#include <uapi/linux/if_macsec.h>
#include <mbox.h>
#include <npc.h>
@ -33,6 +34,7 @@
#define PCI_DEVID_OCTEONTX2_RVU_AFVF 0xA0F8
#define PCI_SUBSYS_DEVID_96XX_RVU_PFVF 0xB200
#define PCI_SUBSYS_DEVID_CN10K_B_RVU_PFVF 0xBD00
/* PCI BAR nos */
#define PCI_CFG_REG_BAR_NUM 2
@ -244,6 +246,7 @@ struct otx2_hw {
#define CN10K_LMTST 2
#define CN10K_RPM 3
#define CN10K_PTP_ONESTEP 4
#define CN10K_HW_MACSEC 5
unsigned long cap_flag;
#define LMT_LINE_SIZE 128
@ -351,6 +354,66 @@ struct dev_hw_ops {
void (*aura_freeptr)(void *dev, int aura, u64 buf);
};
#define CN10K_MCS_SA_PER_SC 4
/* Stats which need to be accumulated in software because
* of shared counters in hardware.
*/
struct cn10k_txsc_stats {
u64 InPktsUntagged;
u64 InPktsNoTag;
u64 InPktsBadTag;
u64 InPktsUnknownSCI;
u64 InPktsNoSCI;
u64 InPktsOverrun;
};
struct cn10k_rxsc_stats {
u64 InOctetsValidated;
u64 InOctetsDecrypted;
u64 InPktsUnchecked;
u64 InPktsDelayed;
u64 InPktsOK;
u64 InPktsInvalid;
u64 InPktsLate;
u64 InPktsNotValid;
u64 InPktsNotUsingSA;
u64 InPktsUnusedSA;
};
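/* A minimal sketch of how these software stats might be maintained: because the
 * hardware counters are shared, the driver adds the per-SC counters it reads from
 * hardware into its own running totals instead of overwriting them. The helper name
 * cn10k_rxsc_stats_update() is hypothetical and only illustrates the idea; the field
 * names mirror struct mcs_sc_stats used by the AF debugfs code and the struct above.
 */
static inline void cn10k_rxsc_stats_update(struct cn10k_rxsc_stats *sw,
					   const struct mcs_sc_stats *hw)
{
	/* Accumulate the latest hardware readings into the software copy */
	sw->InOctetsValidated += hw->octet_validate_cnt;
	sw->InOctetsDecrypted += hw->octet_decrypt_cnt;
	sw->InPktsUnchecked += hw->pkt_unchecked_cnt;
	sw->InPktsDelayed += hw->pkt_delay_cnt;
	sw->InPktsOK += hw->pkt_ok_cnt;
	sw->InPktsInvalid += hw->pkt_invalid_cnt;
	sw->InPktsLate += hw->pkt_late_cnt;
	sw->InPktsNotValid += hw->pkt_notvalid_cnt;
}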
struct cn10k_mcs_txsc {
struct macsec_secy *sw_secy;
struct cn10k_txsc_stats stats;
struct list_head entry;
enum macsec_validation_type last_validate_frames;
bool last_protect_frames;
u16 hw_secy_id_tx;
u16 hw_secy_id_rx;
u16 hw_flow_id;
u16 hw_sc_id;
u16 hw_sa_id[CN10K_MCS_SA_PER_SC];
u8 sa_bmap;
u8 sa_key[CN10K_MCS_SA_PER_SC][MACSEC_MAX_KEY_LEN];
u8 encoding_sa;
};
struct cn10k_mcs_rxsc {
struct macsec_secy *sw_secy;
struct macsec_rx_sc *sw_rxsc;
struct cn10k_rxsc_stats stats;
struct list_head entry;
u16 hw_flow_id;
u16 hw_sc_id;
u16 hw_sa_id[CN10K_MCS_SA_PER_SC];
u8 sa_bmap;
u8 sa_key[CN10K_MCS_SA_PER_SC][MACSEC_MAX_KEY_LEN];
};
struct cn10k_mcs_cfg {
struct list_head txsc_list;
struct list_head rxsc_list;
};
struct otx2_nic {
void __iomem *reg_base;
struct net_device *netdev;
@ -438,6 +501,10 @@ struct otx2_nic {
/* napi event count. It is needed for adaptive irq coalescing. */
u32 napi_events;
#if IS_ENABLED(CONFIG_MACSEC)
struct cn10k_mcs_cfg *macsec_cfg;
#endif
};
static inline bool is_otx2_lbkvf(struct pci_dev *pdev)
@ -477,6 +544,11 @@ static inline bool is_dev_otx2(struct pci_dev *pdev)
midr == PCI_REVISION_ID_95XXMM || midr == PCI_REVISION_ID_95XXO);
}
static inline bool is_dev_cn10kb(struct pci_dev *pdev)
{
return pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_B_RVU_PFVF;
}
static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf)
{
struct otx2_hw *hw = &pfvf->hw;
@ -508,6 +580,9 @@ static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf)
__set_bit(CN10K_RPM, &hw->cap_flag);
__set_bit(CN10K_PTP_ONESTEP, &hw->cap_flag);
}
if (is_dev_cn10kb(pfvf->pdev))
__set_bit(CN10K_HW_MACSEC, &hw->cap_flag);
}
/* Register read/write APIs */
@ -763,6 +838,7 @@ otx2_mbox_up_handler_ ## _fn_name(struct otx2_nic *pfvf, \
struct _rsp_type *rsp); \
MBOX_UP_CGX_MESSAGES
MBOX_UP_MCS_MESSAGES
#undef M
/* Time to wait before watchdog kicks off */
@ -945,4 +1021,18 @@ int otx2_pfc_txschq_alloc(struct otx2_nic *pfvf);
int otx2_pfc_txschq_update(struct otx2_nic *pfvf);
int otx2_pfc_txschq_stop(struct otx2_nic *pfvf);
#endif
#if IS_ENABLED(CONFIG_MACSEC)
/* MACSEC offload support */
int cn10k_mcs_init(struct otx2_nic *pfvf);
void cn10k_mcs_free(struct otx2_nic *pfvf);
void cn10k_handle_mcs_event(struct otx2_nic *pfvf, struct mcs_intr_info *event);
#else
static inline int cn10k_mcs_init(struct otx2_nic *pfvf) { return 0; }
static inline void cn10k_mcs_free(struct otx2_nic *pfvf) {}
static inline void cn10k_handle_mcs_event(struct otx2_nic *pfvf,
struct mcs_intr_info *event)
{}
#endif /* CONFIG_MACSEC */
#endif /* OTX2_COMMON_H */

View File

@ -858,6 +858,15 @@ static void otx2_handle_link_event(struct otx2_nic *pf)
}
}
int otx2_mbox_up_handler_mcs_intr_notify(struct otx2_nic *pf,
struct mcs_intr_info *event,
struct msg_rsp *rsp)
{
cn10k_handle_mcs_event(pf, event);
return 0;
}
int otx2_mbox_up_handler_cgx_link_event(struct otx2_nic *pf,
struct cgx_link_info_msg *msg,
struct msg_rsp *rsp)
@ -917,6 +926,7 @@ static int otx2_process_mbox_msg_up(struct otx2_nic *pf,
return err; \
}
MBOX_UP_CGX_MESSAGES
MBOX_UP_MCS_MESSAGES
#undef M
break;
default:
@ -2764,6 +2774,10 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (err)
goto err_ptp_destroy;
err = cn10k_mcs_init(pf);
if (err)
goto err_del_mcam_entries;
if (pf->flags & OTX2_FLAG_NTUPLE_SUPPORT)
netdev->hw_features |= NETIF_F_NTUPLE;
@ -2978,6 +2992,8 @@ static void otx2_remove(struct pci_dev *pdev)
otx2_config_pause_frm(pf);
}
cn10k_mcs_free(pf);
#ifdef CONFIG_DCB
/* Disable PFC config */
if (pf->pfc_en) {