Merge branch 'mlx5-macsec-extended-packet-number-and-replay-window-offload'

Saeed Mahameed says:

====================
mlx5 MACSec Extended packet number and replay window offload

This is a follow up series to the previously submitted mlx5 MACsec offload [1]
earlier this release cycle.

In this series we add the support for MACsec Extended packet number and
replay window offloads.

First patch is a simple modification (code movements) to the core macsec code
to allow exposing the EPN related user properties to the offloading
device driver.

The rest of the patches are mlx5 specific, we start off with fixing some
trivial issues with mlx5 MACsec code, and a simple refactoring to allow
additional functionality in mlx5 macsec to support EPN and window replay
offloads.
 A) Expose mkey creation functionality to MACsec
 B) Expose ASO object to MACsec, to allow advanced steering operations,
    ASO objects are used to modify MACsec steering objects in fastpath.

1) Support MACsec offload extended packet number (EPN)

    MACsec EPN splits the packet number (PN) into two 32-bits fields,
    epn_lsb (32 least significant bits (LSBs) of PN) and epn_msb (32
    most significant bits (MSBs) of PN).
Epn_msb bits are managed by SW, and for that HW is required to send
an object change event of type EPN, notifying SW to update the
epn_msb. In addition, once epn_msb is updated, SW updates HW with
the new epn_msb value so HW can perform replay protection.
    To prevent HW from stopping while handling the event, SW manages
    another bit for HW called epn_overlap, HW uses the latter to get
    an indication regarding how to read the epn_msb value correctly
    while still receiving packets.
Add EPN event handling that updates the epn_overlap and epn_msb for
every 2^31 packets according to the following logic:
if epn_lsb crosses 2^31 (half sequence number wraparound) upon the
relevant HW event, SW updates the epn_overlap value to OLD (value = 1).
When the epn_lsb crosses 2^32 (full sequence number wraparound)
upon the relevant HW event, SW updates the epn_overlap to NEW
(value = 0) and increments the epn_msb.
When using MACsec EPN, a salt and a short secure channel id (SSCI)
need to be provided by the user; when offloading EPN, this salt and
SSCI need to be passed to the HW to be used in the initial vector
(IV) calculations.

2) Support MACsec offload replay window

Support setting the replay window size for MACsec offload.
The currently supported window sizes are 32, 64, 128 and 256
bits; other values are rejected as invalid parameters.

[1] https://lore.kernel.org/netdev/20220906052129.104507-1-saeed@kernel.org/
====================

Link: https://lore.kernel.org/r/20220921181054.40249-1-saeed@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
Jakub Kicinski 2022-09-22 18:01:36 -07:00
commit 97cfede0d1
11 changed files with 671 additions and 54 deletions

View File

@ -1134,6 +1134,7 @@ static inline bool mlx5_tx_swp_supported(struct mlx5_core_dev *mdev)
extern const struct ethtool_ops mlx5e_ethtool_ops;
int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, u32 *mkey);
int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev);
void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev);
int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb,

View File

@ -6,12 +6,54 @@
#include <linux/xarray.h>
#include "en.h"
#include "lib/aso.h"
#include "lib/mlx5.h"
#include "en_accel/macsec.h"
#include "en_accel/macsec_fs.h"
/* ASO mode value: auto-increment sequence number (used for Tx SAs). */
#define MLX5_MACSEC_ASO_INC_SN 0x2
/* Selector written to the object's aso_return_reg field. */
#define MLX5_MACSEC_ASO_REG_C_4_5 0x2
/* 2^31: mid-point of the 32-bit lower half of the packet number. */
#define MLX5_MACSEC_EPN_SCOPE_MID 0x80000000L
/* Size in bytes of the firmware macsec_aso context layout. */
#define MLX5E_MACSEC_ASO_CTX_SZ MLX5_ST_SZ_BYTES(macsec_aso)
/* Bitmask of ASO events that can be armed on a MACsec object. */
enum mlx5_macsec_aso_event_arm {
MLX5E_ASO_EPN_ARM = BIT(0),
};
enum {
/* data_offset used when re-arming the EPN event via an ASO WQE. */
MLX5_MACSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET,
};
/* Lightweight reference to one MACsec object (firmware id + index). */
struct mlx5e_macsec_handle {
struct mlx5e_macsec *macsec;
u32 obj_id;
u8 idx;
};
/* ASO operation modes understood by macsec_aso_build_ctrl(). */
enum {
MLX5_MACSEC_EPN,
};
/* Result of an ASO context query. */
struct mlx5e_macsec_aso_out {
u8 event_arm;	/* MLX5E_ASO_* bits still armed in HW */
u32 mode_param;	/* current ASO mode parameter */
};
/* Input for an ASO operation. */
struct mlx5e_macsec_aso_in {
u8 mode;	/* e.g. MLX5_MACSEC_EPN */
u32 obj_id;	/* firmware MACsec object id */
};
/* SW-tracked extended packet number state for one SA. */
struct mlx5e_macsec_epn_state {
u32 epn_msb;	/* 32 most significant bits of the PN */
u8 epn_enabled;	/* non-zero when the SA uses XPN */
u8 overlap;	/* 1 = OLD scope, 0 = NEW scope */
};
/* Deferred context for handling one MACsec object-change event. */
struct mlx5e_macsec_async_work {
struct mlx5e_macsec *macsec;
struct mlx5_core_dev *mdev;
struct work_struct work;
u32 obj_id;
};
struct mlx5e_macsec_sa {
bool active;
@ -20,11 +62,13 @@ struct mlx5e_macsec_sa {
u32 enc_key_id;
u32 next_pn;
sci_t sci;
salt_t salt;
struct rhash_head hash;
u32 fs_id;
union mlx5e_macsec_rule *macsec_rule;
struct rcu_head rcu_head;
struct mlx5e_macsec_epn_state epn_state;
};
struct mlx5e_macsec_rx_sc;
@ -43,6 +87,23 @@ struct mlx5e_macsec_rx_sc {
struct rcu_head rcu_head;
};
/* DMA-able scratch buffer that HW fills with the MACsec ASO context
 * when an ASO WQE is posted with read-enable set.
 */
struct mlx5e_macsec_umr {
dma_addr_t dma_addr;	/* DMA address of ctx[] */
u8 ctx[MLX5_ST_SZ_BYTES(macsec_aso)];	/* raw macsec_aso context written back by HW */
u32 mkey;	/* mkey covering ctx[] */
};
/* Per-device resources for MACsec advanced steering operations. */
struct mlx5e_macsec_aso {
/* ASO */
struct mlx5_aso *maso;
/* Protects macsec ASO */
struct mutex aso_lock;
/* UMR */
struct mlx5e_macsec_umr *umr;
u32 pdn;	/* protection domain for the ASO SQ and UMR mkey */
};
static const struct rhashtable_params rhash_sci = {
.key_len = sizeof_field(struct mlx5e_macsec_sa, sci),
.key_offset = offsetof(struct mlx5e_macsec_sa, sci),
@ -65,9 +126,6 @@ struct mlx5e_macsec {
struct mlx5e_macsec_fs *macsec_fs;
struct mutex lock; /* Protects mlx5e_macsec internal contexts */
/* Global PD for MACsec object ASO context */
u32 aso_pdn;
/* Tx sci -> fs id mapping handling */
struct rhashtable sci_hash; /* sci -> mlx5e_macsec_sa */
@ -78,6 +136,12 @@ struct mlx5e_macsec {
/* Stats manage */
struct mlx5e_macsec_stats stats;
/* ASO */
struct mlx5e_macsec_aso aso;
struct notifier_block nb;
struct workqueue_struct *wq;
};
struct mlx5_macsec_obj_attrs {
@ -86,8 +150,107 @@ struct mlx5_macsec_obj_attrs {
__be64 sci;
u32 enc_key_id;
bool encrypt;
struct mlx5e_macsec_epn_state epn_state;
salt_t salt;
__be32 ssci;
bool replay_protect;
u32 replay_window;
};
/* Host-order staging of the fields packed into the big-endian
 * mlx5_wqe_aso_ctrl_seg by macsec_aso_build_wqe_ctrl_seg().
 */
struct mlx5_aso_ctrl_param {
u8 data_mask_mode;
u8 condition_0_operand;
u8 condition_1_operand;
u8 condition_0_offset;
u8 condition_1_offset;
u8 data_offset;
u8 condition_operand;
u32 condition_0_data;
u32 condition_0_mask;
u32 condition_1_data;
u32 condition_1_mask;
u64 bitwise_data;
u64 data_mask;
};
/* Allocate the UMR scratch buffer that receives ASO context read-backs,
 * DMA-map it and create an mkey covering it.  On success the buffer is
 * published through aso->umr; on failure everything set up so far is
 * torn down again.
 */
static int mlx5e_macsec_aso_reg_mr(struct mlx5_core_dev *mdev, struct mlx5e_macsec_aso *aso)
{
	struct device *dev = &mdev->pdev->dev;
	struct mlx5e_macsec_umr *umr;
	dma_addr_t addr;
	int err;

	umr = kzalloc(sizeof(*umr), GFP_KERNEL);
	if (!umr)
		return -ENOMEM;

	addr = dma_map_single(dev, umr->ctx, sizeof(umr->ctx), DMA_BIDIRECTIONAL);
	err = dma_mapping_error(dev, addr);
	if (err) {
		mlx5_core_err(mdev, "Can't map dma device, err=%d\n", err);
		goto err_free;
	}

	err = mlx5e_create_mkey(mdev, aso->pdn, &umr->mkey);
	if (err) {
		mlx5_core_err(mdev, "Can't create mkey, err=%d\n", err);
		goto err_unmap;
	}

	umr->dma_addr = addr;
	aso->umr = umr;
	return 0;

err_unmap:
	dma_unmap_single(dev, addr, sizeof(umr->ctx), DMA_BIDIRECTIONAL);
err_free:
	kfree(umr);
	return err;
}
/* Undo mlx5e_macsec_aso_reg_mr(): destroy the mkey, unmap the scratch
 * buffer and free it, in reverse order of registration.
 */
static void mlx5e_macsec_aso_dereg_mr(struct mlx5_core_dev *mdev, struct mlx5e_macsec_aso *aso)
{
	struct mlx5e_macsec_umr *scratch = aso->umr;

	mlx5_core_destroy_mkey(mdev, scratch->mkey);
	dma_unmap_single(&mdev->pdev->dev, scratch->dma_addr, sizeof(scratch->ctx),
			 DMA_BIDIRECTIONAL);
	kfree(scratch);
}
static int macsec_set_replay_protection(struct mlx5_macsec_obj_attrs *attrs, void *aso_ctx)
{
u8 window_sz;
if (!attrs->replay_protect)
return 0;
switch (attrs->replay_window) {
case 256:
window_sz = MLX5_MACSEC_ASO_REPLAY_WIN_256BIT;
break;
case 128:
window_sz = MLX5_MACSEC_ASO_REPLAY_WIN_128BIT;
break;
case 64:
window_sz = MLX5_MACSEC_ASO_REPLAY_WIN_64BIT;
break;
case 32:
window_sz = MLX5_MACSEC_ASO_REPLAY_WIN_32BIT;
break;
default:
return -EINVAL;
}
MLX5_SET(macsec_aso, aso_ctx, window_size, window_sz);
MLX5_SET(macsec_aso, aso_ctx, mode, MLX5_MACSEC_ASO_REPLAY_PROTECTION);
return 0;
}
static int mlx5e_macsec_create_object(struct mlx5_core_dev *mdev,
struct mlx5_macsec_obj_attrs *attrs,
bool is_tx,
@ -104,14 +267,34 @@ static int mlx5e_macsec_create_object(struct mlx5_core_dev *mdev,
MLX5_SET(macsec_offload_obj, obj, confidentiality_en, attrs->encrypt);
MLX5_SET(macsec_offload_obj, obj, dekn, attrs->enc_key_id);
MLX5_SET64(macsec_offload_obj, obj, sci, (__force u64)(attrs->sci));
MLX5_SET(macsec_offload_obj, obj, aso_return_reg, MLX5_MACSEC_ASO_REG_C_4_5);
MLX5_SET(macsec_offload_obj, obj, macsec_aso_access_pd, attrs->aso_pdn);
MLX5_SET(macsec_aso, aso_ctx, mode_parameter, attrs->next_pn);
/* Epn */
if (attrs->epn_state.epn_enabled) {
void *salt_p;
int i;
MLX5_SET(macsec_aso, aso_ctx, epn_event_arm, 1);
MLX5_SET(macsec_offload_obj, obj, epn_en, 1);
MLX5_SET(macsec_offload_obj, obj, epn_msb, attrs->epn_state.epn_msb);
MLX5_SET(macsec_offload_obj, obj, epn_overlap, attrs->epn_state.overlap);
MLX5_SET64(macsec_offload_obj, obj, sci, (__force u64)attrs->ssci);
salt_p = MLX5_ADDR_OF(macsec_offload_obj, obj, salt);
for (i = 0; i < 3 ; i++)
memcpy((u32 *)salt_p + i, &attrs->salt.bytes[4 * (2 - i)], 4);
} else {
MLX5_SET64(macsec_offload_obj, obj, sci, (__force u64)(attrs->sci));
}
MLX5_SET(macsec_aso, aso_ctx, valid, 0x1);
if (is_tx) {
MLX5_SET(macsec_aso, aso_ctx, mode, MLX5_MACSEC_ASO_INC_SN);
MLX5_SET(macsec_aso, aso_ctx, mode_parameter, attrs->next_pn);
} else {
err = macsec_set_replay_protection(attrs, aso_ctx);
if (err)
return err;
}
/* general object fields set */
@ -175,13 +358,27 @@ static int mlx5e_macsec_init_sa(struct macsec_context *ctx,
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5_macsec_obj_attrs obj_attrs;
union mlx5e_macsec_rule *macsec_rule;
struct macsec_key *key;
int err;
obj_attrs.next_pn = sa->next_pn;
obj_attrs.sci = cpu_to_be64((__force u64)sa->sci);
obj_attrs.enc_key_id = sa->enc_key_id;
obj_attrs.encrypt = encrypt;
obj_attrs.aso_pdn = macsec->aso_pdn;
obj_attrs.aso_pdn = macsec->aso.pdn;
obj_attrs.epn_state = sa->epn_state;
if (is_tx) {
obj_attrs.ssci = cpu_to_be32((__force u32)ctx->sa.tx_sa->ssci);
key = &ctx->sa.tx_sa->key;
} else {
obj_attrs.ssci = cpu_to_be32((__force u32)ctx->sa.rx_sa->ssci);
key = &ctx->sa.rx_sa->key;
}
memcpy(&obj_attrs.salt, &key->salt, sizeof(key->salt));
obj_attrs.replay_window = ctx->secy->replay_window;
obj_attrs.replay_protect = ctx->secy->replay_protect;
err = mlx5e_macsec_create_object(mdev, &obj_attrs, is_tx, &sa->macsec_obj_id);
if (err)
@ -279,16 +476,6 @@ static bool mlx5e_macsec_secy_features_validate(struct macsec_context *ctx)
return false;
}
if (secy->xpn) {
netdev_err(netdev, "MACsec offload: xpn is not supported\n");
return false;
}
if (secy->replay_protect) {
netdev_err(netdev, "MACsec offload: replay protection is not supported\n");
return false;
}
return true;
}
@ -308,6 +495,17 @@ mlx5e_macsec_get_macsec_device_context(const struct mlx5e_macsec *macsec,
return NULL;
}
/* Seed the SA's EPN bookkeeping from the stack-provided key and packet
 * number: copy the salt, flag EPN as enabled and derive the initial
 * epn_msb/overlap pair from the two 32-bit halves of the PN.
 */
static void update_macsec_epn(struct mlx5e_macsec_sa *sa, const struct macsec_key *key,
			      const pn_t *next_pn_halves)
{
	struct mlx5e_macsec_epn_state *state = &sa->epn_state;

	sa->salt = key->salt;
	state->epn_enabled = 1;
	state->epn_msb = next_pn_halves->upper;
	/* Overlap becomes OLD (1) once the lower PN half crosses 2^31. */
	if (next_pn_halves->lower < MLX5_MACSEC_EPN_SCOPE_MID)
		state->overlap = 0;
	else
		state->overlap = 1;
}
static int mlx5e_macsec_add_txsa(struct macsec_context *ctx)
{
const struct macsec_tx_sc *tx_sc = &ctx->secy->tx_sc;
@ -350,6 +548,10 @@ static int mlx5e_macsec_add_txsa(struct macsec_context *ctx)
tx_sa->next_pn = ctx_tx_sa->next_pn_halves.lower;
tx_sa->sci = secy->sci;
tx_sa->assoc_num = assoc_num;
if (secy->xpn)
update_macsec_epn(tx_sa, &ctx_tx_sa->key, &ctx_tx_sa->next_pn_halves);
err = mlx5_create_encryption_key(mdev, ctx->sa.key, secy->key_len,
MLX5_ACCEL_OBJ_MACSEC_KEY,
&tx_sa->enc_key_id);
@ -753,6 +955,9 @@ static int mlx5e_macsec_add_rxsa(struct macsec_context *ctx)
rx_sa->assoc_num = assoc_num;
rx_sa->fs_id = rx_sc->sc_xarray_element->fs_id;
if (ctx->secy->xpn)
update_macsec_epn(rx_sa, &ctx_rx_sa->key, &ctx_rx_sa->next_pn_halves);
err = mlx5_create_encryption_key(mdev, ctx->sa.key, ctx->secy->key_len,
MLX5_ACCEL_OBJ_MACSEC_KEY,
&rx_sa->enc_key_id);
@ -1123,6 +1328,355 @@ out:
return err;
}
/* Mirror the SA's current EPN state into the object attributes used for
 * a firmware MODIFY command.
 */
static void macsec_build_accel_attrs(struct mlx5e_macsec_sa *sa,
				     struct mlx5_macsec_obj_attrs *attrs)
{
	attrs->epn_state.overlap = sa->epn_state.overlap;
	attrs->epn_state.epn_msb = sa->epn_state.epn_msb;
}
/* Pack a host-order mlx5_aso_ctrl_param into the big-endian ASO WQE
 * control segment.  The segment is always zeroed first; when the UMR
 * scratch buffer is mapped, its address and mkey are programmed with
 * ASO_CTRL_READ_EN so HW writes the current ASO context back into the
 * buffer.  A NULL @param leaves all operation fields zero (pure query).
 */
static void macsec_aso_build_wqe_ctrl_seg(struct mlx5e_macsec_aso *macsec_aso,
struct mlx5_wqe_aso_ctrl_seg *aso_ctrl,
struct mlx5_aso_ctrl_param *param)
{
memset(aso_ctrl, 0, sizeof(*aso_ctrl));
if (macsec_aso->umr->dma_addr) {
aso_ctrl->va_l = cpu_to_be32(macsec_aso->umr->dma_addr | ASO_CTRL_READ_EN);
aso_ctrl->va_h = cpu_to_be32((u64)macsec_aso->umr->dma_addr >> 32);
aso_ctrl->l_key = cpu_to_be32(macsec_aso->umr->mkey);
}
if (!param)
return;
/* Each byte below carries two sub-fields; the shifts place the
 * second sub-field in the upper bits of the byte per the WQE layout.
 */
aso_ctrl->data_mask_mode = param->data_mask_mode << 6;
aso_ctrl->condition_1_0_operand = param->condition_1_operand |
param->condition_0_operand << 4;
aso_ctrl->condition_1_0_offset = param->condition_1_offset |
param->condition_0_offset << 4;
aso_ctrl->data_offset_condition_operand = param->data_offset |
param->condition_operand << 6;
aso_ctrl->condition_0_data = cpu_to_be32(param->condition_0_data);
aso_ctrl->condition_0_mask = cpu_to_be32(param->condition_0_mask);
aso_ctrl->condition_1_data = cpu_to_be32(param->condition_1_data);
aso_ctrl->condition_1_mask = cpu_to_be32(param->condition_1_mask);
aso_ctrl->bitwise_data = cpu_to_be64(param->bitwise_data);
aso_ctrl->data_mask = cpu_to_be64(param->data_mask);
}
/* Update the EPN fields (epn_msb / epn_overlap) of an existing MACsec
 * offload object.
 *
 * The object is first queried so its modify_field_select bitmask can be
 * checked; if firmware does not allow modifying both EPN fields,
 * -EOPNOTSUPP is returned.  Otherwise a MODIFY command is issued that
 * touches only those two fields.
 * NOTE(review): @out is sized for the QUERY response and reused for the
 * MODIFY response; assumes the MODIFY output fits — confirm against the
 * command layouts.
 */
static int mlx5e_macsec_modify_obj(struct mlx5_core_dev *mdev, struct mlx5_macsec_obj_attrs *attrs,
u32 macsec_id)
{
u32 in[MLX5_ST_SZ_DW(modify_macsec_obj_in)] = {};
u32 out[MLX5_ST_SZ_DW(query_macsec_obj_out)];
u64 modify_field_select = 0;
void *obj;
int err;
/* General object fields set */
MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_MACSEC);
MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, macsec_id);
err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
if (err) {
mlx5_core_err(mdev, "Query MACsec object failed (Object id %d), err = %d\n",
macsec_id, err);
return err;
}
obj = MLX5_ADDR_OF(query_macsec_obj_out, out, macsec_object);
modify_field_select = MLX5_GET64(macsec_offload_obj, obj, modify_field_select);
/* EPN */
if (!(modify_field_select & MLX5_MODIFY_MACSEC_BITMASK_EPN_OVERLAP) ||
!(modify_field_select & MLX5_MODIFY_MACSEC_BITMASK_EPN_MSB)) {
mlx5_core_dbg(mdev, "MACsec object field is not modifiable (Object id %d)\n",
macsec_id);
return -EOPNOTSUPP;
}
/* Reuse @in: same command header, now carrying the new EPN values. */
obj = MLX5_ADDR_OF(modify_macsec_obj_in, in, macsec_object);
MLX5_SET64(macsec_offload_obj, obj, modify_field_select,
MLX5_MODIFY_MACSEC_BITMASK_EPN_OVERLAP | MLX5_MODIFY_MACSEC_BITMASK_EPN_MSB);
MLX5_SET(macsec_offload_obj, obj, epn_msb, attrs->epn_state.epn_msb);
MLX5_SET(macsec_offload_obj, obj, epn_overlap, attrs->epn_state.overlap);
/* General object fields set */
MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJECT);
return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}
static void macsec_aso_build_ctrl(struct mlx5e_macsec_aso *aso,
struct mlx5_wqe_aso_ctrl_seg *aso_ctrl,
struct mlx5e_macsec_aso_in *in)
{
struct mlx5_aso_ctrl_param param = {};
param.data_mask_mode = MLX5_ASO_DATA_MASK_MODE_BITWISE_64BIT;
param.condition_0_operand = MLX5_ASO_ALWAYS_TRUE;
param.condition_1_operand = MLX5_ASO_ALWAYS_TRUE;
if (in->mode == MLX5_MACSEC_EPN) {
param.data_offset = MLX5_MACSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET;
param.bitwise_data = BIT_ULL(54);
param.data_mask = param.bitwise_data;
}
macsec_aso_build_wqe_ctrl_seg(aso, aso_ctrl, &param);
}
/* Post an ASO WQE that re-arms an event (currently only EPN) on the
 * MACsec object identified by in->obj_id, then synchronously poll for
 * completion.  Serialized by aso_lock since the ASO SQ and the UMR
 * scratch buffer are shared.
 */
static int macsec_aso_set_arm_event(struct mlx5_core_dev *mdev, struct mlx5e_macsec *macsec,
struct mlx5e_macsec_aso_in *in)
{
struct mlx5e_macsec_aso *aso;
struct mlx5_aso_wqe *aso_wqe;
struct mlx5_aso *maso;
int err;
aso = &macsec->aso;
maso = aso->maso;
mutex_lock(&aso->aso_lock);
aso_wqe = mlx5_aso_get_wqe(maso);
mlx5_aso_build_wqe(maso, MLX5_MACSEC_ASO_DS_CNT, aso_wqe, in->obj_id,
MLX5_ACCESS_ASO_OPC_MOD_MACSEC);
macsec_aso_build_ctrl(aso, &aso_wqe->aso_ctrl, in);
mlx5_aso_post_wqe(maso, false, &aso_wqe->ctrl);
err = mlx5_aso_poll_cq(maso, false, 10);
mutex_unlock(&aso->aso_lock);
return err;
}
/* Read the current ASO context of a MACsec object.
 *
 * Posts a plain (NULL-param) ASO WQE whose control segment has
 * read-enable set, so HW writes the context into the UMR scratch
 * buffer; then parses the armed-event bit and the mode parameter out
 * of that buffer into @out.  Serialized by aso_lock, which also guards
 * the shared scratch buffer.
 */
static int macsec_aso_query(struct mlx5_core_dev *mdev, struct mlx5e_macsec *macsec,
struct mlx5e_macsec_aso_in *in, struct mlx5e_macsec_aso_out *out)
{
struct mlx5e_macsec_aso *aso;
struct mlx5_aso_wqe *aso_wqe;
struct mlx5_aso *maso;
int err;
aso = &macsec->aso;
maso = aso->maso;
mutex_lock(&aso->aso_lock);
aso_wqe = mlx5_aso_get_wqe(maso);
mlx5_aso_build_wqe(maso, MLX5_MACSEC_ASO_DS_CNT, aso_wqe, in->obj_id,
MLX5_ACCESS_ASO_OPC_MOD_MACSEC);
macsec_aso_build_wqe_ctrl_seg(aso, &aso_wqe->aso_ctrl, NULL);
mlx5_aso_post_wqe(maso, false, &aso_wqe->ctrl);
err = mlx5_aso_poll_cq(maso, false, 10);
if (err)
goto err_out;
/* Completion seen: HW has DMA'd the macsec_aso context to umr->ctx. */
if (MLX5_GET(macsec_aso, aso->umr->ctx, epn_event_arm))
out->event_arm |= MLX5E_ASO_EPN_ARM;
out->mode_param = MLX5_GET(macsec_aso, aso->umr->ctx, mode_parameter);
err_out:
mutex_unlock(&aso->aso_lock);
return err;
}
/* Find the active Tx SA whose firmware object id equals @obj_id by
 * walking every offloaded device's association numbers.  Returns NULL
 * when no active SA matches.
 */
static struct mlx5e_macsec_sa *get_macsec_tx_sa_from_obj_id(const struct mlx5e_macsec *macsec,
							    const u32 obj_id)
{
	struct mlx5e_macsec_device *dev;
	int an;

	list_for_each_entry(dev, &macsec->macsec_device_list_head, macsec_device_list_element) {
		for (an = 0; an < MACSEC_NUM_AN; an++) {
			struct mlx5e_macsec_sa *sa = dev->tx_sa[an];

			if (sa && sa->active && sa->macsec_obj_id == obj_id)
				return sa;
		}
	}

	return NULL;
}
/* Find the active Rx SA whose firmware object id equals @obj_id by
 * walking every offloaded device's Rx SCs and their association
 * numbers.  Returns NULL when no active SA matches.
 */
static struct mlx5e_macsec_sa *get_macsec_rx_sa_from_obj_id(const struct mlx5e_macsec *macsec,
							    const u32 obj_id)
{
	struct mlx5e_macsec_rx_sc *rx_sc;
	struct mlx5e_macsec_device *dev;
	int an;

	list_for_each_entry(dev, &macsec->macsec_device_list_head, macsec_device_list_element) {
		list_for_each_entry(rx_sc, &dev->macsec_rx_sc_list_head, rx_sc_list_element) {
			for (an = 0; an < MACSEC_NUM_AN; an++) {
				struct mlx5e_macsec_sa *sa = rx_sc->rx_sa[an];

				if (sa && sa->active && sa->macsec_obj_id == obj_id)
					return sa;
			}
		}
	}

	return NULL;
}
/* Advance the SW EPN state after HW consumed the EPN arm event, push
 * the new epn_msb/overlap pair to the firmware object, and re-arm the
 * event.
 * NOTE(review): the return values of mlx5e_macsec_modify_obj() and
 * macsec_aso_set_arm_event() are ignored — a failure leaves HW and SW
 * state out of sync with no diagnostic; consider logging.
 */
static void macsec_epn_update(struct mlx5e_macsec *macsec, struct mlx5_core_dev *mdev,
struct mlx5e_macsec_sa *sa, u32 obj_id, u32 mode_param)
{
struct mlx5_macsec_obj_attrs attrs = {};
struct mlx5e_macsec_aso_in in = {};
/* When the bottom of the replay protection window (mode_param) crosses 2^31 (half sequence
 * number wraparound) hence mode_param > MLX5_MACSEC_EPN_SCOPE_MID the SW should update the
 * esn_overlap to OLD (1).
 * When the bottom of the replay protection window (mode_param) crosses 2^32 (full sequence
 * number wraparound) hence mode_param < MLX5_MACSEC_EPN_SCOPE_MID since it did a
 * wraparound, the SW should update the esn_overlap to NEW (0), and increment the esn_msb.
 */
if (mode_param < MLX5_MACSEC_EPN_SCOPE_MID) {
sa->epn_state.epn_msb++;
sa->epn_state.overlap = 0;
} else {
sa->epn_state.overlap = 1;
}
macsec_build_accel_attrs(sa, &attrs);
mlx5e_macsec_modify_obj(mdev, &attrs, obj_id);
/* Re-set EPN arm event */
in.obj_id = obj_id;
in.mode = MLX5_MACSEC_EPN;
macsec_aso_set_arm_event(mdev, macsec, &in);
}
/* Deferred handler for a MACsec object-change EQE.
 *
 * Resolves the SA (Tx first, then Rx) that owns the firmware object id
 * carried by the event, queries its ASO context, and — when the EPN arm
 * bit has been consumed by HW — advances the SW EPN state and re-arms
 * the event.  Always frees the work item on exit.
 *
 * Fix: the return value of macsec_aso_query() was ignored.  On a query
 * failure @out stays zero-initialized, so the EPN branch would run with
 * event_arm == 0 and mode_param == 0 and spuriously increment epn_msb.
 * Bail out on query error instead.
 */
static void macsec_async_event(struct work_struct *work)
{
	struct mlx5e_macsec_async_work *async_work;
	struct mlx5e_macsec_aso_out out = {};
	struct mlx5e_macsec_aso_in in = {};
	struct mlx5e_macsec_sa *macsec_sa;
	struct mlx5e_macsec *macsec;
	struct mlx5_core_dev *mdev;
	u32 obj_id;
	int err;

	async_work = container_of(work, struct mlx5e_macsec_async_work, work);
	macsec = async_work->macsec;
	mdev = async_work->mdev;
	obj_id = async_work->obj_id;
	macsec_sa = get_macsec_tx_sa_from_obj_id(macsec, obj_id);
	if (!macsec_sa) {
		macsec_sa = get_macsec_rx_sa_from_obj_id(macsec, obj_id);
		if (!macsec_sa) {
			mlx5_core_dbg(mdev, "MACsec SA is not found (SA object id %d)\n", obj_id);
			goto out_async_work;
		}
	}

	/* Query MACsec ASO context */
	in.obj_id = obj_id;
	err = macsec_aso_query(mdev, macsec, &in, &out);
	if (err) {
		mlx5_core_dbg(mdev, "MACsec ASO query failed (SA object id %d), err = %d\n",
			      obj_id, err);
		goto out_async_work;
	}

	/* EPN case */
	if (macsec_sa->epn_state.epn_enabled && !(out.event_arm & MLX5E_ASO_EPN_ARM))
		macsec_epn_update(macsec, mdev, macsec_sa, obj_id, out.mode_param);

out_async_work:
	kfree(async_work);
}
/* EQ notifier callback: filter MACsec object-change events and defer
 * their handling to the ordered workqueue in process context.
 */
static int macsec_obj_change_event(struct notifier_block *nb, unsigned long event, void *data)
{
	struct mlx5e_macsec *macsec = container_of(nb, struct mlx5e_macsec, nb);
	struct mlx5e_macsec_async_work *async_work;
	struct mlx5_eqe *eqe = data;
	u16 obj_type;
	u32 obj_id;

	if (event != MLX5_EVENT_TYPE_OBJECT_CHANGE)
		return NOTIFY_DONE;

	obj_type = be16_to_cpu(eqe->data.obj_change.obj_type);
	if (obj_type != MLX5_GENERAL_OBJECT_TYPES_MACSEC)
		return NOTIFY_DONE;

	obj_id = be32_to_cpu(eqe->data.obj_change.obj_id);

	/* GFP_ATOMIC: notifier may run in atomic context. */
	async_work = kzalloc(sizeof(*async_work), GFP_ATOMIC);
	if (!async_work)
		return NOTIFY_DONE;

	async_work->macsec = macsec;
	async_work->mdev = macsec->mdev;
	async_work->obj_id = obj_id;
	INIT_WORK(&async_work->work, macsec_async_event);
	WARN_ON(!queue_work(macsec->wq, &async_work->work));

	return NOTIFY_OK;
}
/* Set up the MACsec ASO resources: allocate a PD, create the ASO SQ
 * and register the DMA-mapped UMR scratch buffer.  On any failure,
 * resources already acquired are released in reverse order.
 */
static int mlx5e_macsec_aso_init(struct mlx5e_macsec_aso *aso, struct mlx5_core_dev *mdev)
{
struct mlx5_aso *maso;
int err;
err = mlx5_core_alloc_pd(mdev, &aso->pdn);
if (err) {
mlx5_core_err(mdev,
"MACsec offload: Failed to alloc pd for MACsec ASO, err=%d\n",
err);
return err;
}
maso = mlx5_aso_create(mdev, aso->pdn);
if (IS_ERR(maso)) {
err = PTR_ERR(maso);
goto err_aso;
}
err = mlx5e_macsec_aso_reg_mr(mdev, aso);
if (err)
goto err_aso_reg;
mutex_init(&aso->aso_lock);
aso->maso = maso;
return 0;
err_aso_reg:
mlx5_aso_destroy(maso);
err_aso:
mlx5_core_dealloc_pd(mdev, aso->pdn);
return err;
}
/* Release the MACsec ASO resources in reverse order of
 * mlx5e_macsec_aso_init().  Tolerates a NULL @aso.
 */
static void mlx5e_macsec_aso_cleanup(struct mlx5e_macsec_aso *aso, struct mlx5_core_dev *mdev)
{
	if (!aso)
		return;

	mlx5e_macsec_aso_dereg_mr(mdev, aso);
	mlx5_aso_destroy(aso->maso);
	mlx5_core_dealloc_pd(mdev, aso->pdn);
}
bool mlx5e_is_macsec_device(const struct mlx5_core_dev *mdev)
{
if (!(MLX5_CAP_GEN_64(mdev, general_obj_types) &
@ -1273,19 +1827,23 @@ int mlx5e_macsec_init(struct mlx5e_priv *priv)
INIT_LIST_HEAD(&macsec->macsec_device_list_head);
mutex_init(&macsec->lock);
err = mlx5_core_alloc_pd(mdev, &macsec->aso_pdn);
if (err) {
mlx5_core_err(mdev,
"MACsec offload: Failed to alloc pd for MACsec ASO, err=%d\n",
err);
goto err_pd;
}
err = rhashtable_init(&macsec->sci_hash, &rhash_sci);
if (err) {
mlx5_core_err(mdev, "MACsec offload: Failed to init SCI hash table, err=%d\n",
err);
goto err_out;
goto err_hash;
}
err = mlx5e_macsec_aso_init(&macsec->aso, priv->mdev);
if (err) {
mlx5_core_err(mdev, "MACsec offload: Failed to init aso, err=%d\n", err);
goto err_aso;
}
macsec->wq = alloc_ordered_workqueue("mlx5e_macsec_%s", 0, priv->netdev->name);
if (!macsec->wq) {
err = -ENOMEM;
goto err_wq;
}
xa_init_flags(&macsec->sc_xarray, XA_FLAGS_ALLOC1);
@ -1302,13 +1860,20 @@ int mlx5e_macsec_init(struct mlx5e_priv *priv)
macsec->macsec_fs = macsec_fs;
macsec->nb.notifier_call = macsec_obj_change_event;
mlx5_notifier_register(mdev, &macsec->nb);
mlx5_core_dbg(mdev, "MACsec attached to netdevice\n");
return 0;
err_out:
mlx5_core_dealloc_pd(priv->mdev, macsec->aso_pdn);
err_pd:
destroy_workqueue(macsec->wq);
err_wq:
mlx5e_macsec_aso_cleanup(&macsec->aso, priv->mdev);
err_aso:
rhashtable_destroy(&macsec->sci_hash);
err_hash:
kfree(macsec);
priv->macsec = NULL;
return err;
@ -1317,15 +1882,21 @@ err_pd:
void mlx5e_macsec_cleanup(struct mlx5e_priv *priv)
{
struct mlx5e_macsec *macsec = priv->macsec;
struct mlx5_core_dev *mdev = macsec->mdev;
if (!macsec)
return;
mlx5_notifier_unregister(mdev, &macsec->nb);
mlx5e_macsec_fs_cleanup(macsec->macsec_fs);
priv->macsec = NULL;
/* Cleanup workqueue */
destroy_workqueue(macsec->wq);
mlx5_core_dealloc_pd(priv->mdev, macsec->aso_pdn);
mlx5e_macsec_aso_cleanup(&macsec->aso, mdev);
priv->macsec = NULL;
rhashtable_destroy(&macsec->sci_hash);

View File

@ -66,7 +66,6 @@ static inline void mlx5e_macsec_offload_handle_rx_skb(struct net_device *netdev,
struct mlx5_cqe64 *cqe)
{}
static inline bool mlx5e_is_macsec_device(const struct mlx5_core_dev *mdev) { return false; }
#endif /* CONFIG_MLX5_EN_MACSEC */
#endif /* __MLX5_ACCEL_EN_MACSEC_H__ */

View File

@ -46,8 +46,7 @@ void mlx5e_mkey_set_relaxed_ordering(struct mlx5_core_dev *mdev, void *mkc)
MLX5_SET(mkc, mkc, relaxed_ordering_write, ro_pci_enable && ro_write);
}
static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
u32 *mkey)
int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, u32 *mkey)
{
int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
void *mkc;

View File

@ -5055,10 +5055,6 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
}
priv->fs = fs;
err = mlx5e_macsec_init(priv);
if (err)
mlx5_core_err(mdev, "MACsec initialization failed, %d\n", err);
err = mlx5e_ipsec_init(priv);
if (err)
mlx5_core_err(mdev, "IPSec initialization failed, %d\n", err);
@ -5076,7 +5072,6 @@ static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
mlx5e_health_destroy_reporters(priv);
mlx5e_ktls_cleanup(priv);
mlx5e_ipsec_cleanup(priv);
mlx5e_macsec_cleanup(priv);
mlx5e_fs_cleanup(priv->fs);
}
@ -5202,9 +5197,14 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
{
struct net_device *netdev = priv->netdev;
struct mlx5_core_dev *mdev = priv->mdev;
int err;
mlx5e_fs_init_l2_addr(priv->fs, netdev);
err = mlx5e_macsec_init(priv);
if (err)
mlx5_core_err(mdev, "MACsec initialization failed, %d\n", err);
/* Marking the link as currently not needed by the Driver */
if (!netif_running(netdev))
mlx5e_modify_admin_state(mdev, MLX5_PORT_DOWN);
@ -5262,6 +5262,7 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
mlx5e_disable_async_events(priv);
mlx5_lag_remove_netdev(mdev, priv->netdev);
mlx5_vxlan_reset_to_default(mdev->vxlan);
mlx5e_macsec_cleanup(priv);
}
int mlx5e_update_nic_rx(struct mlx5e_priv *priv)

View File

@ -575,6 +575,9 @@ static void gather_async_events_mask(struct mlx5_core_dev *dev, u64 mask[4])
if (MLX5_CAP_GEN_MAX(dev, vhca_state))
async_event_mask |= (1ull << MLX5_EVENT_TYPE_VHCA_STATE_CHANGE);
if (MLX5_CAP_MACSEC(dev, log_max_macsec_offload))
async_event_mask |= (1ull << MLX5_EVENT_TYPE_OBJECT_CHANGE);
mask[0] = async_event_mask;
if (MLX5_CAP_GEN(dev, event_cap))

View File

@ -36,6 +36,7 @@ static struct mlx5_nb events_nbs_ref[] = {
/* Events to be forwarded (as is) to mlx5 core interfaces (mlx5e/mlx5_ib) */
{.nb.notifier_call = forward_event, .event_type = MLX5_EVENT_TYPE_PORT_CHANGE },
{.nb.notifier_call = forward_event, .event_type = MLX5_EVENT_TYPE_GENERAL_EVENT },
{.nb.notifier_call = forward_event, .event_type = MLX5_EVENT_TYPE_OBJECT_CHANGE },
/* QP/WQ resource events to forward */
{.nb.notifier_call = forward_event, .event_type = MLX5_EVENT_TYPE_DCT_DRAINED },
{.nb.notifier_call = forward_event, .event_type = MLX5_EVENT_TYPE_PATH_MIG },
@ -132,6 +133,8 @@ static const char *eqe_type_str(u8 type)
return "MLX5_EVENT_TYPE_MONITOR_COUNTER";
case MLX5_EVENT_TYPE_DEVICE_TRACER:
return "MLX5_EVENT_TYPE_DEVICE_TRACER";
case MLX5_EVENT_TYPE_OBJECT_CHANGE:
return "MLX5_EVENT_TYPE_OBJECT_CHANGE";
default:
return "Unrecognized event";
}

View File

@ -11,7 +11,9 @@
(DIV_ROUND_UP(sizeof(struct mlx5_aso_wqe), MLX5_SEND_WQE_BB))
#define MLX5_ASO_WQEBBS_DATA \
(DIV_ROUND_UP(sizeof(struct mlx5_aso_wqe_data), MLX5_SEND_WQE_BB))
#define ASO_CTRL_READ_EN BIT(0)
#define MLX5_WQE_CTRL_WQE_OPC_MOD_SHIFT 24
#define MLX5_MACSEC_ASO_DS_CNT (DIV_ROUND_UP(sizeof(struct mlx5_aso_wqe), MLX5_SEND_WQE_DS))
struct mlx5_wqe_aso_ctrl_seg {
__be32 va_h;
@ -70,6 +72,7 @@ enum {
enum {
MLX5_ACCESS_ASO_OPC_MOD_FLOW_METER = 0x2,
MLX5_ACCESS_ASO_OPC_MOD_MACSEC = 0x5,
};
struct mlx5_aso;

View File

@ -1828,6 +1828,12 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
rx_sa->sc = rx_sc;
if (secy->xpn) {
rx_sa->ssci = nla_get_ssci(tb_sa[MACSEC_SA_ATTR_SSCI]);
nla_memcpy(rx_sa->key.salt.bytes, tb_sa[MACSEC_SA_ATTR_SALT],
MACSEC_SALT_LEN);
}
/* If h/w offloading is available, propagate to the device */
if (macsec_is_offloaded(netdev_priv(dev))) {
const struct macsec_ops *ops;
@ -1850,12 +1856,6 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
goto cleanup;
}
if (secy->xpn) {
rx_sa->ssci = nla_get_ssci(tb_sa[MACSEC_SA_ATTR_SSCI]);
nla_memcpy(rx_sa->key.salt.bytes, tb_sa[MACSEC_SA_ATTR_SALT],
MACSEC_SALT_LEN);
}
nla_memcpy(rx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
rcu_assign_pointer(rx_sc->sa[assoc_num], rx_sa);
@ -2070,6 +2070,12 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
if (assoc_num == tx_sc->encoding_sa && tx_sa->active)
secy->operational = true;
if (secy->xpn) {
tx_sa->ssci = nla_get_ssci(tb_sa[MACSEC_SA_ATTR_SSCI]);
nla_memcpy(tx_sa->key.salt.bytes, tb_sa[MACSEC_SA_ATTR_SALT],
MACSEC_SALT_LEN);
}
/* If h/w offloading is available, propagate to the device */
if (macsec_is_offloaded(netdev_priv(dev))) {
const struct macsec_ops *ops;
@ -2092,12 +2098,6 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
goto cleanup;
}
if (secy->xpn) {
tx_sa->ssci = nla_get_ssci(tb_sa[MACSEC_SA_ATTR_SSCI]);
nla_memcpy(tx_sa->key.salt.bytes, tb_sa[MACSEC_SA_ATTR_SALT],
MACSEC_SALT_LEN);
}
nla_memcpy(tx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
rcu_assign_pointer(tx_sc->sa[assoc_num], tx_sa);

View File

@ -325,6 +325,7 @@ enum mlx5_event {
MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR = 0x10,
MLX5_EVENT_TYPE_WQ_ACCESS_ERROR = 0x11,
MLX5_EVENT_TYPE_SRQ_CATAS_ERROR = 0x12,
MLX5_EVENT_TYPE_OBJECT_CHANGE = 0x27,
MLX5_EVENT_TYPE_INTERNAL_ERROR = 0x08,
MLX5_EVENT_TYPE_PORT_CHANGE = 0x09,
@ -699,6 +700,12 @@ struct mlx5_eqe_temp_warning {
__be64 sensor_warning_lsb;
} __packed;
struct mlx5_eqe_obj_change {
u8 rsvd0[2];
__be16 obj_type;
__be32 obj_id;
} __packed;
#define SYNC_RST_STATE_MASK 0xf
enum sync_rst_state_type {
@ -737,6 +744,7 @@ union ev_data {
struct mlx5_eqe_xrq_err xrq_err;
struct mlx5_eqe_sync_fw_update sync_fw_update;
struct mlx5_eqe_vhca_state vhca_state;
struct mlx5_eqe_obj_change obj_change;
} __packed;
struct mlx5_eqe {

View File

@ -11558,6 +11558,20 @@ struct mlx5_ifc_modify_ipsec_obj_in_bits {
struct mlx5_ifc_ipsec_obj_bits ipsec_object;
};
enum {
MLX5_MACSEC_ASO_REPLAY_PROTECTION = 0x1,
};
enum {
MLX5_MACSEC_ASO_REPLAY_WIN_32BIT = 0x0,
MLX5_MACSEC_ASO_REPLAY_WIN_64BIT = 0x1,
MLX5_MACSEC_ASO_REPLAY_WIN_128BIT = 0x2,
MLX5_MACSEC_ASO_REPLAY_WIN_256BIT = 0x3,
};
#define MLX5_MACSEC_ASO_INC_SN 0x2
#define MLX5_MACSEC_ASO_REG_C_4_5 0x2
struct mlx5_ifc_macsec_aso_bits {
u8 valid[0x1];
u8 reserved_at_1[0x1];
@ -11585,15 +11599,15 @@ struct mlx5_ifc_macsec_offload_obj_bits {
u8 confidentiality_en[0x1];
u8 reserved_at_41[0x1];
u8 esn_en[0x1];
u8 esn_overlap[0x1];
u8 epn_en[0x1];
u8 epn_overlap[0x1];
u8 reserved_at_44[0x2];
u8 confidentiality_offset[0x2];
u8 reserved_at_48[0x4];
u8 aso_return_reg[0x4];
u8 reserved_at_50[0x10];
u8 esn_msb[0x20];
u8 epn_msb[0x20];
u8 reserved_at_80[0x8];
u8 dekn[0x18];
@ -11619,6 +11633,21 @@ struct mlx5_ifc_create_macsec_obj_in_bits {
struct mlx5_ifc_macsec_offload_obj_bits macsec_object;
};
struct mlx5_ifc_modify_macsec_obj_in_bits {
struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
struct mlx5_ifc_macsec_offload_obj_bits macsec_object;
};
enum {
MLX5_MODIFY_MACSEC_BITMASK_EPN_OVERLAP = BIT(0),
MLX5_MODIFY_MACSEC_BITMASK_EPN_MSB = BIT(1),
};
struct mlx5_ifc_query_macsec_obj_out_bits {
struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr;
struct mlx5_ifc_macsec_offload_obj_bits macsec_object;
};
struct mlx5_ifc_encryption_key_obj_bits {
u8 modify_field_select[0x40];