mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-10-29 23:53:32 +00:00
Merge branch 'hns3-next'
Huazhong Tan says:

====================
code optimizations & bugfixes for HNS3 driver

This patchset includes bugfixes and code optimizations for the HNS3
ethernet controller driver.

Change log:
V1->V2: fixes comments from Sergei Shtylyov
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit da0dde94ad
10 changed files with 164 additions and 56 deletions

@@ -43,6 +43,7 @@ enum HCLGE_MBX_OPCODE {
     HCLGE_MBX_GET_QID_IN_PF,    /* (VF -> PF) get queue id in pf */
     HCLGE_MBX_LINK_STAT_MODE,   /* (PF -> VF) link mode has changed */
     HCLGE_MBX_GET_LINK_MODE,    /* (VF -> PF) get the link mode of pf */
+    HCLGE_MBX_GET_MEDIA_TYPE,   /* (VF -> PF) get media type */

     HCLGE_MBX_GET_VF_FLR_STATUS = 200, /* (M7 -> PF) get vf reset status */
 };

@@ -76,8 +76,8 @@ static int hnae3_get_client_init_flag(struct hnae3_client *client,
     return inited;
 }

-static int hnae3_match_n_instantiate(struct hnae3_client *client,
-                     struct hnae3_ae_dev *ae_dev, bool is_reg)
+static int hnae3_init_client_instance(struct hnae3_client *client,
+                      struct hnae3_ae_dev *ae_dev)
 {
     int ret;

@@ -87,23 +87,27 @@ static int hnae3_match_n_instantiate(struct hnae3_client *client,
         return 0;
     }

-    /* now, (un-)instantiate client by calling lower layer */
-    if (is_reg) {
-        ret = ae_dev->ops->init_client_instance(client, ae_dev);
-        if (ret)
-            dev_err(&ae_dev->pdev->dev,
-                "fail to instantiate client, ret = %d\n", ret);
+    ret = ae_dev->ops->init_client_instance(client, ae_dev);
+    if (ret)
+        dev_err(&ae_dev->pdev->dev,
+            "fail to instantiate client, ret = %d\n", ret);

-        return ret;
-    }
+    return ret;
+}
+
+static void hnae3_uninit_client_instance(struct hnae3_client *client,
+                     struct hnae3_ae_dev *ae_dev)
+{
+    /* check if this client matches the type of ae_dev */
+    if (!(hnae3_client_match(client->type, ae_dev->dev_type) &&
+          hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B)))
+        return;

     if (hnae3_get_client_init_flag(client, ae_dev)) {
         ae_dev->ops->uninit_client_instance(client, ae_dev);

         hnae3_set_client_init_flag(client, ae_dev, 0);
     }
-
-    return 0;
 }

 int hnae3_register_client(struct hnae3_client *client)

@@ -129,7 +133,7 @@ int hnae3_register_client(struct hnae3_client *client)
         /* if the client could not be initialized on current port, for
          * any error reasons, move on to next available port
          */
-        ret = hnae3_match_n_instantiate(client, ae_dev, true);
+        ret = hnae3_init_client_instance(client, ae_dev);
         if (ret)
             dev_err(&ae_dev->pdev->dev,
                 "match and instantiation failed for port, ret = %d\n",

@@ -153,7 +157,7 @@ void hnae3_unregister_client(struct hnae3_client *client)
     mutex_lock(&hnae3_common_lock);
     /* un-initialize the client on every matched port */
     list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) {
-        hnae3_match_n_instantiate(client, ae_dev, false);
+        hnae3_uninit_client_instance(client, ae_dev);
     }

     list_del(&client->node);

@@ -205,7 +209,7 @@ void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo)
         * initialize the figure out client instance
         */
        list_for_each_entry(client, &hnae3_client_list, node) {
-           ret = hnae3_match_n_instantiate(client, ae_dev, true);
+           ret = hnae3_init_client_instance(client, ae_dev);
            if (ret)
                dev_err(&ae_dev->pdev->dev,
                    "match and instantiation failed, ret = %d\n",

@@ -243,7 +247,7 @@ void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo)
         * un-initialize the figure out client instance
         */
        list_for_each_entry(client, &hnae3_client_list, node)
-           hnae3_match_n_instantiate(client, ae_dev, false);
+           hnae3_uninit_client_instance(client, ae_dev);

        ae_algo->ops->uninit_ae_dev(ae_dev);
        hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0);

@@ -301,7 +305,7 @@ int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
     * initialize the figure out client instance
     */
    list_for_each_entry(client, &hnae3_client_list, node) {
-       ret = hnae3_match_n_instantiate(client, ae_dev, true);
+       ret = hnae3_init_client_instance(client, ae_dev);
        if (ret)
            dev_err(&ae_dev->pdev->dev,
                "match and instantiation failed, ret = %d\n",

@@ -343,7 +347,7 @@ void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev)
            continue;

        list_for_each_entry(client, &hnae3_client_list, node)
-           hnae3_match_n_instantiate(client, ae_dev, false);
+           hnae3_uninit_client_instance(client, ae_dev);

        ae_algo->ops->uninit_ae_dev(ae_dev);
        hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0);

@@ -1012,7 +1012,6 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
    struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
    struct hns3_desc *desc = &ring->desc[ring->next_to_use];
    struct device *dev = ring_to_dev(ring);
-   u16 bdtp_fe_sc_vld_ra_ri = 0;
    struct skb_frag_struct *frag;
    unsigned int frag_buf_num;
    int k, sizeoflast;

@@ -1080,12 +1079,30 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,

    desc_cb->length = size;

+   if (likely(size <= HNS3_MAX_BD_SIZE)) {
+       u16 bdtp_fe_sc_vld_ra_ri = 0;
+
+       desc_cb->priv = priv;
+       desc_cb->dma = dma;
+       desc_cb->type = type;
+       desc->addr = cpu_to_le64(dma);
+       desc->tx.send_size = cpu_to_le16(size);
+       hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri, frag_end);
+       desc->tx.bdtp_fe_sc_vld_ra_ri =
+           cpu_to_le16(bdtp_fe_sc_vld_ra_ri);
+
+       ring_ptr_move_fw(ring, next_to_use);
+       return 0;
+   }
+
    frag_buf_num = hns3_tx_bd_count(size);
    sizeoflast = size & HNS3_TX_LAST_SIZE_M;
    sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE;

    /* When frag size is bigger than hardware limit, split this frag */
    for (k = 0; k < frag_buf_num; k++) {
+       u16 bdtp_fe_sc_vld_ra_ri = 0;
+
        /* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */
        desc_cb->priv = priv;
        desc_cb->dma = dma + HNS3_MAX_BD_SIZE * k;

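The splitting arithmetic above is easy to check in isolation. The user-space sketch below mirrors the hns3_tx_bd_count()/sizeoflast calculation; MAX_BD_SIZE and LAST_SIZE_M are illustrative stand-ins for the driver's HNS3_MAX_BD_SIZE and HNS3_TX_LAST_SIZE_M, so the exact values may not match the real header.

/* Sketch of the BD-splitting math in hns3_fill_desc(); constants are
 * stand-ins, not the driver's real values.
 */
#include <stdio.h>

#define MAX_BD_SIZE 65536U              /* assumed per-BD hardware limit */
#define LAST_SIZE_M (MAX_BD_SIZE - 1)   /* mask for the last BD's length */

static unsigned int tx_bd_count(unsigned int size)
{
    /* number of descriptors needed for a 'size'-byte fragment */
    return (size + MAX_BD_SIZE - 1) / MAX_BD_SIZE;
}

int main(void)
{
    unsigned int size = 150000;    /* example fragment bigger than one BD */
    unsigned int bds = tx_bd_count(size);
    unsigned int sizeoflast = size & LAST_SIZE_M;

    sizeoflast = sizeoflast ? sizeoflast : MAX_BD_SIZE;

    /* 150000 bytes -> 3 BDs of 65536 + 65536 + 18928 bytes */
    printf("%u bytes -> %u BDs, last BD carries %u bytes\n",
           size, bds, sizeoflast);
    return 0;
}
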
@@ -1574,6 +1591,9 @@ static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
    struct hnae3_handle *h = hns3_get_handle(netdev);
    int ret;

+   if (hns3_nic_resetting(netdev))
+       return -EBUSY;
+
    if (!h->ae_algo->ops->set_mtu)
        return -EOPNOTSUPP;

@@ -2891,7 +2911,7 @@ static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
    struct hns3_enet_tqp_vector *tqp_vector =
        container_of(napi, struct hns3_enet_tqp_vector, napi);
    bool clean_complete = true;
-   int rx_budget;
+   int rx_budget = budget;

    if (unlikely(test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) {
        napi_complete(napi);

@@ -2905,7 +2925,8 @@ static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
        hns3_clean_tx_ring(ring);

    /* make sure rx ring budget not smaller than 1 */
-   rx_budget = max(budget / tqp_vector->num_tqps, 1);
+   if (tqp_vector->num_tqps > 1)
+       rx_budget = max(budget / tqp_vector->num_tqps, 1);

    hns3_for_each_ring(ring, tqp_vector->rx_group) {
        int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget,

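The rx_budget change above skips the division entirely when the vector serves a single queue pair, so that ring keeps the full NAPI budget. A small sketch of the resulting split, with example numbers only (rx_budget_for() is an invented helper name):

/* Sketch of the rx budget split in hns3_nic_common_poll(). */
#include <stdio.h>

static int rx_budget_for(int budget, int num_tqps)
{
    int rx_budget = budget;

    /* divide the budget across rings, but never below 1 */
    if (num_tqps > 1)
        rx_budget = budget / num_tqps > 1 ? budget / num_tqps : 1;

    return rx_budget;
}

int main(void)
{
    printf("%d\n", rx_budget_for(64, 1));   /* 64: single ring keeps it all */
    printf("%d\n", rx_budget_for(64, 4));   /* 16 */
    printf("%d\n", rx_budget_for(64, 128)); /* 1: clamped to at least 1 */
    return 0;
}
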
@@ -3773,12 +3794,13 @@ static int hns3_recover_hw_addr(struct net_device *ndev)
    struct netdev_hw_addr *ha, *tmp;
    int ret = 0;

+   netif_addr_lock_bh(ndev);
    /* go through and sync uc_addr entries to the device */
    list = &ndev->uc;
    list_for_each_entry_safe(ha, tmp, &list->list, list) {
        ret = hns3_nic_uc_sync(ndev, ha->addr);
        if (ret)
-           return ret;
+           goto out;
    }

    /* go through and sync mc_addr entries to the device */

@@ -3786,9 +3808,11 @@ static int hns3_recover_hw_addr(struct net_device *ndev)
    list_for_each_entry_safe(ha, tmp, &list->list, list) {
        ret = hns3_nic_mc_sync(ndev, ha->addr);
        if (ret)
-           return ret;
+           goto out;
    }

+out:
+   netif_addr_unlock_bh(ndev);
    return ret;
 }

@@ -3799,6 +3823,7 @@ static void hns3_remove_hw_addr(struct net_device *netdev)

    hns3_nic_uc_unsync(netdev, netdev->dev_addr);

+   netif_addr_lock_bh(netdev);
    /* go through and unsync uc_addr entries to the device */
    list = &netdev->uc;
    list_for_each_entry_safe(ha, tmp, &list->list, list)

@@ -3809,6 +3834,8 @@ static void hns3_remove_hw_addr(struct net_device *netdev)
    list_for_each_entry_safe(ha, tmp, &list->list, list)
        if (ha->refcount > 1)
            hns3_nic_mc_unsync(netdev, ha->addr);
+
+   netif_addr_unlock_bh(netdev);
 }

 static void hns3_clear_tx_ring(struct hns3_enet_ring *ring)

@@ -4101,7 +4128,7 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
    struct hns3_nic_priv *priv = netdev_priv(netdev);
    int ret;

-   if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
+   if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
        netdev_warn(netdev, "already uninitialized\n");
        return 0;
    }

@@ -4123,8 +4150,6 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
    hns3_put_ring_config(priv);
    priv->ring_data = NULL;

-   clear_bit(HNS3_NIC_STATE_INITED, &priv->state);
-
    return ret;
 }

@@ -577,18 +577,13 @@ union l4_hdr_info {
    unsigned char *hdr;
 };

-/* the distance between [begin, end) in a ring buffer
- * note: there is a unuse slot between the begin and the end
- */
-static inline int ring_dist(struct hns3_enet_ring *ring, int begin, int end)
-{
-   return (end - begin + ring->desc_num) % ring->desc_num;
-}
-
 static inline int ring_space(struct hns3_enet_ring *ring)
 {
-   return ring->desc_num -
-       ring_dist(ring, ring->next_to_clean, ring->next_to_use) - 1;
+   int begin = ring->next_to_clean;
+   int end = ring->next_to_use;
+
+   return ((end >= begin) ? (ring->desc_num - end + begin) :
+           (begin - end)) - 1;
 }

 static inline int is_ring_empty(struct hns3_enet_ring *ring)

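The rewritten ring_space() drops the modulo in the removed ring_dist() helper while still accounting for the one slot deliberately kept unused to distinguish a full ring from an empty one. A stand-alone sketch of the same math, with plain int parameters instead of the driver's ring structure and example indices:

/* Sketch of the reworked ring_space() calculation; values are examples. */
#include <stdio.h>

static int ring_space(int desc_num, int next_to_clean, int next_to_use)
{
    int begin = next_to_clean;
    int end = next_to_use;

    /* one slot always stays unused */
    return ((end >= begin) ? (desc_num - end + begin)
                           : (begin - end)) - 1;
}

int main(void)
{
    /* 1024-entry ring, producer at 100, consumer at 90:
     * used = 10, free = 1024 - 10 - 1 = 1013
     */
    printf("space = %d\n", ring_space(1024, 90, 100));

    /* wrapped case: producer at 5, consumer at 1000:
     * used = 1024 - 1000 + 5 = 29, free = 1024 - 29 - 1 = 994
     */
    printf("space = %d\n", ring_space(1024, 1000, 5));
    return 0;
}
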
@@ -483,6 +483,11 @@ static void hns3_get_stats(struct net_device *netdev,
    struct hnae3_handle *h = hns3_get_handle(netdev);
    u64 *p = data;

+   if (hns3_nic_resetting(netdev)) {
+       netdev_err(netdev, "dev resetting, could not get stats\n");
+       return;
+   }
+
    if (!h->ae_algo->ops->get_stats || !h->ae_algo->ops->update_stats) {
        netdev_err(netdev, "could not get any statistics\n");
        return;

@@ -648,6 +653,10 @@ static int hns3_get_link_ksettings(struct net_device *netdev,
 static int hns3_set_link_ksettings(struct net_device *netdev,
                   const struct ethtool_link_ksettings *cmd)
 {
+   /* Chip doesn't support this mode. */
+   if (cmd->base.speed == SPEED_1000 && cmd->base.duplex == DUPLEX_HALF)
+       return -EINVAL;
+
    /* Only support ksettings_set for netdev with phy attached for now */
    if (netdev->phydev)
        return phy_ethtool_ksettings_set(netdev->phydev, cmd);

@@ -12,6 +12,7 @@
 #include <linux/pci.h>
 #include <linux/platform_device.h>
 #include <linux/if_vlan.h>
+#include <linux/crash_dump.h>
 #include <net/rtnetlink.h>
 #include "hclge_cmd.h"
 #include "hclge_dcb.h"

@@ -1015,6 +1016,23 @@ static int hclge_get_cap(struct hclge_dev *hdev)
    return ret;
 }

+static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
+{
+#define HCLGE_MIN_TX_DESC  64
+#define HCLGE_MIN_RX_DESC  64
+
+   if (!is_kdump_kernel())
+       return;
+
+   dev_info(&hdev->pdev->dev,
+        "Running kdump kernel. Using minimal resources\n");
+
+   /* minimal queue pairs equals to the number of vports */
+   hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
+   hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
+   hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
+}
+
 static int hclge_configure(struct hclge_dev *hdev)
 {
    struct hclge_cfg cfg;

@@ -1074,6 +1092,8 @@ static int hclge_configure(struct hclge_dev *hdev)

    hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;

+   hclge_init_kdump_kernel_config(hdev);
+
    return ret;
 }

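hclge_init_kdump_kernel_config() trades throughput for memory when the driver is probed under a crash-dump kernel: one queue pair per vport and minimum descriptor counts. A rough model of that sizing rule is sketched below; struct cfg and apply_kdump_limits() are invented names, and the vport/VF numbers are made up (the real values come from the queried device configuration).

/* Toy model of the kdump resource limits; all numbers are illustrative. */
#include <stdbool.h>
#include <stdio.h>

#define MIN_TX_DESC 64
#define MIN_RX_DESC 64

struct cfg {
    unsigned int num_vmdq_vport;
    unsigned int num_req_vfs;
    unsigned int num_tqps;
    unsigned int num_tx_desc;
    unsigned int num_rx_desc;
};

static void apply_kdump_limits(struct cfg *c, bool kdump)
{
    if (!kdump)
        return;

    /* one queue pair per vport: VMDq vports + VFs + the PF itself */
    c->num_tqps = c->num_vmdq_vport + c->num_req_vfs + 1;
    c->num_tx_desc = MIN_TX_DESC;
    c->num_rx_desc = MIN_RX_DESC;
}

int main(void)
{
    struct cfg c = { .num_vmdq_vport = 1, .num_req_vfs = 8,
                     .num_tqps = 16, .num_tx_desc = 1024,
                     .num_rx_desc = 1024 };

    apply_kdump_limits(&c, true);
    printf("tqps=%u tx_desc=%u rx_desc=%u\n",
           c.num_tqps, c.num_tx_desc, c.num_rx_desc);
    return 0;
}
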
@@ -5942,8 +5962,11 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport,
    }

    /* check if we just hit the duplicate */
-   if (!ret)
-       ret = -EINVAL;
+   if (!ret) {
+       dev_warn(&hdev->pdev->dev, "VF %d mac(%pM) exists\n",
+            vport->vport_id, addr);
+       return 0;
+   }

    dev_err(&hdev->pdev->dev,
        "PF failed to add unicast entry(%pM) in the MAC table\n",

@@ -6293,7 +6316,8 @@ static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
        return -EINVAL;
    }

-   if (!is_first && hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
+   if ((!is_first || is_kdump_kernel()) &&
+       hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
        dev_warn(&hdev->pdev->dev,
             "remove old uc mac address fail.\n");

@@ -385,24 +385,32 @@ static int hclge_get_vf_queue_depth(struct hclge_vport *vport,
                    HCLGE_TQPS_DEPTH_INFO_LEN);
 }

+static int hclge_get_vf_media_type(struct hclge_vport *vport,
+                  struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+{
+   struct hclge_dev *hdev = vport->back;
+   u8 resp_data;
+
+   resp_data = hdev->hw.mac.media_type;
+   return hclge_gen_resp_to_vf(vport, mbx_req, 0, &resp_data,
+                   sizeof(resp_data));
+}
+
 static int hclge_get_link_info(struct hclge_vport *vport,
                struct hclge_mbx_vf_to_pf_cmd *mbx_req)
 {
    struct hclge_dev *hdev = vport->back;
    u16 link_status;
-   u8 msg_data[10];
-   u16 media_type;
+   u8 msg_data[8];
    u8 dest_vfid;
    u16 duplex;

    /* mac.link can only be 0 or 1 */
    link_status = (u16)hdev->hw.mac.link;
    duplex = hdev->hw.mac.duplex;
-   media_type = hdev->hw.mac.media_type;
    memcpy(&msg_data[0], &link_status, sizeof(u16));
    memcpy(&msg_data[2], &hdev->hw.mac.speed, sizeof(u32));
    memcpy(&msg_data[6], &duplex, sizeof(u16));
-   memcpy(&msg_data[8], &media_type, sizeof(u16));
    dest_vfid = mbx_req->mbx_src_vfid;

    /* send this requested info to VF */

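With this change the link-info message the PF sends to the VF shrinks back to 8 bytes, and the media type travels in its own HCLGE_MBX_GET_MEDIA_TYPE response instead. A sketch of the remaining payload layout, following the memcpy() offsets in the hunk above; the field values are made up and the byte order shown is simply the host's:

/* Sketch of the 8-byte PF -> VF link-info payload after this change. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    uint8_t msg_data[8];
    uint16_t link_status = 1;   /* link up */
    uint32_t speed = 25000;     /* example speed value */
    uint16_t duplex = 1;        /* example: full duplex */

    /* bytes 0-1: link status, 2-5: speed, 6-7: duplex */
    memcpy(&msg_data[0], &link_status, sizeof(link_status));
    memcpy(&msg_data[2], &speed, sizeof(speed));
    memcpy(&msg_data[6], &duplex, sizeof(duplex));

    for (size_t i = 0; i < sizeof(msg_data); i++)
        printf("%02x ", msg_data[i]);
    printf("\n");
    return 0;
}
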
@@ -662,6 +670,13 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
            hclge_rm_vport_all_vlan_table(vport, true);
            mutex_unlock(&hdev->vport_cfg_mutex);
            break;
+       case HCLGE_MBX_GET_MEDIA_TYPE:
+           ret = hclge_get_vf_media_type(vport, req);
+           if (ret)
+               dev_err(&hdev->pdev->dev,
+                   "PF fail(%d) to media type for VF\n",
+                   ret);
+           break;
        default:
            dev_err(&hdev->pdev->dev,
                "un-supported mailbox message, code = %d\n",

@@ -27,26 +27,39 @@ static int hclgevf_ring_space(struct hclgevf_cmq_ring *ring)
    return ring->desc_num - used - 1;
 }

+static int hclgevf_is_valid_csq_clean_head(struct hclgevf_cmq_ring *ring,
+                      int head)
+{
+   int ntu = ring->next_to_use;
+   int ntc = ring->next_to_clean;
+
+   if (ntu > ntc)
+       return head >= ntc && head <= ntu;
+
+   return head >= ntc || head <= ntu;
+}
+
 static int hclgevf_cmd_csq_clean(struct hclgevf_hw *hw)
 {
+   struct hclgevf_dev *hdev = container_of(hw, struct hclgevf_dev, hw);
    struct hclgevf_cmq_ring *csq = &hw->cmq.csq;
-   u16 ntc = csq->next_to_clean;
-   struct hclgevf_desc *desc;
    int clean = 0;
    u32 head;

-   desc = &csq->desc[ntc];
    head = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG);
-   while (head != ntc) {
-       memset(desc, 0, sizeof(*desc));
-       ntc++;
-       if (ntc == csq->desc_num)
-           ntc = 0;
-       desc = &csq->desc[ntc];
-       clean++;
-   }
-   csq->next_to_clean = ntc;
+   rmb(); /* Make sure head is ready before touch any data */
+
+   if (!hclgevf_is_valid_csq_clean_head(csq, head)) {
+       dev_warn(&hdev->pdev->dev, "wrong cmd head (%d, %d-%d)\n", head,
+            csq->next_to_use, csq->next_to_clean);
+       dev_warn(&hdev->pdev->dev,
+            "Disabling any further commands to IMP firmware\n");
+       set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
+
+       return -EIO;
+   }

+   clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num;
+   csq->next_to_clean = head;
    return clean;
 }

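The new hclgevf_is_valid_csq_clean_head() rejects a bogus head pointer reported by firmware before the modulo arithmetic advances next_to_clean. A stand-alone sketch of the check and the clean count, using plain ints and example ring indices rather than the driver's structures:

/* Sketch of the CSQ clean-head validation and clean count; example values. */
#include <stdbool.h>
#include <stdio.h>

static bool valid_clean_head(int ntc, int ntu, int head)
{
    /* the reported head must sit inside the in-flight window between
     * next_to_clean and next_to_use, wrap-around included
     */
    if (ntu > ntc)
        return head >= ntc && head <= ntu;

    return head >= ntc || head <= ntu;
}

int main(void)
{
    int desc_num = 1024, ntc = 1000, ntu = 10, head = 4;

    if (!valid_clean_head(ntc, ntu, head)) {
        printf("wrong cmd head, further commands would be disabled\n");
        return 1;
    }

    /* (4 - 1000 + 1024) % 1024 = 28 descriptors cleaned in one pass */
    int clean = (head - ntc + desc_num) % desc_num;

    printf("cleaned %d descriptors\n", clean);
    return 0;
}
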
@@ -307,6 +307,25 @@ static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id)
    return qid_in_pf;
 }

+static int hclgevf_get_pf_media_type(struct hclgevf_dev *hdev)
+{
+   u8 resp_msg;
+   int ret;
+
+   ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_MEDIA_TYPE, 0, NULL, 0,
+                  true, &resp_msg, sizeof(resp_msg));
+   if (ret) {
+       dev_err(&hdev->pdev->dev,
+           "VF request to get the pf port media type failed %d",
+           ret);
+       return ret;
+   }
+
+   hdev->hw.mac.media_type = resp_msg;
+
+   return 0;
+}
+
 static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
 {
    struct hclgevf_tqp *tqp;

@@ -1824,6 +1843,10 @@ static int hclgevf_configure(struct hclgevf_dev *hdev)
    if (ret)
        return ret;

+   ret = hclgevf_get_pf_media_type(hdev);
+   if (ret)
+       return ret;
+
    /* get tc configuration from PF */
    return hclgevf_get_tc_info(hdev);
 }

@@ -272,7 +272,6 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
            link_status = le16_to_cpu(msg_q[1]);
            memcpy(&speed, &msg_q[2], sizeof(speed));
            duplex = (u8)le16_to_cpu(msg_q[4]);
-           hdev->hw.mac.media_type = (u8)le16_to_cpu(msg_q[5]);

            /* update upper layer with new link link status */
            hclgevf_update_link_status(hdev, link_status);